use byteorder::{BigEndian, ByteOrder};
use eeprom24x::{ic, Eeprom24x, SlaveAddr};
use hal::hal::blocking::delay::DelayMs;
use hal::hal::blocking::i2c::{Write, WriteRead};
use hash32::{Hasher, Murmur3Hasher};

// EEPROM: 512 pages * 32 bytes = 16,384 bytes
// Index size: 32 pages * 8 refs = 256 slots
// Max key size: 12 bytes
// Max record size: 4095 bytes
//
// Store header layout
// | magic | version | index pages |
// | 32    | 8       | 8           |
//
// Record ref layout
// | page no | len | hash |
// | 9       | 12  | 11   |
//
// Record page layout
// | key len | key         | value       |
// | 8       | key len * 8 | val len * 8 |
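//
// Example ref packing (illustrative values, not taken from a real device):
// a record starting at page 40 with total length 100 and key hash 0x2A3 packs as
//   0x2A3 | (100 << 11) | (40 << 23) = 0x1403_22A3,
// stored big-endian in its 4-byte index slot as the bytes [0x14, 0x03, 0x22, 0xA3].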
const MAGIC: &[u8; 4] = b"fpgb"; | |
const VERSION: u8 = 1; | |
const TOTAL_PAGES: usize = 512; | |
const INDEX_PAGES: usize = 32; | |
const PAGE_SIZE: usize = 32; | |
const REF_SIZE: usize = 4; | |
const REFS_PER_PAGE: usize = PAGE_SIZE / REF_SIZE; | |
const RECORD_PAGES: usize = TOTAL_PAGES - INDEX_PAGES - 1; | |
const REFS_LEN: usize = INDEX_PAGES * REFS_PER_PAGE; | |
const HOLES_LEN: usize = REFS_LEN / 2; | |
const MAX_KEY_LEN: usize = 12; | |
const MAX_VAL_LEN: usize = 4095; | |
const WRITE_DELAY_MS: u32 = 5; | |
#[derive(Debug)] | |
pub enum StoreError<E> { | |
StoreNotFound, | |
StoreClosed, | |
Overflow, | |
ValueOverflow, | |
KeyNofFound, | |
InvalidVersion, | |
BusError(eeprom24x::Error<E>), | |
} | |
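
/// Key-value store on top of a 24x128 (16 KiB) I2C EEPROM, addressed here as
/// 512 pages of 32 bytes: one header page, `INDEX_PAGES` pages of packed
/// record refs, and record pages for the data itself.
///
/// A minimal usage sketch; the `i2c` and `delay` values are assumed to come
/// from the target HAL and are not tied to any particular board:
///
/// ```ignore
/// let mut store = Store::new(i2c, delay);
/// if store.open().is_err() {
///     store.create().unwrap();
/// }
/// store.insert(b"boot", &[1, 2, 3]).unwrap();
/// let mut buf = [0u8; 3];
/// let read = store.load_val(b"boot", 0, &mut buf).unwrap();
/// ```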
pub struct Store<I2C, D> {
    is_open: bool,
    delay: D,
    eeprom: Eeprom24x<I2C, ic::IC24x128>,
    holes: [Hole; HOLES_LEN],
    refs: [RecordRef; REFS_LEN],
}

impl<I2C, D, E> Store<I2C, D>
where
    I2C: Write<Error = E> + WriteRead<Error = E>,
    E: core::fmt::Debug,
    D: DelayMs<u32>,
{
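    /// Wraps the I2C device and delay provider. The in-memory index and hole
    /// list start out empty; call `create()` or `open()` before using the store.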
    pub fn new(dev: I2C, delay: D) -> Store<I2C, D> {
        let mut store = Store {
            eeprom: Eeprom24x::new_24x128(dev, SlaveAddr::Default),
            refs: [RecordRef::default(); REFS_LEN],
            holes: [Hole::default(); HOLES_LEN],
            delay,
            is_open: false,
        };
        store.reset();
        store
    }
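
    /// Formats the EEPROM: zeroes the header and index pages, then writes a
    /// fresh store header (magic, version, number of index pages).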
    pub fn create(&mut self) -> Result<(), StoreError<E>> {
        self.reset();
        let mut buf = [0; PAGE_SIZE];
        for ref_page_idx in 0..=INDEX_PAGES {
            self.write(ref_page_idx as u16, 0, &buf)?;
        }
        buf[0..4].copy_from_slice(MAGIC);
        buf[4] = VERSION;
        buf[5] = INDEX_PAGES as u8;
        self.write(0, 0, &buf)?;
        self.is_open = true;
        Ok(())
    }
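
    /// Opens an existing store: validates the header, loads every record ref
    /// from the index pages and reserves the pages of active records in the
    /// hole list.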
    pub fn open(&mut self) -> Result<(), StoreError<E>> {
        self.reset();
        let mut page = [0; PAGE_SIZE];
        self.read(0, 0, &mut page)?;
        if &page[0..4] != MAGIC {
            return Err(StoreError::StoreNotFound);
        }
        let ver = page[4];
        let ref_pages = page[5] as usize;
        if ver != VERSION || ref_pages != INDEX_PAGES {
            return Err(StoreError::InvalidVersion);
        }
        for page_idx in 0..INDEX_PAGES {
            self.read(page_idx as u16 + 1, 0, &mut page)?;
            for ref_idx in 0..REFS_PER_PAGE {
                let offset = REF_SIZE * ref_idx;
                let ref_idx = ref_idx + page_idx * REFS_PER_PAGE;
                let mut rec_ref = RecordRef::deserialize(&page[offset..(offset + REF_SIZE)]);
                rec_ref.idx = ref_idx;
                if rec_ref.active() {
                    self.alloc(Some(rec_ref.page), rec_ref.pages())
                        .ok_or(StoreError::Overflow)?;
                }
                self.refs[ref_idx] = rec_ref;
            }
        }
        self.is_open = true;
        Ok(())
    }

    pub fn contains_key(&mut self, key: &[u8]) -> Result<bool, StoreError<E>> {
        self.find_ref(key).map(|r| r.is_some())
    }
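
    /// Stores `val` under `key`. Any existing record with the same key is
    /// removed first; pages are taken from the largest free hole and the
    /// updated ref is written back to the index.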
    pub fn insert(&mut self, key: &[u8], val: &[u8]) -> Result<(), StoreError<E>> {
        assert!(!val.is_empty());
        if !self.is_open {
            return Err(StoreError::StoreClosed);
        }
        if self.contains_key(key)? {
            self.remove(key)?;
        }
        let mut rec_ref = if let Some(rec_ref) = self.refs.iter().find(|r| !r.active()) {
            *rec_ref
        } else {
            return Err(StoreError::Overflow);
        };
        rec_ref.len = 1 + key.len() as u16 + val.len() as u16;
        assert!(rec_ref.len as usize <= MAX_VAL_LEN);
        if let Some(free_page) = self.alloc(None, rec_ref.pages()) {
            rec_ref.page = free_page;
        } else {
            return Err(StoreError::Overflow);
        }
        let mut buf = [0; PAGE_SIZE];
        let rec_end = usize::min(PAGE_SIZE, rec_ref.len as usize);
        let val_start = key.len() + 1;
        let chunk_len = rec_end - val_start;
        buf[0] = key.len() as u8;
        buf[1..val_start].copy_from_slice(key);
        buf[val_start..rec_end].copy_from_slice(&val[..chunk_len]);
        self.write(rec_ref.page, 0, &buf[0..rec_end])?;
        if rec_ref.len as usize > PAGE_SIZE {
            let val_offset = val.len() - (rec_ref.len as usize - PAGE_SIZE);
            for (idx, chunk) in val[val_offset..].chunks(PAGE_SIZE).enumerate() {
                let page = rec_ref.page + idx as u16 + 1;
                self.write(page, 0, chunk)?
            }
        }
        rec_ref.hash = RecordRef::hash(key);
        self.refs[rec_ref.idx] = rec_ref;
        self.save_ref(&rec_ref)?;
        Ok(())
    }
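
    /// Appends `val` to an existing record. Any extra pages needed must be
    /// free immediately after the record, otherwise `Overflow` is returned.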
    pub fn append(&mut self, key: &[u8], val: &[u8]) -> Result<(), StoreError<E>> {
        assert!(!val.is_empty());
        if !self.is_open {
            return Err(StoreError::StoreClosed);
        }
        let mut rec_ref = if let Some(rec_ref) = self.find_ref(key)? {
            rec_ref
        } else {
            return Err(StoreError::KeyNotFound);
        };
        assert!(rec_ref.len + val.len() as u16 <= MAX_VAL_LEN as u16);
        let initial_pages = rec_ref.pages();
        let initial_len = rec_ref.len as usize;
        let last_page = rec_ref.page + rec_ref.pages();
        rec_ref.len += val.len() as u16;
        let new_pages = rec_ref.pages() - initial_pages;
        if new_pages > 0 && self.alloc(Some(last_page), new_pages).is_none() {
            return Err(StoreError::Overflow);
        }
        let offset = initial_len % PAGE_SIZE;
        let first_chunk = if offset > 0 {
            usize::min(val.len(), PAGE_SIZE - offset)
        } else {
            0
        };
        if first_chunk > 0 {
            self.write(last_page - 1, offset as u8, &val[0..first_chunk])?;
        }
        for (idx, chunk) in val[first_chunk..].chunks(PAGE_SIZE).enumerate() {
            let page = last_page + idx as u16;
            self.write(page, 0, chunk)?
        }
        self.refs[rec_ref.idx] = rec_ref;
        self.save_ref(&rec_ref)?;
        Ok(())
    }
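
    /// Deletes the record for `key`, if present: its pages are returned to
    /// the hole list and its index slot is cleared.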
    pub fn remove(&mut self, key: &[u8]) -> Result<(), StoreError<E>> {
        if !self.is_open {
            return Err(StoreError::StoreClosed);
        }
        if let Some(mut rec_ref) = self.find_ref(key)? {
            self.dealloc(rec_ref.page, rec_ref.pages());
            rec_ref.page = 0;
            rec_ref.len = 0;
            rec_ref.hash = 0;
            self.refs[rec_ref.idx] = rec_ref;
            self.save_ref(&rec_ref)?;
        }
        Ok(())
    }
    pub fn load_val(
        &mut self,
        key: &[u8],
        offset: u16,
        buf: &mut [u8],
    ) -> Result<u16, StoreError<E>> {
        assert!(!buf.is_empty());
        if !self.is_open {
            return Err(StoreError::StoreClosed);
        }
        let rec_ref = if let Some(rec_ref) = self.find_ref(key)? {
            rec_ref
        } else {
            return Err(StoreError::KeyNotFound);
        };
        assert!(
            offset < (rec_ref.len - key.len() as u16 - 1),
            "Offset >= Value length"
        );
        let val_offset = offset as usize + key.len() + 1;
        let read_len = usize::min(
            rec_ref.len.saturating_sub(val_offset as u16) as usize,
            buf.len(),
        );
        let page = rec_ref.page as usize + (val_offset / PAGE_SIZE);
        let offset = val_offset % PAGE_SIZE;
        self.read(page as u16, offset as u8, &mut buf[0..read_len])?;
        Ok(read_len as u16)
    }
    fn alloc(&mut self, begin: Option<u16>, pages: u16) -> Option<u16> {
        assert!(pages > 0);
        if let Some(begin) = begin {
            assert!(begin > INDEX_PAGES as u16);
            match self
                .holes
                .iter_mut()
                .find(|h| begin >= h.from && begin < h.to && h.size() - (begin - h.from) >= pages)
            {
                Some(hole) if hole.from == begin => {
                    hole.from += pages;
                    Some(begin)
                }
                Some(hole) => {
                    let hole_end = hole.to;
                    hole.to = begin;
                    if let Some(slot) = self.holes.iter_mut().find(|h| h.size() == 0) {
                        slot.from = begin + pages;
                        slot.to = hole_end;
                    } else {
                        return None;
                    };
                    Some(begin)
                }
                _ => None,
            }
        } else {
            match self.holes.iter_mut().filter(|h| h.size() >= pages).max() {
                Some(hole) => {
                    let start = hole.from;
                    hole.from += pages;
                    Some(start)
                }
                _ => None,
            }
        }
    }
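
    /// Returns `size` pages starting at `begin` to the hole list, merging
    /// with an adjacent hole when possible. If neither an adjacent hole nor
    /// an empty slot is available, the pages are silently dropped from the
    /// free list.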
    fn dealloc(&mut self, begin: u16, size: u16) {
        if let Some(hole) = self.holes.iter_mut().find(|h| h.to == begin) {
            hole.to += size;
        } else if let Some(hole) = self.holes.iter_mut().find(|h| h.from == begin + size) {
            hole.from = begin;
        } else if let Some(slot) = self.holes.iter_mut().find(|h| h.size() == 0) {
            slot.from = begin;
            slot.to = begin + size;
        }
    }
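
    /// Looks up the record ref for `key`: candidates are selected by the
    /// 11-bit key hash, then the key bytes stored at the start of the record
    /// page are compared to resolve collisions.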
    fn find_ref(&mut self, key: &[u8]) -> Result<Option<RecordRef>, StoreError<E>> {
        assert!(!key.is_empty() && key.len() <= MAX_KEY_LEN);
        if !self.is_open {
            return Err(StoreError::StoreClosed);
        }
        let hash = RecordRef::hash(key);
        let mut buf = [0; MAX_KEY_LEN + 1];
        for skip in 0..REFS_LEN {
            if let Some(rec_ref) = self
                .refs
                .iter()
                .filter(|r| r.active() && r.hash == hash)
                .nth(skip)
                .copied()
            {
                self.read(rec_ref.page, 0, &mut buf)?;
                let key_len = buf[0] as usize;
                if &buf[1..(key_len + 1)] == key {
                    return Ok(Some(rec_ref));
                }
            } else {
                return Ok(None);
            }
        }
        Ok(None)
    }
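
    /// Writes a single packed record ref into its 4-byte slot in the index
    /// pages.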
    fn save_ref(&mut self, rec_ref: &RecordRef) -> Result<(), StoreError<E>> {
        let mut buf = [0; REF_SIZE];
        rec_ref.serialize(&mut buf);
        let ref_page = rec_ref.idx / REFS_PER_PAGE;
        let ref_offset = rec_ref.idx - ref_page * REFS_PER_PAGE;
        self.write(ref_page as u16 + 1, (ref_offset * REF_SIZE) as u8, &buf)
    }
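
    /// Reads `buff.len()` bytes starting at the given page and in-page
    /// offset; sequential EEPROM reads may cross page boundaries.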
    fn read(&mut self, page: u16, offset: u8, buff: &mut [u8]) -> Result<(), StoreError<E>> {
        let mut address: [u8; 2] = [0; 2];
        let offset = offset as usize + page as usize * PAGE_SIZE;
        BigEndian::write_u16(&mut address, offset as u16);
        self.eeprom
            .read_data(&address, buff)
            .map_err(StoreError::BusError)?;
        Ok(())
    }
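
    /// Writes `data` within a single EEPROM page and waits out the device's
    /// write cycle before returning.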
    fn write(&mut self, page: u16, offset: u8, data: &[u8]) -> Result<(), StoreError<E>> {
        assert!(PAGE_SIZE >= data.len() + offset as usize);
        let mut address: [u8; 2] = [0; 2];
        let offset = offset as usize + page as usize * PAGE_SIZE;
        BigEndian::write_u16(&mut address, offset as u16);
        self.eeprom
            .write_page(&address, data)
            .map_err(StoreError::BusError)?;
        self.delay.delay_ms(WRITE_DELAY_MS);
        Ok(())
    }
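
    /// Clears the in-memory index and collapses the hole list to a single
    /// hole covering all record pages.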
    fn reset(&mut self) {
        for (idx, rec_ref) in self.refs.iter_mut().enumerate() {
            rec_ref.idx = idx;
            rec_ref.page = 0;
            rec_ref.len = 0;
            rec_ref.hash = 0;
        }
        for (idx, hole) in self.holes.iter_mut().enumerate() {
            hole.idx = idx;
            hole.from = 0;
            hole.to = 0;
        }
        self.holes[0].from = INDEX_PAGES as u16 + 1;
        self.holes[0].to = INDEX_PAGES as u16 + RECORD_PAGES as u16 + 1;
    }
}
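
/// A run of free record pages, `from` inclusive to `to` exclusive. Holes are
/// ordered by size so the allocator can pick the largest one.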
#[derive(Debug, Default, Copy, Clone)]
pub struct Hole {
    idx: usize,
    from: u16,
    to: u16,
}

impl core::cmp::Ord for Hole {
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        self.size().cmp(&other.size())
    }
}

impl core::cmp::PartialOrd for Hole {
    fn partial_cmp(&self, other: &Hole) -> Option<core::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl core::cmp::PartialEq for Hole {
    fn eq(&self, other: &Hole) -> bool {
        self.from == other.from && self.to == other.to
    }
}

impl core::cmp::Eq for Hole {}

impl Hole {
    pub fn size(self) -> u16 {
        self.to - self.from
    }
}
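
/// In-memory record reference: the record's first page, its total length
/// (key length byte + key + value) and an 11-bit key hash, packed into a
/// 4-byte slot in the index pages.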
#[derive(Debug, Default, Copy, Clone)]
pub struct RecordRef {
    idx: usize,
    hash: u16,
    page: u16,
    len: u16,
}

impl RecordRef {
    pub fn hash(buf: &[u8]) -> u16 {
        let mut hasher = Murmur3Hasher::default();
        hasher.write(buf);
        hasher.finish() as u16 & 2047
    }

    pub fn active(self) -> bool {
        self.page != 0
    }

    pub fn pages(self) -> u16 {
        let page_size = PAGE_SIZE as u16;
        let pages = self.len / page_size;
        if self.len - (pages * page_size) > 0 {
            pages + 1
        } else {
            pages
        }
    }

    pub fn deserialize(buf: &[u8]) -> RecordRef {
        assert!(buf.len() >= 4);
        let val = BigEndian::read_u32(&buf[0..4]);
        let hash = val as u16 & 2047;
        let len = (val >> 11) as u16 & 4095;
        let page = (val >> 23) as u16 & 511;
        RecordRef {
            idx: 0,
            len,
            page,
            hash,
        }
    }

    pub fn serialize(self, buf: &mut [u8]) {
        assert!(buf.len() >= 4);
        let val = (self.hash as u32 & 2047)
            | (self.len as u32 & 4095) << 11
            | (self.page as u32 & 511) << 23;
        BigEndian::write_u32(&mut buf[0..4], val);
    }
}