Move the last of our Matcha-specific stuff out of the Tock tree
Change-Id: Ieb4e987dc4153bb5e873cc8c7c983db2df2b87d4
diff --git a/blob_fs/.gitignore b/blob_fs/.gitignore
new file mode 100644
index 0000000..a9d37c5
--- /dev/null
+++ b/blob_fs/.gitignore
@@ -0,0 +1,2 @@
+target
+Cargo.lock
diff --git a/blob_fs/Cargo.toml b/blob_fs/Cargo.toml
new file mode 100644
index 0000000..b99aa17
--- /dev/null
+++ b/blob_fs/Cargo.toml
@@ -0,0 +1,4 @@
+[package]
+name = "blob_fs"
+version = "0.1.0"
+edition = "2018"
diff --git a/blob_fs/src/bit_vector.rs b/blob_fs/src/bit_vector.rs
new file mode 100644
index 0000000..336b4a2
--- /dev/null
+++ b/blob_fs/src/bit_vector.rs
@@ -0,0 +1,293 @@
+use crate::errors::*;
+use crate::utils::*;
+
+pub struct BitVector<'a> {
+ pub bits: &'a mut [u32],
+}
+
+impl<'a> BitVector<'a> {
+ pub fn new(bits: &'a mut [u32]) -> Self {
+ return BitVector { bits: bits };
+ }
+
+ pub fn check_pos(&self, pos: usize) -> Result<(), BFSErr> {
+ dcheck!(pos <= self.bits.len() * 32, BFSErr::OutOfBounds);
+ return Ok(());
+ }
+
+ pub fn check_range(&self, begin: usize, end: usize) -> Result<(), BFSErr> {
+ self.check_pos(begin)?;
+ self.check_pos(end)?;
+ dcheck!(end >= begin, BFSErr::OutOfBounds);
+ return Ok(());
+ }
+
+ pub fn clear_all(&mut self) {
+ for i in 0..self.bits.len() {
+ self.bits[i] = 0x00000000;
+ }
+ }
+
+ pub fn set_all(&mut self) {
+ for i in 0..self.bits.len() {
+ self.bits[i] = 0xFFFFFFFF;
+ }
+ }
+
+ pub fn get_bit(&self, pos: usize) -> Result<u32, BFSErr> {
+ self.check_pos(pos)?;
+ return Ok((self.bits[pos >> 5] >> (pos & 31)) & 1);
+ }
+
+ pub fn set_bit(&mut self, pos: usize) -> Result<(), BFSErr> {
+ self.check_pos(pos)?;
+ self.bits[pos >> 5] |= 1 << (pos & 31);
+ return Ok(());
+ }
+
+ pub fn clear_bit(&mut self, pos: usize) -> Result<(), BFSErr> {
+ self.check_pos(pos)?;
+ self.bits[pos >> 5] &= !(1 << (pos & 31));
+ return Ok(());
+ }
+
+ fn bit_mask(head: usize, tail: usize) -> Result<u32, BFSErr> {
+ dcheck!(head < 32, BFSErr::OutOfBounds);
+ dcheck!(tail < 32, BFSErr::OutOfBounds);
+
+ let a = 0xFFFFFFFF << head;
+ let b = 0xFFFFFFFF >> (32 - tail - 1);
+ return Ok(a & b);
+ }
+
+ pub fn set_range(&mut self, begin: usize, end: usize) -> Result<(), BFSErr> {
+ self.check_range(begin, end)?;
+
+ let block_head = begin >> 5;
+ let block_tail = (end - 1) >> 5;
+ let bit_head = begin & 31;
+ let bit_tail = (end - 1) & 31;
+
+ if block_head == block_tail {
+ let mask = BitVector::bit_mask(bit_head, bit_tail)?;
+ self.bits[block_head as usize] |= mask;
+ } else {
+ let mask_head = BitVector::bit_mask(bit_head, 31)?;
+ self.bits[block_head as usize] |= mask_head;
+
+ for i in block_head + 1..block_tail {
+ self.bits[i as usize] = 0xFFFFFFFF;
+ }
+
+ let mask_tail = BitVector::bit_mask(0, bit_tail)?;
+ self.bits[block_tail as usize] |= mask_tail;
+ }
+
+ return Ok(());
+ }
+
+ pub fn clear_range(&mut self, begin: usize, end: usize) -> Result<(), BFSErr> {
+ self.check_range(begin, end)?;
+
+ let block_head = begin >> 5;
+ let block_tail = (end - 1) >> 5;
+ let bit_head = begin & 31;
+ let bit_tail = (end - 1) & 31;
+
+ if block_head == block_tail {
+ let mask = BitVector::bit_mask(bit_head, bit_tail)?;
+ self.bits[block_head] &= !mask;
+ } else {
+ let mask_head = BitVector::bit_mask(bit_head, 31)?;
+ self.bits[block_head] &= !mask_head;
+
+ for i in block_head + 1..block_tail {
+ self.bits[i] = 0x00000000;
+ }
+
+ let mask_tail = BitVector::bit_mask(0, bit_tail)?;
+ self.bits[block_tail] &= !mask_tail;
+ }
+
+ return Ok(());
+ }
+
+ pub fn count_range(&mut self, begin: usize, end: usize) -> Result<usize, BFSErr> {
+ self.check_range(begin, end)?;
+
+ let block_head = begin >> 5;
+ let block_tail = (end - 1) >> 5;
+ let bit_head = begin & 31;
+ let bit_tail = (end - 1) & 31;
+
+ let mut count: usize = 0;
+
+ if block_head == block_tail {
+ let mask = BitVector::bit_mask(bit_head, bit_tail)?;
+ count += (self.bits[block_head] & mask).count_ones() as usize;
+ } else {
+ let mask_head = BitVector::bit_mask(bit_head, 31)?;
+ count += (self.bits[block_head] & mask_head).count_ones() as usize;
+
+ for i in block_head + 1..block_tail {
+ count += self.bits[i].count_ones() as usize;
+ }
+
+ let mask_tail = BitVector::bit_mask(0, bit_tail)?;
+ count += (self.bits[block_tail] & mask_tail).count_ones() as usize;
+ }
+
+ return Ok(count);
+ }
+
+ pub fn find_hole(&self, mut begin: usize, end: usize, width: usize) -> Result<usize, BFSErr> {
+ self.check_range(begin, end)?;
+ dcheck!(width <= end - begin, BFSErr::NotFound);
+
+ let mut skip = false;
+ let mut block_head = begin >> 5;
+ let block_tail = (end - 1) >> 5;
+
+ while (block_head <= block_tail) && (self.bits[block_head] == 0xFFFFFFFF) {
+ skip = true;
+ block_head = block_head + 1;
+ }
+
+ if block_head > block_tail {
+ return Err(BFSErr::NotFound);
+ }
+
+ if skip {
+ begin = block_head << 5;
+ }
+
+ for mut i in begin..=(end - width) {
+ skip = false;
+ for j in (i..i + width).rev() {
+ if self.get_bit(j)? == 1 {
+ i = j + 1;
+ skip = true;
+ break;
+ }
+ }
+ if !skip {
+ return Ok(i);
+ }
+ }
+
+ return Err(BFSErr::NotFound);
+ }
+
+ pub fn find_span(&self, mut begin: usize, end: usize, width: usize) -> Result<usize, BFSErr> {
+ self.check_range(begin, end)?;
+ dcheck!(width <= end - begin, BFSErr::NotFound);
+
+ let mut skip = false;
+ let mut block_head = begin >> 5;
+ let block_tail = (end - 1) >> 5;
+
+ while (block_head <= block_tail) && (self.bits[block_head] == 0x00000000) {
+ skip = true;
+ block_head = block_head + 1;
+ }
+
+ if block_head > block_tail {
+ return Err(BFSErr::NotFound);
+ }
+
+ if skip {
+ begin = block_head << 5;
+ }
+
+ for mut i in begin..=(end - width) {
+ skip = false;
+ for j in (i..i + width).rev() {
+ if self.get_bit(j)? == 0 {
+ i = j + 1;
+ skip = true;
+ break;
+ }
+ }
+ if !skip {
+ return Ok(i);
+ }
+ }
+
+ return Err(BFSErr::NotFound);
+ }
+}
+
+/// For all vector sizes up to <N> and all possible hole sizes & positions in
+/// that range, create a vector consisting of set bits outside the hole and
+/// cleared bits inside the hole.
+///
+/// Verify that findHole() always finds holes equal to or smaller than the one
+/// punched and fails to find holes larger than the one punched.
+///
+/// (This test is a bit slow, so we limit maximum bit vector size to 96 and run
+/// tests in this crate in optimized mode)
+
+#[test]
+fn test_find_hole() {
+ for size in 1..=96 as usize {
+ let block_count = (size + 31) / 32;
+ let mut bits: Vec<u32> = vec![0xFFFFFFFF as u32; block_count];
+ let mut bit_vec = BitVector::new(bits.as_mut());
+ for width in 1..=size - 1 {
+ for begin in 0..=(size - width) {
+ // Punch a hole in the bit vector.
+ let end = begin + width;
+ assert_ok!(bit_vec.clear_range(begin, end));
+
+ // We should be able to find the hole if we look for it.
+ assert_ok!(bit_vec.find_hole(0, size, width));
+
+ // We should be able to find a hole smaller than the one we punched.
+ assert_ok!(bit_vec.find_hole(0, size, width - 1));
+
+ // If we look for a hole larger than the one we punched, we should
+ // find nothing.
+ assert_err!(bit_vec.find_hole(0, size, width + 1));
+
+ // Fill the hole back up.
+ bit_vec.set_range(begin, end).unwrap();
+
+ // We should no longer be able to find it.
+ assert_err!(bit_vec.find_hole(0, size, width));
+ }
+ }
+ }
+}
+
+/// Same as above, but set bits (spans) instead of holes.
+#[test]
+fn test_find_span() {
+ for size in 1..=96 {
+ let block_count = (size + 31) / 32;
+ let mut bits: Vec<u32> = vec![0x00000000 as u32; block_count];
+ let mut bit_vec = BitVector::new(bits.as_mut());
+ for width in 1..=size - 1 {
+ for begin in 0..=(size - width) {
+ // Create a span in the bit vector.
+ let end = begin + width;
+ assert_ok!(bit_vec.set_range(begin, end));
+
+ // We should be able to find the span if we look for it.
+ assert_ok!(bit_vec.find_span(0, size, width));
+
+ // We should be able to find a span smaller than the one we created.
+ assert_ok!(bit_vec.find_span(0, size, width - 1));
+
+ // If we look for a span larger than the one we created, we should
+ // find nothing.
+ assert_err!(bit_vec.find_span(0, size, width + 1));
+
+ // Erase the span
+ bit_vec.clear_range(begin, end).unwrap();
+
+ // We should no longer be able to find it.
+ assert_err!(bit_vec.find_span(0, size, width));
+ }
+ }
+ }
+}
diff --git a/blob_fs/src/blob_device.rs b/blob_fs/src/blob_device.rs
new file mode 100644
index 0000000..0a6c48f
--- /dev/null
+++ b/blob_fs/src/blob_device.rs
@@ -0,0 +1,206 @@
+/// A wrapper around BlockDevice that adds helper methods for navigating a
+/// BlobFS filesystem.
+use crate::bit_vector::*;
+use crate::block_device::*;
+use crate::errors::*;
+use crate::structs::*;
+use crate::utils::*;
+
+use core::mem;
+
+pub struct BlobDevice<'a> {
+ pub bd: &'a mut dyn BlockDevice,
+}
+
+impl<'a> BlobDevice<'a> {
+ const MAGIC_0: u64 = 0xac2153479e694d21;
+ const MAGIC_1: u64 = 0x985000d4d4d3d314;
+
+ pub fn new(bd: &'a mut dyn BlockDevice) -> Self {
+ return BlobDevice { bd: bd };
+ }
+
+ pub fn sanity_check(&self) -> Result<(), BFSErr> {
+ return Ok(());
+ }
+
+ /// Base address fetchers
+
+ pub fn superblock_base(&self) -> usize {
+ return self.bd.geom().block_size * 0;
+ }
+
+ pub fn blockmap_base(&self) -> usize {
+ return self.bd.geom().block_size * 1;
+ }
+
+ pub fn nodemap_base(&self) -> usize {
+ return self.bd.geom().block_size * 2;
+ }
+
+ pub fn journal_base(&self) -> usize {
+ return self.bd.geom().block_size * 3;
+ }
+
+ pub fn block_base(&self) -> usize {
+ return self.bd.geom().block_size * 4;
+ }
+
+ /// Format a whole device to support BlobFS.
+
+ pub fn format(&mut self) -> Result<(), BFSErr> {
+ // Erase the whole device.
+ for i in 0..self.bd.geom().block_count {
+ self.bd.erase_block(i)?;
+ }
+
+ // Write the initial superblock.
+ self.format_superblock()?;
+
+ // Write the initial blockmap.
+ self.format_blockmap()?;
+
+ return Ok(());
+ }
+
+ pub fn read_superblock(&self, superblock: &mut Superblock) -> Result<(), BFSErr> {
+ let blob = as_unsafe_blob_mut(superblock);
+ self.bd.read_range(0, blob)?;
+
+ // Sanity check the superblock
+ dcheck!(superblock.magic0 == BlobDevice::MAGIC_0, BFSErr::Corrupt);
+ dcheck!(superblock.magic1 == BlobDevice::MAGIC_1, BFSErr::Corrupt);
+
+ return Ok(());
+ }
+
+ pub fn write_superblock(&mut self, superblock: &Superblock) -> Result<(), BFSErr> {
+ // Sanity check the superblock
+ dcheck!(superblock.magic0 == BlobDevice::MAGIC_0, BFSErr::Corrupt);
+ dcheck!(superblock.magic1 == BlobDevice::MAGIC_1, BFSErr::Corrupt);
+
+ let blob = as_unsafe_blob(superblock);
+ self.bd.write_range(0, blob)?;
+ return Ok(());
+ }
+
+ pub fn format_superblock(&mut self) -> Result<(), BFSErr> {
+ let block_size = self.bd.geom().block_size;
+ let block_count = self.bd.geom().block_count;
+
+ let superblock_count = 1;
+ let bitmap_block_count = 1;
+ let node_block_count = 1;
+ let journal_block_count = 1;
+ let data_block_count = block_count
+ - superblock_count
+ - bitmap_block_count
+ - node_block_count
+ - journal_block_count;
+
+ let inodes_per_block = block_size / mem::size_of::<Inode>();
+
+ // Write the superblock to offset 0
+ let superblock = Superblock {
+ magic0: BlobDevice::MAGIC_0,
+ magic1: BlobDevice::MAGIC_1,
+ version: 0,
+ flags: 0,
+ block_size: block_size as u32,
+ unused: 0,
+ data_block_count: data_block_count as u64,
+ journal_block_count: journal_block_count as u64,
+ inode_count: inodes_per_block as u64,
+ alloc_block_count: 0,
+ alloc_inode_count: 0,
+ };
+ self.write_superblock(&superblock)?;
+ return Ok(());
+ }
+
+ pub fn format_blockmap(&mut self) -> Result<(), BFSErr> {
+ let bits: [u8; 1] = [0xF0];
+ self.bd.write_range(self.blockmap_base(), &bits)?;
+ return Ok(());
+ }
+
+ pub fn read_blockmap(&self, bitmap: &mut BitVector) -> Result<(), BFSErr> {
+ self.bd.read_range(self.blockmap_base(), slice_as_unsafe_blob_mut(bitmap.bits))?;
+ return Ok(());
+ }
+
+ pub fn write_blockmap(&mut self, bitmap: &mut BitVector) -> Result<(), BFSErr> {
+ self.bd.write_range(self.blockmap_base(), slice_as_unsafe_blob(bitmap.bits))?;
+ return Ok(());
+ }
+
+ pub fn read_node_header(&self, index: usize, node: &mut NodeHeader) -> Result<(), BFSErr> {
+ let base = self.nodemap_base();
+ let offset = index * mem::size_of::<Inode>();
+ let blob = as_unsafe_blob_mut(node);
+ self.bd.read_range(base + offset, blob)?;
+ return Ok(());
+ }
+
+ pub fn read_inode(&self, index: usize, inode: &mut Inode) -> Result<(), BFSErr> {
+ let base = self.nodemap_base();
+ let offset = index * mem::size_of::<Inode>();
+ let blob = as_unsafe_blob_mut(inode);
+ self.bd.read_range(base + offset, blob)?;
+ return Ok(());
+ }
+
+ pub fn write_inode(&mut self, index: usize, inode: &Inode) -> Result<(), BFSErr> {
+ dcheck!((inode.header.flags & NodeHeader::FLAG_INODE) != 0, BFSErr::InvalidArg);
+ let base = self.nodemap_base();
+ let offset = index * mem::size_of::<Inode>();
+ let blob = as_unsafe_blob(inode);
+ self.bd.write_range(base + offset, blob)?;
+ return Ok(());
+ }
+
+ /// Invalidate an inode by zeroing out its header. The inode cannot be reused
+ /// until the block containing it is erased.
+ pub fn invalidate_inode(&mut self, index: usize) -> Result<(), BFSErr> {
+ let base = self.nodemap_base();
+ let offset = index * mem::size_of::<Inode>();
+ let blob = [0; mem::size_of::<NodeHeader>()];
+ self.bd.overwrite_range(base + offset, &blob)?;
+ return Ok(());
+ }
+
+ pub fn blob_size_in_blocks(&self, blob: &[u8]) -> u16 {
+ let block_size = self.bd.geom().block_size;
+ let result = (blob.len() + block_size - 1) / block_size;
+ assert!(result < u16::MAX as usize);
+ return result as u16;
+ }
+
+ pub fn read_blob(&self, extent: Extent, blob_out: &mut [u8]) -> Result<(), BFSErr> {
+ let block_size = self.bd.geom().block_size;
+ let mut cursor = extent.offset();
+ for chunk in blob_out.chunks_mut(block_size) {
+ self.bd.read_block(cursor, chunk)?;
+ cursor = cursor + 1;
+ }
+ return Ok(());
+ }
+
+ pub fn write_blob(&mut self, extent: Extent, blob_in: &[u8]) -> Result<(), BFSErr> {
+ let block_size = self.bd.geom().block_size;
+ let mut cursor = extent.offset();
+ for chunk in blob_in.chunks(block_size) {
+ self.bd.write_block(cursor, &chunk)?;
+ cursor = cursor + 1;
+ }
+ return Ok(());
+ }
+
+ pub fn delete_blob(&mut self, extent: Extent) -> Result<(), BFSErr> {
+ let offset = extent.offset();
+ for i in 0..extent.size as usize {
+ self.bd.erase_block(offset + i)?;
+ }
+ return Ok(());
+ }
+}
diff --git a/blob_fs/src/blob_fs.rs b/blob_fs/src/blob_fs.rs
new file mode 100644
index 0000000..72e72d5
--- /dev/null
+++ b/blob_fs/src/blob_fs.rs
@@ -0,0 +1,234 @@
+use crate::bit_vector::*;
+use crate::blob_device::*;
+use crate::block_device::*;
+use crate::errors::*;
+use crate::structs::*;
+use crate::utils::*;
+
+pub struct BlobFS<'a> {
+ pub bd: BlobDevice<'a>,
+ pub superblock: Superblock,
+ pub blockmap: BitVector<'a>,
+}
+
+// Public impl
+
+impl<'a> BlobFS<'a> {
+ pub fn new(bd: &'a mut dyn BlockDevice, blockmap_bits: &'a mut [u32]) -> Self {
+ let result = BlobFS {
+ bd: BlobDevice::new(bd),
+ superblock: Superblock::default(),
+ blockmap: BitVector::new(blockmap_bits),
+ };
+
+ return result;
+ }
+
+ pub fn format(&mut self) -> Result<(), BFSErr> {
+ // Format the device.
+ self.bd.format()?;
+
+ return Ok(());
+ }
+
+ pub fn mount(&mut self) -> Result<(), BFSErr> {
+ // Read the superblock from the device.
+ self.bd.read_superblock(&mut self.superblock)?;
+
+ // Read the allocation bitmap from the device
+ self.bd.read_blockmap(&mut self.blockmap)?;
+
+ return Ok(());
+ }
+
+ pub fn sanity_check(&self) -> Result<(), BFSErr> {
+ return Ok(());
+ }
+
+ pub fn get_blob_size(&self, hash: u64) -> Result<usize, BFSErr> {
+ let result = self.find_inode(hash)?;
+ let inode = result.1;
+ return Ok(inode.blob_size as usize);
+ }
+
+ pub fn get_blob(&self, hash: u64, blob_out: &mut [u8]) -> Result<(), BFSErr> {
+ let result = self.find_inode(hash)?;
+ let inode = result.1;
+ dcheck!(inode.blob_size as usize <= blob_out.len(), BFSErr::OutOfBounds);
+
+ let dst = &mut blob_out[..inode.blob_size as usize];
+ self.bd.read_blob(inode.inline_extent, dst)?;
+
+ return Ok(());
+ }
+
+ pub fn put_blob(&mut self, hash: u64, blob_in: &[u8]) -> Result<(), BFSErr> {
+ if self.find_inode(hash).is_ok() {
+ return Err(BFSErr::Duplicate);
+ }
+
+ let blob_block_count = self.bd.blob_size_in_blocks(blob_in);
+
+ // Find a place to put the blob.
+ let extent = self.find_extent(blob_block_count)?;
+
+ // Copy the blob to the block device.
+ self.bd.write_blob(extent, blob_in)?;
+
+ // Set the corresponding bits in our local bitmap
+ self.blockmap.clear_range(extent.offset(), extent.offset() + extent.size as usize)?;
+ // FIXME flush bitmap to disk?
+
+ // Create the inode for the new blob
+ let inode = Inode {
+ header: NodeHeader {
+ flags: NodeHeader::FLAG_INODE,
+ version: 0x0,
+ next_node: 0xFFFFFFFF, // FIXME
+ },
+ hash0: hash, // FIXME just using 64-bit hash
+ hash1: hash,
+ hash2: hash,
+ hash3: hash,
+ blob_size: blob_in.len() as u64,
+ block_count: blob_block_count as u32,
+ extent_count: 1,
+ padding: 0xFFFF,
+ inline_extent: extent,
+ };
+
+ // Put the inode in the inode table
+ let inode_idx = self.find_free_inode()?;
+ self.bd.write_inode(inode_idx, &inode)?;
+
+ return Ok(());
+ }
+
+ pub fn delete_blob(&mut self, hash: u64) -> Result<(), BFSErr> {
+ let result = self.find_inode(hash)?;
+ let inode_idx = result.0;
+ let extent = result.1.inline_extent;
+
+ self.bd.invalidate_inode(inode_idx)?;
+ self.blockmap.set_range(extent.offset(), extent.offset() + extent.size as usize)?;
+ self.bd.delete_blob(extent)?;
+
+ return Ok(());
+ }
+}
+
+// Private impl
+
+impl<'a> BlobFS<'a> {
+ fn find_inode(&self, hash: u64) -> Result<(usize, Inode), BFSErr> {
+ for i in 0..self.superblock.inode_count as usize {
+ let mut inode = Inode::default();
+ self.bd.read_inode(i, &mut inode)?;
+ if inode.header.flags == NodeHeader::FLAG_INODE && inode.hash0 == hash {
+ return Ok((i, inode));
+ }
+ }
+ return Err(BFSErr::NotFound);
+ }
+
+ // FIXME quick and dirty scan entire inode table for empty slot
+
+ fn find_free_inode(&self) -> Result<usize, BFSErr> {
+ for i in 0..self.superblock.inode_count as usize {
+ let mut header = NodeHeader::default();
+ self.bd.read_node_header(i, &mut header)?;
+ if header.flags == u16::MAX {
+ return Ok(i);
+ }
+ }
+ assert!(false);
+ return Err(BFSErr::NotFound);
+ }
+
+ #[allow(dead_code)]
+ fn count_inodes(&self) -> Result<usize, BFSErr> {
+ let mut count = 0;
+ for i in 0..self.superblock.inode_count as usize {
+ let mut header = NodeHeader::default();
+ self.bd.read_node_header(i, &mut header)?;
+ if header.flags == NodeHeader::FLAG_INODE {
+ count = count + 1;
+ }
+ }
+ return Ok(count);
+ }
+
+ fn find_extent(&self, blob_block_count: u16) -> Result<Extent, BFSErr> {
+ let block_count = self.bd.bd.geom().block_count;
+ let offset = self.blockmap.find_span(0, block_count, blob_block_count as usize)?;
+
+ return Ok(Extent {
+ size: blob_block_count as u16,
+ //offset_hi: (offset >> 32) as u16,
+ offset_hi: 0 as u16,
+ offset_lo: offset as u32,
+ });
+ }
+}
+
+// Unit tests
+
+#[test]
+fn test_basic() {
+ use crate::test_device::*;
+ const BLOCK_SIZE: usize = 8192;
+ const BLOCK_COUNT: usize = 32;
+
+ let geom = BlockDeviceGeometry { block_size: BLOCK_SIZE, block_count: BLOCK_COUNT };
+ let mut buf: [u8; BLOCK_SIZE * BLOCK_COUNT] = [0; BLOCK_SIZE * BLOCK_COUNT];
+ let mut dirty_bits = [0; BLOCK_SIZE * BLOCK_COUNT / 32];
+ let bd: &mut dyn BlockDevice = &mut TestDevice::new(geom, buf.as_mut_ptr(), &mut dirty_bits);
+
+ let mut blockmap_bits: [u32; BLOCK_COUNT / 32] = [0xFFFFFFFF; BLOCK_COUNT / 32];
+
+ let mut fs = BlobFS::new(bd, blockmap_bits.as_mut());
+
+ assert_ok!(fs.format());
+ assert_ok!(fs.mount());
+ assert_ok!(fs.sanity_check());
+
+ // Block map should start with 4 reserved blocks for metadata
+ assert_eq!(fs.blockmap.count_range(0, BLOCK_COUNT).unwrap(), BLOCK_COUNT - 4);
+
+ // Store a small blob in the filesystem
+ let blob_hash: u64 = 0xDEADBEEFF00DCAFE;
+ let blob_text = "This is the contents of a blob";
+ let blob_contents = blob_text.as_bytes();
+ assert_ok!(fs.put_blob(blob_hash, blob_contents));
+
+ // Block map should have lost one free block
+ assert_eq!(fs.blockmap.count_range(0, BLOCK_COUNT).unwrap(), BLOCK_COUNT - 5);
+
+ // Storing it a second time should fail.
+ assert_err!(fs.put_blob(blob_hash, blob_contents));
+
+ // Read it back out
+ let blob_len = fs.get_blob_size(blob_hash).unwrap();
+ let mut blob_contents: Vec<u8> = vec![0; blob_len];
+ assert_ok!(fs.get_blob(blob_hash, &mut blob_contents));
+
+ // Contents should match.
+ let new_blob_text = core::str::from_utf8(&blob_contents).unwrap();
+ assert_eq!(blob_text, new_blob_text);
+
+ // Delete it and lookups should fail
+ assert_ok!(fs.delete_blob(blob_hash));
+ assert_err!(fs.get_blob_size(blob_hash));
+
+ // Deleting it a second time should also fail.
+ assert_err!(fs.delete_blob(blob_hash));
+
+ // Block map should have gained one free block
+ assert_eq!(fs.blockmap.count_range(0, BLOCK_COUNT).unwrap(), BLOCK_COUNT - 4);
+
+ // Reading a non-existent blob should cause an error
+ let bad_hash: u64 = 0xAAAAAAAAAAAAAAAA;
+ assert_err!(fs.get_blob_size(bad_hash));
+ let mut blob_contents: [u8; 256] = [0; 256];
+ assert_err!(fs.get_blob(bad_hash, &mut blob_contents));
+}
diff --git a/blob_fs/src/block_device.rs b/blob_fs/src/block_device.rs
new file mode 100644
index 0000000..71c76f2
--- /dev/null
+++ b/blob_fs/src/block_device.rs
@@ -0,0 +1,18 @@
+use crate::errors::*;
+
+#[derive(Debug, Copy, Clone)]
+pub struct BlockDeviceGeometry {
+ pub block_size: usize,
+ pub block_count: usize,
+}
+
+pub trait BlockDevice {
+ fn geom(&self) -> BlockDeviceGeometry;
+ fn read_block(&self, block: usize, data: &mut [u8]) -> Result<(), BFSErr>;
+ fn write_block(&mut self, block: usize, block: &[u8]) -> Result<(), BFSErr>;
+ fn erase_block(&mut self, block: usize) -> Result<(), BFSErr>;
+
+ fn read_range(&self, addr: usize, data: &mut [u8]) -> Result<(), BFSErr>;
+ fn write_range(&mut self, addr: usize, data: &[u8]) -> Result<(), BFSErr>;
+ fn overwrite_range(&mut self, addr: usize, data: &[u8]) -> Result<(), BFSErr>;
+}
diff --git a/blob_fs/src/errors.rs b/blob_fs/src/errors.rs
new file mode 100644
index 0000000..cf92156
--- /dev/null
+++ b/blob_fs/src/errors.rs
@@ -0,0 +1,14 @@
+#[derive(Debug, PartialEq)]
+pub enum BFSErr {
+ DeviceErr, // The underlying block device had an error.
+ NotFound, // The resource was not found.
+ CleanRead, // Tried to read a byte that had never been written.
+ DirtyWrite, // Tried to write a byte that had already been written.
+ OutOfBounds, // Tried to read off the end of the device.
+ BadOverwrite, // Tried to change a bit from 0->1 without an erase.
+ BadErase, // Tried to erase a clean block
+ Corrupt, // Something was corrupt...
+ Full, // No free blocks left.
+ Duplicate, // Tried to add a blob that already existed.
+ InvalidArg, // Bad argument passed to API
+}
diff --git a/blob_fs/src/lib.rs b/blob_fs/src/lib.rs
new file mode 100644
index 0000000..ce93b78
--- /dev/null
+++ b/blob_fs/src/lib.rs
@@ -0,0 +1,14 @@
+#![cfg_attr(not(test), no_std)]
+#![macro_use]
+
+pub mod bit_vector;
+pub mod blob_device;
+pub mod blob_fs;
+pub mod block_device;
+pub mod errors;
+pub mod memmap_device;
+pub mod structs;
+pub mod utils;
+
+#[cfg(test)]
+pub mod test_device;
diff --git a/blob_fs/src/memmap_device.rs b/blob_fs/src/memmap_device.rs
new file mode 100644
index 0000000..3e570d5
--- /dev/null
+++ b/blob_fs/src/memmap_device.rs
@@ -0,0 +1,276 @@
+use crate::block_device::*;
+use crate::errors::*;
+use crate::utils::*;
+
+pub struct MemmapDevice {
+ pub geom: BlockDeviceGeometry,
+ pub flash_base: *mut u8,
+}
+
+impl MemmapDevice {
+ pub fn new(geom: BlockDeviceGeometry, flash_base: *mut u8) -> Self {
+ let result = MemmapDevice { geom: geom, flash_base: flash_base };
+ return result;
+ }
+}
+
+impl MemmapDevice {
+ pub fn check_is_block_sized(&self, data: &[u8]) -> Result<(), BFSErr> {
+ dcheck!(data.len() == self.geom.block_size, BFSErr::OutOfBounds);
+ return Ok(());
+ }
+
+ pub fn check_block_index(&self, iblock: usize) -> Result<(), BFSErr> {
+ dcheck!(iblock < self.geom.block_count, BFSErr::OutOfBounds);
+ return Ok(());
+ }
+
+ pub fn check_fits_in_block(&self, addr: usize, data: &[u8]) -> Result<(), BFSErr> {
+ let block_a = addr / self.geom.block_size;
+ let block_b = (addr + data.len() - 1) / self.geom.block_size;
+ dcheck!(block_a == block_b, BFSErr::OutOfBounds);
+ return Ok(());
+ }
+
+ pub fn check_read_block(&self, iblock: usize, block: &[u8]) -> Result<(), BFSErr> {
+ self.check_block_index(iblock)?;
+ self.check_fits_in_block(iblock * self.geom.block_size, block)?;
+ return Ok(());
+ }
+
+ pub fn check_write_block(&self, iblock: usize, block: &[u8]) -> Result<(), BFSErr> {
+ self.check_block_index(iblock)?;
+ self.check_fits_in_block(iblock * self.geom.block_size, block)?;
+ return Ok(());
+ }
+
+ pub fn check_erase_block(&self, iblock: usize) -> Result<(), BFSErr> {
+ self.check_block_index(iblock)?;
+ return Ok(());
+ }
+
+ pub fn check_erase_dirty_block(&self, iblock: usize) -> Result<(), BFSErr> {
+ self.check_erase_block(iblock)?;
+ return Ok(());
+ }
+
+ pub fn check_range(&self, addr: usize, size: usize) -> Result<(), BFSErr> {
+ let bs = self.geom.block_size;
+ let bc = self.geom.block_count;
+ dcheck!(addr + size <= (bc * bs), BFSErr::OutOfBounds);
+ return Ok(());
+ }
+
+ pub fn check_read_range(&self, addr: usize, data: &mut [u8]) -> Result<(), BFSErr> {
+ self.check_range(addr, data.len())?;
+ return Ok(());
+ }
+
+ pub fn check_write_range(&self, addr: usize, data: &[u8]) -> Result<(), BFSErr> {
+ self.check_range(addr, data.len())?;
+ return Ok(());
+ }
+
+ /// Check that this overwrite only changes bits from 1->0
+ pub fn check_overwrite_range(&self, addr: usize, data: &[u8]) -> Result<(), BFSErr> {
+ self.check_range(addr, data.len())?;
+ for i in 0..data.len() {
+ let src = data[i];
+ unsafe {
+ let dst = self.flash_base.add(addr + i).read();
+ dcheck!((src & !dst) == 0, BFSErr::BadOverwrite);
+ }
+ }
+ return Ok(());
+ }
+}
+
+impl BlockDevice for MemmapDevice {
+ fn geom(&self) -> BlockDeviceGeometry {
+ self.geom
+ }
+
+ /// Read a chunk of flash contained in a single block.
+ fn read_block(&self, iblock: usize, block: &mut [u8]) -> Result<(), BFSErr> {
+ self.check_read_block(iblock, block)?;
+
+ let bs = self.geom.block_size;
+ unsafe {
+ self.flash_base.add(iblock * bs).copy_to(block.as_mut_ptr(), block.len());
+ }
+
+ return Ok(());
+ }
+
+ /// Write a chunk of flash contained in a single block.
+ fn write_block(&mut self, iblock: usize, block: &[u8]) -> Result<(), BFSErr> {
+ self.check_write_block(iblock, block)?;
+
+ let bs = self.geom.block_size;
+ unsafe {
+ self.flash_base.add(iblock * bs).copy_from(block.as_ptr(), block.len());
+ }
+
+ return Ok(());
+ }
+
+ /// Erase an entire block of flash.
+ fn erase_block(&mut self, iblock: usize) -> Result<(), BFSErr> {
+ self.check_erase_block(iblock)?;
+
+ let bs = self.geom.block_size;
+ unsafe {
+ self.flash_base.add(iblock * bs).write_bytes(0xFF, bs);
+ }
+
+ return Ok(());
+ }
+
+ /// Read a range of bytes in flash, checking first that those bytes have
+ /// been written since they were erased.
+ fn read_range(&self, addr: usize, data: &mut [u8]) -> Result<(), BFSErr> {
+ self.check_read_range(addr, data)?;
+
+ unsafe {
+ self.flash_base.add(addr).copy_to(data.as_mut_ptr(), data.len());
+ }
+
+ return Ok(());
+ }
+
+ /// Write to a range of bytes in flash, checking first that those bytes have
+ /// not been written since they were erased.
+ fn write_range(&mut self, addr: usize, data: &[u8]) -> Result<(), BFSErr> {
+ self.check_write_range(addr, data)?;
+
+ unsafe {
+ self.flash_base.add(addr).copy_from(data.as_ptr(), data.len());
+ }
+
+ return Ok(());
+ }
+
+ /// Write to a range of bytes in flash, but _do_ allow writing over dirty
+ /// bytes - the result in flash will be the logical AND of the old and new
+ /// data.
+ fn overwrite_range(&mut self, addr: usize, data: &[u8]) -> Result<(), BFSErr> {
+ self.check_overwrite_range(addr, data)?;
+
+ unsafe {
+ for i in 0..data.len() {
+ let ptr = self.flash_base.add(addr + i);
+ ptr.write(ptr.read() & data[i]);
+ }
+ }
+
+ return Ok(());
+ }
+}
+
+/// Overwriting existing data should write the logical AND of the old and new
+/// data.
+
+#[test]
+fn test_overwrite() {
+ let geom = BlockDeviceGeometry { block_size: 16, block_count: 4 };
+ let mut buf: Vec<u8> = vec![0; geom.block_count * geom.block_size];
+ let bd: &mut dyn BlockDevice = &mut MemmapDevice::new(geom, buf.as_mut_ptr());
+
+ let write1: u32 = 0xFF0FF0FF;
+ let write2: u32 = 0x0F00F000;
+
+ assert_ok!(bd.write_range(0, &write1.to_le_bytes()));
+ assert_ok!(bd.overwrite_range(0, &write2.to_le_bytes()));
+
+ let mut result: u32 = 0;
+ assert_ok!(bd.read_range(0, as_unsafe_blob_mut(&mut result)));
+ assert_eq!(result, write1 & write2);
+}
+
+/// Trying to set bits that have already been cleared by a previous write should
+/// cause a panic in the test device.
+
+#[test]
+#[should_panic]
+fn test_bad_overwrite() {
+ let geom = BlockDeviceGeometry { block_size: 16, block_count: 4 };
+ let mut buf: Vec<u8> = vec![0; geom.block_count * geom.block_size];
+ let bd: &mut dyn BlockDevice = &mut MemmapDevice::new(geom, buf.as_mut_ptr());
+
+ let all_1: u32 = 0xFFFFFFFF;
+ let all_0: u32 = 0x00000000;
+
+ assert_ok!(bd.write_range(0, &all_0.to_le_bytes()));
+ assert_err!(bd.overwrite_range(0, &all_1.to_le_bytes()));
+}
+
+/// Trying to read blocks that have never been written should cause a panic in
+/// the test device.
+
+#[test]
+#[should_panic]
+fn test_read_unwritten_block() {
+ let geom = BlockDeviceGeometry { block_size: 16, block_count: 4 };
+ let mut buf: Vec<u8> = vec![0; geom.block_count * geom.block_size];
+ let bd: &mut dyn BlockDevice = &mut MemmapDevice::new(geom, buf.as_mut_ptr());
+
+ let mut dst_block = vec![0; geom.block_size];
+ assert_err!(bd.read_block(0, &mut dst_block));
+}
+
+/// Trying to read blocks that have been written and then erased should cause a
+/// panic in the test device.
+
+#[test]
+#[should_panic]
+fn test_read_erased_block() {
+ let geom = BlockDeviceGeometry { block_size: 16, block_count: 4 };
+ let mut buf: Vec<u8> = vec![0; geom.block_count * geom.block_size];
+ let bd: &mut dyn BlockDevice = &mut MemmapDevice::new(geom, buf.as_mut_ptr());
+
+ let mut src_block = vec![0; geom.block_size];
+ let mut dst_block = vec![0; geom.block_size];
+
+ for i in 0..geom.block_size {
+ src_block[i] = i as u8;
+ }
+
+ assert_ok!(bd.write_block(0, &mut src_block));
+ assert_ok!(bd.erase_block(0));
+ assert_err!(bd.read_block(0, &mut dst_block));
+}
+
+/// All blocks in a device should be readable and writable.
+
+#[test]
+fn test_read_write() {
+ let geom = BlockDeviceGeometry { block_size: 16, block_count: 4 };
+ let mut buf: Vec<u8> = vec![0; geom.block_count * geom.block_size];
+ let bd: &mut dyn BlockDevice = &mut MemmapDevice::new(geom, buf.as_mut_ptr());
+
+ let mut src_block = vec![0; geom.block_size];
+ let mut dst_block = vec![0; geom.block_size];
+
+ for i in 0..geom.block_size {
+ src_block[i] = i as u8;
+ }
+
+ // Writing clean blocks should succeed.
+ for i in 0..geom.block_count {
+ assert_ok!(bd.write_block(i, &src_block));
+ }
+
+ // Reading those blocks back should succeed, and the contents should match
+ // the original block.
+ for i in 0..geom.block_count {
+ assert_ok!(bd.read_block(i, &mut dst_block));
+ for j in 0..geom.block_size {
+ assert_eq!(src_block[j], dst_block[j]);
+ }
+ }
+
+ // Erasing blocks should succeed.
+ for i in 0..geom.block_count {
+ assert_ok!(bd.erase_block(i));
+ }
+}
diff --git a/blob_fs/src/structs.rs b/blob_fs/src/structs.rs
new file mode 100644
index 0000000..6bcd1f7
--- /dev/null
+++ b/blob_fs/src/structs.rs
@@ -0,0 +1,86 @@
+#[repr(C, packed)]
+#[derive(Debug, Copy, Clone, Default)]
+pub struct Superblock {
+ pub magic0: u64,
+ pub magic1: u64,
+ pub version: u32,
+ pub flags: u32,
+ pub block_size: u32,
+ pub unused: u32,
+ pub data_block_count: u64,
+ pub journal_block_count: u64,
+ pub inode_count: u64,
+ pub alloc_block_count: u64,
+ pub alloc_inode_count: u64,
+}
+
+#[repr(C, packed)]
+#[derive(Debug, Copy, Clone, Default)]
+pub struct Extent {
+ pub size: u16,
+ pub offset_hi: u16,
+ pub offset_lo: u32,
+}
+
+impl Extent {
+ pub fn offset(&self) -> usize {
+ //let hi = self.offset_hi as usize;
+ //let lo = self.offset_lo as usize;
+ //return (hi << 32) | lo;
+ return self.offset_lo as usize;
+ }
+}
+
+#[repr(C, packed)]
+#[derive(Debug, Copy, Clone, Default)]
+pub struct NodeHeader {
+ pub flags: u16,
+ pub version: u16,
+ pub next_node: u32,
+}
+
+impl NodeHeader {
+ pub const FLAG_INODE: u16 = 0b0000000000000001;
+ pub const FLAG_EXTENT: u16 = 0b0000000000000010;
+}
+
+#[repr(C, packed)]
+#[derive(Debug, Copy, Clone, Default)]
+pub struct Inode {
+ pub header: NodeHeader,
+ pub hash0: u64,
+ pub hash1: u64,
+ pub hash2: u64,
+ pub hash3: u64,
+ pub blob_size: u64,
+ pub block_count: u32,
+ pub extent_count: u16,
+ pub padding: u16,
+ pub inline_extent: Extent,
+}
+
+#[repr(C, packed)]
+#[derive(Debug, Copy, Clone)]
+pub struct ExtentContainer {
+ pub header: NodeHeader,
+ pub previous_node: u32,
+ pub extent_count: u16,
+ pub reserved: u16,
+ pub extents: [Extent; 6],
+}
+
+#[repr(C, packed)]
+#[derive(Debug, Copy, Clone)]
+pub struct Transaction {
+ pub op: u16,
+ pub size: u16,
+ pub addr: u32,
+}
+
+#[test]
+fn size_check() {
+ use core::mem::*;
+ assert_eq!(size_of::<Extent>(), 8);
+ assert_eq!(size_of::<Inode>(), 64);
+ assert_eq!(size_of::<ExtentContainer>(), 64);
+}
diff --git a/blob_fs/src/test_device.rs b/blob_fs/src/test_device.rs
new file mode 100644
index 0000000..69fbf2e
--- /dev/null
+++ b/blob_fs/src/test_device.rs
@@ -0,0 +1,341 @@
+use crate::bit_vector::*;
+use crate::block_device::*;
+use crate::errors::*;
+use crate::utils::*;
+
+pub struct TestDevice<'a> {
+ pub geom: BlockDeviceGeometry,
+ pub flash_base: *mut u8,
+ pub dirty: BitVector<'a>,
+}
+
+impl<'a> TestDevice<'a> {
+ pub fn new(geom: BlockDeviceGeometry, flash_base: *mut u8, dirty_bits: &'a mut [u32]) -> Self {
+ assert_eq!(geom.block_size * geom.block_count, dirty_bits.len() * 32);
+ let result =
+ TestDevice { geom: geom, flash_base: flash_base, dirty: BitVector::new(dirty_bits) };
+ return result;
+ }
+
+ pub fn dirty_count(&self, addr: usize, size: usize) -> Result<usize, BFSErr> {
+ let mut result: usize = 0;
+ for i in addr..addr + size {
+ result += self.dirty.get_bit(i)? as usize;
+ }
+ return Ok(result);
+ }
+
+ pub fn mark_dirty(&mut self, addr: usize, size: usize) -> Result<(), BFSErr> {
+ self.dirty.set_range(addr, addr + size)?;
+ return Ok(());
+ }
+
+ pub fn mark_clean(&mut self, addr: usize, size: usize) -> Result<(), BFSErr> {
+ self.dirty.clear_range(addr, addr + size)?;
+ return Ok(());
+ }
+}
+
+/// Preconditions for TestDevice.
+/// FIXME - I loosened these up because they were annoying, not sure how strict
+/// we should be...
+
+impl<'a> TestDevice<'a> {
+ pub fn check_dirty(&self, addr: usize, size: usize) -> Result<(), BFSErr> {
+ for i in addr..addr + size {
+ dcheck!(self.dirty.get_bit(i)? == 1, BFSErr::CleanRead);
+ }
+ return Ok(());
+ }
+
+ pub fn check_clean(&self, addr: usize, size: usize) -> Result<(), BFSErr> {
+ for i in addr..addr + size {
+ dcheck!(self.dirty.get_bit(i)? == 0, BFSErr::DirtyWrite);
+ }
+ return Ok(());
+ }
+
+ pub fn check_is_block_sized(&self, data: &[u8]) -> Result<(), BFSErr> {
+ dcheck!(data.len() == self.geom.block_size, BFSErr::OutOfBounds);
+ return Ok(());
+ }
+
+ pub fn check_block_index(&self, iblock: usize) -> Result<(), BFSErr> {
+ dcheck!(iblock < self.geom.block_count, BFSErr::OutOfBounds);
+ return Ok(());
+ }
+
+ pub fn check_fits_in_block(&self, addr: usize, data: &[u8]) -> Result<(), BFSErr> {
+ let block_a = addr / self.geom.block_size;
+ let block_b = (addr + data.len() - 1) / self.geom.block_size;
+ dcheck!(block_a == block_b, BFSErr::OutOfBounds);
+ return Ok(());
+ }
+
+ pub fn check_read_block(&self, iblock: usize, block: &[u8]) -> Result<(), BFSErr> {
+ self.check_block_index(iblock)?;
+ self.check_fits_in_block(iblock * self.geom.block_size, block)?;
+ //self.check_dirty(iblock * self.geom.block_size, block.len())?;
+ return Ok(());
+ }
+
+ pub fn check_write_block(&self, iblock: usize, block: &[u8]) -> Result<(), BFSErr> {
+ self.check_block_index(iblock)?;
+ self.check_fits_in_block(iblock * self.geom.block_size, block)?;
+ self.check_clean(iblock * self.geom.block_size, block.len())?;
+ return Ok(());
+ }
+
+ pub fn check_erase_block(&self, iblock: usize) -> Result<(), BFSErr> {
+ self.check_block_index(iblock)?;
+ return Ok(());
+ }
+
+ pub fn check_erase_dirty_block(&self, iblock: usize) -> Result<(), BFSErr> {
+ self.check_erase_block(iblock)?;
+ let dirty_count = self.dirty_count(iblock * self.geom.block_size, self.geom.block_size)?;
+ dcheck!(dirty_count > 0, BFSErr::BadErase);
+ return Ok(());
+ }
+
+ pub fn check_range(&self, addr: usize, size: usize) -> Result<(), BFSErr> {
+ let bs = self.geom.block_size;
+ let bc = self.geom.block_count;
+ dcheck!(addr + size <= (bc * bs), BFSErr::OutOfBounds);
+ return Ok(());
+ }
+
+ pub fn check_read_range(&self, addr: usize, data: &mut [u8]) -> Result<(), BFSErr> {
+ self.check_range(addr, data.len())?;
+ //self.check_dirty(addr, data.len())?;
+ return Ok(());
+ }
+
+ pub fn check_write_range(&self, addr: usize, data: &[u8]) -> Result<(), BFSErr> {
+ self.check_range(addr, data.len())?;
+ self.check_clean(addr, data.len())?;
+ return Ok(());
+ }
+
+ /// Check that this overwrite only changes bits from 1->0
+ pub fn check_overwrite_range(&self, addr: usize, data: &[u8]) -> Result<(), BFSErr> {
+ self.check_range(addr, data.len())?;
+ for i in 0..data.len() {
+ let src = data[i];
+ unsafe {
+ let dst = self.flash_base.add(addr + i).read();
+ dcheck!((src & !dst) == 0, BFSErr::BadOverwrite);
+ }
+ }
+ return Ok(());
+ }
+}
+
+impl<'a> BlockDevice for TestDevice<'a> {
+ fn geom(&self) -> BlockDeviceGeometry {
+ self.geom
+ }
+
+ /// Read a chunk of flash contained in a single block.
+ fn read_block(&self, iblock: usize, block: &mut [u8]) -> Result<(), BFSErr> {
+ self.check_read_block(iblock, block)?;
+
+ let bs = self.geom.block_size;
+ unsafe {
+ self.flash_base.add(iblock * bs).copy_to(block.as_mut_ptr(), block.len());
+ }
+
+ return Ok(());
+ }
+
+ /// Write a chunk of flash contained in a single block.
+ fn write_block(&mut self, iblock: usize, block: &[u8]) -> Result<(), BFSErr> {
+ self.check_write_block(iblock, block)?;
+
+ let bs = self.geom.block_size;
+ unsafe {
+ self.flash_base.add(iblock * bs).copy_from(block.as_ptr(), block.len());
+ }
+
+ self.mark_dirty(iblock * bs, block.len())?;
+ return Ok(());
+ }
+
+ /// Erase an entire block of flash.
+ fn erase_block(&mut self, iblock: usize) -> Result<(), BFSErr> {
+ self.check_erase_block(iblock)?;
+
+ let bs = self.geom.block_size;
+ self.mark_clean(iblock * bs, bs)?;
+ unsafe {
+ self.flash_base.add(iblock * bs).write_bytes(0xFF, bs);
+ }
+
+ return Ok(());
+ }
+
+ /// Read a range of bytes in flash, checking first that those bytes have
+ /// been written since they were erased.
+ fn read_range(&self, addr: usize, data: &mut [u8]) -> Result<(), BFSErr> {
+ self.check_read_range(addr, data)?;
+
+ unsafe {
+ self.flash_base.add(addr).copy_to(data.as_mut_ptr(), data.len());
+ }
+
+ return Ok(());
+ }
+
+ /// Write to a range of bytes in flash, checking first that those bytes have
+ /// not been written since they were erased.
+ fn write_range(&mut self, addr: usize, data: &[u8]) -> Result<(), BFSErr> {
+ self.check_write_range(addr, data)?;
+
+ unsafe {
+ self.flash_base.add(addr).copy_from(data.as_ptr(), data.len());
+ }
+
+ self.mark_dirty(addr, data.len())?;
+ return Ok(());
+ }
+
+ /// Write to a range of bytes in flash, but _do_ allow writing over dirty
+ /// bytes - the result in flash will be the logical AND of the old and new
+ /// data.
+ fn overwrite_range(&mut self, addr: usize, data: &[u8]) -> Result<(), BFSErr> {
+ self.check_overwrite_range(addr, data)?;
+
+ unsafe {
+ for i in 0..data.len() {
+ let ptr = self.flash_base.add(addr + i);
+ ptr.write(ptr.read() & data[i]);
+ }
+ }
+
+ self.mark_dirty(addr, data.len())?;
+ return Ok(());
+ }
+}
+
+/// Overwriting existing data should write the logical AND of the old and new
+/// data.
+
+#[test]
+fn test_overwrite() {
+ const BLOCK_SIZE: usize = 16;
+ const BLOCK_COUNT: usize = 4;
+ let geom = BlockDeviceGeometry { block_size: BLOCK_SIZE, block_count: BLOCK_COUNT };
+ let mut buf: [u8; BLOCK_SIZE * BLOCK_COUNT] = [0; BLOCK_SIZE * BLOCK_COUNT];
+ let mut dirty_bits = [0; BLOCK_SIZE * BLOCK_COUNT / 32];
+ let bd: &mut dyn BlockDevice = &mut TestDevice::new(geom, buf.as_mut_ptr(), &mut dirty_bits);
+
+ let write1: u32 = 0xFF0FF0FF;
+ let write2: u32 = 0x0F00F000;
+
+ assert_ok!(bd.write_range(0, &write1.to_le_bytes()));
+ assert_ok!(bd.overwrite_range(0, &write2.to_le_bytes()));
+
+ let mut result: u32 = 0;
+ assert_ok!(bd.read_range(0, as_unsafe_blob_mut(&mut result)));
+ assert_eq!(result, write1 & write2);
+}
+
+/// Trying to set bits that have already been cleared by a previous write should
+/// cause a panic in the test device.
+
+#[test]
+#[should_panic]
+fn test_bad_overwrite() {
+ const BLOCK_SIZE: usize = 16;
+ const BLOCK_COUNT: usize = 4;
+ let geom = BlockDeviceGeometry { block_size: BLOCK_SIZE, block_count: BLOCK_COUNT };
+ let mut buf: [u8; BLOCK_SIZE * BLOCK_COUNT] = [0; BLOCK_SIZE * BLOCK_COUNT];
+ let mut dirty_bits = [0; BLOCK_SIZE * BLOCK_COUNT / 32];
+ let bd: &mut dyn BlockDevice = &mut TestDevice::new(geom, buf.as_mut_ptr(), &mut dirty_bits);
+
+ let all_1: u32 = 0xFFFFFFFF;
+ let all_0: u32 = 0x00000000;
+
+ assert_ok!(bd.write_range(0, &all_0.to_le_bytes()));
+ assert_err!(bd.overwrite_range(0, &all_1.to_le_bytes()));
+}
+
+/// Trying to read blocks that have never been written should cause a panic in
+/// the test device.
+
+#[test]
+#[should_panic]
+fn test_read_unwritten_block() {
+ const BLOCK_SIZE: usize = 16;
+ const BLOCK_COUNT: usize = 4;
+ let geom = BlockDeviceGeometry { block_size: BLOCK_SIZE, block_count: BLOCK_COUNT };
+ let mut buf: [u8; BLOCK_SIZE * BLOCK_COUNT] = [0; BLOCK_SIZE * BLOCK_COUNT];
+ let mut dirty_bits = [0; BLOCK_SIZE * BLOCK_COUNT / 32];
+ let bd: &mut dyn BlockDevice = &mut TestDevice::new(geom, buf.as_mut_ptr(), &mut dirty_bits);
+
+ let mut dst_block = [0; BLOCK_SIZE];
+ assert_err!(bd.read_block(0, &mut dst_block));
+}
+
+/// Trying to read blocks that have been written and then erased should cause a
+/// panic in the test device.
+
+#[test]
+#[should_panic]
+fn test_read_erased_block() {
+ const BLOCK_SIZE: usize = 16;
+ const BLOCK_COUNT: usize = 4;
+ let geom = BlockDeviceGeometry { block_size: BLOCK_SIZE, block_count: BLOCK_COUNT };
+ let mut buf: [u8; BLOCK_SIZE * BLOCK_COUNT] = [0; BLOCK_SIZE * BLOCK_COUNT];
+ let mut dirty_bits = [0; BLOCK_SIZE * BLOCK_COUNT / 32];
+ let bd: &mut dyn BlockDevice = &mut TestDevice::new(geom, buf.as_mut_ptr(), &mut dirty_bits);
+
+ let mut src_block = [0; BLOCK_SIZE];
+ let mut dst_block = [0; BLOCK_SIZE];
+
+ for i in 0..geom.block_size {
+ src_block[i] = i as u8;
+ }
+
+ assert_ok!(bd.write_block(0, &mut src_block));
+ assert_ok!(bd.erase_block(0));
+ assert_err!(bd.read_block(0, &mut dst_block));
+}
+
+/// All blocks in a device should be readable and writable.
+
+#[test]
+fn test_read_write() {
+ const BLOCK_SIZE: usize = 16;
+ const BLOCK_COUNT: usize = 4;
+ let geom = BlockDeviceGeometry { block_size: BLOCK_SIZE, block_count: BLOCK_COUNT };
+ let mut buf: [u8; BLOCK_SIZE * BLOCK_COUNT] = [0; BLOCK_SIZE * BLOCK_COUNT];
+ let mut dirty_bits = [0; BLOCK_SIZE * BLOCK_COUNT / 32];
+ let bd: &mut dyn BlockDevice = &mut TestDevice::new(geom, buf.as_mut_ptr(), &mut dirty_bits);
+
+ let mut src_block = [0; BLOCK_SIZE];
+ let mut dst_block = [0; BLOCK_SIZE];
+
+ for i in 0..geom.block_size {
+ src_block[i] = i as u8;
+ }
+
+ // Writing clean blocks should succeed.
+ for i in 0..geom.block_count {
+ assert_ok!(bd.write_block(i, &src_block));
+ }
+
+ // Reading those blocks back should succeed, and the contents should match
+ // the original block.
+ for i in 0..geom.block_count {
+ assert_ok!(bd.read_block(i, &mut dst_block));
+ for j in 0..geom.block_size {
+ assert_eq!(src_block[j], dst_block[j]);
+ }
+ }
+
+ // Erasing blocks should succeed.
+ for i in 0..geom.block_count {
+ assert_ok!(bd.erase_block(i));
+ }
+}
diff --git a/blob_fs/src/utils.rs b/blob_fs/src/utils.rs
new file mode 100644
index 0000000..e0c9814
--- /dev/null
+++ b/blob_fs/src/utils.rs
@@ -0,0 +1,88 @@
+use core::mem;
+use core::slice;
+
+/// Trigger an assertion if "cond" fails in debug builds, return an error in
+/// release builds.
+macro_rules! dcheck {
+ ($cond:expr, $err:expr) => {
+ let cond = $cond;
+ debug_assert!(cond);
+ if !(cond) {
+ return Err($err);
+ }
+ };
+}
+pub(crate) use dcheck;
+
+/// Assert that a Result is_ok(). Why is this not in the standard library?
+#[cfg(test)]
+macro_rules! assert_ok {
+ ($cond:expr) => {
+ let result = $cond;
+ assert!(result.is_ok());
+ };
+}
+#[cfg(test)]
+pub(crate) use assert_ok;
+
+/// Assert that a Result is_err(). Why is this not in the standard library?
+#[cfg(test)]
+macro_rules! assert_err {
+ ($cond:expr) => {
+ let result = $cond;
+ assert!(result.is_err());
+ };
+}
+#[cfg(test)]
+pub(crate) use assert_err;
+
+// FIXME these are quick and dirty hacks to make serialization of POD types
+// easier
+
+pub fn as_unsafe_blob<T: Sized>(p: &T) -> &[u8] {
+ unsafe {
+ let tp: *const T = p as *const T;
+ let pp: *const u8 = tp as *const u8;
+ slice::from_raw_parts(pp, mem::size_of::<T>())
+ }
+}
+
+pub fn as_unsafe_blob_mut<T: Sized>(p: &mut T) -> &mut [u8] {
+ unsafe {
+ let tp: *mut T = p as *mut T;
+ let pp: *mut u8 = tp as *mut u8;
+ slice::from_raw_parts_mut(pp, mem::size_of::<T>())
+ }
+}
+
+pub fn from_unsafe_blob<T: Sized>(p: &[u8]) -> &T {
+ unsafe {
+ let pp: *const u8 = p.as_ptr();
+ let tp: *const T = pp as *const T;
+ return &*tp;
+ }
+}
+
+pub fn from_unsafe_blob_mut<T: Sized>(p: &mut [u8]) -> &mut T {
+ unsafe {
+ let pp: *mut u8 = p.as_mut_ptr();
+ let tp: *mut T = pp as *mut T;
+ return &mut *tp;
+ }
+}
+
+pub fn slice_as_unsafe_blob_mut<T: Sized>(p: &mut [T]) -> &mut [u8] {
+ unsafe {
+ let tp: *mut T = p.as_mut_ptr();
+ let pp: *mut u8 = tp as *mut u8;
+ slice::from_raw_parts_mut(pp, mem::size_of::<T>() * p.len())
+ }
+}
+
+pub fn slice_as_unsafe_blob<T: Sized>(p: &[T]) -> &[u8] {
+ unsafe {
+ let tp: *const T = p.as_ptr();
+ let pp: *const u8 = tp as *const u8;
+ slice::from_raw_parts(pp, mem::size_of::<T>() * p.len())
+ }
+}
diff --git a/board/Cargo.toml b/board/Cargo.toml
index 414bb3c..2d4ec52 100644
--- a/board/Cargo.toml
+++ b/board/Cargo.toml
@@ -10,9 +10,11 @@
rv32i = { path = "../../tock/arch/rv32i" }
capsules = { path = "../../tock/capsules" }
kernel = { path = "../../tock/kernel" }
-matcha = { path = "../chip" }
lowrisc = { path = "../../tock/chips/lowrisc" }
-blob_fs = { path = "../../tock/libraries/blob_fs" }
+
+matcha = { path = "../chip" }
+blob_fs = { path = "../blob_fs" }
+matcha-capsules = { path = "../capsules" }
[features]
# OpenTitan Matcha SoC design can be synthesized or compiled for different targets. A
@@ -22,10 +24,6 @@
#
# OpenTitan Matcha CPU and possibly other components must be configured appropriately
# for a specific target:
-# - fpga_nexysvideo:
-# OpenTitan Matcha SoC design running on Nexys Video Artix-7 FPGA.
-#
# - sim_verilator:
# OpenTitan Matcha SoC design simulated in Verilator.
-fpga_nexysvideo = ["matcha/config_fpga_nexysvideo"]
sim_verilator = ["matcha/config_sim_verilator"]
diff --git a/board/src/main.rs b/board/src/main.rs
index 7a82465..c1dcffb 100644
--- a/board/src/main.rs
+++ b/board/src/main.rs
@@ -8,7 +8,7 @@
#![cfg_attr(not(doc), no_main)]
#![feature(const_in_array_repeat_expressions)]
-use capsules::debug_uart::DebugUart;
+use matcha_capsules::debug_uart::DebugUart;
use capsules::virtual_alarm::{MuxAlarm, VirtualMuxAlarm};
use capsules::virtual_hmac::VirtualMuxHmac;
use kernel::capabilities;
@@ -83,7 +83,7 @@
capsules::alarm::DRIVER_NUM => f(Some(self.alarm)),
capsules::low_level_debug::DRIVER_NUM => f(Some(self.lldb)),
capsules::i2c_master::DRIVER_NUM => f(Some(self.i2c_master)),
- capsules::debug_uart::DRIVER_NUM => f(Some(self.debug_uart)),
+ matcha_capsules::debug_uart::DRIVER_NUM => f(Some(self.debug_uart)),
capsules::storage_manager::DRIVER_NUM => f(Some(self.storage_manager)),
_ => f(None),
}
diff --git a/capsules/Cargo.toml b/capsules/Cargo.toml
new file mode 100644
index 0000000..39159ad
--- /dev/null
+++ b/capsules/Cargo.toml
@@ -0,0 +1,10 @@
+[package]
+name = "matcha-capsules"
+version = "0.1.0"
+edition = "2018"
+
+[dependencies]
+kernel = { path = "../../tock/kernel" }
+enum_primitive = { path = "../../tock/libraries/enum_primitive" }
+blob_fs = { path = "../blob_fs" }
+
diff --git a/capsules/rust-toolchain b/capsules/rust-toolchain
new file mode 100644
index 0000000..b18a3f3
--- /dev/null
+++ b/capsules/rust-toolchain
@@ -0,0 +1 @@
+nightly-2020-06-03
diff --git a/capsules/src/debug_uart.rs b/capsules/src/debug_uart.rs
new file mode 100644
index 0000000..a2b9035
--- /dev/null
+++ b/capsules/src/debug_uart.rs
@@ -0,0 +1,74 @@
+//! Trivial capsule to provide minimal debug printing support for userspace.
+//!
+//! The default TockOS Console class does heavier-weight stuff with UART
+//! multiplexing and asynchronous sending and such, which is usually _not_ what
+//! is wanted in a debug serial port. This capsule implements only one command,
+//! which dumps data from an allow'ed buffer directly to a memory-mapped UART
+//! peripheral.
+//!
+//! Instantiation:
+//! let debug_uart = static_init!(
+//! DebugUart,
+//! DebugUart {
+//! tx_busy: StaticRef::new(TX_BUSY_ADDR as *const ReadOnly<u32>),
+//! tx_port: StaticRef::new(TX_PORT_ADDR as *const WriteOnly<u32>),
+//! app_data_grant: board_kernel.create_grant(&memory_allocation_cap)
+//! }
+//! );
+//!
+//! where TX_BUSY_ADDR is a register whose low bit is 1 if the UART's fifo is
+//! full and TX_PORT_ADDR is the register we write bytes to.
+//!
+//! Usage - send buffer directly to UART:
+//! let driver_num = capsules::debug_uart::DRIVER_NUM;
+//! let allow = syscalls::allow(driver_num, 0, &mut buffer);
+//! let result = syscalls::command(driver_num, 0, buffer.len(), 0);
+//! drop(allow);
+
+//use crate::driver;
+use kernel::common::registers::{ReadOnly, WriteOnly};
+use kernel::common::StaticRef;
+use kernel::{AppId, AppSlice, Callback, Driver, Grant, ReturnCode, Shared};
+
+//pub const DRIVER_NUM: usize = driver::NUM::DebugUart as usize;
+ pub const DRIVER_NUM: usize = 0x00009;
+
+#[derive(Default)]
+pub struct AppData {
+ pub buffer: Option<AppSlice<Shared, u8>>,
+}
+
+pub struct DebugUart {
+ pub tx_busy: StaticRef<ReadOnly<u32>>,
+ pub tx_port: StaticRef<WriteOnly<u32>>,
+ pub app_data_grant: Grant<AppData>,
+}
+
+impl Driver for DebugUart {
+ fn subscribe(&self, _: usize, _: Option<Callback>, _: AppId) -> ReturnCode {
+ ReturnCode::EINVAL
+ }
+
+ fn command(&self, minor_num: usize, r2: usize, _: usize, app_id: AppId) -> ReturnCode {
+ if minor_num != 0 {
+ return ReturnCode::EINVAL;
+ }
+
+ let _ = self.app_data_grant.enter(app_id, |app_data, _| {
+ if let Some(buf) = &app_data.buffer {
+ for i in 0..r2 {
+ while (self.tx_busy.get() & 1) != 0 {}
+ self.tx_port.set(buf.as_ref()[i] as u32);
+ }
+ }
+ });
+ return ReturnCode::SUCCESS;
+ }
+
+ fn allow(&self, app_id: AppId, _: usize, slice: Option<AppSlice<Shared, u8>>) -> ReturnCode {
+ let _ = self.app_data_grant.enter(app_id, |app_data, _| {
+ app_data.buffer = slice;
+ });
+ return ReturnCode::SUCCESS;
+ }
+}
diff --git a/capsules/src/lib.rs b/capsules/src/lib.rs
new file mode 100644
index 0000000..78117c5
--- /dev/null
+++ b/capsules/src/lib.rs
@@ -0,0 +1,5 @@
+#![feature(const_fn)]
+#![forbid(unsafe_code)]
+#![no_std]
+
+pub mod debug_uart;
diff --git a/chip/src/chip_config.rs b/chip/src/chip_config.rs
index 9a26ff9..c4ec321 100644
--- a/chip/src/chip_config.rs
+++ b/chip/src/chip_config.rs
@@ -24,18 +24,6 @@
pub uart_baudrate: u32,
}
-/// Config for running Matcha on an FPGA. Also the default configuration.
-#[cfg(any(
- feature = "config_fpga_nexysvideo",
- not(feature = "config_disable_default")
-))]
-pub const CONFIG: Config = Config {
- name: "fpga_nexysvideo",
- cpu_freq: 10_000_000,
- peripheral_freq: 2_500_000,
- uart_baudrate: 115200,
-};
-
/// Config for running Matcha in a verilog simulator.
#[cfg(feature = "config_sim_verilator")]
pub const CONFIG: Config = Config {