//! A wrapper around BlockDevice that adds helper methods for navigating a
//! BlobFS filesystem.
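//!
//! # Example
//!
//! A minimal usage sketch, assuming some BlockDevice implementation (the
//! `RamDisk` below is hypothetical) and that `Superblock` implements
//! `Default`:
//!
//! ```ignore
//! let mut disk = RamDisk::new();
//! let mut dev = BlobDevice::new(&mut disk);
//! dev.format()?;
//! let mut superblock = Superblock::default();
//! dev.read_superblock(&mut superblock)?;
//! ```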
use crate::bit_vector::*;
use crate::block_device::*;
use crate::errors::*;
use crate::structs::*;
use crate::utils::*;
use core::mem;
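/// A thin view over a raw block device; all BlobFS metadata and blob data
/// accesses go through the wrapped `bd`.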
pub struct BlobDevice<'a> {
pub bd: &'a mut dyn BlockDevice,
}
impl<'a> BlobDevice<'a> {
const MAGIC_0: u64 = 0xac2153479e694d21;
const MAGIC_1: u64 = 0x985000d4d4d3d314;
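/// Wrap an existing block device.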
pub fn new(bd: &'a mut dyn BlockDevice) -> Self {
return BlobDevice { bd };
}
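/// Placeholder for device-level consistency checks; currently always succeeds.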
pub fn sanity_check(&self) -> Result<(), BFSErr> {
return Ok(());
}
// Base-address helpers. The on-device layout is fixed: block 0 holds the
// superblock, block 1 the blockmap, block 2 the nodemap, block 3 the journal,
// and data blocks start at block 4.
pub fn superblock_base(&self) -> usize {
return self.bd.geom().block_size * 0;
}
pub fn blockmap_base(&self) -> usize {
return self.bd.geom().block_size * 1;
}
pub fn nodemap_base(&self) -> usize {
return self.bd.geom().block_size * 2;
}
pub fn journal_base(&self) -> usize {
return self.bd.geom().block_size * 3;
}
pub fn block_base(&self) -> usize {
return self.bd.geom().block_size * 4;
}
/// Format a whole device to support BlobFS.
pub fn format(&mut self) -> Result<(), BFSErr> {
// Erase the whole device.
for i in 0..self.bd.geom().block_count {
self.bd.erase_block(i)?;
}
// Write the initial superblock.
self.format_superblock()?;
// Write the initial blockmap.
self.format_blockmap()?;
return Ok(());
}
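/// Read the superblock from the start of the device and verify its magic numbers.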
pub fn read_superblock(&self, superblock: &mut Superblock) -> Result<(), BFSErr> {
let blob = as_unsafe_blob_mut(superblock);
self.bd.read_range(0, blob)?;
// Sanity check the superblock
dcheck!(superblock.magic0 == BlobDevice::MAGIC_0, BFSErr::Corrupt);
dcheck!(superblock.magic1 == BlobDevice::MAGIC_1, BFSErr::Corrupt);
return Ok(());
}
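/// Verify the superblock's magic numbers, then write it to the start of the device.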
pub fn write_superblock(&mut self, superblock: &Superblock) -> Result<(), BFSErr> {
// Sanity check the superblock
dcheck!(superblock.magic0 == BlobDevice::MAGIC_0, BFSErr::Corrupt);
dcheck!(superblock.magic1 == BlobDevice::MAGIC_1, BFSErr::Corrupt);
let blob = as_unsafe_blob(superblock);
self.bd.write_range(0, blob)?;
return Ok(());
}
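/// Build and write the initial superblock. One block is reserved for each of
/// the superblock, blockmap, nodemap, and journal; the remainder of the device
/// is counted as data blocks.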
pub fn format_superblock(&mut self) -> Result<(), BFSErr> {
let block_size = self.bd.geom().block_size;
let block_count = self.bd.geom().block_count;
let superblock_count = 1;
let bitmap_block_count = 1;
let node_block_count = 1;
let journal_block_count = 1;
let data_block_count = block_count
- superblock_count
- bitmap_block_count
- node_block_count
- journal_block_count;
let inodes_per_block = block_size / mem::size_of::<Inode>();
// Write the superblock to offset 0
let superblock = Superblock {
magic0: BlobDevice::MAGIC_0,
magic1: BlobDevice::MAGIC_1,
version: 0,
flags: 0,
block_size: block_size as u32,
unused: 0,
data_block_count: data_block_count as u64,
journal_block_count: journal_block_count as u64,
inode_count: (inodes_per_block * node_block_count) as u64,
alloc_block_count: 0,
alloc_inode_count: 0,
};
self.write_superblock(&superblock)?;
return Ok(());
}
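/// Write the initial allocation bitmap; the 0xF0 pattern pre-marks four
/// blocks as allocated.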
pub fn format_blockmap(&mut self) -> Result<(), BFSErr> {
let bits: [u8; 1] = [0xF0];
self.bd.write_range(self.blockmap_base(), &bits)?;
return Ok(());
}
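/// Read the on-device allocation bitmap into `bitmap`.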
pub fn read_blockmap(&self, bitmap: &mut BitVector) -> Result<(), BFSErr> {
self.bd.read_range(self.blockmap_base(), slice_as_unsafe_blob_mut(bitmap.bits))?;
return Ok(());
}
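/// Write `bitmap` back to the on-device allocation bitmap.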
pub fn write_blockmap(&mut self, bitmap: &mut BitVector) -> Result<(), BFSErr> {
self.bd.write_range(self.blockmap_base(), slice_as_unsafe_blob(bitmap.bits))?;
return Ok(());
}
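/// Read only the header of the node at `index`. Node slots are `Inode`-sized,
/// and the header sits at the start of the slot.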
pub fn read_node_header(&self, index: usize, node: &mut NodeHeader) -> Result<(), BFSErr> {
let base = self.nodemap_base();
let offset = index * mem::size_of::<Inode>();
let blob = as_unsafe_blob_mut(node);
self.bd.read_range(base + offset, blob)?;
return Ok(());
}
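/// Read the full inode at `index`.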
pub fn read_inode(&self, index: usize, inode: &mut Inode) -> Result<(), BFSErr> {
let base = self.nodemap_base();
let offset = index * mem::size_of::<Inode>();
let blob = as_unsafe_blob_mut(inode);
self.bd.read_range(base + offset, blob)?;
return Ok(());
}
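/// Write `inode` to slot `index`. The node header must have `FLAG_INODE` set.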
pub fn write_inode(&mut self, index: usize, inode: &Inode) -> Result<(), BFSErr> {
dcheck!((inode.header.flags & NodeHeader::FLAG_INODE) != 0, BFSErr::InvalidArg);
let base = self.nodemap_base();
let offset = index * mem::size_of::<Inode>();
let blob = as_unsafe_blob(inode);
self.bd.write_range(base + offset, blob)?;
return Ok(());
}
/// Invalidate an inode by zeroing out its header. The inode cannot be reused
/// until the block containing it is erased.
pub fn invalidate_inode(&mut self, index: usize) -> Result<(), BFSErr> {
let base = self.nodemap_base();
let offset = index * mem::size_of::<Inode>();
let blob = [0; mem::size_of::<NodeHeader>()];
self.bd.overwrite_range(base + offset, &blob)?;
return Ok(());
}
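/// Number of data blocks needed to hold `blob`, rounded up to a whole block.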
pub fn blob_size_in_blocks(&self, blob: &[u8]) -> u16 {
let block_size = self.bd.geom().block_size;
let result = (blob.len() + block_size - 1) / block_size;
assert!(result < u16::MAX as usize);
return result as u16;
}
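/// Read the blob stored in the blocks covered by `extent` into `blob_out`,
/// one block at a time.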
pub fn read_blob(&self, extent: Extent, blob_out: &mut [u8]) -> Result<(), BFSErr> {
let block_size = self.bd.geom().block_size;
let mut cursor = extent.offset();
for chunk in blob_out.chunks_mut(block_size) {
self.bd.read_block(cursor, chunk)?;
cursor = cursor + 1;
}
return Ok(());
}
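/// Write `blob_in` to the blocks covered by `extent`, one block at a time.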
pub fn write_blob(&mut self, extent: Extent, blob_in: &[u8]) -> Result<(), BFSErr> {
let block_size = self.bd.geom().block_size;
let mut cursor = extent.offset();
for chunk in blob_in.chunks(block_size) {
self.bd.write_block(cursor, chunk)?;
cursor = cursor + 1;
}
return Ok(());
}
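/// Erase every block covered by `extent`.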
pub fn delete_blob(&mut self, extent: Extent) -> Result<(), BFSErr> {
let offset = extent.offset();
for i in 0..extent.size as usize {
self.bd.erase_block(offset + i)?;
}
return Ok(());
}
}