// Copyright 2023, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! This library provides APIs for reading/writing block devices with arbitrary alignment and
//! ranges, as well as parsing and manipulating GPT.

#![cfg_attr(not(test), no_std)]
#![allow(async_fn_in_trait)]

use core::{
    cell::RefMut,
    cmp::{max, min},
    mem::{size_of_val, MaybeUninit},
    ops::DerefMut,
    slice::SliceIndex,
};
use liberror::{Error, Result};
use libutils::aligned_subslice;
use safemath::SafeNum;

// Selective export of submodule types.
mod gpt;
pub use gpt::{
    gpt_buffer_size, new_gpt_max, new_gpt_n, Gpt, GptBuilder, GptEntry, GptHeader, GptLoadBufferN,
    GptMax, GptN, GptSyncResult, Partition, PartitionIterator, GPT_GUID_LEN, GPT_MAGIC,
    GPT_NAME_LEN_U16,
};

mod algorithm;
pub use algorithm::{read_async, write_async};

pub mod ram_block;
pub use ram_block::RamBlockIo;

/// `BlockInfo` contains information for a block device.
#[derive(Clone, Copy, Debug)]
pub struct BlockInfo {
    /// Native block size of the block device.
    pub block_size: u64,
    /// Total number of blocks of the block device.
    pub num_blocks: u64,
    /// The alignment requirement for IO buffers. For example, many block device drivers use DMA
    /// for data transfer, which typically requires that the buffer address for DMA be aligned to
    /// 16/32/64 bytes etc. If the block device has no alignment requirement, it can return 1.
    pub alignment: u64,
}

impl BlockInfo {
    /// Computes the total size in bytes of the block device.
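    ///
    /// # Example
    ///
    /// A minimal sketch of the arithmetic (not compiled as a doctest):
    ///
    /// ```ignore
    /// let info = BlockInfo { block_size: 512, num_blocks: 1024, alignment: 64 };
    /// assert_eq!(info.total_size().unwrap(), 512 * 1024);
    /// ```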
    pub fn total_size(&self) -> Result<u64> {
        Ok((SafeNum::from(self.block_size) * self.num_blocks).try_into()?)
    }
}

/// `BlockIo` provides interfaces for reading and writing a block storage medium.
///
/// # Safety
///
/// The `read_blocks` method must guarantee that `out` is fully initialized on success; otherwise
/// an error must be returned. This is necessary because unsafe code that uses `BlockIo` assumes
/// `out` to be fully initialized and works with it as with `&mut [u8]`.
pub unsafe trait BlockIo {
    /// Returns the `BlockInfo` for this block device.
    fn info(&mut self) -> BlockInfo;

    /// Reads blocks of data from the block device.
    ///
    /// # Args
    ///
    /// * `blk_offset`: Offset in number of blocks.
    ///
    /// * `out`: Buffer to store the read data. Callers of this method ensure that it is
    ///   aligned according to `alignment()` and that `out.len()` is a multiple of `block_size()`.
    ///
    /// # Returns
    ///
    /// Returns `Ok(())` if exactly `out.len()` bytes are read. Otherwise returns `Err()`.
    async fn read_blocks(
        &mut self,
        blk_offset: u64,
        out: &mut (impl SliceMaybeUninit + ?Sized),
    ) -> Result<()>;

    /// Writes blocks of data to the block device.
    ///
    /// # Args
    ///
    /// * `blk_offset`: Offset in number of blocks.
    ///
    /// * `data`: Data to write. Callers of this method ensure that it is aligned according to
    ///   `alignment()` and that `data.len()` is a multiple of `block_size()`.
    ///
    /// # Returns
    ///
    /// Returns `Ok(())` if exactly `data.len()` bytes are written. Otherwise returns `Err()`.
    async fn write_blocks(&mut self, blk_offset: u64, data: &mut [u8]) -> Result<()>;
}

// SAFETY:
// The `read_blocks` method has the same guarantees as the `BlockIo` implementation of the
// referenced type `T`, which guarantees that `out` is fully initialized on success.
unsafe impl<T: DerefMut> BlockIo for T
where
    T::Target: BlockIo,
{
    fn info(&mut self) -> BlockInfo {
        self.deref_mut().info()
    }

    async fn read_blocks(
        &mut self,
        blk_offset: u64,
        out: &mut (impl SliceMaybeUninit + ?Sized),
    ) -> Result<()> {
        self.deref_mut().read_blocks(blk_offset, out).await
    }

    async fn write_blocks(&mut self, blk_offset: u64, data: &mut [u8]) -> Result<()> {
        self.deref_mut().write_blocks(blk_offset, data).await
    }
}

/// An implementation of `BlockIo` where all required methods are `unimplemented!()`.
pub struct BlockIoNull {}

// SAFETY:
// `read_blocks` never succeeds since it is not implemented and will panic.
unsafe impl BlockIo for BlockIoNull {
    fn info(&mut self) -> BlockInfo {
        unimplemented!();
    }

    async fn read_blocks(
        &mut self,
        _: u64,
        _: &mut (impl SliceMaybeUninit + ?Sized),
    ) -> Result<()> {
        unimplemented!();
    }

    async fn write_blocks(&mut self, _: u64, _: &mut [u8]) -> Result<()> {
        unimplemented!();
    }
}

/// Checks if `value` is aligned to (a multiple of) `alignment`.
///
/// It can fail if the remainder calculation fails the overflow check.
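///
/// # Example
///
/// A minimal sketch (not compiled as a doctest):
///
/// ```ignore
/// assert!(is_aligned(1024u64, 512u64).unwrap());
/// assert!(!is_aligned(1025u64, 512u64).unwrap());
/// ```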
pub fn is_aligned(value: impl Into<SafeNum>, alignment: impl Into<SafeNum>) -> Result<bool> {
    Ok(u64::try_from(value.into() % alignment.into())? == 0)
}

/// Checks if the `buffer` address is aligned to `alignment`.
///
/// It can fail if the remainder calculation fails the overflow check.
pub fn is_buffer_aligned<T>(buffer: &[T], alignment: u64) -> Result<bool> {
    is_aligned(buffer.as_ptr() as usize, alignment)
}

/// Checks the read/write range and calculates the offset in number of blocks.
fn check_range<T>(info: BlockInfo, offset: u64, buffer: &[T]) -> Result<SafeNum> {
    let offset: SafeNum = offset.into();
    let block_size: SafeNum = info.block_size.into();
    debug_assert!(is_aligned(offset, block_size)?, "{:?}, {:?}", offset, block_size);
    debug_assert!(is_aligned(size_of_val(buffer), block_size)?);
    debug_assert!(is_buffer_aligned(buffer, info.alignment)?);
    let blk_offset = offset / block_size;
    let blk_count = SafeNum::from(size_of_val(buffer)) / block_size;
    let end: u64 = (blk_offset + blk_count).try_into()?;
    match end <= info.num_blocks {
        true => Ok(blk_offset),
        false => Err(Error::BadIndex(end as usize)),
    }
}

/// Computes the required scratch size for initializing a [Disk].
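///
/// # Example
///
/// A minimal sketch of the size calculation, using the RAM-backed [RamBlockIo] from this crate
/// (not compiled as a doctest):
///
/// ```ignore
/// // For `block_size` = 512 and `alignment` = 64:
/// // (64 - 1) * 2 + 512 = 638 bytes of scratch are required.
/// let mut io = RamBlockIo::new(512, 64, vec![0u8; 512]);
/// assert_eq!(scratch_size(&mut io).unwrap(), 638);
/// ```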
pub fn scratch_size(io: &mut impl BlockIo) -> Result<usize> {
    let info = io.info();
    let block_alignment = match info.block_size {
        1 => 0,
        v => v,
    };
    Ok(((SafeNum::from(info.alignment) - 1) * 2 + block_alignment).try_into()?)
}

/// `Disk` contains a [BlockIo] and a scratch buffer and provides APIs for reading/writing with
/// arbitrary ranges and alignment.
pub struct Disk<T, S> {
    io: T,
    scratch: S,
}

impl<T: BlockIo, S: DerefMut<Target = [u8]>> Disk<T, S> {
    /// Creates a new instance with the given IO and scratch buffer.
    ///
    /// * The scratch buffer is internally used for handling partial block read/write and
    ///   unaligned input/output user buffers.
    ///
    /// * The necessary size for the scratch buffer depends on `BlockInfo::alignment` and
    ///   `BlockInfo::block_size`. It can be computed using the helper API `scratch_size()`. If
    ///   the block device has no alignment requirement, i.e. both alignment and block size are 1,
    ///   the total required scratch size is 0.
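    ///
    /// # Example
    ///
    /// A minimal sketch using the RAM-backed [RamBlockIo] from this crate (not compiled as a
    /// doctest):
    ///
    /// ```ignore
    /// let mut io = RamBlockIo::new(512, 64, vec![0u8; 512 * 32]);
    /// let scratch = vec![0u8; scratch_size(&mut io).unwrap()];
    /// let mut disk = Disk::new(io, scratch).unwrap();
    /// ```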
    pub fn new(mut io: T, scratch: S) -> Result<Self> {
        let sz = scratch_size(&mut io)?;
        match scratch.len() < sz {
            true => Err(Error::BufferTooSmall(Some(sz))),
            _ => Ok(Self { io, scratch }),
        }
    }

    /// Same as `Self::new()` but allocates the necessary scratch buffer.
    ///
    /// `S` must implement `Extend<u8>` and `Default`. It should typically be a vector-like type.
    ///
    /// Allocation is done by extending `S` one element at a time. In most cases, we don't expect
    /// block size or alignment to be large values, and this is only done once, so it should be
    /// low cost. However, if that is not the case, it is recommended to use `Self::new()` with a
    /// pre-allocated scratch buffer.
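    ///
    /// # Example
    ///
    /// A minimal sketch (not compiled as a doctest):
    ///
    /// ```ignore
    /// let io = RamBlockIo::new(512, 64, vec![0u8; 512 * 32]);
    /// // `Vec<u8>` implements both `Extend<u8>` and `Default`.
    /// let mut disk: Disk<_, Vec<u8>> = Disk::new_alloc_scratch(io).unwrap();
    /// ```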
    pub fn new_alloc_scratch(mut io: T) -> Result<Self>
    where
        S: Extend<u8> + Default,
    {
        let mut scratch = S::default();
        // Extends the scratch buffer to the required size.
        // Can call `extend_reserve()` first once it becomes stable.
        (0..max(scratch.len(), scratch_size(&mut io)?) - scratch.len())
            .for_each(|_| scratch.extend([0u8]));
        Self::new(io, scratch)
    }

    /// Creates a `Disk<&mut T, &mut [u8]>` instance that borrows the internal fields.
    pub fn as_borrowed(&mut self) -> Disk<&mut T, &mut [u8]> {
        Disk::new(&mut self.io, &mut self.scratch[..]).unwrap()
    }

    /// Gets the [BlockInfo].
    pub fn block_info(&mut self) -> BlockInfo {
        self.io.info()
    }

    /// Gets the underlying [BlockIo] implementation.
    pub fn io(&mut self) -> &mut T {
        &mut self.io
    }

    /// Reads data from the block device.
    ///
    /// # Args
    ///
    /// * `offset`: Offset in number of bytes.
    /// * `out`: Buffer to store the read data.
    ///
    /// # Returns
    ///
    /// * Returns success when exactly `out.len()` bytes are read.
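    ///
    /// # Example
    ///
    /// A minimal sketch, assuming `disk` is an initialized [Disk] and using `block_on` from the
    /// `gbl_async` crate as in this crate's tests (not compiled as a doctest):
    ///
    /// ```ignore
    /// let mut out = vec![0u8; 1024];
    /// // Reads 1024 bytes starting at byte offset 1; neither needs any alignment.
    /// block_on(disk.read(1, &mut out[..])).unwrap();
    /// ```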
    pub async fn read(
        &mut self,
        offset: u64,
        out: &mut (impl SliceMaybeUninit + ?Sized),
    ) -> Result<()> {
        read_async(&mut self.io, offset, out, &mut self.scratch).await
    }

    /// Writes data to the device.
    ///
    /// # Args
    ///
    /// * `offset`: Offset in number of bytes.
    /// * `data`: Data to write.
    ///
    /// # Returns
    ///
    /// * Returns success when exactly `data.len()` bytes are written.
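    ///
    /// # Example
    ///
    /// A minimal sketch, assuming `disk` is an initialized [Disk] (not compiled as a doctest):
    ///
    /// ```ignore
    /// let mut data = vec![0xaau8; 1024];
    /// // Writes 1024 bytes starting at byte offset 1; neither needs any alignment.
    /// block_on(disk.write(1, &mut data[..])).unwrap();
    /// ```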
    pub async fn write(&mut self, offset: u64, data: &mut [u8]) -> Result<()> {
        write_async(&mut self.io, offset, data, &mut self.scratch).await
    }

    /// Fills a disk range with the given byte value.
    ///
    /// # Args
    ///
    /// * `offset`: Offset in number of bytes.
    /// * `size`: Number of bytes to fill.
    /// * `val`: Fill value.
    /// * `scratch`: A scratch buffer that will be used for writing `val` in batches.
    ///
    /// # Returns
    ///
    /// * Returns `Err(Error::InvalidInput)` if the size of `scratch` is 0.
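    ///
    /// # Example
    ///
    /// A minimal sketch, assuming `disk` is an initialized [Disk] (not compiled as a doctest):
    ///
    /// ```ignore
    /// // Fills the first 4096 bytes of the disk with 0xff, at most 1024 bytes per batch.
    /// let mut scratch = vec![0u8; 1024];
    /// block_on(disk.fill(0, 4096, 0xff, &mut scratch)).unwrap();
    /// ```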
    pub async fn fill(
        &mut self,
        mut offset: u64,
        size: u64,
        val: u8,
        scratch: &mut [u8],
    ) -> Result<()> {
        if scratch.is_empty() {
            return Err(Error::InvalidInput);
        }
        let blk_sz = usize::try_from(self.block_info().block_size)?;
        // Optimizes by trying to get an aligned and multi-block-size buffer.
        let buf = match aligned_subslice(scratch, self.block_info().alignment) {
            Ok(v) => match v.len() / blk_sz {
                b if b > 0 => &mut v[..b * blk_sz],
                _ => v,
            },
            _ => scratch,
        };
        let sz = min(size, buf.len().try_into()?);
        buf[..usize::try_from(sz).unwrap()].fill(val);
        let end: u64 = (SafeNum::from(offset) + size).try_into()?;
        while offset < end {
            let to_write = min(sz, end - offset);
            self.write(offset, &mut buf[..usize::try_from(to_write).unwrap()]).await?;
            offset += to_write;
        }
        Ok(())
    }

    /// Loads and syncs GPT from a block device.
    ///
    /// The API validates and restores the primary/secondary GPT headers.
    ///
    /// # Returns
    ///
    /// * Returns `Ok(sync_result)` if disk IO is successful, where `sync_result` contains the GPT
    ///   verification and restoration result.
    /// * Returns `Err()` if disk IO encounters errors.
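    ///
    /// # Example
    ///
    /// A minimal sketch, assuming `disk` is an initialized [Disk] and `gpt` is a [Gpt] instance
    /// constructed via the `gpt` submodule (its construction API is not shown in this file; not
    /// compiled as a doctest):
    ///
    /// ```ignore
    /// let sync_result = block_on(disk.sync_gpt(&mut gpt)).unwrap();
    /// ```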
    pub async fn sync_gpt(
        &mut self,
        gpt: &mut Gpt<impl DerefMut<Target = [u8]>>,
    ) -> Result<GptSyncResult> {
        gpt.load_and_sync(self).await
    }

    /// Updates GPT on the block device and syncs the primary and secondary GPT.
    ///
    /// # Args
    ///
    /// * `mbr_primary`: A buffer containing the MBR block, primary GPT header and entries.
    /// * `resize`: If set to true, the method updates the last partition to cover the rest of the
    ///   storage.
    /// * `gpt`: The GPT to update.
    ///
    /// # Returns
    ///
    /// * Returns `Ok(())` if the new GPT is valid and the device is updated and synced
    ///   successfully.
    pub async fn update_gpt(
        &mut self,
        mbr_primary: &mut [u8],
        resize: bool,
        gpt: &mut Gpt<impl DerefMut<Target = [u8]>>,
    ) -> Result<()> {
        gpt::update_gpt(self, mbr_primary, resize, gpt).await
    }

    /// Erases GPT if the disk has one.
    ///
    /// The method first performs a GPT sync and makes sure that all valid entries are wiped.
    ///
    /// # Args
    ///
    /// * `gpt`: An instance of GPT.
    pub async fn erase_gpt(&mut self, gpt: &mut Gpt<impl DerefMut<Target = [u8]>>) -> Result<()> {
        gpt::erase_gpt(self, gpt).await
    }

    /// Reads a GPT partition on a block device.
    ///
    /// # Args
    ///
    /// * `gpt`: A [Gpt] initialized with `Self::sync_gpt()`.
    /// * `part_name`: Name of the partition.
    /// * `offset`: Offset in number of bytes into the partition.
    /// * `out`: Buffer to store the read data.
    ///
    /// # Returns
    ///
    /// Returns success when exactly `out.len()` bytes are read successfully.
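    ///
    /// # Example
    ///
    /// A minimal sketch, assuming `gpt` has been synced via `Self::sync_gpt()`; the partition
    /// name "boot_a" is hypothetical (not compiled as a doctest):
    ///
    /// ```ignore
    /// let mut out = vec![0u8; 1024];
    /// block_on(disk.read_gpt_partition(&mut gpt, "boot_a", 0, &mut out[..])).unwrap();
    /// ```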
    pub async fn read_gpt_partition(
        &mut self,
        gpt: &mut Gpt<impl DerefMut<Target = [u8]>>,
        part_name: &str,
        offset: u64,
        out: &mut (impl SliceMaybeUninit + ?Sized),
    ) -> Result<()> {
        let offset = gpt.check_range(part_name, offset, out.len())?;
        self.read(offset, out).await
    }

    /// Writes a GPT partition on a block device.
    ///
    /// # Args
    ///
    /// * `gpt`: A [Gpt] initialized with `Self::sync_gpt()`.
    /// * `part_name`: Name of the partition.
    /// * `offset`: Offset in number of bytes into the partition.
    /// * `data`: Data to write.
    ///
    /// # Returns
    ///
    /// Returns success when exactly `data.len()` bytes are written successfully.
    pub async fn write_gpt_partition(
        &mut self,
        gpt: &mut Gpt<impl DerefMut<Target = [u8]>>,
        part_name: &str,
        offset: u64,
        data: &mut [u8],
    ) -> Result<()> {
        let offset = gpt.check_range(part_name, offset, data.len())?;
        self.write(offset, data).await
    }
}

impl<'a, T: BlockIo> Disk<RefMut<'a, T>, RefMut<'a, [u8]>> {
    /// Converts a `RefMut<Disk<T, S>>` to `Disk<RefMut<T>, RefMut<[u8]>>`. The scratch buffer
    /// generic type is eliminated in the return.
    pub fn from_ref_mut(val: RefMut<'a, Disk<T, impl DerefMut<Target = [u8]>>>) -> Self {
        let (io, scratch) = RefMut::map_split(val, |v| (&mut v.io, &mut v.scratch[..]));
        Disk::new(io, scratch).unwrap()
    }
}

impl<T, S> Disk<RamBlockIo<T>, S>
where
    T: DerefMut<Target = [u8]>,
    S: DerefMut<Target = [u8]> + Extend<u8> + Default,
{
    /// Creates a new RAM disk instance with an allocated scratch buffer.
    pub fn new_ram_alloc(block_size: u64, alignment: u64, storage: T) -> Result<Self> {
        let ram_blk = RamBlockIo::new(block_size, alignment, storage);
        Self::new_alloc_scratch(ram_blk)
    }
}

/// Helper trait to implement common logic working with `MaybeUninit` slices.
/// Implemented for `[u8]` and `[MaybeUninit<u8>]`.
///
/// Read functions treat the buffer as not initialized using this trait.
// `AsRef`/`AsMut`-style implementations are added here directly, since it is not possible to
// implement a trait from another crate for this trait. It is possible to implement such a trait
// for a `dyn` object of the local trait, but that introduces other issues with lifetimes and
// casting boilerplate.
//
// Alternatively, we considered using a wrapper type, which works but requires an `into()` call
// either at the call site or inside functions if they accept `impl Into<Wrapper>`.
// Using a trait seems cleaner and potentially more efficient.
pub trait SliceMaybeUninit {
    /// Get the `&[MaybeUninit<u8>]` representation.
    fn as_ref(&self) -> &[MaybeUninit<u8>];

    // `AsMut`-style implementation.
    /// Get the `&mut [MaybeUninit<u8>]` representation.
    fn as_mut(&mut self) -> &mut [MaybeUninit<u8>];

    /// Get the slice length.
    fn len(&self) -> usize {
        self.as_ref().len()
    }

    /// Returns a reference to an element or subslice, or an error if the index is out of bounds.
    fn get<I>(&mut self, index: I) -> Result<&<I>::Output>
    where
        I: SliceIndex<[MaybeUninit<u8>]>,
    {
        self.as_ref().get(index).ok_or(Error::BufferTooSmall(None))
    }

    /// Returns a mutable reference to an element or subslice, or an error if the index is out of
    /// bounds.
    fn get_mut<I>(&mut self, index: I) -> Result<&mut <I>::Output>
    where
        I: SliceIndex<[MaybeUninit<u8>]>,
    {
        self.as_mut().get_mut(index).ok_or(Error::BufferTooSmall(None))
    }

    /// Clone from the given slice.
    fn clone_from_slice(&mut self, src: &[u8]) {
        self.as_mut().clone_from_slice(as_uninit(src))
    }
}

impl SliceMaybeUninit for [u8] {
    fn as_ref(&self) -> &[MaybeUninit<u8>] {
        as_uninit(self)
    }
    fn as_mut(&mut self) -> &mut [MaybeUninit<u8>] {
        as_uninit_mut(self)
    }
}

impl SliceMaybeUninit for [MaybeUninit<u8>] {
    fn as_ref(&self) -> &[MaybeUninit<u8>] {
        self
    }
    fn as_mut(&mut self) -> &mut [MaybeUninit<u8>] {
        self
    }
}

/// Presents an initialized `&mut [u8]` buffer as `&mut [MaybeUninit<u8>]`.
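///
/// # Example
///
/// A minimal sketch (not compiled as a doctest):
///
/// ```ignore
/// let mut buf = [0u8; 4];
/// let uninit: &mut [MaybeUninit<u8>] = as_uninit_mut(&mut buf);
/// assert_eq!(uninit.len(), 4);
/// ```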
pub fn as_uninit_mut(buf: &mut [u8]) -> &mut [MaybeUninit<u8>] {
    // SAFETY:
    // `MaybeUninit<u8>` has the same size and alignment as `u8`.
    // `buf` is a valid pointer to an initialized u8 slice of size `buf.len()`.
    unsafe { core::slice::from_raw_parts_mut(buf.as_mut_ptr() as *mut MaybeUninit<u8>, buf.len()) }
}

/// Presents an initialized `&[u8]` buffer as `&[MaybeUninit<u8>]`.
pub fn as_uninit(buf: &[u8]) -> &[MaybeUninit<u8>] {
    // SAFETY:
    // `MaybeUninit<u8>` has the same size and alignment as `u8`.
    // `buf` is a valid pointer to an initialized u8 slice of size `buf.len()`.
    unsafe { core::slice::from_raw_parts(buf.as_ptr() as *const MaybeUninit<u8>, buf.len()) }
}

#[cfg(test)]
mod test {
    use super::*;
    use gbl_async::block_on;
    use safemath::SafeNum;

    #[derive(Debug)]
    struct TestCase {
        rw_offset: u64,
        rw_size: u64,
        misalignment: u64,
        alignment: u64,
        block_size: u64,
        storage_size: u64,
    }

    impl TestCase {
        fn new(
            rw_offset: u64,
            rw_size: u64,
            misalignment: u64,
            alignment: u64,
            block_size: u64,
            storage_size: u64,
        ) -> Self {
            Self { rw_offset, rw_size, misalignment, alignment, block_size, storage_size }
        }
    }

    // Helper object for allocating an aligned buffer.
    struct AlignedBuffer {
        buffer: Vec<u8>,
        alignment: u64,
        size: u64,
    }

    impl AlignedBuffer {
        pub fn new(alignment: u64, size: u64) -> Self {
            let aligned_size = (SafeNum::from(size) + alignment).try_into().unwrap();
            let buffer = vec![0u8; aligned_size];
            Self { buffer, alignment, size }
        }

        pub fn get(&mut self) -> &mut [u8] {
            let addr = SafeNum::from(self.buffer.as_ptr() as usize);
            let aligned_start = addr.round_up(self.alignment) - addr;
            &mut self.buffer
                [aligned_start.try_into().unwrap()..(aligned_start + self.size).try_into().unwrap()]
        }
    }

    /// Upper bound on the number of `read_blocks()/write_blocks()` calls by `Disk::read()` and
    /// `Disk::write()`.
    ///
    /// * `fn read_aligned_all()`: At most 1 call to `read_blocks()`.
    /// * `fn read_aligned_offset_and_buffer()`: At most 2 calls to `read_aligned_all()`.
    /// * `fn read_aligned_buffer()`: At most 1 call to `read_aligned_offset_and_buffer()` plus 1
    ///   call to `read_blocks()`.
    /// * `fn read_async()`: At most 2 calls to `read_aligned_buffer()`.
    ///
    /// Analysis is similar for `fn write_async()`.
    const READ_WRITE_BLOCKS_UPPER_BOUND: usize = 6;

    // Type alias of the [Disk] type used by unit tests.
    pub(crate) type TestDisk = Disk<RamBlockIo<Vec<u8>>, Vec<u8>>;

    fn read_test_helper(case: &TestCase) {
        let data = (0..case.storage_size).map(|v| v as u8).collect::<Vec<_>>();
        // `new_ram_alloc()` takes `(block_size, alignment, storage)`.
        let mut disk = TestDisk::new_ram_alloc(case.block_size, case.alignment, data).unwrap();
        // Make an aligned buffer. A misaligned version is created by taking a sub slice that
        // starts at an unaligned offset. Because of this we need to allocate
        // `case.misalignment` more to accommodate it.
        let mut aligned_buf = AlignedBuffer::new(case.alignment, case.rw_size + case.misalignment);
        let misalignment = usize::try_from(case.misalignment).unwrap();
        let rw_sz = usize::try_from(case.rw_size).unwrap();
        let out = &mut aligned_buf.get()[misalignment..][..rw_sz];
        block_on(disk.read(case.rw_offset, out)).unwrap();
        let rw_off = usize::try_from(case.rw_offset).unwrap();
        assert_eq!(out, &disk.io().storage()[rw_off..][..rw_sz], "Failed. Test case {:?}", case);
        assert!(disk.io().num_reads <= READ_WRITE_BLOCKS_UPPER_BOUND);
    }

    fn write_test_helper(
        case: &TestCase,
        mut write_func: impl FnMut(&mut TestDisk, u64, &mut [u8]),
    ) {
        let data = (0..case.storage_size).map(|v| v as u8).collect::<Vec<_>>();
        // Write a reversed version of the current data.
        let rw_off = usize::try_from(case.rw_offset).unwrap();
        let rw_sz = usize::try_from(case.rw_size).unwrap();
        let mut expected = data[rw_off..][..rw_sz].to_vec();
        expected.reverse();
        // `new_ram_alloc()` takes `(block_size, alignment, storage)`.
        let mut disk = TestDisk::new_ram_alloc(case.block_size, case.alignment, data).unwrap();
        // Make an aligned buffer. A misaligned version is created by taking a sub slice that
        // starts at an unaligned offset. Because of this we need to allocate
        // `case.misalignment` more to accommodate it.
        let mut aligned_buf = AlignedBuffer::new(case.alignment, case.rw_size + case.misalignment);
        let misalignment = usize::try_from(case.misalignment).unwrap();
        let data = &mut aligned_buf.get()[misalignment..][..rw_sz];
        data.clone_from_slice(&expected);
        write_func(&mut disk, case.rw_offset, data);
        let written = &disk.io().storage()[rw_off..][..rw_sz];
        assert_eq!(expected, written, "Failed. Test case {:?}", case);
        // Check that the input is not modified.
        assert_eq!(expected, data, "Input is modified. Test case {:?}", case);
    }

    macro_rules! read_write_test {
        ($name:ident, $x0:expr, $x1:expr, $x2:expr, $x3:expr, $x4:expr, $x5:expr) => {
            mod $name {
                use super::*;

                #[test]
                fn read_test() {
                    read_test_helper(&TestCase::new($x0, $x1, $x2, $x3, $x4, $x5));
                }

                #[test]
                fn read_scaled_test() {
                    // Scale all parameters by double and test again.
                    let (x0, x1, x2, x3, x4, x5) =
                        (2 * $x0, 2 * $x1, 2 * $x2, 2 * $x3, 2 * $x4, 2 * $x5);
                    read_test_helper(&TestCase::new(x0, x1, x2, x3, x4, x5));
                }

                // The input byte slice is a mutable reference.
                #[test]
                fn write_mut_test() {
                    write_test_helper(
                        &TestCase::new($x0, $x1, $x2, $x3, $x4, $x5),
                        |blk, offset, data| {
                            block_on(blk.write(offset, data)).unwrap();
                            assert!(blk.io().num_reads <= READ_WRITE_BLOCKS_UPPER_BOUND);
                            assert!(blk.io().num_writes <= READ_WRITE_BLOCKS_UPPER_BOUND);
                        },
                    );
                }

                #[test]
                fn write_mut_scaled_test() {
                    // Scale all parameters by double and test again.
                    let (x0, x1, x2, x3, x4, x5) =
                        (2 * $x0, 2 * $x1, 2 * $x2, 2 * $x3, 2 * $x4, 2 * $x5);
                    write_test_helper(
                        &TestCase::new(x0, x1, x2, x3, x4, x5),
                        |blk, offset, data| {
                            block_on(blk.write(offset, data)).unwrap();
                            assert!(blk.io().num_reads <= READ_WRITE_BLOCKS_UPPER_BOUND);
                            assert!(blk.io().num_writes <= READ_WRITE_BLOCKS_UPPER_BOUND);
                        },
                    );
                }
            }
        };
    }

    const BLOCK_SIZE: u64 = 512;
    const ALIGNMENT: u64 = 64;
    const STORAGE: u64 = BLOCK_SIZE * 32;

    // Test cases for different scenarios of read/write windows w.r.t. buffer/block alignment
    // boundaries.
    //
    // offset
    // |~~~~~~~~~~~~~size~~~~~~~~~~~~|
    // |---------|---------|---------|
    read_write_test! {aligned_all, 0, STORAGE, 0, ALIGNMENT, BLOCK_SIZE, STORAGE}

    // offset
    // |~~~~~~~~~size~~~~~~~~~|
    // |---------|---------|---------|
    read_write_test! {
        aligned_offset_unaligned_size, 0, STORAGE - 1, 0, ALIGNMENT, BLOCK_SIZE, STORAGE
    }

    // offset
    // |~~size~~|
    // |---------|---------|---------|
    read_write_test! {
        aligned_offset_intra_block, 0, BLOCK_SIZE - 1, 0, ALIGNMENT, BLOCK_SIZE, STORAGE
    }

    //  offset
    //  |~~~~~~~~~~~size~~~~~~~~~~|
    // |---------|---------|---------|
    read_write_test! {
        unaligned_offset_aligned_end, 1, STORAGE - 1, 0, ALIGNMENT, BLOCK_SIZE, STORAGE
    }

    //  offset
    //  |~~~~~~~~~size~~~~~~~~|
    // |---------|---------|---------|
    read_write_test! {unaligned_offset_len, 1, STORAGE - 2, 0, ALIGNMENT, BLOCK_SIZE, STORAGE}

    //  offset
    //  |~~~size~~~|
    // |---------|---------|---------|
    read_write_test! {
        unaligned_offset_len_partial_cross_block, 1, BLOCK_SIZE, 0, ALIGNMENT, BLOCK_SIZE, STORAGE
    }

    //  offset
    //  |~size~|
    // |---------|---------|---------|
    read_write_test! {
        unaligned_offset_len_partial_intra_block,
        1,
        BLOCK_SIZE - 2,
        0,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE
    }

    // Same sets of test cases but with an additional block added to `rw_offset`.
    read_write_test! {
        aligned_all_extra_offset,
        BLOCK_SIZE,
        STORAGE,
        0,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE + BLOCK_SIZE
    }
    read_write_test! {
        aligned_offset_unaligned_size_extra_offset,
        BLOCK_SIZE,
        STORAGE - 1,
        0,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE + BLOCK_SIZE
    }
    read_write_test! {
        aligned_offset_intra_block_extra_offset,
        BLOCK_SIZE,
        BLOCK_SIZE - 1,
        0,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE + BLOCK_SIZE
    }
    read_write_test! {
        unaligned_offset_aligned_end_extra_offset,
        BLOCK_SIZE + 1,
        STORAGE - 1,
        0,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE + BLOCK_SIZE
    }
    read_write_test! {
        unaligned_offset_len_extra_offset,
        BLOCK_SIZE + 1,
        STORAGE - 2,
        0,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE + BLOCK_SIZE
    }
    read_write_test! {
        unaligned_offset_len_partial_cross_block_extra_offset,
        BLOCK_SIZE + 1,
        BLOCK_SIZE,
        0,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE + BLOCK_SIZE
    }
    read_write_test! {
        unaligned_offset_len_partial_intra_block_extra_offset,
        BLOCK_SIZE + 1,
        BLOCK_SIZE - 2,
        0,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE + BLOCK_SIZE
    }

    // Same sets of test cases but with an unaligned output buffer (`misalignment` != 0).
    read_write_test! {
        aligned_all_unaligned_buffer,
        0,
        STORAGE,
        1,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE
    }
    read_write_test! {
        aligned_offset_unaligned_size_unaligned_buffer,
        0,
        STORAGE - 1,
        1,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE
    }
    read_write_test! {
        aligned_offset_intra_block_unaligned_buffer,
        0,
        BLOCK_SIZE - 1,
        1,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE
    }
    read_write_test! {
        unaligned_offset_aligned_end_unaligned_buffer,
        1,
        STORAGE - 1,
        1,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE
    }
    read_write_test! {
        unaligned_offset_len_unaligned_buffer,
        1,
        STORAGE - 2,
        1,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE
    }
    read_write_test! {
        unaligned_offset_len_partial_cross_block_unaligned_buffer,
        1,
        BLOCK_SIZE,
        1,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE
    }
    read_write_test! {
        unaligned_offset_len_partial_intra_block_unaligned_buffer,
        1,
        BLOCK_SIZE - 2,
        1,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE
    }

    // Special cases where `rw_offset` is not block aligned but is buffer aligned. This can
    // trigger some internal optimization code paths.
    read_write_test! {
        buffer_aligned_offset_and_len,
        ALIGNMENT,
        STORAGE - ALIGNMENT,
        0,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE
    }
    read_write_test! {
        buffer_aligned_offset,
        ALIGNMENT,
        STORAGE - ALIGNMENT - 1,
        0,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE
    }
    read_write_test! {
        buffer_aligned_offset_aligned_end,
        ALIGNMENT,
        BLOCK_SIZE,
        0,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE
    }
    read_write_test! {
        buffer_aligned_offset_intra_block,
        ALIGNMENT,
        BLOCK_SIZE - ALIGNMENT - 1,
        0,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE
    }

    #[test]
    fn test_no_alignment_require_zero_size_scratch() {
        let mut io = RamBlockIo::new(1, 1, vec![]);
        assert_eq!(scratch_size(&mut io).unwrap(), 0);
    }

    #[test]
    fn test_scratch_too_small() {
        let mut io = RamBlockIo::new(512, 512, vec![]);
        let scratch = vec![0u8; scratch_size(&mut io).unwrap() - 1];
        assert!(TestDisk::new(io, scratch).is_err());
    }

    #[test]
    fn test_read_overflow() {
        let mut disk = TestDisk::new_ram_alloc(512, 512, vec![0u8; 512]).unwrap();
        assert!(block_on(disk.read(512, &mut vec![0u8; 1][..])).is_err());
        assert!(block_on(disk.read(0, &mut vec![0u8; 513][..])).is_err());
    }

    #[test]
    fn test_read_arithmetic_overflow() {
        let mut disk = TestDisk::new_ram_alloc(512, 512, vec![0u8; 512]).unwrap();
        assert!(block_on(disk.read(u64::MAX, &mut vec![0u8; 1][..])).is_err());
    }

    #[test]
    fn test_write_overflow() {
        let mut disk = TestDisk::new_ram_alloc(512, 512, vec![0u8; 512]).unwrap();
        assert!(block_on(disk.write(512, &mut vec![0u8; 1])).is_err());
        assert!(block_on(disk.write(0, &mut vec![0u8; 513])).is_err());
    }

    #[test]
    fn test_write_arithmetic_overflow() {
        let mut disk = TestDisk::new_ram_alloc(512, 512, vec![0u8; 512]).unwrap();
        assert!(block_on(disk.write(u64::MAX, &mut vec![0u8; 1])).is_err());
    }
}