From 0cfde01944f099fb3935a56df42de8803f80e15a Mon Sep 17 00:00:00 2001 From: zhou <2628735358@qq.com> Date: Tue, 2 Apr 2024 17:17:07 +0800 Subject: [PATCH 1/7] slab --- kernel/Cargo.toml | 1 + kernel/crates/rust-slabmalloc/Cargo.toml | 16 + kernel/crates/rust-slabmalloc/src/lib.rs | 75 +++ kernel/crates/rust-slabmalloc/src/pages.rs | 489 ++++++++++++++++++++ kernel/crates/rust-slabmalloc/src/sc.rs | 329 +++++++++++++ kernel/crates/rust-slabmalloc/src/zone.rs | 170 +++++++ kernel/src/mm/allocator/kernel_allocator.rs | 154 ++++-- kernel/src/mm/allocator/slab.rs | 162 +++---- kernel/src/mm/init.rs | 5 +- 9 files changed, 1256 insertions(+), 145 deletions(-) create mode 100644 kernel/crates/rust-slabmalloc/Cargo.toml create mode 100644 kernel/crates/rust-slabmalloc/src/lib.rs create mode 100644 kernel/crates/rust-slabmalloc/src/pages.rs create mode 100644 kernel/crates/rust-slabmalloc/src/sc.rs create mode 100644 kernel/crates/rust-slabmalloc/src/zone.rs diff --git a/kernel/Cargo.toml b/kernel/Cargo.toml index 3a00ba652..3a4080d42 100644 --- a/kernel/Cargo.toml +++ b/kernel/Cargo.toml @@ -51,6 +51,7 @@ fdt = "=0.1.5" uefi = { version = "=0.26.0", features = ["alloc"] } uefi-raw = "=0.5.0" paste = "=1.0.14" +slabmalloc = { path = "crates/rust-slabmalloc" } # target为x86_64时,使用下面的依赖 diff --git a/kernel/crates/rust-slabmalloc/Cargo.toml b/kernel/crates/rust-slabmalloc/Cargo.toml new file mode 100644 index 000000000..3b9b397aa --- /dev/null +++ b/kernel/crates/rust-slabmalloc/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "slabmalloc" +version = "0.11.0" +edition = "2018" + +[features] +unstable = [] +default = [ "unstable" ] + +[dependencies] +log = "0.4" + +[target.'cfg(unix)'.dev-dependencies] +rand = "0.8" +env_logger = "0.9" +spin = "0.9.8" diff --git a/kernel/crates/rust-slabmalloc/src/lib.rs b/kernel/crates/rust-slabmalloc/src/lib.rs new file mode 100644 index 000000000..1930b958e --- /dev/null +++ b/kernel/crates/rust-slabmalloc/src/lib.rs @@ -0,0 +1,75 @@ +//! A slab allocator implementation for objects less than a page-size (4 KiB or 2MiB). +//! +//! # Overview +//! +//! The organization is as follows: +//! +//! * A `ZoneAllocator` manages many `SCAllocator` and can +//! satisfy requests for different allocation sizes. +//! * A `SCAllocator` allocates objects of exactly one size. +//! It stores the objects and meta-data in one or multiple `AllocablePage` objects. +//! * A trait `AllocablePage` that defines the page-type from which we allocate objects. +//! +//! Lastly, it provides two default `AllocablePage` implementations `ObjectPage` and `LargeObjectPage`: +//! * A `ObjectPage` that is 4 KiB in size and contains allocated objects and associated meta-data. +//! * A `LargeObjectPage` that is 2 MiB in size and contains allocated objects and associated meta-data. +//! +//! +//! # Implementing GlobalAlloc +//! See the [global alloc](https://github.com/gz/rust-slabmalloc/tree/master/examples/global_alloc.rs) example. +#![allow(unused_features)] +#![cfg_attr(feature = "unstable", feature(const_mut_refs))] +#![no_std] +#![crate_name = "slabmalloc"] +#![crate_type = "lib"] + +mod pages; +mod sc; +mod zone; + +pub use pages::*; +pub use sc::*; +pub use zone::*; + +use core::alloc::Layout; +use core::fmt; +use core::ptr::{self, NonNull}; + +use log::trace; + +/// How many bytes in the page are used by allocator meta-data. +const OBJECT_PAGE_METADATA_OVERHEAD: usize = 80; + +/// How many bytes a [`ObjectPage`] is. 
+const OBJECT_PAGE_SIZE: usize = 4096; + +type VAddr = usize; + +/// Error that can be returned for `allocation` and `deallocation` requests. +#[derive(Debug)] +pub enum AllocationError { + /// Can't satisfy the allocation request for Layout because the allocator + /// does not have enough memory (you may be able to `refill` it). + OutOfMemory, + /// Allocator can't deal with the provided size of the Layout. + InvalidLayout, +} + +/// Allocator trait to be implemented by users of slabmalloc to provide memory to slabmalloc. +/// +/// # Safety +/// Needs to adhere to safety requirements of a rust allocator (see GlobalAlloc et. al.). +pub unsafe trait Allocator<'a> { + fn allocate(&mut self, layout: Layout) -> Result, AllocationError>; + fn deallocate(&mut self, ptr: NonNull, layout: Layout) -> Result<(), AllocationError>; + + /// Refill the allocator with a [`ObjectPage`]. + /// + /// # Safety + /// TBD (this API needs to change anyways, likely new page should be a raw pointer) + unsafe fn refill( + &mut self, + layout: Layout, + new_page: &'a mut ObjectPage<'a>, + ) -> Result<(), AllocationError>; +} diff --git a/kernel/crates/rust-slabmalloc/src/pages.rs b/kernel/crates/rust-slabmalloc/src/pages.rs new file mode 100644 index 000000000..61089ea72 --- /dev/null +++ b/kernel/crates/rust-slabmalloc/src/pages.rs @@ -0,0 +1,489 @@ +use crate::*; +use core::{ + mem, + sync::atomic::{AtomicU64, Ordering}, +}; + +/// A trait defining bitfield operations we need for tracking allocated objects within a page. +pub(crate) trait Bitfield { + fn initialize(&mut self, for_size: usize, capacity: usize); + fn first_fit( + &self, + base_addr: usize, + layout: Layout, + page_size: usize, + ) -> Option<(usize, usize)>; + fn is_allocated(&self, idx: usize) -> bool; + fn set_bit(&self, idx: usize); + fn clear_bit(&self, idx: usize); + fn is_full(&self) -> bool; + fn all_free(&self, relevant_bits: usize) -> bool; +} + +/// Implementation of bit operations on u64 slices. +/// +/// We allow deallocations (i.e. clearning a bit in the field) +/// from any thread. That's why the bitfield is a bunch of AtomicU64. +impl Bitfield for [AtomicU64] { + /// Initialize the bitfield + /// + /// # Arguments + /// * `for_size`: Object size we want to allocate + /// * `capacity`: Maximum size of the buffer the bitmap maintains. + /// + /// Ensures that we only have free slots for what we can allocate + /// within the page (by marking everything else allocated). + fn initialize(&mut self, for_size: usize, capacity: usize) { + // Set everything to allocated + for bitmap in self.iter_mut() { + *bitmap = AtomicU64::new(u64::max_value()); + } + + // Mark actual slots as free + let relevant_bits = core::cmp::min(capacity / for_size, self.len() * 64); + for idx in 0..relevant_bits { + self.clear_bit(idx); + } + } + + /// Tries to find a free block of memory that satisfies `alignment` requirement. + /// + /// # Notes + /// * We pass size here to be able to calculate the resulting address within `data`. 
+ #[inline(always)] + fn first_fit( + &self, + base_addr: usize, + layout: Layout, + page_size: usize, + ) -> Option<(usize, usize)> { + for (base_idx, b) in self.iter().enumerate() { + let bitval = b.load(Ordering::Relaxed); + if bitval == u64::max_value() { + continue; + } else { + let negated = !bitval; + let first_free = negated.trailing_zeros() as usize; + let idx: usize = base_idx * 64 + first_free; + let offset = idx * layout.size(); + + // TODO(bad): psize needs to be passed as arg + let offset_inside_data_area = + offset <= (page_size - OBJECT_PAGE_METADATA_OVERHEAD - layout.size()); + if !offset_inside_data_area { + return None; + } + + let addr: usize = base_addr + offset; + let alignment_ok = addr % layout.align() == 0; + let block_is_free = bitval & (1 << first_free) == 0; + if alignment_ok && block_is_free { + return Some((idx, addr)); + } + } + } + None + } + + /// Check if the bit `idx` is set. + #[inline(always)] + fn is_allocated(&self, idx: usize) -> bool { + let base_idx = idx / 64; + let bit_idx = idx % 64; + (self[base_idx].load(Ordering::Relaxed) & (1 << bit_idx)) > 0 + } + + /// Sets the bit number `idx` in the bit-field. + #[inline(always)] + fn set_bit(&self, idx: usize) { + let base_idx = idx / 64; + let bit_idx = idx % 64; + self[base_idx].fetch_or(1 << bit_idx, Ordering::Relaxed); + } + + /// Clears bit number `idx` in the bit-field. + #[inline(always)] + fn clear_bit(&self, idx: usize) { + let base_idx = idx / 64; + let bit_idx = idx % 64; + self[base_idx].fetch_and(!(1 << bit_idx), Ordering::Relaxed); + } + + /// Checks if we could allocate more objects of a given `alloc_size` within the + /// `capacity` of the memory allocator. + /// + /// # Note + /// The ObjectPage will make sure to mark the top-most bits as allocated + /// for large sizes (i.e., a size 512 SCAllocator will only really need 3 bits) + /// to track allocated objects). That's why this function can be simpler + /// than it would need to be in practice. + #[inline(always)] + fn is_full(&self) -> bool { + self.iter() + .filter(|&x| x.load(Ordering::Relaxed) != u64::max_value()) + .count() + == 0 + } + + /// Checks if the page has currently no allocations. + /// + /// This is called `all_free` rather than `is_emtpy` because + /// we already have an is_empty fn as part of the slice. + #[inline(always)] + fn all_free(&self, relevant_bits: usize) -> bool { + for (idx, bitmap) in self.iter().enumerate() { + let checking_bit_range = (idx * 64, (idx + 1) * 64); + if relevant_bits >= checking_bit_range.0 && relevant_bits < checking_bit_range.1 { + // Last relevant bitmap, here we only have to check that a subset of bitmap is marked free + // the rest will be marked full + let bits_that_should_be_free = relevant_bits - checking_bit_range.0; + let free_mask = (1 << bits_that_should_be_free) - 1; + return (free_mask & bitmap.load(Ordering::Relaxed)) == 0; + } + + if bitmap.load(Ordering::Relaxed) == 0 { + continue; + } else { + return false; + } + } + + true + } +} + +/// This trait is used to define a page from which objects are allocated +/// in an `SCAllocator`. +/// +/// The implementor of this trait needs to provide access to the page meta-data, +/// which consists of: +/// - A bitfield (to track allocations), +/// - `prev` and `next` pointers to insert the page in free lists +pub trait AllocablePage { + /// The total size (in bytes) of the page. + /// + /// # Note + /// We also assume that the address of the page will be aligned to `SIZE`. 
+ const SIZE: usize; + + fn bitfield(&self) -> &[AtomicU64; 8]; + fn bitfield_mut(&mut self) -> &mut [AtomicU64; 8]; + fn prev(&mut self) -> &mut Rawlink + where + Self: core::marker::Sized; + fn next(&mut self) -> &mut Rawlink + where + Self: core::marker::Sized; + + /// Tries to find a free block within `data` that satisfies `alignment` requirement. + fn first_fit(&self, layout: Layout) -> Option<(usize, usize)> { + let base_addr = (self as *const Self as *const u8) as usize; + self.bitfield().first_fit(base_addr, layout, Self::SIZE) + } + + /// Tries to allocate an object within this page. + /// + /// In case the slab is full, returns a null ptr. + fn allocate(&mut self, layout: Layout) -> *mut u8 { + match self.first_fit(layout) { + Some((idx, addr)) => { + self.bitfield().set_bit(idx); + addr as *mut u8 + } + None => ptr::null_mut(), + } + } + + /// Checks if we can still allocate more objects of a given layout within the page. + fn is_full(&self) -> bool { + self.bitfield().is_full() + } + + /// Checks if the page has currently no allocations. + fn is_empty(&self, relevant_bits: usize) -> bool { + self.bitfield().all_free(relevant_bits) + } + + /// Deallocates a memory object within this page. + fn deallocate(&self, ptr: NonNull, layout: Layout) -> Result<(), AllocationError> { + trace!( + "AllocablePage deallocating ptr = {:p} with {:?}", + ptr, + layout + ); + let page_offset = (ptr.as_ptr() as usize) & (Self::SIZE - 1); + assert!(page_offset % layout.size() == 0); + let idx = page_offset / layout.size(); + assert!( + self.bitfield().is_allocated(idx), + "{:p} not marked allocated?", + ptr + ); + + self.bitfield().clear_bit(idx); + Ok(()) + } +} + +/// Holds allocated data within a 4 KiB page. +/// +/// Has a data-section where objects are allocated from +/// and a small amount of meta-data in form of a bitmap +/// to track allocations at the end of the page. +/// +/// # Notes +/// An object of this type will be exactly 4 KiB. +/// It is marked `repr(C)` because we rely on a well defined order of struct +/// members (e.g., dealloc does a cast to find the bitfield). +#[repr(C)] +pub struct ObjectPage<'a> { + /// Holds memory objects. + #[allow(dead_code)] + data: [u8; OBJECT_PAGE_SIZE - OBJECT_PAGE_METADATA_OVERHEAD], + + /// Next element in list (used by `PageList`). + next: Rawlink>, + /// Previous element in list (used by `PageList`) + prev: Rawlink>, + + /// A bit-field to track free/allocated memory within `data`. + pub(crate) bitfield: [AtomicU64; 8], +} + +impl<'a> ObjectPage<'a> { + pub fn new() -> ObjectPage<'a> { + unsafe { mem::MaybeUninit::zeroed().assume_init() } + } +} + +// These needs some more work to be really safe... +unsafe impl<'a> Send for ObjectPage<'a> {} +unsafe impl<'a> Sync for ObjectPage<'a> {} + +impl<'a> AllocablePage for ObjectPage<'a> { + const SIZE: usize = OBJECT_PAGE_SIZE; + + fn bitfield(&self) -> &[AtomicU64; 8] { + &self.bitfield + } + fn bitfield_mut(&mut self) -> &mut [AtomicU64; 8] { + &mut self.bitfield + } + + fn prev(&mut self) -> &mut Rawlink { + &mut self.prev + } + + fn next(&mut self) -> &mut Rawlink { + &mut self.next + } +} + +impl<'a> Default for ObjectPage<'a> { + fn default() -> ObjectPage<'a> { + unsafe { mem::MaybeUninit::zeroed().assume_init() } + } +} + +impl<'a> fmt::Debug for ObjectPage<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "ObjectPage") + } +} + +/// A list of pages. +pub(crate) struct PageList<'a, T: AllocablePage> { + /// Points to the head of the list. 
+ pub(crate) head: Option<&'a mut T>, + /// Number of elements in the list. + pub(crate) elements: usize, +} + +impl<'a, T: AllocablePage> PageList<'a, T> { + #[cfg(feature = "unstable")] + pub(crate) const fn new() -> PageList<'a, T> { + PageList { + head: None, + elements: 0, + } + } + + #[cfg(not(feature = "unstable"))] + pub(crate) fn new() -> PageList<'a, T> { + PageList { + head: None, + elements: 0, + } + } + + pub(crate) fn iter_mut<'b: 'a>(&mut self) -> ObjectPageIterMut<'b, T> { + let m = match self.head { + None => Rawlink::none(), + Some(ref mut m) => Rawlink::some(*m), + }; + + ObjectPageIterMut { + head: m, + phantom: core::marker::PhantomData, + } + } + + /// Inserts `new_head` at the front of the list. + pub(crate) fn insert_front<'b>(&'b mut self, mut new_head: &'a mut T) { + match self.head { + None => { + *new_head.prev() = Rawlink::none(); + self.head = Some(new_head); + } + Some(ref mut head) => { + *new_head.prev() = Rawlink::none(); + *head.prev() = Rawlink::some(new_head); + mem::swap(head, &mut new_head); + *head.next() = Rawlink::some(new_head); + } + } + + self.elements += 1; + } + + /// Removes `slab_page` from the list. + pub(crate) fn remove_from_list(&mut self, slab_page: &mut T) { + unsafe { + match slab_page.prev().resolve_mut() { + None => { + self.head = slab_page.next().resolve_mut(); + } + Some(prev) => { + *prev.next() = match slab_page.next().resolve_mut() { + None => Rawlink::none(), + Some(next) => Rawlink::some(next), + }; + } + } + + match slab_page.next().resolve_mut() { + None => (), + Some(next) => { + *next.prev() = match slab_page.prev().resolve_mut() { + None => Rawlink::none(), + Some(prev) => Rawlink::some(prev), + }; + } + } + } + + *slab_page.prev() = Rawlink::none(); + *slab_page.next() = Rawlink::none(); + self.elements -= 1; + } + + /// Removes `slab_page` from the list. + pub(crate) fn pop<'b>(&'b mut self) -> Option<&'a mut T> { + match self.head { + None => None, + Some(ref mut head) => { + let head_next = head.next(); + let mut new_head = unsafe { head_next.resolve_mut() }; + mem::swap(&mut self.head, &mut new_head); + let _ = self.head.as_mut().map(|n| { + *n.prev() = Rawlink::none(); + }); + + self.elements -= 1; + new_head.map(|node| { + *node.prev() = Rawlink::none(); + *node.next() = Rawlink::none(); + node + }) + } + } + } + + /// Does the list contain `s`? + pub(crate) fn contains(&mut self, s: *const T) -> bool { + for slab_page in self.iter_mut() { + if core::ptr::eq(slab_page, s) { + return true; + } + } + + false + } +} + +/// Iterate over all the pages inside a slab allocator +pub(crate) struct ObjectPageIterMut<'a, P: AllocablePage> { + head: Rawlink
<P>
, + phantom: core::marker::PhantomData<&'a P>, +} + +impl<'a, P: AllocablePage + 'a> Iterator for ObjectPageIterMut<'a, P> { + type Item = &'a mut P; + + #[inline] + fn next(&mut self) -> Option<&'a mut P> { + unsafe { + self.head.resolve_mut().map(|next| { + self.head = match next.next().resolve_mut() { + None => Rawlink::none(), + Some(ref mut sp) => Rawlink::some(*sp), + }; + next + }) + } + } +} + +/// Rawlink is a type like Option but for holding a raw pointer. +/// +/// We use it to link AllocablePages together. You probably won't need +/// to use this type if you're not implementing AllocablePage +/// for a custom page-size. +pub struct Rawlink { + p: *mut T, +} + +impl Default for Rawlink { + fn default() -> Self { + Rawlink { p: ptr::null_mut() } + } +} + +impl Rawlink { + /// Like Option::None for Rawlink + pub(crate) fn none() -> Rawlink { + Rawlink { p: ptr::null_mut() } + } + + /// Like Option::Some for Rawlink + pub(crate) fn some(n: &mut T) -> Rawlink { + Rawlink { p: n } + } + + /// Convert the `Rawlink` into an Option value + /// + /// **unsafe** because: + /// + /// - Dereference of raw pointer. + /// - Returns reference of arbitrary lifetime. + #[allow(dead_code)] + pub(crate) unsafe fn resolve<'a>(&self) -> Option<&'a T> { + self.p.as_ref() + } + + /// Convert the `Rawlink` into an Option value + /// + /// **unsafe** because: + /// + /// - Dereference of raw pointer. + /// - Returns reference of arbitrary lifetime. + pub(crate) unsafe fn resolve_mut<'a>(&mut self) -> Option<&'a mut T> { + self.p.as_mut() + } + + /// Return the `Rawlink` and replace with `Rawlink::none()` + #[allow(dead_code)] + pub(crate) fn take(&mut self) -> Rawlink { + mem::replace(self, Rawlink::none()) + } +} diff --git a/kernel/crates/rust-slabmalloc/src/sc.rs b/kernel/crates/rust-slabmalloc/src/sc.rs new file mode 100644 index 000000000..1a6de9660 --- /dev/null +++ b/kernel/crates/rust-slabmalloc/src/sc.rs @@ -0,0 +1,329 @@ +//! A SCAllocator that can allocate fixed size objects. + +use core::mem; + +use crate::*; + +/// A genius(?) const min() +/// +/// # What this does +/// * create an array of the two elements you want to choose between +/// * create an arbitrary boolean expression +/// * cast said expresison to a usize +/// * use that value to index into the array created above +/// +/// # Source +/// https://stackoverflow.com/questions/53619695/calculating-maximum-value-of-a-set-of-constant-expressions-at-compile-time +#[cfg(feature = "unstable")] +const fn cmin(a: usize, b: usize) -> usize { + [a, b][(a > b) as usize] +} + +/// The boring variant of min (not const). +#[cfg(not(feature = "unstable"))] +fn cmin(a: usize, b: usize) -> usize { + core::cmp::min(a, b) +} + +/// A slab allocator allocates elements of a fixed size. +/// +/// It maintains three internal lists of objects that implement `AllocablePage` +/// from which it can allocate memory. +/// +/// * `empty_slabs`: Is a list of pages that the SCAllocator maintains, but +/// has 0 allocations in them, these can be given back to a requestor in case +/// of reclamation. +/// * `slabs`: A list of pages partially allocated and still have room for more. +/// * `full_slabs`: A list of pages that are completely allocated. +/// +/// On allocation we allocate memory from `slabs`, however if the list is empty +/// we try to reclaim a page from `empty_slabs` before we return with an out-of-memory +/// error. If a page becomes full after the allocation we move it from `slabs` to +/// `full_slabs`. 
+/// +/// Similarly, on dealloaction we might move a page from `full_slabs` to `slabs` +/// or from `slabs` to `empty_slabs` after we deallocated an object. +/// +/// If an allocation returns `OutOfMemory` a client using SCAllocator can refill +/// it using the `refill` function. +pub struct SCAllocator<'a, P: AllocablePage> { + /// Maximum possible allocation size for this `SCAllocator`. + pub(crate) size: usize, + /// Keeps track of succeeded allocations. + pub(crate) allocation_count: usize, + /// max objects per page + pub(crate) obj_per_page: usize, + /// List of empty ObjectPages (nothing allocated in these). + pub(crate) empty_slabs: PageList<'a, P>, + /// List of partially used ObjectPage (some objects allocated but pages are not full). + pub(crate) slabs: PageList<'a, P>, + /// List of full ObjectPages (everything allocated in these don't need to search them). + pub(crate) full_slabs: PageList<'a, P>, +} + +/// Creates an instance of a scallocator, we do this in a macro because we +/// re-use the code in const and non-const functions +macro_rules! new_sc_allocator { + ($size:expr) => { + SCAllocator { + size: $size, + allocation_count: 0, + obj_per_page: cmin((P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD) / $size, 8 * 64), + empty_slabs: PageList::new(), + slabs: PageList::new(), + full_slabs: PageList::new(), + } + }; +} + +impl<'a, P: AllocablePage> SCAllocator<'a, P> { + const REBALANCE_COUNT: usize = 64; + + /// Create a new SCAllocator. + #[cfg(feature = "unstable")] + pub const fn new(size: usize) -> SCAllocator<'a, P> { + new_sc_allocator!(size) + } + + #[cfg(not(feature = "unstable"))] + pub fn new(size: usize) -> SCAllocator<'a, P> { + new_sc_allocator!(size) + } + + /// Returns the maximum supported object size of this allocator. + pub fn size(&self) -> usize { + self.size + } + + /// Add a new ObjectPage. + fn insert_partial_slab(&mut self, new_head: &'a mut P) { + self.slabs.insert_front(new_head); + } + + /// Add page to empty list. + fn insert_empty(&mut self, new_head: &'a mut P) { + assert_eq!( + new_head as *const P as usize % P::SIZE, + 0, + "Inserted page is not aligned to page-size." + ); + self.empty_slabs.insert_front(new_head); + } + + /// Since `dealloc` can not reassign pages without requiring a lock + /// we check slabs and full slabs periodically as part of `alloc` + /// and move them to the empty or partially allocated slab lists. + pub(crate) fn check_page_assignments(&mut self) { + for slab_page in self.full_slabs.iter_mut() { + if !slab_page.is_full() { + // We need to move it from self.full_slabs -> self.slabs + trace!("move {:p} full -> partial", slab_page); + self.move_full_to_partial(slab_page); + } + } + + for slab_page in self.slabs.iter_mut() { + if slab_page.is_empty(self.obj_per_page) { + // We need to move it from self.slabs -> self.empty_slabs + trace!("move {:p} partial -> empty", slab_page); + self.move_to_empty(slab_page); + } + } + } + + /// Move a page from `slabs` to `empty_slabs`. + fn move_to_empty(&mut self, page: &'a mut P) { + let page_ptr = page as *const P; + + debug_assert!(self.slabs.contains(page_ptr)); + debug_assert!( + !self.empty_slabs.contains(page_ptr), + "Page {:p} already in emtpy_slabs", + page_ptr + ); + + self.slabs.remove_from_list(page); + self.empty_slabs.insert_front(page); + + debug_assert!(!self.slabs.contains(page_ptr)); + debug_assert!(self.empty_slabs.contains(page_ptr)); + } + + /// Move a page from `full_slabs` to `slab`. 
+ fn move_partial_to_full(&mut self, page: &'a mut P) { + let page_ptr = page as *const P; + + debug_assert!(self.slabs.contains(page_ptr)); + debug_assert!(!self.full_slabs.contains(page_ptr)); + + self.slabs.remove_from_list(page); + self.full_slabs.insert_front(page); + + debug_assert!(!self.slabs.contains(page_ptr)); + debug_assert!(self.full_slabs.contains(page_ptr)); + } + + /// Move a page from `full_slabs` to `slab`. + fn move_full_to_partial(&mut self, page: &'a mut P) { + let page_ptr = page as *const P; + + debug_assert!(!self.slabs.contains(page_ptr)); + debug_assert!(self.full_slabs.contains(page_ptr)); + + self.full_slabs.remove_from_list(page); + self.slabs.insert_front(page); + + debug_assert!(self.slabs.contains(page_ptr)); + debug_assert!(!self.full_slabs.contains(page_ptr)); + } + + /// Tries to allocate a block of memory with respect to the `layout`. + /// Searches within already allocated slab pages, if no suitable spot is found + /// will try to use a page from the empty page list. + /// + /// # Arguments + /// * `sc_layout`: This is not the original layout but adjusted for the + /// SCAllocator size (>= original). + fn try_allocate_from_pagelist(&mut self, sc_layout: Layout) -> *mut u8 { + // TODO: Do we really need to check multiple slab pages (due to alignment) + // If not we can get away with a singly-linked list and have 8 more bytes + // for the bitfield in an ObjectPage. + + for slab_page in self.slabs.iter_mut() { + let ptr = slab_page.allocate(sc_layout); + if !ptr.is_null() { + if slab_page.is_full() { + trace!("move {:p} partial -> full", slab_page); + self.move_partial_to_full(slab_page); + } + self.allocation_count += 1; + return ptr; + } else { + continue; + } + } + + // Periodically rebalance page-lists (since dealloc can't do it for us) + if self.allocation_count > SCAllocator::
<P>
::REBALANCE_COUNT { + self.check_page_assignments(); + self.allocation_count = 0; + } + + ptr::null_mut() + } + + pub fn try_reclaim_pages(&mut self, to_reclaim: usize, dealloc: &mut F) -> usize + where + F: FnMut(*mut P), + { + self.check_page_assignments(); + let mut reclaimed = 0; + while reclaimed < to_reclaim { + if let Some(page) = self.empty_slabs.pop() { + dealloc(page as *mut P); + reclaimed += 1; + } else { + break; + } + } + + reclaimed + } + + /// Refill the SCAllocator + /// + /// # Safety + /// ObjectPage needs to be empty etc. + pub unsafe fn refill(&mut self, page: &'a mut P) { + page.bitfield_mut() + .initialize(self.size, P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD); + *page.prev() = Rawlink::none(); + *page.next() = Rawlink::none(); + trace!("adding page to SCAllocator {:p}", page); + self.insert_empty(page); + } + + /// Allocates a block of memory descriped by `layout`. + /// + /// Returns a pointer to a valid region of memory or an + /// AllocationError. + /// + /// The function may also move around pages between lists + /// (empty -> partial or partial -> full). + pub fn allocate(&mut self, layout: Layout) -> Result, AllocationError> { + trace!( + "SCAllocator({}) is trying to allocate {:?}", + self.size, + layout + ); + assert!(layout.size() <= self.size); + assert!(self.size <= (P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD)); + let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) }; + assert!(new_layout.size() >= layout.size()); + + let ptr = { + // Try to allocate from partial slabs, + // if we fail check if we have empty pages and allocate from there + let ptr = self.try_allocate_from_pagelist(new_layout); + if ptr.is_null() && self.empty_slabs.head.is_some() { + // Re-try allocation in empty page + let empty_page = self.empty_slabs.pop().expect("We checked head.is_some()"); + debug_assert!(!self.empty_slabs.contains(empty_page)); + + let ptr = empty_page.allocate(layout); + debug_assert!(!ptr.is_null(), "Allocation must have succeeded here."); + + trace!( + "move {:p} empty -> partial empty count {}", + empty_page, + self.empty_slabs.elements + ); + // Move empty page to partial pages + self.insert_partial_slab(empty_page); + ptr + } else { + ptr + } + }; + + let res = NonNull::new(ptr).ok_or(AllocationError::OutOfMemory); + + if !ptr.is_null() { + trace!( + "SCAllocator({}) allocated ptr=0x{:x}", + self.size, + ptr as usize + ); + } + + res + } + + /// Deallocates a previously allocated `ptr` described by `Layout`. + /// + /// May return an error in case an invalid `layout` is provided. + /// The function may also move internal slab pages between lists partial -> empty + /// or full -> partial lists. 
+ pub fn deallocate(&self, ptr: NonNull, layout: Layout) -> Result<(), AllocationError> { + assert!(layout.size() <= self.size); + assert!(self.size <= (P::SIZE - OBJECT_PAGE_METADATA_OVERHEAD)); + trace!( + "SCAllocator({}) is trying to deallocate ptr = {:p} layout={:?} P.size= {}", + self.size, + ptr, + layout, + P::SIZE + ); + + let page = (ptr.as_ptr() as usize) & !(P::SIZE - 1) as usize; + + // Figure out which page we are on and construct a reference to it + // TODO: The linked list will have another &mut reference + let slab_page = unsafe { mem::transmute::(page) }; + let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) }; + + let ret = slab_page.deallocate(ptr, new_layout); + debug_assert!(ret.is_ok(), "Slab page deallocate won't fail at the moment"); + ret + } +} diff --git a/kernel/crates/rust-slabmalloc/src/zone.rs b/kernel/crates/rust-slabmalloc/src/zone.rs new file mode 100644 index 000000000..c38d25302 --- /dev/null +++ b/kernel/crates/rust-slabmalloc/src/zone.rs @@ -0,0 +1,170 @@ +//! A ZoneAllocator to allocate arbitrary object sizes (up to `ZoneAllocator::MAX_ALLOC_SIZE`) +//! +//! The ZoneAllocator achieves this by having many `SCAllocator` + +use crate::*; + +/// Creates an instance of a zone, we do this in a macro because we +/// re-use the code in const and non-const functions +/// +/// We can get rid of this once the const fn feature is fully stabilized. +macro_rules! new_zone { + () => { + ZoneAllocator { + // TODO(perf): We should probably pick better classes + // rather than powers-of-two (see SuperMalloc etc.) + small_slabs: [ + SCAllocator::new(1 << 3), // 8 + SCAllocator::new(1 << 4), // 16 + SCAllocator::new(1 << 5), // 32 + SCAllocator::new(1 << 6), // 64 + SCAllocator::new(1 << 7), // 128 + SCAllocator::new(1 << 8), // 256 + SCAllocator::new(1 << 9), // 512 + SCAllocator::new(1 << 10), // 1024 + SCAllocator::new(1 << 11), // 2048 ], + ], + } + }; +} + +/// A zone allocator for arbitrary sized allocations. +/// +/// Has a bunch of `SCAllocator` and through that can serve allocation +/// requests for many different object sizes up to (MAX_SIZE_CLASSES) by selecting +/// the right `SCAllocator` for allocation and deallocation. +/// +/// The allocator provides to refill functions `refill` and `refill_large` +/// to provide the underlying `SCAllocator` with more memory in case it runs out. +pub struct ZoneAllocator<'a> { + small_slabs: [SCAllocator<'a, ObjectPage<'a>>; ZoneAllocator::MAX_BASE_SIZE_CLASSES], +} + +impl<'a> Default for ZoneAllocator<'a> { + fn default() -> ZoneAllocator<'a> { + new_zone!() + } +} + +enum Slab { + Base(usize), + Unsupported, +} + +impl<'a> ZoneAllocator<'a> { + /// Maximum size that allocated within LargeObjectPages (2 MiB). + /// This is also the maximum object size that this allocator can handle. + pub const MAX_ALLOC_SIZE: usize = 1 << 11; + + /// Maximum size which is allocated with ObjectPages (4 KiB pages). + /// + /// e.g. this is 4 KiB - 80 bytes of meta-data. + pub const MAX_BASE_ALLOC_SIZE: usize = 256; + + /// How many allocators of type SCAllocator we have. + const MAX_BASE_SIZE_CLASSES: usize = 9; + + #[cfg(feature = "unstable")] + pub const fn new() -> ZoneAllocator<'a> { + new_zone!() + } + + #[cfg(not(feature = "unstable"))] + pub fn new() -> ZoneAllocator<'a> { + new_zone!() + } + + /// Return maximum size an object of size `current_size` can use. + /// + /// Used to optimize `realloc`. 
+ pub fn get_max_size(current_size: usize) -> Option { + match current_size { + 0..=8 => Some(8), + 9..=16 => Some(16), + 17..=32 => Some(32), + 33..=64 => Some(64), + 65..=128 => Some(128), + 129..=256 => Some(256), + 257..=512 => Some(512), + 513..=1024 => Some(1024), + 1025..=2048 => Some(2048), + _ => None, + } + } + + /// Figure out index into zone array to get the correct slab allocator for that size. + fn get_slab(requested_size: usize) -> Slab { + match requested_size { + 0..=8 => Slab::Base(0), + 9..=16 => Slab::Base(1), + 17..=32 => Slab::Base(2), + 33..=64 => Slab::Base(3), + 65..=128 => Slab::Base(4), + 129..=256 => Slab::Base(5), + 257..=512 => Slab::Base(6), + 513..=1024 => Slab::Base(7), + 1025..=2048 => Slab::Base(8), + _ => Slab::Unsupported, + } + } + + /// Reclaims empty pages by calling `dealloc` on it and removing it from the + /// empty lists in the [`SCAllocator`]. + /// + /// The `dealloc` function is called at most `reclaim_base_max` times for + /// base pages, and at most `reclaim_large_max` for large pages. + pub fn try_reclaim_base_pages(&mut self, mut to_reclaim: usize, mut dealloc: F) + where + F: Fn(*mut ObjectPage), + { + for i in 0..ZoneAllocator::MAX_BASE_SIZE_CLASSES { + let slab = &mut self.small_slabs[i]; + let just_reclaimed = slab.try_reclaim_pages(to_reclaim, &mut dealloc); + to_reclaim = to_reclaim.saturating_sub(just_reclaimed); + if to_reclaim == 0 { + break; + } + } + } +} + +unsafe impl<'a> crate::Allocator<'a> for ZoneAllocator<'a> { + /// Allocate a pointer to a block of memory described by `layout`. + fn allocate(&mut self, layout: Layout) -> Result, AllocationError> { + match ZoneAllocator::get_slab(layout.size()) { + Slab::Base(idx) => self.small_slabs[idx].allocate(layout), + Slab::Unsupported => Err(AllocationError::InvalidLayout), + } + } + + /// Deallocates a pointer to a block of memory, which was + /// previously allocated by `allocate`. + /// + /// # Arguments + /// * `ptr` - Address of the memory location to free. + /// * `layout` - Memory layout of the block pointed to by `ptr`. + fn deallocate(&mut self, ptr: NonNull, layout: Layout) -> Result<(), AllocationError> { + match ZoneAllocator::get_slab(layout.size()) { + Slab::Base(idx) => self.small_slabs[idx].deallocate(ptr, layout), + Slab::Unsupported => Err(AllocationError::InvalidLayout), + } + } + + /// Refills the SCAllocator for a given Layout with an ObjectPage. + /// + /// # Safety + /// ObjectPage needs to be emtpy etc. 
+ unsafe fn refill( + &mut self, + layout: Layout, + new_page: &'a mut ObjectPage<'a>, + ) -> Result<(), AllocationError> { + match ZoneAllocator::get_slab(layout.size()) { + Slab::Base(idx) => { + self.small_slabs[idx].refill(new_page); + Ok(()) + } + Slab::Unsupported => Err(AllocationError::InvalidLayout), + } + } +} diff --git a/kernel/src/mm/allocator/kernel_allocator.rs b/kernel/src/mm/allocator/kernel_allocator.rs index ac64d0fda..3e4b9e65b 100644 --- a/kernel/src/mm/allocator/kernel_allocator.rs +++ b/kernel/src/mm/allocator/kernel_allocator.rs @@ -13,7 +13,10 @@ use core::{ ptr::NonNull, }; -use super::page_frame::{FrameAllocator, PageFrameCount}; +use super::{ + page_frame::{FrameAllocator, PageFrameCount}, + slab::{slab_init_state, SLABALLOCATOR}, +}; /// 类kmalloc的分配器应当实现的trait pub trait LocalAlloc { @@ -59,64 +62,133 @@ impl KernelAllocator { /// 为内核分配器实现LocalAlloc的trait impl LocalAlloc for KernelAllocator { unsafe fn local_alloc(&self, layout: Layout) -> *mut u8 { - return self - .alloc_in_buddy(layout) - .map(|x| x.as_mut_ptr()) - .unwrap_or(core::ptr::null_mut()); + if layout.size() > 2048 || slab_init_state() == false { + return self + .alloc_in_buddy(layout) + .map(|x| x.as_mut_ptr() as *mut u8) + .unwrap_or(core::ptr::null_mut() as *mut u8); + } else { + if let Some(ref mut slab) = *SLABALLOCATOR.lock_irqsave() { + return slab.allocate(layout); + }; + return core::ptr::null_mut() as *mut u8; + } } unsafe fn local_alloc_zeroed(&self, layout: Layout) -> *mut u8 { - return self - .alloc_in_buddy(layout) - .map(|x| { - let ptr: *mut u8 = x.as_mut_ptr(); - core::ptr::write_bytes(ptr, 0, x.len()); - ptr - }) - .unwrap_or(core::ptr::null_mut()); + if layout.size() > 2048 || slab_init_state() == false { + return self + .alloc_in_buddy(layout) + .map(|x| { + let ptr: *mut u8 = x.as_mut_ptr(); + core::ptr::write_bytes(ptr, 0, x.len()); + ptr + }) + .unwrap_or(core::ptr::null_mut() as *mut u8); + } else { + if let Some(ref mut slab) = *SLABALLOCATOR.lock_irqsave() { + return slab.allocate(layout); + }; + return core::ptr::null_mut() as *mut u8; + } } unsafe fn local_dealloc(&self, ptr: *mut u8, layout: Layout) { - self.free_in_buddy(ptr, layout); + // self.free_in_buddy(ptr, layout) + if layout.size() > 2048 || slab_init_state() == false { + self.free_in_buddy(ptr, layout) + } else { + if let Some(ref mut slab) = *SLABALLOCATOR.lock_irqsave() { + slab.deallocate(ptr, layout).unwrap() + } + } } } /// 为内核slab分配器实现GlobalAlloc特性 unsafe impl GlobalAlloc for KernelAllocator { unsafe fn alloc(&self, layout: Layout) -> *mut u8 { - let r = self.local_alloc_zeroed(layout); - mm_debug_log( - klog_types::AllocatorLogType::Alloc(AllocLogItem::new(layout, Some(r as usize), None)), - klog_types::LogSource::Buddy, - ); - - return r; - - // self.local_alloc_zeroed(layout, 0) + if layout.size() > 2048 || slab_init_state() == false { + let r = self.local_alloc_zeroed(layout); + mm_debug_log( + klog_types::AllocatorLogType::Alloc(AllocLogItem::new( + layout.clone(), + Some(r as usize), + None, + )), + klog_types::LogSource::Buddy, + ); + + return r; + } else { + let r = self.local_alloc_zeroed(layout); + mm_debug_log( + klog_types::AllocatorLogType::Alloc(AllocLogItem::new( + layout.clone(), + Some(r as usize), + None, + )), + klog_types::LogSource::Slab, + ); + + return r; + } } unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { - let r = self.local_alloc_zeroed(layout); - - mm_debug_log( - klog_types::AllocatorLogType::AllocZeroed(AllocLogItem::new( - layout, - Some(r as usize), - None, - )), 
- klog_types::LogSource::Buddy, - ); - - return r; + if layout.size() > 2048 || slab_init_state() == false { + let r = self.local_alloc_zeroed(layout); + + mm_debug_log( + klog_types::AllocatorLogType::AllocZeroed(AllocLogItem::new( + layout.clone(), + Some(r as usize), + None, + )), + klog_types::LogSource::Buddy, + ); + + return r; + } else { + let r = self.local_alloc_zeroed(layout); + + mm_debug_log( + klog_types::AllocatorLogType::AllocZeroed(AllocLogItem::new( + layout.clone(), + Some(r as usize), + None, + )), + klog_types::LogSource::Slab, + ); + + return r; + } } unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { - mm_debug_log( - klog_types::AllocatorLogType::Free(AllocLogItem::new(layout, Some(ptr as usize), None)), - klog_types::LogSource::Buddy, - ); - - self.local_dealloc(ptr, layout); + if layout.size() > 2048 || slab_init_state() == false { + mm_debug_log( + klog_types::AllocatorLogType::Free(AllocLogItem::new( + layout.clone(), + Some(ptr as usize), + None, + )), + klog_types::LogSource::Buddy, + ); + + self.local_dealloc(ptr, layout); + } else { + mm_debug_log( + klog_types::AllocatorLogType::Free(AllocLogItem::new( + layout.clone(), + Some(ptr as usize), + None, + )), + klog_types::LogSource::Slab, + ); + + self.local_dealloc(ptr, layout); + } } } diff --git a/kernel/src/mm/allocator/slab.rs b/kernel/src/mm/allocator/slab.rs index 5adea79a9..9754f02c0 100644 --- a/kernel/src/mm/allocator/slab.rs +++ b/kernel/src/mm/allocator/slab.rs @@ -1,123 +1,79 @@ //! 当前slab分配器暂时不使用,等待后续完善后合并主线 #![allow(dead_code)] -use core::alloc::Layout; +use core::{alloc::Layout, ptr::NonNull}; -// 定义Slab,用来存放空闲块 -pub struct Slab { - block_size: usize, - free_block_list: FreeBlockList, -} +use alloc::boxed::Box; +use slabmalloc::*; -impl Slab { - /// @brief: 初始化一个slab - /// @param {usize} start_addr - /// @param {usize} slab_size - /// @param {usize} block_size - pub unsafe fn new(start_addr: usize, slab_size: usize, block_size: usize) -> Slab { - let blocks_num = slab_size / block_size; - return Slab { - block_size, - free_block_list: FreeBlockList::new(start_addr, block_size, blocks_num), - }; - } +use crate::libs::spinlock::SpinLock; - /// @brief: 获取slab中可用的block数 - pub fn used_blocks(&self) -> usize { - return self.free_block_list.len(); - } +// 全局slab分配器 +pub(crate) static SLABALLOCATOR: SpinLock> = SpinLock::new(None); - /// @brief: 扩大free_block_list - /// @param {*} mut - /// @param {usize} start_addr - /// @param {usize} slab_size - pub fn grow(&mut self, start_addr: usize, slab_size: usize) { - let num_of_blocks = slab_size / self.block_size; - let mut block_list = - unsafe { FreeBlockList::new(start_addr, self.block_size, num_of_blocks) }; - // 将新链表接到原链表的后面 - while let Some(block) = block_list.pop() { - self.free_block_list.push(block); - } - } - /// @brief: 从slab中分配一个block - /// @return 分配的内存地址 - pub fn allocate(&mut self, _layout: Layout) -> Option<*mut u8> { - match self.free_block_list.pop() { - Some(block) => return Some(block.addr() as *mut u8), - None => return None, - } - } - /// @brief: 将block归还给slab - pub fn free(&mut self, ptr: *mut u8) { - let ptr = ptr as *mut FreeBlock; - unsafe { - self.free_block_list.push(&mut *ptr); - } - } -} -/// slab中的空闲块 -struct FreeBlockList { - len: usize, - head: Option<&'static mut FreeBlock>, +// slab初始化状态 +pub(crate) static mut SLABINITSTATE: bool = false; + +/// slab分配器,实际为一堆小的allocator,可以在里面装4K的page +/// 利用这些allocator可以为对象分配不同大小的空间 +pub(crate) struct SlabAllocator { + zone: ZoneAllocator<'static>, } -impl FreeBlockList { - unsafe fn 
new(start_addr: usize, block_size: usize, num_of_blocks: usize) -> FreeBlockList { - let mut new_list = FreeBlockList::new_empty(); - for i in (0..num_of_blocks).rev() { - // 从后往前分配,避免内存碎片 - let new_block = (start_addr + i * block_size) as *mut FreeBlock; - new_list.push(&mut *new_block); +impl SlabAllocator { + /// 创建slab分配器 + pub fn new() -> SlabAllocator { + kdebug!("trying to new a slab_allocator"); + SlabAllocator { + zone: ZoneAllocator::new(), } - return new_list; } - fn new_empty() -> FreeBlockList { - return FreeBlockList { len: 0, head: None }; - } - - fn len(&self) -> usize { - return self.len; - } - - /// @brief: 将空闲块从链表中弹出 - fn pop(&mut self) -> Option<&'static mut FreeBlock> { - // 从链表中弹出一个空闲块 - let block = self.head.take().map(|node| { - self.head = node.next.take(); - self.len -= 1; - node - }); - return block; - } - - /// @brief: 将空闲块压入链表 - fn push(&mut self, free_block: &'static mut FreeBlock) { - free_block.next = self.head.take(); - self.len += 1; - self.head = Some(free_block); - } - - fn is_empty(&self) -> bool { - return self.head.is_none(); + /// 为对象(2K以内)分配内存空间 + pub(crate) unsafe fn allocate(&mut self, layout: Layout) -> *mut u8 { + match self.zone.allocate(layout) { + Ok(nptr) => nptr.as_ptr(), + Err(AllocationError::OutOfMemory) => { + let page = ObjectPage::new(); + let boxed_page = Box::new(page); + let leaked_page = Box::leak(boxed_page); + self.zone + .refill(layout, leaked_page) + .expect("Could not refill?"); + self.zone + .allocate(layout) + .expect("Should succeed after refill") + .as_ptr() + } + Err(AllocationError::InvalidLayout) => panic!("Can't allocate this size"), + } } -} -impl Drop for FreeBlockList { - fn drop(&mut self) { - while self.pop().is_some() {} + /// 释放内存空间 + pub(crate) unsafe fn deallocate( + &mut self, + ptr: *mut u8, + layout: Layout, + ) -> Result<(), AllocationError> { + if let Some(nptr) = NonNull::new(ptr) { + self.zone + .deallocate(nptr, layout) + .expect("Couldn't deallocate"); + return Ok(()); + } else { + return Ok(()); + } } } -struct FreeBlock { - next: Option<&'static mut FreeBlock>, +/// 初始化slab分配器 +pub unsafe fn slab_init() { + kdebug!("trying to init a slab_allocator"); + *SLABALLOCATOR.lock() = Some(SlabAllocator::new()); + SLABINITSTATE = true; } -impl FreeBlock { - /// @brief: 获取FreeBlock的地址 - /// @return {*} - fn addr(&self) -> usize { - return self as *const _ as usize; - } +// 查看slab初始化状态 +pub fn slab_init_state() -> bool { + unsafe { SLABINITSTATE } } diff --git a/kernel/src/mm/init.rs b/kernel/src/mm/init.rs index 515c2b35f..290124092 100644 --- a/kernel/src/mm/init.rs +++ b/kernel/src/mm/init.rs @@ -5,7 +5,7 @@ use crate::{ driver::serial::serial8250::send_to_default_serial8250_port, filesystem::procfs::kmsg::kmsg_init, libs::printk::PrintkWriter, - mm::{mmio_buddy::mmio_init, page::page_manager_init}, + mm::{allocator::slab::slab_init, mmio_buddy::mmio_init, page::page_manager_init}, }; use super::MemoryManagementArch; @@ -43,6 +43,9 @@ pub unsafe fn mm_init() { MMArch::init(); + // init slab + slab_init(); + // enable mmio mmio_init(); // enable KMSG From c5153b961a7af2feb233e76193b161f08b4204fc Mon Sep 17 00:00:00 2001 From: zhou <2628735358@qq.com> Date: Tue, 2 Apr 2024 17:53:35 +0800 Subject: [PATCH 2/7] slab --- kernel/crates/rust-slabmalloc/src/sc.rs | 2 +- kernel/src/mm/allocator/kernel_allocator.rs | 40 ++++++++++----------- 2 files changed, 20 insertions(+), 22 deletions(-) diff --git a/kernel/crates/rust-slabmalloc/src/sc.rs b/kernel/crates/rust-slabmalloc/src/sc.rs index 1a6de9660..fd34eada5 
100644 --- a/kernel/crates/rust-slabmalloc/src/sc.rs +++ b/kernel/crates/rust-slabmalloc/src/sc.rs @@ -315,7 +315,7 @@ impl<'a, P: AllocablePage> SCAllocator<'a, P> { P::SIZE ); - let page = (ptr.as_ptr() as usize) & !(P::SIZE - 1) as usize; + let page = (ptr.as_ptr() as usize) & !(P::SIZE - 1); // Figure out which page we are on and construct a reference to it // TODO: The linked list will have another &mut reference diff --git a/kernel/src/mm/allocator/kernel_allocator.rs b/kernel/src/mm/allocator/kernel_allocator.rs index 3e4b9e65b..f9f865dde 100644 --- a/kernel/src/mm/allocator/kernel_allocator.rs +++ b/kernel/src/mm/allocator/kernel_allocator.rs @@ -62,21 +62,21 @@ impl KernelAllocator { /// 为内核分配器实现LocalAlloc的trait impl LocalAlloc for KernelAllocator { unsafe fn local_alloc(&self, layout: Layout) -> *mut u8 { - if layout.size() > 2048 || slab_init_state() == false { + if layout.size() > 2048 || !slab_init_state() { return self .alloc_in_buddy(layout) - .map(|x| x.as_mut_ptr() as *mut u8) - .unwrap_or(core::ptr::null_mut() as *mut u8); + .map(|x| x.as_mut_ptr()) + .unwrap_or(core::ptr::null_mut()); } else { if let Some(ref mut slab) = *SLABALLOCATOR.lock_irqsave() { return slab.allocate(layout); }; - return core::ptr::null_mut() as *mut u8; + return core::ptr::null_mut(); } } unsafe fn local_alloc_zeroed(&self, layout: Layout) -> *mut u8 { - if layout.size() > 2048 || slab_init_state() == false { + if layout.size() > 2048 || !slab_init_state() { return self .alloc_in_buddy(layout) .map(|x| { @@ -84,23 +84,21 @@ impl LocalAlloc for KernelAllocator { core::ptr::write_bytes(ptr, 0, x.len()); ptr }) - .unwrap_or(core::ptr::null_mut() as *mut u8); + .unwrap_or(core::ptr::null_mut()); } else { if let Some(ref mut slab) = *SLABALLOCATOR.lock_irqsave() { return slab.allocate(layout); }; - return core::ptr::null_mut() as *mut u8; + return core::ptr::null_mut(); } } unsafe fn local_dealloc(&self, ptr: *mut u8, layout: Layout) { // self.free_in_buddy(ptr, layout) - if layout.size() > 2048 || slab_init_state() == false { + if layout.size() > 2048 || !slab_init_state() { self.free_in_buddy(ptr, layout) - } else { - if let Some(ref mut slab) = *SLABALLOCATOR.lock_irqsave() { - slab.deallocate(ptr, layout).unwrap() - } + } else if let Some(ref mut slab) = *SLABALLOCATOR.lock_irqsave() { + slab.deallocate(ptr, layout).unwrap() } } } @@ -108,11 +106,11 @@ impl LocalAlloc for KernelAllocator { /// 为内核slab分配器实现GlobalAlloc特性 unsafe impl GlobalAlloc for KernelAllocator { unsafe fn alloc(&self, layout: Layout) -> *mut u8 { - if layout.size() > 2048 || slab_init_state() == false { + if layout.size() > 2048 || !slab_init_state() { let r = self.local_alloc_zeroed(layout); mm_debug_log( klog_types::AllocatorLogType::Alloc(AllocLogItem::new( - layout.clone(), + layout, Some(r as usize), None, )), @@ -124,7 +122,7 @@ unsafe impl GlobalAlloc for KernelAllocator { let r = self.local_alloc_zeroed(layout); mm_debug_log( klog_types::AllocatorLogType::Alloc(AllocLogItem::new( - layout.clone(), + layout, Some(r as usize), None, )), @@ -136,12 +134,12 @@ unsafe impl GlobalAlloc for KernelAllocator { } unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { - if layout.size() > 2048 || slab_init_state() == false { + if layout.size() > 2048 || !slab_init_state() { let r = self.local_alloc_zeroed(layout); mm_debug_log( klog_types::AllocatorLogType::AllocZeroed(AllocLogItem::new( - layout.clone(), + layout, Some(r as usize), None, )), @@ -154,7 +152,7 @@ unsafe impl GlobalAlloc for KernelAllocator { mm_debug_log( 
klog_types::AllocatorLogType::AllocZeroed(AllocLogItem::new( - layout.clone(), + layout, Some(r as usize), None, )), @@ -166,10 +164,10 @@ unsafe impl GlobalAlloc for KernelAllocator { } unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { - if layout.size() > 2048 || slab_init_state() == false { + if layout.size() > 2048 || !slab_init_state() { mm_debug_log( klog_types::AllocatorLogType::Free(AllocLogItem::new( - layout.clone(), + layout, Some(ptr as usize), None, )), @@ -180,7 +178,7 @@ unsafe impl GlobalAlloc for KernelAllocator { } else { mm_debug_log( klog_types::AllocatorLogType::Free(AllocLogItem::new( - layout.clone(), + layout, Some(ptr as usize), None, )), From f31eb2f6a2c591a3d7bec18ff8432a65b24530b0 Mon Sep 17 00:00:00 2001 From: zhou <2628735358@qq.com> Date: Fri, 5 Apr 2024 16:11:28 +0800 Subject: [PATCH 3/7] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E4=BA=86=E9=83=A8?= =?UTF-8?q?=E5=88=86=E5=86=97=E4=BD=99=E4=BB=A3=E7=A0=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- kernel/src/mm/allocator/kernel_allocator.rs | 113 +++++++------------- kernel/src/mm/allocator/slab.rs | 14 ++- 2 files changed, 44 insertions(+), 83 deletions(-) diff --git a/kernel/src/mm/allocator/kernel_allocator.rs b/kernel/src/mm/allocator/kernel_allocator.rs index f9f865dde..47b642899 100644 --- a/kernel/src/mm/allocator/kernel_allocator.rs +++ b/kernel/src/mm/allocator/kernel_allocator.rs @@ -1,4 +1,4 @@ -use klog_types::AllocLogItem; +use klog_types::{AllocLogItem, LogSource}; use crate::{ arch::mm::LockedFrameAllocator, @@ -62,13 +62,13 @@ impl KernelAllocator { /// 为内核分配器实现LocalAlloc的trait impl LocalAlloc for KernelAllocator { unsafe fn local_alloc(&self, layout: Layout) -> *mut u8 { - if layout.size() > 2048 || !slab_init_state() { + if allocator_select_condition(layout) { return self .alloc_in_buddy(layout) .map(|x| x.as_mut_ptr()) .unwrap_or(core::ptr::null_mut()); } else { - if let Some(ref mut slab) = *SLABALLOCATOR.lock_irqsave() { + if let Some(ref mut slab) = SLABALLOCATOR { return slab.allocate(layout); }; return core::ptr::null_mut(); @@ -76,7 +76,7 @@ impl LocalAlloc for KernelAllocator { } unsafe fn local_alloc_zeroed(&self, layout: Layout) -> *mut u8 { - if layout.size() > 2048 || !slab_init_state() { + if allocator_select_condition(layout) { return self .alloc_in_buddy(layout) .map(|x| { @@ -86,7 +86,7 @@ impl LocalAlloc for KernelAllocator { }) .unwrap_or(core::ptr::null_mut()); } else { - if let Some(ref mut slab) = *SLABALLOCATOR.lock_irqsave() { + if let Some(ref mut slab) = SLABALLOCATOR { return slab.allocate(layout); }; return core::ptr::null_mut(); @@ -97,7 +97,7 @@ impl LocalAlloc for KernelAllocator { // self.free_in_buddy(ptr, layout) if layout.size() > 2048 || !slab_init_state() { self.free_in_buddy(ptr, layout) - } else if let Some(ref mut slab) = *SLABALLOCATOR.lock_irqsave() { + } else if let Some(ref mut slab) = SLABALLOCATOR { slab.deallocate(ptr, layout).unwrap() } } @@ -106,90 +106,53 @@ impl LocalAlloc for KernelAllocator { /// 为内核slab分配器实现GlobalAlloc特性 unsafe impl GlobalAlloc for KernelAllocator { unsafe fn alloc(&self, layout: Layout) -> *mut u8 { - if layout.size() > 2048 || !slab_init_state() { - let r = self.local_alloc_zeroed(layout); - mm_debug_log( - klog_types::AllocatorLogType::Alloc(AllocLogItem::new( - layout, - Some(r as usize), - None, - )), - klog_types::LogSource::Buddy, - ); - - return r; + let r = self.local_alloc_zeroed(layout); + if allocator_select_condition(layout) { + 
alloc_debug_log(klog_types::LogSource::Buddy, layout, r); } else { - let r = self.local_alloc_zeroed(layout); - mm_debug_log( - klog_types::AllocatorLogType::Alloc(AllocLogItem::new( - layout, - Some(r as usize), - None, - )), - klog_types::LogSource::Slab, - ); - - return r; + alloc_debug_log(klog_types::LogSource::Slab, layout, r); } + return r; } unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { - if layout.size() > 2048 || !slab_init_state() { - let r = self.local_alloc_zeroed(layout); - - mm_debug_log( - klog_types::AllocatorLogType::AllocZeroed(AllocLogItem::new( - layout, - Some(r as usize), - None, - )), - klog_types::LogSource::Buddy, - ); - - return r; + let r = self.local_alloc_zeroed(layout); + if allocator_select_condition(layout) { + alloc_debug_log(klog_types::LogSource::Buddy, layout, r); } else { - let r = self.local_alloc_zeroed(layout); - - mm_debug_log( - klog_types::AllocatorLogType::AllocZeroed(AllocLogItem::new( - layout, - Some(r as usize), - None, - )), - klog_types::LogSource::Slab, - ); - - return r; + alloc_debug_log(klog_types::LogSource::Slab, layout, r); } + return r; } unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { - if layout.size() > 2048 || !slab_init_state() { - mm_debug_log( - klog_types::AllocatorLogType::Free(AllocLogItem::new( - layout, - Some(ptr as usize), - None, - )), - klog_types::LogSource::Buddy, - ); - - self.local_dealloc(ptr, layout); + if allocator_select_condition(layout) { + dealloc_debug_log(klog_types::LogSource::Buddy, layout, ptr); } else { - mm_debug_log( - klog_types::AllocatorLogType::Free(AllocLogItem::new( - layout, - Some(ptr as usize), - None, - )), - klog_types::LogSource::Slab, - ); - - self.local_dealloc(ptr, layout); + dealloc_debug_log(klog_types::LogSource::Slab, layout, ptr); } + self.local_dealloc(ptr, layout); } } +fn allocator_select_condition(layout: Layout) -> bool { + layout.size() > 2048 || !slab_init_state() +} + +fn alloc_debug_log(source: LogSource, layout: Layout, ptr: *mut u8) { + mm_debug_log( + klog_types::AllocatorLogType::Alloc(AllocLogItem::new(layout, Some(ptr as usize), None)), + source, + ) +} + +fn dealloc_debug_log(source: LogSource, layout: Layout, ptr: *mut u8) { + mm_debug_log( + klog_types::AllocatorLogType::Free(AllocLogItem::new(layout, Some(ptr as usize), None)), + source, + ) +} + /// 为内核slab分配器实现Allocator特性 // unsafe impl Allocator for KernelAllocator { // fn allocate(&self, layout: Layout) -> Result, AllocError> { diff --git a/kernel/src/mm/allocator/slab.rs b/kernel/src/mm/allocator/slab.rs index 9754f02c0..c80c222c9 100644 --- a/kernel/src/mm/allocator/slab.rs +++ b/kernel/src/mm/allocator/slab.rs @@ -1,18 +1,16 @@ //! 
当前slab分配器暂时不使用,等待后续完善后合并主线 #![allow(dead_code)] -use core::{alloc::Layout, ptr::NonNull}; +use core::{alloc::Layout, ptr::NonNull, sync::atomic::AtomicBool}; use alloc::boxed::Box; use slabmalloc::*; -use crate::libs::spinlock::SpinLock; - // 全局slab分配器 -pub(crate) static SLABALLOCATOR: SpinLock> = SpinLock::new(None); +pub(crate) static mut SLABALLOCATOR: Option = None; // slab初始化状态 -pub(crate) static mut SLABINITSTATE: bool = false; +pub(crate) static mut SLABINITSTATE: AtomicBool = AtomicBool::new(false); /// slab分配器,实际为一堆小的allocator,可以在里面装4K的page /// 利用这些allocator可以为对象分配不同大小的空间 @@ -69,11 +67,11 @@ impl SlabAllocator { /// 初始化slab分配器 pub unsafe fn slab_init() { kdebug!("trying to init a slab_allocator"); - *SLABALLOCATOR.lock() = Some(SlabAllocator::new()); - SLABINITSTATE = true; + SLABALLOCATOR = Some(SlabAllocator::new()); + SLABINITSTATE = true.into(); } // 查看slab初始化状态 pub fn slab_init_state() -> bool { - unsafe { SLABINITSTATE } + unsafe { *SLABINITSTATE.get_mut() } } From e7573296511c8857206e1e6541874bbb07ac16e4 Mon Sep 17 00:00:00 2001 From: zhou <2628735358@qq.com> Date: Sun, 14 Apr 2024 22:02:28 +0800 Subject: [PATCH 4/7] =?UTF-8?q?=E6=9B=B4=E6=96=B0slab=E5=88=86=E9=85=8D?= =?UTF-8?q?=E5=99=A8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- kernel/crates/rust-slabmalloc/src/lib.rs | 4 +++ kernel/crates/rust-slabmalloc/src/pages.rs | 38 ++++++++++++++++----- kernel/src/mm/allocator/kernel_allocator.rs | 6 ++-- kernel/src/mm/allocator/slab.rs | 3 +- 4 files changed, 38 insertions(+), 13 deletions(-) diff --git a/kernel/crates/rust-slabmalloc/src/lib.rs b/kernel/crates/rust-slabmalloc/src/lib.rs index 1930b958e..819fc854c 100644 --- a/kernel/crates/rust-slabmalloc/src/lib.rs +++ b/kernel/crates/rust-slabmalloc/src/lib.rs @@ -22,6 +22,10 @@ #![no_std] #![crate_name = "slabmalloc"] #![crate_type = "lib"] +#![feature(new_uninit)] +#![feature(maybe_uninit_as_bytes)] + +extern crate alloc; mod pages; mod sc; diff --git a/kernel/crates/rust-slabmalloc/src/pages.rs b/kernel/crates/rust-slabmalloc/src/pages.rs index 61089ea72..ce854dbc0 100644 --- a/kernel/crates/rust-slabmalloc/src/pages.rs +++ b/kernel/crates/rust-slabmalloc/src/pages.rs @@ -1,3 +1,5 @@ +use alloc::boxed::Box; + use crate::*; use core::{ mem, @@ -57,6 +59,9 @@ impl Bitfield for [AtomicU64] { layout: Layout, page_size: usize, ) -> Option<(usize, usize)> { + let start_offset = get_offset_for_align(layout); + let data_start = base_addr + start_offset; + for (base_idx, b) in self.iter().enumerate() { let bitval = b.load(Ordering::Relaxed); if bitval == u64::max_value() { @@ -74,7 +79,7 @@ impl Bitfield for [AtomicU64] { return None; } - let addr: usize = base_addr + offset; + let addr: usize = data_start + offset; let alignment_ok = addr % layout.align() == 0; let block_is_free = bitval & (1 << first_free) == 0; if alignment_ok && block_is_free { @@ -152,6 +157,22 @@ impl Bitfield for [AtomicU64] { } } +fn get_offset_for_align(layout: Layout) -> usize { + let offset = match layout.size() { + 0..=8 => 80, + 9..=16 => 80, + 17..=32 => 96, + 33..=64 => 128, + 65..=128 => 128, + 129..=256 => 256, + 257..=512 => 512, + 513..=1024 => 1024, + 1025..=2048 => 2048, + _ => panic!(), + }; + offset +} + /// This trait is used to define a page from which objects are allocated /// in an `SCAllocator`. 
/// @@ -211,7 +232,8 @@ pub trait AllocablePage { ptr, layout ); - let page_offset = (ptr.as_ptr() as usize) & (Self::SIZE - 1); + let align_offset = get_offset_for_align(layout); + let page_offset = ((ptr.as_ptr() as usize) - align_offset) & (Self::SIZE - 1); assert!(page_offset % layout.size() == 0); let idx = page_offset / layout.size(); assert!( @@ -237,22 +259,22 @@ pub trait AllocablePage { /// members (e.g., dealloc does a cast to find the bitfield). #[repr(C)] pub struct ObjectPage<'a> { - /// Holds memory objects. #[allow(dead_code)] - data: [u8; OBJECT_PAGE_SIZE - OBJECT_PAGE_METADATA_OVERHEAD], + /// A bit-field to track free/allocated memory within `data`. + pub(crate) bitfield: [AtomicU64; 8], /// Next element in list (used by `PageList`). next: Rawlink>, /// Previous element in list (used by `PageList`) prev: Rawlink>, - /// A bit-field to track free/allocated memory within `data`. - pub(crate) bitfield: [AtomicU64; 8], + /// Holds memory objects. + data: [u8; OBJECT_PAGE_SIZE - OBJECT_PAGE_METADATA_OVERHEAD], } impl<'a> ObjectPage<'a> { - pub fn new() -> ObjectPage<'a> { - unsafe { mem::MaybeUninit::zeroed().assume_init() } + pub fn new() -> Box> { + unsafe { Box::new_uninit().assume_init() } } } diff --git a/kernel/src/mm/allocator/kernel_allocator.rs b/kernel/src/mm/allocator/kernel_allocator.rs index 47b642899..9e55a4ab5 100644 --- a/kernel/src/mm/allocator/kernel_allocator.rs +++ b/kernel/src/mm/allocator/kernel_allocator.rs @@ -94,8 +94,7 @@ impl LocalAlloc for KernelAllocator { } unsafe fn local_dealloc(&self, ptr: *mut u8, layout: Layout) { - // self.free_in_buddy(ptr, layout) - if layout.size() > 2048 || !slab_init_state() { + if allocator_select_condition(layout) || ((ptr as usize) % 4096) == 0 { self.free_in_buddy(ptr, layout) } else if let Some(ref mut slab) = SLABALLOCATOR { slab.deallocate(ptr, layout).unwrap() @@ -126,7 +125,7 @@ unsafe impl GlobalAlloc for KernelAllocator { } unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { - if allocator_select_condition(layout) { + if allocator_select_condition(layout) || ((ptr as usize) % 4096) == 0 { dealloc_debug_log(klog_types::LogSource::Buddy, layout, ptr); } else { dealloc_debug_log(klog_types::LogSource::Slab, layout, ptr); @@ -135,6 +134,7 @@ unsafe impl GlobalAlloc for KernelAllocator { } } +/// 判断选择buddy分配器还是slab分配器 fn allocator_select_condition(layout: Layout) -> bool { layout.size() > 2048 || !slab_init_state() } diff --git a/kernel/src/mm/allocator/slab.rs b/kernel/src/mm/allocator/slab.rs index c80c222c9..d7e4efb6f 100644 --- a/kernel/src/mm/allocator/slab.rs +++ b/kernel/src/mm/allocator/slab.rs @@ -32,8 +32,7 @@ impl SlabAllocator { match self.zone.allocate(layout) { Ok(nptr) => nptr.as_ptr(), Err(AllocationError::OutOfMemory) => { - let page = ObjectPage::new(); - let boxed_page = Box::new(page); + let boxed_page = ObjectPage::new(); let leaked_page = Box::leak(boxed_page); self.zone .refill(layout, leaked_page) From 962be53201ac9bc95cc2088711c59bf84d814704 Mon Sep 17 00:00:00 2001 From: zhou <2628735358@qq.com> Date: Sun, 14 Apr 2024 22:05:08 +0800 Subject: [PATCH 5/7] =?UTF-8?q?make=20fmt=E4=BF=AE=E6=94=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- kernel/crates/rust-slabmalloc/src/pages.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/kernel/crates/rust-slabmalloc/src/pages.rs b/kernel/crates/rust-slabmalloc/src/pages.rs index ce854dbc0..16bd596ce 100644 --- a/kernel/crates/rust-slabmalloc/src/pages.rs +++ 
b/kernel/crates/rust-slabmalloc/src/pages.rs @@ -158,7 +158,7 @@ impl Bitfield for [AtomicU64] { } fn get_offset_for_align(layout: Layout) -> usize { - let offset = match layout.size() { + match layout.size() { 0..=8 => 80, 9..=16 => 80, 17..=32 => 96, @@ -169,8 +169,7 @@ fn get_offset_for_align(layout: Layout) -> usize { 513..=1024 => 1024, 1025..=2048 => 2048, _ => panic!(), - }; - offset + } } /// This trait is used to define a page from which objects are allocated From f1b3cdff69f99ca65d30b849e137265c9ad45542 Mon Sep 17 00:00:00 2001 From: longjin Date: Mon, 15 Apr 2024 04:48:54 +0000 Subject: [PATCH 6/7] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E6=B3=A8=E9=87=8A?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- kernel/crates/rust-slabmalloc/src/pages.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/kernel/crates/rust-slabmalloc/src/pages.rs b/kernel/crates/rust-slabmalloc/src/pages.rs index 16bd596ce..eac68f561 100644 --- a/kernel/crates/rust-slabmalloc/src/pages.rs +++ b/kernel/crates/rust-slabmalloc/src/pages.rs @@ -157,6 +157,17 @@ impl Bitfield for [AtomicU64] { } } +/// # get_offset_for_align - 根据布局大小获取page内对齐偏移量 +/// +/// 这个函数根据给定的`Layout`大小确定一个合适的对齐偏移量。 +/// +/// ## 参数 +/// +/// - layout: Layout,这是需要计算对齐偏移量的布局参数。 +/// +/// ## 返回值 +/// +/// - usize: 成功时返回一个usize类型的对齐偏移量。 fn get_offset_for_align(layout: Layout) -> usize { match layout.size() { 0..=8 => 80, From 16472ced81da8aee1aa59937f275548f1c4c0184 Mon Sep 17 00:00:00 2001 From: longjin Date: Mon, 15 Apr 2024 04:50:51 +0000 Subject: [PATCH 7/7] fmt --- kernel/crates/rust-slabmalloc/src/pages.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/kernel/crates/rust-slabmalloc/src/pages.rs b/kernel/crates/rust-slabmalloc/src/pages.rs index eac68f561..ba667148f 100644 --- a/kernel/crates/rust-slabmalloc/src/pages.rs +++ b/kernel/crates/rust-slabmalloc/src/pages.rs @@ -158,15 +158,15 @@ impl Bitfield for [AtomicU64] { } /// # get_offset_for_align - 根据布局大小获取page内对齐偏移量 -/// +/// /// 这个函数根据给定的`Layout`大小确定一个合适的对齐偏移量。 -/// +/// /// ## 参数 -/// +/// /// - layout: Layout,这是需要计算对齐偏移量的布局参数。 -/// +/// /// ## 返回值 -/// +/// /// - usize: 成功时返回一个usize类型的对齐偏移量。 fn get_offset_for_align(layout: Layout) -> usize { match layout.size() {
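
Usage note: below is a minimal sketch of driving the ZoneAllocator API introduced by this series, following the refill-on-OutOfMemory pattern of SlabAllocator::allocate in kernel/src/mm/allocator/slab.rs. The `demo` function name and the 64-byte layout are illustrative assumptions, not part of the patches.

// Minimal usage sketch of the slabmalloc API added by this series.
// Assumes the `alloc` crate is available, as in the kernel's SlabAllocator.
extern crate alloc;

use alloc::boxed::Box;
use core::alloc::Layout;
use slabmalloc::{AllocationError, Allocator, ObjectPage, ZoneAllocator};

unsafe fn demo(zone: &mut ZoneAllocator<'static>) {
    let layout = Layout::from_size_align(64, 8).unwrap();

    // The first allocation in a size class fails with OutOfMemory until the
    // zone is refilled with a 4 KiB ObjectPage backing that class.
    let ptr = match zone.allocate(layout) {
        Ok(nptr) => nptr,
        Err(AllocationError::OutOfMemory) => {
            // Leak a fresh page into the allocator and retry, mirroring
            // SlabAllocator::allocate above.
            let page = Box::leak(ObjectPage::new());
            zone.refill(layout, page).expect("refill failed");
            zone.allocate(layout).expect("succeeds after refill")
        }
        Err(AllocationError::InvalidLayout) => panic!("sizes above 2048 bytes are unsupported"),
    };

    // Return the object to its slab page; the page may later move
    // full -> partial -> empty and become reclaimable.
    zone.deallocate(ptr, layout).expect("deallocate failed");
}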