From c815b909e6b055c448a05ebf21d81e2931c7960a Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Wed, 4 Feb 2026 06:46:22 +0000 Subject: [PATCH 1/3] improve vmap to map non-contiguous physical pages --- .../src/arch/x86/mm/paging.rs | 93 ++++ litebox_platform_lvbs/src/lib.rs | 144 ++++-- litebox_platform_lvbs/src/mm/mod.rs | 1 + litebox_platform_lvbs/src/mm/vmap.rs | 468 ++++++++++++++++++ 4 files changed, 660 insertions(+), 46 deletions(-) create mode 100644 litebox_platform_lvbs/src/mm/vmap.rs diff --git a/litebox_platform_lvbs/src/arch/x86/mm/paging.rs b/litebox_platform_lvbs/src/arch/x86/mm/paging.rs index 3fd566f8e..1a6532ab5 100644 --- a/litebox_platform_lvbs/src/arch/x86/mm/paging.rs +++ b/litebox_platform_lvbs/src/arch/x86/mm/paging.rs @@ -365,6 +365,99 @@ impl X64PageTable<'_, M, ALIGN> { Ok(M::pa_to_va(frame_range.start.start_address()).as_mut_ptr()) } + /// Map non-contiguous physical frames to virtually contiguous addresses. + /// + /// This function maps each physical frame in `frames` to consecutive virtual addresses + /// starting from `base_va`. Unlike `map_phys_frame_range`, this allows mapping + /// non-contiguous physical pages to a contiguous virtual address range. + /// + /// # Arguments + /// * `frames` - Slice of physical frames to map (may be non-contiguous) + /// * `base_va` - Starting virtual address for the mapping + /// * `flags` - Page table flags to apply to all mappings + /// + /// # Returns + /// * `Ok(())` on success + /// * `Err(MapToError::PageAlreadyMapped)` if any VA is already mapped + /// * `Err(MapToError::FrameAllocationFailed)` if page table allocation fails + /// + /// # Behavior + /// - Any existing mapping is treated as an error (shared mappings not supported) + /// - On error, all pages mapped by this call are unmapped (atomic) + pub(crate) fn map_non_contiguous_phys_pages( + &self, + frames: &[PhysFrame], + base_va: VirtAddr, + flags: PageTableFlags, + ) -> Result<(), MapToError> { + let mut allocator = PageTableAllocator::::new(); + let mut mapped_count: usize = 0; + + let mut inner = self.inner.lock(); + for (i, &target_frame) in frames.iter().enumerate() { + let va = base_va + (i as u64) * Size4KiB::SIZE; + let page: Page = Page::containing_address(va); + + // Without shared mapping support, any existing mapping is an error + match inner.translate(page.start_address()) { + TranslateResult::Mapped { frame, .. } => { + // Page already mapped - rollback and fail + Self::rollback_mapped_pages(&mut inner, base_va, mapped_count); + // Convert MappedFrame to PhysFrame for the error + let existing_frame = + PhysFrame::::containing_address(frame.start_address()); + return Err(MapToError::PageAlreadyMapped(existing_frame)); + } + TranslateResult::NotMapped => {} + TranslateResult::InvalidFrameAddress(_) => { + Self::rollback_mapped_pages(&mut inner, base_va, mapped_count); + return Err(MapToError::FrameAllocationFailed); + } + } + + let table_flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE; + match unsafe { + inner.map_to_with_table_flags( + page, + target_frame, + flags, + table_flags, + &mut allocator, + ) + } { + Ok(fl) => { + mapped_count += 1; + if FLUSH_TLB { + fl.flush(); + } + } + Err(e) => { + Self::rollback_mapped_pages(&mut inner, base_va, mapped_count); + return Err(e); + } + } + } + + Ok(()) + } + + /// Rollback helper: unmap the first `count` pages starting from `base_va`. 
+ fn rollback_mapped_pages( + inner: &mut MappedPageTable<'_, FrameMapping>, + base_va: VirtAddr, + count: usize, + ) { + for i in 0..count { + let va = base_va + (i as u64) * Size4KiB::SIZE; + let page: Page = Page::containing_address(va); + if let Ok((_frame, fl)) = inner.unmap(page) + && FLUSH_TLB + { + fl.flush(); + } + } + } + /// This function creates a new empty top-level page table. pub(crate) unsafe fn new_top_level() -> Self { let frame = PageTableAllocator::::allocate_frame(true) diff --git a/litebox_platform_lvbs/src/lib.rs b/litebox_platform_lvbs/src/lib.rs index 1892e5f52..c3ebfaa24 100644 --- a/litebox_platform_lvbs/src/lib.rs +++ b/litebox_platform_lvbs/src/lib.rs @@ -41,6 +41,9 @@ use x86_64::{ }; use zerocopy::{FromBytes, IntoBytes}; +#[cfg(feature = "optee_syscall")] +use crate::mm::vmap::{allocate_and_register_vmap, rollback_vmap_allocation}; + extern crate alloc; pub mod arch; @@ -1170,22 +1173,19 @@ impl litebox::platform::SystemInfoProvider for LinuxKernel< } /// Checks whether the given physical addresses are contiguous with respect to ALIGN. -/// -/// Note: This is a temporary check to let `VmapManager` work with this platform -/// which does not yet support virtually contiguous mapping of non-contiguous physical pages -/// (for now, it maps physical pages with a fixed offset). -#[cfg(feature = "optee_syscall")] -fn check_contiguity( - addrs: &[PhysPageAddr], -) -> Result<(), PhysPointerError> { +fn is_contiguous(addrs: &[PhysPageAddr]) -> bool { for window in addrs.windows(2) { let first = window[0].as_usize(); let second = window[1].as_usize(); - if second != first.checked_add(ALIGN).ok_or(PhysPointerError::Overflow)? { - return Err(PhysPointerError::NonContiguousPages); + if let Some(expected) = first.checked_add(ALIGN) { + if second != expected { + return false; + } + } else { + return false; } } - Ok(()) + true } #[cfg(feature = "optee_syscall")] @@ -1195,54 +1195,108 @@ impl VmapManager for LinuxKernel pages: &PhysPageAddrArray, perms: PhysPageMapPermissions, ) -> Result, PhysPointerError> { - // TODO: Remove this check once this platform supports virtually contiguous - // non-contiguous physical page mapping. - check_contiguity(pages)?; - if pages.is_empty() { return Err(PhysPointerError::InvalidPhysicalAddress(0)); } - let phys_start = x86_64::PhysAddr::new(pages[0].as_usize() as u64); - let phys_end = x86_64::PhysAddr::new( - pages - .last() - .unwrap() - .as_usize() - .checked_add(ALIGN) - .ok_or(PhysPointerError::Overflow)? as u64, - ); - let frame_range = if ALIGN == PAGE_SIZE { - PhysFrame::range( - PhysFrame::::containing_address(phys_start), - PhysFrame::::containing_address(phys_end), - ) - } else { - unimplemented!("ALIGN other than 4KiB is not supported yet") - }; + + if ALIGN != PAGE_SIZE { + unimplemented!("ALIGN other than 4KiB is not supported yet"); + } let mut flags = PageTableFlags::PRESENT; if perms.contains(PhysPageMapPermissions::WRITE) { flags |= PageTableFlags::WRITABLE; } - if let Ok(page_addr) = self - .page_table_manager - .current_page_table() - .map_phys_frame_range(frame_range, flags) + // Check if pages are contiguous - use fast path if so + if is_contiguous(pages) { + let phys_start = x86_64::PhysAddr::new(pages[0].as_usize() as u64); + let phys_end = x86_64::PhysAddr::new( + pages + .last() + .unwrap() + .as_usize() + .checked_add(ALIGN) + .ok_or(PhysPointerError::Overflow)? 
as u64, + ); + let frame_range = PhysFrame::range( + PhysFrame::::containing_address(phys_start), + PhysFrame::::containing_address(phys_end), + ); + + if let Ok(page_addr) = self + .page_table_manager + .current_page_table() + .map_phys_frame_range(frame_range, flags) + { + return Ok(PhysPageMapInfo { + base: page_addr, + size: pages.len() * ALIGN, + }); + } else { + return Err(PhysPointerError::InvalidPhysicalAddress( + pages[0].as_usize(), + )); + } + } + + // Non-contiguous pages: use vmap region allocator + + // Convert pages to PhysAddrs for the allocator + let phys_addrs: alloc::vec::Vec = pages + .iter() + .map(|p| x86_64::PhysAddr::new(p.as_usize() as u64)) + .collect(); + + // Atomically allocate VA range and register all mappings + // This also checks for duplicate PA mappings + let base_va = allocate_and_register_vmap(&phys_addrs) + .ok_or(PhysPointerError::AlreadyMapped(pages[0].as_usize()))?; + + // Convert to PhysFrames for page table mapping + let frames: alloc::vec::Vec> = phys_addrs + .iter() + .map(|&pa| PhysFrame::containing_address(pa)) + .collect(); + + // Map the non-contiguous physical pages to the contiguous virtual range + if let Err(_e) = self + .page_table + .map_non_contiguous_phys_pages(&frames, base_va, flags) { - Ok(PhysPageMapInfo { - base: page_addr, - size: pages.len() * ALIGN, - }) - } else { - Err(PhysPointerError::InvalidPhysicalAddress( + // Rollback the vmap allocation on failure + rollback_vmap_allocation(base_va); + return Err(PhysPointerError::InvalidPhysicalAddress( pages[0].as_usize(), - )) + )); } + + Ok(PhysPageMapInfo { + base: base_va.as_mut_ptr(), + size: pages.len() * ALIGN, + }) } unsafe fn vunmap(&self, vmap_info: PhysPageMapInfo) -> Result<(), PhysPointerError> { - if ALIGN == PAGE_SIZE { + if ALIGN != PAGE_SIZE { + unimplemented!("ALIGN other than 4KiB is not supported yet"); + } + + let base_va = x86_64::VirtAddr::new_truncate(vmap_info.base as u64); + + // Check if this is a vmap region allocation + if crate::mm::vmap::is_vmap_address(base_va) { + // Unregister from vmap allocator and get number of pages + let num_pages = crate::mm::vmap::unregister_vmap_allocation(base_va) + .ok_or(PhysPointerError::Unmapped(vmap_info.base as usize))?; + + // Unmap VTL0 pages (doesn't deallocate frames - they're not owned by us) + self.unmap_vtl0_pages(vmap_info.base, num_pages * ALIGN) + .map_err(|_| PhysPointerError::Unmapped(vmap_info.base as usize))?; + + Ok(()) + } else { + // Fall back to original behavior for direct-mapped pages let Some(page_range) = PageRange::::new( vmap_info.base as usize, vmap_info.base.wrapping_add(vmap_info.size) as usize, @@ -1258,8 +1312,6 @@ impl VmapManager for LinuxKernel .unmap_pages(page_range, false, true) .map_err(|_| PhysPointerError::Unmapped(vmap_info.base as usize)) } - } else { - unimplemented!("ALIGN other than 4KiB is not supported yet") } } diff --git a/litebox_platform_lvbs/src/mm/mod.rs b/litebox_platform_lvbs/src/mm/mod.rs index 8365ce6f4..670438b59 100644 --- a/litebox_platform_lvbs/src/mm/mod.rs +++ b/litebox_platform_lvbs/src/mm/mod.rs @@ -6,6 +6,7 @@ use crate::arch::{PhysAddr, VirtAddr}; pub(crate) mod pgtable; +pub mod vmap; #[cfg(test)] pub mod tests; diff --git a/litebox_platform_lvbs/src/mm/vmap.rs b/litebox_platform_lvbs/src/mm/vmap.rs new file mode 100644 index 000000000..59f284d18 --- /dev/null +++ b/litebox_platform_lvbs/src/mm/vmap.rs @@ -0,0 +1,468 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +//! 
Vmap region allocator for mapping non-contiguous physical pages to virtually contiguous addresses. +//! +//! This module provides functionality similar to Linux kernel's `vmap()` and `vunmap()`: +//! - Reserves a virtual address region for vmap mappings +//! - Maintains PA↔VA mappings using HashMap for efficient lookups +//! - Supports `pa_to_va` and `va_to_pa` operations within the vmap region + +use alloc::vec::Vec; +use hashbrown::HashMap; +use spin::mutex::SpinMutex; +use x86_64::{PhysAddr, VirtAddr}; + +use super::MemoryProvider; +use crate::mshv::vtl1_mem_layout::PAGE_SIZE; + +/// Start of the vmap virtual address region. +/// This address is chosen to be within the 4-level paging canonical address space +/// and not conflict with VTL1's direct-mapped physical memory. +pub const VMAP_START: u64 = 0x6000_0000_0000; + +/// End of the vmap virtual address region. +/// Provides 1 TiB of virtual address space for vmap allocations. +pub const VMAP_END: u64 = 0x7000_0000_0000; + +/// Size of the vmap region in bytes. +pub const VMAP_SIZE: u64 = VMAP_END - VMAP_START; + +/// Information about a single vmap allocation. +#[derive(Clone, Debug)] +struct VmapAllocation { + /// Number of pages in the allocation. + num_pages: usize, + /// Physical addresses of the mapped pages (in order). + phys_addrs: Vec, +} + +/// A freed VA range available for reuse. +#[derive(Clone, Debug)] +struct FreeRange { + /// Starting virtual address. + start: VirtAddr, + /// Number of pages in this free range. + num_pages: usize, +} + +/// Vmap region allocator that manages virtual address allocation and PA↔VA mappings. +/// +/// This allocator uses a bump allocator with a free list for virtual addresses and HashMap +/// for maintaining bidirectional mappings between physical and virtual addresses. +pub struct VmapRegionAllocator { + /// Next available virtual address for allocation (bump allocator). + next_va: VirtAddr, + /// Free list of previously allocated and freed VA ranges. + free_list: Vec, + /// Map from physical address to virtual address. + pa_to_va_map: HashMap, + /// Map from virtual address to physical address. + va_to_pa_map: HashMap, + /// Allocation metadata indexed by starting virtual address. + allocations: HashMap, +} + +impl VmapRegionAllocator { + /// Creates a new vmap region allocator. + pub fn new() -> Self { + Self { + next_va: VirtAddr::new_truncate(VMAP_START), + free_list: Vec::new(), + pa_to_va_map: HashMap::new(), + va_to_pa_map: HashMap::new(), + allocations: HashMap::new(), + } + } + + /// Allocates a contiguous virtual address range for the given number of pages. + /// + /// First tries to find a suitable range in the free list, then falls back to + /// bump allocation. + /// + /// Returns `Some(VirtAddr)` with the starting virtual address on success, + /// or `None` if insufficient virtual address space is available. 
+ pub fn allocate_va_range(&mut self, num_pages: usize) -> Option { + if num_pages == 0 { + return None; + } + + // First, try to find a suitable range in the free list (first-fit) + if let Some(idx) = self.free_list.iter().position(|r| r.num_pages >= num_pages) { + let range = self.free_list.remove(idx); + if range.num_pages > num_pages { + // Split: put remainder back in free list + let remainder_start = VirtAddr::new_truncate( + range.start.as_u64() + (num_pages as u64) * (PAGE_SIZE as u64), + ); + self.free_list.push(FreeRange { + start: remainder_start, + num_pages: range.num_pages - num_pages, + }); + } + return Some(range.start); + } + + // Fall back to bump allocation + let size = (num_pages as u64).checked_mul(PAGE_SIZE as u64)?; + let end_va = self.next_va.as_u64().checked_add(size)?; + + if end_va > VMAP_END { + return None; + } + + let allocated_va = self.next_va; + self.next_va = VirtAddr::new_truncate(end_va); + Some(allocated_va) + } + + /// Returns a VA range to the free list for reuse. + fn free_va_range(&mut self, start: VirtAddr, num_pages: usize) { + if num_pages == 0 { + return; + } + // Simple approach: just add to free list + // A more sophisticated implementation could coalesce adjacent ranges + self.free_list.push(FreeRange { start, num_pages }); + } + + /// Atomically allocates VA range, registers mappings, and records allocation. + /// + /// This ensures consistency - either the entire operation succeeds or nothing changes. + /// + /// Returns the allocated base VA on success, or None if: + /// - No VA space available + /// - Any PA is already mapped (duplicate mapping) + pub fn allocate_and_register(&mut self, phys_addrs: &[PhysAddr]) -> Option { + if phys_addrs.is_empty() { + return None; + } + + // Check for duplicate PA mappings before allocating + for pa in phys_addrs { + if self.pa_to_va_map.contains_key(&pa.as_u64()) { + return None; // PA already mapped + } + } + + // Allocate VA range + let base_va = self.allocate_va_range(phys_addrs.len())?; + + // Register all mappings + for (i, &pa) in phys_addrs.iter().enumerate() { + let va = VirtAddr::new_truncate(base_va.as_u64() + (i as u64) * (PAGE_SIZE as u64)); + self.pa_to_va_map.insert(pa.as_u64(), va.as_u64()); + self.va_to_pa_map.insert(va.as_u64(), pa.as_u64()); + } + + // Record allocation metadata + self.allocations.insert( + base_va.as_u64(), + VmapAllocation { + num_pages: phys_addrs.len(), + phys_addrs: phys_addrs.to_vec(), + }, + ); + + Some(base_va) + } + + /// Rolls back a failed allocation by removing mappings and freeing VA range. + /// + /// Call this if page table mapping fails after `allocate_and_register` succeeds. + pub fn rollback_allocation(&mut self, base_va: VirtAddr) { + if let Some(allocation) = self.allocations.remove(&base_va.as_u64()) { + // Remove all individual page mappings + for (i, pa) in allocation.phys_addrs.iter().enumerate() { + let va = VirtAddr::new_truncate(base_va.as_u64() + (i as u64) * (PAGE_SIZE as u64)); + self.pa_to_va_map.remove(&pa.as_u64()); + self.va_to_pa_map.remove(&va.as_u64()); + } + // Return VA range to free list + self.free_va_range(base_va, allocation.num_pages); + } + } + + /// Unregisters all mappings for an allocation starting at the given virtual address. + /// + /// Returns the number of pages that were unmapped, or `None` if no allocation was found. 
+ pub fn unregister_allocation(&mut self, va_start: VirtAddr) -> Option { + let allocation = self.allocations.remove(&va_start.as_u64())?; + + // Remove all individual page mappings + for (i, pa) in allocation.phys_addrs.iter().enumerate() { + let va = VirtAddr::new_truncate(va_start.as_u64() + (i as u64) * (PAGE_SIZE as u64)); + self.pa_to_va_map.remove(&pa.as_u64()); + self.va_to_pa_map.remove(&va.as_u64()); + } + + // Return VA range to free list for reuse + self.free_va_range(va_start, allocation.num_pages); + + Some(allocation.num_pages) + } + + /// Translates a physical address to its mapped virtual address. + /// + /// Returns `Some(VirtAddr)` if the physical address is mapped in the vmap region, + /// or `None` if not found. + pub fn pa_to_va(&self, pa: PhysAddr) -> Option { + self.pa_to_va_map + .get(&pa.as_u64()) + .map(|&va| VirtAddr::new_truncate(va)) + } + + /// Translates a virtual address to its corresponding physical address. + /// + /// Returns `Some(PhysAddr)` if the virtual address is in the vmap region and mapped, + /// or `None` if not found. + pub fn va_to_pa(&self, va: VirtAddr) -> Option { + self.va_to_pa_map + .get(&va.as_u64()) + .map(|&pa| PhysAddr::new_truncate(pa)) + } + + /// Checks if a virtual address is within the vmap region. + pub fn is_vmap_address(va: VirtAddr) -> bool { + (VMAP_START..VMAP_END).contains(&va.as_u64()) + } +} + +impl Default for VmapRegionAllocator { + fn default() -> Self { + Self::new() + } +} + +/// Global vmap region allocator instance. +static VMAP_ALLOCATOR: SpinMutex> = SpinMutex::new(None); + +/// Initializes the global vmap allocator if not already initialized. +fn ensure_initialized() -> spin::mutex::SpinMutexGuard<'static, Option> { + let mut guard = VMAP_ALLOCATOR.lock(); + if guard.is_none() { + *guard = Some(VmapRegionAllocator::new()); + } + guard +} + +/// Atomically allocates VA range and registers all mappings. +/// +/// Returns the base VA on success, or None if allocation fails or any PA is already mapped. +pub fn allocate_and_register_vmap(phys_addrs: &[PhysAddr]) -> Option { + let mut guard = ensure_initialized(); + guard + .as_mut() + .and_then(|alloc| alloc.allocate_and_register(phys_addrs)) +} + +/// Rolls back a vmap allocation after page table mapping failure. +pub fn rollback_vmap_allocation(base_va: VirtAddr) { + let mut guard = ensure_initialized(); + if let Some(alloc) = guard.as_mut() { + alloc.rollback_allocation(base_va); + } +} + +/// Unregisters an allocation from the global vmap allocator. +pub fn unregister_vmap_allocation(va_start: VirtAddr) -> Option { + let mut guard = ensure_initialized(); + guard + .as_mut() + .and_then(|alloc| alloc.unregister_allocation(va_start)) +} + +/// Translates a physical address to virtual address in the vmap region. +pub fn vmap_pa_to_va(pa: PhysAddr) -> Option { + let guard = VMAP_ALLOCATOR.lock(); + guard.as_ref().and_then(|alloc| alloc.pa_to_va(pa)) +} + +/// Translates a virtual address to physical address in the vmap region. +pub fn vmap_va_to_pa(va: VirtAddr) -> Option { + let guard = VMAP_ALLOCATOR.lock(); + guard.as_ref().and_then(|alloc| alloc.va_to_pa(va)) +} + +/// Checks if a virtual address is within the vmap region. +pub fn is_vmap_address(va: VirtAddr) -> bool { + VmapRegionAllocator::is_vmap_address(va) +} + +/// Extended memory provider trait that adds vmap-aware address translation. +/// +/// This trait extends the base `MemoryProvider` with methods to handle +/// both direct-mapped and vmap regions. 
+pub trait VmapAwareMemoryProvider: MemoryProvider { + /// Translates a virtual address to physical address, checking both + /// direct-mapped and vmap regions. + fn va_to_pa_vmap_aware(va: VirtAddr) -> Option { + if is_vmap_address(va) { + vmap_va_to_pa(va) + } else { + // Use the direct mapping translation from MemoryProvider + Some(Self::va_to_pa(va)) + } + } + + /// Translates a physical address to virtual address. + /// + /// First checks if the PA is mapped in the vmap region, otherwise + /// falls back to direct mapping. + fn pa_to_va_vmap_aware(pa: PhysAddr) -> VirtAddr { + // First check if it's in the vmap region + if let Some(va) = vmap_pa_to_va(pa) { + return va; + } + // Fall back to direct mapping + Self::pa_to_va(pa) + } +} + +// Blanket implementation for all types that implement MemoryProvider +impl VmapAwareMemoryProvider for T {} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_vmap_region_bounds() { + const { assert!(VMAP_START < VMAP_END) }; + assert_eq!(VMAP_SIZE, VMAP_END - VMAP_START); + } + + #[test] + fn test_is_vmap_address() { + assert!(VmapRegionAllocator::is_vmap_address( + VirtAddr::new_truncate(VMAP_START) + )); + assert!(VmapRegionAllocator::is_vmap_address( + VirtAddr::new_truncate(VMAP_START + 0x1000) + )); + assert!(VmapRegionAllocator::is_vmap_address( + VirtAddr::new_truncate(VMAP_END - 1) + )); + assert!(!VmapRegionAllocator::is_vmap_address( + VirtAddr::new_truncate(VMAP_START - 1) + )); + assert!(!VmapRegionAllocator::is_vmap_address( + VirtAddr::new_truncate(VMAP_END) + )); + } + + #[test] + fn test_allocate_va_range() { + let mut allocator = VmapRegionAllocator::new(); + + // Allocate first range + let va1 = allocator.allocate_va_range(1); + assert!(va1.is_some()); + assert_eq!(va1.unwrap().as_u64(), VMAP_START); + + // Allocate second range + let va2 = allocator.allocate_va_range(2); + assert!(va2.is_some()); + assert_eq!(va2.unwrap().as_u64(), VMAP_START + PAGE_SIZE as u64); + + // Zero pages should return None + let va3 = allocator.allocate_va_range(0); + assert!(va3.is_none()); + } + + #[test] + fn test_va_range_reuse() { + let mut allocator = VmapRegionAllocator::new(); + + // Allocate and free a range + let va1 = allocator.allocate_va_range(2).unwrap(); + allocator.free_va_range(va1, 2); + + // Next allocation should reuse the freed range + let va2 = allocator.allocate_va_range(2).unwrap(); + assert_eq!(va1, va2); + + // Allocate smaller than freed - should split + allocator.free_va_range(va2, 2); + let va3 = allocator.allocate_va_range(1).unwrap(); + assert_eq!(va3, va1); + + // Remainder should be in free list + let va4 = allocator.allocate_va_range(1).unwrap(); + assert_eq!(va4.as_u64(), va1.as_u64() + PAGE_SIZE as u64); + } + + #[test] + fn test_allocate_and_register() { + let mut allocator = VmapRegionAllocator::new(); + + let phys_addrs = alloc::vec![ + PhysAddr::new(0x1000), + PhysAddr::new(0x3000), + PhysAddr::new(0x5000), + ]; + + // Allocate and register + let base_va = allocator.allocate_and_register(&phys_addrs); + assert!(base_va.is_some()); + let base_va = base_va.unwrap(); + + // Verify mappings exist + for (i, &pa) in phys_addrs.iter().enumerate() { + let expected_va = + VirtAddr::new_truncate(base_va.as_u64() + (i as u64) * (PAGE_SIZE as u64)); + assert_eq!(allocator.pa_to_va(pa), Some(expected_va)); + assert_eq!(allocator.va_to_pa(expected_va), Some(pa)); + } + + // Duplicate PA should fail + let duplicate = allocator.allocate_and_register(&[PhysAddr::new(0x1000)]); + assert!(duplicate.is_none()); + } + + 
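
The exhaustion path documented on `allocate_va_range` ("`None` if insufficient virtual address space is available") is not exercised by the tests above. A minimal illustrative test sketch, reusing the patch's own `VmapRegionAllocator`, `VMAP_SIZE`, and `PAGE_SIZE` names and not part of the original patch, could look like this:

    #[test]
    fn test_allocate_va_range_exhaustion() {
        let mut allocator = VmapRegionAllocator::new();

        // Requesting more pages than the 1 TiB region can hold is rejected outright.
        let max_pages = (VMAP_SIZE / PAGE_SIZE as u64) as usize;
        assert!(allocator.allocate_va_range(max_pages + 1).is_none());

        // A failed request consumes no address space, so a normal request still succeeds
        // and starts at the beginning of the region.
        assert_eq!(allocator.allocate_va_range(1).unwrap().as_u64(), VMAP_START);
    }
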
#[test] + fn test_rollback_allocation() { + let mut allocator = VmapRegionAllocator::new(); + + let phys_addrs = alloc::vec![PhysAddr::new(0x1000), PhysAddr::new(0x2000)]; + + let base_va = allocator.allocate_and_register(&phys_addrs).unwrap(); + + // Rollback + allocator.rollback_allocation(base_va); + + // Mappings should be gone + assert_eq!(allocator.pa_to_va(PhysAddr::new(0x1000)), None); + assert_eq!(allocator.pa_to_va(PhysAddr::new(0x2000)), None); + + // VA should be reusable + let new_va = allocator.allocate_va_range(2).unwrap(); + assert_eq!(new_va, base_va); + } + + #[test] + fn test_unregister_allocation() { + let mut allocator = VmapRegionAllocator::new(); + + let phys_addrs = alloc::vec![ + PhysAddr::new(0x1000), + PhysAddr::new(0x3000), + PhysAddr::new(0x5000), + ]; + + let base_va = allocator.allocate_and_register(&phys_addrs).unwrap(); + + // Unregister + let num_pages = allocator.unregister_allocation(base_va); + assert_eq!(num_pages, Some(3)); + + // Verify mappings are gone + for pa in &phys_addrs { + assert_eq!(allocator.pa_to_va(*pa), None); + } + + // VA should be reusable + let new_va = allocator.allocate_va_range(3).unwrap(); + assert_eq!(new_va, base_va); + } +} From d614b382a4199d9ce9d79b0bbf19d1d6d38e2609 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Tue, 17 Feb 2026 21:14:50 +0000 Subject: [PATCH 2/3] refactoring --- dev_tests/src/ratchet.rs | 2 +- litebox_common_linux/src/vmap.rs | 4 +- .../src/arch/x86/mm/paging.rs | 75 ++- litebox_platform_lvbs/src/lib.rs | 130 +++-- litebox_platform_lvbs/src/mm/mod.rs | 2 +- litebox_platform_lvbs/src/mm/vmap.rs | 453 ++++++++---------- 6 files changed, 310 insertions(+), 356 deletions(-) diff --git a/dev_tests/src/ratchet.rs b/dev_tests/src/ratchet.rs index bbc7c35b3..14dc0c010 100644 --- a/dev_tests/src/ratchet.rs +++ b/dev_tests/src/ratchet.rs @@ -37,7 +37,7 @@ fn ratchet_globals() -> Result<()> { ("litebox/", 9), ("litebox_platform_linux_kernel/", 6), ("litebox_platform_linux_userland/", 5), - ("litebox_platform_lvbs/", 20), + ("litebox_platform_lvbs/", 21), ("litebox_platform_multiplex/", 1), ("litebox_platform_windows_userland/", 7), ("litebox_runner_linux_userland/", 1), diff --git a/litebox_common_linux/src/vmap.rs b/litebox_common_linux/src/vmap.rs index 325ce75b3..19fac943d 100644 --- a/litebox_common_linux/src/vmap.rs +++ b/litebox_common_linux/src/vmap.rs @@ -166,12 +166,12 @@ pub enum PhysPointerError { NoMappingInfo, #[error("Overflow occurred during calculation")] Overflow, - #[error("Non-contiguous physical pages in the array")] - NonContiguousPages, #[error("The operation is unsupported on this platform")] UnsupportedOperation, #[error("Unsupported permissions: {0:#x}")] UnsupportedPermissions(u8), #[error("Memory copy failed")] CopyFailed, + #[error("Duplicate physical page address {0:#x} in the input array")] + DuplicatePhysicalAddress(usize), } diff --git a/litebox_platform_lvbs/src/arch/x86/mm/paging.rs b/litebox_platform_lvbs/src/arch/x86/mm/paging.rs index 1a6532ab5..19c427598 100644 --- a/litebox_platform_lvbs/src/arch/x86/mm/paging.rs +++ b/litebox_platform_lvbs/src/arch/x86/mm/paging.rs @@ -12,7 +12,7 @@ use x86_64::{ PageTableFlags, PhysFrame, Size4KiB, Translate, frame::PhysFrameRange, mapper::{ - FlagUpdateError, MapToError, PageTableFrameMapping, TranslateResult, + CleanUp, FlagUpdateError, MapToError, PageTableFrameMapping, TranslateResult, UnmapError as X64UnmapError, }, }, @@ -372,50 +372,62 @@ impl X64PageTable<'_, M, ALIGN> { /// non-contiguous physical pages to a contiguous virtual 
address range. /// /// # Arguments - /// * `frames` - Slice of physical frames to map (may be non-contiguous) - /// * `base_va` - Starting virtual address for the mapping - /// * `flags` - Page table flags to apply to all mappings + /// - `frames` - Slice of physical frames to map (non-contiguous, no duplicate) + /// - `base_va` - Starting virtual address for the mapping + /// - `flags` - Page table flags to apply to all mappings /// /// # Returns - /// * `Ok(())` on success - /// * `Err(MapToError::PageAlreadyMapped)` if any VA is already mapped - /// * `Err(MapToError::FrameAllocationFailed)` if page table allocation fails + /// - `Ok(*mut u8)` — pointer to the start of the mapped virtual range + /// - `Err(MapToError::PageAlreadyMapped)` if any VA is already mapped + /// - `Err(MapToError::FrameAllocationFailed)` if page table allocation fails /// /// # Behavior /// - Any existing mapping is treated as an error (shared mappings not supported) /// - On error, all pages mapped by this call are unmapped (atomic) - pub(crate) fn map_non_contiguous_phys_pages( + pub(crate) fn map_non_contiguous_phys_frames( &self, frames: &[PhysFrame], base_va: VirtAddr, flags: PageTableFlags, - ) -> Result<(), MapToError> { + ) -> Result<*mut u8, MapToError> { let mut allocator = PageTableAllocator::::new(); let mut mapped_count: usize = 0; let mut inner = self.inner.lock(); - for (i, &target_frame) in frames.iter().enumerate() { - let va = base_va + (i as u64) * Size4KiB::SIZE; - let page: Page = Page::containing_address(va); - // Without shared mapping support, any existing mapping is an error - match inner.translate(page.start_address()) { + if !base_va.is_aligned(Size4KiB::SIZE) { + return Err(MapToError::FrameAllocationFailed); + } + + // Quick pre-scan: check all target VAs for existing mappings before + // modifying any page table entries. This avoids expensive rollback when + // an overlap is detected partway through. + for i in 0..frames.len() { + let va = base_va + (i as u64) * Size4KiB::SIZE; + match inner.translate(va) { TranslateResult::Mapped { frame, .. } => { - // Page already mapped - rollback and fail - Self::rollback_mapped_pages(&mut inner, base_va, mapped_count); - // Convert MappedFrame to PhysFrame for the error let existing_frame = PhysFrame::::containing_address(frame.start_address()); return Err(MapToError::PageAlreadyMapped(existing_frame)); } TranslateResult::NotMapped => {} TranslateResult::InvalidFrameAddress(_) => { - Self::rollback_mapped_pages(&mut inner, base_va, mapped_count); return Err(MapToError::FrameAllocationFailed); } } + } + + // All VAs verified as unmapped — proceed with mapping. + let table_flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE; + for (i, &target_frame) in frames.iter().enumerate() { + let va = base_va + (i as u64) * Size4KiB::SIZE; + let page: Page = Page::containing_address(va); + + // Note: Since we lock the entire page table for the duration of this function (`self.inner.lock()`), + // there should be no concurrent modifications to the page table. If we allow concurrent mappings + // in the future, we should re-check the VA here before mapping and return an error + // if it is no longer unmapped. 
- let table_flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE; match unsafe { inner.map_to_with_table_flags( page, @@ -432,20 +444,25 @@ impl X64PageTable<'_, M, ALIGN> { } } Err(e) => { - Self::rollback_mapped_pages(&mut inner, base_va, mapped_count); + Self::rollback_mapped_pages(&mut inner, base_va, mapped_count, &mut allocator); return Err(e); } } } - Ok(()) + Ok(base_va.as_mut_ptr()) } - /// Rollback helper: unmap the first `count` pages starting from `base_va`. + /// Rollback helper: unmap the first `count` pages starting from `base_va` + /// and free any intermediate page-table frames (P1/P2/P3) that became empty. + /// + /// Note: The caller must already hold the page table lock (`self.inner`). + /// This function accepts the locked `MappedPageTable` directly. fn rollback_mapped_pages( inner: &mut MappedPageTable<'_, FrameMapping>, base_va: VirtAddr, count: usize, + allocator: &mut PageTableAllocator, ) { for i in 0..count { let va = base_va + (i as u64) * Size4KiB::SIZE; @@ -456,6 +473,20 @@ impl X64PageTable<'_, M, ALIGN> { fl.flush(); } } + + // Free any intermediate page-table frames (P1/P2/P3) that are now + // empty after unmapping. + if count > 0 { + let start = Page::::containing_address(base_va); + let end = Page::::containing_address( + base_va + ((count - 1) as u64) * Size4KiB::SIZE, + ); + // Safety: the vmap VA range is used exclusively by this page table + // and all leaf entries have just been unmapped above. + unsafe { + inner.clean_up_addr_range(Page::range_inclusive(start, end), allocator); + } + } } /// This function creates a new empty top-level page table. diff --git a/litebox_platform_lvbs/src/lib.rs b/litebox_platform_lvbs/src/lib.rs index c3ebfaa24..2752ed108 100644 --- a/litebox_platform_lvbs/src/lib.rs +++ b/litebox_platform_lvbs/src/lib.rs @@ -42,7 +42,7 @@ use x86_64::{ use zerocopy::{FromBytes, IntoBytes}; #[cfg(feature = "optee_syscall")] -use crate::mm::vmap::{allocate_and_register_vmap, rollback_vmap_allocation}; +use crate::mm::vmap::vmap_allocator; extern crate alloc; @@ -1172,6 +1172,7 @@ impl litebox::platform::SystemInfoProvider for LinuxKernel< } } +#[cfg(feature = "optee_syscall")] /// Checks whether the given physical addresses are contiguous with respect to ALIGN. fn is_contiguous(addrs: &[PhysPageAddr]) -> bool { for window in addrs.windows(2) { @@ -1199,6 +1200,16 @@ impl VmapManager for LinuxKernel return Err(PhysPointerError::InvalidPhysicalAddress(0)); } + // Reject duplicate page addresses — shared mappings are not supported. + { + let mut seen = hashbrown::HashSet::with_capacity(pages.len()); + for page in pages { + if !seen.insert(page.as_usize()) { + return Err(PhysPointerError::DuplicatePhysicalAddress(page.as_usize())); + } + } + } + if ALIGN != PAGE_SIZE { unimplemented!("ALIGN other than 4KiB is not supported yet"); } @@ -1208,7 +1219,7 @@ impl VmapManager for LinuxKernel flags |= PageTableFlags::WRITABLE; } - // Check if pages are contiguous - use fast path if so + // If pages are contiguous, use `map_phys_frame_range` which is efficient and doesn't require vmap VA space. 
if is_contiguous(pages) { let phys_start = x86_64::PhysAddr::new(pages[0].as_usize() as u64); let phys_end = x86_64::PhysAddr::new( @@ -1224,57 +1235,54 @@ impl VmapManager for LinuxKernel PhysFrame::::containing_address(phys_end), ); - if let Ok(page_addr) = self + match self .page_table_manager .current_page_table() .map_phys_frame_range(frame_range, flags) { - return Ok(PhysPageMapInfo { + Ok(page_addr) => Ok(PhysPageMapInfo { base: page_addr, size: pages.len() * ALIGN, - }); - } else { - return Err(PhysPointerError::InvalidPhysicalAddress( + }), + Err(MapToError::PageAlreadyMapped(_)) => { + Err(PhysPointerError::AlreadyMapped(pages[0].as_usize())) + } + Err(_) => Err(PhysPointerError::InvalidPhysicalAddress( pages[0].as_usize(), - )); + )), } - } + } else { + let frames: alloc::vec::Vec> = pages + .iter() + .map(|p| PhysFrame::containing_address(x86_64::PhysAddr::new(p.as_usize() as u64))) + .collect(); - // Non-contiguous pages: use vmap region allocator - - // Convert pages to PhysAddrs for the allocator - let phys_addrs: alloc::vec::Vec = pages - .iter() - .map(|p| x86_64::PhysAddr::new(p.as_usize() as u64)) - .collect(); - - // Atomically allocate VA range and register all mappings - // This also checks for duplicate PA mappings - let base_va = allocate_and_register_vmap(&phys_addrs) - .ok_or(PhysPointerError::AlreadyMapped(pages[0].as_usize()))?; - - // Convert to PhysFrames for page table mapping - let frames: alloc::vec::Vec> = phys_addrs - .iter() - .map(|&pa| PhysFrame::containing_address(pa)) - .collect(); - - // Map the non-contiguous physical pages to the contiguous virtual range - if let Err(_e) = self - .page_table - .map_non_contiguous_phys_pages(&frames, base_va, flags) - { - // Rollback the vmap allocation on failure - rollback_vmap_allocation(base_va); - return Err(PhysPointerError::InvalidPhysicalAddress( - pages[0].as_usize(), - )); - } + let base_va = vmap_allocator() + .allocate_and_register(&frames) + .ok_or(PhysPointerError::AlreadyMapped(pages[0].as_usize()))?; - Ok(PhysPageMapInfo { - base: base_va.as_mut_ptr(), - size: pages.len() * ALIGN, - }) + match self + .page_table_manager + .current_page_table() + .map_non_contiguous_phys_frames(&frames, base_va, flags) + { + Ok(page_addr) => Ok(PhysPageMapInfo { + base: page_addr, + size: pages.len() * ALIGN, + }), + Err(e) => { + vmap_allocator().rollback_allocation(base_va); + match e { + MapToError::PageAlreadyMapped(_) => { + Err(PhysPointerError::AlreadyMapped(pages[0].as_usize())) + } + _ => Err(PhysPointerError::InvalidPhysicalAddress( + pages[0].as_usize(), + )), + } + } + } + } } unsafe fn vunmap(&self, vmap_info: PhysPageMapInfo) -> Result<(), PhysPointerError> { @@ -1282,37 +1290,19 @@ impl VmapManager for LinuxKernel unimplemented!("ALIGN other than 4KiB is not supported yet"); } - let base_va = x86_64::VirtAddr::new_truncate(vmap_info.base as u64); + // Unmap VTL0 pages (doesn't deallocate physical frames — they belong to VTL0). + self.unmap_vtl0_pages(vmap_info.base, vmap_info.size) + .map_err(|_| PhysPointerError::Unmapped(vmap_info.base as usize))?; - // Check if this is a vmap region allocation + let base_va = x86_64::VirtAddr::new(vmap_info.base as u64); + + // If this is a vmap region (for non-contiguous physical pages), unregister from the vmap allocator first. 
if crate::mm::vmap::is_vmap_address(base_va) { - // Unregister from vmap allocator and get number of pages - let num_pages = crate::mm::vmap::unregister_vmap_allocation(base_va) + crate::mm::vmap::vmap_allocator() + .unregister_allocation(base_va) .ok_or(PhysPointerError::Unmapped(vmap_info.base as usize))?; - - // Unmap VTL0 pages (doesn't deallocate frames - they're not owned by us) - self.unmap_vtl0_pages(vmap_info.base, num_pages * ALIGN) - .map_err(|_| PhysPointerError::Unmapped(vmap_info.base as usize))?; - - Ok(()) - } else { - // Fall back to original behavior for direct-mapped pages - let Some(page_range) = PageRange::::new( - vmap_info.base as usize, - vmap_info.base.wrapping_add(vmap_info.size) as usize, - ) else { - return Err(PhysPointerError::UnalignedPhysicalAddress( - vmap_info.base as usize, - ALIGN, - )); - }; - unsafe { - self.page_table_manager - .current_page_table() - .unmap_pages(page_range, false, true) - .map_err(|_| PhysPointerError::Unmapped(vmap_info.base as usize)) - } } + Ok(()) } fn validate_unowned(&self, pages: &PhysPageAddrArray) -> Result<(), PhysPointerError> { diff --git a/litebox_platform_lvbs/src/mm/mod.rs b/litebox_platform_lvbs/src/mm/mod.rs index 670438b59..5bd3a5471 100644 --- a/litebox_platform_lvbs/src/mm/mod.rs +++ b/litebox_platform_lvbs/src/mm/mod.rs @@ -6,7 +6,7 @@ use crate::arch::{PhysAddr, VirtAddr}; pub(crate) mod pgtable; -pub mod vmap; +pub(crate) mod vmap; #[cfg(test)] pub mod tests; diff --git a/litebox_platform_lvbs/src/mm/vmap.rs b/litebox_platform_lvbs/src/mm/vmap.rs index 59f284d18..abbd04d04 100644 --- a/litebox_platform_lvbs/src/mm/vmap.rs +++ b/litebox_platform_lvbs/src/mm/vmap.rs @@ -1,19 +1,20 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. -//! Vmap region allocator for mapping non-contiguous physical pages to virtually contiguous addresses. +//! Vmap region allocator for mapping non-contiguous physical page frames to virtually contiguous addresses. //! //! This module provides functionality similar to Linux kernel's `vmap()` and `vunmap()`: //! - Reserves a virtual address region for vmap mappings -//! - Maintains PA↔VA mappings using HashMap for efficient lookups -//! - Supports `pa_to_va` and `va_to_pa` operations within the vmap region +//! - Maintains PA↔VA mappings using HashMap for duplicate detection and cleanup -use alloc::vec::Vec; +use alloc::boxed::Box; use hashbrown::HashMap; +use rangemap::RangeSet; +use spin::Once; use spin::mutex::SpinMutex; -use x86_64::{PhysAddr, VirtAddr}; +use x86_64::VirtAddr; +use x86_64::structures::paging::{PhysFrame, Size4KiB}; -use super::MemoryProvider; use crate::mshv::vtl1_mem_layout::PAGE_SIZE; /// Start of the vmap virtual address region. @@ -25,57 +26,50 @@ pub const VMAP_START: u64 = 0x6000_0000_0000; /// Provides 1 TiB of virtual address space for vmap allocations. pub const VMAP_END: u64 = 0x7000_0000_0000; -/// Size of the vmap region in bytes. -pub const VMAP_SIZE: u64 = VMAP_END - VMAP_START; +/// Number of unmapped guard pages appended after each vmap allocation. +const GUARD_PAGES: usize = 1; /// Information about a single vmap allocation. #[derive(Clone, Debug)] struct VmapAllocation { - /// Number of pages in the allocation. - num_pages: usize, - /// Physical addresses of the mapped pages (in order). - phys_addrs: Vec, + /// Physical frames of the mapped pages (in order). + frames: Box<[PhysFrame]>, } -/// A freed VA range available for reuse. -#[derive(Clone, Debug)] -struct FreeRange { - /// Starting virtual address. 
- start: VirtAddr, - /// Number of pages in this free range. - num_pages: usize, -} - -/// Vmap region allocator that manages virtual address allocation and PA↔VA mappings. +/// Inner state for the vmap region allocator. /// -/// This allocator uses a bump allocator with a free list for virtual addresses and HashMap -/// for maintaining bidirectional mappings between physical and virtual addresses. -pub struct VmapRegionAllocator { +/// Uses a bump allocator with a `RangeSet` free list for virtual addresses +/// and HashMap for maintaining bidirectional mappings between physical and virtual addresses. +struct VmapRegionAllocatorInner { /// Next available virtual address for allocation (bump allocator). next_va: VirtAddr, - /// Free list of previously allocated and freed VA ranges. - free_list: Vec, - /// Map from physical address to virtual address. - pa_to_va_map: HashMap, - /// Map from virtual address to physical address. - va_to_pa_map: HashMap, + /// Free set of previously allocated and freed VA ranges (auto-coalescing). + free_set: RangeSet, + /// Map from physical frame to virtual address. + pa_to_va_map: HashMap, VirtAddr>, + /// Map from virtual address to physical frame. + va_to_pa_map: HashMap>, /// Allocation metadata indexed by starting virtual address. - allocations: HashMap, + allocations: HashMap, } -impl VmapRegionAllocator { - /// Creates a new vmap region allocator. - pub fn new() -> Self { +impl VmapRegionAllocatorInner { + /// Creates a new vmap region allocator inner state. + fn new() -> Self { Self { - next_va: VirtAddr::new_truncate(VMAP_START), - free_list: Vec::new(), + next_va: VirtAddr::new(VMAP_START), + free_set: RangeSet::new(), pa_to_va_map: HashMap::new(), va_to_pa_map: HashMap::new(), allocations: HashMap::new(), } } - /// Allocates a contiguous virtual address range for the given number of pages. + /// Allocates a contiguous virtual address range for the given number of pages, + /// plus [`GUARD_PAGES`] unmapped trailing guard pages. + /// + /// The guard pages are reserved in the VA space but never mapped, so an + /// out-of-bounds access past the allocation triggers a page fault. /// /// First tries to find a suitable range in the free list, then falls back to /// bump allocation. 
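
The switch from the hand-rolled `FreeRange` list to `rangemap::RangeSet` relies on the set coalescing adjacent freed ranges automatically, as the `free_set` field comment notes. A small standalone sketch of that behavior (illustrative only, using raw `u64` addresses rather than `VirtAddr`, and only the `insert`/`remove`/`iter` calls the patch itself uses):

    use rangemap::RangeSet;

    #[test]
    fn rangeset_coalescing_sketch() {
        let mut free: RangeSet<u64> = RangeSet::new();

        // Two adjacent 2-page ranges (4 KiB pages) freed separately...
        free.insert(0x6000_0000_0000..0x6000_0000_2000);
        free.insert(0x6000_0000_2000..0x6000_0000_4000);
        // ...are stored as one coalesced 4-page range, so a 3-page request fits first-fit.
        assert_eq!(free.iter().count(), 1);

        // Carving out the 3-page allocation leaves only the 1-page remainder in the set.
        free.remove(0x6000_0000_0000..0x6000_0000_3000);
        let remainder = free.iter().next().unwrap();
        assert_eq!(remainder.start, 0x6000_0000_3000);
        assert_eq!(remainder.end, 0x6000_0000_4000);
    }
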
@@ -87,43 +81,41 @@ impl VmapRegionAllocator { return None; } - // First, try to find a suitable range in the free list (first-fit) - if let Some(idx) = self.free_list.iter().position(|r| r.num_pages >= num_pages) { - let range = self.free_list.remove(idx); - if range.num_pages > num_pages { - // Split: put remainder back in free list - let remainder_start = VirtAddr::new_truncate( - range.start.as_u64() + (num_pages as u64) * (PAGE_SIZE as u64), - ); - self.free_list.push(FreeRange { - start: remainder_start, - num_pages: range.num_pages - num_pages, - }); + let total_pages = num_pages.checked_add(GUARD_PAGES)?; + let size = (total_pages as u64).checked_mul(PAGE_SIZE as u64)?; + + // Try to find a suitable range in the free set (first-fit) + for range in self.free_set.iter() { + let range_size = range.end - range.start; + if range_size >= size { + let allocated_start = range.start; + // Remove the allocated portion from the free set + self.free_set + .remove(allocated_start..allocated_start + size); + return Some(allocated_start); } - return Some(range.start); } // Fall back to bump allocation - let size = (num_pages as u64).checked_mul(PAGE_SIZE as u64)?; - let end_va = self.next_va.as_u64().checked_add(size)?; + let end_va = self.next_va + size; - if end_va > VMAP_END { + if end_va > VirtAddr::new(VMAP_END) { return None; } let allocated_va = self.next_va; - self.next_va = VirtAddr::new_truncate(end_va); + self.next_va = end_va; Some(allocated_va) } - /// Returns a VA range to the free list for reuse. + /// Returns a VA range to the free set for reuse. fn free_va_range(&mut self, start: VirtAddr, num_pages: usize) { if num_pages == 0 { return; } - // Simple approach: just add to free list - // A more sophisticated implementation could coalesce adjacent ranges - self.free_list.push(FreeRange { start, num_pages }); + let total_pages = num_pages + GUARD_PAGES; + let end = start + (total_pages as u64) * (PAGE_SIZE as u64); + self.free_set.insert(start..end); } /// Atomically allocates VA range, registers mappings, and records allocation. 
@@ -133,34 +125,30 @@ impl VmapRegionAllocator { /// Returns the allocated base VA on success, or None if: /// - No VA space available /// - Any PA is already mapped (duplicate mapping) - pub fn allocate_and_register(&mut self, phys_addrs: &[PhysAddr]) -> Option { - if phys_addrs.is_empty() { + pub fn allocate_and_register(&mut self, frames: &[PhysFrame]) -> Option { + if frames.is_empty() { return None; } // Check for duplicate PA mappings before allocating - for pa in phys_addrs { - if self.pa_to_va_map.contains_key(&pa.as_u64()) { + for frame in frames { + if self.pa_to_va_map.contains_key(frame) { return None; // PA already mapped } } - // Allocate VA range - let base_va = self.allocate_va_range(phys_addrs.len())?; + let base_va = self.allocate_va_range(frames.len())?; - // Register all mappings - for (i, &pa) in phys_addrs.iter().enumerate() { - let va = VirtAddr::new_truncate(base_va.as_u64() + (i as u64) * (PAGE_SIZE as u64)); - self.pa_to_va_map.insert(pa.as_u64(), va.as_u64()); - self.va_to_pa_map.insert(va.as_u64(), pa.as_u64()); + for (i, &frame) in frames.iter().enumerate() { + let va = VirtAddr::new(base_va.as_u64() + (i as u64) * (PAGE_SIZE as u64)); + self.pa_to_va_map.insert(frame, va); + self.va_to_pa_map.insert(va, frame); } - // Record allocation metadata self.allocations.insert( - base_va.as_u64(), + base_va, VmapAllocation { - num_pages: phys_addrs.len(), - phys_addrs: phys_addrs.to_vec(), + frames: frames.into(), }, ); @@ -171,15 +159,13 @@ impl VmapRegionAllocator { /// /// Call this if page table mapping fails after `allocate_and_register` succeeds. pub fn rollback_allocation(&mut self, base_va: VirtAddr) { - if let Some(allocation) = self.allocations.remove(&base_va.as_u64()) { - // Remove all individual page mappings - for (i, pa) in allocation.phys_addrs.iter().enumerate() { - let va = VirtAddr::new_truncate(base_va.as_u64() + (i as u64) * (PAGE_SIZE as u64)); - self.pa_to_va_map.remove(&pa.as_u64()); - self.va_to_pa_map.remove(&va.as_u64()); + if let Some(allocation) = self.allocations.remove(&base_va) { + for (i, frame) in allocation.frames.iter().enumerate() { + let va = VirtAddr::new(base_va.as_u64() + (i as u64) * (PAGE_SIZE as u64)); + self.pa_to_va_map.remove(frame); + self.va_to_pa_map.remove(&va); } - // Return VA range to free list - self.free_va_range(base_va, allocation.num_pages); + self.free_va_range(base_va, allocation.frames.len()); } } @@ -187,101 +173,64 @@ impl VmapRegionAllocator { /// /// Returns the number of pages that were unmapped, or `None` if no allocation was found. pub fn unregister_allocation(&mut self, va_start: VirtAddr) -> Option { - let allocation = self.allocations.remove(&va_start.as_u64())?; + let allocation = self.allocations.remove(&va_start)?; - // Remove all individual page mappings - for (i, pa) in allocation.phys_addrs.iter().enumerate() { - let va = VirtAddr::new_truncate(va_start.as_u64() + (i as u64) * (PAGE_SIZE as u64)); - self.pa_to_va_map.remove(&pa.as_u64()); - self.va_to_pa_map.remove(&va.as_u64()); + for (i, frame) in allocation.frames.iter().enumerate() { + let va = VirtAddr::new(va_start.as_u64() + (i as u64) * (PAGE_SIZE as u64)); + self.pa_to_va_map.remove(frame); + self.va_to_pa_map.remove(&va); } - // Return VA range to free list for reuse - self.free_va_range(va_start, allocation.num_pages); - - Some(allocation.num_pages) - } - - /// Translates a physical address to its mapped virtual address. 
- /// - /// Returns `Some(VirtAddr)` if the physical address is mapped in the vmap region, - /// or `None` if not found. - pub fn pa_to_va(&self, pa: PhysAddr) -> Option { - self.pa_to_va_map - .get(&pa.as_u64()) - .map(|&va| VirtAddr::new_truncate(va)) - } + self.free_va_range(va_start, allocation.frames.len()); - /// Translates a virtual address to its corresponding physical address. - /// - /// Returns `Some(PhysAddr)` if the virtual address is in the vmap region and mapped, - /// or `None` if not found. - pub fn va_to_pa(&self, va: VirtAddr) -> Option { - self.va_to_pa_map - .get(&va.as_u64()) - .map(|&pa| PhysAddr::new_truncate(pa)) + Some(allocation.frames.len()) } /// Checks if a virtual address is within the vmap region. - pub fn is_vmap_address(va: VirtAddr) -> bool { + fn is_vmap_address(va: VirtAddr) -> bool { (VMAP_START..VMAP_END).contains(&va.as_u64()) } } -impl Default for VmapRegionAllocator { - fn default() -> Self { - Self::new() - } +/// Vmap region allocator that manages virtual address allocation and PA↔VA mappings. +pub struct VmapRegionAllocator { + inner: SpinMutex, } -/// Global vmap region allocator instance. -static VMAP_ALLOCATOR: SpinMutex> = SpinMutex::new(None); - -/// Initializes the global vmap allocator if not already initialized. -fn ensure_initialized() -> spin::mutex::SpinMutexGuard<'static, Option> { - let mut guard = VMAP_ALLOCATOR.lock(); - if guard.is_none() { - *guard = Some(VmapRegionAllocator::new()); +impl VmapRegionAllocator { + fn new() -> Self { + Self { + inner: SpinMutex::new(VmapRegionAllocatorInner::new()), + } } - guard -} -/// Atomically allocates VA range and registers all mappings. -/// -/// Returns the base VA on success, or None if allocation fails or any PA is already mapped. -pub fn allocate_and_register_vmap(phys_addrs: &[PhysAddr]) -> Option { - let mut guard = ensure_initialized(); - guard - .as_mut() - .and_then(|alloc| alloc.allocate_and_register(phys_addrs)) -} + /// Atomically allocates VA range and registers all mappings. + /// + /// Returns the base VA on success, or None if allocation fails or any PA is already mapped. + pub fn allocate_and_register(&self, frames: &[PhysFrame]) -> Option { + self.inner.lock().allocate_and_register(frames) + } -/// Rolls back a vmap allocation after page table mapping failure. -pub fn rollback_vmap_allocation(base_va: VirtAddr) { - let mut guard = ensure_initialized(); - if let Some(alloc) = guard.as_mut() { - alloc.rollback_allocation(base_va); + /// Rolls back a failed allocation by removing mappings and freeing VA range. + pub fn rollback_allocation(&self, base_va: VirtAddr) { + self.inner.lock().rollback_allocation(base_va); } -} -/// Unregisters an allocation from the global vmap allocator. -pub fn unregister_vmap_allocation(va_start: VirtAddr) -> Option { - let mut guard = ensure_initialized(); - guard - .as_mut() - .and_then(|alloc| alloc.unregister_allocation(va_start)) -} + /// Unregisters all mappings for an allocation starting at the given virtual address. + pub fn unregister_allocation(&self, va_start: VirtAddr) -> Option { + self.inner.lock().unregister_allocation(va_start) + } -/// Translates a physical address to virtual address in the vmap region. -pub fn vmap_pa_to_va(pa: PhysAddr) -> Option { - let guard = VMAP_ALLOCATOR.lock(); - guard.as_ref().and_then(|alloc| alloc.pa_to_va(pa)) + /// Checks if a virtual address is within the vmap region. 
+ pub fn is_vmap_address(va: VirtAddr) -> bool { + VmapRegionAllocatorInner::is_vmap_address(va) + } } -/// Translates a virtual address to physical address in the vmap region. -pub fn vmap_va_to_pa(va: VirtAddr) -> Option { - let guard = VMAP_ALLOCATOR.lock(); - guard.as_ref().and_then(|alloc| alloc.va_to_pa(va)) +/// Returns a reference to the global vmap region allocator. +pub fn vmap_allocator() -> &'static VmapRegionAllocator { + static ALLOCATOR: Once = Once::new(); + ALLOCATOR.call_once(VmapRegionAllocator::new) } /// Checks if a virtual address is within the vmap region. @@ -289,81 +238,46 @@ pub fn is_vmap_address(va: VirtAddr) -> bool { VmapRegionAllocator::is_vmap_address(va) } -/// Extended memory provider trait that adds vmap-aware address translation. -/// -/// This trait extends the base `MemoryProvider` with methods to handle -/// both direct-mapped and vmap regions. -pub trait VmapAwareMemoryProvider: MemoryProvider { - /// Translates a virtual address to physical address, checking both - /// direct-mapped and vmap regions. - fn va_to_pa_vmap_aware(va: VirtAddr) -> Option { - if is_vmap_address(va) { - vmap_va_to_pa(va) - } else { - // Use the direct mapping translation from MemoryProvider - Some(Self::va_to_pa(va)) - } - } - - /// Translates a physical address to virtual address. - /// - /// First checks if the PA is mapped in the vmap region, otherwise - /// falls back to direct mapping. - fn pa_to_va_vmap_aware(pa: PhysAddr) -> VirtAddr { - // First check if it's in the vmap region - if let Some(va) = vmap_pa_to_va(pa) { - return va; - } - // Fall back to direct mapping - Self::pa_to_va(pa) - } -} - -// Blanket implementation for all types that implement MemoryProvider -impl VmapAwareMemoryProvider for T {} - #[cfg(test)] mod tests { use super::*; - - #[test] - fn test_vmap_region_bounds() { - const { assert!(VMAP_START < VMAP_END) }; - assert_eq!(VMAP_SIZE, VMAP_END - VMAP_START); - } + use x86_64::PhysAddr; #[test] fn test_is_vmap_address() { - assert!(VmapRegionAllocator::is_vmap_address( - VirtAddr::new_truncate(VMAP_START) - )); - assert!(VmapRegionAllocator::is_vmap_address( - VirtAddr::new_truncate(VMAP_START + 0x1000) - )); - assert!(VmapRegionAllocator::is_vmap_address( - VirtAddr::new_truncate(VMAP_END - 1) - )); - assert!(!VmapRegionAllocator::is_vmap_address( - VirtAddr::new_truncate(VMAP_START - 1) - )); - assert!(!VmapRegionAllocator::is_vmap_address( - VirtAddr::new_truncate(VMAP_END) - )); + assert!(VmapRegionAllocatorInner::is_vmap_address(VirtAddr::new( + VMAP_START + ))); + assert!(VmapRegionAllocatorInner::is_vmap_address(VirtAddr::new( + VMAP_START + 0x1000 + ))); + assert!(VmapRegionAllocatorInner::is_vmap_address(VirtAddr::new( + VMAP_END - 1 + ))); + assert!(!VmapRegionAllocatorInner::is_vmap_address(VirtAddr::new( + VMAP_START - 1 + ))); + assert!(!VmapRegionAllocatorInner::is_vmap_address(VirtAddr::new( + VMAP_END + ))); } #[test] fn test_allocate_va_range() { - let mut allocator = VmapRegionAllocator::new(); + let mut allocator = VmapRegionAllocatorInner::new(); - // Allocate first range + // Allocate first range (1 data page + 1 guard page = 2 pages consumed) let va1 = allocator.allocate_va_range(1); assert!(va1.is_some()); assert_eq!(va1.unwrap().as_u64(), VMAP_START); - // Allocate second range + // Second allocation starts after data + guard pages let va2 = allocator.allocate_va_range(2); assert!(va2.is_some()); - assert_eq!(va2.unwrap().as_u64(), VMAP_START + PAGE_SIZE as u64); + assert_eq!( + va2.unwrap().as_u64(), + VMAP_START + (1 + 
GUARD_PAGES as u64) * PAGE_SIZE as u64 + ); // Zero pages should return None let va3 = allocator.allocate_va_range(0); @@ -372,97 +286,116 @@ mod tests { #[test] fn test_va_range_reuse() { - let mut allocator = VmapRegionAllocator::new(); + let mut allocator = VmapRegionAllocatorInner::new(); - // Allocate and free a range + // Allocate and free a 2-page range (consumes 2 + guard pages) let va1 = allocator.allocate_va_range(2).unwrap(); allocator.free_va_range(va1, 2); - // Next allocation should reuse the freed range + // Next allocation of same size should reuse the freed range let va2 = allocator.allocate_va_range(2).unwrap(); assert_eq!(va1, va2); - // Allocate smaller than freed - should split + // Free the 3-page slot (2 data + 1 guard), then allocate 1 page (needs 1+1=2 pages). + // The remaining 1 page in the 3-page slot is not enough for another 1+1 allocation. allocator.free_va_range(va2, 2); let va3 = allocator.allocate_va_range(1).unwrap(); assert_eq!(va3, va1); - - // Remainder should be in free list - let va4 = allocator.allocate_va_range(1).unwrap(); - assert_eq!(va4.as_u64(), va1.as_u64() + PAGE_SIZE as u64); } #[test] fn test_allocate_and_register() { - let mut allocator = VmapRegionAllocator::new(); + let mut allocator = VmapRegionAllocatorInner::new(); - let phys_addrs = alloc::vec![ - PhysAddr::new(0x1000), - PhysAddr::new(0x3000), - PhysAddr::new(0x5000), + let frames = alloc::vec![ + PhysFrame::::containing_address(PhysAddr::new(0x1000)), + PhysFrame::::containing_address(PhysAddr::new(0x3000)), + PhysFrame::::containing_address(PhysAddr::new(0x5000)), ]; // Allocate and register - let base_va = allocator.allocate_and_register(&phys_addrs); + let base_va = allocator.allocate_and_register(&frames); assert!(base_va.is_some()); let base_va = base_va.unwrap(); + assert_eq!(base_va.as_u64(), VMAP_START); - // Verify mappings exist - for (i, &pa) in phys_addrs.iter().enumerate() { - let expected_va = - VirtAddr::new_truncate(base_va.as_u64() + (i as u64) * (PAGE_SIZE as u64)); - assert_eq!(allocator.pa_to_va(pa), Some(expected_va)); - assert_eq!(allocator.va_to_pa(expected_va), Some(pa)); - } - - // Duplicate PA should fail - let duplicate = allocator.allocate_and_register(&[PhysAddr::new(0x1000)]); + // Duplicate PA should fail (proves mappings were recorded) + let duplicate = allocator + .allocate_and_register(&[PhysFrame::containing_address(PhysAddr::new(0x1000))]); assert!(duplicate.is_none()); + + // Empty input should return None + assert!(allocator.allocate_and_register(&[]).is_none()); } #[test] fn test_rollback_allocation() { - let mut allocator = VmapRegionAllocator::new(); + let mut allocator = VmapRegionAllocatorInner::new(); - let phys_addrs = alloc::vec![PhysAddr::new(0x1000), PhysAddr::new(0x2000)]; + let frames = alloc::vec![ + PhysFrame::::containing_address(PhysAddr::new(0x1000)), + PhysFrame::::containing_address(PhysAddr::new(0x2000)), + ]; - let base_va = allocator.allocate_and_register(&phys_addrs).unwrap(); + let base_va = allocator.allocate_and_register(&frames).unwrap(); // Rollback allocator.rollback_allocation(base_va); - // Mappings should be gone - assert_eq!(allocator.pa_to_va(PhysAddr::new(0x1000)), None); - assert_eq!(allocator.pa_to_va(PhysAddr::new(0x2000)), None); - - // VA should be reusable - let new_va = allocator.allocate_va_range(2).unwrap(); + // Mappings should be gone — re-registering the same PAs must succeed + let new_va = allocator.allocate_and_register(&frames).unwrap(); assert_eq!(new_va, base_va); } #[test] fn 
     fn test_unregister_allocation() {
-        let mut allocator = VmapRegionAllocator::new();
+        let mut allocator = VmapRegionAllocatorInner::new();
 
-        let phys_addrs = alloc::vec![
-            PhysAddr::new(0x1000),
-            PhysAddr::new(0x3000),
-            PhysAddr::new(0x5000),
+        let frames = alloc::vec![
+            PhysFrame::<Size4KiB>::containing_address(PhysAddr::new(0x1000)),
+            PhysFrame::<Size4KiB>::containing_address(PhysAddr::new(0x3000)),
+            PhysFrame::<Size4KiB>::containing_address(PhysAddr::new(0x5000)),
         ];
 
-        let base_va = allocator.allocate_and_register(&phys_addrs).unwrap();
+        let base_va = allocator.allocate_and_register(&frames).unwrap();
 
         // Unregister
         let num_pages = allocator.unregister_allocation(base_va);
         assert_eq!(num_pages, Some(3));
 
-        // Verify mappings are gone
-        for pa in &phys_addrs {
-            assert_eq!(allocator.pa_to_va(*pa), None);
-        }
-
-        // VA should be reusable
-        let new_va = allocator.allocate_va_range(3).unwrap();
+        // Mappings should be gone — re-registering the same PAs must succeed
+        // and reuse the freed VA range
+        let new_va = allocator.allocate_and_register(&frames).unwrap();
         assert_eq!(new_va, base_va);
+
+        // Unregistering an unknown VA returns None
+        assert_eq!(
+            allocator.unregister_allocation(VirtAddr::new(VMAP_END - 0x1000)),
+            None
+        );
+    }
+
+    #[test]
+    fn test_guard_page_gap() {
+        let mut allocator = VmapRegionAllocatorInner::new();
+
+        let frames_a = alloc::vec![PhysFrame::<Size4KiB>::containing_address(PhysAddr::new(
+            0x1000
+        )),];
+        let frames_b = alloc::vec![PhysFrame::<Size4KiB>::containing_address(PhysAddr::new(
+            0x2000
+        )),];
+
+        let va_a = allocator.allocate_and_register(&frames_a).unwrap();
+        let va_b = allocator.allocate_and_register(&frames_b).unwrap();
+
+        // Allocations should be separated by at least GUARD_PAGES unmapped pages
+        let gap_pages = (va_b.as_u64() - va_a.as_u64()) / PAGE_SIZE as u64;
+        assert!(
+            gap_pages >= (1 + GUARD_PAGES as u64),
+            "expected at least {} pages between allocations, got {}",
+            1 + GUARD_PAGES,
+            gap_pages
+        );
    }
 }

From 175367b088c84d89832910dc3ff216042724fe20 Mon Sep 17 00:00:00 2001
From: Sangho Lee
Date: Wed, 18 Feb 2026 03:08:25 +0000
Subject: [PATCH 3/3] refactoring

---
 litebox_platform_lvbs/src/lib.rs     |  24 +++--
 litebox_platform_lvbs/src/mm/vmap.rs | 135 ++++++++++-----------------
 2 files changed, 66 insertions(+), 93 deletions(-)

diff --git a/litebox_platform_lvbs/src/lib.rs b/litebox_platform_lvbs/src/lib.rs
index 2752ed108..285f27ccc 100644
--- a/litebox_platform_lvbs/src/lib.rs
+++ b/litebox_platform_lvbs/src/lib.rs
@@ -1290,19 +1290,25 @@ impl VmapManager for LinuxKernel
             unimplemented!("ALIGN other than 4KiB is not supported yet");
         }
 
-        // Unmap VTL0 pages (doesn't deallocate physical frames — they belong to VTL0).
-        self.unmap_vtl0_pages(vmap_info.base, vmap_info.size)
-            .map_err(|_| PhysPointerError::Unmapped(vmap_info.base as usize))?;
-
         let base_va = x86_64::VirtAddr::new(vmap_info.base as u64);
 
-        // If this is a vmap region (for non-contiguous physical pages), unregister from the vmap allocator first.
-        if crate::mm::vmap::is_vmap_address(base_va) {
+        // Perform both cleanup steps unconditionally so that a failure in one
+        // does not leave the other in an inconsistent state.
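+        // `Result::and` keeps the first `Err`, so an unmap failure is reported even
+        // when unregistering also fails, while both cleanup steps always run.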
+        let unmap_result = self
+            .unmap_vtl0_pages(vmap_info.base, vmap_info.size)
+            .map_err(|_| PhysPointerError::Unmapped(vmap_info.base as usize));
+
+        let unregister_result = if crate::mm::vmap::is_vmap_address(base_va) {
             crate::mm::vmap::vmap_allocator()
                 .unregister_allocation(base_va)
-                .ok_or(PhysPointerError::Unmapped(vmap_info.base as usize))?;
-        }
-        Ok(())
+                .ok_or(PhysPointerError::Unmapped(vmap_info.base as usize))
+                .map(|_| ())
+        } else {
+            Ok(())
+        };
+
+        // Report the first error, if any.
+        unmap_result.and(unregister_result)
     }
 
     fn validate_unowned(&self, pages: &PhysPageAddrArray) -> Result<(), PhysPointerError> {
diff --git a/litebox_platform_lvbs/src/mm/vmap.rs b/litebox_platform_lvbs/src/mm/vmap.rs
index abbd04d04..510fc7693 100644
--- a/litebox_platform_lvbs/src/mm/vmap.rs
+++ b/litebox_platform_lvbs/src/mm/vmap.rs
@@ -20,11 +20,11 @@ use crate::mshv::vtl1_mem_layout::PAGE_SIZE;
 /// Start of the vmap virtual address region.
 /// This address is chosen to be within the 4-level paging canonical address space
 /// and not conflict with VTL1's direct-mapped physical memory.
-pub const VMAP_START: u64 = 0x6000_0000_0000;
+const VMAP_START: u64 = 0x6000_0000_0000;
 
 /// End of the vmap virtual address region.
 /// Provides 1 TiB of virtual address space for vmap allocations.
-pub const VMAP_END: u64 = 0x7000_0000_0000;
+const VMAP_END: u64 = 0x7000_0000_0000;
 
 /// Number of unmapped guard pages appended after each vmap allocation.
 const GUARD_PAGES: usize = 1;
@@ -76,7 +76,7 @@ impl VmapRegionAllocatorInner {
     ///
     /// Returns `Some(VirtAddr)` with the starting virtual address on success,
     /// or `None` if insufficient virtual address space is available.
-    pub fn allocate_va_range(&mut self, num_pages: usize) -> Option<VirtAddr> {
+    fn allocate_va_range(&mut self, num_pages: usize) -> Option<VirtAddr> {
         if num_pages == 0 {
             return None;
         }
@@ -117,35 +117,55 @@ impl VmapRegionAllocatorInner {
         let end = start + (total_pages as u64) * (PAGE_SIZE as u64);
         self.free_set.insert(start..end);
     }
+}
+
+/// Checks if a virtual address is within the vmap region.
+pub fn is_vmap_address(va: VirtAddr) -> bool {
+    (VMAP_START..VMAP_END).contains(&va.as_u64())
+}
+
+/// Vmap region allocator that manages virtual address allocation and PA↔VA mappings.
+pub struct VmapRegionAllocator {
+    inner: SpinMutex<VmapRegionAllocatorInner>,
+}
+
+impl VmapRegionAllocator {
+    fn new() -> Self {
+        Self {
+            inner: SpinMutex::new(VmapRegionAllocatorInner::new()),
+        }
+    }
 
     /// Atomically allocates VA range, registers mappings, and records allocation.
     ///
     /// This ensures consistency - either the entire operation succeeds or nothing changes.
     ///
-    /// Returns the allocated base VA on success, or None if:
+    /// Returns the base VA on success, or None if:
     /// - No VA space available
     /// - Any PA is already mapped (duplicate mapping)
-    pub fn allocate_and_register(&mut self, frames: &[PhysFrame]) -> Option<VirtAddr> {
+    pub fn allocate_and_register(&self, frames: &[PhysFrame]) -> Option<VirtAddr> {
         if frames.is_empty() {
             return None;
         }
 
+        let mut inner = self.inner.lock();
+
         // Check for duplicate PA mappings before allocating
         for frame in frames {
-            if self.pa_to_va_map.contains_key(frame) {
+            if inner.pa_to_va_map.contains_key(frame) {
                 return None; // PA already mapped
             }
         }
 
-        let base_va = self.allocate_va_range(frames.len())?;
+        let base_va = inner.allocate_va_range(frames.len())?;
 
         for (i, &frame) in frames.iter().enumerate() {
             let va = VirtAddr::new(base_va.as_u64() + (i as u64) * (PAGE_SIZE as u64));
-            self.pa_to_va_map.insert(frame, va);
-            self.va_to_pa_map.insert(va, frame);
+            inner.pa_to_va_map.insert(frame, va);
+            inner.va_to_pa_map.insert(va, frame);
         }
 
-        self.allocations.insert(
+        inner.allocations.insert(
             base_va,
             VmapAllocation {
                 frames: frames.into(),
@@ -158,73 +178,35 @@ impl VmapRegionAllocatorInner {
     /// Rolls back a failed allocation by removing mappings and freeing VA range.
     ///
     /// Call this if page table mapping fails after `allocate_and_register` succeeds.
-    pub fn rollback_allocation(&mut self, base_va: VirtAddr) {
-        if let Some(allocation) = self.allocations.remove(&base_va) {
+    pub fn rollback_allocation(&self, base_va: VirtAddr) {
+        let mut inner = self.inner.lock();
+        if let Some(allocation) = inner.allocations.remove(&base_va) {
             for (i, frame) in allocation.frames.iter().enumerate() {
                 let va = VirtAddr::new(base_va.as_u64() + (i as u64) * (PAGE_SIZE as u64));
-                self.pa_to_va_map.remove(frame);
-                self.va_to_pa_map.remove(&va);
+                inner.pa_to_va_map.remove(frame);
+                inner.va_to_pa_map.remove(&va);
             }
-            self.free_va_range(base_va, allocation.frames.len());
+            inner.free_va_range(base_va, allocation.frames.len());
         }
     }
 
     /// Unregisters all mappings for an allocation starting at the given virtual address.
     ///
     /// Returns the number of pages that were unmapped, or `None` if no allocation was found.
-    pub fn unregister_allocation(&mut self, va_start: VirtAddr) -> Option<usize> {
-        let allocation = self.allocations.remove(&va_start)?;
+    pub fn unregister_allocation(&self, va_start: VirtAddr) -> Option<usize> {
+        let mut inner = self.inner.lock();
+        let allocation = inner.allocations.remove(&va_start)?;
         for (i, frame) in allocation.frames.iter().enumerate() {
             let va = VirtAddr::new(va_start.as_u64() + (i as u64) * (PAGE_SIZE as u64));
-            self.pa_to_va_map.remove(frame);
-            self.va_to_pa_map.remove(&va);
+            inner.pa_to_va_map.remove(frame);
+            inner.va_to_pa_map.remove(&va);
         }
 
-        self.free_va_range(va_start, allocation.frames.len());
+        inner.free_va_range(va_start, allocation.frames.len());
 
         Some(allocation.frames.len())
     }
-
-    /// Checks if a virtual address is within the vmap region.
-    fn is_vmap_address(va: VirtAddr) -> bool {
-        (VMAP_START..VMAP_END).contains(&va.as_u64())
-    }
-}
-
-/// Vmap region allocator that manages virtual address allocation and PA↔VA mappings.
-pub struct VmapRegionAllocator {
-    inner: SpinMutex<VmapRegionAllocatorInner>,
-}
-
-impl VmapRegionAllocator {
-    fn new() -> Self {
-        Self {
-            inner: SpinMutex::new(VmapRegionAllocatorInner::new()),
-        }
-    }
-
-    /// Atomically allocates VA range and registers all mappings.
-    ///
-    /// Returns the base VA on success, or None if allocation fails or any PA is already mapped.
-    pub fn allocate_and_register(&self, frames: &[PhysFrame]) -> Option<VirtAddr> {
-        self.inner.lock().allocate_and_register(frames)
-    }
-
-    /// Rolls back a failed allocation by removing mappings and freeing VA range.
-    pub fn rollback_allocation(&self, base_va: VirtAddr) {
-        self.inner.lock().rollback_allocation(base_va);
-    }
-
-    /// Unregisters all mappings for an allocation starting at the given virtual address.
-    pub fn unregister_allocation(&self, va_start: VirtAddr) -> Option<usize> {
-        self.inner.lock().unregister_allocation(va_start)
-    }
-
-    /// Checks if a virtual address is within the vmap region.
-    pub fn is_vmap_address(va: VirtAddr) -> bool {
-        VmapRegionAllocatorInner::is_vmap_address(va)
-    }
 }
 
 /// Returns a reference to the global vmap region allocator.
@@ -233,11 +215,6 @@ pub fn vmap_allocator() -> &'static VmapRegionAllocator {
     ALLOCATOR.call_once(VmapRegionAllocator::new)
 }
 
-/// Checks if a virtual address is within the vmap region.
-pub fn is_vmap_address(va: VirtAddr) -> bool {
-    VmapRegionAllocator::is_vmap_address(va)
-}
-
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -245,21 +222,11 @@ mod tests {
 
     #[test]
     fn test_is_vmap_address() {
-        assert!(VmapRegionAllocatorInner::is_vmap_address(VirtAddr::new(
-            VMAP_START
-        )));
-        assert!(VmapRegionAllocatorInner::is_vmap_address(VirtAddr::new(
-            VMAP_START + 0x1000
-        )));
-        assert!(VmapRegionAllocatorInner::is_vmap_address(VirtAddr::new(
-            VMAP_END - 1
-        )));
-        assert!(!VmapRegionAllocatorInner::is_vmap_address(VirtAddr::new(
-            VMAP_START - 1
-        )));
-        assert!(!VmapRegionAllocatorInner::is_vmap_address(VirtAddr::new(
-            VMAP_END
-        )));
+        assert!(is_vmap_address(VirtAddr::new(VMAP_START)));
+        assert!(is_vmap_address(VirtAddr::new(VMAP_START + 0x1000)));
+        assert!(is_vmap_address(VirtAddr::new(VMAP_END - 1)));
+        assert!(!is_vmap_address(VirtAddr::new(VMAP_START - 1)));
+        assert!(!is_vmap_address(VirtAddr::new(VMAP_END)));
     }
 
     #[test]
@@ -305,7 +272,7 @@ mod tests {
 
     #[test]
     fn test_allocate_and_register() {
-        let mut allocator = VmapRegionAllocatorInner::new();
+        let allocator = VmapRegionAllocator::new();
 
         let frames = alloc::vec![
             PhysFrame::<Size4KiB>::containing_address(PhysAddr::new(0x1000)),
@@ -330,7 +297,7 @@ mod tests {
 
     #[test]
     fn test_rollback_allocation() {
-        let mut allocator = VmapRegionAllocatorInner::new();
+        let allocator = VmapRegionAllocator::new();
 
         let frames = alloc::vec![
             PhysFrame::<Size4KiB>::containing_address(PhysAddr::new(0x1000)),
@@ -349,7 +316,7 @@ mod tests {
 
     #[test]
     fn test_unregister_allocation() {
-        let mut allocator = VmapRegionAllocatorInner::new();
+        let allocator = VmapRegionAllocator::new();
 
         let frames = alloc::vec![
             PhysFrame::<Size4KiB>::containing_address(PhysAddr::new(0x1000)),
@@ -377,7 +344,7 @@ mod tests {
 
     #[test]
     fn test_guard_page_gap() {
-        let mut allocator = VmapRegionAllocatorInner::new();
+        let allocator = VmapRegionAllocator::new();
 
         let frames_a = alloc::vec![PhysFrame::<Size4KiB>::containing_address(PhysAddr::new(
             0x1000
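
A minimal usage sketch of the refactored vmap allocator API, assuming a hypothetical `map_pages` helper that stands in for the platform's page-table mapping step; the `vmap_allocator()`, `allocate_and_register`, `rollback_allocation`, `unregister_allocation`, and `is_vmap_address` calls follow the signatures introduced in this patch:

    use x86_64::structures::paging::{PhysFrame, Size4KiB};
    use x86_64::VirtAddr;

    use crate::mm::vmap::{is_vmap_address, vmap_allocator};

    /// Hypothetical stand-in for the page-table mapping step; returns Err on failure.
    fn map_pages(_base_va: VirtAddr, _frames: &[PhysFrame<Size4KiB>]) -> Result<(), ()> {
        Ok(())
    }

    /// Maps non-contiguous frames at a virtually contiguous range in the vmap region.
    fn vmap_frames(frames: &[PhysFrame<Size4KiB>]) -> Option<VirtAddr> {
        // Reserve a VA range and record the PA<->VA mappings atomically.
        let base_va = vmap_allocator().allocate_and_register(frames)?;

        // Install the page-table entries; undo the bookkeeping if that fails.
        if map_pages(base_va, frames).is_err() {
            vmap_allocator().rollback_allocation(base_va);
            return None;
        }
        Some(base_va)
    }

    /// Tears down a vmap mapping after its page-table entries have been removed.
    fn vunmap_frames(base_va: VirtAddr) -> Option<usize> {
        debug_assert!(is_vmap_address(base_va));
        // Drop the PA<->VA bookkeeping and free the VA range (guard page included).
        vmap_allocator().unregister_allocation(base_va)
    }

Keeping rollback on the caller side matches the split in this patch: the allocator owns only the VA bookkeeping, while the page-table state stays with the platform code.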