2 changes: 1 addition & 1 deletion dev_tests/src/ratchet.rs
@@ -37,7 +37,7 @@ fn ratchet_globals() -> Result<()> {
("litebox/", 9),
("litebox_platform_linux_kernel/", 6),
("litebox_platform_linux_userland/", 5),
("litebox_platform_lvbs/", 20),
("litebox_platform_lvbs/", 21),
("litebox_platform_multiplex/", 1),
("litebox_platform_windows_userland/", 7),
("litebox_runner_linux_userland/", 1),
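For context, this ratchet table caps the number of mutable globals per crate; the bump from 20 to 21 accounts for one new global in litebox_platform_lvbs, presumably the vmap allocator state added by this PR. A minimal sketch of how a ratchet check of this shape works (hypothetical code; the real ratchet_globals() is only partially shown above):

// Hypothetical ratchet check: fail when a crate exceeds its recorded baseline.
// `count_globals` stands in for whatever scan the real test performs.
fn check_ratchet(
    baselines: &[(&str, usize)],
    count_globals: impl Fn(&str) -> usize,
) -> Result<(), String> {
    for &(crate_dir, max) in baselines {
        let actual = count_globals(crate_dir);
        if actual > max {
            return Err(format!("{crate_dir}: {actual} globals exceed the ratchet limit {max}"));
        }
    }
    Ok(())
}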
4 changes: 2 additions & 2 deletions litebox_common_linux/src/vmap.rs
@@ -166,12 +166,12 @@ pub enum PhysPointerError {
NoMappingInfo,
#[error("Overflow occurred during calculation")]
Overflow,
#[error("Non-contiguous physical pages in the array")]
NonContiguousPages,
#[error("The operation is unsupported on this platform")]
UnsupportedOperation,
#[error("Unsupported permissions: {0:#x}")]
UnsupportedPermissions(u8),
#[error("Memory copy failed")]
CopyFailed,
#[error("Duplicate physical page address {0:#x} in the input array")]
DuplicatePhysicalAddress(usize),
}
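Two things change in this error set: NonContiguousPages is gone, because non-contiguous input is now a supported case rather than an error, and DuplicatePhysicalAddress is new, carrying the offending address. A caller-side sketch (illustrative; only PhysPointerError and its variants come from this file, the helper is an assumption):

use litebox_common_linux::vmap::PhysPointerError;

// Hypothetical triage helper: a duplicate page or an unsupported permission
// set is a caller bug rather than a transient condition, so retrying is pointless.
fn is_caller_bug(err: &PhysPointerError) -> bool {
    matches!(
        err,
        PhysPointerError::DuplicatePhysicalAddress(_)
            | PhysPointerError::UnsupportedPermissions(_)
    )
}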
126 changes: 125 additions & 1 deletion litebox_platform_lvbs/src/arch/x86/mm/paging.rs
@@ -12,7 +12,7 @@ use x86_64::{
PageTableFlags, PhysFrame, Size4KiB, Translate,
frame::PhysFrameRange,
mapper::{
FlagUpdateError, MapToError, PageTableFrameMapping, TranslateResult,
CleanUp, FlagUpdateError, MapToError, PageTableFrameMapping, TranslateResult,
UnmapError as X64UnmapError,
},
},
@@ -365,6 +365,130 @@ impl<M: MemoryProvider, const ALIGN: usize> X64PageTable<'_, M, ALIGN> {
Ok(M::pa_to_va(frame_range.start.start_address()).as_mut_ptr())
}

/// Map non-contiguous physical frames to virtually contiguous addresses.
///
/// This function maps each physical frame in `frames` to consecutive virtual addresses
/// starting from `base_va`. Unlike `map_phys_frame_range`, this allows mapping
/// non-contiguous physical pages to a contiguous virtual address range.
///
/// # Arguments
/// - `frames` - Slice of physical frames to map (may be non-contiguous, but must not contain duplicates)
/// - `base_va` - Starting virtual address for the mapping
/// - `flags` - Page table flags to apply to all mappings
///
/// # Returns
/// - `Ok(*mut u8)` — pointer to the start of the mapped virtual range
/// - `Err(MapToError::PageAlreadyMapped)` if any VA is already mapped
/// - `Err(MapToError::FrameAllocationFailed)` if `base_va` is not 4 KiB-aligned or page-table allocation fails
///
/// # Behavior
/// - Any existing mapping is treated as an error (shared mappings not supported)
/// - On error, every page mapped by this call is unmapped again, making the operation all-or-nothing
pub(crate) fn map_non_contiguous_phys_frames(
&self,
frames: &[PhysFrame<Size4KiB>],
base_va: VirtAddr,
flags: PageTableFlags,
) -> Result<*mut u8, MapToError<Size4KiB>> {
let mut allocator = PageTableAllocator::<M>::new();
let mut mapped_count: usize = 0;

let mut inner = self.inner.lock();

if !base_va.is_aligned(Size4KiB::SIZE) {
return Err(MapToError::FrameAllocationFailed);
}

// Quick pre-scan: check all target VAs for existing mappings before
// modifying any page table entries. This avoids expensive rollback when
// an overlap is detected partway through.
for i in 0..frames.len() {
let va = base_va + (i as u64) * Size4KiB::SIZE;
match inner.translate(va) {
TranslateResult::Mapped { frame, .. } => {
let existing_frame =
PhysFrame::<Size4KiB>::containing_address(frame.start_address());
return Err(MapToError::PageAlreadyMapped(existing_frame));
}
TranslateResult::NotMapped => {}
TranslateResult::InvalidFrameAddress(_) => {
return Err(MapToError::FrameAllocationFailed);
}
}
}

// All VAs verified as unmapped — proceed with mapping.
let table_flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE;
for (i, &target_frame) in frames.iter().enumerate() {
let va = base_va + (i as u64) * Size4KiB::SIZE;
let page: Page<Size4KiB> = Page::containing_address(va);

// Note: Since we lock the entire page table for the duration of this function (`self.inner.lock()`),
// there should be no concurrent modifications to the page table. If we allow concurrent mappings
// in the future, we should re-check the VA here before mapping and return an error
// if it is no longer unmapped.

match unsafe {
inner.map_to_with_table_flags(
page,
target_frame,
flags,
table_flags,
&mut allocator,
)
} {
Ok(fl) => {
mapped_count += 1;
if FLUSH_TLB {
fl.flush();
}
}
Err(e) => {
Self::rollback_mapped_pages(&mut inner, base_va, mapped_count, &mut allocator);
return Err(e);
}
}
}

Ok(base_va.as_mut_ptr())
}

/// Rollback helper: unmap the first `count` pages starting from `base_va`
/// and free any intermediate page-table frames (P1/P2/P3) that became empty.
///
/// Note: The caller must already hold the page table lock (`self.inner`).
/// This function accepts the locked `MappedPageTable` directly.
fn rollback_mapped_pages(
inner: &mut MappedPageTable<'_, FrameMapping<M>>,
base_va: VirtAddr,
count: usize,
allocator: &mut PageTableAllocator<M>,
) {
for i in 0..count {
let va = base_va + (i as u64) * Size4KiB::SIZE;
let page: Page<Size4KiB> = Page::containing_address(va);
if let Ok((_frame, fl)) = inner.unmap(page)
&& FLUSH_TLB
{
fl.flush();
}
}

// Free any intermediate page-table frames (P1/P2/P3) that are now
// empty after unmapping.
if count > 0 {
let start = Page::<Size4KiB>::containing_address(base_va);
let end = Page::<Size4KiB>::containing_address(
base_va + ((count - 1) as u64) * Size4KiB::SIZE,
);
// Safety: the vmap VA range is used exclusively by this page table
// and all leaf entries have just been unmapped above.
unsafe {
inner.clean_up_addr_range(Page::range_inclusive(start, end), allocator);
}
}
}

/// This function creates a new empty top-level page table.
pub(crate) unsafe fn new_top_level() -> Self {
let frame = PageTableAllocator::<M>::allocate_frame(true)
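A usage sketch for the new mapping API (not part of the PR; the frame addresses, the virtual address, and the surrounding function are all illustrative):

use x86_64::{
    PhysAddr, VirtAddr,
    structures::paging::{PageTableFlags, PhysFrame, Size4KiB},
};

// Sketch: map two non-adjacent physical frames to back-to-back virtual pages.
fn map_two_frames<M: MemoryProvider>(pt: &X64PageTable<'_, M, 4096>) {
    let frames = [
        PhysFrame::<Size4KiB>::containing_address(PhysAddr::new(0x0001_0000)),
        PhysFrame::<Size4KiB>::containing_address(PhysAddr::new(0x0900_0000)), // not adjacent
    ];
    let base_va = VirtAddr::new(0xffff_9000_0000_0000); // arbitrary, 4 KiB aligned
    let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE;

    match pt.map_non_contiguous_phys_frames(&frames, base_va, flags) {
        // Success: frames[0] is now visible at base_va, frames[1] at base_va + 4096.
        Ok(ptr) => drop(ptr),
        // Failure: rollback_mapped_pages has already undone any partial mappings
        // and freed now-empty intermediate tables, so nothing is leaked.
        Err(err) => drop(err),
    }
}

The pre-scan plus rollback is what delivers the all-or-nothing behavior promised in the doc comment, at the cost of walking the page tables twice on the happy path.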
172 changes: 110 additions & 62 deletions litebox_platform_lvbs/src/lib.rs
@@ -41,6 +41,9 @@ use x86_64::{
};
use zerocopy::{FromBytes, IntoBytes};

#[cfg(feature = "optee_syscall")]
use crate::mm::vmap::vmap_allocator;

extern crate alloc;

pub mod arch;
@@ -1169,23 +1172,21 @@ impl<Host: HostInterface> litebox::platform::SystemInfoProvider for LinuxKernel<
}
}

/// Checks whether the given physical addresses are contiguous with respect to ALIGN.
///
/// Note: This is a temporary check to let `VmapManager` work with this platform,
/// which does not yet support virtually contiguous mapping of non-contiguous
/// physical pages (for now, it maps physical pages at a fixed offset).
#[cfg(feature = "optee_syscall")]
fn check_contiguity<const ALIGN: usize>(
addrs: &[PhysPageAddr<ALIGN>],
) -> Result<(), PhysPointerError> {
/// Checks whether the given physical addresses are contiguous with respect to ALIGN.
fn is_contiguous<const ALIGN: usize>(addrs: &[PhysPageAddr<ALIGN>]) -> bool {
for window in addrs.windows(2) {
let first = window[0].as_usize();
let second = window[1].as_usize();
if second != first.checked_add(ALIGN).ok_or(PhysPointerError::Overflow)? {
return Err(PhysPointerError::NonContiguousPages);
if let Some(expected) = first.checked_add(ALIGN) {
if second != expected {
return false;
}
} else {
return false;
}
}
Ok(())
true
}
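For illustration, writing the page addresses as raw numbers (hypothetical values, ALIGN = 0x1000):

// is_contiguous(&[0x1000, 0x2000, 0x3000]) == true   (each entry = previous + ALIGN)
// is_contiguous(&[0x1000, 0x3000])         == false  (one-page gap)
// An overflowing `first + ALIGN` now also yields false, where the old
// check_contiguity returned PhysPointerError::Overflow; callers only need
// a yes/no answer to pick the mapping strategy.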

#[cfg(feature = "optee_syscall")]
@@ -1195,72 +1196,119 @@ impl<Host: HostInterface, const ALIGN: usize> VmapManager<ALIGN> for LinuxKernel
pages: &PhysPageAddrArray<ALIGN>,
perms: PhysPageMapPermissions,
) -> Result<PhysPageMapInfo<ALIGN>, PhysPointerError> {
// TODO: Remove this check once this platform supports virtually contiguous
// non-contiguous physical page mapping.
check_contiguity(pages)?;

if pages.is_empty() {
return Err(PhysPointerError::InvalidPhysicalAddress(0));
}
let phys_start = x86_64::PhysAddr::new(pages[0].as_usize() as u64);
let phys_end = x86_64::PhysAddr::new(
pages
.last()
.unwrap()
.as_usize()
.checked_add(ALIGN)
.ok_or(PhysPointerError::Overflow)? as u64,
);
let frame_range = if ALIGN == PAGE_SIZE {
PhysFrame::range(
PhysFrame::<Size4KiB>::containing_address(phys_start),
PhysFrame::<Size4KiB>::containing_address(phys_end),
)
} else {
unimplemented!("ALIGN other than 4KiB is not supported yet")
};

// Reject duplicate page addresses — shared mappings are not supported.
{
let mut seen = hashbrown::HashSet::with_capacity(pages.len());
for page in pages {
if !seen.insert(page.as_usize()) {
return Err(PhysPointerError::DuplicatePhysicalAddress(page.as_usize()));
}
}
}

if ALIGN != PAGE_SIZE {
unimplemented!("ALIGN other than 4KiB is not supported yet");
}

let mut flags = PageTableFlags::PRESENT;
if perms.contains(PhysPageMapPermissions::WRITE) {
flags |= PageTableFlags::WRITABLE;
}

if let Ok(page_addr) = self
.page_table_manager
.current_page_table()
.map_phys_frame_range(frame_range, flags)
{
Ok(PhysPageMapInfo {
base: page_addr,
size: pages.len() * ALIGN,
})
// If the pages are physically contiguous, use `map_phys_frame_range`, which is efficient and doesn't consume vmap VA space.
if is_contiguous(pages) {
let phys_start = x86_64::PhysAddr::new(pages[0].as_usize() as u64);
let phys_end = x86_64::PhysAddr::new(
pages
.last()
.unwrap()
.as_usize()
.checked_add(ALIGN)
.ok_or(PhysPointerError::Overflow)? as u64,
);
let frame_range = PhysFrame::range(
PhysFrame::<Size4KiB>::containing_address(phys_start),
PhysFrame::<Size4KiB>::containing_address(phys_end),
);

match self
.page_table_manager
.current_page_table()
.map_phys_frame_range(frame_range, flags)
{
Ok(page_addr) => Ok(PhysPageMapInfo {
base: page_addr,
size: pages.len() * ALIGN,
}),
Err(MapToError::PageAlreadyMapped(_)) => {
Err(PhysPointerError::AlreadyMapped(pages[0].as_usize()))
}
Err(_) => Err(PhysPointerError::InvalidPhysicalAddress(
pages[0].as_usize(),
)),
}
} else {
Err(PhysPointerError::InvalidPhysicalAddress(
pages[0].as_usize(),
))
let frames: alloc::vec::Vec<PhysFrame<Size4KiB>> = pages
.iter()
.map(|p| PhysFrame::containing_address(x86_64::PhysAddr::new(p.as_usize() as u64)))
.collect();

let base_va = vmap_allocator()
.allocate_and_register(&frames)
.ok_or(PhysPointerError::AlreadyMapped(pages[0].as_usize()))?;

match self
.page_table_manager
.current_page_table()
.map_non_contiguous_phys_frames(&frames, base_va, flags)
{
Ok(page_addr) => Ok(PhysPageMapInfo {
base: page_addr,
size: pages.len() * ALIGN,
}),
Err(e) => {
vmap_allocator().rollback_allocation(base_va);
match e {
MapToError::PageAlreadyMapped(_) => {
Err(PhysPointerError::AlreadyMapped(pages[0].as_usize()))
}
_ => Err(PhysPointerError::InvalidPhysicalAddress(
pages[0].as_usize(),
)),
}
}
}
}
}

unsafe fn vunmap(&self, vmap_info: PhysPageMapInfo<ALIGN>) -> Result<(), PhysPointerError> {
if ALIGN == PAGE_SIZE {
let Some(page_range) = PageRange::<PAGE_SIZE>::new(
vmap_info.base as usize,
vmap_info.base.wrapping_add(vmap_info.size) as usize,
) else {
return Err(PhysPointerError::UnalignedPhysicalAddress(
vmap_info.base as usize,
ALIGN,
));
};
unsafe {
self.page_table_manager
.current_page_table()
.unmap_pages(page_range, false, true)
.map_err(|_| PhysPointerError::Unmapped(vmap_info.base as usize))
}
} else {
unimplemented!("ALIGN other than 4KiB is not supported yet")
if ALIGN != PAGE_SIZE {
unimplemented!("ALIGN other than 4KiB is not supported yet");
}

let base_va = x86_64::VirtAddr::new(vmap_info.base as u64);

// Perform both cleanup steps unconditionally so that a failure in one
// does not prevent the other from running and leave stale state behind.
let unmap_result = self
.unmap_vtl0_pages(vmap_info.base, vmap_info.size)
.map_err(|_| PhysPointerError::Unmapped(vmap_info.base as usize));

let unregister_result = if crate::mm::vmap::is_vmap_address(base_va) {
crate::mm::vmap::vmap_allocator()
.unregister_allocation(base_va)
.ok_or(PhysPointerError::Unmapped(vmap_info.base as usize))
.map(|_| ())
} else {
Ok(())
};

// Report the first error, if any.
unmap_result.and(unregister_result)
}

fn validate_unowned(&self, pages: &PhysPageAddrArray<ALIGN>) -> Result<(), PhysPointerError> {
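The reworked vunmap runs both cleanup steps before inspecting either result; Result::and then surfaces the unmap error first, otherwise the unregister result. A self-contained sketch of that pattern (stand-in closure and error types; only the a.and(b) shape comes from the diff):

// Run both cleanup steps unconditionally, then report the first failure.
fn cleanup_both(
    unmap: impl FnOnce() -> Result<(), &'static str>,
    unregister: impl FnOnce() -> Result<(), &'static str>,
) -> Result<(), &'static str> {
    let unmap_result = unmap();           // always runs
    let unregister_result = unregister(); // always runs, even if unmap failed
    // Result::and keeps the first Err; if unmap succeeded, it yields the
    // unregister result unchanged.
    unmap_result.and(unregister_result)
}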
1 change: 1 addition & 0 deletions litebox_platform_lvbs/src/mm/mod.rs
@@ -6,6 +6,7 @@
use crate::arch::{PhysAddr, VirtAddr};

pub(crate) mod pgtable;
pub(crate) mod vmap;

#[cfg(test)]
pub mod tests;
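The body of the new mm::vmap module is not included in this diff. From the call sites in lib.rs above, its surface looks roughly like the following (every signature here is inferred and may differ from the real code):

use x86_64::{VirtAddr, structures::paging::{PhysFrame, Size4KiB}};

// Inferred sketch of litebox_platform_lvbs/src/mm/vmap.rs (not in this diff).
pub(crate) struct VmapAllocator { /* bookkeeping for the reserved vmap VA window */ }

impl VmapAllocator {
    /// Reserve a virtually contiguous range sized for `frames` and record it;
    /// None when the vmap VA space is exhausted.
    pub(crate) fn allocate_and_register(
        &self,
        frames: &[PhysFrame<Size4KiB>],
    ) -> Option<VirtAddr> {
        unimplemented!("sketch only")
    }

    /// Release a reservation whose page-table mapping failed partway through.
    pub(crate) fn rollback_allocation(&self, base: VirtAddr) {
        unimplemented!("sketch only")
    }

    /// Remove a completed registration; None if `base` was never registered.
    pub(crate) fn unregister_allocation(&self, base: VirtAddr) -> Option<usize> {
        unimplemented!("sketch only")
    }
}

/// Global accessor used by lib.rs.
pub(crate) fn vmap_allocator() -> &'static VmapAllocator {
    unimplemented!("sketch only")
}

/// Whether `va` falls inside the reserved vmap window.
pub(crate) fn is_vmap_address(va: VirtAddr) -> bool {
    unimplemented!("sketch only")
}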