Skip to content
This repository was archived by the owner on Nov 26, 2025. It is now read-only.
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions modules/axhal/src/arch/x86_64/trap.rs
Original file line number Diff line number Diff line change
Expand Up @@ -78,6 +78,7 @@ fn vec_to_str(vec: u64) -> &'static str {
fn err_code_to_flags(err_code: u64) -> Result<MappingFlags, u64> {
let code = PageFaultErrorCode::from_bits_truncate(err_code);
let reserved_bits = (PageFaultErrorCode::CAUSED_BY_WRITE
| PageFaultErrorCode::PROTECTION_VIOLATION
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@mingzi47 Sorry to bother you, I was wondering if this change is necessary? Because I'm porting these to axcpu.

| PageFaultErrorCode::USER_MODE
| PageFaultErrorCode::INSTRUCTION_FETCH)
.complement();
Expand Down
140 changes: 134 additions & 6 deletions modules/axmm/src/aspace.rs
Original file line number Diff line number Diff line change
@@ -1,14 +1,16 @@
use core::fmt;

use alloc::sync::Arc;
use axerrno::{AxError, AxResult, ax_err};
use axhal::mem::phys_to_virt;
use axhal::paging::{MappingFlags, PageTable, PagingError};
use memory_addr::{
MemoryAddr, PAGE_SIZE_4K, PageIter4K, PhysAddr, VirtAddr, VirtAddrRange, is_aligned_4k,
};
use memory_set::{MemoryArea, MemorySet};
use page_table_multiarch::PageSize;

use crate::backend::Backend;
use crate::backend::{Backend, alloc_frame};
use crate::mapping_err_to_ax_err;

/// The virtual memory address space.
Expand Down Expand Up @@ -169,7 +171,7 @@ impl AddrSpace {

while let Some(area) = self.areas.find(start) {
let backend = area.backend();
if let Backend::Alloc { populate } = backend {
if let Backend::Alloc { populate, .. } = backend {
if !*populate {
for addr in PageIter4K::new(start, area.end().min(end)).unwrap() {
match self.pt.query(addr) {
Expand Down Expand Up @@ -373,9 +375,20 @@ impl AddrSpace {
if let Some(area) = self.areas.find(vaddr) {
let orig_flags = area.flags();
if orig_flags.contains(access_flags) {
return area
.backend()
.handle_page_fault(vaddr, orig_flags, &mut self.pt);
if let Ok((paddr, _, page_size)) = self.pt.query(vaddr) {
// TODO: skip Shared
if !access_flags.contains(MappingFlags::WRITE) {
return false;
}
// 1. page fault caused by write
// 2. pte exists
// 3. Not shared memory
return self.handle_page_fault_cow(vaddr, paddr, page_size);
} else {
return area
.backend()
.handle_page_fault(vaddr, orig_flags, &mut self.pt);
}
}
}
false
Expand All @@ -386,7 +399,11 @@ impl AddrSpace {
let mut new_aspace = Self::new_empty(self.base(), self.size())?;

for area in self.areas.iter() {
let backend = area.backend();
let backend = match area.backend() {
Backend::Alloc { populate, .. } => Backend::new_alloc(*populate),
Backend::Linear { .. } => area.backend().clone(),
};

// Remap the memory area in the new address space.
let new_area =
MemoryArea::new(area.start(), area.size(), area.flags(), backend.clone());
Expand Down Expand Up @@ -433,6 +450,116 @@ impl AddrSpace {
}
Ok(new_aspace)
}

/// Creates a copy of the current [`AddrSpace`] using copy-on-write (COW).
///
/// For every `Alloc`-backed area, pages already mapped in the old page table
/// are write-protected in BOTH the old and the new page tables, and the new
/// table maps the SAME physical frames. The actual copy is deferred to the
/// COW write-fault handler.
///
/// Linear-backed areas are left untouched (usually kernel-allocated, shared).
///
/// # Errors
///
/// Returns an error if the new address space or one of its areas cannot be
/// created.
pub fn copy_with_cow(&mut self) -> AxResult<Self> {
let mut new_aspace = Self::new_empty(self.base(), self.size())?;
let new_pt = &mut new_aspace.pt;
let old_pt = &mut self.pt;

for area in self.areas.iter() {
// NOTE(review): `clone()` on an `Alloc` backend presumably shares the
// `Arc<FrameTracker>` with the parent; confirm this is intended, since
// the COW fault handler keys off `Arc::strong_count` of tracked frames.
let mut backend = area.backend().clone();
// TODO: Shared mem area
match &mut backend {
// Forcing `populate = false` is to prevent the subsequent `new_aspace.areas.map`
// from mapping page table entries for the virtual addresses.
Backend::Alloc { populate, .. } => {
*populate = false;

// Only the page-table entries drop WRITE; the area itself keeps
// its original flags so the fault handler can restore them.
let mut flags = area.flags();
flags.remove(MappingFlags::WRITE);

// If the page is mapped in the old page table:
// - Update its permissions in the old page table using `flags`.
// - Map the same physical page into the new page table at the same
//   virtual address, with the same page size and `flags`.
// NOTE(review): `.ok()` silently ignores protect/map failures —
// confirm that a partially-shared mapping is acceptable here.
// TODO: huge page
for vaddr in PageIter4K::new(area.start(), area.end())
.expect("Failed to create page iterator")
{
if let Ok((paddr, _, page_size)) = old_pt.query(vaddr) {
old_pt
.protect(vaddr, flags)
.map(|(_, tlb)| tlb.flush())
.ok();
new_pt
.map(vaddr, paddr, page_size, flags)
.map(|tlb| tlb.flush())
.ok();
}
}
}
// Linear-backed regions are usually allocated by the kernel and are shared
Backend::Linear { .. } => (),
}

// The new area keeps the ORIGINAL flags (including WRITE); only the
// page-table entries were downgraded above.
let new_area = MemoryArea::new(area.start(), area.size(), area.flags(), backend);
new_aspace
.areas
.map(new_area, new_pt, false)
.map_err(mapping_err_to_ax_err)?;
}

Ok(new_aspace)
}

/// Handles a Copy-On-Write (COW) page fault.
///
/// Called on a write fault at `vaddr` when a page-table entry already exists;
/// the WRITE bit was removed when the address space was cloned with COW.
///
/// # Arguments
/// - `vaddr`: The virtual address that triggered the fault.
/// - `paddr`: The physical address currently mapped to the faulting virtual address.
/// - `page_size`: The page size of the existing mapping (from the page-table query).
///
/// # Returns
/// - `true` if the page fault was handled successfully.
/// - `false` if the fault handling failed (e.g., allocation failed or invalid ref count).
fn handle_page_fault_cow(
&mut self,
vaddr: VirtAddr,
paddr: PhysAddr,
page_size: PageSize,
) -> bool {
// The caller already verified that some area covers `vaddr`.
let area = self.areas.find(vaddr).unwrap();
match area.backend() {
Backend::Alloc { tracker, .. } => {
// A COW page must be tracked; an untracked `paddr` is unexpected.
let old_frame = match tracker.find(paddr) {
Some(frame) => frame,
None => return false,
};

// `old_frame` (our local clone) plus the tracker entry give a
// minimum count of 2, so `..=1` should be unreachable.
// NOTE(review): if the tracker `Arc` is shared between parent and
// child address spaces, this count may not reflect how many spaces
// actually map the frame — confirm against `Backend`'s `Clone`.
match Arc::strong_count(&old_frame) {
..=1 => false,
// There is only one AddrSpace reference to the page,
// so there is no need to copy it.
// Restore the area's original flags (re-enables WRITE).
2 => self
.pt
.protect(vaddr, area.flags())
.map(|(_, tlb)| tlb.flush())
.is_ok(),

// Allocates the new page and copies the contents of the original page,
// remapping the virtual address to the physical address of the new page.
// NOTE: Reduce the page's reference count
3.. => match alloc_frame(false, page_size.into()) {
Some(new_frame) => {
new_frame.copy_from(old_frame.clone());
tracker.remove(old_frame.start_paddr());
tracker.insert(new_frame.clone());
self.pt
.remap(vaddr, new_frame.start_paddr(), area.flags())
.map(|(_, tlb)| {
tlb.flush();
})
.is_ok()
}
None => false,
},
}
}
// Linear mappings are never COW.
Backend::Linear { .. } => false,
}
}
}

impl fmt::Debug for AddrSpace {
Expand All @@ -447,6 +574,7 @@ impl fmt::Debug for AddrSpace {

impl Drop for AddrSpace {
    /// Unmaps all areas (via `clear`) when the address space is destroyed.
    fn drop(&mut self) {
        // Replaced the leftover placeholder message ("AddrSpace drop ..... ")
        // with a meaningful one.
        debug!("Dropping AddrSpace");
        self.clear();
    }
}
134 changes: 114 additions & 20 deletions modules/axmm/src/backend/alloc.rs
Original file line number Diff line number Diff line change
@@ -1,28 +1,115 @@
use axalloc::global_allocator;
use axhal::mem::{phys_to_virt, virt_to_phys};
use axhal::paging::{MappingFlags, PageSize, PageTable};
use alloc::{sync::Arc, vec::Vec};
use axalloc::GlobalPage;
use axhal::{
mem::virt_to_phys,
paging::{MappingFlags, PageSize, PageTable},
};
use kspin::SpinNoIrq;
use memory_addr::{PAGE_SIZE_4K, PageIter4K, PhysAddr, VirtAddr};

use super::Backend;

fn alloc_frame(zeroed: bool) -> Option<PhysAddr> {
let vaddr = VirtAddr::from(global_allocator().alloc_pages(1, PAGE_SIZE_4K).ok()?);
if zeroed {
unsafe { core::ptr::write_bytes(vaddr.as_mut_ptr(), 0, PAGE_SIZE_4K) };
/// Tracks the set of physical frames owned by an `Alloc` backend.
///
/// Wrapped in an `Arc` by the backend, so cloned backends share the same
/// tracker and frames are reference-counted via `Arc<Frame>`.
pub struct FrameTracker {
// Lock-protected frame list; lookups are linear scans by physical address.
inner: SpinNoIrq<Vec<Arc<Frame>>>,
}

impl FrameTracker {
    /// Creates an empty tracker.
    fn new() -> Self {
        Self {
            inner: SpinNoIrq::new(Vec::new()),
        }
    }

    /// Calls `f` on every tracked frame.
    ///
    /// The internal lock is held for the whole iteration, so `f` must not
    /// call back into this tracker.
    pub fn for_each<F>(&self, f: F)
    where
        F: FnMut(&Arc<Frame>),
    {
        self.inner.lock().iter().for_each(f);
    }

    /// Returns the tracked frame whose physical range contains `paddr`,
    /// if any (linear search).
    pub fn find(&self, paddr: PhysAddr) -> Option<Arc<Frame>> {
        // `.cloned()` instead of `.map(|frame| frame.clone())`
        // (clippy::map_clone).
        self.inner
            .lock()
            .iter()
            .find(|frame| frame.contains(paddr))
            .cloned()
    }

    /// Adds `frame` to the tracker.
    pub fn insert(&self, frame: Arc<Frame>) {
        self.inner.lock().push(frame);
    }

    /// Removes the tracked frame whose physical range contains `paddr`.
    ///
    /// # Panics
    ///
    /// Panics if no tracked frame contains `paddr`.
    pub fn remove(&self, paddr: PhysAddr) {
        let mut frames = self.inner.lock();
        let index = frames
            .iter()
            .position(|frame| frame.contains(paddr))
            .expect("Tried to remove a frame that was not present");
        frames.remove(index);
    }
}

/// A reference-counted physical allocation obtained from the global
/// allocator, shared between address spaces for copy-on-write.
pub struct Frame {
// The underlying page allocation; locked so `copy_from` can mutate it
// through a shared reference.
inner: SpinNoIrq<GlobalPage>,
}

impl Frame {
    /// Wraps an already-allocated `GlobalPage`.
    fn new(page: GlobalPage) -> Self {
        Self {
            inner: SpinNoIrq::new(page),
        }
    }

    /// Copies the contents of `other` into this frame.
    ///
    /// Locks `self` first, then `other`; both slices must have the same
    /// length or `copy_from_slice` panics.
    /// NOTE(review): calling this with `other` aliasing `self` would
    /// deadlock on the second lock — confirm callers never do that.
    pub fn copy_from(&self, other: Arc<Frame>) {
        self.inner
            .lock()
            .as_slice_mut()
            .copy_from_slice(other.inner.lock().as_slice());
    }

    /// Returns `true` if `paddr` lies within this frame's physical range.
    pub fn contains(&self, paddr: PhysAddr) -> bool {
        let start = self.start_paddr();
        let size = self.inner.lock().size();
        // left-closed, right-open interval
        start <= paddr && paddr < start + size
    }

    /// Returns the starting physical address of this frame.
    pub fn start_paddr(&self) -> PhysAddr {
        self.inner.lock().start_paddr(virt_to_phys)
    }
}

fn dealloc_frame(frame: PhysAddr) {
let vaddr = phys_to_virt(frame);
global_allocator().dealloc_pages(vaddr.as_usize(), 1);
/// Allocates a physical memory frame and optionally zeroes it.
///
/// # Parameters
///
/// - `zeroed`: A boolean indicating whether the allocated frame should be zero-initialized.
///
/// # Returns
///
/// Returns an `Option<Arc<Frame>>`:
/// - `Some(Arc<Frame>)`: Allocation succeeded; the frame is wrapped in a reference-counted pointer.
/// - `None`: Allocation failed (e.g., out of memory).
pub fn alloc_frame(zeroed: bool, page_size: usize) -> Option<Arc<Frame>> {
let page_num = page_size / PAGE_SIZE_4K;
GlobalPage::alloc_contiguous(page_num, page_size)
.ok()
.map(|mut page| {
if zeroed {
page.zero();
}

Arc::new(Frame::new(page))
})
}

impl Backend {
/// Creates a new allocation mapping backend.
pub const fn new_alloc(populate: bool) -> Self {
Self::Alloc { populate }
pub fn new_alloc(populate: bool) -> Self {
Self::Alloc {
populate,
tracker: Arc::new(FrameTracker::new()),
}
}

pub(crate) fn map_alloc(
Expand All @@ -31,6 +118,7 @@ impl Backend {
flags: MappingFlags,
pt: &mut PageTable,
populate: bool,
trakcer: Arc<FrameTracker>,
) -> bool {
debug!(
"map_alloc: [{:#x}, {:#x}) {:?} (populate={})",
Expand All @@ -42,8 +130,9 @@ impl Backend {
if populate {
// allocate all possible physical frames for populated mapping.
for addr in PageIter4K::new(start, start + size).unwrap() {
if let Some(frame) = alloc_frame(true) {
if let Ok(tlb) = pt.map(addr, frame, PageSize::Size4K, flags) {
if let Some(page) = alloc_frame(true, PAGE_SIZE_4K) {
if let Ok(tlb) = pt.map(addr, page.start_paddr(), PageSize::Size4K, flags) {
trakcer.insert(page);
tlb.ignore(); // TLB flush on map is unnecessary, as there are no outdated mappings.
} else {
return false;
Expand All @@ -61,6 +150,7 @@ impl Backend {
size: usize,
pt: &mut PageTable,
_populate: bool,
tracker: Arc<FrameTracker>,
) -> bool {
debug!("unmap_alloc: [{:#x}, {:#x})", start, start + size);
for addr in PageIter4K::new(start, start + size).unwrap() {
Expand All @@ -71,7 +161,7 @@ impl Backend {
return false;
}
tlb.flush();
dealloc_frame(frame);
tracker.remove(frame);
} else {
// Deallocation is needn't if the page is not mapped.
}
Expand All @@ -84,15 +174,19 @@ impl Backend {
orig_flags: MappingFlags,
pt: &mut PageTable,
populate: bool,
tracker: Arc<FrameTracker>,
) -> bool {
if populate {
false // Populated mappings should not trigger page faults.
} else if let Some(frame) = alloc_frame(true) {
} else if let Some(page) = alloc_frame(true, PAGE_SIZE_4K) {
// Allocate a physical frame lazily and map it to the fault address.
// `vaddr` does not need to be aligned. It will be automatically
// aligned during `pt.map` regardless of the page size.
pt.map(vaddr, frame, PageSize::Size4K, orig_flags)
.map(|tlb| tlb.flush())
pt.map(vaddr, page.start_paddr(), PageSize::Size4K, orig_flags)
.map(|tlb| {
tracker.insert(page);
tlb.flush()
})
.is_ok()
} else {
false
Expand Down
Loading
Loading