From 9dc100102a7f5c58c14b0be4d0a8a8e0bc09d877 Mon Sep 17 00:00:00 2001 From: Vincent Ollivier Date: Sun, 20 Oct 2024 11:20:34 +0200 Subject: [PATCH] Refactor module --- src/sys/fs/block_device.rs | 2 +- src/sys/idt.rs | 4 +- src/sys/mem/heap.rs | 90 ++++---------------------------------- src/sys/mem/mod.rs | 24 +++++++--- src/sys/mem/phys.rs | 75 +++++++++++++++++++++++++++++++ src/sys/mod.rs | 1 - src/sys/net/nic/e1000.rs | 10 ++--- src/sys/net/nic/pcnet.rs | 2 +- src/sys/net/nic/rtl8139.rs | 2 +- src/sys/process.rs | 8 ++-- src/usr/memory.rs | 4 +- 11 files changed, 118 insertions(+), 104 deletions(-) create mode 100644 src/sys/mem/phys.rs diff --git a/src/sys/fs/block_device.rs b/src/sys/fs/block_device.rs index 2a09f6d9..20f1a84c 100644 --- a/src/sys/fs/block_device.rs +++ b/src/sys/fs/block_device.rs @@ -86,7 +86,7 @@ impl BlockDeviceIO for MemBlockDevice { } pub fn mount_mem() { - let mem = sys::allocator::memory_size() / 2; // Half the allocatable memory + let mem = sys::mem::memory_size() / 2; // Half the allocatable memory let len = mem / super::BLOCK_SIZE; // TODO: take a size argument let dev = MemBlockDevice::new(len); *BLOCK_DEVICE.lock() = Some(BlockDevice::Mem(dev)); diff --git a/src/sys/idt.rs b/src/sys/idt.rs index 58bdda76..2509413e 100644 --- a/src/sys/idt.rs +++ b/src/sys/idt.rs @@ -138,7 +138,7 @@ extern "x86-interrupt" fn page_fault_handler( }; if error_code.contains(PageFaultErrorCode::CAUSED_BY_WRITE) { - if sys::allocator::alloc_pages(&mut mapper, addr, 1).is_err() { + if sys::mem::alloc_pages(&mut mapper, addr, 1).is_err() { printk!( "{}Error:{} Could not allocate page at {:#X}\n", csi_color, csi_reset, addr @@ -154,7 +154,7 @@ extern "x86-interrupt" fn page_fault_handler( // longer a simple clone of the kernel page table. Currently a process // is executed from its kernel address that is shared with the process. let start = (addr / 4096) * 4096; - if sys::allocator::alloc_pages(&mut mapper, start, 4096).is_ok() { + if sys::mem::alloc_pages(&mut mapper, start, 4096).is_ok() { if sys::process::is_userspace(start) { let code_addr = sys::process::code_addr(); let src = (code_addr + start) as *mut u8; diff --git a/src/sys/mem/heap.rs b/src/sys/mem/heap.rs index 6e6b9cd9..eba0a42c 100644 --- a/src/sys/mem/heap.rs +++ b/src/sys/mem/heap.rs @@ -1,13 +1,7 @@ use crate::sys; -use alloc::slice::SliceIndex; -use alloc::sync::Arc; -use alloc::vec; -use alloc::vec::Vec; use core::cmp; -use core::ops::{Index, IndexMut}; use linked_list_allocator::LockedHeap; -use spin::Mutex; use x86_64::structures::paging::{ mapper::MapToError, page::PageRangeInclusive, FrameAllocator, Mapper, OffsetPageTable, Page, PageTableFlags, Size4KiB, @@ -19,18 +13,18 @@ static ALLOCATOR: LockedHeap = LockedHeap::empty(); pub const HEAP_START: u64 = 0x4444_4444_0000; -fn max_memory() -> u64 { +fn max_memory() -> usize { // Default to 32 MB - option_env!("MOROS_MEMORY").unwrap_or("32").parse::().unwrap() << 20 + option_env!("MOROS_MEMORY").unwrap_or("32").parse::().unwrap() << 20 } pub fn init_heap() -> Result<(), MapToError> { - let mapper = sys::mem::mapper(); - let mut frame_allocator = sys::mem::frame_allocator(); + let mapper = super::mapper(); + let mut frame_allocator = super::frame_allocator(); // Use half of the memory for the heap caped to 16 MB by default // because the allocator is slow. 
- let heap_size = cmp::min(sys::mem::memory_size(), max_memory()) / 2; + let heap_size = (cmp::min(super::memory_size(), max_memory()) / 2) as u64; let heap_start = VirtAddr::new(HEAP_START); sys::process::init_process_addr(HEAP_START + heap_size); @@ -117,83 +111,15 @@ pub fn free_pages(mapper: &mut OffsetPageTable, addr: u64, size: usize) { } } -#[derive(Clone)] -pub struct PhysBuf { - buf: Arc>>, -} - -impl PhysBuf { - pub fn new(len: usize) -> Self { - Self::from(vec![0; len]) - } - - // Realloc vec until it uses a chunk of contiguous physical memory - fn from(vec: Vec) -> Self { - let buffer_end = vec.len() - 1; - let memory_end = phys_addr(&vec[buffer_end]) - phys_addr(&vec[0]); - if buffer_end == memory_end as usize { - Self { - buf: Arc::new(Mutex::new(vec)), - } - } else { - Self::from(vec.clone()) // Clone vec and try again - } - } - - pub fn addr(&self) -> u64 { - phys_addr(&self.buf.lock()[0]) - } -} - -pub fn phys_addr(ptr: *const u8) -> u64 { - let virt_addr = VirtAddr::new(ptr as u64); - let phys_addr = sys::mem::virt_to_phys(virt_addr).unwrap(); - phys_addr.as_u64() -} - -impl> Index for PhysBuf { - type Output = I::Output; - - #[inline] - fn index(&self, index: I) -> &Self::Output { - Index::index(&**self, index) - } -} - -impl> IndexMut for PhysBuf { - #[inline] - fn index_mut(&mut self, index: I) -> &mut Self::Output { - IndexMut::index_mut(&mut **self, index) - } -} - -impl core::ops::Deref for PhysBuf { - type Target = [u8]; - - fn deref(&self) -> &[u8] { - let vec = self.buf.lock(); - unsafe { alloc::slice::from_raw_parts(vec.as_ptr(), vec.len()) } - } -} - -impl core::ops::DerefMut for PhysBuf { - fn deref_mut(&mut self) -> &mut [u8] { - let mut vec = self.buf.lock(); - unsafe { - alloc::slice::from_raw_parts_mut(vec.as_mut_ptr(), vec.len()) - } - } -} - -pub fn memory_size() -> usize { +pub fn heap_size() -> usize { ALLOCATOR.lock().size() } -pub fn memory_used() -> usize { +pub fn heap_used() -> usize { ALLOCATOR.lock().used() } -pub fn memory_free() -> usize { +pub fn heap_free() -> usize { ALLOCATOR.lock().free() } diff --git a/src/sys/mem/mod.rs b/src/sys/mem/mod.rs index 539558ab..4641604b 100644 --- a/src/sys/mem/mod.rs +++ b/src/sys/mem/mod.rs @@ -1,6 +1,12 @@ +mod heap; +mod phys; + +pub use heap::{alloc_pages, free_pages}; +pub use phys::{phys_addr, PhysBuf}; + use crate::sys; use bootloader::bootinfo::{BootInfo, MemoryMap, MemoryRegionType}; -use core::sync::atomic::{AtomicU64, AtomicUsize, Ordering}; +use core::sync::atomic::{AtomicUsize, Ordering}; //use x86_64::instructions::interrupts; use x86_64::registers::control::Cr3; use x86_64::structures::paging::{ @@ -11,7 +17,7 @@ use x86_64::{PhysAddr, VirtAddr}; pub static mut PHYS_MEM_OFFSET: Option = None; static mut MEMORY_MAP: Option<&MemoryMap> = None; static mut MAPPER: Option> = None; -static MEMORY_SIZE: AtomicU64 = AtomicU64::new(0); +static MEMORY_SIZE: AtomicUsize = AtomicUsize::new(0); static ALLOCATED_FRAMES: AtomicUsize = AtomicUsize::new(0); pub fn init(boot_info: &'static BootInfo) { @@ -46,7 +52,7 @@ pub fn init(boot_info: &'static BootInfo) { memory_size += (320 - 256 - 16) << 10; log!("RAM {} MB", memory_size >> 20); - MEMORY_SIZE.store(memory_size, Ordering::Relaxed); + MEMORY_SIZE.store(memory_size as usize, Ordering::Relaxed); let phys_mem_offset = boot_info.physical_memory_offset; @@ -59,7 +65,7 @@ pub fn init(boot_info: &'static BootInfo) { )) }; - sys::allocator::init_heap().expect("heap initialization failed"); + heap::init_heap().expect("heap initialization failed"); //}); 
sys::idt::clear_irq_mask(1); } @@ -68,10 +74,18 @@ pub fn mapper() -> &'static mut OffsetPageTable<'static> { unsafe { sys::mem::MAPPER.as_mut().unwrap() } } -pub fn memory_size() -> u64 { +pub fn memory_size() -> usize { MEMORY_SIZE.load(Ordering::Relaxed) } +pub fn memory_used() -> usize { + (memory_size() - heap::heap_size()) + heap::heap_used() +} + +pub fn memory_free() -> usize { + (memory_size() - heap::heap_size()) + heap::heap_free() +} + pub fn phys_to_virt(addr: PhysAddr) -> VirtAddr { let phys_mem_offset = unsafe { PHYS_MEM_OFFSET.unwrap() diff --git a/src/sys/mem/phys.rs b/src/sys/mem/phys.rs new file mode 100644 index 00000000..c3f8b596 --- /dev/null +++ b/src/sys/mem/phys.rs @@ -0,0 +1,75 @@ +use alloc::slice::SliceIndex; +use alloc::sync::Arc; +use alloc::vec; +use alloc::vec::Vec; +use core::ops::{Index, IndexMut}; +use spin::Mutex; +use x86_64::VirtAddr; + +#[derive(Clone)] +pub struct PhysBuf { + buf: Arc>>, +} + +impl PhysBuf { + pub fn new(len: usize) -> Self { + Self::from(vec![0; len]) + } + + // Realloc vec until it uses a chunk of contiguous physical memory + fn from(vec: Vec) -> Self { + let buffer_end = vec.len() - 1; + let memory_end = phys_addr(&vec[buffer_end]) - phys_addr(&vec[0]); + if buffer_end == memory_end as usize { + Self { + buf: Arc::new(Mutex::new(vec)), + } + } else { + Self::from(vec.clone()) // Clone vec and try again + } + } + + pub fn addr(&self) -> u64 { + phys_addr(&self.buf.lock()[0]) + } +} + +impl> Index for PhysBuf { + type Output = I::Output; + + #[inline] + fn index(&self, index: I) -> &Self::Output { + Index::index(&**self, index) + } +} + +impl> IndexMut for PhysBuf { + #[inline] + fn index_mut(&mut self, index: I) -> &mut Self::Output { + IndexMut::index_mut(&mut **self, index) + } +} + +impl core::ops::Deref for PhysBuf { + type Target = [u8]; + + fn deref(&self) -> &[u8] { + let vec = self.buf.lock(); + unsafe { alloc::slice::from_raw_parts(vec.as_ptr(), vec.len()) } + } +} + +impl core::ops::DerefMut for PhysBuf { + fn deref_mut(&mut self) -> &mut [u8] { + let mut vec = self.buf.lock(); + unsafe { + alloc::slice::from_raw_parts_mut(vec.as_mut_ptr(), vec.len()) + } + } +} + +pub fn phys_addr(ptr: *const u8) -> u64 { + let virt_addr = VirtAddr::new(ptr as u64); + let phys_addr = super::virt_to_phys(virt_addr).unwrap(); + phys_addr.as_u64() +} diff --git a/src/sys/mod.rs b/src/sys/mod.rs index bdd5acf1..aef35894 100644 --- a/src/sys/mod.rs +++ b/src/sys/mod.rs @@ -38,7 +38,6 @@ macro_rules! 
log { } pub mod acpi; -pub mod allocator; pub mod ata; pub mod clock; pub mod cmos; diff --git a/src/sys/net/nic/e1000.rs b/src/sys/net/nic/e1000.rs index e70111e5..27ebb022 100644 --- a/src/sys/net/nic/e1000.rs +++ b/src/sys/net/nic/e1000.rs @@ -1,5 +1,5 @@ use crate::sys; -use crate::sys::allocator::PhysBuf; +use crate::sys::mem::PhysBuf; use crate::sys::net::{EthernetDeviceIO, Config, Stats}; use spin::Mutex; @@ -209,7 +209,7 @@ impl Device { } let ptr = ptr::addr_of!(rx_descs[0]) as *const u8; - let phys_addr = sys::allocator::phys_addr(ptr); + let phys_addr = sys::mem::phys_addr(ptr); // Ring address and length self.write(REG_RDBAL, phys_addr.get_bits(0..32) as u32); @@ -235,7 +235,7 @@ impl Device { } let ptr = ptr::addr_of!(tx_descs[0]) as *const _; - let phys_addr = sys::allocator::phys_addr(ptr); + let phys_addr = sys::mem::phys_addr(ptr); // Ring address and length self.write(REG_TDBAL, phys_addr.get_bits(0..32) as u32); @@ -352,7 +352,7 @@ impl Device { let rx_descs = self.rx_descs.lock(); for i in 0..RX_BUFFERS_COUNT { let ptr = ptr::addr_of!(rx_descs[i]) as *const u8; - let phy = sys::allocator::phys_addr(ptr); + let phy = sys::mem::phys_addr(ptr); debug!( "NET E1000: [{}] {:?} ({:#X} -> {:#X})", i, rx_descs[i], ptr as u64, phy @@ -363,7 +363,7 @@ impl Device { let tx_descs = self.tx_descs.lock(); for i in 0..TX_BUFFERS_COUNT { let ptr = ptr::addr_of!(tx_descs[i]) as *const u8; - let phy = sys::allocator::phys_addr(ptr); + let phy = sys::mem::phys_addr(ptr); debug!( "NET E1000: [{}] {:?} ({:#X} -> {:#X})", i, tx_descs[i], ptr as u64, phy diff --git a/src/sys/net/nic/pcnet.rs b/src/sys/net/nic/pcnet.rs index 0b82ef97..e971d75f 100644 --- a/src/sys/net/nic/pcnet.rs +++ b/src/sys/net/nic/pcnet.rs @@ -1,5 +1,5 @@ use crate::sys; -use crate::sys::allocator::PhysBuf; +use crate::sys::mem::PhysBuf; use crate::sys::net::{Config, EthernetDeviceIO, Stats}; use alloc::sync::Arc; diff --git a/src/sys/net/nic/rtl8139.rs b/src/sys/net/nic/rtl8139.rs index 802d8c23..c81f2fd7 100644 --- a/src/sys/net/nic/rtl8139.rs +++ b/src/sys/net/nic/rtl8139.rs @@ -1,4 +1,4 @@ -use crate::sys::allocator::PhysBuf; +use crate::sys::mem::PhysBuf; use crate::sys::net::{Config, EthernetDeviceIO, Stats}; use alloc::sync::Arc; diff --git a/src/sys/process.rs b/src/sys/process.rs index fc804231..a5546878 100644 --- a/src/sys/process.rs +++ b/src/sys/process.rs @@ -431,7 +431,7 @@ impl Process { // Copy args to user memory let args_addr = self.code_addr + (self.stack_addr - self.code_addr) / 2; - sys::allocator::alloc_pages(&mut mapper, args_addr, 1). + sys::mem::alloc_pages(&mut mapper, args_addr, 1). 
expect("proc args alloc"); let args: &[&str] = unsafe { let ptr = ptr_from_addr(args_ptr as u64) as usize; @@ -508,13 +508,13 @@ impl Process { let mut mapper = self.mapper(); let size = MAX_PROC_SIZE; - sys::allocator::free_pages(&mut mapper, self.code_addr, size); + sys::mem::free_pages(&mut mapper, self.code_addr, size); let addr = USER_ADDR; match mapper.translate(VirtAddr::new(addr)) { TranslateResult::Mapped { frame: _, offset: _, flags } => { if flags.contains(PageTableFlags::USER_ACCESSIBLE) { - sys::allocator::free_pages(&mut mapper, addr, size); + sys::mem::free_pages(&mut mapper, addr, size); } } _ => {} @@ -526,7 +526,7 @@ fn load_binary( mapper: &mut OffsetPageTable, addr: u64, size: usize, buf: &[u8] ) -> Result<(), ()> { debug_assert!(size >= buf.len()); - sys::allocator::alloc_pages(mapper, addr, size)?; + sys::mem::alloc_pages(mapper, addr, size)?; let src = buf.as_ptr(); let dst = addr as *mut u8; unsafe { diff --git a/src/usr/memory.rs b/src/usr/memory.rs index 6d3b81c6..4c37d2b1 100644 --- a/src/usr/memory.rs +++ b/src/usr/memory.rs @@ -72,8 +72,8 @@ fn usage(args: &[&str]) -> Result<(), ExitCode> { } } } - let size = sys::allocator::memory_size(); - let used = sys::allocator::memory_used(); + let size = sys::mem::memory_size(); + let used = sys::mem::memory_used(); let free = size - used; let width = [size, used, free].iter().fold(0, |acc, num| core::cmp::max(acc, unit.format(*num).len())