diff --git a/src/sys/allocator.rs b/src/sys/allocator.rs
deleted file mode 100644
index 6e6b9cd96..000000000
--- a/src/sys/allocator.rs
+++ /dev/null
@@ -1,223 +0,0 @@
-use crate::sys;
-
-use alloc::slice::SliceIndex;
-use alloc::sync::Arc;
-use alloc::vec;
-use alloc::vec::Vec;
-use core::cmp;
-use core::ops::{Index, IndexMut};
-use linked_list_allocator::LockedHeap;
-use spin::Mutex;
-use x86_64::structures::paging::{
-    mapper::MapToError, page::PageRangeInclusive,
-    FrameAllocator, Mapper, OffsetPageTable, Page, PageTableFlags, Size4KiB,
-};
-use x86_64::VirtAddr;
-
-#[cfg_attr(not(feature = "userspace"), global_allocator)]
-static ALLOCATOR: LockedHeap = LockedHeap::empty();
-
-pub const HEAP_START: u64 = 0x4444_4444_0000;
-
-fn max_memory() -> u64 {
-    // Default to 32 MB
-    option_env!("MOROS_MEMORY").unwrap_or("32").parse::<u64>().unwrap() << 20
-}
-
-pub fn init_heap() -> Result<(), MapToError<Size4KiB>> {
-    let mapper = sys::mem::mapper();
-    let mut frame_allocator = sys::mem::frame_allocator();
-
-    // Use half of the memory for the heap caped to 16 MB by default
-    // because the allocator is slow.
-    let heap_size = cmp::min(sys::mem::memory_size(), max_memory()) / 2;
-    let heap_start = VirtAddr::new(HEAP_START);
-    sys::process::init_process_addr(HEAP_START + heap_size);
-
-    let pages = {
-        let heap_end = heap_start + heap_size - 1u64;
-        let heap_start_page = Page::containing_address(heap_start);
-        let heap_end_page = Page::containing_address(heap_end);
-        Page::range_inclusive(heap_start_page, heap_end_page)
-    };
-
-    let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE;
-
-    for page in pages {
-        let err = MapToError::FrameAllocationFailed;
-        let frame = frame_allocator.allocate_frame().ok_or(err)?;
-        unsafe {
-            mapper.map_to(page, frame, flags, &mut frame_allocator)?.flush();
-        }
-    }
-
-    unsafe {
-        ALLOCATOR.lock().init(heap_start.as_mut_ptr(), heap_size as usize);
-    }
-
-    Ok(())
-}
-
-pub fn alloc_pages(
-    mapper: &mut OffsetPageTable, addr: u64, size: usize
-) -> Result<(), ()> {
-    let size = size.saturating_sub(1) as u64;
-    let mut frame_allocator = sys::mem::frame_allocator();
-
-    let pages = {
-        let start_page = Page::containing_address(VirtAddr::new(addr));
-        let end_page = Page::containing_address(VirtAddr::new(addr + size));
-        Page::range_inclusive(start_page, end_page)
-    };
-
-    let flags = PageTableFlags::PRESENT
-        | PageTableFlags::WRITABLE
-        | PageTableFlags::USER_ACCESSIBLE;
-
-    for page in pages {
-        if let Some(frame) = frame_allocator.allocate_frame() {
-            let res = unsafe {
-                mapper.map_to(page, frame, flags, &mut frame_allocator)
-            };
-            if let Ok(mapping) = res {
-                //debug!("Mapped {:?} to {:?}", page, frame);
-                mapping.flush();
-            } else {
-                debug!("Could not map {:?} to {:?}", page, frame);
-                if let Ok(old_frame) = mapper.translate_page(page) {
-                    debug!("Already mapped to {:?}", old_frame);
-                }
-                return Err(());
-            }
-        } else {
-            debug!("Could not allocate frame for {:?}", page);
-            return Err(());
-        }
-    }
-
-    Ok(())
-}
-
-// TODO: Replace `free` by `dealloc`
-pub fn free_pages(mapper: &mut OffsetPageTable, addr: u64, size: usize) {
-    let size = size.saturating_sub(1) as u64;
-
-    let pages: PageRangeInclusive = {
-        let start_page = Page::containing_address(VirtAddr::new(addr));
-        let end_page = Page::containing_address(VirtAddr::new(addr + size));
-        Page::range_inclusive(start_page, end_page)
-    };
-
-    for page in pages {
-        if let Ok((_, mapping)) = mapper.unmap(page) {
-            mapping.flush();
-        } else {
-            //debug!("Could not unmap {:?}", page);
-        }
-    }
-}
-
-#[derive(Clone)]
-pub struct PhysBuf {
-    buf: Arc<Mutex<Vec<u8>>>,
-}
-
-impl PhysBuf {
-    pub fn new(len: usize) -> Self {
-        Self::from(vec![0; len])
-    }
-
-    // Realloc vec until it uses a chunk of contiguous physical memory
-    fn from(vec: Vec<u8>) -> Self {
-        let buffer_end = vec.len() - 1;
-        let memory_end = phys_addr(&vec[buffer_end]) - phys_addr(&vec[0]);
-        if buffer_end == memory_end as usize {
-            Self {
-                buf: Arc::new(Mutex::new(vec)),
-            }
-        } else {
-            Self::from(vec.clone()) // Clone vec and try again
-        }
-    }
-
-    pub fn addr(&self) -> u64 {
-        phys_addr(&self.buf.lock()[0])
-    }
-}
-
-pub fn phys_addr(ptr: *const u8) -> u64 {
-    let virt_addr = VirtAddr::new(ptr as u64);
-    let phys_addr = sys::mem::virt_to_phys(virt_addr).unwrap();
-    phys_addr.as_u64()
-}
-
-impl<I: SliceIndex<[u8]>> Index<I> for PhysBuf {
-    type Output = I::Output;
-
-    #[inline]
-    fn index(&self, index: I) -> &Self::Output {
-        Index::index(&**self, index)
-    }
-}
-
-impl<I: SliceIndex<[u8]>> IndexMut<I> for PhysBuf {
-    #[inline]
-    fn index_mut(&mut self, index: I) -> &mut Self::Output {
-        IndexMut::index_mut(&mut **self, index)
-    }
-}
-
-impl core::ops::Deref for PhysBuf {
-    type Target = [u8];
-
-    fn deref(&self) -> &[u8] {
-        let vec = self.buf.lock();
-        unsafe { alloc::slice::from_raw_parts(vec.as_ptr(), vec.len()) }
-    }
-}
-
-impl core::ops::DerefMut for PhysBuf {
-    fn deref_mut(&mut self) -> &mut [u8] {
-        let mut vec = self.buf.lock();
-        unsafe {
-            alloc::slice::from_raw_parts_mut(vec.as_mut_ptr(), vec.len())
-        }
-    }
-}
-
-pub fn memory_size() -> usize {
-    ALLOCATOR.lock().size()
-}
-
-pub fn memory_used() -> usize {
-    ALLOCATOR.lock().used()
-}
-
-pub fn memory_free() -> usize {
-    ALLOCATOR.lock().free()
-}
-
-#[test_case]
-fn many_boxes() {
-    use alloc::boxed::Box;
-
-    let heap_value_1 = Box::new(42);
-    let heap_value_2 = Box::new(1337);
-    assert_eq!(*heap_value_1, 42);
-    assert_eq!(*heap_value_2, 1337);
-
-    for i in 0..1000 {
-        let x = Box::new(i);
-        assert_eq!(*x, i);
-    }
-}
-
-#[test_case]
-fn large_vec() {
-    let n = 1000;
-    let mut vec = Vec::new();
-    for i in 0..n {
-        vec.push(i);
-    }
-    assert_eq!(vec.iter().sum::<u64>(), (n - 1) * n / 2);
-}
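// Aside, not part of the patch: a minimal sketch of the inclusive page-range
// arithmetic used by `init_heap` (deleted above, recreated in heap.rs below).
// Plain integers stand in for the x86_64 crate's Page type; `heap_pages` and
// PAGE_SIZE are hypothetical names for illustration.
const PAGE_SIZE: u64 = 4096;

fn heap_pages(heap_start: u64, heap_size: u64) -> u64 {
    let first = heap_start / PAGE_SIZE;
    // Subtracting 1 keeps the range inclusive without mapping one extra page
    // when the size is an exact multiple of the page size.
    let last = (heap_start + heap_size - 1) / PAGE_SIZE;
    last - first + 1
}

fn main() {
    // A 16 MB heap starting at HEAP_START maps exactly 4096 pages of 4 KiB.
    assert_eq!(heap_pages(0x4444_4444_0000, 16 << 20), 4096);
}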
diff --git a/src/sys/fs/block_device.rs b/src/sys/fs/block_device.rs
index 2a09f6d94..184eb86a1 100644
--- a/src/sys/fs/block_device.rs
+++ b/src/sys/fs/block_device.rs
@@ -86,7 +86,7 @@ impl BlockDeviceIO for MemBlockDevice {
 }
 
 pub fn mount_mem() {
-    let mem = sys::allocator::memory_size() / 2; // Half the allocatable memory
+    let mem = sys::mem::memory_free() / 2;
     let len = mem / super::BLOCK_SIZE; // TODO: take a size argument
     let dev = MemBlockDevice::new(len);
     *BLOCK_DEVICE.lock() = Some(BlockDevice::Mem(dev));
diff --git a/src/sys/idt.rs b/src/sys/idt.rs
index 58bdda768..2509413e6 100644
--- a/src/sys/idt.rs
+++ b/src/sys/idt.rs
@@ -138,7 +138,7 @@ extern "x86-interrupt" fn page_fault_handler(
     };
 
     if error_code.contains(PageFaultErrorCode::CAUSED_BY_WRITE) {
-        if sys::allocator::alloc_pages(&mut mapper, addr, 1).is_err() {
+        if sys::mem::alloc_pages(&mut mapper, addr, 1).is_err() {
             printk!(
                 "{}Error:{} Could not allocate page at {:#X}\n",
                 csi_color, csi_reset, addr
@@ -154,7 +154,7 @@ extern "x86-interrupt" fn page_fault_handler(
         // longer a simple clone of the kernel page table. Currently a process
         // is executed from its kernel address that is shared with the process.
         let start = (addr / 4096) * 4096;
-        if sys::allocator::alloc_pages(&mut mapper, start, 4096).is_ok() {
+        if sys::mem::alloc_pages(&mut mapper, start, 4096).is_ok() {
             if sys::process::is_userspace(start) {
                 let code_addr = sys::process::code_addr();
                 let src = (code_addr + start) as *mut u8;
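// Aside, not part of the patch: the alignment step in the page fault handler
// above. `(addr / 4096) * 4096` rounds a faulting address down to its page
// boundary; for a power-of-two page size a bit mask is equivalent.
// `page_align_down` is a hypothetical helper for illustration.
fn page_align_down(addr: u64) -> u64 {
    addr & !4095 // clear the low 12 bits
}

fn main() {
    assert_eq!(page_align_down(0x1234), 0x1000);
    assert_eq!(page_align_down(0x1234), (0x1234 / 4096) * 4096);
}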
diff --git a/src/sys/mem.rs b/src/sys/mem.rs
deleted file mode 100644
index db980f3f5..000000000
--- a/src/sys/mem.rs
+++ /dev/null
@@ -1,120 +0,0 @@
-use crate::sys;
-use bootloader::bootinfo::{BootInfo, MemoryMap, MemoryRegionType};
-use core::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
-use x86_64::instructions::interrupts;
-use x86_64::registers::control::Cr3;
-use x86_64::structures::paging::{
-    FrameAllocator, OffsetPageTable, PageTable, PhysFrame, Size4KiB, Translate,
-};
-use x86_64::{PhysAddr, VirtAddr};
-
-pub static mut PHYS_MEM_OFFSET: Option<u64> = None;
-pub static mut MEMORY_MAP: Option<&MemoryMap> = None;
-pub static mut MAPPER: Option<OffsetPageTable<'static>> = None;
-
-pub static MEMORY_SIZE: AtomicU64 = AtomicU64::new(0);
-
-static ALLOCATED_FRAMES: AtomicUsize = AtomicUsize::new(0);
-
-pub fn init(boot_info: &'static BootInfo) {
-    interrupts::without_interrupts(|| {
-        let mut memory_size = 0;
-        for region in boot_info.memory_map.iter() {
-            let start_addr = region.range.start_addr();
-            let end_addr = region.range.end_addr();
-            memory_size += end_addr - start_addr;
-            log!(
-                "MEM [{:#016X}-{:#016X}] {:?}",
-                start_addr, end_addr - 1, region.region_type
-            );
-        }
-        log!("MEM {} KB", memory_size >> 10);
-        MEMORY_SIZE.store(memory_size, Ordering::Relaxed);
-
-        let phys_mem_offset = boot_info.physical_memory_offset;
-
-        unsafe { PHYS_MEM_OFFSET.replace(phys_mem_offset) };
-        unsafe { MEMORY_MAP.replace(&boot_info.memory_map) };
-        unsafe {
-            MAPPER.replace(OffsetPageTable::new(
-                active_page_table(),
-                VirtAddr::new(phys_mem_offset),
-            ))
-        };
-
-        sys::allocator::init_heap().expect("heap initialization failed");
-    });
-}
-
-pub fn mapper() -> &'static mut OffsetPageTable<'static> {
-    unsafe { sys::mem::MAPPER.as_mut().unwrap() }
-}
-
-pub fn memory_size() -> u64 {
-    MEMORY_SIZE.load(Ordering::Relaxed)
-}
-
-pub fn phys_to_virt(addr: PhysAddr) -> VirtAddr {
-    let phys_mem_offset = unsafe {
-        PHYS_MEM_OFFSET.unwrap()
-    };
-    VirtAddr::new(addr.as_u64() + phys_mem_offset)
-}
-
-pub fn virt_to_phys(addr: VirtAddr) -> Option<PhysAddr> {
-    mapper().translate_addr(addr)
-}
-
-pub unsafe fn active_page_table() -> &'static mut PageTable {
-    let (frame, _) = Cr3::read();
-    let phys_addr = frame.start_address();
-    let virt_addr = phys_to_virt(phys_addr);
-    let page_table_ptr: *mut PageTable = virt_addr.as_mut_ptr();
-    &mut *page_table_ptr // unsafe
-}
-
-pub unsafe fn create_page_table(frame: PhysFrame) -> &'static mut PageTable {
-    let phys_addr = frame.start_address();
-    let virt_addr = phys_to_virt(phys_addr);
-    let page_table_ptr: *mut PageTable = virt_addr.as_mut_ptr();
-    &mut *page_table_ptr // unsafe
-}
-
-pub struct BootInfoFrameAllocator {
-    memory_map: &'static MemoryMap,
-}
-
-impl BootInfoFrameAllocator {
-    pub unsafe fn init(memory_map: &'static MemoryMap) -> Self {
-        BootInfoFrameAllocator { memory_map }
-    }
-
-    fn usable_frames(&self) -> impl Iterator<Item = PhysFrame> {
-        let regions = self.memory_map.iter();
-        let usable_regions = regions.filter(|r|
-            r.region_type == MemoryRegionType::Usable
-        );
-        let addr_ranges = usable_regions.map(|r|
-            r.range.start_addr()..r.range.end_addr()
-        );
-        let frame_addresses = addr_ranges.flat_map(|r|
-            r.step_by(4096)
-        );
-        frame_addresses.map(|addr|
-            PhysFrame::containing_address(PhysAddr::new(addr))
-        )
-    }
-}
-
-unsafe impl FrameAllocator<Size4KiB> for BootInfoFrameAllocator {
-    fn allocate_frame(&mut self) -> Option<PhysFrame> {
-        let next = ALLOCATED_FRAMES.fetch_add(1, Ordering::SeqCst);
-        // FIXME: When the heap is larger than a few megabytes,
-        // creating an iterator for each allocation become very slow.
-        self.usable_frames().nth(next)
-    }
-}
-
-pub fn frame_allocator() -> BootInfoFrameAllocator {
-    unsafe { BootInfoFrameAllocator::init(MEMORY_MAP.unwrap()) }
-}
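// Aside, not part of the patch: why the FIXME in `allocate_frame` above
// matters. Rebuilding the usable-frames iterator and calling `nth(next)` on
// every allocation makes the n-th allocation walk n frames, so n allocations
// cost roughly n^2/2 steps. A host-side sketch with a hypothetical frame list:
fn main() {
    let frames: Vec<u64> = (0..10_000u64).map(|i| i * 4096).collect();
    let mut allocated = 0;
    let mut steps = 0u64;
    for _ in 0..1_000 {
        // Each allocation restarts the walk from the first frame.
        let frame = frames.iter().inspect(|_| steps += 1).nth(allocated);
        assert!(frame.is_some());
        allocated += 1;
    }
    // Prints 500500 steps for 1000 allocations; a persistent cursor would
    // need only 1000.
    println!("{} iterator steps for {} allocations", steps, allocated);
}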
diff --git a/src/sys/mem/heap.rs b/src/sys/mem/heap.rs
new file mode 100644
index 000000000..1d4931dbe
--- /dev/null
+++ b/src/sys/mem/heap.rs
@@ -0,0 +1,91 @@
+use crate::sys;
+
+use core::cmp;
+use linked_list_allocator::LockedHeap;
+use x86_64::structures::paging::{
+    mapper::MapToError, FrameAllocator, Mapper, Page, PageTableFlags, Size4KiB
+};
+use x86_64::VirtAddr;
+
+#[cfg_attr(not(feature = "userspace"), global_allocator)]
+static ALLOCATOR: LockedHeap = LockedHeap::empty();
+
+pub const HEAP_START: u64 = 0x4444_4444_0000;
+
+pub fn init_heap() -> Result<(), MapToError<Size4KiB>> {
+    let mapper = super::mapper();
+    let mut frame_allocator = super::frame_allocator();
+
+    // Use half of the memory for the heap, capped at 16 MB by default,
+    // because the allocator is slow.
+    let heap_size = (cmp::min(super::memory_size(), heap_max()) / 2) as u64;
+    let heap_start = VirtAddr::new(HEAP_START);
+    sys::process::init_process_addr(HEAP_START + heap_size);
+
+    let pages = {
+        let heap_end = heap_start + heap_size - 1u64;
+        let heap_start_page = Page::containing_address(heap_start);
+        let heap_end_page = Page::containing_address(heap_end);
+        Page::range_inclusive(heap_start_page, heap_end_page)
+    };
+
+    let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE;
+
+    for page in pages {
+        let err = MapToError::FrameAllocationFailed;
+        let frame = frame_allocator.allocate_frame().ok_or(err)?;
+        unsafe {
+            mapper.map_to(page, frame, flags, &mut frame_allocator)?.flush();
+        }
+    }
+
+    unsafe {
+        ALLOCATOR.lock().init(heap_start.as_mut_ptr(), heap_size as usize);
+    }
+
+    Ok(())
+}
+
+fn heap_max() -> usize {
+    // Default to 32 MB
+    option_env!("MOROS_MEMORY").unwrap_or("32").parse::<usize>().unwrap() << 20
+}
+
+pub fn heap_size() -> usize {
+    ALLOCATOR.lock().size()
+}
+
+pub fn heap_used() -> usize {
+    ALLOCATOR.lock().used()
+}
+
+pub fn heap_free() -> usize {
+    ALLOCATOR.lock().free()
+}
+
+#[test_case]
+fn many_boxes() {
+    use alloc::boxed::Box;
+
+    let heap_value_1 = Box::new(42);
+    let heap_value_2 = Box::new(1337);
+    assert_eq!(*heap_value_1, 42);
+    assert_eq!(*heap_value_2, 1337);
+
+    for i in 0..1000 {
+        let x = Box::new(i);
+        assert_eq!(*x, i);
+    }
+}
+
+#[test_case]
+fn large_vec() {
+    use alloc::vec::Vec;
+
+    let n = 1000;
+    let mut vec = Vec::new();
+    for i in 0..n {
+        vec.push(i);
+    }
+    assert_eq!(vec.iter().sum::<u64>(), (n - 1) * n / 2);
+}
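// Aside, not part of the patch: the compile-time configuration pattern used
// by `heap_max` above. `option_env!` is expanded when the kernel is built,
// so MOROS_MEMORY must be set in the build environment, not at boot time.
// `max_memory_mb` is a hypothetical stand-in for illustration.
fn max_memory_mb() -> usize {
    option_env!("MOROS_MEMORY").unwrap_or("32").parse::<usize>().unwrap()
}

fn main() {
    // Without MOROS_MEMORY in the environment at compile time: 32 MB.
    println!("{} bytes", max_memory_mb() << 20);
}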
diff --git a/src/sys/mem/mod.rs b/src/sys/mem/mod.rs
new file mode 100644
index 000000000..45c16e7bf
--- /dev/null
+++ b/src/sys/mem/mod.rs
@@ -0,0 +1,137 @@
+mod heap;
+mod paging;
+mod phys;
+
+pub use paging::{alloc_pages, free_pages, active_page_table, create_page_table};
+pub use phys::{phys_addr, PhysBuf};
+
+use crate::sys;
+use bootloader::bootinfo::{BootInfo, MemoryMap, MemoryRegionType};
+use core::sync::atomic::{AtomicUsize, Ordering};
+use x86_64::structures::paging::{
+    FrameAllocator, OffsetPageTable, PhysFrame, Size4KiB, Translate,
+};
+use x86_64::{PhysAddr, VirtAddr};
+
+pub static mut PHYS_MEM_OFFSET: Option<u64> = None;
+static mut MEMORY_MAP: Option<&MemoryMap> = None;
+static mut MAPPER: Option<OffsetPageTable<'static>> = None;
+static MEMORY_SIZE: AtomicUsize = AtomicUsize::new(0);
+static ALLOCATED_FRAMES: AtomicUsize = AtomicUsize::new(0);
+
+pub fn init(boot_info: &'static BootInfo) {
+    // Keep the timer interrupt for accurate boot time measurement, but mask
+    // the keyboard interrupt, which would otherwise create a panic if a key
+    // is pressed during memory allocation.
+    sys::idt::set_irq_mask(1);
+
+    let mut memory_size = 0;
+    let mut last_end_addr = 0;
+    for region in boot_info.memory_map.iter() {
+        let start_addr = region.range.start_addr();
+        let end_addr = region.range.end_addr();
+        let size = end_addr - start_addr;
+        let hole = start_addr - last_end_addr;
+        if hole > 0 {
+            log!(
+                "MEM [{:#016X}-{:#016X}] {}", // "({} KB)"
+                last_end_addr, start_addr - 1, "Unmapped" //, hole >> 10
+            );
+        }
+        log!(
+            "MEM [{:#016X}-{:#016X}] {:?}", // "({} KB)"
+            start_addr, end_addr - 1, region.region_type //, size >> 10
+        );
+        memory_size += size;
+        last_end_addr = end_addr;
+    }
+
+    // 0x000000000A0000-0x000000000EFFFF: + 320 KB of BIOS memory
+    // 0x000000FEFFC000-0x000000FEFFFFFF: - 256 KB of virtual memory
+    // 0x000000FFFC0000-0x000000FFFFFFFF: -  16 KB of virtual memory
+    memory_size += (320 - 256 - 16) << 10;
+
+    log!("RAM {} MB", memory_size >> 20);
+    MEMORY_SIZE.store(memory_size as usize, Ordering::Relaxed);
+
+    let phys_mem_offset = boot_info.physical_memory_offset;
+
+    unsafe { PHYS_MEM_OFFSET.replace(phys_mem_offset) };
+    unsafe { MEMORY_MAP.replace(&boot_info.memory_map) };
+    unsafe {
+        MAPPER.replace(OffsetPageTable::new(
+            paging::active_page_table(),
+            VirtAddr::new(phys_mem_offset),
+        ))
+    };
+
+    heap::init_heap().expect("heap initialization failed");
+
+    sys::idt::clear_irq_mask(1);
+}
+
+pub fn mapper() -> &'static mut OffsetPageTable<'static> {
+    unsafe { MAPPER.as_mut().unwrap() }
+}
+
+pub fn memory_size() -> usize {
+    MEMORY_SIZE.load(Ordering::Relaxed)
+}
+
+pub fn memory_used() -> usize {
+    (memory_size() - heap::heap_size()) + heap::heap_used()
+}
+
+pub fn memory_free() -> usize {
+    heap::heap_free()
+}
+
+pub fn phys_to_virt(addr: PhysAddr) -> VirtAddr {
+    let phys_mem_offset = unsafe {
+        PHYS_MEM_OFFSET.unwrap()
+    };
+    VirtAddr::new(addr.as_u64() + phys_mem_offset)
+}
+
+pub fn virt_to_phys(addr: VirtAddr) -> Option<PhysAddr> {
+    mapper().translate_addr(addr)
+}
+
+pub struct BootInfoFrameAllocator {
+    memory_map: &'static MemoryMap,
+}
+
+impl BootInfoFrameAllocator {
+    pub unsafe fn init(memory_map: &'static MemoryMap) -> Self {
+        BootInfoFrameAllocator { memory_map }
+    }
+
+    fn usable_frames(&self) -> impl Iterator<Item = PhysFrame> {
+        let regions = self.memory_map.iter();
+        let usable_regions = regions.filter(|r|
+            r.region_type == MemoryRegionType::Usable
+        );
+        let addr_ranges = usable_regions.map(|r|
+            r.range.start_addr()..r.range.end_addr()
+        );
+        let frame_addresses = addr_ranges.flat_map(|r|
+            r.step_by(4096)
+        );
+        frame_addresses.map(|addr|
+            PhysFrame::containing_address(PhysAddr::new(addr))
+        )
+    }
+}
+
+unsafe impl FrameAllocator<Size4KiB> for BootInfoFrameAllocator {
+    fn allocate_frame(&mut self) -> Option<PhysFrame> {
+        let next = ALLOCATED_FRAMES.fetch_add(1, Ordering::SeqCst);
+        // FIXME: When the heap is larger than a few megabytes, creating an
+        // iterator for each allocation becomes very slow.
+        self.usable_frames().nth(next)
+    }
+}
+
+pub fn frame_allocator() -> BootInfoFrameAllocator {
+    unsafe { BootInfoFrameAllocator::init(MEMORY_MAP.unwrap()) }
+}
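// Aside, not part of the patch: the accounting behind `memory_used` above.
// Everything outside the heap is treated as permanently used, so
// used = (total - heap_size) + heap_used. Hypothetical figures:
fn memory_used(total: usize, heap_size: usize, heap_used: usize) -> usize {
    (total - heap_size) + heap_used
}

fn main() {
    let total = 32 << 20; // 32 MB of RAM
    let heap = 16 << 20;  // half of it given to the heap
    assert_eq!(memory_used(total, heap, 0), 16 << 20); // idle: kernel only
    assert_eq!(memory_used(total, heap, heap), total); // heap exhausted
}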
diff --git a/src/sys/mem/paging.rs b/src/sys/mem/paging.rs
new file mode 100644
index 000000000..85b4cfec4
--- /dev/null
+++ b/src/sys/mem/paging.rs
@@ -0,0 +1,81 @@
+use x86_64::registers::control::Cr3;
+use x86_64::structures::paging::{
+    page::PageRangeInclusive,
+    OffsetPageTable, PageTable, PhysFrame, Size4KiB,
+    Page, PageTableFlags, Mapper, FrameAllocator,
+};
+use x86_64::VirtAddr;
+
+pub unsafe fn active_page_table() -> &'static mut PageTable {
+    let (frame, _) = Cr3::read();
+    let phys_addr = frame.start_address();
+    let virt_addr = super::phys_to_virt(phys_addr);
+    let page_table_ptr: *mut PageTable = virt_addr.as_mut_ptr();
+    &mut *page_table_ptr // unsafe
+}
+
+pub unsafe fn create_page_table(frame: PhysFrame) -> &'static mut PageTable {
+    let phys_addr = frame.start_address();
+    let virt_addr = super::phys_to_virt(phys_addr);
+    let page_table_ptr: *mut PageTable = virt_addr.as_mut_ptr();
+    &mut *page_table_ptr // unsafe
+}
+
+pub fn alloc_pages(
+    mapper: &mut OffsetPageTable, addr: u64, size: usize
+) -> Result<(), ()> {
+    let size = size.saturating_sub(1) as u64;
+    let mut frame_allocator = super::frame_allocator();
+
+    let pages = {
+        let start_page = Page::containing_address(VirtAddr::new(addr));
+        let end_page = Page::containing_address(VirtAddr::new(addr + size));
+        Page::range_inclusive(start_page, end_page)
+    };
+
+    let flags = PageTableFlags::PRESENT
+        | PageTableFlags::WRITABLE
+        | PageTableFlags::USER_ACCESSIBLE;
+
+    for page in pages {
+        if let Some(frame) = frame_allocator.allocate_frame() {
+            let res = unsafe {
+                mapper.map_to(page, frame, flags, &mut frame_allocator)
+            };
+            if let Ok(mapping) = res {
+                //debug!("Mapped {:?} to {:?}", page, frame);
+                mapping.flush();
+            } else {
+                debug!("Could not map {:?} to {:?}", page, frame);
+                if let Ok(old_frame) = mapper.translate_page(page) {
+                    debug!("Already mapped to {:?}", old_frame);
+                }
+                return Err(());
+            }
+        } else {
+            debug!("Could not allocate frame for {:?}", page);
+            return Err(());
+        }
+    }
+
+    Ok(())
+}
+
+// TODO: Replace `free` by `dealloc`
+pub fn free_pages(mapper: &mut OffsetPageTable, addr: u64, size: usize) {
+    let size = size.saturating_sub(1) as u64;
+
+    let pages: PageRangeInclusive = {
+        let start_page = Page::containing_address(VirtAddr::new(addr));
+        let end_page = Page::containing_address(VirtAddr::new(addr + size));
+        Page::range_inclusive(start_page, end_page)
+    };
+
+    for page in pages {
+        if let Ok((_, mapping)) = mapper.unmap(page) {
+            mapping.flush();
+        } else {
+            //debug!("Could not unmap {:?}", page);
+        }
+    }
+}
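// Aside, not part of the patch: `active_page_table` and `create_page_table`
// above rely on the bootloader mapping all physical memory at a fixed
// virtual offset, so physical-to-virtual translation is a single addition.
// PHYS_MEM_OFFSET here is a hypothetical example value, not the real one.
const PHYS_MEM_OFFSET: u64 = 0x4000_0000_0000;

fn phys_to_virt(phys: u64) -> u64 {
    phys + PHYS_MEM_OFFSET
}

fn main() {
    let cr3 = 0x1000; // example physical address of the level 4 page table
    println!("active page table mapped at {:#x}", phys_to_virt(cr3));
}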
diff --git a/src/sys/mem/phys.rs b/src/sys/mem/phys.rs
new file mode 100644
index 000000000..c3f8b596c
--- /dev/null
+++ b/src/sys/mem/phys.rs
@@ -0,0 +1,75 @@
+use alloc::slice::SliceIndex;
+use alloc::sync::Arc;
+use alloc::vec;
+use alloc::vec::Vec;
+use core::ops::{Index, IndexMut};
+use spin::Mutex;
+use x86_64::VirtAddr;
+
+#[derive(Clone)]
+pub struct PhysBuf {
+    buf: Arc<Mutex<Vec<u8>>>,
+}
+
+impl PhysBuf {
+    pub fn new(len: usize) -> Self {
+        Self::from(vec![0; len])
+    }
+
+    // Realloc vec until it uses a chunk of contiguous physical memory
+    fn from(vec: Vec<u8>) -> Self {
+        let buffer_end = vec.len() - 1;
+        let memory_end = phys_addr(&vec[buffer_end]) - phys_addr(&vec[0]);
+        if buffer_end == memory_end as usize {
+            Self {
+                buf: Arc::new(Mutex::new(vec)),
+            }
+        } else {
+            Self::from(vec.clone()) // Clone vec and try again
+        }
+    }
+
+    pub fn addr(&self) -> u64 {
+        phys_addr(&self.buf.lock()[0])
+    }
+}
+
+impl<I: SliceIndex<[u8]>> Index<I> for PhysBuf {
+    type Output = I::Output;
+
+    #[inline]
+    fn index(&self, index: I) -> &Self::Output {
+        Index::index(&**self, index)
+    }
+}
+
+impl<I: SliceIndex<[u8]>> IndexMut<I> for PhysBuf {
+    #[inline]
+    fn index_mut(&mut self, index: I) -> &mut Self::Output {
+        IndexMut::index_mut(&mut **self, index)
+    }
+}
+
+impl core::ops::Deref for PhysBuf {
+    type Target = [u8];
+
+    fn deref(&self) -> &[u8] {
+        let vec = self.buf.lock();
+        unsafe { alloc::slice::from_raw_parts(vec.as_ptr(), vec.len()) }
+    }
+}
+
+impl core::ops::DerefMut for PhysBuf {
+    fn deref_mut(&mut self) -> &mut [u8] {
+        let mut vec = self.buf.lock();
+        unsafe {
+            alloc::slice::from_raw_parts_mut(vec.as_mut_ptr(), vec.len())
+        }
+    }
+}
+
+pub fn phys_addr(ptr: *const u8) -> u64 {
+    let virt_addr = VirtAddr::new(ptr as u64);
+    let phys_addr = super::virt_to_phys(virt_addr).unwrap();
+    phys_addr.as_u64()
+}
diff --git a/src/sys/mod.rs b/src/sys/mod.rs
index bdd5acf19..aef358947 100644
--- a/src/sys/mod.rs
+++ b/src/sys/mod.rs
@@ -38,7 +38,6 @@ macro_rules! log {
 }
 
 pub mod acpi;
-pub mod allocator;
 pub mod ata;
 pub mod clock;
 pub mod cmos;
diff --git a/src/sys/net/nic/e1000.rs b/src/sys/net/nic/e1000.rs
index e70111e55..27ebb0223 100644
--- a/src/sys/net/nic/e1000.rs
+++ b/src/sys/net/nic/e1000.rs
@@ -1,5 +1,5 @@
 use crate::sys;
-use crate::sys::allocator::PhysBuf;
+use crate::sys::mem::PhysBuf;
 use crate::sys::net::{EthernetDeviceIO, Config, Stats};
 
 use spin::Mutex;
@@ -209,7 +209,7 @@ impl Device {
         }
 
         let ptr = ptr::addr_of!(rx_descs[0]) as *const u8;
-        let phys_addr = sys::allocator::phys_addr(ptr);
+        let phys_addr = sys::mem::phys_addr(ptr);
 
         // Ring address and length
         self.write(REG_RDBAL, phys_addr.get_bits(0..32) as u32);
@@ -235,7 +235,7 @@ impl Device {
         }
 
         let ptr = ptr::addr_of!(tx_descs[0]) as *const _;
-        let phys_addr = sys::allocator::phys_addr(ptr);
+        let phys_addr = sys::mem::phys_addr(ptr);
 
         // Ring address and length
        self.write(REG_TDBAL, phys_addr.get_bits(0..32) as u32);
@@ -352,7 +352,7 @@ impl Device {
         let rx_descs = self.rx_descs.lock();
         for i in 0..RX_BUFFERS_COUNT {
             let ptr = ptr::addr_of!(rx_descs[i]) as *const u8;
-            let phy = sys::allocator::phys_addr(ptr);
+            let phy = sys::mem::phys_addr(ptr);
             debug!(
                 "NET E1000: [{}] {:?} ({:#X} -> {:#X})",
                 i, rx_descs[i], ptr as u64, phy
@@ -363,7 +363,7 @@ impl Device {
         let tx_descs = self.tx_descs.lock();
         for i in 0..TX_BUFFERS_COUNT {
             let ptr = ptr::addr_of!(tx_descs[i]) as *const u8;
-            let phy = sys::allocator::phys_addr(ptr);
+            let phy = sys::mem::phys_addr(ptr);
             debug!(
                 "NET E1000: [{}] {:?} ({:#X} -> {:#X})",
                 i, tx_descs[i], ptr as u64, phy
diff --git a/src/sys/net/nic/pcnet.rs b/src/sys/net/nic/pcnet.rs
index 0b82ef973..e971d75f0 100644
--- a/src/sys/net/nic/pcnet.rs
+++ b/src/sys/net/nic/pcnet.rs
@@ -1,5 +1,5 @@
 use crate::sys;
-use crate::sys::allocator::PhysBuf;
+use crate::sys::mem::PhysBuf;
 use crate::sys::net::{Config, EthernetDeviceIO, Stats};
 
 use alloc::sync::Arc;
diff --git a/src/sys/net/nic/rtl8139.rs b/src/sys/net/nic/rtl8139.rs
index 802d8c23f..c81f2fd79 100644
--- a/src/sys/net/nic/rtl8139.rs
+++ b/src/sys/net/nic/rtl8139.rs
@@ -1,4 +1,4 @@
-use crate::sys::allocator::PhysBuf;
+use crate::sys::mem::PhysBuf;
 use crate::sys::net::{Config, EthernetDeviceIO, Stats};
 
 use alloc::sync::Arc;
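// Aside, not part of the patch: the NIC drivers above use `PhysBuf` for DMA
// rings because the hardware needs physically contiguous memory. The check
// in `PhysBuf::from` works because a buffer is physically contiguous iff the
// physical distance between its first and last bytes equals len - 1.
// `phys_addr` is stubbed with an identity mapping for illustration.
fn phys_addr(ptr: *const u8) -> u64 {
    ptr as u64 // hypothetical identity mapping
}

fn is_phys_contiguous(buf: &[u8]) -> bool {
    let end = buf.len() - 1;
    phys_addr(&buf[end]) - phys_addr(&buf[0]) == end as u64
}

fn main() {
    let buf = vec![0u8; 4096];
    assert!(is_phys_contiguous(&buf)); // trivially true under an identity map
}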
diff --git a/src/sys/process.rs b/src/sys/process.rs
index fc8042317..a55468781 100644
--- a/src/sys/process.rs
+++ b/src/sys/process.rs
@@ -431,7 +431,7 @@ impl Process {
 
         // Copy args to user memory
         let args_addr = self.code_addr + (self.stack_addr - self.code_addr) / 2;
-        sys::allocator::alloc_pages(&mut mapper, args_addr, 1).
+        sys::mem::alloc_pages(&mut mapper, args_addr, 1).
             expect("proc args alloc");
         let args: &[&str] = unsafe {
             let ptr = ptr_from_addr(args_ptr as u64) as usize;
@@ -508,13 +508,13 @@ impl Process {
         let mut mapper = self.mapper();
         let size = MAX_PROC_SIZE;
 
-        sys::allocator::free_pages(&mut mapper, self.code_addr, size);
+        sys::mem::free_pages(&mut mapper, self.code_addr, size);
         let addr = USER_ADDR;
         match mapper.translate(VirtAddr::new(addr)) {
             TranslateResult::Mapped { frame: _, offset: _, flags } => {
                 if flags.contains(PageTableFlags::USER_ACCESSIBLE) {
-                    sys::allocator::free_pages(&mut mapper, addr, size);
+                    sys::mem::free_pages(&mut mapper, addr, size);
                 }
             }
             _ => {}
         }
@@ -526,7 +526,7 @@ fn load_binary(
     mapper: &mut OffsetPageTable, addr: u64, size: usize, buf: &[u8]
 ) -> Result<(), ()> {
     debug_assert!(size >= buf.len());
-    sys::allocator::alloc_pages(mapper, addr, size)?;
+    sys::mem::alloc_pages(mapper, addr, size)?;
     let src = buf.as_ptr();
     let dst = addr as *mut u8;
     unsafe {
diff --git a/src/usr/memory.rs b/src/usr/memory.rs
index 6d3b81c6f..4c37d2b18 100644
--- a/src/usr/memory.rs
+++ b/src/usr/memory.rs
@@ -72,8 +72,8 @@ fn usage(args: &[&str]) -> Result<(), ExitCode> {
             }
         }
     }
-    let size = sys::allocator::memory_size();
-    let used = sys::allocator::memory_used();
+    let size = sys::mem::memory_size();
+    let used = sys::mem::memory_used();
     let free = size - used;
     let width = [size, used, free].iter().fold(0, |acc, num|
         core::cmp::max(acc, unit.format(*num).len())