From e425a82c9218eaccdb0713e05456fdb05f6aa249 Mon Sep 17 00:00:00 2001 From: valopok Date: Sun, 7 Sep 2025 23:54:59 +0200 Subject: [PATCH 1/7] feat(nvme): add nvme driver support --- Cargo.lock | 19 ++- Cargo.toml | 2 + src/drivers/mod.rs | 2 + src/drivers/nvme.rs | 324 +++++++++++++++++++++++++++++++++++++ src/drivers/pci.rs | 66 +++++++- src/syscalls/mod.rs | 2 + src/syscalls/nvme.rs | 368 +++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 776 insertions(+), 7 deletions(-) create mode 100644 src/drivers/nvme.rs create mode 100644 src/syscalls/nvme.rs diff --git a/Cargo.lock b/Cargo.lock index 146baf6ee9..a33ef63349 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -789,6 +789,12 @@ dependencies = [ "byteorder", ] +[[package]] +name = "hashbrown" +version = "0.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" + [[package]] name = "hashbrown" version = "0.16.0" @@ -858,7 +864,7 @@ dependencies = [ "float-cmp", "free-list", "fuse-abi", - "hashbrown", + "hashbrown 0.16.0", "heapless 0.9.1", "hermit-entry", "hermit-macro", @@ -890,6 +896,7 @@ dependencies = [ "uhyve-interface", "virtio-spec", "volatile 0.6.1", + "vroom", "x86_64", "zerocopy", ] @@ -2160,6 +2167,16 @@ dependencies = [ "syn", ] +[[package]] +name = "vroom" +version = "0.1.0" +source = "git+https://github.com/valopok/vroom?branch=main#b5e9c7f9debccc0b803a489a8177822abd183d9d" +dependencies = [ + "ahash", + "hashbrown 0.15.5", + "log", +] + [[package]] name = "wait-timeout" version = "0.2.1" diff --git a/Cargo.toml b/Cargo.toml index ffbc53bc8d..009431c42e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -63,6 +63,7 @@ mman = [] mmap = ["mman"] # Deprecated in favor of mman newlib = [] nostd = [] +nvme = ["pci", "vroom"] pci = ["virtio?/pci"] rtl8139 = ["net", "pci"] semihosting = ["dep:semihosting"] @@ -143,6 +144,7 @@ talc = { version = "4" } thiserror = { version = "2", default-features = false } time = { version = "0.3", default-features = false } volatile = "0.6" +vroom = { git = "https://github.com/valopok/vroom", branch = "main", default-features = false, optional = true } zerocopy = { version = "0.8", default-features = false } uhyve-interface = "0.1.3" diff --git a/src/drivers/mod.rs b/src/drivers/mod.rs index ab1aeebb7b..d6a58f2121 100644 --- a/src/drivers/mod.rs +++ b/src/drivers/mod.rs @@ -10,6 +10,8 @@ pub mod mmio; pub mod net; #[cfg(feature = "pci")] pub mod pci; +#[cfg(feature = "nvme")] +pub mod nvme; #[cfg(any( all( not(all(target_arch = "riscv64", feature = "gem-net", not(feature = "pci"))), diff --git a/src/drivers/nvme.rs b/src/drivers/nvme.rs new file mode 100644 index 0000000000..13e7281908 --- /dev/null +++ b/src/drivers/nvme.rs @@ -0,0 +1,324 @@ +use alloc::boxed::Box; +use alloc::vec::Vec; +use core::alloc::{Allocator, Layout}; +use core::ptr::NonNull; + +use ahash::RandomState; +use hashbrown::HashMap; +use hermit_sync::{InterruptTicketMutex, Lazy}; +use memory_addresses::VirtAddr; +use pci_types::InterruptLine; +use vroom::{Dma, IoQueuePair, IoQueuePairId, Namespace, NamespaceId, NvmeDevice}; + +use crate::arch::mm::paging::{virtual_to_physical, BasePageSize, PageSize}; +use crate::arch::pci::PciConfigRegion; +use crate::drivers::pci::PciDevice; +use crate::drivers::Driver; +use crate::mm::device_alloc::DeviceAlloc; +use crate::syscalls::nvme::SysNvmeError; + +pub(crate) struct NvmeDriver { + irq: InterruptLine, + device: InterruptTicketMutex>, + // TODO: Replace with a concurrent hashmap. 
See crate::synch::futex. + io_queue_pairs: + Lazy, RandomState>>>, +} + +impl NvmeDriver { + pub(crate) fn init(pci_device: &PciDevice) -> Result { + let allocator: NvmeAllocator = NvmeAllocator { + device_allocator: DeviceAlloc {}, + allocations: Lazy::new(|| { + InterruptTicketMutex::new(HashMap::with_hasher(RandomState::with_seeds(0, 0, 0, 0))) + }), + }; + let (virtual_address, size) = pci_device.memory_map_bar(0, true).ok_or(())?; + let nvme_device: NvmeDevice = NvmeDevice::new( + virtual_address.as_mut_ptr(), + size, + BasePageSize::SIZE as usize, + allocator, + ) + .map_err(|_| ())?; + let driver = Self { + irq: pci_device + .get_irq() + .expect("NVMe driver: Could not get irq from device."), + device: InterruptTicketMutex::new(nvme_device), + io_queue_pairs: Lazy::new(|| { + InterruptTicketMutex::new(HashMap::with_hasher(RandomState::with_seeds(0, 0, 0, 0))) + }), + }; + Ok(driver) + } + + pub(crate) fn namespace_ids(&self) -> Vec { + self.device.lock().namespace_ids() + } + + pub(crate) fn namespace(&self, namespace_id: &NamespaceId) -> Result { + self.device + .lock() + .namespace(namespace_id) + .map_err(|_| SysNvmeError::NamespaceDoesNotExist) + .copied() + } + + pub(crate) fn clear_namespace(&self, namespace_id: &NamespaceId) -> Result<(), SysNvmeError> { + self.device + .lock() + .clear_namespace(namespace_id) + .map_err(|_| SysNvmeError::CouldNotClearNamespace) + } + + pub(crate) fn maximum_transfer_size(&self) -> usize { + self.device + .lock() + .controller_information() + .maximum_transfer_size + } + + pub(crate) fn maximum_number_of_io_queue_pairs(&self) -> u16 { + self.device + .lock() + .controller_information() + .maximum_number_of_io_queue_pairs + } + + pub(crate) fn maximum_queue_entries_supported(&self) -> u32 { + self.device + .lock() + .controller_information() + .maximum_queue_entries_supported + } + + /// Creates an IO queue pair with a given number of entries for a namespace. + pub(crate) fn create_io_queue_pair( + &mut self, + namespace_id: &NamespaceId, + number_of_entries: u32, + ) -> Result { + let mut device = self.device.lock(); + if !device.namespace_ids().contains(&namespace_id) { + return Err(SysNvmeError::NamespaceDoesNotExist); + } + let mut io_queue_pairs = self.io_queue_pairs.lock(); + if io_queue_pairs.len() + >= device + .controller_information() + .maximum_number_of_io_queue_pairs + .into() + { + return Err(SysNvmeError::MaxNumberOfQueuesReached); + } + let io_queue_pair = device + .create_io_queue_pair(namespace_id, number_of_entries) + .map_err(|_| SysNvmeError::CouldNotCreateIoQueuePair)?; + let id = io_queue_pair.id(); + io_queue_pairs.insert(id, io_queue_pair); + Ok(id) + } + + /// Deletes an IO queue pair and frees its resources. 
+ pub(crate) fn delete_io_queue_pair( + &mut self, + io_queue_pair_id: IoQueuePairId, + ) -> Result<(), SysNvmeError> { + let mut device = self.device.lock(); + let io_queue_pair = self + .io_queue_pairs + .lock() + .remove(&io_queue_pair_id) + .ok_or(SysNvmeError::CouldNotFindIoQueuePair)?; + device + .delete_io_queue_pair(io_queue_pair) + .map_err(|_error| SysNvmeError::CouldNotDeleteIoQueuePair) + } + + pub(crate) fn allocate_buffer( + &self, + io_queue_pair_id: &IoQueuePairId, + number_of_elements: usize, + ) -> Result, SysNvmeError> { + let mut io_queue_pairs = self.io_queue_pairs.lock(); + let io_queue_pair = io_queue_pairs + .get_mut(io_queue_pair_id) + .ok_or(SysNvmeError::CouldNotFindIoQueuePair)?; + io_queue_pair + .allocate_buffer(number_of_elements) + .map_err(|_error| SysNvmeError::CouldNotAllocateBuffer) + } + + pub(crate) fn deallocate_buffer( + &self, + io_queue_pair_id: &IoQueuePairId, + buffer: Dma, + ) -> Result<(), SysNvmeError> { + let mut io_queue_pairs = self.io_queue_pairs.lock(); + let io_queue_pair = io_queue_pairs + .get_mut(io_queue_pair_id) + .ok_or(SysNvmeError::CouldNotFindIoQueuePair)?; + io_queue_pair + .deallocate_buffer(buffer) + .map_err(|_error| SysNvmeError::CouldNotDeallocateBuffer) + } + + /// Reads from the IO queue pair with ID `io_queue_pair_id` + /// into the `buffer` starting from the `logical_block_address`. + pub(crate) fn read_from_io_queue_pair( + &mut self, + io_queue_pair_id: &IoQueuePairId, + buffer: &mut Dma, + logical_block_address: u64, + ) -> Result<(), SysNvmeError> { + let mut io_queue_pairs = self.io_queue_pairs.lock(); + let io_queue_pair = io_queue_pairs + .get_mut(io_queue_pair_id) + .ok_or(SysNvmeError::CouldNotFindIoQueuePair)?; + io_queue_pair + .read(buffer, logical_block_address) + .map_err(|_error| SysNvmeError::CouldNotReadFromIoQueuePair)?; + Ok(()) + } + + /// Writes the `buffer` to the IO queue pair with ID `io_queue_pair_id` + /// starting from the `logical_block_address`. + pub(crate) fn write_to_io_queue_pair( + &mut self, + io_queue_pair_id: &IoQueuePairId, + buffer: &Dma, + logical_block_address: u64, + ) -> Result<(), SysNvmeError> { + let mut io_queue_pairs = self.io_queue_pairs.lock(); + let io_queue_pair = io_queue_pairs + .get_mut(io_queue_pair_id) + .ok_or(SysNvmeError::CouldNotFindIoQueuePair)?; + io_queue_pair + .write(buffer, logical_block_address) + .map_err(|_error| SysNvmeError::CouldNotWriteToIoQueuePair)?; + Ok(()) + } + + /// Submits a read command to the IO queue pair with ID `io_queue_pair_id` + /// that reads into the `buffer` starting from the `logical_block_address`. + pub(crate) fn submit_read_to_io_queue_pair( + &mut self, + io_queue_pair_id: &IoQueuePairId, + buffer: &mut Dma, + logical_block_address: u64, + ) -> Result<(), SysNvmeError> { + let mut io_queue_pairs = self.io_queue_pairs.lock(); + let io_queue_pair = io_queue_pairs + .get_mut(io_queue_pair_id) + .ok_or(SysNvmeError::CouldNotFindIoQueuePair)?; + io_queue_pair + .submit_read(buffer, logical_block_address) + .map_err(|_error| SysNvmeError::CouldNotReadFromIoQueuePair)?; + Ok(()) + } + + /// Submits a write command to the IO queue pair with ID `io_queue_pair_id` + /// that writes the `buffer` starting from the `logical_block_address`. 
+ pub(crate) fn submit_write_to_io_queue_pair( + &mut self, + io_queue_pair_id: &IoQueuePairId, + buffer: &Dma, + logical_block_address: u64, + ) -> Result<(), SysNvmeError> { + let mut io_queue_pairs = self.io_queue_pairs.lock(); + let io_queue_pair = io_queue_pairs + .get_mut(io_queue_pair_id) + .ok_or(SysNvmeError::CouldNotFindIoQueuePair)?; + io_queue_pair + .submit_write(buffer, logical_block_address) + .map_err(|_error| SysNvmeError::CouldNotReadFromIoQueuePair)?; + Ok(()) + } + + pub(crate) fn complete_io_with_io_queue_pair( + &mut self, + io_queue_pair_id: &IoQueuePairId, + ) -> Result<(), SysNvmeError> { + let mut io_queue_pairs = self.io_queue_pairs.lock(); + let io_queue_pair = io_queue_pairs + .get_mut(io_queue_pair_id) + .ok_or(SysNvmeError::CouldNotFindIoQueuePair)?; + io_queue_pair + .complete_io() + .map_err(|_error| SysNvmeError::CouldNotReadFromIoQueuePair)?; + Ok(()) + } +} + +pub(crate) struct NvmeAllocator { + pub(crate) device_allocator: DeviceAlloc, + // TODO: Replace with a concurrent hashmap. See crate::synch::futex. + pub(crate) allocations: Lazy>>, +} + +impl vroom::Allocator for NvmeAllocator { + fn allocate( + &self, + layout: core::alloc::Layout, + ) -> Result<*mut [T], Box> { + debug!("NVMe driver: allocate size {:#x}", layout.size()); + let memory = match self.device_allocator.allocate(layout) { + Err(_) => { + return Err("NVMe driver: Could not allocate memory with device allocator.".into()); + } + Ok(memory) => memory, + }; + self.allocations + .lock() + .insert(memory.as_ptr().addr(), layout); + let slice = + unsafe { core::slice::from_raw_parts_mut(memory.as_mut_ptr() as *mut T, memory.len()) }; + Ok(slice as *mut [T]) + } + + fn deallocate(&self, slice: *mut [T]) -> Result<(), Box> { + let address = slice.as_mut_ptr() as usize; + debug!("NVMe driver: deallocate address {:#X}", address); + let layout: Layout = match self.allocations.lock().remove(&address) { + None => { + return Err( + "NVMe driver: The given address did not map to an address and a layout. + This mapping should have occured during allocation." 
+ .into(), + ); + } + Some(layout) => layout, + }; + let virtual_address = unsafe { NonNull::new_unchecked(address as *mut u8) }; + unsafe { self.device_allocator.deallocate(virtual_address, layout) }; + Ok(()) + } + + fn translate_virtual_to_physical( + &self, + virtual_address: *const T, + ) -> Result<*const T, Box> { + let address = virtual_address as usize; + debug!("NVMe driver: translate virtual address {:#x}", address); + let virtual_address: VirtAddr = VirtAddr::new(address as u64); + let physical_address = + match virtual_to_physical(virtual_address) { + None => { + return Err("NVMe driver: The given virtual address could not be mapped to a physical one.".into()); + } + Some(physical_address) => physical_address, + }; + Ok(physical_address.as_usize() as *mut T) + } +} + +impl Driver for NvmeDriver { + fn get_interrupt_number(&self) -> InterruptLine { + self.irq + } + + fn get_name(&self) -> &'static str { + "nvme" + } +} diff --git a/src/drivers/pci.rs b/src/drivers/pci.rs index bf01b1b9b9..151f0d85fa 100644 --- a/src/drivers/pci.rs +++ b/src/drivers/pci.rs @@ -6,14 +6,15 @@ use core::fmt; use ahash::RandomState; use hashbrown::HashMap; -#[cfg(any(feature = "fuse", feature = "vsock", feature = "console"))] -use hermit_sync::InterruptTicketMutex; use hermit_sync::without_interrupts; +#[cfg(any(feature = "fuse", feature = "vsock", feature = "console", feature = "nvme"))] +use hermit_sync::InterruptTicketMutex; use memory_addresses::{PhysAddr, VirtAddr}; use pci_types::capability::CapabilityIterator; +use pci_types::device_type::DeviceType; use pci_types::{ Bar, CommandRegister, ConfigRegionAccess, DeviceId, EndpointHeader, InterruptLine, - InterruptPin, MAX_BARS, PciAddress, PciHeader, StatusRegister, VendorId, + InterruptPin, PciAddress, PciHeader, StatusRegister, VendorId, MAX_BARS, }; use crate::arch::pci::PciConfigRegion; @@ -30,6 +31,10 @@ use crate::drivers::net::rtl8139::{self, RTL8139Driver}; feature = "virtio-net", ))] use crate::drivers::net::virtio::VirtioNetDriver; +#[cfg(any(feature = "tcp", feature = "udp"))] +use crate::drivers::net::NetworkDriver; +#[cfg(feature = "nvme")] +use crate::drivers::nvme::NvmeDriver; #[cfg(any( all( feature = "virtio-net", @@ -248,8 +253,7 @@ impl fmt::Display for PciDevice { }; #[cfg(not(feature = "pci-ids"))] - let (class_name, vendor_name, device_name) = - ("Unknown Class", "Unknown Vendor", "Unknown Device"); + let (class_name, vendor_name, device_name) = ("Unknown Class", "Unknown Vendor", "Unknown Device"); // Output detailed readable information about this device. 
write!( @@ -341,6 +345,8 @@ pub(crate) enum PciDriver { VirtioConsole(InterruptTicketMutex), #[cfg(feature = "vsock")] VirtioVsock(InterruptTicketMutex), + #[cfg(feature = "nvme")] + Nvme(InterruptTicketMutex), } impl PciDriver { @@ -353,6 +359,15 @@ impl PciDriver { } } + #[cfg(feature = "nvme")] + fn get_nvme_driver(&self) -> Option<&InterruptTicketMutex> { + #[allow(unreachable_patterns)] + match self { + Self::Nvme(drv) => Some(drv), + _ => None, + } + } + #[cfg(feature = "vsock")] fn get_vsock_driver(&self) -> Option<&InterruptTicketMutex> { #[allow(unreachable_patterns)] @@ -372,7 +387,6 @@ impl PciDriver { } fn get_interrupt_handler(&self) -> (InterruptLine, fn()) { - #[allow(unreachable_patterns)] match self { #[cfg(feature = "vsock")] Self::VirtioVsock(drv) => { @@ -405,6 +419,12 @@ impl PciDriver { let irq_number = drv.lock().get_interrupt_number(); (irq_number, console_handler) } + #[cfg(feature = "nvme")] + Self::Nvme(drv) => { + let irq_number = drv.lock().get_interrupt_number(); + fn nvme_handler() {} + (irq_number, nvme_handler) + } _ => todo!(), } } @@ -476,6 +496,14 @@ pub(crate) fn get_console_driver() -> Option<&'static InterruptTicketMutex Option<&'static InterruptTicketMutex> { + PCI_DRIVERS + .get()? + .iter() + .find_map(|drv| drv.get_nvme_driver()) +} + #[cfg(feature = "vsock")] pub(crate) fn get_vsock_driver() -> Option<&'static InterruptTicketMutex> { PCI_DRIVERS @@ -540,6 +568,32 @@ pub(crate) fn init() { } } + #[cfg(feature = "nvme")] + for adapter in PCI_DEVICES.finalize().iter().filter(|adapter| { + let (_, class_id, subclass_id, _) = + adapter.header().revision_and_class(adapter.access()); + let device_type = DeviceType::from((class_id, subclass_id)); + device_type == DeviceType::NvmeController + }) { + info!( + "Found NVMe device with device id {:#x}", + adapter.device_id() + ); + + match NvmeDriver::init(adapter) { + Ok(nvme_driver) => { + info!("NVMe driver initialized."); + register_driver(PciDriver::Nvme(InterruptTicketMutex::new(nvme_driver))); + } + Err(()) => { + error!( + "NVMe driver could not be initialized for device: {:#x}", + adapter.device_id() + ); + } + } + } + // Searching for Realtek RTL8139, which is supported by Qemu #[cfg(all(target_arch = "x86_64", feature = "rtl8139"))] for adapter in PCI_DEVICES.finalize().iter().filter(|x| { diff --git a/src/syscalls/mod.rs b/src/syscalls/mod.rs index fc3fa592ef..4ec6b24211 100644 --- a/src/syscalls/mod.rs +++ b/src/syscalls/mod.rs @@ -45,6 +45,8 @@ mod recmutex; mod semaphore; #[cfg(any(feature = "net", feature = "vsock"))] pub mod socket; +#[cfg(feature = "nvme")] +pub(crate) mod nvme; mod spinlock; mod system; #[cfg(feature = "common-os")] diff --git a/src/syscalls/nvme.rs b/src/syscalls/nvme.rs new file mode 100644 index 0000000000..79e07e9e4e --- /dev/null +++ b/src/syscalls/nvme.rs @@ -0,0 +1,368 @@ +use vroom::{IoQueuePairId, Namespace, NamespaceId, Dma}; + +use crate::drivers::pci::get_nvme_driver; + +// TODO: error messages +#[derive(Debug)] +pub(crate) enum SysNvmeError { + ZeroPointerParameter = 1, + DeviceDoesNotExist = 2, + CouldNotIdentifyNamespaces = 3, + NamespaceDoesNotExist = 4, + MaxNumberOfQueuesReached = 5, + CouldNotCreateIoQueuePair = 6, + CouldNotDeleteIoQueuePair = 7, + CouldNotFindIoQueuePair = 8, + BufferIsZero = 9, + BufferTooBig = 10, + BufferIncorrectlySized = 11, + CouldNotAllocateMemory = 12, + CouldNotAllocateBuffer = 13, + CouldNotDeallocateBuffer = 14, + CouldNotReadFromIoQueuePair = 15, + CouldNotWriteToIoQueuePair = 16, + CouldNotClearNamespace = 17, +} + 
+#[hermit_macro::system] +#[unsafe(no_mangle)] +pub unsafe extern "C" fn sys_nvme_number_of_namespaces(result: *mut u32) -> usize { + fn inner(result: *mut u32) -> Result<(), SysNvmeError> { + if result.is_null() { + return Err(SysNvmeError::ZeroPointerParameter); + } + let result = unsafe { &mut *result }; + let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?; + let number_of_namespaces = driver.lock().namespace_ids().len() as u32; + *result = number_of_namespaces; + Ok(()) + } + match inner(result) { + Ok(()) => 0, + Err(error) => error as usize, + } +} + +#[hermit_macro::system] +#[unsafe(no_mangle)] +pub unsafe extern "C" fn sys_nvme_namespace_ids( + vec_pointer: *mut NamespaceId, + length: u32, +) -> usize { + fn inner(vec_pointer: *mut NamespaceId, length: u32) -> Result<(), SysNvmeError> { + if vec_pointer.is_null() { + return Err(SysNvmeError::ZeroPointerParameter); + } + let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?; + let namespace_ids = driver.lock().namespace_ids(); + if namespace_ids.len() != length as usize { + return Err(SysNvmeError::BufferIncorrectlySized); + } + for i in 0..length as usize { + let pointer = unsafe { vec_pointer.add(i) }; + unsafe { *pointer = namespace_ids[i].clone() }; + } + Ok(()) + } + match inner(vec_pointer, length) { + Ok(()) => 0, + Err(error) => error as usize, + } +} + +#[hermit_macro::system] +#[unsafe(no_mangle)] +pub unsafe extern "C" fn sys_nvme_namespace( + namespace_id: &NamespaceId, + result: *mut Namespace, +) -> usize { + fn inner(namespace_id: &NamespaceId, result: *mut Namespace) -> Result<(), SysNvmeError> { + if result.is_null() { + return Err(SysNvmeError::ZeroPointerParameter); + } + let result = unsafe { &mut *result }; + let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?; + let lock = driver.lock(); + let namespace = lock.namespace(namespace_id)?; + *result = namespace; + Ok(()) + } + match inner(namespace_id, result) { + Ok(()) => 0, + Err(error) => error as usize, + } +} + +#[hermit_macro::system] +#[unsafe(no_mangle)] +pub unsafe extern "C" fn sys_nvme_clear_namespace( + namespace_id: &NamespaceId, +) -> usize { + fn inner(namespace_id: &NamespaceId) -> Result<(), SysNvmeError> { + let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?; + let lock = driver.lock(); + lock.clear_namespace(namespace_id) + } + match inner(namespace_id) { + Ok(()) => 0, + Err(error) => error as usize, + } +} + +#[hermit_macro::system] +#[unsafe(no_mangle)] +pub unsafe extern "C" fn sys_nvme_maximum_transfer_size(result: *mut usize) -> usize { + fn inner(result: *mut usize) -> Result<(), SysNvmeError> { + if result.is_null() { + return Err(SysNvmeError::ZeroPointerParameter); + } + let result = unsafe { &mut *result }; + let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?; + let maximum_transfer_size = driver.lock().maximum_transfer_size(); + *result = maximum_transfer_size; + Ok(()) + } + match inner(result) { + Ok(()) => 0, + Err(error) => error as usize, + } +} + +#[hermit_macro::system] +#[unsafe(no_mangle)] +pub unsafe extern "C" fn sys_nvme_maximum_number_of_io_queue_pairs(result: *mut u16) -> usize { + fn inner(result: *mut u16) -> Result<(), SysNvmeError> { + if result.is_null() { + return Err(SysNvmeError::ZeroPointerParameter); + } + let result = unsafe { &mut *result }; + let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?; + let maximum_number_of_io_queue_pairs = driver.lock().maximum_number_of_io_queue_pairs(); + 
*result = maximum_number_of_io_queue_pairs; + Ok(()) + } + match inner(result) { + Ok(()) => 0, + Err(error) => error as usize, + } +} + +#[hermit_macro::system] +#[unsafe(no_mangle)] +pub unsafe extern "C" fn sys_nvme_maximum_queue_entries_supported(result: *mut u32) -> usize { + fn inner(result: *mut u32) -> Result<(), SysNvmeError> { + if result.is_null() { + return Err(SysNvmeError::ZeroPointerParameter); + } + let result = unsafe { &mut *result }; + let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?; + let maximum_queue_entries_supported = driver.lock().maximum_queue_entries_supported(); + *result = maximum_queue_entries_supported; + Ok(()) + } + match inner(result) { + Ok(()) => 0, + Err(error) => error as usize, + } +} + +#[hermit_macro::system] +#[unsafe(no_mangle)] +pub unsafe extern "C" fn sys_nvme_create_io_queue_pair( + namespace_id: &NamespaceId, + number_of_entries: u32, + resulting_io_queue_pair_id: *mut IoQueuePairId, +) -> usize { + fn inner( + namespace_id: &NamespaceId, + number_of_entries: u32, + resulting_io_queue_pair_id: *mut IoQueuePairId, + ) -> Result<(), SysNvmeError> { + if resulting_io_queue_pair_id.is_null() { + return Err(SysNvmeError::ZeroPointerParameter); + } + let resulting_io_queue_pair_id = unsafe { &mut *resulting_io_queue_pair_id }; + let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?; + let io_queue_pair_id = driver + .lock() + .create_io_queue_pair(namespace_id, number_of_entries)?; + *resulting_io_queue_pair_id = io_queue_pair_id; + Ok(()) + } + match inner(namespace_id, number_of_entries, resulting_io_queue_pair_id) { + Ok(()) => 0, + Err(error) => error as usize, + } +} + +#[hermit_macro::system] +#[unsafe(no_mangle)] +pub unsafe extern "C" fn sys_nvme_delete_io_queue_pair(io_queue_pair_id: IoQueuePairId) -> usize { + fn inner(io_queue_pair_id: IoQueuePairId) -> Result<(), SysNvmeError> { + let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?; + driver.lock().delete_io_queue_pair(io_queue_pair_id) + } + match inner(io_queue_pair_id) { + Ok(()) => 0, + Err(error) => error as usize, + } +} + +#[hermit_macro::system] +#[unsafe(no_mangle)] +pub unsafe extern "C" fn sys_nvme_allocate_buffer( + io_queue_pair_id: &IoQueuePairId, + size: usize, + resulting_buffer: *mut Dma, +) -> usize { + fn inner( + io_queue_pair_id: &IoQueuePairId, + number_of_elements: usize, + resulting_buffer_pointer: *mut Dma, + ) -> Result<(), SysNvmeError> { + let resulting_buffer_pointer = unsafe { &mut *resulting_buffer_pointer }; + let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?; + let buffer = driver + .lock() + .allocate_buffer(io_queue_pair_id, number_of_elements)?; + *resulting_buffer_pointer = buffer; + Ok(()) + } + match inner(io_queue_pair_id, size, resulting_buffer) { + Ok(()) => 0, + Err(error) => error as usize, + } +} + +#[hermit_macro::system] +#[unsafe(no_mangle)] +pub unsafe extern "C" fn sys_nvme_deallocate_buffer( + io_queue_pair_id: &IoQueuePairId, + buffer: *mut Dma, +) -> usize { + fn inner(io_queue_pair_id: &IoQueuePairId, buffer: *mut Dma) -> Result<(), SysNvmeError> { + core::mem::forget(buffer); + let buffer: Dma = unsafe { core::ptr::read(buffer) }; + let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?; + driver.lock().deallocate_buffer(io_queue_pair_id, buffer) + } + match inner(io_queue_pair_id, buffer) { + Ok(()) => 0, + Err(error) => error as usize, + } +} + +#[hermit_macro::system] +#[unsafe(no_mangle)] +pub unsafe extern "C" fn 
sys_nvme_read_from_io_queue_pair( + io_queue_pair_id: &IoQueuePairId, + buffer: *mut Dma, + logical_block_address: u64, +) -> usize { + fn inner( + io_queue_pair_id: &IoQueuePairId, + buffer: *mut Dma, + logical_block_address: u64, + ) -> Result<(), SysNvmeError> { + let buffer = unsafe { &mut *buffer }; + let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?; + driver + .lock() + .read_from_io_queue_pair(io_queue_pair_id, buffer, logical_block_address) + } + match inner(io_queue_pair_id, buffer, logical_block_address) { + Ok(()) => 0, + Err(error) => error as usize, + } +} + +#[hermit_macro::system] +#[unsafe(no_mangle)] +pub unsafe extern "C" fn sys_nvme_write_to_io_queue_pair( + io_queue_pair_id: &IoQueuePairId, + buffer: *const Dma, + logical_block_address: u64, +) -> usize { + fn inner( + io_queue_pair_id: &IoQueuePairId, + buffer: *const Dma, + logical_block_address: u64, + ) -> Result<(), SysNvmeError> { + let buffer = unsafe { & *buffer }; + let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?; + driver + .lock() + .write_to_io_queue_pair(io_queue_pair_id, buffer, logical_block_address) + } + match inner(io_queue_pair_id, buffer, logical_block_address) { + Ok(()) => 0, + Err(error) => error as usize, + } +} + +#[hermit_macro::system] +#[unsafe(no_mangle)] +pub unsafe extern "C" fn sys_nvme_submit_read_to_io_queue_pair( + io_queue_pair_id: &IoQueuePairId, + buffer: *mut Dma, + logical_block_address: u64, +) -> usize { + fn inner( + io_queue_pair_id: &IoQueuePairId, + buffer: *mut Dma, + logical_block_address: u64, + ) -> Result<(), SysNvmeError> { + let buffer = unsafe { &mut *buffer }; + let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?; + driver + .lock() + .submit_read_to_io_queue_pair(io_queue_pair_id, buffer, logical_block_address) + } + match inner(io_queue_pair_id, buffer, logical_block_address) { + Ok(()) => 0, + Err(error) => error as usize, + } +} + +#[hermit_macro::system] +#[unsafe(no_mangle)] +pub unsafe extern "C" fn sys_nvme_submit_write_to_io_queue_pair( + io_queue_pair_id: &IoQueuePairId, + buffer: *const Dma, + logical_block_address: u64, +) -> usize { + fn inner( + io_queue_pair_id: &IoQueuePairId, + buffer: *const Dma, + logical_block_address: u64, + ) -> Result<(), SysNvmeError> { + let buffer = unsafe { & *buffer }; + let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?; + driver + .lock() + .submit_write_to_io_queue_pair(io_queue_pair_id, buffer, logical_block_address) + } + match inner(io_queue_pair_id, buffer, logical_block_address) { + Ok(()) => 0, + Err(error) => error as usize, + } +} + +#[hermit_macro::system] +#[unsafe(no_mangle)] +pub unsafe extern "C" fn sys_nvme_complete_io_with_io_queue_pair( + io_queue_pair_id: &IoQueuePairId, +) -> usize { + fn inner( + io_queue_pair_id: &IoQueuePairId, + ) -> Result<(), SysNvmeError> { + let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?; + driver + .lock() + .complete_io_with_io_queue_pair(io_queue_pair_id) + } + match inner(io_queue_pair_id) { + Ok(()) => 0, + Err(error) => error as usize, + } +} From 43616feaa5646e3a231635f82c88ce110f971051 Mon Sep 17 00:00:00 2001 From: valopok Date: Sun, 28 Sep 2025 15:25:38 +0200 Subject: [PATCH 2/7] nvme: fix typo and formatting --- src/drivers/mod.rs | 4 ++-- src/drivers/nvme.rs | 2 +- src/drivers/pci.rs | 11 +++++++---- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/src/drivers/mod.rs b/src/drivers/mod.rs index d6a58f2121..47b73b05e5 100644 --- 
a/src/drivers/mod.rs +++ b/src/drivers/mod.rs @@ -8,10 +8,10 @@ pub mod fs; pub mod mmio; #[cfg(feature = "net")] pub mod net; -#[cfg(feature = "pci")] -pub mod pci; #[cfg(feature = "nvme")] pub mod nvme; +#[cfg(feature = "pci")] +pub mod pci; #[cfg(any( all( not(all(target_arch = "riscv64", feature = "gem-net", not(feature = "pci"))), diff --git a/src/drivers/nvme.rs b/src/drivers/nvme.rs index 13e7281908..30fb80dfe8 100644 --- a/src/drivers/nvme.rs +++ b/src/drivers/nvme.rs @@ -284,7 +284,7 @@ impl vroom::Allocator for NvmeAllocator { None => { return Err( "NVMe driver: The given address did not map to an address and a layout. - This mapping should have occured during allocation." + This mapping should have occurred during allocation." .into(), ); } diff --git a/src/drivers/pci.rs b/src/drivers/pci.rs index 151f0d85fa..3a0d35c559 100644 --- a/src/drivers/pci.rs +++ b/src/drivers/pci.rs @@ -7,10 +7,16 @@ use core::fmt; use ahash::RandomState; use hashbrown::HashMap; use hermit_sync::without_interrupts; -#[cfg(any(feature = "fuse", feature = "vsock", feature = "console", feature = "nvme"))] +#[cfg(any( + feature = "fuse", + feature = "vsock", + feature = "console", + feature = "nvme" +))] use hermit_sync::InterruptTicketMutex; use memory_addresses::{PhysAddr, VirtAddr}; use pci_types::capability::CapabilityIterator; +#[cfg(feature = "nvme")] use pci_types::device_type::DeviceType; use pci_types::{ Bar, CommandRegister, ConfigRegionAccess, DeviceId, EndpointHeader, InterruptLine, @@ -31,8 +37,6 @@ use crate::drivers::net::rtl8139::{self, RTL8139Driver}; feature = "virtio-net", ))] use crate::drivers::net::virtio::VirtioNetDriver; -#[cfg(any(feature = "tcp", feature = "udp"))] -use crate::drivers::net::NetworkDriver; #[cfg(feature = "nvme")] use crate::drivers::nvme::NvmeDriver; #[cfg(any( @@ -425,7 +429,6 @@ impl PciDriver { fn nvme_handler() {} (irq_number, nvme_handler) } - _ => todo!(), } } } From 339dbc096bd7ae7b11e335fdf799e75a67bb5da6 Mon Sep 17 00:00:00 2001 From: valopok Date: Sun, 28 Sep 2025 16:03:13 +0200 Subject: [PATCH 3/7] nvme: fix clippy warnings --- src/drivers/nvme.rs | 27 +++++++++--------------- src/syscalls/nvme.rs | 50 +++++++++++++++++++------------------------- 2 files changed, 31 insertions(+), 46 deletions(-) diff --git a/src/drivers/nvme.rs b/src/drivers/nvme.rs index 30fb80dfe8..2942a0285d 100644 --- a/src/drivers/nvme.rs +++ b/src/drivers/nvme.rs @@ -100,7 +100,7 @@ impl NvmeDriver { number_of_entries: u32, ) -> Result { let mut device = self.device.lock(); - if !device.namespace_ids().contains(&namespace_id) { + if !device.namespace_ids().contains(namespace_id) { return Err(SysNvmeError::NamespaceDoesNotExist); } let mut io_queue_pairs = self.io_queue_pairs.lock(); @@ -263,23 +263,20 @@ impl vroom::Allocator for NvmeAllocator { layout: core::alloc::Layout, ) -> Result<*mut [T], Box> { debug!("NVMe driver: allocate size {:#x}", layout.size()); - let memory = match self.device_allocator.allocate(layout) { - Err(_) => { - return Err("NVMe driver: Could not allocate memory with device allocator.".into()); - } - Ok(memory) => memory, + let Ok(memory) = self.device_allocator.allocate(layout) else { + return Err("NVMe driver: Could not allocate memory with device allocator.".into()); }; self.allocations .lock() .insert(memory.as_ptr().addr(), layout); let slice = - unsafe { core::slice::from_raw_parts_mut(memory.as_mut_ptr() as *mut T, memory.len()) }; - Ok(slice as *mut [T]) + unsafe { core::slice::from_raw_parts_mut(memory.as_mut_ptr().cast::(), memory.len()) }; + 
Ok(core::ptr::from_mut::<[T]>(slice)) } fn deallocate(&self, slice: *mut [T]) -> Result<(), Box> { let address = slice.as_mut_ptr() as usize; - debug!("NVMe driver: deallocate address {:#X}", address); + debug!("NVMe driver: deallocate address {address:#X}"); let layout: Layout = match self.allocations.lock().remove(&address) { None => { return Err( @@ -300,15 +297,11 @@ impl vroom::Allocator for NvmeAllocator { virtual_address: *const T, ) -> Result<*const T, Box> { let address = virtual_address as usize; - debug!("NVMe driver: translate virtual address {:#x}", address); + debug!("NVMe driver: translate virtual address {address:#x}"); let virtual_address: VirtAddr = VirtAddr::new(address as u64); - let physical_address = - match virtual_to_physical(virtual_address) { - None => { - return Err("NVMe driver: The given virtual address could not be mapped to a physical one.".into()); - } - Some(physical_address) => physical_address, - }; + let Some(physical_address) = virtual_to_physical(virtual_address) else { + return Err("NVMe driver: The given virtual address could not be mapped to a physical one.".into()); + }; Ok(physical_address.as_usize() as *mut T) } } diff --git a/src/syscalls/nvme.rs b/src/syscalls/nvme.rs index 79e07e9e4e..f4723d1041 100644 --- a/src/syscalls/nvme.rs +++ b/src/syscalls/nvme.rs @@ -1,4 +1,4 @@ -use vroom::{IoQueuePairId, Namespace, NamespaceId, Dma}; +use vroom::{Dma, IoQueuePairId, Namespace, NamespaceId}; use crate::drivers::pci::get_nvme_driver; @@ -7,21 +7,17 @@ use crate::drivers::pci::get_nvme_driver; pub(crate) enum SysNvmeError { ZeroPointerParameter = 1, DeviceDoesNotExist = 2, - CouldNotIdentifyNamespaces = 3, - NamespaceDoesNotExist = 4, - MaxNumberOfQueuesReached = 5, - CouldNotCreateIoQueuePair = 6, - CouldNotDeleteIoQueuePair = 7, - CouldNotFindIoQueuePair = 8, - BufferIsZero = 9, - BufferTooBig = 10, - BufferIncorrectlySized = 11, - CouldNotAllocateMemory = 12, - CouldNotAllocateBuffer = 13, - CouldNotDeallocateBuffer = 14, - CouldNotReadFromIoQueuePair = 15, - CouldNotWriteToIoQueuePair = 16, - CouldNotClearNamespace = 17, + NamespaceDoesNotExist = 3, + MaxNumberOfQueuesReached = 4, + CouldNotCreateIoQueuePair = 5, + CouldNotDeleteIoQueuePair = 6, + CouldNotFindIoQueuePair = 7, + BufferIncorrectlySized = 8, + CouldNotAllocateBuffer = 9, + CouldNotDeallocateBuffer = 10, + CouldNotReadFromIoQueuePair = 11, + CouldNotWriteToIoQueuePair = 12, + CouldNotClearNamespace = 13, } #[hermit_macro::system] @@ -58,10 +54,10 @@ pub unsafe extern "C" fn sys_nvme_namespace_ids( if namespace_ids.len() != length as usize { return Err(SysNvmeError::BufferIncorrectlySized); } - for i in 0..length as usize { + for (i, namespace_id) in namespace_ids.iter().enumerate().take(length as usize) { let pointer = unsafe { vec_pointer.add(i) }; - unsafe { *pointer = namespace_ids[i].clone() }; - } + unsafe { *pointer = namespace_id }; + } Ok(()) } match inner(vec_pointer, length) { @@ -95,13 +91,11 @@ pub unsafe extern "C" fn sys_nvme_namespace( #[hermit_macro::system] #[unsafe(no_mangle)] -pub unsafe extern "C" fn sys_nvme_clear_namespace( - namespace_id: &NamespaceId, -) -> usize { +pub unsafe extern "C" fn sys_nvme_clear_namespace(namespace_id: &NamespaceId) -> usize { fn inner(namespace_id: &NamespaceId) -> Result<(), SysNvmeError> { let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?; let lock = driver.lock(); - lock.clear_namespace(namespace_id) + lock.clear_namespace(namespace_id) } match inner(namespace_id) { Ok(()) => 0, @@ -241,7 +235,7 @@ pub unsafe 
extern "C" fn sys_nvme_deallocate_buffer( buffer: *mut Dma, ) -> usize { fn inner(io_queue_pair_id: &IoQueuePairId, buffer: *mut Dma) -> Result<(), SysNvmeError> { - core::mem::forget(buffer); + let _ = buffer; let buffer: Dma = unsafe { core::ptr::read(buffer) }; let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?; driver.lock().deallocate_buffer(io_queue_pair_id, buffer) @@ -288,7 +282,7 @@ pub unsafe extern "C" fn sys_nvme_write_to_io_queue_pair( buffer: *const Dma, logical_block_address: u64, ) -> Result<(), SysNvmeError> { - let buffer = unsafe { & *buffer }; + let buffer = unsafe { &*buffer }; let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?; driver .lock() @@ -336,7 +330,7 @@ pub unsafe extern "C" fn sys_nvme_submit_write_to_io_queue_pair( buffer: *const Dma, logical_block_address: u64, ) -> Result<(), SysNvmeError> { - let buffer = unsafe { & *buffer }; + let buffer = unsafe { &*buffer }; let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?; driver .lock() @@ -353,9 +347,7 @@ pub unsafe extern "C" fn sys_nvme_submit_write_to_io_queue_pair( pub unsafe extern "C" fn sys_nvme_complete_io_with_io_queue_pair( io_queue_pair_id: &IoQueuePairId, ) -> usize { - fn inner( - io_queue_pair_id: &IoQueuePairId, - ) -> Result<(), SysNvmeError> { + fn inner(io_queue_pair_id: &IoQueuePairId) -> Result<(), SysNvmeError> { let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?; driver .lock() From e2b4ef25dbaae32e4a8bf50c56257b995e518dcb Mon Sep 17 00:00:00 2001 From: valopok Date: Thu, 2 Oct 2025 14:22:06 +0200 Subject: [PATCH 4/7] nvme: fix formatting --- src/drivers/nvme.rs | 10 +++++++--- src/drivers/pci.rs | 7 ++++--- src/syscalls/mod.rs | 4 ++-- src/syscalls/nvme.rs | 4 ++-- 4 files changed, 15 insertions(+), 10 deletions(-) diff --git a/src/drivers/nvme.rs b/src/drivers/nvme.rs index 2942a0285d..66a07b6338 100644 --- a/src/drivers/nvme.rs +++ b/src/drivers/nvme.rs @@ -269,8 +269,9 @@ impl vroom::Allocator for NvmeAllocator { self.allocations .lock() .insert(memory.as_ptr().addr(), layout); - let slice = - unsafe { core::slice::from_raw_parts_mut(memory.as_mut_ptr().cast::(), memory.len()) }; + let slice = unsafe { + core::slice::from_raw_parts_mut(memory.as_mut_ptr().cast::(), memory.len()) + }; Ok(core::ptr::from_mut::<[T]>(slice)) } @@ -300,7 +301,10 @@ impl vroom::Allocator for NvmeAllocator { debug!("NVMe driver: translate virtual address {address:#x}"); let virtual_address: VirtAddr = VirtAddr::new(address as u64); let Some(physical_address) = virtual_to_physical(virtual_address) else { - return Err("NVMe driver: The given virtual address could not be mapped to a physical one.".into()); + return Err( + "NVMe driver: The given virtual address could not be mapped to a physical one." 
+ .into(), + ); }; Ok(physical_address.as_usize() as *mut T) } diff --git a/src/drivers/pci.rs b/src/drivers/pci.rs index 3a0d35c559..2245607ac6 100644 --- a/src/drivers/pci.rs +++ b/src/drivers/pci.rs @@ -6,7 +6,6 @@ use core::fmt; use ahash::RandomState; use hashbrown::HashMap; -use hermit_sync::without_interrupts; #[cfg(any( feature = "fuse", feature = "vsock", @@ -14,13 +13,14 @@ use hermit_sync::without_interrupts; feature = "nvme" ))] use hermit_sync::InterruptTicketMutex; +use hermit_sync::without_interrupts; use memory_addresses::{PhysAddr, VirtAddr}; use pci_types::capability::CapabilityIterator; #[cfg(feature = "nvme")] use pci_types::device_type::DeviceType; use pci_types::{ Bar, CommandRegister, ConfigRegionAccess, DeviceId, EndpointHeader, InterruptLine, - InterruptPin, PciAddress, PciHeader, StatusRegister, VendorId, MAX_BARS, + InterruptPin, MAX_BARS, PciAddress, PciHeader, StatusRegister, VendorId, }; use crate::arch::pci::PciConfigRegion; @@ -257,7 +257,8 @@ impl fmt::Display for PciDevice { }; #[cfg(not(feature = "pci-ids"))] - let (class_name, vendor_name, device_name) = ("Unknown Class", "Unknown Vendor", "Unknown Device"); + let (class_name, vendor_name, device_name) = + ("Unknown Class", "Unknown Vendor", "Unknown Device"); // Output detailed readable information about this device. write!( diff --git a/src/syscalls/mod.rs b/src/syscalls/mod.rs index 4ec6b24211..554d0116dc 100644 --- a/src/syscalls/mod.rs +++ b/src/syscalls/mod.rs @@ -39,14 +39,14 @@ mod futex; pub(crate) mod interfaces; #[cfg(feature = "mman")] mod mman; +#[cfg(feature = "nvme")] +pub(crate) mod nvme; mod processor; #[cfg(feature = "newlib")] mod recmutex; mod semaphore; #[cfg(any(feature = "net", feature = "vsock"))] pub mod socket; -#[cfg(feature = "nvme")] -pub(crate) mod nvme; mod spinlock; mod system; #[cfg(feature = "common-os")] diff --git a/src/syscalls/nvme.rs b/src/syscalls/nvme.rs index f4723d1041..6564ea2123 100644 --- a/src/syscalls/nvme.rs +++ b/src/syscalls/nvme.rs @@ -55,8 +55,8 @@ pub unsafe extern "C" fn sys_nvme_namespace_ids( return Err(SysNvmeError::BufferIncorrectlySized); } for (i, namespace_id) in namespace_ids.iter().enumerate().take(length as usize) { - let pointer = unsafe { vec_pointer.add(i) }; - unsafe { *pointer = namespace_id }; + let pointer = unsafe { vec_pointer.add(i) }; + unsafe { *pointer = *namespace_id }; } Ok(()) } From daf377de57e9083bfce9a806aaeb7f8c62cf6305 Mon Sep 17 00:00:00 2001 From: valopok Date: Thu, 2 Oct 2025 14:31:14 +0200 Subject: [PATCH 5/7] nvme: fix indentation --- src/drivers/nvme.rs | 14 +++++++------- src/drivers/pci.rs | 3 ++- src/syscalls/nvme.rs | 10 +++++----- 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/src/drivers/nvme.rs b/src/drivers/nvme.rs index 66a07b6338..026a57d123 100644 --- a/src/drivers/nvme.rs +++ b/src/drivers/nvme.rs @@ -10,10 +10,10 @@ use memory_addresses::VirtAddr; use pci_types::InterruptLine; use vroom::{Dma, IoQueuePair, IoQueuePairId, Namespace, NamespaceId, NvmeDevice}; -use crate::arch::mm::paging::{virtual_to_physical, BasePageSize, PageSize}; +use crate::arch::mm::paging::{BasePageSize, PageSize, virtual_to_physical}; use crate::arch::pci::PciConfigRegion; -use crate::drivers::pci::PciDevice; use crate::drivers::Driver; +use crate::drivers::pci::PciDevice; use crate::mm::device_alloc::DeviceAlloc; use crate::syscalls::nvme::SysNvmeError; @@ -264,7 +264,7 @@ impl vroom::Allocator for NvmeAllocator { ) -> Result<*mut [T], Box> { debug!("NVMe driver: allocate size {:#x}", layout.size()); let 
Ok(memory) = self.device_allocator.allocate(layout) else { - return Err("NVMe driver: Could not allocate memory with device allocator.".into()); + return Err("NVMe driver: Could not allocate memory with device allocator.".into()); }; self.allocations .lock() @@ -301,10 +301,10 @@ impl vroom::Allocator for NvmeAllocator { debug!("NVMe driver: translate virtual address {address:#x}"); let virtual_address: VirtAddr = VirtAddr::new(address as u64); let Some(physical_address) = virtual_to_physical(virtual_address) else { - return Err( - "NVMe driver: The given virtual address could not be mapped to a physical one." - .into(), - ); + return Err( + "NVMe driver: The given virtual address could not be mapped to a physical one." + .into(), + ); }; Ok(physical_address.as_usize() as *mut T) } diff --git a/src/drivers/pci.rs b/src/drivers/pci.rs index 2245607ac6..6a0db94d34 100644 --- a/src/drivers/pci.rs +++ b/src/drivers/pci.rs @@ -258,7 +258,7 @@ impl fmt::Display for PciDevice { #[cfg(not(feature = "pci-ids"))] let (class_name, vendor_name, device_name) = - ("Unknown Class", "Unknown Vendor", "Unknown Device"); + ("Unknown Class", "Unknown Vendor", "Unknown Device"); // Output detailed readable information about this device. write!( @@ -430,6 +430,7 @@ impl PciDriver { fn nvme_handler() {} (irq_number, nvme_handler) } + _ => todo!(), } } } diff --git a/src/syscalls/nvme.rs b/src/syscalls/nvme.rs index 6564ea2123..c869439eb6 100644 --- a/src/syscalls/nvme.rs +++ b/src/syscalls/nvme.rs @@ -54,10 +54,10 @@ pub unsafe extern "C" fn sys_nvme_namespace_ids( if namespace_ids.len() != length as usize { return Err(SysNvmeError::BufferIncorrectlySized); } - for (i, namespace_id) in namespace_ids.iter().enumerate().take(length as usize) { - let pointer = unsafe { vec_pointer.add(i) }; - unsafe { *pointer = *namespace_id }; - } + for (i, namespace_id) in namespace_ids.iter().enumerate().take(length as usize) { + let pointer = unsafe { vec_pointer.add(i) }; + unsafe { *pointer = *namespace_id }; + } Ok(()) } match inner(vec_pointer, length) { @@ -235,7 +235,7 @@ pub unsafe extern "C" fn sys_nvme_deallocate_buffer( buffer: *mut Dma, ) -> usize { fn inner(io_queue_pair_id: &IoQueuePairId, buffer: *mut Dma) -> Result<(), SysNvmeError> { - let _ = buffer; + let _ = buffer; let buffer: Dma = unsafe { core::ptr::read(buffer) }; let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?; driver.lock().deallocate_buffer(io_queue_pair_id, buffer) From f68482166ab074a716f383e70b3b5f821c5a635d Mon Sep 17 00:00:00 2001 From: valopok Date: Thu, 2 Oct 2025 14:38:33 +0200 Subject: [PATCH 6/7] nvme: fix indentation, unreachable pattern warning and typos --- src/arch/x86_64/mm/paging.rs | 2 +- src/drivers/nvme.rs | 2 +- src/drivers/pci.rs | 3 ++- src/drivers/virtio/virtqueue/packed.rs | 4 ++-- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/src/arch/x86_64/mm/paging.rs b/src/arch/x86_64/mm/paging.rs index 109770afc4..b270fb6eba 100644 --- a/src/arch/x86_64/mm/paging.rs +++ b/src/arch/x86_64/mm/paging.rs @@ -113,7 +113,7 @@ pub fn virtual_to_physical(virtual_address: VirtAddr) -> Option { match translate_result { TranslateResult::NotMapped | TranslateResult::InvalidFrameAddress(_) => { - trace!("Uable to determine the physical address of 0x{virtual_address:X}"); + trace!("Unable to determine the physical address of 0x{virtual_address:X}"); None } TranslateResult::Mapped { frame, offset, .. 
} => { diff --git a/src/drivers/nvme.rs b/src/drivers/nvme.rs index 026a57d123..088ef32ef2 100644 --- a/src/drivers/nvme.rs +++ b/src/drivers/nvme.rs @@ -305,7 +305,7 @@ impl vroom::Allocator for NvmeAllocator { "NVMe driver: The given virtual address could not be mapped to a physical one." .into(), ); - }; + }; Ok(physical_address.as_usize() as *mut T) } } diff --git a/src/drivers/pci.rs b/src/drivers/pci.rs index 6a0db94d34..b47400ef98 100644 --- a/src/drivers/pci.rs +++ b/src/drivers/pci.rs @@ -430,7 +430,8 @@ impl PciDriver { fn nvme_handler() {} (irq_number, nvme_handler) } - _ => todo!(), + #[allow(unreachable_patterns)] + _ => todo!(), } } } diff --git a/src/drivers/virtio/virtqueue/packed.rs b/src/drivers/virtio/virtqueue/packed.rs index ec7aa994b7..4b7391519e 100644 --- a/src/drivers/virtio/virtqueue/packed.rs +++ b/src/drivers/virtio/virtqueue/packed.rs @@ -74,7 +74,7 @@ impl WrapCount { WrapCount(true) } - /// Toogles a given wrap count to respectiver other value. + /// Toggles a given wrap count to respectiver other value. /// /// If WrapCount(true) returns WrapCount(false), /// if WrapCount(false) returns WrapCount(true). @@ -470,7 +470,7 @@ impl DrvNotif { } impl DevNotif { - /// Enables the notificication capability for a specific buffer. + /// Enables the notification capability for a specific buffer. pub fn enable_notif_specific(&mut self) { self.f_notif_idx = true; } From 32757ce06caefd931c2e0af53a3198e7c681c731 Mon Sep 17 00:00:00 2001 From: valopok Date: Thu, 2 Oct 2025 14:44:23 +0200 Subject: [PATCH 7/7] nvme: add nvme feature to init_cell.rs --- src/init_cell.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/init_cell.rs b/src/init_cell.rs index 1dba68e44e..19fb448475 100644 --- a/src/init_cell.rs +++ b/src/init_cell.rs @@ -1,5 +1,10 @@ #![cfg_attr( - not(any(feature = "vsock", feature = "fuse", feature = "console")), + not(any( + feature = "vsock", + feature = "fuse", + feature = "console", + feature = "nvme" + )), expect(dead_code) )]
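
For reference, the calling convention introduced by this series can be exercised from an application roughly as sketched below. This is a minimal, hand-written illustration and not part of the patch set: it assumes an application linked against a kernel built with the `nvme` feature, and it declares the exported `sys_nvme_*` symbols by hand. Only the three queries with primitive out-parameters are shown; every syscall in the series returns `0` on success or a `SysNvmeError` discriminant (e.g. `2` for `DeviceDoesNotExist`) otherwise.

```rust
// Illustrative userspace sketch (not part of the patch series).
// Assumes the kernel exports these symbols when built with the `nvme` feature.
unsafe extern "C" {
    fn sys_nvme_number_of_namespaces(result: *mut u32) -> usize;
    fn sys_nvme_maximum_transfer_size(result: *mut usize) -> usize;
    fn sys_nvme_maximum_number_of_io_queue_pairs(result: *mut u16) -> usize;
}

fn main() {
    let mut namespaces: u32 = 0;
    let mut max_transfer: usize = 0;
    let mut max_queue_pairs: u16 = 0;

    // 0 means success; any other value is a `SysNvmeError` discriminant,
    // e.g. 2 (`DeviceDoesNotExist`) when no NVMe controller was found on the PCI bus.
    let status = unsafe { sys_nvme_number_of_namespaces(&mut namespaces) };
    if status != 0 {
        eprintln!("sys_nvme_number_of_namespaces failed with error code {status}");
        return;
    }

    // The remaining queries follow the same return convention.
    unsafe {
        sys_nvme_maximum_transfer_size(&mut max_transfer);
        sys_nvme_maximum_number_of_io_queue_pairs(&mut max_queue_pairs);
    }

    println!("NVMe: {namespaces} namespace(s), maximum transfer size {max_transfer}, up to {max_queue_pairs} IO queue pairs");
}
```

The same pattern extends to the queue-pair and buffer syscalls (`sys_nvme_create_io_queue_pair`, `sys_nvme_allocate_buffer`, the read/write and submit/complete calls), which additionally pass the `NamespaceId`, `IoQueuePairId`, and `Dma` types re-exported from `vroom`.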