diff --git a/Cargo.lock b/Cargo.lock
index 146baf6ee9..a33ef63349 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -789,6 +789,12 @@ dependencies = [
  "byteorder",
 ]
 
+[[package]]
+name = "hashbrown"
+version = "0.15.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1"
+
 [[package]]
 name = "hashbrown"
 version = "0.16.0"
@@ -858,7 +864,7 @@ dependencies = [
  "float-cmp",
  "free-list",
  "fuse-abi",
- "hashbrown",
+ "hashbrown 0.16.0",
  "heapless 0.9.1",
  "hermit-entry",
  "hermit-macro",
@@ -890,6 +896,7 @@ dependencies = [
  "uhyve-interface",
  "virtio-spec",
  "volatile 0.6.1",
+ "vroom",
  "x86_64",
  "zerocopy",
 ]
@@ -2160,6 +2167,16 @@ dependencies = [
  "syn",
 ]
 
+[[package]]
+name = "vroom"
+version = "0.1.0"
+source = "git+https://github.com/valopok/vroom?branch=main#b5e9c7f9debccc0b803a489a8177822abd183d9d"
+dependencies = [
+ "ahash",
+ "hashbrown 0.15.5",
+ "log",
+]
+
 [[package]]
 name = "wait-timeout"
 version = "0.2.1"
diff --git a/Cargo.toml b/Cargo.toml
index ffbc53bc8d..009431c42e 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -63,6 +63,7 @@ mman = []
 mmap = ["mman"] # Deprecated in favor of mman
 newlib = []
 nostd = []
+nvme = ["pci", "vroom"]
 pci = ["virtio?/pci"]
 rtl8139 = ["net", "pci"]
 semihosting = ["dep:semihosting"]
@@ -143,6 +144,7 @@ talc = { version = "4" }
 thiserror = { version = "2", default-features = false }
 time = { version = "0.3", default-features = false }
 volatile = "0.6"
+vroom = { git = "https://github.com/valopok/vroom", branch = "main", default-features = false, optional = true }
 zerocopy = { version = "0.8", default-features = false }
 
 uhyve-interface = "0.1.3"
diff --git a/src/arch/x86_64/mm/paging.rs b/src/arch/x86_64/mm/paging.rs
index 109770afc4..b270fb6eba 100644
--- a/src/arch/x86_64/mm/paging.rs
+++ b/src/arch/x86_64/mm/paging.rs
@@ -113,7 +113,7 @@ pub fn virtual_to_physical(virtual_address: VirtAddr) -> Option<PhysAddr> {
 
 	match translate_result {
 		TranslateResult::NotMapped | TranslateResult::InvalidFrameAddress(_) => {
-			trace!("Uable to determine the physical address of 0x{virtual_address:X}");
+			trace!("Unable to determine the physical address of 0x{virtual_address:X}");
 			None
 		}
 		TranslateResult::Mapped { frame, offset, .. } => {
diff --git a/src/drivers/mod.rs b/src/drivers/mod.rs
index ab1aeebb7b..47b73b05e5 100644
--- a/src/drivers/mod.rs
+++ b/src/drivers/mod.rs
@@ -8,6 +8,8 @@ pub mod fs;
 pub mod mmio;
 #[cfg(feature = "net")]
 pub mod net;
+#[cfg(feature = "nvme")]
+pub mod nvme;
 #[cfg(feature = "pci")]
 pub mod pci;
 #[cfg(any(
diff --git a/src/drivers/nvme.rs b/src/drivers/nvme.rs
new file mode 100644
index 0000000000..088ef32ef2
--- /dev/null
+++ b/src/drivers/nvme.rs
@@ -0,0 +1,321 @@
+use alloc::boxed::Box;
+use alloc::vec::Vec;
+use core::alloc::{Allocator, Layout};
+use core::ptr::NonNull;
+
+use ahash::RandomState;
+use hashbrown::HashMap;
+use hermit_sync::{InterruptTicketMutex, Lazy};
+use memory_addresses::VirtAddr;
+use pci_types::InterruptLine;
+use vroom::{Dma, IoQueuePair, IoQueuePairId, Namespace, NamespaceId, NvmeDevice};
+
+use crate::arch::mm::paging::{BasePageSize, PageSize, virtual_to_physical};
+use crate::arch::pci::PciConfigRegion;
+use crate::drivers::Driver;
+use crate::drivers::pci::PciDevice;
+use crate::mm::device_alloc::DeviceAlloc;
+use crate::syscalls::nvme::SysNvmeError;
+
+pub(crate) struct NvmeDriver {
+	irq: InterruptLine,
+	device: InterruptTicketMutex<NvmeDevice<NvmeAllocator>>,
+	// TODO: Replace with a concurrent hashmap. See crate::synch::futex.
+	io_queue_pairs:
+		Lazy<InterruptTicketMutex<HashMap<IoQueuePairId, IoQueuePair<NvmeAllocator>, RandomState>>>,
+}
+
+impl NvmeDriver {
+	pub(crate) fn init(pci_device: &PciDevice<PciConfigRegion>) -> Result<Self, ()> {
+		let allocator: NvmeAllocator = NvmeAllocator {
+			device_allocator: DeviceAlloc {},
+			allocations: Lazy::new(|| {
+				InterruptTicketMutex::new(HashMap::with_hasher(RandomState::with_seeds(0, 0, 0, 0)))
+			}),
+		};
+		let (virtual_address, size) = pci_device.memory_map_bar(0, true).ok_or(())?;
+		let nvme_device: NvmeDevice<NvmeAllocator> = NvmeDevice::new(
+			virtual_address.as_mut_ptr(),
+			size,
+			BasePageSize::SIZE as usize,
+			allocator,
+		)
+		.map_err(|_| ())?;
+		let driver = Self {
+			irq: pci_device
+				.get_irq()
+				.expect("NVMe driver: Could not get irq from device."),
+			device: InterruptTicketMutex::new(nvme_device),
+			io_queue_pairs: Lazy::new(|| {
+				InterruptTicketMutex::new(HashMap::with_hasher(RandomState::with_seeds(0, 0, 0, 0)))
+			}),
+		};
+		Ok(driver)
+	}
+
+	pub(crate) fn namespace_ids(&self) -> Vec<NamespaceId> {
+		self.device.lock().namespace_ids()
+	}
+
+	pub(crate) fn namespace(&self, namespace_id: &NamespaceId) -> Result<Namespace, SysNvmeError> {
+		self.device
+			.lock()
+			.namespace(namespace_id)
+			.map_err(|_| SysNvmeError::NamespaceDoesNotExist)
+			.copied()
+	}
+
+	pub(crate) fn clear_namespace(&self, namespace_id: &NamespaceId) -> Result<(), SysNvmeError> {
+		self.device
+			.lock()
+			.clear_namespace(namespace_id)
+			.map_err(|_| SysNvmeError::CouldNotClearNamespace)
+	}
+
+	pub(crate) fn maximum_transfer_size(&self) -> usize {
+		self.device
+			.lock()
+			.controller_information()
+			.maximum_transfer_size
+	}
+
+	pub(crate) fn maximum_number_of_io_queue_pairs(&self) -> u16 {
+		self.device
+			.lock()
+			.controller_information()
+			.maximum_number_of_io_queue_pairs
+	}
+
+	pub(crate) fn maximum_queue_entries_supported(&self) -> u32 {
+		self.device
+			.lock()
+			.controller_information()
+			.maximum_queue_entries_supported
+	}
+
+	/// Creates an IO queue pair with a given number of entries for a namespace.
+	pub(crate) fn create_io_queue_pair(
+		&mut self,
+		namespace_id: &NamespaceId,
+		number_of_entries: u32,
+	) -> Result<IoQueuePairId, SysNvmeError> {
+		let mut device = self.device.lock();
+		if !device.namespace_ids().contains(namespace_id) {
+			return Err(SysNvmeError::NamespaceDoesNotExist);
+		}
+		let mut io_queue_pairs = self.io_queue_pairs.lock();
+		if io_queue_pairs.len()
+			>= device
+				.controller_information()
+				.maximum_number_of_io_queue_pairs
+				.into()
+		{
+			return Err(SysNvmeError::MaxNumberOfQueuesReached);
+		}
+		let io_queue_pair = device
+			.create_io_queue_pair(namespace_id, number_of_entries)
+			.map_err(|_| SysNvmeError::CouldNotCreateIoQueuePair)?;
+		let id = io_queue_pair.id();
+		io_queue_pairs.insert(id, io_queue_pair);
+		Ok(id)
+	}
+
+	/// Deletes an IO queue pair and frees its resources.
+	pub(crate) fn delete_io_queue_pair(
+		&mut self,
+		io_queue_pair_id: IoQueuePairId,
+	) -> Result<(), SysNvmeError> {
+		let mut device = self.device.lock();
+		let io_queue_pair = self
+			.io_queue_pairs
+			.lock()
+			.remove(&io_queue_pair_id)
+			.ok_or(SysNvmeError::CouldNotFindIoQueuePair)?;
+		device
+			.delete_io_queue_pair(io_queue_pair)
+			.map_err(|_error| SysNvmeError::CouldNotDeleteIoQueuePair)
+	}
+
+	pub(crate) fn allocate_buffer(
+		&self,
+		io_queue_pair_id: &IoQueuePairId,
+		number_of_elements: usize,
+	) -> Result<Dma<u8>, SysNvmeError> {
+		let mut io_queue_pairs = self.io_queue_pairs.lock();
+		let io_queue_pair = io_queue_pairs
+			.get_mut(io_queue_pair_id)
+			.ok_or(SysNvmeError::CouldNotFindIoQueuePair)?;
+		io_queue_pair
+			.allocate_buffer(number_of_elements)
+			.map_err(|_error| SysNvmeError::CouldNotAllocateBuffer)
+	}
+
+	pub(crate) fn deallocate_buffer(
+		&self,
+		io_queue_pair_id: &IoQueuePairId,
+		buffer: Dma<u8>,
+	) -> Result<(), SysNvmeError> {
+		let mut io_queue_pairs = self.io_queue_pairs.lock();
+		let io_queue_pair = io_queue_pairs
+			.get_mut(io_queue_pair_id)
+			.ok_or(SysNvmeError::CouldNotFindIoQueuePair)?;
+		io_queue_pair
+			.deallocate_buffer(buffer)
+			.map_err(|_error| SysNvmeError::CouldNotDeallocateBuffer)
+	}
+
+	/// Reads from the IO queue pair with ID `io_queue_pair_id`
+	/// into the `buffer` starting from the `logical_block_address`.
+	pub(crate) fn read_from_io_queue_pair(
+		&mut self,
+		io_queue_pair_id: &IoQueuePairId,
+		buffer: &mut Dma<u8>,
+		logical_block_address: u64,
+	) -> Result<(), SysNvmeError> {
+		let mut io_queue_pairs = self.io_queue_pairs.lock();
+		let io_queue_pair = io_queue_pairs
+			.get_mut(io_queue_pair_id)
+			.ok_or(SysNvmeError::CouldNotFindIoQueuePair)?;
+		io_queue_pair
+			.read(buffer, logical_block_address)
+			.map_err(|_error| SysNvmeError::CouldNotReadFromIoQueuePair)?;
+		Ok(())
+	}
+
+	/// Writes the `buffer` to the IO queue pair with ID `io_queue_pair_id`
+	/// starting from the `logical_block_address`.
+	pub(crate) fn write_to_io_queue_pair(
+		&mut self,
+		io_queue_pair_id: &IoQueuePairId,
+		buffer: &Dma<u8>,
+		logical_block_address: u64,
+	) -> Result<(), SysNvmeError> {
+		let mut io_queue_pairs = self.io_queue_pairs.lock();
+		let io_queue_pair = io_queue_pairs
+			.get_mut(io_queue_pair_id)
+			.ok_or(SysNvmeError::CouldNotFindIoQueuePair)?;
+		io_queue_pair
+			.write(buffer, logical_block_address)
+			.map_err(|_error| SysNvmeError::CouldNotWriteToIoQueuePair)?;
+		Ok(())
+	}
+
+	/// Submits a read command to the IO queue pair with ID `io_queue_pair_id`
+	/// that reads into the `buffer` starting from the `logical_block_address`.
+	pub(crate) fn submit_read_to_io_queue_pair(
+		&mut self,
+		io_queue_pair_id: &IoQueuePairId,
+		buffer: &mut Dma<u8>,
+		logical_block_address: u64,
+	) -> Result<(), SysNvmeError> {
+		let mut io_queue_pairs = self.io_queue_pairs.lock();
+		let io_queue_pair = io_queue_pairs
+			.get_mut(io_queue_pair_id)
+			.ok_or(SysNvmeError::CouldNotFindIoQueuePair)?;
+		io_queue_pair
+			.submit_read(buffer, logical_block_address)
+			.map_err(|_error| SysNvmeError::CouldNotReadFromIoQueuePair)?;
+		Ok(())
+	}
+
+	/// Submits a write command to the IO queue pair with ID `io_queue_pair_id`
+	/// that writes the `buffer` starting from the `logical_block_address`.
+	pub(crate) fn submit_write_to_io_queue_pair(
+		&mut self,
+		io_queue_pair_id: &IoQueuePairId,
+		buffer: &Dma<u8>,
+		logical_block_address: u64,
+	) -> Result<(), SysNvmeError> {
+		let mut io_queue_pairs = self.io_queue_pairs.lock();
+		let io_queue_pair = io_queue_pairs
+			.get_mut(io_queue_pair_id)
+			.ok_or(SysNvmeError::CouldNotFindIoQueuePair)?;
+		io_queue_pair
+			.submit_write(buffer, logical_block_address)
+			.map_err(|_error| SysNvmeError::CouldNotWriteToIoQueuePair)?;
+		Ok(())
+	}
+
+	pub(crate) fn complete_io_with_io_queue_pair(
+		&mut self,
+		io_queue_pair_id: &IoQueuePairId,
+	) -> Result<(), SysNvmeError> {
+		let mut io_queue_pairs = self.io_queue_pairs.lock();
+		let io_queue_pair = io_queue_pairs
+			.get_mut(io_queue_pair_id)
+			.ok_or(SysNvmeError::CouldNotFindIoQueuePair)?;
+		io_queue_pair
+			.complete_io()
+			.map_err(|_error| SysNvmeError::CouldNotReadFromIoQueuePair)?;
+		Ok(())
+	}
+}
+
+pub(crate) struct NvmeAllocator {
+	pub(crate) device_allocator: DeviceAlloc,
+	// TODO: Replace with a concurrent hashmap. See crate::synch::futex.
+	pub(crate) allocations: Lazy<InterruptTicketMutex<HashMap<usize, Layout, RandomState>>>,
+}
+
+impl vroom::Allocator for NvmeAllocator {
+	fn allocate<T>(
+		&self,
+		layout: core::alloc::Layout,
+	) -> Result<*mut [T], Box<dyn core::error::Error>> {
+		debug!("NVMe driver: allocate size {:#x}", layout.size());
+		let Ok(memory) = self.device_allocator.allocate(layout) else {
+			return Err("NVMe driver: Could not allocate memory with device allocator.".into());
+		};
+		self.allocations
+			.lock()
+			.insert(memory.as_ptr().addr(), layout);
+		let slice = unsafe {
+			core::slice::from_raw_parts_mut(memory.as_mut_ptr().cast::<T>(), memory.len())
+		};
+		Ok(core::ptr::from_mut::<[T]>(slice))
+	}
+
+	fn deallocate<T>(&self, slice: *mut [T]) -> Result<(), Box<dyn core::error::Error>> {
+		let address = slice.as_mut_ptr() as usize;
+		debug!("NVMe driver: deallocate address {address:#X}");
+		let layout: Layout = match self.allocations.lock().remove(&address) {
+			None => {
+				return Err(
+					"NVMe driver: The given address did not map to an address and a layout.
+					This mapping should have occurred during allocation."
+						.into(),
+				);
+			}
+			Some(layout) => layout,
+		};
+		let virtual_address = unsafe { NonNull::new_unchecked(address as *mut u8) };
+		unsafe { self.device_allocator.deallocate(virtual_address, layout) };
+		Ok(())
+	}
+
+	fn translate_virtual_to_physical<T>(
+		&self,
+		virtual_address: *const T,
+	) -> Result<*const T, Box<dyn core::error::Error>> {
+		let address = virtual_address as usize;
+		debug!("NVMe driver: translate virtual address {address:#x}");
+		let virtual_address: VirtAddr = VirtAddr::new(address as u64);
+		let Some(physical_address) = virtual_to_physical(virtual_address) else {
+			return Err(
+				"NVMe driver: The given virtual address could not be mapped to a physical one."
+					.into(),
+			);
+		};
+		Ok(physical_address.as_usize() as *mut T)
+	}
+}
+
+impl Driver for NvmeDriver {
+	fn get_interrupt_number(&self) -> InterruptLine {
+		self.irq
+	}
+
+	fn get_name(&self) -> &'static str {
+		"nvme"
+	}
+}
diff --git a/src/drivers/pci.rs b/src/drivers/pci.rs
index bf01b1b9b9..b47400ef98 100644
--- a/src/drivers/pci.rs
+++ b/src/drivers/pci.rs
@@ -6,11 +6,18 @@ use core::fmt;
 
 use ahash::RandomState;
 use hashbrown::HashMap;
-#[cfg(any(feature = "fuse", feature = "vsock", feature = "console"))]
+#[cfg(any(
+	feature = "fuse",
+	feature = "vsock",
+	feature = "console",
+	feature = "nvme"
+))]
 use hermit_sync::InterruptTicketMutex;
 use hermit_sync::without_interrupts;
 use memory_addresses::{PhysAddr, VirtAddr};
 use pci_types::capability::CapabilityIterator;
+#[cfg(feature = "nvme")]
+use pci_types::device_type::DeviceType;
 use pci_types::{
 	Bar, CommandRegister, ConfigRegionAccess, DeviceId, EndpointHeader, InterruptLine,
 	InterruptPin, MAX_BARS, PciAddress, PciHeader, StatusRegister, VendorId,
@@ -30,6 +37,8 @@ use crate::drivers::net::rtl8139::{self, RTL8139Driver};
 	feature = "virtio-net",
 ))]
 use crate::drivers::net::virtio::VirtioNetDriver;
+#[cfg(feature = "nvme")]
+use crate::drivers::nvme::NvmeDriver;
 #[cfg(any(
 	all(
 		feature = "virtio-net",
@@ -341,6 +350,8 @@ pub(crate) enum PciDriver {
 	VirtioConsole(InterruptTicketMutex<VirtioConsoleDriver>),
 	#[cfg(feature = "vsock")]
 	VirtioVsock(InterruptTicketMutex<VirtioVsockDriver>),
+	#[cfg(feature = "nvme")]
+	Nvme(InterruptTicketMutex<NvmeDriver>),
 }
 
 impl PciDriver {
@@ -353,6 +364,15 @@ impl PciDriver {
 		}
 	}
 
+	#[cfg(feature = "nvme")]
+	fn get_nvme_driver(&self) -> Option<&InterruptTicketMutex<NvmeDriver>> {
+		#[allow(unreachable_patterns)]
+		match self {
+			Self::Nvme(drv) => Some(drv),
+			_ => None,
+		}
+	}
+
 	#[cfg(feature = "vsock")]
 	fn get_vsock_driver(&self) -> Option<&InterruptTicketMutex<VirtioVsockDriver>> {
 		#[allow(unreachable_patterns)]
 		match self {
@@ -372,7 +392,6 @@ impl PciDriver {
 	}
 
 	fn get_interrupt_handler(&self) -> (InterruptLine, fn()) {
-		#[allow(unreachable_patterns)]
 		match self {
 			#[cfg(feature = "vsock")]
 			Self::VirtioVsock(drv) => {
@@ -405,6 +424,13 @@ impl PciDriver {
 				let irq_number = drv.lock().get_interrupt_number();
 				(irq_number, console_handler)
 			}
+			#[cfg(feature = "nvme")]
+			Self::Nvme(drv) => {
+				let irq_number = drv.lock().get_interrupt_number();
+				fn nvme_handler() {}
+				(irq_number, nvme_handler)
+			}
+			#[allow(unreachable_patterns)]
 			_ => todo!(),
 		}
 	}
@@ -476,6 +502,14 @@ pub(crate) fn get_console_driver() -> Option<&'static InterruptTicketMutex<VirtioConsoleDriver>> {
 		.find_map(|drv| drv.get_console_driver())
 }
 
+#[cfg(feature = "nvme")]
+pub(crate) fn get_nvme_driver() -> Option<&'static InterruptTicketMutex<NvmeDriver>> {
+	PCI_DRIVERS
+		.get()?
+		.iter()
+		.find_map(|drv| drv.get_nvme_driver())
+}
+
 #[cfg(feature = "vsock")]
 pub(crate) fn get_vsock_driver() -> Option<&'static InterruptTicketMutex<VirtioVsockDriver>> {
 	PCI_DRIVERS
@@ -540,6 +574,32 @@ pub(crate) fn init() {
 		}
 	}
 
+	#[cfg(feature = "nvme")]
+	for adapter in PCI_DEVICES.finalize().iter().filter(|adapter| {
+		let (_, class_id, subclass_id, _) =
+			adapter.header().revision_and_class(adapter.access());
+		let device_type = DeviceType::from((class_id, subclass_id));
+		device_type == DeviceType::NvmeController
+	}) {
+		info!(
+			"Found NVMe device with device id {:#x}",
+			adapter.device_id()
+		);
+
+		match NvmeDriver::init(adapter) {
+			Ok(nvme_driver) => {
+				info!("NVMe driver initialized.");
+				register_driver(PciDriver::Nvme(InterruptTicketMutex::new(nvme_driver)));
+			}
+			Err(()) => {
+				error!(
+					"NVMe driver could not be initialized for device: {:#x}",
+					adapter.device_id()
+				);
+			}
+		}
+	}
+
 	// Searching for Realtek RTL8139, which is supported by Qemu
 	#[cfg(all(target_arch = "x86_64", feature = "rtl8139"))]
 	for adapter in PCI_DEVICES.finalize().iter().filter(|x| {
diff --git a/src/drivers/virtio/virtqueue/packed.rs b/src/drivers/virtio/virtqueue/packed.rs
index ec7aa994b7..4b7391519e 100644
--- a/src/drivers/virtio/virtqueue/packed.rs
+++ b/src/drivers/virtio/virtqueue/packed.rs
@@ -74,7 +74,7 @@ impl WrapCount {
 		WrapCount(true)
 	}
 
-	/// Toogles a given wrap count to respectiver other value.
+	/// Toggles a given wrap count to the respective other value.
 	///
 	/// If WrapCount(true) returns WrapCount(false),
 	/// if WrapCount(false) returns WrapCount(true).
@@ -470,7 +470,7 @@ impl DrvNotif {
 	}
 
 impl DevNotif {
-	/// Enables the notificication capability for a specific buffer.
+	/// Enables the notification capability for a specific buffer.
 	pub fn enable_notif_specific(&mut self) {
 		self.f_notif_idx = true;
 	}
diff --git a/src/init_cell.rs b/src/init_cell.rs
index 1dba68e44e..19fb448475 100644
--- a/src/init_cell.rs
+++ b/src/init_cell.rs
@@ -1,5 +1,10 @@
 #![cfg_attr(
-	not(any(feature = "vsock", feature = "fuse", feature = "console")),
+	not(any(
+		feature = "vsock",
+		feature = "fuse",
+		feature = "console",
+		feature = "nvme"
+	)),
 	expect(dead_code)
 )]
diff --git a/src/syscalls/mod.rs b/src/syscalls/mod.rs
index fc3fa592ef..554d0116dc 100644
--- a/src/syscalls/mod.rs
+++ b/src/syscalls/mod.rs
@@ -39,6 +39,8 @@ mod futex;
 pub(crate) mod interfaces;
 #[cfg(feature = "mman")]
 mod mman;
+#[cfg(feature = "nvme")]
+pub(crate) mod nvme;
 mod processor;
 #[cfg(feature = "newlib")]
 mod recmutex;
diff --git a/src/syscalls/nvme.rs b/src/syscalls/nvme.rs
new file mode 100644
index 0000000000..c869439eb6
--- /dev/null
+++ b/src/syscalls/nvme.rs
@@ -0,0 +1,360 @@
+use vroom::{Dma, IoQueuePairId, Namespace, NamespaceId};
+
+use crate::drivers::pci::get_nvme_driver;
+
+// TODO: error messages
+#[derive(Debug)]
+pub(crate) enum SysNvmeError {
+	ZeroPointerParameter = 1,
+	DeviceDoesNotExist = 2,
+	NamespaceDoesNotExist = 3,
+	MaxNumberOfQueuesReached = 4,
+	CouldNotCreateIoQueuePair = 5,
+	CouldNotDeleteIoQueuePair = 6,
+	CouldNotFindIoQueuePair = 7,
+	BufferIncorrectlySized = 8,
+	CouldNotAllocateBuffer = 9,
+	CouldNotDeallocateBuffer = 10,
+	CouldNotReadFromIoQueuePair = 11,
+	CouldNotWriteToIoQueuePair = 12,
+	CouldNotClearNamespace = 13,
+}
+
+#[hermit_macro::system]
+#[unsafe(no_mangle)]
+pub unsafe extern "C" fn sys_nvme_number_of_namespaces(result: *mut u32) -> usize {
+	fn inner(result: *mut u32) -> Result<(), SysNvmeError> {
+		if result.is_null() {
+			return Err(SysNvmeError::ZeroPointerParameter);
+		}
+		let result = unsafe { &mut *result };
+		let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?;
+		let number_of_namespaces = driver.lock().namespace_ids().len() as u32;
+		*result = number_of_namespaces;
+		Ok(())
+	}
+	match inner(result) {
+		Ok(()) => 0,
+		Err(error) => error as usize,
+	}
+}
+
+#[hermit_macro::system]
+#[unsafe(no_mangle)]
+pub unsafe extern "C" fn sys_nvme_namespace_ids(
+	vec_pointer: *mut NamespaceId,
+	length: u32,
+) -> usize {
+	fn inner(vec_pointer: *mut NamespaceId, length: u32) -> Result<(), SysNvmeError> {
+		if vec_pointer.is_null() {
+			return Err(SysNvmeError::ZeroPointerParameter);
+		}
+		let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?;
+		let namespace_ids = driver.lock().namespace_ids();
+		if namespace_ids.len() != length as usize {
+			return Err(SysNvmeError::BufferIncorrectlySized);
+		}
+		for (i, namespace_id) in namespace_ids.iter().enumerate().take(length as usize) {
+			let pointer = unsafe { vec_pointer.add(i) };
+			unsafe { *pointer = *namespace_id };
+		}
+		Ok(())
+	}
+	match inner(vec_pointer, length) {
+		Ok(()) => 0,
+		Err(error) => error as usize,
+	}
+}
+
+#[hermit_macro::system]
+#[unsafe(no_mangle)]
+pub unsafe extern "C" fn sys_nvme_namespace(
+	namespace_id: &NamespaceId,
+	result: *mut Namespace,
+) -> usize {
+	fn inner(namespace_id: &NamespaceId, result: *mut Namespace) -> Result<(), SysNvmeError> {
+		if result.is_null() {
+			return Err(SysNvmeError::ZeroPointerParameter);
+		}
+		let result = unsafe { &mut *result };
+		let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?;
+		let lock = driver.lock();
+		let namespace = lock.namespace(namespace_id)?;
+		*result = namespace;
+		Ok(())
+	}
+	match inner(namespace_id, result) {
+		Ok(()) => 0,
+		Err(error) => error as usize,
+	}
+}
+
+#[hermit_macro::system]
+#[unsafe(no_mangle)]
+pub unsafe extern "C" fn sys_nvme_clear_namespace(namespace_id: &NamespaceId) -> usize {
+	fn inner(namespace_id: &NamespaceId) -> Result<(), SysNvmeError> {
+		let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?;
+		let lock = driver.lock();
+		lock.clear_namespace(namespace_id)
+	}
+	match inner(namespace_id) {
+		Ok(()) => 0,
+		Err(error) => error as usize,
+	}
+}
+
+#[hermit_macro::system]
+#[unsafe(no_mangle)]
+pub unsafe extern "C" fn sys_nvme_maximum_transfer_size(result: *mut usize) -> usize {
+	fn inner(result: *mut usize) -> Result<(), SysNvmeError> {
+		if result.is_null() {
+			return Err(SysNvmeError::ZeroPointerParameter);
+		}
+		let result = unsafe { &mut *result };
+		let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?;
+		let maximum_transfer_size = driver.lock().maximum_transfer_size();
+		*result = maximum_transfer_size;
+		Ok(())
+	}
+	match inner(result) {
+		Ok(()) => 0,
+		Err(error) => error as usize,
+	}
+}
+
+#[hermit_macro::system]
+#[unsafe(no_mangle)]
+pub unsafe extern "C" fn sys_nvme_maximum_number_of_io_queue_pairs(result: *mut u16) -> usize {
+	fn inner(result: *mut u16) -> Result<(), SysNvmeError> {
+		if result.is_null() {
+			return Err(SysNvmeError::ZeroPointerParameter);
+		}
+		let result = unsafe { &mut *result };
+		let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?;
+		let maximum_number_of_io_queue_pairs = driver.lock().maximum_number_of_io_queue_pairs();
+		*result = maximum_number_of_io_queue_pairs;
+		Ok(())
+	}
+	match inner(result) {
+		Ok(()) => 0,
+		Err(error) => error as usize,
+	}
+}
+
+#[hermit_macro::system]
+#[unsafe(no_mangle)]
+pub unsafe extern "C" fn sys_nvme_maximum_queue_entries_supported(result: *mut u32) -> usize {
+	fn inner(result: *mut u32) -> Result<(), SysNvmeError> {
+		if result.is_null() {
+			return Err(SysNvmeError::ZeroPointerParameter);
+		}
+		let result = unsafe { &mut *result };
+		let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?;
+		let maximum_queue_entries_supported = driver.lock().maximum_queue_entries_supported();
+		*result = maximum_queue_entries_supported;
+		Ok(())
+	}
+	match inner(result) {
+		Ok(()) => 0,
+		Err(error) => error as usize,
+	}
+}
+
+#[hermit_macro::system]
+#[unsafe(no_mangle)]
+pub unsafe extern "C" fn sys_nvme_create_io_queue_pair(
+	namespace_id: &NamespaceId,
+	number_of_entries: u32,
+	resulting_io_queue_pair_id: *mut IoQueuePairId,
+) -> usize {
+	fn inner(
+		namespace_id: &NamespaceId,
+		number_of_entries: u32,
+		resulting_io_queue_pair_id: *mut IoQueuePairId,
+	) -> Result<(), SysNvmeError> {
+		if resulting_io_queue_pair_id.is_null() {
+			return Err(SysNvmeError::ZeroPointerParameter);
+		}
+		let resulting_io_queue_pair_id = unsafe { &mut *resulting_io_queue_pair_id };
+		let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?;
+		let io_queue_pair_id = driver
+			.lock()
+			.create_io_queue_pair(namespace_id, number_of_entries)?;
+		*resulting_io_queue_pair_id = io_queue_pair_id;
+		Ok(())
+	}
+	match inner(namespace_id, number_of_entries, resulting_io_queue_pair_id) {
+		Ok(()) => 0,
+		Err(error) => error as usize,
+	}
+}
+
+#[hermit_macro::system]
+#[unsafe(no_mangle)]
+pub unsafe extern "C" fn sys_nvme_delete_io_queue_pair(io_queue_pair_id: IoQueuePairId) -> usize {
+	fn inner(io_queue_pair_id: IoQueuePairId) -> Result<(), SysNvmeError> {
+		let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?;
+		driver.lock().delete_io_queue_pair(io_queue_pair_id)
+	}
+	match inner(io_queue_pair_id) {
+		Ok(()) => 0,
+		Err(error) => error as usize,
+	}
+}
+
+#[hermit_macro::system]
+#[unsafe(no_mangle)]
+pub unsafe extern "C" fn sys_nvme_allocate_buffer(
+	io_queue_pair_id: &IoQueuePairId,
+	size: usize,
+	resulting_buffer: *mut Dma<u8>,
+) -> usize {
+	fn inner(
+		io_queue_pair_id: &IoQueuePairId,
+		number_of_elements: usize,
+		resulting_buffer_pointer: *mut Dma<u8>,
+	) -> Result<(), SysNvmeError> {
+		let resulting_buffer_pointer = unsafe { &mut *resulting_buffer_pointer };
+		let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?;
+		let buffer = driver
+			.lock()
+			.allocate_buffer(io_queue_pair_id, number_of_elements)?;
+		*resulting_buffer_pointer = buffer;
+		Ok(())
+	}
+	match inner(io_queue_pair_id, size, resulting_buffer) {
+		Ok(()) => 0,
+		Err(error) => error as usize,
+	}
+}
+
+#[hermit_macro::system]
+#[unsafe(no_mangle)]
+pub unsafe extern "C" fn sys_nvme_deallocate_buffer(
+	io_queue_pair_id: &IoQueuePairId,
+	buffer: *mut Dma<u8>,
+) -> usize {
+	fn inner(io_queue_pair_id: &IoQueuePairId, buffer: *mut Dma<u8>) -> Result<(), SysNvmeError> {
+		let _ = buffer;
+		let buffer: Dma<u8> = unsafe { core::ptr::read(buffer) };
+		let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?;
+		driver.lock().deallocate_buffer(io_queue_pair_id, buffer)
+	}
+	match inner(io_queue_pair_id, buffer) {
+		Ok(()) => 0,
+		Err(error) => error as usize,
+	}
+}
+
+#[hermit_macro::system]
+#[unsafe(no_mangle)]
+pub unsafe extern "C" fn sys_nvme_read_from_io_queue_pair(
+	io_queue_pair_id: &IoQueuePairId,
+	buffer: *mut Dma<u8>,
+	logical_block_address: u64,
+) -> usize {
+	fn inner(
+		io_queue_pair_id: &IoQueuePairId,
+		buffer: *mut Dma<u8>,
+		logical_block_address: u64,
+	) -> Result<(), SysNvmeError> {
+		let buffer = unsafe { &mut *buffer };
+		let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?;
+		driver
+			.lock()
+			.read_from_io_queue_pair(io_queue_pair_id, buffer, logical_block_address)
+	}
+	match inner(io_queue_pair_id, buffer, logical_block_address) {
+		Ok(()) => 0,
+		Err(error) => error as usize,
+	}
+}
+
+#[hermit_macro::system]
+#[unsafe(no_mangle)]
+pub unsafe extern "C" fn sys_nvme_write_to_io_queue_pair(
+	io_queue_pair_id: &IoQueuePairId,
+	buffer: *const Dma<u8>,
+	logical_block_address: u64,
+) -> usize {
+	fn inner(
+		io_queue_pair_id: &IoQueuePairId,
+		buffer: *const Dma<u8>,
+		logical_block_address: u64,
+	) -> Result<(), SysNvmeError> {
+		let buffer = unsafe { &*buffer };
+		let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?;
+		driver
+			.lock()
+			.write_to_io_queue_pair(io_queue_pair_id, buffer, logical_block_address)
+	}
+	match inner(io_queue_pair_id, buffer, logical_block_address) {
+		Ok(()) => 0,
+		Err(error) => error as usize,
+	}
+}
+
+#[hermit_macro::system]
+#[unsafe(no_mangle)]
+pub unsafe extern "C" fn sys_nvme_submit_read_to_io_queue_pair(
+	io_queue_pair_id: &IoQueuePairId,
+	buffer: *mut Dma<u8>,
+	logical_block_address: u64,
+) -> usize {
+	fn inner(
+		io_queue_pair_id: &IoQueuePairId,
+		buffer: *mut Dma<u8>,
+		logical_block_address: u64,
+	) -> Result<(), SysNvmeError> {
+		let buffer = unsafe { &mut *buffer };
+		let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?;
+		driver
+			.lock()
+			.submit_read_to_io_queue_pair(io_queue_pair_id, buffer, logical_block_address)
+	}
+	match inner(io_queue_pair_id, buffer, logical_block_address) {
+		Ok(()) => 0,
+		Err(error) => error as usize,
+	}
+}
+
+#[hermit_macro::system]
+#[unsafe(no_mangle)]
+pub unsafe extern "C" fn sys_nvme_submit_write_to_io_queue_pair(
+	io_queue_pair_id: &IoQueuePairId,
+	buffer: *const Dma<u8>,
+	logical_block_address: u64,
+) -> usize {
+	fn inner(
+		io_queue_pair_id: &IoQueuePairId,
+		buffer: *const Dma<u8>,
+		logical_block_address: u64,
+	) -> Result<(), SysNvmeError> {
+		let buffer = unsafe { &*buffer };
+		let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?;
+		driver
+			.lock()
+			.submit_write_to_io_queue_pair(io_queue_pair_id, buffer, logical_block_address)
+	}
+	match inner(io_queue_pair_id, buffer, logical_block_address) {
+		Ok(()) => 0,
+		Err(error) => error as usize,
+	}
+}
+
+#[hermit_macro::system]
+#[unsafe(no_mangle)]
+pub unsafe extern "C" fn sys_nvme_complete_io_with_io_queue_pair(
+	io_queue_pair_id: &IoQueuePairId,
+) -> usize {
+	fn inner(io_queue_pair_id: &IoQueuePairId) -> Result<(), SysNvmeError> {
+		let driver = get_nvme_driver().ok_or(SysNvmeError::DeviceDoesNotExist)?;
+		driver
+			.lock()
+			.complete_io_with_io_queue_pair(io_queue_pair_id)
+	}
+	match inner(io_queue_pair_id) {
+		Ok(()) => 0,
+		Err(error) => error as usize,
+	}
+}
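
To make the new syscall surface concrete for review, here is a minimal, hypothetical application-side sketch. It is not part of the patch. It assumes a Hermit application (with `std` or at least `alloc`, for `vec!`) linked against a kernel built with the `nvme` feature, with vroom's `Dma`, `IoQueuePairId`, and `NamespaceId` types available via a vroom dependency. The extern declarations mirror the signatures in src/syscalls/nvme.rs; the queue depth (32), buffer size (512 bytes), and the name `nvme_smoke_test` are illustrative. A non-zero return value is a `SysNvmeError` discriminant.

use core::mem::{MaybeUninit, forget};

use vroom::{Dma, IoQueuePairId, NamespaceId};

unsafe extern "C" {
	fn sys_nvme_number_of_namespaces(result: *mut u32) -> usize;
	fn sys_nvme_namespace_ids(vec_pointer: *mut NamespaceId, length: u32) -> usize;
	fn sys_nvme_create_io_queue_pair(
		namespace_id: &NamespaceId,
		number_of_entries: u32,
		resulting_io_queue_pair_id: *mut IoQueuePairId,
	) -> usize;
	fn sys_nvme_allocate_buffer(
		io_queue_pair_id: &IoQueuePairId,
		size: usize,
		resulting_buffer: *mut Dma<u8>,
	) -> usize;
	fn sys_nvme_write_to_io_queue_pair(
		io_queue_pair_id: &IoQueuePairId,
		buffer: *const Dma<u8>,
		logical_block_address: u64,
	) -> usize;
	fn sys_nvme_deallocate_buffer(io_queue_pair_id: &IoQueuePairId, buffer: *mut Dma<u8>) -> usize;
	fn sys_nvme_delete_io_queue_pair(io_queue_pair_id: IoQueuePairId) -> usize;
}

/// Converts a syscall return code into a `Result`.
fn check(code: usize) -> Result<(), usize> {
	if code == 0 { Ok(()) } else { Err(code) }
}

/// Creates a queue pair on the first namespace, writes one buffer to LBA 0,
/// and tears everything down again. Returns the first non-zero
/// `SysNvmeError` discriminant on failure.
unsafe fn nvme_smoke_test() -> Result<(), usize> {
	unsafe {
		// Discover how many namespaces the controller reports ...
		let mut count = 0u32;
		check(sys_nvme_number_of_namespaces(&mut count))?;
		assert!(count > 0);

		// ... and fetch their IDs. The buffer length must match exactly,
		// otherwise the syscall fails with `BufferIncorrectlySized`.
		let mut ids = vec![MaybeUninit::<NamespaceId>::uninit(); count as usize];
		check(sys_nvme_namespace_ids(
			ids.as_mut_ptr().cast::<NamespaceId>(),
			count,
		))?;
		// The kernel writes the IDs by value, so `NamespaceId` is `Copy`.
		let namespace_id = ids[0].assume_init();

		// One IO queue pair with 32 entries on that namespace.
		let mut qpair_id = MaybeUninit::<IoQueuePairId>::uninit();
		check(sys_nvme_create_io_queue_pair(
			&namespace_id,
			32,
			qpair_id.as_mut_ptr(),
		))?;
		let qpair_id = qpair_id.assume_init();

		// A 512-byte DMA buffer (assumed to match the namespace's block
		// size). Filling it with data is elided here because that goes
		// through vroom's `Dma` API rather than a syscall.
		let mut buffer = MaybeUninit::<Dma<u8>>::uninit();
		check(sys_nvme_allocate_buffer(&qpair_id, 512, buffer.as_mut_ptr()))?;
		let mut buffer = buffer.assume_init();

		// Blocking write of the buffer to logical block address 0.
		check(sys_nvme_write_to_io_queue_pair(&qpair_id, &buffer, 0))?;

		// The kernel takes ownership of the buffer via `ptr::read`, so it
		// must not be dropped on this side afterwards.
		let code = sys_nvme_deallocate_buffer(&qpair_id, &mut buffer);
		forget(buffer);
		check(code)?;

		check(sys_nvme_delete_io_queue_pair(qpair_id))?;
		Ok(())
	}
}

Note the convention this sketch relies on, which matches the rest of Hermit's syscall layer: results are returned through out-pointers, the `usize` return value is 0 on success, and ownership of a `Dma` buffer passes to the kernel on deallocation.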