From e749c85f1e2f8e3a2c6e619cd77b0ddf2144ddb1 Mon Sep 17 00:00:00 2001 From: eraykaradag Date: Tue, 28 Jan 2025 13:28:17 +0000 Subject: [PATCH] new allocator service --- Cargo.lock | 63 +++++- Cargo.toml | 2 +- allocator/Cargo.toml | 15 ++ allocator/src/configuration.rs | 47 +++++ allocator/src/error.rs | 14 ++ allocator/src/main.rs | 34 ++++ allocator/src/resources/cpu.rs | 278 ++++++++++++++++++++++++++ allocator/src/resources/huge_pages.rs | 144 +++++++++++++ allocator/src/resources/mod.rs | 254 +++++++++++++++++++++++ 9 files changed, 844 insertions(+), 7 deletions(-) create mode 100644 allocator/Cargo.toml create mode 100644 allocator/src/configuration.rs create mode 100644 allocator/src/error.rs create mode 100644 allocator/src/main.rs create mode 100644 allocator/src/resources/cpu.rs create mode 100644 allocator/src/resources/huge_pages.rs create mode 100644 allocator/src/resources/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 85888e81..e195ad7b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -38,6 +38,18 @@ dependencies = [ "memchr", ] +[[package]] +name = "allocator" +version = "0.1.0" +dependencies = [ + "lazy_static", + "log", + "serde", + "serde_yaml 0.9.34+deprecated", + "tempfile", + "thiserror 2.0.11", +] + [[package]] name = "android-tzdata" version = "0.1.1" @@ -613,7 +625,7 @@ dependencies = [ "serde_json", "serde_repr", "serde_urlencoded", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-util", "tower-service", @@ -1001,7 +1013,7 @@ dependencies = [ "log", "serde", "serde_json", - "serde_yaml", + "serde_yaml 0.8.26", "sha2 0.9.9", "tar", "tempfile", @@ -1100,7 +1112,7 @@ dependencies = [ "log", "nu-ansi-term", "regex", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -1370,7 +1382,7 @@ dependencies = [ "ipnet", "once_cell", "rand", - "thiserror", + "thiserror 1.0.69", "tinyvec", "tokio", "tracing", @@ -1393,7 +1405,7 @@ dependencies = [ "rand", "resolv-conf", "smallvec", - "thiserror", + "thiserror 1.0.69", "tokio", "tracing", ] @@ -2724,6 +2736,19 @@ dependencies = [ "yaml-rust", ] +[[package]] +name = "serde_yaml" +version = "0.9.34+deprecated" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" +dependencies = [ + "indexmap 2.7.0", + "itoa", + "ryu", + "serde", + "unsafe-libyaml", +] + [[package]] name = "sha2" version = "0.9.9" @@ -2896,7 +2921,16 @@ version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" dependencies = [ - "thiserror-impl", + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc" +dependencies = [ + "thiserror-impl 2.0.11", ] [[package]] @@ -2910,6 +2944,17 @@ dependencies = [ "syn", ] +[[package]] +name = "thiserror-impl" +version = "2.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "threadpool" version = "1.8.1" @@ -3081,6 +3126,12 @@ version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" +[[package]] +name = "unsafe-libyaml" +version = "0.2.11" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" + [[package]] name = "untrusted" version = "0.9.0" diff --git a/Cargo.toml b/Cargo.toml index a78d16c0..a395c3a1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -41,7 +41,7 @@ num-traits = "0.2" tempfile = "3.5" [workspace] -members = [".", "samples/command_executer", "driver-bindings", "eif_loader", "enclave_build", "vsock_proxy"] +members = [".", "samples/command_executer", "driver-bindings", "eif_loader", "enclave_build", "vsock_proxy","allocator"] [features] default = [] diff --git a/allocator/Cargo.toml b/allocator/Cargo.toml new file mode 100644 index 00000000..58d79316 --- /dev/null +++ b/allocator/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "allocator" +version = "0.1.0" +edition = "2021" + +[dependencies] +log = "0.4.22" +serde = {version = "1.0.217", features = ["derive"] } +serde_yaml = "0.9.34" +thiserror = "2.0.9" +lazy_static = "1.4" + + +[dev-dependencies] +tempfile = "3.2" \ No newline at end of file diff --git a/allocator/src/configuration.rs b/allocator/src/configuration.rs new file mode 100644 index 00000000..d9834e5f --- /dev/null +++ b/allocator/src/configuration.rs @@ -0,0 +1,47 @@ +use serde::Deserialize; +use crate::resources; +use crate::error::Error; + +//deserializing from allocator.yaml file +#[derive(Debug, PartialEq, Deserialize,Clone)] +#[serde(deny_unknown_fields)] +#[serde(untagged)] +pub enum ResourcePool { + CpuCount { memory_mib: usize , cpu_count: usize}, + CpuPool { cpu_pool: String, memory_mib: usize }, +} +pub fn get_resource_pool_from_config() -> Result, Box> { + //config file deserializing + let f = std::fs::File::open("/etc/nitro_enclaves/allocator.yaml")?; + let pool: Vec = match serde_yaml::from_reader(f) { + Ok(pool) => pool, + Err(_) => {return Err(Box::new(Error::ConfigFileCorruption));},//error messages use anyhow + }; + if pool.len() > 4 { + eprintln!("{}",Error::MoreResourcePoolThanSupported); + } + Ok(pool) +} +pub fn get_current_allocated_cpu_pool() -> Result>, Box> { + let f = std::fs::read_to_string("/sys/module/nitro_enclaves/parameters/ne_cpus")?; + if f.trim().is_empty() { + return Ok(None); + } + let cpu_list = resources::cpu::parse_cpu_list(&f[..])?; + Ok(Some(cpu_list)) +} +//clears everything in a numa node. +pub fn clear_everything_in_numa_node() -> Result<(), Box> {//change the name + match get_current_allocated_cpu_pool()?{ + Some(cpu_list) => { + //find numa by one of cpuids + let numa = resources::cpu::get_numa_node_for_cpu(cpu_list.clone().into_iter().next().unwrap())?; + //release everything + let _ = resources::huge_pages::release_all_huge_pages(numa)?; + let _ = resources::cpu::deallocate_cpu_set(&cpu_list); + } + None => {} + }; + Ok(()) +} + diff --git a/allocator/src/error.rs b/allocator/src/error.rs new file mode 100644 index 00000000..aebcebad --- /dev/null +++ b/allocator/src/error.rs @@ -0,0 +1,14 @@ +#[derive(thiserror::Error, Debug)] +pub enum Error +{ + #[error(transparent)] + ParseInt(#[from] std::num::ParseIntError), + #[error(transparent)] + TryFromInt(#[from] std::num::TryFromIntError), + #[error(transparent)] + Allocation(#[from] super::resources::Error), + #[error("Invalid config file. This might happened due to old config file or config file corruption. See release notes :")] + ConfigFileCorruption, + #[error("WARNING! Requested resource pool is more than supported. 
Supported Enclave number is 4")] + MoreResourcePoolThanSupported, +} diff --git a/allocator/src/main.rs b/allocator/src/main.rs new file mode 100644 index 00000000..9097acb9 --- /dev/null +++ b/allocator/src/main.rs @@ -0,0 +1,34 @@ +mod resources; +mod error; +mod configuration; + + +fn main() -> Result<(), Box> { + let _ = configuration::clear_everything_in_numa_node(); + + match configuration::get_resource_pool_from_config() { + Ok(pool) => { + let numa_node = match resources::Allocation::allocate_by_cpu_pools(pool.clone()){ + Ok(numa) => numa, + Err(e) =>{ + eprintln!("Allocation failed: {}",e); + return Err(Box::new(e)); + },//proper error messages + }; + match resources::Allocation::allocate_by_cpu_count(pool,numa_node) { + Ok(_) => {}, + Err(e) => { + let _ = configuration::clear_everything_in_numa_node(); + eprintln!(" Allocation failed: {}",e); + return Err(Box::new(e)); + } + } //check if allocation successful or not, if not clear what you allocated previously + } + Err(e) => { + eprintln!("Allocation failed: {}",e); + return Err(e); + } + + }; + Ok(()) +} diff --git a/allocator/src/resources/cpu.rs b/allocator/src/resources/cpu.rs new file mode 100644 index 00000000..1ccd0221 --- /dev/null +++ b/allocator/src/resources/cpu.rs @@ -0,0 +1,278 @@ +pub type CpuSet = std::collections::BTreeSet::; +type CpuSets = std::collections::HashMap::; + +#[derive(thiserror::Error, Debug)] +pub enum Error +{ + #[error(transparent)] + Io(#[from] std::io::Error), + #[error(transparent)] + ParseInt(#[from] std::num::ParseIntError), + #[error("missing CPU pool file, make sure the Nitro Enclaves driver is present")] + MissingCpuPoolFile, + #[error("unexpected sysfs file structure")] + UnexptectedFileStructure, + #[error("failed to configure requested cpu pool, this indicates insufficient system resources")] + InsufficientCpuPool, +} + +const CPU_POOL_FILE: &str = "/sys/module/nitro_enclaves/parameters/ne_cpus"; + +pub struct Allocation +{ + #[allow(dead_code)] + cpu_set: CpuSet, +} + +impl Allocation +{ + pub fn new(cpu_set: CpuSet) -> Result + { + allocate_cpu_set(&cpu_set)?; + + Ok(Self + { + cpu_set, + }) + } +} + +/*impl Drop for Allocation +{ + fn drop(&mut self) + { + if let Err(error) = deallocate_cpu_set(&self.cpu_set) + { + log::error!("Failed to release CPUs: {error}"); + } + } +}*/ + +pub fn find_suitable_cpu_sets(cpu_count: usize) -> Result +{ + let cpu_0_numa_node = get_numa_node_for_cpu(0)?; + let cpu_0_siblings = get_cpu_siblings(0)?; + + (0 .. 
get_numa_node_count()?).try_fold( + CpuSets::new(), + |mut cpu_sets, numa_node| + { + let mut cpus_in_numa_node = get_cpus_in_numa_node(numa_node)?; + + if numa_node == cpu_0_numa_node + { + cpus_in_numa_node.retain(|cpu| !cpu_0_siblings.contains(cpu)); + } + + if cpus_in_numa_node.len() < cpu_count + { + return Ok(cpu_sets); + } + + let cores = cpus_in_numa_node.into_iter().try_fold( + CpuSets::new(), |mut cores: CpuSets, cpu| + { + let core_id = get_core_id(cpu)?; + + cores.entry(core_id).or_default().insert(cpu); + + Ok::<_, Error>(cores) + })?; + + let mut selected_cpus = CpuSet::new(); + + for cpus_in_core in cores.values() + { + let siblings = get_cpu_siblings( + // Safety: We know we have at least one entry in the set + *cpus_in_core.first().unwrap())?; + + if *cpus_in_core == siblings + { + selected_cpus.extend(cpus_in_core); + + if selected_cpus.len() >= cpu_count + { + cpu_sets.insert(numa_node, selected_cpus); + + break; + } + } + } + + Ok(cpu_sets) + }) +} + +fn allocate_cpu_set(update: &CpuSet) -> Result<(), Error> +{ + let mut cpu_set = get_cpu_pool()?; + cpu_set.extend(update); + + set_cpu_pool(&cpu_set) +} + +pub fn deallocate_cpu_set(update: &CpuSet) -> Result<(), Error> +{ + let mut cpu_set = get_cpu_pool()?; + cpu_set.retain(|cpu| !update.contains(cpu)); + + set_cpu_pool(&cpu_set) +} + +fn get_core_id(cpu: usize) -> Result +{ + let core_id_path = format!("/sys/devices/system/cpu/cpu{cpu}/topology/core_id"); + let content = std::fs::read_to_string(core_id_path)?; + + Ok(content.trim().parse()?) +} + +fn get_numa_node_count() -> Result +{ + let node_path = "/sys/devices/system/node"; + + Ok(get_numa_nodes(node_path)?.len()) +} + +pub fn get_numa_node_for_cpu(cpu: usize) -> Result +{ + let cpu_path = format!("/sys/devices/system/cpu/cpu{cpu}"); + + get_numa_nodes(&cpu_path)?.into_iter().next().ok_or(Error::UnexptectedFileStructure) +} + +fn get_numa_nodes(path: &str) -> Result +{ + std::fs::read_dir(path)? + .try_fold(CpuSet::new(), |mut set, entry| + { + let entry = entry?; + let file_name = entry.file_name(); + let file_name = file_name.to_str().ok_or(Error::UnexptectedFileStructure)?; + + if let Some(file_name) = file_name.strip_prefix("node") + { + set.insert(file_name.parse()?); + } + + Ok(set) + }) +} + +fn get_cpus_in_numa_node(node: usize) -> Result +{ + let cpu_list_path = format!("/sys/devices/system/node/node{node}/cpulist"); + + get_cpu_list(&cpu_list_path) +} + +fn get_cpu_siblings(cpu: usize) -> Result +{ + let thread_siblings_list_path = + format!("/sys/devices/system/cpu/cpu{cpu}/topology/thread_siblings_list"); + + get_cpu_list(&thread_siblings_list_path) +} + +fn get_cpu_list(list: &str) -> Result +{ + let list = std::fs::read_to_string(list)?; + + parse_cpu_list(&list) +} + +fn get_cpu_pool() -> Result +{ + if !std::path::Path::new(CPU_POOL_FILE).exists() + { + return Err(Error::MissingCpuPoolFile); + } + + get_cpu_list(CPU_POOL_FILE) +} + +fn set_cpu_pool(cpu_set: &CpuSet) -> Result<(), Error> +{ + if !std::path::Path::new(CPU_POOL_FILE).exists() + { + return Err(Error::MissingCpuPoolFile); + } + + let cpu_list = format_cpu_list(cpu_set); + + Ok(match std::fs::write(CPU_POOL_FILE, cpu_list) + { + // We expect and invalid input error when writing an empty CPU list, but the driver + // will still tear down the CPU pool. + // See: https://github.com/aws/aws-nitro-enclaves-cli/issues/397 + Err(error) if error.kind() == std::io::ErrorKind::InvalidInput && cpu_set.is_empty() + => Ok(()), + other => other, + }?) 
+} + +pub fn parse_cpu_list(cpu_list: &str) -> Result +{ + cpu_list.trim().split_terminator(',') + .try_fold(CpuSet::new(), |mut set, entry| + { + if let Some((start, end)) = entry.split_once('-') + { + let start: usize = start.parse()?; + let end: usize = end.parse()?; + + set.extend(start..=end); + } + else + { + set.insert(entry.parse()?); + } + + Ok(set) + }) +} + +pub fn format_cpu_list(cpu_set: &CpuSet) -> String +{ + let mut cpu_set = cpu_set.iter(); + + let Some(first) = cpu_set.next() + else + { + return "\n".to_string(); + }; + + let mut cpu_list = Vec::new(); + let last_range = cpu_set.fold( + *first..=*first, + |range, &cpu| + { + if cpu == *range.end() + 1 + { + *range.start()..=cpu + } + else + { + cpu_list.push(format_range(range)); + + cpu..=cpu + } + }); + + cpu_list.push(format_range(last_range)); + + cpu_list.join(",") + "\n" +} + +fn format_range(range: std::ops::RangeInclusive) -> String +{ + if range.start() == range.end() + { + range.start().to_string() + } + else + { + format!("{}-{}", range.start(), range.end()) + } +} \ No newline at end of file diff --git a/allocator/src/resources/huge_pages.rs b/allocator/src/resources/huge_pages.rs new file mode 100644 index 00000000..e449ecaa --- /dev/null +++ b/allocator/src/resources/huge_pages.rs @@ -0,0 +1,144 @@ +type Pages = std::collections::HashMap; +type PageSizes = std::collections::BTreeSet; +#[derive(thiserror::Error, Debug)] +pub enum Error +{ + #[error(transparent)] + Io(#[from] std::io::Error), + #[error(transparent)] + ParseInt(#[from] std::num::ParseIntError), + #[error("failed to configure requested memory, this indicates insufficient system resources. Rebooting the system might solve the issue")] + InsufficientMemory, + #[error("unexpected sysfs file structure")] + UnexptectedFileStructure, +} + +pub struct Allocation +{ + numa_node: usize, + allocated_pages: Pages, +} + +impl Allocation +{ + pub fn new(numa_node: usize, memory_mib: usize) -> Result + { + let allocated_pages = configure_huge_pages(numa_node, memory_mib)?; + + Ok(Self + { + numa_node, + allocated_pages, + }) + } + pub fn release_resources(&self){ + if let Err(error) = release_huge_pages(self.numa_node, &self.allocated_pages) + { + log::error!("Failed to release huge pages: {error}"); + } + } +} + +/*impl Drop for Allocation +{ + fn drop(&mut self) + { + if let Err(error) = release_huge_pages(self.numa_node, &self.allocated_pages) + { + log::error!("Failed to release huge pages: {error}"); + } + } +}*/ + +fn configure_huge_pages(numa_node: usize, memory_mib: usize) -> Result +{ + let mut remaining_memory = memory_mib * 1024; // Convert to kB + let mut allocated_pages = Pages::new(); + + for page_size in get_huge_page_sizes(numa_node)?.into_iter().rev() + { + let needed_pages = remaining_memory / page_size; + + if needed_pages == 0 + { + continue; + } + + let huge_pages_path = + format!("/sys/devices/system/node/node{numa_node}/hugepages/hugepages-{page_size}kB/nr_hugepages"); + + let current_pages: usize = std::fs::read_to_string(&huge_pages_path)?.trim() + .parse()?; + let new_pages = current_pages + needed_pages; + + std::fs::write(&huge_pages_path, new_pages.to_string())?; + + let actual_pages: usize = std::fs::read_to_string(&huge_pages_path)?.trim() + .parse()?; + let actual_allocated_pages = actual_pages - current_pages; + + if actual_allocated_pages > 0 + { + allocated_pages.insert(page_size, actual_allocated_pages); + remaining_memory = remaining_memory.saturating_sub(page_size * actual_allocated_pages); + } + + if remaining_memory == 
0 + { + break; + } + } + + if remaining_memory != 0 + { + release_huge_pages(numa_node, &allocated_pages)?; + + return Err(Error::InsufficientMemory); + } + + Ok(allocated_pages) +} + +pub fn release_huge_pages(numa_node: usize, allocated_pages: &Pages) + -> Result<(), Error> +{ + for (page_size, &allocated_count) in allocated_pages + { + let huge_pages_path = + format!("/sys/devices/system/node/node{numa_node}/hugepages/hugepages-{page_size}kB/nr_hugepages"); + + let current_pages: usize = std::fs::read_to_string(&huge_pages_path)?.trim() + .parse()?; + let new_pages = current_pages.saturating_sub(allocated_count); + + std::fs::write(&huge_pages_path, new_pages.to_string())?; + } + + Ok(()) +} +pub fn release_all_huge_pages(numa_node: usize) -> Result<(), Error> { + for page_size in get_huge_page_sizes(numa_node)?.into_iter().rev() + { + let huge_pages_path = + format!("/sys/devices/system/node/node{numa_node}/hugepages/hugepages-{page_size}kB/nr_hugepages"); + std::fs::write(&huge_pages_path, "0")?; + } + Ok(()) +} +fn get_huge_page_sizes(numa_node: usize) -> Result +{ + let path = format!("/sys/devices/system/node/node{numa_node}/hugepages"); + + std::fs::read_dir(path)? + .map(|entry| + { + let file_name = entry?.file_name(); + let file_name = file_name.to_str().ok_or(Error::UnexptectedFileStructure)?; + + Ok(file_name.strip_prefix("hugepages-") + .and_then(|file_name| file_name.strip_suffix("kB")) + .ok_or(Error::UnexptectedFileStructure)? + .parse()?) + }) + .collect() +} \ No newline at end of file diff --git a/allocator/src/resources/mod.rs b/allocator/src/resources/mod.rs new file mode 100644 index 00000000..08717f52 --- /dev/null +++ b/allocator/src/resources/mod.rs @@ -0,0 +1,254 @@ +//! Sysfs-based enclave resource allocation +pub mod cpu; +pub mod huge_pages; +use std::collections::BTreeSet; + + +use crate::configuration::ResourcePool; + +#[derive(thiserror::Error, Debug)] +pub enum Error +{ + #[error(transparent)] + Cpu(#[from] cpu::Error), + #[error(transparent)] + HugePage(#[from] huge_pages::Error), + #[error("failed to find suitable combination of CPUs and memory")] + Allocation, + #[error("Config file cannot include cpus from different numa nodes")] + NumaDifference, +} + +pub struct Allocation +{ + // Both allocations implement Drop + #[allow(dead_code)] + cpu_set_allocation: cpu::Allocation, + _huge_pages_allocation: huge_pages::Allocation, +} + +impl Allocation +{ + #[allow(dead_code)] + pub fn new(cpu_count: usize, memory_mib: usize) -> Result + { + // Find NUMA nodes with a suitable CPU set + for (numa_node, cpu_set) in cpu::find_suitable_cpu_sets(cpu_count)?.into_iter() + { + // Try to allocate the memory on the NUMA node ... + let huge_pages_allocation = + match huge_pages::Allocation::new(numa_node, memory_mib) + { + Ok(allocation) => allocation, + Err(huge_pages::Error::InsufficientMemory) => continue, + Err(error) => return Err(error.into()), + }; + + // ... if successful, also allocate the CPU set + let cpu_set_allocation = cpu::Allocation::new(cpu_set)?; + + return Ok(Self + { + cpu_set_allocation, + _huge_pages_allocation: huge_pages_allocation, + }); + } + + Err(Error::Allocation) + } + pub fn allocate_by_cpu_count(mut pool: Vec,target_numa: Option) -> Result<(), Error> + { + pool.retain(|p| matches!(p, ResourcePool::CpuCount{..})); + if pool.len() == 0 { + return Ok(()); + } + + + let total_cpu_count: usize = pool.iter() + .map(|p| { + if let ResourcePool::CpuCount { cpu_count, .. 
} = p {
+                    *cpu_count
+                } else {
+                    unreachable!() // Won't happen because we already filtered
+                }
+            })
+            .sum();
+
+        // Find NUMA nodes with a suitable CPU set
+        for (numa_node, cpu_set) in cpu::find_suitable_cpu_sets(total_cpu_count)?.into_iter()
+            .filter(|(numa_node, _)| target_numa.is_none() || *numa_node == target_numa.unwrap())
+        { // If the user listed specific CPU ids in the config file and also requested pools by cpu_count, those pools must be allocated on the same NUMA node.
+            match allocate_cpus_n_pages(&pool, cpu_set, numa_node) {
+                Ok(_) => return Ok(()),
+                Err(Error::HugePage(huge_pages::Error::InsufficientMemory)) => continue,
+                Err(error) => return Err(error.into()),
+            }
+        }
+        return Err(Error::Allocation);
+    }
+
+    pub fn allocate_by_cpu_pools(mut pools: Vec<ResourcePool>) -> Result<Option<usize>, Error>
+    {
+        pools.retain(|p| matches!(p, ResourcePool::CpuPool {..}));
+        if pools.len() == 0 {
+            return Ok(None);
+        }
+
+        let mut final_cpu_list = cpu::CpuSet::new();
+
+        // Merge the configured CPU lists.
+        for pool in &pools {
+            if let ResourcePool::CpuPool { cpu_pool, .. } = pool {
+                final_cpu_list.extend(cpu::parse_cpu_list(cpu_pool)?);
+            }
+        }
+
+        // Check that the provided CPUs are all in the same NUMA node.
+        let numa_node = match sanity_check_numa_nodes(&final_cpu_list) {
+            Ok(numa) => numa,
+            Err(e) => return Err(e),
+        };
+
+        match allocate_cpus_n_pages(&pools, final_cpu_list, numa_node) {
+            Ok(_) => return Ok(Some(numa_node)),
+            Err(error) => return Err(error.into()),
+        }
+
+    }
+}
+// All enclaves must be allocated on the same NUMA node; if the user asks for specific CPUs, the system has to verify that they all belong to the same node.
+pub fn sanity_check_numa_nodes(cpu_set: &BTreeSet<usize>) -> Result<usize, Error> { // TODO: simplify this logic
+    let mut numa = usize::MAX;
+    for cpu in cpu_set {
+        let cpu_numa = cpu::get_numa_node_for_cpu(*cpu)?;
+        if numa != usize::MAX {
+            if numa != cpu_numa {
+                return Err(Error::NumaDifference);
+            }
+        }
+        numa = cpu_numa;
+    }
+    Ok(numa)
+}
+fn allocate_cpus_n_pages(pool: &Vec<ResourcePool>, cpu_set: BTreeSet<usize>, numa_node: usize) -> Result<(), Error> {
+    let mut allocated_pages: Vec<huge_pages::Allocation> = Vec::with_capacity(pool.len());
+    // Try to allocate the memory on the NUMA node ...
+    for enclave in pool {
+        let memory_mib = match enclave {
+            ResourcePool::CpuCount { memory_mib, .. } |
+            ResourcePool::CpuPool { memory_mib, .. } => *memory_mib
+        };
+        let huge_pages_allocation =
+            match huge_pages::Allocation::new(numa_node, memory_mib)
+            {
+                Ok(allocation) => allocation,
+                Err(huge_pages::Error::InsufficientMemory) => {
+                    // Release everything allocated so far.
+                    for delete in &allocated_pages {
+                        delete.release_resources();
+                    }
+                    return Err(Error::HugePage(huge_pages::Error::InsufficientMemory));
+                }
+                Err(error) => return Err(error.into()),
+            };
+        allocated_pages.push(huge_pages_allocation);
+    }
+    // ... if successful, also allocate the CPU set
+    match cpu::Allocation::new(cpu_set) {
+        Ok(_) => { return Ok(()) },
+        Err(_) => {
+            for delete in &allocated_pages {
+                delete.release_resources();
+            }
+            return Err(Error::Cpu(cpu::Error::InsufficientCpuPool));
+        },
+    }
+}
+
+
+
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use std::sync::Mutex;
+    use lazy_static::lazy_static;
+    use crate::configuration;
+    // Create a global mutex for test synchronization
+    lazy_static! 
{ + static ref TEST_MUTEX: Mutex<()> = Mutex::new(()); + } + + fn create_resource_pool_with_count(memory: usize, cpu_count: usize) -> ResourcePool { + ResourcePool::CpuCount { + memory_mib: memory, + cpu_count + } + } + + fn create_resource_pool_with_pool(memory: usize, cpu_pool: &str) -> ResourcePool { + ResourcePool::CpuPool { + memory_mib: memory, + cpu_pool: cpu_pool.to_string() + } + } + + #[test] + fn test_find_n_allocate() { + let _lock = TEST_MUTEX.lock().unwrap(); + println!("Testing find_n_allocate"); + let pools = vec![ + create_resource_pool_with_count(1024, 2), + create_resource_pool_with_count(512, 2), + ]; + println!("Created pools: {:?}", pools); + + match Allocation::allocate_by_cpu_count(pools, Some(0)) { + Ok(_) => println!("find_n_allocate successful"), + Err(e) => panic!("find_n_allocate failed with error: {:?}", e), + } + let _ = configuration::clear_everything_in_numa_node(); + } + + #[test] + fn test_find_n_allocate_with_target_numa() { + let _lock = TEST_MUTEX.lock().unwrap(); + let pools = vec![create_resource_pool_with_count(512, 2)]; + let result = Allocation::allocate_by_cpu_count(pools, Some(0)); + assert!(result.is_ok()); + let _ = configuration::clear_everything_in_numa_node(); + } + + #[test] + fn test_find_n_allocate_with_insufficient_memory() { + let _lock = TEST_MUTEX.lock().unwrap(); + let pools = vec![create_resource_pool_with_count(1024000, 2)]; + let result = Allocation::allocate_by_cpu_count(pools, Some(0)); + assert!(result.is_err()); + } + + #[test] + fn test_find_n_allocate_with_insufficient_cpu() { + let _lock = TEST_MUTEX.lock().unwrap(); + let pools = vec![create_resource_pool_with_count(1024, usize::MAX)]; + let result = Allocation::allocate_by_cpu_count(pools, Some(0)); + assert!(result.is_err()); + } + + #[test] + fn test_allocate_by_cpu_pools() { + let _lock = TEST_MUTEX.lock().unwrap(); + println!("Testing allocate_by_cpu_pools"); + let pools = vec![ + create_resource_pool_with_pool(1024, "1,5"), + create_resource_pool_with_pool(512, "2,6"), + ]; + println!("Created pools: {:?}", pools); + + match Allocation::allocate_by_cpu_pools(pools) { + Ok(numa) => println!("allocate_by_cpu_pools successful with NUMA node: {}", numa.unwrap()), + Err(e) => panic!("allocate_by_cpu_pools failed with error: {:?}", e), + } + let _ = configuration::clear_everything_in_numa_node(); + } +}
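Note on the configuration format: the untagged ResourcePool enum in allocator/src/configuration.rs accepts a YAML list in which each entry is either a cpu_count pool or a cpu_pool pool. The snippet below is a minimal sketch, not part of the patch, of how such an /etc/nitro_enclaves/allocator.yaml could look and how it maps onto the enum; the concrete values and the helper function name are illustrative only.

    // Sketch, assuming `use crate::configuration::ResourcePool;` is in scope.
    fn example_parse_config() -> Result<(), serde_yaml::Error> {
        let yaml = r#"
    - memory_mib: 1024
      cpu_count: 2
    - memory_mib: 512
      cpu_pool: "1,5"
    "#;
        // The first entry matches the CpuCount variant, the second one CpuPool.
        let pool: Vec<ResourcePool> = serde_yaml::from_str(yaml)?;
        assert_eq!(pool.len(), 2);
        Ok(())
    }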
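Note on the CPU-list helpers: parse_cpu_list expands comma-separated ids and dash ranges into a CpuSet, and format_cpu_list collapses consecutive ids back into ranges and appends a trailing newline, matching the ne_cpus sysfs format. A minimal round-trip sketch, assuming the implementations in allocator/src/resources/cpu.rs (the helper name is illustrative):

    fn example_cpu_list_round_trip() -> Result<(), crate::resources::cpu::Error> {
        use crate::resources::cpu;

        let set = cpu::parse_cpu_list("1,5,8-10\n")?;   // -> {1, 5, 8, 9, 10}
        assert_eq!(set.len(), 5);
        assert_eq!(cpu::format_cpu_list(&set), "1,5,8-10\n");
        Ok(())
    }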
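Note on CPU selection: find_suitable_cpu_sets only hands out complete physical cores (a core is taken only when all of its hyperthread siblings are in the candidate set) and always withholds CPU 0's core for the parent instance. On a hypothetical machine with CPUs 0-3 on NUMA node 0, CPUs 4-7 on node 1, and sibling pairs (0,2), (1,3), (4,6), (5,7), a request for 2 CPUs could therefore yield {1,3} on node 0 (core (0,2) is excluded) and one full core such as {4,6} on node 1; which of those candidates is actually used then depends on the huge-page allocation and on the target_numa restriction.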
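Note on the huge-page accounting: configure_huge_pages in allocator/src/resources/huge_pages.rs works in kB and walks the node's huge-page sizes from largest to smallest, so larger pages are preferred and the remainder is filled with smaller ones. A worked example for a hypothetical node that exposes 1048576 kB (1 GiB) and 2048 kB (2 MiB) pages:

    memory_mib = 1536            ->  remaining = 1536 * 1024 = 1,572,864 kB
    1,048,576 kB pages: 1,572,864 / 1,048,576 = 1 page      ->  remaining = 524,288 kB
        2,048 kB pages:   524,288 /     2,048 = 256 pages   ->  remaining = 0 kB

If the remainder is still non-zero after the smallest page size has been tried, the pages written so far are released and InsufficientMemory is returned.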