Skip to content
Draft
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 3 additions & 2 deletions api/axfeat/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ documentation = "https://arceos-org.github.io/arceos/axfeat/index.html"
default = []

# Multicore
smp = ["axhal/smp", "axruntime/smp", "axtask?/smp", "kspin/smp"]
smp = ["axhal/smp", "axruntime/smp", "axtask?/smp", "kspin/smp", "axmm/smp", "axipi/smp", "ipi"]

# Floating point/SIMD
fp-simd = ["axhal/fp-simd"]
Expand Down Expand Up @@ -116,7 +116,8 @@ axinput = { workspace = true, optional = true }
axipi = { workspace = true, optional = true }
axlog.workspace = true
axnet = { workspace = true, optional = true }
axruntime.workspace = true
axmm = {workspace = true, optional = true}
axruntime = { workspace = true }
axsync = { workspace = true, optional = true }
axtask = { workspace = true, optional = true }
kspin = { workspace = true, optional = true }
5 changes: 5 additions & 0 deletions modules/axipi/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ documentation = "https://arceos-org.github.io/arceos/axipi/index.html"

[features]
default = []
smp = []

[dependencies]
axconfig.workspace = true
Expand All @@ -19,3 +20,7 @@ kspin.workspace = true
lazyinit.workspace = true
log.workspace = true
percpu.workspace = true
crate_interface = "0.1.4"
axcpu = { git = "https://github.com/arceos-org/axcpu.git", tag = "dev-v03" }
Copy link

Copilot AI Dec 23, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The axcpu dependency is added but does not appear to be used in any of the modified axipi source files. Consider removing this dependency if it's not needed, or add it when it's actually required.

Suggested change
axcpu = { git = "https://github.com/arceos-org/axcpu.git", tag = "dev-v03" }

Copilot uses AI. Check for mistakes.
axtask = { workspace = true, features = ["task-ext"] }
page_table_multiarch.workspace = true
3 changes: 3 additions & 0 deletions modules/axipi/src/event.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
use alloc::{boxed::Box, sync::Arc};
use core::sync::atomic::AtomicBool;

/// A callback function that will be called when an [`IpiEvent`] is received and handled.
pub struct Callback(Box<dyn FnOnce()>);
Expand Down Expand Up @@ -50,8 +51,10 @@ impl<T: Fn() + 'static> From<T> for MulticastCallback {

/// An IPI event that is sent from a source CPU to the target CPU.
pub struct IpiEvent {
    /// A static label for the event (carried with the event; the handler
    /// currently pops it as `_name`, i.e. it is informational only).
    pub name: &'static str,
    /// The source CPU ID that sent the IPI event.
    pub src_cpu_id: usize,
    /// The callback function that will be called when the IPI event is handled.
    pub callback: Callback,
    /// Completion flag the handler sets after running the callback; `None`
    /// when the sender does not wait for completion.
    pub done: Option<Arc<AtomicBool>>,
}
183 changes: 170 additions & 13 deletions modules/axipi/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,16 +6,34 @@
extern crate log;
extern crate alloc;

use axhal::irq::{IPI_IRQ, IpiTarget};
use axhal::percpu::this_cpu_id;
use alloc::{sync::Arc, vec::Vec};
use core::sync::atomic::{AtomicBool, Ordering};

use axhal::{
irq::{IPI_IRQ, IpiTarget},
percpu::this_cpu_id,
};
use axtask::AxCpuMask;
use kspin::SpinNoIrq;
use lazyinit::LazyInit;
use queue::IpiEventQueue;

use crate::event::{Callback, MulticastCallback};

mod event;
mod queue;
#[cfg(feature = "smp")]
mod tlb;

pub use event::{Callback, MulticastCallback};
use queue::IpiEventQueue;
/// Set once all secondary CPUs have finished booting; checked before
/// attempting cross-CPU operations (e.g. remote TLB shootdowns).
static SECONDARY_CPUS_STARTED: AtomicBool = AtomicBool::new(false);

/// Marks that all secondary CPUs have completed their startup sequence.
///
/// Uses `Release` ordering so that everything done before this call is
/// visible to readers that observe the flag via [`secondary_cpus_ready`].
pub fn start_secondary_cpus_done() {
    SECONDARY_CPUS_STARTED.store(true, Ordering::Release);
}

/// Returns `true` once all secondary CPUs are up (paired `Acquire` load).
pub fn secondary_cpus_ready() -> bool {
    SECONDARY_CPUS_STARTED.load(Ordering::Acquire)
}

#[percpu::def_percpu]
static IPI_EVENT_QUEUE: LazyInit<SpinNoIrq<IpiEventQueue>> = LazyInit::new();
Expand All @@ -28,34 +46,161 @@ pub fn init() {
}

/// Executes a callback on the specified destination CPU via IPI.
pub fn run_on_cpu<T: Into<Callback>>(dest_cpu: usize, callback: T) {
info!("Send IPI event to CPU {dest_cpu}");
pub fn run_on_cpu<T: Into<Callback>>(name: &'static str, dest_cpu: usize, callback: T, wait: bool) {
info!("Send IPI event to CPU {}", dest_cpu);
if dest_cpu == this_cpu_id() {
// Execute callback on current CPU immediately
callback.into().call();
} else {
let done_flag = if wait {
Some(Arc::new(AtomicBool::new(false)))
} else {
None
};
unsafe { IPI_EVENT_QUEUE.remote_ref_raw(dest_cpu) }
.lock()
.push(this_cpu_id(), callback.into());
.push(name, this_cpu_id(), callback.into(), done_flag.clone());
axhal::irq::send_ipi(IPI_IRQ, IpiTarget::Other { cpu_id: dest_cpu });
if wait {
if let Some(df) = done_flag {
while !df.load(Ordering::Acquire) {
core::hint::spin_loop();
}
}
}
}
}

pub fn run_on_bitmask_except_self<T: Into<MulticastCallback>>(
name: &'static str,
callback: T,
cpu_mask: AxCpuMask,
wait: bool,
) {
let current_cpu_id = this_cpu_id();
let cpu_num = axconfig::plat::CPU_NUM;
let callback = callback.into();

let mut done_flags: Vec<Arc<AtomicBool>> = Vec::new(cpu_num - 1);

for cpu_id in 0..cpu_num {
if cpu_id != current_cpu_id && cpu_mask.get(cpu_id) {
let done_flag = if wait {
Some(Arc::new(AtomicBool::new(false)))
} else {
None
};
if let Some(df) = &done_flag {
done_flags.push(df.clone());
}

unsafe { IPI_EVENT_QUEUE.remote_ref_raw(cpu_id) }
.lock()
.push(
name,
current_cpu_id,
callback.clone().into_unicast(),
done_flag,
);
}
}
for cpu_id in 0..cpu_num {
if cpu_id != current_cpu_id && cpu_mask.get(cpu_id) {
axhal::irq::send_ipi(IPI_IRQ, IpiTarget::Other { cpu_id });
}
}
if wait {
for df in done_flags {
while !df.load(Ordering::Acquire) {
core::hint::spin_loop();
}
}
}
}

pub fn run_on_each_cpu_except_self<T: Into<MulticastCallback>>(
name: &'static str,
callback: T,
wait: bool,
) {
let current_cpu_id = this_cpu_id();
let cpu_num = axconfig::plat::CPU_NUM;
let callback = callback.into();

let mut done_flags: Vec<Arc<AtomicBool>> = Vec::new(cpu_num - 1);
Copy link

Copilot AI Dec 23, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Vec::new() does not accept a capacity parameter. Use Vec::with_capacity(cpu_num - 1) instead to preallocate the vector with the correct capacity.

Copilot uses AI. Check for mistakes.

// Push the callback to all other CPUs' IPI event queues
for cpu_id in 0..cpu_num {
if cpu_id != current_cpu_id {
let done_flag = if wait {
Some(Arc::new(AtomicBool::new(false)))
} else {
None
};
if let Some(df) = &done_flag {
done_flags.push(df.clone());
}

unsafe { IPI_EVENT_QUEUE.remote_ref_raw(cpu_id) }
.lock()
.push(
name,
current_cpu_id,
callback.clone().into_unicast(),
done_flag,
);
}
}
if done_flags.is_empty() {
return;
Copy link

Copilot AI Dec 23, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The early return for empty done_flags in run_on_each_cpu_except_self prevents IPI from being sent when wait is false. This logic appears inconsistent with run_on_bitmask_except_self (lines 107-111) which always sends IPIs regardless of wait flag. Consider whether IPIs should be sent even when wait=false and done_flags is empty.

Copilot uses AI. Check for mistakes.
}
// Send IPI to all other CPUs to trigger their callbacks
axhal::irq::send_ipi(
IPI_IRQ,
IpiTarget::AllExceptCurrent {
cpu_id: current_cpu_id,
cpu_num,
},
);
if wait {
for df in done_flags {
while !df.load(Ordering::Acquire) {
core::hint::spin_loop();
}
}
}
}

/// Executes a callback on all other CPUs via IPI.
pub fn run_on_each_cpu<T: Into<MulticastCallback>>(callback: T) {
pub fn run_on_each_cpu<T: Into<MulticastCallback>>(name: &'static str, callback: T, wait: bool) {
info!("Send IPI event to all other CPUs");
let current_cpu_id = this_cpu_id();
let cpu_num = axconfig::plat::CPU_NUM;
let callback = callback.into();

// Execute callback on current CPU immediately
callback.clone().call();

let mut done_flags: Vec<Arc<AtomicBool>> = Vec::new(cpu_num);
Copy link

Copilot AI Dec 23, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Vec::new() does not accept a capacity parameter. Use Vec::with_capacity(cpu_num) instead to preallocate the vector with the correct capacity.

Copilot uses AI. Check for mistakes.
// Push the callback to all other CPUs' IPI event queues
for cpu_id in 0..cpu_num {
if cpu_id != current_cpu_id {
let done_flag = if wait {
Some(Arc::new(AtomicBool::new(false)))
} else {
None
};
if let Some(df) = &done_flag {
done_flags.push(df.clone());
}
unsafe { IPI_EVENT_QUEUE.remote_ref_raw(cpu_id) }
.lock()
.push(current_cpu_id, callback.clone().into_unicast());
.push(
name,
current_cpu_id,
callback.clone().into_unicast(),
done_flag,
);
}
}
// Send IPI to all other CPUs to trigger their callbacks
Expand All @@ -66,15 +211,27 @@ pub fn run_on_each_cpu<T: Into<MulticastCallback>>(callback: T) {
cpu_num,
},
);
if wait {
for df in done_flags {
while !df.load(Ordering::Acquire) {
core::hint::spin_loop();
}
}
}
}

/// The handler for IPI events. It retrieves the events from the queue and calls the corresponding callbacks.
/// The handler for IPI events. It retrieves the events from the queue and calls
/// the corresponding callbacks.
pub fn ipi_handler() {
while let Some((src_cpu_id, callback)) = unsafe { IPI_EVENT_QUEUE.current_ref_mut_raw() }
.lock()
.pop_one()
while let Some((_name, src_cpu_id, callback, done)) =
unsafe { IPI_EVENT_QUEUE.current_ref_raw() }
.lock()
.pop_one()
{
debug!("Received IPI event from CPU {src_cpu_id}");
callback.call();
if let Some(done) = done {
done.store(true, Ordering::Release);
}
}
}
17 changes: 13 additions & 4 deletions modules/axipi/src/queue.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
use alloc::collections::VecDeque;
use alloc::{collections::VecDeque, sync::Arc};
use core::sync::atomic::AtomicBool;

use crate::event::{Callback, IpiEvent};

Expand Down Expand Up @@ -26,20 +27,28 @@ impl IpiEventQueue {
}

/// Push a new event into the queue.
pub fn push(&mut self, src_cpu_id: usize, callback: Callback) {
pub fn push(
&mut self,
name: &'static str,
src_cpu_id: usize,
callback: Callback,
done: Option<Arc<AtomicBool>>,
) {
self.events.push_back(IpiEvent {
name,
src_cpu_id,
callback,
done,
});
}

/// Try to pop the *oldest* pending event from the queue (FIFO order —
/// `push` appends with `push_back` and this pops with `pop_front`; the
/// previous doc saying "latest" was inaccurate).
///
/// Return `None` if no event is available.
#[must_use]
pub fn pop_one(&mut self) -> Option<(&'static str, usize, Callback, Option<Arc<AtomicBool>>)> {
    // `Option::map` replaces the manual if-let/else (clippy::manual_map).
    self.events
        .pop_front()
        .map(|e| (e.name, e.src_cpu_id, e.callback, e.done))
Expand Down
30 changes: 30 additions & 0 deletions modules/axipi/src/tlb.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
use axhal::mem::VirtAddr;
use axtask::{TaskExt, current};
use page_table_multiarch::TlbFlushIf;

use crate::{
    MulticastCallback, run_on_bitmask_except_self, run_on_each_cpu_except_self,
    secondary_cpus_ready,
};

/// Provides the cross-CPU TLB shootdown implementation consumed by
/// `page_table_multiarch` through `crate_interface`.
struct TlbFlushImpl;

#[crate_interface::impl_interface]
impl TlbFlushIf for TlbFlushImpl {
    /// Flushes the TLB entry for `vaddr` (or the entire TLB when `None`)
    /// on every CPU that may hold a stale mapping.
    fn flush_all(vaddr: Option<VirtAddr>) {
        // Always flush the local CPU first, and honor `vaddr` rather than
        // unconditionally flushing everything. (Previously the single-core
        // path ignored `vaddr`, and the multicore path never flushed the
        // local CPU at all — both flagged in review.)
        axhal::asm::flush_tlb(vaddr);

        if axconfig::plat::CPU_NUM == 1 || !secondary_cpus_ready() {
            // No other CPUs are running yet; the local flush above suffices.
            return;
        }

        let callback = MulticastCallback::new(move || {
            axhal::asm::flush_tlb(vaddr);
        });
        if let Some(ext) = current().task_ext() {
            // Only CPUs the current task has run on can hold stale entries
            // for its address space; limit the shootdown to that mask.
            run_on_bitmask_except_self("flush", callback, ext.on_cpu_mask(), true);
        } else {
            // No task context — conservatively shoot down all other CPUs.
            run_on_each_cpu_except_self("flush", callback, true);
        }
    }
}
1 change: 1 addition & 0 deletions modules/axmm/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ documentation = "https://arceos-org.github.io/arceos/axmm/index.html"
[features]
default = []
copy = ["page_table_multiarch/copy-from"]
smp = ["page_table_multiarch/smp"]

[dependencies]
axalloc = { workspace = true }
Expand Down
11 changes: 8 additions & 3 deletions modules/axruntime/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -230,6 +230,8 @@ pub fn rust_main(cpu_id: usize, arg: usize) -> ! {
while !is_init_ok() {
core::hint::spin_loop();
}
#[cfg(all(feature = "smp", feature = "ipi"))]
axipi::start_secondary_cpus_done();

unsafe { main() };

Expand Down Expand Up @@ -300,9 +302,12 @@ fn init_interrupt() {
});

#[cfg(feature = "ipi")]
axhal::irq::register(axhal::irq::IPI_IRQ, || {
axipi::ipi_handler();
});
{
axipi::init();
axhal::irq::register(axhal::irq::IPI_IRQ, || {
axipi::ipi_handler();
});
}

// Enable IRQs before starting app
axhal::asm::enable_irqs();
Expand Down
Loading