diff --git a/Cargo.lock b/Cargo.lock index b798cba0a..f72c95bdf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4150,10 +4150,13 @@ name = "tlb_shootdown" version = "0.1.0" dependencies = [ "apic", + "cpu", + "interrupts", "irq_safety", "log", "memory", - "x86_64", + "memory_aarch64", + "memory_x86_64", ] [[package]] diff --git a/kernel/captain/Cargo.toml b/kernel/captain/Cargo.toml index 17cffb52f..f41818fa2 100644 --- a/kernel/captain/Cargo.toml +++ b/kernel/captain/Cargo.toml @@ -12,6 +12,7 @@ irq_safety = { git = "https://github.com/theseus-os/irq_safety" } dfqueue = { path = "../../libs/dfqueue", version = "0.1.0" } multicore_bringup = { path = "../multicore_bringup" } early_printer = { path = "../early_printer" } +tlb_shootdown = { path = "../tlb_shootdown" } kernel_config = { path = "../kernel_config" } interrupts = { path = "../interrupts" } scheduler = { path = "../scheduler" } @@ -30,7 +31,6 @@ logger_x86_64 = { path = "../logger_x86_64" } window_manager = { path = "../window_manager" } first_application = { path = "../first_application" } exceptions_full = { path = "../exceptions_full" } -tlb_shootdown = { path = "../tlb_shootdown" } multiple_heaps = { path = "../multiple_heaps" } tsc = { path = "../tsc" } acpi = { path = "../acpi" } diff --git a/kernel/captain/src/lib.rs b/kernel/captain/src/lib.rs index 5bab4f58e..9cc303ab4 100644 --- a/kernel/captain/src/lib.rs +++ b/kernel/captain/src/lib.rs @@ -156,8 +156,6 @@ pub fn init( // Now that other CPUs are fully booted, init TLB shootdowns, // which rely on Local APICs to broadcast an IPI to all running CPUs. - // arch-gate: no multicore support on aarch64 at the moment - #[cfg(target_arch = "x86_64")] tlb_shootdown::init(); // Initialize the per-core heaps. 
diff --git a/kernel/interrupts/src/aarch64/mod.rs b/kernel/interrupts/src/aarch64/mod.rs index 852664689..40ddd570c 100644 --- a/kernel/interrupts/src/aarch64/mod.rs +++ b/kernel/interrupts/src/aarch64/mod.rs @@ -9,8 +9,8 @@ use tock_registers::interfaces::Readable; use tock_registers::registers::InMemoryRegister; use kernel_config::time::CONFIG_TIMESLICE_PERIOD_MICROSECONDS; +use gic::{ArmGic, InterruptNumber, IpiTargetCpu, Version as GicVersion}; use arm_boards::{BOARD_CONFIG, InterruptControllerConfig}; -use gic::{ArmGic, InterruptNumber, Version as GicVersion}; use irq_safety::{RwLockIrqSafe, MutexIrqSafe}; use memory::get_kernel_mmi_ref; use log::{info, error}; @@ -30,6 +30,13 @@ static INTERRUPT_CONTROLLER: MutexIrqSafe> = MutexIrqSafe::new(No // aarch64 manuals define the default timer IRQ number to be 30. pub const CPU_LOCAL_TIMER_IRQ: InterruptNumber = 30; +/// The IRQ/IPI number for TLB Shootdowns +/// +/// Note: This is arbitrarily defined in the range 0..16, +/// which is reserved for IPIs (SGIs - for software generated +/// interrupts - in GIC terminology). +pub const TLB_SHOOTDOWN_IPI: InterruptNumber = 2; + const MAX_IRQ_NUM: usize = 256; // Singleton which acts like an x86-style Interrupt Descriptor Table: @@ -175,7 +182,7 @@ pub fn init() -> Result<(), &'static str> { } /// This function registers an interrupt handler for the CPU-local -/// timer and handles INTERRUPT_CONTROLLER configuration for the timer interrupt. +/// timer and handles interrupt controller configuration for the timer interrupt. pub fn init_timer(timer_tick_handler: HandlerFunc) -> Result<(), &'static str> { // register/deregister the handler for the timer IRQ. 
if let Err(existing_handler) = register_interrupt(CPU_LOCAL_TIMER_IRQ, timer_tick_handler) { @@ -196,6 +203,30 @@ pub fn init_timer(timer_tick_handler: HandlerFunc) -> Result<(), &'static str> { Ok(()) } +/// This function registers an interrupt handler for an inter-processor interrupt +/// and handles interrupt controller configuration for that interrupt. +pub fn setup_ipi_handler(handler: HandlerFunc, irq_num: InterruptNumber) -> Result<(), &'static str> { + assert!(irq_num < 16, "Inter-processor interrupts must have a number in the range 0..16"); + + // register the handler + if let Err(existing_handler) = register_interrupt(irq_num, handler) { + if handler as *const HandlerFunc != existing_handler { + return Err("A different interrupt handler has already been setup for that IPI"); + } + } + + // Route the IRQ to this core (implicit as irq_num < 32) & Enable the interrupt. + { + let mut int_ctrl = INTERRUPT_CONTROLLER.lock(); + let int_ctrl = int_ctrl.as_mut().ok_or("INTERRUPT_CONTROLLER is uninitialized")?; + + // enable routing of this interrupt + int_ctrl.set_interrupt_state(irq_num, true); + } + + Ok(()) +} + /// Disables the timer, schedules its next tick, and re-enables it pub fn schedule_next_timer_tick() { enable_timer(false); @@ -275,6 +306,14 @@ pub fn deregister_interrupt(irq_num: InterruptNumber, func: HandlerFunc) -> Resu } } +/// Broadcast an Inter-Processor Interrupt to all other +/// cores in the system +pub fn send_ipi_to_all_other_cpus(irq_num: InterruptNumber) { + let mut int_ctrl = INTERRUPT_CONTROLLER.lock(); + let int_ctrl = int_ctrl.as_mut().expect("INTERRUPT_CONTROLLER is uninitialized"); + int_ctrl.send_ipi(irq_num, IpiTargetCpu::AllOtherCpus); +} + /// Send an "end of interrupt" signal, notifying the interrupt chip that /// the given interrupt request `irq` has been serviced. 
pub fn eoi(irq_num: InterruptNumber) { @@ -395,7 +434,7 @@ extern "C" fn current_elx_synchronous(e: &mut ExceptionContext) { extern "C" fn current_elx_irq(exc: &mut ExceptionContext) { // read IRQ num // read IRQ priority - // ackownledge IRQ to the INTERRUPT_CONTROLLER + // acknowledge IRQ to the interrupt controller let (irq_num, _priority) = { let mut int_ctrl = INTERRUPT_CONTROLLER.lock(); let int_ctrl = int_ctrl.as_mut().expect("INTERRUPT_CONTROLLER is uninitialized"); diff --git a/kernel/tlb_shootdown/Cargo.toml b/kernel/tlb_shootdown/Cargo.toml index 7682a40f4..3728d1f53 100644 --- a/kernel/tlb_shootdown/Cargo.toml +++ b/kernel/tlb_shootdown/Cargo.toml @@ -3,23 +3,21 @@ authors = ["Kevin Boos "] name = "tlb_shootdown" description = "Routines for handling TLB shootdowns" version = "0.1.0" +edition = "2021" [dependencies] -x86_64 = "0.14.8" - - -[dependencies.log] -version = "0.4.8" - -[dependencies.irq_safety] -git = "https://github.com/theseus-os/irq_safety" - -[dependencies.memory] -path = "../memory" - -[dependencies.apic] -path = "../apic" - +log = "0.4.8" +irq_safety = { git = "https://github.com/theseus-os/irq_safety" } +memory = { path = "../memory" } +cpu = { path = "../cpu" } + +[target.'cfg(target_arch = "x86_64")'.dependencies] +memory_x86_64 = { path = "../memory_x86_64" } +apic = { path = "../apic" } + +[target.'cfg(target_arch = "aarch64")'.dependencies] +memory_aarch64 = { path = "../memory_aarch64" } +interrupts = { path = "../interrupts" } [lib] crate-type = ["rlib"] diff --git a/kernel/tlb_shootdown/src/lib.rs b/kernel/tlb_shootdown/src/lib.rs index 6ce1aa732..87ae397dc 100644 --- a/kernel/tlb_shootdown/src/lib.rs +++ b/kernel/tlb_shootdown/src/lib.rs @@ -2,19 +2,17 @@ #![no_std] -// #[macro_use] extern crate log; -extern crate irq_safety; -extern crate memory; -extern crate apic; -extern crate x86_64; - - use core::sync::atomic::{AtomicBool, AtomicU32, Ordering}; use irq_safety::{hold_interrupts, RwLockIrqSafe}; use memory::PageRange; -use
apic::{LocalApic, get_my_apic, cpu_count, LapicIpiDestination}; +use cpu::cpu_count; use core::hint::spin_loop; +#[cfg(target_arch = "x86_64")] +use memory_x86_64::tlb_flush_virt_addr; + +#[cfg(target_arch = "aarch64")] +use memory_aarch64::tlb_flush_virt_addr; /// The number of remaining cores that still need to handle the current TLB shootdown IPI pub static TLB_SHOOTDOWN_IPI_COUNT: AtomicU32 = AtomicU32::new(0); @@ -28,20 +26,11 @@ pub static TLB_SHOOTDOWN_IPI_PAGES: RwLockIrqSafe> = RwLockIrq /// TODO: redesign this, it's weird and silly just to set one callback. pub fn init() { memory::set_broadcast_tlb_shootdown_cb(broadcast_tlb_shootdown); -} - -/// Broadcasts TLB shootdown IPI to all other AP cores. -/// Do not invoke this directly, but rather pass it as a callback to the memory subsystem, -/// which will invoke it as needed (on remap/unmap operations). -fn broadcast_tlb_shootdown(pages_to_invalidate: PageRange) { - if let Some(my_lapic) = get_my_apic() { - // log::info!("broadcast_tlb_shootdown(): AP {}, pages: {:?}", my_lapic.read().apic_id(), pages_to_invalidate); - send_tlb_shootdown_ipi(&mut my_lapic.write(), pages_to_invalidate); - } + #[cfg(target_arch = "aarch64")] + interrupts::setup_ipi_handler(tlb_shootdown_ipi_handler, interrupts::TLB_SHOOTDOWN_IPI).unwrap(); } - /// Handles a TLB shootdown ipi by flushing the `VirtualAddress`es /// covered by the given range of `pages_to_invalidate`. /// @@ -50,26 +39,34 @@ pub fn handle_tlb_shootdown_ipi(pages_to_invalidate: PageRange) { // log::trace!("handle_tlb_shootdown_ipi(): AP {}, pages: {:?}", apic::current_cpu(), pages_to_invalidate); for page in pages_to_invalidate { - x86_64::instructions::tlb::flush(x86_64::VirtAddr::new(page.start_address().value() as u64)); + tlb_flush_virt_addr(page.start_address()); } + TLB_SHOOTDOWN_IPI_COUNT.fetch_sub(1, Ordering::SeqCst); } +/// Broadcasts TLB shootdown IPI to all other AP cores. 
+/// +/// Do not invoke this directly, but rather pass it as a callback to the memory subsystem, +/// which will invoke it as needed (on remap/unmap operations). +/// /// Sends an IPI to all other cores (except me) to trigger /// a TLB flush of the given pages' virtual addresses. -pub fn send_tlb_shootdown_ipi(my_lapic: &mut LocalApic, pages_to_invalidate: PageRange) { +fn broadcast_tlb_shootdown(pages_to_invalidate: PageRange) { // skip sending IPIs if there are no other cores running let cpu_count = cpu_count(); if cpu_count <= 1 { return; } - // log::trace!("send_tlb_shootdown_ipi(): from AP {}, cpu_count: {}, {:?}", my_lapic.apic_id(), cpu_count, pages_to_invalidate); + if false { + log::trace!("broadcast_tlb_shootdown(): from CPU {:?}, cpu_count: {}, {:?}", cpu::current_cpu(), cpu_count, pages_to_invalidate); + } // interrupts must be disabled here, because this IPI sequence must be fully synchronous with other cores, // and we wouldn't want this core to be interrupted while coordinating IPI responses across multiple cores. - let _held_ints = hold_interrupts(); + let _held_ints = hold_interrupts(); // acquire lock // TODO: add timeout!!
@@ -85,8 +82,16 @@ pub fn send_tlb_shootdown_ipi(my_lapic: &mut LocalApic, pages_to_invalidate: Pag *TLB_SHOOTDOWN_IPI_PAGES.write() = Some(pages_to_invalidate); TLB_SHOOTDOWN_IPI_COUNT.store(cpu_count - 1, Ordering::SeqCst); // -1 to exclude this core - // let's try to use NMI instead, since it will interrupt everyone forcibly and result in the fastest handling - my_lapic.send_nmi_ipi(LapicIpiDestination::AllButMe); // send IPI to all other cores but this one + #[cfg(target_arch = "x86_64")] { + let my_lapic = apic::get_my_apic() + .expect("BUG: broadcast_tlb_shootdown(): couldn't get LocalApic"); + + // use NMI, since it will interrupt everyone forcibly and result in the fastest handling + my_lapic.write().send_nmi_ipi(apic::LapicIpiDestination::AllButMe); // send IPI to all other cores but this one + } + + #[cfg(target_arch = "aarch64")] + interrupts::send_ipi_to_all_other_cpus(interrupts::TLB_SHOOTDOWN_IPI); // wait for all other cores to handle this IPI // it must be a blocking, synchronous operation to ensure stale TLB entries don't cause problems @@ -101,3 +106,16 @@ pub fn send_tlb_shootdown_ipi(my_lapic: &mut LocalApic, pages_to_invalidate: Pag // release lock TLB_SHOOTDOWN_IPI_LOCK.store(false, Ordering::Release); } + +/// Interrupt Handler for TLB Shootdowns on aarch64 +#[cfg(target_arch = "aarch64")] +extern "C" fn tlb_shootdown_ipi_handler(_exc: &interrupts::ExceptionContext) -> interrupts::EoiBehaviour { + if let Some(pages_to_invalidate) = TLB_SHOOTDOWN_IPI_PAGES.read().clone() { + // trace!("tlb_shootdown_ipi_handler (CPU {:?})", cpu::current_cpu()); + handle_tlb_shootdown_ipi(pages_to_invalidate); + } else { + panic!("Unexpected TLB Shootdown IPI!"); + } + + interrupts::EoiBehaviour::CallerMustSignalEoi +}