diff --git a/Cargo.lock b/Cargo.lock index 37c05c2764..cde217fe8e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -325,7 +325,6 @@ dependencies = [ "path", "pmu_x86", "rendezvous", - "runqueue", "scheduler", "simple_ipc", "spawn", @@ -1102,7 +1101,6 @@ dependencies = [ "log", "memory", "pmu_x86", - "runqueue", "signal_handler", "stack_trace", "task", @@ -1450,7 +1448,6 @@ dependencies = [ "libtest", "log", "qp-trie", - "runqueue", "spawn", ] @@ -1699,7 +1696,6 @@ dependencies = [ "pic", "pit_clock_basic", "rand", - "runqueue", "spin 0.9.4", "sync_irq", "virtual_nic", @@ -1747,7 +1743,6 @@ dependencies = [ "app_io", "debugit", "getopts", - "runqueue", "task", ] @@ -1823,7 +1818,6 @@ dependencies = [ "log", "memory", "pmu_x86", - "runqueue", "spin 0.9.4", "task", ] @@ -2559,7 +2553,6 @@ dependencies = [ "log", "memory", "mod_mgmt", - "runqueue", "stack_trace", "stack_trace_frame_pointers", "task", @@ -3039,24 +3032,11 @@ dependencies = [ [[package]] name = "rq" version = "0.1.0" -dependencies = [ - "apic", - "app_io", - "getopts", - "runqueue", - "task", -] - -[[package]] -name = "rq_access_eval" -version = "0.1.0" dependencies = [ "app_io", "cpu", "getopts", - "irq_safety", - "runqueue", - "time", + "task", ] [[package]] @@ -3069,7 +3049,6 @@ dependencies = [ "hpet", "libtest", "log", - "runqueue", "spawn", "task", ] @@ -3105,55 +3084,6 @@ dependencies = [ "x86_64", ] -[[package]] -name = "runqueue" -version = "0.1.0" -dependencies = [ - "atomic_linked_list", - "cfg-if 1.0.0", - "lazy_static", - "log", - "runqueue_epoch", - "runqueue_priority", - "runqueue_round_robin", - "single_simd_task_optimization", - "sync_preemption", - "task", -] - -[[package]] -name = "runqueue_epoch" -version = "0.1.0" -dependencies = [ - "atomic_linked_list", - "log", - "single_simd_task_optimization", - "sync_preemption", - "task", -] - -[[package]] -name = "runqueue_priority" -version = "0.1.0" -dependencies = [ - "atomic_linked_list", - "log", - "sync_preemption", - "task", - "time", -] - -[[package]] -name = "runqueue_round_robin" -version = "0.1.0" -dependencies = [ - "atomic_linked_list", - "log", - "single_simd_task_optimization", - "sync_preemption", - "task", -] - [[package]] name = "rustc-demangle" version = "0.1.19" @@ -3192,9 +3122,6 @@ dependencies = [ "cpu", "interrupts", "log", - "scheduler_epoch", - "scheduler_priority", - "scheduler_round_robin", "sleep", "task", "x86_64", @@ -3205,8 +3132,6 @@ name = "scheduler_epoch" version = "0.1.0" dependencies = [ "log", - "runqueue", - "runqueue_epoch", "spin 0.9.4", "task", ] @@ -3228,7 +3153,6 @@ name = "scheduler_priority" version = "0.1.0" dependencies = [ "log", - "runqueue_priority", "task", "time", ] @@ -3238,8 +3162,6 @@ name = "scheduler_round_robin" version = "0.1.0" dependencies = [ "log", - "runqueue", - "runqueue_round_robin", "spin 0.9.4", "task", ] @@ -3384,7 +3306,6 @@ dependencies = [ "log", "path", "root", - "runqueue", "scheduler", "spawn", "spin 0.9.4", @@ -3499,6 +3420,7 @@ name = "spawn" version = "0.1.0" dependencies = [ "catch_unwind", + "cfg-if 1.0.0", "context_switch", "cpu", "debugit", @@ -3513,8 +3435,10 @@ dependencies = [ "no_drop", "path", "preemption", - "runqueue", "scheduler", + "scheduler_epoch", + "scheduler_priority", + "scheduler_round_robin", "spin 0.9.4", "stack", "task", @@ -3747,6 +3671,7 @@ dependencies = [ "stack", "static_assertions", "sync_irq", + "sync_preemption", "task_struct", "waker_generic", ] @@ -4061,7 +3986,6 @@ dependencies = [ "raw_mode", "rm", "rq", - "rq_access_eval", "rq_eval", "scheduler_eval", 
"seconds_counter", diff --git a/applications/bm/Cargo.toml b/applications/bm/Cargo.toml index ddce10d3df..b18fa0d227 100644 --- a/applications/bm/Cargo.toml +++ b/applications/bm/Cargo.toml @@ -33,9 +33,6 @@ path = "../../kernel/spawn" [dependencies.path] path = "../../kernel/path" -[dependencies.runqueue] -path = "../../kernel/runqueue" - [dependencies.heapfile] path = "../../kernel/heapfile" # [dependencies.application_main_fn] diff --git a/applications/bm/src/lib.rs b/applications/bm/src/lib.rs index 67efeae3a1..fab5fd45cf 100644 --- a/applications/bm/src/lib.rs +++ b/applications/bm/src/lib.rs @@ -18,7 +18,6 @@ extern crate apic; extern crate cpu; extern crate spawn; extern crate path; -extern crate runqueue; extern crate heapfile; extern crate scheduler; extern crate libtest; diff --git a/applications/heap_eval/Cargo.toml b/applications/heap_eval/Cargo.toml index 42162f7d42..5fa0114de9 100644 --- a/applications/heap_eval/Cargo.toml +++ b/applications/heap_eval/Cargo.toml @@ -23,9 +23,6 @@ path = "../../kernel/apic" [dependencies.cpu] path = "../../kernel/cpu" -[dependencies.runqueue] -path = "../../kernel/runqueue" - [dependencies.hashbrown] version = "0.11.2" features = ["nightly"] diff --git a/applications/heap_eval/src/lib.rs b/applications/heap_eval/src/lib.rs index 3f7eb9c753..435e08e91a 100644 --- a/applications/heap_eval/src/lib.rs +++ b/applications/heap_eval/src/lib.rs @@ -7,7 +7,6 @@ extern crate hpet; extern crate hashbrown; extern crate qp_trie; extern crate apic; -extern crate runqueue; extern crate libtest; extern crate spawn; extern crate cpu; diff --git a/applications/kill/Cargo.toml b/applications/kill/Cargo.toml index 337337aa8b..b3952482b1 100644 --- a/applications/kill/Cargo.toml +++ b/applications/kill/Cargo.toml @@ -15,8 +15,5 @@ path = "../../kernel/app_io" [dependencies.task] path = "../../kernel/task" -[dependencies.runqueue] -path = "../../kernel/runqueue" - # [dependencies.application_main_fn] # path = "../../compiler_plugins" diff --git a/applications/kill/src/lib.rs b/applications/kill/src/lib.rs index 9b51a27bb6..11cdee2271 100755 --- a/applications/kill/src/lib.rs +++ b/applications/kill/src/lib.rs @@ -4,7 +4,6 @@ extern crate alloc; // #[macro_use] extern crate debugit; extern crate task; -extern crate runqueue; extern crate getopts; use getopts::Options; diff --git a/applications/ps/src/lib.rs b/applications/ps/src/lib.rs index b4bd2f67fb..6bb2cb151b 100644 --- a/applications/ps/src/lib.rs +++ b/applications/ps/src/lib.rs @@ -60,7 +60,7 @@ pub fn main(args: Vec) -> isize { else {" "} ; #[cfg(any(epoch_scheduler, priority_scheduler))] { - let priority = scheduler::get_priority(&task).map(|priority| format!("{}", priority)).unwrap_or_else(|| String::from("-")); + let priority = scheduler::priority(&task).map(|priority| format!("{}", priority)).unwrap_or_else(|| String::from("-")); task_string.push_str( &format!("{0:<5} {1:<10} {2:<4} {3:<4} {4:<5} {5:<10} {6}\n", id, runstate, cpu, pinned, task_type, priority, task.name) diff --git a/applications/rq/Cargo.toml b/applications/rq/Cargo.toml index 961c896b13..948587e4b7 100644 --- a/applications/rq/Cargo.toml +++ b/applications/rq/Cargo.toml @@ -2,6 +2,7 @@ name = "rq" version = "0.1.0" authors = ["Christine Wang "] +edition = "2021" [dependencies] getopts = "0.2.21" @@ -9,14 +10,11 @@ getopts = "0.2.21" [dependencies.app_io] path = "../../kernel/app_io" -[dependencies.apic] -path = "../../kernel/apic" +[dependencies.cpu] +path = "../../kernel/cpu" [dependencies.task] path = "../../kernel/task" 
-[dependencies.runqueue] -path = "../../kernel/runqueue" - # [dependencies.application_main_fn] # path = "../../compiler_plugins" diff --git a/applications/rq/src/lib.rs b/applications/rq/src/lib.rs index 6027c032df..906bf591ed 100644 --- a/applications/rq/src/lib.rs +++ b/applications/rq/src/lib.rs @@ -1,66 +1,56 @@ #![no_std] -extern crate alloc; -#[macro_use] extern crate app_io; -extern crate apic; -extern crate getopts; -extern crate task; -extern crate runqueue; +extern crate alloc; -use getopts::Options; use alloc::{ fmt::Write, - string::{ - String, - ToString, - }, + string::{String, ToString}, vec::Vec, }; -use apic::get_lapics; + +use app_io::{print, println}; +use getopts::Options; pub fn main(args: Vec) -> isize { let mut opts = Options::new(); opts.optflag("h", "help", "print this help menu"); let matches = match opts.parse(args) { - Ok(m) => { m } - Err(_f) => { println!("{} \n", _f); - return -1; } + Ok(m) => m, + Err(_f) => { + println!("{} \n", _f); + return -1; + } }; if matches.opt_present("h") { - return print_usage(opts) + return print_usage(opts); } + let bootstrap_cpu = cpu::bootstrap_cpu(); - let all_lapics = get_lapics(); - for lapic in all_lapics.iter() { - let lapic = lapic.1; - let apic_id = lapic.read().apic_id(); - let processor = lapic.read().processor_id(); - let is_bootstrap_cpu = lapic.read().is_bootstrap_cpu(); - let core_type = if is_bootstrap_cpu { "Boot CPU" } else { "Secondary CPU" }; + for (cpu, task_list) in task::scheduler::tasks() { + let core_type = if Some(cpu) == bootstrap_cpu { + "Boot CPU" + } else { + "Secondary CPU" + }; - println!("\n{} (apic: {}, proc: {})", core_type, apic_id, processor); - - if let Some(runqueue) = runqueue::get_runqueue(apic_id.value() as u8).map(|rq| rq.read().clone()) { - let mut runqueue_contents = String::new(); - for task in runqueue.iter() { - writeln!(runqueue_contents, " {} ({}) {}", - task.name, - task.id, - if task.is_running() { "*" } else { "" } - ) - .expect("Failed to write to runqueue_contents"); - } - print!("{}", runqueue_contents); - } - - else { - println!("Can't retrieve runqueue for core {}", apic_id); - return -1; + println!("\n{} (CPU: {})", core_type, cpu); + + let mut runqueue_contents = String::new(); + for task in task_list.iter() { + writeln!( + runqueue_contents, + " {} ({}) {}", + task.name, + task.id, + if task.is_running() { "*" } else { "" } + ) + .expect("Failed to write to runqueue_contents"); } + print!("{}", runqueue_contents); } - + println!(""); 0 } @@ -68,7 +58,10 @@ pub fn main(args: Vec) -> isize { fn print_usage(opts: Options) -> isize { let mut brief = "Usage: rq \n \n".to_string(); - brief.push_str("Prints each CPU's ID, the tasks on its runqueue ('*' identifies the currently running task), and whether it is the boot CPU or not"); + brief.push_str( + "Prints each CPU's ID, the tasks on its runqueue ('*' identifies the currently running \ + task), and whether it is the boot CPU or not", + ); println!("{} \n", opts.usage(&brief)); diff --git a/applications/rq_access_eval/Cargo.toml b/applications/rq_access_eval/Cargo.toml deleted file mode 100644 index 16582f73bb..0000000000 --- a/applications/rq_access_eval/Cargo.toml +++ /dev/null @@ -1,14 +0,0 @@ -[package] -name = "rq_access_eval" -version = "0.1.0" -authors = ["Klim Tsoutsman "] -description = "Run queue access benchmark" -edition = "2021" - -[dependencies] -app_io = { path = "../../kernel/app_io" } -cpu = { path = "../../kernel/cpu" } -getopts = "0.2" -irq_safety = { git = "https://github.com/theseus-os/irq_safety" } 
-runqueue = { path = "../../kernel/runqueue" } -time = { path = "../../kernel/time" } diff --git a/applications/rq_access_eval/src/lib.rs b/applications/rq_access_eval/src/lib.rs deleted file mode 100644 index d2fb2691fe..0000000000 --- a/applications/rq_access_eval/src/lib.rs +++ /dev/null @@ -1,79 +0,0 @@ -#![no_std] - -extern crate alloc; - -use alloc::{string::String, vec::Vec}; -use app_io::println; -use time::{now, Duration, Monotonic}; - -pub fn main(args: Vec) -> isize { - let guard = irq_safety::hold_interrupts(); - let mut options = getopts::Options::new(); - options - .optflag("h", "help", "Display this message") - .optflag("l", "least-busy", "Get the least busy core") - .optopt("c", "core", "Get 's runqueue", "") - .optopt("n", "num", "Perform iterations", ""); - - let matches = match options.parse(args) { - Ok(matches) => matches, - Err(e) => { - println!("{}", e); - print_usage(options); - return 1; - } - }; - - let least_busy = matches.opt_present("l"); - let core = matches.opt_get::("c").expect("failed to parse core"); - - if least_busy && core.is_some() { - panic!("both the least-busy and core flags can't be specified"); - } - - let num = matches - .opt_get_default("n", 1_000_000) - .expect("failed to parse num"); - - let duration = if least_busy { - run( - |_| { - runqueue::get_least_busy_core(); - }, - num, - ) - } else if let Some(core) = core { - run( - |_| { - runqueue::get_runqueue(core); - }, - num, - ) - } else { - let cpu_count = cpu::cpu_count(); - run( - |count| { - runqueue::get_runqueue((count % cpu_count) as u8); - }, - num, - ) - }; - drop(guard); - - println!("time: {:#?}", duration); - - 0 -} - -fn run(f: impl Fn(u32), num: u32) -> Duration { - let start = now::(); - for i in 0..num { - f(i); - } - now::().duration_since(start) -} - -fn print_usage(options: getopts::Options) { - let brief = alloc::format!("Usage: {} [OPTIONS]", env!("CARGO_CRATE_NAME")); - println!("{}", options.usage(&brief)); -} diff --git a/applications/rq_eval/Cargo.toml b/applications/rq_eval/Cargo.toml index df0e8284a0..2a2324239b 100644 --- a/applications/rq_eval/Cargo.toml +++ b/applications/rq_eval/Cargo.toml @@ -23,9 +23,6 @@ path = "../../kernel/task" [dependencies.cpu] path = "../../kernel/cpu" -[dependencies.runqueue] -path = "../../kernel/runqueue" - [dependencies.app_io] path = "../../kernel/app_io" diff --git a/applications/rq_eval/src/lib.rs b/applications/rq_eval/src/lib.rs index 0b5080e19a..0e960f2610 100644 --- a/applications/rq_eval/src/lib.rs +++ b/applications/rq_eval/src/lib.rs @@ -19,7 +19,6 @@ extern crate alloc; extern crate task; extern crate cpu; extern crate spawn; -extern crate runqueue; extern crate getopts; extern crate hpet; extern crate libtest; @@ -158,8 +157,8 @@ fn run_single(iterations: usize) -> Result<(), &'static str> { let start = hpet.get_counter(); for _ in 0..iterations { - runqueue::add_task_to_specific_runqueue(cpu::current_cpu().into_u8(), taskref.clone())?; - runqueue::remove_task_from_all(&taskref)?; + task::scheduler::add_task_to(cpu::current_cpu(), taskref.clone()); + assert!(task::scheduler::remove_task(&taskref)); } let end = hpet.get_counter(); diff --git a/applications/shell/Cargo.toml b/applications/shell/Cargo.toml index 8304f5ddc8..575a16a3bc 100644 --- a/applications/shell/Cargo.toml +++ b/applications/shell/Cargo.toml @@ -28,9 +28,6 @@ path = "../../kernel/spawn" [dependencies.task] path = "../../kernel/task" -[dependencies.runqueue] -path = "../../kernel/runqueue" - [dependencies.window_manager] path = 
"../../kernel/window_manager" diff --git a/applications/shell/src/lib.rs b/applications/shell/src/lib.rs index bbe489e012..748f7ec303 100644 --- a/applications/shell/src/lib.rs +++ b/applications/shell/src/lib.rs @@ -10,7 +10,6 @@ extern crate spin; extern crate dfqueue; extern crate spawn; extern crate task; -extern crate runqueue; extern crate event_types; extern crate window_manager; extern crate path; @@ -409,9 +408,7 @@ impl Shell { if task_ref.has_exited() { continue; } match task_ref.kill(KillReason::Requested) { Ok(_) => { - if let Err(e) = runqueue::remove_task_from_all(task_ref) { - error!("Killed task but could not remove it from runqueue: {}", e); - } + task::scheduler::remove_task(task_ref); } Err(e) => error!("Could not kill task, error: {}", e), } diff --git a/applications/test_scheduler/src/lib.rs b/applications/test_scheduler/src/lib.rs index 8bc0731d06..238034f249 100644 --- a/applications/test_scheduler/src/lib.rs +++ b/applications/test_scheduler/src/lib.rs @@ -22,8 +22,8 @@ pub fn main(_args: Vec) -> isize { .pin_on_cpu(cpu_1) .spawn().expect("failed to initiate task"); - if let Err(e) = scheduler::set_priority(&taskref1, 30) { - error!("scheduler_eval(): Could not set priority to taskref1: {}", e); + if !scheduler::set_priority(&taskref1, 30) { + error!("scheduler_eval(): Could not set priority to taskref1"); } debug!("Spawned Task 1"); @@ -33,8 +33,8 @@ pub fn main(_args: Vec) -> isize { .pin_on_cpu(cpu_1) .spawn().expect("failed to initiate task"); - if let Err(e) = scheduler::set_priority(&taskref2, 20) { - error!("scheduler_eval(): Could not set priority to taskref2: {}", e); + if !scheduler::set_priority(&taskref2, 20) { + error!("scheduler_eval(): Could not set priority to taskref2"); } debug!("Spawned Task 2"); @@ -44,17 +44,17 @@ pub fn main(_args: Vec) -> isize { .pin_on_cpu(cpu_1) .spawn().expect("failed to initiate task"); - if let Err(e) = scheduler::set_priority(&taskref3, 10) { - error!("scheduler_eval(): Could not set priority to taskref3: {}", e); + if !scheduler::set_priority(&taskref3, 10) { + error!("scheduler_eval(): Could not set priority to taskref3"); } debug!("Spawned Task 3"); debug!("Spawned all tasks"); - let _priority1 = scheduler::get_priority(&taskref1); - let _priority2 = scheduler::get_priority(&taskref2); - let _priority3 = scheduler::get_priority(&taskref3); + let _priority1 = scheduler::priority(&taskref1); + let _priority2 = scheduler::priority(&taskref2); + let _priority3 = scheduler::priority(&taskref3); #[cfg(epoch_scheduler)] { diff --git a/kernel/exceptions_full/Cargo.toml b/kernel/exceptions_full/Cargo.toml index dfa5114f4a..ff7864a5d9 100644 --- a/kernel/exceptions_full/Cargo.toml +++ b/kernel/exceptions_full/Cargo.toml @@ -25,9 +25,6 @@ path = "../tlb_shootdown" [dependencies.task] path = "../task" -[dependencies.runqueue] -path = "../runqueue" - [dependencies.fault_log] path = "../fault_log" diff --git a/kernel/ixgbe/Cargo.toml b/kernel/ixgbe/Cargo.toml index 27f8e670f7..ebd42c623c 100644 --- a/kernel/ixgbe/Cargo.toml +++ b/kernel/ixgbe/Cargo.toml @@ -55,9 +55,6 @@ features = [ "alloc", "small_rng" ] [dependencies.hpet] path = "../acpi/hpet" -[dependencies.runqueue] -path = "../runqueue" - [dependencies.nic_initialization] path = "../nic_initialization" diff --git a/kernel/ixgbe/src/lib.rs b/kernel/ixgbe/src/lib.rs index ee6f3d1656..1da6533a00 100644 --- a/kernel/ixgbe/src/lib.rs +++ b/kernel/ixgbe/src/lib.rs @@ -27,7 +27,6 @@ extern crate volatile; extern crate mpmc; extern crate rand; extern crate hpet; -extern crate 
runqueue; extern crate net; extern crate nic_initialization; extern crate intel_ethernet; diff --git a/kernel/libtest/Cargo.toml b/kernel/libtest/Cargo.toml index 58f10fc4fe..83c77941f4 100644 --- a/kernel/libtest/Cargo.toml +++ b/kernel/libtest/Cargo.toml @@ -24,9 +24,6 @@ path = "../memory" [dependencies.task] path = "../task" -[dependencies.runqueue] -path = "../runqueue" - [dependencies.hpet] path = "../acpi/hpet" diff --git a/kernel/libtest/src/lib.rs b/kernel/libtest/src/lib.rs index 6106e2b17b..43e4828991 100644 --- a/kernel/libtest/src/lib.rs +++ b/kernel/libtest/src/lib.rs @@ -6,7 +6,6 @@ extern crate memory; extern crate apic; extern crate cpu; extern crate hpet; -extern crate runqueue; extern crate pmu_x86; extern crate libm; #[macro_use] extern crate log; @@ -42,10 +41,7 @@ macro_rules! CPU_ID { /// Helper function return the tasks in a given `cpu`'s runqueue pub fn nr_tasks_in_rq(cpu: CpuId) -> Option { - match runqueue::get_runqueue(cpu.into_u8()).map(|rq| rq.read()) { - Some(rq) => { Some(rq.len()) } - _ => { None } - } + return task::scheduler::busyness(cpu); } diff --git a/kernel/panic_wrapper/Cargo.toml b/kernel/panic_wrapper/Cargo.toml index 55150c486b..dfbd4f4117 100644 --- a/kernel/panic_wrapper/Cargo.toml +++ b/kernel/panic_wrapper/Cargo.toml @@ -13,7 +13,6 @@ log = "0.4.8" fault_log = { path = "../fault_log" } memory = { path = "../memory" } mod_mgmt = { path = "../mod_mgmt" } -runqueue = { path = "../runqueue" } task = { path = "../task" } [target.'cfg(target_arch = "x86_64")'.dependencies] diff --git a/kernel/runqueue/Cargo.toml b/kernel/runqueue/Cargo.toml deleted file mode 100644 index 2bbca36f62..0000000000 --- a/kernel/runqueue/Cargo.toml +++ /dev/null @@ -1,45 +0,0 @@ -[package] -authors = ["Kevin Boos "] -name = "runqueue" -description = "Functions and types for handling runqueues, i.e., lists of tasks for scheduling purposes" -version = "0.1.0" - -[dependencies] - -[dependencies.cfg-if] -version = "1.0.0" - -[dependencies.log] -version = "0.4.8" - -[dependencies.lazy_static] -features = ["spin_no_std"] -version = "1.4.0" - -[dependencies.sync_preemption] -path = "../sync_preemption" - -[dependencies.atomic_linked_list] -path = "../../libs/atomic_linked_list" - -[dependencies.task] -path = "../task" - -[dependencies.runqueue_round_robin] -path = "../runqueue_round_robin" - -[dependencies.runqueue_epoch] -path = "../runqueue_epoch" - -[dependencies.runqueue_priority] -path = "../runqueue_priority" - -## This should be dependent upon 'cfg(single_simd_task_optimization)', -## but it cannot be because of https://github.com/rust-lang/cargo/issues/5499. -## Therefore, it has to be unconditionally included. -[dependencies.single_simd_task_optimization] -path = "../single_simd_task_optimization" - - -[lib] -crate-type = ["rlib"] diff --git a/kernel/runqueue/src/lib.rs b/kernel/runqueue/src/lib.rs deleted file mode 100644 index 9ef1da87b8..0000000000 --- a/kernel/runqueue/src/lib.rs +++ /dev/null @@ -1,59 +0,0 @@ -//! This crate contains the API of the `RunQueue` structure, Runqueue Structure should contain -//! list of tasks with additional scheduling information depending on the scheduler. -//! All crates except the scheduler should refer to this crate to access functions on `RunQueue`. - -#![no_std] - -extern crate alloc; -extern crate sync_preemption; -extern crate atomic_linked_list; -extern crate task; -#[macro_use] extern crate cfg_if; -cfg_if! 
{ - if #[cfg(epoch_scheduler)] { - extern crate runqueue_epoch as runqueue; - } else if #[cfg(priority_scheduler)] { - extern crate runqueue_priority as runqueue; - } else { - extern crate runqueue_round_robin as runqueue; - } -} - -#[cfg(single_simd_task_optimization)] -extern crate single_simd_task_optimization; - -use sync_preemption::PreemptionSafeRwLock; -use task::TaskRef; -use runqueue::RunQueue; - - -/// Creates a new `RunQueue` for the given core, which is an `apic_id`. -pub fn init(which_core: u8, idle_task: TaskRef) -> Result<(), &'static str> { - RunQueue::init(which_core, idle_task) -} - -/// Returns the `RunQueue` of the given core, which is an `apic_id`. -pub fn get_runqueue(which_core: u8) -> Option<&'static PreemptionSafeRwLock> { - RunQueue::get_runqueue(which_core) -} - -/// Returns the "least busy" core -pub fn get_least_busy_core() -> Option { - RunQueue::get_least_busy_core() -} - -/// Chooses the "least busy" core's runqueue -/// and adds the given `Task` reference to that core's runqueue. -pub fn add_task_to_any_runqueue(task: TaskRef) -> Result<(), &'static str> { - RunQueue::add_task_to_any_runqueue(task) -} - -/// Adds the given `Task` reference to given core's runqueue. -pub fn add_task_to_specific_runqueue(which_core: u8, task: TaskRef) -> Result<(), &'static str> { - RunQueue::add_task_to_specific_runqueue(which_core, task) -} - -/// Removes a `TaskRef` from all `RunQueue`s that exist on the entire system. -pub fn remove_task_from_all(task: &TaskRef) -> Result<(), &'static str> { - RunQueue::remove_task_from_all(task) -} diff --git a/kernel/runqueue_epoch/Cargo.toml b/kernel/runqueue_epoch/Cargo.toml deleted file mode 100644 index 91157107bf..0000000000 --- a/kernel/runqueue_epoch/Cargo.toml +++ /dev/null @@ -1,28 +0,0 @@ -[package] -authors = ["Namitha Liyanage "] -name = "runqueue_epoch" -description = "Run queue for the epoch scheduler" -version = "0.1.0" -edition = "2021" - -[dependencies] -log = "0.4.8" - -[dependencies.sync_preemption] -path = "../sync_preemption" - -[dependencies.atomic_linked_list] -path = "../../libs/atomic_linked_list" - -[dependencies.task] -path = "../task" - -## This should be dependent upon 'cfg(single_simd_task_optimization)', -## but it cannot be because of https://github.com/rust-lang/cargo/issues/5499. -## Therefore, it has to be unconditionally included. -[dependencies.single_simd_task_optimization] -path = "../single_simd_task_optimization" - - -[lib] -crate-type = ["rlib"] diff --git a/kernel/runqueue_epoch/src/lib.rs b/kernel/runqueue_epoch/src/lib.rs deleted file mode 100644 index 1868c027e1..0000000000 --- a/kernel/runqueue_epoch/src/lib.rs +++ /dev/null @@ -1,334 +0,0 @@ -//! This crate contains the `RunQueue` structure, for the epoch scheduler. -//! `RunQueue` structure is essentially a list of Tasks -//! that it used for scheduling purposes. - -#![no_std] -#![feature(let_chains)] - -extern crate alloc; - -use alloc::collections::VecDeque; -use atomic_linked_list::atomic_map::AtomicMap; -use core::ops::{Deref, DerefMut}; -use log::{debug, error, trace}; -use sync_preemption::PreemptionSafeRwLock; -use task::TaskRef; - -pub const MAX_PRIORITY: u8 = 40; -pub const DEFAULT_PRIORITY: u8 = 20; -pub const INITIAL_TOKENS: usize = 10; - -#[derive(Debug, Clone)] -pub struct EpochTaskRef { - task: TaskRef, - pub priority: u8, - /// Remaining tokens in this epoch. 
A task will be scheduled in an epoch - /// until tokens run out - pub tokens_remaining: usize, -} - -impl Deref for EpochTaskRef { - type Target = TaskRef; - - fn deref(&self) -> &TaskRef { - &self.task - } -} - -impl DerefMut for EpochTaskRef { - fn deref_mut(&mut self) -> &mut TaskRef { - &mut self.task - } -} - -impl EpochTaskRef { - /// Creates a new `EpochTaskRef` that wraps the given `TaskRef`. - /// We just give an initial number of tokens to run the task till - /// next scheduling epoch - pub fn new(task: TaskRef) -> EpochTaskRef { - EpochTaskRef { - task, - priority: DEFAULT_PRIORITY, - tokens_remaining: INITIAL_TOKENS, - } - } -} - -/// There is one runqueue per core, each core only accesses its own private -/// runqueue and allows the scheduler to select a task from that runqueue to -/// schedule in. -static RUNQUEUES: AtomicMap> = AtomicMap::new(); - -/// A list of references to `Task`s (`EpochTaskRef`s) -/// that is used to store the `Task`s (and associated scheduler related data) -/// that are runnable on a given core. -/// A queue is used for the token based epoch scheduler. -/// `Runqueue` implements `Deref` and `DerefMut` traits, which dereferences to -/// `VecDeque`. -#[derive(Debug)] -pub struct RunQueue { - core: u8, - queue: VecDeque, - idle_task: TaskRef, -} - -impl Deref for RunQueue { - type Target = VecDeque; - - fn deref(&self) -> &Self::Target { - &self.queue - } -} - -impl DerefMut for RunQueue { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.queue - } -} - -impl RunQueue { - /// Moves the `TaskRef` at the given index in this `RunQueue` to the end - /// (back) of this `RunQueue`, and returns a cloned reference to that - /// `TaskRef`. The number of tokens is reduced by one and number of context - /// switches is increased by one. This function is used when the task is - /// selected by the scheduler - pub fn update_and_move_to_end(&mut self, index: usize, tokens: usize) -> Option { - if let Some(mut priority_task_ref) = self.remove(index) { - priority_task_ref.tokens_remaining = tokens; - let task_ref = priority_task_ref.task.clone(); - self.push_back(priority_task_ref); - Some(task_ref) - } else { - None - } - } - - /// Creates a new `RunQueue` for the given core, which is an `apic_id` - pub fn init(which_core: u8, idle_task: TaskRef) -> Result<(), &'static str> { - #[cfg(not(loscd_eval))] - trace!("Created runqueue (priority) for core {}", which_core); - let new_rq = PreemptionSafeRwLock::new(RunQueue { - core: which_core, - queue: VecDeque::new(), - idle_task, - }); - - if RUNQUEUES.insert(which_core, new_rq).is_some() { - error!( - "BUG: RunQueue::init(): runqueue already exists for core {}!", - which_core - ); - Err("runqueue already exists for this core") - } else { - // there shouldn't already be a RunQueue for this core - Ok(()) - } - } - - /// Returns `RunQueue` for the given core, which is an `apic_id`. - pub fn get_runqueue(which_core: u8) -> Option<&'static PreemptionSafeRwLock> { - RUNQUEUES.get(&which_core) - } - - /// Returns the "least busy" core, which is currently very simple, based on - /// runqueue size. - pub fn get_least_busy_core() -> Option { - Self::get_least_busy_runqueue().map(|rq| rq.read().core) - } - - /// Returns the `RunQueue` for the "least busy" core. 
- /// See [`get_least_busy_core()`](#method.get_least_busy_core) - fn get_least_busy_runqueue() -> Option<&'static PreemptionSafeRwLock> { - let mut min_rq: Option<(&'static PreemptionSafeRwLock, usize)> = None; - - for (_, rq) in RUNQUEUES.iter() { - let rq_size = rq.read().queue.len(); - - if let Some(min) = min_rq { - if rq_size < min.1 { - min_rq = Some((rq, rq_size)); - } - } else { - min_rq = Some((rq, rq_size)); - } - } - - min_rq.map(|m| m.0) - } - - /// Chooses the "least busy" core's runqueue (based on simple - /// runqueue-size-based load balancing) and adds the given `Task` - /// reference to that core's runqueue. - pub fn add_task_to_any_runqueue(task: TaskRef) -> Result<(), &'static str> { - let rq = RunQueue::get_least_busy_runqueue() - .or_else(|| RUNQUEUES.iter().next().map(|r| r.1)) - .ok_or("couldn't find any runqueues to add the task to!")?; - - rq.write().add_task(task) - } - - /// Convenience method that adds the given `Task` reference to given core's - /// runqueue. - pub fn add_task_to_specific_runqueue( - which_core: u8, - task: TaskRef, - ) -> Result<(), &'static str> { - RunQueue::get_runqueue(which_core) - .ok_or("Couldn't get RunQueue for the given core")? - .write() - .add_task(task) - } - - /// Adds a `TaskRef` to this RunQueue. - fn add_task(&mut self, task: TaskRef) -> Result<(), &'static str> { - #[cfg(not(loscd_eval))] - debug!("Adding task to runqueue_priority {}, {:?}", self.core, task); - let priority_task_ref = EpochTaskRef::new(task); - self.push_back(priority_task_ref); - - #[cfg(single_simd_task_optimization)] - { - warn!("USING SINGLE_SIMD_TASK_OPTIMIZATION VERSION OF RUNQUEUE::ADD_TASK"); - // notify simd_personality crate about runqueue change, but only for SIMD tasks - if task.simd { - single_simd_task_optimization::simd_tasks_added_to_core(self.iter(), self.core); - } - } - - Ok(()) - } - - /// Removes a `TaskRef` from this RunQueue. - pub fn remove_task(&mut self, task: &TaskRef) -> Result<(), &'static str> { - debug!( - "Removing task from runqueue_priority {}, {:?}", - self.core, task - ); - self.retain(|x| &x.task != task); - - #[cfg(single_simd_task_optimization)] - { - warn!("USING SINGLE_SIMD_TASK_OPTIMIZATION VERSION OF RUNQUEUE::REMOVE_TASK"); - // notify simd_personality crate about runqueue change, but only for SIMD tasks - if task.simd { - single_simd_task_optimization::simd_tasks_removed_from_core(self.iter(), self.core); - } - } - - Ok(()) - } - - /// Removes a `TaskRef` from all `RunQueue`s that exist on the entire - /// system. - /// - /// This is a brute force approach that iterates over all runqueues. - pub fn remove_task_from_all(task: &TaskRef) -> Result<(), &'static str> { - for (_core, rq) in RUNQUEUES.iter() { - rq.write().remove_task(task)?; - } - Ok(()) - } - - pub fn idle_task(&self) -> &TaskRef { - &self.idle_task - } - - fn get_priority(&self, task: &TaskRef) -> Option { - for epoch_task in self.iter() { - if &epoch_task.task == task { - return Some(epoch_task.priority); - } - } - None - } - - /// Sets the priority of the given task. - /// - /// Returns whether the task was found in the run queue. - fn set_priority(&mut self, task: &TaskRef, priority: u8) -> bool { - for epoch_task in self.iter_mut() { - if &epoch_task.task == task { - epoch_task.priority = priority; - return true; - } - } - false - } -} - -/// Returns the priority of the given task if it exists, otherwise none. 
-pub fn get_priority(task: &TaskRef) -> Option { - for (_, run_queue) in RUNQUEUES.iter() { - if let Some(priority) = run_queue.read().get_priority(task) { - return Some(priority); - } - } - - None -} - -pub fn set_priority(task: &TaskRef, priority: u8) { - for (_, run_queue) in RUNQUEUES.iter() { - if run_queue.write().set_priority(task, priority) { - break; - } - } -} - -/// Lowers the task's priority to its previous value when dropped. -pub struct PriorityInheritanceGuard<'a> { - inner: Option<(&'a TaskRef, u8)>, -} - -impl<'a> Drop for PriorityInheritanceGuard<'a> { - fn drop(&mut self) { - if let Some((task, priority)) = self.inner { - set_priority(task, priority) - } - } -} - -/// Modifies the given task's priority to be the maximum of its priority and the -/// current task's priority. -/// -/// Returns a guard which reverts the change when dropped. -pub fn inherit_priority(task: &TaskRef) -> PriorityInheritanceGuard<'_> { - let current_task = task::get_my_current_task().unwrap(); - - let mut current_priority = None; - let mut other_priority = None; - - 'outer: for (core, run_queue) in RUNQUEUES.iter() { - for epoch_task in run_queue.read().iter() { - if epoch_task.task == current_task { - current_priority = Some(epoch_task.priority); - if other_priority.is_some() { - break 'outer; - } - } else if &epoch_task.task == task { - other_priority = Some((core, epoch_task.priority)); - if current_priority.is_some() { - break 'outer; - } - } - } - } - - if let (Some(current_priority), Some((core, other_priority))) = - (current_priority, other_priority) && current_priority > other_priority - { - // NOTE: This assumes no task migration. - debug_assert!(RUNQUEUES.get(core).unwrap().write().set_priority(task, current_priority)); - } - - PriorityInheritanceGuard { - inner: if let (Some(current_priority), Some((_, other_priority))) = - (current_priority, other_priority) - && current_priority > other_priority - { - Some((task, other_priority)) - } else { - None - }, - } -} diff --git a/kernel/runqueue_priority/Cargo.toml b/kernel/runqueue_priority/Cargo.toml deleted file mode 100644 index 9dfb38397f..0000000000 --- a/kernel/runqueue_priority/Cargo.toml +++ /dev/null @@ -1,16 +0,0 @@ -[package] -authors = ["Jacob Earle "] -name = "runqueue_priority" -description = "Functions and types for handling runqueues in a priority scheduling context" -version = "0.1.0" -edition = "2021" - -[dependencies] -atomic_linked_list = { path = "../../libs/atomic_linked_list" } -log = "0.4.8" -sync_preemption = { path = "../sync_preemption" } -task = { path = "../task" } -time = { path = "../time" } - -[lib] -crate-type = ["rlib"] diff --git a/kernel/runqueue_priority/src/lib.rs b/kernel/runqueue_priority/src/lib.rs deleted file mode 100644 index 12d608f7d3..0000000000 --- a/kernel/runqueue_priority/src/lib.rs +++ /dev/null @@ -1,321 +0,0 @@ -//! Runqueue structures for a priority scheduler. -//! -//! The `RunQueue` structure is essentially a list of `Task`s used for -//! scheduling purposes. Each `PriorityTaskRef` element in the runqueue contains -//! a `TaskRef` representing an underlying task and as well as a `period` value. 
- -#![no_std] -#![feature(let_chains)] - -extern crate alloc; - -use alloc::collections::BinaryHeap; -use atomic_linked_list::atomic_map::AtomicMap; -use core::{ - cmp::Ordering, - ops::{Deref, DerefMut}, -}; -use log::{error, trace}; -use sync_preemption::PreemptionSafeRwLock; -use task::TaskRef; -use time::Instant; - -const DEFAULT_PRIORITY: u8 = 0; - -/// A reference to a task with its period for priority scheduling. -/// -/// `PriorityTaskRef` implements `Deref` and `DerefMut` traits, which -/// dereferences to `TaskRef`. -#[derive(Debug, Clone)] -pub struct PriorityTaskRef { - pub task: TaskRef, - pub last_ran: Instant, - priority: u8, -} - -impl PartialEq for PriorityTaskRef { - fn eq(&self, other: &Self) -> bool { - self.priority.eq(&other.priority) && self.last_ran.eq(&other.last_ran) - } -} - -// The equivalence relation is reflexive. -impl Eq for PriorityTaskRef {} - -impl PartialOrd for PriorityTaskRef { - fn partial_cmp(&self, other: &Self) -> Option { - match self.priority.cmp(&other.priority) { - // Tasks that were ran longer ago should be prioritised. - Ordering::Equal => Some(self.last_ran.cmp(&other.last_ran).reverse()), - ordering => Some(ordering), - } - } -} - -impl Ord for PriorityTaskRef { - fn cmp(&self, other: &Self) -> core::cmp::Ordering { - self.priority.cmp(&other.priority) - } -} - -impl Deref for PriorityTaskRef { - type Target = TaskRef; - - fn deref(&self) -> &TaskRef { - &self.task - } -} - -impl DerefMut for PriorityTaskRef { - fn deref_mut(&mut self) -> &mut TaskRef { - &mut self.task - } -} - -/// There is one runqueue per core, each core only accesses its own private -/// runqueue and allows the scheduler to select a task from that runqueue to -/// schedule in -static RUNQUEUES: AtomicMap> = AtomicMap::new(); - -/// A list of `Task`s and their associated priority scheduler data that may be -/// run on a given CPU core. -/// -/// In rate monotonic scheduling, tasks are sorted in order of increasing -/// periods. Thus, the `period` value acts as a form of task "priority", -/// with higher priority (shorter period) tasks coming first. -#[derive(Debug)] -pub struct RunQueue { - core: u8, - queue: BinaryHeap, - idle_task: TaskRef, -} - -impl Deref for RunQueue { - type Target = BinaryHeap; - - fn deref(&self) -> &BinaryHeap { - &self.queue - } -} - -impl DerefMut for RunQueue { - fn deref_mut(&mut self) -> &mut BinaryHeap { - &mut self.queue - } -} - -impl RunQueue { - /// Creates a new `RunQueue` for the given core, which is an `apic_id` - pub fn init(which_core: u8, idle_task: TaskRef) -> Result<(), &'static str> { - #[cfg(not(loscd_eval))] - trace!("Created runqueue (priority) for core {}", which_core); - - let new_rq = PreemptionSafeRwLock::new(RunQueue { - core: which_core, - queue: BinaryHeap::new(), - idle_task, - }); - - if RUNQUEUES.insert(which_core, new_rq).is_some() { - error!("BUG: RunQueue::init(): runqueue already exists for core {which_core}!"); - Err("runqueue already exists for this core") - } else { - // there shouldn't already be a RunQueue for this core - Ok(()) - } - } - - /// Returns `RunQueue` for the given core, which is an `apic_id`. - pub fn get_runqueue(which_core: u8) -> Option<&'static PreemptionSafeRwLock> { - RUNQUEUES.get(&which_core) - } - - /// Returns the "least busy" core, which is currently very simple, based on - /// runqueue size. - pub fn get_least_busy_core() -> Option { - Self::get_least_busy_runqueue().map(|rq| rq.read().core) - } - - /// Returns the `RunQueue` for the "least busy" core. 
- /// See [`get_least_busy_core()`](#method.get_least_busy_core) - fn get_least_busy_runqueue() -> Option<&'static PreemptionSafeRwLock> { - let mut min_rq: Option<(&'static PreemptionSafeRwLock, usize)> = None; - - for (_, rq) in RUNQUEUES.iter() { - let rq_size = rq.read().queue.len(); - - if let Some(min) = min_rq { - if rq_size < min.1 { - min_rq = Some((rq, rq_size)); - } - } else { - min_rq = Some((rq, rq_size)); - } - } - - min_rq.map(|m| m.0) - } - - /// Chooses the "least busy" core's runqueue (based on simple - /// runqueue-size-based load balancing) and adds the given `Task` - /// reference to that core's runqueue. - pub fn add_task_to_any_runqueue(task: TaskRef) -> Result<(), &'static str> { - let rq = RunQueue::get_least_busy_runqueue() - .or_else(|| RUNQUEUES.iter().next().map(|r| r.1)) - .ok_or("couldn't find any runqueues to add the task to!")?; - - rq.write().add_task(task, DEFAULT_PRIORITY) - } - - /// Convenience method that adds the given `Task` reference to given core's - /// runqueue. - pub fn add_task_to_specific_runqueue( - which_core: u8, - task: TaskRef, - ) -> Result<(), &'static str> { - RunQueue::get_runqueue(which_core) - .ok_or("Couldn't get RunQueue for the given core")? - .write() - .add_task(task, DEFAULT_PRIORITY) - } - - /// Adds a `TaskRef` to this runqueue with the given priority value. - fn add_task(&mut self, task: TaskRef, priority: u8) -> Result<(), &'static str> { - let priority_task = PriorityTaskRef { - task, - priority, - last_ran: Instant::ZERO, - }; - self.queue.push(priority_task); - Ok(()) - } - - /// The internal function that actually removes the task from the runqueue. - fn remove_internal(&mut self, task: &TaskRef) -> Result<(), &'static str> { - self.queue.retain(|x| &x.task != task); - Ok(()) - } - - /// Removes a `TaskRef` from this RunQueue. - pub fn remove_task(&mut self, task: &TaskRef) -> Result<(), &'static str> { - self.remove_internal(task) - } - - /// Removes a `TaskRef` from all `RunQueue`s that exist on the entire - /// system. - /// - /// This is a brute force approach that iterates over all runqueues. - pub fn remove_task_from_all(task: &TaskRef) -> Result<(), &'static str> { - for (_core, rq) in RUNQUEUES.iter() { - rq.write().remove_task(task)?; - } - Ok(()) - } - - fn get_priority(&self, task: &TaskRef) -> Option { - for t in self.queue.iter() { - if t.task == *task { - return Some(t.priority); - } - } - None - } - - pub fn idle_task(&self) -> &TaskRef { - &self.idle_task - } - - fn set_priority(&mut self, task: &TaskRef, priority: u8) -> bool { - let previous_len = self.queue.len(); - self.queue.retain(|t| t.task != *task); - - if previous_len != self.queue.len() { - debug_assert_eq!(self.queue.len() + 1, previous_len); - self.queue.push(PriorityTaskRef { - // TODO: Don't take reference? - task: task.clone(), - priority, - // Not technically correct, but this will be reset next time it is run. - last_ran: Instant::ZERO, - }); - true - } else { - false - } - } -} - -pub fn get_priority(task: &TaskRef) -> Option { - for (_, run_queue) in RUNQUEUES.iter() { - if let Some(priority) = run_queue.read().get_priority(task) { - return Some(priority); - } - } - None -} - -pub fn set_priority(task: &TaskRef, priority: u8) { - for (_, run_queue) in RUNQUEUES.iter() { - if run_queue.write().set_priority(task, priority) { - break; - } - } -} - -/// Lowers the task's priority to its previous value when dropped. 
-pub struct PriorityInheritanceGuard<'a> { - inner: Option<(&'a TaskRef, u8)>, -} - -impl<'a> Drop for PriorityInheritanceGuard<'a> { - fn drop(&mut self) { - if let Some((task, priority)) = self.inner { - set_priority(task, priority) - } - } -} - -/// Modifies the given task's priority to be the maximum of its priority and the -/// current task's priority. -/// -/// Returns a guard which reverts the change when dropped. -pub fn inherit_priority(task: &TaskRef) -> PriorityInheritanceGuard<'_> { - let current_task = task::get_my_current_task().unwrap(); - - let mut current_priority = None; - let mut other_priority = None; - - 'outer: for (core, run_queue) in RUNQUEUES.iter() { - for epoch_task in run_queue.read().iter() { - if epoch_task.task == current_task { - current_priority = Some(epoch_task.priority); - if other_priority.is_some() { - break 'outer; - } - } else if &epoch_task.task == task { - other_priority = Some((core, epoch_task.priority)); - if current_priority.is_some() { - break 'outer; - } - } - } - } - - if let (Some(current_priority), Some((core, other_priority))) = - (current_priority, other_priority) && current_priority > other_priority - { - // NOTE: This assumes no task migration. - debug_assert!(RUNQUEUES.get(core).unwrap().write().set_priority(task, current_priority)); - } - - PriorityInheritanceGuard { - inner: if let (Some(current_priority), Some((_, other_priority))) = - (current_priority, other_priority) - && current_priority > other_priority - { - Some((task, other_priority)) - } else { - None - }, - } -} diff --git a/kernel/runqueue_round_robin/Cargo.toml b/kernel/runqueue_round_robin/Cargo.toml deleted file mode 100644 index 65f6d9652b..0000000000 --- a/kernel/runqueue_round_robin/Cargo.toml +++ /dev/null @@ -1,27 +0,0 @@ -[package] -authors = ["Kevin Boos "] -name = "runqueue_round_robin" -description = "Functions and types for handling runqueues, i.e., lists of tasks for scheduling purposes" -version = "0.1.0" - -[dependencies] -log = "0.4.8" - -[dependencies.sync_preemption] -path = "../sync_preemption" - -[dependencies.atomic_linked_list] -path = "../../libs/atomic_linked_list" - -[dependencies.task] -path = "../task" - -## This should be dependent upon 'cfg(single_simd_task_optimization)', -## but it cannot be because of https://github.com/rust-lang/cargo/issues/5499. -## Therefore, it has to be unconditionally included. -[dependencies.single_simd_task_optimization] -path = "../single_simd_task_optimization" - - -[lib] -crate-type = ["rlib"] diff --git a/kernel/runqueue_round_robin/src/lib.rs b/kernel/runqueue_round_robin/src/lib.rs deleted file mode 100644 index e02e218535..0000000000 --- a/kernel/runqueue_round_robin/src/lib.rs +++ /dev/null @@ -1,247 +0,0 @@ -//! This crate contains the `RunQueue` structure, for round robin scheduler. -//! `RunQueue` structure is essentially a list of Tasks -//! that is used for scheduling purposes. -//! 
- -#![no_std] - -extern crate alloc; -#[macro_use] extern crate log; -extern crate sync_preemption; -extern crate atomic_linked_list; -extern crate task; - -#[cfg(single_simd_task_optimization)] -extern crate single_simd_task_optimization; - -use alloc::collections::VecDeque; -use sync_preemption::PreemptionSafeRwLock; -use atomic_linked_list::atomic_map::AtomicMap; -use task::TaskRef; -use core::ops::{Deref, DerefMut}; - -/// A cloneable reference to a `Taskref` that exposes more methods -/// related to task scheduling -/// -/// The `RoundRobinTaskRef` type is necessary since differnt scheduling algorithms -/// require different data associated with the task to be stored alongside. -/// This makes storing them alongside the task prohibitive. -/// Since round robin is the most primitive scheduling policy -/// no additional scheduling information is needed. -/// context_switches indicate the number of context switches -/// the task has undergone. -/// context_switches is not used in scheduling algorithm. -/// `RoundRobinTaskRef` implements `Deref` and `DerefMut` traits, which dereferences to `TaskRef`. -#[derive(Debug, Clone)] -pub struct RoundRobinTaskRef{ - /// `TaskRef` wrapped by `RoundRobinTaskRef` - taskref: TaskRef, - - /// Number of context switches the task has undergone. Not used in scheduling algorithm - context_switches: usize, -} - -// impl Drop for RoundRobinTaskRef { -// fn drop(&mut self) { -// warn!("DROPPING RoundRobinTaskRef with taskref {:?}", self.taskref); -// } -// } - -impl Deref for RoundRobinTaskRef { - type Target = TaskRef; - - fn deref(&self) -> &TaskRef { - &self.taskref - } -} - -impl DerefMut for RoundRobinTaskRef { - fn deref_mut(&mut self) -> &mut TaskRef { - &mut self.taskref - } -} - -impl RoundRobinTaskRef { - /// Creates a new `RoundRobinTaskRef` that wraps the given `TaskRef`. - pub fn new(taskref: TaskRef) -> RoundRobinTaskRef { - RoundRobinTaskRef { - taskref, - context_switches: 0, - } - } - - /// Increment the number of times the task is picked - pub fn increment_context_switches(&mut self) { - self.context_switches = self.context_switches.saturating_add(1); - } -} - -/// There is one runqueue per core, each core only accesses its own private runqueue -/// and allows the scheduler to select a task from that runqueue to schedule in. -pub static RUNQUEUES: AtomicMap> = AtomicMap::new(); - -/// A list of references to `Task`s (`RoundRobinTaskRef`s). -/// This is used to store the `Task`s (and associated scheduler related data) -/// that are runnable on a given core. -/// A queue is used for the round robin scheduler. -/// `Runqueue` implements `Deref` and `DerefMut` traits, which dereferences to `VecDeque`. -#[derive(Debug)] -pub struct RunQueue { - core: u8, - idle_task: TaskRef, - queue: VecDeque, -} -// impl Drop for RunQueue { -// fn drop(&mut self) { -// warn!("DROPPING Round Robing Runqueue for core {}", self.core); -// } -// } - -impl Deref for RunQueue { - type Target = VecDeque; - - fn deref(&self) -> &VecDeque { - &self.queue - } -} - -impl DerefMut for RunQueue { - fn deref_mut(&mut self) -> &mut VecDeque { - &mut self.queue - } -} - -impl RunQueue { - - /// Moves the `TaskRef` at the given index into this `RunQueue` to the end (back) of this `RunQueue`, - /// and returns a cloned reference to that `TaskRef`. 
- pub fn move_to_end(&mut self, index: usize) -> Option { - self.swap_remove_front(index).map(|rr_taskref| { - let taskref = rr_taskref.taskref.clone(); - self.push_back(rr_taskref); - taskref - }) - } - - /// Creates a new `RunQueue` for the given core, which is an `apic_id`. - pub fn init(which_core: u8, idle_task: TaskRef) -> Result<(), &'static str> { - trace!("Created runqueue (round robin) for core {}", which_core); - let new_rq = PreemptionSafeRwLock::new(RunQueue { - core: which_core, - idle_task, - queue: VecDeque::new(), - }); - - if RUNQUEUES.insert(which_core, new_rq).is_some() { - error!("BUG: RunQueue::init(): runqueue already exists for core {}!", which_core); - Err("runqueue already exists for this core") - } - else { - // there shouldn't already be a RunQueue for this core - Ok(()) - } - } - - pub fn idle_task(&self) -> &TaskRef { - &self.idle_task - } - - /// Returns the `RunQueue` for the given core, which is an `apic_id`. - pub fn get_runqueue(which_core: u8) -> Option<&'static PreemptionSafeRwLock> { - RUNQUEUES.get(&which_core) - } - - - /// Returns the "least busy" core, which is currently very simple, based on runqueue size. - pub fn get_least_busy_core() -> Option { - Self::get_least_busy_runqueue().map(|rq| rq.read().core) - } - - - /// Returns the `RunQueue` for the "least busy" core. - /// See [`get_least_busy_core()`](#method.get_least_busy_core) - fn get_least_busy_runqueue() -> Option<&'static PreemptionSafeRwLock> { - let mut min_rq: Option<(&'static PreemptionSafeRwLock, usize)> = None; - - for (_, rq) in RUNQUEUES.iter() { - let rq_size = rq.read().queue.len(); - - if let Some(min) = min_rq { - if rq_size < min.1 { - min_rq = Some((rq, rq_size)); - } - } - else { - min_rq = Some((rq, rq_size)); - } - } - - min_rq.map(|m| m.0) - } - - /// Chooses the "least busy" core's runqueue (based on simple runqueue-size-based load balancing) - /// and adds the given `Task` reference to that core's runqueue. - pub fn add_task_to_any_runqueue(task: TaskRef) -> Result<(), &'static str> { - let rq = RunQueue::get_least_busy_runqueue() - .or_else(|| RUNQUEUES.iter().next().map(|r| r.1)) - .ok_or("couldn't find any runqueues to add the task to!")?; - - rq.write().add_task(task) - } - - /// Convenience method that adds the given `Task` reference to given core's runqueue. - pub fn add_task_to_specific_runqueue(which_core: u8, task: TaskRef) -> Result<(), &'static str> { - RunQueue::get_runqueue(which_core) - .ok_or("Couldn't get RunQueue for the given core")? - .write() - .add_task(task) - } - - /// Adds a `TaskRef` to this RunQueue. - fn add_task(&mut self, task: TaskRef) -> Result<(), &'static str> { - #[cfg(not(rq_eval))] - debug!("Adding task to runqueue_round_robin {}, {:?}", self.core, task); - - let round_robin_taskref = RoundRobinTaskRef::new(task); - self.push_back(round_robin_taskref); - - #[cfg(single_simd_task_optimization)] - { - warn!("USING SINGLE_SIMD_TASK_OPTIMIZATION VERSION OF RUNQUEUE::ADD_TASK"); - // notify simd_personality crate about runqueue change, but only for SIMD tasks - if task.simd { - single_simd_task_optimization::simd_tasks_added_to_core(self.iter(), self.core); - } - } - - Ok(()) - } - - /// Removes a `TaskRef` from this RunQueue. 
- pub fn remove_task(&mut self, task: &TaskRef) -> Result<(), &'static str> { - #[cfg(not(rq_eval))] - debug!("Removing task from runqueue_round_robin {}, {:?}", self.core, task); - self.retain(|x| &x.taskref != task); - - #[cfg(single_simd_task_optimization)] { - warn!("USING SINGLE_SIMD_TASK_OPTIMIZATION VERSION OF RUNQUEUE::REMOVE_TASK"); - // notify simd_personality crate about runqueue change, but only for SIMD tasks - if task.simd { - single_simd_task_optimization::simd_tasks_removed_from_core(self.iter(), self.core); - } - } - - Ok(()) - } - - - /// Removes a `TaskRef` from all `RunQueue`s that exist on the entire system. - /// - /// This is a brute force approach that iterates over all runqueues. - pub fn remove_task_from_all(task: &TaskRef) -> Result<(), &'static str> { - for (_core, rq) in RUNQUEUES.iter() { - rq.write().remove_task(task)?; - } - Ok(()) - } -} diff --git a/kernel/scheduler/Cargo.toml b/kernel/scheduler/Cargo.toml index 14f61e37fa..b12521db3a 100644 --- a/kernel/scheduler/Cargo.toml +++ b/kernel/scheduler/Cargo.toml @@ -14,9 +14,5 @@ interrupts = { path = "../interrupts" } sleep = { path = "../sleep" } task = { path = "../task" } -scheduler_round_robin = { path = "../scheduler_round_robin" } -scheduler_epoch = { path = "../scheduler_epoch" } -scheduler_priority = { path = "../scheduler_priority" } - [target.'cfg(target_arch = "x86_64")'.dependencies] x86_64 = "0.14.8" diff --git a/kernel/scheduler/src/lib.rs b/kernel/scheduler/src/lib.rs index af0d3bb9cd..dce7b756bb 100644 --- a/kernel/scheduler/src/lib.rs +++ b/kernel/scheduler/src/lib.rs @@ -14,21 +14,10 @@ #![no_std] #![cfg_attr(target_arch = "x86_64", feature(abi_x86_interrupt))] -cfg_if::cfg_if! { - if #[cfg(epoch_scheduler)] { - extern crate scheduler_epoch as scheduler; - } else if #[cfg(priority_scheduler)] { - extern crate scheduler_priority as scheduler; - } else { - extern crate scheduler_round_robin as scheduler; - } -} - use interrupts::{self, CPU_LOCAL_TIMER_IRQ, interrupt_handler, eoi, EoiBehaviour}; -use task::{self, TaskRef}; -/// A re-export of [`task::schedule()`] for convenience and legacy compatibility. -pub use task::schedule; +/// Re-exports for convenience and legacy compatibility. +pub use task::scheduler::{inherit_priority, priority, schedule, set_priority}; /// Initializes the scheduler on this system using the policy set at compiler time. @@ -41,8 +30,6 @@ pub use task::schedule; /// - `make THESEUS_CONFIG=epoch_scheduler`: epoch scheduler /// - `make THESEUS_CONFIG=priority_scheduler`: priority scheduler pub fn init() -> Result<(), &'static str> { - task::set_scheduler_policy(scheduler::select_next_task); - #[cfg(target_arch = "x86_64")] { interrupts::register_interrupt( CPU_LOCAL_TIMER_IRQ, @@ -91,34 +78,3 @@ interrupt_handler!(timer_tick_handler, None, _stack_frame, { EoiBehaviour::HandlerSentEoi }); - -/// Changes the priority of the given task with the given priority level. -/// Priority values must be between 40 (maximum priority) and 0 (minimum prriority). -/// This function returns an error when a scheduler without priority is loaded. -pub fn set_priority(_task: &TaskRef, _priority: u8) -> Result<(), &'static str> { - #[cfg(any(epoch_scheduler, priority_scheduler))] - { - Ok(scheduler::set_priority(_task, _priority)) - } - #[cfg(not(any(epoch_scheduler, priority_scheduler)))] - { - Err("called set priority on scheduler that doesn't support set priority") - } -} - -/// Returns the priority of a given task. 
-/// This function returns None when a scheduler without priority is loaded. -pub fn get_priority(_task: &TaskRef) -> Option { - #[cfg(any(epoch_scheduler, priority_scheduler))] - { - scheduler::get_priority(_task) - } - #[cfg(not(any(epoch_scheduler, priority_scheduler)))] - { - None - } -} - -pub fn inherit_priority(task: &TaskRef) -> scheduler::PriorityInheritanceGuard<'_> { - scheduler::inherit_priority(task) -} diff --git a/kernel/scheduler_epoch/Cargo.toml b/kernel/scheduler_epoch/Cargo.toml index 0427799eb6..e0b65c5a15 100644 --- a/kernel/scheduler_epoch/Cargo.toml +++ b/kernel/scheduler_epoch/Cargo.toml @@ -14,11 +14,5 @@ version = "0.4.8" [dependencies.task] path = "../task" -[dependencies.runqueue] -path = "../runqueue" - -[dependencies.runqueue_epoch] -path = "../runqueue_epoch" - [lib] crate-type = ["rlib"] diff --git a/kernel/scheduler_epoch/src/lib.rs b/kernel/scheduler_epoch/src/lib.rs index 298f8d88d3..22a6917466 100644 --- a/kernel/scheduler_epoch/src/lib.rs +++ b/kernel/scheduler_epoch/src/lib.rs @@ -1,181 +1,223 @@ -//! This crate picks the next task on token based scheduling policy. -//! At the begining of each scheduling epoch a set of tokens is distributed -//! among tasks depending on their priority. -//! [tokens assigned to each task = (prioirty of each task / prioirty of all -//! tasks) * length of epoch]. Each time a task is picked, the token count of -//! the task is decremented by 1. A task is executed only if it has tokens -//! remaining. When all tokens of all runnable task are exhausted a new -//! scheduling epoch is initiated. In addition this crate offers the interfaces -//! to set and get priorities of each task. +//! This crate implements a token-based epoch scheduling policy. +//! +//! At the begining of each scheduling epoch, a set of tokens is distributed +//! among all runnable tasks, based on their priority relative to all other +//! runnable tasks in the runqueue. The formula for this is: +//! ```ignore +//! tokens_assigned_to_task_i = (priority_task_i / sum_priority_all_tasks) * epoch_length; +//! ``` +//! * Each time a task is picked, its token count is decremented by 1. +//! * A task can only be selected for next execution if it has tokens remaining. +//! * When all tokens of all runnable task are exhausted, a new scheduling epoch begins. +//! +//! This epoch scheduler is also a priority-based scheduler, so it allows +//! getting and setting the priorities of each task. #![no_std] extern crate alloc; -use log::error; -use runqueue_epoch::{RunQueue, MAX_PRIORITY}; +use alloc::{boxed::Box, collections::VecDeque, vec::Vec}; +use core::ops::{Deref, DerefMut}; use task::TaskRef; -pub use runqueue_epoch::{inherit_priority, PriorityInheritanceGuard}; +const MAX_PRIORITY: u8 = 40; +const DEFAULT_PRIORITY: u8 = 20; +const INITIAL_TOKENS: usize = 10; -/// A data structure to transfer data from select_next_task_priority -/// to select_next_task -struct NextTaskResult { - taskref: Option, - idle_task: bool, +/// An instance of an epoch scheduler, typically one per CPU. +pub struct Scheduler { + idle_task: TaskRef, + queue: VecDeque, } -/// Changes the priority of the given task with the given priority level. -/// Priority values must be between 40 (maximum priority) and 0 (minimum -/// prriority). -pub fn set_priority(task: &TaskRef, priority: u8) { - let priority = core::cmp::min(priority, MAX_PRIORITY); - runqueue_epoch::set_priority(task, priority); -} +impl Scheduler { + /// Creates a new epoch scheduler instance with the given idle task. 
+    pub const fn new(idle_task: TaskRef) -> Self {
+        Self {
+            idle_task,
+            queue: VecDeque::new(),
+        }
+    }
-/// Returns the priority of the given task.
-pub fn get_priority(task: &TaskRef) -> Option<u8> {
-    runqueue_epoch::get_priority(task)
-}
+    /// Moves the `TaskRef` at the given `index` in this scheduler's runqueue
+    /// to the end (back) of the runqueue.
+    ///
+    /// Sets the number of tokens for that task to the given `tokens`
+    /// and increments that task's number of context switches.
+    ///
+    /// Returns a cloned reference to the `TaskRef` at the given `index`.
+    fn update_and_move_to_end(&mut self, index: usize, tokens: usize) -> Option<TaskRef> {
+        if let Some(mut priority_task_ref) = self.queue.remove(index) {
+            priority_task_ref.tokens_remaining = tokens;
+            let task_ref = priority_task_ref.task.clone();
+            self.queue.push_back(priority_task_ref);
+            Some(task_ref)
+        } else {
+            None
+        }
+    }
-/// This defines the priority scheduler policy.
-/// Returns None if there is no schedule-able task.
-pub fn select_next_task(apic_id: u8) -> Option<TaskRef> {
-    let next_task = select_next_task_priority(apic_id)?;
-    // If the selected task is idle task we begin a new scheduling epoch
-    if next_task.idle_task {
-        assign_tokens(apic_id);
-        select_next_task_priority(apic_id)?.taskref
-    } else {
-        next_task.taskref
+    fn try_next(&mut self) -> Option<TaskRef> {
+        if let Some((task_index, _)) = self
+            .queue
+            .iter()
+            .enumerate()
+            .find(|(_, task)| task.is_runnable() && task.tokens_remaining > 0)
+        {
+            let chosen_task = self.queue.get(task_index).unwrap();
+            let modified_tokens = chosen_task.tokens_remaining.saturating_sub(1);
+            self.update_and_move_to_end(task_index, modified_tokens)
+        } else {
+            None
+        }
     }
-}
-/// this defines the priority scheduler policy.
-/// Returns None if there is no runqueue
-/// Otherwise returns a task with a flag indicating whether its an idle task.
-fn select_next_task_priority(apic_id: u8) -> Option<NextTaskResult> {
-    let mut runqueue_locked = match RunQueue::get_runqueue(apic_id) {
-        Some(rq) => rq.write(),
-        _ => {
-            // #[cfg(not(loscd_eval))]
-            // error!("BUG: select_next_task_priority(): couldn't get runqueue for core {}",
-            // apic_id);
-            return None;
+    fn assign_tokens(&mut self) {
+        // We begin with total priorities = 1 to avoid division by zero
+        let mut total_priorities: usize = 1;
+
+        // This loop calculates the total priorities of the runqueue
+        for (_i, t) in self.queue.iter().enumerate() {
+            // we assign tokens only to runnable tasks
+            if !t.is_runnable() {
+                continue;
+            }
+
+            total_priorities = total_priorities
+                .saturating_add(1)
+                .saturating_add(t.priority as usize);
         }
-    };
-
-    if let Some((task_index, _)) = runqueue_locked
-        .iter()
-        .enumerate()
-        .find(|(_, task)| task.is_runnable())
-    {
-        let modified_tokens = {
-            let chosen_task = runqueue_locked.get(task_index);
-            match chosen_task.map(|m| m.tokens_remaining) {
-                Some(x) => x.saturating_sub(1),
-                None => 0,
+
+        // Each epoch lasts for a total of 100 tokens by default.
+        // However, as this granularity could skip over low priority tasks
+        // when many concurrent tasks are running, we increase the epoch in such cases.
+        let epoch: usize = core::cmp::max(total_priorities, 100);
+
+        for (_i, t) in self.queue.iter_mut().enumerate() {
+            // we give zero tokens to the idle tasks
+            if t.is_an_idle_task {
+                continue;
            }
-        };
-        let task = runqueue_locked.update_and_move_to_end(task_index, modified_tokens);
+            // we give zero tokens to non-runnable tasks
+            if !t.is_runnable() {
+                continue;
+            }
-        Some(NextTaskResult {
-            taskref: task,
-            idle_task: false,
-        })
-    } else {
-        Some(NextTaskResult {
-            taskref: Some(runqueue_locked.idle_task().clone()),
-            idle_task: true,
-        })
+            // task_tokens = epoch * (task_priority + 1) / total_priorities;
+            let task_tokens = epoch
+                .saturating_mul((t.priority as usize).saturating_add(1))
+                .wrapping_div(total_priorities);
+
+            t.tokens_remaining = task_tokens;
+            // debug!("assign_tokens(): CPU {} chose Task {:?}", cpu_id, &*t);
+        }
    }
 }
-/// This assigns tokens between tasks.
-/// Returns true if successful.
-/// Tokens are assigned based on (prioirty of each task / prioirty of all
-/// tasks).
-fn assign_tokens(apic_id: u8) -> bool {
-    let mut runqueue_locked = match RunQueue::get_runqueue(apic_id) {
-        Some(rq) => rq.write(),
-        _ => {
-            // #[cfg(not(loscd_eval))]
-            // error!("BUG: assign_tokens(): couldn't get runqueue for core {}", apic_id);
-            return false;
-        }
-    };
+impl task::scheduler::Scheduler for Scheduler {
+    fn next(&mut self) -> TaskRef {
+        self.try_next()
+            .or_else(|| {
+                self.assign_tokens();
+                self.try_next()
+            })
+            .unwrap_or_else(|| self.idle_task.clone())
+    }
-    // We begin with total priorities = 1 to avoid division by zero
-    let mut total_priorities: usize = 1;
+    fn add(&mut self, task: TaskRef) {
+        let priority_task_ref = EpochTaskRef::new(task);
+        self.queue.push_back(priority_task_ref);
+    }
-    // This loop calculates the total priorities of the runqueue
-    for (_i, t) in runqueue_locked.iter().enumerate() {
-        // we skip the idle task, it contains zero tokens as it is picked last
-        if t.is_an_idle_task {
-            continue;
-        }
+    fn busyness(&self) -> usize {
+        self.queue.len()
+    }
-        // we assign tokens only to runnable tasks
-        if !t.is_runnable() {
-            continue;
+    fn remove(&mut self, task: &TaskRef) -> bool {
+        let mut task_index = None;
+        for (i, t) in self.queue.iter().enumerate() {
+            if **t == *task {
+                task_index = Some(i);
+                break;
+            }
        }
-        // if this task is pinned, it must not be pinned to a different core
-        if let Some(pinned) = t.pinned_cpu() {
-            if pinned.into_u8() != apic_id {
-                // with per-core runqueues, this should never happen!
-                error!(
-                    "select_next_task() (AP {}) found a task pinned to a different core: {:?}",
-                    apic_id, t
-                );
-                return false;
-            }
+        if let Some(task_index) = task_index {
+            self.queue.remove(task_index);
+            true
+        } else {
+            false
        }
+    }
-        total_priorities = total_priorities
-            .saturating_add(1)
-            .saturating_add(t.priority as usize);
+    fn as_priority_scheduler(&mut self) -> Option<&mut dyn task::scheduler::PriorityScheduler> {
+        Some(self)
    }
-    // We keep each epoch for 100 tokens by default
-    // However since this granularity could miss low priority tasks when
-    // many concurrent tasks are running, we increase the epoch in such cases
-    let epoch: usize = core::cmp::max(total_priorities, 100);
+    fn drain(&mut self) -> Box<dyn Iterator<Item = TaskRef> + '_> {
+        Box::new(self.queue.drain(..).map(|epoch_task| epoch_task.task))
+    }
-    // We iterate through each task in runqueue
-    // We dont use iterator as items are modified in the process
-    for (_i, t) in runqueue_locked.iter_mut().enumerate() {
-        // we give zero tokens to the idle tasks
-        if t.is_an_idle_task {
-            continue;
-        }
+    fn tasks(&self) -> Vec<TaskRef> {
+        self.queue
+            .clone()
+            .into_iter()
+            .map(|epoch_task| epoch_task.task)
+            .collect()
+    }
+}
-        // we give zero tokens to none runnable tasks
-        if !t.is_runnable() {
-            continue;
+impl task::scheduler::PriorityScheduler for Scheduler {
+    fn set_priority(&mut self, task: &TaskRef, priority: u8) -> bool {
+        let priority = core::cmp::min(priority, MAX_PRIORITY);
+        for epoch_task in self.queue.iter_mut() {
+            if epoch_task.task == *task {
+                epoch_task.priority = priority;
+                return true;
+            }
        }
+        false
+    }
-        // if this task is pinned, it must not be pinned to a different core
-        if let Some(pinned) = t.pinned_cpu() {
-            if pinned.into_u8() != apic_id {
-                // with per-core runqueues, this should never happen!
-                error!(
-                    "select_next_task() (AP {}) found a task pinned to a different core: {:?}",
-                    apic_id, &*t
-                );
-                return false;
+    fn priority(&mut self, task: &TaskRef) -> Option<u8> {
+        for epoch_task in self.queue.iter() {
+            if epoch_task.task == *task {
+                return Some(epoch_task.priority);
            }
        }
-        // task_tokens = epoch * (taskref + 1) / total_priorities;
-        let task_tokens = epoch
-            .saturating_mul((t.priority as usize).saturating_add(1))
-            .wrapping_div(total_priorities);
+        None
+    }
+}
+
+#[derive(Debug, Clone)]
+struct EpochTaskRef {
+    task: TaskRef,
+    priority: u8,
+    tokens_remaining: usize,
+}
+
+impl Deref for EpochTaskRef {
+    type Target = TaskRef;
+
+    fn deref(&self) -> &TaskRef {
+        &self.task
+    }
+}
-        t.tokens_remaining = task_tokens;
-        // debug!("assign_tokens(): AP {} chose Task {:?}", apic_id, &*t);
-        // break;
+impl DerefMut for EpochTaskRef {
+    fn deref_mut(&mut self) -> &mut TaskRef {
+        &mut self.task
    }
+}
-    true
+impl EpochTaskRef {
+    fn new(task: TaskRef) -> EpochTaskRef {
+        EpochTaskRef {
+            task,
+            priority: DEFAULT_PRIORITY,
+            tokens_remaining: INITIAL_TOKENS,
+        }
+    }
 }
diff --git a/kernel/scheduler_priority/Cargo.toml b/kernel/scheduler_priority/Cargo.toml
index 5965429131..6f452d1c46 100644
--- a/kernel/scheduler_priority/Cargo.toml
+++ b/kernel/scheduler_priority/Cargo.toml
@@ -7,7 +7,6 @@ edition = "2021"
 
 [dependencies]
 log = "0.4.8"
-runqueue_priority = { path = "../runqueue_priority" }
 task = { path = "../task" }
 time = { path = "../time" }
diff --git a/kernel/scheduler_priority/src/lib.rs b/kernel/scheduler_priority/src/lib.rs
index d22d042de7..1c11e6b6b8 100644
--- a/kernel/scheduler_priority/src/lib.rs
+++ b/kernel/scheduler_priority/src/lib.rs
@@ -4,43 +4,156 @@
 
 extern crate alloc;
 
-use alloc::vec::Vec;
-use log::error;
-use runqueue_priority::RunQueue;
+use alloc::{boxed::Box, collections::BinaryHeap, vec::Vec};
+use core::cmp::Ordering;
+
 use task::TaskRef;
+use time::Instant;
+
+const DEFAULT_PRIORITY: u8 = 0;
+
+pub struct Scheduler {
+    idle_task: TaskRef,
+    queue: BinaryHeap<PriorityTaskRef>,
 }
-pub use runqueue_priority::{
-    get_priority, inherit_priority, set_priority, PriorityInheritanceGuard,
-};
-
-/// This defines the priority scheduler policy.
-/// Returns None if there is no schedule-able task
-pub fn select_next_task(apic_id: u8) -> Option<TaskRef> {
-    let mut runqueue_locked = match RunQueue::get_runqueue(apic_id) {
-        Some(rq) => rq.write(),
-        _ => {
-            error!("BUG: select_next_task_priority(): couldn't get runqueue for core {apic_id}",);
-            return None;
+
+impl Scheduler {
+    pub fn new(idle_task: TaskRef) -> Self {
+        Self {
+            idle_task,
+            queue: BinaryHeap::new(),
        }
-    };
-
-    // This is a temporary solution before the PR to only store runnable tasks in
-    // the run queue is merged.
-    let mut blocked_tasks = Vec::with_capacity(2);
-    while let Some(mut task) = runqueue_locked.pop() {
-        if task.is_runnable() {
-            for t in blocked_tasks {
-                runqueue_locked.push(t)
+    }
+}
+
+impl task::scheduler::Scheduler for Scheduler {
+    fn next(&mut self) -> TaskRef {
+        // This is a temporary solution before the PR to only store runnable tasks in
+        // the run queue is merged.
+        let mut blocked_tasks = Vec::with_capacity(2);
+        while let Some(mut task) = self.queue.pop() {
+            if task.task.is_runnable() {
+                for t in blocked_tasks {
+                    self.queue.push(t)
+                }
+                task.last_ran = time::now::<time::Monotonic>();
+                self.queue.push(task.clone());
+                return task.task;
+            } else {
+                blocked_tasks.push(task);
            }
-            task.last_ran = time::now::<time::Monotonic>();
-            runqueue_locked.push(task.clone());
-            return Some(task.task);
+        }
+        for task in blocked_tasks {
+            self.queue.push(task);
+        }
+        self.idle_task.clone()
+    }
+
+    fn add(&mut self, task: TaskRef) {
+        self.queue
+            .push(PriorityTaskRef::new(task, DEFAULT_PRIORITY));
+    }
+
+    fn busyness(&self) -> usize {
+        self.queue.len()
+    }
+
+    fn remove(&mut self, task: &TaskRef) -> bool {
+        let old_len = self.queue.len();
+        self.queue
+            .retain(|priority_task| priority_task.task != *task);
+        let new_len = self.queue.len();
+        // We should have removed at most one task from the run queue.
+        debug_assert!(
+            old_len - new_len < 2,
+            "difference between run queue lengths was: {}",
+            old_len - new_len
+        );
+        new_len != old_len
+    }
+
+    fn as_priority_scheduler(&mut self) -> Option<&mut dyn task::scheduler::PriorityScheduler> {
+        Some(self)
+    }
+
+    fn drain(&mut self) -> alloc::boxed::Box<dyn Iterator<Item = TaskRef> + '_> {
+        Box::new(self.queue.drain().map(|priority_task| priority_task.task))
+    }
+
+    fn tasks(&self) -> Vec<TaskRef> {
+        self.queue
+            .clone()
+            .into_iter()
+            .map(|priority_task| priority_task.task)
+            .collect()
+    }
+}
+
+impl task::scheduler::PriorityScheduler for Scheduler {
+    fn set_priority(&mut self, task: &TaskRef, priority: u8) -> bool {
+        let previous_len = self.queue.len();
+        self.queue.retain(|t| t.task != *task);
+
+        if previous_len != self.queue.len() {
+            // We should have at most removed one task from the run queue.
+            debug_assert_eq!(self.queue.len() + 1, previous_len);
+            self.queue.push(PriorityTaskRef {
+                task: task.clone(),
+                priority,
+                // Not technically correct, but this will be reset next time it is run.
+                last_ran: Instant::ZERO,
+            });
+            true
        } else {
-            blocked_tasks.push(task);
+            false
+        }
+    }
+
+    fn priority(&mut self, task: &TaskRef) -> Option<u8> {
+        for priority_task in self.queue.iter() {
+            if priority_task.task == *task {
+                return Some(priority_task.priority);
+            }
+        }
+        None
+    }
+}
+
+#[derive(Clone, Debug, Eq)]
+struct PriorityTaskRef {
+    task: TaskRef,
+    priority: u8,
+    last_ran: Instant,
+}
+
+impl PriorityTaskRef {
+    pub const fn new(task: TaskRef, priority: u8) -> Self {
+        Self {
+            task,
+            priority,
+            last_ran: Instant::ZERO,
        }
    }
-    for task in blocked_tasks {
-        runqueue_locked.push(task);
+}
+
+impl PartialEq for PriorityTaskRef {
+    fn eq(&self, other: &Self) -> bool {
+        self.priority.eq(&other.priority) && self.last_ran.eq(&other.last_ran)
+    }
+}
+
+impl PartialOrd for PriorityTaskRef {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        match self.priority.cmp(&other.priority) {
+            // Tasks that ran longer ago should be prioritised.
+            Ordering::Equal => Some(self.last_ran.cmp(&other.last_ran).reverse()),
+            ordering => Some(ordering),
+        }
+    }
+}
+
+impl Ord for PriorityTaskRef {
+    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
+        self.priority.cmp(&other.priority)
    }
-    Some(runqueue_locked.idle_task().clone())
 }
diff --git a/kernel/scheduler_round_robin/Cargo.toml b/kernel/scheduler_round_robin/Cargo.toml
index 3563d77988..fe6e0b02a0 100644
--- a/kernel/scheduler_round_robin/Cargo.toml
+++ b/kernel/scheduler_round_robin/Cargo.toml
@@ -6,19 +6,9 @@ version = "0.1.0"
 edition = "2021"
 
 [dependencies]
+log = "0.4.8"
 spin = "0.9.4"
-
-[dependencies.log]
-version = "0.4.8"
-
-[dependencies.task]
-path = "../task"
-
-[dependencies.runqueue]
-path = "../runqueue"
-
-[dependencies.runqueue_round_robin]
-path = "../runqueue_round_robin"
+task = { path = "../task" }
 
 [lib]
 crate-type = ["rlib"]
diff --git a/kernel/scheduler_round_robin/src/lib.rs b/kernel/scheduler_round_robin/src/lib.rs
index cb85fee4dd..d022c7df6c 100644
--- a/kernel/scheduler_round_robin/src/lib.rs
+++ b/kernel/scheduler_round_robin/src/lib.rs
@@ -6,42 +6,74 @@
 
 extern crate alloc;
 
-use core::marker::PhantomData;
+use alloc::{boxed::Box, collections::VecDeque, vec::Vec};
 
-use log::error;
-use runqueue_round_robin::RunQueue;
 use task::TaskRef;
 
-/// This defines the round robin scheduler policy.
-/// Returns None if there is no schedule-able task
-// TODO: Remove option?
-// TODO: Return &'static TaskRef?
-pub fn select_next_task(apic_id: u8) -> Option<TaskRef> {
-    let mut runqueue_locked = match RunQueue::get_runqueue(apic_id) {
-        Some(rq) => rq.write(),
-        _ => {
-            error!("BUG: select_next_task_round_robin(): couldn't get runqueue for core {apic_id}",);
-            return None;
-        }
-    };
+pub struct Scheduler {
+    idle_task: TaskRef,
+    queue: VecDeque<TaskRef>,
+}
-    if let Some((task_index, _)) = runqueue_locked
-        .iter()
-        .enumerate()
-        .find(|(_, task)| task.is_runnable())
-    {
-        runqueue_locked.move_to_end(task_index)
-    } else {
-        Some(runqueue_locked.idle_task().clone())
+impl Scheduler {
+    pub const fn new(idle_task: TaskRef) -> Self {
+        Self {
+            idle_task,
+            queue: VecDeque::new(),
+        }
    }
 }
-pub struct PriorityInheritanceGuard<'a> {
-    phantom: PhantomData<&'a ()>,
-}
+impl task::scheduler::Scheduler for Scheduler {
+    fn next(&mut self) -> TaskRef {
+        if let Some((task_index, _)) = self
+            .queue
+            .iter()
+            .enumerate()
+            .find(|(_, task)| task.is_runnable())
+        {
+            let task = self.queue.swap_remove_front(task_index).unwrap();
+            self.queue.push_back(task.clone());
+            task
+        } else {
+            self.idle_task.clone()
+        }
+    }
+
+    fn busyness(&self) -> usize {
+        self.queue.len()
+    }
+
+    fn add(&mut self, task: TaskRef) {
+        self.queue.push_back(task);
+    }
+
+    fn remove(&mut self, task: &TaskRef) -> bool {
+        let mut task_index = None;
+        for (i, t) in self.queue.iter().enumerate() {
+            if t == task {
+                task_index = Some(i);
+                break;
+            }
+        }
+
+        if let Some(task_index) = task_index {
+            self.queue.remove(task_index);
+            true
+        } else {
+            false
+        }
+    }
+
+    fn as_priority_scheduler(&mut self) -> Option<&mut dyn task::scheduler::PriorityScheduler> {
+        None
+    }
+
+    fn drain(&mut self) -> Box<dyn Iterator<Item = TaskRef> + '_> {
+        Box::new(self.queue.drain(..))
+    }
-pub fn inherit_priority(_: &TaskRef) -> PriorityInheritanceGuard<'_> {
-    PriorityInheritanceGuard {
-        phantom: PhantomData,
+    fn tasks(&self) -> Vec<TaskRef> {
+        self.queue.clone().into()
    }
 }
diff --git a/kernel/spawn/Cargo.toml b/kernel/spawn/Cargo.toml
index c3ce259cfa..1145da653b 100644
--- a/kernel/spawn/Cargo.toml
+++ b/kernel/spawn/Cargo.toml
@@ -7,6 +7,7 @@ edition = "2018"
 [dependencies]
+cfg-if = "1.0.0"
 log = "0.4.8"
 spin = "0.9.4"
 lazy_static = { features = ["spin_no_std"], version = "1.4.0" }
@@ -19,7 +20,6 @@ cpu = { path = "../cpu" }
 preemption = { path = "../preemption" }
 task = { path = "../task" }
 task_struct = { path = "../task_struct" }
-runqueue = { path = "../runqueue" }
 scheduler = { path = "../scheduler" }
 mod_mgmt = { path = "../mod_mgmt" }
 context_switch = { path = "../context_switch" }
@@ -29,6 +29,10 @@ thread_local_macro = { path = "../thread_local_macro" }
 no_drop = { path = "../no_drop" }
 early_tls = { path = "../early_tls" }
 
+scheduler_epoch = { path = "../scheduler_epoch" }
+scheduler_priority = { path = "../scheduler_priority" }
+scheduler_round_robin = { path = "../scheduler_round_robin" }
+
 [target.'cfg(target_arch = "x86_64")'.dependencies]
 fault_crate_swap = { path = "../fault_crate_swap" }
 catch_unwind = { path = "../catch_unwind" }
diff --git a/kernel/spawn/src/lib.rs b/kernel/spawn/src/lib.rs
index 043e6855e9..d0ff408b87 100755
--- a/kernel/spawn/src/lib.rs
+++ b/kernel/spawn/src/lib.rs
@@ -59,11 +59,17 @@ pub fn init(
         .spawn_restartable(None)?
         .clone();
 
-    runqueue::init(cpu_id.into_u8(), idle_task)?;
-    runqueue::add_task_to_specific_runqueue(
-        cpu_id.into_u8(),
-        exitable_bootstrap_task.clone(),
-    )?;
+    cfg_if::cfg_if! {
+        if #[cfg(epoch_scheduler)] {
+            let scheduler = scheduler_epoch::Scheduler::new(idle_task);
+        } else if #[cfg(priority_scheduler)] {
+            let scheduler = scheduler_priority::Scheduler::new(idle_task);
+        } else {
+            let scheduler = scheduler_round_robin::Scheduler::new(idle_task);
+        }
+    }
+    task::scheduler::set_policy(cpu_id, scheduler);
+    task::scheduler::add_task_to(cpu_id, exitable_bootstrap_task.clone());
 
     Ok(BootstrapTaskRef {
         cpu_id,
@@ -439,9 +445,9 @@ impl TaskBuilder
         // Idle tasks are not stored on the run queue.
         if !self.idle {
             if let Some(cpu) = self.pin_on_cpu {
-                runqueue::add_task_to_specific_runqueue(cpu.into_u8(), task_ref.clone())?;
+                task::scheduler::add_task_to(cpu, task_ref.clone());
             } else {
-                runqueue::add_task_to_any_runqueue(task_ref.clone())?;
+                task::scheduler::add_task(task_ref.clone());
             }
         }
 
@@ -877,7 +883,7 @@ fn task_restartable_cleanup_failure(current_task: ExitableTaskRef, kill
 #[inline(always)]
 fn task_cleanup_final_internal(current_task: &ExitableTaskRef) {
     // First, remove the task from its runqueue(s).
-    remove_current_task_from_runqueue(current_task);
+    task::scheduler::remove_task_from_current(current_task);
 
     // Second, run TLS object destructors, which will drop any TLS objects
     // that were lazily initialized during this execution of this task.
@@ -994,21 +1000,7 @@ where
 
 /// Helper function to remove a task from its runqueue and drop it.
 fn remove_current_task_from_runqueue(current_task: &ExitableTaskRef) {
-    // Special behavior when evaluating runqueues
-    #[cfg(rq_eval)] {
-        runqueue::remove_task_from_all(current_task).unwrap();
-    }
-
-    // In the regular case, we do not perform task migration between cores,
-    // so we can use the heuristic that the task is only on the current core's runqueue.
-    #[cfg(not(rq_eval))] {
-        if let Err(e) = runqueue::get_runqueue(cpu::current_cpu().into_u8())
-            .ok_or("couldn't get this CPU's ID or runqueue to remove exited task from it")
-            .and_then(|rq| rq.write().remove_task(current_task))
-        {
-            error!("BUG: couldn't remove exited task from runqueue: {}", e);
-        }
-    }
+    task::scheduler::remove_task(current_task);
 }
 
 /// A basic idle task that does nothing but loop endlessly.
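With the `runqueue` crates removed, spawn-time code no longer reaches into a per-CPU `RunQueue` directly; everything goes through the `task::scheduler` API added below in `kernel/task/src/scheduler.rs`. As a rough illustration of how a caller might combine those functions, the hypothetical `migrate_task` helper below (not part of this patch) moves a task between two CPUs' run queues using only `remove_task_from` and `add_task_to`:

```rust
// Illustrative sketch only, not code from this patch.
// `migrate_task` is hypothetical; it relies solely on the public
// `task::scheduler` functions introduced in kernel/task/src/scheduler.rs.
use cpu::CpuId;
use task::TaskRef;

/// Moves `task` from `src`'s run queue to `dst`'s run queue.
/// Returns `false` if `task` was not found on `src`'s run queue.
fn migrate_task(task: &TaskRef, src: CpuId, dst: CpuId) -> bool {
    if task::scheduler::remove_task_from(task, src) {
        // The task is no longer on any run queue, so re-add it to `dst`.
        task::scheduler::add_task_to(dst, task.clone());
        true
    } else {
        false
    }
}
```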
diff --git a/kernel/task/Cargo.toml b/kernel/task/Cargo.toml
index 2e0361ef4f..e16a670a14 100644
--- a/kernel/task/Cargo.toml
+++ b/kernel/task/Cargo.toml
@@ -23,5 +23,6 @@ no_drop = { path = "../no_drop" }
 preemption = { path = "../preemption" }
 stack = { path = "../stack" }
 sync_irq = { path = "../../libs/sync_irq" }
+sync_preemption = { path = "../sync_preemption" }
 task_struct = { path = "../task_struct" }
 waker_generic = { path = "../waker_generic" }
diff --git a/kernel/task/src/lib.rs b/kernel/task/src/lib.rs
index b64af49d3a..2efd3947f6 100755
--- a/kernel/task/src/lib.rs
+++ b/kernel/task/src/lib.rs
@@ -26,9 +26,12 @@
 #![no_std]
 #![feature(negative_impls)]
 #![feature(thread_local)]
+#![feature(let_chains)]
 
 extern crate alloc;
 
+pub mod scheduler;
+
 use alloc::{
     boxed::Box,
     collections::BTreeMap,
@@ -45,7 +48,6 @@ use core::{
     task::Waker,
 };
 use cpu::CpuId;
-use crossbeam_utils::atomic::AtomicCell;
 use irq_safety::hold_interrupts;
 use log::error;
 use environment::Environment;
@@ -65,6 +67,7 @@ pub use task_struct::{
 };
 #[cfg(simd_personality)]
 pub use task_struct::SimdExt;
+pub use scheduler::schedule;
 
 
 /// The list of all Tasks in the system.
@@ -584,75 +587,6 @@ pub fn take_kill_handler() -> Option<KillHandler> {
         .flatten()
 }
 
-
-pub use scheduler::*;
-mod scheduler {
-    use super::*;
-
-    /// Yields the current CPU by selecting a new `Task` to run next,
-    /// and then switches to that new `Task`.
-    ///
-    /// The new "next" `Task` to run will be selected by the currently-active
-    /// scheduler policy.
-    ///
-    /// Preemption will be disabled while this function runs,
-    /// but interrupts are not disabled because it is not necessary.
-    ///
-    /// ## Return
-    /// * `true` if a new task was selected and switched to.
-    /// * `false` if no new task was selected,
-    ///   meaning the current task will continue running.
-    #[doc(alias("yield"))]
-    pub fn schedule() -> bool {
-        let preemption_guard = preemption::hold_preemption();
-        // If preemption was not previously enabled (before we disabled it above),
-        // then we shouldn't perform a task switch here.
-        if !preemption_guard.preemption_was_enabled() {
-            // trace!("Note: preemption was disabled on CPU {}, skipping scheduler.", cpu::current_cpu());
-            return false;
-        }
-
-        let cpu_id = preemption_guard.cpu_id();
-
-        let Some(next_task) = (SELECT_NEXT_TASK_FUNC.load())(cpu_id.into_u8()) else {
-            return false; // keep running the same current task
-        };
-
-        let (did_switch, recovered_preemption_guard) = task_switch(
-            next_task,
-            cpu_id,
-            preemption_guard,
-        );
-
-        // trace!("AFTER TASK_SWITCH CALL (CPU {}) new current: {:?}, interrupts are {}", cpu_id, task::get_my_current_task(), irq_safety::interrupts_enabled());
-
-        drop(recovered_preemption_guard);
-        did_switch
-    }
-
-    /// The signature for the function that selects the next task for the given CPU.
-    ///
-    /// This is used when the [`schedule()`] function is invoked.
-    pub type SchedulerFunc = fn(u8) -> Option<TaskRef>;
-
-    /// The function currently registered as the system-wide scheduler policy.
-    ///
-    /// This is initialized to a dummy function that returns no "next" task,
-    /// meaning that no scheduling will occur until it is initialized.
-    /// Currently, this is initialized from within `scheduler::init()`.
-    static SELECT_NEXT_TASK_FUNC: AtomicCell<SchedulerFunc> = AtomicCell::new(|_| None);
-
-    /// Sets the active scheduler policy used by [`schedule()`] to select the next task.
-    ///
-    /// Currently, we only support one scheduler policy for the whole system,
-    /// but supporting different policies on a per-CPU, per-namespace, or per-arbitrary domain basis
-    /// would be a relatively simple immprovement.
-    pub fn set_scheduler_policy(select_next_task_func: SchedulerFunc) {
-        SELECT_NEXT_TASK_FUNC.store(select_next_task_func);
-    }
-}
-
-
 /// Switches from the current task to the given `next` task.
 ///
 /// ## Arguments
diff --git a/kernel/task/src/scheduler.rs b/kernel/task/src/scheduler.rs
new file mode 100644
index 0000000000..404e86e199
--- /dev/null
+++ b/kernel/task/src/scheduler.rs
@@ -0,0 +1,300 @@
+use alloc::{boxed::Box, sync::Arc, vec::Vec};
+use core::ptr;
+
+use cpu::CpuId;
+use spin::Mutex;
+use sync_preemption::PreemptionSafeMutex;
+
+use crate::TaskRef;
+
+/// List of all the schedulers on the system.
+///
+/// This is primarily used for spawning tasks, either to find the least busy CPU
+/// or spawn a task pinned to a particular CPU.
+///
+/// The outer mutex does not need to be preemption-safe, because it is never
+/// accessed from `schedule`. In fact, ideally it would be a blocking mutex, but
+/// that leads to circular dependencies.
+static SCHEDULERS: Mutex<Vec<(CpuId, Arc<ConcurrentScheduler>)>> = Mutex::new(Vec::new());
+
+/// A reference to the current CPU's scheduler.
+///
+/// This isn't strictly necessary, but it greatly improves performance, as it
+/// avoids having to lock the system-wide list of schedulers.
+#[cls::cpu_local]
+static SCHEDULER: Option<Arc<ConcurrentScheduler>> = None;
+
+type ConcurrentScheduler = PreemptionSafeMutex<dyn Scheduler>;
+
+/// Yields the current CPU by selecting a new `Task` to run next,
+/// and then switches to that new `Task`.
+///
+/// The new "next" `Task` to run will be selected by the currently-active
+/// scheduler policy.
+///
+/// Preemption will be disabled while this function runs,
+/// but interrupts are not disabled because it is not necessary.
+///
+/// ## Return
+/// * `true` if a new task was selected and switched to.
+/// * `false` if no new task was selected, meaning the current task will
+///   continue running.
+#[doc(alias("yield"))]
+pub fn schedule() -> bool {
+    let preemption_guard = preemption::hold_preemption();
+    // If preemption was not previously enabled (before we disabled it above),
+    // then we shouldn't perform a task switch here.
+    if !preemption_guard.preemption_was_enabled() {
+        // trace!("Note: preemption was disabled on CPU {}, skipping scheduler.", cpu::current_cpu());
+        return false;
+    }
+
+    let cpu_id = preemption_guard.cpu_id();
+
+    let next_task = SCHEDULER.update_guarded(
+        |scheduler| scheduler.as_ref().unwrap().lock().next(),
+        &preemption_guard,
+    );
+
+    let (did_switch, recovered_preemption_guard) =
+        super::task_switch(next_task, cpu_id, preemption_guard);
+
+    // log::trace!("AFTER TASK_SWITCH CALL (CPU {}) new current: {:?}, interrupts are {}", cpu_id, super::get_my_current_task(), irq_safety::interrupts_enabled());
+
+    drop(recovered_preemption_guard);
+    did_switch
+}
+
+/// Sets the scheduler policy for the given CPU.
+pub fn set_policy<T>(cpu_id: CpuId, scheduler: T)
+where
+    T: Scheduler,
+{
+    let mutex = PreemptionSafeMutex::new(scheduler);
+    let scheduler = Arc::new(mutex);
+
+    let mut locked = SCHEDULERS.lock();
+    SCHEDULER.update(|current_scheduler| {
+        if let Some(old_scheduler) = current_scheduler {
+            let mut old_scheduler_index = None;
+            for (i, (cpu, scheduler)) in locked.iter().enumerate() {
+                if *cpu == cpu_id {
+                    debug_assert!(ptr::eq(old_scheduler, scheduler));
+                    old_scheduler_index = Some(i);
+                    break;
+                }
+            }
+
+            if let Some(old_scheduler_index) = old_scheduler_index {
+                locked.swap_remove(old_scheduler_index);
+            } else {
+                log::error!("BUG: current scheduler not found in `SCHEDULERS`");
+            }
+
+            let mut new_scheduler = scheduler.lock();
+            for task in old_scheduler.lock().drain() {
+                new_scheduler.add(task);
+            }
+        }
+
+        locked.push((cpu_id, scheduler.clone() as _));
+        *current_scheduler = Some(scheduler as _);
+    });
+}
+
+/// Adds the given task to the least busy run queue.
+pub fn add_task(task: TaskRef) {
+    let locked = SCHEDULERS.lock();
+
+    let mut min_busyness = usize::MAX;
+    let mut least_busy_index = None;
+
+    for (i, (_, scheduler)) in locked.iter().enumerate() {
+        let busyness = scheduler.lock().busyness();
+        if busyness < min_busyness {
+            least_busy_index = Some(i);
+            min_busyness = busyness;
+        }
+    }
+
+    locked[least_busy_index.unwrap()].1.lock().add(task);
+}
+
+/// Adds the given task to the specified CPU's run queue.
+pub fn add_task_to(cpu_id: CpuId, task: TaskRef) {
+    for (cpu, scheduler) in SCHEDULERS.lock().iter() {
+        if *cpu == cpu_id {
+            scheduler.lock().add(task);
+            return;
+        }
+    }
+}
+
+/// Adds the given task to the current CPU's run queue.
+pub fn add_task_to_current(task: TaskRef) {
+    SCHEDULER.update(|scheduler| scheduler.as_ref().unwrap().lock().add(task))
+}
+
+/// Removes the given task from all run queues.
+pub fn remove_task(task: &TaskRef) -> bool {
+    for (_, scheduler) in SCHEDULERS.lock().iter() {
+        if scheduler.lock().remove(task) {
+            // A task will only be on one run queue.
+            return true;
+        }
+    }
+    false
+}
+
+/// Removes the given task from the specified CPU's run queue.
+pub fn remove_task_from(task: &TaskRef, cpu_id: CpuId) -> bool {
+    for (cpu, scheduler) in SCHEDULERS.lock().iter() {
+        if *cpu == cpu_id {
+            return scheduler.lock().remove(task);
+        }
+    }
+    false
+}
+
+/// Removes the given task from the current CPU's run queue.
+pub fn remove_task_from_current(task: &TaskRef) -> bool {
+    SCHEDULER.update(|scheduler| scheduler.as_ref().unwrap().lock().remove(task))
+}
+
+/// A task scheduler.
+pub trait Scheduler: Send + Sync + 'static {
+    /// Returns the next task to run.
+    fn next(&mut self) -> TaskRef;
+
+    /// Adds a task to the run queue.
+    fn add(&mut self, task: TaskRef);
+
+    /// Returns a measure of how busy the scheduler is, with higher values
+    /// representing a busier scheduler.
+    fn busyness(&self) -> usize;
+
+    /// Removes a task from the run queue.
+    fn remove(&mut self, task: &TaskRef) -> bool;
+
+    /// Returns a reference to this scheduler as a priority scheduler, if it is one.
+    fn as_priority_scheduler(&mut self) -> Option<&mut dyn PriorityScheduler>;
+
+    /// Clears the scheduler's runqueue, returning an iterator over all contained tasks.
+    fn drain(&mut self) -> Box<dyn Iterator<Item = TaskRef> + '_>;
+
+    /// Returns a cloned list of contained tasks being scheduled by this scheduler.
+    ///
+    /// The list should be considered out-of-date as soon as it is called,
+    /// but can be useful as a heuristic or for debugging.
+    fn tasks(&self) -> Vec<TaskRef>;
+}
+
+/// A task scheduler that supports some notion of priority.
+pub trait PriorityScheduler {
+    /// Sets the priority of the given task.
+    fn set_priority(&mut self, task: &TaskRef, priority: u8) -> bool;
+
+    /// Gets the priority of the given task.
+    fn priority(&mut self, task: &TaskRef) -> Option<u8>;
+}
+
+/// Returns the priority of the given task.
+///
+/// Returns `None` if the task is not on a priority run queue.
+pub fn priority(task: &TaskRef) -> Option<u8> {
+    for (_, scheduler) in SCHEDULERS.lock().iter() {
+        if let Some(priority) = scheduler
+            .lock()
+            .as_priority_scheduler()
+            .and_then(|priority_scheduler| priority_scheduler.priority(task))
+        {
+            return Some(priority);
+        }
+    }
+    None
+}
+
+/// Sets the priority of the given task.
+///
+/// Returns `false` if the task is not on a priority run queue.
+pub fn set_priority(task: &TaskRef, priority: u8) -> bool {
+    for (_, scheduler) in SCHEDULERS.lock().iter() {
+        if let Some(true) = scheduler
+            .lock()
+            .as_priority_scheduler()
+            .map(|priority_scheduler| priority_scheduler.set_priority(task, priority))
+        {
+            return true;
+        }
+    }
+    false
+}
+
+/// Returns the busyness of the scheduler on the given CPU,
+/// in which higher values indicate a busier scheduler.
+pub fn busyness(cpu_id: CpuId) -> Option<usize> {
+    for (cpu, scheduler) in SCHEDULERS.lock().iter() {
+        if *cpu == cpu_id {
+            return Some(scheduler.lock().busyness());
+        }
+    }
+    None
+}
+
+/// Modifies the given task's priority to be the maximum of its priority
+/// and the current task's priority.
+///
+/// Returns a guard which reverts the change when dropped.
+pub fn inherit_priority(task: &TaskRef) -> PriorityInheritanceGuard<'_> {
+    let current_priority = super::with_current_task(priority).unwrap();
+    let other_priority = priority(task);
+
+    if let (Some(current_priority), Some(other_priority)) =
+        (current_priority, other_priority) && current_priority > other_priority
+    {
+        set_priority(task, current_priority);
+    }
+
+    PriorityInheritanceGuard {
+        inner: if let (Some(current_priority), Some(other_priority)) =
+            (current_priority, other_priority)
+            && current_priority > other_priority
+        {
+            Some((task, other_priority))
+        } else {
+            None
+        },
+    }
+}
+
+/// A guard that lowers a task's priority back to its previous value when dropped.
+pub struct PriorityInheritanceGuard<'a> {
+    inner: Option<(&'a TaskRef, u8)>,
+}
+impl<'a> Drop for PriorityInheritanceGuard<'a> {
+    fn drop(&mut self) {
+        if let Some((task, priority)) = self.inner {
+            set_priority(task, priority);
+        }
+    }
+}
+
+/// Returns the list of tasks running on each CPU.
+///
+/// To avoid race conditions with migrating tasks, this function takes a lock
+/// over all system schedulers. This is incredibly disruptive and should be
+/// avoided at all costs.
+pub fn tasks() -> Vec<(CpuId, Vec<TaskRef>)> {
+    let schedulers = SCHEDULERS.lock().clone();
+    let locked = schedulers
+        .iter()
+        .map(|(cpu, scheduler)| (cpu, scheduler.lock()))
+        // We eagerly evaluate so that all schedulers are actually locked.
+        .collect::<Vec<_>>();
+    let result = locked
+        .iter()
+        .map(|(cpu, locked_scheduler)| (**cpu, locked_scheduler.tasks()))
+        .collect();
+    drop(locked);
+    result
+}
diff --git a/theseus_features/Cargo.toml b/theseus_features/Cargo.toml
index 5eb9e46b0f..47bebb30aa 100644
--- a/theseus_features/Cargo.toml
+++ b/theseus_features/Cargo.toml
@@ -76,7 +76,6 @@ test_wasmtime = { path = "../applications/test_wasmtime", optional = true }
 bm = { path = "../applications/bm", optional = true }
 channel_eval = { path = "../applications/channel_eval", optional = true }
 heap_eval = { path = "../applications/heap_eval", optional = true }
-rq_access_eval = { path = "../applications/rq_access_eval", optional = true }
 rq_eval = { path = "../applications/rq_eval", optional = true }
 scheduler_eval = { path = "../applications/scheduler_eval", optional = true }
 
@@ -138,7 +137,6 @@ theseus_benchmarks = [
     "bm",
     "channel_eval",
     "heap_eval",
-    "rq_access_eval",
     "rq_eval",
     "scheduler_eval",
 ]
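The `inherit_priority` function and its `PriorityInheritanceGuard` are the building blocks for priority inheritance in blocking primitives. Below is a minimal sketch of how a caller might use them, assuming only the `task::scheduler` items added above; the lock-state check is a hypothetical placeholder, not code from this patch.

```rust
// Illustrative sketch only, not code from this patch.
use task::TaskRef;

/// Waits for `owner` to release a resource, boosting `owner` to at least
/// the current task's priority while waiting.
fn wait_for(owner: &TaskRef, still_held: impl Fn() -> bool) {
    // The guard raises `owner`'s priority if it is lower than ours;
    // dropping the guard reverts `owner` to its previous priority.
    let _guard = task::scheduler::inherit_priority(owner);
    while still_held() {
        // Yield so the (now boosted) owner can run and finish sooner.
        task::scheduler::schedule();
    }
    // `_guard` is dropped here, restoring `owner`'s original priority.
}
```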