
Commit 18dc5f5

Don't store idle tasks on each runqueue (theseus-os#982)
* As part of our ongoing scheduler/runqueue redesign, there is no real reason to store each CPU's idle task in a scheduler runqueue. Idle tasks are only used as a "last resort" when no runnable tasks exist, so we can store them alongside the runqueue rather than inside it.
* We also change the creation of idle tasks to happen as part of initializing the tasking subsystem on each CPU, rather than as an explicit step in the captain's init routine.
* In the future, each CPU's idle task can be stored in CPU-local storage, since idle tasks conceptually "belong" to a CPU instance rather than to a runqueue instance.
* Unrelated: move `state_transfer` to old crates, as it currently doesn't build any more due to the ongoing scheduler/runqueue changes.

Signed-off-by: Klimenty Tsoutsman <[email protected]>
1 parent a3b23da commit 18dc5f5
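
The gist of the change, as a minimal sketch in ordinary Rust (a stand-in `Task` type and `std` collections instead of Theseus' `TaskRef` and kernel types): the idle task moves from being an element of the queue, flagged and skipped during selection, to a sibling field that is supplied at init time and used only as a last-resort fallback.

```rust
use std::collections::VecDeque;

// Stand-in for Theseus' `TaskRef`; illustrative only.
#[derive(Clone, Debug, PartialEq)]
struct Task {
    name: &'static str,
    runnable: bool,
}

/// After this commit: the idle task lives *alongside* the queue (previously it
/// was stored inside the queue and flagged with `is_an_idle_task`).
struct RunQueue {
    queue: VecDeque<Task>,
    idle_task: Task,
}

impl RunQueue {
    /// Mirrors the new two-argument `init`: the caller supplies the idle task.
    fn new(idle_task: Task) -> Self {
        Self { queue: VecDeque::new(), idle_task }
    }

    /// Mirrors the new `idle_task()` accessor added to each runqueue crate.
    fn idle_task(&self) -> &Task {
        &self.idle_task
    }

    /// A scheduler now falls back to the idle task only as a last resort.
    fn select_next(&mut self) -> Task {
        if let Some(pos) = self.queue.iter().position(|t| t.runnable) {
            let chosen = self.queue.remove(pos).unwrap();
            self.queue.push_back(chosen.clone()); // round-robin rotation
            chosen
        } else {
            self.idle_task().clone()
        }
    }
}

fn main() {
    let mut rq = RunQueue::new(Task { name: "idle", runnable: true });
    rq.queue.push_back(Task { name: "blocked", runnable: false });
    // No runnable tasks in the queue, so the idle task is chosen.
    assert_eq!(rq.select_next().name, "idle");
}
```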

File tree: 15 files changed (+104 / -185 lines)

Cargo.lock (-17)
Some generated files are not rendered by default.

kernel/ap_start/src/lib.rs (-1)

@@ -109,7 +109,6 @@ pub fn kstart_ap(
     // per-CPU storage, tasking, and create the idle task for this CPU.
     per_cpu::init(cpu_id).unwrap();
     let bootstrap_task = spawn::init(kernel_mmi_ref.clone(), cpu_id, this_ap_stack).unwrap();
-    spawn::create_idle_task().unwrap();
 
     // The PAT must be initialized explicitly on every CPU,
     // but it is not a fatal error if it doesn't exist.

kernel/captain/src/lib.rs (+3 -5)

@@ -212,13 +212,11 @@ pub fn init(
     // The following final initialization steps are important, and order matters:
     // 1. Drop any other local stack variables that still exist.
     drop(kernel_mmi_ref);
-    // 2. Create the idle task for this CPU.
-    spawn::create_idle_task()?;
-    // 3. Cleanup bootstrap tasks, which handles this one and all other APs' bootstrap tasks.
+    // 2. Cleanup bootstrap tasks, which handles this one and all other APs' bootstrap tasks.
     spawn::cleanup_bootstrap_tasks(cpu_count)?;
-    // 4. "Finish" this bootstrap task, indicating it has exited and no longer needs to run.
+    // 3. "Finish" this bootstrap task, indicating it has exited and no longer needs to run.
     bootstrap_task.finish();
-    // 5. Enable interrupts such that other tasks can be scheduled in.
+    // 4. Enable interrupts such that other tasks can be scheduled in.
     enable_interrupts();
     // ****************************************************
     // NOTE: nothing below here is guaranteed to run again!

kernel/runqueue/src/lib.rs (+2 -2)

@@ -28,8 +28,8 @@ use runqueue::RunQueue;
 
 
 /// Creates a new `RunQueue` for the given core, which is an `apic_id`.
-pub fn init(which_core: u8) -> Result<(), &'static str> {
-    RunQueue::init(which_core)
+pub fn init(which_core: u8, idle_task: TaskRef) -> Result<(), &'static str> {
+    RunQueue::init(which_core, idle_task)
 }
 
 /// Returns the `RunQueue` of the given core, which is an `apic_id`.
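
The wrapper crate simply forwards to whichever runqueue implementation is compiled in, each of which registers one runqueue per core. A rough model of that registration pattern with the new signature is sketched below, using `std` maps and locks in place of Theseus' `AtomicMap` and `PreemptionSafeRwLock`, and a plain `String` in place of `TaskRef`; the real call site that creates each CPU's idle task and passes it in lives in the per-CPU tasking init, which is not part of this excerpt.

```rust
use std::collections::{BTreeMap, VecDeque};
use std::sync::{Mutex, OnceLock, RwLock};

// Stand-in for Theseus' `TaskRef`; illustrative only.
type TaskRef = String;

#[allow(dead_code)]
struct RunQueue {
    core: u8,
    queue: VecDeque<TaskRef>,
    idle_task: TaskRef,
}

// Stand-in for the per-core `RUNQUEUES: AtomicMap<u8, PreemptionSafeRwLock<RunQueue>>`.
static RUNQUEUES: OnceLock<Mutex<BTreeMap<u8, RwLock<RunQueue>>>> = OnceLock::new();

/// Mirrors the new `RunQueue::init(which_core, idle_task)` signature:
/// the caller must now provide the core's idle task up front.
fn init(which_core: u8, idle_task: TaskRef) -> Result<(), &'static str> {
    let map = RUNQUEUES.get_or_init(|| Mutex::new(BTreeMap::new()));
    let mut map = map.lock().map_err(|_| "poisoned lock")?;
    let previous = map.insert(
        which_core,
        RwLock::new(RunQueue { core: which_core, queue: VecDeque::new(), idle_task }),
    );
    if previous.is_some() {
        Err("runqueue already exists for this core")
    } else {
        Ok(())
    }
}

fn main() {
    // Hypothetical call site: in Theseus this would happen during per-CPU
    // tasking init, with a real idle `TaskRef` created for that CPU.
    init(0, String::from("idle_task_core_0")).unwrap();
    assert!(init(0, String::from("duplicate")).is_err());
}
```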

kernel/runqueue_epoch/src/lib.rs (+7 -1)

@@ -68,6 +68,7 @@ static RUNQUEUES: AtomicMap<u8, PreemptionSafeRwLock<RunQueue>> = AtomicMap::new
 pub struct RunQueue {
     core: u8,
     queue: VecDeque<EpochTaskRef>,
+    idle_task: TaskRef,
 }
 
 impl Deref for RunQueue {
@@ -102,12 +103,13 @@ impl RunQueue {
     }
 
     /// Creates a new `RunQueue` for the given core, which is an `apic_id`
-    pub fn init(which_core: u8) -> Result<(), &'static str> {
+    pub fn init(which_core: u8, idle_task: TaskRef) -> Result<(), &'static str> {
         #[cfg(not(loscd_eval))]
         trace!("Created runqueue (priority) for core {}", which_core);
         let new_rq = PreemptionSafeRwLock::new(RunQueue {
             core: which_core,
             queue: VecDeque::new(),
+            idle_task,
         });
 
         if RUNQUEUES.insert(which_core, new_rq).is_some() {
@@ -226,6 +228,10 @@ impl RunQueue {
         Ok(())
     }
 
+    pub fn idle_task(&self) -> &TaskRef {
+        &self.idle_task
+    }
+
     fn get_priority(&self, task: &TaskRef) -> Option<u8> {
         for epoch_task in self.iter() {
             if &epoch_task.task == task {

kernel/runqueue_priority/src/lib.rs (+7 -1)

@@ -94,6 +94,7 @@ static RUNQUEUES: AtomicMap<u8, PreemptionSafeRwLock<RunQueue>> = AtomicMap::new
 pub struct RunQueue {
     core: u8,
     queue: VecDeque<PriorityTaskRef>,
+    idle_task: TaskRef,
 }
 
 impl Deref for RunQueue {
@@ -134,12 +135,13 @@ impl RunQueue {
     }
 
     /// Creates a new `RunQueue` for the given core, which is an `apic_id`
-    pub fn init(which_core: u8) -> Result<(), &'static str> {
+    pub fn init(which_core: u8, idle_task: TaskRef) -> Result<(), &'static str> {
         #[cfg(not(loscd_eval))]
         trace!("Created runqueue (priority) for core {}", which_core);
         let new_rq = PreemptionSafeRwLock::new(RunQueue {
             core: which_core,
             queue: VecDeque::new(),
+            idle_task,
         });
 
         if RUNQUEUES.insert(which_core, new_rq).is_some() {
@@ -262,6 +264,10 @@ impl RunQueue {
         Ok(())
     }
 
+    pub fn idle_task(&self) -> &TaskRef {
+        &self.idle_task
+    }
+
     /// The internal function that sets the periodicity of a given `Task` in a single `RunQueue`
     /// then reinserts the `PriorityTaskRef` at the proper location
     fn set_periodicity_internal(

kernel/runqueue_round_robin/src/lib.rs (+9 -1)

@@ -49,6 +49,7 @@ pub struct RoundRobinTaskRef{
 
 impl Deref for RoundRobinTaskRef {
     type Target = TaskRef;
+
     fn deref(&self) -> &TaskRef {
         &self.taskref
     }
@@ -87,6 +88,7 @@ pub static RUNQUEUES: AtomicMap<u8, PreemptionSafeRwLock<RunQueue>> = AtomicMap:
 #[derive(Debug)]
 pub struct RunQueue {
     core: u8,
+    idle_task: TaskRef,
     queue: VecDeque<RoundRobinTaskRef>,
 }
 // impl Drop for RunQueue {
@@ -97,6 +99,7 @@ pub struct RunQueue {
 
 impl Deref for RunQueue {
     type Target = VecDeque<RoundRobinTaskRef>;
+
     fn deref(&self) -> &VecDeque<RoundRobinTaskRef> {
         &self.queue
     }
@@ -121,10 +124,11 @@ impl RunQueue {
     }
 
     /// Creates a new `RunQueue` for the given core, which is an `apic_id`.
-    pub fn init(which_core: u8) -> Result<(), &'static str> {
+    pub fn init(which_core: u8, idle_task: TaskRef) -> Result<(), &'static str> {
         trace!("Created runqueue (round robin) for core {}", which_core);
         let new_rq = PreemptionSafeRwLock::new(RunQueue {
             core: which_core,
+            idle_task,
             queue: VecDeque::new(),
         });
 
@@ -138,6 +142,10 @@ impl RunQueue {
         }
     }
 
+    pub fn idle_task(&self) -> &TaskRef {
+        &self.idle_task
+    }
+
     /// Returns the `RunQueue` for the given core, which is an `apic_id`.
     pub fn get_runqueue(which_core: u8) -> Option<&'static PreemptionSafeRwLock<RunQueue>> {
         RUNQUEUES.get(&which_core)

kernel/scheduler_epoch/src/lib.rs (+29 -73)

@@ -39,31 +39,18 @@ pub fn get_priority(task: &TaskRef) -> Option<u8> {
 /// This defines the priority scheduler policy.
 /// Returns None if there is no schedule-able task.
 pub fn select_next_task(apic_id: u8) -> Option<TaskRef> {
-    let priority_taskref_with_result = select_next_task_priority(apic_id);
-    match priority_taskref_with_result {
-        // A task has been selected
-        Some(task) => {
-            // If the selected task is idle task we begin a new scheduling epoch
-            if task.idle_task {
-                assign_tokens(apic_id);
-                select_next_task_priority(apic_id).and_then(|m| m.taskref)
-            }
-            // If the selected task is not idle we return the taskref
-            else {
-                task.taskref
-            }
-        }
-
-        // If no task is picked we pick a new scheduling epoch
-        None => {
-            assign_tokens(apic_id);
-            select_next_task_priority(apic_id).and_then(|m| m.taskref)
-        }
+    let next_task = select_next_task_priority(apic_id)?;
+    // If the selected task is idle task we begin a new scheduling epoch
+    if next_task.idle_task {
+        assign_tokens(apic_id);
+        select_next_task_priority(apic_id)?.taskref
+    } else {
+        next_task.taskref
     }
 }
 
 /// this defines the priority scheduler policy.
-/// Returns None if there is no schedule-able task.
+/// Returns None if there is no runqueue
 /// Otherwise returns a task with a flag indicating whether its an idle task.
 fn select_next_task_priority(apic_id: u8) -> Option<NextTaskResult> {
     let mut runqueue_locked = match RunQueue::get_runqueue(apic_id) {
@@ -76,62 +63,31 @@ fn select_next_task_priority(apic_id: u8) -> Option<NextTaskResult> {
         }
     };
 
-    let mut idle_task_index: Option<usize> = None;
-    let mut chosen_task_index: Option<usize> = None;
-    let mut idle_task = true;
-
-    for (i, t) in runqueue_locked.iter().enumerate() {
-        // we skip the idle task, and only choose it if no other tasks are runnable
-        if t.is_an_idle_task {
-            idle_task_index = Some(i);
-            continue;
-        }
-
-        // must be runnable
-        if !t.is_runnable() {
-            continue;
-        }
-
-        // if this task is pinned, it must not be pinned to a different core
-        if let Some(pinned) = t.pinned_cpu() {
-            if pinned.into_u8() != apic_id {
-                // with per-core runqueues, this should never happen!
-                error!(
-                    "select_next_task() (AP {}) found a task pinned to a different core: {:?}",
-                    apic_id, t
-                );
-                return None;
+    if let Some((task_index, _)) = runqueue_locked
+        .iter()
+        .enumerate()
+        .find(|(_, task)| task.is_runnable())
+    {
+        let modified_tokens = {
+            let chosen_task = runqueue_locked.get(task_index);
+            match chosen_task.map(|m| m.tokens_remaining) {
+                Some(x) => x.saturating_sub(1),
+                None => 0,
             }
-        }
-
-        // if the task has no remaining tokens we ignore the task
-        if t.tokens_remaining == 0 {
-            continue;
-        }
-
-        // found a runnable task!
-        chosen_task_index = Some(i);
-        idle_task = false;
-        // debug!("select_next_task(): AP {} chose Task {:?}", apic_id, &*t);
-        break;
-    }
+        };
 
-    // We then reduce the number of tokens of the task by one
-    let modified_tokens = {
-        let chosen_task = chosen_task_index.and_then(|index| runqueue_locked.get(index));
-        match chosen_task.map(|m| m.tokens_remaining) {
-            Some(x) => x.saturating_sub(1),
-            None => 0,
-        }
-    };
+        let task = runqueue_locked.update_and_move_to_end(task_index, modified_tokens);
 
-    chosen_task_index
-        .or(idle_task_index)
-        .and_then(|index| runqueue_locked.update_and_move_to_end(index, modified_tokens))
-        .map(|taskref| NextTaskResult {
-            taskref: Some(taskref),
-            idle_task,
+        Some(NextTaskResult {
+            taskref: task,
+            idle_task: false,
         })
+    } else {
+        Some(NextTaskResult {
+            taskref: Some(runqueue_locked.idle_task().clone()),
+            idle_task: true,
+        })
+    }
 }
 
 /// This assigns tokens between tasks.
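
A compact, self-contained model of the simplified two-level selection (stand-in types, `std` collections, and an inlined flat-rate `assign_tokens`; not the kernel code): the inner step picks the first runnable task, charges it one token, and rotates it to the back, falling back to the stored idle task when nothing is runnable; the outer step starts a new epoch only when that fallback was hit.

```rust
use std::collections::VecDeque;

const EPOCH_TOKENS: usize = 10; // assumed per-epoch token grant, illustrative

// Stand-ins for Theseus' `TaskRef` / `EpochTaskRef`; illustrative only.
#[derive(Clone, Debug, PartialEq)]
struct Task {
    name: &'static str,
    runnable: bool,
}

struct EpochTask {
    task: Task,
    tokens_remaining: usize,
}

struct RunQueue {
    queue: VecDeque<EpochTask>,
    idle_task: Task,
}

struct NextTask {
    task: Task,
    idle: bool,
}

impl RunQueue {
    /// Inner selection (models `select_next_task_priority`): the first
    /// runnable task has a token deducted and is rotated to the back of the
    /// queue; if nothing is runnable, the stored idle task is returned.
    fn select_inner(&mut self) -> NextTask {
        if let Some(pos) = self.queue.iter().position(|t| t.task.runnable) {
            let mut chosen = self.queue.remove(pos).unwrap();
            chosen.tokens_remaining = chosen.tokens_remaining.saturating_sub(1);
            let task = chosen.task.clone();
            self.queue.push_back(chosen);
            NextTask { task, idle: false }
        } else {
            NextTask { task: self.idle_task.clone(), idle: true }
        }
    }

    /// Outer policy (models `select_next_task`): if only the idle task was
    /// available, begin a new epoch by re-assigning tokens, then retry once.
    fn select_next(&mut self) -> Task {
        let first = self.select_inner();
        if first.idle {
            for t in self.queue.iter_mut() {
                t.tokens_remaining = EPOCH_TOKENS; // models `assign_tokens`
            }
            self.select_inner().task
        } else {
            first.task
        }
    }
}

fn main() {
    let mut rq = RunQueue {
        queue: VecDeque::from([EpochTask {
            task: Task { name: "worker", runnable: true },
            tokens_remaining: 0,
        }]),
        idle_task: Task { name: "idle", runnable: true },
    };
    // A runnable task is chosen directly; the idle task is only used as a
    // last resort when nothing in the queue is runnable.
    assert_eq!(rq.select_next().name, "worker");
}
```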

kernel/scheduler_priority/Cargo.toml (+1)

@@ -3,6 +3,7 @@ authors = ["Jacob Earle <[email protected]>"]
 name = "scheduler_priority"
 description = "Provides a priority scheduler"
 version = "0.1.0"
+edition = "2021"
 
 [dependencies.log]
 version = "0.4.8"

kernel/scheduler_priority/src/lib.rs (+14 -32)

@@ -1,17 +1,16 @@
 //! This scheduler implements a priority algorithm.
 //!
-//! Because the [`runqueue_priority::RunQueue`] internally sorts the tasks
-//! in increasing order of periodicity, it's trivially easy to choose the next task.
+//! Because the [`runqueue_priority::RunQueue`] internally sorts the tasks
+//! in increasing order of periodicity, it's trivially easy to choose the next
+//! task.
 
 #![no_std]
 
 extern crate alloc;
-#[macro_use] extern crate log;
-extern crate task;
-extern crate runqueue_priority;
 
-use task::TaskRef;
+use log::error;
 use runqueue_priority::RunQueue;
+use task::TaskRef;
 
 /// Set the periodicity of a given `Task` in all `RunQueue` structures.
 /// A reexport of the set_periodicity function from runqueue_priority
@@ -23,35 +22,18 @@ pub fn select_next_task(apic_id: u8) -> Option<TaskRef> {
     let mut runqueue_locked = match RunQueue::get_runqueue(apic_id) {
         Some(rq) => rq.write(),
         _ => {
-            error!("BUG: select_next_task_round_robin(): couldn't get runqueue for core {}", apic_id);
+            error!("BUG: select_next_task_priority(): couldn't get runqueue for core {apic_id}",);
             return None;
         }
     };
 
-    let mut idle_task_index: Option<usize> = None;
-    let mut chosen_task_index: Option<usize> = None;
-
-    for (i, taskref) in runqueue_locked.iter().enumerate() {
-        let t = taskref;
-
-        // we skip the idle task, and only choose it if no other tasks are runnable
-        if t.is_an_idle_task {
-            idle_task_index = Some(i);
-            continue;
-        }
-
-        // must be runnable
-        if !t.is_runnable() {
-            continue;
-        }
-
-        // found a runnable task
-        chosen_task_index = Some(i);
-        break;
+    if let Some((task_index, _)) = runqueue_locked
+        .iter()
+        .enumerate()
+        .find(|(_, task)| task.is_runnable())
+    {
+        runqueue_locked.update_and_reinsert(task_index)
+    } else {
+        Some(runqueue_locked.idle_task().clone())
     }
-
-    // idle task is backup iff no other task has been chosen
-    chosen_task_index
-        .or(idle_task_index)
-        .and_then(|index| runqueue_locked.update_and_reinsert(index))
 }

kernel/scheduler_round_robin/Cargo.toml (+1)

@@ -3,6 +3,7 @@ authors = ["Kevin Boos <[email protected]>"]
 name = "scheduler_round_robin"
 description = "Provides Round robin scheduling functionality and picks the next task"
 version = "0.1.0"
+edition = "2021"
 
 [dependencies]
 spin = "0.9.4"
