-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathsched.rs
401 lines (376 loc) · 16.7 KB
/
sched.rs
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
//! Tock core scheduler.
use core::cell::Cell;
use core::ptr;
use core::ptr::NonNull;
use callback;
use callback::{AppId, Callback};
use capabilities;
use common::cells::NumericCellExt;
use grant::Grant;
use ipc;
use mem::AppSlice;
use memop;
use platform::mpu::MPU;
use platform::systick::SysTick;
use platform::{Chip, Platform};
use process::{self, Task};
use returncode::ReturnCode;
use syscall::{ContextSwitchReason, Syscall};
/// The time a process is permitted to run before being pre-empted, in
/// microseconds. This is the timeslice programmed into the SysTick before
/// switching to a process.
const KERNEL_TICK_DURATION_US: u32 = 10000;
/// Skip re-scheduling a process if its quanta is nearly exhausted: when fewer
/// than this many microseconds remain on the SysTick, `do_process` stops
/// switching back into the process rather than performing a context switch
/// that would expire almost immediately.
const MIN_QUANTA_THRESHOLD_US: u32 = 500;
/// Main object for the kernel. Each board will need to create one.
pub struct Kernel {
    /// How many "to-do" items exist at any given time. These include
    /// outstanding callbacks and processes in the Running state. When this
    /// reaches zero the main loop considers all processes blocked and may
    /// put the chip to sleep.
    work: Cell<usize>,
    /// This holds a pointer to the static array of Process pointers. Its
    /// length fixes the maximum number of processes the board supports;
    /// `None` entries are empty slots.
    processes: &'static [Option<&'static process::ProcessType>],
    /// How many grant regions have been setup. This is incremented on every
    /// call to `create_grant()`. We need to explicitly track this so that when
    /// processes are created they can allocated pointers for each grant.
    grant_counter: Cell<usize>,
    /// Flag to mark that grants have been finalized. This means that the kernel
    /// cannot support creating new grants because processes have already been
    /// created and the data structures for grants have already been
    /// established.
    grants_finalized: Cell<bool>,
}
impl Kernel {
/// Create a new kernel instance backed by the board's static process
/// array. The array length fixes the maximum number of processes this
/// board supports; work and grant counters start at zero and grants are
/// not yet finalized.
pub fn new(processes: &'static [Option<&'static process::ProcessType>]) -> Kernel {
    Kernel {
        work: Cell::new(0),
        // Field-init shorthand (was the redundant `processes: processes`).
        processes,
        grant_counter: Cell::new(0),
        grants_finalized: Cell::new(false),
    }
}
/// Something was scheduled for a process, so there is one more outstanding
/// unit of work for the main loop to service.
crate fn increment_work(&self) {
    let outstanding = self.work.get();
    self.work.set(outstanding + 1);
}
/// Something finished for a process, so there is one less outstanding unit
/// of work for the main loop to service.
crate fn decrement_work(&self) {
    let outstanding = self.work.get();
    self.work.set(outstanding - 1);
}
/// Helper for the main loop's sleep decision: true when no process has any
/// outstanding work, meaning the kernel may safely go to sleep.
fn processes_blocked(&self) -> bool {
    let outstanding_work = self.work.get();
    outstanding_work == 0
}
/// Run a closure on a specific process if it exists. If the process does
/// not exist (the index is out of range, or the slot is `None` in the
/// `processes` array) then `default` will be returned. Otherwise the
/// closure will be executed and passed a reference to the process.
crate fn process_map_or<F, R>(&self, default: R, process_index: usize, closure: F) -> R
where
    F: FnOnce(&process::ProcessType) -> R,
{
    // `slice::get` performs the bounds check for us. The previous
    // hand-rolled check used `process_index > self.processes.len()`, an
    // off-by-one that let `process_index == len` through and panicked on
    // the subsequent indexing.
    match self.processes.get(process_index) {
        Some(&Some(process)) => closure(process),
        _ => default,
    }
}
/// Run a closure on every valid process. Iterates the process array and
/// invokes the closure, with the slot index, for each slot that actually
/// holds a process; empty (`None`) slots are skipped.
crate fn process_each_enumerate<F>(&self, closure: F)
where
    F: Fn(usize, &process::ProcessType),
{
    for (index, slot) in self.processes.iter().enumerate() {
        if let Some(process) = slot {
            closure(index, *process);
        }
    }
}
/// Run a closure on every process, but only continue if the closure returns
/// `FAIL`. That is, any return code other than `FAIL` short-circuits the
/// iteration and becomes this function's return value. If every invocation
/// returned `FAIL` (or there were no live processes), `FAIL` is returned.
crate fn process_each_enumerate_stop<F>(&self, closure: F) -> ReturnCode
where
    F: Fn(usize, &process::ProcessType) -> ReturnCode,
{
    for (index, slot) in self.processes.iter().enumerate() {
        if let Some(process) = slot {
            let code = closure(index, *process);
            if code != ReturnCode::FAIL {
                return code;
            }
        }
    }
    ReturnCode::FAIL
}
/// Return how many process slots this board supports: the length of the
/// static process array handed to `Kernel::new()`, counting both occupied
/// and empty (`None`) slots.
crate fn number_of_process_slots(&self) -> usize {
    self.processes.len()
}
/// Create a new grant. This is used in board initialization to setup grants
/// that capsules use to interact with processes.
///
/// Grants **must** only be created _before_ processes are initialized.
/// Processes use the number of grants that have been allocated to correctly
/// initialize the process's memory with a pointer for each grant. If a
/// grant is created after processes are initialized this will panic.
///
/// Calling this function is restricted to only certain users, and to
/// enforce this calling this function requires the
/// `MemoryAllocationCapability` capability.
pub fn create_grant<T: Default>(
    &'static self,
    _capability: &capabilities::MemoryAllocationCapability,
) -> Grant<T> {
    // Once processes exist the grant layout is frozen; allocating another
    // grant region now would be a kernel-configuration bug.
    if self.grants_finalized.get() {
        panic!("Grants finalized. Cannot create a new grant.");
    }
    // Hand out the next free grant index and advance the counter.
    let index = self.grant_counter.get();
    self.grant_counter.set(index + 1);
    Grant::new(self, index)
}
/// Returns the number of grants that have been setup in the system and
/// marks the grants as "finalized". After this call no more grants can be
/// created, because data structures are laid out based on the count
/// returned here.
///
/// In practice, this is called when processes are created, and the process
/// memory is setup based on the number of current grants.
crate fn get_grant_count_and_finalize(&self) -> usize {
    // Snapshot the count, then freeze further grant creation.
    let count = self.grant_counter.get();
    self.grants_finalized.set(true);
    count
}
/// Cause all apps to fault.
///
/// This will call `set_fault_state()` on each app, causing the app to enter
/// the state as if it had crashed (for example with an MPU violation). If
/// the process is configured to be restarted it will be.
///
/// Only callers with the `ProcessManagementCapability` can call this
/// function. This restricts general capsules from being able to call this
/// function, since capsules should not be able to arbitrarily restart all
/// apps.
pub fn hardfault_all_apps<C: capabilities::ProcessManagementCapability>(&self, _c: &C) {
    for slot in self.processes.iter() {
        if let Some(process) = slot {
            process.set_fault_state();
        }
    }
}
/// Main scheduler loop. Never returns.
///
/// Each pass services pending chip interrupts, then gives every live
/// process a chance to run via `do_process`. When nothing is runnable and
/// no interrupts are pending, the chip is put to sleep.
///
/// Requires the `MainLoopCapability`, restricting who may start the
/// scheduler.
pub fn kernel_loop<P: Platform, C: Chip>(
    &'static self,
    platform: &P,
    chip: &C,
    ipc: Option<&ipc::IPC>,
    _capability: &capabilities::MainLoopCapability,
) {
    loop {
        unsafe {
            // Let the chip's bottom halves run before scheduling userspace.
            chip.service_pending_interrupts();
            for (i, p) in self.processes.iter().enumerate() {
                p.map(|process| {
                    self.do_process(
                        platform,
                        chip,
                        process,
                        // AppId is reconstructed from the slot index each
                        // iteration.
                        callback::AppId::new(self, i),
                        ipc,
                    );
                });
                // An interrupt arrived while a process ran: abandon the
                // round-robin pass and go service it at the top of the loop.
                if chip.has_pending_interrupts() {
                    break;
                }
            }
            // The interrupt re-check and the sleep decision must be atomic;
            // otherwise an interrupt landing between the check and
            // `chip.sleep()` could be slept through.
            chip.atomic(|| {
                if !chip.has_pending_interrupts() && self.processes_blocked() {
                    chip.sleep();
                }
            });
        };
    }
}
/// Run a single process until its timeslice is exhausted, it blocks, or an
/// interrupt arrives, handling every system call it makes along the way.
///
/// `unsafe` because it performs the actual context switch into userspace
/// (via `process.switch_to()`) and manipulates the MPU/SysTick hardware.
unsafe fn do_process<P: Platform, C: Chip>(
    &self,
    platform: &P,
    chip: &C,
    process: &process::ProcessType,
    appid: AppId,
    ipc: Option<&::ipc::IPC>,
) {
    // Give the process a fresh timeslice on the system timer.
    let systick = chip.systick();
    systick.reset();
    systick.set_timer(KERNEL_TICK_DURATION_US);
    systick.enable(true);
    loop {
        // Stop running this process if an interrupt is pending, the
        // timeslice overflowed, or so little of the quantum remains that a
        // context switch is not worth it (see MIN_QUANTA_THRESHOLD_US).
        if chip.has_pending_interrupts()
            || systick.overflowed()
            || !systick.greater_than(MIN_QUANTA_THRESHOLD_US)
        {
            break;
        }
        match process.get_state() {
            process::State::Running => {
                // Running means that this process expects to be running,
                // so go ahead and set things up and switch to executing
                // the process. MPU and SysTick are only live while
                // userspace runs; both are torn down on return.
                process.setup_mpu();
                chip.mpu().enable_mpu();
                systick.enable(true);
                let context_switch_reason = process.switch_to();
                systick.enable(false);
                chip.mpu().disable_mpu();
                // Now the process has returned back to the kernel. Check
                // why and handle the process as appropriate.
                match context_switch_reason {
                    Some(ContextSwitchReason::Fault) => {
                        // App faulted (e.g. MPU violation). Let the
                        // process object deal with it as appropriate.
                        process.set_fault_state();
                    }
                    Some(ContextSwitchReason::SyscallFired) => {
                        // Handle each of the syscalls.
                        match process.get_syscall() {
                            Some(Syscall::MEMOP { operand, arg0 }) => {
                                // Memory-related operations (brk/sbrk etc.
                                // — dispatched by `memop::memop`).
                                let res = memop::memop(process, operand, arg0);
                                process.set_syscall_return_value(res.into());
                            }
                            Some(Syscall::YIELD) => {
                                process.set_yielded_state();
                                process.pop_syscall_stack_frame();
                                // There might be already enqueued callbacks,
                                // so loop again and hit the Yielded arm
                                // below rather than breaking out.
                                continue;
                            }
                            Some(Syscall::SUBSCRIBE {
                                driver_number,
                                subdriver_number,
                                callback_ptr,
                                appdata,
                            }) => {
                                // A null callback pointer means
                                // "unsubscribe": NonNull::new maps it to
                                // None and no Callback is built.
                                let callback_ptr = NonNull::new(callback_ptr);
                                let callback = callback_ptr
                                    .map(|ptr| Callback::new(appid, appdata, ptr.cast()));
                                let res =
                                    platform.with_driver(
                                        driver_number,
                                        |driver| match driver {
                                            Some(d) => {
                                                d.subscribe(subdriver_number, callback, appid)
                                            }
                                            None => ReturnCode::ENODEVICE,
                                        },
                                    );
                                process.set_syscall_return_value(res.into());
                            }
                            Some(Syscall::COMMAND {
                                driver_number,
                                subdriver_number,
                                arg0,
                                arg1,
                            }) => {
                                // Direct command to a capsule driver; the
                                // driver's return code goes straight back
                                // to userspace.
                                let res =
                                    platform.with_driver(
                                        driver_number,
                                        |driver| match driver {
                                            Some(d) => {
                                                d.command(subdriver_number, arg0, arg1, appid)
                                            }
                                            None => ReturnCode::ENODEVICE,
                                        },
                                    );
                                process.set_syscall_return_value(res.into());
                            }
                            Some(Syscall::ALLOW {
                                driver_number,
                                subdriver_number,
                                allow_address,
                                allow_size,
                            }) => {
                                let res = platform.with_driver(driver_number, |driver| {
                                    match driver {
                                        Some(d) => {
                                            if allow_address != ptr::null_mut() {
                                                // Only share memory the app
                                                // actually owns; otherwise a
                                                // process could hand the
                                                // kernel arbitrary addresses.
                                                if process.in_app_owned_memory(
                                                    allow_address,
                                                    allow_size,
                                                ) {
                                                    let slice = AppSlice::new(
                                                        allow_address,
                                                        allow_size,
                                                        appid,
                                                    );
                                                    d.allow(
                                                        appid,
                                                        subdriver_number,
                                                        Some(slice),
                                                    )
                                                } else {
                                                    ReturnCode::EINVAL /* memory not allocated to process */
                                                }
                                            } else {
                                                // Null address revokes a
                                                // previously allowed slice.
                                                d.allow(appid, subdriver_number, None)
                                            }
                                        }
                                        None => ReturnCode::ENODEVICE,
                                    }
                                });
                                process.set_syscall_return_value(res.into());
                            }
                            // Unrecognized/absent syscall: silently ignore.
                            _ => {}
                        }
                    }
                    Some(ContextSwitchReason::TimesliceExpired) => {
                        // break to handle other processes.
                        break;
                    }
                    Some(ContextSwitchReason::Interrupted) => {
                        // break to handle other processes.
                        break;
                    }
                    None => {
                        // Something went wrong when switching to this
                        // process. Indicate this by putting it in a fault
                        // state.
                        process.set_fault_state();
                    }
                }
            }
            process::State::Yielded => match process.dequeue_task() {
                // If the process is yielded it might be waiting for a
                // callback. If there is a task scheduled for this process
                // go ahead and set the process to execute it.
                None => break,
                Some(cb) => match cb {
                    Task::FunctionCall(ccb) => {
                        // Queue the callback to run when the process next
                        // executes.
                        process.push_function_call(ccb);
                    }
                    Task::IPC((otherapp, ipc_type)) => {
                        // An IPC task can only exist if the board enabled
                        // IPC; `ipc == None` here is a kernel bug.
                        ipc.map_or_else(
                            || {
                                assert!(
                                    false,
                                    "Kernel consistency error: IPC Task with no IPC"
                                );
                            },
                            |ipc| {
                                ipc.schedule_callback(appid, otherapp, ipc_type);
                            },
                        );
                    }
                },
            },
            process::State::Fault => {
                // We should never be scheduling a process in fault.
                panic!("Attempted to schedule a faulty process");
            }
        }
    }
    // Leave the timer cleared for the next process.
    systick.reset();
}
}