kernel/process_standard.rs
// Licensed under the Apache License, Version 2.0 or the MIT License.
// SPDX-License-Identifier: Apache-2.0 OR MIT
// Copyright Tock Contributors 2022.

//! Tock default Process implementation.
//!
//! `ProcessStandard` is an implementation for a userspace process running on
//! the Tock kernel.

use core::cell::Cell;
use core::cmp;
use core::fmt::Write;
use core::num::NonZeroU32;
use core::ptr::NonNull;
use core::{mem, ptr, slice, str};

use crate::collections::queue::Queue;
use crate::collections::ring_buffer::RingBuffer;
use crate::config;
use crate::debug;
use crate::errorcode::ErrorCode;
use crate::kernel::Kernel;
use crate::platform::chip::Chip;
use crate::platform::mpu::{self, MPU};
use crate::process::BinaryVersion;
use crate::process::ProcessBinary;
use crate::process::{Error, FunctionCall, FunctionCallSource, Process, Task};
use crate::process::{FaultAction, ProcessCustomGrantIdentifier, ProcessId};
use crate::process::{ProcessAddresses, ProcessSizes, ShortId};
use crate::process::{State, StoppedState};
use crate::process_checker::AcceptedCredential;
use crate::process_loading::ProcessLoadError;
use crate::process_policies::ProcessFaultPolicy;
use crate::process_policies::ProcessStandardStoragePermissionsPolicy;
use crate::processbuffer::{ReadOnlyProcessBuffer, ReadWriteProcessBuffer};
use crate::storage_permissions::StoragePermissions;
use crate::syscall::{self, Syscall, SyscallReturn, UserspaceKernelBoundary};
use crate::upcall::UpcallId;
use crate::utilities::capability_ptr::{CapabilityPtr, CapabilityPtrPermissions};
use crate::utilities::cells::{MapCell, NumericCellExt, OptionalCell};

use tock_tbf::types::CommandPermissions;

/// Interface supported by [`ProcessStandard`] for recording debug information.
///
/// This trait provides flexibility to users of [`ProcessStandard`] to determine
/// how debugging information should be recorded, or if debugging information
/// should be recorded at all.
///
/// Platforms that want to only maintain certain debugging information can
/// implement only part of this trait.
///
/// Tock provides a default implementation of this trait on the `()` type.
/// Kernels that wish to use [`ProcessStandard`] but do not need process-level
/// debugging information can use `()` as the `ProcessStandardDebug` type.
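///
/// As a minimal, hypothetical sketch (`MyChip` and the type aliases are
/// placeholders, not items defined in this module):
///
/// ```rust,ignore
/// // `()` implements `ProcessStandardDebug` with no-op recording, so this
/// // process type stores no per-process debug state.
/// type MyProcess = ProcessStandard<'static, MyChip, ()>;
///
/// // A board that wants full debug records instead uses:
/// type MyDebugProcess = ProcessStandard<'static, MyChip, ProcessStandardDebugFull>;
/// ```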
pub trait ProcessStandardDebug: Default {
    /// Record the address in flash the process expects to start at.
    fn set_fixed_address_flash(&self, address: u32);
    /// Get the address in flash the process expects to start at, if it was
    /// recorded.
    fn get_fixed_address_flash(&self) -> Option<u32>;
    /// Record the address in RAM the process expects to start at.
    fn set_fixed_address_ram(&self, address: u32);
    /// Get the address in RAM the process expects to start at, if it was
    /// recorded.
    fn get_fixed_address_ram(&self) -> Option<u32>;
    /// Record the address where the process placed its heap.
    fn set_app_heap_start_pointer(&self, ptr: *const u8);
    /// Get the address where the process placed its heap, if it was recorded.
    fn get_app_heap_start_pointer(&self) -> Option<*const u8>;
    /// Record the address where the process placed its stack.
    fn set_app_stack_start_pointer(&self, ptr: *const u8);
    /// Get the address where the process placed its stack, if it was recorded.
    fn get_app_stack_start_pointer(&self) -> Option<*const u8>;
    /// Update the lowest address that the process's stack has reached.
    fn set_app_stack_min_pointer(&self, ptr: *const u8);
    /// Get the lowest address of the process's stack, if it was recorded.
    fn get_app_stack_min_pointer(&self) -> Option<*const u8>;
    /// Provide the current address of the bottom of the stack and record the
    /// address if it is the lowest address that the process's stack has
    /// reached.
    fn set_new_app_stack_min_pointer(&self, ptr: *const u8);

    /// Record the most recent system call the process called.
    fn set_last_syscall(&self, syscall: Syscall);
    /// Get the most recent system call the process called, if it was recorded.
    fn get_last_syscall(&self) -> Option<Syscall>;
    /// Clear any record of the most recent system call the process called.
    fn reset_last_syscall(&self);

    /// Increase the recorded count of the number of system calls the process
    /// has called.
    fn increment_syscall_count(&self);
    /// Get the recorded count of the number of system calls the process has
    /// called.
    ///
    /// This should return 0 if
    /// [`ProcessStandardDebug::increment_syscall_count()`] is never called.
    fn get_syscall_count(&self) -> usize;
    /// Reset the recorded count of the number of system calls called by the
    /// app to 0.
    fn reset_syscall_count(&self);

    /// Increase the recorded count of the number of upcalls that have been
    /// dropped for the process.
    fn increment_dropped_upcall_count(&self);
    /// Get the recorded count of the number of upcalls that have been dropped
    /// for the process.
    ///
    /// This should return 0 if
    /// [`ProcessStandardDebug::increment_dropped_upcall_count()`] is never
    /// called.
    fn get_dropped_upcall_count(&self) -> usize;
    /// Reset the recorded count of the number of upcalls that have been
    /// dropped for the process to 0.
    fn reset_dropped_upcall_count(&self);

    /// Increase the recorded count of the number of times the process has
    /// exceeded its timeslice.
    fn increment_timeslice_expiration_count(&self);
    /// Get the recorded count of the number of times the process has exceeded
    /// its timeslice.
    ///
    /// This should return 0 if
    /// [`ProcessStandardDebug::increment_timeslice_expiration_count()`] is
    /// never called.
    fn get_timeslice_expiration_count(&self) -> usize;
    /// Reset the recorded count of the number of times the process has
    /// exceeded its timeslice to 0.
    fn reset_timeslice_expiration_count(&self);
}

/// A debugging implementation for [`ProcessStandard`] that records the full
/// debugging state.
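///
/// A minimal usage sketch of the recording interface (assuming the trait is
/// in scope):
///
/// ```rust,ignore
/// let debug = ProcessStandardDebugFull::default();
/// debug.increment_syscall_count();
/// assert_eq!(debug.get_syscall_count(), 1);
/// debug.reset_syscall_count();
/// assert_eq!(debug.get_syscall_count(), 0);
/// ```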
pub struct ProcessStandardDebugFull {
    /// Inner field for the debug state that is in a [`MapCell`] to provide
    /// mutable access.
    debug: MapCell<ProcessStandardDebugFullInner>,
}

/// Struct for debugging [`ProcessStandard`] processes that records the full
/// set of debugging information.
///
/// These pointers and counters are not strictly required for kernel
/// operation, but provide helpful information when an app crashes.
#[derive(Default)]
struct ProcessStandardDebugFullInner {
    /// If this process was compiled for fixed addresses, save the address
    /// it must be at in flash. This is useful for debugging and saves having
    /// to re-parse the entire TBF header.
    fixed_address_flash: Option<u32>,

    /// If this process was compiled for fixed addresses, save the address
    /// it must be at in RAM. This is useful for debugging and saves having
    /// to re-parse the entire TBF header.
    fixed_address_ram: Option<u32>,

    /// Where the process has started its heap in RAM.
    app_heap_start_pointer: Option<*const u8>,

    /// Where the start of the stack is for the process. If the kernel does
    /// the PIC setup for this app then we know this, otherwise we need the
    /// app to tell us where it put its stack.
    app_stack_start_pointer: Option<*const u8>,

    /// How low have we ever seen the stack pointer.
    app_stack_min_pointer: Option<*const u8>,

    /// How many syscalls have occurred since the process started.
    syscall_count: usize,

    /// What was the most recent syscall.
    last_syscall: Option<Syscall>,

    /// How many upcalls were dropped because the queue was insufficiently
    /// long.
    dropped_upcall_count: usize,

    /// How many times this process has been paused because it exceeded its
    /// timeslice.
    timeslice_expiration_count: usize,
}

impl ProcessStandardDebug for ProcessStandardDebugFull {
    fn set_fixed_address_flash(&self, address: u32) {
        self.debug.map(|d| d.fixed_address_flash = Some(address));
    }
    fn get_fixed_address_flash(&self) -> Option<u32> {
        self.debug.map_or(None, |d| d.fixed_address_flash)
    }
    fn set_fixed_address_ram(&self, address: u32) {
        self.debug.map(|d| d.fixed_address_ram = Some(address));
    }
    fn get_fixed_address_ram(&self) -> Option<u32> {
        self.debug.map_or(None, |d| d.fixed_address_ram)
    }
    fn set_app_heap_start_pointer(&self, ptr: *const u8) {
        self.debug.map(|d| d.app_heap_start_pointer = Some(ptr));
    }
    fn get_app_heap_start_pointer(&self) -> Option<*const u8> {
        self.debug.map_or(None, |d| d.app_heap_start_pointer)
    }
    fn set_app_stack_start_pointer(&self, ptr: *const u8) {
        self.debug.map(|d| d.app_stack_start_pointer = Some(ptr));
    }
    fn get_app_stack_start_pointer(&self) -> Option<*const u8> {
        self.debug.map_or(None, |d| d.app_stack_start_pointer)
    }
    fn set_app_stack_min_pointer(&self, ptr: *const u8) {
        self.debug.map(|d| d.app_stack_min_pointer = Some(ptr));
    }
    fn get_app_stack_min_pointer(&self) -> Option<*const u8> {
        self.debug.map_or(None, |d| d.app_stack_min_pointer)
    }
    fn set_new_app_stack_min_pointer(&self, ptr: *const u8) {
        self.debug.map(|d| {
            match d.app_stack_min_pointer {
                None => d.app_stack_min_pointer = Some(ptr),
                Some(asmp) => {
                    // Update max stack depth if needed.
                    if ptr < asmp {
                        d.app_stack_min_pointer = Some(ptr);
                    }
                }
            }
        });
    }

    fn set_last_syscall(&self, syscall: Syscall) {
        self.debug.map(|d| d.last_syscall = Some(syscall));
    }
    fn get_last_syscall(&self) -> Option<Syscall> {
        self.debug.map_or(None, |d| d.last_syscall)
    }
    fn reset_last_syscall(&self) {
        self.debug.map(|d| d.last_syscall = None);
    }

    fn increment_syscall_count(&self) {
        self.debug.map(|d| d.syscall_count += 1);
    }
    fn get_syscall_count(&self) -> usize {
        self.debug.map_or(0, |d| d.syscall_count)
    }
    fn reset_syscall_count(&self) {
        self.debug.map(|d| d.syscall_count = 0);
    }

    fn increment_dropped_upcall_count(&self) {
        self.debug.map(|d| d.dropped_upcall_count += 1);
    }
    fn get_dropped_upcall_count(&self) -> usize {
        self.debug.map_or(0, |d| d.dropped_upcall_count)
    }
    fn reset_dropped_upcall_count(&self) {
        self.debug.map(|d| d.dropped_upcall_count = 0);
    }

    fn increment_timeslice_expiration_count(&self) {
        self.debug.map(|d| d.timeslice_expiration_count += 1);
    }
    fn get_timeslice_expiration_count(&self) -> usize {
        self.debug.map_or(0, |d| d.timeslice_expiration_count)
    }
    fn reset_timeslice_expiration_count(&self) {
        self.debug.map(|d| d.timeslice_expiration_count = 0);
    }
}

impl Default for ProcessStandardDebugFull {
    fn default() -> Self {
        Self {
            debug: MapCell::new(ProcessStandardDebugFullInner::default()),
        }
    }
}

impl ProcessStandardDebug for () {
    fn set_fixed_address_flash(&self, _address: u32) {}
    fn get_fixed_address_flash(&self) -> Option<u32> {
        None
    }
    fn set_fixed_address_ram(&self, _address: u32) {}
    fn get_fixed_address_ram(&self) -> Option<u32> {
        None
    }
    fn set_app_heap_start_pointer(&self, _ptr: *const u8) {}
    fn get_app_heap_start_pointer(&self) -> Option<*const u8> {
        None
    }
    fn set_app_stack_start_pointer(&self, _ptr: *const u8) {}
    fn get_app_stack_start_pointer(&self) -> Option<*const u8> {
        None
    }
    fn set_app_stack_min_pointer(&self, _ptr: *const u8) {}
    fn get_app_stack_min_pointer(&self) -> Option<*const u8> {
        None
    }
    fn set_new_app_stack_min_pointer(&self, _ptr: *const u8) {}

    fn set_last_syscall(&self, _syscall: Syscall) {}
    fn get_last_syscall(&self) -> Option<Syscall> {
        None
    }
    fn reset_last_syscall(&self) {}

    fn increment_syscall_count(&self) {}
    fn get_syscall_count(&self) -> usize {
        0
    }
    fn reset_syscall_count(&self) {}
    fn increment_dropped_upcall_count(&self) {}
    fn get_dropped_upcall_count(&self) -> usize {
        0
    }
    fn reset_dropped_upcall_count(&self) {}
    fn increment_timeslice_expiration_count(&self) {}
    fn get_timeslice_expiration_count(&self) -> usize {
        0
    }
    fn reset_timeslice_expiration_count(&self) {}
}

/// Entry that is stored in the grant pointer table at the top of process
/// memory.
///
/// One copy of this entry struct is stored per grant region defined in the
/// kernel. This type allows the core kernel to look up a grant based on the
/// `driver_num` associated with the grant, and also holds the pointer to the
/// memory allocated for the particular grant.
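///
/// A self-contained sketch of the lookup rule (the struct is mirrored here
/// for illustration): allocation state is encoded in the pointer, never in
/// the driver number.
///
/// ```rust
/// struct Entry {
///     driver_num: usize,
///     grant_ptr: *mut u8,
/// }
///
/// // driver_num 0 is valid, so "unallocated" must be tested by the pointer
/// // being null, never by `driver_num == 0`.
/// let entry = Entry { driver_num: 0, grant_ptr: core::ptr::null_mut() };
/// let is_allocated = !entry.grant_ptr.is_null();
/// assert!(!is_allocated);
/// ```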
#[repr(C)]
struct GrantPointerEntry {
    /// The syscall driver number associated with the allocated grant.
    ///
    /// This defaults to 0 if the grant has not been allocated. Note, however,
    /// that 0 is a valid driver_num, and therefore cannot be used to check if
    /// a grant is allocated or not.
    driver_num: usize,

    /// The start of the memory location where the grant has been allocated,
    /// or null if the grant has not been allocated.
    grant_ptr: *mut u8,
}

/// A type for userspace processes in Tock.
///
/// As its name implies, this is the standard implementation for Tock
/// processes that exposes the full support for processes running on embedded
/// hardware.
///
/// [`ProcessStandard`] is templated on two parameters:
///
/// - `C`: [`Chip`]: The implementation must know the [`Chip`] the kernel is
///   running on to properly store architecture-specific and MPU state for the
///   process.
/// - `D`: [`ProcessStandardDebug`]: This configures the debugging mechanism
///   the process uses for storing optional debugging data. Kernels that do
///   not wish to store per-process debugging state can use the `()` type for
///   this parameter.
pub struct ProcessStandard<'a, C: 'static + Chip, D: 'static + ProcessStandardDebug + Default> {
    /// Identifier of this process and the index of the process in the process
    /// table.
    process_id: Cell<ProcessId>,

    /// An application ShortId, generated from process loading and
    /// checking, which denotes the security identity of this process.
    app_id: ShortId,

    /// Pointer to the main Kernel struct.
    kernel: &'static Kernel,

    /// Pointer to the struct that defines the actual chip the kernel is
    /// running on. This is used because processes have subtle hardware-based
    /// differences. Specifically, the actual syscall interface and how
    /// processes are switched to is architecture-specific, and how memory
    /// must be allocated for memory protection units is also
    /// hardware-specific.
    chip: &'static C,

    /// Application memory layout:
    ///
    /// ```text
    ///      ╒════════ ← memory_start + memory_len
    /// ╔═   │ Grant Pointers
    /// ║    │ ──────
    ///      │ Process Control Block
    /// D    │ ──────
    /// Y    │ Grant Regions
    /// N    │
    /// A    │   ↓
    /// M    │ ────── ← kernel_memory_break
    /// I    │
    /// C    │ ────── ← app_break                ═╗
    ///      │                                    ║
    /// ║    │   ↑                                A
    /// ║    │  Heap                           P  C
    /// ╠═   │ ────── ← app_heap_start         R  C
    ///      │  Data                           O  E
    /// F    │ ────── ← data_start_pointer     C  S
    /// I    │ Stack                           E  S
    /// X    │   ↓                             S  I
    /// E    │                                 S  B
    /// D    │ ────── ← current_stack_pointer     L
    ///      │                                 ║  E
    /// ╚═   ╘════════ ← memory_start             ═╝
    /// ```
    ///
    /// The start of process memory. We store this as a pointer and length
    /// and not a slice due to Rust aliasing rules. If we were to store a
    /// slice, then any use of another slice to the same memory or of a
    /// `ProcessBuffer` in the kernel would be undefined behavior.
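    ///
    /// A self-contained sketch of why a pointer plus a length is sufficient:
    /// the bounds a slice would provide can be recomputed on demand, without
    /// holding a long-lived `&[u8]` that would alias process memory. (The
    /// address and size below are illustrative.)
    ///
    /// ```rust
    /// let memory_start: *const u8 = 0x2000_0000 as *const u8;
    /// let memory_len: usize = 16 * 1024;
    ///
    /// // End of process memory, computed only when needed.
    /// let memory_end = memory_start.wrapping_add(memory_len);
    /// assert_eq!(memory_end as usize, 0x2000_4000);
    /// ```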
    memory_start: *const u8,
    /// Number of bytes of memory allocated to this process.
    memory_len: usize,

    /// Reference to the slice of `GrantPointerEntry`s stored in the process's
    /// memory reserved for the kernel. These driver numbers are zero and
    /// pointers are null if the grant region has not been allocated. When the
    /// grant region is allocated these pointers are updated to point to the
    /// allocated memory and the driver number is set to match the driver that
    /// owns the grant. No other reference to these pointers exists in the
    /// Tock kernel.
    grant_pointers: MapCell<&'static mut [GrantPointerEntry]>,

    /// Pointer to the end of the allocated (and MPU protected) grant region.
    kernel_memory_break: Cell<*const u8>,

    /// Pointer to the end of process RAM that has been sbrk'd to the process.
    app_break: Cell<*const u8>,

    /// Pointer to high water mark for process buffers shared through `allow`.
    allow_high_water_mark: Cell<*const u8>,

    /// Process flash segment. This is the region of nonvolatile flash that
    /// the process occupies.
    flash: &'static [u8],

    /// The footers of the process binary (may be zero-sized), which are
    /// metadata about the process not covered by integrity. Used, among other
    /// things, to store signatures.
    footers: &'static [u8],

    /// Collection of pointers to the TBF header in flash.
    header: tock_tbf::types::TbfHeader,

    /// Credential that was approved for this process, or `None` if the
    /// process was permitted to run without an accepted credential.
    credential: Option<AcceptedCredential>,

    /// State saved on behalf of the process each time the app switches to the
    /// kernel.
    stored_state:
        MapCell<<<C as Chip>::UserspaceKernelBoundary as UserspaceKernelBoundary>::StoredState>,

    /// The current state of the app. The scheduler uses this to determine
    /// whether it can schedule this app to execute.
    ///
    /// The `state` is used both for bookkeeping for the scheduler as well as
    /// for enabling control by other parts of the system. The scheduler keeps
    /// track of if a process is ready to run or not by switching between the
    /// `Running` and `Yielded` states. The system can control the process by
    /// switching it to a "stopped" state to prevent the scheduler from
    /// scheduling it.
    state: Cell<State>,

    /// How to respond if this process faults.
    fault_policy: &'a dyn ProcessFaultPolicy,

    /// Storage permissions for this process.
    storage_permissions: StoragePermissions,

    /// Configuration data for the MPU.
    mpu_config: MapCell<<<C as Chip>::MPU as MPU>::MpuConfig>,

    /// MPU regions are saved as a pointer-size pair.
    mpu_regions: [Cell<Option<mpu::Region>>; 6],

    /// Essentially a list of upcalls that want to call functions in the
    /// process.
    tasks: MapCell<RingBuffer<'a, Task>>,

    /// Count of how many times this process has entered the fault condition
    /// and been restarted. This is used by some `ProcessRestartPolicy`s to
    /// determine if the process should be restarted or not.
    restart_count: Cell<usize>,

    /// The completion code set by the process when it last exited, restarted,
    /// or was terminated. If the process has never terminated, then the
    /// `OptionalCell` will be empty (i.e. `None`). If the process has exited,
    /// restarted, or terminated, the `OptionalCell` will contain an optional
    /// 32-bit value. The option will be `None` if the process crashed or was
    /// stopped by the kernel and there is no provided completion code. If the
    /// process called the exit syscall then the provided completion code will
    /// be stored as `Some(completion code)`.
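    ///
    /// A self-contained sketch of decoding the nested `Option`:
    ///
    /// ```rust
    /// let completion_code: Option<Option<u32>> = Some(Some(0));
    /// match completion_code {
    ///     None => { /* the process has never terminated */ }
    ///     Some(None) => { /* terminated without a completion code */ }
    ///     Some(Some(code)) => assert_eq!(code, 0),
    /// }
    /// ```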
    completion_code: OptionalCell<Option<u32>>,

    /// Values kept so that we can print useful debug messages when apps
    /// fault.
    debug: D,
}

impl<C: Chip, D: 'static + ProcessStandardDebug> Process for ProcessStandard<'_, C, D> {
    fn processid(&self) -> ProcessId {
        self.process_id.get()
    }

    fn short_app_id(&self) -> ShortId {
        self.app_id
    }

    fn binary_version(&self) -> Option<BinaryVersion> {
        let version = self.header.get_binary_version();
        match NonZeroU32::new(version) {
            Some(version_nonzero) => Some(BinaryVersion::new(version_nonzero)),
            None => None,
        }
    }

    fn get_credential(&self) -> Option<AcceptedCredential> {
        self.credential
    }

    fn enqueue_task(&self, task: Task) -> Result<(), ErrorCode> {
        // If this app is in a `Fault` state then we shouldn't schedule
        // any work for it.
        if !self.is_running() {
            return Err(ErrorCode::NODEVICE);
        }

        let ret = self.tasks.map_or(Err(ErrorCode::FAIL), |tasks| {
            match tasks.enqueue(task) {
                true => {
                    // The task has been successfully enqueued.
                    Ok(())
                }
                false => {
                    // The task could not be enqueued as there is
                    // insufficient space in the ring buffer.
                    Err(ErrorCode::NOMEM)
                }
            }
        });

        if ret.is_err() {
            // On any error we were unable to enqueue the task. Record the
            // error, but importantly do _not_ increment kernel work.
            self.debug.increment_dropped_upcall_count();
        }

        ret
    }

    fn ready(&self) -> bool {
        self.tasks.map_or(false, |ring_buf| ring_buf.has_elements())
            || self.state.get() == State::Running
    }

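    /// A minimal, hypothetical usage sketch (the `process` handle, driver
    /// number, and subscribe number are illustrative):
    ///
    /// ```rust,ignore
    /// // Drop any pending upcalls for subscribe slot 0 of driver 0x2 that
    /// // were queued but not yet delivered to the process.
    /// let removed = process.remove_pending_upcalls(UpcallId {
    ///     driver_num: 0x2,
    ///     subscribe_num: 0,
    /// });
    /// ```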
    fn remove_pending_upcalls(&self, upcall_id: UpcallId) -> usize {
        self.tasks.map_or(0, |tasks| {
            let count_before = tasks.len();
            tasks.retain(|task| match task {
                // Remove only tasks that are function calls with an id equal
                // to `upcall_id`.
                Task::FunctionCall(function_call) => match function_call.source {
                    FunctionCallSource::Kernel => true,
                    FunctionCallSource::Driver(id) => id != upcall_id,
                },
                _ => true,
            });
            let count_after = tasks.len();
            if config::CONFIG.trace_syscalls {
                debug!(
                    "[{:?}] remove_pending_upcalls[{:#x}:{}] = {} upcall(s) removed",
                    self.processid(),
                    upcall_id.driver_num,
                    upcall_id.subscribe_num,
                    count_before - count_after,
                );
            }
            // Return the number of removed upcalls. `retain()` can only
            // shrink the queue, so this subtraction cannot underflow.
            count_before - count_after
        })
    }

    fn is_running(&self) -> bool {
        match self.state.get() {
            State::Running | State::Yielded | State::YieldedFor(_) | State::Stopped(_) => true,
            _ => false,
        }
    }

    fn get_state(&self) -> State {
        self.state.get()
    }

    fn set_yielded_state(&self) {
        if self.state.get() == State::Running {
            self.state.set(State::Yielded);
        }
    }

    fn set_yielded_for_state(&self, upcall_id: UpcallId) {
        if self.state.get() == State::Running {
            self.state.set(State::YieldedFor(upcall_id));
        }
    }

    fn stop(&self) {
        match self.state.get() {
            State::Running => self.state.set(State::Stopped(StoppedState::Running)),
            State::Yielded => self.state.set(State::Stopped(StoppedState::Yielded)),
            State::YieldedFor(upcall_id) => self
                .state
                .set(State::Stopped(StoppedState::YieldedFor(upcall_id))),
            State::Stopped(_stopped_state) => {
                // Already stopped, nothing to do.
            }
            State::Faulted | State::Terminated => {
                // Stop has no meaning on an inactive process.
            }
        }
    }

    fn resume(&self) {
        match self.state.get() {
            State::Stopped(stopped_state) => match stopped_state {
                StoppedState::Running => self.state.set(State::Running),
                StoppedState::Yielded => self.state.set(State::Yielded),
                StoppedState::YieldedFor(upcall_id) => self.state.set(State::YieldedFor(upcall_id)),
            },
            _ => {} // Do nothing
        }
    }

    fn set_fault_state(&self) {
        // Use the per-process fault policy to determine what action the
        // kernel should take since the process faulted.
        let action = self.fault_policy.action(self);
        match action {
            FaultAction::Panic => {
                // Process faulted. Panic and print status.
                self.state.set(State::Faulted);
                panic!("Process {} had a fault", self.get_process_name());
            }
            FaultAction::Restart => {
                self.try_restart(None);
            }
            FaultAction::Stop => {
                // This looks a lot like restart, except we just leave the app
                // how it faulted and mark it as `Faulted`. By clearing all of
                // the app's pending work it will not be scheduled, and
                // clearing all of the grant regions will cause capsules to
                // drop this app as well.
                self.terminate(None);
                self.state.set(State::Faulted);
            }
        }
    }

    fn start(&self, _cap: &dyn crate::capabilities::ProcessStartCapability) {
        // `start()` can only be called on a terminated process.
        if self.get_state() != State::Terminated {
            return;
        }

        // Reset to start the process.
        if let Ok(()) = self.reset() {
            self.state.set(State::Yielded);
        }
    }

    fn try_restart(&self, completion_code: Option<u32>) {
        // `try_restart()` cannot be called if the process is terminated. Only
        // `start()` can start a terminated process.
        if self.get_state() == State::Terminated {
            return;
        }

        // Terminate the process, freeing its state and removing any
        // pending tasks from the scheduler's queue.
        self.terminate(completion_code);

        // If there is a kernel policy that controls restarts, it should be
        // implemented here. For now, always restart.
        if let Ok(()) = self.reset() {
            self.state.set(State::Yielded);
        }

        // Decide what to do with the result later. E.g., if we can't restart
        // we want to reclaim the process resources.
    }

    fn terminate(&self, completion_code: Option<u32>) {
        // A process can be terminated if it is running or in the `Faulted`
        // state. Otherwise, you cannot terminate it and this method returns
        // early.
        //
        // The kernel can terminate in the `Faulted` state to return the
        // process to a state in which it can run again (e.g., reset it).
        if !self.is_running() && self.get_state() != State::Faulted {
            return;
        }

        // And remove those tasks.
        self.tasks.map(|tasks| {
            tasks.empty();
        });

        // Clear any grant regions this app has set up with any capsules.
        unsafe {
            self.grant_ptrs_reset();
        }

        // Save the completion code.
        self.completion_code.set(completion_code);

        // Mark the app as stopped so the scheduler won't try to run it.
        self.state.set(State::Terminated);
    }

    fn get_restart_count(&self) -> usize {
        self.restart_count.get()
    }

    fn has_tasks(&self) -> bool {
        self.tasks.map_or(false, |tasks| tasks.has_elements())
    }

    fn dequeue_task(&self) -> Option<Task> {
        self.tasks.map_or(None, |tasks| tasks.dequeue())
    }

    fn remove_upcall(&self, upcall_id: UpcallId) -> Option<Task> {
        self.tasks.map_or(None, |tasks| {
            tasks.remove_first_matching(|task| match task {
                Task::FunctionCall(fc) => match fc.source {
                    FunctionCallSource::Driver(upid) => upid == upcall_id,
                    _ => false,
                },
                Task::ReturnValue(rv) => rv.upcall_id == upcall_id,
                Task::IPC(_) => false,
            })
        })
    }

    fn pending_tasks(&self) -> usize {
        self.tasks.map_or(0, |tasks| tasks.len())
    }

    fn get_command_permissions(&self, driver_num: usize, offset: usize) -> CommandPermissions {
        self.header.get_command_permissions(driver_num, offset)
    }

    fn get_storage_permissions(&self) -> StoragePermissions {
        self.storage_permissions
    }

    fn number_writeable_flash_regions(&self) -> usize {
        self.header.number_writeable_flash_regions()
    }

    fn get_writeable_flash_region(&self, region_index: usize) -> (usize, usize) {
        self.header.get_writeable_flash_region(region_index)
    }

    fn update_stack_start_pointer(&self, stack_pointer: *const u8) {
        if stack_pointer >= self.mem_start() && stack_pointer < self.mem_end() {
            self.debug.set_app_stack_start_pointer(stack_pointer);
            // We also reset the minimum stack pointer because whatever
            // value we had could be entirely wrong by now.
            self.debug.set_app_stack_min_pointer(stack_pointer);
        }
    }

    fn update_heap_start_pointer(&self, heap_pointer: *const u8) {
        if heap_pointer >= self.mem_start() && heap_pointer < self.mem_end() {
            self.debug.set_app_heap_start_pointer(heap_pointer);
        }
    }

    fn setup_mpu(&self) {
        self.mpu_config.map(|config| {
            self.chip.mpu().configure_mpu(config);
        });
    }

    fn add_mpu_region(
        &self,
        unallocated_memory_start: *const u8,
        unallocated_memory_size: usize,
        min_region_size: usize,
    ) -> Option<mpu::Region> {
        self.mpu_config.and_then(|config| {
            let new_region = self.chip.mpu().allocate_region(
                unallocated_memory_start,
                unallocated_memory_size,
                min_region_size,
                mpu::Permissions::ReadWriteOnly,
                config,
            )?;

            for region in self.mpu_regions.iter() {
                if region.get().is_none() {
                    region.set(Some(new_region));
                    return Some(new_region);
                }
            }

            // Not enough room in the process struct to store the MPU region.
            None
        })
    }

    fn remove_mpu_region(&self, region: mpu::Region) -> Result<(), ErrorCode> {
        self.mpu_config.map_or(Err(ErrorCode::INVAL), |config| {
            // Find the existing mpu region that we are removing; it needs to
            // match exactly.
            if let Some(internal_region) = self.mpu_regions.iter().find(|r| r.get() == Some(region))
            {
                self.chip
                    .mpu()
                    .remove_memory_region(region, config)
                    .or(Err(ErrorCode::FAIL))?;

                // Remove this region from the tracking cache of mpu_regions.
                internal_region.set(None);
                Ok(())
            } else {
                Err(ErrorCode::INVAL)
            }
        })
    }

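    /// A minimal, hypothetical usage sketch of the break-pointer interface
    /// implemented below (the `process` handle stands in for any
    /// `&dyn Process`):
    ///
    /// ```rust,ignore
    /// // Ask for 1024 more bytes of process-accessible memory. On success,
    /// // the returned `CapabilityPtr` points at the old break and carries
    /// // authority over the enlarged accessible region.
    /// match process.sbrk(1024) {
    ///     Ok(old_break) => { /* the accessible region grew */ }
    ///     Err(Error::OutOfMemory) => { /* would collide with the grant region */ }
    ///     Err(_) => { /* inactive process or out-of-bounds address */ }
    /// }
    /// ```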
    fn sbrk(&self, increment: isize) -> Result<CapabilityPtr, Error> {
        // Do not modify an inactive process.
        if !self.is_running() {
            return Err(Error::InactiveApp);
        }

        let new_break = self.app_break.get().wrapping_offset(increment);
        self.brk(new_break)
    }

    fn brk(&self, new_break: *const u8) -> Result<CapabilityPtr, Error> {
        // Do not modify an inactive process.
        if !self.is_running() {
            return Err(Error::InactiveApp);
        }

        self.mpu_config.map_or(Err(Error::KernelError), |config| {
            if new_break < self.allow_high_water_mark.get() || new_break >= self.mem_end() {
                Err(Error::AddressOutOfBounds)
            } else if new_break > self.kernel_memory_break.get() {
                Err(Error::OutOfMemory)
            } else if let Err(()) = self.chip.mpu().update_app_memory_region(
                new_break,
                self.kernel_memory_break.get(),
                mpu::Permissions::ReadWriteOnly,
                config,
            ) {
                Err(Error::OutOfMemory)
            } else {
                let old_break = self.app_break.get();
                self.app_break.set(new_break);
                self.chip.mpu().configure_mpu(config);

                let base = self.mem_start() as usize;
                let break_result = unsafe {
                    CapabilityPtr::new_with_authority(
                        old_break as *const (),
                        base,
                        (new_break as usize) - base,
                        CapabilityPtrPermissions::ReadWrite,
                    )
                };

                Ok(break_result)
            }
        })
    }

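    /// A minimal, hypothetical sketch of the allow semantics implemented
    /// below (`process`, `addr`, and `any_addr` are placeholders):
    ///
    /// ```rust,ignore
    /// // Share 64 bytes of process RAM with the kernel. `addr` must lie
    /// // entirely within process-owned memory below `app_break`.
    /// let buffer = process.build_readwrite_process_buffer(addr, 64)?;
    ///
    /// // A zero-length allow always succeeds, regardless of the pointer: it
    /// // revokes kernel access without granting access to any new memory.
    /// let revoked = process.build_readwrite_process_buffer(any_addr, 0)?;
    /// ```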
    #[allow(clippy::not_unsafe_ptr_arg_deref)]
    fn build_readwrite_process_buffer(
        &self,
        buf_start_addr: *mut u8,
        size: usize,
    ) -> Result<ReadWriteProcessBuffer, ErrorCode> {
        if !self.is_running() {
            // Do not operate on an inactive process.
            return Err(ErrorCode::FAIL);
        }

        // A process is allowed to pass any pointer if the buffer length is 0,
        // as to revoke kernel access to a memory region without granting
        // access to another one.
        if size == 0 {
            // Clippy complains that we're dereferencing a pointer in a public
            // and safe function here. While we are not dereferencing the
            // pointer here, we pass it along to an unsafe function, which is
            // as dangerous (as it is likely to be dereferenced down the
            // line).
            //
            // Relevant discussion:
            // https://github.com/rust-lang/rust-clippy/issues/3045
            //
            // It should be fine to ignore the lint here, as a buffer of
            // length 0 will never allow dereferencing any memory in a safe
            // manner.
            //
            // ### Safety
            //
            // We specify a zero-length buffer, so the implementation of
            // `ReadWriteProcessBuffer` will handle any safety issues.
            // Therefore, we can encapsulate the unsafe.
            Ok(unsafe { ReadWriteProcessBuffer::new(buf_start_addr, 0, self.processid()) })
        } else if self.in_app_owned_memory(buf_start_addr, size) {
            // TODO: Check for buffer aliasing here.

            // Valid buffer, we need to adjust the app's watermark.
            // Note: `in_app_owned_memory` ensures this offset does not wrap.
            let buf_end_addr = buf_start_addr.wrapping_add(size);
            let new_water_mark = cmp::max(self.allow_high_water_mark.get(), buf_end_addr);
            self.allow_high_water_mark.set(new_water_mark);

            // Clippy complains that we're dereferencing a pointer in a public
            // and safe function here. While we are not dereferencing the
            // pointer here, we pass it along to an unsafe function, which is
            // as dangerous (as it is likely to be dereferenced down the
            // line).
            //
            // Relevant discussion:
            // https://github.com/rust-lang/rust-clippy/issues/3045
            //
            // It should be fine to ignore the lint here, as long as we make
            // sure that we're pointing towards userspace memory (verified
            // using `in_app_owned_memory`) and respect alignment and other
            // constraints of the Rust references created by
            // `ReadWriteProcessBuffer`.
            //
            // ### Safety
            //
            // We encapsulate the unsafe here on the condition in the TODO
            // above, as we must ensure that this `ReadWriteProcessBuffer`
            // will be the only reference to this memory.
            Ok(unsafe { ReadWriteProcessBuffer::new(buf_start_addr, size, self.processid()) })
        } else {
            Err(ErrorCode::INVAL)
        }
    }

    #[allow(clippy::not_unsafe_ptr_arg_deref)]
    fn build_readonly_process_buffer(
        &self,
        buf_start_addr: *const u8,
        size: usize,
    ) -> Result<ReadOnlyProcessBuffer, ErrorCode> {
        if !self.is_running() {
            // Do not operate on an inactive process.
            return Err(ErrorCode::FAIL);
        }

        // A process is allowed to pass any pointer if the buffer length is 0,
        // as to revoke kernel access to a memory region without granting
        // access to another one.
        if size == 0 {
            // Clippy complains that we're dereferencing a pointer in a public
            // and safe function here. While we are not dereferencing the
            // pointer here, we pass it along to an unsafe function, which is
            // as dangerous (as it is likely to be dereferenced down the
            // line).
            //
            // Relevant discussion:
            // https://github.com/rust-lang/rust-clippy/issues/3045
            //
            // It should be fine to ignore the lint here, as a buffer of
            // length 0 will never allow dereferencing any memory in a safe
            // manner.
            //
            // ### Safety
            //
            // We specify a zero-length buffer, so the implementation of
            // `ReadOnlyProcessBuffer` will handle any safety issues.
            // Therefore, we can encapsulate the unsafe.
            Ok(unsafe { ReadOnlyProcessBuffer::new(buf_start_addr, 0, self.processid()) })
        } else if self.in_app_owned_memory(buf_start_addr, size)
            || self.in_app_flash_memory(buf_start_addr, size)
        {
            // TODO: Check for buffer aliasing here.

            if self.in_app_owned_memory(buf_start_addr, size) {
                // Valid buffer, and since this is in read-write memory (i.e.
                // not flash), we need to adjust the process's watermark.
                // Note: `in_app_owned_memory()` ensures this offset does not
                // wrap.
                let buf_end_addr = buf_start_addr.wrapping_add(size);
                let new_water_mark = cmp::max(self.allow_high_water_mark.get(), buf_end_addr);
                self.allow_high_water_mark.set(new_water_mark);
            }

            // Clippy complains that we're dereferencing a pointer in a public
            // and safe function here. While we are not dereferencing the
            // pointer here, we pass it along to an unsafe function, which is
            // as dangerous (as it is likely to be dereferenced down the
            // line).
            //
            // Relevant discussion:
            // https://github.com/rust-lang/rust-clippy/issues/3045
            //
            // It should be fine to ignore the lint here, as long as we make
            // sure that we're pointing towards userspace memory (verified
            // using `in_app_owned_memory` or `in_app_flash_memory`) and
            // respect alignment and other constraints of the Rust references
            // created by `ReadOnlyProcessBuffer`.
            //
            // ### Safety
            //
            // We encapsulate the unsafe here on the condition in the TODO
            // above, as we must ensure that this `ReadOnlyProcessBuffer` will
            // be the only reference to this memory.
            Ok(unsafe { ReadOnlyProcessBuffer::new(buf_start_addr, size, self.processid()) })
        } else {
            Err(ErrorCode::INVAL)
        }
    }

    unsafe fn set_byte(&self, addr: *mut u8, value: u8) -> bool {
        if self.in_app_owned_memory(addr, 1) {
            // We verify that this will only write process-accessible memory,
            // but this can still be undefined behavior if something else
            // holds a reference to this memory.
            *addr = value;
            true
        } else {
            false
        }
    }

    fn grant_is_allocated(&self, grant_num: usize) -> Option<bool> {
        // Do not query an inactive process.
        if !self.is_running() {
            return None;
        }

        // A grant is allocated if its grant pointer is non-null.
        self.grant_pointers.map_or(None, |grant_pointers| {
            // Implement `grant_pointers[grant_num]` without a chance of a
            // panic.
            grant_pointers
                .get(grant_num)
                .map(|grant_entry| !grant_entry.grant_ptr.is_null())
        })
    }

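    /// A minimal, hypothetical sketch of how this method is used (the grant
    /// number, driver number, and layout values are illustrative):
    ///
    /// ```rust,ignore
    /// // Allocate grant number 2 for driver 0x90000 with room for a
    /// // 16-byte, word-aligned grant type. This fails if the process is
    /// // inactive, the grant is already allocated, or the driver number is
    /// // already used by another allocated grant.
    /// process.allocate_grant(2, 0x90000, 16, core::mem::align_of::<usize>())?;
    /// assert_eq!(process.grant_is_allocated(2), Some(true));
    /// ```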
    fn allocate_grant(
        &self,
        grant_num: usize,
        driver_num: usize,
        size: usize,
        align: usize,
    ) -> Result<(), ()> {
        // Do not modify an inactive process.
        if !self.is_running() {
            return Err(());
        }

        // Verify the grant_num is valid.
        if grant_num >= self.kernel.get_grant_count_and_finalize() {
            return Err(());
        }

        // Verify that the grant is not already allocated. If the pointer is
        // not null then the grant is already allocated.
        if let Some(is_allocated) = self.grant_is_allocated(grant_num) {
            if is_allocated {
                return Err(());
            }
        }

        // Verify that there is not already a grant allocated with the same
        // `driver_num`.
        let exists = self.grant_pointers.map_or(false, |grant_pointers| {
            // Check our list of grant pointers if the driver number is used.
            grant_pointers.iter().any(|grant_entry| {
                // Check if the grant is both allocated (its grant pointer is
                // non-null) and the driver number matches.
                (!grant_entry.grant_ptr.is_null()) && grant_entry.driver_num == driver_num
            })
        });
        // If we find a match, then the `driver_num` must already be used and
        // the grant allocation fails.
        if exists {
            return Err(());
        }

        // Use the shared grant allocator function to actually allocate
        // memory. Returns `None` if the allocation cannot be created.
        if let Some(grant_ptr) = self.allocate_in_grant_region_internal(size, align) {
            // Update the grant pointer to the address of the new allocation.
            self.grant_pointers.map_or(Err(()), |grant_pointers| {
                // Implement `grant_pointers[grant_num] = grant_ptr` without a
                // chance of a panic.
                grant_pointers
                    .get_mut(grant_num)
                    .map_or(Err(()), |grant_entry| {
                        // Actually set the driver num and grant pointer.
                        grant_entry.driver_num = driver_num;
                        grant_entry.grant_ptr = grant_ptr.as_ptr();

                        // If all of this worked, return success.
                        Ok(())
                    })
            })
        } else {
            // Could not allocate the memory for the grant region.
            Err(())
        }
    }

    fn allocate_custom_grant(
        &self,
        size: usize,
        align: usize,
    ) -> Result<(ProcessCustomGrantIdentifier, NonNull<u8>), ()> {
        // Do not modify an inactive process.
        if !self.is_running() {
            return Err(());
        }

        // Use the shared grant allocator function to actually allocate
        // memory. Returns `None` if the allocation cannot be created.
        if let Some(ptr) = self.allocate_in_grant_region_internal(size, align) {
            // Create the identifier that the caller will use to get access to
            // this custom grant in the future.
            let identifier = self.create_custom_grant_identifier(ptr);

            Ok((identifier, ptr))
        } else {
            // Could not allocate memory for the custom grant.
            Err(())
        }
    }

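    /// Grant pointers are word-aligned, so this implementation tracks whether
    /// a grant is currently entered by tagging the lowest bit of the stored
    /// pointer. A self-contained sketch of that tagging scheme (the address
    /// is illustrative):
    ///
    /// ```rust
    /// let grant_ptr: usize = 0x2000_1000; // word-aligned allocation address
    ///
    /// // Entering the grant sets the low bit...
    /// let entered = grant_ptr | 0x1;
    /// assert_eq!(entered & 0x1, 0x1);
    ///
    /// // ...and leaving clears it, recovering the original address.
    /// let left = entered & !0x1;
    /// assert_eq!(left, grant_ptr);
    /// ```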
    fn enter_grant(&self, grant_num: usize) -> Result<NonNull<u8>, Error> {
        // Do not try to access the grant region of an inactive process.
        if !self.is_running() {
            return Err(Error::InactiveApp);
        }

        // Retrieve the grant pointer from the `grant_pointers` slice. We use
        // `[slice].get()` so that if the grant number is invalid this will
        // return `Err` and not panic.
        self.grant_pointers
            .map_or(Err(Error::KernelError), |grant_pointers| {
                // Implement `grant_pointers[grant_num]` without a chance of a
                // panic.
                match grant_pointers.get_mut(grant_num) {
                    Some(grant_entry) => {
                        // Get a copy of the actual grant pointer.
                        let grant_ptr = grant_entry.grant_ptr;

                        // Check if the grant pointer is marked that the grant
                        // has already been entered. If so, return an error.
                        if (grant_ptr as usize) & 0x1 == 0x1 {
                            // Lowest bit is one, meaning this grant has been
                            // entered.
                            Err(Error::AlreadyInUse)
                        } else {
                            // Now, to mark that the grant has been entered,
                            // we set the lowest bit to one and save this as
                            // the grant pointer.
                            grant_entry.grant_ptr = (grant_ptr as usize | 0x1) as *mut u8;

                            // And we return the grant pointer to the entered
                            // grant.
                            Ok(unsafe { NonNull::new_unchecked(grant_ptr) })
                        }
                    }
                    None => Err(Error::AddressOutOfBounds),
                }
            })
    }

    fn enter_custom_grant(
        &self,
        identifier: ProcessCustomGrantIdentifier,
    ) -> Result<*mut u8, Error> {
        // Do not try to access the grant region of an inactive process.
        if !self.is_running() {
            return Err(Error::InactiveApp);
        }

        // Get the address of the custom grant based on the identifier.
        let custom_grant_address = self.get_custom_grant_address(identifier);

        // We never deallocate custom grants and only we can change the
        // `identifier` so we know this is a valid address for the custom
        // grant.
        Ok(custom_grant_address as *mut u8)
    }

    unsafe fn leave_grant(&self, grant_num: usize) {
        // Do not modify an inactive process.
        if !self.is_running() {
            return;
        }

        self.grant_pointers.map(|grant_pointers| {
            // Implement `grant_pointers[grant_num]` without a chance of a
            // panic.
            if let Some(grant_entry) = grant_pointers.get_mut(grant_num) {
                // Get a copy of the actual grant pointer.
                let grant_ptr = grant_entry.grant_ptr;

                // Now, to mark that the grant has been released, we set the
                // lowest bit back to zero and save this as the grant pointer.
                grant_entry.grant_ptr = (grant_ptr as usize & !0x1) as *mut u8;
            }
        });
    }

    fn grant_allocated_count(&self) -> Option<usize> {
        // Do not query an inactive process.
        if !self.is_running() {
            return None;
        }

        self.grant_pointers.map(|grant_pointers| {
            // Filter our list of grant pointers into just the non-null ones,
            // and count those. A grant is allocated if its grant pointer is
            // non-null.
            grant_pointers
                .iter()
                .filter(|grant_entry| !grant_entry.grant_ptr.is_null())
                .count()
        })
    }

    fn lookup_grant_from_driver_num(&self, driver_num: usize) -> Result<usize, Error> {
        self.grant_pointers
            .map_or(Err(Error::KernelError), |grant_pointers| {
                // Find the index of the first allocated grant (non-null grant
                // pointer) whose driver number matches.
                match grant_pointers.iter().position(|grant_entry| {
                    // Only consider allocated grants.
                    (!grant_entry.grant_ptr.is_null()) && grant_entry.driver_num == driver_num
                }) {
                    Some(idx) => Ok(idx),
                    None => Err(Error::OutOfMemory),
                }
            })
    }

    fn is_valid_upcall_function_pointer(&self, upcall_fn: *const ()) -> bool {
        let ptr = upcall_fn as *const u8;
        let size = mem::size_of::<*const u8>();

        // It is okay if this function is in memory or flash.
        self.in_app_flash_memory(ptr, size) || self.in_app_owned_memory(ptr, size)
    }

    fn get_process_name(&self) -> &'static str {
        self.header.get_package_name().unwrap_or("")
    }

    fn get_completion_code(&self) -> Option<Option<u32>> {
        self.completion_code.get()
    }

    fn set_syscall_return_value(&self, return_value: SyscallReturn) {
        match self.stored_state.map(|stored_state| unsafe {
            // Actually set the return value for a particular process.
            //
            // The UKB implementation uses the bounds of process-accessible
            // memory to verify that any memory changes are valid. Here, the
            // unsafe promise we are making is that the bounds passed to the
            // UKB are correct.
            self.chip
                .userspace_kernel_boundary()
                .set_syscall_return_value(
                    self.mem_start(),
                    self.app_break.get(),
                    stored_state,
                    return_value,
                )
        }) {
            Some(Ok(())) => {
                // If we get an `Ok` we are all set.

                // The process is either already in the running state (having
                // just called a nonblocking syscall like command) or needs to
                // be moved to the running state having called Yield-WaitFor
                // and now needing to be resumed. Either way we can set the
                // state to running.
                self.state.set(State::Running);
            }

            Some(Err(())) => {
                // If we get an `Err`, then the UKB implementation could not
                // set the return value, likely because the process's stack is
                // no longer accessible to it. All we can do is fault.
                self.set_fault_state();
            }

            None => {
                // We should never be here since `stored_state` should always
                // be occupied.
                self.set_fault_state();
            }
        }
    }

    fn set_process_function(&self, callback: FunctionCall) {
        // See if we can actually enqueue this function for this process.
        // Architecture-specific code handles actually doing this since the
        // exact method is both architecture- and implementation-specific.
        //
        // This can fail, for example if the process does not have enough
        // memory remaining.
        match self.stored_state.map(|stored_state| {
            // Let the UKB implementation handle setting the process's PC so
            // that the process executes the upcall function. We encapsulate
            // unsafe here because we are guaranteeing that the memory bounds
            // passed to `set_process_function` are correct.
            unsafe {
                self.chip.userspace_kernel_boundary().set_process_function(
                    self.mem_start(),
                    self.app_break.get(),
                    stored_state,
                    callback,
                )
            }
        }) {
            Some(Ok(())) => {
                // If we got an `Ok` we are all set and should mark that this
                // process is ready to be scheduled.

                // Move this process to the "running" state so the scheduler
                // will schedule it.
                self.state.set(State::Running);
            }

            Some(Err(())) => {
                // If we got an Error, then there was likely not enough room
                // on the stack to allow the process to execute this function
                // given the details of the particular architecture this is
                // running on. This process has essentially faulted, so we
                // mark it as such.
                self.set_fault_state();
            }

            None => {
                // We should never be here since `stored_state` should always
                // be occupied.
                self.set_fault_state();
            }
        }
    }

    fn switch_to(&self) -> Option<syscall::ContextSwitchReason> {
        // Cannot switch to an invalid process.
        if !self.is_running() {
            return None;
        }

        let (switch_reason, stack_pointer) =
            self.stored_state.map_or((None, None), |stored_state| {
                // Switch to the process. We guarantee that the memory
                // pointers we pass are valid, ensuring this context switch is
                // safe. Therefore we encapsulate the `unsafe`.
                unsafe {
                    let (switch_reason, optional_stack_pointer) = self
                        .chip
                        .userspace_kernel_boundary()
                        .switch_to_process(self.mem_start(), self.app_break.get(), stored_state);
                    (Some(switch_reason), optional_stack_pointer)
                }
            });

        // If the UKB implementation passed us a stack pointer, update our
        // debugging state. This is completely optional.
        if let Some(sp) = stack_pointer {
            self.debug.set_new_app_stack_min_pointer(sp);
        }

        switch_reason
    }

    fn debug_syscall_count(&self) -> usize {
        self.debug.get_syscall_count()
    }

    fn debug_dropped_upcall_count(&self) -> usize {
        self.debug.get_dropped_upcall_count()
    }

    fn debug_timeslice_expiration_count(&self) -> usize {
        self.debug.get_timeslice_expiration_count()
    }

    fn debug_timeslice_expired(&self) {
        self.debug.increment_timeslice_expiration_count();
    }

    fn debug_syscall_called(&self, last_syscall: Syscall) {
        self.debug.increment_syscall_count();
        self.debug.set_last_syscall(last_syscall);
    }

    fn debug_syscall_last(&self) -> Option<Syscall> {
        self.debug.get_last_syscall()
    }

    fn get_addresses(&self) -> ProcessAddresses {
        ProcessAddresses {
            flash_start: self.flash_start() as usize,
            flash_non_protected_start: self.flash_non_protected_start() as usize,
            flash_integrity_end: ((self.flash.as_ptr() as usize)
                + (self.header.get_binary_end() as usize))
                as *const u8,
            flash_end: self.flash_end() as usize,
            sram_start: self.mem_start() as usize,
            sram_app_brk: self.app_memory_break() as usize,
            sram_grant_start: self.kernel_memory_break() as usize,
            sram_end: self.mem_end() as usize,
            sram_heap_start: self.debug.get_app_heap_start_pointer().map(|p| p as usize),
            sram_stack_top: self.debug.get_app_stack_start_pointer().map(|p| p as usize),
            sram_stack_bottom: self.debug.get_app_stack_min_pointer().map(|p| p as usize),
        }
    }

    fn get_sizes(&self) -> ProcessSizes {
        ProcessSizes {
            grant_pointers: mem::size_of::<GrantPointerEntry>()
                * self.kernel.get_grant_count_and_finalize(),
            upcall_list: Self::CALLBACKS_OFFSET,
            process_control_block: Self::PROCESS_STRUCT_OFFSET,
        }
    }

    fn print_full_process(&self, writer: &mut dyn Write) {
        if !config::CONFIG.debug_panics {
            return;
        }

        self.stored_state.map(|stored_state| {
            // We guarantee the memory bounds pointers provided to the UKB are
            // correct.
            unsafe {
                self.chip.userspace_kernel_boundary().print_context(
                    self.mem_start(),
                    self.app_break.get(),
                    stored_state,
                    writer,
                );
            }
        });

        // Display grant information.
        let number_grants = self.kernel.get_grant_count_and_finalize();
        let _ = writer.write_fmt(format_args!(
            "\
            \r\n Total number of grant regions defined: {}\r\n",
            number_grants
        ));
        let rows = number_grants.div_ceil(3);

        // Access our array of grant pointers.
        self.grant_pointers.map(|grant_pointers| {
            // Iterate each grant and show its address.
            for i in 0..rows {
                for j in 0..3 {
                    let index = i + (rows * j);
                    if index >= number_grants {
                        break;
                    }

                    // Implement `grant_pointers[grant_num]` without a chance
                    // of a panic.
                    grant_pointers.get(index).map(|grant_entry| {
                        if grant_entry.grant_ptr.is_null() {
                            let _ =
                                writer.write_fmt(format_args!(" Grant {:>2} : -- ", index));
                        } else {
                            let _ = writer.write_fmt(format_args!(
                                " Grant {:>2} {:#x}: {:p}",
                                index, grant_entry.driver_num, grant_entry.grant_ptr
                            ));
                        }
                    });
                }
                let _ = writer.write_fmt(format_args!("\r\n"));
            }
        });

        // Display the current state of the MPU for this process.
        self.mpu_config.map(|config| {
            let _ = writer.write_fmt(format_args!("{}", config));
        });

        // Print a helpful message on how to re-compile a process to view the
        // listing file. If a process is PIC, then we also need to print the
        // actual addresses the process executed at so that the .lst file can
        // be generated for those addresses. If the process was already
        // compiled for a fixed address, then just generating a .lst file is
        // fine.

        if self.debug.get_fixed_address_flash().is_some() {
            // Fixed addresses, can just run `make lst`.
            let _ = writer.write_fmt(format_args!(
                "\
                \r\nTo debug libtock-c apps, run `make lst` in the app's\
                \r\nfolder and open the arch.{:#x}.{:#x}.lst file.\r\n\r\n",
                self.debug.get_fixed_address_flash().unwrap_or(0),
                self.debug.get_fixed_address_ram().unwrap_or(0)
            ));
        } else {
            // PIC, need to specify the addresses.
            let sram_start = self.mem_start() as usize;
            let flash_start = self.flash.as_ptr() as usize;
            let flash_init_fn = flash_start + self.header.get_init_function_offset() as usize;

            let _ = writer.write_fmt(format_args!(
                "\
                \r\nTo debug libtock-c apps, run\
                \r\n`make debug RAM_START={:#x} FLASH_INIT={:#x}`\
                \r\nin the app's folder and open the .lst file.\r\n\r\n",
                sram_start, flash_init_fn
            ));
        }
    }

    fn get_stored_state(&self, out: &mut [u8]) -> Result<usize, ErrorCode> {
        self.stored_state
            .map(|stored_state| {
                self.chip
                    .userspace_kernel_boundary()
                    .store_context(stored_state, out)
            })
            .unwrap_or(Err(ErrorCode::FAIL))
    }
}
1530
1531impl<C: 'static + Chip, D: 'static + ProcessStandardDebug> ProcessStandard<'_, C, D> {
1532 // Memory offset for upcall ring buffer (10 element length).
1533 const CALLBACK_LEN: usize = 10;
1534 const CALLBACKS_OFFSET: usize = mem::size_of::<Task>() * Self::CALLBACK_LEN;
1535
1536 // Memory offset to make room for this process's metadata.
1537 const PROCESS_STRUCT_OFFSET: usize = mem::size_of::<ProcessStandard<C, D>>();
1538
1539 /// Create a `ProcessStandard` object based on the found `ProcessBinary`.
1540 pub(crate) unsafe fn create<'a>(
1541 kernel: &'static Kernel,
1542 chip: &'static C,
1543 pb: ProcessBinary,
1544 remaining_memory: &'a mut [u8],
1545 fault_policy: &'static dyn ProcessFaultPolicy,
1546 storage_permissions_policy: &'static dyn ProcessStandardStoragePermissionsPolicy<C, D>,
1547 app_id: ShortId,
1548 index: usize,
1549 ) -> Result<(Option<&'static dyn Process>, &'a mut [u8]), (ProcessLoadError, &'a mut [u8])>
1550 {
1551 let process_name = pb.header.get_package_name();
1552 let process_ram_requested_size = pb.header.get_minimum_app_ram_size() as usize;
1553
1554 // Initialize MPU region configuration.
1555 let mut mpu_config = match chip.mpu().new_config() {
1556 Some(mpu_config) => mpu_config,
1557 None => return Err((ProcessLoadError::MpuConfigurationError, remaining_memory)),
1558 };
1559
1560 // Allocate MPU region for flash.
1561 if chip
1562 .mpu()
1563 .allocate_region(
1564 pb.flash.as_ptr(),
1565 pb.flash.len(),
1566 pb.flash.len(),
1567 mpu::Permissions::ReadExecuteOnly,
1568 &mut mpu_config,
1569 )
1570 .is_none()
1571 {
1572 if config::CONFIG.debug_load_processes {
1573 debug!(
1574 "[!] flash={:#010X}-{:#010X} process={:?} - couldn't allocate MPU region for flash",
1575 pb.flash.as_ptr() as usize,
1576 pb.flash.as_ptr() as usize + pb.flash.len() - 1,
1577 process_name
1578 );
1579 }
1580 return Err((ProcessLoadError::MpuInvalidFlashLength, remaining_memory));
1581 }
1582
1583 // Determine how much space we need in the application's memory space
1584 // just for kernel and grant state. We need to make sure we allocate
1585 // enough memory just for that.
1586
1587 // Make room for grant pointers.
1588 let grant_ptr_size = mem::size_of::<GrantPointerEntry>();
1589 let grant_ptrs_num = kernel.get_grant_count_and_finalize();
1590 let grant_ptrs_offset = grant_ptrs_num * grant_ptr_size;
1591
1592 // Initial size of the kernel-owned part of process memory can be
1593 // calculated directly based on the initial size of all kernel-owned
1594 // data structures.
1595 //
1596 // We require our kernel memory break (located at the end of the
1597 // MPU-returned allocated memory region) to be word-aligned. However, we
1598 // don't have any explicit alignment constraints from the MPU. To ensure
1599 // that the below kernel-owned data structures still fit into the
1600 // kernel-owned memory even with padding for alignment, add an extra
1601 // `sizeof(usize)` bytes.
1602 let initial_kernel_memory_size = grant_ptrs_offset
1603 + Self::CALLBACKS_OFFSET
1604 + Self::PROCESS_STRUCT_OFFSET
1605 + core::mem::size_of::<usize>();
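        // As a worked example (all numbers illustrative, not normative): with
        // 20 grants on a 32-bit target, `grant_ptrs_offset` = 20 * 8 = 160
        // bytes; if `size_of::<Task>()` were 16, `CALLBACKS_OFFSET` = 10 * 16
        // = 160 bytes; with a hypothetical 200-byte process struct, the total
        // is 160 + 160 + 200 + 4 (alignment slack) = 524 bytes.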
1606
1607 // By default we start with the initial size of process-accessible
1608 // memory set to 0. This maximizes the flexibility that processes have
1609 // to allocate their memory as they see fit. If a process needs more
1610        // accessible memory it must use the `brk` or `sbrk` memop syscalls to
1611 // more memory.
1612 //
1613 // We must take into account any process-accessible memory required by
1614 // the context switching implementation and allocate at least that much
1615 // memory so that we can successfully switch to the process. This is
1616 // architecture and implementation specific, so we query that now.
1617 let min_process_memory_size = chip
1618 .userspace_kernel_boundary()
1619 .initial_process_app_brk_size();
1620
1621 // We have to ensure that we at least ask the MPU for
1622 // `min_process_memory_size` so that we can be sure that `app_brk` is
1623 // not set inside the kernel-owned memory region. Now, in practice,
1624 // processes should not request 0 (or very few) bytes of memory in their
1625 // TBF header (i.e. `process_ram_requested_size` will almost always be
1626 // much larger than `min_process_memory_size`), as they are unlikely to
1627 // work with essentially no available memory. But, we still must protect
1628        // work with essentially no available memory. But we still must
1629        // protect against that case.
1630
1631 // Minimum memory size for the process.
1632 let min_total_memory_size = min_process_ram_size + initial_kernel_memory_size;
1633
1634 // Check if this process requires a fixed memory start address. If so,
1635 // try to adjust the memory region to work for this process.
1636 //
1637 // Right now, we only support skipping some RAM and leaving a chunk
1638 // unused so that the memory region starts where the process needs it
1639 // to.
1640 let remaining_memory = if let Some(fixed_memory_start) = pb.header.get_fixed_address_ram() {
1641 // The process does have a fixed address.
1642 if fixed_memory_start == remaining_memory.as_ptr() as u32 {
1643 // Address already matches.
1644 remaining_memory
1645 } else if fixed_memory_start > remaining_memory.as_ptr() as u32 {
1646 // Process wants a memory address farther in memory. Try to
1647 // advance the memory region to make the address match.
1648 let diff = (fixed_memory_start - remaining_memory.as_ptr() as u32) as usize;
1649 if diff > remaining_memory.len() {
1650 // We ran out of memory.
1651 let actual_address =
1652 remaining_memory.as_ptr() as u32 + remaining_memory.len() as u32 - 1;
1653 let expected_address = fixed_memory_start;
1654 return Err((
1655 ProcessLoadError::MemoryAddressMismatch {
1656 actual_address,
1657 expected_address,
1658 },
1659 remaining_memory,
1660 ));
1661 } else {
1662                    // Advance the memory range to start where the process
1663                    // requested it. The bounds check above guarantees that
1664                    // `diff` is within `remaining_memory`, so this slice
1665                    // cannot panic.
1666 &mut remaining_memory[diff..]
1667 }
1668 } else {
1669 // Address is earlier in memory, nothing we can do.
1670 let actual_address = remaining_memory.as_ptr() as u32;
1671 let expected_address = fixed_memory_start;
1672 return Err((
1673 ProcessLoadError::MemoryAddressMismatch {
1674 actual_address,
1675 expected_address,
1676 },
1677 remaining_memory,
1678 ));
1679 }
1680 } else {
1681 remaining_memory
1682 };
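        // For example (addresses illustrative): if `remaining_memory` starts
        // at 0x2000_0000 and the TBF header fixes RAM at 0x2000_4000, `diff`
        // is 0x4000 and the slice is advanced by that many bytes, leaving the
        // gap unused. A fixed address below the region's start, or more than
        // `remaining_memory.len()` bytes past it, fails with
        // `MemoryAddressMismatch`.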
1683
1684 // Determine where process memory will go and allocate an MPU region.
1685 //
1686 // `[allocation_start, allocation_size)` will cover both
1687 //
1688 // - the app-owned `min_process_memory_size`-long part of memory (at
1689 // some offset within `remaining_memory`), as well as
1690 //
1691 // - the kernel-owned allocation growing downward starting at the end
1692 // of this allocation, `initial_kernel_memory_size` bytes long.
1693 //
1694 let (allocation_start, allocation_size) = match chip.mpu().allocate_app_memory_region(
1695 remaining_memory.as_ptr(),
1696 remaining_memory.len(),
1697 min_total_memory_size,
1698 min_process_memory_size,
1699 initial_kernel_memory_size,
1700 mpu::Permissions::ReadWriteOnly,
1701 &mut mpu_config,
1702 ) {
1703 Some((memory_start, memory_size)) => (memory_start, memory_size),
1704 None => {
1705 // Failed to load process. Insufficient memory.
1706 if config::CONFIG.debug_load_processes {
1707 debug!(
1708 "[!] flash={:#010X}-{:#010X} process={:?} - couldn't allocate memory region of size >= {:#X}",
1709 pb.flash.as_ptr() as usize,
1710 pb.flash.as_ptr() as usize + pb.flash.len() - 1,
1711 process_name,
1712 min_total_memory_size
1713 );
1714 }
1715 return Err((ProcessLoadError::NotEnoughMemory, remaining_memory));
1716 }
1717 };
1718
1719 // Determine the offset of the app-owned part of the above memory
1720 // allocation. An MPU may not place it at the very start of
1721 // `remaining_memory` for internal alignment constraints. This can only
1722        // `remaining_memory` due to internal alignment constraints. This can only
1723 // implementation must return a memory allocation within the
1724 // `remaining_memory` slice.
1725 let app_memory_start_offset =
1726 allocation_start as usize - remaining_memory.as_ptr() as usize;
1727
1728 // Check if the memory region is valid for the process. If a process
1729 // included a fixed address for the start of RAM in its TBF header (this
1730 // field is optional, processes that are position independent do not
1731 // need a fixed address) then we check that we used the same address
1732 // when we allocated it in RAM.
1733 if let Some(fixed_memory_start) = pb.header.get_fixed_address_ram() {
1734 let actual_address = remaining_memory.as_ptr() as u32 + app_memory_start_offset as u32;
1735 let expected_address = fixed_memory_start;
1736 if actual_address != expected_address {
1737 return Err((
1738 ProcessLoadError::MemoryAddressMismatch {
1739 actual_address,
1740 expected_address,
1741 },
1742 remaining_memory,
1743 ));
1744 }
1745 }
1746
1747 // With our MPU allocation, we can begin to divide up the
1748 // `remaining_memory` slice into individual regions for the process and
1749 // kernel, as follows:
1750 //
1751 //
1752 // +-----------------------------------------------------------------
1753 // | remaining_memory
1754 // +----------------------------------------------------+------------
1755 // v v
1756 // +----------------------------------------------------+
1757 // | allocated_padded_memory |
1758 // +--+-------------------------------------------------+
1759 // v v
1760 // +-------------------------------------------------+
1761 // | allocated_memory |
1762 // +-------------------------------------------------+
1763 // v v
1764 // +-----------------------+-------------------------+
1765 // | app_accessible_memory | allocated_kernel_memory |
1766 // +-----------------------+-------------------+-----+
1767 // v
1768 // kernel memory break
1769 // \---+/
1770 // v
1771 // optional padding
1772 //
1773 //
1774 // First split the `remaining_memory` into two slices:
1775 //
1776 // - `allocated_padded_memory`: the allocated memory region, containing
1777 //
1778 // 1. optional padding at the start of the memory region of
1779 // `app_memory_start_offset` bytes,
1780 //
1781 // 2. the app accessible memory region of `min_process_memory_size`,
1782 //
1783 // 3. optional unallocated memory, and
1784 //
1785        //   4. kernel-reserved memory, growing downward from the end of the
1786        //      allocation.
1787 //
1788 // - `unused_memory`: the rest of the `remaining_memory`, not assigned
1789 // to this app.
1790 //
1791 let (allocated_padded_memory, unused_memory) =
1792 remaining_memory.split_at_mut(app_memory_start_offset + allocation_size);
1793
1794 // Now, slice off the (optional) padding at the start:
1795 let (_padding, allocated_memory) =
1796 allocated_padded_memory.split_at_mut(app_memory_start_offset);
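        // Continuing the illustrative numbers from above: if
        // `remaining_memory` starts at 0x2000_0000 with
        // `app_memory_start_offset` = 0x100 and `allocation_size` = 0x2000,
        // the first split yields `allocated_padded_memory` =
        // [0x2000_0000, 0x2000_2100) and the second discards the 0x100-byte
        // padding, leaving `allocated_memory` = [0x2000_0100, 0x2000_2100).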
1797
1798 // We continue to sub-slice the `allocated_memory` into
1799 // process-accessible and kernel-owned memory. Prior to that, store the
1800        // start and length of the overall allocation:
1801 let allocated_memory_start = allocated_memory.as_ptr();
1802 let allocated_memory_len = allocated_memory.len();
1803
1804 // Slice off the process-accessible memory:
1805 let (app_accessible_memory, allocated_kernel_memory) =
1806 allocated_memory.split_at_mut(min_process_memory_size);
1807
1808 // Set the initial process-accessible memory:
1809 let initial_app_brk = app_accessible_memory
1810 .as_ptr()
1811 .add(app_accessible_memory.len());
1812
1813 // Set the initial allow high water mark to the start of process memory
1814 // since no `allow` calls have been made yet.
1815 let initial_allow_high_water_mark = app_accessible_memory.as_ptr();
1816
1817 // Set up initial grant region.
1818 //
1819 // `kernel_memory_break` is set to the end of kernel-accessible memory
1820 // and grows downward.
1821 //
1822 // We require the `kernel_memory_break` to be aligned to a
1823 // word-boundary, as we rely on this during offset calculations to
1824 // kernel-accessed structs (e.g. the grant pointer table) below. As it
1825 // moves downward in the address space, we can't use the `align_offset`
1826 // convenience functions.
1827 //
1828 // Calling `wrapping_sub` is safe here, as we've factored in an optional
1829 // padding of at most `sizeof(usize)` bytes in the calculation of
1830 // `initial_kernel_memory_size` above.
1831 let mut kernel_memory_break = allocated_kernel_memory
1832 .as_ptr()
1833 .add(allocated_kernel_memory.len());
1834
1835 kernel_memory_break = kernel_memory_break
1836 .wrapping_sub(kernel_memory_break as usize % core::mem::size_of::<usize>());
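        // For instance (illustrative): on a 32-bit target an unaligned break
        // of 0x2000_3FFE gives 0x2000_3FFE % 4 = 2, so the break is rounded
        // down to the word-aligned 0x2000_3FFC.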
1837
1838        // Now that we know we have the space, we can set up the grant pointers.
1839 kernel_memory_break = kernel_memory_break.offset(-(grant_ptrs_offset as isize));
1840
1841 // This is safe, `kernel_memory_break` is aligned to a word-boundary,
1842 // and `grant_ptrs_offset` is a multiple of the word size.
1843 #[allow(clippy::cast_ptr_alignment)]
1844 // Set all grant pointers to null.
1845 let grant_pointers = slice::from_raw_parts_mut(
1846 kernel_memory_break as *mut GrantPointerEntry,
1847 grant_ptrs_num,
1848 );
1849 for grant_entry in grant_pointers.iter_mut() {
1850 grant_entry.driver_num = 0;
1851 grant_entry.grant_ptr = ptr::null_mut();
1852 }
1853
1854        // Now that we know we have the space, we can set up the memory for
1855        // the upcalls.
1856 kernel_memory_break = kernel_memory_break.offset(-(Self::CALLBACKS_OFFSET as isize));
1857
1858 // This is safe today, as MPU constraints ensure that `memory_start`
1859 // will always be aligned on at least a word boundary, and that
1860        // `memory_size` will be aligned on at least a word boundary, and
1861 // `grant_ptrs_offset` is a multiple of the word size. Thus,
1862 // `kernel_memory_break` must be word aligned. While this is unlikely to
1863 // change, it should be more proactively enforced.
1864 //
1865 // TODO: https://github.com/tock/tock/issues/1739
1866 #[allow(clippy::cast_ptr_alignment)]
1867 // Set up ring buffer for upcalls to the process.
1868 let upcall_buf =
1869 slice::from_raw_parts_mut(kernel_memory_break as *mut Task, Self::CALLBACK_LEN);
1870 let tasks = RingBuffer::new(upcall_buf);
1871
1872 // Last thing in the kernel region of process RAM is the process struct.
1873 kernel_memory_break = kernel_memory_break.offset(-(Self::PROCESS_STRUCT_OFFSET as isize));
1874 let process_struct_memory_location = kernel_memory_break;
1875
1876 // Create the Process struct in the app grant region.
1877 // Note that this requires every field be explicitly initialized, as
1878 // we are just transforming a pointer into a structure.
1879 let process: &mut ProcessStandard<C, D> =
1880 &mut *(process_struct_memory_location as *mut ProcessStandard<'static, C, D>);
1881
1882 // Ask the kernel for a unique identifier for this process that is being
1883 // created.
1884 let unique_identifier = kernel.create_process_identifier();
1885
1886        // Save copies of these for later debugging, in case the app was
1887        // compiled for fixed addresses.
1888 let fixed_address_flash = pb.header.get_fixed_address_flash();
1889 let fixed_address_ram = pb.header.get_fixed_address_ram();
1890
1891 process
1892 .process_id
1893 .set(ProcessId::new(kernel, unique_identifier, index));
1894 process.app_id = app_id;
1895 process.kernel = kernel;
1896 process.chip = chip;
1897 process.allow_high_water_mark = Cell::new(initial_allow_high_water_mark);
1898 process.memory_start = allocated_memory_start;
1899 process.memory_len = allocated_memory_len;
1900 process.header = pb.header;
1901 process.kernel_memory_break = Cell::new(kernel_memory_break);
1902 process.app_break = Cell::new(initial_app_brk);
1903 process.grant_pointers = MapCell::new(grant_pointers);
1904
1905 process.credential = pb.credential.get();
1906 process.footers = pb.footers;
1907 process.flash = pb.flash;
1908
1909 process.stored_state = MapCell::new(Default::default());
1910 // Mark this process as approved and leave it to the kernel to start it.
1911 process.state = Cell::new(State::Yielded);
1912 process.fault_policy = fault_policy;
1913 process.restart_count = Cell::new(0);
1914 process.completion_code = OptionalCell::empty();
1915
1916 process.mpu_config = MapCell::new(mpu_config);
1917 process.mpu_regions = [
1918 Cell::new(None),
1919 Cell::new(None),
1920 Cell::new(None),
1921 Cell::new(None),
1922 Cell::new(None),
1923 Cell::new(None),
1924 ];
1925 process.tasks = MapCell::new(tasks);
1926
1927 process.debug = D::default();
1928 if let Some(fix_addr_flash) = fixed_address_flash {
1929 process.debug.set_fixed_address_flash(fix_addr_flash);
1930 }
1931 if let Some(fix_addr_ram) = fixed_address_ram {
1932 process.debug.set_fixed_address_ram(fix_addr_ram);
1933 }
1934
1935 // Handle any architecture-specific requirements for a new process.
1936 //
1937 // NOTE! We have to ensure that the start of process-accessible memory
1938 // (`app_memory_start`) is word-aligned. Since we currently start
1939 // process-accessible memory at the beginning of the allocated memory
1940 // region, we trust the MPU to give us a word-aligned starting address.
1941 //
1942 // TODO: https://github.com/tock/tock/issues/1739
1943 match process.stored_state.map(|stored_state| {
1944 chip.userspace_kernel_boundary().initialize_process(
1945 app_accessible_memory.as_ptr(),
1946 initial_app_brk,
1947 stored_state,
1948 )
1949 }) {
1950 Some(Ok(())) => {}
1951 _ => {
1952 if config::CONFIG.debug_load_processes {
1953 debug!(
1954 "[!] flash={:#010X}-{:#010X} process={:?} - couldn't initialize process",
1955 pb.flash.as_ptr() as usize,
1956 pb.flash.as_ptr() as usize + pb.flash.len() - 1,
1957 process_name
1958 );
1959 }
1960                // Note that since `remaining_memory` was split by `split_at_mut` into
1961                // application memory and `unused_memory`, a failure here will leak
1962 // the application memory. Not leaking it requires being able to
1963 // reconstitute the original memory slice.
1964 return Err((ProcessLoadError::InternalError, unused_memory));
1965 }
1966 }
1967
1968 let flash_start = process.flash.as_ptr();
1969 let app_start =
1970 flash_start.wrapping_add(process.header.get_app_start_offset() as usize) as usize;
1971 let init_addr =
1972 flash_start.wrapping_add(process.header.get_init_function_offset() as usize) as usize;
1973 let fn_base = flash_start as usize;
1974 let fn_len = process.flash.len();
1975
1976 // We need to construct a capability with sufficient authority to cover all of a user's
1977 // code, with permissions to execute it. The entirety of flash is sufficient.
1978
1979 let init_fn = CapabilityPtr::new_with_authority(
1980 init_addr as *const (),
1981 fn_base,
1982 fn_len,
1983 CapabilityPtrPermissions::Execute,
1984 );
1985
1986 process.tasks.map(|tasks| {
1987 tasks.enqueue(Task::FunctionCall(FunctionCall {
1988 source: FunctionCallSource::Kernel,
1989 pc: init_fn,
1990 argument0: app_start,
1991 argument1: process.memory_start as usize,
1992 argument2: process.memory_len,
1993 argument3: (process.app_break.get() as usize).into(),
1994 }));
1995 });
1996
1997 // Set storage permissions. Put this at the end so that `process` is
1998 // completely formed before using it to determine the storage
1999 // permissions.
2000 process.storage_permissions = storage_permissions_policy.get_permissions(process);
2001
2002        // Return the process object and the slice of memory remaining for future processes.
2003 Ok((Some(process), unused_memory))
2004 }
2005
2006 /// Reset the process, resetting all of its state and re-initializing it so
2007 /// it can start running. Assumes the process is not running but is still in
2008 /// flash and still has its memory region allocated to it.
2009 fn reset(&self) -> Result<(), ErrorCode> {
2010 // We need a new process identifier for this process since the restarted
2011 // version is in effect a new process. This is also necessary to
2012 // invalidate any stored `ProcessId`s that point to the old version of
2013 // the process. However, the process has not moved locations in the
2014 // processes array, so we copy the existing index.
2015 let old_index = self.process_id.get().index;
2016 let new_identifier = self.kernel.create_process_identifier();
2017 self.process_id
2018 .set(ProcessId::new(self.kernel, new_identifier, old_index));
2019
2020 // Reset debug information that is per-execution and not per-process.
2021 self.debug.reset_last_syscall();
2022 self.debug.reset_syscall_count();
2023 self.debug.reset_dropped_upcall_count();
2024 self.debug.reset_timeslice_expiration_count();
2025
2026 // Reset MPU region configuration.
2027 //
2028 // TODO: ideally, this would be moved into a helper function used by
2029 // both create() and reset(), but process load debugging complicates
2030        // this. We just want to create a new config with only flash and memory
2031 // regions.
2032 //
2033        // We must have a previous MPU configuration stored; fault the
2034        // process if this invariant is violated. We avoid allocating
2035 // a new MPU configuration, as this may eventually exhaust the
2036 // number of available MPU configurations.
2037 let mut mpu_config = self.mpu_config.take().ok_or(ErrorCode::FAIL)?;
2038 self.chip.mpu().reset_config(&mut mpu_config);
2039
2040 // Allocate MPU region for flash.
2041 let app_mpu_flash = self.chip.mpu().allocate_region(
2042 self.flash.as_ptr(),
2043 self.flash.len(),
2044 self.flash.len(),
2045 mpu::Permissions::ReadExecuteOnly,
2046 &mut mpu_config,
2047 );
2048 if app_mpu_flash.is_none() {
2049 // We were unable to allocate an MPU region for flash. This is very
2050 // unexpected since we previously ran this process. However, we
2051            // return an error now and leave the process faulted; it will not
2052            // be scheduled.
2053 return Err(ErrorCode::FAIL);
2054 }
2055
2056 // RAM
2057
2058 // Re-determine the minimum amount of RAM the kernel must allocate to
2059 // the process based on the specific requirements of the syscall
2060 // implementation.
2061 let min_process_memory_size = self
2062 .chip
2063 .userspace_kernel_boundary()
2064 .initial_process_app_brk_size();
2065
2066        // Recalculate `initial_kernel_memory_size` as was done in `create()`.
2067        let grant_ptr_size = mem::size_of::<GrantPointerEntry>();
2068 let grant_ptrs_num = self.kernel.get_grant_count_and_finalize();
2069 let grant_ptrs_offset = grant_ptrs_num * grant_ptr_size;
2070
2071 let initial_kernel_memory_size =
2072 grant_ptrs_offset + Self::CALLBACKS_OFFSET + Self::PROCESS_STRUCT_OFFSET;
2073
2074 let app_mpu_mem = self.chip.mpu().allocate_app_memory_region(
2075 self.mem_start(),
2076 self.memory_len,
2077            self.memory_len, // We want exactly as much memory as we had before the restart.
2078 min_process_memory_size,
2079 initial_kernel_memory_size,
2080 mpu::Permissions::ReadWriteOnly,
2081 &mut mpu_config,
2082 );
2083 let (app_mpu_mem_start, app_mpu_mem_len) = match app_mpu_mem {
2084 Some((start, len)) => (start, len),
2085 None => {
2086 // We couldn't configure the MPU for the process. This shouldn't
2087 // happen since we were able to start the process before, but at
2088 // this point it is better to leave the app faulted and not
2089 // schedule it.
2090 return Err(ErrorCode::NOMEM);
2091 }
2092 };
2093
2094 // Reset memory pointers now that we know the layout of the process
2095 // memory and know that we can configure the MPU.
2096
2097        // `app_brk` is set `min_process_memory_size` bytes above the start
2098        // of process memory, per the syscall implementation's requirements.
2099 let app_brk = app_mpu_mem_start.wrapping_add(min_process_memory_size);
2100 self.app_break.set(app_brk);
2101        // `kernel_brk` is calculated backwards from the end of memory by
2102        // the size of the initial kernel data structures.
2103 let kernel_brk = app_mpu_mem_start
2104 .wrapping_add(app_mpu_mem_len)
2105 .wrapping_sub(initial_kernel_memory_size);
2106 self.kernel_memory_break.set(kernel_brk);
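        // Illustrative layout: for a 0x8000-byte region at 0x2000_0000 with a
        // 0x60-byte minimum app break and 0x200 bytes of kernel structures,
        // `app_brk` = 0x2000_0060 and `kernel_brk` = 0x2000_8000 - 0x200 =
        // 0x2000_7E00 (all numbers hypothetical).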
2107 // High water mark for `allow`ed memory is reset to the start of the
2108 // process's memory region.
2109 self.allow_high_water_mark.set(app_mpu_mem_start);
2110
2111 // Store the adjusted MPU configuration:
2112 self.mpu_config.replace(mpu_config);
2113
2114 // Handle any architecture-specific requirements for a process when it
2115 // first starts (as it would when it is new).
2116 let ukb_init_process = self.stored_state.map_or(Err(()), |stored_state| unsafe {
2117 self.chip.userspace_kernel_boundary().initialize_process(
2118 app_mpu_mem_start,
2119 app_brk,
2120 stored_state,
2121 )
2122 });
2123 match ukb_init_process {
2124 Ok(()) => {}
2125 Err(()) => {
2126 // We couldn't initialize the architecture-specific state for
2127 // this process. This shouldn't happen since the app was able to
2128 // be started before, but at this point the app is no longer
2129                // valid. The best thing we can do now is leave the app
2130                // faulted and not schedule it.
2131 return Err(ErrorCode::RESERVE);
2132 }
2133 }
2134
2135 self.restart_count.increment();
2136
2137 // Mark the state as `Yielded` for the scheduler.
2138 self.state.set(State::Yielded);
2139
2140 // And queue up this app to be restarted.
2141 let flash_start = self.flash_start();
2142 let app_start =
2143 flash_start.wrapping_add(self.header.get_app_start_offset() as usize) as usize;
2144 let init_addr =
2145 flash_start.wrapping_add(self.header.get_init_function_offset() as usize) as usize;
2146
2147 // We need to construct a capability with sufficient authority to cover all of a user's
2148 // code, with permissions to execute it. The entirety of flash is sufficient.
2149
2150 let init_fn = unsafe {
2151 CapabilityPtr::new_with_authority(
2152 init_addr as *const (),
2153 flash_start as usize,
2154 (self.flash_end() as usize) - (flash_start as usize),
2155 CapabilityPtrPermissions::Execute,
2156 )
2157 };
2158
2159 self.enqueue_task(Task::FunctionCall(FunctionCall {
2160 source: FunctionCallSource::Kernel,
2161 pc: init_fn,
2162 argument0: app_start,
2163 argument1: self.memory_start as usize,
2164 argument2: self.memory_len,
2165 argument3: (self.app_break.get() as usize).into(),
2166 }))
2167 }
2168
2169 /// Checks if the buffer represented by the passed in base pointer and size
2170    /// is within the RAM bounds currently exposed to the process (i.e. ending
2171 /// at `app_break`). If this method returns `true`, the buffer is guaranteed
2172 /// to be accessible to the process and to not overlap with the grant
2173 /// region.
2174 fn in_app_owned_memory(&self, buf_start_addr: *const u8, size: usize) -> bool {
2175 // TODO: On some platforms, CapabilityPtr has sufficient authority that we
2176 // could skip this check.
2177 // CapabilityPtr needs to make it slightly further, and we need to add
2178 // interfaces that tell us how much assurance it gives on the current
2179 // platform.
2180 let buf_end_addr = buf_start_addr.wrapping_add(size);
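        // If `buf_end_addr` wrapped around, `size` overflowed the address
        // space; e.g. (illustrative, 32-bit) a start of 0xFFFF_FF00 with size
        // 0x200 wraps to 0x0000_0100 and is rejected by the first comparison
        // below.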
2181
2182 buf_end_addr >= buf_start_addr
2183 && buf_start_addr >= self.mem_start()
2184 && buf_end_addr <= self.app_break.get()
2185 }
2186
2187 /// Checks if the buffer represented by the passed in base pointer and size
2188    /// is within the readable region of an application's flash memory. If
2189    /// this method returns `true`, the buffer is guaranteed to be readable by the
2190 /// process.
2191 fn in_app_flash_memory(&self, buf_start_addr: *const u8, size: usize) -> bool {
2192 // TODO: On some platforms, CapabilityPtr has sufficient authority that we
2193 // could skip this check.
2194 // CapabilityPtr needs to make it slightly further, and we need to add
2195 // interfaces that tell us how much assurance it gives on the current
2196 // platform.
2197 let buf_end_addr = buf_start_addr.wrapping_add(size);
2198
2199 buf_end_addr >= buf_start_addr
2200 && buf_start_addr >= self.flash_non_protected_start()
2201 && buf_end_addr <= self.flash_end()
2202 }
2203
2204 /// Reset all `grant_ptr`s to NULL.
2205 unsafe fn grant_ptrs_reset(&self) {
2206 self.grant_pointers.map(|grant_pointers| {
2207 for grant_entry in grant_pointers.iter_mut() {
2208 grant_entry.driver_num = 0;
2209 grant_entry.grant_ptr = ptr::null_mut();
2210 }
2211 });
2212 }
2213
2214 /// Allocate memory in a process's grant region.
2215 ///
2216 /// Ensures that the allocation is of `size` bytes and aligned to `align`
2217 /// bytes.
2218 ///
2219 /// If there is not enough memory, or the MPU cannot isolate the process
2220 /// accessible region from the new kernel memory break after doing the
2221 /// allocation, then this will return `None`.
2222 fn allocate_in_grant_region_internal(&self, size: usize, align: usize) -> Option<NonNull<u8>> {
2223 self.mpu_config.and_then(|config| {
2224 // First, compute the candidate new pointer. Note that at this point
2225 // we have not yet checked whether there is space for this
2226 // allocation or that it meets alignment requirements.
2227 let new_break_unaligned = self.kernel_memory_break.get().wrapping_sub(size);
2228
2229 // Our minimum alignment requirement is two bytes, so that the
2230 // lowest bit of the address will always be zero and we can use it
2231 // as a flag. It doesn't hurt to increase the alignment (except for
2232 // potentially a wasted byte) so we make sure `align` is at least
2233 // two.
2234 let align = cmp::max(align, 2);
2235
2236 // The alignment must be a power of two, 2^a. The expression
2237 // `!(align - 1)` then returns a mask with leading ones, followed by
2238 // `a` trailing zeros.
2239 let alignment_mask = !(align - 1);
2240 let new_break = (new_break_unaligned as usize & alignment_mask) as *const u8;
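            // Example (numbers illustrative): for `align` = 8,
            // `!(align - 1)` = !0b0111 = 0xFFFF_FFF8 on a 32-bit target, so:
            //
            //     let mask = !(8usize - 1);               // 0xFFFF_FFF8
            //     assert_eq!(0x2000_1F37usize & mask, 0x2000_1F30);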
2241
2242 // Verify there is space for this allocation
2243 if new_break < self.app_break.get() {
2244 None
2245 // Verify it didn't wrap around
2246 } else if new_break > self.kernel_memory_break.get() {
2247 None
2248 // Verify this is compatible with the MPU.
2249 } else if let Err(()) = self.chip.mpu().update_app_memory_region(
2250 self.app_break.get(),
2251 new_break,
2252 mpu::Permissions::ReadWriteOnly,
2253 config,
2254 ) {
2255 None
2256 } else {
2257 // Allocation is valid.
2258
2259 // We always allocate down, so we must lower the
2260 // kernel_memory_break.
2261 self.kernel_memory_break.set(new_break);
2262
2263 // We need `grant_ptr` as a mutable pointer.
2264 let grant_ptr = new_break as *mut u8;
2265
2266 // ### Safety
2267 //
2268 // Here we are guaranteeing that `grant_ptr` is not null. We can
2269 // ensure this because we just created `grant_ptr` based on the
2270 // process's allocated memory, and we know it cannot be null.
2271 unsafe { Some(NonNull::new_unchecked(grant_ptr)) }
2272 }
2273 })
2274 }
2275
2276    /// Create the identifier for a custom grant that `grant.rs` uses to
2277    /// access the custom grant.
2278 ///
2279 /// We create this identifier by calculating the number of bytes between
2280 /// where the custom grant starts and the end of the process memory.
2281 fn create_custom_grant_identifier(&self, ptr: NonNull<u8>) -> ProcessCustomGrantIdentifier {
2282 let custom_grant_address = ptr.as_ptr() as usize;
2283 let process_memory_end = self.mem_end() as usize;
2284
2285 ProcessCustomGrantIdentifier {
2286 offset: process_memory_end - custom_grant_address,
2287 }
2288 }
2289
2290 /// Use a `ProcessCustomGrantIdentifier` to find the address of the
2291 /// custom grant.
2292 ///
2293 /// This reverses `create_custom_grant_identifier()`.
2294 fn get_custom_grant_address(&self, identifier: ProcessCustomGrantIdentifier) -> usize {
2295 let process_memory_end = self.mem_end() as usize;
2296
2297 // Subtract the offset in the identifier from the end of the process
2298 // memory to get the address of the custom grant.
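        // As an illustrative round trip: with `mem_end()` = 0x2000_4000, a
        // custom grant allocated at 0x2000_3F00 is identified by offset
        // 0x100, and this function recovers 0x2000_4000 - 0x100 =
        // 0x2000_3F00. Anchoring on the fixed end of process memory keeps
        // the identifier valid even as `kernel_memory_break` moves.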
2299 process_memory_end - identifier.offset
2300 }
2301
2302 /// Return the app's read and modify storage permissions from the TBF header
2303 /// if it exists.
2304 ///
2305 /// If the header does not exist then return `None`. If the header does
2306 /// exist, this returns a 5-tuple with:
2307 ///
2308 /// - `write_allowed`: bool. If this process should have write permissions.
2309 /// - `read_count`: usize. How many read IDs are valid.
2310 /// - `read_ids`: [u32]. The read IDs.
2311    /// - `modify_count`: usize. How many modify IDs are valid.
2312 /// - `modify_ids`: [u32]. The modify IDs.
2313 pub fn get_tbf_storage_permissions(&self) -> Option<(bool, usize, [u32; 8], usize, [u32; 8])> {
2314 let read_perms = self.header.get_storage_read_ids();
2315 let modify_perms = self.header.get_storage_modify_ids();
2316
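        // Illustrative outcomes (hypothetical headers): a header carrying
        // both read and modify TLVs plus a write ID yields
        // `Some((true, read_count, read_ids, modify_count, modify_ids))`;
        // a header missing either TLV yields `None`.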
2317 match (read_perms, modify_perms) {
2318 (Some((read_count, read_ids)), Some((modify_count, modify_ids))) => Some((
2319 self.header.get_storage_write_id().is_some(),
2320 read_count,
2321 read_ids,
2322 modify_count,
2323 modify_ids,
2324 )),
2325 _ => None,
2326 }
2327 }
2328
2329 /// The start address of allocated RAM for this process.
2330 fn mem_start(&self) -> *const u8 {
2331 self.memory_start
2332 }
2333
2334 /// The first address after the end of the allocated RAM for this process.
2335 fn mem_end(&self) -> *const u8 {
2336 self.memory_start.wrapping_add(self.memory_len)
2337 }
2338
2339 /// The start address of the flash region allocated for this process.
2340 fn flash_start(&self) -> *const u8 {
2341 self.flash.as_ptr()
2342 }
2343
2344    /// Get the first address of the process's flash that isn't protected by the
2345 /// kernel. The protected range of flash contains the TBF header and
2346 /// potentially other state the kernel is storing on behalf of the process,
2347 /// and cannot be edited by the process.
2348 fn flash_non_protected_start(&self) -> *const u8 {
2349 ((self.flash.as_ptr() as usize) + self.header.get_protected_size() as usize) as *const u8
2350 }
2351
2352 /// The first address after the end of the flash region allocated for this
2353 /// process.
2354 fn flash_end(&self) -> *const u8 {
2355 self.flash.as_ptr().wrapping_add(self.flash.len())
2356 }
2357
2358 /// The lowest address of the grant region for the process.
2359 fn kernel_memory_break(&self) -> *const u8 {
2360 self.kernel_memory_break.get()
2361 }
2362
2363 /// Return the highest address the process has access to, or the current
2364 /// process memory brk.
2365 fn app_memory_break(&self) -> *const u8 {
2366 self.app_break.get()
2367 }
2368}