diff --git a/.gitignore b/.gitignore index 38de1b93..1839608f 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ *.org *.rs.bk +.#* Cargo.lock target diff --git a/src/exception.rs b/src/exception.rs index 3052f665..7203dfac 100644 --- a/src/exception.rs +++ b/src/exception.rs @@ -31,8 +31,8 @@ impl Exception { /// /// Returns `None` if no exception is currently active pub fn active() -> Option { - // NOTE(safe) atomic read - let icsr = unsafe { (*::peripheral::SCB.get()).icsr.read() }; + // NOTE(safe) atomic read with no side effects + let icsr = unsafe { (*::peripheral::SCB::ptr()).icsr.read() }; Some(match icsr as u8 { 0 => return None, diff --git a/src/itm.rs b/src/itm.rs index 80de99ca..5a2722d2 100644 --- a/src/itm.rs +++ b/src/itm.rs @@ -4,7 +4,7 @@ use core::{fmt, mem, ptr, slice}; use aligned::Aligned; -use peripheral::Stim; +use peripheral::itm::Stim; // NOTE assumes that `bytes` is 32-bit aligned unsafe fn write_words(stim: &Stim, bytes: &[u32]) { diff --git a/src/peripheral/cbp.rs b/src/peripheral/cbp.rs new file mode 100644 index 00000000..3397fff0 --- /dev/null +++ b/src/peripheral/cbp.rs @@ -0,0 +1,142 @@ +//! Cache and branch predictor maintenance operations + +use volatile_register::WO; + +/// Register block +#[repr(C)] +pub struct RegisterBlock { + /// I-cache invalidate all to PoU + pub iciallu: WO, + reserved0: u32, + /// I-cache invalidate by MVA to PoU + pub icimvau: WO, + /// D-cache invalidate by MVA to PoC + pub dcimvac: WO, + /// D-cache invalidate by set-way + pub dcisw: WO, + /// D-cache clean by MVA to PoU + pub dccmvau: WO, + /// D-cache clean by MVA to PoC + pub dccmvac: WO, + /// D-cache clean by set-way + pub dccsw: WO, + /// D-cache clean and invalidate by MVA to PoC + pub dccimvac: WO, + /// D-cache clean and invalidate by set-way + pub dccisw: WO, + /// Branch predictor invalidate all + pub bpiall: WO, +} + +const CBP_SW_WAY_POS: u32 = 30; +const CBP_SW_WAY_MASK: u32 = 0x3 << CBP_SW_WAY_POS; +const CBP_SW_SET_POS: u32 = 5; +const CBP_SW_SET_MASK: u32 = 0x1FF << CBP_SW_SET_POS; + +impl RegisterBlock { + /// I-cache invalidate all to PoU + #[inline(always)] + pub fn iciallu(&self) { + unsafe { + self.iciallu.write(0); + } + } + + /// I-cache invalidate by MVA to PoU + #[inline(always)] + pub fn icimvau(&self, mva: u32) { + unsafe { + self.icimvau.write(mva); + } + } + + /// D-cache invalidate by MVA to PoC + #[inline(always)] + pub fn dcimvac(&self, mva: u32) { + unsafe { + self.dcimvac.write(mva); + } + } + + /// D-cache invalidate by set-way + /// + /// `set` is masked to be between 0 and 3, and `way` between 0 and 511. + #[inline(always)] + pub fn dcisw(&self, set: u16, way: u16) { + // The ARMv7-M Architecture Reference Manual, as of Revision E.b, says these set/way + // operations have a register data format which depends on the implementation's + // associativity and number of sets. Specifically the 'way' and 'set' fields have + // offsets 32-log2(ASSOCIATIVITY) and log2(LINELEN) respectively. + // + // However, in Cortex-M7 devices, these offsets are fixed at 30 and 5, as per the Cortex-M7 + // Generic User Guide section 4.8.3. Since no other ARMv7-M implementations except the + // Cortex-M7 have a DCACHE or ICACHE at all, it seems safe to do the same thing as the + // CMSIS-Core implementation and use fixed values. 
+ unsafe { + self.dcisw.write( + (((way as u32) & (CBP_SW_WAY_MASK >> CBP_SW_WAY_POS)) << CBP_SW_WAY_POS) + | (((set as u32) & (CBP_SW_SET_MASK >> CBP_SW_SET_POS)) << CBP_SW_SET_POS), + ); + } + } + + /// D-cache clean by MVA to PoU + #[inline(always)] + pub fn dccmvau(&self, mva: u32) { + unsafe { + self.dccmvau.write(mva); + } + } + + /// D-cache clean by MVA to PoC + #[inline(always)] + pub fn dccmvac(&self, mva: u32) { + unsafe { + self.dccmvac.write(mva); + } + } + + /// D-cache clean by set-way + /// + /// `set` is masked to be between 0 and 3, and `way` between 0 and 511. + #[inline(always)] + pub fn dccsw(&self, set: u16, way: u16) { + // See comment for dcisw() about the format here + unsafe { + self.dccsw.write( + (((way as u32) & (CBP_SW_WAY_MASK >> CBP_SW_WAY_POS)) << CBP_SW_WAY_POS) + | (((set as u32) & (CBP_SW_SET_MASK >> CBP_SW_SET_POS)) << CBP_SW_SET_POS), + ); + } + } + + /// D-cache clean and invalidate by MVA to PoC + #[inline(always)] + pub fn dccimvac(&self, mva: u32) { + unsafe { + self.dccimvac.write(mva); + } + } + + /// D-cache clean and invalidate by set-way + /// + /// `set` is masked to be between 0 and 3, and `way` between 0 and 511. + #[inline(always)] + pub fn dccisw(&self, set: u16, way: u16) { + // See comment for dcisw() about the format here + unsafe { + self.dccisw.write( + (((way as u32) & (CBP_SW_WAY_MASK >> CBP_SW_WAY_POS)) << CBP_SW_WAY_POS) + | (((set as u32) & (CBP_SW_SET_MASK >> CBP_SW_SET_POS)) << CBP_SW_SET_POS), + ); + } + } + + /// Branch predictor invalidate all + #[inline(always)] + pub fn bpiall(&self) { + unsafe { + self.bpiall.write(0); + } + } +} diff --git a/src/peripheral/cpuid.rs b/src/peripheral/cpuid.rs new file mode 100644 index 00000000..f0b7e6ec --- /dev/null +++ b/src/peripheral/cpuid.rs @@ -0,0 +1,84 @@ +//! CPUID + +use volatile_register::RO; +#[cfg(any(armv7m, test))] +use volatile_register::RW; + +/// Register block +#[repr(C)] +pub struct RegisterBlock { + /// CPUID base + pub base: RO, + reserved0: [u32; 15], + /// Processor Feature + pub pfr: [RO; 2], + /// Debug Feature + pub dfr: RO, + /// Auxiliary Feature + pub afr: RO, + /// Memory Model Feature + pub mmfr: [RO; 4], + /// Instruction Set Attribute + pub isar: [RO; 5], + reserved1: u32, + /// Cache Level ID + #[cfg(any(armv7m, test))] + pub clidr: RO, + /// Cache Type + #[cfg(any(armv7m, test))] + pub ctr: RO, + /// Cache Size ID + #[cfg(any(armv7m, test))] + pub ccsidr: RO, + /// Cache Size Selection + #[cfg(any(armv7m, test))] + pub csselr: RW, +} + +/// Type of cache to select on CSSELR writes. +#[cfg(armv7m)] +pub enum CsselrCacheType { + /// Select DCache or unified cache + DataOrUnified = 0, + /// Select ICache + Instruction = 1, +} + +#[cfg(armv7m)] +impl RegisterBlock { + /// Selects the current CCSIDR + /// + /// * `level`: the required cache level minus 1, e.g. 0 for L1, 1 for L2 + /// * `ind`: select instruction cache or data/unified cache + /// + /// `level` is masked to be between 0 and 7. 
+ pub fn select_cache(&self, level: u8, ind: CsselrCacheType) { + const CSSELR_IND_POS: u32 = 0; + const CSSELR_IND_MASK: u32 = 1 << CSSELR_IND_POS; + const CSSELR_LEVEL_POS: u32 = 1; + const CSSELR_LEVEL_MASK: u32 = 0x7 << CSSELR_LEVEL_POS; + + unsafe { + self.csselr.write( + (((level as u32) << CSSELR_LEVEL_POS) & CSSELR_LEVEL_MASK) + | (((ind as u32) << CSSELR_IND_POS) & CSSELR_IND_MASK), + ) + } + } + + /// Returns the number of sets and ways in the selected cache + pub fn cache_num_sets_ways(&self, level: u8, ind: CsselrCacheType) -> (u16, u16) { + const CCSIDR_NUMSETS_POS: u32 = 13; + const CCSIDR_NUMSETS_MASK: u32 = 0x7FFF << CCSIDR_NUMSETS_POS; + const CCSIDR_ASSOCIATIVITY_POS: u32 = 3; + const CCSIDR_ASSOCIATIVITY_MASK: u32 = 0x3FF << CCSIDR_ASSOCIATIVITY_POS; + + self.select_cache(level, ind); + ::asm::dsb(); + let ccsidr = self.ccsidr.read(); + ( + (1 + ((ccsidr & CCSIDR_NUMSETS_MASK) >> CCSIDR_NUMSETS_POS)) as u16, + (1 + ((ccsidr & CCSIDR_ASSOCIATIVITY_MASK) >> CCSIDR_ASSOCIATIVITY_POS)) as u16, + ) + } +} diff --git a/src/peripheral/dcb.rs b/src/peripheral/dcb.rs new file mode 100644 index 00000000..02ec901b --- /dev/null +++ b/src/peripheral/dcb.rs @@ -0,0 +1,16 @@ +//! Debug Control Block + +use volatile_register::{RW, WO}; + +/// Register block +#[repr(C)] +pub struct RegisterBlock { + /// Debug Halting Control and Status + pub dhcsr: RW, + /// Debug Core Register Selector + pub dcrsr: WO, + /// Debug Core Register Data + pub dcrdr: RW, + /// Debug Exception and Monitor Control + pub demcr: RW, +} diff --git a/src/peripheral/dwt.rs b/src/peripheral/dwt.rs new file mode 100644 index 00000000..b716369c --- /dev/null +++ b/src/peripheral/dwt.rs @@ -0,0 +1,50 @@ +//! Data Watchpoint and Trace unit + +use volatile_register::{RO, RW, WO}; + +/// Register block +#[repr(C)] +pub struct RegisterBlock { + /// Control + pub ctrl: RW, + /// Cycle Count + pub cyccnt: RW, + /// CPI Count + pub cpicnt: RW, + /// Exception Overhead Count + pub exccnt: RW, + /// Sleep Count + pub sleepcnt: RW, + /// LSU Count + pub lsucnt: RW, + /// Folded-instruction Count + pub foldcnt: RW, + /// Program Counter Sample + pub pcsr: RO, + /// Comparators + pub c: [Comparator; 16], + reserved: [u32; 932], + /// Lock Access + pub lar: WO, + /// Lock Status + pub lsr: RO, +} + +impl RegisterBlock { + /// Enables the cycle counter + pub fn enable_cycle_counter(&self) { + unsafe { self.ctrl.modify(|r| r | 1) } + } +} + +/// Comparator +#[repr(C)] +pub struct Comparator { + /// Comparator + pub comp: RW, + /// Comparator Mask + pub mask: RW, + /// Comparator Function + pub function: RW, + reserved: u32, +} diff --git a/src/peripheral/fpb.rs b/src/peripheral/fpb.rs new file mode 100644 index 00000000..0da2d5d1 --- /dev/null +++ b/src/peripheral/fpb.rs @@ -0,0 +1,19 @@ +//! Flash Patch and Breakpoint unit + +use volatile_register::{RO, RW, WO}; + +/// Register block +#[repr(C)] +pub struct RegisterBlock { + /// Control + pub ctrl: RW, + /// Remap + pub remap: RW, + /// Comparator + pub comp: [RW; 127], + reserved: [u32; 875], + /// Lock Access + pub lar: WO, + /// Lock Status + pub lsr: RO, +} diff --git a/src/peripheral/fpu.rs b/src/peripheral/fpu.rs new file mode 100644 index 00000000..ada8b7a9 --- /dev/null +++ b/src/peripheral/fpu.rs @@ -0,0 +1,22 @@ +//! 
Floating Point Unit + +#[cfg(any(has_fpu, test))] +use volatile_register::{RO, RW}; + +/// Register block +#[repr(C)] +pub struct RegisterBlock { + reserved: u32, + /// Floating Point Context Control + #[cfg(any(has_fpu, test))] + pub fpccr: RW, + /// Floating Point Context Address + #[cfg(any(has_fpu, test))] + pub fpcar: RW, + /// Floating Point Default Status Control + #[cfg(any(has_fpu, test))] + pub fpdscr: RW, + /// Media and FP Feature + #[cfg(any(has_fpu, test))] + pub mvfr: [RO; 3], +} diff --git a/src/peripheral/itm.rs b/src/peripheral/itm.rs new file mode 100644 index 00000000..17cf869b --- /dev/null +++ b/src/peripheral/itm.rs @@ -0,0 +1,54 @@ +//! Instrumentation Trace Macrocell + +use core::cell::UnsafeCell; +use core::ptr; + +use volatile_register::{RO, RW, WO}; + +/// Register block +#[repr(C)] +pub struct RegisterBlock { + /// Stimulus Port + pub stim: [Stim; 256], + reserved0: [u32; 640], + /// Trace Enable + pub ter: [RW; 8], + reserved1: [u32; 8], + /// Trace Privilege + pub tpr: RW, + reserved2: [u32; 15], + /// Trace Control + pub tcr: RW, + reserved3: [u32; 75], + /// Lock Access + pub lar: WO, + /// Lock Status + pub lsr: RO, +} + +/// Stimulus Port +pub struct Stim { + register: UnsafeCell, +} + +impl Stim { + /// Writes an `u8` payload into the stimulus port + pub fn write_u8(&self, value: u8) { + unsafe { ptr::write_volatile(self.register.get() as *mut u8, value) } + } + + /// Writes an `u16` payload into the stimulus port + pub fn write_u16(&self, value: u16) { + unsafe { ptr::write_volatile(self.register.get() as *mut u16, value) } + } + + /// Writes an `u32` payload into the stimulus port + pub fn write_u32(&self, value: u32) { + unsafe { ptr::write_volatile(self.register.get(), value) } + } + + /// Returns `true` if the stimulus port is ready to accept more data + pub fn is_fifo_ready(&self) -> bool { + unsafe { ptr::read_volatile(self.register.get()) == 1 } + } +} diff --git a/src/peripheral/mod.rs b/src/peripheral/mod.rs index 0eb03b1b..74823987 100644 --- a/src/peripheral/mod.rs +++ b/src/peripheral/mod.rs @@ -4,1088 +4,372 @@ //! //! 
- ARMv7-M Architecture Reference Manual (Issue E.b) - Chapter B3 -use core::cell::UnsafeCell; -use core::ptr; - -pub use bare_metal::Peripheral; -use volatile_register::{RO, RW, WO}; - -use interrupt::Nr; - -#[cfg(test)] -mod test; +// TODO stand-alone registers: ICTR, ACTLR and STIR -/// CPUID -pub const CPUID: Peripheral = unsafe { Peripheral::new(0xE000_ED00) }; +#![allow(private_no_mangle_statics)] -/// Debug Control Block -pub const DCB: Peripheral = unsafe { Peripheral::new(0xE000_EDF0) }; +use core::marker::PhantomData; +use core::ops::Deref; -/// Data Watchpoint and Trace unit -pub const DWT: Peripheral = unsafe { Peripheral::new(0xE000_1000) }; +use interrupt; -/// Flash Patch and Breakpoint unit -pub const FPB: Peripheral = unsafe { Peripheral::new(0xE000_2000) }; - -/// Floating Point Unit -pub const FPU: Peripheral = unsafe { Peripheral::new(0xE000_EF30) }; +#[cfg(armv7m)] +pub mod cbp; +pub mod cpuid; +pub mod dcb; +pub mod dwt; +pub mod fpb; +pub mod fpu; +pub mod itm; +pub mod mpu; +pub mod nvic; +pub mod scb; +pub mod syst; +pub mod tpiu; -/// Instrumentation Trace Macrocell -pub const ITM: Peripheral = unsafe { Peripheral::new(0xE000_0000) }; +#[cfg(test)] +mod test; -/// Memory Protection Unit -pub const MPU: Peripheral = unsafe { Peripheral::new(0xE000_ED90) }; +// NOTE the `PhantomData` used in the peripherals proxy is to make them `Send` but *not* `Sync` + +/// Core peripherals +#[allow(non_snake_case)] +pub struct Peripherals { + /// Cache and branch predictor maintenance operations + #[cfg(armv7m)] + pub CBP: CBP, + /// CPUID + pub CPUID: CPUID, + /// Debug Control Block + pub DCB: DCB, + /// Data Watchpoint and Trace unit + pub DWT: DWT, + /// Flash Patch and Breakpoint unit + pub FPB: FPB, + /// Floating Point Unit + pub FPU: FPU, + /// Instrumentation Trace Macrocell + pub ITM: ITM, + /// Memory Protection Unit + pub MPU: MPU, + /// Nested Vector Interrupt Controller + pub NVIC: NVIC, + /// System Control Block + pub SCB: SCB, + /// SysTick: System Timer + pub SYST: SYST, + /// Trace Port Interface Unit; + pub TPIU: TPIU, +} -/// Nested Vector Interrupt Controller -pub const NVIC: Peripheral = unsafe { Peripheral::new(0xE000_E100) }; +// NOTE `no_mangle` is used here to prevent linking different minor versions of this crate as that +// would let you `take` the core peripherals more than once (one per minor version) +#[no_mangle] +static mut CORE_PERIPHERALS: bool = false; + +impl Peripherals { + /// Returns all the core peripherals *once* + pub fn take() -> Option { + interrupt::free(|_| { + if unsafe { CORE_PERIPHERALS } { + None + } else { + Some(unsafe { Peripherals::steal() }) + } + }) + } -/// System Control Block -pub const SCB: Peripheral = unsafe { Peripheral::new(0xE000_ED04) }; + #[doc(hidden)] + pub unsafe fn steal() -> Self { + debug_assert!(!CORE_PERIPHERALS); -/// SysTick: System Timer -pub const SYST: Peripheral = unsafe { Peripheral::new(0xE000_E010) }; + CORE_PERIPHERALS = true; -/// Trace Port Interface Unit; -pub const TPIU: Peripheral = unsafe { Peripheral::new(0xE004_0000) }; + Peripherals { + #[cfg(armv7m)] + CBP: CBP { + _marker: PhantomData, + }, + CPUID: CPUID { + _marker: PhantomData, + }, + DCB: DCB { + _marker: PhantomData, + }, + DWT: DWT { + _marker: PhantomData, + }, + FPB: FPB { + _marker: PhantomData, + }, + FPU: FPU { + _marker: PhantomData, + }, + ITM: ITM { + _marker: PhantomData, + }, + MPU: MPU { + _marker: PhantomData, + }, + NVIC: NVIC { + _marker: PhantomData, + }, + SCB: SCB { + _marker: PhantomData, + }, + SYST: SYST { + 
_marker: PhantomData, + }, + TPIU: TPIU { + _marker: PhantomData, + }, + } + } +} /// Cache and branch predictor maintenance operations #[cfg(armv7m)] -pub const CBP: Peripheral = unsafe { Peripheral::new(0xE000_EF50) }; - -// TODO stand-alone registers: ICTR, ACTLR and STIR - -/// CPUID register block -#[repr(C)] -pub struct CPUID { - /// CPUID base - pub base: RO, - reserved0: [u32; 15], - /// Processor Feature - pub pfr: [RO; 2], - /// Debug Feature - pub dfr: RO, - /// Auxiliary Feature - pub afr: RO, - /// Memory Model Feature - pub mmfr: [RO; 4], - /// Instruction Set Attribute - pub isar: [RO; 5], - reserved1: u32, - /// Cache Level ID - #[cfg(any(armv7m, test))] - pub clidr: RO, - /// Cache Type - #[cfg(any(armv7m, test))] - pub ctr: RO, - /// Cache Size ID - #[cfg(any(armv7m, test))] - pub ccsidr: RO, - /// Cache Size Selection - #[cfg(any(armv7m, test))] - pub csselr: RW, +pub struct CBP { + _marker: PhantomData<*const ()>, } -/// Type of cache to select on CSSELR writes. #[cfg(armv7m)] -pub enum CsselrCacheType { - /// Select DCache or unified cache - DataOrUnified = 0, - /// Select ICache - Instruction = 1, +impl CBP { + /// Returns a pointer to the register block + pub fn ptr() -> *const self::cbp::RegisterBlock { + 0xE000_EF50 as *const _ + } } #[cfg(armv7m)] -impl CPUID { - /// Selects the current CCSIDR - /// - /// * `level`: the required cache level minus 1, e.g. 0 for L1, 1 for L2 - /// * `ind`: select instruction cache or data/unified cache - /// - /// `level` is masked to be between 0 and 7. - pub fn select_cache(&self, level: u8, ind: CsselrCacheType) { - const CSSELR_IND_POS: u32 = 0; - const CSSELR_IND_MASK: u32 = 1 << CSSELR_IND_POS; - const CSSELR_LEVEL_POS: u32 = 1; - const CSSELR_LEVEL_MASK: u32 = 0x7 << CSSELR_LEVEL_POS; +unsafe impl Send for CBP {} - unsafe { self.csselr.write( - (((level as u32) << CSSELR_LEVEL_POS) & CSSELR_LEVEL_MASK) | - (((ind as u32) << CSSELR_IND_POS) & CSSELR_IND_MASK) - )} - } - - /// Returns the number of sets and ways in the selected cache - pub fn cache_num_sets_ways(&self, level: u8, ind: CsselrCacheType) -> (u16, u16) { - const CCSIDR_NUMSETS_POS: u32 = 13; - const CCSIDR_NUMSETS_MASK: u32 = 0x7FFF << CCSIDR_NUMSETS_POS; - const CCSIDR_ASSOCIATIVITY_POS: u32 = 3; - const CCSIDR_ASSOCIATIVITY_MASK: u32 = 0x3FF << CCSIDR_ASSOCIATIVITY_POS; +#[cfg(armv7m)] +impl Deref for CBP { + type Target = self::cbp::RegisterBlock; - self.select_cache(level, ind); - ::asm::dsb(); - let ccsidr = self.ccsidr.read(); - ((1 + ((ccsidr & CCSIDR_NUMSETS_MASK) >> CCSIDR_NUMSETS_POS)) as u16, - (1 + ((ccsidr & CCSIDR_ASSOCIATIVITY_MASK) >> CCSIDR_ASSOCIATIVITY_POS)) as u16) + fn deref(&self) -> &Self::Target { + unsafe { &*Self::ptr() } } } -/// DCB register block -#[repr(C)] -pub struct DCB { - /// Debug Halting Control and Status - pub dhcsr: RW, - /// Debug Core Register Selector - pub dcrsr: WO, - /// Debug Core Register Data - pub dcrdr: RW, - /// Debug Exception and Monitor Control - pub demcr: RW, -} - -/// DWT register block -#[repr(C)] -pub struct DWT { - /// Control - pub ctrl: RW, - /// Cycle Count - pub cyccnt: RW, - /// CPI Count - pub cpicnt: RW, - /// Exception Overhead Count - pub exccnt: RW, - /// Sleep Count - pub sleepcnt: RW, - /// LSU Count - pub lsucnt: RW, - /// Folded-instruction Count - pub foldcnt: RW, - /// Program Counter Sample - pub pcsr: RO, - /// Comparators - pub c: [Comparator; 16], - reserved: [u32; 932], - /// Lock Access - pub lar: WO, - /// Lock Status - pub lsr: RO, +/// CPUID +pub struct CPUID { + _marker: 
PhantomData<*const ()>, } -impl DWT { - /// Enables the cycle counter - pub fn enable_cycle_counter(&self) { - unsafe { self.ctrl.modify(|r| r | 1) } +impl CPUID { + /// Returns a pointer to the register block + pub fn ptr() -> *const self::cpuid::RegisterBlock { + 0xE000_ED00 as *const _ } } -/// Comparator -#[repr(C)] -pub struct Comparator { - /// Comparator - pub comp: RW, - /// Comparator Mask - pub mask: RW, - /// Comparator Function - pub function: RW, - reserved: u32, -} +impl Deref for CPUID { + type Target = self::cpuid::RegisterBlock; -/// FPB register block -#[repr(C)] -pub struct FPB { - /// Control - pub ctrl: RW, - /// Remap - pub remap: RW, - /// Comparator - pub comp: [RW; 127], - reserved: [u32; 875], - /// Lock Access - pub lar: WO, - /// Lock Status - pub lsr: RO, -} - -/// FPU register block -#[repr(C)] -pub struct FPU { - reserved: u32, - /// Floating Point Context Control - #[cfg(any(has_fpu, test))] - pub fpccr: RW, - /// Floating Point Context Address - #[cfg(any(has_fpu, test))] - pub fpcar: RW, - /// Floating Point Default Status Control - #[cfg(any(has_fpu, test))] - pub fpdscr: RW, - /// Media and FP Feature - #[cfg(any(has_fpu, test))] - pub mvfr: [RO; 3], -} - -/// ITM register block -#[repr(C)] -pub struct ITM { - /// Stimulus Port - pub stim: [Stim; 256], - reserved0: [u32; 640], - /// Trace Enable - pub ter: [RW; 8], - reserved1: [u32; 8], - /// Trace Privilege - pub tpr: RW, - reserved2: [u32; 15], - /// Trace Control - pub tcr: RW, - reserved3: [u32; 75], - /// Lock Access - pub lar: WO, - /// Lock Status - pub lsr: RO, + fn deref(&self) -> &Self::Target { + unsafe { &*Self::ptr() } + } } -/// Stimulus Port -pub struct Stim { - register: UnsafeCell, +/// Debug Control Block +pub struct DCB { + _marker: PhantomData<*const ()>, } -impl Stim { - /// Writes an `u8` payload into the stimulus port - pub fn write_u8(&self, value: u8) { - unsafe { ptr::write_volatile(self.register.get() as *mut u8, value) } - } - - /// Writes an `u16` payload into the stimulus port - pub fn write_u16(&self, value: u16) { - unsafe { ptr::write_volatile(self.register.get() as *mut u16, value) } +impl DCB { + /// Returns a pointer to the register block + pub fn ptr() -> *const dcb::RegisterBlock { + 0xE000_EDF0 as *const _ } +} - /// Writes an `u32` payload into the stimulus port - pub fn write_u32(&self, value: u32) { - unsafe { ptr::write_volatile(self.register.get(), value) } - } +impl Deref for DCB { + type Target = self::dcb::RegisterBlock; - /// Returns `true` if the stimulus port is ready to accept more data - pub fn is_fifo_ready(&self) -> bool { - unsafe { ptr::read_volatile(self.register.get()) == 1 } + fn deref(&self) -> &Self::Target { + unsafe { &*DCB::ptr() } } } -/// MPU register block -#[repr(C)] -pub struct MPU { - /// Type - pub _type: RO, - /// Control - pub ctrl: RW, - /// Region Number - pub rnr: RW, - /// Region Base Address - pub rbar: RW, - /// Region Attribute and Size - pub rasr: RW, - /// Alias 1 of RBAR - pub rbar_a1: RW, - /// Alias 1 of RSAR - pub rsar_a1: RW, - /// Alias 2 of RBAR - pub rbar_a2: RW, - /// Alias 2 of RSAR - pub rsar_a2: RW, - /// Alias 3 of RBAR - pub rbar_a3: RW, - /// Alias 3 of RSAR - pub rsar_a3: RW, -} - -/// NVIC register block -#[repr(C)] -pub struct NVIC { - /// Interrupt Set-Enable - pub iser: [RW; 8], - reserved0: [u32; 24], - /// Interrupt Clear-Enable - pub icer: [RW; 8], - reserved1: [u32; 24], - /// Interrupt Set-Pending - pub ispr: [RW; 8], - reserved2: [u32; 24], - /// Interrupt Clear-Pending - pub icpr: [RW; 8], - 
reserved3: [u32; 24], - /// Interrupt Active Bit - pub iabr: [RO; 8], - reserved4: [u32; 56], - /// Interrupt Priority - pub ipr: [RW; 240], +/// Data Watchpoint and Trace unit +pub struct DWT { + _marker: PhantomData<*const ()>, } -impl NVIC { - /// Clears `interrupt`'s pending state - pub fn clear_pending(&self, interrupt: I) - where - I: Nr, - { - let nr = interrupt.nr(); - - unsafe { self.icpr[usize::from(nr / 32)].write(1 << (nr % 32)) } +impl DWT { + /// Returns a pointer to the register block + pub fn ptr() -> *const dwt::RegisterBlock { + 0xE000_1000 as *const _ } +} - /// Disables `interrupt` - pub fn disable(&self, interrupt: I) - where - I: Nr, - { - let nr = interrupt.nr(); +impl Deref for DWT { + type Target = self::dwt::RegisterBlock; - unsafe { self.icer[usize::from(nr / 32)].write(1 << (nr % 32)) } + fn deref(&self) -> &Self::Target { + unsafe { &*Self::ptr() } } +} - /// Enables `interrupt` - pub fn enable(&self, interrupt: I) - where - I: Nr, - { - let nr = interrupt.nr(); +/// Flash Patch and Breakpoint unit +pub struct FPB { + _marker: PhantomData<*const ()>, +} - unsafe { self.iser[usize::from(nr / 32)].write(1 << (nr % 32)) } +impl FPB { + /// Returns a pointer to the register block + pub fn ptr() -> *const fpb::RegisterBlock { + 0xE000_2000 as *const _ } +} - /// Gets the "priority" of `interrupt` - /// - /// NOTE NVIC encodes priority in the highest bits of a byte so values like - /// `1` and `2` have the same priority. Also for NVIC priorities, a lower - /// value (e.g. `16`) has higher priority than a larger value (e.g. `32`). - pub fn get_priority(&self, interrupt: I) -> u8 - where - I: Nr, - { - let nr = interrupt.nr(); +impl Deref for FPB { + type Target = self::fpb::RegisterBlock; - self.ipr[usize::from(nr)].read() + fn deref(&self) -> &Self::Target { + unsafe { &*Self::ptr() } } +} - /// Is `interrupt` active or pre-empted and stacked - pub fn is_active(&self, interrupt: I) -> bool - where - I: Nr, - { - let nr = interrupt.nr(); - let mask = 1 << (nr % 32); +/// Floating Point Unit +pub struct FPU { + _marker: PhantomData<*const ()>, +} - (self.iabr[usize::from(nr / 32)].read() & mask) == mask +impl FPU { + /// Returns a pointer to the register block + pub fn ptr() -> *const fpu::RegisterBlock { + 0xE000_EF30 as *const _ } +} - /// Checks if `interrupt` is enabled - pub fn is_enabled(&self, interrupt: I) -> bool - where - I: Nr, - { - let nr = interrupt.nr(); - let mask = 1 << (nr % 32); +#[cfg(any(has_fpu, test))] +impl Deref for FPU { + type Target = self::fpu::RegisterBlock; - (self.iser[usize::from(nr / 32)].read() & mask) == mask + fn deref(&self) -> &Self::Target { + unsafe { &*Self::ptr() } } +} - /// Checks if `interrupt` is pending - pub fn is_pending(&self, interrupt: I) -> bool - where - I: Nr, - { - let nr = interrupt.nr(); - let mask = 1 << (nr % 32); +/// Instrumentation Trace Macrocell +pub struct ITM { + _marker: PhantomData<*const ()>, +} - (self.ispr[usize::from(nr / 32)].read() & mask) == mask +impl ITM { + /// Returns a pointer to the register block + pub fn ptr() -> *const itm::RegisterBlock { + 0xE000_0000 as *const _ } +} - /// Forces `interrupt` into pending state - pub fn set_pending(&self, interrupt: I) - where - I: Nr, - { - let nr = interrupt.nr(); +impl Deref for ITM { + type Target = self::itm::RegisterBlock; - unsafe { self.ispr[usize::from(nr / 32)].write(1 << (nr % 32)) } + fn deref(&self) -> &Self::Target { + unsafe { &*Self::ptr() } } +} - /// Sets the "priority" of `interrupt` to `prio` - /// - /// NOTE See `get_priority` 
method for an explanation of how NVIC priorities - /// work. - pub unsafe fn set_priority(&self, interrupt: I, prio: u8) - where - I: Nr, - { - let nr = interrupt.nr(); +/// Memory Protection Unit +pub struct MPU { + _marker: PhantomData<*const ()>, +} - self.ipr[usize::from(nr)].write(prio) +impl MPU { + /// Returns a pointer to the register block + pub fn ptr() -> *const mpu::RegisterBlock { + 0xE000_ED90 as *const _ } } -/// SCB register block -#[repr(C)] -pub struct SCB { - /// Interrupt Control and State - pub icsr: RW, - /// Vector Table Offset - pub vtor: RW, - /// Application Interrupt and Reset Control - pub aircr: RW, - /// System Control - pub scr: RW, - /// Configuration and Control - pub ccr: RW, - /// System Handler Priority - pub shpr: [RW; 12], - /// System Handler Control and State - pub shpcrs: RW, - /// Configurable Fault Status - pub cfsr: RW, - /// HardFault Status - pub hfsr: RW, - /// Debug Fault Status - pub dfsr: RW, - /// MemManage Fault Address - pub mmar: RW, - /// BusFault Address - pub bfar: RW, - /// Auxiliary Fault Status - pub afsr: RW, - reserved: [u32; 18], - /// Coprocessor Access Control - pub cpacr: RW, -} +impl Deref for MPU { + type Target = self::mpu::RegisterBlock; -/// FPU access mode -#[cfg(has_fpu)] -#[derive(Clone, Copy, Debug)] -pub enum FpuAccessMode { - /// FPU is not accessible - Disabled, - /// FPU is accessible in Privileged and User mode - Enabled, - /// FPU is accessible in Privileged mode only - Privileged, + fn deref(&self) -> &Self::Target { + unsafe { &*Self::ptr() } + } } -#[cfg(has_fpu)] -mod fpu_consts { - pub const SCB_CPACR_FPU_MASK: u32 = 0b11_11 << 20; - pub const SCB_CPACR_FPU_ENABLE: u32 = 0b01_01 << 20; - pub const SCB_CPACR_FPU_USER: u32 = 0b10_10 << 20; +/// Nested Vector Interrupt Controller +pub struct NVIC { + _marker: PhantomData<*const ()>, } -#[cfg(has_fpu)] -use self::fpu_consts::*; - -#[cfg(has_fpu)] -impl SCB { - /// Gets FPU access mode - pub fn fpu_access_mode(&self) -> FpuAccessMode { - let cpacr = self.cpacr.read(); - if cpacr & SCB_CPACR_FPU_MASK == - SCB_CPACR_FPU_ENABLE | SCB_CPACR_FPU_USER - { - FpuAccessMode::Enabled - } else if cpacr & SCB_CPACR_FPU_MASK == SCB_CPACR_FPU_ENABLE { - FpuAccessMode::Privileged - } else { - FpuAccessMode::Disabled - } - } - - /// Sets FPU access mode - pub fn set_fpu_access_mode(&self, mode: FpuAccessMode) { - let mut cpacr = self.cpacr.read() & !SCB_CPACR_FPU_MASK; - match mode { - FpuAccessMode::Disabled => (), - FpuAccessMode::Privileged => cpacr |= SCB_CPACR_FPU_ENABLE, - FpuAccessMode::Enabled => { - cpacr |= SCB_CPACR_FPU_ENABLE | SCB_CPACR_FPU_USER - } - } - unsafe { self.cpacr.write(cpacr) } +impl NVIC { + /// Returns a pointer to the register block + pub fn ptr() -> *const nvic::RegisterBlock { + 0xE000_E100 as *const _ } +} - /// Shorthand for `set_fpu_access_mode(FpuAccessMode::Enabled)` - pub fn enable_fpu(&self) { - self.set_fpu_access_mode(FpuAccessMode::Enabled) - } +impl Deref for NVIC { + type Target = self::nvic::RegisterBlock; - /// Shorthand for `set_fpu_access_mode(FpuAccessMode::Disabled)` - pub fn disable_fpu(&self) { - self.set_fpu_access_mode(FpuAccessMode::Disabled) + fn deref(&self) -> &Self::Target { + unsafe { &*Self::ptr() } } } -#[cfg(armv7m)] -mod scb_consts { - pub const SCB_CCR_IC_MASK: u32 = (1<<17); - pub const SCB_CCR_DC_MASK: u32 = (1<<16); +/// System Control Block +pub struct SCB { + _marker: PhantomData<*const ()>, } -#[cfg(armv7m)] -use self::scb_consts::*; - -#[cfg(armv7m)] impl SCB { - /// Enables I-Cache if currently disabled - 
#[inline] - pub fn enable_icache(&self) { - // Don't do anything if ICache is already enabled - if self.icache_enabled() { - return; - } - - // All of CBP is write-only so no data races are possible - let cbp = unsafe { &mut *CBP.get() }; - - // Invalidate I-Cache - cbp.iciallu(); - - // Enable I-Cache - unsafe { self.ccr.modify(|r| r | SCB_CCR_IC_MASK) }; - - ::asm::dsb(); - ::asm::isb(); - } - - /// Disables I-Cache if currently enabled - #[inline] - pub fn disable_icache(&self) { - // Don't do anything if ICache is already disabled - if !self.icache_enabled() { - return; - } - - // All of CBP is write-only so no data races are possible - let cbp = unsafe { &mut *CBP.get() }; - - // Disable I-Cache - unsafe { self.ccr.modify(|r| r & !SCB_CCR_IC_MASK) }; - - // Invalidate I-Cache - cbp.iciallu(); - - ::asm::dsb(); - ::asm::isb(); - } - - /// Returns whether the I-Cache is currently enabled - #[inline] - pub fn icache_enabled(&self) -> bool { - ::asm::dsb(); - ::asm::isb(); - self.ccr.read() & SCB_CCR_IC_MASK == SCB_CCR_IC_MASK - } - - /// Invalidates I-Cache - #[inline] - pub fn invalidate_icache(&self) { - // All of CBP is write-only so no data races are possible - let cbp = unsafe { &mut *CBP.get() }; - - // Invalidate I-Cache - cbp.iciallu(); - - ::asm::dsb(); - ::asm::isb(); - } - - /// Enables D-cache if currently disabled - #[inline] - pub fn enable_dcache(&self, cpuid: &CPUID) { - // Don't do anything if DCache is already enabled - if self.dcache_enabled() { - return; - } - - // Invalidate anything currently in the DCache - self.invalidate_dcache(cpuid); - - // Now turn on the DCache - unsafe { self.ccr.modify(|r| r | SCB_CCR_DC_MASK) }; - - ::asm::dsb(); - ::asm::isb(); - } - - /// Disables D-cache if currently enabled - #[inline] - pub fn disable_dcache(&self, cpuid: &CPUID) { - // Don't do anything if DCache is already disabled - if !self.dcache_enabled() { - return; - } - - // Turn off the DCache - unsafe { self.ccr.modify(|r| r & !SCB_CCR_DC_MASK) }; - - // Clean and invalidate whatever was left in it - self.clean_invalidate_dcache(cpuid); - } - - /// Returns whether the D-Cache is currently enabled - #[inline] - pub fn dcache_enabled(&self) -> bool { - ::asm::dsb(); - ::asm::isb(); - self.ccr.read() & SCB_CCR_DC_MASK == SCB_CCR_DC_MASK - } - - /// Invalidates D-cache - /// - /// Note that calling this while the dcache is enabled will probably wipe out your - /// stack, depending on optimisations, breaking returning to the call point. - /// It's used immediately before enabling the dcache, but not exported publicly. 
- #[inline] - fn invalidate_dcache(&self, cpuid: &CPUID) { - // All of CBP is write-only so no data races are possible - let cbp = unsafe { &mut *CBP.get() }; - - // Read number of sets and ways - let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified); - - // Invalidate entire D-Cache - for set in 0..sets { - for way in 0..ways { - cbp.dcisw(set, way); - } - } - - ::asm::dsb(); - ::asm::isb(); - } - - /// Cleans D-cache - #[inline] - pub fn clean_dcache(&self, cpuid: &CPUID) { - // All of CBP is write-only so no data races are possible - let cbp = unsafe { &mut *CBP.get() }; - - // Read number of sets and ways - let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified); - - for set in 0..sets { - for way in 0..ways { - cbp.dccsw(set, way); - } - } - - ::asm::dsb(); - ::asm::isb(); - } - - /// Cleans and invalidates D-cache - #[inline] - pub fn clean_invalidate_dcache(&self, cpuid: &CPUID) { - // All of CBP is write-only so no data races are possible - let cbp = unsafe { &mut *CBP.get() }; - - // Read number of sets and ways - let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified); - - for set in 0..sets { - for way in 0..ways { - cbp.dccisw(set, way); - } - } - - ::asm::dsb(); - ::asm::isb(); - } - - /// Invalidates D-cache by address - /// - /// `addr`: the address to invalidate - /// `size`: size of the memory block, in number of bytes - /// - /// Invalidates cache starting from the lowest 32-byte aligned address represented by `addr`, - /// in blocks of 32 bytes until at least `size` bytes have been invalidated. - #[inline] - pub fn invalidate_dcache_by_address(&self, addr: usize, size: usize) { - // No-op zero sized operations - if size == 0 { - return; - } - - // All of CBP is write-only so no data races are possible - let cbp = unsafe { &mut *CBP.get() }; - - ::asm::dsb(); - - // Cache lines are fixed to 32 bit on Cortex-M7 and not present in earlier Cortex-M - const LINESIZE: usize = 32; - let num_lines = ((size - 1)/LINESIZE) + 1; - - let mut addr = addr & 0xFFFF_FFE0; - - for _ in 0..num_lines { - cbp.dcimvac(addr as u32); - addr += LINESIZE; - } - - ::asm::dsb(); - ::asm::isb(); - } - - /// Cleans D-cache by address - /// - /// `addr`: the address to clean - /// `size`: size of the memory block, in number of bytes - /// - /// Cleans cache starting from the lowest 32-byte aligned address represented by `addr`, - /// in blocks of 32 bytes until at least `size` bytes have been cleaned. 
- #[inline] - pub fn clean_dcache_by_address(&self, addr: usize, size: usize) { - // No-op zero sized operations - if size == 0 { - return; - } - - // All of CBP is write-only so no data races are possible - let cbp = unsafe { &mut *CBP.get() }; - - ::asm::dsb(); - - // Cache lines are fixed to 32 bit on Cortex-M7 and not present in earlier Cortex-M - const LINESIZE: usize = 32; - let num_lines = ((size - 1)/LINESIZE) + 1; - - let mut addr = addr & 0xFFFF_FFE0; - - for _ in 0..num_lines { - cbp.dccmvac(addr as u32); - addr += LINESIZE; - } - - ::asm::dsb(); - ::asm::isb(); + /// Returns a pointer to the register block + pub fn ptr() -> *const scb::RegisterBlock { + 0xE000_ED04 as *const _ } +} - /// Cleans and invalidates D-cache by address - /// - /// `addr`: the address to clean and invalidate - /// `size`: size of the memory block, in number of bytes - /// - /// Cleans and invalidates cache starting from the lowest 32-byte aligned address represented - /// by `addr`, in blocks of 32 bytes until at least `size` bytes have been cleaned and - /// invalidated. - #[inline] - pub fn clean_invalidate_dcache_by_address(&self, addr: usize, size: usize) { - // No-op zero sized operations - if size == 0 { - return; - } - - // All of CBP is write-only so no data races are possible - let cbp = unsafe { &mut *CBP.get() }; - - ::asm::dsb(); - - // Cache lines are fixed to 32 bit on Cortex-M7 and not present in earlier Cortex-M - const LINESIZE: usize = 32; - let num_lines = ((size - 1)/LINESIZE) + 1; - - let mut addr = addr & 0xFFFF_FFE0; - - for _ in 0..num_lines { - cbp.dccimvac(addr as u32); - addr += LINESIZE; - } +impl Deref for SCB { + type Target = self::scb::RegisterBlock; - ::asm::dsb(); - ::asm::isb(); + fn deref(&self) -> &Self::Target { + unsafe { &*Self::ptr() } } } -/// SysTick register block -#[repr(C)] +/// SysTick: System Timer pub struct SYST { - /// Control and Status - pub csr: RW, - /// Reload Value - pub rvr: RW, - /// Current Value - pub cvr: RW, - /// Calibration Value - pub calib: RO, + _marker: PhantomData<*const ()>, } -/// SysTick clock source -#[derive(Clone, Copy, Debug)] -pub enum SystClkSource { - /// Core-provided clock - Core, - /// External reference clock - External, -} - -const SYST_COUNTER_MASK: u32 = 0x00ffffff; - -const SYST_CSR_ENABLE: u32 = 1 << 0; -const SYST_CSR_TICKINT: u32 = 1 << 1; -const SYST_CSR_CLKSOURCE: u32 = 1 << 2; -const SYST_CSR_COUNTFLAG: u32 = 1 << 16; - -const SYST_CALIB_SKEW: u32 = 1 << 30; -const SYST_CALIB_NOREF: u32 = 1 << 31; - impl SYST { - /// Checks if counter is enabled - pub fn is_counter_enabled(&self) -> bool { - self.csr.read() & SYST_CSR_ENABLE != 0 - } - - /// Enables counter - pub fn enable_counter(&self) { - unsafe { self.csr.modify(|v| v | SYST_CSR_ENABLE) } - } - - /// Disables counter - pub fn disable_counter(&self) { - unsafe { self.csr.modify(|v| v & !SYST_CSR_ENABLE) } - } - - /// Checks if SysTick interrupt is enabled - pub fn is_interrupt_enabled(&self) -> bool { - self.csr.read() & SYST_CSR_TICKINT != 0 - } - - /// Enables SysTick interrupt - pub fn enable_interrupt(&self) { - unsafe { self.csr.modify(|v| v | SYST_CSR_TICKINT) } - } - - /// Disables SysTick interrupt - pub fn disable_interrupt(&self) { - unsafe { self.csr.modify(|v| v & !SYST_CSR_TICKINT) } - } - - /// Gets clock source - pub fn get_clock_source(&self) -> SystClkSource { - let clk_source_bit = self.csr.read() & SYST_CSR_CLKSOURCE != 0; - match clk_source_bit { - false => SystClkSource::External, - true => SystClkSource::Core, - } - } - - /// Sets 
clock source - pub fn set_clock_source(&self, clk_source: SystClkSource) { - match clk_source { - SystClkSource::External => unsafe { - self.csr.modify(|v| v & !SYST_CSR_CLKSOURCE) - }, - SystClkSource::Core => unsafe { - self.csr.modify(|v| v | SYST_CSR_CLKSOURCE) - }, - } - } - - /// Checks if the counter wrapped (underflowed) since the last check - pub fn has_wrapped(&self) -> bool { - self.csr.read() & SYST_CSR_COUNTFLAG != 0 - } - - /// Gets reload value - pub fn get_reload(&self) -> u32 { - self.rvr.read() - } - - /// Sets reload value - /// - /// Valid values are between `1` and `0x00ffffff`. - pub fn set_reload(&self, value: u32) { - unsafe { self.rvr.write(value) } - } - - /// Gets current value - pub fn get_current(&self) -> u32 { - self.cvr.read() - } - - /// Clears current value to 0 - /// - /// After calling `clear_current()`, the next call to `has_wrapped()` - /// will return `false`. - pub fn clear_current(&self) { - unsafe { self.cvr.write(0) } - } - - /// Returns the reload value with which the counter would wrap once per 10 - /// ms - /// - /// Returns `0` if the value is not known (e.g. because the clock can - /// change dynamically). - pub fn get_ticks_per_10ms(&self) -> u32 { - self.calib.read() & SYST_COUNTER_MASK + /// Returns a pointer to the register block + pub fn ptr() -> *const syst::RegisterBlock { + 0xE000_E010 as *const _ } +} - /// Checks if the calibration value is precise - /// - /// Returns `false` if using the reload value returned by - /// `get_ticks_per_10ms()` may result in a period significantly deviating - /// from 10 ms. - pub fn is_precise(&self) -> bool { - self.calib.read() & SYST_CALIB_SKEW == 0 - } +impl Deref for SYST { + type Target = self::syst::RegisterBlock; - /// Checks if an external reference clock is available - pub fn has_reference_clock(&self) -> bool { - self.calib.read() & SYST_CALIB_NOREF == 0 + fn deref(&self) -> &Self::Target { + unsafe { &*Self::ptr() } } } -/// TPIU register block -#[repr(C)] +/// Trace Port Interface Unit; pub struct TPIU { - /// Supported Parallel Port Sizes - pub sspsr: RO, - /// Current Parallel Port Size - pub cspsr: RW, - reserved0: [u32; 2], - /// Asynchronous Clock Prescaler - pub acpr: RW, - reserved1: [u32; 55], - /// Selected Pin Control - pub sppr: RW, - reserved2: [u32; 132], - /// Formatter and Flush Control - pub ffcr: RW, - reserved3: [u32; 810], - /// Lock Access - pub lar: WO, - /// Lock Status - pub lsr: RO, - reserved4: [u32; 4], - /// TPIU Type - pub _type: RO, + _marker: PhantomData<*const ()>, } -/// Cache and branch predictor maintenance operations register block -#[repr(C)] -#[cfg(armv7m)] -pub struct CBP { - /// I-cache invalidate all to PoU - pub iciallu: WO, - reserved0: u32, - /// I-cache invalidate by MVA to PoU - pub icimvau: WO, - /// D-cache invalidate by MVA to PoC - pub dcimvac: WO, - /// D-cache invalidate by set-way - pub dcisw: WO, - /// D-cache clean by MVA to PoU - pub dccmvau: WO, - /// D-cache clean by MVA to PoC - pub dccmvac: WO, - /// D-cache clean by set-way - pub dccsw: WO, - /// D-cache clean and invalidate by MVA to PoC - pub dccimvac: WO, - /// D-cache clean and invalidate by set-way - pub dccisw: WO, - /// Branch predictor invalidate all - pub bpiall: WO, -} - -#[cfg(armv7m)] -mod cbp_consts { - pub const CBP_SW_WAY_POS: u32 = 30; - pub const CBP_SW_WAY_MASK: u32 = 0x3 << CBP_SW_WAY_POS; - pub const CBP_SW_SET_POS: u32 = 5; - pub const CBP_SW_SET_MASK: u32 = 0x1FF << CBP_SW_SET_POS; -} - -#[cfg(armv7m)] -use self::cbp_consts::*; - -#[cfg(armv7m)] -impl CBP { 
- /// I-cache invalidate all to PoU - #[inline(always)] - pub fn iciallu(&self) { - unsafe { self.iciallu.write(0); } - } - - /// I-cache invalidate by MVA to PoU - #[inline(always)] - pub fn icimvau(&self, mva: u32) { - unsafe { self.icimvau.write(mva); } - } - - /// D-cache invalidate by MVA to PoC - #[inline(always)] - pub fn dcimvac(&self, mva: u32) { - unsafe { self.dcimvac.write(mva); } - } - - /// D-cache invalidate by set-way - /// - /// `set` is masked to be between 0 and 3, and `way` between 0 and 511. - #[inline(always)] - pub fn dcisw(&self, set: u16, way: u16) { - // The ARMv7-M Architecture Reference Manual, as of Revision E.b, says these set/way - // operations have a register data format which depends on the implementation's - // associativity and number of sets. Specifically the 'way' and 'set' fields have - // offsets 32-log2(ASSOCIATIVITY) and log2(LINELEN) respectively. - // - // However, in Cortex-M7 devices, these offsets are fixed at 30 and 5, as per the Cortex-M7 - // Generic User Guide section 4.8.3. Since no other ARMv7-M implementations except the - // Cortex-M7 have a DCACHE or ICACHE at all, it seems safe to do the same thing as the - // CMSIS-Core implementation and use fixed values. - unsafe { self.dcisw.write( - (((way as u32) & (CBP_SW_WAY_MASK >> CBP_SW_WAY_POS)) << CBP_SW_WAY_POS) | - (((set as u32) & (CBP_SW_SET_MASK >> CBP_SW_SET_POS)) << CBP_SW_SET_POS)); - } - } - - /// D-cache clean by MVA to PoU - #[inline(always)] - pub fn dccmvau(&self, mva: u32) { - unsafe { self.dccmvau.write(mva); } - } - - /// D-cache clean by MVA to PoC - #[inline(always)] - pub fn dccmvac(&self, mva: u32) { - unsafe { self.dccmvac.write(mva); } - } - - /// D-cache clean by set-way - /// - /// `set` is masked to be between 0 and 3, and `way` between 0 and 511. - #[inline(always)] - pub fn dccsw(&self, set: u16, way: u16) { - // See comment for dcisw() about the format here - unsafe { self.dccsw.write( - (((way as u32) & (CBP_SW_WAY_MASK >> CBP_SW_WAY_POS)) << CBP_SW_WAY_POS) | - (((set as u32) & (CBP_SW_SET_MASK >> CBP_SW_SET_POS)) << CBP_SW_SET_POS)); - } - } - - /// D-cache clean and invalidate by MVA to PoC - #[inline(always)] - pub fn dccimvac(&self, mva: u32) { - unsafe { self.dccimvac.write(mva); } +impl TPIU { + /// Returns a pointer to the register block + pub fn ptr() -> *const tpiu::RegisterBlock { + 0xE004_0000 as *const _ } +} - /// D-cache clean and invalidate by set-way - /// - /// `set` is masked to be between 0 and 3, and `way` between 0 and 511. - #[inline(always)] - pub fn dccisw(&self, set: u16, way: u16) { - // See comment for dcisw() about the format here - unsafe { self.dccisw.write( - (((way as u32) & (CBP_SW_WAY_MASK >> CBP_SW_WAY_POS)) << CBP_SW_WAY_POS) | - (((set as u32) & (CBP_SW_SET_MASK >> CBP_SW_SET_POS)) << CBP_SW_SET_POS)); - } - } +impl Deref for TPIU { + type Target = self::tpiu::RegisterBlock; - /// Branch predictor invalidate all - #[inline(always)] - pub fn bpiall(&self) { - unsafe { self.bpiall.write(0); } + fn deref(&self) -> &Self::Target { + unsafe { &*Self::ptr() } } } diff --git a/src/peripheral/mpu.rs b/src/peripheral/mpu.rs new file mode 100644 index 00000000..09d06f08 --- /dev/null +++ b/src/peripheral/mpu.rs @@ -0,0 +1,30 @@ +//! 
Memory Protection Unit + +use volatile_register::{RO, RW}; + +/// Register block +#[repr(C)] +pub struct RegisterBlock { + /// Type + pub _type: RO, + /// Control + pub ctrl: RW, + /// Region Number + pub rnr: RW, + /// Region Base Address + pub rbar: RW, + /// Region Attribute and Size + pub rasr: RW, + /// Alias 1 of RBAR + pub rbar_a1: RW, + /// Alias 1 of RSAR + pub rsar_a1: RW, + /// Alias 2 of RBAR + pub rbar_a2: RW, + /// Alias 2 of RSAR + pub rsar_a2: RW, + /// Alias 3 of RBAR + pub rbar_a3: RW, + /// Alias 3 of RSAR + pub rsar_a3: RW, +} diff --git a/src/peripheral/nvic.rs b/src/peripheral/nvic.rs new file mode 100644 index 00000000..1154f388 --- /dev/null +++ b/src/peripheral/nvic.rs @@ -0,0 +1,129 @@ +//! Nested Vector Interrupt Controller + +use volatile_register::{RO, RW}; + +use interrupt::Nr; + +/// Register block +#[repr(C)] +pub struct RegisterBlock { + /// Interrupt Set-Enable + pub iser: [RW; 8], + reserved0: [u32; 24], + /// Interrupt Clear-Enable + pub icer: [RW; 8], + reserved1: [u32; 24], + /// Interrupt Set-Pending + pub ispr: [RW; 8], + reserved2: [u32; 24], + /// Interrupt Clear-Pending + pub icpr: [RW; 8], + reserved3: [u32; 24], + /// Interrupt Active Bit + pub iabr: [RO; 8], + reserved4: [u32; 56], + /// Interrupt Priority + pub ipr: [RW; 240], +} + +impl RegisterBlock { + /// Clears `interrupt`'s pending state + pub fn clear_pending(&self, interrupt: I) + where + I: Nr, + { + let nr = interrupt.nr(); + + unsafe { self.icpr[usize::from(nr / 32)].write(1 << (nr % 32)) } + } + + /// Disables `interrupt` + pub fn disable(&self, interrupt: I) + where + I: Nr, + { + let nr = interrupt.nr(); + + unsafe { self.icer[usize::from(nr / 32)].write(1 << (nr % 32)) } + } + + /// Enables `interrupt` + pub fn enable(&self, interrupt: I) + where + I: Nr, + { + let nr = interrupt.nr(); + + unsafe { self.iser[usize::from(nr / 32)].write(1 << (nr % 32)) } + } + + /// Gets the "priority" of `interrupt` + /// + /// NOTE NVIC encodes priority in the highest bits of a byte so values like + /// `1` and `2` have the same priority. Also for NVIC priorities, a lower + /// value (e.g. `16`) has higher priority than a larger value (e.g. `32`). + pub fn get_priority(&self, interrupt: I) -> u8 + where + I: Nr, + { + let nr = interrupt.nr(); + + self.ipr[usize::from(nr)].read() + } + + /// Is `interrupt` active or pre-empted and stacked + pub fn is_active(&self, interrupt: I) -> bool + where + I: Nr, + { + let nr = interrupt.nr(); + let mask = 1 << (nr % 32); + + (self.iabr[usize::from(nr / 32)].read() & mask) == mask + } + + /// Checks if `interrupt` is enabled + pub fn is_enabled(&self, interrupt: I) -> bool + where + I: Nr, + { + let nr = interrupt.nr(); + let mask = 1 << (nr % 32); + + (self.iser[usize::from(nr / 32)].read() & mask) == mask + } + + /// Checks if `interrupt` is pending + pub fn is_pending(&self, interrupt: I) -> bool + where + I: Nr, + { + let nr = interrupt.nr(); + let mask = 1 << (nr % 32); + + (self.ispr[usize::from(nr / 32)].read() & mask) == mask + } + + /// Forces `interrupt` into pending state + pub fn set_pending(&self, interrupt: I) + where + I: Nr, + { + let nr = interrupt.nr(); + + unsafe { self.ispr[usize::from(nr / 32)].write(1 << (nr % 32)) } + } + + /// Sets the "priority" of `interrupt` to `prio` + /// + /// NOTE See `get_priority` method for an explanation of how NVIC priorities + /// work. 
+ pub unsafe fn set_priority(&self, interrupt: I, prio: u8) + where + I: Nr, + { + let nr = interrupt.nr(); + + self.ipr[usize::from(nr)].write(prio) + } +} diff --git a/src/peripheral/scb.rs b/src/peripheral/scb.rs new file mode 100644 index 00000000..188f3b7a --- /dev/null +++ b/src/peripheral/scb.rs @@ -0,0 +1,381 @@ +//! System Control Block + +use volatile_register::RW; + +#[cfg(armv7m)] +use super::CBP; +#[cfg(armv7m)] +use super::cpuid::{self, CsselrCacheType}; + +/// Register block +#[repr(C)] +pub struct RegisterBlock { + /// Interrupt Control and State + pub icsr: RW, + /// Vector Table Offset + pub vtor: RW, + /// Application Interrupt and Reset Control + pub aircr: RW, + /// System Control + pub scr: RW, + /// Configuration and Control + pub ccr: RW, + /// System Handler Priority + pub shpr: [RW; 12], + /// System Handler Control and State + pub shpcrs: RW, + /// Configurable Fault Status + pub cfsr: RW, + /// HardFault Status + pub hfsr: RW, + /// Debug Fault Status + pub dfsr: RW, + /// MemManage Fault Address + pub mmar: RW, + /// BusFault Address + pub bfar: RW, + /// Auxiliary Fault Status + pub afsr: RW, + reserved: [u32; 18], + /// Coprocessor Access Control + pub cpacr: RW, +} + +/// FPU access mode +#[cfg(has_fpu)] +#[derive(Clone, Copy, Debug)] +pub enum FpuAccessMode { + /// FPU is not accessible + Disabled, + /// FPU is accessible in Privileged and User mode + Enabled, + /// FPU is accessible in Privileged mode only + Privileged, +} + +#[cfg(has_fpu)] +mod fpu_consts { + pub const SCB_CPACR_FPU_MASK: u32 = 0b11_11 << 20; + pub const SCB_CPACR_FPU_ENABLE: u32 = 0b01_01 << 20; + pub const SCB_CPACR_FPU_USER: u32 = 0b10_10 << 20; +} + +#[cfg(has_fpu)] +use self::fpu_consts::*; + +#[cfg(has_fpu)] +impl RegisterBlock { + /// Gets FPU access mode + pub fn fpu_access_mode(&self) -> FpuAccessMode { + let cpacr = self.cpacr.read(); + if cpacr & SCB_CPACR_FPU_MASK == SCB_CPACR_FPU_ENABLE | SCB_CPACR_FPU_USER { + FpuAccessMode::Enabled + } else if cpacr & SCB_CPACR_FPU_MASK == SCB_CPACR_FPU_ENABLE { + FpuAccessMode::Privileged + } else { + FpuAccessMode::Disabled + } + } + + /// Sets FPU access mode + pub fn set_fpu_access_mode(&self, mode: FpuAccessMode) { + let mut cpacr = self.cpacr.read() & !SCB_CPACR_FPU_MASK; + match mode { + FpuAccessMode::Disabled => (), + FpuAccessMode::Privileged => cpacr |= SCB_CPACR_FPU_ENABLE, + FpuAccessMode::Enabled => cpacr |= SCB_CPACR_FPU_ENABLE | SCB_CPACR_FPU_USER, + } + unsafe { self.cpacr.write(cpacr) } + } + + /// Shorthand for `set_fpu_access_mode(FpuAccessMode::Enabled)` + pub fn enable_fpu(&self) { + self.set_fpu_access_mode(FpuAccessMode::Enabled) + } + + /// Shorthand for `set_fpu_access_mode(FpuAccessMode::Disabled)` + pub fn disable_fpu(&self) { + self.set_fpu_access_mode(FpuAccessMode::Disabled) + } +} + +#[cfg(armv7m)] +mod scb_consts { + pub const SCB_CCR_IC_MASK: u32 = (1 << 17); + pub const SCB_CCR_DC_MASK: u32 = (1 << 16); +} + +#[cfg(armv7m)] +use self::scb_consts::*; + +#[cfg(armv7m)] +impl RegisterBlock { + /// Enables I-Cache if currently disabled + #[inline] + pub fn enable_icache(&self) { + // Don't do anything if ICache is already enabled + if self.icache_enabled() { + return; + } + + // All of CBP is write-only so no data races are possible + let cbp = unsafe { &*CBP::ptr() }; + + // Invalidate I-Cache + cbp.iciallu(); + + // Enable I-Cache + unsafe { self.ccr.modify(|r| r | SCB_CCR_IC_MASK) }; + + ::asm::dsb(); + ::asm::isb(); + } + + /// Disables I-Cache if currently enabled + #[inline] + pub fn 
disable_icache(&self) { + // Don't do anything if ICache is already disabled + if !self.icache_enabled() { + return; + } + + // All of CBP is write-only so no data races are possible + let cbp = unsafe { &*CBP::ptr() }; + + // Disable I-Cache + unsafe { self.ccr.modify(|r| r & !SCB_CCR_IC_MASK) }; + + // Invalidate I-Cache + cbp.iciallu(); + + ::asm::dsb(); + ::asm::isb(); + } + + /// Returns whether the I-Cache is currently enabled + #[inline] + pub fn icache_enabled(&self) -> bool { + ::asm::dsb(); + ::asm::isb(); + self.ccr.read() & SCB_CCR_IC_MASK == SCB_CCR_IC_MASK + } + + /// Invalidates I-Cache + #[inline] + pub fn invalidate_icache(&self) { + // All of CBP is write-only so no data races are possible + let cbp = unsafe { &*CBP::ptr() }; + + // Invalidate I-Cache + cbp.iciallu(); + + ::asm::dsb(); + ::asm::isb(); + } + + /// Enables D-cache if currently disabled + #[inline] + pub fn enable_dcache(&self, cpuid: &cpuid::RegisterBlock) { + // Don't do anything if DCache is already enabled + if self.dcache_enabled() { + return; + } + + // Invalidate anything currently in the DCache + self.invalidate_dcache(cpuid); + + // Now turn on the DCache + unsafe { self.ccr.modify(|r| r | SCB_CCR_DC_MASK) }; + + ::asm::dsb(); + ::asm::isb(); + } + + /// Disables D-cache if currently enabled + #[inline] + pub fn disable_dcache(&self, cpuid: &cpuid::RegisterBlock) { + // Don't do anything if DCache is already disabled + if !self.dcache_enabled() { + return; + } + + // Turn off the DCache + unsafe { self.ccr.modify(|r| r & !SCB_CCR_DC_MASK) }; + + // Clean and invalidate whatever was left in it + self.clean_invalidate_dcache(cpuid); + } + + /// Returns whether the D-Cache is currently enabled + #[inline] + pub fn dcache_enabled(&self) -> bool { + ::asm::dsb(); + ::asm::isb(); + self.ccr.read() & SCB_CCR_DC_MASK == SCB_CCR_DC_MASK + } + + /// Invalidates D-cache + /// + /// Note that calling this while the dcache is enabled will probably wipe out your + /// stack, depending on optimisations, breaking returning to the call point. + /// It's used immediately before enabling the dcache, but not exported publicly. 
+ #[inline] + fn invalidate_dcache(&self, cpuid: &cpuid::RegisterBlock) { + // All of CBP is write-only so no data races are possible + let cbp = unsafe { &*CBP::ptr() }; + + // Read number of sets and ways + let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified); + + // Invalidate entire D-Cache + for set in 0..sets { + for way in 0..ways { + cbp.dcisw(set, way); + } + } + + ::asm::dsb(); + ::asm::isb(); + } + + /// Cleans D-cache + #[inline] + pub fn clean_dcache(&self, cpuid: &cpuid::RegisterBlock) { + // All of CBP is write-only so no data races are possible + let cbp = unsafe { &*CBP::ptr() }; + + // Read number of sets and ways + let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified); + + for set in 0..sets { + for way in 0..ways { + cbp.dccsw(set, way); + } + } + + ::asm::dsb(); + ::asm::isb(); + } + + /// Cleans and invalidates D-cache + #[inline] + pub fn clean_invalidate_dcache(&self, cpuid: &cpuid::RegisterBlock) { + // All of CBP is write-only so no data races are possible + let cbp = unsafe { &*CBP::ptr() }; + + // Read number of sets and ways + let (sets, ways) = cpuid.cache_num_sets_ways(0, CsselrCacheType::DataOrUnified); + + for set in 0..sets { + for way in 0..ways { + cbp.dccisw(set, way); + } + } + + ::asm::dsb(); + ::asm::isb(); + } + + /// Invalidates D-cache by address + /// + /// `addr`: the address to invalidate + /// `size`: size of the memory block, in number of bytes + /// + /// Invalidates cache starting from the lowest 32-byte aligned address represented by `addr`, + /// in blocks of 32 bytes until at least `size` bytes have been invalidated. + #[inline] + pub fn invalidate_dcache_by_address(&self, addr: usize, size: usize) { + // No-op zero sized operations + if size == 0 { + return; + } + + // All of CBP is write-only so no data races are possible + let cbp = unsafe { &*CBP::ptr() }; + + ::asm::dsb(); + + // Cache lines are fixed to 32 bit on Cortex-M7 and not present in earlier Cortex-M + const LINESIZE: usize = 32; + let num_lines = ((size - 1) / LINESIZE) + 1; + + let mut addr = addr & 0xFFFF_FFE0; + + for _ in 0..num_lines { + cbp.dcimvac(addr as u32); + addr += LINESIZE; + } + + ::asm::dsb(); + ::asm::isb(); + } + + /// Cleans D-cache by address + /// + /// `addr`: the address to clean + /// `size`: size of the memory block, in number of bytes + /// + /// Cleans cache starting from the lowest 32-byte aligned address represented by `addr`, + /// in blocks of 32 bytes until at least `size` bytes have been cleaned. + #[inline] + pub fn clean_dcache_by_address(&self, addr: usize, size: usize) { + // No-op zero sized operations + if size == 0 { + return; + } + + // All of CBP is write-only so no data races are possible + let cbp = unsafe { &*CBP::ptr() }; + + ::asm::dsb(); + + // Cache lines are fixed to 32 bit on Cortex-M7 and not present in earlier Cortex-M + const LINESIZE: usize = 32; + let num_lines = ((size - 1) / LINESIZE) + 1; + + let mut addr = addr & 0xFFFF_FFE0; + + for _ in 0..num_lines { + cbp.dccmvac(addr as u32); + addr += LINESIZE; + } + + ::asm::dsb(); + ::asm::isb(); + } + + /// Cleans and invalidates D-cache by address + /// + /// `addr`: the address to clean and invalidate + /// `size`: size of the memory block, in number of bytes + /// + /// Cleans and invalidates cache starting from the lowest 32-byte aligned address represented + /// by `addr`, in blocks of 32 bytes until at least `size` bytes have been cleaned and + /// invalidated. 
+ #[inline] + pub fn clean_invalidate_dcache_by_address(&self, addr: usize, size: usize) { + // No-op zero sized operations + if size == 0 { + return; + } + + // All of CBP is write-only so no data races are possible + let cbp = unsafe { &*CBP::ptr() }; + + ::asm::dsb(); + + // Cache lines are fixed to 32 bit on Cortex-M7 and not present in earlier Cortex-M + const LINESIZE: usize = 32; + let num_lines = ((size - 1) / LINESIZE) + 1; + + let mut addr = addr & 0xFFFF_FFE0; + + for _ in 0..num_lines { + cbp.dccimvac(addr as u32); + addr += LINESIZE; + } + + ::asm::dsb(); + ::asm::isb(); + } +} diff --git a/src/peripheral/syst.rs b/src/peripheral/syst.rs new file mode 100644 index 00000000..3f962086 --- /dev/null +++ b/src/peripheral/syst.rs @@ -0,0 +1,137 @@ +//! SysTick: System Timer + +use volatile_register::{RO, RW}; + +/// Register block +#[repr(C)] +pub struct RegisterBlock { + /// Control and Status + pub csr: RW, + /// Reload Value + pub rvr: RW, + /// Current Value + pub cvr: RW, + /// Calibration Value + pub calib: RO, +} + +/// SysTick clock source +#[derive(Clone, Copy, Debug)] +pub enum SystClkSource { + /// Core-provided clock + Core, + /// External reference clock + External, +} + +const SYST_COUNTER_MASK: u32 = 0x00ffffff; + +const SYST_CSR_ENABLE: u32 = 1 << 0; +const SYST_CSR_TICKINT: u32 = 1 << 1; +const SYST_CSR_CLKSOURCE: u32 = 1 << 2; +const SYST_CSR_COUNTFLAG: u32 = 1 << 16; + +const SYST_CALIB_SKEW: u32 = 1 << 30; +const SYST_CALIB_NOREF: u32 = 1 << 31; + +impl RegisterBlock { + /// Checks if counter is enabled + pub fn is_counter_enabled(&self) -> bool { + self.csr.read() & SYST_CSR_ENABLE != 0 + } + + /// Enables counter + pub fn enable_counter(&self) { + unsafe { self.csr.modify(|v| v | SYST_CSR_ENABLE) } + } + + /// Disables counter + pub fn disable_counter(&self) { + unsafe { self.csr.modify(|v| v & !SYST_CSR_ENABLE) } + } + + /// Checks if SysTick interrupt is enabled + pub fn is_interrupt_enabled(&self) -> bool { + self.csr.read() & SYST_CSR_TICKINT != 0 + } + + /// Enables SysTick interrupt + pub fn enable_interrupt(&self) { + unsafe { self.csr.modify(|v| v | SYST_CSR_TICKINT) } + } + + /// Disables SysTick interrupt + pub fn disable_interrupt(&self) { + unsafe { self.csr.modify(|v| v & !SYST_CSR_TICKINT) } + } + + /// Gets clock source + pub fn get_clock_source(&self) -> SystClkSource { + let clk_source_bit = self.csr.read() & SYST_CSR_CLKSOURCE != 0; + match clk_source_bit { + false => SystClkSource::External, + true => SystClkSource::Core, + } + } + + /// Sets clock source + pub fn set_clock_source(&self, clk_source: SystClkSource) { + match clk_source { + SystClkSource::External => unsafe { self.csr.modify(|v| v & !SYST_CSR_CLKSOURCE) }, + SystClkSource::Core => unsafe { self.csr.modify(|v| v | SYST_CSR_CLKSOURCE) }, + } + } + + /// Checks if the counter wrapped (underflowed) since the last check + pub fn has_wrapped(&self) -> bool { + self.csr.read() & SYST_CSR_COUNTFLAG != 0 + } + + /// Gets reload value + pub fn get_reload(&self) -> u32 { + self.rvr.read() + } + + /// Sets reload value + /// + /// Valid values are between `1` and `0x00ffffff`. + pub fn set_reload(&self, value: u32) { + unsafe { self.rvr.write(value) } + } + + /// Gets current value + pub fn get_current(&self) -> u32 { + self.cvr.read() + } + + /// Clears current value to 0 + /// + /// After calling `clear_current()`, the next call to `has_wrapped()` + /// will return `false`. 
+ pub fn clear_current(&self) { + unsafe { self.cvr.write(0) } + } + + /// Returns the reload value with which the counter would wrap once per 10 + /// ms + /// + /// Returns `0` if the value is not known (e.g. because the clock can + /// change dynamically). + pub fn get_ticks_per_10ms(&self) -> u32 { + self.calib.read() & SYST_COUNTER_MASK + } + + /// Checks if the calibration value is precise + /// + /// Returns `false` if using the reload value returned by + /// `get_ticks_per_10ms()` may result in a period significantly deviating + /// from 10 ms. + pub fn is_precise(&self) -> bool { + self.calib.read() & SYST_CALIB_SKEW == 0 + } + + /// Checks if an external reference clock is available + pub fn has_reference_clock(&self) -> bool { + self.calib.read() & SYST_CALIB_NOREF == 0 + } +} diff --git a/src/peripheral/tpiu.rs b/src/peripheral/tpiu.rs new file mode 100644 index 00000000..7a08805f --- /dev/null +++ b/src/peripheral/tpiu.rs @@ -0,0 +1,29 @@ +//! Trace Port Interface Unit; + +use volatile_register::{RO, RW, WO}; + +/// Register block +#[repr(C)] +pub struct RegisterBlock { + /// Supported Parallel Port Sizes + pub sspsr: RO, + /// Current Parallel Port Size + pub cspsr: RW, + reserved0: [u32; 2], + /// Asynchronous Clock Prescaler + pub acpr: RW, + reserved1: [u32; 55], + /// Selected Pin Control + pub sppr: RW, + reserved2: [u32; 132], + /// Formatter and Flush Control + pub ffcr: RW, + reserved3: [u32; 810], + /// Lock Access + pub lar: WO, + /// Lock Status + pub lsr: RO, + reserved4: [u32; 4], + /// TPIU Type + pub _type: RO, +}
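The patch ends above. As a reading aid, here is a minimal downstream usage sketch of the new singleton API, assuming this is the cortex-m crate and that `peripheral::Peripherals`, the proxy types, and their associated `ptr()` functions are exported exactly as introduced in `src/peripheral/mod.rs`; the function names and the reload value are invented for illustration and are not part of the patch.

// Illustrative sketch only. Assumes the crate is `cortex-m` and exports the
// items added above (`Peripherals`, `SCB`, `syst::SystClkSource`, ...).
extern crate cortex_m;

use cortex_m::peripheral::syst::SystClkSource;
use cortex_m::peripheral::{Peripherals, SCB};

fn init() {
    // `take()` hands out the core peripherals exactly once; after the
    // `CORE_PERIPHERALS` flag is set, every later call returns `None`.
    let p = Peripherals::take().unwrap();

    // Each proxy is a zero-sized handle that `Deref`s to its register block,
    // so the pre-existing register methods keep working on the owned value.
    p.SYST.set_clock_source(SystClkSource::Core);
    p.SYST.set_reload(8_000_000 - 1); // hypothetical: ~1 s tick at 8 MHz
    p.SYST.clear_current();
    p.SYST.enable_counter();
    p.DWT.enable_cycle_counter();
}

fn read_icsr() -> u32 {
    // Register blocks stay reachable without ownership through the associated
    // `ptr()` functions -- the same atomic, side-effect-free read now done in
    // `exception::active()`.
    unsafe { (*SCB::ptr()).icsr.read() }
}

As the NOTE in `mod.rs` says, the `PhantomData<*const ()>` marker is what keeps the proxies `Send` but not `Sync`, so an owned peripheral can be moved into a task or interrupt handler but not shared between contexts without extra synchronisation.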
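The Cortex-M7 cache-maintenance additions in `scb.rs` and `cbp.rs` compose the same way. The sketch below is likewise illustrative, only meaningful under the `armv7m` cfg, and the DMA framing is an invented scenario.

// Illustrative sketch only: these SCB methods exist under `cfg(armv7m)`.
fn enable_caches(p: &cortex_m::peripheral::Peripherals) {
    // The I-cache path needs no extra state; the D-cache path takes CPUID so
    // `invalidate_dcache` can size its set/way loops (deref coercion turns
    // `&p.CPUID` into the `&cpuid::RegisterBlock` the method expects).
    p.SCB.enable_icache();
    p.SCB.enable_dcache(&p.CPUID);
}

fn clean_before_dma(p: &cortex_m::peripheral::Peripherals, buf: &[u8]) {
    // Write back the cache lines covering `buf` before a DMA engine reads the
    // memory; the method rounds the start address down to the 32-byte line.
    p.SCB.clean_dcache_by_address(buf.as_ptr() as usize, buf.len());
}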
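Finally, the `src/itm.rs` hunk at the top of the diff only re-imports `Stim` from its new home in `peripheral::itm`; stimulus ports are driven as before, roughly as in this sketch (port index and payload chosen arbitrarily).

// Illustrative sketch only.
fn log_byte(p: &cortex_m::peripheral::Peripherals, byte: u8) {
    let port = &p.ITM.stim[0]; // stimulus port 0, arbitrary choice
    // Busy-wait until the port's FIFO can take another payload, then write it.
    while !port.is_fifo_ready() {}
    port.write_u8(byte);
}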