@mattico
Created October 28, 2021 15:30
An example Multi-RTT Logger implementation for knurling-rs/defmt
channel.rs:

use core::{
    ptr,
    sync::atomic::{AtomicUsize, Ordering},
};
// Split the 512 KiB AXI SRAM among the 16 regular priorities.
pub const LARGE_SIZE: usize = 512 * 1024 / 16;
// Use smaller buffers for NMI/HardFault, which are unlikely to be used.
pub const SMALL_SIZE: usize = 512;

pub const MODE_MASK: usize = 0b11;
/// Block the application if the RTT buffer is full; wait for the host to read data.
pub const MODE_BLOCK_IF_FULL: usize = 2;
/// Don't block if the RTT buffer is full. Truncate data to output as much as fits.
pub const MODE_NON_BLOCKING_TRIM: usize = 1;
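// Added note: these values match SEGGER's RTT operating modes (0 = skip when the buffer is
// full, 1 = trim to the available space, 2 = block until the host drains the buffer), which is
// how host-side tools interpret the low bits of `flags`.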
#[repr(C)]
pub(crate) struct Channel {
    pub name: *const u8,
    pub buffer: *mut u8,
    pub size: usize,
    pub write: AtomicUsize,
    pub read: AtomicUsize,
    /// Channel properties.
    ///
    /// Currently, only the lowest 2 bits are used to set the channel mode (see constants above).
    pub flags: AtomicUsize,
}
impl Channel {
    pub const fn new(name: &'static [u8], buffer: *mut u8, large: bool) -> Self {
        Channel {
            name: name as *const _ as *const u8,
            buffer,
            size: if large { LARGE_SIZE } else { SMALL_SIZE },
            write: AtomicUsize::new(0),
            read: AtomicUsize::new(0),
            flags: AtomicUsize::new(MODE_NON_BLOCKING_TRIM),
        }
    }
    pub fn write_all(&self, mut bytes: &[u8]) {
        // When the SIZE is a power of 2 the modulo can be converted into a bitwise AND, but
        // only if the code is specialized for the specific SIZE.
        let write = match (self.blocking_enabled(), self.size) {
            (true, LARGE_SIZE) => Channel::blocking_write::<LARGE_SIZE>,
            (true, SMALL_SIZE) => Channel::blocking_write::<SMALL_SIZE>,
            (false, LARGE_SIZE) => Channel::nonblocking_write::<LARGE_SIZE>,
            (false, SMALL_SIZE) => Channel::nonblocking_write::<SMALL_SIZE>,
            _ => defmt::unreachable!(),
        };

        while !bytes.is_empty() {
            let consumed = write(self, bytes);
            if consumed != 0 {
                bytes = &bytes[consumed..];
            }
        }
    }
    fn blocking_write<const SIZE: usize>(&self, bytes: &[u8]) -> usize {
        if bytes.is_empty() {
            return 0;
        }

        let read = self.read.load(Ordering::Relaxed);
        let write = self.write.load(Ordering::Acquire);
        let available = if read > write {
            read - write - 1
        } else if read == 0 {
            SIZE - write - 1
        } else {
            SIZE - write
        };
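        // Worked example for `available` (comment added for illustration): one slot is always
        // kept free so that `read == write` unambiguously means "empty". With SIZE = 8:
        //   read = 5, write = 2  =>  read > write:  available = 5 - 2 - 1 = 2 (indices 2..=3)
        //   read = 0, write = 6  =>  read == 0:     available = 8 - 6 - 1 = 1 (index 6 only;
        //                            advancing write to 0 == read would look empty)
        //   read = 3, write = 6  =>  otherwise:     available = 8 - 6 = 2 (indices 6..=7; the
        //                            wrap-around is handled by the next call)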
        if available == 0 {
            return 0;
        }

        let cursor = write;
        let len = bytes.len().min(available);

        unsafe {
            if cursor + len > SIZE {
                // split memcpy
                let pivot = SIZE - cursor;
                ptr::copy_nonoverlapping(bytes.as_ptr(), self.buffer.add(cursor), pivot);
                ptr::copy_nonoverlapping(bytes.as_ptr().add(pivot), self.buffer, len - pivot);
            } else {
                // single memcpy
                ptr::copy_nonoverlapping(bytes.as_ptr(), self.buffer.add(cursor), len);
            }
        }

        self.write
            .store(write.wrapping_add(len) % SIZE, Ordering::Release);

        len
    }
    fn nonblocking_write<const SIZE: usize>(&self, bytes: &[u8]) -> usize {
        let write = self.write.load(Ordering::Acquire);
        let cursor = write;
        // NOTE truncate at SIZE to avoid more than one "wrap-around" in a single `write` call
        let len = bytes.len().min(SIZE);

        unsafe {
            if cursor + len > SIZE {
                // split memcpy
                let pivot = SIZE - cursor;
                ptr::copy_nonoverlapping(bytes.as_ptr(), self.buffer.add(cursor), pivot);
                ptr::copy_nonoverlapping(bytes.as_ptr().add(pivot), self.buffer, len - pivot);
            } else {
                // single memcpy
                ptr::copy_nonoverlapping(bytes.as_ptr(), self.buffer.add(cursor), len);
            }
        }

        self.write
            .store(write.wrapping_add(len) % SIZE, Ordering::Release);

        len
    }
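    // Added note: unlike `blocking_write`, the non-blocking path never consults the `read`
    // pointer; if the host falls behind, unread data is simply overwritten. This is why the
    // non-blocking channels may lose data when written to too quickly (see the module docs).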
    pub fn flush(&self) {
        // Busy-wait until the read pointer catches up with the write pointer.
        let read = || self.read.load(Ordering::Relaxed);
        let write = || self.write.load(Ordering::Relaxed);
        while read() != write() {
            core::hint::spin_loop();
        }
    }

    pub fn blocking_enabled(&self) -> bool {
        // We assume that a host is connected if we are in blocking mode; this is what
        // `probe-run` does.
        self.flags.load(Ordering::Relaxed) & MODE_MASK == MODE_BLOCK_IF_FULL
    }
}
Root module:

// This module contains code adapted from the Knurling project
// https://github.com/knurling-rs/defmt
// Used under the terms of the MIT license.
//
// Permission is hereby granted, free of charge, to any
// person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the
// Software without restriction, including without
// limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice
// shall be included in all copies or substantial portions
// of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! [`defmt`](https://github.com/knurling-rs/defmt) global logger over multiple RTT channels.
//!
//! Global loggers usually use an interrupt-disabling critical section to prevent data races and
//! corruption of log frames. However, we want to avoid blocking or disabling interrupts for long
//! periods, as we rely on fast interrupt processing for our start-of-scan interrupt. To that end,
//! we set up a separate RTT logging channel per execution priority level, so threads of execution
//! at different priority levels can't interfere with each other. We split the large AXI SRAM into
//! 16 buffers for the 16 normal execution priorities, and use a bit of regular RAM for the two
//! special priorities.
//!
//! # Blocking/Non-blocking
//!
//! `probe-run` puts RTT channel 0 into blocking mode to avoid losing data. The remaining channels
//! stay non-blocking, so they may lose data if they are written to too quickly.
//!
//! As a consequence, this implementation may block forever if `probe-run` disconnects at runtime:
//! the RTT buffer fills up and writing eventually halts program execution.
//!
//! `defmt::flush` would also block forever in that case.
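//!
//! A minimal usage sketch (hypothetical application code, not part of this crate): once this
//! crate is linked in, the `defmt` macros need no per-call setup, and each execution priority
//! transparently logs to its own channel.
//!
//! ```ignore
//! #[cortex_m_rt::entry]
//! fn main() -> ! {
//!     defmt::info!("hello from thread mode (priority 0, channel 0)");
//!     loop {
//!         cortex_m::asm::wfi();
//!     }
//! }
//! ```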
mod channel;
mod panic;
use channel::Channel;
const PRIO_LEVELS: usize = MAX_PRIORITY as usize + 1;
// `defmt::Encoder::new` is a `const fn`, so a `const` item lets us use array-repeat syntax even
// though `Encoder` is not `Copy`.
static mut ENCODERS: [defmt::Encoder; PRIO_LEVELS] = {
    const NEW: defmt::Encoder = defmt::Encoder::new();
    [NEW; PRIO_LEVELS]
};
#[defmt::global_logger]
struct Logger;

impl Logger {
    pub fn host_is_connected() -> bool {
        unsafe { handles()[0].blocking_enabled() }
    }
}
unsafe impl defmt::Logger for Logger {
    fn acquire() {
        let priority = current_priority() as usize;
        let handle = unsafe { &handles()[priority] };
        unsafe { ENCODERS[priority].start_frame(|b| handle.write_all(b)) }
    }

    unsafe fn flush() {
        if !Self::host_is_connected() {
            return;
        }
        let priority = current_priority() as usize;
        handles()[priority].flush();
    }

    unsafe fn release() {
        let priority = current_priority() as usize;
        let handle = &handles()[priority];
        ENCODERS[priority].end_frame(|b| handle.write_all(b));
    }

    unsafe fn write(bytes: &[u8]) {
        let priority = current_priority() as usize;
        let handle = &handles()[priority];
        ENCODERS[priority].write(bytes, |b| handle.write_all(b));
    }
}
#[repr(C)]
struct Header {
    id: [u8; 16],
    max_up_channels: usize,
    max_down_channels: usize,
    up_channels: [Channel; CHANNELS],
}

const CHANNELS: usize = 18;
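// Added background: host-side tools locate the RTT control block either via the `_SEGGER_RTT`
// symbol in the ELF or by scanning target RAM for the "SEGGER RTT" ID string in `Header::id`,
// then read the per-channel ring buffers through the debug port while the core keeps running.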
// make sure we only get shared references to the header/channel (avoid UB)
/// # Safety
/// `Channel` API is not re-entrant; this handle should not be held from different execution
/// contexts (e.g. thread-mode, interrupt context)
unsafe fn handles() -> &'static [Channel; CHANNELS] {
    // NOTE the `rtt-target` API is too permissive. It allows writing arbitrary data to any
    // channel (`set_print_channel` + `rprint*`) and that can corrupt defmt log frames.
    // So we declare the RTT control block here and make it impossible to use `rtt-target`
    // together with this crate.
    #[no_mangle]
    static mut _SEGGER_RTT: Header = Header {
        id: *b"SEGGER RTT\0\0\0\0\0\0",
        max_up_channels: CHANNELS,
        max_down_channels: 0,
        up_channels: [
            Channel::new(NAME, unsafe { &mut BUFS[0] as *mut _ as *mut u8 }, true),
            Channel::new(NAME, unsafe { &mut BUFS[1] as *mut _ as *mut u8 }, true),
            Channel::new(NAME, unsafe { &mut BUFS[2] as *mut _ as *mut u8 }, true),
            Channel::new(NAME, unsafe { &mut BUFS[3] as *mut _ as *mut u8 }, true),
            Channel::new(NAME, unsafe { &mut BUFS[4] as *mut _ as *mut u8 }, true),
            Channel::new(NAME, unsafe { &mut BUFS[5] as *mut _ as *mut u8 }, true),
            Channel::new(NAME, unsafe { &mut BUFS[6] as *mut _ as *mut u8 }, true),
            Channel::new(NAME, unsafe { &mut BUFS[7] as *mut _ as *mut u8 }, true),
            Channel::new(NAME, unsafe { &mut BUFS[8] as *mut _ as *mut u8 }, true),
            Channel::new(NAME, unsafe { &mut BUFS[9] as *mut _ as *mut u8 }, true),
            Channel::new(NAME, unsafe { &mut BUFS[10] as *mut _ as *mut u8 }, true),
            Channel::new(NAME, unsafe { &mut BUFS[11] as *mut _ as *mut u8 }, true),
            Channel::new(NAME, unsafe { &mut BUFS[12] as *mut _ as *mut u8 }, true),
            Channel::new(NAME, unsafe { &mut BUFS[13] as *mut _ as *mut u8 }, true),
            Channel::new(NAME, unsafe { &mut BUFS[14] as *mut _ as *mut u8 }, true),
            Channel::new(NAME, unsafe { &mut BUFS[15] as *mut _ as *mut u8 }, true),
            // The two highest priorities (HardFault and NMI) shouldn't be used in normal
            // operation, so they get small buffers, leaving the full 32 KiB buffers for the
            // regular priorities.
            Channel::new(
                NAME,
                unsafe { &mut SMALL_BUFS[0] as *mut _ as *mut u8 },
                false,
            ),
            Channel::new(
                NAME,
                unsafe { &mut SMALL_BUFS[1] as *mut _ as *mut u8 },
                false,
            ),
        ],
    };
    use channel::{LARGE_SIZE, SMALL_SIZE};

    #[link_section = ".axisram.buffers"]
    static mut BUFS: [[u8; LARGE_SIZE]; CHANNELS - 2] = [[0u8; LARGE_SIZE]; CHANNELS - 2];
    static mut SMALL_BUFS: [[u8; SMALL_SIZE]; 2] = [[0u8; SMALL_SIZE]; 2];
    static NAME: &[u8] = b"defmt\0";

    &_SEGGER_RTT.up_channels
}
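// Added note on the memory math: the 16 large channels at 32 KiB each (LARGE_SIZE =
// 512 * 1024 / 16) exactly fill the 512 KiB AXI SRAM region placed in `.axisram.buffers`,
// while the two small NMI/HardFault channels cost only 2 * 512 B of regular RAM.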
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
struct InterruptNumber(u8);

unsafe impl cortex_m::interrupt::Nr for InterruptNumber {
    fn nr(&self) -> u8 {
        self.0
    }
}

pub const MAX_PRIORITY: u8 = 17;
/// Returns the current execution priority.
///
/// **NOTE:** This does not return hardware priority levels but converts them to an easier form.
/// Higher numbers represent higher priorities, starting at zero for thread mode. The max regular
/// priority is 15; above that are HardFault (16) and NonMaskableInt (17).
pub fn current_priority() -> u8 {
    use core::arch::asm;
    use cortex_m::peripheral::scb::{Exception, SystemHandler, VectActive};
    use cortex_m::peripheral::{NVIC, SCB};
    use stm32h7xx_hal::pac::NVIC_PRIO_BITS;

    // Reading IPSR should be slightly faster than reading SCB.ICSR.
    let ipsr: u32;
    unsafe {
        asm!("mrs {}, IPSR", out(reg) ipsr);
    }

    let vect_active = VectActive::from(ipsr as u8).unwrap_or(VectActive::ThreadMode);
    match vect_active {
        VectActive::ThreadMode => 0,
        VectActive::Exception(ex) => {
            let sysh = match ex {
                Exception::MemoryManagement => SystemHandler::MemoryManagement,
                Exception::BusFault => SystemHandler::BusFault,
                Exception::UsageFault => SystemHandler::UsageFault,
                Exception::SVCall => SystemHandler::SVCall,
                Exception::DebugMonitor => SystemHandler::DebugMonitor,
                Exception::PendSV => SystemHandler::PendSV,
                Exception::SysTick => SystemHandler::SysTick,
                Exception::HardFault => return 16,
                Exception::NonMaskableInt => return 17,
            };
            16 - (SCB::get_priority(sysh) >> NVIC_PRIO_BITS)
        }
        VectActive::Interrupt { irqn } => {
            // `VectActive::from` has already subtracted the 16 core exception slots, so `irqn`
            // is the device IRQ number and can be passed through directly.
            16 - (NVIC::get_priority(InterruptNumber(irqn)) >> NVIC_PRIO_BITS)
        }
    }
}
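// Worked example of the mapping above (added note; NVIC_PRIO_BITS = 4 on the STM32H7, so the
// four significant priority bits live in the upper nibble and numerically lower raw values
// preempt): a handler configured with raw priority 0xF0 maps to 16 - 15 = 1, 0x70 maps to
// 16 - 7 = 9, and 0x10 maps to 16 - 1 = 15; thread mode is 0, HardFault 16, and NonMaskableInt
// 17, matching the 18 channels and encoders.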
panic.rs:

use core::panic::PanicInfo;
use core::sync::atomic::{AtomicBool, Ordering};
use cortex_m::asm;
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
    static PANICKED: AtomicBool = AtomicBool::new(false);

    cortex_m::interrupt::disable();

    // Guard against infinite recursion, just in case.
    if !PANICKED.load(Ordering::Relaxed) {
        PANICKED.store(true, Ordering::Relaxed);
        defmt::error!("{:?}", defmt::Display2Format(info));
    }

    unsafe {
        for channel in super::handles() {
            channel.flush();
        }
    }

    // If `UsageFault` is enabled, we disable that first, since otherwise `udf` will cause that
    // exception instead of `HardFault`.
    #[cfg(not(any(armv6m, armv8m_base)))]
    {
        const SHCSR: *mut u32 = 0xE000ED24usize as _;
        const USGFAULTENA: usize = 18;
        unsafe {
            let mut shcsr = core::ptr::read_volatile(SHCSR);
            shcsr &= !(1 << USGFAULTENA);
            core::ptr::write_volatile(SHCSR, shcsr);
        }
    }

    // Trigger a `HardFault` via the `udf` instruction.
    asm::udf();
}
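// Added note: raising a `HardFault` via `udf` after flushing lets an attached debugger such as
// `probe-run` detect the crash, print a backtrace, and exit with an error code.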