Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
125 changes: 125 additions & 0 deletions include/platform/irq-latency.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,125 @@
/* Copyright (c) 2026 The F9 Microkernel Project. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/

#ifndef PLATFORM_IRQ_LATENCY_H_
#define PLATFORM_IRQ_LATENCY_H_

#include <stdint.h>

/* DWT (Data Watchpoint and Trace) registers for cycle counting */
#define DWT_CTRL ((volatile uint32_t *) 0xE0001000)
#define DWT_CYCCNT ((volatile uint32_t *) 0xE0001004)
#define DWT_CTRL_CYCCNTENA (1 << 0) /* CYCCNT enable bit in DWT_CTRL */

/* DEMCR (Debug Exception and Monitor Control Register) */
#define DEMCR ((volatile uint32_t *) 0xE000EDFC)
#define DEMCR_TRCENA (1 << 24) /* Trace enable; set before touching DWT */

/**
* @file irq_latency.h
* @brief Interrupt latency measurement and profiling infrastructure
*
* Provides cycle-accurate latency tracking for zero-latency ISRs and
* standard IRQs. Enables validation of BASEPRI-based zero-latency
* interrupt performance (<10 cycle target).
*
* Usage:
* 1. Call latency_sample_start() at ISR entry
* 2. Call latency_sample_end(priority, irq_num) at ISR exit
* 3. View statistics via KDB 'L' command
*/

/**
 * Latency statistics per interrupt priority level.
 *
 * NOTE(review): 'sum' is a 32-bit accumulator and will wrap after
 * ~2^32 total cycles of recorded latency, after which 'avg' derived
 * from it is misleading -- acceptable for a diagnostic counter, but
 * worth knowing when reading long-run numbers.
 */
typedef struct {
    uint32_t count; /* Number of samples recorded */
    uint32_t min;   /* Minimum latency seen (cycles) */
    uint32_t max;   /* Maximum latency seen (cycles) */
    uint32_t sum;   /* Running sum of samples, for average calculation */
    uint32_t avg;   /* Average latency (cycles); derived from sum/count */
} latency_stats_t;

/**
 * Read the current cycle count from DWT_CYCCNT.
 *
 * NOTE(review): this is a raw register read -- it does NOT check
 * DWT_CTRL_CYCCNTENA, so if latency_init() has not enabled the
 * counter the value returned is whatever the register holds
 * (typically 0 after reset, but that is not guaranteed by this code).
 */
static inline uint32_t get_cycle_count(void)
{
    return *DWT_CYCCNT;
}

/**
* Enable DWT cycle counter for latency measurements.
* Called during system initialization.
*/
void latency_init(void);

/**
* Record latency sample for an interrupt.
*
* @param priority Interrupt priority (0x0-0xF)
* @param irq_num IRQ number (-15 to 239)
* @param cycles Measured latency in cycles
*/
void latency_record(uint8_t priority, int16_t irq_num, uint32_t cycles);

/**
* Get latency statistics for a priority level.
*
* @param priority Interrupt priority (0x0-0xF)
* @return Pointer to statistics structure
*/
const latency_stats_t *latency_get_stats(uint8_t priority);

/**
* Get a best-effort atomic snapshot of latency statistics.
*
* Uses relaxed atomics only; intended for diagnostic reads outside ISR
* context. Returns 1 on success, 0 on invalid input.
*/
int latency_get_stats_snapshot(uint8_t priority, latency_stats_t *out);

/**
* Reset all latency statistics.
*/
void latency_reset(void);

/**
 * Read the active exception number from the IPSR special register.
 *
 * Returns 0 in thread mode, 1-15 for system exceptions, and 16+ for
 * external IRQs.
 */
static inline uint32_t get_irq_number(void)
{
    uint32_t exception_num;
    __asm__ __volatile__("mrs %0, ipsr" : "=r"(exception_num));
    /* Only the low 9 bits of IPSR hold the exception number. */
    return exception_num & 0x1FFu;
}

/**
 * Begin a latency measurement -- call at ISR entry.
 *
 * @return Cycle-counter timestamp to hand to latency_sample_end().
 */
static inline uint32_t latency_sample_start(void)
{
    uint32_t entry_stamp = get_cycle_count();
    return entry_stamp;
}

/**
 * Finish a latency measurement -- call at ISR exit.
 *
 * @param start_cycles Timestamp obtained from latency_sample_start()
 * @param priority     Interrupt priority level (0x0-0xF)
 * @param irq_num      IRQ number as read from IPSR
 */
static inline void latency_sample_end(uint32_t start_cycles,
                                      uint8_t priority,
                                      int16_t irq_num)
{
    /* Unsigned subtraction stays correct even if CYCCNT wrapped once
     * between the two samples. */
    uint32_t delta = get_cycle_count() - start_cycles;
    latency_record(priority, irq_num, delta);
}

#endif /* PLATFORM_IRQ_LATENCY_H_ */
76 changes: 75 additions & 1 deletion include/platform/irq.h
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,34 @@

void irq_init(void);

/*
* Interrupt Priority Levels (ARM Cortex-M 4-bit priorities)
*/
#define IRQ_PRIO_ZERO_LATENCY_MAX 0x2 /* Highest priority, never masked */
#define IRQ_PRIO_SYSTICK 0x3 /* System timer */
#define IRQ_PRIO_KERNEL_MASK 0x40 /* BASEPRI mask (0x4 << 4) */
#define IRQ_PRIO_USER_DEFAULT 0x8 /* Default user IRQ priority */
#define IRQ_PRIO_LOWEST 0xF /* SVCall, PendSV */

/*
* System state tracking for ISR context.
* 0 = Thread mode (PSP), 1+ = Handler mode (MSP, tracks nesting depth).
*/
extern volatile uint32_t irq_system_state;

/*
 * Report whether execution is currently inside an exception handler.
 * Reads the hardware IPSR register (a single MRS instruction), so the
 * answer involves no memory access and cannot race with anything.
 */
static inline bool in_isr_context(void)
{
    uint32_t active_exception = IPSR();
    return active_exception != 0u;
}

/*
* PRIMASK-based critical sections (blocks ALL interrupts).
*/
static inline void irq_disable(void)
{
__asm__ __volatile__("cpsid i" ::: "memory");
Expand Down Expand Up @@ -45,6 +73,53 @@ static inline void irq_restore_flags(uint32_t flags)
__asm__ __volatile__("msr primask, %0" ::"r"(flags) : "memory");
}

/*
 * BASEPRI-based critical sections (blocks interrupts >= priority level).
 * Zero-latency ISRs at priority 0x0-0x2 can preempt kernel critical sections.
 */
static inline void irq_disable_below(uint8_t priority)
{
    /* The 4-bit priority occupies the top nibble of the 8-bit BASEPRI
     * field; this is equivalent to the (p << 4) & 0xFF form. */
    uint32_t mask = (uint32_t)(priority & 0xFu) << 4;
    __asm__ __volatile__("msr basepri, %0" ::"r"(mask) : "memory");
}

/* Clear BASEPRI entirely so no interrupt priority is masked. */
static inline void irq_enable_all(void)
{
    uint32_t unmasked = 0u;
    __asm__ __volatile__("msr basepri, %0" ::"r"(unmasked) : "memory");
}

/*
 * Raise BASEPRI to the given 4-bit priority level and return the
 * previous BASEPRI value for a later irq_restore_basepri().
 */
static inline uint32_t irq_save_basepri(uint8_t priority)
{
    uint32_t old_mask;
    uint32_t raised = (uint32_t)(priority & 0xFu) << 4;
    __asm__ __volatile__(
        "mrs %0, basepri\n\t"
        "msr basepri, %1"
        : "=r"(old_mask)
        : "r"(raised)
        : "memory");
    return old_mask;
}

/* Write a BASEPRI value previously saved by irq_save_basepri(). */
static inline void irq_restore_basepri(uint32_t basepri)
{
    uint32_t restored = basepri;
    __asm__ __volatile__("msr basepri, %0" ::"r"(restored) : "memory");
}

/*
 * Kernel critical section (masks interrupts >= 0x4, allows 0x0-0x3).
 * Use this as the default for scheduler, IPC, and memory operations.
 */
static inline uint32_t irq_kernel_critical_enter(void)
{
    /* Convert the pre-shifted mask back to a 4-bit priority level. */
    uint8_t kernel_prio = IRQ_PRIO_KERNEL_MASK >> 4;
    return irq_save_basepri(kernel_prio);
}

static inline void irq_kernel_critical_exit(uint32_t basepri)
{
    irq_restore_basepri(basepri);
}

static inline void irq_svc(void)
{
__asm__ __volatile__("svc #0");
Expand Down Expand Up @@ -242,7 +317,6 @@ extern volatile uint32_t __irq_saved_regs[8];
request_schedule(); \
irq_return(); \
}

extern volatile tcb_t *current;

#endif /* PLATFORM_IRQ_H_ */
3 changes: 2 additions & 1 deletion kernel/build.mk
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,8 @@ kernel-y = \
interrupt.o

KDB-$(CONFIG_KDB) = \
kdb.o
kdb.o \
kdb-latency.o

KPROBES-$(CONFIG_KPROBES) = \
kprobes.o
Expand Down
31 changes: 24 additions & 7 deletions kernel/ipc.c
Original file line number Diff line number Diff line change
Expand Up @@ -214,18 +214,19 @@ static void do_ipc(tcb_t *from, tcb_t *to)
* CONSTRAINT: Callback MUST NOT destroy its own TCB.
*/
if (to->ipc_notify && to->notify_pending && to->notify_depth < 3) {
uint32_t irq_flags;
uint32_t basepri;
uint8_t generation_before;
notify_handler_t callback;

/* Atomically increment depth and capture generation.
* IRQ masking prevents race with nested interrupt-driven IPC.
* BASEPRI masking prevents race with nested interrupt-driven IPC.
* Zero-latency ISRs (0x0-0x2) can still preempt during this operation.
*/
irq_flags = irq_save_flags();
basepri = irq_kernel_critical_enter();
to->notify_depth++;
generation_before = to->notify_generation;
callback = to->ipc_notify;
irq_restore_flags(irq_flags);
irq_kernel_critical_exit(basepri);

/* Recursion protection: prevent unbounded callback nesting.
* Max depth 3 allows: serial → network → timer notification chains.
Expand All @@ -245,11 +246,27 @@ static void do_ipc(tcb_t *from, tcb_t *to)
/* Atomically decrement depth only if TCB still valid.
* Generation counter detects TCB destruction during callback.
* If TCB was destroyed, skip depth decrement (would be use-after-free).
*
* SAFETY: We must verify 'to' is still a valid TCB before accessing it.
* Search thread_map to confirm the pointer hasn't been freed and
* reused.
*/
irq_flags = irq_save_flags();
if (to->notify_generation == generation_before)
basepri = irq_kernel_critical_enter();

/* Verify TCB is still valid by checking thread_map */
int tcb_valid = 0;
for (int i = 1; i < thread_count; ++i) {
if (thread_map[i] == to) {
tcb_valid = 1;
break;
}
}

/* Only decrement if TCB is valid AND generation hasn't changed */
if (tcb_valid && to->notify_generation == generation_before)
to->notify_depth--;
irq_restore_flags(irq_flags);

irq_kernel_critical_exit(basepri);

/* Check for preemption after notification.
* Callback may have made higher-priority threads runnable.
Expand Down
67 changes: 67 additions & 0 deletions kernel/kdb-latency.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
/* Copyright (c) 2026 The F9 Microkernel Project. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/

#include <debug.h>
#include <platform/irq-latency.h>

/**
 * KDB command: Display interrupt latency statistics.
 *
 * Shows min/avg/max latency for each priority level, highlighting
 * zero-latency ISRs (0x0-0x2) and standard user IRQs.
 */

/* Map a 4-bit priority level to its human-readable category. */
static const char *prio_type_name(int prio)
{
    if (prio <= 0x2)
        return "Zero-latency ISR";
    if (prio == 0x3)
        return "SysTick";
    if (prio <= 0xE)
        return "User IRQ";
    return "SVCall/PendSV";
}

void kdb_show_latency(void)
{
    latency_stats_t snap;
    int printed_any = 0;

    dbg_printf(DL_KDB, "\n=== Interrupt Latency Statistics ===\n");
    dbg_printf(DL_KDB, "Prio Type Count Min Avg Max\n");
    dbg_printf(DL_KDB, "---- ---------------- ------ ----- ----- -----\n");

    for (int prio = 0; prio < 16; prio++) {
        if (!latency_get_stats_snapshot(prio, &snap))
            continue;

        if (snap.count == 0)
            continue;

        printed_any = 1;

        /* Average is derived on the fly from sum/count. */
        snap.avg = snap.count > 0 ? (snap.sum / snap.count) : 0;
        dbg_printf(DL_KDB, "0x%X %-16s %6u %5u %5u %5u\n", prio,
                   prio_type_name(prio), snap.count, snap.min, snap.avg,
                   snap.max);
    }

    if (!printed_any) {
        dbg_printf(DL_KDB, "(No latency samples recorded yet)\n");
    }

    dbg_printf(DL_KDB, "\nNotes:\n");
    dbg_printf(DL_KDB, " - Zero-latency ISRs (0x0-0x2) target <10 cycles\n");
    dbg_printf(DL_KDB, " - User IRQs (0x4-0xE) masked during kernel ops\n");
    dbg_printf(DL_KDB, " - Use 'r' to reset statistics\n");
    dbg_printf(DL_KDB, "\n");
}

/**
 * KDB command: Reset latency statistics.
 *
 * Clears every per-priority bucket, then confirms on the console.
 */
void kdb_reset_latency(void)
{
    latency_reset();
    dbg_printf(DL_KDB, "Latency statistics reset.\n");
}
10 changes: 10 additions & 0 deletions kernel/kdb.c
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,8 @@ extern void kdb_dump_as(void);
extern void kdb_show_sampling(void);
extern void kdb_show_tickless_verify(void);
extern void kdb_dump_notifications(void);
extern void kdb_show_latency(void);
extern void kdb_reset_latency(void);

struct kdb_t kdb_functions[] = {
{.option = 'K',
Expand Down Expand Up @@ -84,6 +86,14 @@ struct kdb_t kdb_functions[] = {
.menuentry = "show tickless scheduling stat",
.function = kdb_show_tickless_verify},
#endif
{.option = 'L',
.name = "LATENCY",
.menuentry = "show interrupt latency",
.function = kdb_show_latency},
{.option = 'r',
.name = "RESET LATENCY",
.menuentry = "reset latency statistics",
.function = kdb_reset_latency},
/* Insert KDB functions here */
};

Expand Down
Loading
Loading