sched: add spinlock to sched_note_preemption and nxsched_critmon_preemption

Add spinlock protection to sched_note_preemption and nxsched_critmon_preemption.
Ensures thread-safe access to global state in preemption notification and critical-
section monitoring, preventing race conditions in SMP and interrupt contexts.

Signed-off-by: hujun5 <hujun5@xiaomi.com>
This commit is contained in:
hujun5
2025-05-12 13:56:29 +08:00
committed by Xiang Xiao
parent fa652f9c24
commit bb8d16f422
2 changed files with 19 additions and 1 deletion
+6 -1
View File
@@ -171,7 +171,8 @@ FAR static struct note_driver_s *
static struct note_taskname_s g_note_taskname;
#endif
#if defined(CONFIG_SCHED_INSTRUMENTATION_FILTER)
#if defined(CONFIG_SCHED_INSTRUMENTATION_FILTER) || \
defined(CONFIG_SCHED_INSTRUMENTATION_PREEMPTION)
static spinlock_t g_note_lock;
#endif
@@ -1056,7 +1057,9 @@ void sched_note_preemption(FAR struct tcb_s *tcb, bool locked)
struct note_preempt_s note;
FAR struct note_driver_s **driver;
bool formatted = false;
irqstate_t flags;
flags = spin_lock_irqsave_notrace(&g_note_lock);
for (driver = g_note_drivers; *driver; driver++)
{
if (!note_isenabled(*driver))
@@ -1088,6 +1091,8 @@ void sched_note_preemption(FAR struct tcb_s *tcb, bool locked)
note_add(*driver, &note, sizeof(struct note_preempt_s));
}
spin_unlock_irqrestore_notrace(&g_note_lock, flags);
}
#endif
+13
View File
@@ -86,6 +86,14 @@
# define CHECK_THREAD(pid, elapsed)
#endif
/****************************************************************************
* Private Data
****************************************************************************/
#if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0
static spinlock_t g_crimonitor_lock = SP_UNLOCKED;
#endif
/****************************************************************************
* Public Data
****************************************************************************/
@@ -175,6 +183,9 @@ void nxsched_critmon_preemption(FAR struct tcb_s *tcb, bool state,
FAR void *caller)
{
clock_t current = perf_gettime();
irqstate_t flags;
flags = spin_lock_irqsave_notrace(&g_crimonitor_lock);
/* Are we enabling or disabling pre-emption */
@@ -206,6 +217,8 @@ void nxsched_critmon_preemption(FAR struct tcb_s *tcb, bool state,
g_preemp_max[cpu] = elapsed;
}
}
spin_unlock_irqrestore_notrace(&g_crimonitor_lock, flags);
}
#endif /* CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0 */