From 29e514b27607588a5a2ebab771e1f4494d6f4bea Mon Sep 17 00:00:00 2001
From: hujun5
Date: Fri, 30 Jan 2026 19:06:54 +0800
Subject: [PATCH] clock: Correct the usage of atomic64_t atomic functions

Replace the CONFIG_SYSTEM_TIME64-conditional atomic64 operations with a
unified seqlock-based approach for managing the system tick counter.
This simplifies the code by eliminating the configuration-dependent
branches and provides a more robust thread-safe access pattern.

Signed-off-by: hujun5
---
 arch/arm/include/armv8-m/barriers.h |  4 +--
 sched/clock/clock_sched_ticks.c     | 41 +++++++++--------------------
 2 files changed, 15 insertions(+), 30 deletions(-)

diff --git a/arch/arm/include/armv8-m/barriers.h b/arch/arm/include/armv8-m/barriers.h
index 8c635989923..2dcec81afc0 100644
--- a/arch/arm/include/armv8-m/barriers.h
+++ b/arch/arm/include/armv8-m/barriers.h
@@ -35,8 +35,8 @@
 
 #define arm_isb() __asm__ __volatile__ ("isb " : : : "memory")
 #define arm_dmb() __asm__ __volatile__ ("dmb " : : : "memory")
-#define arm_rmb() __asm__ __volatile__ ("dmb ishld" : : : "memory")
-#define arm_wmb() __asm__ __volatile__ ("dmb ishst" : : : "memory")
+#define arm_rmb() __asm__ __volatile__ ("dmb " : : : "memory")
+#define arm_wmb() __asm__ __volatile__ ("dmb " : : : "memory")
 #define arm_dsb(n) __asm__ __volatile__ ("dsb " #n : : : "memory")
 
 #define UP_ISB() arm_isb()
diff --git a/sched/clock/clock_sched_ticks.c b/sched/clock/clock_sched_ticks.c
index 9836ae50290..353e4f3e0b1 100644
--- a/sched/clock/clock_sched_ticks.c
+++ b/sched/clock/clock_sched_ticks.c
@@ -31,6 +31,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "clock/clock.h"
 
@@ -39,6 +40,7 @@
  ****************************************************************************/
 static volatile clock_t g_system_ticks = INITIAL_SYSTEM_TIMER_TICKS;
+static seqcount_t g_system_tick_lock = SEQLOCK_INITIALIZER;
 
 /****************************************************************************
  * Public Functions
  ****************************************************************************/
@@ -62,13 +64,13 @@
 
 void clock_increase_sched_ticks(clock_t ticks)
 {
+  irqstate_t flags;
+
   /* Increment the per-tick scheduler counter */
 
-#ifdef CONFIG_SYSTEM_TIME64
-  atomic64_fetch_add((FAR atomic64_t *)&g_system_ticks, ticks);
-#else
-  atomic_fetch_add((FAR atomic_t *)&g_system_ticks, ticks);
-#endif
+  flags = write_seqlock_irqsave(&g_system_tick_lock);
+  g_system_ticks += ticks;
+  write_sequnlock_irqrestore(&g_system_tick_lock, flags);
 }
 
 /****************************************************************************
@@ -86,32 +88,15 @@ void clock_increase_sched_ticks(clock_t ticks)
 
 clock_t clock_get_sched_ticks(void)
 {
-#ifdef CONFIG_SYSTEM_TIME64
-  clock_t sample;
-  clock_t verify;
-
-  /* 64-bit accesses are not atomic on most architectures. The following
-   * loop samples the 64-bit counter twice and retries in the rare case
-   * that a 32-bit rollover occurs between samples.
-   *
-   * If no 32-bit rollover occurs:
-   * - The MS 32 bits of both samples will be identical, and
-   * - The LS 32 bits of the second sample will be greater than or equal
-   *   to those of the first.
-   */
+  clock_t ret;
+  unsigned int seq;
 
   do
     {
-      verify = g_system_ticks;
-      sample = g_system_ticks;
+      seq = read_seqbegin(&g_system_tick_lock);
+      ret = g_system_ticks;
     }
-  while ((sample & TIMER_MASK32) < (verify & TIMER_MASK32) ||
-         (sample & ~TIMER_MASK32) != (verify & ~TIMER_MASK32));
+  while (read_seqretry(&g_system_tick_lock, seq));
 
-  return sample;
-#else
-  /* On 32-bit systems, atomic access is guaranteed */
-
-  return g_system_ticks;
-#endif
+  return ret;
 }
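
For reviewers unfamiliar with the pattern, here is a minimal, self-contained
sketch of the seqlock read-retry idea that the patch adopts. It is not the
NuttX seqlock API used above (read_seqbegin, read_seqretry,
write_seqlock_irqsave, write_sequnlock_irqrestore); it models the same idea
with C11 atomics so it can be compiled and run on its own. All names here
(g_seq, g_count, counter_add, counter_read) are hypothetical, the writer is
assumed to be serialized externally (the patch does this by disabling
interrupts around the update), and the memory ordering is deliberately
conservative.

/* Illustrative sketch only -- not the NuttX implementation.  The writer
 * makes the sequence odd while it updates the counter; the reader retries
 * whenever it saw an odd sequence or the sequence changed during the read.
 */

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint g_seq;            /* Even: idle, odd: writer active */
static _Atomic uint64_t g_count;     /* Protected 64-bit tick counter */

static void counter_add(uint64_t n)  /* Writer (assumed serialized) */
{
  atomic_fetch_add(&g_seq, 1);       /* Sequence becomes odd */
  atomic_fetch_add_explicit(&g_count, n, memory_order_relaxed);
  atomic_fetch_add(&g_seq, 1);       /* Sequence becomes even again */
}

static uint64_t counter_read(void)   /* Reader: lock-free, may retry */
{
  unsigned int seq;
  uint64_t val;

  do
    {
      seq = atomic_load(&g_seq);
      val = atomic_load_explicit(&g_count, memory_order_relaxed);

      /* Keep the data read ordered before the sequence recheck */

      atomic_thread_fence(memory_order_seq_cst);
    }
  while ((seq & 1) != 0 || atomic_load(&g_seq) != seq);

  return val;
}

int main(void)
{
  counter_add(3);
  counter_add(4);
  printf("ticks = %llu\n", (unsigned long long)counter_read());
  return 0;
}

Compared with the removed double-sample/rollover check, the sequence counter
makes the reader's retry condition independent of the width of clock_t, which
is why the CONFIG_SYSTEM_TIME64 branches can be dropped in both functions.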