clock: Correct the usage of atomic64_t atomic functions
Replace the CONFIG_SYSTEM_TIME64 conditional atomic64 operations with a unified seqlock-based approach for managing the system tick counter. This simplifies the code by eliminating architecture-specific branches and provides a more robust thread-safe access pattern.

Signed-off-by: hujun5 <hujun5@xiaomi.com>
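For context, a minimal sketch (illustrative, not part of the patch) of the hazard the seqlock removes: with CONFIG_SYSTEM_TIME64 the tick counter is 64 bits wide, so on a 32-bit CPU one C-level load compiles to two machine loads that a concurrent increment can split.

#include <stdint.h>

uint64_t counter;          /* Updated concurrently by the timer tick */

uint64_t broken_read(void)
{
  /* On a 32-bit target this is two loads, low half then high half.
   * If the counter rolls over from 0x00000000ffffffff to
   * 0x0000000100000000 between them, the caller may observe
   * 0x0000000000000000 or 0x00000001ffffffff, both wildly wrong.
   */

  return counter;
}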
@@ -35,8 +35,8 @@

 #define arm_isb() __asm__ __volatile__ ("isb " : : : "memory")
 #define arm_dmb() __asm__ __volatile__ ("dmb " : : : "memory")
-#define arm_rmb() __asm__ __volatile__ ("dmb ishld" : : : "memory")
-#define arm_wmb() __asm__ __volatile__ ("dmb ishst" : : : "memory")
+#define arm_rmb() __asm__ __volatile__ ("dmb " : : : "memory")
+#define arm_wmb() __asm__ __volatile__ ("dmb " : : : "memory")
 #define arm_dsb(n) __asm__ __volatile__ ("dsb " #n : : : "memory")

 #define UP_ISB() arm_isb()

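The barrier hunk drops the inner-shareable load/store qualifiers in favor of a plain dmb. The ld option in particular is only accepted from ARMv8 on, and a full dmb orders both loads and stores, so it is a strictly stronger, universally available substitute. A hypothetical producer/consumer pair shows how the read and write barriers pair up; only arm_wmb() and arm_rmb() come from the header above, the globals and functions are illustrative.

static int g_data;
static volatile int g_ready;

void producer(int value)
{
  g_data = value;       /* Publish the payload... */
  arm_wmb();            /* ...and order it before the flag store */
  g_ready = 1;
}

int consumer(void)
{
  while (!g_ready);     /* Wait for the flag... */
  arm_rmb();            /* ...and order it before the payload load */
  return g_data;
}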
@@ -31,6 +31,7 @@

 #include <nuttx/irq.h>
 #include <nuttx/arch.h>
 #include <nuttx/clock.h>
+#include <nuttx/seqlock.h>

 #include "clock/clock.h"

@@ -39,6 +40,7 @@
 ****************************************************************************/

 static volatile clock_t g_system_ticks = INITIAL_SYSTEM_TIMER_TICKS;
+static seqcount_t g_system_tick_lock = SEQLOCK_INITIALIZER;

 /****************************************************************************
  * Public Functions

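seqcount_t and SEQLOCK_INITIALIZER come from nuttx/seqlock.h, whose internals this diff does not show. A simplified model of the usual sequence-counter protocol, assuming Linux-style semantics rather than quoting the NuttX source, looks like this; memory barriers are omitted for brevity, whereas the real API orders the sequence accesses against the protected data.

#include <stdbool.h>

typedef struct
{
  volatile unsigned int seq;    /* Odd while a write is in progress */
} model_seqcount_t;

static unsigned int model_read_seqbegin(const model_seqcount_t *s)
{
  unsigned int seq;

  do
    {
      seq = s->seq;             /* Spin until no writer is active */
    }
  while (seq & 1);

  return seq;
}

static bool model_read_seqretry(const model_seqcount_t *s,
                                unsigned int seq)
{
  return s->seq != seq;         /* True if any writer intervened */
}

static void model_write_begin(model_seqcount_t *s)
{
  s->seq++;                     /* Sequence goes odd: write active */
}

static void model_write_end(model_seqcount_t *s)
{
  s->seq++;                     /* Sequence goes even: write done */
}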
@@ -62,13 +64,13 @@ static volatile clock_t g_system_ticks = INITIAL_SYSTEM_TIMER_TICKS;

 void clock_increase_sched_ticks(clock_t ticks)
 {
+  irqstate_t flags;
+
   /* Increment the per-tick scheduler counter */

-#ifdef CONFIG_SYSTEM_TIME64
-  atomic64_fetch_add((FAR atomic64_t *)&g_system_ticks, ticks);
-#else
-  atomic_fetch_add((FAR atomic_t *)&g_system_ticks, ticks);
-#endif
+  flags = write_seqlock_irqsave(&g_system_tick_lock);
+  g_system_ticks += ticks;
+  write_sequnlock_irqrestore(&g_system_tick_lock, flags);
 }

 /****************************************************************************

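On the write side, write_seqlock_irqsave() bumps the sequence count and disables interrupts, so the non-atomic 64-bit addition cannot be interrupted on the local CPU, and readers on other CPUs will detect the in-progress update and retry. A hypothetical caller is sketched below; the handler name and return value are assumptions, only clock_increase_sched_ticks() comes from the patch.

static int board_timer_isr(int irq, FAR void *context, FAR void *arg)
{
  /* Advance the scheduler tick counter by one per timer interrupt */

  clock_increase_sched_ticks(1);
  return OK;
}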
@@ -86,32 +88,15 @@ void clock_increase_sched_ticks(clock_t ticks)

 clock_t clock_get_sched_ticks(void)
 {
-#ifdef CONFIG_SYSTEM_TIME64
-  clock_t sample;
-  clock_t verify;
-
-  /* 64-bit accesses are not atomic on most architectures. The following
-   * loop samples the 64-bit counter twice and retries in the rare case
-   * that a 32-bit rollover occurs between samples.
-   *
-   * If no 32-bit rollover occurs:
-   * - The MS 32 bits of both samples will be identical, and
-   * - The LS 32 bits of the second sample will be greater than or equal
-   *   to those of the first.
-   */
+  clock_t ret;
+  unsigned int seq;

   do
     {
-      verify = g_system_ticks;
-      sample = g_system_ticks;
+      seq = read_seqbegin(&g_system_tick_lock);
+      ret = g_system_ticks;
     }
-  while ((sample & TIMER_MASK32) < (verify & TIMER_MASK32) ||
-         (sample & ~TIMER_MASK32) != (verify & ~TIMER_MASK32));
-
-  return sample;
-#else
-  /* On 32-bit systems, atomic access is guaranteed */
-
-  return g_system_ticks;
-#endif
+  while (read_seqretry(&g_system_tick_lock, seq));
+
+  return ret;
 }
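The reader is lock-free: it never blocks the timer interrupt and only loops again if an update raced with the sample. A hypothetical use that times an operation is sketched below; do_work() is a placeholder, and TICK2MSEC() is assumed to be the tick-to-millisecond conversion helper from nuttx/clock.h.

#include <syslog.h>
#include <nuttx/clock.h>

extern void do_work(void);       /* Placeholder for the timed job */

void time_work(void)
{
  clock_t start = clock_get_sched_ticks();

  do_work();

  /* The difference is well-defined even if the counter was updated
   * concurrently, because each sample is internally consistent.
   */

  syslog(LOG_INFO, "work took %lu ms\n",
         (unsigned long)TICK2MSEC(clock_get_sched_ticks() - start));
}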