[kernel]Rewrite rt_thread_get_usage to use incremental statistics based on sampling windows.
ToolsCI / Tools (push) Has been cancelled
RT-Thread BSP Static Build Check / 🔍 Summary of Git Diff Changes (push) Has been cancelled
RT-Thread BSP Static Build Check / ${{ matrix.legs.RTT_BSP }} (push) Has been cancelled
RT-Thread BSP Static Build Check / collect-artifacts (push) Has been cancelled
pkgs_test / change (push) Has been cancelled
utest_auto_run / A9 :components/dfs.cfg (push) Has been cancelled
utest_auto_run / A9 :components/lwip.cfg (push) Has been cancelled
utest_auto_run / A9 :components/netdev.cfg (push) Has been cancelled
utest_auto_run / A9 :components/sal.cfg (push) Has been cancelled
utest_auto_run / A9 :cpp11/cpp11.cfg (push) Has been cancelled
utest_auto_run / AARCH64-rtsmart :default.cfg (push) Has been cancelled
utest_auto_run / A9-rtsmart :default.cfg (push) Has been cancelled
utest_auto_run / RISCV-rtsmart :default.cfg (push) Has been cancelled
utest_auto_run / XUANTIE-rtsmart :default.cfg (push) Has been cancelled
utest_auto_run / AARCH64 :default.cfg (push) Has been cancelled
utest_auto_run / AARCH64-smp :default.cfg (push) Has been cancelled
utest_auto_run / A9 :default.cfg (push) Has been cancelled
utest_auto_run / A9-smp :default.cfg (push) Has been cancelled
utest_auto_run / RISCV :default.cfg (push) Has been cancelled
utest_auto_run / RISCV-smp :default.cfg (push) Has been cancelled
utest_auto_run / A9 :kernel/atomic_c11.cfg (push) Has been cancelled
utest_auto_run / RISCV :kernel/atomic_c11.cfg (push) Has been cancelled
utest_auto_run / A9 :kernel/ipc.cfg (push) Has been cancelled
utest_auto_run / A9 :kernel/kernel_basic.cfg (push) Has been cancelled
utest_auto_run / A9 :kernel/mem.cfg (push) Has been cancelled
doc_doxygen / doxygen_doc generate (push) Has been cancelled
doc_doxygen / deploy (push) Has been cancelled

This commit is contained in:
Rbb666
2026-03-16 13:28:12 +08:00
committed by Rbb666
parent 29e5f61fb9
commit 4025c51407
4 changed files with 175 additions and 29 deletions
+2
View File
@@ -935,6 +935,8 @@ struct rt_thread
#ifdef RT_USING_CPU_USAGE_TRACER
rt_ubase_t user_time; /**< Ticks on user */
rt_ubase_t system_time; /**< Ticks on system */
rt_ubase_t total_time_prev; /**< Previous total ticks snapshot */
rt_uint8_t cpu_usage; /**< Recent CPU usage in percent */
#endif /* RT_USING_CPU_USAGE_TRACER */
#ifdef RT_USING_MEM_PROTECTION
+11
View File
@@ -199,6 +199,17 @@ config RT_USING_CPU_USAGE_TRACER
percentage information through the list thread command.
It will automatically integrate with the scheduler to track thread execution time.
if RT_USING_CPU_USAGE_TRACER
config RT_CPU_USAGE_CALC_INTERVAL_MS
int "CPU usage sampling interval (ms)"
default 200
range 50 5000
help
Sampling window for thread CPU usage display.
A shorter interval updates faster but fluctuates more.
A longer interval is smoother but has higher display latency.
endif
menu "kservice options"
config RT_USING_TINY_FFS
bool "Enable kservice to use tiny finding first bit set method"
+155 -29
View File
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2006-2026, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
@@ -28,6 +28,7 @@
* 2023-10-21 Shell support the common backtrace API which is arch-independent
* 2023-12-10 xqyjlj perf rt_hw_interrupt_disable/enable, fix memheap lock
* 2024-03-10 Meco Man move std libc related functions to rtklibc
* 2026-03-16 Rbb666 Change rt_thread_get_usage to incremental statistics.
*/
#include <rtthread.h>
@@ -572,49 +573,174 @@ rt_err_t rt_backtrace_thread(rt_thread_t thread)
}
#ifdef RT_USING_CPU_USAGE_TRACER
/**
* @brief Get thread usage percentage relative to total system CPU time
#define RT_CPU_USAGE_CALC_INTERVAL_TICK \
((RT_TICK_PER_SECOND * RT_CPU_USAGE_CALC_INTERVAL_MS + 999U) / 1000U)
static rt_tick_t _cpu_usage_sample_tick;
static rt_bool_t _cpu_usage_inited = RT_FALSE;
static struct rt_cpu_usage_stats _cpu_usage_prev_cpu_stat[RT_CPUS_NR];
static struct rt_spinlock _cpu_usage_lock = RT_SPINLOCK_INIT;
/*
* Calculate total CPU-time delta for this sampling window and
* refresh per-CPU snapshots.
*
* This function calculates the CPU usage percentage of a specific thread
* relative to the total CPU time consumed by all threads in the system.
* Each counter delta is computed in rt_ubase_t width first, so wrap-around
* on 32-bit targets is handled naturally by unsigned arithmetic.
*/
static rt_uint64_t _cpu_usage_calc_total_delta(void)
{
rt_uint64_t total_delta = 0;
int i;
for (i = 0; i < RT_CPUS_NR; i++)
{
rt_cpu_t pcpu = rt_cpu_index(i);
rt_ubase_t user_now = pcpu->cpu_stat.user;
rt_ubase_t system_now = pcpu->cpu_stat.system;
rt_ubase_t idle_now = pcpu->cpu_stat.idle;
/* Per-counter delta first to avoid overflow artifacts after sum. */
rt_ubase_t user_delta = (rt_ubase_t)(user_now - _cpu_usage_prev_cpu_stat[i].user);
rt_ubase_t system_delta = (rt_ubase_t)(system_now - _cpu_usage_prev_cpu_stat[i].system);
rt_ubase_t idle_delta = (rt_ubase_t)(idle_now - _cpu_usage_prev_cpu_stat[i].idle);
total_delta += (rt_uint64_t)user_delta;
total_delta += (rt_uint64_t)system_delta;
total_delta += (rt_uint64_t)idle_delta;
_cpu_usage_prev_cpu_stat[i].user = user_now;
_cpu_usage_prev_cpu_stat[i].system = system_now;
_cpu_usage_prev_cpu_stat[i].idle = idle_now;
}
return total_delta;
}
/* Reset all per-thread and per-CPU sampling state and arm the first window. */
static void _cpu_usage_snapshot_init(void)
{
    struct rt_object_information *info = rt_object_get_information(RT_Object_Class_Thread);
    rt_list_t *head = &info->object_list;
    rt_list_t *iter;
    rt_base_t level;
    int cpu;

    /* Clear every existing thread's window bookkeeping under the
     * container lock so the list cannot change underneath us. */
    level = rt_spin_lock_irqsave(&info->spinlock);
    for (iter = head->next; iter != head; iter = iter->next)
    {
        struct rt_thread *thread =
            (struct rt_thread *)rt_list_entry(iter, struct rt_object, list);

        thread->total_time_prev = 0U;
        thread->cpu_usage       = 0U;
    }
    rt_spin_unlock_irqrestore(&info->spinlock, level);

    /* Zero the per-CPU baseline snapshots. */
    for (cpu = 0; cpu < RT_CPUS_NR; cpu++)
    {
        _cpu_usage_prev_cpu_stat[cpu].user   = 0U;
        _cpu_usage_prev_cpu_stat[cpu].system = 0U;
        _cpu_usage_prev_cpu_stat[cpu].idle   = 0U;
    }

    _cpu_usage_sample_tick = rt_tick_get();
    _cpu_usage_inited      = RT_TRUE;
}
/*
 * Recompute every thread's cpu_usage percentage from its runtime delta in
 * the window just closed, then advance each thread's snapshot.
 *
 * total_delta: sum of user/system/idle tick deltas across all CPUs for
 * the same window; 0 forces every usage value to 0.
 */
static void _cpu_usage_refresh_threads(rt_uint64_t total_delta)
{
    struct rt_object_information *info = rt_object_get_information(RT_Object_Class_Thread);
    rt_list_t *head = &info->object_list;
    rt_list_t *iter;
    rt_base_t level;

    level = rt_spin_lock_irqsave(&info->spinlock);
    for (iter = head->next; iter != head; iter = iter->next)
    {
        struct rt_thread *thread =
            (struct rt_thread *)rt_list_entry(iter, struct rt_object, list);
        rt_ubase_t run_now   = (rt_ubase_t)(thread->user_time + thread->system_time);
        rt_ubase_t run_delta = (rt_ubase_t)(run_now - thread->total_time_prev);

        if (total_delta > 0U)
        {
            rt_uint64_t pct = ((rt_uint64_t)run_delta * 100U) / total_delta;

            /* Clamp in case of counter skew between samplings. */
            thread->cpu_usage = (rt_uint8_t)(pct > 100U ? 100U : pct);
        }
        else
        {
            thread->cpu_usage = 0U;
        }

        thread->total_time_prev = run_now;
    }
    rt_spin_unlock_irqrestore(&info->spinlock, level);
}
/*
 * Close the current sampling window if it has elapsed and refresh every
 * thread's usage figure. On first call, initializes snapshots and computes
 * an initial window immediately. Caller must hold _cpu_usage_lock.
 */
static void _cpu_usage_update(void)
{
    rt_bool_t force_sample = RT_FALSE;
    rt_tick_t now;

    if (!_cpu_usage_inited)
    {
        _cpu_usage_snapshot_init();
        /* First call: sample right away instead of waiting one interval. */
        force_sample = RT_TRUE;
    }

    now = rt_tick_get();
    if (!force_sample &&
        rt_tick_get_delta(_cpu_usage_sample_tick) < RT_CPU_USAGE_CALC_INTERVAL_TICK)
    {
        /* Window not elapsed yet; keep the cached per-thread values. */
        return;
    }

    _cpu_usage_refresh_threads(_cpu_usage_calc_total_delta());
    _cpu_usage_sample_tick = now;
}
/**
 * @brief Get thread CPU usage percentage in the recent sampling window
 *
 * This function returns per-thread CPU usage based on delta runtime in the
 * latest sampling window, rather than cumulative runtime since boot.
 *
 * @param thread Pointer to the thread object. Must not be NULL.
 *
 * @return The CPU usage percentage as an integer value (0-100).
 *         If the sampling interval has not elapsed yet, the previously
 *         cached value is returned (initial value is 0).
 *
 * @note This function requires RT_USING_CPU_USAGE_TRACER to be enabled.
 * @note The percentage is calculated as
 *       (thread_time_delta * 100) / total_time_delta,
 *       where total_time_delta is the sum of user/system/idle deltas of all CPUs.
 * @note Sampling interval can be tuned with RT_CPU_USAGE_CALC_INTERVAL_MS.
 * @note Due to integer truncation, per-thread percentages may not sum to
 *       exactly 100% across all threads.
 * @note If thread is NULL, an assertion will be triggered in debug builds.
 */
rt_uint8_t rt_thread_get_usage(rt_thread_t thread)
{
    rt_uint8_t usage;

    RT_ASSERT(thread != RT_NULL);

    /* Serialize against concurrent callers: _cpu_usage_update() mutates the
     * shared window snapshots and per-thread cached percentages. */
    rt_spin_lock(&_cpu_usage_lock);
    _cpu_usage_update();
    usage = thread->cpu_usage;
    rt_spin_unlock(&_cpu_usage_lock);

    return usage;
}
#endif /* RT_USING_CPU_USAGE_TRACER */
+7
View File
@@ -277,6 +277,13 @@ static rt_err_t _thread_init(struct rt_thread *thread,
thread->system_time = 0;
#endif
#ifdef RT_USING_CPU_USAGE_TRACER
thread->user_time = 0;
thread->system_time = 0;
thread->total_time_prev = 0;
thread->cpu_usage = 0;
#endif /* RT_USING_CPU_USAGE_TRACER */
#ifdef RT_USING_PTHREADS
thread->pthread_data = RT_NULL;
#endif /* RT_USING_PTHREADS */