mirror of
https://github.com/RT-Thread/rt-thread.git
synced 2026-03-25 01:43:49 +08:00
Some checks failed
AutoTestCI / components/cpp11 (push) Has been cancelled
AutoTestCI / kernel/atomic (push) Has been cancelled
AutoTestCI / kernel/atomic/riscv64 (push) Has been cancelled
AutoTestCI / kernel/atomic_c11 (push) Has been cancelled
AutoTestCI / kernel/atomic_c11/riscv64 (push) Has been cancelled
AutoTestCI / kernel/device (push) Has been cancelled
AutoTestCI / kernel/ipc (push) Has been cancelled
AutoTestCI / kernel/irq (push) Has been cancelled
AutoTestCI / kernel/mem (push) Has been cancelled
AutoTestCI / kernel/mem/riscv64 (push) Has been cancelled
AutoTestCI / kernel/thread (push) Has been cancelled
AutoTestCI / kernel/timer (push) Has been cancelled
AutoTestCI / rtsmart/aarch64 (push) Has been cancelled
AutoTestCI / rtsmart/arm (push) Has been cancelled
AutoTestCI / rtsmart/riscv64 (push) Has been cancelled
AutoTestCI / components/utest (push) Has been cancelled
RT-Thread BSP Static Build Check / 🔍 Summary of Git Diff Changes (push) Has been cancelled
RT-Thread BSP Static Build Check / ${{ matrix.legs.RTT_BSP }} (push) Has been cancelled
RT-Thread BSP Static Build Check / collect-artifacts (push) Has been cancelled
doc_doxygen / doxygen_doc generate (push) Has been cancelled
doc_doxygen / deploy (push) Has been cancelled
pkgs_test / change (push) Has been cancelled
utest_auto_run / A9 :cpp11/cpp11.cfg (push) Has been cancelled
utest_auto_run / AARCH64-rtsmart :default.cfg (push) Has been cancelled
utest_auto_run / A9-rtsmart :default.cfg (push) Has been cancelled
utest_auto_run / RISCV-rtsmart :default.cfg (push) Has been cancelled
utest_auto_run / XUANTIE-rtsmart :default.cfg (push) Has been cancelled
utest_auto_run / AARCH64 :default.cfg (push) Has been cancelled
utest_auto_run / A9 :default.cfg (push) Has been cancelled
utest_auto_run / A9-smp :default.cfg (push) Has been cancelled
utest_auto_run / RISCV :default.cfg (push) Has been cancelled
utest_auto_run / A9 :dfs/dfs.cfg (push) Has been cancelled
utest_auto_run / A9 :kernel/object.cfg (push) Has been cancelled
Refer to the issue described in PR #10599. This was a temporary fix at that time. After further research, we discovered that this issue can be addressed using the "@cond" command supported by Doxygen. Since we currently do not intend to generate two sets of documentation for UP and MP when generating Doxygen documentation, the current solution is to generate only MP documentation by default. For functions defined in MP but not in UP, we will use the "@note" command in the function's Doxygen comment to indicate whether the function supports both UP and MP, or only MP. Signed-off-by: Chen Wang <unicorn_wang@outlook.com>
277 lines · 7.0 KiB · C
/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2018-10-30     Bernard      The first version
 * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
 * 2023-12-10     xqyjlj       spinlock should lock sched
 * 2024-01-25     Shell        Using rt_exit_critical_safe
 */
|
|
#include <rthw.h>
|
|
#include <rtthread.h>
|
|
|
|
#ifdef RT_USING_SMART
|
|
#include <lwp.h>
|
|
#endif
|
|
|
|
#ifdef RT_USING_DEBUG
/* Critical-section nesting level recorded when _cpus_lock was taken;
 * consumed by rt_cpus_unlock() so rt_exit_critical_safe() can restore
 * the matching level. */
rt_base_t _cpus_critical_level;
#endif /* RT_USING_DEBUG */

/* Per-CPU state table, one slot per core, indexed by rt_hw_cpu_id(). */
static struct rt_cpu _cpus[RT_CPUS_NR];
/* Global hardware spinlock serializing all CPUs' schedulers. */
rt_hw_spinlock_t _cpus_lock;
#if defined(RT_DEBUGING_SPINLOCK)
/* Debug bookkeeping: owning thread and caller PC of the last
 * _cpus_lock acquisition. */
void *_cpus_lock_owner = 0;
void *_cpus_lock_pc = 0;
#endif /* RT_DEBUGING_SPINLOCK */
|
|
|
|
/**
 * @addtogroup group_thread_comm
 *
 * @cond DOXYGEN_SMP
 *
 * @{
 */
|
|
|
|
/**
 * @brief Initialize a static spinlock object.
 *
 * @param lock is a pointer to the spinlock to initialize.
 *
 * @note This function has UP version and MP version.
 */
void rt_spin_lock_init(struct rt_spinlock *lock)
{
    /* Delegate to the architecture-specific initializer of the raw lock. */
    rt_hw_spin_lock_init(&lock->lock);
}
RTM_EXPORT(rt_spin_lock_init)
|
|
|
|
/**
 * @brief This function will lock the spinlock, will lock the thread scheduler.
 *
 * If the spinlock is locked, the current CPU will keep polling the spinlock state
 * until the spinlock is unlocked.
 *
 * @param lock is a pointer to the spinlock.
 *
 * @note This function has UP version and MP version.
 */
void rt_spin_lock(struct rt_spinlock *lock)
{
    /* Disable preemption first so this thread cannot be scheduled away
     * while spinning on, or while holding, the hardware lock. */
    rt_enter_critical();
    rt_hw_spin_lock(&lock->lock);
    /* Record lock ownership when spinlock debugging is enabled
     * (expands to nothing otherwise). */
    RT_SPIN_LOCK_DEBUG(lock);
}
RTM_EXPORT(rt_spin_lock)
|
|
|
|
/**
 * @brief This function will unlock the spinlock, will unlock the thread scheduler.
 *
 * If the scheduling function is called before unlocking, it will be scheduled in this function.
 *
 * @param lock is a pointer to the spinlock.
 *
 * @note This function has UP version and MP version.
 */
void rt_spin_unlock(struct rt_spinlock *lock)
{
    rt_base_t critical_level;

    /* NOTE(review): this macro is expected to assign critical_level (the
     * nesting level recorded at lock time) in every configuration --
     * verify against its definition in the header. */
    RT_SPIN_UNLOCK_DEBUG(lock, critical_level);
    /* Release the hardware lock before re-enabling preemption; a pending
     * reschedule may then happen inside rt_exit_critical_safe(). */
    rt_hw_spin_unlock(&lock->lock);
    rt_exit_critical_safe(critical_level);
}
RTM_EXPORT(rt_spin_unlock)
|
|
|
|
/**
 * @brief This function will disable the local interrupt and then lock the spinlock, will lock the thread scheduler.
 *
 * If the spinlock is locked, the current CPU will keep polling the spinlock state
 * until the spinlock is unlocked.
 *
 * @param lock is a pointer to the spinlock.
 *
 * @return Return current cpu interrupt status.
 *
 * @note This function has UP version and MP version.
 */
rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock)
{
    rt_base_t level;

    /* Ordering matters: mask local interrupts, then disable preemption,
     * then take the hardware lock. rt_spin_unlock_irqrestore() undoes
     * these steps in exact reverse order. */
    level = rt_hw_local_irq_disable();
    rt_enter_critical();
    rt_hw_spin_lock(&lock->lock);
    /* Record lock ownership when spinlock debugging is enabled. */
    RT_SPIN_LOCK_DEBUG(lock);
    /* Caller must pass this saved interrupt status back on unlock. */
    return level;
}
RTM_EXPORT(rt_spin_lock_irqsave)
|
|
|
|
/**
 * @brief This function will unlock the spinlock and then restore current cpu interrupt status, will unlock the thread scheduler.
 *
 * If the scheduling function is called before unlocking, it will be scheduled in this function.
 *
 * @param lock is a pointer to the spinlock.
 *
 * @param level is interrupt status returned by rt_spin_lock_irqsave().
 *
 * @note This function has UP version and MP version.
 */
void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level)
{
    rt_base_t critical_level;

    /* NOTE(review): this macro is expected to assign critical_level in
     * every configuration -- verify against its header definition. */
    RT_SPIN_UNLOCK_DEBUG(lock, critical_level);
    /* Reverse of rt_spin_lock_irqsave(): drop the hardware lock,
     * re-enable preemption (possibly rescheduling), then restore the
     * saved local interrupt state. */
    rt_hw_spin_unlock(&lock->lock);
    rt_exit_critical_safe(critical_level);
    rt_hw_local_irq_enable(level);
}
RTM_EXPORT(rt_spin_unlock_irqrestore)
|
|
|
|
/**
 * @brief This function will return current cpu object.
 *
 * @return Return a pointer to the current cpu object.
 *
 * @note This function has UP version and MP version.
 */
struct rt_cpu *rt_cpu_self(void)
{
    /* Index the per-CPU table with the hardware CPU id of the caller. */
    return &_cpus[rt_hw_cpu_id()];
}
|
|
|
|
/**
|
|
* @brief This fucntion will return the cpu object corresponding to index.
|
|
*
|
|
* @param index is the index of target cpu object.
|
|
*
|
|
* @return Return a pointer to the cpu object corresponding to index.
|
|
*
|
|
* @note This function has UP version and MP version.
|
|
*/
|
|
struct rt_cpu *rt_cpu_index(int index)
|
|
{
|
|
return &_cpus[index];
|
|
}
|
|
|
|
/**
 * @brief This function will lock all cpus's scheduler and disable local irq.
 *
 * @return Return current cpu interrupt status.
 *
 * @note This function only has MP version.
 */
rt_base_t rt_cpus_lock(void)
{
    rt_base_t level;
    struct rt_cpu* pcpu;

    /* Mask local interrupts first so neither an IRQ nor a context switch
     * can intervene while the nesting count is updated. */
    level = rt_hw_local_irq_disable();
    pcpu = rt_cpu_self();
    /* current_thread may be RT_NULL during early boot, before the
     * scheduler has started on this CPU; in that case only IRQs are
     * masked and the global lock is not taken. */
    if (pcpu->current_thread != RT_NULL)
    {
        /* Per-thread nesting counter: only the outermost call actually
         * acquires the global _cpus_lock. */
        rt_ubase_t lock_nest = rt_atomic_load(&(pcpu->current_thread->cpus_lock_nest));

        rt_atomic_add(&(pcpu->current_thread->cpus_lock_nest), 1);
        if (lock_nest == 0)
        {
            rt_enter_critical();
            rt_hw_spin_lock(&_cpus_lock);
#ifdef RT_USING_DEBUG
            /* Remember the critical nesting level at acquisition so
             * rt_cpus_unlock() can restore exactly this level. */
            _cpus_critical_level = rt_critical_level();
#endif /* RT_USING_DEBUG */

#ifdef RT_DEBUGING_SPINLOCK
            /* Record owner and caller PC for lock debugging. */
            _cpus_lock_owner = pcpu->current_thread;
            _cpus_lock_pc = __GET_RETURN_ADDRESS;
#endif /* RT_DEBUGING_SPINLOCK */
        }
    }

    return level;
}
RTM_EXPORT(rt_cpus_lock);
|
|
|
|
/**
|
|
* @brief This function will restore all cpus's scheduler and restore local irq.
|
|
*
|
|
* @param level is interrupt status returned by rt_cpus_lock().
|
|
*
|
|
* @note This function only has MP version.
|
|
*/
|
|
void rt_cpus_unlock(rt_base_t level)
|
|
{
|
|
struct rt_cpu* pcpu = rt_cpu_self();
|
|
|
|
if (pcpu->current_thread != RT_NULL)
|
|
{
|
|
rt_base_t critical_level = 0;
|
|
RT_ASSERT(rt_atomic_load(&(pcpu->current_thread->cpus_lock_nest)) > 0);
|
|
rt_atomic_sub(&(pcpu->current_thread->cpus_lock_nest), 1);
|
|
|
|
if (pcpu->current_thread->cpus_lock_nest == 0)
|
|
{
|
|
#if defined(RT_DEBUGING_SPINLOCK)
|
|
_cpus_lock_owner = __OWNER_MAGIC;
|
|
_cpus_lock_pc = RT_NULL;
|
|
#endif /* RT_DEBUGING_SPINLOCK */
|
|
#ifdef RT_USING_DEBUG
|
|
critical_level = _cpus_critical_level;
|
|
_cpus_critical_level = 0;
|
|
#endif /* RT_USING_DEBUG */
|
|
rt_hw_spin_unlock(&_cpus_lock);
|
|
rt_exit_critical_safe(critical_level);
|
|
}
|
|
}
|
|
rt_hw_local_irq_enable(level);
|
|
}
|
|
RTM_EXPORT(rt_cpus_unlock);
|
|
|
|
/**
 * This function is invoked by scheduler.
 * It will restore the lock state to whatever the thread's counter expects.
 * If target thread not locked the cpus then unlock the cpus lock.
 *
 * @param thread is a pointer to the target thread.
 *
 * @note This function only has MP version.
 */
void rt_cpus_lock_status_restore(struct rt_thread *thread)
{
#if defined(ARCH_MM_MMU) && defined(RT_USING_SMART)
    /* Switch to the incoming thread's address space (smart/userspace
     * builds only). */
    lwp_aspace_switch(thread);
#endif
    /* Let the scheduler complete its post-context-switch bookkeeping. */
    rt_sched_post_ctx_switch(thread);
}
RTM_EXPORT(rt_cpus_lock_status_restore);
|
|
|
|
/* A safe API with debugging feature to be called in most codes */

/* Undefine any macro alias so the symbol below is a real function. */
#undef rt_cpu_get_id
/**
 * @brief Get logical CPU ID
 *
 * @return logical CPU ID
 *
 * @note This function only has MP version.
 */
rt_base_t rt_cpu_get_id(void)
{
    /* The raw CPU id is only meaningful if the caller cannot migrate:
     * it must be bound to a CPU, have interrupts disabled, or the
     * scheduler must not be running yet. */
    RT_ASSERT(rt_sched_thread_is_binding(RT_NULL) ||
              rt_hw_interrupt_is_disabled() ||
              !rt_scheduler_is_available());

    return rt_hw_cpu_id();
}
|
|
|
|
/**
 * @}
 *
 * @endcond
 */