feat(core): Add 8-bit and 16-bit atomic operation support

This commit is contained in:
wdfk-prog
2026-03-13 14:15:05 +08:00
committed by Rbb666
parent b7686b8d79
commit 05445c04e8
4 changed files with 392 additions and 1 deletions

View File

@@ -7,6 +7,7 @@
* Date Author Notes * Date Author Notes
* 2023-03-14 WangShun first version * 2023-03-14 WangShun first version
* 2023-05-20 Bernard add stdc atomic detection. * 2023-05-20 Bernard add stdc atomic detection.
* 2026-03-09 wdfk-prog add 8/16-bit atomic operations support
*/ */
#ifndef __RT_ATOMIC_H__ #ifndef __RT_ATOMIC_H__
#define __RT_ATOMIC_H__ #define __RT_ATOMIC_H__
@@ -17,8 +18,16 @@
rt_atomic_t rt_hw_atomic_load(volatile rt_atomic_t *ptr); rt_atomic_t rt_hw_atomic_load(volatile rt_atomic_t *ptr);
void rt_hw_atomic_store(volatile rt_atomic_t *ptr, rt_atomic_t val); void rt_hw_atomic_store(volatile rt_atomic_t *ptr, rt_atomic_t val);
rt_atomic8_t rt_hw_atomic_load8(volatile rt_atomic8_t *ptr);
void rt_hw_atomic_store8(volatile rt_atomic8_t *ptr, rt_atomic8_t val);
rt_atomic16_t rt_hw_atomic_load16(volatile rt_atomic16_t *ptr);
void rt_hw_atomic_store16(volatile rt_atomic16_t *ptr, rt_atomic16_t val);
rt_atomic_t rt_hw_atomic_add(volatile rt_atomic_t *ptr, rt_atomic_t val); rt_atomic_t rt_hw_atomic_add(volatile rt_atomic_t *ptr, rt_atomic_t val);
rt_atomic_t rt_hw_atomic_sub(volatile rt_atomic_t *ptr, rt_atomic_t val); rt_atomic_t rt_hw_atomic_sub(volatile rt_atomic_t *ptr, rt_atomic_t val);
rt_atomic8_t rt_hw_atomic_and8(volatile rt_atomic8_t *ptr, rt_atomic8_t val);
rt_atomic8_t rt_hw_atomic_or8(volatile rt_atomic8_t *ptr, rt_atomic8_t val);
rt_atomic16_t rt_hw_atomic_and16(volatile rt_atomic16_t *ptr, rt_atomic16_t val);
rt_atomic16_t rt_hw_atomic_or16(volatile rt_atomic16_t *ptr, rt_atomic16_t val);
rt_atomic_t rt_hw_atomic_and(volatile rt_atomic_t *ptr, rt_atomic_t val); rt_atomic_t rt_hw_atomic_and(volatile rt_atomic_t *ptr, rt_atomic_t val);
rt_atomic_t rt_hw_atomic_or(volatile rt_atomic_t *ptr, rt_atomic_t val); rt_atomic_t rt_hw_atomic_or(volatile rt_atomic_t *ptr, rt_atomic_t val);
rt_atomic_t rt_hw_atomic_xor(volatile rt_atomic_t *ptr, rt_atomic_t val); rt_atomic_t rt_hw_atomic_xor(volatile rt_atomic_t *ptr, rt_atomic_t val);
@@ -32,8 +41,16 @@ rt_atomic_t rt_hw_atomic_compare_exchange_strong(volatile rt_atomic_t *ptr, rt_a
#ifndef __STDC_NO_ATOMICS__ #ifndef __STDC_NO_ATOMICS__
#define rt_atomic_load(ptr) atomic_load(ptr) #define rt_atomic_load(ptr) atomic_load(ptr)
#define rt_atomic_store(ptr, v) atomic_store(ptr, v) #define rt_atomic_store(ptr, v) atomic_store(ptr, v)
#define rt_atomic_load8(ptr) atomic_load(ptr)
#define rt_atomic_store8(ptr, v) atomic_store(ptr, v)
#define rt_atomic_load16(ptr) atomic_load(ptr)
#define rt_atomic_store16(ptr, v) atomic_store(ptr, v)
#define rt_atomic_add(ptr, v) atomic_fetch_add(ptr, v) #define rt_atomic_add(ptr, v) atomic_fetch_add(ptr, v)
#define rt_atomic_sub(ptr, v) atomic_fetch_sub(ptr, v) #define rt_atomic_sub(ptr, v) atomic_fetch_sub(ptr, v)
#define rt_atomic_and8(ptr, v) atomic_fetch_and(ptr, v)
#define rt_atomic_or8(ptr, v) atomic_fetch_or(ptr, v)
#define rt_atomic_and16(ptr, v) atomic_fetch_and(ptr, v)
#define rt_atomic_or16(ptr, v) atomic_fetch_or(ptr, v)
#define rt_atomic_and(ptr, v) atomic_fetch_and(ptr, v) #define rt_atomic_and(ptr, v) atomic_fetch_and(ptr, v)
#define rt_atomic_or(ptr, v) atomic_fetch_or(ptr, v) #define rt_atomic_or(ptr, v) atomic_fetch_or(ptr, v)
#define rt_atomic_xor(ptr, v) atomic_fetch_xor(ptr, v) #define rt_atomic_xor(ptr, v) atomic_fetch_xor(ptr, v)
@@ -48,6 +65,28 @@ rt_atomic_t rt_hw_atomic_compare_exchange_strong(volatile rt_atomic_t *ptr, rt_a
#elif defined(RT_USING_HW_ATOMIC) #elif defined(RT_USING_HW_ATOMIC)
#define rt_atomic_load(ptr) rt_hw_atomic_load(ptr) #define rt_atomic_load(ptr) rt_hw_atomic_load(ptr)
#define rt_atomic_store(ptr, v) rt_hw_atomic_store(ptr, v) #define rt_atomic_store(ptr, v) rt_hw_atomic_store(ptr, v)
#if defined(ARCH_USING_HW_ATOMIC_8)
#define rt_atomic_load8(ptr) rt_hw_atomic_load8(ptr)
#define rt_atomic_store8(ptr, v) rt_hw_atomic_store8(ptr, v)
#define rt_atomic_and8(ptr, v) rt_hw_atomic_and8(ptr, v)
#define rt_atomic_or8(ptr, v) rt_hw_atomic_or8(ptr, v)
#else
#define rt_atomic_load8(ptr) rt_soft_atomic_load8(ptr)
#define rt_atomic_store8(ptr, v) rt_soft_atomic_store8(ptr, v)
#define rt_atomic_and8(ptr, v) rt_soft_atomic_and8(ptr, v)
#define rt_atomic_or8(ptr, v) rt_soft_atomic_or8(ptr, v)
#endif
#if defined(ARCH_USING_HW_ATOMIC_16)
#define rt_atomic_load16(ptr) rt_hw_atomic_load16(ptr)
#define rt_atomic_store16(ptr, v) rt_hw_atomic_store16(ptr, v)
#define rt_atomic_and16(ptr, v) rt_hw_atomic_and16(ptr, v)
#define rt_atomic_or16(ptr, v) rt_hw_atomic_or16(ptr, v)
#else
#define rt_atomic_load16(ptr) rt_soft_atomic_load16(ptr)
#define rt_atomic_store16(ptr, v) rt_soft_atomic_store16(ptr, v)
#define rt_atomic_and16(ptr, v) rt_soft_atomic_and16(ptr, v)
#define rt_atomic_or16(ptr, v) rt_soft_atomic_or16(ptr, v)
#endif
#define rt_atomic_add(ptr, v) rt_hw_atomic_add(ptr, v) #define rt_atomic_add(ptr, v) rt_hw_atomic_add(ptr, v)
#define rt_atomic_sub(ptr, v) rt_hw_atomic_sub(ptr, v) #define rt_atomic_sub(ptr, v) rt_hw_atomic_sub(ptr, v)
#define rt_atomic_and(ptr, v) rt_hw_atomic_and(ptr, v) #define rt_atomic_and(ptr, v) rt_hw_atomic_and(ptr, v)
@@ -62,8 +101,16 @@ rt_atomic_t rt_hw_atomic_compare_exchange_strong(volatile rt_atomic_t *ptr, rt_a
#include <rthw.h> #include <rthw.h>
#define rt_atomic_load(ptr) rt_soft_atomic_load(ptr) #define rt_atomic_load(ptr) rt_soft_atomic_load(ptr)
#define rt_atomic_store(ptr, v) rt_soft_atomic_store(ptr, v) #define rt_atomic_store(ptr, v) rt_soft_atomic_store(ptr, v)
#define rt_atomic_load8(ptr) rt_soft_atomic_load8(ptr)
#define rt_atomic_store8(ptr, v) rt_soft_atomic_store8(ptr, v)
#define rt_atomic_load16(ptr) rt_soft_atomic_load16(ptr)
#define rt_atomic_store16(ptr, v) rt_soft_atomic_store16(ptr, v)
#define rt_atomic_add(ptr, v) rt_soft_atomic_add(ptr, v) #define rt_atomic_add(ptr, v) rt_soft_atomic_add(ptr, v)
#define rt_atomic_sub(ptr, v) rt_soft_atomic_sub(ptr, v) #define rt_atomic_sub(ptr, v) rt_soft_atomic_sub(ptr, v)
#define rt_atomic_and8(ptr, v) rt_soft_atomic_and8(ptr, v)
#define rt_atomic_or8(ptr, v) rt_soft_atomic_or8(ptr, v)
#define rt_atomic_and16(ptr, v) rt_soft_atomic_and16(ptr, v)
#define rt_atomic_or16(ptr, v) rt_soft_atomic_or16(ptr, v)
#define rt_atomic_and(ptr, v) rt_soft_atomic_and(ptr, v) #define rt_atomic_and(ptr, v) rt_soft_atomic_and(ptr, v)
#define rt_atomic_or(ptr, v) rt_soft_atomic_or(ptr, v) #define rt_atomic_or(ptr, v) rt_soft_atomic_or(ptr, v)
#define rt_atomic_xor(ptr, v) rt_soft_atomic_xor(ptr, v) #define rt_atomic_xor(ptr, v) rt_soft_atomic_xor(ptr, v)
@@ -72,6 +119,100 @@ rt_atomic_t rt_hw_atomic_compare_exchange_strong(volatile rt_atomic_t *ptr, rt_a
#define rt_atomic_flag_test_and_set(ptr) rt_soft_atomic_flag_test_and_set(ptr) #define rt_atomic_flag_test_and_set(ptr) rt_soft_atomic_flag_test_and_set(ptr)
#define rt_atomic_compare_exchange_strong(ptr, v,des) rt_soft_atomic_compare_exchange_strong(ptr, v ,des) #define rt_atomic_compare_exchange_strong(ptr, v,des) rt_soft_atomic_compare_exchange_strong(ptr, v ,des)
/* Software fallback: atomically read an 8-bit value by masking interrupts
 * around the access. Returns the value read. */
rt_inline rt_atomic8_t rt_soft_atomic_load8(volatile rt_atomic8_t *ptr)
{
    rt_atomic8_t value;
    rt_base_t irq_state = rt_hw_interrupt_disable();

    value = *ptr;
    rt_hw_interrupt_enable(irq_state);

    return value;
}
/* Software fallback: atomically write an 8-bit value by masking interrupts
 * around the store. */
rt_inline void rt_soft_atomic_store8(volatile rt_atomic8_t *ptr, rt_atomic8_t val)
{
    rt_base_t irq_state = rt_hw_interrupt_disable();

    *ptr = val;
    rt_hw_interrupt_enable(irq_state);
}
/* Software fallback: atomically read a 16-bit value by masking interrupts
 * around the access. Returns the value read. */
rt_inline rt_atomic16_t rt_soft_atomic_load16(volatile rt_atomic16_t *ptr)
{
    rt_atomic16_t value;
    rt_base_t irq_state = rt_hw_interrupt_disable();

    value = *ptr;
    rt_hw_interrupt_enable(irq_state);

    return value;
}
/* Software fallback: atomically write a 16-bit value by masking interrupts
 * around the store. */
rt_inline void rt_soft_atomic_store16(volatile rt_atomic16_t *ptr, rt_atomic16_t val)
{
    rt_base_t irq_state = rt_hw_interrupt_disable();

    *ptr = val;
    rt_hw_interrupt_enable(irq_state);
}
/* Software fallback: 8-bit fetch-AND under an interrupt-disabled critical
 * section. Returns the value held before the update (atomic_fetch_and
 * semantics, matching the other rt_soft_atomic_* helpers). */
rt_inline rt_atomic8_t rt_soft_atomic_and8(volatile rt_atomic8_t *ptr, rt_atomic8_t val)
{
    rt_atomic8_t previous;
    rt_base_t irq_state = rt_hw_interrupt_disable();

    previous = *ptr;
    *ptr = previous & val;
    rt_hw_interrupt_enable(irq_state);

    return previous;
}
/* Software fallback: 8-bit fetch-OR under an interrupt-disabled critical
 * section. Returns the value held before the update. */
rt_inline rt_atomic8_t rt_soft_atomic_or8(volatile rt_atomic8_t *ptr, rt_atomic8_t val)
{
    rt_atomic8_t previous;
    rt_base_t irq_state = rt_hw_interrupt_disable();

    previous = *ptr;
    *ptr = previous | val;
    rt_hw_interrupt_enable(irq_state);

    return previous;
}
/* Software fallback: 16-bit fetch-AND under an interrupt-disabled critical
 * section. Returns the value held before the update. */
rt_inline rt_atomic16_t rt_soft_atomic_and16(volatile rt_atomic16_t *ptr, rt_atomic16_t val)
{
    rt_atomic16_t previous;
    rt_base_t irq_state = rt_hw_interrupt_disable();

    previous = *ptr;
    *ptr = previous & val;
    rt_hw_interrupt_enable(irq_state);

    return previous;
}
/* Software fallback: 16-bit fetch-OR under an interrupt-disabled critical
 * section. Returns the value held before the update. */
rt_inline rt_atomic16_t rt_soft_atomic_or16(volatile rt_atomic16_t *ptr, rt_atomic16_t val)
{
    rt_atomic16_t previous;
    rt_base_t irq_state = rt_hw_interrupt_disable();

    previous = *ptr;
    *ptr = previous | val;
    rt_hw_interrupt_enable(irq_state);

    return previous;
}
rt_inline rt_atomic_t rt_soft_atomic_exchange(volatile rt_atomic_t *ptr, rt_atomic_t val) rt_inline rt_atomic_t rt_soft_atomic_exchange(volatile rt_atomic_t *ptr, rt_atomic_t val)
{ {
rt_base_t level; rt_base_t level;

View File

@@ -99,14 +99,22 @@ typedef rt_base_t rt_off_t; /**< Type for offset */
#endif #endif
#ifdef __cplusplus #ifdef __cplusplus
typedef rt_uint8_t rt_atomic8_t;
typedef rt_uint16_t rt_atomic16_t;
typedef rt_base_t rt_atomic_t; typedef rt_base_t rt_atomic_t;
#else #else
#if defined(RT_USING_STDC_ATOMIC) #if defined(RT_USING_STDC_ATOMIC)
#include <stdatomic.h> #include <stdatomic.h>
typedef _Atomic(rt_uint8_t) rt_atomic8_t;
typedef _Atomic(rt_uint16_t) rt_atomic16_t;
typedef _Atomic(rt_base_t) rt_atomic_t; typedef _Atomic(rt_base_t) rt_atomic_t;
#elif defined(RT_USING_HW_ATOMIC) #elif defined(RT_USING_HW_ATOMIC)
typedef rt_uint8_t rt_atomic8_t;
typedef rt_uint16_t rt_atomic16_t;
typedef rt_base_t rt_atomic_t; typedef rt_base_t rt_atomic_t;
#else #else
typedef rt_uint8_t rt_atomic8_t;
typedef rt_uint16_t rt_atomic16_t;
typedef rt_base_t rt_atomic_t; typedef rt_base_t rt_atomic_t;
#endif /* RT_USING_STDC_ATOMIC */ #endif /* RT_USING_STDC_ATOMIC */
#endif /* __cplusplus */ #endif /* __cplusplus */

View File

@@ -13,6 +13,14 @@ config RT_USING_HW_ATOMIC
bool bool
default n default n
config ARCH_USING_HW_ATOMIC_8
bool
default n
config ARCH_USING_HW_ATOMIC_16
bool
default n
config ARCH_CPU_BIG_ENDIAN config ARCH_CPU_BIG_ENDIAN
bool bool
@@ -60,6 +68,8 @@ config ARCH_ARM_CORTEX_M3
select ARCH_ARM_CORTEX_M select ARCH_ARM_CORTEX_M
select RT_USING_CPU_FFS select RT_USING_CPU_FFS
select RT_USING_HW_ATOMIC select RT_USING_HW_ATOMIC
select ARCH_USING_HW_ATOMIC_8
select ARCH_USING_HW_ATOMIC_16
config ARCH_ARM_MPU config ARCH_ARM_MPU
bool bool
@@ -71,6 +81,8 @@ config ARCH_ARM_CORTEX_M4
select ARCH_ARM_CORTEX_M select ARCH_ARM_CORTEX_M
select RT_USING_CPU_FFS select RT_USING_CPU_FFS
select RT_USING_HW_ATOMIC select RT_USING_HW_ATOMIC
select ARCH_USING_HW_ATOMIC_8
select ARCH_USING_HW_ATOMIC_16
config ARCH_ARM_CORTEX_M7 config ARCH_ARM_CORTEX_M7
bool bool
@@ -78,28 +90,38 @@ config ARCH_ARM_CORTEX_M7
select RT_USING_CPU_FFS select RT_USING_CPU_FFS
select RT_USING_CACHE select RT_USING_CACHE
select RT_USING_HW_ATOMIC select RT_USING_HW_ATOMIC
select ARCH_USING_HW_ATOMIC_8
select ARCH_USING_HW_ATOMIC_16
config ARCH_ARM_CORTEX_M85 config ARCH_ARM_CORTEX_M85
bool bool
select ARCH_ARM_CORTEX_M select ARCH_ARM_CORTEX_M
select RT_USING_CPU_FFS select RT_USING_CPU_FFS
select RT_USING_HW_ATOMIC select RT_USING_HW_ATOMIC
select ARCH_USING_HW_ATOMIC_8
select ARCH_USING_HW_ATOMIC_16
config ARCH_ARM_CORTEX_M23 config ARCH_ARM_CORTEX_M23
bool bool
select ARCH_ARM_CORTEX_M select ARCH_ARM_CORTEX_M
select RT_USING_HW_ATOMIC select RT_USING_HW_ATOMIC
select ARCH_USING_HW_ATOMIC_8
select ARCH_USING_HW_ATOMIC_16
config ARCH_ARM_CORTEX_M33 config ARCH_ARM_CORTEX_M33
bool bool
select ARCH_ARM_CORTEX_M select ARCH_ARM_CORTEX_M
select RT_USING_CPU_FFS select RT_USING_CPU_FFS
select RT_USING_HW_ATOMIC select RT_USING_HW_ATOMIC
select ARCH_USING_HW_ATOMIC_8
select ARCH_USING_HW_ATOMIC_16
config ARCH_ARM_CORTEX_R config ARCH_ARM_CORTEX_R
bool bool
select ARCH_ARM select ARCH_ARM
select RT_USING_HW_ATOMIC select RT_USING_HW_ATOMIC
select ARCH_USING_HW_ATOMIC_8
select ARCH_USING_HW_ATOMIC_16
config ARCH_ARM_CORTEX_R52 config ARCH_ARM_CORTEX_R52
bool bool
@@ -141,6 +163,8 @@ config ARCH_ARM_CORTEX_A
select ARCH_ARM_MMU select ARCH_ARM_MMU
select RT_USING_CPU_FFS select RT_USING_CPU_FFS
select RT_USING_HW_ATOMIC select RT_USING_HW_ATOMIC
select ARCH_USING_HW_ATOMIC_8
select ARCH_USING_HW_ATOMIC_16
if ARCH_ARM_CORTEX_A if ARCH_ARM_CORTEX_A
config RT_SMP_AUTO_BOOT config RT_SMP_AUTO_BOOT

View File

@@ -5,7 +5,8 @@
* *
* Change Logs: * Change Logs:
* Date Author Notes * Date Author Notes
* 2022-07-27 flybreak the first version * 2022-07-27 flybreak the first version
* 2026-03-09 wdfk-prog add 8/16-bit atomic operations support
*/ */
#include <rtthread.h> #include <rtthread.h>
@@ -14,6 +15,147 @@
#include <intrinsics.h> #include <intrinsics.h>
#include <iccarm_builtin.h> #include <iccarm_builtin.h>
#endif #endif
/**
\brief LDR Exclusive (8 bit)
\details Executes an exclusive LDR instruction for 8-bit values.
\param [in] ptr Pointer to data
\return value of type uint8_t at (*ptr)
*/
#if defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) /* ARM Compiler V6 */
#ifndef __LDREXB
#define __LDREXB (uint8_t)__builtin_arm_ldrex
#endif
#define __LDREXB_PRIV(ptr) ((rt_atomic8_t)__LDREXB((volatile uint8_t *)(ptr)))
#elif defined(__ARMCC_VERSION) /* ARM Compiler V5 */
#if __ARMCC_VERSION < 5060020
#define __LDREXB_PRIV(ptr) ((rt_atomic8_t ) __ldrex(ptr))
#else
#define __LDREXB_PRIV(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((rt_atomic8_t ) __ldrex(ptr)) _Pragma("pop")
#endif
#elif defined (__IAR_SYSTEMS_ICC__) /* for IAR Compiler */
/* IAR: 8-bit exclusive load, delegated to the compiler's LDREXB builtin.
 * The cast matches the builtin's (volatile unsigned char *) prototype. */
_Pragma("inline=forced") __intrinsic rt_atomic8_t __LDREXB_PRIV(volatile rt_atomic8_t *ptr)
{
return __iar_builtin_LDREXB((volatile unsigned char *)ptr);
}
#elif defined (__GNUC__) /* GNU GCC Compiler */
/* GCC: 8-bit exclusive load via inline asm. GCC >= 4.8 supports the "Q"
 * memory constraint (compiler forms the address operand itself); older
 * GCC passes the address in a register and needs a "memory" clobber. */
__attribute__((always_inline)) static inline rt_atomic8_t __LDREXB_PRIV(volatile rt_atomic8_t *addr)
{
uint32_t result;
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
__asm volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) );
#else
__asm volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
/* LDREXB zero-extends into a 32-bit register; narrow back to 8 bits. */
return (rt_atomic8_t)result;
}
#endif
/**
\brief STR Exclusive (8 bit)
\details Executes an exclusive STR instruction for 8-bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
\return 0 Function succeeded
\return 1 Function failed
*/
#if defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) /* ARM Compiler V6 */
#ifndef __STREXB
#define __STREXB (uint32_t)__builtin_arm_strex
#endif
#define __STREXB_PRIV(value, ptr) ((rt_atomic_t)__STREXB((uint8_t)(value), (volatile uint8_t *)(ptr)))
#elif defined(__ARMCC_VERSION) /* ARM Compiler V5 */
#if __ARMCC_VERSION < 5060020
#define __STREXB_PRIV(value, ptr) __strex(value, ptr)
#else
#define __STREXB_PRIV(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop")
#endif
#elif defined (__IAR_SYSTEMS_ICC__) /* for IAR Compiler */
/* IAR: 8-bit exclusive store via the STREXB builtin.
 * Returns 0 on success, 1 if the exclusive monitor was lost. */
_Pragma("inline=forced") __intrinsic rt_atomic_t __STREXB_PRIV(rt_atomic8_t value, volatile rt_atomic8_t *ptr)
{
return __iar_builtin_STREXB(value, (volatile unsigned char *)ptr);
}
#elif defined (__GNUC__) /* GNU GCC Compiler */
/* GCC: 8-bit exclusive store via inline asm.
 * Returns 0 on success, 1 if the exclusive monitor was lost; the
 * "=Q"(*addr) output operand tells GCC the pointed-to byte is written. */
__attribute__((always_inline)) static inline rt_atomic_t __STREXB_PRIV(rt_atomic8_t value, volatile rt_atomic8_t *addr)
{
rt_atomic_t result;
__asm volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
return result;
}
#endif
/**
\brief LDR Exclusive (16 bit)
\details Executes an exclusive LDR instruction for 16-bit values.
\param [in] ptr Pointer to data
\return value of type uint16_t at (*ptr)
*/
#if defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) /* ARM Compiler V6 */
#ifndef __LDREXH
#define __LDREXH (uint16_t)__builtin_arm_ldrex
#endif
#define __LDREXH_PRIV(ptr) ((rt_atomic16_t)__LDREXH((volatile uint16_t *)(ptr)))
#elif defined(__ARMCC_VERSION) /* ARM Compiler V5 */
#if __ARMCC_VERSION < 5060020
#define __LDREXH_PRIV(ptr) ((rt_atomic16_t ) __ldrex(ptr))
#else
#define __LDREXH_PRIV(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((rt_atomic16_t ) __ldrex(ptr)) _Pragma("pop")
#endif
#elif defined (__IAR_SYSTEMS_ICC__) /* for IAR Compiler */
/* IAR: 16-bit exclusive load, delegated to the compiler's LDREXH builtin. */
_Pragma("inline=forced") __intrinsic rt_atomic16_t __LDREXH_PRIV(volatile rt_atomic16_t *ptr)
{
return __iar_builtin_LDREXH((volatile unsigned short *)ptr);
}
#elif defined (__GNUC__) /* GNU GCC Compiler */
/* GCC: 16-bit exclusive load via inline asm. Same constraint split as the
 * 8-bit variant: "Q" for GCC >= 4.8, register address + "memory" before. */
__attribute__((always_inline)) static inline rt_atomic16_t __LDREXH_PRIV(volatile rt_atomic16_t *addr)
{
uint32_t result;
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
__asm volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) );
#else
__asm volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
/* LDREXH zero-extends into a 32-bit register; narrow back to 16 bits. */
return (rt_atomic16_t)result;
}
#endif
/**
\brief STR Exclusive (16 bit)
\details Executes an exclusive STR instruction for 16-bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
\return 0 Function succeeded
\return 1 Function failed
*/
#if defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) /* ARM Compiler V6 */
#ifndef __STREXH
#define __STREXH (uint32_t)__builtin_arm_strex
#endif
#define __STREXH_PRIV(value, ptr) ((rt_atomic_t)__STREXH((uint16_t)(value), (volatile uint16_t *)(ptr)))
#elif defined(__ARMCC_VERSION) /* ARM Compiler V5 */
#if __ARMCC_VERSION < 5060020
#define __STREXH_PRIV(value, ptr) __strex(value, ptr)
#else
#define __STREXH_PRIV(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop")
#endif
#elif defined (__IAR_SYSTEMS_ICC__) /* for IAR Compiler */
/* IAR: 16-bit exclusive store via the STREXH builtin.
 * Returns 0 on success, 1 if the exclusive monitor was lost. */
_Pragma("inline=forced") __intrinsic rt_atomic_t __STREXH_PRIV(rt_atomic16_t value, volatile rt_atomic16_t *ptr)
{
return __iar_builtin_STREXH(value, (volatile unsigned short *)ptr);
}
#elif defined (__GNUC__) /* GNU GCC Compiler */
/* GCC: 16-bit exclusive store via inline asm.
 * Returns 0 on success, 1 if the exclusive monitor was lost. */
__attribute__((always_inline)) static inline rt_atomic_t __STREXH_PRIV(rt_atomic16_t value, volatile rt_atomic16_t *addr)
{
rt_atomic_t result;
__asm volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
return result;
}
#endif
/** /**
\brief LDR Exclusive (32 bit) \brief LDR Exclusive (32 bit)
\details Executes a exclusive LDR instruction for 32 bit values. \details Executes a exclusive LDR instruction for 32 bit values.
@@ -92,6 +234,42 @@ void rt_hw_atomic_store(volatile rt_atomic_t *ptr, rt_atomic_t val)
} while ((__STREXW(val, ptr)) != 0U); } while ((__STREXW(val, ptr)) != 0U);
} }
/* Atomically read an 8-bit value; mirrors the 32-bit rt_hw_atomic_load
 * pattern (LDREX the value, then STREX it back unchanged, retrying until
 * the exclusive store succeeds). */
rt_atomic8_t rt_hw_atomic_load8(volatile rt_atomic8_t *ptr)
{
    rt_atomic8_t current;

    do
    {
        current = __LDREXB_PRIV(ptr);
    } while (__STREXB_PRIV(current, ptr) != 0U);

    return current;
}
/* Atomically write an 8-bit value using an LDREXB/STREXB retry loop,
 * mirroring the 32-bit rt_hw_atomic_store pattern. */
void rt_hw_atomic_store8(volatile rt_atomic8_t *ptr, rt_atomic8_t val)
{
    do
    {
        (void)__LDREXB_PRIV(ptr); /* arm the exclusive monitor */
    } while (__STREXB_PRIV(val, ptr) != 0U);
}
/* Atomically read a 16-bit value; mirrors the 32-bit rt_hw_atomic_load
 * pattern (LDREX the value, then STREX it back unchanged, retrying until
 * the exclusive store succeeds). */
rt_atomic16_t rt_hw_atomic_load16(volatile rt_atomic16_t *ptr)
{
    rt_atomic16_t current;

    do
    {
        current = __LDREXH_PRIV(ptr);
    } while (__STREXH_PRIV(current, ptr) != 0U);

    return current;
}
/* Atomically write a 16-bit value using an LDREXH/STREXH retry loop,
 * mirroring the 32-bit rt_hw_atomic_store pattern. */
void rt_hw_atomic_store16(volatile rt_atomic16_t *ptr, rt_atomic16_t val)
{
    do
    {
        (void)__LDREXH_PRIV(ptr); /* arm the exclusive monitor */
    } while (__STREXH_PRIV(val, ptr) != 0U);
}
rt_atomic_t rt_hw_atomic_add(volatile rt_atomic_t *ptr, rt_atomic_t val) rt_atomic_t rt_hw_atomic_add(volatile rt_atomic_t *ptr, rt_atomic_t val)
{ {
rt_atomic_t oldval; rt_atomic_t oldval;
@@ -112,6 +290,46 @@ rt_atomic_t rt_hw_atomic_sub(volatile rt_atomic_t *ptr, rt_atomic_t val)
return oldval; return oldval;
} }
/* 8-bit fetch-AND via an LDREXB/STREXB retry loop.
 * Returns the value held before the update. */
rt_atomic8_t rt_hw_atomic_and8(volatile rt_atomic8_t *ptr, rt_atomic8_t val)
{
    rt_atomic8_t previous;

    do
    {
        previous = __LDREXB_PRIV(ptr);
    } while (__STREXB_PRIV((rt_atomic8_t)(previous & val), ptr) != 0U);

    return previous;
}
/* 8-bit fetch-OR via an LDREXB/STREXB retry loop.
 * Returns the value held before the update. */
rt_atomic8_t rt_hw_atomic_or8(volatile rt_atomic8_t *ptr, rt_atomic8_t val)
{
    rt_atomic8_t previous;

    do
    {
        previous = __LDREXB_PRIV(ptr);
    } while (__STREXB_PRIV((rt_atomic8_t)(previous | val), ptr) != 0U);

    return previous;
}
/* 16-bit fetch-AND via an LDREXH/STREXH retry loop.
 * Returns the value held before the update. */
rt_atomic16_t rt_hw_atomic_and16(volatile rt_atomic16_t *ptr, rt_atomic16_t val)
{
    rt_atomic16_t previous;

    do
    {
        previous = __LDREXH_PRIV(ptr);
    } while (__STREXH_PRIV((rt_atomic16_t)(previous & val), ptr) != 0U);

    return previous;
}
/* 16-bit fetch-OR via an LDREXH/STREXH retry loop.
 * Returns the value held before the update. */
rt_atomic16_t rt_hw_atomic_or16(volatile rt_atomic16_t *ptr, rt_atomic16_t val)
{
    rt_atomic16_t previous;

    do
    {
        previous = __LDREXH_PRIV(ptr);
    } while (__STREXH_PRIV((rt_atomic16_t)(previous | val), ptr) != 0U);

    return previous;
}
rt_atomic_t rt_hw_atomic_and(volatile rt_atomic_t *ptr, rt_atomic_t val) rt_atomic_t rt_hw_atomic_and(volatile rt_atomic_t *ptr, rt_atomic_t val)
{ {
rt_atomic_t oldval; rt_atomic_t oldval;