Integrate nxmutex support fully into nxsem

This puts the mutex support fully inside nxsem, allowing the mutex
to be locked and the holder to be set with a single atomic
operation.

This enables fast mutex locking from userspace without taking
critical sections, which may be heavy in SMP, and allows cleaning up
the nxmutex library in the future.

Signed-off-by: Jukka Laitinen <jukka.laitinen@tii.ae>
Jukka Laitinen
2025-03-31 08:37:10 +03:00
committed by Xiang Xiao
parent 5c188b6625
commit b6f2729730
19 changed files with 364 additions and 119 deletions
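For orientation, below is a minimal sketch of the fast path this change enables. It is not the NuttX implementation: it uses C11 <stdatomic.h> in place of the kernel's atomic_try_cmpxchg_acquire/release helpers, and fast_trylock, fast_unlock, struct fast_mutex and the TID value are hypothetical names for illustration. Only the NXSEM_NO_MHOLDER constant and the single-word holder scheme are taken from the diff below.

/* Sketch only: a stand-alone model of the mutex fast path, assuming C11
 * atomics instead of NuttX's atomic_try_cmpxchg_acquire/release wrappers.
 * The holder word either contains the sentinel NXSEM_NO_MHOLDER or the TID
 * of the owner, so locking and setting the holder is one atomic operation.
 */

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NXSEM_NO_MHOLDER  ((uint32_t)0x7ffffffe)  /* value from the diff */

struct fast_mutex
{
  _Atomic uint32_t mholder;   /* models sem->val.mholder */
};

/* Fast lock: succeeds only when nobody holds the mutex. */

static bool fast_trylock(struct fast_mutex *m, uint32_t tid)
{
  uint32_t expected = NXSEM_NO_MHOLDER;
  return atomic_compare_exchange_strong_explicit(&m->mholder, &expected, tid,
                                                 memory_order_acquire,
                                                 memory_order_relaxed);
}

/* Fast unlock: succeeds only when we are the holder and the blocking bit is
 * clear; otherwise the kernel slow path must run to wake a waiter.
 */

static bool fast_unlock(struct fast_mutex *m, uint32_t tid)
{
  uint32_t expected = tid;
  return atomic_compare_exchange_strong_explicit(&m->mholder, &expected,
                                                 NXSEM_NO_MHOLDER,
                                                 memory_order_release,
                                                 memory_order_relaxed);
}

int main(void)
{
  struct fast_mutex m = { NXSEM_NO_MHOLDER };
  uint32_t tid = 42;                               /* hypothetical TID */

  printf("lock:   %d\n", fast_trylock(&m, tid));   /* 1: acquired */
  printf("relock: %d\n", fast_trylock(&m, tid));   /* 0: slow path needed */
  printf("unlock: %d\n", fast_unlock(&m, tid));    /* 1: released */
  return 0;
}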

View File

@@ -37,8 +37,10 @@
****************************************************************************/
#define NXMUTEX_NO_HOLDER ((pid_t)-1)
#define NXMUTEX_INITIALIZER {NXSEM_INITIALIZER(1, SEM_TYPE_MUTEX | \
SEM_PRIO_INHERIT), NXMUTEX_NO_HOLDER}
#define NXMUTEX_INITIALIZER { \
NXSEM_INITIALIZER(NXSEM_NO_MHOLDER, SEM_TYPE_MUTEX | SEM_PRIO_INHERIT), \
NXMUTEX_NO_HOLDER}
#define NXRMUTEX_INITIALIZER {NXMUTEX_INITIALIZER, 0}
/****************************************************************************

View File

@@ -45,23 +45,42 @@
/* semcount, flags, waitlist, hhead */
# define NXSEM_INITIALIZER(c, f) \
{(c), (f), SEM_WAITLIST_INITIALIZER, NULL}
{{(c)}, (f), SEM_WAITLIST_INITIALIZER, NULL}
# else
/* semcount, flags, waitlist, holder[2] */
# define NXSEM_INITIALIZER(c, f) \
{(c), (f), SEM_WAITLIST_INITIALIZER, SEMHOLDER_INITIALIZER}
{{(c)}, (f), SEM_WAITLIST_INITIALIZER, SEMHOLDER_INITIALIZER}
# endif
#else /* CONFIG_PRIORITY_INHERITANCE */
/* semcount, flags, waitlist */
# define NXSEM_INITIALIZER(c, f) \
{(c), (f), SEM_WAITLIST_INITIALIZER}
{{(c)}, (f), SEM_WAITLIST_INITIALIZER}
#endif /* CONFIG_PRIORITY_INHERITANCE */
/* Macro to retrieve sem count */
/* Macros to retrieve sem count and to check if nxsem is mutex */
#define NXSEM_COUNT(s) ((FAR atomic_t *)&(s)->semcount)
#define NXSEM_COUNT(s) ((FAR atomic_t *)&(s)->val.semcount)
#define NXSEM_IS_MUTEX(s) (((s)->flags & SEM_TYPE_MUTEX) != 0)
/* Mutex related helper macros */
#define NXSEM_MBLOCKING_BIT (((uint32_t)1) << 31)
#define NXSEM_NO_MHOLDER ((uint32_t)0x7ffffffe)
#define NXSEM_MRESET ((uint32_t)0x7fffffff)
/* Macro to retrieve mutex's atomic holder's ptr */
#define NXSEM_MHOLDER(s) ((FAR atomic_t *)&(s)->val.mholder)
/* Check if holder value (TID) is not NO_HOLDER or RESET */
#define NXSEM_MACQUIRED(h) (((h) & NXSEM_NO_MHOLDER) != NXSEM_NO_MHOLDER)
/* Check if mutex is acquired and blocks some other task */
#define NXSEM_MBLOCKING(h) (((h) & NXSEM_MBLOCKING_BIT) != 0)
/****************************************************************************
* Public Type Definitions
@@ -128,7 +147,7 @@ extern "C"
*
****************************************************************************/
int nxsem_init(FAR sem_t *sem, int pshared, unsigned int value);
int nxsem_init(FAR sem_t *sem, int pshared, uint32_t value);
/****************************************************************************
* Name: nxsem_destroy

View File

@@ -348,8 +348,9 @@ typedef struct pthread_mutex_s pthread_mutex_t;
#define PTHREAD_MUTEX_DEFAULT_PRIO_FLAGS (PTHREAD_MUTEX_DEFAULT_PRIO_INHERIT | \
PTHREAD_MUTEX_DEFAULT_PRIO_PROTECT)
#define PTHREAD_NXMUTEX_INITIALIZER { \
NXSEM_INITIALIZER(1, SEM_TYPE_MUTEX | PTHREAD_MUTEX_DEFAULT_PRIO_FLAGS), \
#define PTHREAD_NXMUTEX_INITIALIZER { \
NXSEM_INITIALIZER(NXSEM_NO_MHOLDER, \
SEM_TYPE_MUTEX | PTHREAD_MUTEX_DEFAULT_PRIO_FLAGS), \
NXMUTEX_NO_HOLDER}
#define PTHREAD_NXRMUTEX_INITIALIZER {PTHREAD_NXMUTEX_INITIALIZER, 0}

View File

@@ -71,8 +71,8 @@ struct semholder_s
FAR struct semholder_s *flink; /* List of semaphore's holder */
#endif
FAR struct semholder_s *tlink; /* List of task held semaphores */
FAR struct sem_s *sem; /* Ths corresponding semaphore */
FAR struct tcb_s *htcb; /* Ths corresponding TCB */
FAR struct sem_s *sem; /* This corresponding semaphore */
FAR struct tcb_s *htcb; /* This corresponding TCB */
int32_t counts; /* Number of counts owned by this holder */
};
@@ -104,8 +104,16 @@ struct semholder_s
struct sem_s
{
volatile int32_t semcount; /* >0 -> Num counts available */
/* <0 -> Num tasks waiting for semaphore */
union
{
volatile int32_t semcount; /* >0 -> Num counts available */
/* <0 -> Num tasks waiting for semaphore */
volatile uint32_t mholder; /* == NXSEM_NO_MHOLDER -> mutex has no holder */
/* == NXSEM_RESET -> mutex has been reset */
/* Otherwise: */
/* bits[30:0]: TID of the current holder */
/* bit [31]: Mutex is blocking some task */
} val;
/* If priority inheritance is enabled, then we have to keep track of which
* tasks hold references to the semaphore.
@@ -137,18 +145,18 @@ typedef struct sem_s sem_t;
/* semcount, flags, waitlist, hhead */
# define SEM_INITIALIZER(c) \
{(c), 0, SEM_WAITLIST_INITIALIZER, NULL}
{{(c)}, 0, SEM_WAITLIST_INITIALIZER, NULL}
# else
/* semcount, flags, waitlist, holder[2] */
# define SEM_INITIALIZER(c) \
{(c), 0, SEM_WAITLIST_INITIALIZER, SEMHOLDER_INITIALIZER}
{{(c)}, 0, SEM_WAITLIST_INITIALIZER, SEMHOLDER_INITIALIZER}
# endif
#else
/* semcount, flags, waitlist */
# define SEM_INITIALIZER(c) \
{(c), 0, SEM_WAITLIST_INITIALIZER}
{{(c)}, 0, SEM_WAITLIST_INITIALIZER}
#endif
#define SEM_WAITLIST(sem) (&((sem)->waitlist))
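To make the new holder-word encoding concrete, here is a small stand-alone illustration; it is a sketch, not kernel code. The constants and check macros are copied from this commit, while TID 42 and the state list are made-up values showing how NXSEM_MACQUIRED and NXSEM_MBLOCKING classify sem->val.mholder.

#include <stdint.h>
#include <stdio.h>

#define NXSEM_MBLOCKING_BIT (((uint32_t)1) << 31)
#define NXSEM_NO_MHOLDER    ((uint32_t)0x7ffffffe)
#define NXSEM_MRESET        ((uint32_t)0x7fffffff)

#define NXSEM_MACQUIRED(h)  (((h) & NXSEM_NO_MHOLDER) != NXSEM_NO_MHOLDER)
#define NXSEM_MBLOCKING(h)  (((h) & NXSEM_MBLOCKING_BIT) != 0)

int main(void)
{
  /* Hypothetical holder values a mutex may take over its lifetime */

  uint32_t states[] =
  {
    NXSEM_NO_MHOLDER,                    /* free, nobody waiting        */
    42,                                  /* held by TID 42, no waiters  */
    42 | NXSEM_MBLOCKING_BIT,            /* held by TID 42, with waiters */
    NXSEM_MRESET,                        /* reset, nobody waiting       */
    NXSEM_MRESET | NXSEM_MBLOCKING_BIT,  /* reset while tasks wait      */
  };

  for (unsigned i = 0; i < sizeof(states) / sizeof(states[0]); i++)
    {
      uint32_t h = states[i];
      printf("holder=0x%08x acquired=%d blocking=%d tid_bits=%u\n",
             (unsigned)h, NXSEM_MACQUIRED(h), NXSEM_MBLOCKING(h),
             (unsigned)(h & ~NXSEM_MBLOCKING_BIT));
    }

  return 0;
}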

View File

@@ -113,7 +113,7 @@ static void nxmutex_add_backtrace(FAR mutex_t *mutex)
int nxmutex_init(FAR mutex_t *mutex)
{
int ret = nxsem_init(&mutex->sem, 0, 1);
int ret = nxsem_init(&mutex->sem, 0, NXSEM_NO_MHOLDER);
if (ret < 0)
{
@@ -217,12 +217,7 @@ int nxmutex_get_holder(FAR mutex_t *mutex)
bool nxmutex_is_locked(FAR mutex_t *mutex)
{
int cnt;
int ret;
ret = nxsem_get_value(&mutex->sem, &cnt);
return ret >= 0 && cnt < 1;
return NXSEM_MACQUIRED(mutex->sem.val.mholder);
}
/****************************************************************************

View File

@@ -63,7 +63,7 @@
int nxsem_get_value(FAR sem_t *sem, FAR int *sval)
{
if (sem != NULL && sval != NULL)
if (sem != NULL && sval != NULL && !NXSEM_IS_MUTEX(sem))
{
*sval = atomic_read(NXSEM_COUNT(sem));
return OK;

View File

@@ -62,15 +62,15 @@
*
****************************************************************************/
int nxsem_init(FAR sem_t *sem, int pshared, unsigned int value)
int nxsem_init(FAR sem_t *sem, int pshared, uint32_t value)
{
UNUSED(pshared);
DEBUGASSERT(sem != NULL && value <= SEM_VALUE_MAX);
DEBUGASSERT(sem != NULL);
/* Initialize the semaphore count */
/* Initialize the semaphore count or mutex holder */
sem->semcount = (int32_t)value;
sem->val.semcount = (int32_t)value;
/* Initialize semaphore wait list */

View File

@@ -29,6 +29,7 @@
#include <errno.h>
#include <assert.h>
#include <nuttx/sched.h>
#include <nuttx/semaphore.h>
#include <nuttx/atomic.h>
@@ -133,8 +134,9 @@ int nxsem_post(FAR sem_t *sem)
# endif
)
{
int32_t old = 0;
if (atomic_try_cmpxchg_release(NXSEM_COUNT(sem), &old, 1))
int32_t old = _SCHED_GETTID();
if (atomic_try_cmpxchg_release(NXSEM_MHOLDER(sem), &old,
NXSEM_NO_MHOLDER))
{
return OK;
}

View File

@@ -30,6 +30,7 @@
#include <assert.h>
#include <sched.h>
#include <nuttx/sched.h>
#include <nuttx/init.h>
#include <nuttx/semaphore.h>
#include <nuttx/atomic.h>
@@ -128,8 +129,9 @@ int nxsem_trywait(FAR sem_t *sem)
#endif
)
{
int32_t old = 1;
return atomic_try_cmpxchg_acquire(NXSEM_COUNT(sem), &old, 0) ?
int32_t tid = _SCHED_GETTID();
int32_t old = NXSEM_NO_MHOLDER;
return atomic_try_cmpxchg_acquire(NXSEM_MHOLDER(sem), &old, tid) ?
OK : -EAGAIN;
}

View File

@@ -30,6 +30,7 @@
#include <assert.h>
#include <sched.h>
#include <nuttx/sched.h>
#include <nuttx/init.h>
#include <nuttx/cancelpt.h>
#include <nuttx/semaphore.h>
@@ -156,8 +157,9 @@ int nxsem_wait(FAR sem_t *sem)
# endif
)
{
int32_t old = 1;
if (atomic_try_cmpxchg_acquire(NXSEM_COUNT(sem), &old, 0))
int32_t tid = _SCHED_GETTID();
int32_t old = NXSEM_NO_MHOLDER;
if (atomic_try_cmpxchg_acquire(NXSEM_MHOLDER(sem), &old, tid))
{
return OK;
}

View File

@@ -66,7 +66,7 @@ int pthread_mutex_init(FAR pthread_mutex_t *mutex,
return EINVAL;
}
/* Initialize the mutex like a semaphore with initial count = 1 */
/* Initialize the semaphore of type mutex */
status = mutex_init(&mutex->mutex);
if (status < 0)

View File

@@ -61,6 +61,9 @@
int nxsem_destroy(FAR sem_t *sem)
{
int32_t old;
bool mutex = NXSEM_IS_MUTEX(sem);
FAR atomic_t *val = mutex ? NXSEM_MHOLDER(sem) : NXSEM_COUNT(sem);
int32_t new = mutex ? NXSEM_NO_MHOLDER : 1;
DEBUGASSERT(sem != NULL);
@@ -74,15 +77,16 @@ int nxsem_destroy(FAR sem_t *sem)
* leave the count unchanged but still return OK.
*/
old = atomic_read(NXSEM_COUNT(sem));
old = atomic_read(val);
do
{
if (old < 0)
if ((mutex && NXSEM_MBLOCKING(old)) ||
(!mutex && old < 0))
{
break;
}
}
while (!atomic_try_cmpxchg_release(NXSEM_COUNT(sem), &old, 1));
while (!atomic_try_cmpxchg_release(val, &old, new));
/* Release holders of the semaphore */

View File

@@ -876,7 +876,9 @@ void nxsem_canceled(FAR struct tcb_s *stcb, FAR sem_t *sem)
{
/* Check our assumptions */
DEBUGASSERT(atomic_read(NXSEM_COUNT(sem)) <= 0);
DEBUGASSERT(NXSEM_IS_MUTEX(sem) || atomic_read(NXSEM_COUNT(sem)) <= 0);
DEBUGASSERT(!NXSEM_IS_MUTEX(sem) ||
NXSEM_MACQUIRED(atomic_read(NXSEM_MHOLDER(sem))));
/* Adjust the priority of every holder as necessary */
@@ -970,11 +972,14 @@ void nxsem_release_all(FAR struct tcb_s *htcb)
nxsem_freeholder(sem, pholder);
/* Increment the count on the semaphore, to releases the count
* that was taken by sem_wait() or sem_post().
*/
if (!NXSEM_IS_MUTEX(sem))
{
/* Increment the count on the semaphore, to releases the count
* that was taken by sem_wait() or sem_post().
*/
atomic_fetch_add(NXSEM_COUNT(sem), 1);
atomic_fetch_add(NXSEM_COUNT(sem), 1);
}
}
}

View File

@@ -73,10 +73,12 @@ int nxsem_post_slow(FAR sem_t *sem)
{
FAR struct tcb_s *stcb = NULL;
irqstate_t flags;
int32_t sem_count;
#if defined(CONFIG_PRIORITY_INHERITANCE) || defined(CONFIG_PRIORITY_PROTECT)
uint8_t proto;
#endif
bool blocking = false;
bool mutex = NXSEM_IS_MUTEX(sem);
uint32_t mholder = NXSEM_NO_MHOLDER;
/* The following operations must be performed with interrupts
* disabled because sem_post() may be called from an interrupt
@@ -85,19 +87,54 @@ int nxsem_post_slow(FAR sem_t *sem)
flags = enter_critical_section();
/* Check the maximum allowable value */
sem_count = atomic_read(NXSEM_COUNT(sem));
do
if (mutex)
{
if (sem_count >= SEM_VALUE_MAX)
/* Mutex post from interrupt context is not allowed */
DEBUGASSERT(!up_interrupt_context());
/* Lock the mutex for us by setting the blocking bit */
mholder = atomic_fetch_or(NXSEM_MHOLDER(sem), NXSEM_MBLOCKING_BIT);
/* Mutex post from another thread is not allowed, unless
* called from nxsem_reset
*/
DEBUGASSERT(mholder == (NXSEM_MBLOCKING_BIT | NXSEM_MRESET) ||
(mholder & (~NXSEM_MBLOCKING_BIT)) == nxsched_gettid());
blocking = NXSEM_MBLOCKING(mholder);
if (!blocking)
{
leave_critical_section(flags);
return -EOVERFLOW;
if (mholder != NXSEM_MRESET)
{
mholder = NXSEM_NO_MHOLDER;
}
atomic_set(NXSEM_MHOLDER(sem), mholder);
}
}
while (!atomic_try_cmpxchg_release(NXSEM_COUNT(sem), &sem_count,
sem_count + 1));
else
{
int32_t sem_count;
/* Check the maximum allowable value */
sem_count = atomic_read(NXSEM_COUNT(sem));
do
{
if (sem_count >= SEM_VALUE_MAX)
{
leave_critical_section(flags);
return -EOVERFLOW;
}
}
while (!atomic_try_cmpxchg_release(NXSEM_COUNT(sem), &sem_count,
sem_count + 1));
blocking = sem_count < 0;
}
/* Perform the semaphore unlock operation, releasing this task as a
* holder then also incrementing the count on the semaphore.
@@ -116,7 +153,10 @@ int nxsem_post_slow(FAR sem_t *sem)
* initialized if the semaphore is to used for signaling purposes.
*/
nxsem_release_holder(sem);
if (!mutex || blocking)
{
nxsem_release_holder(sem);
}
#if defined(CONFIG_PRIORITY_INHERITANCE) || defined(CONFIG_PRIORITY_PROTECT)
/* Don't let any unblocked tasks run until we complete any priority
@@ -138,7 +178,7 @@ int nxsem_post_slow(FAR sem_t *sem)
* there must be some task waiting for the semaphore.
*/
if (sem_count < 0)
if (blocking)
{
/* Check if there are any tasks in the waiting for semaphore
* task list that are waiting for this semaphore. This is a
@@ -147,7 +187,6 @@ int nxsem_post_slow(FAR sem_t *sem)
*/
stcb = (FAR struct tcb_s *)dq_remfirst(SEM_WAITLIST(sem));
if (stcb != NULL)
{
FAR struct tcb_s *rtcb = this_task();
@@ -156,7 +195,17 @@ int nxsem_post_slow(FAR sem_t *sem)
* it is awakened.
*/
nxsem_add_holder_tcb(stcb, sem);
if (mutex)
{
uint32_t blocking_bit = dq_empty(SEM_WAITLIST(sem)) ?
NXSEM_MBLOCKING_BIT : 0;
atomic_set(NXSEM_MHOLDER(sem),
((uint32_t)stcb->pid) | blocking_bit);
}
else
{
nxsem_add_holder_tcb(stcb, sem);
}
/* Stop the watchdog timer */
@@ -178,14 +227,6 @@ int nxsem_post_slow(FAR sem_t *sem)
up_switch_context(stcb, rtcb);
}
}
#if 0 /* REVISIT: This can fire on IOB throttle semaphore */
else
{
/* This should not happen. */
DEBUGPANIC();
}
#endif
}
/* Check if we need to drop the priority of any threads holding

View File

@@ -86,7 +86,8 @@ void nxsem_recover(FAR struct tcb_s *tcb)
if (tcb->task_state == TSTATE_WAIT_SEM)
{
FAR sem_t *sem = tcb->waitobj;
DEBUGASSERT(sem != NULL && atomic_read(NXSEM_COUNT(sem)) < 0);
DEBUGASSERT(sem != NULL);
/* Restore the correct priority of all threads that hold references
* to this semaphore.
@@ -104,14 +105,29 @@ void nxsem_recover(FAR struct tcb_s *tcb)
* place.
*/
atomic_fetch_add(NXSEM_COUNT(sem), 1);
if (NXSEM_IS_MUTEX(sem))
{
/* Clear the blocking bit, if not blocked any more */
if (dq_empty(SEM_WAITLIST(sem)))
{
uint32_t mholder =
atomic_fetch_and(NXSEM_MHOLDER(sem), ~NXSEM_MBLOCKING_BIT);
DEBUGASSERT(NXSEM_MBLOCKING(mholder));
}
}
else
{
DEBUGASSERT(atomic_read(NXSEM_COUNT(sem)) < 0);
atomic_fetch_add(NXSEM_COUNT(sem), 1);
}
#ifdef CONFIG_MM_KMAP
kmm_unmap(sem);
#endif
}
/* Release all semphore holders for the task */
/* Release all semaphore holders for the task */
nxsem_release_all(tcb);

View File

@@ -46,6 +46,11 @@
* tasks waiting on a count. This kind of operation is sometimes required
* within the OS (only) for certain error handling conditions.
*
* Mutex is simply posted until it is not blocking any tasks. If the
* requested count is 0, a single running holder is left. If the requested
* count is 1, the mutex is set to "reset". Other requested counts are not
* allowed for mutex.
*
* Input Parameters:
* sem - Semaphore descriptor to be reset
* count - The requested semaphore count
@@ -76,38 +81,65 @@ int nxsem_reset(FAR sem_t *sem, int16_t count)
flags = enter_critical_section();
/* A negative count indicates that the negated number of threads are
* waiting to take a count from the semaphore. Loop here, handing
* out counts to any waiting threads.
*/
while (atomic_read(NXSEM_COUNT(sem)) < 0 && count > 0)
if (NXSEM_IS_MUTEX(sem))
{
/* Give out one counting, waking up one of the waiting threads
* and, perhaps, kicking off a lot of priority inheritance
* logic (REVISIT).
/* Support only resetting mutex by removing one waiter */
DEBUGASSERT(count == 1);
/* Post the mutex once with holder value set to RESET | BLOCKS
* so we know that it is ok in this case to call the post from
* another thread.
*/
DEBUGVERIFY(nxsem_post(sem));
count--;
}
atomic_set(NXSEM_MHOLDER(sem),
NXSEM_MRESET | NXSEM_MBLOCKING_BIT);
/* We exit the above loop with either (1) no threads waiting for the
* (i.e., with sem->semcount >= 0). In this case, 'count' holds the
* the new value of the semaphore count. OR (2) with threads still
* waiting but all of the semaphore counts exhausted: The current
* value of sem->semcount is already correct in this case.
*/
semcount = atomic_read(NXSEM_COUNT(sem));
do
{
if (semcount < 0)
if (!dq_empty(SEM_WAITLIST(sem)))
{
break;
DEBUGVERIFY(nxsem_post(sem));
}
else
{
atomic_set(NXSEM_MHOLDER(sem), NXSEM_MRESET);
}
}
while (!atomic_try_cmpxchg_release(NXSEM_COUNT(sem), &semcount, count));
else
{
/* A negative count indicates that the negated number of threads are
* waiting to take a count from the semaphore. Loop here, handing
* out counts to any waiting threads.
*/
while (atomic_read(NXSEM_COUNT(sem)) < 0 && count > 0)
{
/* Give out one counting, waking up one of the waiting threads
* and, perhaps, kicking off a lot of priority inheritance
* logic (REVISIT).
*/
DEBUGVERIFY(nxsem_post(sem));
count--;
}
/* We exit the above loop with either (1) no threads waiting for the
* (i.e., with sem->semcount >= 0). In this case, 'count' holds the
* the new value of the semaphore count. OR (2) with threads still
* waiting but all of the semaphore counts exhausted: The current
* value of sem->semcount is already correct in this case.
*/
semcount = atomic_read(NXSEM_COUNT(sem));
do
{
if (semcount < 0)
{
break;
}
}
while (!atomic_try_cmpxchg_release(NXSEM_COUNT(sem), &semcount,
count));
}
/* Allow any pending context switches to occur now */

View File

@@ -63,8 +63,11 @@
int nxsem_trywait_slow(FAR sem_t *sem)
{
irqstate_t flags;
int32_t semcount;
int ret;
int ret = -EAGAIN;
bool mutex = NXSEM_IS_MUTEX(sem);
FAR atomic_t *val = mutex ? NXSEM_MHOLDER(sem) : NXSEM_COUNT(sem);
int32_t old;
int32_t new;
/* The following operations must be performed with interrupts disabled
* because sem_post() may be called from an interrupt handler.
@@ -74,29 +77,57 @@ int nxsem_trywait_slow(FAR sem_t *sem)
/* If the semaphore is available, give it to the requesting task */
semcount = atomic_read(NXSEM_COUNT(sem));
if (mutex)
{
new = nxsched_gettid();
}
old = atomic_read(val);
do
{
if (semcount <= 0)
if (mutex)
{
leave_critical_section(flags);
return -EAGAIN;
if (NXSEM_MACQUIRED(old))
{
goto out;
}
}
else
{
if (old <= 0)
{
goto out;
}
new = old - 1;
}
}
while (!atomic_try_cmpxchg_acquire(NXSEM_COUNT(sem),
&semcount, semcount - 1));
while (!atomic_try_cmpxchg_acquire(val, &old, new));
/* It is, let the task take the semaphore */
ret = nxsem_protect_wait(sem);
if (ret < 0)
{
atomic_fetch_add(NXSEM_COUNT(sem), 1);
if (mutex)
{
atomic_set(NXSEM_MHOLDER(sem), NXSEM_NO_MHOLDER);
}
else
{
atomic_fetch_add(NXSEM_COUNT(sem), 1);
}
leave_critical_section(flags);
return ret;
goto out;
}
nxsem_add_holder(sem);
if (!mutex)
{
nxsem_add_holder(sem);
}
out:
/* Interrupts may now be enabled. */

View File

@@ -71,9 +71,12 @@
int nxsem_wait_slow(FAR sem_t *sem)
{
FAR struct tcb_s *rtcb;
FAR struct tcb_s *rtcb = this_task();
irqstate_t flags;
int ret;
bool unlocked;
FAR struct tcb_s *htcb = NULL;
bool mutex = NXSEM_IS_MUTEX(sem);
/* The following operations must be performed with interrupts
* disabled because nxsem_post() may be called from an interrupt
@@ -86,19 +89,62 @@ int nxsem_wait_slow(FAR sem_t *sem)
/* Check if the lock is available */
if (atomic_fetch_sub(NXSEM_COUNT(sem), 1) > 0)
if (mutex)
{
uint32_t mholder;
/* We lock the mutex for us by setting the blocks bit,
* this is all that is needed if we block
*/
mholder = atomic_fetch_or(NXSEM_MHOLDER(sem), NXSEM_MBLOCKING_BIT);
if (NXSEM_MACQUIRED(mholder))
{
/* htcb gets NULL if
* - the only holder did exit (without posting first)
* - the mutex was reset before
* In both cases we simply acquire the mutex, thus recovering
* from these situations.
*/
htcb = nxsched_get_tcb(mholder & (~NXSEM_MBLOCKING_BIT));
}
unlocked = htcb == NULL;
}
else
{
unlocked = atomic_fetch_sub(NXSEM_COUNT(sem), 1) > 0;
}
if (unlocked)
{
/* It is, let the task take the semaphore. */
ret = nxsem_protect_wait(sem);
if (ret < 0)
{
atomic_fetch_add(NXSEM_COUNT(sem), 1);
if (mutex)
{
atomic_set(NXSEM_MHOLDER(sem), NXSEM_NO_MHOLDER);
}
else
{
atomic_fetch_add(NXSEM_COUNT(sem), 1);
}
leave_critical_section(flags);
return ret;
}
nxsem_add_holder(sem);
/* For mutexes, we only add the holder to the tasks list at the
* time when a task blocks on the mutex, for priority restoration
*/
if (!mutex)
{
nxsem_add_holder(sem);
}
}
/* The semaphore is NOT available, We will have to block the
@@ -110,7 +156,6 @@ int nxsem_wait_slow(FAR sem_t *sem)
#ifdef CONFIG_PRIORITY_INHERITANCE
uint8_t prioinherit = sem->flags & SEM_PRIO_MASK;
#endif
rtcb = this_task();
/* First, verify that the task is not already waiting on a
* semaphore
@@ -126,6 +171,13 @@ int nxsem_wait_slow(FAR sem_t *sem)
rtcb->waitobj = sem;
/* In case of a mutex, store the previous holder in the task's list */
if (mutex)
{
nxsem_add_holder_tcb(htcb, sem);
}
/* If priority inheritance is enabled, then check the priority of
* the holder of the semaphore.
*/
@@ -218,6 +270,16 @@ int nxsem_wait_slow(FAR sem_t *sem)
#endif
}
/* If this now holds the mutex, set the holder TID and the lock bit */
if (mutex && ret == OK)
{
uint32_t blocking_bit =
dq_empty(SEM_WAITLIST(sem)) ? 0 : NXSEM_MBLOCKING_BIT;
atomic_set(NXSEM_MHOLDER(sem), ((uint32_t)rtcb->pid) | blocking_bit);
}
leave_critical_section(flags);
return ret;
}

View File

@@ -72,12 +72,22 @@ void nxsem_wait_irq(FAR struct tcb_s *wtcb, int errcode)
{
FAR struct tcb_s *rtcb = this_task();
FAR sem_t *sem = wtcb->waitobj;
bool mutex = NXSEM_IS_MUTEX(sem);
/* It is possible that an interrupt/context switch beat us to the punch
* and already changed the task's state.
*/
DEBUGASSERT(sem != NULL && atomic_read(NXSEM_COUNT(sem)) < 0);
DEBUGASSERT(sem != NULL);
DEBUGASSERT(mutex || atomic_read(NXSEM_COUNT(sem)) < 0);
DEBUGASSERT(!mutex || NXSEM_MBLOCKING(atomic_read(NXSEM_MHOLDER(sem))));
/* Mutex is never interrupted by a signal or canceled */
if (mutex && (errcode == EINTR || errcode == ECANCELED))
{
return;
}
/* Restore the correct priority of all threads that hold references
* to this semaphore.
@@ -85,18 +95,31 @@ void nxsem_wait_irq(FAR struct tcb_s *wtcb, int errcode)
nxsem_canceled(wtcb, sem);
/* And increment the count on the semaphore. This releases the count
* that was taken by sem_post(). This count decremented the semaphore
* count to negative and caused the thread to be blocked in the first
* place.
*/
atomic_fetch_add(NXSEM_COUNT(sem), 1);
/* Remove task from waiting list */
dq_rem((FAR dq_entry_t *)wtcb, SEM_WAITLIST(sem));
/* This restores the value to what it was before the previous sem_wait.
* This caused the thread to be blocked in the first place.
*/
if (mutex)
{
/* The TID of the mutex holder is correct but we need to
* update the blocking bit. The mutex is still blocking if there are
* any items left in the wait queue.
*/
if (dq_empty(SEM_WAITLIST(sem)))
{
atomic_fetch_and(NXSEM_MHOLDER(sem), ~NXSEM_MBLOCKING_BIT);
}
}
else
{
atomic_fetch_add(NXSEM_COUNT(sem), 1);
}
/* Indicate that the wait is over. */
wtcb->waitobj = NULL;