libc/semaphore: Take the fast path even when the arch doesn't support atomics

since the simulated atomic operations are still faster than the slow path

Signed-off-by: Xiang Xiao <xiaoxiang@xiaomi.com>
Author:    Xiang Xiao
Date:      2025-06-22 03:21:23 +08:00
Committer: Alan C. Assis
Parent:    fb14b54b83
Commit:    4c7366045c

3 changed files with 15 additions and 49 deletions
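
The reasoning: with CONFIG_LIBC_ARCH_ATOMIC the architecture lacks native atomic instructions and libc emulates them under a short lock, but the emulated operation still costs only a handful of instructions, while the slow path always pays for a trip through the scheduler. A host-runnable sketch of that idea; the pthread mutex stands in for the emulation lock, and emulated_cas32() is a hypothetical name, not NuttX's actual arch_atomic implementation:

/* Sketch: emulating a 32-bit compare-and-swap when the architecture
 * has no native atomics.  The lock protects a two-statement critical
 * section, so the emulated op remains far cheaper than a semaphore
 * slow path that has to enter the scheduler.
 */

#include <inttypes.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t g_emul_lock = PTHREAD_MUTEX_INITIALIZER;

static bool emulated_cas32(volatile int32_t *addr,
                           int32_t expected, int32_t desired)
{
  bool ok = false;

  pthread_mutex_lock(&g_emul_lock);
  if (*addr == expected)
    {
      *addr = desired;
      ok = true;
    }

  pthread_mutex_unlock(&g_emul_lock);
  return ok;
}

int main(void)
{
  volatile int32_t count = 1;

  /* A sem_trywait-style fast path: claim the semaphore iff count > 0 */

  if (emulated_cas32(&count, 1, 0))
    {
      printf("took the semaphore, count=%" PRId32 "\n", count);
    }

  return 0;
}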


@@ -119,37 +119,30 @@ int sem_post(FAR sem_t *sem)
 int nxsem_post(FAR sem_t *sem)
 {
-  bool mutex;
   bool fastpath = true;
+  bool mutex;
 
   DEBUGASSERT(sem != NULL);
 
-  /* We don't do atomic fast path in case of LIBC_ARCH_ATOMIC because that
-   * uses spinlocks, which can't be called from userspace. Also in the kernel
-   * taking the slow path directly is faster than locking first in here
-   */
-
-#ifndef CONFIG_LIBC_ARCH_ATOMIC
   mutex = NXSEM_IS_MUTEX(sem);
 
   /* Disable fast path if priority protection is enabled on the semaphore */
 
-#  ifdef CONFIG_PRIORITY_PROTECT
+#ifdef CONFIG_PRIORITY_PROTECT
   if ((sem->flags & SEM_PRIO_MASK) == SEM_PRIO_PROTECT)
     {
       fastpath = false;
     }
-#  endif
+#endif
 
   /* Disable fast path on a counting semaphore with priority inheritance */
 
-#  ifdef CONFIG_PRIORITY_INHERITANCE
+#ifdef CONFIG_PRIORITY_INHERITANCE
   if (!mutex && (sem->flags & SEM_PRIO_MASK) != SEM_PRIO_NONE)
     {
       fastpath = false;
     }
-#  endif
+#endif
 
   while (fastpath)
     {
@@ -181,10 +174,6 @@ int nxsem_post(FAR sem_t *sem)
           return OK;
         }
     }
-#else
-  UNUSED(mutex);
-  UNUSED(fastpath);
-#endif
 
   return nxsem_post_slow(sem);
 }
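
The hunks above show only the head of the fast-path loop; its body, untouched by this commit, is what actually avoids the kernel. Below is a self-contained model of the post side, paraphrased rather than copied from the NuttX source: model_sem_s and model_post_fast are made-up names, but the negative-count-means-waiters encoding follows NuttX's convention.

/* Model of the post-side fast path (paraphrase, not the literal NuttX
 * source): bump the count with one compare-exchange and return at once
 * unless a waiter must be woken.
 */

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define OK 0

struct model_sem_s
{
  atomic_int count;           /* < 0 means |count| tasks are blocked */
};

static int model_post_fast(struct model_sem_s *sem, bool fastpath)
{
  while (fastpath)
    {
      int old = atomic_load(&sem->count);

      if (old < 0)
        {
          /* A task is blocked on the semaphore: waking it needs the
           * scheduler, so the real code breaks out here and calls
           * nxsem_post_slow().
           */

          break;
        }

      if (atomic_compare_exchange_weak(&sem->count, &old, old + 1))
        {
          return OK;          /* Posted without entering the kernel */
        }
    }

  return -1;                  /* Stand-in for nxsem_post_slow(sem) */
}

int main(void)
{
  struct model_sem_s sem;

  atomic_init(&sem.count, 0);
  printf("fast path: %d, count now %d\n",
         model_post_fast(&sem, true), (int)atomic_load(&sem.count));
  return 0;
}

The real loop falls through to nxsem_post_slow() exactly when a waiter must be woken, which is the one case that genuinely needs the scheduler.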


@@ -107,8 +107,8 @@ int sem_trywait(FAR sem_t *sem)
 int nxsem_trywait(FAR sem_t *sem)
 {
-  bool mutex;
   bool fastpath = true;
+  bool mutex;
 
   DEBUGASSERT(sem != NULL);
 
@@ -119,32 +119,25 @@ int nxsem_trywait(FAR sem_t *sem)
               up_interrupt_context());
 #endif
 
-  /* We don't do atomic fast path in case of LIBC_ARCH_ATOMIC because that
-   * uses spinlocks, which can't be called from userspace. Also in the kernel
-   * taking the slow path directly is faster than locking first in here
-   */
-
-#ifndef CONFIG_LIBC_ARCH_ATOMIC
   mutex = NXSEM_IS_MUTEX(sem);
 
   /* Disable fast path if priority protection is enabled on the semaphore */
 
-#  ifdef CONFIG_PRIORITY_PROTECT
+#ifdef CONFIG_PRIORITY_PROTECT
   if ((sem->flags & SEM_PRIO_MASK) == SEM_PRIO_PROTECT)
     {
      fastpath = false;
     }
-#  endif
+#endif
 
   /* Disable fast path on a counting semaphore with priority inheritance */
 
-#  ifdef CONFIG_PRIORITY_INHERITANCE
+#ifdef CONFIG_PRIORITY_INHERITANCE
   if (!mutex && (sem->flags & SEM_PRIO_MASK) != SEM_PRIO_NONE)
     {
      fastpath = false;
     }
-#  endif
+#endif
 
   while (fastpath)
     {
@@ -177,10 +170,5 @@ int nxsem_trywait(FAR sem_t *sem)
         }
     }
-#else
-  UNUSED(mutex);
-  UNUSED(fastpath);
-#endif
 
   return nxsem_trywait_slow(sem);
 }
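
On the trywait side the fast path is one compare-exchange that either claims a positive count or fails immediately, so with this commit a non-blocking poll stays in userspace even on architectures that emulate atomics. A hedged usage sketch through the standard POSIX wrapper; try_consume() is a made-up helper name:

#include <errno.h>
#include <semaphore.h>

/* Poll a semaphore without blocking: returns 0 on success, -EAGAIN if
 * the semaphore was not available, or another negated errno on error.
 */

static int try_consume(sem_t *sem)
{
  if (sem_trywait(sem) == 0)
    {
      return 0;
    }

  return -errno;   /* POSIX: errno is EAGAIN when the count is zero */
}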


@@ -135,8 +135,8 @@ errout_with_cancelpt:
 int nxsem_wait(FAR sem_t *sem)
 {
-  bool mutex;
   bool fastpath = true;
+  bool mutex;
 
   DEBUGASSERT(sem != NULL);
 
@@ -147,32 +147,25 @@ int nxsem_wait(FAR sem_t *sem)
               up_interrupt_context());
 #endif
 
-  /* We don't do atomic fast path in case of LIBC_ARCH_ATOMIC because that
-   * uses spinlocks, which can't be called from userspace. Also in the kernel
-   * taking the slow path directly is faster than locking first in here
-   */
-
-#ifndef CONFIG_LIBC_ARCH_ATOMIC
   mutex = NXSEM_IS_MUTEX(sem);
 
   /* Disable fast path if priority protection is enabled on the semaphore */
 
-#  ifdef CONFIG_PRIORITY_PROTECT
+#ifdef CONFIG_PRIORITY_PROTECT
   if ((sem->flags & SEM_PRIO_MASK) == SEM_PRIO_PROTECT)
     {
      fastpath = false;
     }
-#  endif
+#endif
 
   /* Disable fast path on a counting semaphore with priority inheritance */
 
-#  ifdef CONFIG_PRIORITY_INHERITANCE
+#ifdef CONFIG_PRIORITY_INHERITANCE
   if (!mutex && (sem->flags & SEM_PRIO_MASK) != SEM_PRIO_NONE)
    {
      fastpath = false;
    }
-#  endif
+#endif
 
   while (fastpath)
     {
@@ -204,10 +197,6 @@ int nxsem_wait(FAR sem_t *sem)
           return OK;
         }
     }
-#else
-  UNUSED(mutex);
-  UNUSED(fastpath);
-#endif
 
   return nxsem_wait_slow(sem);
 }
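
Note that all three functions keep the checks that force SEM_PRIO_PROTECT semaphores, and counting semaphores with priority inheritance, onto the slow path: adjusting another task's priority is inherently a kernel operation. A semaphore used purely for signaling can opt out via NuttX's sem_setprotocol() and stay eligible for the fast path; a short illustrative sketch, where the init pattern and header choice are assumptions:

#include <semaphore.h>
#include <nuttx/semaphore.h>

static sem_t g_signal;

/* A signaling semaphore: inheritance semantics are not wanted here,
 * so turn the protocol off; posts and waits can then use the atomic
 * fast path.
 */

static void signal_init(void)
{
  sem_init(&g_signal, 0, 0);
  sem_setprotocol(&g_signal, SEM_PRIO_NONE);
}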