mirror of
https://git.hardenedbsd.org/hardenedbsd/HardenedBSD.git
synced 2024-12-30 15:38:06 +01:00
Replace internal usage of struct umtx with umutex, which can support
real-time scheduling if we want; no functionality is changed.
This commit is contained in:
parent
b2d24734cd
commit
bddd24cd9c
Notes:
svn2git
2020-12-20 02:59:44 +00:00
svn path=/head/; revision=162061
@ -50,8 +50,8 @@ _pthread_atfork(void (*prepare)(void), void (*parent)(void),
|
||||
af->prepare = prepare;
|
||||
af->parent = parent;
|
||||
af->child = child;
|
||||
THR_UMTX_LOCK(curthread, &_thr_atfork_lock);
|
||||
THR_UMUTEX_LOCK(curthread, &_thr_atfork_lock);
|
||||
TAILQ_INSERT_TAIL(&_thr_atfork_list, af, qe);
|
||||
THR_UMTX_UNLOCK(curthread, &_thr_atfork_lock);
|
||||
THR_UMUTEX_UNLOCK(curthread, &_thr_atfork_lock);
|
||||
return (0);
|
||||
}
|
||||
|
@ -69,7 +69,7 @@ _pthread_barrier_init(pthread_barrier_t *barrier,
|
||||
if (bar == NULL)
|
||||
return (ENOMEM);
|
||||
|
||||
_thr_umtx_init(&bar->b_lock);
|
||||
_thr_umutex_init(&bar->b_lock);
|
||||
bar->b_cycle = 0;
|
||||
bar->b_waiters = 0;
|
||||
bar->b_count = count;
|
||||
@ -90,17 +90,17 @@ _pthread_barrier_wait(pthread_barrier_t *barrier)
|
||||
return (EINVAL);
|
||||
|
||||
bar = *barrier;
|
||||
THR_UMTX_LOCK(curthread, &bar->b_lock);
|
||||
THR_UMUTEX_LOCK(curthread, &bar->b_lock);
|
||||
if (++bar->b_waiters == bar->b_count) {
|
||||
/* Current thread is the last thread */
|
||||
bar->b_waiters = 0;
|
||||
bar->b_cycle++;
|
||||
_thr_umtx_wake(&bar->b_cycle, bar->b_count - 1);
|
||||
THR_UMTX_UNLOCK(curthread, &bar->b_lock);
|
||||
THR_UMUTEX_UNLOCK(curthread, &bar->b_lock);
|
||||
ret = PTHREAD_BARRIER_SERIAL_THREAD;
|
||||
} else {
|
||||
cycle = bar->b_cycle;
|
||||
THR_UMTX_UNLOCK(curthread, &bar->b_lock);
|
||||
THR_UMUTEX_UNLOCK(curthread, &bar->b_lock);
|
||||
do {
|
||||
_thr_umtx_wait(&bar->b_cycle, cycle, NULL);
|
||||
/* test cycle to avoid bogus wakeup */
|
||||
|
@ -73,7 +73,7 @@ cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
|
||||
/*
|
||||
* Initialise the condition variable structure:
|
||||
*/
|
||||
_thr_umtx_init(&pcond->c_lock);
|
||||
_thr_umutex_init(&pcond->c_lock);
|
||||
pcond->c_seqno = 0;
|
||||
pcond->c_waiters = 0;
|
||||
pcond->c_wakeups = 0;
|
||||
|
@ -44,11 +44,11 @@ _thr_report_creation(struct pthread *curthread, struct pthread *newthread)
|
||||
curthread->event_buf.event = TD_CREATE;
|
||||
curthread->event_buf.th_p = (td_thrhandle_t *)newthread;
|
||||
curthread->event_buf.data = 0;
|
||||
THR_UMTX_LOCK(curthread, &_thr_event_lock);
|
||||
THR_UMUTEX_LOCK(curthread, &_thr_event_lock);
|
||||
_thread_last_event = curthread;
|
||||
_thread_bp_create();
|
||||
_thread_last_event = NULL;
|
||||
THR_UMTX_UNLOCK(curthread, &_thr_event_lock);
|
||||
THR_UMUTEX_UNLOCK(curthread, &_thr_event_lock);
|
||||
}
|
||||
|
||||
void
|
||||
@ -57,9 +57,9 @@ _thr_report_death(struct pthread *curthread)
|
||||
curthread->event_buf.event = TD_DEATH;
|
||||
curthread->event_buf.th_p = (td_thrhandle_t *)curthread;
|
||||
curthread->event_buf.data = 0;
|
||||
THR_UMTX_LOCK(curthread, &_thr_event_lock);
|
||||
THR_UMUTEX_LOCK(curthread, &_thr_event_lock);
|
||||
_thread_last_event = curthread;
|
||||
_thread_bp_death();
|
||||
_thread_last_event = NULL;
|
||||
THR_UMTX_UNLOCK(curthread, &_thr_event_lock);
|
||||
THR_UMUTEX_UNLOCK(curthread, &_thr_event_lock);
|
||||
}
|
||||
|
@ -90,9 +90,9 @@ _pthread_atfork(void (*prepare)(void), void (*parent)(void),
|
||||
af->prepare = prepare;
|
||||
af->parent = parent;
|
||||
af->child = child;
|
||||
THR_UMTX_LOCK(curthread, &_thr_atfork_lock);
|
||||
THR_UMUTEX_LOCK(curthread, &_thr_atfork_lock);
|
||||
TAILQ_INSERT_TAIL(&_thr_atfork_list, af, qe);
|
||||
THR_UMTX_UNLOCK(curthread, &_thr_atfork_lock);
|
||||
THR_UMUTEX_UNLOCK(curthread, &_thr_atfork_lock);
|
||||
return (0);
|
||||
}
|
||||
|
||||
@ -114,7 +114,7 @@ _fork(void)
|
||||
|
||||
curthread = _get_curthread();
|
||||
|
||||
THR_UMTX_LOCK(curthread, &_thr_atfork_lock);
|
||||
THR_UMUTEX_LOCK(curthread, &_thr_atfork_lock);
|
||||
|
||||
/* Run down atfork prepare handlers. */
|
||||
TAILQ_FOREACH_REVERSE(af, &_thr_atfork_list, atfork_head, qe) {
|
||||
@ -154,8 +154,8 @@ _fork(void)
|
||||
thr_self(&curthread->tid);
|
||||
|
||||
/* clear locks that other threads may hold on us. */
|
||||
_thr_umtx_init(&curthread->lock);
|
||||
_thr_umtx_init(&_thr_atfork_lock);
|
||||
_thr_umutex_init(&curthread->lock);
|
||||
_thr_umutex_init(&_thr_atfork_lock);
|
||||
_thr_setthreaded(0);
|
||||
|
||||
/* reinitialize libc spinlocks. */
|
||||
@ -189,7 +189,7 @@ _fork(void)
|
||||
af->parent();
|
||||
}
|
||||
|
||||
THR_UMTX_UNLOCK(curthread, &_thr_atfork_lock);
|
||||
THR_UMUTEX_UNLOCK(curthread, &_thr_atfork_lock);
|
||||
}
|
||||
errno = errsave;
|
||||
|
||||
|
@ -66,7 +66,7 @@ pthreadlist _thread_list = TAILQ_HEAD_INITIALIZER(_thread_list);
|
||||
pthreadlist _thread_gc_list = TAILQ_HEAD_INITIALIZER(_thread_gc_list);
|
||||
int _thread_active_threads = 1;
|
||||
atfork_head _thr_atfork_list = TAILQ_HEAD_INITIALIZER(_thr_atfork_list);
|
||||
umtx_t _thr_atfork_lock;
|
||||
struct umutex _thr_atfork_lock = DEFAULT_UMUTEX;
|
||||
|
||||
struct pthread_prio _thr_priorities[3] = {
|
||||
{RTP_PRIO_MIN, RTP_PRIO_MAX, 0}, /* FIFO */
|
||||
@ -105,12 +105,12 @@ size_t _thr_stack_default = THR_STACK_DEFAULT;
|
||||
size_t _thr_stack_initial = THR_STACK_INITIAL;
|
||||
int _thr_page_size;
|
||||
int _gc_count;
|
||||
umtx_t _mutex_static_lock;
|
||||
umtx_t _cond_static_lock;
|
||||
umtx_t _rwlock_static_lock;
|
||||
umtx_t _keytable_lock;
|
||||
umtx_t _thr_list_lock;
|
||||
umtx_t _thr_event_lock;
|
||||
struct umutex _mutex_static_lock = DEFAULT_UMUTEX;
|
||||
struct umutex _cond_static_lock = DEFAULT_UMUTEX;
|
||||
struct umutex _rwlock_static_lock = DEFAULT_UMUTEX;
|
||||
struct umutex _keytable_lock = DEFAULT_UMUTEX;
|
||||
struct umutex _thr_list_lock = DEFAULT_UMUTEX;
|
||||
struct umutex _thr_event_lock = DEFAULT_UMUTEX;
|
||||
|
||||
int __pthread_cond_wait(pthread_cond_t *, pthread_mutex_t *);
|
||||
int __pthread_mutex_lock(pthread_mutex_t *);
|
||||
@ -424,12 +424,12 @@ init_private(void)
|
||||
size_t len;
|
||||
int mib[2];
|
||||
|
||||
_thr_umtx_init(&_mutex_static_lock);
|
||||
_thr_umtx_init(&_cond_static_lock);
|
||||
_thr_umtx_init(&_rwlock_static_lock);
|
||||
_thr_umtx_init(&_keytable_lock);
|
||||
_thr_umtx_init(&_thr_atfork_lock);
|
||||
_thr_umtx_init(&_thr_event_lock);
|
||||
_thr_umutex_init(&_mutex_static_lock);
|
||||
_thr_umutex_init(&_cond_static_lock);
|
||||
_thr_umutex_init(&_rwlock_static_lock);
|
||||
_thr_umutex_init(&_keytable_lock);
|
||||
_thr_umutex_init(&_thr_atfork_lock);
|
||||
_thr_umutex_init(&_thr_event_lock);
|
||||
_thr_once_init();
|
||||
_thr_spinlock_init();
|
||||
_thr_list_init();
|
||||
|
@ -59,8 +59,8 @@
|
||||
* after a fork().
|
||||
*/
|
||||
static TAILQ_HEAD(, pthread) free_threadq;
|
||||
static umtx_t free_thread_lock;
|
||||
static umtx_t tcb_lock;
|
||||
static struct umutex free_thread_lock = DEFAULT_UMUTEX;
|
||||
static struct umutex tcb_lock = DEFAULT_UMUTEX;
|
||||
static int free_thread_count = 0;
|
||||
static int inited = 0;
|
||||
static int total_threads;
|
||||
@ -79,11 +79,11 @@ _thr_list_init(void)
|
||||
|
||||
_gc_count = 0;
|
||||
total_threads = 1;
|
||||
_thr_umtx_init(&_thr_list_lock);
|
||||
_thr_umutex_init(&_thr_list_lock);
|
||||
TAILQ_INIT(&_thread_list);
|
||||
TAILQ_INIT(&free_threadq);
|
||||
_thr_umtx_init(&free_thread_lock);
|
||||
_thr_umtx_init(&tcb_lock);
|
||||
_thr_umutex_init(&free_thread_lock);
|
||||
_thr_umutex_init(&tcb_lock);
|
||||
if (inited) {
|
||||
for (i = 0; i < HASH_QUEUES; ++i)
|
||||
LIST_INIT(&thr_hashtable[i]);
|
||||
|
@ -146,7 +146,7 @@ struct pthread_cond {
|
||||
/*
|
||||
* Lock for accesses to this structure.
|
||||
*/
|
||||
volatile umtx_t c_lock;
|
||||
struct umutex c_lock;
|
||||
volatile umtx_t c_seqno;
|
||||
volatile int c_waiters;
|
||||
volatile int c_wakeups;
|
||||
@ -160,7 +160,7 @@ struct pthread_cond_attr {
|
||||
};
|
||||
|
||||
struct pthread_barrier {
|
||||
volatile umtx_t b_lock;
|
||||
struct umutex b_lock;
|
||||
volatile umtx_t b_cycle;
|
||||
volatile int b_count;
|
||||
volatile int b_waiters;
|
||||
@ -171,7 +171,7 @@ struct pthread_barrierattr {
|
||||
};
|
||||
|
||||
struct pthread_spinlock {
|
||||
volatile umtx_t s_lock;
|
||||
struct umutex s_lock;
|
||||
};
|
||||
|
||||
/*
|
||||
@ -313,7 +313,7 @@ struct pthread {
|
||||
/*
|
||||
* Lock for accesses to this thread structure.
|
||||
*/
|
||||
umtx_t lock;
|
||||
struct umutex lock;
|
||||
|
||||
/* Internal condition variable cycle number. */
|
||||
umtx_t cycle;
|
||||
@ -449,22 +449,22 @@ struct pthread {
|
||||
(thrd)->critical_count--; \
|
||||
_thr_ast(thrd);
|
||||
|
||||
#define THR_UMTX_TRYLOCK(thrd, lck) \
|
||||
_thr_umtx_trylock((lck), (thrd)->tid)
|
||||
#define THR_UMUTEX_TRYLOCK(thrd, lck) \
|
||||
_thr_umutex_trylock((lck), TID(thrd))
|
||||
|
||||
#define THR_UMTX_LOCK(thrd, lck) \
|
||||
_thr_umtx_lock((lck), (thrd)->tid)
|
||||
#define THR_UMUTEX_LOCK(thrd, lck) \
|
||||
_thr_umutex_lock((lck), TID(thrd))
|
||||
|
||||
#define THR_UMTX_TIMEDLOCK(thrd, lck, timo) \
|
||||
_thr_umtx_timedlock((lck), (thrd)->tid, (timo))
|
||||
#define THR_UMUTEX_TIMEDLOCK(thrd, lck, timo) \
|
||||
_thr_umutex_timedlock((lck), TID(thrd), (timo))
|
||||
|
||||
#define THR_UMTX_UNLOCK(thrd, lck) \
|
||||
_thr_umtx_unlock((lck), (thrd)->tid)
|
||||
#define THR_UMUTEX_UNLOCK(thrd, lck) \
|
||||
_thr_umutex_unlock((lck), TID(thrd))
|
||||
|
||||
#define THR_LOCK_ACQUIRE(thrd, lck) \
|
||||
do { \
|
||||
(thrd)->locklevel++; \
|
||||
_thr_umtx_lock(lck, (thrd)->tid); \
|
||||
_thr_umutex_lock(lck, TID(thrd)); \
|
||||
} while (0)
|
||||
|
||||
#ifdef _PTHREADS_INVARIANTS
|
||||
@ -480,7 +480,7 @@ do { \
|
||||
#define THR_LOCK_RELEASE(thrd, lck) \
|
||||
do { \
|
||||
THR_ASSERT_LOCKLEVEL(thrd); \
|
||||
_thr_umtx_unlock((lck), (thrd)->tid); \
|
||||
_thr_umutex_unlock((lck), TID(thrd)); \
|
||||
(thrd)->locklevel--; \
|
||||
_thr_ast(thrd); \
|
||||
} while (0)
|
||||
@ -562,7 +562,7 @@ extern pthreadlist _thread_gc_list __hidden;
|
||||
|
||||
extern int _thread_active_threads;
|
||||
extern atfork_head _thr_atfork_list __hidden;
|
||||
extern umtx_t _thr_atfork_lock __hidden;
|
||||
extern struct umutex _thr_atfork_lock __hidden;
|
||||
|
||||
/* Default thread attributes: */
|
||||
extern struct pthread_attr _pthread_attr_default __hidden;
|
||||
@ -585,12 +585,12 @@ extern int _thr_page_size __hidden;
|
||||
/* Garbage thread count. */
|
||||
extern int _gc_count __hidden;
|
||||
|
||||
extern umtx_t _mutex_static_lock __hidden;
|
||||
extern umtx_t _cond_static_lock __hidden;
|
||||
extern umtx_t _rwlock_static_lock __hidden;
|
||||
extern umtx_t _keytable_lock __hidden;
|
||||
extern umtx_t _thr_list_lock __hidden;
|
||||
extern umtx_t _thr_event_lock __hidden;
|
||||
extern struct umutex _mutex_static_lock __hidden;
|
||||
extern struct umutex _cond_static_lock __hidden;
|
||||
extern struct umutex _rwlock_static_lock __hidden;
|
||||
extern struct umutex _keytable_lock __hidden;
|
||||
extern struct umutex _thr_list_lock __hidden;
|
||||
extern struct umutex _thr_event_lock __hidden;
|
||||
|
||||
/*
|
||||
* Function prototype definitions.
|
||||
|
@ -53,7 +53,7 @@ _pthread_spin_init(pthread_spinlock_t *lock, int pshared)
|
||||
else if ((lck = malloc(sizeof(struct pthread_spinlock))) == NULL)
|
||||
ret = ENOMEM;
|
||||
else {
|
||||
_thr_umtx_init(&lck->s_lock);
|
||||
_thr_umutex_init(&lck->s_lock);
|
||||
*lock = lck;
|
||||
ret = 0;
|
||||
}
|
||||
@ -87,7 +87,7 @@ _pthread_spin_trylock(pthread_spinlock_t *lock)
|
||||
if (lock == NULL || (lck = *lock) == NULL)
|
||||
ret = EINVAL;
|
||||
else
|
||||
ret = THR_UMTX_TRYLOCK(curthread, &lck->s_lock);
|
||||
ret = THR_UMUTEX_TRYLOCK(curthread, &lck->s_lock);
|
||||
return (ret);
|
||||
}
|
||||
|
||||
@ -102,8 +102,8 @@ _pthread_spin_lock(pthread_spinlock_t *lock)
|
||||
ret = EINVAL;
|
||||
else {
|
||||
count = SPIN_COUNT;
|
||||
while ((ret = THR_UMTX_TRYLOCK(curthread, &lck->s_lock)) != 0) {
|
||||
while (lck->s_lock) {
|
||||
while ((ret = THR_UMUTEX_TRYLOCK(curthread, &lck->s_lock)) != 0) {
|
||||
while (lck->s_lock.m_owner) {
|
||||
if (_thr_smp_cpus <= 1) {
|
||||
_pthread_yield();
|
||||
} else {
|
||||
@ -134,7 +134,7 @@ _pthread_spin_unlock(pthread_spinlock_t *lock)
|
||||
if (lock == NULL || (lck = *lock) == NULL)
|
||||
ret = EINVAL;
|
||||
else {
|
||||
ret = THR_UMTX_UNLOCK(curthread, &lck->s_lock);
|
||||
ret = THR_UMUTEX_UNLOCK(curthread, &lck->s_lock);
|
||||
}
|
||||
return (ret);
|
||||
}
|
||||
|
@ -81,7 +81,7 @@ sem_alloc(unsigned int value, semid_t semid, int system_sem)
|
||||
errno = ENOSPC;
|
||||
return (NULL);
|
||||
}
|
||||
_thr_umtx_init((umtx_t *)&sem->lock);
|
||||
bzero(sem, sizeof(*sem));
|
||||
/*
|
||||
* Fortunately count and nwaiters are adjacent, so we can
|
||||
* use umtx_wait to wait on it, umtx_wait needs an address
|
||||
|
@ -96,7 +96,7 @@ _thr_suspend_check(struct pthread *curthread)
|
||||
* ourself.
|
||||
*/
|
||||
curthread->critical_count++;
|
||||
THR_UMTX_LOCK(curthread, &(curthread)->lock);
|
||||
THR_UMUTEX_LOCK(curthread, &(curthread)->lock);
|
||||
while ((curthread->flags & (THR_FLAGS_NEED_SUSPEND |
|
||||
THR_FLAGS_SUSPENDED)) == THR_FLAGS_NEED_SUSPEND) {
|
||||
curthread->cycle++;
|
||||
@ -112,12 +112,12 @@ _thr_suspend_check(struct pthread *curthread)
|
||||
if (curthread->state == PS_DEAD)
|
||||
break;
|
||||
curthread->flags |= THR_FLAGS_SUSPENDED;
|
||||
THR_UMTX_UNLOCK(curthread, &(curthread)->lock);
|
||||
THR_UMUTEX_UNLOCK(curthread, &(curthread)->lock);
|
||||
_thr_umtx_wait(&curthread->cycle, cycle, NULL);
|
||||
THR_UMTX_LOCK(curthread, &(curthread)->lock);
|
||||
THR_UMUTEX_LOCK(curthread, &(curthread)->lock);
|
||||
curthread->flags &= ~THR_FLAGS_SUSPENDED;
|
||||
}
|
||||
THR_UMTX_UNLOCK(curthread, &(curthread)->lock);
|
||||
THR_UMUTEX_UNLOCK(curthread, &(curthread)->lock);
|
||||
curthread->critical_count--;
|
||||
|
||||
/*
|
||||
|
@ -48,9 +48,10 @@
|
||||
*/
|
||||
struct spinlock_extra {
|
||||
spinlock_t *owner;
|
||||
struct umutex lock;
|
||||
};
|
||||
|
||||
static umtx_t spinlock_static_lock;
|
||||
static struct umutex spinlock_static_lock = DEFAULT_UMUTEX;
|
||||
static struct spinlock_extra extra[MAX_SPINLOCKS];
|
||||
static int spinlock_count;
|
||||
static int initialized;
|
||||
@ -65,19 +66,25 @@ static void init_spinlock(spinlock_t *lck);
|
||||
void
|
||||
_spinunlock(spinlock_t *lck)
|
||||
{
|
||||
THR_UMTX_UNLOCK(_get_curthread(), (volatile umtx_t *)&lck->access_lock);
|
||||
struct spinlock_extra *extra;
|
||||
|
||||
extra = (struct spinlock_extra *)lck->fname;
|
||||
THR_UMUTEX_UNLOCK(_get_curthread(), &extra->lock);
|
||||
}
|
||||
|
||||
void
|
||||
_spinlock(spinlock_t *lck)
|
||||
{
|
||||
struct spinlock_extra *extra;
|
||||
|
||||
if (!__isthreaded)
|
||||
PANIC("Spinlock called when not threaded.");
|
||||
if (!initialized)
|
||||
PANIC("Spinlocks not initialized.");
|
||||
if (lck->fname == NULL)
|
||||
init_spinlock(lck);
|
||||
THR_UMTX_LOCK(_get_curthread(), (volatile umtx_t *)&lck->access_lock);
|
||||
extra = (struct spinlock_extra *)lck->fname;
|
||||
THR_UMUTEX_LOCK(_get_curthread(), &extra->lock);
|
||||
}
|
||||
|
||||
void
|
||||
@ -89,17 +96,18 @@ _spinlock_debug(spinlock_t *lck, char *fname __unused, int lineno __unused)
|
||||
static void
|
||||
init_spinlock(spinlock_t *lck)
|
||||
{
|
||||
static int count = 0;
|
||||
struct pthread *curthread = _get_curthread();
|
||||
|
||||
THR_UMTX_LOCK(_get_curthread(), &spinlock_static_lock);
|
||||
THR_UMUTEX_LOCK(curthread, &spinlock_static_lock);
|
||||
if ((lck->fname == NULL) && (spinlock_count < MAX_SPINLOCKS)) {
|
||||
lck->fname = (char *)&extra[spinlock_count];
|
||||
_thr_umutex_init(&extra[spinlock_count].lock);
|
||||
extra[spinlock_count].owner = lck;
|
||||
spinlock_count++;
|
||||
}
|
||||
THR_UMTX_UNLOCK(_get_curthread(), &spinlock_static_lock);
|
||||
if (lck->fname == NULL && ++count < 5)
|
||||
stderr_debug("Warning: exceeded max spinlocks");
|
||||
THR_UMUTEX_UNLOCK(curthread, &spinlock_static_lock);
|
||||
if (lck->fname == NULL)
|
||||
PANIC("Warning: exceeded max spinlocks");
|
||||
}
|
||||
|
||||
void
|
||||
@ -107,7 +115,7 @@ _thr_spinlock_init(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
_thr_umtx_init(&spinlock_static_lock);
|
||||
_thr_umutex_init(&spinlock_static_lock);
|
||||
if (initialized != 0) {
|
||||
/*
|
||||
* called after fork() to reset state of libc spin locks,
|
||||
@ -118,8 +126,7 @@ _thr_spinlock_init(void)
|
||||
* it is better to do pthread_atfork in libc.
|
||||
*/
|
||||
for (i = 0; i < spinlock_count; i++)
|
||||
_thr_umtx_init((volatile umtx_t *)
|
||||
&extra[i].owner->access_lock);
|
||||
_thr_umutex_init(&extra[i].lock);
|
||||
} else {
|
||||
initialized = 1;
|
||||
}
|
||||
|
@ -30,37 +30,6 @@
|
||||
#include "thr_private.h"
|
||||
#include "thr_umtx.h"
|
||||
|
||||
int
|
||||
__thr_umtx_lock(volatile umtx_t *mtx, long id)
|
||||
{
|
||||
while (_umtx_op(__DEVOLATILE(struct umtx *, mtx),
|
||||
UMTX_OP_LOCK, id, 0, 0))
|
||||
;
|
||||
return (0);
|
||||
}
|
||||
|
||||
int
|
||||
__thr_umtx_timedlock(volatile umtx_t *mtx, long id,
|
||||
const struct timespec *timeout)
|
||||
{
|
||||
if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
|
||||
timeout->tv_nsec <= 0)))
|
||||
return (ETIMEDOUT);
|
||||
if (_umtx_op(__DEVOLATILE(struct umtx *, mtx), UMTX_OP_LOCK, id, 0,
|
||||
__DECONST(void *, timeout)) == 0)
|
||||
return (0);
|
||||
return (errno);
|
||||
}
|
||||
|
||||
int
|
||||
__thr_umtx_unlock(volatile umtx_t *mtx, long id)
|
||||
{
|
||||
if (_umtx_op(__DEVOLATILE(struct umtx *, mtx), UMTX_OP_UNLOCK,
|
||||
id, 0, 0) == 0)
|
||||
return (0);
|
||||
return (errno);
|
||||
}
|
||||
|
||||
int
|
||||
__thr_umutex_lock(struct umutex *mtx, uint32_t id)
|
||||
{
|
||||
|
@ -29,17 +29,13 @@
|
||||
#ifndef _THR_FBSD_UMTX_H_
|
||||
#define _THR_FBSD_UMTX_H_
|
||||
|
||||
#include <strings.h>
|
||||
#include <sys/umtx.h>
|
||||
|
||||
#define DEFAULT_UMUTEX {0, 0, {0, 0}, {0, 0, 0, 0}}
|
||||
|
||||
typedef long umtx_t;
|
||||
|
||||
/* simple lock routines.*/
|
||||
int __thr_umtx_lock(volatile umtx_t *mtx, long id) __hidden;
|
||||
int __thr_umtx_timedlock(volatile umtx_t *mtx, long id,
|
||||
const struct timespec *timeout) __hidden;
|
||||
int __thr_umtx_unlock(volatile umtx_t *mtx, long id) __hidden;
|
||||
|
||||
/* POSIX semantic lock routines */
|
||||
int __thr_umutex_lock(struct umutex *mtx, uint32_t id) __hidden;
|
||||
int __thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
|
||||
const struct timespec *timeout) __hidden;
|
||||
@ -48,53 +44,17 @@ int __thr_umutex_kern_trylock(struct umutex *mtx) __hidden;
|
||||
int __thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
|
||||
uint32_t *oldceiling) __hidden;
|
||||
|
||||
static inline void
|
||||
_thr_umtx_init(volatile umtx_t *mtx)
|
||||
{
|
||||
*mtx = 0;
|
||||
}
|
||||
|
||||
static inline int
|
||||
_thr_umtx_trylock(volatile umtx_t *mtx, long id)
|
||||
{
|
||||
if (atomic_cmpset_acq_ptr((volatile uintptr_t *)mtx,
|
||||
(uintptr_t)UMTX_UNOWNED, (uintptr_t)id))
|
||||
return (0);
|
||||
return (EBUSY);
|
||||
}
|
||||
|
||||
static inline int
|
||||
_thr_umtx_lock(volatile umtx_t *mtx, long id)
|
||||
{
|
||||
if (atomic_cmpset_acq_ptr((volatile uintptr_t *)mtx,
|
||||
(uintptr_t)UMTX_UNOWNED, (uintptr_t)id))
|
||||
return (0);
|
||||
return (__thr_umtx_lock(mtx, id));
|
||||
}
|
||||
|
||||
static inline int
|
||||
_thr_umtx_timedlock(volatile umtx_t *mtx, long id,
|
||||
const struct timespec *timeout)
|
||||
{
|
||||
if (atomic_cmpset_acq_ptr((volatile uintptr_t *)mtx,
|
||||
(uintptr_t)UMTX_UNOWNED, (uintptr_t)id))
|
||||
return (0);
|
||||
return (__thr_umtx_timedlock(mtx, id, timeout));
|
||||
}
|
||||
|
||||
static inline int
|
||||
_thr_umtx_unlock(volatile umtx_t *mtx, long id)
|
||||
{
|
||||
if (atomic_cmpset_rel_ptr((volatile uintptr_t *)mtx,
|
||||
(uintptr_t)id, (uintptr_t)UMTX_UNOWNED))
|
||||
return (0);
|
||||
return __thr_umtx_unlock(mtx, id);
|
||||
}
|
||||
|
||||
int _thr_umtx_wait(volatile umtx_t *mtx, umtx_t exp,
|
||||
const struct timespec *timeout) __hidden;
|
||||
int _thr_umtx_wake(volatile umtx_t *mtx, int count) __hidden;
|
||||
|
||||
static inline void
|
||||
_thr_umutex_init(struct umutex *mtx)
|
||||
{
|
||||
struct umutex tmp = DEFAULT_UMUTEX;
|
||||
*mtx = tmp;
|
||||
}
|
||||
|
||||
static inline int
|
||||
_thr_umutex_trylock(struct umutex *mtx, uint32_t id)
|
||||
{
|
||||
|
Loading…
Reference in New Issue
Block a user