callout: provide CALLOUT_TRYLOCK flag

If a callout is initialized with this flag, the callout(9) system will not
drop the callwheel lock in softclock_call_cc() to obtain the callout's
lock.  Instead, it will use try-lock semantics to acquire it.  On failure,
the callout is rescheduled to fire after 50% of its precision value.  The
main benefit of this behavior is not avoiding lock contention in the
callout thread, but the fact that a callout carrying this flag can
actually be stopped in a safe manner, because the race window at the
beginning of softclock_call_cc() is closed.
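
For illustration, a minimal consumer-side sketch (not part of this commit;
the softc layout and the softc_start()/softc_timer() names are
hypothetical) of opting a callout into the new semantics:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>

struct softc {
    struct mtx      sc_mtx;
    struct callout  sc_callout;
    bool            sc_dying;   /* set under sc_mtx to request teardown */
};

static void softc_timer(void *arg);     /* runs with sc_mtx held */

static void
softc_start(struct softc *sc)
{
    mtx_init(&sc->sc_mtx, "softc", NULL, MTX_DEF);
    /*
     * With CALLOUT_TRYLOCK, softclock_call_cc() keeps the callwheel
     * lock and try-locks sc_mtx; on failure the callout is simply
     * re-armed at half of its precision instead of blocking.
     */
    callout_init_mtx(&sc->sc_callout, &sc->sc_mtx,
        CALLOUT_TRYLOCK | CALLOUT_RETURNUNLOCKED);
    callout_reset(&sc->sc_callout, hz, softc_timer, sc);
}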

Calling callout_stop() on such a callout guarantees that nothing will be
executed after callout_stop() returns, and that the callout lock will not
be dereferenced either.  A callout marked CALLOUT_TRYLOCK |
CALLOUT_RETURNUNLOCKED can call callout_stop() from within the callout
function itself (0, a failure to stop, will be returned), then unlock the
lock and then free the memory containing the callout structure.

Caveat: when calling callout_stop() from outside the callout function, the
return value of callout_stop() is still inconsistent.  A race window at
the end of softclock_call_cc() still exists, so callout_stop() may report
a failure to stop even though the callout was in fact stopped.
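
Consequently, a teardown sketch from outside the callout function (an
alternative to the self-free pattern above, for the same hypothetical
softc): a caller that must know the handler has finished cannot rely on
the callout_stop() return value and should still use callout_drain(9):

static void
softc_detach(struct softc *sc)
{
    /*
     * callout_stop() might report failure here even though the
     * callout was stopped; callout_drain() sleeps until any running
     * handler has completed, so freeing afterwards is safe.
     */
    callout_drain(&sc->sc_callout);
    mtx_destroy(&sc->sc_mtx);
    free(sc, M_DEVBUF);
}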

Reviewed by:            jtl, kib
Differential Revision:	https://reviews.freebsd.org/D45746
commit efcb2ec8cb (parent 656991b0c6)
Gleb Smirnoff, 2024-10-24 09:58:05 -07:00
2 changed files with 31 additions and 11 deletions

sys/kern/kern_timeout.c

@@ -649,6 +649,7 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc,
     static callout_func_t *lastfunc;
 #endif
     CC_LOCK_ASSERT(cc);
     KASSERT((c->c_iflags & CALLOUT_PENDING) == CALLOUT_PENDING,
         ("softclock_call_cc: pend %p %x", c, c->c_iflags));
     KASSERT((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE,
@@ -671,16 +672,29 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc,
     cc_exec_last_func(cc, direct) = c_func;
     cc_exec_last_arg(cc, direct) = c_arg;
     cc_exec_cancel(cc, direct) = false;
-    CC_UNLOCK(cc);
     if (c_lock != NULL) {
-        class->lc_lock(c_lock, lock_status);
-        /*
-         * The callout may have been cancelled
-         * while we switched locks.
-         */
-        if (cc_exec_cancel(cc, direct)) {
-            class->lc_unlock(c_lock);
-            goto skip;
+        if (c_iflags & CALLOUT_TRYLOCK) {
+            if (__predict_false(class->lc_trylock(c_lock,
+                lock_status) == 0)) {
+                cc_exec_curr(cc, direct) = NULL;
+                callout_cc_add(c, cc,
+                    cc->cc_lastscan + c->c_precision / 2,
+                    qmax(c->c_precision / 2, 1), c_func, c_arg,
+                    (direct) ? C_DIRECT_EXEC : 0);
+                return;
+            }
+            CC_UNLOCK(cc);
+        } else {
+            CC_UNLOCK(cc);
+            class->lc_lock(c_lock, lock_status);
+            /*
+             * The callout may have been cancelled
+             * while we switched locks.
+             */
+            if (cc_exec_cancel(cc, direct)) {
+                class->lc_unlock(c_lock);
+                goto skip;
+            }
         }
         /* The callout cannot be stopped now. */
         cc_exec_cancel(cc, direct) = true;
@@ -698,6 +712,7 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc,
                 c, c_func, c_arg);
         }
     } else {
+        CC_UNLOCK(cc);
 #ifdef CALLOUT_PROFILING
         (*mpcalls)++;
 #endif
@@ -1332,10 +1347,15 @@ void
 _callout_init_lock(struct callout *c, struct lock_object *lock, int flags)
 {
     KASSERT(lock != NULL, ("%s: no lock", __func__));
-    KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
+    KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK |
+        CALLOUT_TRYLOCK)) == 0,
         ("%s: bad flags %d", __func__, flags));
     KASSERT(!(LOCK_CLASS(lock)->lc_flags & LC_SLEEPABLE),
         ("%s: callout %p has sleepable lock", __func__, c));
+    KASSERT(!(flags & CALLOUT_TRYLOCK) ||
+        (LOCK_CLASS(lock)->lc_trylock != NULL),
+        ("%s: CALLOUT_TRYLOCK requested for %s",
+        __func__, LOCK_CLASS(lock)->lc_name));
     *c = (struct callout ){
         .c_lock = lock,

sys/sys/callout.h

@@ -39,7 +39,7 @@
 #include <sys/_callout.h>
 
-#define CALLOUT_LOCAL_ALLOC  0x0001 /* was allocated from callfree */
+#define CALLOUT_TRYLOCK      0x0001 /* try semantic in softclock_call_cc */
 #define CALLOUT_ACTIVE       0x0002 /* callout is currently active */
 #define CALLOUT_PENDING      0x0004 /* callout is waiting for timeout */
 #define CALLOUT_MPSAFE       0x0008 /* deprecated */