mirror of
https://git.hardenedbsd.org/hardenedbsd/HardenedBSD.git
synced 2024-11-22 03:04:34 +01:00
Schedule fast taskqueue callouts on right CPU.
With fast taskqueues using direct callouts, we can reduce the number of CPU wakeups by scheduling the callout on the current CPU when a taskqueue calls taskqueue_enqueue_timeout() on itself. The trick won't work for regular taskqueues, since the callout thread would occupy the CPU. It also may not work in the case of multiple threads, since we do not know which thread will pick up the task, and we do not want excessive callout migrations. So we optimize only the cases where we safely can. In practice this allows the iichid(4) taskqueue to stay on the CPU where the underlying ig4(4) interrupts are routed, and to avoid kicking CPU 0 with timer interrupts on each sampling period (every 2nd/3rd sleep). MFC after: 1 month
This commit is contained in:
parent
a8b70cf260
commit
7bbac6419d
@ -363,8 +363,14 @@ taskqueue_enqueue_timeout_sbt(struct taskqueue *queue,
|
|||||||
if (sbt > 0) {
|
if (sbt > 0) {
|
||||||
if (queue->tq_spin)
|
if (queue->tq_spin)
|
||||||
flags |= C_DIRECT_EXEC;
|
flags |= C_DIRECT_EXEC;
|
||||||
callout_reset_sbt(&timeout_task->c, sbt, pr,
|
if (queue->tq_spin && queue->tq_tcount == 1 &&
|
||||||
taskqueue_timeout_func, timeout_task, flags);
|
queue->tq_threads[0] == curthread) {
|
||||||
|
callout_reset_sbt_curcpu(&timeout_task->c, sbt, pr,
|
||||||
|
taskqueue_timeout_func, timeout_task, flags);
|
||||||
|
} else {
|
||||||
|
callout_reset_sbt(&timeout_task->c, sbt, pr,
|
||||||
|
taskqueue_timeout_func, timeout_task, flags);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
TQ_UNLOCK(queue);
|
TQ_UNLOCK(queue);
|
||||||
}
|
}
|
||||||
|
Loading…
Reference in New Issue
Block a user