sync code with the latest improvements from OpenBSD

purplerain 2023-09-15 02:35:30 +00:00
parent aec8820748
commit 66d94126c9
Signed by: purplerain
GPG Key ID: F42C07F07E2E35B7
18 changed files with 112 additions and 108 deletions

View File

@@ -1,4 +1,4 @@
/* $OpenBSD: c_sh.c,v 1.64 2020/05/22 07:50:07 benno Exp $ */
/* $OpenBSD: c_sh.c,v 1.65 2023/09/14 18:32:03 cheloha Exp $ */
/*
* built-in Bourne commands
@@ -680,14 +680,10 @@ static void
p_tv(struct shf *shf, int posix, struct timeval *tv, int width, char *prefix,
char *suffix)
{
if (posix)
shf_fprintf(shf, "%s%*lld.%02ld%s", prefix ? prefix : "",
width, (long long)tv->tv_sec, tv->tv_usec / 10000, suffix);
else
shf_fprintf(shf, "%s%*lldm%02lld.%02lds%s", prefix ? prefix : "",
width, (long long)tv->tv_sec / 60,
(long long)tv->tv_sec % 60,
tv->tv_usec / 10000, suffix);
struct timespec ts;
TIMEVAL_TO_TIMESPEC(tv, &ts);
p_ts(shf, posix, &ts, width, prefix, suffix);
}
static void
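Note on the hunk above: p_tv() now just converts its struct timeval to a struct timespec and defers the formatting to p_ts(), which is not shown in this diff. A minimal standalone sketch of the conversion that the TIMEVAL_TO_TIMESPEC() macro performs per <sys/time.h>; the helper name tv_to_ts is illustrative only:

#include <sys/time.h>

/*
 * Sketch of what TIMEVAL_TO_TIMESPEC(tv, &ts) does: whole seconds carry
 * over unchanged, microseconds are scaled to nanoseconds.
 */
static void
tv_to_ts(const struct timeval *tv, struct timespec *ts)
{
	ts->tv_sec = tv->tv_sec;
	ts->tv_nsec = tv->tv_usec * 1000;
}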

View File

@@ -1,4 +1,4 @@
.\" $OpenBSD: msyscall.2,v 1.2 2019/11/27 20:53:05 schwarze Exp $
.\" $OpenBSD: msyscall.2,v 1.3 2023/09/14 19:59:12 jmc Exp $
.\"
.\" Copyright (c) 2019 Theo de Raadt <deraadt@openbsd.org>
.\"
@@ -14,7 +14,7 @@
.\" ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
.\" OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
.\"
.Dd $Mdocdate: November 27 2019 $
.Dd $Mdocdate: September 14 2023 $
.Dt MSYSCALL 2
.Os
.Sh NAME
@@ -41,7 +41,7 @@ If
is 0, no action is taken on the page that contains
.Fa addr .
.Pp
.Nm
.Fn msyscall
is currently intended for use by
.Xr ld.so 1
only, and may be called only once to indicate the location of

View File

@@ -809,6 +809,7 @@ addr_to_nat64(const struct sockaddr_storage* addr,
struct sockaddr_in *sin = (struct sockaddr_in *)addr;
struct sockaddr_in6 *sin6;
uint8_t *v4_byte;
int i;
/* This needs to be checked by the caller */
log_assert(addr->ss_family == AF_INET);
@@ -826,7 +827,7 @@ addr_to_nat64(const struct sockaddr_storage* addr,
nat64_prefixnet = nat64_prefixnet / 8;
v4_byte = (uint8_t *)&sin->sin_addr.s_addr;
for(int i = 0; i < 4; i++) {
for(i = 0; i < 4; i++) {
if(nat64_prefixnet == 8) {
/* bits 64...71 are MBZ */
sin6->sin6_addr.s6_addr[nat64_prefixnet++] = 0;
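Context for the hunk above: addr_to_nat64() copies the four IPv4 address bytes into the IPv6 address immediately after the configured NAT64 prefix, zeroing byte 8 (bits 64..71, which must be zero per RFC 6052) when the prefix is shorter than /96. A hedged, self-contained sketch of the simplest case, the well-known /96 prefix 64:ff9b::, where no MBZ byte falls after the prefix; synth_nat64_wkp() is an illustrative name, not unbound's API:

#include <sys/types.h>
#include <string.h>
#include <netinet/in.h>

/*
 * Sketch: embed an IPv4 address in the RFC 6052 well-known prefix
 * 64:ff9b::/96, e.g. 192.0.2.1 -> 64:ff9b::c000:201.
 */
static void
synth_nat64_wkp(const struct in_addr *v4, struct in6_addr *v6)
{
	memset(v6, 0, sizeof(*v6));
	v6->s6_addr[0] = 0x00;
	v6->s6_addr[1] = 0x64;
	v6->s6_addr[2] = 0xff;
	v6->s6_addr[3] = 0x9b;
	memcpy(&v6->s6_addr[12], &v4->s_addr, 4);	/* bits 96..127 */
}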

View File

@@ -1,4 +1,4 @@
/* $OpenBSD: lapic.c,v 1.69 2023/08/23 01:55:46 cheloha Exp $ */
/* $OpenBSD: lapic.c,v 1.70 2023/09/14 19:39:47 cheloha Exp $ */
/* $NetBSD: lapic.c,v 1.2 2003/05/08 01:04:35 fvdl Exp $ */
/*-
@@ -498,7 +498,8 @@ lapic_initclocks(void)
stathz = hz;
profhz = stathz * 10;
clockintr_init(CL_RNDSTAT);
statclock_is_randomized = 1;
clockintr_init(0);
}

View File

@@ -1,4 +1,4 @@
/* $OpenBSD: agtimer.c,v 1.19 2023/08/23 01:55:46 cheloha Exp $ */
/* $OpenBSD: agtimer.c,v 1.20 2023/09/14 19:39:47 cheloha Exp $ */
/*
* Copyright (c) 2011 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2013 Patrick Wildt <patrick@blueri.se>
@@ -230,7 +230,8 @@ agtimer_cpu_initclocks(void)
stathz = hz;
profhz = stathz * 10;
clockintr_init(CL_RNDSTAT);
statclock_is_randomized = 1;
clockintr_init(0);
if (sc->sc_ticks_per_second != agtimer_frequency) {
agtimer_set_clockrate(agtimer_frequency);

View File

@@ -1,4 +1,4 @@
/* $OpenBSD: amptimer.c,v 1.18 2023/08/23 01:55:46 cheloha Exp $ */
/* $OpenBSD: amptimer.c,v 1.19 2023/09/14 19:39:47 cheloha Exp $ */
/*
* Copyright (c) 2011 Dale Rahn <drahn@openbsd.org>
*
@@ -287,7 +287,8 @@ amptimer_cpu_initclocks(void)
stathz = hz;
profhz = hz * 10;
clockintr_init(CL_RNDSTAT);
statclock_is_randomized = 1;
clockintr_init(0);
if (sc->sc_ticks_per_second != amptimer_frequency) {
amptimer_set_clockrate(amptimer_frequency);

View File

@@ -1,4 +1,4 @@
/* $OpenBSD: agtimer.c,v 1.26 2023/08/23 01:55:46 cheloha Exp $ */
/* $OpenBSD: agtimer.c,v 1.27 2023/09/14 19:39:47 cheloha Exp $ */
/*
* Copyright (c) 2011 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2013 Patrick Wildt <patrick@blueri.se>
@@ -293,7 +293,8 @@ agtimer_cpu_initclocks(void)
stathz = hz;
profhz = stathz * 10;
clockintr_init(CL_RNDSTAT);
statclock_is_randomized = 1;
clockintr_init(0);
if (sc->sc_ticks_per_second != agtimer_frequency) {
agtimer_set_clockrate(agtimer_frequency);

View File

@@ -1,4 +1,4 @@
/* $OpenBSD: dmtimer.c,v 1.20 2023/08/23 01:55:46 cheloha Exp $ */
/* $OpenBSD: dmtimer.c,v 1.21 2023/09/14 19:39:47 cheloha Exp $ */
/*
* Copyright (c) 2007,2009 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2013 Raphael Graf <r@undefined.ch>
@@ -232,7 +232,8 @@ dmtimer_cpu_initclocks(void)
stathz = hz;
profhz = stathz * 10;
clockintr_init(CL_RNDSTAT);
statclock_is_randomized = 1;
clockintr_init(0);
sc->sc_ticks_per_second = TIMER_FREQUENCY; /* 32768 */
sc->sc_nsec_cycle_ratio =

View File

@@ -1,4 +1,4 @@
/* $OpenBSD: gptimer.c,v 1.21 2023/08/23 01:55:46 cheloha Exp $ */
/* $OpenBSD: gptimer.c,v 1.22 2023/09/14 19:39:47 cheloha Exp $ */
/*
* Copyright (c) 2007,2009 Dale Rahn <drahn@openbsd.org>
*
@@ -198,7 +198,8 @@ gptimer_cpu_initclocks(void)
{
stathz = hz;
profhz = stathz * 10;
clockintr_init(CL_RNDSTAT);
statclock_is_randomized = 1;
clockintr_init(0);
gptimer_nsec_cycle_ratio = TIMER_FREQUENCY * (1ULL << 32) / 1000000000;
gptimer_nsec_max = UINT64_MAX / gptimer_nsec_cycle_ratio;

View File

@@ -1,4 +1,4 @@
/* $OpenBSD: sxitimer.c,v 1.22 2023/08/23 01:55:46 cheloha Exp $ */
/* $OpenBSD: sxitimer.c,v 1.23 2023/09/14 19:39:47 cheloha Exp $ */
/*
* Copyright (c) 2007,2009 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2013 Raphael Graf <r@undefined.ch>
@@ -180,7 +180,8 @@ sxitimer_attach(struct device *parent, struct device *self, void *aux)
stathz = hz;
profhz = stathz * 10;
clockintr_init(CL_RNDSTAT);
statclock_is_randomized = 1;
clockintr_init(0);
/* stop timer, and set clk src */
bus_space_write_4(sxitimer_iot, sxitimer_ioh,

View File

@@ -1,4 +1,4 @@
/* $OpenBSD: lapic.c,v 1.56 2023/08/23 01:55:46 cheloha Exp $ */
/* $OpenBSD: lapic.c,v 1.57 2023/09/14 19:39:48 cheloha Exp $ */
/* $NetBSD: lapic.c,v 1.1.2.8 2000/02/23 06:10:50 sommerfeld Exp $ */
/*-
@@ -326,7 +326,8 @@ lapic_initclocks(void)
stathz = hz;
profhz = stathz * 10;
clockintr_init(CL_RNDSTAT);
statclock_is_randomized = 1;
clockintr_init(0);
}
extern int gettick(void); /* XXX put in header file */

View File

@@ -1,4 +1,4 @@
/* $OpenBSD: kern_clock.c,v 1.116 2023/09/09 18:19:03 cheloha Exp $ */
/* $OpenBSD: kern_clock.c,v 1.119 2023/09/14 22:27:09 cheloha Exp $ */
/* $NetBSD: kern_clock.c,v 1.34 1996/06/09 04:51:03 briggs Exp $ */
/*-
@@ -39,6 +39,7 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/clockintr.h>
#include <sys/timeout.h>
#include <sys/kernel.h>
#include <sys/limits.h>
@@ -86,17 +87,47 @@ int ticks = INT_MAX - (15 * 60 * HZ);
/* Don't force early wrap around, triggers bug in inteldrm */
volatile unsigned long jiffies;
uint32_t hardclock_period; /* [I] hardclock period (ns) */
uint32_t statclock_avg; /* [I] average statclock period (ns) */
uint32_t statclock_min; /* [I] minimum statclock period (ns) */
uint32_t statclock_mask; /* [I] set of allowed offsets */
int statclock_is_randomized; /* [I] fixed or pseudorandom period? */
/*
* Initialize clock frequencies and start both clocks running.
*/
void
initclocks(void)
{
uint32_t half_avg, var;
/*
* Let the machine-specific code do its bit.
*/
cpu_initclocks();
KASSERT(hz > 0 && hz <= 1000000000);
hardclock_period = 1000000000 / hz;
roundrobin_period = hardclock_period * 10;
KASSERT(stathz >= 1 && stathz <= 1000000000);
/*
* Compute the average statclock() period. Then find var, the
* largest power of two such that var <= statclock_avg / 2.
*/
statclock_avg = 1000000000 / stathz;
half_avg = statclock_avg / 2;
for (var = 1U << 31; var > half_avg; var /= 2)
continue;
/*
* Set a lower bound for the range using statclock_avg and var.
* The mask for that range is just (var - 1).
*/
statclock_min = statclock_avg - (var / 2);
statclock_mask = var - 1;
KASSERT(profhz >= stathz && profhz <= 1000000000);
KASSERT(profhz % stathz == 0);
profclock_period = 1000000000 / profhz;
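A worked example of the computation above, assuming stathz = 100 (a typical value, not fixed by this hunk):
statclock_avg  = 1000000000 / 100       = 10000000 ns
half_avg       = 10000000 / 2           = 5000000
var            = 2^22                   = 4194304   (largest power of two <= half_avg)
statclock_min  = 10000000 - 4194304 / 2 = 7902848 ns
statclock_mask = 4194304 - 1            = 4194303
With these values a randomized statclock period is statclock_min plus a pseudorandom offset in [0, statclock_mask], i.e. somewhere in [7902848, 12097151] ns, which averages out to roughly statclock_avg.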
@@ -245,24 +276,33 @@ stopprofclock(struct process *pr)
* do process and kernel statistics.
*/
void
statclock(struct clockframe *frame)
statclock(struct clockintr *cl, void *cf, void *arg)
{
uint64_t count, i;
struct clockframe *frame = cf;
struct cpu_info *ci = curcpu();
struct schedstate_percpu *spc = &ci->ci_schedstate;
struct proc *p = curproc;
struct process *pr;
if (statclock_is_randomized) {
count = clockintr_advance_random(cl, statclock_min,
statclock_mask);
} else {
count = clockintr_advance(cl, statclock_avg);
}
if (CLKF_USERMODE(frame)) {
pr = p->p_p;
/*
* Came from user mode; CPU was in user state.
* If this process is being profiled record the tick.
*/
p->p_uticks++;
p->p_uticks += count;
if (pr->ps_nice > NZERO)
spc->spc_cp_time[CP_NICE]++;
spc->spc_cp_time[CP_NICE] += count;
else
spc->spc_cp_time[CP_USER]++;
spc->spc_cp_time[CP_USER] += count;
} else {
/*
* Came from kernel mode, so we were:
@@ -279,25 +319,27 @@ statclock(struct clockframe *frame)
*/
if (CLKF_INTR(frame)) {
if (p != NULL)
p->p_iticks++;
p->p_iticks += count;
spc->spc_cp_time[spc->spc_spinning ?
CP_SPIN : CP_INTR]++;
CP_SPIN : CP_INTR] += count;
} else if (p != NULL && p != spc->spc_idleproc) {
p->p_sticks++;
p->p_sticks += count;
spc->spc_cp_time[spc->spc_spinning ?
CP_SPIN : CP_SYS]++;
CP_SPIN : CP_SYS] += count;
} else
spc->spc_cp_time[spc->spc_spinning ?
CP_SPIN : CP_IDLE]++;
CP_SPIN : CP_IDLE] += count;
}
if (p != NULL) {
p->p_cpticks++;
p->p_cpticks += count;
/*
* schedclock() runs every fourth statclock().
*/
if ((++spc->spc_schedticks & 3) == 0)
schedclock(p);
for (i = 0; i < count; i++) {
if ((++spc->spc_schedticks & 3) == 0)
schedclock(p);
}
}
}
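The count used throughout the rewritten statclock() above is the number of statclock periods that elapsed since the handler last ran, as reported by clockintr_advance() or, in the randomized case, clockintr_advance_random(); that is why each accounting bucket is bumped by count rather than by one, and why schedclock() is driven from a loop. A rough sketch of the randomized-advance idea, assuming it mirrors the kernel's behavior; advance_random() and rand64 are illustrative stand-ins, not the real clockintr API:

#include <stdint.h>

/*
 * Sketch: push an expiration time past "now" in pseudorandom steps of
 * min + (r & mask) nanoseconds and report how many periods elapsed.
 * rand64() stands in for the kernel's random source.
 */
static uint64_t
advance_random(uint64_t *expiration, uint64_t now, uint64_t min,
    uint32_t mask, uint64_t (*rand64)(void))
{
	uint64_t count = 0;

	while (*expiration <= now) {
		*expiration += min + (rand64() & mask);
		count++;
	}
	return count;
}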

View File

@@ -1,4 +1,4 @@
/* $OpenBSD: kern_clockintr.c,v 1.47 2023/09/10 03:08:05 cheloha Exp $ */
/* $OpenBSD: kern_clockintr.c,v 1.52 2023/09/14 22:27:09 cheloha Exp $ */
/*
* Copyright (c) 2003 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2020 Mark Kettenis <kettenis@openbsd.org>
@@ -37,16 +37,10 @@
* I Immutable after initialization.
*/
uint32_t clockintr_flags; /* [I] global state + behavior flags */
uint32_t hardclock_period; /* [I] hardclock period (ns) */
uint32_t statclock_avg; /* [I] average statclock period (ns) */
uint32_t statclock_min; /* [I] minimum statclock period (ns) */
uint32_t statclock_mask; /* [I] set of allowed offsets */
uint64_t clockintr_advance_random(struct clockintr *, uint64_t, uint32_t);
void clockintr_hardclock(struct clockintr *, void *, void *);
void clockintr_schedule(struct clockintr *, uint64_t);
void clockintr_schedule_locked(struct clockintr *, uint64_t);
void clockintr_statclock(struct clockintr *, void *, void *);
void clockqueue_intrclock_install(struct clockintr_queue *,
const struct intrclock *);
uint64_t clockqueue_next(const struct clockintr_queue *);
@@ -62,34 +56,10 @@ uint64_t nsec_advance(uint64_t *, uint64_t, uint64_t);
void
clockintr_init(uint32_t flags)
{
uint32_t half_avg, var;
KASSERT(CPU_IS_PRIMARY(curcpu()));
KASSERT(clockintr_flags == 0);
KASSERT(!ISSET(flags, ~CL_FLAG_MASK));
KASSERT(hz > 0 && hz <= 1000000000);
hardclock_period = 1000000000 / hz;
roundrobin_period = hardclock_period * 10;
KASSERT(stathz >= 1 && stathz <= 1000000000);
/*
* Compute the average statclock() period. Then find var, the
* largest power of two such that var <= statclock_avg / 2.
*/
statclock_avg = 1000000000 / stathz;
half_avg = statclock_avg / 2;
for (var = 1U << 31; var > half_avg; var /= 2)
continue;
/*
* Set a lower bound for the range using statclock_avg and var.
* The mask for that range is just (var - 1).
*/
statclock_min = statclock_avg - (var / 2);
statclock_mask = var - 1;
SET(clockintr_flags, flags | CL_INIT);
}
@@ -112,19 +82,13 @@ clockintr_cpu_init(const struct intrclock *ic)
if (ic != NULL)
clockqueue_intrclock_install(cq, ic);
/* TODO: Remove these from struct clockintr_queue. */
/* TODO: Remove this from struct clockintr_queue. */
if (cq->cq_hardclock == NULL) {
cq->cq_hardclock = clockintr_establish(ci, clockintr_hardclock,
NULL);
if (cq->cq_hardclock == NULL)
panic("%s: failed to establish hardclock", __func__);
}
if (cq->cq_statclock == NULL) {
cq->cq_statclock = clockintr_establish(ci, clockintr_statclock,
NULL);
if (cq->cq_statclock == NULL)
panic("%s: failed to establish statclock", __func__);
}
/*
* Mask CQ_INTRCLOCK while we're advancing the internal clock
@@ -169,13 +133,13 @@ clockintr_cpu_init(const struct intrclock *ic)
* We can always advance the statclock. There is no reason to
* stagger a randomized statclock.
*/
if (!ISSET(clockintr_flags, CL_RNDSTAT)) {
if (cq->cq_statclock->cl_expiration == 0) {
clockintr_stagger(cq->cq_statclock, statclock_avg,
if (!statclock_is_randomized) {
if (spc->spc_statclock->cl_expiration == 0) {
clockintr_stagger(spc->spc_statclock, statclock_avg,
multiplier, MAXCPUS);
}
}
clockintr_advance(cq->cq_statclock, statclock_avg);
clockintr_advance(spc->spc_statclock, statclock_avg);
/*
* XXX Need to find a better place to do this. We can't do it in
@@ -470,21 +434,6 @@ clockintr_hardclock(struct clockintr *cl, void *frame, void *arg)
hardclock(frame);
}
void
clockintr_statclock(struct clockintr *cl, void *frame, void *arg)
{
uint64_t count, i;
if (ISSET(clockintr_flags, CL_RNDSTAT)) {
count = clockintr_advance_random(cl, statclock_min,
statclock_mask);
} else {
count = clockintr_advance(cl, statclock_avg);
}
for (i = 0; i < count; i++)
statclock(frame);
}
void
clockqueue_init(struct clockintr_queue *cq)
{

View File

@@ -1,4 +1,4 @@
/* $OpenBSD: kern_sched.c,v 1.90 2023/09/10 03:08:05 cheloha Exp $ */
/* $OpenBSD: kern_sched.c,v 1.91 2023/09/14 22:07:11 cheloha Exp $ */
/*
* Copyright (c) 2007, 2008 Artur Grabowski <art@openbsd.org>
*
@@ -97,6 +97,9 @@ sched_init_cpu(struct cpu_info *ci)
spc->spc_roundrobin = clockintr_establish(ci, roundrobin, NULL);
if (spc->spc_roundrobin == NULL)
panic("%s: clockintr_establish roundrobin", __func__);
spc->spc_statclock = clockintr_establish(ci, statclock, NULL);
if (spc->spc_statclock == NULL)
panic("%s: clockintr_establish statclock", __func__);
kthread_create_deferred(sched_kthreads_create, ci);

View File

@@ -1,4 +1,4 @@
/* $OpenBSD: clockintr.h,v 1.13 2023/09/10 03:08:05 cheloha Exp $ */
/* $OpenBSD: clockintr.h,v 1.16 2023/09/14 22:07:11 cheloha Exp $ */
/*
* Copyright (c) 2020-2022 Scott Cheloha <cheloha@openbsd.org>
*
@@ -98,7 +98,6 @@ struct clockintr_queue {
TAILQ_HEAD(, clockintr) cq_pend;/* [m] pending clockintr list */
struct clockintr *cq_running; /* [m] running clockintr */
struct clockintr *cq_hardclock; /* [o] hardclock handle */
struct clockintr *cq_statclock; /* [o] statclock handle */
struct intrclock cq_intrclock; /* [I] local interrupt clock */
struct clockintr_stat cq_stat; /* [o] dispatch statistics */
volatile uint32_t cq_gen; /* [o] cq_stat update generation */
@@ -115,8 +114,7 @@ struct clockintr_queue {
#define CL_STATE_MASK 0x00000001
/* Global behavior flags. */
#define CL_RNDSTAT 0x80000000 /* randomized statclock */
#define CL_FLAG_MASK 0x80000000
#define CL_FLAG_MASK 0x00000000
void clockintr_cpu_init(const struct intrclock *);
int clockintr_dispatch(void *);
@@ -128,6 +126,7 @@ void clockintr_trigger(void);
*/
uint64_t clockintr_advance(struct clockintr *, uint64_t);
uint64_t clockintr_advance_random(struct clockintr *, uint64_t, uint32_t);
void clockintr_cancel(struct clockintr *);
struct clockintr *clockintr_establish(struct cpu_info *,
void (*)(struct clockintr *, void *, void *), void *);

View File

@@ -1,4 +1,4 @@
/* $OpenBSD: sched.h,v 1.63 2023/09/10 03:08:05 cheloha Exp $ */
/* $OpenBSD: sched.h,v 1.64 2023/09/14 22:07:11 cheloha Exp $ */
/* $NetBSD: sched.h,v 1.2 1999/02/28 18:14:58 ross Exp $ */
/*-
@@ -109,6 +109,7 @@ struct schedstate_percpu {
struct clockintr *spc_itimer; /* [o] itimer_update handle */
struct clockintr *spc_profclock; /* [o] profclock handle */
struct clockintr *spc_roundrobin; /* [o] roundrobin handle */
struct clockintr *spc_statclock; /* [o] statclock handle */
u_int spc_nrun; /* procs on the run queues */

View File

@@ -1,4 +1,4 @@
/* $OpenBSD: systm.h,v 1.165 2023/08/23 01:55:45 cheloha Exp $ */
/* $OpenBSD: systm.h,v 1.167 2023/09/14 20:58:51 cheloha Exp $ */
/* $NetBSD: systm.h,v 1.50 1996/06/09 04:55:09 briggs Exp $ */
/*-
@@ -234,10 +234,14 @@ int tstohz(const struct timespec *);
void realitexpire(void *);
extern uint32_t hardclock_period;
extern uint32_t statclock_avg;
extern int statclock_is_randomized;
struct clockframe;
void hardclock(struct clockframe *);
void statclock(struct clockframe *);
struct clockintr;
void statclock(struct clockintr *, void *, void *);
void initclocks(void);
void inittodr(time_t);

View File

@@ -809,6 +809,7 @@ addr_to_nat64(const struct sockaddr_storage* addr,
struct sockaddr_in *sin = (struct sockaddr_in *)addr;
struct sockaddr_in6 *sin6;
uint8_t *v4_byte;
int i;
/* This needs to be checked by the caller */
log_assert(addr->ss_family == AF_INET);
@@ -826,7 +827,7 @@ addr_to_nat64(const struct sockaddr_storage* addr,
nat64_prefixnet = nat64_prefixnet / 8;
v4_byte = (uint8_t *)&sin->sin_addr.s_addr;
for(int i = 0; i < 4; i++) {
for(i = 0; i < 4; i++) {
if(nat64_prefixnet == 8) {
/* bits 64...71 are MBZ */
sin6->sin6_addr.s6_addr[nat64_prefixnet++] = 0;