diff --git a/lib/libradius/radius.h b/lib/libradius/radius.h
index 92c67bb0f..ac19d2b78 100644
--- a/lib/libradius/radius.h
+++ b/lib/libradius/radius.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: radius.h,v 1.3 2024/01/19 06:32:28 yasuoka Exp $ */
+/* $OpenBSD: radius.h,v 1.4 2024/02/25 06:22:45 yasuoka Exp $ */
 
 /*-
  * Copyright (c) 2009 Internet Initiative Japan Inc.
@@ -173,7 +173,7 @@
 #define RADIUS_SERVICE_TYPE_ADMINISTRATIVE 6
 #define RADIUS_SERVICE_TYPE_NAS_PROMPT 7
 #define RADIUS_SERVICE_TYPE_AUTHENTICAT_ONLY 8
-#define RADIUS_SERVICE_TYPE_CB_NAS_PROMPTi 9
+#define RADIUS_SERVICE_TYPE_CB_NAS_PROMPT 9
 #define RADIUS_SERVICE_TYPE_CALL_CHECK 10
 #define RADIUS_SERVICE_TYPE_CB_ADMINISTRATIVE 11
 
diff --git a/sbin/unwind/resolver.c b/sbin/unwind/resolver.c
index 1596b3994..e9d877b01 100644
--- a/sbin/unwind/resolver.c
+++ b/sbin/unwind/resolver.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: resolver.c,v 1.163 2023/12/14 09:59:27 claudio Exp $ */
+/* $OpenBSD: resolver.c,v 1.164 2024/02/25 10:13:09 florian Exp $ */
 
 /*
@@ -236,6 +236,20 @@ static const char * const forward_transparent_zones[] = {
 	/* RFC1918 */
 	"10.in-addr.arpa. transparent",
 	"16.172.in-addr.arpa. transparent",
+	"17.172.in-addr.arpa. transparent",
+	"18.172.in-addr.arpa. transparent",
+	"19.172.in-addr.arpa. transparent",
+	"20.172.in-addr.arpa. transparent",
+	"21.172.in-addr.arpa. transparent",
+	"22.172.in-addr.arpa. transparent",
+	"23.172.in-addr.arpa. transparent",
+	"24.172.in-addr.arpa. transparent",
+	"25.172.in-addr.arpa. transparent",
+	"26.172.in-addr.arpa. transparent",
+	"27.172.in-addr.arpa. transparent",
+	"28.172.in-addr.arpa. transparent",
+	"29.172.in-addr.arpa. transparent",
+	"30.172.in-addr.arpa. transparent",
 	"31.172.in-addr.arpa. transparent",
 	"168.192.in-addr.arpa. transparent",
diff --git a/sys/arch/amd64/include/cpu.h b/sys/arch/amd64/include/cpu.h
index dd0537cb1..3eea95d56 100644
--- a/sys/arch/amd64/include/cpu.h
+++ b/sys/arch/amd64/include/cpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.h,v 1.162 2024/02/12 01:18:17 guenther Exp $ */
+/* $OpenBSD: cpu.h,v 1.163 2024/02/25 19:15:50 cheloha Exp $ */
 /* $NetBSD: cpu.h,v 1.1 2003/04/26 18:39:39 fvdl Exp $ */
 
 /*-
@@ -228,7 +228,7 @@ struct cpu_info {
 	paddr_t ci_vmcs_pa;
 	struct rwlock ci_vmcs_lock;
 
-	struct clockintr_queue ci_queue;
+	struct clockqueue ci_queue;
 };
 
 #define CPUF_BSP 0x0001 /* CPU is the original BSP */
diff --git a/sys/arch/arm/include/cpu.h b/sys/arch/arm/include/cpu.h
index 19c8c2749..eedd7ad25 100644
--- a/sys/arch/arm/include/cpu.h
+++ b/sys/arch/arm/include/cpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.h,v 1.65 2024/01/24 19:23:39 cheloha Exp $ */
+/* $OpenBSD: cpu.h,v 1.66 2024/02/25 19:15:50 cheloha Exp $ */
 /* $NetBSD: cpu.h,v 1.34 2003/06/23 11:01:08 martin Exp $ */
 
 /*
@@ -200,7 +200,7 @@ struct cpu_info {
 	struct gmonparam *ci_gmon;
 	struct clockintr ci_gmonclock;
 #endif
-	struct clockintr_queue ci_queue;
+	struct clockqueue ci_queue;
 	char ci_panicbuf[512];
 };
 
diff --git a/sys/arch/arm64/include/cpu.h b/sys/arch/arm64/include/cpu.h
index 937f5fa0e..d0521a33a 100644
--- a/sys/arch/arm64/include/cpu.h
+++ b/sys/arch/arm64/include/cpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.h,v 1.42 2024/01/24 19:23:39 cheloha Exp $ */
+/* $OpenBSD: cpu.h,v 1.43 2024/02/25 19:15:50 cheloha Exp $ */
 /*
  * Copyright (c) 2016 Dale Rahn
  *
@@ -176,7 +176,7 @@ struct cpu_info {
 	struct gmonparam *ci_gmon;
 	struct clockintr ci_gmonclock;
 #endif
-	struct clockintr_queue ci_queue;
+	struct clockqueue ci_queue;
 	char ci_panicbuf[512];
 };
 
diff --git a/sys/arch/i386/include/cpu.h b/sys/arch/i386/include/cpu.h
index df193f817..64bfd3530 100644
--- a/sys/arch/i386/include/cpu.h
+++ b/sys/arch/i386/include/cpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.h,v 1.184 2024/01/24 19:23:39 cheloha Exp $ */
+/* $OpenBSD: cpu.h,v 1.185 2024/02/25 19:15:50 cheloha Exp $ */
 /* $NetBSD: cpu.h,v 1.35 1996/05/05 19:29:26 christos Exp $ */
 
 /*-
@@ -170,7 +170,7 @@ struct cpu_info {
 	struct gmonparam *ci_gmon;
 	struct clockintr ci_gmonclock;
 #endif
-	struct clockintr_queue ci_queue;
+	struct clockqueue ci_queue;
 	char ci_panicbuf[512];
 };
 
diff --git a/sys/kern/kern_clockintr.c b/sys/kern/kern_clockintr.c
index e7639dc73..d28c5da47 100644
--- a/sys/kern/kern_clockintr.c
+++ b/sys/kern/kern_clockintr.c
@@ -1,8 +1,8 @@
-/* $OpenBSD: kern_clockintr.c,v 1.68 2024/02/24 01:43:32 cheloha Exp $ */
+/* $OpenBSD: kern_clockintr.c,v 1.70 2024/02/25 19:15:50 cheloha Exp $ */
 /*
  * Copyright (c) 2003 Dale Rahn
  * Copyright (c) 2020 Mark Kettenis
- * Copyright (c) 2020-2022 Scott Cheloha
+ * Copyright (c) 2020-2024 Scott Cheloha
  *
  * Permission to use, copy, modify, and distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -34,12 +34,12 @@
 void clockintr_cancel_locked(struct clockintr *);
 void clockintr_hardclock(struct clockrequest *, void *, void *);
 void clockintr_schedule_locked(struct clockintr *, uint64_t);
-void clockqueue_intrclock_install(struct clockintr_queue *,
+void clockqueue_intrclock_install(struct clockqueue *,
    const struct intrclock *);
-void clockqueue_intrclock_reprogram(struct clockintr_queue *);
-uint64_t clockqueue_next(const struct clockintr_queue *);
-void clockqueue_pend_delete(struct clockintr_queue *, struct clockintr *);
-void clockqueue_pend_insert(struct clockintr_queue *, struct clockintr *,
+void clockqueue_intrclock_reprogram(struct clockqueue *);
+uint64_t clockqueue_next(const struct clockqueue *);
+void clockqueue_pend_delete(struct clockqueue *, struct clockintr *);
+void clockqueue_pend_insert(struct clockqueue *, struct clockintr *,
    uint64_t);
 void intrclock_rearm(struct intrclock *, uint64_t);
 void intrclock_trigger(struct intrclock *);
@@ -55,14 +55,14 @@ clockintr_cpu_init(const struct intrclock *ic)
 {
 	uint64_t multiplier = 0;
 	struct cpu_info *ci = curcpu();
-	struct clockintr_queue *cq = &ci->ci_queue;
+	struct clockqueue *cq = &ci->ci_queue;
 	struct schedstate_percpu *spc = &ci->ci_schedstate;
 	int reset_cq_intrclock = 0;
 
 	if (ic != NULL)
 		clockqueue_intrclock_install(cq, ic);
 
-	/* TODO: Remove this from struct clockintr_queue. */
+	/* TODO: Remove this from struct clockqueue. */
 	if (CPU_IS_PRIMARY(ci) && cq->cq_hardclock.cl_expiration == 0) {
 		clockintr_bind(&cq->cq_hardclock, ci, clockintr_hardclock,
 		    NULL);
@@ -141,7 +141,7 @@ clockintr_cpu_init(const struct intrclock *ic)
 void
 clockintr_trigger(void)
 {
-	struct clockintr_queue *cq = &curcpu()->ci_queue;
+	struct clockqueue *cq = &curcpu()->ci_queue;
 
 	KASSERT(ISSET(cq->cq_flags, CQ_INIT));
 
@@ -158,7 +158,7 @@ clockintr_dispatch(void *frame)
 	uint64_t lateness, run = 0, start;
 	struct cpu_info *ci = curcpu();
 	struct clockintr *cl;
-	struct clockintr_queue *cq = &ci->ci_queue;
+	struct clockqueue *cq = &ci->ci_queue;
 	struct clockrequest *request = &cq->cq_request;
 	void *arg;
 	void (*func)(struct clockrequest *, void *, void *);
@@ -272,7 +272,7 @@ uint64_t
 clockintr_advance(struct clockintr *cl, uint64_t period)
 {
 	uint64_t count, expiration;
-	struct clockintr_queue *cq = cl->cl_queue;
+	struct clockqueue *cq = cl->cl_queue;
 
 	mtx_enter(&cq->cq_mtx);
 	expiration = cl->cl_expiration;
@@ -286,7 +286,7 @@ clockintr_advance(struct clockintr *cl, uint64_t period)
 uint64_t
 clockrequest_advance(struct clockrequest *cr, uint64_t period)
 {
-	struct clockintr_queue *cq = cr->cr_queue;
+	struct clockqueue *cq = cr->cr_queue;
 
 	KASSERT(cr == &cq->cq_request);
 
@@ -299,7 +299,7 @@ clockrequest_advance_random(struct clockrequest *cr, uint64_t min,
    uint32_t mask)
 {
 	uint64_t count = 0;
-	struct clockintr_queue *cq = cr->cr_queue;
+	struct clockqueue *cq = cr->cr_queue;
 	uint32_t off;
 
 	KASSERT(cr == &cq->cq_request);
@@ -317,7 +317,7 @@
 void
 clockintr_cancel(struct clockintr *cl)
 {
-	struct clockintr_queue *cq = cl->cl_queue;
+	struct clockqueue *cq = cl->cl_queue;
 
 	mtx_enter(&cq->cq_mtx);
 	clockintr_cancel_locked(cl);
@@ -327,7 +327,7 @@ clockintr_cancel(struct clockintr *cl)
 void
 clockintr_cancel_locked(struct clockintr *cl)
 {
-	struct clockintr_queue *cq = cl->cl_queue;
+	struct clockqueue *cq = cl->cl_queue;
 	int was_next;
 
 	MUTEX_ASSERT_LOCKED(&cq->cq_mtx);
@@ -350,7 +350,7 @@ void
 clockintr_bind(struct clockintr *cl, struct cpu_info *ci,
    void (*func)(struct clockrequest *, void *, void *), void *arg)
 {
-	struct clockintr_queue *cq = &ci->ci_queue;
+	struct clockqueue *cq = &ci->ci_queue;
 
 	splassert(IPL_NONE);
 	KASSERT(cl->cl_queue == NULL);
@@ -366,7 +366,7 @@ clockintr_bind(struct clockintr *cl, struct cpu_info *ci,
 void
 clockintr_unbind(struct clockintr *cl, uint32_t flags)
 {
-	struct clockintr_queue *cq = cl->cl_queue;
+	struct clockqueue *cq = cl->cl_queue;
 
 	KASSERT(!ISSET(flags, ~CL_FLAG_MASK));
 
@@ -390,7 +390,7 @@ clockintr_unbind(struct clockintr *cl, uint32_t flags)
 void
 clockintr_schedule(struct clockintr *cl, uint64_t expiration)
 {
-	struct clockintr_queue *cq = cl->cl_queue;
+	struct clockqueue *cq = cl->cl_queue;
 
 	mtx_enter(&cq->cq_mtx);
 	clockintr_schedule_locked(cl, expiration);
@@ -400,7 +400,7 @@ clockintr_schedule(struct clockintr *cl, uint64_t expiration)
 void
 clockintr_schedule_locked(struct clockintr *cl, uint64_t expiration)
 {
-	struct clockintr_queue *cq = cl->cl_queue;
+	struct clockqueue *cq = cl->cl_queue;
 
 	MUTEX_ASSERT_LOCKED(&cq->cq_mtx);
 
@@ -421,7 +421,7 @@ void
 clockintr_stagger(struct clockintr *cl, uint64_t period, uint32_t numer,
    uint32_t denom)
 {
-	struct clockintr_queue *cq = cl->cl_queue;
+	struct clockqueue *cq = cl->cl_queue;
 
 	KASSERT(numer < denom);
 
@@ -443,7 +443,7 @@ clockintr_hardclock(struct clockrequest *cr, void *frame, void *arg)
 }
 
 void
-clockqueue_init(struct clockintr_queue *cq)
+clockqueue_init(struct clockqueue *cq)
 {
 	if (ISSET(cq->cq_flags, CQ_INIT))
 		return;
@@ -457,7 +457,7 @@ clockqueue_init(struct clockintr_queue *cq)
 }
 
 void
-clockqueue_intrclock_install(struct clockintr_queue *cq,
+clockqueue_intrclock_install(struct clockqueue *cq,
    const struct intrclock *ic)
 {
 	mtx_enter(&cq->cq_mtx);
@@ -469,14 +469,14 @@ clockqueue_intrclock_install(struct clockintr_queue *cq,
 }
 
 uint64_t
-clockqueue_next(const struct clockintr_queue *cq)
+clockqueue_next(const struct clockqueue *cq)
 {
 	MUTEX_ASSERT_LOCKED(&cq->cq_mtx);
 	return TAILQ_FIRST(&cq->cq_pend)->cl_expiration;
 }
 
 void
-clockqueue_pend_delete(struct clockintr_queue *cq, struct clockintr *cl)
+clockqueue_pend_delete(struct clockqueue *cq, struct clockintr *cl)
 {
 	MUTEX_ASSERT_LOCKED(&cq->cq_mtx);
 	KASSERT(ISSET(cl->cl_flags, CLST_PENDING));
@@ -486,7 +486,7 @@ clockqueue_pend_delete(struct clockintr_queue *cq, struct clockintr *cl)
 }
 
 void
-clockqueue_pend_insert(struct clockintr_queue *cq, struct clockintr *cl,
+clockqueue_pend_insert(struct clockqueue *cq, struct clockintr *cl,
    uint64_t expiration)
 {
 	struct clockintr *elm;
@@ -507,7 +507,7 @@ clockqueue_pend_insert(struct clockintr_queue *cq, struct clockintr *cl,
 }
 
 void
-clockqueue_intrclock_reprogram(struct clockintr_queue *cq)
+clockqueue_intrclock_reprogram(struct clockqueue *cq)
 {
 	uint64_t exp, now;
 
@@ -564,7 +564,7 @@ sysctl_clockintr(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
 {
 	struct clockintr_stat sum, tmp;
-	struct clockintr_queue *cq;
+	struct clockqueue *cq;
 	struct cpu_info *ci;
 	CPU_INFO_ITERATOR cii;
 	uint32_t gen;
@@ -636,7 +636,7 @@ void
 db_show_clockintr_cpu(struct cpu_info *ci)
 {
 	struct clockintr *elm;
-	struct clockintr_queue *cq = &ci->ci_queue;
+	struct clockqueue *cq = &ci->ci_queue;
 	u_int cpu = CPU_INFO_UNIT(ci);
 
 	if (cq->cq_running != NULL)
diff --git a/sys/sys/clockintr.h b/sys/sys/clockintr.h
index 66b10420f..a1fd5dd1b 100644
--- a/sys/sys/clockintr.h
+++ b/sys/sys/clockintr.h
@@ -1,6 +1,6 @@
-/* $OpenBSD: clockintr.h,v 1.26 2024/02/09 16:52:58 cheloha Exp $ */
+/* $OpenBSD: clockintr.h,v 1.29 2024/02/25 19:15:50 cheloha Exp $ */
 /*
- * Copyright (c) 2020-2022 Scott Cheloha
+ * Copyright (c) 2020-2024 Scott Cheloha
  *
  * Permission to use, copy, modify, and distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -35,6 +35,7 @@ struct clockintr_stat {
 #include
 #include
 
+struct clockqueue;
 struct clockrequest;
 struct cpu_info;
 
@@ -56,14 +57,13 @@ struct intrclock {
  * I Immutable after initialization.
  * m Parent queue mutex (cl_queue->cq_mtx).
  */
-struct clockintr_queue;
 struct clockintr {
 	uint64_t cl_expiration; /* [m] dispatch time */
 	TAILQ_ENTRY(clockintr) cl_alink; /* [m] cq_all glue */
 	TAILQ_ENTRY(clockintr) cl_plink; /* [m] cq_pend glue */
 	void *cl_arg; /* [I] argument */
 	void (*cl_func)(struct clockrequest *, void*, void*); /* [I] callback */
-	struct clockintr_queue *cl_queue; /* [I] parent queue */
+	struct clockqueue *cl_queue; /* [I] parent queue */
 	uint32_t cl_flags; /* [m] CLST_* flags */
 };
 
@@ -79,7 +79,7 @@ struct clockintr {
 */
 struct clockrequest {
 	uint64_t cr_expiration; /* [o] copy of dispatch time */
-	struct clockintr_queue *cr_queue; /* [I] enclosing queue */
+	struct clockqueue *cr_queue; /* [I] enclosing queue */
 	uint32_t cr_flags; /* [o] CR_* flags */
 };
 
@@ -95,7 +95,7 @@ struct clockrequest {
  * m Per-queue mutex (cq_mtx).
 * o Owned by a single CPU.
 */
-struct clockintr_queue {
+struct clockqueue {
 	struct clockrequest cq_request; /* [o] callback request object */
 	struct mutex cq_mtx; /* [a] per-queue mutex */
 	uint64_t cq_uptime; /* [o] cached uptime */
@@ -136,7 +136,7 @@ void clockintr_stagger(struct clockintr *, uint64_t, uint32_t, uint32_t);
 void clockintr_unbind(struct clockintr *, uint32_t);
 uint64_t clockrequest_advance(struct clockrequest *, uint64_t);
 uint64_t clockrequest_advance_random(struct clockrequest *, uint64_t, uint32_t);
-void clockqueue_init(struct clockintr_queue *);
+void clockqueue_init(struct clockqueue *);
 int sysctl_clockintr(int *, u_int, void *, size_t *, void *, size_t);
 
 #endif /* _KERNEL */