Merge branch 'freebsd/current/main' into hardened/current/master

HardenedBSD Sync Services 2024-01-24 12:01:36 -07:00
commit fb27a902e4
20 changed files with 289 additions and 461 deletions


@ -51,6 +51,9 @@
# xargs -n1 | sort | uniq -d;
# done
# 20240122: callout_async_drain() removed
OLD_FILES+=usr/share/man/man9/callout_async_drain.9.gz
# 20240114: Removal of geom_map(4)
OLD_FILES+=usr/share/man/man4/geom_map.4


@ -215,6 +215,7 @@ MAN+= abort2.2 \
ioctl.2 \
issetugid.2 \
jail.2 \
kcmp.2 \
kenv.2 \
kill.2 \
kldfind.2 \

lib/libc/sys/kcmp.2 (new file, 230 lines)

@ -0,0 +1,230 @@
.\" SPDX-License-Identifier: BSD-2-Clause
.\"
.\" Copyright (c) 2024 The FreeBSD Foundation
.\"
.\" This documentation was written by
.\" Mark Johnston <markj@FreeBSD.org> under sponsorship
.\" from the FreeBSD Foundation.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
.Dd January 23, 2024
.Dt KCMP 2
.Os
.Sh NAME
.Nm kcmp
.Nd compare two kernel objects
.Sh LIBRARY
.Lb libc
.Sh SYNOPSIS
.In unistd.h
.Ft int
.Fn kcmp "pid_t pid1" "pid_t pid2" "int type" "uintptr_t idx1" "uintptr_t idx2"
.Sh DESCRIPTION
The
.Nm
system call allows the caller to determine whether the two processes with
PIDs
.Fa pid1
and
.Fa pid2
reference the same kernel object.
The
.Fa type
parameter specifies the type of object, and
.Fa idx1
and
.Fa idx2
are identifiers which refer to some object in the context of process
.Fa pid1
and
.Fa pid2 ,
respectively.
.Pp
The following values for
.Fa type
may be specified:
.Bl -tag -width KCMP_FILE
.It Dv KCMP_FILE
Compare two file descriptions referred to by file descriptors
.Fa idx1
and
.Fa idx2 .
They may be equivalent if, for example, one of the descriptors was
created by applying
.Xr dup 2
to the other descriptor.
.It Dv KCMP_FILEOBJ
Perform a
.Dq deep comparison
of the file descriptions referred to by file descriptors
.Fa idx1
and
.Fa idx2 .
This tests whether the underlying object referred to by the file descriptions
is the same.
For example, if the same filesystem path is opened twice, the kernel will create
two separate file descriptions to back the two file descriptors, but they will
refer to the same underlying object, a
.Xr vnode 9 .
When compared using the
.Dv KCMP_FILE
type, these descriptors will be different, but using the
.Dv KCMP_FILEOBJ
type, they will be equal (assuming that the path was not unlinked in between
the two opens).
.It Dv KCMP_FILES
Determine whether the two processes share the same file descriptor table.
This will be the case if one of the processes was created by
.Xr rfork 2
without specifying the
.Dv RFFDG
flag.
The
.Fa idx1
and
.Fa idx2
parameters are ignored.
.It Dv KCMP_SIGHAND
Determine whether the two processes share the same signal handler table.
This will be the case if one of the processes was created using the
.Dv RFSIGSHARE
flag to
.Xr rfork 2 .
The
.Fa idx1
and
.Fa idx2
parameters are ignored.
.It Dv KCMP_VM
Determine whether the two processes share a virtual memory address space.
This may be the case if one of the processes created the other using
.Xr vfork 2
or
.Xr rfork 2
with the
.Dv RFMEM
flag.
The
.Fa idx1
and
.Fa idx2
parameters are ignored.
.El
.Pp
The caller of
.Nm
must have permission to debug both processes;
otherwise, the system call will fail.
.Sh RETURN VALUES
If
.Fa idx1
and
.Fa idx2
refer to the same object,
.Nm
returns 0.
If the object referred to by
.Fa pid1
and
.Fa idx1
is less than or greater than the object referred to by
.Fa pid2
and
.Fa idx2 ,
.Nm
returns 1 or 2, respectively.
The order is defined internally by the kernel and is stable until the system
reboots.
If the two objects cannot be compared for some reason,
.Nm
returns 3.
For example, if
.Fa type
is
.Dv KCMP_FILEOBJ
and
.Fa idx1
and
.Fa idx2
refer to descriptors of different types, e.g., a socket and a regular file, then
.Nm
will return 3.
.Pp
If an error occurs, the value \-1 is returned and the global variable
.Va errno
is set to indicate the error.
.Sh ERRORS
.Nm
may fail with the following errors:
.Bl -tag -width Er
.It Bq Er ENODEV
.Dv KCMP_FILEOBJ
was specified and
.Fa idx1
refers to a file descriptor which does not implement a comparison operator.
.It Bq Er EINVAL
The value of
.Fa type
is invalid.
.It Bq Er EBADF
One of the file descriptors referred to by
.Fa idx1
or
.Fa idx2
is not valid.
.It Bq Er ESRCH
One of the processes referred to by
.Fa pid1
or
.Fa pid2
does not exist or is not visible (e.g., due to jail restrictions).
.It Bq Er EPERM
The caller does not have permission to access one of the processes referred to
by
.Fa pid1
or
.Fa pid2 .
.El
.Sh SEE ALSO
.Xr dup 2 ,
.Xr fcntl 2 ,
.Xr fork 2 ,
.Xr rfork 2 ,
.Xr vfork 2
.Sh STANDARDS
The
.Nm
system call originated in Linux.
This implementation aims to be source-compatible with the Linux implementation.
.Fx
implements only a subset of the possible values for
.Fa type
supported in Linux.
More values may be added in the future.
The
.Dv KCMP_FILEOBJ
type is a FreeBSD extension.
.Sh HISTORY
The
.Nm
system call first appeared in
.Fx 14.1 .
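Editorial aside (not part of the commit): a minimal userland sketch of the KCMP_FILE vs. KCMP_FILEOBJ distinction described in the manual page above. It assumes the KCMP_* constants are exposed through <unistd.h>, as the SYNOPSIS suggests, and uses /etc/passwd only as a convenient readable path; the file and variable names are illustrative.

/*
 * kcmp_demo.c -- illustrative only, not from the commit above.
 */
#include <sys/types.h>

#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	pid_t pid = getpid();
	int fd1, fd2, fd3;

	if ((fd1 = open("/etc/passwd", O_RDONLY)) < 0 ||
	    (fd2 = open("/etc/passwd", O_RDONLY)) < 0 ||
	    (fd3 = dup(fd1)) < 0)
		err(1, "open/dup");

	/* 0: a dup'ed descriptor shares its file description. */
	printf("KCMP_FILE    fd1 vs dup(fd1): %d\n",
	    kcmp(pid, pid, KCMP_FILE, fd1, fd3));

	/* 1 or 2: two separate opens create two distinct file descriptions. */
	printf("KCMP_FILE    fd1 vs fd2:      %d\n",
	    kcmp(pid, pid, KCMP_FILE, fd1, fd2));

	/* 0: both descriptions reference the same underlying vnode. */
	printf("KCMP_FILEOBJ fd1 vs fd2:      %d\n",
	    kcmp(pid, pid, KCMP_FILEOBJ, fd1, fd2));

	return (0);
}

On a system that ships the system call (FreeBSD 14.1 per the HISTORY section), the first and third comparisons should print 0, while the second prints 1 or 2 depending on the kernel's internal ordering.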


@ -77,12 +77,12 @@ VOLUME_LABEL= ${VENDOR_NAME}_Install
NOPORTS= true
.endif
EXTRA_PACKAGES=
DISTRIBUTIONS= base.txz kernel.txz
.if !defined(NOPORTS)
EXTRA_PACKAGES+= ports.txz
DISTRIBUTIONS+= ports.txz
.endif
.if !defined(NOSRC)
EXTRA_PACKAGES+= src.txz
DISTRIBUTIONS+= src.txz
.endif
RELEASE_TARGETS= ftp
@ -299,7 +299,7 @@ mini-memstick: mini-memstick.img
mini-memstick.img: bootonly
cd bootonly && sh ${.CURDIR}/${TARGET}/make-memstick.sh .${NO_ROOT:D/METALOG} ../${.TARGET}
packagesystem: base.txz kernel.txz ${EXTRA_PACKAGES}
packagesystem: ${DISTRIBUTIONS}
sh ${.CURDIR}/scripts/make-manifest.sh *.txz > MANIFEST
touch ${.TARGET}


@ -27,7 +27,7 @@
.\"
.\" $NetBSD: ccdconfig.8,v 1.4 1996/02/28 01:01:17 thorpej Exp $
.\"
.Dd March 17, 2022
.Dd January 24, 2024
.Dt CCDCONFIG 8
.Os
.Sh NAME
@ -122,13 +122,6 @@ argument is optional.
ccd0 16 none /dev/da2s1 /dev/da3s1
.Ed
.Pp
The component devices need to name partitions of type
.Li FS_BSDFFS
(or
.Dq 4.2BSD
as shown by
.Xr disklabel 8 ) .
.Pp
If you want to use the
.Tn Linux
.Xr md 4
@ -187,30 +180,6 @@ When you create a new ccd disk you generally want to
and
.Xr disklabel 8
it before doing anything else.
Once you create the initial label you can
edit it, adding additional partitions.
The label itself takes up the first
16 sectors of the ccd disk.
If all you are doing is creating file systems
with newfs, you do not have to worry about this as newfs will skip the
label area.
However, if you intend to
.Xr dd 1
to or from a ccd partition it is usually a good idea to construct the
partition such that it does not overlap the label area.
For example, if
you have A ccd disk with 10000 sectors you might create a 'd' partition
with offset 16 and size 9984.
.Bd -literal
# disklabel ccd0 > /tmp/disklabel.ccd0
# disklabel -R ccd0 /tmp/disklabel.ccd0
# disklabel -e ccd0
.Ed
.Pp
The disklabeling of a ccd disk is usually a one-time affair.
If you reboot the machine and reconfigure the ccd disk,
the disklabel you
had created before will still be there and not require reinitialization.
Beware that changing any ccd parameters: interleave, flags, or the
device list making up the ccd disk, will usually destroy any prior
data on that ccd disk.
@ -246,12 +215,3 @@ The
.Nm
utility first appeared in
.Nx 1.1 .
.Sh BUGS
The initial disklabel returned by
.Xr ccd 4
specifies only 3 partitions.
One needs to change the number of partitions to 8 using
.Dq Nm disklabel Fl e
to get the usual
.Bx
expectations.


@ -810,7 +810,6 @@ MLINKS+=byteorder.9 be16dec.9 \
byteorder.9 le64enc.9 \
byteorder.9 le64toh.9
MLINKS+=callout.9 callout_active.9 \
callout.9 callout_async_drain.9 \
callout.9 callout_deactivate.9 \
callout.9 callout_drain.9 \
callout.9 callout_init.9 \


@ -27,13 +27,12 @@
.\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
.\" POSSIBILITY OF SUCH DAMAGE.
.\"
.Dd September 1, 2021
.Dd January 22, 2024
.Dt CALLOUT 9
.Os
.Sh NAME
.Nm callout_active ,
.Nm callout_deactivate ,
.Nm callout_async_drain ,
.Nm callout_drain ,
.Nm callout_init ,
.Nm callout_init_mtx ,
@ -66,8 +65,6 @@ typedef void callout_func_t (void *);
.Ft void
.Fn callout_deactivate "struct callout *c"
.Ft int
.Fn callout_async_drain "struct callout *c" "callout_func_t *drain"
.Ft int
.Fn callout_drain "struct callout *c"
.Ft void
.Fn callout_init "struct callout *c" "int mpsafe"
@ -293,30 +290,6 @@ If the callout has an associated lock,
then that lock must be held when this function is called.
.Pp
The function
.Fn callout_async_drain
is identical to
.Fn callout_stop
with one difference.
When
.Fn callout_async_drain
returns zero it will arrange for the function
.Fa drain
to be called using the same argument given to the
.Fn callout_reset
function.
.Fn callout_async_drain
If the callout has an associated lock,
then that lock must be held when this function is called.
Note that when stopping multiple callouts that use the same lock it is possible
to get multiple return's of zero and multiple calls to the
.Fa drain
function, depending upon which CPU's the callouts are running.
The
.Fa drain
function itself is called from the context of the completing callout
i.e. softclock or hardclock, just like a callout itself.
.Pp
The function
.Fn callout_drain
is identical to
.Fn callout_stop


@ -142,7 +142,6 @@ static u_int __read_mostly callwheelmask;
*/
struct cc_exec {
struct callout *cc_curr;
callout_func_t *cc_drain;
void *cc_last_func;
void *cc_last_arg;
#ifdef SMP
@ -180,7 +179,6 @@ struct callout_cpu {
#define cc_exec_curr(cc, dir) cc->cc_exec_entity[dir].cc_curr
#define cc_exec_last_func(cc, dir) cc->cc_exec_entity[dir].cc_last_func
#define cc_exec_last_arg(cc, dir) cc->cc_exec_entity[dir].cc_last_arg
#define cc_exec_drain(cc, dir) cc->cc_exec_entity[dir].cc_drain
#define cc_exec_next(cc) cc->cc_next
#define cc_exec_cancel(cc, dir) cc->cc_exec_entity[dir].cc_cancel
#define cc_exec_waiting(cc, dir) cc->cc_exec_entity[dir].cc_waiting
@ -631,7 +629,7 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc,
int direct)
{
struct rm_priotracker tracker;
callout_func_t *c_func, *drain;
callout_func_t *c_func;
void *c_arg;
struct lock_class *class;
struct lock_object *c_lock;
@ -673,7 +671,6 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc,
cc_exec_last_func(cc, direct) = c_func;
cc_exec_last_arg(cc, direct) = c_arg;
cc_exec_cancel(cc, direct) = false;
cc_exec_drain(cc, direct) = NULL;
CC_UNLOCK(cc);
if (c_lock != NULL) {
class->lc_lock(c_lock, lock_status);
@ -739,13 +736,6 @@ softclock_call_cc(struct callout *c, struct callout_cpu *cc,
CC_LOCK(cc);
KASSERT(cc_exec_curr(cc, direct) == c, ("mishandled cc_curr"));
cc_exec_curr(cc, direct) = NULL;
if (cc_exec_drain(cc, direct)) {
drain = cc_exec_drain(cc, direct);
cc_exec_drain(cc, direct) = NULL;
CC_UNLOCK(cc);
drain(c_arg);
CC_LOCK(cc);
}
if (cc_exec_waiting(cc, direct)) {
/*
* There is someone waiting for the
@ -969,7 +959,7 @@ callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t prec,
*/
if (c->c_lock != NULL && !cc_exec_cancel(cc, direct))
cancelled = cc_exec_cancel(cc, direct) = true;
if (cc_exec_waiting(cc, direct) || cc_exec_drain(cc, direct)) {
if (cc_exec_waiting(cc, direct)) {
/*
* Someone has called callout_drain to kill this
* callout. Don't reschedule.
@ -1080,7 +1070,7 @@ callout_schedule(struct callout *c, int to_ticks)
}
int
_callout_stop_safe(struct callout *c, int flags, callout_func_t *drain)
_callout_stop_safe(struct callout *c, int flags)
{
struct callout_cpu *cc, *old_cc;
struct lock_class *class;
@ -1091,9 +1081,6 @@ _callout_stop_safe(struct callout *c, int flags, callout_func_t *drain)
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, c->c_lock,
"calling %s", __func__);
KASSERT((flags & CS_DRAIN) == 0 || drain == NULL,
("Cannot set drain callback and CS_DRAIN flag at the same time"));
/*
* Some old subsystems don't hold Giant while running a callout_stop(),
* so just discard this check for the moment.
@ -1228,8 +1215,7 @@ _callout_stop_safe(struct callout *c, int flags, callout_func_t *drain)
goto again;
}
c->c_flags &= ~CALLOUT_ACTIVE;
} else if (use_lock &&
!cc_exec_cancel(cc, direct) && (drain == NULL)) {
} else if (use_lock && !cc_exec_cancel(cc, direct)) {
/*
* The current callout is waiting for its
@ -1237,8 +1223,7 @@ _callout_stop_safe(struct callout *c, int flags, callout_func_t *drain)
* and return. After our caller drops the
* lock, the callout will be skipped in
* softclock(). This *only* works with a
* callout_stop() *not* callout_drain() or
* callout_async_drain().
* callout_stop() *not* with callout_drain().
*/
cc_exec_cancel(cc, direct) = true;
CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
@ -1284,23 +1269,11 @@ _callout_stop_safe(struct callout *c, int flags, callout_func_t *drain)
#endif
CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
c, c->c_func, c->c_arg);
if (drain) {
KASSERT(cc_exec_drain(cc, direct) == NULL,
("callout drain function already set to %p",
cc_exec_drain(cc, direct)));
cc_exec_drain(cc, direct) = drain;
}
CC_UNLOCK(cc);
return (0);
} else {
CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
c, c->c_func, c->c_arg);
if (drain) {
KASSERT(cc_exec_drain(cc, direct) == NULL,
("callout drain function already set to %p",
cc_exec_drain(cc, direct)));
cc_exec_drain(cc, direct) = drain;
}
}
KASSERT(!sq_locked, ("sleepqueue chain still locked"));
cancelled = 0;


@ -1,7 +1,6 @@
SUBDIR+= framework
SUBDIR+= .WAIT
SUBDIR+= callout_test
SUBDIR_PARALLEL=


@ -1,14 +0,0 @@
#
#
.PATH: ${SRCTOP}/sys/tests/callout_test
KMOD= callout_test
SRCS= callout_test.c
#
# Enable full debugging
#
#CFLAGS += -g
.include <bsd.kmod.mk>


@ -1534,7 +1534,7 @@ tcp_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
struct mbuf *mfree;
struct tcpopt to;
int tfo_syn;
u_int maxseg;
u_int maxseg = 0;
thflags = tcp_get_flags(th);
tp->sackhint.last_sack_ack = 0;
@ -2601,7 +2601,8 @@ tcp_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
if (V_tcp_do_prr &&
IN_FASTRECOVERY(tp->t_flags) &&
(tp->t_flags & TF_SACK_PERMIT)) {
tcp_do_prr_ack(tp, th, &to, sack_changed);
tcp_do_prr_ack(tp, th, &to,
sack_changed, &maxseg);
} else if (tcp_is_sack_recovery(tp, &to) &&
IN_FASTRECOVERY(tp->t_flags)) {
int awnd;
@ -2806,19 +2807,24 @@ tcp_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
*/
if (IN_FASTRECOVERY(tp->t_flags)) {
if (SEQ_LT(th->th_ack, tp->snd_recover)) {
if (tp->t_flags & TF_SACK_PERMIT)
if (tp->t_flags & TF_SACK_PERMIT) {
if (V_tcp_do_prr && to.to_flags & TOF_SACK) {
tcp_timer_activate(tp, TT_REXMT, 0);
tp->t_rtttime = 0;
tcp_do_prr_ack(tp, th, &to, sack_changed);
tcp_do_prr_ack(tp, th, &to,
sack_changed, &maxseg);
tp->t_flags |= TF_ACKNOW;
(void) tcp_output(tp);
} else
tcp_sack_partialack(tp, th);
else
} else {
tcp_sack_partialack(tp, th,
&maxseg);
}
} else {
tcp_newreno_partial_ack(tp, th);
} else
}
} else {
cc_post_recovery(tp, th);
}
} else if (IN_CONGRECOVERY(tp->t_flags)) {
if (SEQ_LT(th->th_ack, tp->snd_recover)) {
if (V_tcp_do_prr) {
@ -2828,11 +2834,13 @@ tcp_do_segment(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
* During ECN cwnd reduction
* always use PRR-SSRB
*/
tcp_do_prr_ack(tp, th, &to, SACK_CHANGE);
tcp_do_prr_ack(tp, th, &to, SACK_CHANGE,
&maxseg);
(void) tcp_output(tp);
}
} else
} else {
cc_post_recovery(tp, th);
}
}
/*
* If we reach this point, ACK is not a duplicate,
@ -3951,13 +3959,18 @@ tcp_mssopt(struct in_conninfo *inc)
}
void
tcp_do_prr_ack(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to, sackstatus_t sack_changed)
tcp_do_prr_ack(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to,
sackstatus_t sack_changed, u_int *maxsegp)
{
int snd_cnt = 0, limit = 0, del_data = 0, pipe = 0;
int maxseg = tcp_maxseg(tp);
u_int maxseg;
INP_WLOCK_ASSERT(tptoinpcb(tp));
if (*maxsegp == 0) {
*maxsegp = tcp_maxseg(tp);
}
maxseg = *maxsegp;
/*
* Compute the amount of data that this ACK is indicating
* (del_data) and an estimate of how many bytes are in the

View File

@ -936,13 +936,18 @@ tcp_resend_sackholes(struct tcpcb *tp)
* the midst of sack recovery.
*/
void
tcp_sack_partialack(struct tcpcb *tp, struct tcphdr *th)
tcp_sack_partialack(struct tcpcb *tp, struct tcphdr *th, u_int *maxsegp)
{
struct sackhole *temp;
int num_segs = 1;
u_int maxseg = tcp_maxseg(tp);
u_int maxseg;
INP_WLOCK_ASSERT(tptoinpcb(tp));
if (*maxsegp == 0) {
*maxsegp = tcp_maxseg(tp);
}
maxseg = *maxsegp;
tcp_timer_activate(tp, TT_REXMT, 0);
tp->t_rtttime = 0;
/* Send one or 2 segments based on how much new data was acked. */


@ -1481,14 +1481,16 @@ sackstatus_t
tcp_sack_doack(struct tcpcb *, struct tcpopt *, tcp_seq);
int tcp_dsack_block_exists(struct tcpcb *);
void tcp_update_dsack_list(struct tcpcb *, tcp_seq, tcp_seq);
void tcp_update_sack_list(struct tcpcb *tp, tcp_seq rcv_laststart, tcp_seq rcv_lastend);
void tcp_update_sack_list(struct tcpcb *tp, tcp_seq rcv_laststart,
tcp_seq rcv_lastend);
void tcp_clean_dsack_blocks(struct tcpcb *tp);
void tcp_clean_sackreport(struct tcpcb *tp);
void tcp_sack_adjust(struct tcpcb *tp);
struct sackhole *tcp_sack_output(struct tcpcb *tp, int *sack_bytes_rexmt);
void tcp_do_prr_ack(struct tcpcb *, struct tcphdr *, struct tcpopt *, sackstatus_t);
void tcp_do_prr_ack(struct tcpcb *, struct tcphdr *, struct tcpopt *,
sackstatus_t, u_int *);
void tcp_lost_retransmission(struct tcpcb *, struct tcphdr *);
void tcp_sack_partialack(struct tcpcb *, struct tcphdr *);
void tcp_sack_partialack(struct tcpcb *, struct tcphdr *, u_int *);
void tcp_resend_sackholes(struct tcpcb *tp);
void tcp_free_sackholes(struct tcpcb *tp);
void tcp_sack_lost_retransmission(struct tcpcb *, struct tcphdr *);
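Editorial aside (not from the commit): the new u_int * parameter threaded through tcp_do_prr_ack() and tcp_sack_partialack() lets both helpers share a single lazily computed tcp_maxseg() result per processed ACK; tcp_do_segment() initializes the cache to 0, and whichever helper runs first fills it in. A stripped-down, standalone sketch of that idiom follows; the names (compute_maxseg, helper_a, helper_b, process_ack) are hypothetical.

#include <stdio.h>

/* Stand-in for tcp_maxseg(tp), which is comparatively expensive. */
static unsigned int
compute_maxseg(void)
{
	printf("computing maxseg\n");	/* runs once per process_ack() call */
	return (1460);
}

static void
helper_a(unsigned int *maxsegp)
{
	if (*maxsegp == 0)		/* 0 means "not computed yet" */
		*maxsegp = compute_maxseg();
	printf("helper_a uses %u\n", *maxsegp);
}

static void
helper_b(unsigned int *maxsegp)
{
	if (*maxsegp == 0)
		*maxsegp = compute_maxseg();
	printf("helper_b uses %u\n", *maxsegp);
}

int
main(void)
{
	unsigned int maxseg = 0;	/* caller-owned cache, as in tcp_do_segment() */

	helper_a(&maxseg);		/* computes the value */
	helper_b(&maxseg);		/* reuses the cached value */
	return (0);
}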


@ -8515,7 +8515,7 @@ pf_test6(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb
* confused and fail to send the icmp6 packet too big error. Just send
* it here, before we do any NAT.
*/
if (dir == PF_OUT && IN6_LINKMTU(ifp) < pf_max_frag_size(m)) {
if (dir == PF_OUT && pflags & PFIL_FWD && IN6_LINKMTU(ifp) < pf_max_frag_size(m)) {
PF_RULES_RUNLOCK();
*m0 = NULL;
icmp6_error(m, ICMP6_PACKET_TOO_BIG, 0, IN6_LINKMTU(ifp));


@ -81,7 +81,7 @@
*/
#define callout_active(c) ((c)->c_flags & CALLOUT_ACTIVE)
#define callout_deactivate(c) ((c)->c_flags &= ~CALLOUT_ACTIVE)
#define callout_drain(c) _callout_stop_safe(c, CS_DRAIN, NULL)
#define callout_drain(c) _callout_stop_safe(c, CS_DRAIN)
void callout_init(struct callout *, int);
void _callout_init_lock(struct callout *, struct lock_object *, int);
#define callout_init_mtx(c, mtx, flags) \
@ -119,11 +119,9 @@ int callout_schedule(struct callout *, int);
int callout_schedule_on(struct callout *, int, int);
#define callout_schedule_curcpu(c, on_tick) \
callout_schedule_on((c), (on_tick), PCPU_GET(cpuid))
#define callout_stop(c) _callout_stop_safe(c, 0, NULL)
int _callout_stop_safe(struct callout *, int, void (*)(void *));
#define callout_stop(c) _callout_stop_safe(c, 0)
int _callout_stop_safe(struct callout *, int);
void callout_process(sbintime_t now);
#define callout_async_drain(c, d) \
_callout_stop_safe(c, 0, d)
void callout_when(sbintime_t sbt, sbintime_t precision, int flags,
sbintime_t *sbt_res, sbintime_t *prec_res);
#endif
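Editorial aside (not part of the commit): with callout_async_drain() gone, code that must guarantee a handler has finished uses the blocking callout_drain(), called from a context that can sleep and without holding the callout's lock. A minimal kernel-side sketch under those assumptions; the softc layout and names (mysc, sc_mtx, sc_timer) are hypothetical.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>

struct mysc {
	struct mtx	sc_mtx;
	struct callout	sc_timer;
};

static void
mysc_timeout(void *arg)
{
	/* Runs with sc_mtx held because of callout_init_mtx(). */
}

static void
mysc_attach(struct mysc *sc)
{
	mtx_init(&sc->sc_mtx, "mysc", NULL, MTX_DEF);
	callout_init_mtx(&sc->sc_timer, &sc->sc_mtx, 0);
	mtx_lock(&sc->sc_mtx);
	callout_reset(&sc->sc_timer, hz, mysc_timeout, sc);
	mtx_unlock(&sc->sc_mtx);
}

static void
mysc_detach(struct mysc *sc)
{
	/*
	 * Blocks until any in-flight handler returns; may sleep, so the
	 * mutex must not be held here.
	 */
	callout_drain(&sc->sc_timer);
	mtx_destroy(&sc->sc_mtx);
}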


@ -1,32 +0,0 @@
#ifndef __callout_test_h__
#define __callout_test_h__
/*-
* Copyright (c) 2015 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
struct callout_test {
int number_of_callouts;
int test_number;
};
#endif


@ -1,282 +0,0 @@
/*-
* Copyright (c) 2015 Netflix, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/cpuctl.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/pmckern.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <tests/kern_testfrwk.h>
#include <tests/callout_test.h>
#include <machine/cpu.h>
MALLOC_DEFINE(M_CALLTMP, "Temp callout Memory", "CalloutTest");
struct callout_run {
struct mtx lock;
struct callout *co_array;
int co_test;
int co_number_callouts;
int co_return_npa;
int co_completed;
int callout_waiting;
int drain_calls;
int cnt_zero;
int cnt_one;
int index;
};
static struct callout_run *comaster[MAXCPU];
uint64_t callout_total = 0;
static void execute_the_co_test(struct callout_run *rn);
static void
co_saydone(void *arg)
{
struct callout_run *rn;
rn = (struct callout_run *)arg;
printf("The callout test is now complete for thread %d\n",
rn->index);
printf("number_callouts:%d\n",
rn->co_number_callouts);
printf("Callouts that bailed (Not PENDING or ACTIVE cleared):%d\n",
rn->co_return_npa);
printf("Callouts that completed:%d\n", rn->co_completed);
printf("Drain calls:%d\n", rn->drain_calls);
printf("Zero returns:%d non-zero:%d\n",
rn->cnt_zero,
rn->cnt_one);
}
static void
drainit(void *arg)
{
struct callout_run *rn;
rn = (struct callout_run *)arg;
mtx_lock(&rn->lock);
rn->drain_calls++;
mtx_unlock(&rn->lock);
}
static void
test_callout(void *arg)
{
struct callout_run *rn;
int cpu;
critical_enter();
cpu = curcpu;
critical_exit();
rn = (struct callout_run *)arg;
atomic_add_int(&rn->callout_waiting, 1);
mtx_lock(&rn->lock);
if (callout_pending(&rn->co_array[cpu]) ||
!callout_active(&rn->co_array[cpu])) {
rn->co_return_npa++;
atomic_subtract_int(&rn->callout_waiting, 1);
mtx_unlock(&rn->lock);
return;
}
callout_deactivate(&rn->co_array[cpu]);
rn->co_completed++;
mtx_unlock(&rn->lock);
atomic_subtract_int(&rn->callout_waiting, 1);
}
void
execute_the_co_test(struct callout_run *rn)
{
int i, ret, cpu;
uint32_t tk_s, tk_e, tk_d;
mtx_lock(&rn->lock);
rn->callout_waiting = 0;
for (i = 0; i < rn->co_number_callouts; i++) {
if (rn->co_test == 1) {
/* start all on spread out cpu's */
cpu = i % mp_ncpus;
callout_reset_sbt_on(&rn->co_array[i], 3, 0, test_callout, rn,
cpu, 0);
} else {
/* Start all on the same CPU */
callout_reset_sbt_on(&rn->co_array[i], 3, 0, test_callout, rn,
rn->index, 0);
}
}
tk_s = ticks;
while (rn->callout_waiting != rn->co_number_callouts) {
cpu_spinwait();
tk_e = ticks;
tk_d = tk_e - tk_s;
if (tk_d > 100) {
break;
}
}
/* OK everyone is waiting and we have the lock */
for (i = 0; i < rn->co_number_callouts; i++) {
ret = callout_async_drain(&rn->co_array[i], drainit);
if (ret) {
rn->cnt_one++;
} else {
rn->cnt_zero++;
}
}
rn->callout_waiting -= rn->cnt_one;
mtx_unlock(&rn->lock);
/* Now wait until all are done */
tk_s = ticks;
while (rn->callout_waiting > 0) {
cpu_spinwait();
tk_e = ticks;
tk_d = tk_e - tk_s;
if (tk_d > 100) {
break;
}
}
co_saydone((void *)rn);
}
static void
run_callout_test(struct kern_test *test)
{
struct callout_test *u;
size_t sz;
int i;
struct callout_run *rn;
int index = test->tot_threads_running;
u = (struct callout_test *)test->test_options;
if (comaster[index] == NULL) {
rn = comaster[index] = malloc(sizeof(struct callout_run), M_CALLTMP, M_WAITOK);
memset(comaster[index], 0, sizeof(struct callout_run));
mtx_init(&rn->lock, "callouttest", NULL, MTX_DUPOK);
rn->index = index;
} else {
rn = comaster[index];
rn->co_number_callouts = rn->co_return_npa = 0;
rn->co_completed = rn->callout_waiting = 0;
rn->drain_calls = rn->cnt_zero = rn->cnt_one = 0;
if (rn->co_array) {
free(rn->co_array, M_CALLTMP);
rn->co_array = NULL;
}
}
rn->co_number_callouts = u->number_of_callouts;
rn->co_test = u->test_number;
sz = sizeof(struct callout) * rn->co_number_callouts;
rn->co_array = malloc(sz, M_CALLTMP, M_WAITOK);
for (i = 0; i < rn->co_number_callouts; i++) {
callout_init(&rn->co_array[i], CALLOUT_MPSAFE);
}
execute_the_co_test(rn);
}
int callout_test_is_loaded = 0;
static void
cocleanup(void)
{
int i;
for (i = 0; i < MAXCPU; i++) {
if (comaster[i]) {
if (comaster[i]->co_array) {
free(comaster[i]->co_array, M_CALLTMP);
comaster[i]->co_array = NULL;
}
free(comaster[i], M_CALLTMP);
comaster[i] = NULL;
}
}
}
static int
callout_test_modevent(module_t mod, int type, void *data)
{
int err = 0;
switch (type) {
case MOD_LOAD:
err = kern_testframework_register("callout_test",
run_callout_test);
if (err) {
printf("Can't load callout_test err:%d returned\n",
err);
} else {
memset(comaster, 0, sizeof(comaster));
callout_test_is_loaded = 1;
}
break;
case MOD_QUIESCE:
err = kern_testframework_deregister("callout_test");
if (err == 0) {
callout_test_is_loaded = 0;
cocleanup();
}
break;
case MOD_UNLOAD:
if (callout_test_is_loaded) {
err = kern_testframework_deregister("callout_test");
if (err == 0) {
cocleanup();
callout_test_is_loaded = 0;
}
}
break;
default:
return (EOPNOTSUPP);
}
return (err);
}
static moduledata_t callout_test_mod = {
.name = "callout_test",
.evhand = callout_test_modevent,
.priv = 0
};
MODULE_DEPEND(callout_test, kern_testframework, 1, 1, 1);
DECLARE_MODULE(callout_test, callout_test_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);


@ -12,7 +12,7 @@ ln sh -
ln sh -sh
# /sbin
progs disklabel fdisk init mount newfs reboot umount
progs disklabel init mount newfs reboot umount
ln reboot halt
ln reboot fastboot
ln reboot fasthalt


@ -21,7 +21,7 @@ ln sh -sh # init invokes the shell this way
# /sbin stuff
progs chown clri disklabel dump dmesg fdisk fsck ifconfig init
progs chown clri disklabel dump dmesg fsck ifconfig init
progs mknod mount newfs ping reboot restore swapon umount
ln dump rdump
ln restore rrestore


@ -22,7 +22,7 @@ ln sh -sh
srcdirs /usr/src/sbin
progs bim clri disklabel dmesg dump dumpfs fdisk fsck halt
progs bim clri disklabel dmesg dump dumpfs fsck halt
progs ifconfig init mknod modload modunload mount mount_isofs
progs mount_lofs mount_msdosfs mountd
progs newfs nfsd nfsiod ping quotacheck reboot restore route routed savecore