mirror of https://git.hardenedbsd.org/hardenedbsd/HardenedBSD.git (synced 2024-11-13 05:41:26 +01:00)

Merge branch 'freebsd/current/main' into hardened/current/master

commit 8b7a90c3d0
bin/ps/Makefile
@@ -2,13 +2,6 @@ PACKAGE=runtime
PROG= ps
SRCS= fmt.c keyword.c nlist.c print.c ps.c

#
# To support "lazy" ps for non root/wheel users
# add -DLAZY_PS to the cflags. This helps
# keep ps from being an unnecessary load
# on large systems.
#
CFLAGS+=-DLAZY_PS
LIBADD= m kvm jail xo

.include <bsd.prog.mk>
bin/ps/print.c
@@ -253,8 +253,6 @@ state(KINFO *k, VARENT *ve __unused)
			*cp = '?';
	}
	cp++;
	if (!(flag & P_INMEM))
		*cp++ = 'W';
	if (k->ki_p->ki_nice < NZERO || k->ki_p->ki_pri.pri_class == PRI_REALTIME)
		*cp++ = '<';
	else if (k->ki_p->ki_nice > NZERO || k->ki_p->ki_pri.pri_class == PRI_IDLE)
@@ -633,7 +631,7 @@ getpcpu(const KINFO *k)
#define fxtofl(fixpt) ((double)(fixpt) / fscale)

	/* XXX - I don't like this */
	if (k->ki_p->ki_swtime == 0 || (k->ki_p->ki_flag & P_INMEM) == 0)
	if (k->ki_p->ki_swtime == 0)
		return (0.0);
	if (rawcpu)
		return (100.0 * fxtofl(k->ki_p->ki_pctcpu));
@@ -661,8 +659,6 @@ getpmem(KINFO *k)
	if (failure)
		return (0.0);

	if ((k->ki_p->ki_flag & P_INMEM) == 0)
		return (0.0);
	/* XXX want pmap ptpages, segtab, etc. (per architecture) */
	/* XXX don't have info about shared */
	fracmem = ((double)k->ki_p->ki_rssize) / mempages;
bin/ps/ps.1 (81 changed lines)
@@ -159,9 +159,6 @@ does not imply
but works well with it.
.It Fl e
Display the environment as well.
.It Fl f
Show command-line and environment information about swapped out processes.
This option is honored only if the UID of the user is 0.
.It Fl G
Display information about processes which are running with the specified
real group IDs.
@@ -331,36 +328,34 @@ The flags associated with the process as in
the include file
.In sys/proc.h :
.Bl -column P_SINGLE_BOUNDARY 0x40000000
.It Dv "P_ADVLOCK" Ta No "0x00001" Ta "Process may hold a POSIX advisory lock"
.It Dv "P_CONTROLT" Ta No "0x00002" Ta "Has a controlling terminal"
.It Dv "P_KPROC" Ta No "0x00004" Ta "Kernel process"
.It Dv "P_PPWAIT" Ta No "0x00010" Ta "Parent is waiting for child to exec/exit"
.It Dv "P_PROFIL" Ta No "0x00020" Ta "Has started profiling"
.It Dv "P_STOPPROF" Ta No "0x00040" Ta "Has thread in requesting to stop prof"
.It Dv "P_HADTHREADS" Ta No "0x00080" Ta "Has had threads (no cleanup shortcuts)"
.It Dv "P_SUGID" Ta No "0x00100" Ta "Had set id privileges since last exec"
.It Dv "P_SYSTEM" Ta No "0x00200" Ta "System proc: no sigs, stats or swapping"
.It Dv "P_SINGLE_EXIT" Ta No "0x00400" Ta "Threads suspending should exit, not wait"
.It Dv "P_TRACED" Ta No "0x00800" Ta "Debugged process being traced"
.It Dv "P_WAITED" Ta No "0x01000" Ta "Someone is waiting for us"
.It Dv "P_WEXIT" Ta No "0x02000" Ta "Working on exiting"
.It Dv "P_EXEC" Ta No "0x04000" Ta "Process called exec"
.It Dv "P_WKILLED" Ta No "0x08000" Ta "Killed, shall go to kernel/user boundary ASAP"
.It Dv "P_CONTINUED" Ta No "0x10000" Ta "Proc has continued from a stopped state"
.It Dv "P_STOPPED_SIG" Ta No "0x20000" Ta "Stopped due to SIGSTOP/SIGTSTP"
.It Dv "P_STOPPED_TRACE" Ta No "0x40000" Ta "Stopped because of tracing"
.It Dv "P_STOPPED_SINGLE" Ta No "0x80000" Ta "Only one thread can continue"
.It Dv "P_PROTECTED" Ta No "0x100000" Ta "Do not kill on memory overcommit"
.It Dv "P_SIGEVENT" Ta No "0x200000" Ta "Process pending signals changed"
.It Dv "P_SINGLE_BOUNDARY" Ta No "0x400000" Ta "Threads should suspend at user boundary"
.It Dv "P_HWPMC" Ta No "0x800000" Ta "Process is using HWPMCs"
.It Dv "P_JAILED" Ta No "0x1000000" Ta "Process is in jail"
.It Dv "P_TOTAL_STOP" Ta No "0x2000000" Ta "Stopped for system suspend"
.It Dv "P_INEXEC" Ta No "0x4000000" Ta Process is in Xr execve 2
.It Dv "P_STATCHILD" Ta No "0x8000000" Ta "Child process stopped or exited"
.It Dv "P_INMEM" Ta No "0x10000000" Ta "Loaded into memory"
.It Dv "P_SWAPPINGOUT" Ta No "0x20000000" Ta "Process is being swapped out"
.It Dv "P_SWAPPINGIN" Ta No "0x40000000" Ta "Process is being swapped in"
.It Dv "P_ADVLOCK" Ta No "0x00000001" Ta "Process may hold a POSIX advisory lock"
.It Dv "P_CONTROLT" Ta No "0x00000002" Ta "Has a controlling terminal"
.It Dv "P_KPROC" Ta No "0x00000004" Ta "Kernel process"
.It Dv "P_PPWAIT" Ta No "0x00000010" Ta "Parent is waiting for child to exec/exit"
.It Dv "P_PROFIL" Ta No "0x00000020" Ta "Has started profiling"
.It Dv "P_STOPPROF" Ta No "0x00000040" Ta "Has thread in requesting to stop prof"
.It Dv "P_HADTHREADS" Ta No "0x00000080" Ta "Has had threads (no cleanup shortcuts)"
.It Dv "P_SUGID" Ta No "0x00000100" Ta "Had set id privileges since last exec"
.It Dv "P_SYSTEM" Ta No "0x00000200" Ta "System proc: no sigs, stats or swapping"
.It Dv "P_SINGLE_EXIT" Ta No "0x00000400" Ta "Threads suspending should exit, not wait"
.It Dv "P_TRACED" Ta No "0x00000800" Ta "Debugged process being traced"
.It Dv "P_WAITED" Ta No "0x00001000" Ta "Someone is waiting for us"
.It Dv "P_WEXIT" Ta No "0x00002000" Ta "Working on exiting"
.It Dv "P_EXEC" Ta No "0x00004000" Ta "Process called exec"
.It Dv "P_WKILLED" Ta No "0x00008000" Ta "Killed, shall go to kernel/user boundary ASAP"
.It Dv "P_CONTINUED" Ta No "0x00010000" Ta "Proc has continued from a stopped state"
.It Dv "P_STOPPED_SIG" Ta No "0x00020000" Ta "Stopped due to SIGSTOP/SIGTSTP"
.It Dv "P_STOPPED_TRACE" Ta No "0x00040000" Ta "Stopped because of tracing"
.It Dv "P_STOPPED_SINGLE" Ta No "0x00080000" Ta "Only one thread can continue"
.It Dv "P_PROTECTED" Ta No "0x00100000" Ta "Do not kill on memory overcommit"
.It Dv "P_SIGEVENT" Ta No "0x00200000" Ta "Process pending signals changed"
.It Dv "P_SINGLE_BOUNDARY" Ta No "0x00400000" Ta "Threads should suspend at user boundary"
.It Dv "P_HWPMC" Ta No "0x00800000" Ta "Process is using HWPMCs"
.It Dv "P_JAILED" Ta No "0x01000000" Ta "Process is in jail"
.It Dv "P_TOTAL_STOP" Ta No "0x02000000" Ta "Stopped for system suspend"
.It Dv "P_INEXEC" Ta No "0x04000000" Ta Process is in Xr execve 2
.It Dv "P_STATCHILD" Ta No "0x08000000" Ta "Child process stopped or exited"
.It Dv "P_INMEM" Ta No "0x10000000" Ta "Always set, unused"
.It Dv "P_PPTRACE" Ta No "0x80000000" Ta "Vforked child issued ptrace(PT_TRACEME)"
.El
.It Cm flags2
@@ -375,6 +370,24 @@ the include file
.It Dv "P2_NOTRACE_EXEC" Ta No "0x00000004" Ta Keep P2_NOPTRACE on Xr execve 2
.It Dv "P2_AST_SU" Ta No "0x00000008" Ta "Handles SU ast for kthreads"
.It Dv "P2_PTRACE_FSTP" Ta No "0x00000010" Ta "SIGSTOP from PT_ATTACH not yet handled"
.It Dv "P2_TRAPCAP" Ta No "0x00000020" Ta "SIGTRAP on ENOTCAPABLE"
.It Dv "P2_ASLR_ENABLE" Ta No "0x00000040" Ta "Force enable ASLR"
.It Dv "P2_ASLR_DISABLE" Ta No "0x00000080" Ta "Force disable ASLR"
.It Dv "P2_ASLR_IGNSTART" Ta No "0x00000100" Ta "Enable ASLR to consume sbrk area"
.It Dv "P2_PROTMAX_ENABLE" Ta No "0x00000200" Ta "Force enable implied PROT_MAX"
.It Dv "P2_PROTMAX_DISABLE" Ta No "0x00000400" Ta "Force disable implied PROT_MAX"
.It Dv "P2_STKGAP_DISABLE" Ta No "0x00000800" Ta "Disable stack gap for MAP_STACK"
.It Dv "P2_STKGAP_DISABLE_EXEC" Ta No " 0x00001000" Ta "Stack gap disabled after exec"
.It Dv "P2_ITSTOPPED" Ta No "0x00002000" Ta "itimers stopped (as part of process stop)"
.It Dv "P2_PTRACEREQ" Ta No "0x00004000" Ta "Active ptrace req"
.It Dv "P2_NO_NEW_PRIVS" Ta No "0x00008000" Ta "Ignore setuid on exec"
.It Dv "P2_WXORX_DISABLE" Ta No "0x00010000" Ta "WX mappings enabled"
.It Dv "P2_WXORX_ENABLE_EXEC" Ta No "0x00020000" Ta "WxorX enabled after exec"
.It Dv "P2_WEXIT" Ta No "0x00040000" Ta "Internal exit early state"
.It Dv "P2_REAPKILLED" Ta No "0x00080000" Ta "REAP_KILL pass handled the process"
.It Dv "P2_MEMBAR_PRIVE" Ta No "0x00100000" Ta "membarrier private expedited registered"
.It Dv "P2_MEMBAR_PRIVE_SYNCORE" Ta No "0x00200000" Ta "membarrier private expedited sync core registered"
.It Dv "P2_MEMBAR_GLOBE" Ta No "0x00400000" Ta "membar global expedited registered"
.El
.It Cm label
The MAC label of the process.
@@ -473,8 +486,6 @@ The process is a session leader.
The process' parent is suspended during a
.Xr vfork 2 ,
waiting for the process to exec or exit.
.It Li W
The process is swapped out.
.It Li X
The process is being traced or debugged.
.El
bin/ps/ps.c (41 changed lines)
@@ -68,14 +68,6 @@
#define W_SEP " \t" /* "Whitespace" list separators */
#define T_SEP "," /* "Terminate-element" list separators */

#ifdef LAZY_PS
#define DEF_UREAD 0
#define OPT_LAZY_f "f"
#else
#define DEF_UREAD 1 /* Always do the more-expensive read. */
#define OPT_LAZY_f /* I.e., the `-f' option is not added. */
#endif

/*
 * isdigit takes an `int', but expects values in the range of unsigned char.
 * This wrapper ensures that values from a 'char' end up in the correct range.
@@ -92,7 +84,6 @@ int showthreads; /* will threads be shown? */

struct velisthead varlist = STAILQ_HEAD_INITIALIZER(varlist);

static int forceuread = DEF_UREAD; /* Do extra work to get u-area. */
static kvm_t *kd;
static int needcomm; /* -o "command" */
static int needenv; /* -e */
@@ -154,7 +145,7 @@ static char vfmt[] = "pid,state,time,sl,re,pagein,vsz,rss,lim,tsiz,"
    "%cpu,%mem,command";
static char Zfmt[] = "label";

#define PS_ARGS "AaCcD:de" OPT_LAZY_f "G:gHhjJ:LlM:mN:O:o:p:rSTt:U:uvwXxZ"
#define PS_ARGS "AaCcD:defG:gHhjJ:LlM:mN:O:o:p:rSTt:U:uvwXxZ"

int
main(int argc, char *argv[])
@@ -272,12 +263,9 @@ main(int argc, char *argv[])
		case 'e':	/* XXX set ufmt */
			needenv = 1;
			break;
#ifdef LAZY_PS
		case 'f':
			if (getuid() == 0 || getgid() == 0)
				forceuread = 1;
			/* compat */
			break;
#endif
		case 'G':
			add_list(&gidlist, optarg);
			xkeep_implied = 1;
@@ -1276,31 +1264,21 @@ fmt(char **(*fn)(kvm_t *, const struct kinfo_proc *, int), KINFO *ki,
	return (s);
}

#define UREADOK(ki) (forceuread || (ki->ki_p->ki_flag & P_INMEM))

static void
saveuser(KINFO *ki)
{
	char tdname[COMMLEN + 1];
	char *argsp;

	if (ki->ki_p->ki_flag & P_INMEM) {
		/*
		 * The u-area might be swapped out, and we can't get
		 * at it because we have a crashdump and no swap.
		 * If it's here fill in these fields, otherwise, just
		 * leave them 0.
		 */
		ki->ki_valid = 1;
	} else
		ki->ki_valid = 0;
	ki->ki_valid = 1;

	/*
	 * save arguments if needed
	 */
	if (needcomm) {
		if (ki->ki_p->ki_stat == SZOMB) {
			ki->ki_args = strdup("<defunct>");
		} else if (UREADOK(ki) || (ki->ki_p->ki_args != NULL)) {
		} else if (ki->ki_p->ki_args != NULL) {
			(void)snprintf(tdname, sizeof(tdname), "%s%s",
			    ki->ki_p->ki_tdname, ki->ki_p->ki_moretdname);
			ki->ki_args = fmt(kvm_getargv, ki,
@@ -1315,11 +1293,8 @@ saveuser(KINFO *ki)
		ki->ki_args = NULL;
	}
	if (needenv) {
		if (UREADOK(ki))
			ki->ki_env = fmt(kvm_getenvv, ki,
			    (char *)NULL, (char *)NULL, 0);
		else
			ki->ki_env = strdup("()");
		ki->ki_env = fmt(kvm_getenvv, ki, (char *)NULL,
		    (char *)NULL, 0);
		if (ki->ki_env == NULL)
			xo_errx(1, "malloc failed");
	} else {
@@ -1479,7 +1454,7 @@ pidmax_init(void)
static void __dead2
usage(void)
{
#define SINGLE_OPTS "[-aCcde" OPT_LAZY_f "HhjlmrSTuvwXxZ]"
#define SINGLE_OPTS "[-aCcdeHhjlmrSTuvwXxZ]"

	xo_error("%s\n%s\n%s\n%s\n%s\n",
	    "usage: ps [--libxo] " SINGLE_OPTS " [-O fmt | -o fmt]",
sys/dev/nvme/nvme_ctrlr.c
@@ -820,7 +820,13 @@ nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
	struct nvme_request *req;

	aer->ctrlr = ctrlr;
	req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
	/*
	 * XXX-MJ this should be M_WAITOK but we might be in a non-sleepable
	 * callback context. AER completions should be handled on a dedicated
	 * thread.
	 */
	req = nvme_allocate_request_null(M_NOWAIT, nvme_ctrlr_async_event_cb,
	    aer);
	aer->req = req;

	/*
@@ -1272,12 +1278,12 @@ nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
			goto err;
		}
		req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
		    nvme_pt_done, pt);
		    M_WAITOK, nvme_pt_done, pt);
	} else
		req = nvme_allocate_request_vaddr(pt->buf, pt->len,
		    nvme_pt_done, pt);
		    M_WAITOK, nvme_pt_done, pt);
	} else
		req = nvme_allocate_request_null(nvme_pt_done, pt);
		req = nvme_allocate_request_null(M_WAITOK, nvme_pt_done, pt);

	/* Assume user space already converted to little-endian */
	req->cmd.opc = pt->cmd.opc;
@@ -1363,14 +1369,14 @@ nvme_ctrlr_linux_passthru_cmd(struct nvme_controller *ctrlr,
			ret = EFAULT;
			goto err;
		}
		req = nvme_allocate_request_vaddr(buf->b_data, npc->data_len,
		    nvme_npc_done, npc);
		req = nvme_allocate_request_vaddr(buf->b_data,
		    npc->data_len, M_WAITOK, nvme_npc_done, npc);
	} else
		req = nvme_allocate_request_vaddr(
		    (void *)(uintptr_t)npc->addr, npc->data_len,
		    nvme_npc_done, npc);
		    M_WAITOK, nvme_npc_done, npc);
	} else
		req = nvme_allocate_request_null(nvme_npc_done, npc);
		req = nvme_allocate_request_null(M_WAITOK, nvme_npc_done, npc);

	req->cmd.opc = npc->opcode;
	req->cmd.fuse = npc->flags;
sys/dev/nvme/nvme_ctrlr_cmd.c
@@ -37,7 +37,7 @@ nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr, void *payload,
	struct nvme_command *cmd;

	req = nvme_allocate_request_vaddr(payload,
	    sizeof(struct nvme_controller_data), cb_fn, cb_arg);
	    sizeof(struct nvme_controller_data), M_WAITOK, cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_IDENTIFY;
@@ -59,7 +59,7 @@ nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr, uint32_t nsid,
	struct nvme_command *cmd;

	req = nvme_allocate_request_vaddr(payload,
	    sizeof(struct nvme_namespace_data), cb_fn, cb_arg);
	    sizeof(struct nvme_namespace_data), M_WAITOK, cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_IDENTIFY;
@@ -79,7 +79,7 @@ nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);
	req = nvme_allocate_request_null(M_WAITOK, cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_CREATE_IO_CQ;
@@ -103,7 +103,7 @@ nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);
	req = nvme_allocate_request_null(M_WAITOK, cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_CREATE_IO_SQ;
@@ -127,7 +127,7 @@ nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);
	req = nvme_allocate_request_null(M_WAITOK, cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_DELETE_IO_CQ;
@@ -148,7 +148,7 @@ nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);
	req = nvme_allocate_request_null(M_WAITOK, cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_DELETE_IO_SQ;
@@ -171,7 +171,7 @@ nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr, uint8_t feature,
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);
	req = nvme_allocate_request_null(M_WAITOK, cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_SET_FEATURES;
@@ -193,7 +193,7 @@ nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr, uint8_t feature,
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);
	req = nvme_allocate_request_null(M_WAITOK, cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_GET_FEATURES;
@@ -259,7 +259,12 @@ nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr, uint8_t log_page,
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_vaddr(payload, payload_size, cb_fn, cb_arg);
	/*
	 * XXX-MJ this should be M_WAITOK but we might be called from AER
	 * completion processing, which is a non-sleepable context.
	 */
	req = nvme_allocate_request_vaddr(payload, payload_size,
	    M_NOWAIT, cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_GET_LOG_PAGE;
@@ -319,7 +324,11 @@ nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);
	/*
	 * XXX-MJ this should be M_WAITOK, we do reset from non-sleepable
	 * context and abort commands as part of that.
	 */
	req = nvme_allocate_request_null(M_NOWAIT, cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_ABORT;
sys/dev/nvme/nvme_ns_cmd.c
@@ -36,8 +36,7 @@ nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload, uint64_t lba,
	struct nvme_request *req;

	req = nvme_allocate_request_vaddr(payload,
	    lba_count*nvme_ns_get_sector_size(ns), cb_fn, cb_arg);

	    lba_count * nvme_ns_get_sector_size(ns), M_NOWAIT, cb_fn, cb_arg);
	if (req == NULL)
		return (ENOMEM);

@@ -56,11 +55,9 @@ nvme_ns_cmd_read_bio(struct nvme_namespace *ns, struct bio *bp,
	uint64_t lba;
	uint64_t lba_count;

	req = nvme_allocate_request_bio(bp, cb_fn, cb_arg);

	req = nvme_allocate_request_bio(bp, M_NOWAIT, cb_fn, cb_arg);
	if (req == NULL)
		return (ENOMEM);

	lba = bp->bio_offset / nvme_ns_get_sector_size(ns);
	lba_count = bp->bio_bcount / nvme_ns_get_sector_size(ns);
	nvme_ns_read_cmd(&req->cmd, ns->id, lba, lba_count);
@@ -77,8 +74,7 @@ nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload, uint64_t lba,
	struct nvme_request *req;

	req = nvme_allocate_request_vaddr(payload,
	    lba_count*nvme_ns_get_sector_size(ns), cb_fn, cb_arg);

	    lba_count * nvme_ns_get_sector_size(ns), M_NOWAIT, cb_fn, cb_arg);
	if (req == NULL)
		return (ENOMEM);

@@ -97,8 +93,7 @@ nvme_ns_cmd_write_bio(struct nvme_namespace *ns, struct bio *bp,
	uint64_t lba;
	uint64_t lba_count;

	req = nvme_allocate_request_bio(bp, cb_fn, cb_arg);

	req = nvme_allocate_request_bio(bp, M_NOWAIT, cb_fn, cb_arg);
	if (req == NULL)
		return (ENOMEM);
	lba = bp->bio_offset / nvme_ns_get_sector_size(ns);
@@ -118,8 +113,8 @@ nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
	struct nvme_command *cmd;

	req = nvme_allocate_request_vaddr(payload,
	    num_ranges * sizeof(struct nvme_dsm_range), cb_fn, cb_arg);

	    num_ranges * sizeof(struct nvme_dsm_range), M_NOWAIT, cb_fn,
	    cb_arg);
	if (req == NULL)
		return (ENOMEM);

@@ -141,8 +136,7 @@ nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	req = nvme_allocate_request_null(M_NOWAIT, cb_fn, cb_arg);
	if (req == NULL)
		return (ENOMEM);

@@ -165,8 +159,8 @@ nvme_ns_dump(struct nvme_namespace *ns, void *virt, off_t offset, size_t len)
	int i;

	status.done = FALSE;
	req = nvme_allocate_request_vaddr(virt, len, nvme_completion_poll_cb,
	    &status);
	req = nvme_allocate_request_vaddr(virt, len, M_NOWAIT,
	    nvme_completion_poll_cb, &status);
	if (req == NULL)
		return (ENOMEM);
sys/dev/nvme/nvme_private.h
@@ -486,11 +486,14 @@ nvme_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
}

static __inline struct nvme_request *
_nvme_allocate_request(nvme_cb_fn_t cb_fn, void *cb_arg)
_nvme_allocate_request(const int how, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = malloc(sizeof(*req), M_NVME, M_NOWAIT | M_ZERO);
	KASSERT(how == M_WAITOK || how == M_NOWAIT,
	    ("nvme_allocate_request: invalid how %d", how));

	req = malloc(sizeof(*req), M_NVME, how | M_ZERO);
	if (req != NULL) {
		req->cb_fn = cb_fn;
		req->cb_arg = cb_arg;
@@ -501,11 +504,11 @@ _nvme_allocate_request(nvme_cb_fn_t cb_fn, void *cb_arg)

static __inline struct nvme_request *
nvme_allocate_request_vaddr(void *payload, uint32_t payload_size,
    nvme_cb_fn_t cb_fn, void *cb_arg)
    const int how, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	req = _nvme_allocate_request(how, cb_fn, cb_arg);
	if (req != NULL) {
		req->payload = memdesc_vaddr(payload, payload_size);
		req->payload_valid = true;
@@ -514,20 +517,21 @@ nvme_allocate_request_vaddr(void *payload, uint32_t payload_size,
}

static __inline struct nvme_request *
nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
nvme_allocate_request_null(const int how, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	req = _nvme_allocate_request(how, cb_fn, cb_arg);
	return (req);
}

static __inline struct nvme_request *
nvme_allocate_request_bio(struct bio *bio, nvme_cb_fn_t cb_fn, void *cb_arg)
nvme_allocate_request_bio(struct bio *bio, const int how, nvme_cb_fn_t cb_fn,
    void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	req = _nvme_allocate_request(how, cb_fn, cb_arg);
	if (req != NULL) {
		req->payload = memdesc_bio(bio);
		req->payload_valid = true;
@@ -536,16 +540,16 @@ nvme_allocate_request_bio(struct bio *bio, nvme_cb_fn_t cb_fn, void *cb_arg)
}

static __inline struct nvme_request *
nvme_allocate_request_ccb(union ccb *ccb, nvme_cb_fn_t cb_fn, void *cb_arg)
nvme_allocate_request_ccb(union ccb *ccb, const int how, nvme_cb_fn_t cb_fn,
    void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	req = _nvme_allocate_request(how, cb_fn, cb_arg);
	if (req != NULL) {
		req->payload = memdesc_ccb(ccb);
		req->payload_valid = true;
	}

	return (req);
}
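Note on the allocator change above: nvme_private.h now threads an explicit malloc(9) "how" argument (M_WAITOK or M_NOWAIT) through every request constructor. A minimal caller-side sketch of that convention, assuming only the signatures shown in this hunk (the function below is illustrative and not part of the commit): an M_NOWAIT caller must tolerate a NULL request, while an M_WAITOK caller may assume the allocation succeeded.

/*
 * Illustrative sketch only, not part of this commit: consuming the new
 * "how" argument from a non-sleepable (e.g. completion callback) context,
 * where the allocation may fail and must be handled.
 */
static int
example_submit_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = nvme_allocate_request_null(M_NOWAIT, cb_fn, cb_arg);
	if (req == NULL)
		return (ENOMEM);
	nvme_ns_flush_cmd(&req->cmd, ns->id);
	nvme_ctrlr_submit_io_request(ns->ctrlr, req);
	return (0);
}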
sys/dev/nvme/nvme_sim.c
@@ -96,15 +96,16 @@ nvme_sim_nvmeio(struct cam_sim *sim, union ccb *ccb)
	/* SG LIST ??? */
	if ((nvmeio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_BIO)
		req = nvme_allocate_request_bio((struct bio *)payload,
		    nvme_sim_nvmeio_done, ccb);
		    M_NOWAIT, nvme_sim_nvmeio_done, ccb);
	else if ((nvmeio->ccb_h.flags & CAM_DATA_SG) == CAM_DATA_SG)
		req = nvme_allocate_request_ccb(ccb, nvme_sim_nvmeio_done, ccb);
	else if (payload == NULL)
		req = nvme_allocate_request_null(nvme_sim_nvmeio_done, ccb);
	else
		req = nvme_allocate_request_vaddr(payload, size,
		req = nvme_allocate_request_ccb(ccb, M_NOWAIT,
		    nvme_sim_nvmeio_done, ccb);
	else if (payload == NULL)
		req = nvme_allocate_request_null(M_NOWAIT, nvme_sim_nvmeio_done,
		    ccb);
	else
		req = nvme_allocate_request_vaddr(payload, size, M_NOWAIT,
		    nvme_sim_nvmeio_done, ccb);

	if (req == NULL) {
		nvmeio->ccb_h.status = CAM_RESRC_UNAVAIL;
		xpt_done(ccb);
sys/sys/proc.h
@@ -876,7 +876,7 @@ struct proc {
						   MAP_STACK */
#define	P2_STKGAP_DISABLE_EXEC	0x00001000	/* Stack gap disabled
						   after exec */
#define	P2_ITSTOPPED		0x00002000
#define	P2_ITSTOPPED		0x00002000	/* itimers stopped */
#define	P2_PTRACEREQ		0x00004000	/* Active ptrace req */
#define	P2_NO_NEW_PRIVS		0x00008000	/* Ignore setuid */
#define	P2_WXORX_DISABLE	0x00010000	/* WX mappings enabled */
@@ -884,7 +884,7 @@ struct proc {
#define	P2_WEXIT		0x00040000	/* exit just started, no
						   external thread_single() is
						   permitted */
#define	P2_REAPKILLED		0x00080000
#define	P2_REAPKILLED		0x00080000	/* REAP_KILL pass touched me */
#define	P2_MEMBAR_PRIVE		0x00100000	/* membar private expedited
						   registered */
#define	P2_MEMBAR_PRIVE_SYNCORE	0x00200000	/* membar private expedited
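The sys/proc.h hunks above only add comments to existing p_flag2 bits; these are the same bits documented in the ps.1 flags2 table earlier in this diff, and ps reads them out of the kernel via sysctl/kvm. As a hedged illustration of how such a bit is typically tested in kernel code (the helper name below is invented, not part of the commit):

/* Illustrative only: checking a p_flag2 bit under the process lock. */
static bool
proc_was_reap_killed(struct proc *p)
{
	bool killed;

	PROC_LOCK(p);
	killed = (p->p_flag2 & P2_REAPKILLED) != 0;
	PROC_UNLOCK(p);
	return (killed);
}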