mirror of
https://git.hardenedbsd.org/hardenedbsd/HardenedBSD.git
synced 2024-11-11 04:42:16 +01:00
14bdbaf2e4
Coredump notes depend on being able to invoke dump routines twice; once in a dry-run mode to get the size of the note, and another to actually emit the note to the corefile. When a note helper emits a different length section the second time around than the length it requested the first time, the kernel produces a corrupt coredump. NT_PROCSTAT_FILES output length, when packing kinfo structs, is tied to the length of filenames corresponding to vnodes in the process' fd table via vn_fullpath. As vnodes may move around during dump, this is racy. So: - Detect badly behaved notes in putnote() and pad underfilled notes. - Add a fail point, debug.fail_point.fill_kinfo_vnode__random_path to exercise the NT_PROCSTAT_FILES corruption. It simply picks random lengths to expand or truncate paths to in fo_fill_kinfo_vnode(). - Add a sysctl, kern.coredump_pack_fileinfo, to allow users to disable kinfo packing for PROCSTAT_FILES notes. This should avoid both FILES note corruption and truncation, even if filenames change, at the cost of about 1 kiB in padding bloat per open fd. Document the new sysctl in core.5. - Fix note_procstat_files to self-limit in the 2nd pass. Since sometimes this will result in a short write, pad up to our advertised size. This addresses note corruption, at the risk of sometimes truncating the last several fd info entries. - Fix NT_PROCSTAT_FILES consumers libutil and libprocstat to grok the zero padding. With suggestions from: bjk, jhb, kib, wblock Approved by: markj (mentor) Relnotes: yes Sponsored by: EMC / Isilon Storage Division Differential Revision: https://reviews.freebsd.org/D3548
78 lines
1.4 KiB
C
78 lines
1.4 KiB
C
#include <sys/cdefs.h>
|
|
__FBSDID("$FreeBSD$");
|
|
|
|
#include <sys/param.h>
|
|
#include <sys/user.h>
|
|
#include <sys/sysctl.h>
|
|
#include <stdlib.h>
|
|
#include <string.h>
|
|
|
|
#include "libutil.h"
|
|
|
|
/*
 * Fetch the open-file information for process "pid" via the
 * kern.proc.filedesc sysctl and unpack the kernel's variable-length
 * (packed) records into an array of fixed-size struct kinfo_file.
 *
 * On success returns a malloc'd array of *cntp entries that the caller
 * must free(); returns NULL on error.  *cntp is always set.
 */
struct kinfo_file *
kinfo_getfile(pid_t pid, int *cntp)
{
	int mib[4];
	int error;
	int cnt;
	size_t len;
	char *buf, *bp, *eb;
	struct kinfo_file *kif, *kp, *kf;

	*cntp = 0;
	len = 0;
	mib[0] = CTL_KERN;
	mib[1] = KERN_PROC;
	mib[2] = KERN_PROC_FILEDESC;
	mib[3] = pid;

	/* Probe for the required buffer size. */
	error = sysctl(mib, 4, NULL, &len, NULL, 0);
	if (error)
		return (NULL);
	/*
	 * Pad the estimate by 4/3: the fd table may grow between the
	 * size probe above and the data fetch below.
	 */
	len = len * 4 / 3;
	buf = malloc(len);
	if (buf == NULL)
		return (NULL);
	error = sysctl(mib, 4, buf, &len, NULL, 0);
	if (error) {
		free(buf);
		return (NULL);
	}
	/* Pass 1: count items */
	cnt = 0;
	bp = buf;
	eb = buf + len;
	while (bp < eb) {
		kf = (struct kinfo_file *)(uintptr_t)bp;
		/*
		 * A zero kf_structsize marks trailing zero padding (as
		 * emitted in NT_PROCSTAT_FILES coredump notes).  Also
		 * stop on a negative size (would walk backwards) or a
		 * record that claims to extend past the end of the
		 * buffer (truncated output) instead of reading OOB.
		 */
		if (kf->kf_structsize <= 0 ||
		    (size_t)kf->kf_structsize > (size_t)(eb - bp))
			break;
		bp += kf->kf_structsize;
		cnt++;
	}

	/* NOTE: calloc(0, size) may return NULL for cnt == 0. */
	kif = calloc(cnt, sizeof(*kif));
	if (kif == NULL) {
		free(buf);
		return (NULL);
	}
	bp = buf;
	eb = buf + len;
	kp = kif;
	/* Pass 2: unpack */
	while (bp < eb) {
		kf = (struct kinfo_file *)(uintptr_t)bp;
		/* Same termination conditions as pass 1. */
		if (kf->kf_structsize <= 0 ||
		    (size_t)kf->kf_structsize > (size_t)(eb - bp))
			break;
		/*
		 * Copy/expand into the pre-zeroed buffer.  Clamp the
		 * copy to one fixed-size record so a packed record that
		 * reports a larger size (kernel/userland struct skew)
		 * cannot overflow the destination array.
		 */
		memcpy(kp, kf, MIN((size_t)kf->kf_structsize, sizeof(*kp)));
		/* Advance to next packed record */
		bp += kf->kf_structsize;
		/* Set field size to fixed length, advance */
		kp->kf_structsize = sizeof(*kp);
		kp++;
	}
	free(buf);
	*cntp = cnt;
	return (kif);	/* Caller must free() return value */
}
|