Fix IBRS for machines with IBRS_ALL capability.

When toggling the IBRS mitigation via sysctl, as opposed to the loader
tunable, send an IPI to tweak the MSR on all cores.  Previously the
code only performed the MSR write on the CPU where the sysctl was run.
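
The distinction is sketched below, assuming FreeBSD's smp_rendezvous(9)
and the rdmsr()/wrmsr() helpers; the committed code instead funnels this
through x86_msr_op() with MSR_OP_RENDEZVOUS vs. MSR_OP_LOCAL (see the
cpu_machdep.c hunk below), and the helper names here are illustrative
only:

/* Illustrative sketch only; not the committed implementation. */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>            /* smp_rendezvous() */
#include <machine/cpufunc.h>    /* rdmsr(), wrmsr() */
#include <machine/specialreg.h> /* MSR_IA32_SPEC_CTRL, IA32_SPEC_CTRL_IBRS */

extern int hw_ibrs_disable;

/* Set or clear the IBRS bit in IA32_SPEC_CTRL on the CPU this runs on. */
static void
ibrs_msr_update(void *arg __unused)
{
    uint64_t v;

    v = rdmsr(MSR_IA32_SPEC_CTRL);
    if (hw_ibrs_disable != 0)
        v &= ~(uint64_t)IA32_SPEC_CTRL_IBRS;
    else
        v |= IA32_SPEC_CTRL_IBRS;
    wrmsr(MSR_IA32_SPEC_CTRL, v);
}

static void
ibrs_msr_recalc(bool for_all_cpus)
{
    if (for_all_cpus)
        /* sysctl path: rendezvous IPIs run the update on every CPU. */
        smp_rendezvous(NULL, ibrs_msr_update, NULL, NULL);
    else
        /* boot path: each CPU already calls this from initializecpu(). */
        ibrs_msr_update(NULL);
}

The knob itself remains the hw.ibrs_disable sysctl/tunable (CTLFLAG_RWTUN
below); after this change a runtime write reaches every core, not just the
one that handled the sysctl.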

Properly report hw.ibrs_active for IBRS_ALL.  Split hw_ibrs_ibpb_active
out from hw_ibrs_active, to keep the current semantics of gating the
kernel entry and exit handlers.
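
The resulting states, summarized from the hw_ibrs_recalculate() change
in cpu_machdep.c below:

/*
 * State summary, per the hw_ibrs_recalculate() change below:
 *
 *   IBRS_ALL, mitigation enabled:  hw_ibrs_active = 1,
 *       hw_ibrs_ibpb_active = 0 -- IBRS stays set in the MSR, so
 *       handle_ibrs_entry/handle_ibrs_exit can skip their MSR writes.
 *   No IBRS_ALL, mitigation enabled (requires IBPB):  hw_ibrs_active =
 *       hw_ibrs_ibpb_active = 1 -- the entry/exit handlers toggle
 *       IA32_SPEC_CTRL on each kernel crossing.
 *   Mitigation disabled:  both are 0.
 */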

Reported and tested by:	mav
Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
Author:  Konstantin Belousov
Date:    2020-02-25 17:26:10 +00:00
Commit:  a324b7f71d
Parent:  59ffd5eb99
Notes:   svn2git 2020-12-20 02:59:44 +00:00
         svn path=/head/; revision=358315
7 changed files with 19 additions and 15 deletions

diff --git a/sys/amd64/amd64/initcpu.c b/sys/amd64/amd64/initcpu.c

@@ -255,7 +255,7 @@ initializecpu(void)
         wrmsr(MSR_EFER, msr);
         pg_nx = PG_NX;
     }
-    hw_ibrs_recalculate();
+    hw_ibrs_recalculate(false);
     hw_ssb_recalculate(false);
     amd64_syscall_ret_flush_l1d_recalc();
     switch (cpu_vendor_id) {

diff --git a/sys/amd64/amd64/support.S b/sys/amd64/amd64/support.S

@@ -1632,7 +1632,7 @@ handle_ibrs_\l:
 
 /* all callers already saved %rax, %rdx, and %rcx */
 ENTRY(handle_ibrs_entry)
-    cmpb    $0,hw_ibrs_active(%rip)
+    cmpb    $0,hw_ibrs_ibpb_active(%rip)
     je      1f
     movl    $MSR_IA32_SPEC_CTRL,%ecx
     rdmsr

diff --git a/sys/dev/cpuctl/cpuctl.c b/sys/dev/cpuctl/cpuctl.c

@@ -538,8 +538,8 @@ cpuctl_do_eval_cpu_features(int cpu, struct thread *td)
     set_cpu(cpu, td);
     identify_cpu1();
     identify_cpu2();
-    hw_ibrs_recalculate();
     restore_cpu(oldcpu, is_bound, td);
+    hw_ibrs_recalculate(true);
     hw_ssb_recalculate(true);
 #ifdef __amd64__
     amd64_syscall_ret_flush_l1d_recalc();

diff --git a/sys/i386/i386/support.s b/sys/i386/i386/support.s

@@ -446,7 +446,7 @@ msr_onfault:
     ret
 
 ENTRY(handle_ibrs_entry)
-    cmpb    $0,hw_ibrs_active
+    cmpb    $0,hw_ibrs_ibpb_active
     je      1f
     movl    $MSR_IA32_SPEC_CTRL,%ecx
     rdmsr

diff --git a/sys/x86/acpica/acpi_wakeup.c b/sys/x86/acpica/acpi_wakeup.c

@@ -244,7 +244,7 @@ acpi_sleep_machdep(struct acpi_softc *sc, int state)
     }
 #endif
 #ifdef __amd64__
-    hw_ibrs_active = 0;
+    hw_ibrs_ibpb_active = 0;
     hw_ssb_active = 0;
     cpu_stdext_feature3 = 0;
     CPU_FOREACH(i) {

diff --git a/sys/x86/include/x86_var.h b/sys/x86/include/x86_var.h

@@ -90,7 +90,7 @@ extern uint64_t xsave_mask;
 extern u_int max_apic_id;
 extern int i386_read_exec;
 extern int pti;
-extern int hw_ibrs_active;
+extern int hw_ibrs_ibpb_active;
 extern int hw_mds_disable;
 extern int hw_ssb_active;
 extern int x86_taa_enable;
@@ -134,7 +134,7 @@ int is_physical_memory(vm_paddr_t addr);
 int isa_nmi(int cd);
 void handle_ibrs_entry(void);
 void handle_ibrs_exit(void);
-void hw_ibrs_recalculate(void);
+void hw_ibrs_recalculate(bool all_cpus);
 void hw_mds_recalculate(void);
 void hw_ssb_recalculate(bool all_cpus);
 void x86_taa_recalculate(void);

diff --git a/sys/x86/x86/cpu_machdep.c b/sys/x86/x86/cpu_machdep.c

@@ -871,7 +871,8 @@ nmi_handle_intr(u_int type, struct trapframe *frame)
     nmi_call_kdb(PCPU_GET(cpuid), type, frame);
 }
 
-int hw_ibrs_active;
+static int hw_ibrs_active;
+int hw_ibrs_ibpb_active;
 int hw_ibrs_disable = 1;
 
 SYSCTL_INT(_hw, OID_AUTO, ibrs_active, CTLFLAG_RD, &hw_ibrs_active, 0,
@@ -884,16 +885,19 @@ SYSCTL_INT(_machdep_mitigations_ibrs, OID_AUTO, active, CTLFLAG_RD,
     &hw_ibrs_active, 0, "Indirect Branch Restricted Speculation active");
 
 void
-hw_ibrs_recalculate(void)
+hw_ibrs_recalculate(bool for_all_cpus)
 {
     if ((cpu_ia32_arch_caps & IA32_ARCH_CAP_IBRS_ALL) != 0) {
-        x86_msr_op(MSR_IA32_SPEC_CTRL, MSR_OP_LOCAL |
-            (hw_ibrs_disable ? MSR_OP_ANDNOT : MSR_OP_OR),
+        x86_msr_op(MSR_IA32_SPEC_CTRL, (for_all_cpus ?
+            MSR_OP_RENDEZVOUS : MSR_OP_LOCAL) |
+            (hw_ibrs_disable != 0 ? MSR_OP_ANDNOT : MSR_OP_OR),
             IA32_SPEC_CTRL_IBRS);
-        return;
+        hw_ibrs_active = hw_ibrs_disable == 0;
+        hw_ibrs_ibpb_active = 0;
+    } else {
+        hw_ibrs_active = hw_ibrs_ibpb_active = (cpu_stdext_feature3 &
+            CPUID_STDEXT3_IBPB) != 0 && !hw_ibrs_disable;
     }
-    hw_ibrs_active = (cpu_stdext_feature3 & CPUID_STDEXT3_IBPB) != 0 &&
-        !hw_ibrs_disable;
 }
 
 static int
@@ -906,7 +910,7 @@ hw_ibrs_disable_handler(SYSCTL_HANDLER_ARGS)
     if (error != 0 || req->newptr == NULL)
         return (error);
     hw_ibrs_disable = val != 0;
-    hw_ibrs_recalculate();
+    hw_ibrs_recalculate(true);
     return (0);
 }
 SYSCTL_PROC(_hw, OID_AUTO, ibrs_disable, CTLTYPE_INT | CTLFLAG_RWTUN |