diff --git a/sys/arch/amd64/amd64/cpu.c b/sys/arch/amd64/amd64/cpu.c
index 54f0d6ef6..0cb38c365 100644
--- a/sys/arch/amd64/amd64/cpu.c
+++ b/sys/arch/amd64/amd64/cpu.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: cpu.c,v 1.178 2024/02/03 16:21:22 deraadt Exp $	*/
+/*	$OpenBSD: cpu.c,v 1.180 2024/02/12 02:57:14 jsg Exp $	*/
 /*	$NetBSD: cpu.c,v 1.1 2003/04/26 18:39:26 fvdl Exp $	*/
 
 /*-
@@ -188,7 +188,7 @@ replacemeltdown(void)
 {
 	static int replacedone = 0;
 	struct cpu_info *ci = &cpu_info_primary;
-	int swapgs_vuln = 0, ibrs = 0, s;
+	int swapgs_vuln = 0, ibrs = 0, s, ibpb = 0;
 
 	if (strcmp(cpu_vendor, "GenuineIntel") == 0) {
 		int family = ci->ci_family;
@@ -211,6 +211,8 @@ replacemeltdown(void)
 		} else if (ci->ci_feature_sefflags_edx & SEFF0EDX_IBRS) {
 			ibrs = 1;
 		}
+		if (ci->ci_feature_sefflags_edx & SEFF0EDX_IBRS)
+			ibpb = 1;
 	} else if (strcmp(cpu_vendor, "AuthenticAMD") == 0 &&
 	    ci->ci_pnfeatset >= 0x80000008) {
 		if (ci->ci_feature_amdspec_ebx & CPUIDEBX_IBRS_ALWAYSON) {
@@ -219,6 +221,8 @@ replacemeltdown(void)
 		    (ci->ci_feature_amdspec_ebx & CPUIDEBX_IBRS_PREF)) {
 			ibrs = 1;
 		}
+		if (ci->ci_feature_amdspec_ebx & CPUIDEBX_IBPB)
+			ibpb = 1;
 	}
 
 	/* Enhanced IBRS: turn it on once on each CPU and don't touch again */
@@ -230,6 +234,11 @@ replacemeltdown(void)
 	replacedone = 1;
 
 	s = splhigh();
+
+	/* If we don't have IBRS/IBPB, then don't use IBPB */
+	if (ibpb == 0)
+		codepatch_nop(CPTAG_IBPB_NOP);
+
 	if (ibrs == 2 || (ci->ci_feature_sefflags_edx & SEFF0EDX_IBT)) {
 		extern const char _jmprax, _jmpr11, _jmpr13;
 		extern const short _jmprax_len, _jmpr11_len, _jmpr13_len;
diff --git a/sys/arch/amd64/amd64/genassym.cf b/sys/arch/amd64/amd64/genassym.cf
index c34e8e2ce..42e77d782 100644
--- a/sys/arch/amd64/amd64/genassym.cf
+++ b/sys/arch/amd64/amd64/genassym.cf
@@ -1,4 +1,4 @@
-#	$OpenBSD: genassym.cf,v 1.44 2023/01/16 00:05:18 deraadt Exp $
+#	$OpenBSD: genassym.cf,v 1.45 2024/02/12 01:18:17 guenther Exp $
 # Written by Artur Grabowski art@openbsd.org, Public Domain
 
 include
@@ -108,6 +108,7 @@ member CPU_INFO_APICID	ci_apicid
 member CPU_INFO_RESCHED	ci_want_resched
 member CPU_INFO_CURPROC	ci_curproc
 member CPU_INFO_PROC_PMAP	ci_proc_pmap
+member CPU_INFO_USER_PMAP	ci_user_pmap
 member CPU_INFO_CURPCB	ci_curpcb
 member CPU_INFO_IDLE_PCB	ci_idle_pcb
 member CPU_INFO_ILEVEL	ci_ilevel
diff --git a/sys/arch/amd64/amd64/locore.S b/sys/arch/amd64/amd64/locore.S
index a5f159561..3ccdc2e0f 100644
--- a/sys/arch/amd64/amd64/locore.S
+++ b/sys/arch/amd64/amd64/locore.S
@@ -1,4 +1,4 @@
-/*	$OpenBSD: locore.S,v 1.144 2023/12/12 15:30:55 deraadt Exp $	*/
+/*	$OpenBSD: locore.S,v 1.145 2024/02/12 01:18:17 guenther Exp $	*/
 /*	$NetBSD: locore.S,v 1.13 2004/03/25 18:33:17 drochner Exp $	*/
 
 /*
@@ -595,6 +595,22 @@ GENTRY(Xsyscall)
 	jz	.Lsyscall_restore_fsbase
 
 .Lsyscall_restore_registers:
+	/*
+	 * If the pmap we're now on isn't the same as the one we
+	 * were on last time we were in userspace, then use IBPB
+	 * to prevent cross-process branch-target injection.
+	 */
+	CODEPATCH_START
+	movq	CPUVAR(PROC_PMAP),%rbx
+	cmpq	CPUVAR(USER_PMAP),%rbx
+	je	1f
+	xorl	%edx,%edx
+	movl	$PRED_CMD_IBPB,%eax
+	movl	$MSR_PRED_CMD,%ecx
+	wrmsr
+	movq	%rbx,CPUVAR(USER_PMAP)
+1:
+	CODEPATCH_END(CPTAG_IBPB_NOP)
 	call	pku_xonly
 	RET_STACK_REFILL_WITH_RCX
 
@@ -758,17 +774,33 @@ intr_user_exit_post_ast:
 	testl	$CPUPF_USERXSTATE,CPUVAR(PFLAGS)
 	jz	.Lintr_restore_xstate
 
+	/* Restore FS.base if it's not already in the CPU */
+	testl	$CPUPF_USERSEGS,CPUVAR(PFLAGS)
+	jz	.Lintr_restore_fsbase
+
+.Lintr_restore_registers:
 #ifdef DIAGNOSTIC
 	/* no more C calls after this, so check the SPL */
 	cmpl	$0,CPUVAR(ILEVEL)
 	jne	.Luser_spl_not_lowered
 #endif /* DIAGNOSTIC */
 
-	/* Restore FS.base if it's not already in the CPU */
-	testl	$CPUPF_USERSEGS,CPUVAR(PFLAGS)
-	jz	.Lintr_restore_fsbase
-
-.Lintr_restore_registers:
+	/*
+	 * If the pmap we're now on isn't the same as the one we
+	 * were on last time we were in userspace, then use IBPB
+	 * to prevent cross-process branch-target injection.
+	 */
+	CODEPATCH_START
+	movq	CPUVAR(PROC_PMAP),%rbx
+	cmpq	CPUVAR(USER_PMAP),%rbx
+	je	1f
+	xorl	%edx,%edx
+	movl	$PRED_CMD_IBPB,%eax
+	movl	$MSR_PRED_CMD,%ecx
+	wrmsr
+	movq	%rbx,CPUVAR(USER_PMAP)
+1:
+	CODEPATCH_END(CPTAG_IBPB_NOP)
 	call	pku_xonly
 	RET_STACK_REFILL_WITH_RCX
 
diff --git a/sys/arch/amd64/amd64/vector.S b/sys/arch/amd64/amd64/vector.S
index af268a529..de24a291f 100644
--- a/sys/arch/amd64/amd64/vector.S
+++ b/sys/arch/amd64/amd64/vector.S
@@ -1,4 +1,4 @@
-/*	$OpenBSD: vector.S,v 1.94 2023/07/31 04:01:07 guenther Exp $	*/
+/*	$OpenBSD: vector.S,v 1.95 2024/02/12 01:18:17 guenther Exp $	*/
 /*	$NetBSD: vector.S,v 1.5 2004/06/28 09:13:11 fvdl Exp $	*/
 
 /*
@@ -149,6 +149,13 @@ INTRENTRY_LABEL(calltrap_specstk):
 	movq	%r12,%rax
 	movq	%r13,%rdx
 	wrmsr
+	/* who knows what happened in this trap; use IPBP on the way out */
+	CODEPATCH_START
+	xorl	%edx,%edx
+	movl	$PRED_CMD_IBPB,%eax
+	movl	$MSR_PRED_CMD,%ecx
+	wrmsr
+	CODEPATCH_END(CPTAG_IBPB_NOP)
 	call	pku_xonly
 	popq	%rdi
 	popq	%rsi
diff --git a/sys/arch/amd64/amd64/vmm_machdep.c b/sys/arch/amd64/amd64/vmm_machdep.c
index 0bbd2a407..7cc375917 100644
--- a/sys/arch/amd64/amd64/vmm_machdep.c
+++ b/sys/arch/amd64/amd64/vmm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vmm_machdep.c,v 1.16 2024/01/31 05:49:33 guenther Exp $ */
+/* $OpenBSD: vmm_machdep.c,v 1.18 2024/02/12 02:57:14 jsg Exp $ */
 /*
  * Copyright (c) 2014 Mike Larkin
  *
@@ -4185,6 +4185,16 @@ vcpu_run_vmx(struct vcpu *vcpu, struct vm_run_params *vrp)
 
 	TRACEPOINT(vmm, guest_enter, vcpu, vrp);
 
+	/*
+	 * If we're resuming to a different VCPU and have IBPB,
+	 * then use it to prevent cross-VM branch-target injection.
+	 */
+	if (ci->ci_guest_vcpu != vcpu &&
+	    (ci->ci_feature_sefflags_edx & SEFF0EDX_IBRS)) {
+		wrmsr(MSR_PRED_CMD, PRED_CMD_IBPB);
+		ci->ci_guest_vcpu = vcpu;
+	}
+
 	/* Restore any guest PKRU state. */
 	if (vmm_softc->sc_md.pkru_enabled)
 		wrpkru(0, vcpu->vc_pkru);
@@ -6498,6 +6508,16 @@ vcpu_run_svm(struct vcpu *vcpu, struct vm_run_params *vrp)
 			break;
 		}
 
+		/*
+		 * If we're resuming to a different VCPU and have IBPB,
+		 * then use it to prevent cross-VM branch-target injection.
+		 */
+		if (ci->ci_guest_vcpu != vcpu &&
+		    (ci->ci_feature_amdspec_ebx & CPUIDEBX_IBPB)) {
+			wrmsr(MSR_PRED_CMD, PRED_CMD_IBPB);
+			ci->ci_guest_vcpu = vcpu;
+		}
+
 		/* Restore any guest PKRU state. */
 		if (vmm_softc->sc_md.pkru_enabled)
 			wrpkru(0, vcpu->vc_pkru);
diff --git a/sys/arch/amd64/include/codepatch.h b/sys/arch/amd64/include/codepatch.h
index 6f2bdbe18..2ccb638a8 100644
--- a/sys/arch/amd64/include/codepatch.h
+++ b/sys/arch/amd64/include/codepatch.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: codepatch.h,v 1.18 2023/07/31 04:01:07 guenther Exp $ */
+/* $OpenBSD: codepatch.h,v 1.19 2024/02/12 01:18:17 guenther Exp $ */
 /*
  * Copyright (c) 2014-2015 Stefan Fritsch
  *
@@ -69,6 +69,7 @@ void codepatch_disable(void);
 #define CPTAG_RETPOLINE_RAX	14
 #define CPTAG_RETPOLINE_R11	15
 #define CPTAG_RETPOLINE_R13	16
+#define CPTAG_IBPB_NOP		17
 
 /*
  * stac/clac SMAP instructions have lfence like semantics. Let's
diff --git a/sys/arch/amd64/include/cpu.h b/sys/arch/amd64/include/cpu.h
index 5c209b90a..dd0537cb1 100644
--- a/sys/arch/amd64/include/cpu.h
+++ b/sys/arch/amd64/include/cpu.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: cpu.h,v 1.161 2024/02/03 16:21:22 deraadt Exp $	*/
+/*	$OpenBSD: cpu.h,v 1.162 2024/02/12 01:18:17 guenther Exp $	*/
 /*	$NetBSD: cpu.h,v 1.1 2003/04/26 18:39:39 fvdl Exp $	*/
 
 /*-
@@ -98,6 +98,7 @@ union vmm_cpu_cap {
  * o owned (read/modified only) by this CPU
  */
 struct x86_64_tss;
+struct vcpu;
 struct cpu_info {
 	/*
 	 * The beginning of this structure in mapped in the userspace "u-k"
@@ -130,7 +131,8 @@ struct cpu_info {
 	struct proc *ci_curproc;	/* [o] */
 	struct schedstate_percpu ci_schedstate;	/* scheduler state */
 
-	struct pmap *ci_proc_pmap;	/* last userspace pmap */
+	struct pmap *ci_proc_pmap;	/* active, non-kernel pmap */
+	struct pmap *ci_user_pmap;	/* [o] last pmap used in userspace */
 	struct pcb *ci_curpcb;		/* [o] */
 	struct pcb *ci_idle_pcb;	/* [o] */
 
@@ -219,6 +221,7 @@ struct cpu_info {
 	union vmm_cpu_cap ci_vmm_cap;
 	paddr_t ci_vmxon_region_pa;
 	struct vmxon_region *ci_vmxon_region;
+	struct vcpu *ci_guest_vcpu;	/* [o] last vcpu resumed */
 
 	char ci_panicbuf[512];
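
For readers who do not follow amd64 assembly, the CODEPATCH_START/CODEPATCH_END blocks added to locore.S above reduce to a simple check on every return to userspace: if the pmap being resumed differs from the one recorded at the last return, issue an IBPB by writing PRED_CMD_IBPB to MSR_PRED_CMD and remember the new pmap. The following is a minimal user-space sketch of that logic, not kernel code: struct pmap, struct cpu_info, and wrmsr() here are illustrative stand-ins, with the MSR number taken from the Intel SDM (IA32_PRED_CMD, 0x49).

/*
 * Minimal user-space sketch of the IBPB-on-pmap-change logic that the
 * locore.S hunks implement in assembly.  struct pmap, struct cpu_info,
 * and wrmsr() are illustrative stand-ins, not the kernel's definitions.
 */
#include <stdio.h>
#include <stdint.h>

#define MSR_PRED_CMD	0x49	/* IA32_PRED_CMD (Intel SDM) */
#define PRED_CMD_IBPB	0x1	/* bit 0: indirect branch prediction barrier */

struct pmap { int dummy; };

struct cpu_info {
	struct pmap *ci_proc_pmap;	/* pmap of the process being resumed */
	struct pmap *ci_user_pmap;	/* pmap seen at the last return to userspace */
};

/* Stand-in for the privileged wrmsr instruction: just report the write. */
static void
wrmsr(uint32_t msr, uint64_t val)
{
	printf("wrmsr(0x%x, 0x%llx): IBPB issued\n",
	    (unsigned)msr, (unsigned long long)val);
}

/* Mirrors the CODEPATCH_START/CODEPATCH_END block on the return path. */
static void
return_to_userspace(struct cpu_info *ci)
{
	if (ci->ci_proc_pmap != ci->ci_user_pmap) {
		wrmsr(MSR_PRED_CMD, PRED_CMD_IBPB);
		ci->ci_user_pmap = ci->ci_proc_pmap;
	}
}

int
main(void)
{
	struct pmap a, b;
	struct cpu_info ci = { .ci_proc_pmap = &a, .ci_user_pmap = NULL };

	return_to_userspace(&ci);	/* different pmap: IBPB */
	return_to_userspace(&ci);	/* same process again: no IBPB */
	ci.ci_proc_pmap = &b;		/* switch to another process */
	return_to_userspace(&ci);	/* different pmap: IBPB */
	return 0;
}

Run, the sketch issues IBPB on the first and third returns only; this is the per-CPU comparison that the codepatch_nop(CPTAG_IBPB_NOP) call in cpu.c patches out entirely when the CPU offers no IBPB capability.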