sync with OpenBSD -current

purplerain 2024-04-30 02:20:47 +00:00
parent 6fc9e02a30
commit 7768d1f254
Signed by: purplerain
GPG Key ID: F42C07F07E2E35B7
35 changed files with 335 additions and 351 deletions

View File

@ -488,6 +488,7 @@
./usr/share/man/man1/tmux.1
./usr/share/man/man1/top.1
./usr/share/man/man1/touch.1
./usr/share/man/man1/tput.1
./usr/share/man/man1/tr.1
./usr/share/man/man1/tradcpp.1
./usr/share/man/man1/true.1
@ -1279,7 +1280,6 @@
./usr/share/man/man4/arm64/aplspi.4
./usr/share/man/man4/arm64/aplspmi.4
./usr/share/man/man4/arm64/apm.4
./usr/share/man/man4/arm64/efi.4
./usr/share/man/man4/arm64/intro.4
./usr/share/man/man4/arm64/smbios.4
./usr/share/man/man4/armv7/agtimer.4
@ -1417,6 +1417,7 @@
./usr/share/man/man4/ec.4
./usr/share/man/man4/eephy.4
./usr/share/man/man4/ef.4
./usr/share/man/man4/efi.4
./usr/share/man/man4/eg.4
./usr/share/man/man4/ehci.4
./usr/share/man/man4/eisa.4

View File

@ -1,4 +1,4 @@
.\" $OpenBSD: strftime.3,v 1.38 2019/05/16 13:35:17 schwarze Exp $
.\" $OpenBSD: strftime.3,v 1.39 2024/04/29 13:34:19 naddy Exp $
.\"
.\" Copyright (c) 1989, 1991 The Regents of the University of California.
.\" All rights reserved.
@ -33,7 +33,7 @@
.\"
.\" from: @(#)strftime.3 5.12 (Berkeley) 6/29/91
.\"
.Dd $Mdocdate: May 16 2019 $
.Dd $Mdocdate: April 29 2024 $
.Dt STRFTIME 3
.Os
.Sh NAME
@ -110,11 +110,21 @@ is replaced by the date in the format
.It Cm \&%G
is replaced by the
.St -iso8601
year with century as a decimal number.
week-numbering year with century as a decimal number.
See also the
.Cm \&%V
conversion specification and the
.Sx STANDARDS
section for more details.
.It Cm \&%g
is replaced by the
.St -iso8601
year without century as a decimal number (00\-99).
week-numbering year without century as a decimal number (00\-99).
See also the
.Cm \&%V
conversion specification and the
.Sx STANDARDS
section for more details.
.It Cm \&%H
is replaced by the hour (24-hour clock) as a decimal number (00\-23).
.It Cm \&%I
@ -169,6 +179,12 @@ the week) as a decimal number (01\-53).
If the week containing January
1 has four or more days in the new year, then it is week 1; otherwise
it is week 53 of the previous year, and the next week is week 1.
The year is given by the
.Cm \&%G
conversion specification.
See the
.Sx STANDARDS
section for more details.
.It Cm \&%v
is replaced by the date in the format
.Dq Li "%e-%b-%Y" .
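Not part of the diff: a minimal C sketch of the ISO 8601 week-numbering conversions that the reworded %G/%g/%V text above documents, using a date whose week-based year differs from the calendar year. It relies only on standard strftime(3)/mktime(3) behaviour.

#include <stdio.h>
#include <time.h>

int
main(void)
{
	/*
	 * 2021-01-01 is a Friday; the week containing it has only three
	 * days in the new year, so per the rule quoted above it belongs
	 * to week 53 of the ISO week-numbering year 2020.
	 */
	struct tm tm = { .tm_year = 2021 - 1900, .tm_mon = 0, .tm_mday = 1 };
	char buf[64];

	if (mktime(&tm) == (time_t)-1)	/* fills in tm_wday/tm_yday */
		return 1;
	strftime(buf, sizeof(buf), "%Y-%m-%d: ISO year %G, week %V", &tm);
	puts(buf);	/* prints: 2021-01-01: ISO year 2020, week 53 */
	return 0;
}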

View File

@ -1,4 +1,4 @@
# $OpenBSD: Makefile,v 1.847 2024/03/02 20:21:33 kettenis Exp $
# $OpenBSD: Makefile,v 1.848 2024/04/29 20:18:12 kn Exp $
MAN= aac.4 abcrtc.4 abl.4 ac97.4 acphy.4 acrtc.4 \
acpi.4 acpiac.4 acpials.4 acpiasus.4 acpibat.4 \
@ -31,7 +31,7 @@ MAN= aac.4 abcrtc.4 abl.4 ac97.4 acphy.4 acrtc.4 \
dc.4 dcphy.4 ddb.4 de.4 diskmap.4 divert.4 drm.4 dsxrtc.4 dt.4 \
dwctwo.4 dwdog.4 dwge.4 dwgpio.4 dwiic.4 dwmmc.4 dwmshc.4 dwpcie.4 \
dwqe.4 dwxe.4 \
eap.4 ec.4 eephy.4 ef.4 eg.4 ehci.4 eisa.4 el.4 em.4 emc.4 gcu.4 \
eap.4 ec.4 eephy.4 ef.4 efi.4 eg.4 ehci.4 eisa.4 el.4 em.4 emc.4 gcu.4 \
emu.4 enc.4 endrun.4 envy.4 eoip.4 ep.4 epic.4 esa.4 escodec.4 \
eso.4 ess.4 et.4 etherip.4 etphy.4 ex.4 exphy.4 exrtc.4 \
fanpwr.4 fd.4 fdc.4 fec.4 fido.4 fins.4 fintek.4 fms.4 fusbtc.4 \

View File

@ -1,6 +1,7 @@
.\" $OpenBSD: efi.4,v 1.2 2018/04/07 13:52:48 jmc Exp $
.\" $OpenBSD: efi.4,v 1.1 2024/04/29 20:18:12 kn Exp $
.\"
.\" Copyright (c) 2018 Mark Kettenis <kettenis@openbsd.org>
.\" Copyright (c) 2024 Klemens Nanni <kn@openbsd.org>
.\"
.\" Permission to use, copy, modify, and distribute this software for any
.\" purpose with or without fee is hereby granted, provided that the above
@ -14,31 +15,45 @@
.\" ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
.\" OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
.\"
.Dd $Mdocdate: April 7 2018 $
.Dt EFI 4 arm64
.Dd $Mdocdate: April 29 2024 $
.Dt EFI 4
.Os
.Sh NAME
.Nm efi
.Nd EFI runtime services
.Nd Unified Extensible Firmware Interface
.Sh SYNOPSIS
.Cd "# amd64"
.Cd "efi* at bios?"
.Pp
.Cd "# arm64"
.Cd "efi* at fdt?"
.Sh DESCRIPTION
The
.Nm
driver provides support for EFI runtime services offered by UEFI
firmware implementations.
Support is currently limited to reading and setting the Real Time
Clock on systems that offer such functionality.
It can read and write the Real Time Clock and provides userland applications
.Xr ioctl 2
access to the System Resource Table and to read and write variables.
.Sh FILES
.Bl -tag -width /dev/efi -compact
.It Pa /dev/efi
.El
.Sh SEE ALSO
.Xr ioctl 2 ,
.Xr intro 4
.Sh HISTORY
The
.Nm
device driver first appeared in
device driver first appeared for arm64 in
.Ox 6.3 .
Support for amd64, the ESRT and EFI variables appeared in
.Ox 7.3 .
.Sh AUTHORS
.An -nosplit
The
.Nm
driver was written by
.An Mark Kettenis Aq Mt kettenis@openbsd.org .
Support for the ESRT and EFI variables is heavily based on work done by
.An Sergii Dmytruk Aq Mt sergii.dmytruk@3mdeb.com .
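Not part of the diff: a hypothetical userland sketch of the ioctl(2) access the new DESCRIPTION text mentions. The header path <dev/efi/efiio.h>, the EFIIOC_VAR_NEXT request, and the struct efi_var_ioc members used here are assumptions and may not match the real interface exactly.

#include <sys/types.h>
#include <sys/ioctl.h>

#include <dev/efi/efiio.h>	/* assumed: EFIIOC_* requests, struct efi_var_ioc */

#include <err.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct efi_var_ioc var;
	uint16_t name[512];	/* UCS-2 variable name buffer */
	int fd, nvars = 0;

	if ((fd = open("/dev/efi", O_RDONLY)) == -1)
		err(1, "/dev/efi");

	memset(&var, 0, sizeof(var));
	memset(name, 0, sizeof(name));	/* empty name starts enumeration */
	var.name = name;
	var.namesize = sizeof(name);

	/* Assumed GetNextVariableName-style semantics for EFIIOC_VAR_NEXT. */
	while (ioctl(fd, EFIIOC_VAR_NEXT, &var) == 0) {
		nvars++;
		var.namesize = sizeof(name);	/* reset for the next call */
	}
	printf("%d EFI variables visible via /dev/efi\n", nvars);

	close(fd);
	return 0;
}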

View File

@ -1,4 +1,4 @@
# $OpenBSD: Makefile,v 1.34 2024/01/22 18:56:18 kettenis Exp $
# $OpenBSD: Makefile,v 1.35 2024/04/29 20:18:12 kn Exp $
MAN= agintc.4 agtimer.4 ampchwm.4 ampintc.4 \
aplaudio.4 aplcpu.4 apldart.4 apldcp.4 apldma.4 apldog.4 apldrm.4 \
@ -6,7 +6,7 @@ MAN= agintc.4 agtimer.4 ampchwm.4 ampintc.4 \
aplhidev.4 apliic.4 aplintc.4 aplmbox.4 aplmca.4 aplnco.4 aplns.4 \
aplpcie.4 aplpinctrl.4 aplpmgr.4 aplpmu.4 aplpwm.4 \
aplsart.4 aplsmc.4 aplspi.4 aplspmi.4 apm.4 \
efi.4 intro.4 smbios.4
intro.4 smbios.4
MANSUBDIR=arm64

View File

@ -1,4 +1,4 @@
/* $OpenBSD: vmm_machdep.c,v 1.24 2024/04/13 21:57:22 dv Exp $ */
/* $OpenBSD: vmm_machdep.c,v 1.25 2024/04/29 14:47:05 dv Exp $ */
/*
* Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org>
*
@ -3691,18 +3691,14 @@ vm_run(struct vm_run_params *vrp)
}
/*
* We may be returning from userland helping us from the last exit.
* If so (vrp_continue == 1), copy in the exit data from vmd. The
* exit data will be consumed before the next entry (this typically
* comprises VCPU register changes as the result of vmd(8)'s actions).
* We may be returning from userland helping us from the last
* exit. Copy in the exit data from vmd. The exit data will be
* consumed before the next entry (this typically comprises
* VCPU register changes as the result of vmd(8)'s actions).
*/
if (vrp->vrp_continue) {
if (copyin(vrp->vrp_exit, &vcpu->vc_exit,
sizeof(struct vm_exit)) == EFAULT) {
ret = EFAULT;
goto out_unlock;
}
}
ret = copyin(vrp->vrp_exit, &vcpu->vc_exit, sizeof(struct vm_exit));
if (ret)
goto out_unlock;
vcpu->vc_inject.vie_type = vrp->vrp_inject.vie_type;
vcpu->vc_inject.vie_vector = vrp->vrp_inject.vie_vector;
@ -4001,67 +3997,28 @@ vcpu_run_vmx(struct vcpu *vcpu, struct vm_run_params *vrp)
else
vcpu->vc_intr = 0;
if (vrp->vrp_continue) {
switch (vcpu->vc_gueststate.vg_exit_reason) {
case VMX_EXIT_IO:
if (vcpu->vc_exit.vei.vei_dir == VEI_DIR_IN)
vcpu->vc_gueststate.vg_rax =
vcpu->vc_exit.vei.vei_data;
vcpu->vc_gueststate.vg_rip =
vcpu->vc_exit.vrs.vrs_gprs[VCPU_REGS_RIP];
if (vmwrite(VMCS_GUEST_IA32_RIP,
vcpu->vc_gueststate.vg_rip)) {
printf("%s: failed to update rip\n", __func__);
return (EINVAL);
}
break;
case VMX_EXIT_EPT_VIOLATION:
ret = vcpu_writeregs_vmx(vcpu, VM_RWREGS_GPRS, 0,
&vcpu->vc_exit.vrs);
if (ret) {
printf("%s: vm %d vcpu %d failed to update "
"registers\n", __func__,
vcpu->vc_parent->vm_id, vcpu->vc_id);
return (EINVAL);
}
break;
case VM_EXIT_NONE:
case VMX_EXIT_HLT:
case VMX_EXIT_INT_WINDOW:
case VMX_EXIT_EXTINT:
case VMX_EXIT_CPUID:
case VMX_EXIT_XSETBV:
break;
#ifdef VMM_DEBUG
case VMX_EXIT_TRIPLE_FAULT:
DPRINTF("%s: vm %d vcpu %d triple fault\n",
__func__, vcpu->vc_parent->vm_id,
vcpu->vc_id);
vmx_vcpu_dump_regs(vcpu);
dump_vcpu(vcpu);
vmx_dump_vmcs(vcpu);
break;
case VMX_EXIT_ENTRY_FAILED_GUEST_STATE:
DPRINTF("%s: vm %d vcpu %d failed entry "
"due to invalid guest state\n",
__func__, vcpu->vc_parent->vm_id,
vcpu->vc_id);
vmx_vcpu_dump_regs(vcpu);
dump_vcpu(vcpu);
switch (vcpu->vc_gueststate.vg_exit_reason) {
case VMX_EXIT_IO:
if (vcpu->vc_exit.vei.vei_dir == VEI_DIR_IN)
vcpu->vc_gueststate.vg_rax = vcpu->vc_exit.vei.vei_data;
vcpu->vc_gueststate.vg_rip =
vcpu->vc_exit.vrs.vrs_gprs[VCPU_REGS_RIP];
if (vmwrite(VMCS_GUEST_IA32_RIP, vcpu->vc_gueststate.vg_rip)) {
printf("%s: failed to update rip\n", __func__);
return (EINVAL);
default:
DPRINTF("%s: unimplemented exit type %d (%s)\n",
__func__,
vcpu->vc_gueststate.vg_exit_reason,
vmx_exit_reason_decode(
vcpu->vc_gueststate.vg_exit_reason));
vmx_vcpu_dump_regs(vcpu);
dump_vcpu(vcpu);
break;
#endif /* VMM_DEBUG */
}
memset(&vcpu->vc_exit, 0, sizeof(vcpu->vc_exit));
break;
case VMX_EXIT_EPT_VIOLATION:
ret = vcpu_writeregs_vmx(vcpu, VM_RWREGS_GPRS, 0,
&vcpu->vc_exit.vrs);
if (ret) {
printf("%s: vm %d vcpu %d failed to update registers\n",
__func__, vcpu->vc_parent->vm_id, vcpu->vc_id);
return (EINVAL);
}
break;
}
memset(&vcpu->vc_exit, 0, sizeof(vcpu->vc_exit));
/* Host CR3 */
cr3 = rcr3();
@ -6519,31 +6476,29 @@ vcpu_run_svm(struct vcpu *vcpu, struct vm_run_params *vrp)
* needs to be fixed up depends on what vmd populated in the
* exit data structure.
*/
if (vrp->vrp_continue) {
switch (vcpu->vc_gueststate.vg_exit_reason) {
case SVM_VMEXIT_IOIO:
if (vcpu->vc_exit.vei.vei_dir == VEI_DIR_IN) {
vcpu->vc_gueststate.vg_rax =
vcpu->vc_exit.vei.vei_data;
vmcb->v_rax = vcpu->vc_gueststate.vg_rax;
}
vcpu->vc_gueststate.vg_rip =
vcpu->vc_exit.vrs.vrs_gprs[VCPU_REGS_RIP];
vmcb->v_rip = vcpu->vc_gueststate.vg_rip;
break;
case SVM_VMEXIT_NPF:
ret = vcpu_writeregs_svm(vcpu, VM_RWREGS_GPRS,
&vcpu->vc_exit.vrs);
if (ret) {
printf("%s: vm %d vcpu %d failed to update "
"registers\n", __func__,
vcpu->vc_parent->vm_id, vcpu->vc_id);
return (EINVAL);
}
break;
switch (vcpu->vc_gueststate.vg_exit_reason) {
case SVM_VMEXIT_IOIO:
if (vcpu->vc_exit.vei.vei_dir == VEI_DIR_IN) {
vcpu->vc_gueststate.vg_rax =
vcpu->vc_exit.vei.vei_data;
vmcb->v_rax = vcpu->vc_gueststate.vg_rax;
}
memset(&vcpu->vc_exit, 0, sizeof(vcpu->vc_exit));
vcpu->vc_gueststate.vg_rip =
vcpu->vc_exit.vrs.vrs_gprs[VCPU_REGS_RIP];
vmcb->v_rip = vcpu->vc_gueststate.vg_rip;
break;
case SVM_VMEXIT_NPF:
ret = vcpu_writeregs_svm(vcpu, VM_RWREGS_GPRS,
&vcpu->vc_exit.vrs);
if (ret) {
printf("%s: vm %d vcpu %d failed to update "
"registers\n", __func__,
vcpu->vc_parent->vm_id, vcpu->vc_id);
return (EINVAL);
}
break;
}
memset(&vcpu->vc_exit, 0, sizeof(vcpu->vc_exit));
while (ret == 0) {
vmm_update_pvclock(vcpu);

View File

@ -1,4 +1,4 @@
/* $OpenBSD: vmmvar.h,v 1.100 2024/04/09 21:55:16 dv Exp $ */
/* $OpenBSD: vmmvar.h,v 1.101 2024/04/29 14:47:05 dv Exp $ */
/*
* Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org>
*
@ -478,7 +478,6 @@ struct vm_run_params {
/* Input parameters to VMM_IOC_RUN */
uint32_t vrp_vm_id;
uint32_t vrp_vcpu_id;
uint8_t vrp_continue; /* Continuing from an exit */
struct vcpu_inject_event vrp_inject;
uint8_t vrp_intr_pending; /* Additional intrs pending? */

View File

@ -1,4 +1,4 @@
/* $OpenBSD: fault.c,v 1.47 2023/01/05 20:35:44 kettenis Exp $ */
/* $OpenBSD: fault.c,v 1.48 2024/04/29 12:33:17 jsg Exp $ */
/* $NetBSD: fault.c,v 1.46 2004/01/21 15:39:21 skrll Exp $ */
/*
@ -96,10 +96,6 @@
#include <arm/machdep.h>
#include <arm/vfp.h>
#ifdef DEBUG
int last_fault_code; /* For the benefit of pmap_fault_fixup() */
#endif
struct sigdata {
int signo;
int code;

View File

@ -1,4 +1,4 @@
/* $OpenBSD: machdep.h,v 1.5 2016/09/24 13:43:25 kettenis Exp $ */
/* $OpenBSD: machdep.h,v 1.6 2024/04/29 12:24:46 jsg Exp $ */
/* $NetBSD: machdep.h,v 1.7 2002/02/21 02:52:21 thorpej Exp $ */
#ifndef _ARM_MACHDEP_H_
@ -6,7 +6,6 @@
/* misc prototypes used by the many arm machdeps */
void halt (void);
void parse_mi_bootargs (char *);
void data_abort_handler (trapframe_t *);
void prefetch_abort_handler (trapframe_t *);
void undefinedinstruction_bounce (trapframe_t *);
@ -18,10 +17,4 @@ void dumpsys (void);
*/
u_int initarm (void *, void *, void *, paddr_t);
/* from arm/arm/intr.c */
void dosoftints (void);
void set_spl_masks (void);
#ifdef DIAGNOSTIC
void dump_spl_masks (void);
#endif
#endif

View File

@ -1,4 +1,4 @@
/* $OpenBSD: pmap.h,v 1.55 2023/12/11 22:12:53 kettenis Exp $ */
/* $OpenBSD: pmap.h,v 1.56 2024/04/29 12:24:46 jsg Exp $ */
/* $NetBSD: pmap.h,v 1.76 2003/09/06 09:10:46 rearnsha Exp $ */
/*
@ -153,18 +153,6 @@ union pmap_cache_state {
*/
#define PMAP_CACHE_STATE_ALL 0xffffffffu
/*
* This structure is used by machine-dependent code to describe
* static mappings of devices, created at bootstrap time.
*/
struct pmap_devmap {
vaddr_t pd_va; /* virtual address */
paddr_t pd_pa; /* physical address */
psize_t pd_size; /* size of region */
vm_prot_t pd_prot; /* protection code */
int pd_cache; /* cache attributes */
};
/*
* The pmap structure itself
*/
@ -245,12 +233,6 @@ extern struct pmap kernel_pmap_store;
#define pmap_unuse_final(p) do { /* nothing */ } while (0)
#define pmap_remove_holes(vm) do { /* nothing */ } while (0)
/*
* Functions that we need to export
*/
void pmap_remove_all(pmap_t);
void pmap_uncache_page(paddr_t, vaddr_t);
#define PMAP_CHECK_COPYIN 1
#define PMAP_GROWKERNEL /* turn on pmap_growkernel interface */
@ -258,7 +240,6 @@ void pmap_uncache_page(paddr_t, vaddr_t);
/* Functions we use internally. */
void pmap_bootstrap(pd_entry_t *, vaddr_t, vaddr_t);
int pmap_fault_fixup(pmap_t, vaddr_t, vm_prot_t, int);
int pmap_get_pde_pte(pmap_t, vaddr_t, pd_entry_t **, pt_entry_t **);
int pmap_get_pde(pmap_t, vaddr_t, pd_entry_t **);
void pmap_set_pcb_pagedir(pmap_t, struct pcb *);
@ -270,16 +251,11 @@ void vector_page_setprot(int);
/* XXX */
void pmap_kenter_cache(vaddr_t va, paddr_t pa, vm_prot_t prot, int cacheable);
const struct pmap_devmap *pmap_devmap_find_pa(paddr_t, psize_t);
const struct pmap_devmap *pmap_devmap_find_va(vaddr_t, vsize_t);
/* Bootstrapping routines. */
void pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
void pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
vsize_t pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
void pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
void pmap_devmap_bootstrap(vaddr_t, const struct pmap_devmap *);
void pmap_devmap_register(const struct pmap_devmap *);
/*
* The current top of kernel VM

View File

@ -1,4 +1,4 @@
/* $OpenBSD: machdep.c,v 1.88 2024/03/17 13:05:40 kettenis Exp $ */
/* $OpenBSD: machdep.c,v 1.89 2024/04/29 13:01:54 jsg Exp $ */
/*
* Copyright (c) 2014 Patrick Wildt <patrick@blueri.se>
* Copyright (c) 2021 Mark Kettenis <kettenis@openbsd.org>
@ -620,11 +620,6 @@ dumpsys(void)
int (*dump)(dev_t, daddr_t, caddr_t, size_t);
int error;
#if 0
/* Save registers. */
savectx(&dumppcb);
#endif
if (dumpdev == NODEV)
return;

View File

@ -1,4 +1,4 @@
/* $OpenBSD: cpu.h,v 1.45 2024/04/19 10:22:50 mpi Exp $ */
/* $OpenBSD: cpu.h,v 1.46 2024/04/29 13:01:54 jsg Exp $ */
/*
* Copyright (c) 2016 Dale Rahn <drahn@dalerahn.com>
*
@ -96,10 +96,6 @@ extern uint64_t cpu_id_aa64pfr1;
#define PROC_PC(p) ((p)->p_addr->u_pcb.pcb_tf->tf_elr)
#define PROC_STACK(p) ((p)->p_addr->u_pcb.pcb_tf->tf_sp)
/* The address of the vector page. */
extern vaddr_t vector_page;
void arm32_vector_init(vaddr_t, int);
/*
* Per-CPU information. For now we assume one CPU.
*/
@ -276,29 +272,15 @@ void need_resched(struct cpu_info *);
// asm code to start new kernel contexts.
void proc_trampoline(void);
void child_trampoline(void);
/*
* Random cruft
*/
void dumpconf(void);
// cpuswitch.S
struct pcb;
void savectx (struct pcb *pcb);
// machdep.h
void bootsync (int);
// fault.c
int badaddr_read (void *, size_t, void *);
// syscall.c
void svc_handler (trapframe_t *);
/* machine_machdep.c */
void board_startup(void);
// functions to manipulate interrupt state
static __inline void
restore_daif(uint32_t daif)

View File

@ -1,4 +1,4 @@
/* $OpenBSD: intc.c,v 1.12 2022/01/03 03:06:50 jsg Exp $ */
/* $OpenBSD: intc.c,v 1.14 2024/04/29 12:42:06 jsg Exp $ */
/*
* Copyright (c) 2007,2009 Dale Rahn <drahn@openbsd.org>
*
@ -91,8 +91,6 @@ struct intrq {
int iq_ist; /* share type */
};
volatile int softint_pending;
struct intrq intc_handler[INTC_MAX_IRQ];
u_int32_t intc_smask[NIPL];
u_int32_t intc_imask[INTC_MAX_BANKS][NIPL];
@ -310,18 +308,6 @@ intc_setipl(int new)
restore_interrupts(psw);
}
void
intc_intr_bootstrap(vaddr_t addr)
{
int i, j;
extern struct bus_space armv7_bs_tag;
intc_iot = &armv7_bs_tag;
intc_ioh = addr;
for (i = 0; i < INTC_NUM_BANKS; i++)
for (j = 0; j < NIPL; j++)
intc_imask[i][j] = 0xffffffff;
}
void
intc_irq_handler(void *frame)
{

View File

@ -1,4 +1,4 @@
/* $OpenBSD: intc.h,v 1.4 2020/07/14 15:34:15 patrick Exp $ */
/* $OpenBSD: intc.h,v 1.8 2024/04/29 12:46:22 jsg Exp $ */
/*
* Copyright (c) 2007,2009 Dale Rahn <drahn@openbsd.org>
*
@ -25,42 +25,11 @@
#include <machine/intr.h>
#include <arm/softintr.h>
extern volatile int current_spl_level;
extern volatile int softint_pending;
void intc_do_pending(void);
#define SI_TO_IRQBIT(si) (1U<<(si))
void intc_setipl(int new);
void intc_splx(int new);
int intc_splraise(int ipl);
int intc_spllower(int ipl);
void intc_setsoftintr(int si);
/*
* An useful function for interrupt handlers.
* XXX: This shouldn't be here.
*/
static __inline int
find_first_bit( uint32_t bits )
{
int count;
/* since CLZ is available only on ARMv5, this isn't portable
* to all ARM CPUs. This file is for OMAPINTC processor.
*/
asm( "clz %0, %1" : "=r" (count) : "r" (bits) );
return 31-count;
}
/*
* This function *MUST* be called very early on in a port's
* initarm() function, before ANY spl*() functions are called.
*
* The parameter is the virtual address of the OMAPINTC's Interrupt
* Controller registers.
*/
void intc_intr_bootstrap(vaddr_t);
void intc_irq_handler(void *);
void *intc_intr_establish(int irqno, int level, struct cpu_info *ci,

View File

@ -1,4 +1,4 @@
/* $OpenBSD: sxiintc.c,v 1.11 2022/01/03 03:06:50 jsg Exp $ */
/* $OpenBSD: sxiintc.c,v 1.12 2024/04/29 12:33:17 jsg Exp $ */
/*
* Copyright (c) 2007,2009 Dale Rahn <drahn@openbsd.org>
* Copyright (c) 2013 Artturi Alm
@ -131,8 +131,6 @@ struct intrq {
int iq_ist; /* share type */
};
volatile int a1xsoftint_pending;
struct intrq sxiintc_handler[NIRQ];
u_int32_t sxiintc_smask[NIPL];
u_int32_t sxiintc_imask[NBANKS][NIPL];

View File

@ -1,4 +1,4 @@
/* $OpenBSD: sxiintc.h,v 1.2 2020/07/14 15:34:15 patrick Exp $ */
/* $OpenBSD: sxiintc.h,v 1.4 2024/04/29 12:33:17 jsg Exp $ */
/*
* Copyright (c) 2007,2009 Dale Rahn <drahn@openbsd.org>
*
@ -25,16 +25,11 @@
#include <machine/intr.h>
#include <arm/softintr.h>
extern volatile int current_spl_level;
extern volatile int softint_pending;
void sxiintc_do_pending(void);
#define SI_TO_IRQBIT(si) (1U<<(si))
void sxiintc_setipl(int);
void sxiintc_splx(int);
int sxiintc_splraise(int);
int sxiintc_spllower(int);
void sxiintc_setsoftintr(int);
void sxiintc_irq_handler(void *);
void *sxiintc_intr_establish(int, int, struct cpu_info *,

View File

@ -562,7 +562,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
struct ttm_resource *mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
size_t bus_size = (size_t)mem->size;
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
@ -573,9 +572,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
break;
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
/* check if it's visible */
if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
return -EINVAL;
if (adev->mman.aper_base_kaddr &&
mem->placement & TTM_PL_FLAG_CONTIGUOUS)

View File

@ -1562,6 +1562,37 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
trace_amdgpu_vm_bo_map(bo_va, mapping);
}
/* Validate operation parameters to prevent potential abuse */
static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
struct amdgpu_bo *bo,
uint64_t saddr,
uint64_t offset,
uint64_t size)
{
uint64_t tmp, lpfn;
if (saddr & AMDGPU_GPU_PAGE_MASK
|| offset & AMDGPU_GPU_PAGE_MASK
|| size & AMDGPU_GPU_PAGE_MASK)
return -EINVAL;
if (check_add_overflow(saddr, size, &tmp)
|| check_add_overflow(offset, size, &tmp)
|| size == 0 /* which also leads to end < begin */)
return -EINVAL;
/* make sure object fit at this offset */
if (bo && offset + size > amdgpu_bo_size(bo))
return -EINVAL;
/* Ensure last pfn not exceed max_pfn */
lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
if (lpfn >= adev->vm_manager.max_pfn)
return -EINVAL;
return 0;
}
/**
* amdgpu_vm_bo_map - map bo inside a vm
*
@ -1588,21 +1619,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo *bo = bo_va->base.bo;
struct amdgpu_vm *vm = bo_va->base.vm;
uint64_t eaddr;
int r;
/* validate the parameters */
if (saddr & ~LINUX_PAGE_MASK || offset & ~LINUX_PAGE_MASK || size & ~LINUX_PAGE_MASK)
return -EINVAL;
if (saddr + size <= saddr || offset + size <= offset)
return -EINVAL;
/* make sure object fit at this offset */
eaddr = saddr + size - 1;
if ((bo && offset + size > amdgpu_bo_size(bo)) ||
(eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
return -EINVAL;
r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
if (r)
return r;
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
if (tmp) {
@ -1655,17 +1679,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
uint64_t eaddr;
int r;
/* validate the parameters */
if (saddr & ~LINUX_PAGE_MASK || offset & ~LINUX_PAGE_MASK || size & ~LINUX_PAGE_MASK)
return -EINVAL;
if (saddr + size <= saddr || offset + size <= offset)
return -EINVAL;
/* make sure object fit at this offset */
eaddr = saddr + size - 1;
if ((bo && offset + size > amdgpu_bo_size(bo)) ||
(eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
return -EINVAL;
r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
if (r)
return r;
/* Allocate all the needed memory */
mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
@ -1679,7 +1695,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
}
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
mapping->start = saddr;
mapping->last = eaddr;
@ -1766,10 +1782,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
DRM_LIST_HEAD(removed);
uint64_t eaddr;
int r;
r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
if (r)
return r;
eaddr = saddr + size - 1;
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
/* Allocate all the needed memory */
before = kzalloc(sizeof(*before), GFP_KERNEL);

View File

@ -818,9 +818,9 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
mutex_lock(&kfd_processes_mutex);
if (kfd_is_locked()) {
mutex_unlock(&kfd_processes_mutex);
pr_debug("KFD is locked! Cannot create process");
return ERR_PTR(-EINVAL);
process = ERR_PTR(-EINVAL);
goto out;
}
/* A prior open of /dev/kfd could have already created the process. */

View File

@ -236,9 +236,6 @@ static void optc32_setup_manual_trigger(struct timing_generator *optc)
OTG_V_TOTAL_MAX_SEL, 1,
OTG_FORCE_LOCK_ON_EVENT, 0,
OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
// Setup manual flow control for EOF via TRIG_A
optc->funcs->setup_manual_trigger(optc);
}
}

View File

@ -259,6 +259,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
drm_property_blob_get(crtc_state->post_csc_lut);
crtc_state->update_pipe = false;
crtc_state->update_m_n = false;
crtc_state->disable_lp_wm = false;
crtc_state->disable_cxsr = false;
crtc_state->update_wm_pre = false;

View File

@ -2453,7 +2453,8 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_cdclk_state =
intel_atomic_get_new_cdclk_state(state);
enum pipe pipe = new_cdclk_state->pipe;
struct intel_cdclk_config cdclk_config;
enum pipe pipe;
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual))
@ -2462,12 +2463,25 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
if (IS_DG2(i915))
intel_cdclk_pcode_pre_notify(state);
if (new_cdclk_state->disable_pipes ||
old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) {
drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
if (new_cdclk_state->disable_pipes) {
cdclk_config = new_cdclk_state->actual;
pipe = INVALID_PIPE;
} else {
if (new_cdclk_state->actual.cdclk >= old_cdclk_state->actual.cdclk) {
cdclk_config = new_cdclk_state->actual;
pipe = new_cdclk_state->pipe;
} else {
cdclk_config = old_cdclk_state->actual;
pipe = INVALID_PIPE;
}
intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
cdclk_config.voltage_level = max(new_cdclk_state->actual.voltage_level,
old_cdclk_state->actual.voltage_level);
}
drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
intel_set_cdclk(i915, &cdclk_config, pipe);
}
/**
@ -2485,7 +2499,7 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_cdclk_state =
intel_atomic_get_new_cdclk_state(state);
enum pipe pipe = new_cdclk_state->pipe;
enum pipe pipe;
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual))
@ -2495,11 +2509,14 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
intel_cdclk_pcode_post_notify(state);
if (!new_cdclk_state->disable_pipes &&
old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) {
drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
new_cdclk_state->actual.cdclk < old_cdclk_state->actual.cdclk)
pipe = new_cdclk_state->pipe;
else
pipe = INVALID_PIPE;
intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
}
drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
}
static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)

View File

@ -468,9 +468,56 @@ static int intel_mode_vblank_start(const struct drm_display_mode *mode)
return vblank_start;
}
static void intel_crtc_vblank_evade_scanlines(struct intel_atomic_state *state,
struct intel_crtc *crtc,
int *min, int *max, int *vblank_start)
{
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_crtc_state *crtc_state;
const struct drm_display_mode *adjusted_mode;
/*
* During fastsets/etc. the transcoder is still
* running with the old timings at this point.
*
* TODO: maybe just use the active timings here?
*/
if (intel_crtc_needs_modeset(new_crtc_state))
crtc_state = new_crtc_state;
else
crtc_state = old_crtc_state;
adjusted_mode = &crtc_state->hw.adjusted_mode;
if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
if (intel_vrr_is_push_sent(crtc_state))
*vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
else
*vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
} else {
*vblank_start = intel_mode_vblank_start(adjusted_mode);
}
/* FIXME needs to be calibrated sensibly */
*min = *vblank_start - intel_usecs_to_scanlines(adjusted_mode,
VBLANK_EVASION_TIME_US);
*max = *vblank_start - 1;
/*
* M/N is double buffered on the transcoder's undelayed vblank,
* so with seamless M/N we must evade both vblanks.
*/
if (new_crtc_state->update_m_n)
*min -= adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay;
}
/**
* intel_pipe_update_start() - start update of a set of display registers
* @new_crtc_state: the new crtc state
* @state: the atomic state
* @crtc: the crtc
*
* Mark the start of an update to pipe registers that should be updated
* atomically regarding vblank. If the next vblank will happens within
@ -480,11 +527,12 @@ static int intel_mode_vblank_start(const struct drm_display_mode *mode)
* until a subsequent call to intel_pipe_update_end(). That is done to
* avoid random delays.
*/
void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state)
void intel_pipe_update_start(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *adjusted_mode = &new_crtc_state->hw.adjusted_mode;
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
long timeout = msecs_to_jiffies_timeout(1);
int scanline, min, max, vblank_start;
wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
@ -500,27 +548,7 @@ void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state)
if (intel_crtc_needs_vblank_work(new_crtc_state))
intel_crtc_vblank_work_init(new_crtc_state);
if (new_crtc_state->vrr.enable) {
if (intel_vrr_is_push_sent(new_crtc_state))
vblank_start = intel_vrr_vmin_vblank_start(new_crtc_state);
else
vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state);
} else {
vblank_start = intel_mode_vblank_start(adjusted_mode);
}
/* FIXME needs to be calibrated sensibly */
min = vblank_start - intel_usecs_to_scanlines(adjusted_mode,
VBLANK_EVASION_TIME_US);
max = vblank_start - 1;
/*
* M/N is double buffered on the transcoder's undelayed vblank,
* so with seamless M/N we must evade both vblanks.
*/
if (new_crtc_state->seamless_m_n && intel_crtc_needs_fastset(new_crtc_state))
min -= adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay;
intel_crtc_vblank_evade_scanlines(state, crtc, &min, &max, &vblank_start);
if (min <= 0 || max <= 0)
goto irq_disable;
@ -631,15 +659,18 @@ static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) {}
/**
* intel_pipe_update_end() - end update of a set of display registers
* @new_crtc_state: the new crtc state
* @state: the atomic state
* @crtc: the crtc
*
* Mark the end of an update started with intel_pipe_update_start(). This
* re-enables interrupts and verifies the update was actually completed
* before a vblank.
*/
void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
void intel_pipe_update_end(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
int scanline_end = intel_get_crtc_scanline(crtc);
u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
@ -697,15 +728,6 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
*/
intel_vrr_send_push(new_crtc_state);
/*
* Seamless M/N update may need to update frame timings.
*
* FIXME Should be synchronized with the start of vblank somehow...
*/
if (new_crtc_state->seamless_m_n && intel_crtc_needs_fastset(new_crtc_state))
intel_crtc_update_active_timings(new_crtc_state,
new_crtc_state->vrr.enable);
local_irq_enable();
if (intel_vgpu_active(dev_priv))

View File

@ -37,8 +37,10 @@ void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state);
void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state);
void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state);
void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
void intel_pipe_update_start(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_pipe_update_end(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_wait_for_vblank_workers(struct intel_atomic_state *state);
struct intel_crtc *intel_first_crtc(struct drm_i915_private *i915);
struct intel_crtc *intel_crtc_for_pipe(struct drm_i915_private *i915,

View File

@ -5215,7 +5215,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_X(lane_lat_optim_mask);
if (HAS_DOUBLE_BUFFERED_M_N(dev_priv)) {
if (!fastset || !pipe_config->seamless_m_n)
if (!fastset || !pipe_config->update_m_n)
PIPE_CONF_CHECK_M_N(dp_m_n);
} else {
PIPE_CONF_CHECK_M_N(dp_m_n);
@ -5353,7 +5353,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
if (!fastset || !pipe_config->seamless_m_n) {
if (!fastset || !pipe_config->update_m_n) {
PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock);
PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock);
}
@ -5448,6 +5448,7 @@ int intel_modeset_all_pipes(struct intel_atomic_state *state,
crtc_state->uapi.mode_changed = true;
crtc_state->update_pipe = false;
crtc_state->update_m_n = false;
ret = drm_atomic_add_affected_connectors(&state->base,
&crtc->base);
@ -5565,13 +5566,14 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta
{
struct drm_i915_private *i915 = to_i915(old_crtc_state->uapi.crtc->dev);
if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true)) {
if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
drm_dbg_kms(&i915->drm, "fastset requirement not met, forcing full modeset\n");
else
new_crtc_state->uapi.mode_changed = false;
return;
}
if (intel_crtc_needs_modeset(new_crtc_state))
new_crtc_state->update_m_n = false;
new_crtc_state->uapi.mode_changed = false;
if (!intel_crtc_needs_modeset(new_crtc_state))
new_crtc_state->update_pipe = true;
}
@ -6297,6 +6299,7 @@ int intel_atomic_check(struct drm_device *dev,
if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
new_crtc_state->uapi.mode_changed = true;
new_crtc_state->update_pipe = false;
new_crtc_state->update_m_n = false;
}
}
@ -6309,6 +6312,7 @@ int intel_atomic_check(struct drm_device *dev,
if (intel_cpu_transcoders_need_modeset(state, trans)) {
new_crtc_state->uapi.mode_changed = true;
new_crtc_state->update_pipe = false;
new_crtc_state->update_m_n = false;
}
}
@ -6316,6 +6320,7 @@ int intel_atomic_check(struct drm_device *dev,
if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes)) {
new_crtc_state->uapi.mode_changed = true;
new_crtc_state->update_pipe = false;
new_crtc_state->update_m_n = false;
}
}
}
@ -6494,7 +6499,7 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
hsw_set_linetime_wm(new_crtc_state);
if (new_crtc_state->seamless_m_n)
if (new_crtc_state->update_m_n)
intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder,
&new_crtc_state->dp_m_n);
}
@ -6533,6 +6538,8 @@ static void commit_pipe_post_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@ -6544,6 +6551,9 @@ static void commit_pipe_post_planes(struct intel_atomic_state *state,
if (DISPLAY_VER(dev_priv) >= 9 &&
!intel_crtc_needs_modeset(new_crtc_state))
skl_detach_scalers(new_crtc_state);
if (vrr_enabling(old_crtc_state, new_crtc_state))
intel_vrr_enable(new_crtc_state);
}
static void intel_enable_crtc(struct intel_atomic_state *state,
@ -6584,12 +6594,6 @@ static void intel_update_crtc(struct intel_atomic_state *state,
intel_dpt_configure(crtc);
}
if (vrr_enabling(old_crtc_state, new_crtc_state)) {
intel_vrr_enable(new_crtc_state);
intel_crtc_update_active_timings(new_crtc_state,
new_crtc_state->vrr.enable);
}
if (!modeset) {
if (new_crtc_state->preload_luts &&
intel_crtc_needs_color_update(new_crtc_state))
@ -6616,7 +6620,7 @@ static void intel_update_crtc(struct intel_atomic_state *state,
intel_crtc_planes_update_noarm(state, crtc);
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(new_crtc_state);
intel_pipe_update_start(state, crtc);
commit_pipe_pre_planes(state, crtc);
@ -6624,7 +6628,16 @@ static void intel_update_crtc(struct intel_atomic_state *state,
commit_pipe_post_planes(state, crtc);
intel_pipe_update_end(new_crtc_state);
intel_pipe_update_end(state, crtc);
/*
* VRR/Seamless M/N update may need to update frame timings.
*
* FIXME Should be synchronized with the start of vblank somehow...
*/
if (vrr_enabling(old_crtc_state, new_crtc_state) || new_crtc_state->update_m_n)
intel_crtc_update_active_timings(new_crtc_state,
new_crtc_state->vrr.enable);
/*
* We usually enable FIFO underrun interrupts as part of the

View File

@ -47,6 +47,7 @@ struct drm_printer;
#define HAS_DPT(i915) (DISPLAY_VER(i915) >= 13)
#define HAS_DSB(i915) (DISPLAY_INFO(i915)->has_dsb)
#define HAS_DSC(__i915) (DISPLAY_RUNTIME_INFO(__i915)->has_dsc)
#define HAS_DSC_MST(__i915) (DISPLAY_VER(__i915) >= 12 && HAS_DSC(__i915))
#define HAS_FBC(i915) (DISPLAY_RUNTIME_INFO(i915)->fbc_mask != 0)
#define HAS_FPGA_DBG_UNCLAIMED(i915) (DISPLAY_INFO(i915)->has_fpga_dbg)
#define HAS_FW_BLC(i915) (DISPLAY_VER(i915) > 2)

View File

@ -1084,6 +1084,7 @@ struct intel_crtc_state {
unsigned fb_bits; /* framebuffers to flip */
bool update_pipe; /* can a fast modeset be performed? */
bool update_m_n; /* update M/N seamlessly during fastset? */
bool disable_cxsr;
bool update_wm_pre, update_wm_post; /* watermarks are updated */
bool fifo_changed; /* FIFO split is changed */
@ -1196,7 +1197,6 @@ struct intel_crtc_state {
/* m2_n2 for eDP downclock */
struct intel_link_m_n dp_m2_n2;
bool has_drrs;
bool seamless_m_n;
/* PSR is supported but might not be enabled due the lack of enabled planes */
bool has_psr;

View File

@ -1310,13 +1310,14 @@ bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp)
static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
const struct intel_crtc_state *pipe_config)
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
/* On TGL, FEC is supported on all Pipes */
if (DISPLAY_VER(dev_priv) >= 12)
return true;
if (DISPLAY_VER(dev_priv) == 11 && pipe_config->cpu_transcoder != TRANSCODER_A)
if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A &&
!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
return true;
return false;
@ -2147,8 +2148,12 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
int pixel_clock;
if (has_seamless_m_n(connector))
pipe_config->seamless_m_n = true;
/*
* FIXME all joined pipes share the same transcoder.
* Need to account for that when updating M/N live.
*/
if (has_seamless_m_n(connector) && !pipe_config->bigjoiner_pipes)
pipe_config->update_m_n = true;
if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
if (intel_cpu_transcoder_has_m2_n2(i915, pipe_config->cpu_transcoder))

View File

@ -964,7 +964,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
return 0;
}
if (DISPLAY_VER(dev_priv) >= 10 &&
if (HAS_DSC_MST(dev_priv) &&
drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
/*
* TBD pass the connector BPC,

View File

@ -33,6 +33,7 @@
#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_tlb.h"
@ -110,12 +111,34 @@ static inline struct i915_vma *active_to_vma(struct i915_active *ref)
static int __i915_vma_active(struct i915_active *ref)
{
return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
struct i915_vma *vma = active_to_vma(ref);
if (!i915_vma_tryget(vma))
return -ENOENT;
/*
* Exclude global GTT VMA from holding a GT wakeref
* while active, otherwise GPU never goes idle.
*/
if (!i915_vma_is_ggtt(vma))
intel_gt_pm_get(vma->vm->gt);
return 0;
}
static void __i915_vma_retire(struct i915_active *ref)
{
i915_vma_put(active_to_vma(ref));
struct i915_vma *vma = active_to_vma(ref);
if (!i915_vma_is_ggtt(vma)) {
/*
* Since we can be called from atomic contexts,
* use an async variant of intel_gt_pm_put().
*/
intel_gt_pm_put_async(vma->vm->gt);
}
i915_vma_put(vma);
}
static struct i915_vma *
@ -1413,7 +1436,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
struct i915_vma_work *work = NULL;
struct dma_fence *moving = NULL;
struct i915_vma_resource *vma_res = NULL;
intel_wakeref_t wakeref = 0;
intel_wakeref_t wakeref;
unsigned int bound;
int err;
@ -1433,8 +1456,14 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
if (err)
return err;
if (flags & PIN_GLOBAL)
wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
/*
* In case of a global GTT, we must hold a runtime-pm wakeref
* while global PTEs are updated. In other cases, we hold
* the rpm reference while the VMA is active. Since runtime
* resume may require allocations, which are forbidden inside
* vm->mutex, get the first rpm wakeref outside of the mutex.
*/
wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
if (flags & vma->vm->bind_async_flags) {
/* lock VM */
@ -1570,8 +1599,7 @@ err_fence:
if (work)
dma_fence_work_commit_imm(&work->base);
err_rpm:
if (wakeref)
intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
if (moving)
dma_fence_put(moving);

View File

@ -8,4 +8,8 @@
#define struct_size(p, member, n) \
(sizeof(*(p)) + ((n) * (sizeof(*(p)->member))))
#if defined(__clang__) || (defined(__GNUC__) && __GNUC__ >= 5)
#define check_add_overflow(x, y, sum) __builtin_add_overflow(x, y, sum)
#endif
#endif
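Not part of the diff: a small sketch of what the compat macro above expands to when the GCC/Clang builtin is available, mirroring the saddr/size wrap check that amdgpu_vm_verify_parameters() performs. The values are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Same definition as the compat header when the builtin exists. */
#define check_add_overflow(x, y, sum) __builtin_add_overflow(x, y, sum)

int
main(void)
{
	uint64_t saddr = UINT64_MAX - 4096, size = 8192, end;

	/* Returns non-zero when the addition wraps around. */
	if (check_add_overflow(saddr, size, &end))
		printf("overflow detected, mapping rejected\n");
	else
		printf("end = %llu\n", (unsigned long long)end);
	return 0;
}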

View File

@ -926,8 +926,12 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;
for (i = 0; i < max_device; i++) {
ATOM_CONNECTOR_INFO_I2C ci =
supported_devices->info.asConnInfo[i];
ATOM_CONNECTOR_INFO_I2C ci;
if (frev > 1)
ci = supported_devices->info_2d1.asConnInfo[i];
else
ci = supported_devices->info.asConnInfo[i];
bios_connectors[i].valid = false;

View File

@ -1,4 +1,4 @@
# $OpenBSD: Makefile,v 1.14 2024/04/27 15:05:55 jmc Exp $
# $OpenBSD: Makefile,v 1.15 2024/04/29 13:23:19 naddy Exp $
PROG= tput
SRCS= clear_cmd.c reset_cmd.c tparm_type.c tput.c transform.c \
@ -11,7 +11,7 @@ TIC= ${.CURDIR}/../tic
CFLAGS+= -I${CURSES} -I${TIC} -I${.CURDIR} -I.
.PATH: ${TIC}
CLEANFILES+= termsort.h
MAN+= clear.1
MAN= tput.1 clear.1
termsort.h: ${TIC}/MKtermsort.sh
sh ${TIC}/MKtermsort.sh awk ${CURSES}/Caps > ${.TARGET}

View File

@ -1,4 +1,4 @@
/* $OpenBSD: server_file.c,v 1.79 2024/04/16 17:15:50 florian Exp $ */
/* $OpenBSD: server_file.c,v 1.80 2024/04/29 16:17:46 florian Exp $ */
/*
* Copyright (c) 2006 - 2017 Reyk Floeter <reyk@openbsd.org>
@ -287,6 +287,7 @@ server_file_request(struct httpd *env, struct client *clt, struct media_type
if ((ret = server_response_http(clt, ret, media, -1,
MINIMUM(time(NULL), st->st_mtim.tv_sec))) == -1)
goto fail;
close(fd);
goto done;
}

View File

@ -1,4 +1,4 @@
/* $OpenBSD: vm.c,v 1.99 2024/04/09 21:55:16 dv Exp $ */
/* $OpenBSD: vm.c,v 1.100 2024/04/29 14:47:06 dv Exp $ */
/*
* Copyright (c) 2015 Mike Larkin <mlarkin@openbsd.org>
@ -1538,7 +1538,6 @@ vcpu_run_loop(void *arg)
intptr_t ret = 0;
uint32_t n;
vrp->vrp_continue = 0;
n = vrp->vrp_vcpu_id;
for (;;) {
@ -1917,8 +1916,6 @@ vcpu_exit(struct vm_run_params *vrp)
__progname, vrp->vrp_exit_reason);
}
vrp->vrp_continue = 1;
return (0);
}