sync code with last improvements from OpenBSD

This commit is contained in:
purplerain 2023-09-13 16:41:13 +00:00
parent 256236394b
commit 6b03483410
Signed by: purplerain
GPG Key ID: F42C07F07E2E35B7
31 changed files with 409 additions and 280 deletions

View File

@ -1,9 +1,26 @@
.\" $OpenBSD: EVP_PKEY_CTX_ctrl.3,v 1.22 2019/11/01 13:53:25 schwarze Exp $ .\" $OpenBSD: EVP_PKEY_CTX_ctrl.3,v 1.23 2023/09/13 13:32:01 schwarze Exp $
.\" full merge up to: OpenSSL 99d63d46 Oct 26 13:56:48 2016 -0400 .\" full merge up to: OpenSSL 99d63d46 Oct 26 13:56:48 2016 -0400
.\" selective merge up to: OpenSSL df75c2bf Dec 9 01:02:36 2018 +0100 .\" selective merge up to: OpenSSL 24a535ea Sep 22 13:14:20 2020 +0100
.\" Parts were split out into RSA_pkey_ctx_ctrl(3). .\" Parts were split out into RSA_pkey_ctx_ctrl(3).
.\" .\"
.\" This file was written by Dr. Stephen Henson <steve@openssl.org> .\" This file is a derived work.
.\" The changes are covered by the following Copyright and license:
.\"
.\" Copyright (c) 2019, 2023 Ingo Schwarze <schwarze@openbsd.org>
.\"
.\" Permission to use, copy, modify, and distribute this software for any
.\" purpose with or without fee is hereby granted, provided that the above
.\" copyright notice and this permission notice appear in all copies.
.\"
.\" THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
.\" WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
.\" MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
.\" ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
.\" WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
.\" ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
.\" OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
.\"
.\" The original file was written by Dr. Stephen Henson <steve@openssl.org>
.\" and Antoine Salon <asalon@vmware.com>. .\" and Antoine Salon <asalon@vmware.com>.
.\" Copyright (c) 2006, 2009, 2013, 2014, 2015, 2018 The OpenSSL Project. .\" Copyright (c) 2006, 2009, 2013, 2014, 2015, 2018 The OpenSSL Project.
.\" All rights reserved. .\" All rights reserved.
@ -52,7 +69,7 @@
.\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
.\" OF THE POSSIBILITY OF SUCH DAMAGE. .\" OF THE POSSIBILITY OF SUCH DAMAGE.
.\" .\"
.Dd $Mdocdate: November 1 2019 $ .Dd $Mdocdate: September 13 2023 $
.Dt EVP_PKEY_CTX_CTRL 3 .Dt EVP_PKEY_CTX_CTRL 3
.Os .Os
.Sh NAME .Sh NAME
@ -254,6 +271,20 @@ If the key is of the type
.Dv EVP_PKEY_RSA_PSS .Dv EVP_PKEY_RSA_PSS
and has usage restrictions, an error occurs if an attempt is made and has usage restrictions, an error occurs if an attempt is made
to set the digest to anything other than the restricted value. to set the digest to anything other than the restricted value.
.Pp
These two macros expand to
.Fn EVP_PKEY_CTX_ctrl
with an
.Fa optype
of
.Dv EVP_PKEY_OP_TYPE_SIG
and the following command arguments:
.Pp
.Bl -column -compact EVP_PKEY_CTRL_GET_MD EVP_PKEY_CTX_get_signature_md()
.It Fa cmd No constant Ta corresponding macro
.It Dv EVP_PKEY_CTRL_MD Ta Fn EVP_PKEY_CTX_set_signature_md
.It Dv EVP_PKEY_CTRL_GET_MD Ta Fn EVP_PKEY_CTX_get_signature_md
.El
.Ss DSA parameters .Ss DSA parameters
The macro The macro
.Fn EVP_PKEY_CTX_set_dsa_paramgen_bits .Fn EVP_PKEY_CTX_set_dsa_paramgen_bits

View File

@ -1,4 +1,4 @@
.\" $OpenBSD: EVP_PKEY_CTX_set_hkdf_md.3,v 1.2 2022/05/06 10:10:10 tb Exp $ .\" $OpenBSD: EVP_PKEY_CTX_set_hkdf_md.3,v 1.3 2023/09/13 13:46:52 schwarze Exp $
.\" full merge up to: OpenSSL 1cb7eff4 Sep 10 13:56:40 2019 +0100 .\" full merge up to: OpenSSL 1cb7eff4 Sep 10 13:56:40 2019 +0100
.\" .\"
.\" This file was written by Alessandro Ghedini <alessandro@ghedini.me>, .\" This file was written by Alessandro Ghedini <alessandro@ghedini.me>,
@ -49,7 +49,7 @@
.\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
.\" OF THE POSSIBILITY OF SUCH DAMAGE. .\" OF THE POSSIBILITY OF SUCH DAMAGE.
.\" .\"
.Dd $Mdocdate: May 6 2022 $ .Dd $Mdocdate: September 13 2023 $
.Dt EVP_PKEY_CTX_SET_HKDF_MD 3 .Dt EVP_PKEY_CTX_SET_HKDF_MD 3
.Os .Os
.Sh NAME .Sh NAME
@ -90,7 +90,9 @@
.Fa "int infolen" .Fa "int infolen"
.Fc .Fc
.Sh DESCRIPTION .Sh DESCRIPTION
The EVP_PKEY_HKDF algorithm implements the HKDF key derivation function. The
.Dv EVP_PKEY_HKDF
algorithm implements the HKDF key derivation function.
HKDF follows the "extract-then-expand" paradigm, where the KDF logically HKDF follows the "extract-then-expand" paradigm, where the KDF logically
consists of two modules. consists of two modules.
The first stage takes the input keying material and "extracts" from it a The first stage takes the input keying material and "extracts" from it a
@ -106,7 +108,9 @@ There are three modes that are currently defined:
This is the default mode. This is the default mode.
Calling Calling
.Xr EVP_PKEY_derive 3 .Xr EVP_PKEY_derive 3
on an EVP_PKEY_CTX set up for HKDF will perform an extract followed by on an
.Vt EVP_PKEY_CTX
set up for HKDF will perform an extract followed by
an expand operation in one go. an expand operation in one go.
The derived key returned will be the result after the expand operation. The derived key returned will be the result after the expand operation.
The intermediate fixed-length pseudorandom key K is not returned. The intermediate fixed-length pseudorandom key K is not returned.

View File

@ -1,4 +1,4 @@
.\" $OpenBSD: EVP_PKEY_asn1_get_count.3,v 1.6 2023/08/27 13:23:12 schwarze Exp $ .\" $OpenBSD: EVP_PKEY_asn1_get_count.3,v 1.7 2023/09/13 13:55:50 schwarze Exp $
.\" full merge up to: OpenSSL 72a7a702 Feb 26 14:05:09 2019 +0000 .\" full merge up to: OpenSSL 72a7a702 Feb 26 14:05:09 2019 +0000
.\" .\"
.\" This file is a derived work. .\" This file is a derived work.
@ -65,7 +65,7 @@
.\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
.\" OF THE POSSIBILITY OF SUCH DAMAGE. .\" OF THE POSSIBILITY OF SUCH DAMAGE.
.\" .\"
.Dd $Mdocdate: August 27 2023 $ .Dd $Mdocdate: September 13 2023 $
.Dt EVP_PKEY_ASN1_GET_COUNT 3 .Dt EVP_PKEY_ASN1_GET_COUNT 3
.Os .Os
.Sh NAME .Sh NAME
@ -169,9 +169,55 @@ retrieves the public key ID as returned by
the base public key ID as returned by the base public key ID as returned by
.Xr EVP_PKEY_base_id 3 .Xr EVP_PKEY_base_id 3
.Pq both NIDs , .Pq both NIDs ,
any flags, the method description, any flags, and internal pointers owned by
and the PEM type string associated with .Fa ameth
.Fa ameth . pointing to its method description string and its PEM type string.
.Pp
The following flags bits can occur, OR'ed together in
.Pf * Fa ppkey_flags :
.Bl -tag -width Ds
.It Dv ASN1_PKEY_ALIAS
This
.Fa ameth
object serves as an alias for another
.Vt EVP_PKEY_ASN1_METHOD
object and will never be returned from
.Fn EVP_PKEY_asn1_find
or
.Fn EVP_PKEY_asn1_find_str .
It is either an alias built into the library, or it was created with
.Xr EVP_PKEY_asn1_add_alias 3 .
.It Dv ASN1_PKEY_DYNAMIC
This
.Fa ameth
object is marked as dynamically allocated.
If this flag is set,
.Xr EVP_PKEY_asn1_free 3
can free
.Fa ameth ;
otherwise,
.Xr EVP_PKEY_asn1_free 3
has no effect on it.
.It Dv ASN1_PKEY_SIGPARAM_NULL
If the signing
.Fa ctx
uses an
.Vt EVP_PKEY
private key associated with this
.Fa ameth ,
instruct
.Xr ASN1_item_sign_ctx 3
to use a parameter type of
.Dv V_ASN1_NULL
instead of the default
.Dv V_ASN1_UNDEF
when encoding the ASN.1
.Vt AlgorithmIdentifier
objects with
.Xr X509_ALGOR_set0 3 .
In particular, this is used for
.Dv EVP_PKEY_RSA .
.El
.Pp .Pp
.Fn EVP_PKEY_asn1_get_count , .Fn EVP_PKEY_asn1_get_count ,
.Fn EVP_PKEY_asn1_get0 , .Fn EVP_PKEY_asn1_get0 ,

View File

@ -1,8 +1,25 @@
.\" $OpenBSD: EVP_PKEY_asn1_new.3,v 1.9 2023/08/27 13:15:29 schwarze Exp $ .\" $OpenBSD: EVP_PKEY_asn1_new.3,v 1.10 2023/09/13 14:18:21 schwarze Exp $
.\" selective merge up to: .\" selective merge up to:
.\" OpenSSL man3/EVP_PKEY_ASN1_METHOD b0004708 Nov 1 00:45:24 2017 +0800 .\" OpenSSL man3/EVP_PKEY_ASN1_METHOD b0004708 Nov 1 00:45:24 2017 +0800
.\" .\"
.\" This file was written by Richard Levitte <levitte@openssl.org> .\" This file is a derived work.
.\" The changes are covered by the following Copyright and license:
.\"
.\" Copyright (c) 2023 Ingo Schwarze <schwarze@openbsd.org>
.\"
.\" Permission to use, copy, modify, and distribute this software for any
.\" purpose with or without fee is hereby granted, provided that the above
.\" copyright notice and this permission notice appear in all copies.
.\"
.\" THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
.\" WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
.\" MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
.\" ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
.\" WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
.\" ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
.\" OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
.\"
.\" The original file was written by Richard Levitte <levitte@openssl.org>
.\" and Paul Yang <yang.yang@baishancloud.com>. .\" and Paul Yang <yang.yang@baishancloud.com>.
.\" Copyright (c) 2017 The OpenSSL Project. All rights reserved. .\" Copyright (c) 2017 The OpenSSL Project. All rights reserved.
.\" .\"
@ -50,7 +67,7 @@
.\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
.\" OF THE POSSIBILITY OF SUCH DAMAGE. .\" OF THE POSSIBILITY OF SUCH DAMAGE.
.\" .\"
.Dd $Mdocdate: August 27 2023 $ .Dd $Mdocdate: September 13 2023 $
.Dt EVP_PKEY_ASN1_NEW 3 .Dt EVP_PKEY_ASN1_NEW 3
.Os .Os
.Sh NAME .Sh NAME
@ -345,11 +362,34 @@ and
.Ed .Ed
.Pp .Pp
Add extra algorithm specific control. Add extra algorithm specific control.
This method is called by .Pp
.Xr EVP_PKEY_get_default_digest_nid 3 , If the
.Fa op
argument is
.Dv ASN1_PKEY_CTRL_DEFAULT_MD_NID ,
the
.Fa pkey_ctrl
method is supposed to write the message digest NID
for public key signature operations with the given
.Fa pkey
to
.Pf * Fa arg2
as documented in the
.Xr EVP_PKEY_get_default_digest_nid 3
manual page.
.Pp
The
.Fa pkey_ctrl
method is also called by
.Fn PKCS7_SIGNER_INFO_set , .Fn PKCS7_SIGNER_INFO_set ,
.Fn PKCS7_RECIP_INFO_set , .Fn PKCS7_RECIP_INFO_set ,
and other functions. and other functions.
.\" TODO:
.\" ASN1_PKEY_CTRL_CMS_ENVELOPE in cms_env.c rsa_ameth.c
.\" ASN1_PKEY_CTRL_CMS_RI_TYPE in cms_env.c dsa_ameth.c ec_ameth.c rsa_ameth.c
.\" ASN1_PKEY_CTRL_CMS_SIGN in cms_sd.c dsa_ameth.c ec_ameth.c rsa_ameth.c
.\" ASN1_PKEY_CTRL_PKCS7_ENCRYPT in pk7_lib.c rsa_ameth.c
.\" ASN1_PKEY_CTRL_PKCS7_SIGN in pk7_lib.c dsa_ameth.c ec_ameth.c rsa_ameth.c
.Bd -unfilled .Bd -unfilled
.Ft int Fn (*pkey_check) "const EVP_PKEY *pk" .Ft int Fn (*pkey_check) "const EVP_PKEY *pk"
.Ft int Fn (*pkey_public_check) "const EVP_PKEY *pk" .Ft int Fn (*pkey_public_check) "const EVP_PKEY *pk"
@ -378,7 +418,7 @@ It is called by
.Fn EVP_PKEY_asn1_new .Fn EVP_PKEY_asn1_new
creates and returns a new creates and returns a new
.Vt EVP_PKEY_ASN1_METHOD .Vt EVP_PKEY_ASN1_METHOD
object, and associates the given object, marks it as dynamically allocated, and associates the given
.Fa id , .Fa id ,
.Fa flags , .Fa flags ,
.Fa pem_str .Fa pem_str
@ -404,20 +444,28 @@ See
for more information. for more information.
.Pp .Pp
.Fn EVP_PKEY_asn1_copy .Fn EVP_PKEY_asn1_copy
copies an copies all function pointers from
.Vt EVP_PKEY_ASN1_METHOD
object from
.Fa src .Fa src
to to
.Fa dst . .Fa dst .
The data in
.Fa dst
that can be set with
.Fn EVP_PKEY_asn1_new
\(em NIDs, flags, and strings \(em
remains unchanged.
This function is not thread safe, it is recommended to only use this when This function is not thread safe, it is recommended to only use this when
initializing the application. initializing the application.
.Pp .Pp
.Fn EVP_PKEY_asn1_free .Fn EVP_PKEY_asn1_free
frees an existing frees the dynamically allocated
.Vt EVP_PKEY_ASN1_METHOD .Fa ameth
pointed by including all memory it refers to.
.Fa ameth . If
.Fa ameth
is
.Dv NULL
or not marked as dynamically allocated, no action occurs.
.Pp .Pp
.Fn EVP_PKEY_asn1_add0 .Fn EVP_PKEY_asn1_add0
adds adds
@ -456,11 +504,11 @@ set the diverse methods of the given
object. object.
.Sh RETURN VALUES .Sh RETURN VALUES
.Fn EVP_PKEY_asn1_new .Fn EVP_PKEY_asn1_new
returns a pointer to an returns a pointer to the new
.Vt EVP_PKEY_ASN1_METHOD .Vt EVP_PKEY_ASN1_METHOD
object or object or
.Dv NULL .Dv NULL
on error. if memory allocation fails.
.Pp .Pp
.Fn EVP_PKEY_asn1_add0 .Fn EVP_PKEY_asn1_add0
and and

View File

@ -1,8 +1,26 @@
.\" $OpenBSD: EVP_PKEY_get_default_digest_nid.3,v 1.5 2019/06/06 01:06:58 schwarze Exp $ .\" $OpenBSD: EVP_PKEY_get_default_digest_nid.3,v 1.7 2023/09/13 14:57:21 schwarze Exp $
.\" OpenSSL 99d63d46 Oct 26 13:56:48 2016 -0400 .\" full merge up to: OpenSSL df75c2bf Dec 9 01:02:36 2018 +0100
.\" .\"
.\" This file was written by Dr. Stephen Henson <steve@openssl.org>. .\" This file is a derived work.
.\" Copyright (c) 2006, 2009, 2013 The OpenSSL Project. All rights reserved. .\" The changes are covered by the following Copyright and license:
.\"
.\" Copyright (c) 2023 Ingo Schwarze <schwarze@openbsd.org>
.\"
.\" Permission to use, copy, modify, and distribute this software for any
.\" purpose with or without fee is hereby granted, provided that the above
.\" copyright notice and this permission notice appear in all copies.
.\"
.\" THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
.\" WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
.\" MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
.\" ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
.\" WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
.\" ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
.\" OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
.\"
.\" The original file was written by Dr. Stephen Henson <steve@openssl.org>.
.\" Copyright (c) 2006, 2009, 2013, 2018 The OpenSSL Project.
.\" All rights reserved.
.\" .\"
.\" Redistribution and use in source and binary forms, with or without .\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions .\" modification, are permitted provided that the following conditions
@ -48,7 +66,7 @@
.\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
.\" OF THE POSSIBILITY OF SUCH DAMAGE. .\" OF THE POSSIBILITY OF SUCH DAMAGE.
.\" .\"
.Dd $Mdocdate: June 6 2019 $ .Dd $Mdocdate: September 13 2023 $
.Dt EVP_PKEY_GET_DEFAULT_DIGEST_NID 3 .Dt EVP_PKEY_GET_DEFAULT_DIGEST_NID 3
.Os .Os
.Sh NAME .Sh NAME
@ -65,12 +83,31 @@
The The
.Fn EVP_PKEY_get_default_digest_nid .Fn EVP_PKEY_get_default_digest_nid
function sets function sets
.Fa pnid .Pf * Fa pnid
to the default message digest NID for the public key signature to the default message digest NID for the public key signature
operations associated with key operations associated with
.Fa pkey . .Fa pkey .
.Pp .Pp
For all current standard OpenSSL public key algorithms, SHA1 is returned. Some signature algorithms, for example
.Dv EVP_PKEY_ED25519 ,
do not use a digest during signing.
In this case,
.Pf * Fa pnid
is set to
.Dv NID_undef .
.Pp
Support for the following public key algorithms is built into the library:
.Pp
.Bl -column -compact EVP_PKEY_base_id(3) NID_id_Gost28147_89_MAC mandatory
.It Xr EVP_PKEY_base_id 3 Ta Pf * Fa pnid Ta return value
.It Dv EVP_PKEY_DSA Ta Dv NID_sha1 Ta mandatory
.It Dv EVP_PKEY_EC Ta Dv NID_sha1 Ta mandatory
.It Dv EVP_PKEY_ED25519 Ta Dv NID_undef Ta mandatory
.It Dv EVP_PKEY_GOSTIMIT Ta Dv NID_id_Gost28147_89_MAC Ta mandatory
.It Dv EVP_PKEY_GOSTR01 Ta variable Ta mandatory
.It Dv EVP_PKEY_HMAC Ta Dv NID_sha1 Ta advisory
.It Dv EVP_PKEY_RSA Ta Dv NID_sha256 Ta advisory
.El
.Sh RETURN VALUES .Sh RETURN VALUES
The The
.Fn EVP_PKEY_get_default_digest_nid .Fn EVP_PKEY_get_default_digest_nid

View File

@ -1574,17 +1574,8 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
u16 bridge_cfg2, gpu_cfg2; u16 bridge_cfg2, gpu_cfg2;
u32 max_lw, current_lw, tmp; u32 max_lw, current_lw, tmp;
pcie_capability_read_word(root, PCI_EXP_LNKCTL, pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
&bridge_cfg); pcie_capability_set_word(adev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL,
&gpu_cfg);
tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL,
tmp16);
tmp = RREG32_PCIE(ixPCIE_LC_STATUS1); tmp = RREG32_PCIE(ixPCIE_LC_STATUS1);
max_lw = (tmp & PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK) >> max_lw = (tmp & PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK) >>
@ -1637,21 +1628,14 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
drm_msleep(100); drm_msleep(100);
/* linkctl */ /* linkctl */
pcie_capability_read_word(root, PCI_EXP_LNKCTL, pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL,
&tmp16); PCI_EXP_LNKCTL_HAWD,
tmp16 &= ~PCI_EXP_LNKCTL_HAWD; bridge_cfg &
tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD); PCI_EXP_LNKCTL_HAWD);
pcie_capability_write_word(root, PCI_EXP_LNKCTL, pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL,
tmp16); PCI_EXP_LNKCTL_HAWD,
gpu_cfg &
pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL_HAWD);
PCI_EXP_LNKCTL,
&tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
pcie_capability_write_word(adev->pdev,
PCI_EXP_LNKCTL,
tmp16);
/* linkctl2 */ /* linkctl2 */
pcie_capability_read_word(root, PCI_EXP_LNKCTL2, pcie_capability_read_word(root, PCI_EXP_LNKCTL2,

View File

@ -1229,6 +1229,9 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
u16 cmd; u16 cmd;
int r; int r;
if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
return 0;
/* Bypass for VF */ /* Bypass for VF */
if (amdgpu_sriov_vf(adev)) if (amdgpu_sriov_vf(adev))
return 0; return 0;

View File

@ -558,6 +558,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
crtc = (struct drm_crtc *)minfo->crtcs[i]; crtc = (struct drm_crtc *)minfo->crtcs[i];
if (crtc && crtc->base.id == info->mode_crtc.id) { if (crtc && crtc->base.id == info->mode_crtc.id) {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
ui32 = amdgpu_crtc->crtc_id; ui32 = amdgpu_crtc->crtc_id;
found = 1; found = 1;
break; break;
@ -576,7 +577,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (ret) if (ret)
return ret; return ret;
ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip))); ret = copy_to_user(out, &ip, min_t(size_t, size, sizeof(ip)));
return ret ? -EFAULT : 0; return ret ? -EFAULT : 0;
} }
case AMDGPU_INFO_HW_IP_COUNT: { case AMDGPU_INFO_HW_IP_COUNT: {
@ -722,17 +723,18 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
? -EFAULT : 0; ? -EFAULT : 0;
} }
case AMDGPU_INFO_READ_MMR_REG: { case AMDGPU_INFO_READ_MMR_REG: {
unsigned n, alloc_size; unsigned int n, alloc_size;
uint32_t *regs; uint32_t *regs;
unsigned se_num = (info->read_mmr_reg.instance >> unsigned int se_num = (info->read_mmr_reg.instance >>
AMDGPU_INFO_MMR_SE_INDEX_SHIFT) & AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
AMDGPU_INFO_MMR_SE_INDEX_MASK; AMDGPU_INFO_MMR_SE_INDEX_MASK;
unsigned sh_num = (info->read_mmr_reg.instance >> unsigned int sh_num = (info->read_mmr_reg.instance >>
AMDGPU_INFO_MMR_SH_INDEX_SHIFT) & AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
AMDGPU_INFO_MMR_SH_INDEX_MASK; AMDGPU_INFO_MMR_SH_INDEX_MASK;
/* set full masks if the userspace set all bits /* set full masks if the userspace set all bits
* in the bitfields */ * in the bitfields
*/
if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
se_num = 0xffffffff; se_num = 0xffffffff;
else if (se_num >= AMDGPU_GFX_MAX_SE) else if (se_num >= AMDGPU_GFX_MAX_SE)
@ -856,7 +858,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return ret; return ret;
} }
case AMDGPU_INFO_VCE_CLOCK_TABLE: { case AMDGPU_INFO_VCE_CLOCK_TABLE: {
unsigned i; unsigned int i;
struct drm_amdgpu_info_vce_clock_table vce_clk_table = {}; struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
struct amd_vce_state *vce_state; struct amd_vce_state *vce_state;

View File

@ -2276,17 +2276,8 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
u16 bridge_cfg2, gpu_cfg2; u16 bridge_cfg2, gpu_cfg2;
u32 max_lw, current_lw, tmp; u32 max_lw, current_lw, tmp;
pcie_capability_read_word(root, PCI_EXP_LNKCTL, pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
&bridge_cfg); pcie_capability_set_word(adev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL,
&gpu_cfg);
tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL,
tmp16);
tmp = RREG32_PCIE(PCIE_LC_STATUS1); tmp = RREG32_PCIE(PCIE_LC_STATUS1);
max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT; max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
@ -2331,21 +2322,14 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
mdelay(100); mdelay(100);
pcie_capability_read_word(root, PCI_EXP_LNKCTL, pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL,
&tmp16); PCI_EXP_LNKCTL_HAWD,
tmp16 &= ~PCI_EXP_LNKCTL_HAWD; bridge_cfg &
tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD); PCI_EXP_LNKCTL_HAWD);
pcie_capability_write_word(root, PCI_EXP_LNKCTL, pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL,
tmp16); PCI_EXP_LNKCTL_HAWD,
gpu_cfg &
pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL_HAWD);
PCI_EXP_LNKCTL,
&tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
pcie_capability_write_word(adev->pdev,
PCI_EXP_LNKCTL,
tmp16);
pcie_capability_read_word(root, PCI_EXP_LNKCTL2, pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
&tmp16); &tmp16);

View File

@ -147,14 +147,15 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
int ret; int ret;
int retry_loop; int retry_loop;
/* Wait for bootloader to signify that it is ready having bit 31 of
* C2PMSG_35 set to 1. All other bits are expected to be cleared.
* If there is an error in processing command, bits[7:0] will be set.
* This is applicable for PSP v13.0.6 and newer.
*/
for (retry_loop = 0; retry_loop < 10; retry_loop++) { for (retry_loop = 0; retry_loop < 10; retry_loop++) {
/* Wait for bootloader to signify that is ret = psp_wait_for(
ready having bit 31 of C2PMSG_35 set to 1 */ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
ret = psp_wait_for(psp, 0x80000000, 0xffffffff, false);
SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
0x80000000,
0x80000000,
false);
if (ret == 0) if (ret == 0)
return 0; return 0;

View File

@ -5923,8 +5923,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
*/ */
DRM_DEBUG_DRIVER("No preferred mode found\n"); DRM_DEBUG_DRIVER("No preferred mode found\n");
} else { } else {
recalculate_timing = amdgpu_freesync_vid_mode && recalculate_timing = is_freesync_video_mode(&mode, aconnector);
is_freesync_video_mode(&mode, aconnector);
if (recalculate_timing) { if (recalculate_timing) {
freesync_mode = get_highest_refresh_rate_mode(aconnector, false); freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
drm_mode_copy(&saved_mode, &mode); drm_mode_copy(&saved_mode, &mode);
@ -7018,7 +7017,7 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect
struct amdgpu_dm_connector *amdgpu_dm_connector = struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector); to_amdgpu_dm_connector(connector);
if (!(amdgpu_freesync_vid_mode && edid)) if (!edid)
return; return;
if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
@ -7863,10 +7862,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* fast updates. * fast updates.
*/ */
if (crtc->state->async_flip && if (crtc->state->async_flip &&
acrtc_state->update_type != UPDATE_TYPE_FAST) (acrtc_state->update_type != UPDATE_TYPE_FAST ||
get_mem_type(old_plane_state->fb) != get_mem_type(fb)))
drm_warn_once(state->dev, drm_warn_once(state->dev,
"[PLANE:%d:%s] async flip with non-fast update\n", "[PLANE:%d:%s] async flip with non-fast update\n",
plane->base.id, plane->name); plane->base.id, plane->name);
bundle->flip_addrs[planes_count].flip_immediate = bundle->flip_addrs[planes_count].flip_immediate =
crtc->state->async_flip && crtc->state->async_flip &&
acrtc_state->update_type == UPDATE_TYPE_FAST && acrtc_state->update_type == UPDATE_TYPE_FAST &&
@ -9026,8 +9027,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
* TODO: Refactor this function to allow this check to work * TODO: Refactor this function to allow this check to work
* in all conditions. * in all conditions.
*/ */
if (amdgpu_freesync_vid_mode && if (dm_new_crtc_state->stream &&
dm_new_crtc_state->stream &&
is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
goto skip_modeset; goto skip_modeset;
@ -9067,7 +9067,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
} }
/* Now check if we should set freesync video mode */ /* Now check if we should set freesync video mode */
if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream && if (dm_new_crtc_state->stream &&
dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) && dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) &&
is_timing_unchanged_for_freesync(new_crtc_state, is_timing_unchanged_for_freesync(new_crtc_state,
@ -9080,7 +9080,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
set_freesync_fixed_config(dm_new_crtc_state); set_freesync_fixed_config(dm_new_crtc_state);
goto skip_modeset; goto skip_modeset;
} else if (amdgpu_freesync_vid_mode && aconnector && } else if (aconnector &&
is_freesync_video_mode(&new_crtc_state->mode, is_freesync_video_mode(&new_crtc_state->mode,
aconnector)) { aconnector)) {
struct drm_display_mode *high_mode; struct drm_display_mode *high_mode;
@ -9819,6 +9819,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
/* Remove exiting planes if they are modified */ /* Remove exiting planes if they are modified */
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
if (old_plane_state->fb && new_plane_state->fb &&
get_mem_type(old_plane_state->fb) !=
get_mem_type(new_plane_state->fb))
lock_and_validation_needed = true;
ret = dm_update_plane_state(dc, state, plane, ret = dm_update_plane_state(dc, state, plane,
old_plane_state, old_plane_state,
new_plane_state, new_plane_state,
@ -10070,9 +10075,20 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct dm_crtc_state *dm_new_crtc_state = struct dm_crtc_state *dm_new_crtc_state =
to_dm_crtc_state(new_crtc_state); to_dm_crtc_state(new_crtc_state);
/*
* Only allow async flips for fast updates that don't change
* the FB pitch, the DCC state, rotation, etc.
*/
if (new_crtc_state->async_flip && lock_and_validation_needed) {
drm_dbg_atomic(crtc->dev,
"[CRTC:%d:%s] async flips are only supported for fast updates\n",
crtc->base.id, crtc->name);
ret = -EINVAL;
goto fail;
}
dm_new_crtc_state->update_type = lock_and_validation_needed ? dm_new_crtc_state->update_type = lock_and_validation_needed ?
UPDATE_TYPE_FULL : UPDATE_TYPE_FULL : UPDATE_TYPE_FAST;
UPDATE_TYPE_FAST;
} }
/* Must be success */ /* Must be success */

View File

@ -406,18 +406,6 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
return -EINVAL; return -EINVAL;
} }
/*
* Only allow async flips for fast updates that don't change the FB
* pitch, the DCC state, rotation, etc.
*/
if (crtc_state->async_flip &&
dm_crtc_state->update_type != UPDATE_TYPE_FAST) {
drm_dbg_atomic(crtc->dev,
"[CRTC:%d:%s] async flips are only supported for fast updates\n",
crtc->base.id, crtc->name);
return -EINVAL;
}
/* In some use cases, like reset, no stream is attached */ /* In some use cases, like reset, no stream is attached */
if (!dm_crtc_state->stream) if (!dm_crtc_state->stream)
return 0; return 0;

View File

@ -32,6 +32,7 @@
#define MAX_INSTANCE 6 #define MAX_INSTANCE 6
#define MAX_SEGMENT 6 #define MAX_SEGMENT 6
#define SMU_REGISTER_WRITE_RETRY_COUNT 5
struct IP_BASE_INSTANCE struct IP_BASE_INSTANCE
{ {
@ -134,6 +135,8 @@ static int dcn315_smu_send_msg_with_param(
unsigned int msg_id, unsigned int param) unsigned int msg_id, unsigned int param)
{ {
uint32_t result; uint32_t result;
uint32_t i = 0;
uint32_t read_back_data;
result = dcn315_smu_wait_for_response(clk_mgr, 10, 200000); result = dcn315_smu_wait_for_response(clk_mgr, 10, 200000);
@ -150,10 +153,19 @@ static int dcn315_smu_send_msg_with_param(
/* Set the parameter register for the SMU message, unit is Mhz */ /* Set the parameter register for the SMU message, unit is Mhz */
REG_WRITE(MP1_SMN_C2PMSG_37, param); REG_WRITE(MP1_SMN_C2PMSG_37, param);
/* Trigger the message transaction by writing the message ID */ for (i = 0; i < SMU_REGISTER_WRITE_RETRY_COUNT; i++) {
generic_write_indirect_reg(CTX, /* Trigger the message transaction by writing the message ID */
REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA), generic_write_indirect_reg(CTX,
mmMP1_C2PMSG_3, msg_id); REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
mmMP1_C2PMSG_3, msg_id);
read_back_data = generic_read_indirect_reg(CTX,
REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
mmMP1_C2PMSG_3);
if (read_back_data == msg_id)
break;
udelay(2);
smu_print("SMU msg id write fail %x times. \n", i + 1);
}
result = dcn315_smu_wait_for_response(clk_mgr, 10, 200000); result = dcn315_smu_wait_for_response(clk_mgr, 10, 200000);

View File

@ -1813,10 +1813,13 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
hws->funcs.edp_backlight_control(edp_link_with_sink, false); hws->funcs.edp_backlight_control(edp_link_with_sink, false);
} }
/*resume from S3, no vbios posting, no need to power down again*/ /*resume from S3, no vbios posting, no need to power down again*/
clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
power_down_all_hw_blocks(dc); power_down_all_hw_blocks(dc);
disable_vga_and_power_gate_all_controllers(dc); disable_vga_and_power_gate_all_controllers(dc);
if (edp_link_with_sink && !keep_edp_vdd_on) if (edp_link_with_sink && !keep_edp_vdd_on)
dc->hwss.edp_power_control(edp_link_with_sink, false); dc->hwss.edp_power_control(edp_link_with_sink, false);
clk_mgr_optimize_pwr_state(dc, dc->clk_mgr);
} }
bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 1); bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 1);
} }

View File

@ -75,6 +75,7 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
.get_hw_state = dcn10_get_hw_state, .get_hw_state = dcn10_get_hw_state,
.clear_status_bits = dcn10_clear_status_bits, .clear_status_bits = dcn10_clear_status_bits,
.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
.edp_backlight_control = dce110_edp_backlight_control,
.edp_power_control = dce110_edp_power_control, .edp_power_control = dce110_edp_power_control,
.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
.set_cursor_position = dcn10_set_cursor_position, .set_cursor_position = dcn10_set_cursor_position,

View File

@ -84,7 +84,8 @@ static enum phyd32clk_clock_source get_phy_mux_symclk(
struct dcn_dccg *dccg_dcn, struct dcn_dccg *dccg_dcn,
enum phyd32clk_clock_source src) enum phyd32clk_clock_source src)
{ {
if (dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { if (dccg_dcn->base.ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
if (src == PHYD32CLKC) if (src == PHYD32CLKC)
src = PHYD32CLKF; src = PHYD32CLKF;
if (src == PHYD32CLKD) if (src == PHYD32CLKD)

View File

@ -32,7 +32,7 @@
#include "dml/display_mode_vba.h" #include "dml/display_mode_vba.h"
struct _vcs_dpi_ip_params_st dcn3_14_ip = { struct _vcs_dpi_ip_params_st dcn3_14_ip = {
.VBlankNomDefaultUS = 800, .VBlankNomDefaultUS = 668,
.gpuvm_enable = 1, .gpuvm_enable = 1,
.gpuvm_max_page_table_levels = 1, .gpuvm_max_page_table_levels = 1,
.hostvm_enable = 1, .hostvm_enable = 1,

View File

@ -2074,15 +2074,19 @@ static int amdgpu_device_attr_create(struct amdgpu_device *adev,
uint32_t mask, struct list_head *attr_list) uint32_t mask, struct list_head *attr_list)
{ {
int ret = 0; int ret = 0;
struct device_attribute *dev_attr = &attr->dev_attr;
const char *name = dev_attr->attr.name;
enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED; enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
struct amdgpu_device_attr_entry *attr_entry; struct amdgpu_device_attr_entry *attr_entry;
struct device_attribute *dev_attr;
const char *name;
int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr, int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update; uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
BUG_ON(!attr); if (!attr)
return -EINVAL;
dev_attr = &attr->dev_attr;
name = dev_attr->attr.name;
attr_update = attr->attr_update ? attr->attr_update : default_attr_update; attr_update = attr->attr_update ? attr->attr_update : default_attr_update;

View File

@ -1307,7 +1307,7 @@ static ssize_t smu_v13_0_0_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency; gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency;
gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency; gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency;
gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK]; gpu_metrics->current_gfxclk = gpu_metrics->average_gfxclk_frequency;
gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK]; gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK];
gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK]; gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK];
gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0]; gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];

View File

@ -2,7 +2,7 @@
#define _DRM_DEVICE_H_ #define _DRM_DEVICE_H_
#include <sys/types.h> #include <sys/types.h>
#include <sys/selinfo.h> #include <sys/event.h>
#include <linux/list.h> #include <linux/list.h>
#include <linux/kref.h> #include <linux/kref.h>

View File

@ -1,4 +1,4 @@
/* $OpenBSD: pci.h,v 1.13 2023/01/01 01:34:58 jsg Exp $ */ /* $OpenBSD: pci.h,v 1.14 2023/09/13 12:31:49 jsg Exp $ */
/* /*
* Copyright (c) 2015 Mark Kettenis * Copyright (c) 2015 Mark Kettenis
* *
@ -305,6 +305,27 @@ pcie_capability_write_word(struct pci_dev *pdev, int off, u16 val)
return 0; return 0;
} }
static inline int
pcie_capability_set_word(struct pci_dev *pdev, int off, u16 val)
{
u16 r;
pcie_capability_read_word(pdev, off, &r);
r |= val;
pcie_capability_write_word(pdev, off, r);
return 0;
}
static inline int
pcie_capability_clear_and_set_word(struct pci_dev *pdev, int off, u16 c, u16 s)
{
u16 r;
pcie_capability_read_word(pdev, off, &r);
r &= ~c;
r |= s;
pcie_capability_write_word(pdev, off, r);
return 0;
}
static inline int static inline int
pcie_get_readrq(struct pci_dev *pdev) pcie_get_readrq(struct pci_dev *pdev)
{ {

View File

@ -9536,17 +9536,8 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
u16 bridge_cfg2, gpu_cfg2; u16 bridge_cfg2, gpu_cfg2;
u32 max_lw, current_lw, tmp; u32 max_lw, current_lw, tmp;
pcie_capability_read_word(root, PCI_EXP_LNKCTL, pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
&bridge_cfg); pcie_capability_set_word(rdev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL,
&gpu_cfg);
tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL,
tmp16);
tmp = RREG32_PCIE_PORT(PCIE_LC_STATUS1); tmp = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT; max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
@ -9593,21 +9584,14 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
drm_msleep(100); drm_msleep(100);
/* linkctl */ /* linkctl */
pcie_capability_read_word(root, PCI_EXP_LNKCTL, pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL,
&tmp16); PCI_EXP_LNKCTL_HAWD,
tmp16 &= ~PCI_EXP_LNKCTL_HAWD; bridge_cfg &
tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD); PCI_EXP_LNKCTL_HAWD);
pcie_capability_write_word(root, PCI_EXP_LNKCTL, pcie_capability_clear_and_set_word(rdev->pdev, PCI_EXP_LNKCTL,
tmp16); PCI_EXP_LNKCTL_HAWD,
gpu_cfg &
pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL_HAWD);
PCI_EXP_LNKCTL,
&tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
pcie_capability_write_word(rdev->pdev,
PCI_EXP_LNKCTL,
tmp16);
/* linkctl2 */ /* linkctl2 */
pcie_capability_read_word(root, PCI_EXP_LNKCTL2, pcie_capability_read_word(root, PCI_EXP_LNKCTL2,

View File

@ -1500,7 +1500,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
} else if (of_machine_is_compatible("PowerMac3,5")) { } else if (of_machine_is_compatible("PowerMac3,5")) {
/* PowerMac G4 Silver radeon 7500 */ /* PowerMac G4 Silver radeon 7500 */
rdev->mode_info.connector_table = CT_MAC_G4_SILVER; rdev->mode_info.connector_table = CT_MAC_G4_SILVER;
} else if (of_machine_is_compatible("PowerMac4,4")) { } else if (of_machine_is_compatible("PowerMac4,4") ||
of_machine_is_compatible("PowerMac6,4")) {
/* emac */ /* emac */
rdev->mode_info.connector_table = CT_EMAC; rdev->mode_info.connector_table = CT_EMAC;
} else if (of_machine_is_compatible("PowerMac10,1")) { } else if (of_machine_is_compatible("PowerMac10,1")) {

View File

@ -7133,17 +7133,8 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
u16 bridge_cfg2, gpu_cfg2; u16 bridge_cfg2, gpu_cfg2;
u32 max_lw, current_lw, tmp; u32 max_lw, current_lw, tmp;
pcie_capability_read_word(root, PCI_EXP_LNKCTL, pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
&bridge_cfg); pcie_capability_set_word(rdev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL,
&gpu_cfg);
tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL,
tmp16);
tmp = RREG32_PCIE(PCIE_LC_STATUS1); tmp = RREG32_PCIE(PCIE_LC_STATUS1);
max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT; max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
@ -7190,22 +7181,14 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
drm_msleep(100); drm_msleep(100);
/* linkctl */ /* linkctl */
pcie_capability_read_word(root, PCI_EXP_LNKCTL, pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL,
&tmp16); PCI_EXP_LNKCTL_HAWD,
tmp16 &= ~PCI_EXP_LNKCTL_HAWD; bridge_cfg &
tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD); PCI_EXP_LNKCTL_HAWD);
pcie_capability_write_word(root, pcie_capability_clear_and_set_word(rdev->pdev, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD,
tmp16); gpu_cfg &
PCI_EXP_LNKCTL_HAWD);
pcie_capability_read_word(rdev->pdev,
PCI_EXP_LNKCTL,
&tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
pcie_capability_write_word(rdev->pdev,
PCI_EXP_LNKCTL,
tmp16);
/* linkctl2 */ /* linkctl2 */
pcie_capability_read_word(root, PCI_EXP_LNKCTL2, pcie_capability_read_word(root, PCI_EXP_LNKCTL2,

View File

@ -1,4 +1,4 @@
/* $OpenBSD: kern_exit.c,v 1.214 2023/09/08 09:06:31 claudio Exp $ */ /* $OpenBSD: kern_exit.c,v 1.215 2023/09/13 14:25:49 claudio Exp $ */
/* $NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $ */ /* $NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $ */
/* /*
@ -119,7 +119,7 @@ exit1(struct proc *p, int xexit, int xsig, int flags)
struct process *pr, *qr, *nqr; struct process *pr, *qr, *nqr;
struct rusage *rup; struct rusage *rup;
struct timespec ts; struct timespec ts;
int s, wake; int s;
atomic_setbits_int(&p->p_flag, P_WEXIT); atomic_setbits_int(&p->p_flag, P_WEXIT);
@ -157,22 +157,14 @@ exit1(struct proc *p, int xexit, int xsig, int flags)
} }
/* unlink ourselves from the active threads */ /* unlink ourselves from the active threads */
mtx_enter(&pr->ps_mtx); SCHED_LOCK(s);
TAILQ_REMOVE(&pr->ps_threads, p, p_thr_link); TAILQ_REMOVE(&pr->ps_threads, p, p_thr_link);
pr->ps_threadcnt--; SCHED_UNLOCK(s);
wake = (pr->ps_single && pr->ps_singlecnt == pr->ps_threadcnt);
mtx_leave(&pr->ps_mtx);
if (wake)
wakeup(&pr->ps_singlecnt);
if ((p->p_flag & P_THREAD) == 0) { if ((p->p_flag & P_THREAD) == 0) {
/* main thread gotta wait because it has the pid, et al */ /* main thread gotta wait because it has the pid, et al */
mtx_enter(&pr->ps_mtx); while (pr->ps_threadcnt > 1)
while (pr->ps_threadcnt > 0) tsleep_nsec(&pr->ps_threads, PWAIT, "thrdeath", INFSLP);
msleep_nsec(&pr->ps_threads, &pr->ps_mtx, PWAIT,
"thrdeath", INFSLP);
mtx_leave(&pr->ps_mtx);
if (pr->ps_flags & PS_PROFIL) if (pr->ps_flags & PS_PROFIL)
stopprofclock(pr); stopprofclock(pr);
} }
@ -345,10 +337,9 @@ exit1(struct proc *p, int xexit, int xsig, int flags)
/* just a thread? detach it from its process */ /* just a thread? detach it from its process */
if (p->p_flag & P_THREAD) { if (p->p_flag & P_THREAD) {
/* scheduler_wait_hook(pr->ps_mainproc, p); XXX */ /* scheduler_wait_hook(pr->ps_mainproc, p); XXX */
mtx_enter(&pr->ps_mtx); if (--pr->ps_threadcnt == 1)
if (pr->ps_threadcnt == 0)
wakeup(&pr->ps_threads); wakeup(&pr->ps_threads);
mtx_leave(&pr->ps_mtx); KASSERT(pr->ps_threadcnt > 0);
} }
/* Release the thread's read reference of resource limit structure. */ /* Release the thread's read reference of resource limit structure. */
@ -832,7 +823,7 @@ process_zap(struct process *pr)
if (otvp) if (otvp)
vrele(otvp); vrele(otvp);
KASSERT(pr->ps_threadcnt == 0); KASSERT(pr->ps_threadcnt == 1);
if (pr->ps_ptstat != NULL) if (pr->ps_ptstat != NULL)
free(pr->ps_ptstat, M_SUBPROC, sizeof(*pr->ps_ptstat)); free(pr->ps_ptstat, M_SUBPROC, sizeof(*pr->ps_ptstat));
pool_put(&rusage_pool, pr->ps_ru); pool_put(&rusage_pool, pr->ps_ru);

View File

@ -1,4 +1,4 @@
/* $OpenBSD: kern_fork.c,v 1.251 2023/09/08 09:06:31 claudio Exp $ */ /* $OpenBSD: kern_fork.c,v 1.252 2023/09/13 14:25:49 claudio Exp $ */
/* $NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $ */ /* $NetBSD: kern_fork.c,v 1.29 1996/02/09 18:59:34 christos Exp $ */
/* /*
@ -519,7 +519,7 @@ thread_fork(struct proc *curp, void *stack, void *tcb, pid_t *tidptr,
struct proc *p; struct proc *p;
pid_t tid; pid_t tid;
vaddr_t uaddr; vaddr_t uaddr;
int error; int s, error;
if (stack == NULL) if (stack == NULL)
return EINVAL; return EINVAL;
@ -543,6 +543,7 @@ thread_fork(struct proc *curp, void *stack, void *tcb, pid_t *tidptr,
/* other links */ /* other links */
p->p_p = pr; p->p_p = pr;
pr->ps_threadcnt++;
/* local copies */ /* local copies */
p->p_fd = pr->ps_fd; p->p_fd = pr->ps_fd;
@ -561,17 +562,18 @@ thread_fork(struct proc *curp, void *stack, void *tcb, pid_t *tidptr,
LIST_INSERT_HEAD(&allproc, p, p_list); LIST_INSERT_HEAD(&allproc, p, p_list);
LIST_INSERT_HEAD(TIDHASH(p->p_tid), p, p_hash); LIST_INSERT_HEAD(TIDHASH(p->p_tid), p, p_hash);
mtx_enter(&pr->ps_mtx); SCHED_LOCK(s);
TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link); TAILQ_INSERT_TAIL(&pr->ps_threads, p, p_thr_link);
pr->ps_threadcnt++;
/* /*
* if somebody else wants to take us to single threaded mode, * if somebody else wants to take us to single threaded mode,
* count ourselves in. * count ourselves in.
*/ */
if (pr->ps_single) if (pr->ps_single) {
atomic_inc_int(&pr->ps_singlecount);
atomic_setbits_int(&p->p_flag, P_SUSPSINGLE); atomic_setbits_int(&p->p_flag, P_SUSPSINGLE);
mtx_leave(&pr->ps_mtx); }
SCHED_UNLOCK(s);
/* /*
* Return tid to parent thread and copy it out to userspace * Return tid to parent thread and copy it out to userspace

View File

@ -1,4 +1,4 @@
/* $OpenBSD: kern_resource.c,v 1.79 2023/09/08 09:06:31 claudio Exp $ */ /* $OpenBSD: kern_resource.c,v 1.80 2023/09/13 14:25:49 claudio Exp $ */
/* $NetBSD: kern_resource.c,v 1.38 1996/10/23 07:19:38 matthias Exp $ */ /* $NetBSD: kern_resource.c,v 1.38 1996/10/23 07:19:38 matthias Exp $ */
/*- /*-
@ -212,13 +212,11 @@ donice(struct proc *curp, struct process *chgpr, int n)
if (n < chgpr->ps_nice && suser(curp)) if (n < chgpr->ps_nice && suser(curp))
return (EACCES); return (EACCES);
chgpr->ps_nice = n; chgpr->ps_nice = n;
mtx_enter(&chgpr->ps_mtx); SCHED_LOCK(s);
TAILQ_FOREACH(p, &chgpr->ps_threads, p_thr_link) { TAILQ_FOREACH(p, &chgpr->ps_threads, p_thr_link) {
SCHED_LOCK(s);
setpriority(p, p->p_estcpu, n); setpriority(p, p->p_estcpu, n);
SCHED_UNLOCK(s);
} }
mtx_leave(&chgpr->ps_mtx); SCHED_UNLOCK(s);
return (0); return (0);
} }
@ -478,9 +476,8 @@ dogetrusage(struct proc *p, int who, struct rusage *rup)
struct process *pr = p->p_p; struct process *pr = p->p_p;
struct proc *q; struct proc *q;
KERNEL_ASSERT_LOCKED();
switch (who) { switch (who) {
case RUSAGE_SELF: case RUSAGE_SELF:
/* start with the sum of dead threads, if any */ /* start with the sum of dead threads, if any */
if (pr->ps_ru != NULL) if (pr->ps_ru != NULL)

View File

@ -1,4 +1,4 @@
/* $OpenBSD: kern_sig.c,v 1.316 2023/09/09 14:50:09 claudio Exp $ */ /* $OpenBSD: kern_sig.c,v 1.317 2023/09/13 14:25:49 claudio Exp $ */
/* $NetBSD: kern_sig.c,v 1.54 1996/04/22 01:38:32 christos Exp $ */ /* $NetBSD: kern_sig.c,v 1.54 1996/04/22 01:38:32 christos Exp $ */
/* /*
@ -2008,12 +2008,11 @@ userret(struct proc *p)
} }
int int
single_thread_check_locked(struct proc *p, int deep) single_thread_check_locked(struct proc *p, int deep, int s)
{ {
struct process *pr = p->p_p; struct process *pr = p->p_p;
int s, wake;
MUTEX_ASSERT_LOCKED(&pr->ps_mtx); SCHED_ASSERT_LOCKED();
if (pr->ps_single == NULL || pr->ps_single == p) if (pr->ps_single == NULL || pr->ps_single == p)
return (0); return (0);
@ -2027,24 +2026,19 @@ single_thread_check_locked(struct proc *p, int deep)
return (EINTR); return (EINTR);
} }
if (atomic_dec_int_nv(&pr->ps_singlecount) == 0)
wakeup(&pr->ps_singlecount);
if (pr->ps_flags & PS_SINGLEEXIT) { if (pr->ps_flags & PS_SINGLEEXIT) {
mtx_leave(&pr->ps_mtx); SCHED_UNLOCK(s);
KERNEL_LOCK(); KERNEL_LOCK();
exit1(p, 0, 0, EXIT_THREAD_NOCHECK); exit1(p, 0, 0, EXIT_THREAD_NOCHECK);
/* NOTREACHED */ /* NOTREACHED */
} }
/* not exiting and don't need to unwind, so suspend */ /* not exiting and don't need to unwind, so suspend */
wake = (++pr->ps_singlecnt == pr->ps_threadcnt);
mtx_leave(&pr->ps_mtx);
if (wake)
wakeup(&pr->ps_singlecnt);
SCHED_LOCK(s);
p->p_stat = SSTOP; p->p_stat = SSTOP;
mi_switch(); mi_switch();
SCHED_UNLOCK(s);
mtx_enter(&pr->ps_mtx);
} while (pr->ps_single != NULL); } while (pr->ps_single != NULL);
return (0); return (0);
@ -2053,11 +2047,11 @@ single_thread_check_locked(struct proc *p, int deep)
int int
single_thread_check(struct proc *p, int deep) single_thread_check(struct proc *p, int deep)
{ {
int error; int s, error;
mtx_enter(&p->p_p->ps_mtx); SCHED_LOCK(s);
error = single_thread_check_locked(p, deep); error = single_thread_check_locked(p, deep, s);
mtx_leave(&p->p_p->ps_mtx); SCHED_UNLOCK(s);
return error; return error;
} }
@ -2077,14 +2071,13 @@ single_thread_set(struct proc *p, enum single_thread_mode mode, int wait)
struct process *pr = p->p_p; struct process *pr = p->p_p;
struct proc *q; struct proc *q;
int error, s; int error, s;
u_int count = 0;
KASSERT(curproc == p); KASSERT(curproc == p);
mtx_enter(&pr->ps_mtx); SCHED_LOCK(s);
error = single_thread_check_locked(p, (mode == SINGLE_UNWIND)); error = single_thread_check_locked(p, (mode == SINGLE_UNWIND), s);
if (error) { if (error) {
mtx_leave(&pr->ps_mtx); SCHED_UNLOCK(s);
return error; return error;
} }
@ -2103,25 +2096,26 @@ single_thread_set(struct proc *p, enum single_thread_mode mode, int wait)
panic("single_thread_mode = %d", mode); panic("single_thread_mode = %d", mode);
#endif #endif
} }
pr->ps_singlecnt = 1; /* count ourselfs in already */ pr->ps_singlecount = 0;
membar_producer();
pr->ps_single = p; pr->ps_single = p;
TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) { TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
if (q == p) if (q == p)
continue; continue;
if (q->p_flag & P_WEXIT) { if (q->p_flag & P_WEXIT) {
SCHED_LOCK(s); if (mode == SINGLE_EXIT) {
if (mode == SINGLE_EXIT && q->p_stat == SSTOP) if (q->p_stat == SSTOP) {
setrunnable(q); setrunnable(q);
SCHED_UNLOCK(s); atomic_inc_int(&pr->ps_singlecount);
}
}
continue; continue;
} }
SCHED_LOCK(s);
atomic_setbits_int(&q->p_flag, P_SUSPSINGLE); atomic_setbits_int(&q->p_flag, P_SUSPSINGLE);
switch (q->p_stat) { switch (q->p_stat) {
case SIDL: case SIDL:
case SDEAD:
case SRUN: case SRUN:
atomic_inc_int(&pr->ps_singlecount);
break; break;
case SSLEEP: case SSLEEP:
/* if it's not interruptible, then just have to wait */ /* if it's not interruptible, then just have to wait */
@ -2129,29 +2123,28 @@ single_thread_set(struct proc *p, enum single_thread_mode mode, int wait)
/* merely need to suspend? just stop it */ /* merely need to suspend? just stop it */
if (mode == SINGLE_SUSPEND) { if (mode == SINGLE_SUSPEND) {
q->p_stat = SSTOP; q->p_stat = SSTOP;
count++;
break; break;
} }
/* need to unwind or exit, so wake it */ /* need to unwind or exit, so wake it */
setrunnable(q); setrunnable(q);
} }
atomic_inc_int(&pr->ps_singlecount);
break; break;
case SSTOP: case SSTOP:
if (mode == SINGLE_EXIT) { if (mode == SINGLE_EXIT) {
setrunnable(q); setrunnable(q);
break; atomic_inc_int(&pr->ps_singlecount);
} }
count++; break;
case SDEAD:
break; break;
case SONPROC: case SONPROC:
atomic_inc_int(&pr->ps_singlecount);
signotify(q); signotify(q);
break; break;
} }
SCHED_UNLOCK(s);
} }
SCHED_UNLOCK(s);
pr->ps_singlecnt += count;
mtx_leave(&pr->ps_mtx);
if (wait) if (wait)
single_thread_wait(pr, 1); single_thread_wait(pr, 1);
@ -2170,16 +2163,14 @@ single_thread_wait(struct process *pr, int recheck)
int wait; int wait;
/* wait until they're all suspended */ /* wait until they're all suspended */
mtx_enter(&pr->ps_mtx); wait = pr->ps_singlecount > 0;
wait = pr->ps_singlecnt < pr->ps_threadcnt;
while (wait) { while (wait) {
msleep_nsec(&pr->ps_singlecnt, &pr->ps_mtx, PWAIT, "suspend", sleep_setup(&pr->ps_singlecount, PWAIT, "suspend");
INFSLP); wait = pr->ps_singlecount > 0;
sleep_finish(0, wait);
if (!recheck) if (!recheck)
break; break;
wait = pr->ps_singlecnt < pr->ps_threadcnt;
} }
mtx_leave(&pr->ps_mtx);
return wait; return wait;
} }
@ -2194,10 +2185,9 @@ single_thread_clear(struct proc *p, int flag)
KASSERT(pr->ps_single == p); KASSERT(pr->ps_single == p);
KASSERT(curproc == p); KASSERT(curproc == p);
mtx_enter(&pr->ps_mtx); SCHED_LOCK(s);
pr->ps_single = NULL; pr->ps_single = NULL;
atomic_clearbits_int(&pr->ps_flags, PS_SINGLEUNWIND | PS_SINGLEEXIT); atomic_clearbits_int(&pr->ps_flags, PS_SINGLEUNWIND | PS_SINGLEEXIT);
TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) { TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
if (q == p || (q->p_flag & P_SUSPSINGLE) == 0) if (q == p || (q->p_flag & P_SUSPSINGLE) == 0)
continue; continue;
@ -2208,7 +2198,6 @@ single_thread_clear(struct proc *p, int flag)
* then clearing that either makes it runnable or puts * then clearing that either makes it runnable or puts
* it back into some sleep queue * it back into some sleep queue
*/ */
SCHED_LOCK(s);
if (q->p_stat == SSTOP && (q->p_flag & flag) == 0) { if (q->p_stat == SSTOP && (q->p_flag & flag) == 0) {
if (q->p_wchan == NULL) if (q->p_wchan == NULL)
setrunnable(q); setrunnable(q);
@ -2217,9 +2206,8 @@ single_thread_clear(struct proc *p, int flag)
q->p_stat = SSLEEP; q->p_stat = SSLEEP;
} }
} }
SCHED_UNLOCK(s);
} }
mtx_leave(&pr->ps_mtx); SCHED_UNLOCK(s);
} }
void void

View File

@ -1,4 +1,4 @@
/* $OpenBSD: kern_synch.c,v 1.199 2023/09/08 09:06:31 claudio Exp $ */ /* $OpenBSD: kern_synch.c,v 1.200 2023/09/13 14:25:49 claudio Exp $ */
/* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */ /* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */
/* /*
@ -566,18 +566,15 @@ sys_sched_yield(struct proc *p, void *v, register_t *retval)
uint8_t newprio; uint8_t newprio;
int s; int s;
SCHED_LOCK(s);
/* /*
* If one of the threads of a multi-threaded process called * If one of the threads of a multi-threaded process called
* sched_yield(2), drop its priority to ensure its siblings * sched_yield(2), drop its priority to ensure its siblings
* can make some progress. * can make some progress.
*/ */
mtx_enter(&p->p_p->ps_mtx);
newprio = p->p_usrpri; newprio = p->p_usrpri;
TAILQ_FOREACH(q, &p->p_p->ps_threads, p_thr_link) TAILQ_FOREACH(q, &p->p_p->ps_threads, p_thr_link)
newprio = max(newprio, q->p_runpri); newprio = max(newprio, q->p_runpri);
mtx_leave(&p->p_p->ps_mtx);
SCHED_LOCK(s);
setrunqueue(p->p_cpu, p, newprio); setrunqueue(p->p_cpu, p, newprio);
p->p_ru.ru_nvcsw++; p->p_ru.ru_nvcsw++;
mi_switch(); mi_switch();

View File

@ -1,4 +1,4 @@
/* $OpenBSD: proc.h,v 1.350 2023/09/08 09:06:31 claudio Exp $ */ /* $OpenBSD: proc.h,v 1.351 2023/09/13 14:25:49 claudio Exp $ */
/* $NetBSD: proc.h,v 1.44 1996/04/22 01:23:21 christos Exp $ */ /* $NetBSD: proc.h,v 1.44 1996/04/22 01:23:21 christos Exp $ */
/*- /*-
@ -138,7 +138,7 @@ struct process {
struct ucred *ps_ucred; /* Process owner's identity. */ struct ucred *ps_ucred; /* Process owner's identity. */
LIST_ENTRY(process) ps_list; /* List of all processes. */ LIST_ENTRY(process) ps_list; /* List of all processes. */
TAILQ_HEAD(,proc) ps_threads; /* [K|m] Threads in this process. */ TAILQ_HEAD(,proc) ps_threads; /* [K|S] Threads in this process. */
LIST_ENTRY(process) ps_pglist; /* List of processes in pgrp. */ LIST_ENTRY(process) ps_pglist; /* List of processes in pgrp. */
struct process *ps_pptr; /* Pointer to parent process. */ struct process *ps_pptr; /* Pointer to parent process. */
@ -173,8 +173,8 @@ struct process {
u_int ps_flags; /* [a] PS_* flags. */ u_int ps_flags; /* [a] PS_* flags. */
int ps_siglist; /* Signals pending for the process. */ int ps_siglist; /* Signals pending for the process. */
struct proc *ps_single; /* [m] Thread for single-threading. */ struct proc *ps_single; /* [S] Thread for single-threading. */
u_int ps_singlecnt; /* [m] Number of suspended threads. */ u_int ps_singlecount; /* [a] Not yet suspended threads. */
int ps_traceflag; /* Kernel trace points. */ int ps_traceflag; /* Kernel trace points. */
struct vnode *ps_tracevp; /* Trace to vnode. */ struct vnode *ps_tracevp; /* Trace to vnode. */
@ -242,7 +242,7 @@ struct process {
/* End area that is copied on creation. */ /* End area that is copied on creation. */
#define ps_endcopy ps_threadcnt #define ps_endcopy ps_threadcnt
u_int ps_threadcnt; /* [m] Number of threads. */ u_int ps_threadcnt; /* Number of threads. */
struct timespec ps_start; /* starting uptime. */ struct timespec ps_start; /* starting uptime. */
struct timeout ps_realit_to; /* [m] ITIMER_REAL timeout */ struct timeout ps_realit_to; /* [m] ITIMER_REAL timeout */
@ -310,14 +310,13 @@ struct p_inentry {
* U uidinfolk * U uidinfolk
* l read only reference, see lim_read_enter() * l read only reference, see lim_read_enter()
* o owned (read/modified only) by this thread * o owned (read/modified only) by this thread
* m this proc's' `p->p_p->ps_mtx'
*/ */
struct proc { struct proc {
TAILQ_ENTRY(proc) p_runq; /* [S] current run/sleep queue */ TAILQ_ENTRY(proc) p_runq; /* [S] current run/sleep queue */
LIST_ENTRY(proc) p_list; /* List of all threads. */ LIST_ENTRY(proc) p_list; /* List of all threads. */
struct process *p_p; /* [I] The process of this thread. */ struct process *p_p; /* [I] The process of this thread. */
TAILQ_ENTRY(proc) p_thr_link; /* [K|m] Threads in a process linkage. */ TAILQ_ENTRY(proc) p_thr_link; /* Threads in a process linkage. */
TAILQ_ENTRY(proc) p_fut_link; /* Threads in a futex linkage. */ TAILQ_ENTRY(proc) p_fut_link; /* Threads in a futex linkage. */
struct futex *p_futex; /* Current sleeping futex. */ struct futex *p_futex; /* Current sleeping futex. */

View File

@ -1,4 +1,4 @@
/* $OpenBSD: btrace.c,v 1.76 2023/09/11 19:01:26 mpi Exp $ */ /* $OpenBSD: btrace.c,v 1.77 2023/09/13 13:47:58 mpi Exp $ */
/* /*
* Copyright (c) 2019 - 2023 Martin Pieuchot <mpi@openbsd.org> * Copyright (c) 2019 - 2023 Martin Pieuchot <mpi@openbsd.org>
@ -990,6 +990,7 @@ stmt_insert(struct bt_stmt *bs, struct dt_evt *dtev)
bval = ba_new(val, B_AT_LONG); bval = ba_new(val, B_AT_LONG);
break; break;
default: default:
bval = baeval(bval, dtev);
break; break;
} }