sync with OpenBSD -current
parent af487c914f
commit bf24177229

@@ -1,4 +1,4 @@
/* $OpenBSD: ssl_err.c,v 1.51 2024/07/14 15:56:08 tb Exp $ */
/* $OpenBSD: ssl_err.c,v 1.52 2024/09/09 07:40:03 tb Exp $ */
/* ====================================================================
* Copyright (c) 1999-2011 The OpenSSL Project. All rights reserved.
*
@@ -417,7 +417,6 @@ static const ERR_STRING_DATA SSL_str_reasons[] = {
{ERR_REASON(SSL_R_TLS_INVALID_ECPOINTFORMAT_LIST), "tls invalid ecpointformat list"},
{ERR_REASON(SSL_R_TLS_PEER_DID_NOT_RESPOND_WITH_CERTIFICATE_LIST), "tls peer did not respond with certificate list"},
{ERR_REASON(SSL_R_TLS_RSA_ENCRYPTED_VALUE_LENGTH_IS_WRONG), "tls rsa encrypted value length is wrong"},
{ERR_REASON(SSL_R_TRIED_TO_USE_UNSUPPORTED_CIPHER), "tried to use unsupported cipher"},
{ERR_REASON(SSL_R_UNABLE_TO_DECODE_DH_CERTS), "unable to decode dh certs"},
{ERR_REASON(SSL_R_UNABLE_TO_DECODE_ECDH_CERTS), "unable to decode ecdh certs"},
{ERR_REASON(SSL_R_UNABLE_TO_EXTRACT_PUBLIC_KEY), "unable to extract public key"},

@@ -1,4 +1,4 @@
/* $OpenBSD: tls13_internal.h,v 1.103 2024/01/27 14:31:01 jsing Exp $ */
/* $OpenBSD: tls13_internal.h,v 1.104 2024/09/09 03:32:29 tb Exp $ */
/*
* Copyright (c) 2018 Bob Beck <beck@openbsd.org>
* Copyright (c) 2018 Theo Buehler <tb@openbsd.org>
@@ -210,6 +210,8 @@ void tls13_record_layer_set_hash(struct tls13_record_layer *rl,
void tls13_record_layer_set_legacy_version(struct tls13_record_layer *rl,
uint16_t version);
void tls13_record_layer_set_retry_after_phh(struct tls13_record_layer *rl, int retry);
void tls13_record_layer_alert_sent(struct tls13_record_layer *rl,
uint8_t alert_level, uint8_t alert_desc);
void tls13_record_layer_handshake_completed(struct tls13_record_layer *rl);
int tls13_record_layer_set_read_traffic_key(struct tls13_record_layer *rl,
struct tls13_secret *read_key, enum ssl_encryption_level_t read_level);

@@ -1,4 +1,4 @@
/* $OpenBSD: tls13_quic.c,v 1.7 2022/11/26 16:08:56 tb Exp $ */
/* $OpenBSD: tls13_quic.c,v 1.8 2024/09/09 03:55:55 tb Exp $ */
/*
* Copyright (c) 2022 Joel Sing <jsing@openbsd.org>
*
@@ -131,6 +131,8 @@ tls13_quic_alert_send_cb(int alert_desc, void *arg)
{
struct tls13_ctx *ctx = arg;
SSL *ssl = ctx->ssl;
uint8_t alert_level = TLS13_ALERT_LEVEL_FATAL;
int ret = TLS13_IO_ALERT;

if (!ssl->quic_method->send_alert(ssl, ctx->hs->tls13.quic_write_level,
alert_desc)) {
@@ -138,7 +140,15 @@ tls13_quic_alert_send_cb(int alert_desc, void *arg)
return TLS13_IO_FAILURE;
}

return TLS13_IO_SUCCESS;
if (alert_desc == TLS13_ALERT_CLOSE_NOTIFY ||
alert_desc == TLS13_ALERT_USER_CANCELED) {
alert_level = TLS13_ALERT_LEVEL_WARNING;
ret = TLS13_IO_SUCCESS;
}

tls13_record_layer_alert_sent(ctx->rl, alert_level, alert_desc);

return ret;
}

static const struct tls13_record_layer_callbacks quic_rl_callbacks = {

@@ -1,4 +1,4 @@
/* $OpenBSD: tls13_record_layer.c,v 1.73 2024/01/27 14:23:51 jsing Exp $ */
/* $OpenBSD: tls13_record_layer.c,v 1.74 2024/09/09 03:32:29 tb Exp $ */
/*
* Copyright (c) 2018, 2019 Joel Sing <jsing@openbsd.org>
*
@@ -332,6 +332,13 @@ tls13_record_layer_process_alert(struct tls13_record_layer *rl)
return ret;
}

void
tls13_record_layer_alert_sent(struct tls13_record_layer *rl,
uint8_t alert_level, uint8_t alert_desc)
{
rl->cb.alert_sent(alert_level, alert_desc, rl->cb_arg);
}

static ssize_t
tls13_record_layer_send_alert(struct tls13_record_layer *rl)
{
@@ -361,7 +368,7 @@ tls13_record_layer_send_alert(struct tls13_record_layer *rl)
ret = TLS13_IO_ALERT;
}

rl->cb.alert_sent(rl->alert_level, rl->alert_desc, rl->cb_arg);
tls13_record_layer_alert_sent(rl, rl->alert_level, rl->alert_desc);

return ret;
}

@@ -1,4 +1,4 @@
# $OpenBSD: Makefile,v 1.15 2024/05/19 19:10:01 anton Exp $
# $OpenBSD: Makefile,v 1.16 2024/09/09 03:13:39 djm Exp $

PROG=test_kex
SRCS=tests.c test_kex.c test_proposal.c
@@ -25,6 +25,7 @@ SRCS+= kexc25519.c
SRCS+= smult_curve25519_ref.c
SRCS+= kexgen.c
SRCS+= kexsntrup761x25519.c
SRCS+= kexmlkem768x25519.c
SRCS+= sntrup761.c
SRCS+= utf8.c

@@ -1,4 +1,4 @@
/* $OpenBSD: test_kex.c,v 1.8 2024/03/25 19:28:09 djm Exp $ */
/* $OpenBSD: test_kex.c,v 1.9 2024/09/09 03:13:39 djm Exp $ */
/*
* Regress test KEX
*
@@ -145,6 +145,7 @@ do_kex_with_key(char *kex, int keytype, int bits)
server2->kex->kex[KEX_ECDH_SHA2] = kex_gen_server;
server2->kex->kex[KEX_C25519_SHA256] = kex_gen_server;
server2->kex->kex[KEX_KEM_SNTRUP761X25519_SHA512] = kex_gen_server;
server2->kex->kex[KEX_KEM_MLKEM768X25519_SHA256] = kex_gen_server;
server2->kex->load_host_public_key = server->kex->load_host_public_key;
server2->kex->load_host_private_key = server->kex->load_host_private_key;
server2->kex->sign = server->kex->sign;
@@ -193,4 +194,5 @@ kex_tests(void)
do_kex("diffie-hellman-group14-sha1");
do_kex("diffie-hellman-group1-sha1");
do_kex("sntrup761x25519-sha512@openssh.com");
do_kex("mlkem768x25519-sha256");
}

@@ -1,4 +1,4 @@
.\" $OpenBSD: sysctl.8,v 1.214 2018/02/16 07:27:07 jmc Exp $
.\" $OpenBSD: sysctl.8,v 1.215 2024/09/09 05:36:17 kn Exp $
.\" $NetBSD: sysctl.8,v 1.4 1995/09/30 07:12:49 thorpej Exp $
.\"
.\" Copyright (c) 1993
@@ -30,7 +30,7 @@
.\"
.\" @(#)sysctl.8 8.2 (Berkeley) 5/9/95
.\"
.Dd $Mdocdate: February 16 2018 $
.Dd $Mdocdate: September 9 2024 $
.Dt SYSCTL 8
.Os
.Sh NAME
@@ -39,7 +39,7 @@
.Sh SYNOPSIS
.Nm sysctl
.Op Fl Aanq
.Op Ar name Ns Op = Ns Ar value
.Op Ar name Ns Oo = Ns Ar value Oc ...
.Sh DESCRIPTION
The
.Nm

@@ -1,4 +1,4 @@
/* $OpenBSD: sysctl.c,v 1.261 2024/05/09 08:35:40 florian Exp $ */
/* $OpenBSD: sysctl.c,v 1.262 2024/09/09 05:36:17 kn Exp $ */
/* $NetBSD: sysctl.c,v 1.9 1995/09/30 07:12:50 thorpej Exp $ */

/*
@@ -2973,8 +2973,7 @@ findname(char *string, char *level, char **bufp, struct list *namelist)
void
usage(void)
{

(void)fprintf(stderr,
"usage: sysctl [-Aanq] [name[=value]]\n");
"usage: sysctl [-Aanq] [name[=value] ...]\n");
exit(1);
}

@@ -1,4 +1,4 @@
# $OpenBSD: files,v 1.737 2024/09/04 07:45:08 jsg Exp $
# $OpenBSD: files,v 1.738 2024/09/09 03:50:14 jsg Exp $
# $NetBSD: files,v 1.87 1996/05/19 17:17:50 jonathan Exp $

# @(#)files.newconf 7.5 (Berkeley) 5/10/93
@@ -957,11 +957,12 @@ file nfs/krpc_subr.c nfsclient
file nfs/nfs_bio.c nfsclient
file nfs/nfs_boot.c nfsclient
file nfs/nfs_debug.c nfsclient & ddb
file nfs/nfs_node.c nfsclient
file nfs/nfs_kq.c nfsclient
file nfs/nfs_node.c nfsclient
file nfs/nfs_serv.c nfsserver
file nfs/nfs_socket.c nfsserver | nfsclient
file nfs/nfs_srvcache.c nfsserver
file nfs/nfs_srvsubs.c nfsserver
file nfs/nfs_subs.c nfsserver | nfsclient
file nfs/nfs_syscalls.c nfsserver | nfsclient
file nfs/nfs_vfsops.c nfsclient

@@ -100,6 +100,7 @@ struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock)
amdgpu_afmt_calc_cts(clock, &res.cts_32khz, &res.n_32khz, 32000);
amdgpu_afmt_calc_cts(clock, &res.cts_44_1khz, &res.n_44_1khz, 44100);
amdgpu_afmt_calc_cts(clock, &res.cts_48khz, &res.n_48khz, 48000);
res.clock = clock;

return res;
}

@@ -407,6 +407,10 @@ static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
"Called with userptr BO"))
return -EINVAL;

/* bo has been pinned, not need validate it */
if (bo->tbo.pin_count)
return 0;

amdgpu_bo_placement_from_domain(bo, domain);

ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
@@ -2631,7 +2635,7 @@ static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_i

/* keep mem without hmm range at userptr_inval_list */
if (!mem->range)
continue;
continue;

/* Only check mem with hmm range associated */
valid = amdgpu_ttm_tt_get_user_pages_done(
@@ -2848,9 +2852,6 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
if (!attachment->is_mapped)
continue;

if (attachment->bo_va->base.bo->tbo.pin_count)
continue;

kfd_mem_dmaunmap_attachment(mem, attachment);
ret = update_gpuvm_pte(mem, attachment, &sync_obj);
if (ret) {

@@ -1476,6 +1476,8 @@ int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev,
(u32)le32_to_cpu(*((u32 *)reg_data + j));
j++;
} else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
if (i == 0)
continue;
reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
reg_table->mc_reg_table_entry[num_ranges].mc_data[i - 1];
}

@@ -213,6 +213,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
struct amdgpu_firmware_info *ucode;

id = fw_type_convert(cgs_device, type);
if (id >= AMDGPU_UCODE_ID_MAXIMUM)
return -EINVAL;

ucode = &adev->firmware.ucode[id];
if (ucode->fw == NULL)
return -EINVAL;

@@ -4605,7 +4605,8 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
shadow = vmbo->shadow;

/* No need to recover an evicted BO */
if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
if (!shadow->tbo.resource ||
shadow->tbo.resource->mem_type != TTM_PL_TT ||
shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
continue;
@@ -5372,7 +5373,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
* to put adev in the 1st position.
*/
INIT_LIST_HEAD(&device_list);
if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
list_add_tail(&tmp_adev->reset_list, &device_list);
if (gpu_reset_for_dev_remove && adev->shutdown)

@@ -1567,7 +1567,7 @@ static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
break;
case 2:
mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
adev->gmc.mall_size = mall_size_per_umc * adev->gmc.num_umc;
adev->gmc.mall_size = (uint64_t)mall_size_per_umc * adev->gmc.num_umc;
break;
default:
dev_err(adev->dev,

@@ -179,7 +179,7 @@ static int __amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
* Returns the number of bytes read/written; -errno on error.
*/
static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
u8 *eeprom_buf, u16 buf_size, bool read)
u8 *eeprom_buf, u32 buf_size, bool read)
{
const struct i2c_adapter_quirks *quirks = i2c_adap->quirks;
u16 limit;
@@ -225,7 +225,7 @@ static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,

int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap,
u32 eeprom_addr, u8 *eeprom_buf,
u16 bytes)
u32 bytes)
{
return amdgpu_eeprom_xfer(i2c_adap, eeprom_addr, eeprom_buf, bytes,
true);
@@ -233,7 +233,7 @@ int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap,

int amdgpu_eeprom_write(struct i2c_adapter *i2c_adap,
u32 eeprom_addr, u8 *eeprom_buf,
u16 bytes)
u32 bytes)
{
return amdgpu_eeprom_xfer(i2c_adap, eeprom_addr, eeprom_buf, bytes,
false);

@@ -28,10 +28,10 @@

int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap,
u32 eeprom_addr, u8 *eeprom_buf,
u16 bytes);
u32 bytes);

int amdgpu_eeprom_write(struct i2c_adapter *i2c_adap,
u32 eeprom_addr, u8 *eeprom_buf,
u16 bytes);
u32 bytes);

#endif

@@ -34,6 +34,7 @@
#include <asm/set_memory.h>
#endif
#include "amdgpu.h"
#include "amdgpu_reset.h"
#include <drm/drm_drv.h>
#include <drm/ttm/ttm_tt.h>

@@ -404,7 +405,10 @@ void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev)
return;

mb();
amdgpu_device_flush_hdp(adev, NULL);
if (down_read_trylock(&adev->reset_domain->sem)) {
amdgpu_device_flush_hdp(adev, NULL);
up_read(&adev->reset_domain->sem);
}
for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
}

@@ -1336,6 +1336,9 @@ static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
uint8_t dst_num_links = node_info.num_links;

hive = amdgpu_get_xgmi_hive(psp->adev);
if (WARN_ON(!hive))
return;

list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
struct psp_xgmi_topology_info *mirror_top_info;
int j;

@@ -352,7 +352,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->max_dw = max_dw;
ring->hw_prio = hw_prio;

if (!ring->no_scheduler) {
if (!ring->no_scheduler && ring->funcs->type < AMDGPU_HW_IP_NUM) {
hw_ip = ring->funcs->type;
num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] =
@@ -469,8 +469,9 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_ring *ring = file_inode(f)->i_private;
int r, i;
uint32_t value, result, early[3];
loff_t i;
int r;

if (*pos & 3 || size & 3)
return -EINVAL;

@@ -135,6 +135,10 @@ static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __u
mutex_unlock(&psp->securedisplay_context.mutex);
break;
case 2:
if (size < 3 || phy_id >= TA_SECUREDISPLAY_MAX_PHY) {
dev_err(adev->dev, "Invalid input: %s\n", str);
return -EINVAL;
}
mutex_lock(&psp->securedisplay_context.mutex);
psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);

@@ -616,7 +616,7 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
vf2pf_info->checksum =
amd_sriov_msg_checksum(
vf2pf_info, vf2pf_info->header.size, 0, 0);
vf2pf_info, sizeof(*vf2pf_info), 0, 0);

return 0;
}
@@ -999,6 +999,9 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v
return 0;
}

if (amdgpu_device_skip_hw_access(adev))
return 0;

reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[xcc_id];
scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
@@ -1074,6 +1077,9 @@ void amdgpu_sriov_wreg(struct amdgpu_device *adev,
{
u32 rlcg_flag;

if (amdgpu_device_skip_hw_access(adev))
return;

if (!amdgpu_sriov_runtime(adev) &&
amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag, xcc_id);
@@ -1091,6 +1097,9 @@ u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
{
u32 rlcg_flag;

if (amdgpu_device_skip_hw_access(adev))
return 0;

if (!amdgpu_sriov_runtime(adev) &&
amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag, xcc_id);

@@ -500,6 +500,12 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,

if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
mode = __aqua_vanjaram_get_auto_mode(xcp_mgr);
if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) {
dev_err(adev->dev,
"Invalid config, no compatible compute partition mode found, available memory partitions: %d",
adev->gmc.num_mem_partitions);
return -EINVAL;
}
} else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) {
dev_err(adev->dev,
"Invalid compute partition mode requested, requested: %s, available memory partitions: %d",

@@ -70,6 +70,8 @@ static u32 df_v1_7_get_hbm_channel_number(struct amdgpu_device *adev)
int fb_channel_number;

fb_channel_number = adev->df.funcs->get_fb_channel_number(adev);
if (fb_channel_number >= ARRAY_SIZE(df_v1_7_channel_number))
fb_channel_number = 0;

return df_v1_7_channel_number[fb_channel_number];
}

@@ -384,7 +384,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
else
WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);

if (!ras->disable_ras_err_cnt_harvest) {
if (ras && !ras->disable_ras_err_cnt_harvest && obj) {
/*
* clear error status after ras_controller_intr
* according to hw team and count ue number

@@ -42,8 +42,6 @@
#define CRAT_OEMTABLEID_LENGTH 8
#define CRAT_RESERVED_LENGTH 6

#define CRAT_OEMID_64BIT_MASK ((1ULL << (CRAT_OEMID_LENGTH * 8)) - 1)

/* Compute Unit flags */
#define COMPUTE_UNIT_CPU (1 << 0) /* Create Virtual CRAT for CPU */
#define COMPUTE_UNIT_GPU (1 << 1) /* Create Virtual CRAT for GPU */

@@ -103,7 +103,8 @@ void debug_event_write_work_handler(struct work_struct *work)
struct kfd_process,
debug_event_workarea);

kernel_write(process->dbg_ev_file, &write_data, 1, &pos);
if (process->debug_trap_enabled && process->dbg_ev_file)
kernel_write(process->dbg_ev_file, &write_data, 1, &pos);
}

/* update process/device/queue exception status, write to descriptor
@@ -645,6 +646,7 @@ int kfd_dbg_trap_disable(struct kfd_process *target)
else if (target->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_DISABLED)
target->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_ENABLED;

cancel_work_sync(&target->debug_event_workarea);
fput(target->dbg_ev_file);
target->dbg_ev_file = NULL;

@@ -28,6 +28,7 @@
#include "kfd_priv.h"
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_reset.h"

static inline struct process_queue_node *get_queue_by_qid(
struct process_queue_manager *pqm, unsigned int qid)
@@ -87,8 +88,12 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
return;

dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
if (dev->kfd->shared_resources.enable_mes)
amdgpu_mes_flush_shader_debugger(dev->adev, pdd->proc_ctx_gpu_addr);
if (dev->kfd->shared_resources.enable_mes &&
down_read_trylock(&dev->adev->reset_domain->sem)) {
amdgpu_mes_flush_shader_debugger(dev->adev,
pdd->proc_ctx_gpu_addr);
up_read(&dev->adev->reset_domain->sem);
}
pdd->already_dequeued = true;
}

@@ -958,8 +958,7 @@ static void kfd_update_system_properties(void)
dev = list_last_entry(&topology_device_list,
struct kfd_topology_device, list);
if (dev) {
sys_props.platform_id =
(*((uint64_t *)dev->oem_id)) & CRAT_OEMID_64BIT_MASK;
sys_props.platform_id = dev->oem_id64;
sys_props.platform_oem = *((uint64_t *)dev->oem_table_id);
sys_props.platform_rev = dev->oem_revision;
}

@@ -154,7 +154,10 @@ struct kfd_topology_device {
struct attribute attr_gpuid;
struct attribute attr_name;
struct attribute attr_props;
uint8_t oem_id[CRAT_OEMID_LENGTH];
union {
uint8_t oem_id[CRAT_OEMID_LENGTH];
uint64_t oem_id64;
};
uint8_t oem_table_id[CRAT_OEMTABLEID_LENGTH];
uint32_t oem_revision;
};

@@ -4361,7 +4361,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)

/* There is one primary plane per CRTC */
primary_planes = dm->dc->caps.max_streams;
ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
if (primary_planes > AMDGPU_MAX_PLANES) {
DRM_ERROR("DM: Plane nums out of 6 planes\n");
return -EINVAL;
}

/*
* Initialize primary planes, implicit planes for legacy IOCTLS.
@@ -8289,15 +8292,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->stream_update.vrr_infopacket =
&acrtc_state->stream->vrr_infopacket;
}
} else if (cursor_update && acrtc_state->active_planes > 0 &&
acrtc_attach->base.state->event) {
drm_crtc_vblank_get(pcrtc);

} else if (cursor_update && acrtc_state->active_planes > 0) {
spin_lock_irqsave(&pcrtc->dev->event_lock, flags);

acrtc_attach->event = acrtc_attach->base.state->event;
acrtc_attach->base.state->event = NULL;

if (acrtc_attach->base.state->event) {
drm_crtc_vblank_get(pcrtc);
acrtc_attach->event = acrtc_attach->base.state->event;
acrtc_attach->base.state->event = NULL;
}
spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
}

@@ -49,7 +49,7 @@

#define AMDGPU_DM_MAX_NUM_EDP 2

#define AMDGPU_DMUB_NOTIFICATION_MAX 5
#define AMDGPU_DMUB_NOTIFICATION_MAX 6

#define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID 0x00001A
#define AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE 0x40

@ -667,6 +667,9 @@ static enum bp_result get_ss_info_v3_1(
|
||||
ss_table_header_include = ((ATOM_ASIC_INTERNAL_SS_INFO_V3 *) bios_get_image(&bp->base,
|
||||
DATA_TABLES(ASIC_InternalSS_Info),
|
||||
struct_size(ss_table_header_include, asSpreadSpectrum, 1)));
|
||||
if (!ss_table_header_include)
|
||||
return BP_RESULT_UNSUPPORTED;
|
||||
|
||||
table_size =
|
||||
(le16_to_cpu(ss_table_header_include->sHeader.usStructureSize)
|
||||
- sizeof(ATOM_COMMON_TABLE_HEADER))
|
||||
@ -1036,6 +1039,8 @@ static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
|
||||
&bp->base,
|
||||
DATA_TABLES(ASIC_InternalSS_Info),
|
||||
struct_size(header, asSpreadSpectrum, 1)));
|
||||
if (!header)
|
||||
return result;
|
||||
|
||||
memset(info, 0, sizeof(struct spread_spectrum_info));
|
||||
|
||||
@ -1109,6 +1114,8 @@ static enum bp_result get_ss_info_from_ss_info_table(
|
||||
get_atom_data_table_revision(header, &revision);
|
||||
|
||||
tbl = GET_IMAGE(ATOM_SPREAD_SPECTRUM_INFO, DATA_TABLES(SS_Info));
|
||||
if (!tbl)
|
||||
return result;
|
||||
|
||||
if (1 != revision.major || 2 > revision.minor)
|
||||
return result;
|
||||
@ -1636,6 +1643,8 @@ static uint32_t get_ss_entry_number_from_ss_info_tbl(
|
||||
|
||||
tbl = GET_IMAGE(ATOM_SPREAD_SPECTRUM_INFO,
|
||||
DATA_TABLES(SS_Info));
|
||||
if (!tbl)
|
||||
return number;
|
||||
|
||||
if (1 != revision.major || 2 > revision.minor)
|
||||
return number;
|
||||
@ -1718,6 +1727,8 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
|
||||
&bp->base,
|
||||
DATA_TABLES(ASIC_InternalSS_Info),
|
||||
struct_size(header_include, asSpreadSpectrum, 1)));
|
||||
if (!header_include)
|
||||
return 0;
|
||||
|
||||
size = (le16_to_cpu(header_include->sHeader.usStructureSize)
|
||||
- sizeof(ATOM_COMMON_TABLE_HEADER))
|
||||
@ -1756,6 +1767,9 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
|
||||
header_include = ((ATOM_ASIC_INTERNAL_SS_INFO_V3 *) bios_get_image(&bp->base,
|
||||
DATA_TABLES(ASIC_InternalSS_Info),
|
||||
struct_size(header_include, asSpreadSpectrum, 1)));
|
||||
if (!header_include)
|
||||
return number;
|
||||
|
||||
size = (le16_to_cpu(header_include->sHeader.usStructureSize) -
|
||||
sizeof(ATOM_COMMON_TABLE_HEADER)) /
|
||||
sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
|
||||
@ -2552,8 +2566,8 @@ static enum bp_result construct_integrated_info(
|
||||
|
||||
/* Sort voltage table from low to high*/
|
||||
if (result == BP_RESULT_OK) {
|
||||
uint32_t i;
|
||||
uint32_t j;
|
||||
int32_t i;
|
||||
int32_t j;
|
||||
|
||||
for (i = 1; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
|
||||
for (j = i; j > 0; --j) {
|
||||
|
@ -2935,8 +2935,11 @@ static enum bp_result construct_integrated_info(
|
||||
struct atom_common_table_header *header;
|
||||
struct atom_data_revision revision;
|
||||
|
||||
uint32_t i;
|
||||
uint32_t j;
|
||||
int32_t i;
|
||||
int32_t j;
|
||||
|
||||
if (!info)
|
||||
return result;
|
||||
|
||||
if (info && DATA_TABLES(integratedsysteminfo)) {
|
||||
header = GET_IMAGE(struct atom_common_table_header,
|
||||
|
@ -484,7 +484,8 @@ static void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_sm
|
||||
ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
|
||||
|
||||
/* Modify previous watermark range to cover up to max */
|
||||
ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
|
||||
if (num_valid_sets > 0)
|
||||
ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
|
||||
}
|
||||
num_valid_sets++;
|
||||
}
|
||||
|
@ -1298,6 +1298,7 @@ struct dc *dc_create(const struct dc_init_data *init_params)
|
||||
return NULL;
|
||||
|
||||
if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
|
||||
dc->caps.linear_pitch_alignment = 64;
|
||||
if (!dc_construct_ctx(dc, init_params))
|
||||
goto destruct_dc;
|
||||
} else {
|
||||
|
@ -3927,6 +3927,9 @@ void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
|
||||
|
||||
enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream)
|
||||
{
|
||||
if (dc == NULL || stream == NULL)
|
||||
return DC_ERROR_UNEXPECTED;
|
||||
|
||||
struct dc_link *link = stream->link;
|
||||
struct timing_generator *tg = dc->res_pool->timing_generators[0];
|
||||
enum dc_status res = DC_OK;
|
||||
|
@ -102,7 +102,8 @@ static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait,
|
||||
break;
|
||||
}
|
||||
|
||||
fsleep(500);
|
||||
/* must *not* be fsleep - this can be called from high irq levels */
|
||||
udelay(500);
|
||||
}
|
||||
|
||||
/* assert if max retry hit */
|
||||
|
@ -690,6 +690,9 @@ static void wbscl_set_scaler_filter(
|
||||
int pair;
|
||||
uint16_t odd_coef, even_coef;
|
||||
|
||||
if (!filter)
|
||||
return;
|
||||
|
||||
for (phase = 0; phase < (NUM_PHASES / 2 + 1); phase++) {
|
||||
for (pair = 0; pair < tap_pairs; pair++) {
|
||||
even_coef = filter[phase * taps + 2 * pair];
|
||||
|
@ -1453,10 +1453,9 @@ void dcn_bw_update_from_pplib_fclks(
|
||||
ASSERT(fclks->num_levels);
|
||||
|
||||
vmin0p65_idx = 0;
|
||||
vmid0p72_idx = fclks->num_levels -
|
||||
(fclks->num_levels > 2 ? 3 : (fclks->num_levels > 1 ? 2 : 1));
|
||||
vnom0p8_idx = fclks->num_levels - (fclks->num_levels > 1 ? 2 : 1);
|
||||
vmax0p9_idx = fclks->num_levels - 1;
|
||||
vmid0p72_idx = fclks->num_levels > 2 ? fclks->num_levels - 3 : 0;
|
||||
vnom0p8_idx = fclks->num_levels > 1 ? fclks->num_levels - 2 : 0;
|
||||
vmax0p9_idx = fclks->num_levels > 0 ? fclks->num_levels - 1 : 0;
|
||||
|
||||
dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 =
|
||||
32 * (fclks->data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0;
|
||||
|
@ -304,6 +304,16 @@ void dcn302_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
|
||||
dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
|
||||
}
|
||||
|
||||
/* bw_params->clk_table.entries[MAX_NUM_DPM_LVL].
|
||||
* MAX_NUM_DPM_LVL is 8.
|
||||
* dcn3_02_soc.clock_limits[DC__VOLTAGE_STATES].
|
||||
* DC__VOLTAGE_STATES is 40.
|
||||
*/
|
||||
if (num_states > MAX_NUM_DPM_LVL) {
|
||||
ASSERT(0);
|
||||
return;
|
||||
}
|
||||
|
||||
dcn3_02_soc.num_states = num_states;
|
||||
for (i = 0; i < dcn3_02_soc.num_states; i++) {
|
||||
dcn3_02_soc.clock_limits[i].state = i;
|
||||
|
@ -299,6 +299,16 @@ void dcn303_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
|
||||
dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
|
||||
}
|
||||
|
||||
/* bw_params->clk_table.entries[MAX_NUM_DPM_LVL].
|
||||
* MAX_NUM_DPM_LVL is 8.
|
||||
* dcn3_02_soc.clock_limits[DC__VOLTAGE_STATES].
|
||||
* DC__VOLTAGE_STATES is 40.
|
||||
*/
|
||||
if (num_states > MAX_NUM_DPM_LVL) {
|
||||
ASSERT(0);
|
||||
return;
|
||||
}
|
||||
|
||||
dcn3_03_soc.num_states = num_states;
|
||||
for (i = 0; i < dcn3_03_soc.num_states; i++) {
|
||||
dcn3_03_soc.clock_limits[i].state = i;
|
||||
|
@ -2885,6 +2885,16 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
|
||||
dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
|
||||
}
|
||||
|
||||
/* bw_params->clk_table.entries[MAX_NUM_DPM_LVL].
|
||||
* MAX_NUM_DPM_LVL is 8.
|
||||
* dcn3_02_soc.clock_limits[DC__VOLTAGE_STATES].
|
||||
* DC__VOLTAGE_STATES is 40.
|
||||
*/
|
||||
if (num_states > MAX_NUM_DPM_LVL) {
|
||||
ASSERT(0);
|
||||
return;
|
||||
}
|
||||
|
||||
dcn3_2_soc.num_states = num_states;
|
||||
for (i = 0; i < dcn3_2_soc.num_states; i++) {
|
||||
dcn3_2_soc.clock_limits[i].state = i;
|
||||
|
@ -789,6 +789,16 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
|
||||
dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
|
||||
}
|
||||
|
||||
/* bw_params->clk_table.entries[MAX_NUM_DPM_LVL].
|
||||
* MAX_NUM_DPM_LVL is 8.
|
||||
* dcn3_02_soc.clock_limits[DC__VOLTAGE_STATES].
|
||||
* DC__VOLTAGE_STATES is 40.
|
||||
*/
|
||||
if (num_states > MAX_NUM_DPM_LVL) {
|
||||
ASSERT(0);
|
||||
return;
|
||||
}
|
||||
|
||||
dcn3_21_soc.num_states = num_states;
|
||||
for (i = 0; i < dcn3_21_soc.num_states; i++) {
|
||||
dcn3_21_soc.clock_limits[i].state = i;
|
||||
|
@ -1099,8 +1099,13 @@ void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib)
|
||||
|
||||
// Total Available Pipes Support Check
|
||||
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
|
||||
total_pipes += mode_lib->vba.DPPPerPlane[k];
|
||||
pipe_idx = get_pipe_idx(mode_lib, k);
|
||||
if (pipe_idx == -1) {
|
||||
ASSERT(0);
|
||||
continue; // skip inactive planes
|
||||
}
|
||||
total_pipes += mode_lib->vba.DPPPerPlane[k];
|
||||
|
||||
if (mode_lib->vba.cache_pipes[pipe_idx].clks_cfg.dppclk_mhz > 0.0)
|
||||
mode_lib->vba.DPPCLK[k] = mode_lib->vba.cache_pipes[pipe_idx].clks_cfg.dppclk_mhz;
|
||||
else
|
||||
|
@ -56,7 +56,7 @@ struct gpio_service *dal_gpio_service_create(
|
||||
struct dc_context *ctx)
|
||||
{
|
||||
struct gpio_service *service;
|
||||
uint32_t index_of_id;
|
||||
int32_t index_of_id;
|
||||
|
||||
service = kzalloc(sizeof(struct gpio_service), GFP_KERNEL);
|
||||
|
||||
@ -112,7 +112,7 @@ struct gpio_service *dal_gpio_service_create(
|
||||
return service;
|
||||
|
||||
failure_2:
|
||||
while (index_of_id) {
|
||||
while (index_of_id > 0) {
|
||||
--index_of_id;
|
||||
kfree(service->busyness[index_of_id]);
|
||||
}
|
||||
@ -239,6 +239,9 @@ static bool is_pin_busy(
|
||||
enum gpio_id id,
|
||||
uint32_t en)
|
||||
{
|
||||
if (id == GPIO_ID_UNKNOWN)
|
||||
return false;
|
||||
|
||||
return service->busyness[id][en];
|
||||
}
|
||||
|
||||
@ -247,6 +250,9 @@ static void set_pin_busy(
|
||||
enum gpio_id id,
|
||||
uint32_t en)
|
||||
{
|
||||
if (id == GPIO_ID_UNKNOWN)
|
||||
return;
|
||||
|
||||
service->busyness[id][en] = true;
|
||||
}
|
||||
|
||||
@ -255,6 +261,9 @@ static void set_pin_free(
|
||||
enum gpio_id id,
|
||||
uint32_t en)
|
||||
{
|
||||
if (id == GPIO_ID_UNKNOWN)
|
||||
return;
|
||||
|
||||
service->busyness[id][en] = false;
|
||||
}
|
||||
|
||||
@ -263,7 +272,7 @@ enum gpio_result dal_gpio_service_lock(
|
||||
enum gpio_id id,
|
||||
uint32_t en)
|
||||
{
|
||||
if (!service->busyness[id]) {
|
||||
if (id != GPIO_ID_UNKNOWN && !service->busyness[id]) {
|
||||
ASSERT_CRITICAL(false);
|
||||
return GPIO_RESULT_OPEN_FAILED;
|
||||
}
|
||||
@ -277,7 +286,7 @@ enum gpio_result dal_gpio_service_unlock(
|
||||
enum gpio_id id,
|
||||
uint32_t en)
|
||||
{
|
||||
if (!service->busyness[id]) {
|
||||
if (id != GPIO_ID_UNKNOWN && !service->busyness[id]) {
|
||||
ASSERT_CRITICAL(false);
|
||||
return GPIO_RESULT_OPEN_FAILED;
|
||||
}
|
||||
|
@ -130,13 +130,21 @@ static bool hdmi_14_process_transaction(
|
||||
const uint8_t hdcp_i2c_addr_link_primary = 0x3a; /* 0x74 >> 1*/
|
||||
const uint8_t hdcp_i2c_addr_link_secondary = 0x3b; /* 0x76 >> 1*/
|
||||
struct i2c_command i2c_command;
|
||||
uint8_t offset = hdcp_i2c_offsets[message_info->msg_id];
|
||||
uint8_t offset;
|
||||
struct i2c_payload i2c_payloads[] = {
|
||||
{ true, 0, 1, &offset },
|
||||
{ true, 0, 1, 0 },
|
||||
/* actual hdcp payload, will be filled later, zeroed for now*/
|
||||
{ 0 }
|
||||
};
|
||||
|
||||
if (message_info->msg_id == HDCP_MESSAGE_ID_INVALID) {
|
||||
DC_LOG_ERROR("%s: Invalid message_info msg_id - %d\n", __func__, message_info->msg_id);
|
||||
return false;
|
||||
}
|
||||
|
||||
offset = hdcp_i2c_offsets[message_info->msg_id];
|
||||
i2c_payloads[0].data = &offset;
|
||||
|
||||
switch (message_info->link) {
|
||||
case HDCP_LINK_SECONDARY:
|
||||
i2c_payloads[0].address = hdcp_i2c_addr_link_secondary;
|
||||
@ -310,6 +318,11 @@ static bool dp_11_process_transaction(
|
||||
struct dc_link *link,
|
||||
struct hdcp_protection_message *message_info)
|
||||
{
|
||||
if (message_info->msg_id == HDCP_MESSAGE_ID_INVALID) {
|
||||
DC_LOG_ERROR("%s: Invalid message_info msg_id - %d\n", __func__, message_info->msg_id);
|
||||
return false;
|
||||
}
|
||||
|
||||
return dpcd_access_helper(
|
||||
link,
|
||||
message_info->length,
|
||||
|
@ -528,7 +528,7 @@ static bool decide_fallback_link_setting_max_bw_policy(
|
||||
struct dc_link_settings *cur,
|
||||
enum link_training_result training_result)
|
||||
{
|
||||
uint8_t cur_idx = 0, next_idx;
|
||||
uint32_t cur_idx = 0, next_idx;
|
||||
bool found = false;
|
||||
|
||||
if (training_result == LINK_TRAINING_ABORT)
|
||||
@ -908,21 +908,17 @@ bool link_decide_link_settings(struct dc_stream_state *stream,
|
||||
|
||||
memset(link_setting, 0, sizeof(*link_setting));
|
||||
|
||||
/* if preferred is specified through AMDDP, use it, if it's enough
|
||||
* to drive the mode
|
||||
*/
|
||||
if (link->preferred_link_setting.lane_count !=
|
||||
LANE_COUNT_UNKNOWN &&
|
||||
link->preferred_link_setting.link_rate !=
|
||||
LINK_RATE_UNKNOWN) {
|
||||
if (dc_is_dp_signal(stream->signal) &&
|
||||
link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
|
||||
link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) {
|
||||
/* if preferred is specified through AMDDP, use it, if it's enough
|
||||
* to drive the mode
|
||||
*/
|
||||
*link_setting = link->preferred_link_setting;
|
||||
return true;
|
||||
}
|
||||
|
||||
/* MST doesn't perform link training for now
|
||||
* TODO: add MST specific link training routine
|
||||
*/
|
||||
if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
|
||||
} else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
|
||||
/* MST doesn't perform link training for now
|
||||
* TODO: add MST specific link training routine
|
||||
*/
|
||||
decide_mst_link_settings(link, link_setting);
|
||||
} else if (link->connector_signal == SIGNAL_TYPE_EDP) {
|
||||
/* enable edp link optimization for DSC eDP case */
|
||||
|
@ -914,10 +914,10 @@ static enum dc_status configure_lttpr_mode_non_transparent(
|
||||
/* Driver does not need to train the first hop. Skip DPCD read and clear
|
||||
* AUX_RD_INTERVAL for DPTX-to-DPIA hop.
|
||||
*/
|
||||
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
|
||||
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && repeater_cnt > 0 && repeater_cnt < MAX_REPEATER_CNT)
|
||||
link->dpcd_caps.lttpr_caps.aux_rd_interval[--repeater_cnt] = 0;
|
||||
|
||||
for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) {
|
||||
for (repeater_id = repeater_cnt; repeater_id > 0 && repeater_id < MAX_REPEATER_CNT; repeater_id--) {
|
||||
aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 +
|
||||
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1));
|
||||
core_link_read_dpcd(
|
||||
|
@ -158,11 +158,16 @@ static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
|
||||
uint32_t cur_size = 0;
|
||||
uint32_t data_offset = 0;
|
||||
|
||||
if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID) {
|
||||
if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID ||
|
||||
msg_id >= MOD_HDCP_MESSAGE_ID_MAX)
|
||||
return MOD_HDCP_STATUS_DDC_FAILURE;
|
||||
}
|
||||
|
||||
if (is_dp_hdcp(hdcp)) {
|
||||
int num_dpcd_addrs = sizeof(hdcp_dpcd_addrs) /
|
||||
sizeof(hdcp_dpcd_addrs[0]);
|
||||
if (msg_id >= num_dpcd_addrs)
|
||||
return MOD_HDCP_STATUS_DDC_FAILURE;
|
||||
|
||||
while (buf_len > 0) {
|
||||
cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
|
||||
success = hdcp->config.ddc.funcs.read_dpcd(hdcp->config.ddc.handle,
|
||||
@ -177,6 +182,11 @@ static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
|
||||
data_offset += cur_size;
|
||||
}
|
||||
} else {
|
||||
int num_i2c_offsets = sizeof(hdcp_i2c_offsets) /
|
||||
sizeof(hdcp_i2c_offsets[0]);
|
||||
if (msg_id >= num_i2c_offsets)
|
||||
return MOD_HDCP_STATUS_DDC_FAILURE;
|
||||
|
||||
success = hdcp->config.ddc.funcs.read_i2c(
|
||||
hdcp->config.ddc.handle,
|
||||
HDCP_I2C_ADDR,
|
||||
@ -221,11 +231,16 @@ static enum mod_hdcp_status write(struct mod_hdcp *hdcp,
|
||||
uint32_t cur_size = 0;
|
||||
uint32_t data_offset = 0;
|
||||
|
||||
if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID) {
|
||||
if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID ||
|
||||
msg_id >= MOD_HDCP_MESSAGE_ID_MAX)
|
||||
return MOD_HDCP_STATUS_DDC_FAILURE;
|
||||
}
|
||||
|
||||
if (is_dp_hdcp(hdcp)) {
|
||||
int num_dpcd_addrs = sizeof(hdcp_dpcd_addrs) /
|
||||
sizeof(hdcp_dpcd_addrs[0]);
|
||||
if (msg_id >= num_dpcd_addrs)
|
||||
return MOD_HDCP_STATUS_DDC_FAILURE;
|
||||
|
||||
while (buf_len > 0) {
|
||||
cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
|
||||
success = hdcp->config.ddc.funcs.write_dpcd(
|
||||
@ -241,6 +256,11 @@ static enum mod_hdcp_status write(struct mod_hdcp *hdcp,
|
||||
data_offset += cur_size;
|
||||
}
|
||||
} else {
|
||||
int num_i2c_offsets = sizeof(hdcp_i2c_offsets) /
|
||||
sizeof(hdcp_i2c_offsets[0]);
|
||||
if (msg_id >= num_i2c_offsets)
|
||||
return MOD_HDCP_STATUS_DDC_FAILURE;
|
||||
|
||||
hdcp->buf[0] = hdcp_i2c_offsets[msg_id];
|
||||
memmove(&hdcp->buf[1], buf, buf_len);
|
||||
success = hdcp->config.ddc.funcs.write_i2c(
|
||||
|
@ -99,7 +99,7 @@ static void pp_swctf_delayed_work_handler(struct work_struct *work)
|
||||
struct amdgpu_device *adev = hwmgr->adev;
|
||||
struct amdgpu_dpm_thermal *range =
|
||||
&adev->pm.dpm.thermal;
|
||||
uint32_t gpu_temperature, size;
|
||||
uint32_t gpu_temperature, size = sizeof(gpu_temperature);
|
||||
int ret;
|
||||
|
||||
/*
|
||||
|
@ -30,9 +30,8 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr)
|
||||
{
|
||||
int result;
|
||||
unsigned int i;
|
||||
unsigned int table_entries;
|
||||
struct pp_power_state *state;
|
||||
int size;
|
||||
int size, table_entries;
|
||||
|
||||
if (hwmgr->hwmgr_func->get_num_of_pp_table_entries == NULL)
|
||||
return 0;
|
||||
@ -40,15 +39,19 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr)
|
||||
if (hwmgr->hwmgr_func->get_power_state_size == NULL)
|
||||
return 0;
|
||||
|
||||
hwmgr->num_ps = table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr);
|
||||
table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr);
|
||||
|
||||
hwmgr->ps_size = size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) +
|
||||
size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) +
|
||||
sizeof(struct pp_power_state);
|
||||
|
||||
if (table_entries == 0 || size == 0) {
|
||||
if (table_entries <= 0 || size == 0) {
|
||||
pr_warn("Please check whether power state management is supported on this asic\n");
|
||||
hwmgr->num_ps = 0;
|
||||
hwmgr->ps_size = 0;
|
||||
return 0;
|
||||
}
|
||||
hwmgr->num_ps = table_entries;
|
||||
hwmgr->ps_size = size;
|
||||
|
||||
hwmgr->ps = kcalloc(table_entries, size, GFP_KERNEL);
|
||||
if (hwmgr->ps == NULL)
|
||||
|
@ -73,8 +73,9 @@ static int atomctrl_retrieve_ac_timing(
|
||||
j++;
|
||||
} else if ((table->mc_reg_address[i].uc_pre_reg_data &
|
||||
LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
|
||||
table->mc_reg_table_entry[num_ranges].mc_data[i] =
|
||||
table->mc_reg_table_entry[num_ranges].mc_data[i-1];
|
||||
if (i)
|
||||
table->mc_reg_table_entry[num_ranges].mc_data[i] =
|
||||
table->mc_reg_table_entry[num_ranges].mc_data[i-1];
|
||||
}
|
||||
}
|
||||
num_ranges++;
|
||||
|
@ -1036,7 +1036,9 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
|
||||
|
||||
switch (type) {
|
||||
case PP_SCLK:
|
||||
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);
|
||||
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */
|
||||
if (now == data->gfx_max_freq_limit/100)
|
||||
@ -1057,7 +1059,9 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
|
||||
i == 2 ? "*" : "");
|
||||
break;
|
||||
case PP_MCLK:
|
||||
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now);
|
||||
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
for (i = 0; i < mclk_table->count; i++)
|
||||
size += snprintf(buf + size, PAGE_SIZE - size, "%d: %uMhz %s\n",
|
||||
@ -1550,7 +1554,10 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
|
||||
}
|
||||
|
||||
if (input[0] == 0) {
|
||||
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
|
||||
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (input[1] < min_freq) {
|
||||
pr_err("Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
|
||||
input[1], min_freq);
|
||||
@ -1558,7 +1565,10 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
|
||||
}
|
||||
smu10_data->gfx_actual_soft_min_freq = input[1];
|
||||
} else if (input[0] == 1) {
|
||||
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
|
||||
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (input[1] > max_freq) {
|
||||
pr_err("Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
|
||||
input[1], max_freq);
|
||||
@ -1573,10 +1583,15 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
|
||||
pr_err("Input parameter number not correct\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
|
||||
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
|
||||
|
||||
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
|
||||
if (ret)
|
||||
return ret;
|
||||
smu10_data->gfx_actual_soft_min_freq = min_freq;
|
||||
|
||||
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
smu10_data->gfx_actual_soft_max_freq = max_freq;
|
||||
} else if (type == PP_OD_COMMIT_DPM_TABLE) {
|
||||
if (size != 0) {
|
||||
|
@ -5641,7 +5641,7 @@ static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint
|
||||
mode = input[size];
|
||||
switch (mode) {
|
||||
case PP_SMC_POWER_PROFILE_CUSTOM:
|
||||
if (size < 8 && size != 0)
|
||||
if (size != 8 && size != 0)
|
||||
return -EINVAL;
|
||||
/* If only CUSTOM is passed in, use the saved values. Check
|
||||
* that we actually have a CUSTOM profile by ensuring that
|
||||
|
@ -584,6 +584,7 @@ static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
|
||||
hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
|
||||
unsigned long clock = 0;
|
||||
uint32_t level;
|
||||
int ret;
|
||||
|
||||
if (NULL == table || table->count <= 0)
|
||||
return -EINVAL;
|
||||
@ -591,7 +592,9 @@ static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
|
||||
data->uvd_dpm.soft_min_clk = 0;
|
||||
data->uvd_dpm.hard_min_clk = 0;
|
||||
|
||||
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel, &level);
|
||||
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel, &level);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (level < table->count)
|
||||
clock = table->entries[level].vclk;
|
||||
@ -611,6 +614,7 @@ static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
|
||||
hwmgr->dyn_state.vce_clock_voltage_dependency_table;
|
||||
unsigned long clock = 0;
|
||||
uint32_t level;
|
||||
int ret;
|
||||
|
||||
if (NULL == table || table->count <= 0)
|
||||
return -EINVAL;
|
||||
@ -618,7 +622,9 @@ static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
|
||||
data->vce_dpm.soft_min_clk = 0;
|
||||
data->vce_dpm.hard_min_clk = 0;
|
||||
|
||||
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel, &level);
|
||||
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel, &level);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (level < table->count)
|
||||
clock = table->entries[level].ecclk;
|
||||
@ -638,6 +644,7 @@ static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
|
||||
hwmgr->dyn_state.acp_clock_voltage_dependency_table;
|
||||
unsigned long clock = 0;
|
||||
uint32_t level;
|
||||
int ret;
|
||||
|
||||
if (NULL == table || table->count <= 0)
|
||||
return -EINVAL;
|
||||
@ -645,7 +652,9 @@ static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
|
||||
data->acp_dpm.soft_min_clk = 0;
|
||||
data->acp_dpm.hard_min_clk = 0;
|
||||
|
||||
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel, &level);
|
||||
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel, &level);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (level < table->count)
|
||||
clock = table->entries[level].acpclk;
|
||||
|
@ -354,13 +354,13 @@ static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
|
||||
static int vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
|
||||
{
|
||||
struct vega10_hwmgr *data = hwmgr->backend;
|
||||
int i;
|
||||
uint32_t sub_vendor_id, hw_revision;
|
||||
uint32_t top32, bottom32;
|
||||
struct amdgpu_device *adev = hwmgr->adev;
|
||||
int ret, i;
|
||||
|
||||
vega10_initialize_power_tune_defaults(hwmgr);
|
||||
|
||||
@ -485,9 +485,12 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
|
||||
if (data->registry_data.vr0hot_enabled)
|
||||
data->smu_features[GNLD_VR0HOT].supported = true;
|
||||
|
||||
smum_send_msg_to_smc(hwmgr,
|
||||
ret = smum_send_msg_to_smc(hwmgr,
|
||||
PPSMC_MSG_GetSmuVersion,
|
||||
&hwmgr->smu_version);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* ACG firmware has major version 5 */
|
||||
if ((hwmgr->smu_version & 0xff000000) == 0x5000000)
|
||||
data->smu_features[GNLD_ACG].supported = true;
|
||||
@ -505,10 +508,16 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
|
||||
data->smu_features[GNLD_PCC_LIMIT].supported = true;
|
||||
|
||||
/* Get the SN to turn into a Unique ID */
|
||||
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
|
||||
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
|
||||
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef PPLIB_VEGA10_EVV_SUPPORT
|
||||
@ -882,7 +891,9 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
|
||||
|
||||
vega10_set_features_platform_caps(hwmgr);
|
||||
|
||||
vega10_init_dpm_defaults(hwmgr);
|
||||
result = vega10_init_dpm_defaults(hwmgr);
|
||||
if (result)
|
||||
return result;
|
||||
|
||||
#ifdef PPLIB_VEGA10_EVV_SUPPORT
|
||||
/* Get leakage voltage based on leakage ID. */
|
||||
@ -2350,15 +2361,20 @@ static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
|
||||
{
|
||||
struct vega10_hwmgr *data = hwmgr->backend;
|
||||
uint32_t agc_btc_response;
|
||||
int ret;
|
||||
|
||||
if (data->smu_features[GNLD_ACG].supported) {
|
||||
if (0 == vega10_enable_smc_features(hwmgr, true,
|
||||
data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap))
|
||||
data->smu_features[GNLD_DPM_PREFETCHER].enabled = true;
|
||||
|
||||
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg, NULL);
|
||||
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &agc_btc_response);
|
||||
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &agc_btc_response);
|
||||
if (ret)
|
||||
agc_btc_response = 0;
|
||||
|
||||
if (1 == agc_btc_response) {
|
||||
if (1 == data->acg_loop_state)
|
||||
@ -2571,8 +2587,11 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
|
||||
}
|
||||
}
|
||||
|
||||
pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
|
||||
result = pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
|
||||
VOLTAGE_OBJ_SVID2, &voltage_table);
|
||||
PP_ASSERT_WITH_CODE(!result,
|
||||
"Failed to get voltage table!",
|
||||
return result);
|
||||
pp_table->MaxVidStep = voltage_table.max_vid_step;
|
||||
|
||||
pp_table->GfxDpmVoltageMode =
|
||||
@ -3910,11 +3929,14 @@ static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
|
||||
uint32_t *query)
|
||||
{
|
||||
uint32_t value;
|
||||
int ret;
|
||||
|
||||
if (!query)
|
||||
return -EINVAL;
|
||||
|
||||
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr, &value);
|
||||
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr, &value);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* SMC returning actual watts, keep consistent with legacy asics, low 8 bit as 8 fractional bits */
|
||||
*query = value << 8;
|
||||
@ -4810,14 +4832,16 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
|
||||
uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
|
||||
PPTable_t *pptable = &(data->smc_state_table.pp_table);
|
||||
|
||||
int i, now, size = 0, count = 0;
|
||||
int i, ret, now, size = 0, count = 0;
|
||||
|
||||
switch (type) {
|
||||
case PP_SCLK:
|
||||
if (data->registry_data.sclk_dpm_key_disabled)
|
||||
break;
|
||||
|
||||
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex, &now);
|
||||
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex, &now);
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
if (hwmgr->pp_one_vf &&
|
||||
(hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK))
|
||||
@ -4833,7 +4857,9 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
|
||||
if (data->registry_data.mclk_dpm_key_disabled)
|
||||
break;
|
||||
|
||||
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);
|
||||
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
for (i = 0; i < mclk_table->count; i++)
|
||||
size += snprintf(buf + size, PAGE_SIZE - size, "%d: %uMhz %s\n",
|
||||
@ -4844,7 +4870,9 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
|
||||
if (data->registry_data.socclk_dpm_key_disabled)
|
||||
break;
|
||||
|
||||
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);
|
||||
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
for (i = 0; i < soc_table->count; i++)
|
||||
size += snprintf(buf + size, PAGE_SIZE - size, "%d: %uMhz %s\n",
|
||||
@ -4855,8 +4883,10 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
|
||||
if (data->registry_data.dcefclk_dpm_key_disabled)
|
||||
break;
|
||||
|
||||
smum_send_msg_to_smc_with_parameter(hwmgr,
|
||||
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
|
||||
PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now);
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
for (i = 0; i < dcef_table->count; i++)
|
||||
size += snprintf(buf + size, PAGE_SIZE - size, "%d: %uMhz %s\n",
|
||||
|
@ -293,12 +293,12 @@ static int vega12_set_features_platform_caps(struct pp_hwmgr *hwmgr)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr)
|
||||
static int vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr)
|
||||
{
|
||||
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
|
||||
struct amdgpu_device *adev = hwmgr->adev;
|
||||
uint32_t top32, bottom32;
|
||||
int i;
|
||||
int i, ret;
|
||||
|
||||
data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
|
||||
FEATURE_DPM_PREFETCHER_BIT;
|
||||
@ -364,10 +364,16 @@ static void vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr)
|
||||
}
|
||||
|
||||
/* Get the SN to turn into a Unique ID */
|
||||
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
|
||||
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
|
||||
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vega12_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
|
||||
@ -410,7 +416,11 @@ static int vega12_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
|
||||
|
||||
vega12_set_features_platform_caps(hwmgr);
|
||||
|
||||
vega12_init_dpm_defaults(hwmgr);
|
||||
result = vega12_init_dpm_defaults(hwmgr);
|
||||
if (result) {
|
||||
pr_err("%s failed\n", __func__);
|
||||
return result;
|
||||
}
|
||||
|
||||
/* Parse pptable data read from VBIOS */
|
||||
vega12_set_private_data_based_on_pptable(hwmgr);
|
||||
|
@ -328,12 +328,12 @@ static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
|
||||
static int vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
|
||||
{
|
||||
struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
|
||||
struct amdgpu_device *adev = hwmgr->adev;
|
||||
uint32_t top32, bottom32;
|
||||
int i;
|
||||
int i, ret;
|
||||
|
||||
data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
|
||||
FEATURE_DPM_PREFETCHER_BIT;
|
||||
@ -404,10 +404,17 @@ static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
|
||||
}
|
||||
|
||||
/* Get the SN to turn into a Unique ID */
|
||||
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
|
||||
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
|
||||
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vega20_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
|
||||
@ -427,6 +434,7 @@ static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
|
||||
{
|
||||
struct vega20_hwmgr *data;
|
||||
struct amdgpu_device *adev = hwmgr->adev;
|
||||
int result;
|
||||
|
||||
data = kzalloc(sizeof(struct vega20_hwmgr), GFP_KERNEL);
|
||||
if (data == NULL)
|
||||
@ -452,8 +460,11 @@ static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
|
||||
|
||||
vega20_set_features_platform_caps(hwmgr);
|
||||
|
||||
vega20_init_dpm_defaults(hwmgr);
|
||||
|
||||
result = vega20_init_dpm_defaults(hwmgr);
|
||||
if (result) {
|
||||
pr_err("%s failed\n", __func__);
|
||||
return result;
|
||||
}
|
||||
/* Parse pptable data read from VBIOS */
|
||||
vega20_set_private_data_based_on_pptable(hwmgr);
|
||||
|
||||
@ -4091,9 +4102,11 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
|
||||
if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
|
||||
struct vega20_hwmgr *data =
|
||||
(struct vega20_hwmgr *)(hwmgr->backend);
|
||||
if (size == 0 && !data->is_custom_profile_set)
|
||||
|
||||
if (size != 10 && size != 0)
|
||||
return -EINVAL;
|
||||
if (size < 10 && size != 0)
|
||||
|
||||
if (size == 0 && !data->is_custom_profile_set)
|
||||
return -EINVAL;
|
||||
|
||||
result = vega20_get_activity_monitor_coeff(hwmgr,
|
||||
@ -4155,6 +4168,8 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
|
||||
activity_monitor.Fclk_PD_Data_error_coeff = input[8];
|
||||
activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9];
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
result = vega20_set_activity_monitor_coeff(hwmgr,
|
||||
|
@ -130,13 +130,17 @@ int vega10_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
|
||||
uint64_t *features_enabled)
|
||||
{
|
||||
uint32_t enabled_features;
|
||||
int ret;
|
||||
|
||||
if (features_enabled == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
smum_send_msg_to_smc(hwmgr,
|
||||
ret = smum_send_msg_to_smc(hwmgr,
|
||||
PPSMC_MSG_GetEnabledSmuFeatures,
|
||||
&enabled_features);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
*features_enabled = enabled_features;
|
||||
|
||||
return 0;
|
||||
|
@ -1222,19 +1222,22 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
|
||||
value);
|
||||
}
|
||||
|
||||
static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type)
|
||||
static int navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type)
|
||||
{
|
||||
PPTable_t *pptable = smu->smu_table.driver_pptable;
|
||||
DpmDescriptor_t *dpm_desc = NULL;
|
||||
uint32_t clk_index = 0;
|
||||
int clk_index = 0;
|
||||
|
||||
clk_index = smu_cmn_to_asic_specific_index(smu,
|
||||
CMN2ASIC_MAPPING_CLK,
|
||||
clk_type);
|
||||
if (clk_index < 0)
|
||||
return clk_index;
|
||||
|
||||
dpm_desc = &pptable->DpmDescriptor[clk_index];
|
||||
|
||||
/* 0 - Fine grained DPM, 1 - Discrete DPM */
|
||||
return dpm_desc->SnapToDiscrete == 0;
|
||||
return dpm_desc->SnapToDiscrete == 0 ? 1 : 0;
|
||||
}
|
||||
|
||||
static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_CAP cap)
|
||||
@ -1290,7 +1293,11 @@ static int navi10_emit_clk_levels(struct smu_context *smu,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
|
||||
ret = navi10_is_support_fine_grained_dpm(smu, clk_type);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (!ret) {
|
||||
for (i = 0; i < count; i++) {
|
||||
ret = smu_v11_0_get_dpm_freq_by_index(smu,
|
||||
clk_type, i, &value);
|
||||
@ -1499,7 +1506,11 @@ static int navi10_print_clk_levels(struct smu_context *smu,
|
||||
if (ret)
|
||||
return size;
|
||||
|
||||
if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
|
||||
ret = navi10_is_support_fine_grained_dpm(smu, clk_type);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (!ret) {
|
||||
for (i = 0; i < count; i++) {
|
||||
ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &value);
|
||||
if (ret)
|
||||
@ -1668,7 +1679,11 @@ static int navi10_force_clk_levels(struct smu_context *smu,
|
||||
case SMU_UCLK:
|
||||
case SMU_FCLK:
|
||||
/* There is only 2 levels for fine grained DPM */
|
||||
if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
|
||||
ret = navi10_is_support_fine_grained_dpm(smu, clk_type);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (ret) {
|
||||
soft_max_level = (soft_max_level >= 1 ? 1 : 0);
|
||||
soft_min_level = (soft_min_level >= 1 ? 1 : 0);
|
||||
}
|
||||
|
@ -1017,6 +1017,18 @@ static int vangogh_get_dpm_ultimate_freq(struct smu_context *smu,
|
||||
}
|
||||
}
|
||||
if (min) {
|
||||
ret = vangogh_get_profiling_clk_mask(smu,
|
||||
AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK,
|
||||
NULL,
|
||||
NULL,
|
||||
&mclk_mask,
|
||||
&fclk_mask,
|
||||
&soc_mask);
|
||||
if (ret)
|
||||
goto failed;
|
||||
|
||||
vclk_mask = dclk_mask = 0;
|
||||
|
||||
switch (clk_type) {
|
||||
case SMU_UCLK:
|
||||
case SMU_MCLK:
|
||||
@ -2489,6 +2501,8 @@ static u32 vangogh_set_gfxoff_residency(struct smu_context *smu, bool start)
|
||||
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_LogGfxOffResidency,
|
||||
start, &residency);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (!start)
|
||||
adev->gfx.gfx_off_residency = residency;
|
||||
|
@ -1933,7 +1933,8 @@ static int aldebaran_mode2_reset(struct smu_context *smu)
|
||||
|
||||
index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
|
||||
SMU_MSG_GfxDeviceDriverReset);
|
||||
|
||||
if (index < 0)
|
||||
return -EINVAL;
|
||||
mutex_lock(&smu->message_lock);
|
||||
if (smu_version >= 0x00441400) {
|
||||
ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, SMU_RESET_MODE_2);
|
||||
|
@ -2063,6 +2063,8 @@ static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
|
||||
|
||||
index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
|
||||
SMU_MSG_GfxDeviceDriverReset);
|
||||
if (index < 0)
|
||||
return index;
|
||||
|
||||
mutex_lock(&smu->message_lock);
|
||||
|
||||
|
@ -647,6 +647,17 @@ static void drm_fb_helper_add_damage_clip(struct drm_fb_helper *helper, u32 x, u
static void drm_fb_helper_damage(struct drm_fb_helper *helper, u32 x, u32 y,
u32 width, u32 height)
{
/*
* This function may be invoked by panic() to flush the frame
* buffer, where all CPUs except the panic CPU are stopped.
* During the following schedule_work(), the panic CPU needs
* the worker_pool lock, which might be held by a stopped CPU,
* causing schedule_work() and panic() to block. Return early on
* oops_in_progress to prevent this blocking.
*/
if (oops_in_progress)
return;

drm_fb_helper_add_damage_clip(helper, x, y, width, height);

schedule_work(&helper->damage_work);
|
||||
|
@ -420,6 +420,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"),
},
.driver_data = (void *)&lcd1600x2560_leftside_up,
}, { /* OrangePi Neo */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "OrangePi"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "NEO-01"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* Samsung GalaxyBook 10.6 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* $OpenBSD: uipc_mbuf.c,v 1.292 2024/09/05 08:52:27 bluhm Exp $ */
|
||||
/* $OpenBSD: uipc_mbuf.c,v 1.293 2024/09/09 11:27:03 bluhm Exp $ */
|
||||
/* $NetBSD: uipc_mbuf.c,v 1.15.4.1 1996/06/13 17:11:44 cgd Exp $ */
|
||||
|
||||
/*
|
||||
@ -90,6 +90,7 @@
|
||||
|
||||
#ifdef DDB
|
||||
#include <machine/db_machdep.h>
|
||||
#include <ddb/db_interface.h>
|
||||
#endif
|
||||
|
||||
#if NPF > 0
|
||||
@ -1567,6 +1568,9 @@ m_print_chain(void *v, int deep,
|
||||
(*pr)(", pktlen %d", m->m_pkthdr.len);
|
||||
if (m->m_flags & M_EXT)
|
||||
(*pr)(", clsize %u", m->m_ext.ext_size);
|
||||
else
|
||||
(*pr)(", size %u",
|
||||
m->m_flags & M_PKTHDR ? MHLEN : MLEN);
|
||||
(*pr)("\n");
|
||||
indent = deep ? "|+-" : " +-";
|
||||
}
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* $OpenBSD: if_pppx.c,v 1.129 2024/07/30 13:41:15 yasuoka Exp $ */
|
||||
/* $OpenBSD: if_pppx.c,v 1.130 2024/09/09 07:37:47 mvs Exp $ */
|
||||
|
||||
/*
|
||||
* Copyright (c) 2010 Claudio Jeker <claudio@openbsd.org>
|
||||
@ -786,10 +786,8 @@ pppx_set_session_descr(struct pppx_dev *pxd,
|
||||
if (pxi == NULL)
|
||||
return (EINVAL);
|
||||
|
||||
NET_LOCK();
|
||||
(void)memset(pxi->pxi_if.if_description, 0, IFDESCRSIZE);
|
||||
strlcpy(pxi->pxi_if.if_description, req->pdr_descr, IFDESCRSIZE);
|
||||
NET_UNLOCK();
|
||||
|
||||
pppx_if_rele(pxi);
|
||||
|
||||
|
488 sys/nfs/nfs_srvsubs.c Normal file
@ -0,0 +1,488 @@
|
||||
/* $OpenBSD: nfs_srvsubs.c,v 1.1 2024/09/09 03:50:14 jsg Exp $ */
|
||||
/* $NetBSD: nfs_subs.c,v 1.27.4.3 1996/07/08 20:34:24 jtc Exp $ */
|
||||
|
||||
/*
|
||||
* Copyright (c) 1989, 1993
|
||||
* The Regents of the University of California. All rights reserved.
|
||||
*
|
||||
* This code is derived from software contributed to Berkeley by
|
||||
* Rick Macklem at The University of Guelph.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* 3. Neither the name of the University nor the names of its contributors
|
||||
* may be used to endorse or promote products derived from this software
|
||||
* without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* @(#)nfs_subs.c 8.8 (Berkeley) 5/22/95
|
||||
*/
|
||||
|
||||
|
||||
/*
|
||||
* These functions support the nfsm_subs.h inline functions and help fiddle
|
||||
* mbuf chains for the nfs op functions. They do things such as creating the
|
||||
* rpc header and copying data between mbuf chains and uio lists.
|
||||
*/
|
||||
#include <sys/param.h>
|
||||
#include <sys/systm.h>
|
||||
#include <sys/mount.h>
|
||||
#include <sys/vnode.h>
|
||||
#include <sys/namei.h>
|
||||
#include <sys/mbuf.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/socketvar.h>
|
||||
#include <sys/pool.h>
|
||||
|
||||
#include <nfs/rpcv2.h>
|
||||
#include <nfs/nfsproto.h>
|
||||
#include <nfs/nfs.h>
|
||||
#include <nfs/xdr_subs.h>
|
||||
#include <nfs/nfs_var.h>
|
||||
#include <nfs/nfsm_subs.h>
|
||||
|
||||
#include <netinet/in.h>
|
||||
|
||||
/* Global vars */
|
||||
extern u_int32_t nfs_false, nfs_true;
|
||||
extern const nfstype nfsv2_type[9];
|
||||
extern const nfstype nfsv3_type[9];
|
||||
|
||||
/*
|
||||
* Set up nameidata for a lookup() call and do it
|
||||
*/
|
||||
int
|
||||
nfs_namei(struct nameidata *ndp, fhandle_t *fhp, int len,
|
||||
struct nfssvc_sock *slp, struct mbuf *nam, struct mbuf **mdp,
|
||||
caddr_t *dposp, struct vnode **retdirp, struct proc *p)
|
||||
{
|
||||
int i, rem;
|
||||
struct mbuf *md;
|
||||
char *fromcp, *tocp;
|
||||
struct vnode *dp;
|
||||
int error, rdonly;
|
||||
struct componentname *cnp = &ndp->ni_cnd;
|
||||
|
||||
*retdirp = NULL;
|
||||
cnp->cn_pnbuf = pool_get(&namei_pool, PR_WAITOK);
|
||||
/*
|
||||
* Copy the name from the mbuf list to ndp->ni_pnbuf
|
||||
* and set the various ndp fields appropriately.
|
||||
*/
|
||||
fromcp = *dposp;
|
||||
tocp = cnp->cn_pnbuf;
|
||||
md = *mdp;
|
||||
rem = mtod(md, caddr_t) + md->m_len - fromcp;
|
||||
for (i = 0; i < len; i++) {
|
||||
while (rem == 0) {
|
||||
md = md->m_next;
|
||||
if (md == NULL) {
|
||||
error = EBADRPC;
|
||||
goto out;
|
||||
}
|
||||
fromcp = mtod(md, caddr_t);
|
||||
rem = md->m_len;
|
||||
}
|
||||
if (*fromcp == '\0' || *fromcp == '/') {
|
||||
error = EACCES;
|
||||
goto out;
|
||||
}
|
||||
*tocp++ = *fromcp++;
|
||||
rem--;
|
||||
}
|
||||
*tocp = '\0';
|
||||
*mdp = md;
|
||||
*dposp = fromcp;
|
||||
len = nfsm_padlen(len);
|
||||
if (len > 0) {
|
||||
if (rem >= len)
|
||||
*dposp += len;
|
||||
else if ((error = nfs_adv(mdp, dposp, len, rem)) != 0)
|
||||
goto out;
|
||||
}
|
||||
ndp->ni_pathlen = tocp - cnp->cn_pnbuf;
|
||||
cnp->cn_nameptr = cnp->cn_pnbuf;
|
||||
/*
|
||||
* Extract and set starting directory.
|
||||
*/
|
||||
error = nfsrv_fhtovp(fhp, 0, &dp, ndp->ni_cnd.cn_cred, slp,
|
||||
nam, &rdonly);
|
||||
if (error)
|
||||
goto out;
|
||||
if (dp->v_type != VDIR) {
|
||||
vrele(dp);
|
||||
error = ENOTDIR;
|
||||
goto out;
|
||||
}
|
||||
vref(dp);
|
||||
*retdirp = dp;
|
||||
ndp->ni_startdir = dp;
|
||||
if (rdonly)
|
||||
cnp->cn_flags |= (NOCROSSMOUNT | RDONLY);
|
||||
else
|
||||
cnp->cn_flags |= NOCROSSMOUNT;
|
||||
|
||||
/*
|
||||
* And call lookup() to do the real work
|
||||
*/
|
||||
cnp->cn_proc = p;
|
||||
error = vfs_lookup(ndp);
|
||||
if (error)
|
||||
goto out;
|
||||
/*
|
||||
* Check for encountering a symbolic link
|
||||
*/
|
||||
if (cnp->cn_flags & ISSYMLINK) {
|
||||
if ((cnp->cn_flags & LOCKPARENT) && ndp->ni_pathlen == 1)
|
||||
vput(ndp->ni_dvp);
|
||||
else
|
||||
vrele(ndp->ni_dvp);
|
||||
vput(ndp->ni_vp);
|
||||
ndp->ni_vp = NULL;
|
||||
error = EINVAL;
|
||||
goto out;
|
||||
}
|
||||
/*
|
||||
* Check for saved name request
|
||||
*/
|
||||
if (cnp->cn_flags & (SAVENAME | SAVESTART)) {
|
||||
cnp->cn_flags |= HASBUF;
|
||||
return (0);
|
||||
}
|
||||
out:
|
||||
pool_put(&namei_pool, cnp->cn_pnbuf);
|
||||
return (error);
|
||||
}
|
||||
|
||||
/*
|
||||
* A fiddled version of m_adj() that ensures null fill to a long
|
||||
* boundary and only trims off the back end
|
||||
*/
|
||||
void
|
||||
nfsm_adj(struct mbuf *mp, int len, int nul)
|
||||
{
|
||||
struct mbuf *m;
|
||||
int count, i;
|
||||
char *cp;
|
||||
|
||||
/*
|
||||
* Trim from tail. Scan the mbuf chain,
|
||||
* calculating its length and finding the last mbuf.
|
||||
* If the adjustment only affects this mbuf, then just
|
||||
* adjust and return. Otherwise, rescan and truncate
|
||||
* after the remaining size.
|
||||
*/
|
||||
count = 0;
|
||||
m = mp;
|
||||
for (;;) {
|
||||
count += m->m_len;
|
||||
if (m->m_next == NULL)
|
||||
break;
|
||||
m = m->m_next;
|
||||
}
|
||||
if (m->m_len > len) {
|
||||
m->m_len -= len;
|
||||
if (nul > 0) {
|
||||
cp = mtod(m, caddr_t)+m->m_len-nul;
|
||||
for (i = 0; i < nul; i++)
|
||||
*cp++ = '\0';
|
||||
}
|
||||
return;
|
||||
}
|
||||
count -= len;
|
||||
if (count < 0)
|
||||
count = 0;
|
||||
/*
|
||||
* Correct length for chain is "count".
|
||||
* Find the mbuf with last data, adjust its length,
|
||||
* and toss data from remaining mbufs on chain.
|
||||
*/
|
||||
for (m = mp; m; m = m->m_next) {
|
||||
if (m->m_len >= count) {
|
||||
m->m_len = count;
|
||||
if (nul > 0) {
|
||||
cp = mtod(m, caddr_t)+m->m_len-nul;
|
||||
for (i = 0; i < nul; i++)
|
||||
*cp++ = '\0';
|
||||
}
|
||||
break;
|
||||
}
|
||||
count -= m->m_len;
|
||||
}
|
||||
for (m = m->m_next;m;m = m->m_next)
|
||||
m->m_len = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Make these non-inline functions, so that the kernel text size
|
||||
* doesn't get too big...
|
||||
*/
|
||||
void
|
||||
nfsm_srvwcc(struct nfsrv_descript *nfsd, int before_ret,
|
||||
struct vattr *before_vap, int after_ret, struct vattr *after_vap,
|
||||
struct nfsm_info *info)
|
||||
{
|
||||
u_int32_t *tl;
|
||||
|
||||
if (before_ret) {
|
||||
tl = nfsm_build(&info->nmi_mb, NFSX_UNSIGNED);
|
||||
*tl = nfs_false;
|
||||
} else {
|
||||
tl = nfsm_build(&info->nmi_mb, 7 * NFSX_UNSIGNED);
|
||||
*tl++ = nfs_true;
|
||||
txdr_hyper(before_vap->va_size, tl);
|
||||
tl += 2;
|
||||
txdr_nfsv3time(&(before_vap->va_mtime), tl);
|
||||
tl += 2;
|
||||
txdr_nfsv3time(&(before_vap->va_ctime), tl);
|
||||
}
|
||||
nfsm_srvpostop_attr(nfsd, after_ret, after_vap, info);
|
||||
}
|
||||
|
||||
void
|
||||
nfsm_srvpostop_attr(struct nfsrv_descript *nfsd, int after_ret,
|
||||
struct vattr *after_vap, struct nfsm_info *info)
|
||||
{
|
||||
u_int32_t *tl;
|
||||
struct nfs_fattr *fp;
|
||||
|
||||
if (after_ret) {
|
||||
tl = nfsm_build(&info->nmi_mb, NFSX_UNSIGNED);
|
||||
*tl = nfs_false;
|
||||
} else {
|
||||
tl = nfsm_build(&info->nmi_mb, NFSX_UNSIGNED + NFSX_V3FATTR);
|
||||
*tl++ = nfs_true;
|
||||
fp = (struct nfs_fattr *)tl;
|
||||
nfsm_srvfattr(nfsd, after_vap, fp);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
nfsm_srvfattr(struct nfsrv_descript *nfsd, struct vattr *vap,
|
||||
struct nfs_fattr *fp)
|
||||
{
|
||||
|
||||
fp->fa_nlink = txdr_unsigned(vap->va_nlink);
|
||||
fp->fa_uid = txdr_unsigned(vap->va_uid);
|
||||
fp->fa_gid = txdr_unsigned(vap->va_gid);
|
||||
if (nfsd->nd_flag & ND_NFSV3) {
|
||||
fp->fa_type = vtonfsv3_type(vap->va_type);
|
||||
fp->fa_mode = vtonfsv3_mode(vap->va_mode);
|
||||
txdr_hyper(vap->va_size, &fp->fa3_size);
|
||||
txdr_hyper(vap->va_bytes, &fp->fa3_used);
|
||||
fp->fa3_rdev.specdata1 = txdr_unsigned(major(vap->va_rdev));
|
||||
fp->fa3_rdev.specdata2 = txdr_unsigned(minor(vap->va_rdev));
|
||||
fp->fa3_fsid.nfsuquad[0] = 0;
|
||||
fp->fa3_fsid.nfsuquad[1] = txdr_unsigned(vap->va_fsid);
|
||||
txdr_hyper(vap->va_fileid, &fp->fa3_fileid);
|
||||
txdr_nfsv3time(&vap->va_atime, &fp->fa3_atime);
|
||||
txdr_nfsv3time(&vap->va_mtime, &fp->fa3_mtime);
|
||||
txdr_nfsv3time(&vap->va_ctime, &fp->fa3_ctime);
|
||||
} else {
|
||||
fp->fa_type = vtonfsv2_type(vap->va_type);
|
||||
fp->fa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
|
||||
fp->fa2_size = txdr_unsigned(vap->va_size);
|
||||
fp->fa2_blocksize = txdr_unsigned(vap->va_blocksize);
|
||||
if (vap->va_type == VFIFO)
|
||||
fp->fa2_rdev = 0xffffffff;
|
||||
else
|
||||
fp->fa2_rdev = txdr_unsigned(vap->va_rdev);
|
||||
fp->fa2_blocks = txdr_unsigned(vap->va_bytes / NFS_FABLKSIZE);
|
||||
fp->fa2_fsid = txdr_unsigned(vap->va_fsid);
|
||||
fp->fa2_fileid = txdr_unsigned((u_int32_t)vap->va_fileid);
|
||||
txdr_nfsv2time(&vap->va_atime, &fp->fa2_atime);
|
||||
txdr_nfsv2time(&vap->va_mtime, &fp->fa2_mtime);
|
||||
txdr_nfsv2time(&vap->va_ctime, &fp->fa2_ctime);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* nfsrv_fhtovp() - convert a fh to a vnode ptr (optionally locked)
|
||||
* - look up fsid in mount list (if not found ret error)
|
||||
* - get vp and export rights by calling VFS_FHTOVP() and VFS_CHECKEXP()
|
||||
* - if cred->cr_uid == 0 or MNT_EXPORTANON set it to credanon
|
||||
* - if not lockflag unlock it with VOP_UNLOCK()
|
||||
*/
|
||||
int
|
||||
nfsrv_fhtovp(fhandle_t *fhp, int lockflag, struct vnode **vpp,
|
||||
struct ucred *cred, struct nfssvc_sock *slp, struct mbuf *nam,
|
||||
int *rdonlyp)
|
||||
{
|
||||
struct mount *mp;
|
||||
int i;
|
||||
struct ucred *credanon;
|
||||
int error, exflags;
|
||||
struct sockaddr_in *saddr;
|
||||
|
||||
*vpp = NULL;
|
||||
mp = vfs_getvfs(&fhp->fh_fsid);
|
||||
|
||||
if (!mp)
|
||||
return (ESTALE);
|
||||
error = VFS_CHECKEXP(mp, nam, &exflags, &credanon);
|
||||
if (error)
|
||||
return (error);
|
||||
error = VFS_FHTOVP(mp, &fhp->fh_fid, vpp);
|
||||
if (error)
|
||||
return (error);
|
||||
|
||||
saddr = mtod(nam, struct sockaddr_in *);
|
||||
if (saddr->sin_family == AF_INET &&
|
||||
(ntohs(saddr->sin_port) >= IPPORT_RESERVED ||
|
||||
(slp->ns_so->so_type == SOCK_STREAM && ntohs(saddr->sin_port) == 20))) {
|
||||
vput(*vpp);
|
||||
return (NFSERR_AUTHERR | AUTH_TOOWEAK);
|
||||
}
|
||||
|
||||
/* Check/setup credentials. */
|
||||
if (cred->cr_uid == 0 || (exflags & MNT_EXPORTANON)) {
|
||||
cred->cr_uid = credanon->cr_uid;
|
||||
cred->cr_gid = credanon->cr_gid;
|
||||
for (i = 0; i < credanon->cr_ngroups && i < NGROUPS_MAX; i++)
|
||||
cred->cr_groups[i] = credanon->cr_groups[i];
|
||||
cred->cr_ngroups = i;
|
||||
}
|
||||
if (exflags & MNT_EXRDONLY)
|
||||
*rdonlyp = 1;
|
||||
else
|
||||
*rdonlyp = 0;
|
||||
if (!lockflag)
|
||||
VOP_UNLOCK(*vpp);
|
||||
|
||||
return (0);
|
||||
}
|
||||
|
||||
/*
|
||||
* This function compares two net addresses by family and returns non zero
|
||||
* if they are the same host, or if there is any doubt it returns 0.
|
||||
* The AF_INET family is handled as a special case so that address mbufs
|
||||
* don't need to be saved to store "struct in_addr", which is only 4 bytes.
|
||||
*/
|
||||
int
|
||||
netaddr_match(int family, union nethostaddr *haddr, struct mbuf *nam)
|
||||
{
|
||||
struct sockaddr_in *inetaddr;
|
||||
|
||||
switch (family) {
|
||||
case AF_INET:
|
||||
inetaddr = mtod(nam, struct sockaddr_in *);
|
||||
if (inetaddr->sin_family == AF_INET &&
|
||||
inetaddr->sin_addr.s_addr == haddr->had_inetaddr)
|
||||
return (1);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
};
|
||||
return (0);
|
||||
}
|
||||
|
||||
int
|
||||
nfsm_srvsattr(struct mbuf **mp, struct vattr *va, struct mbuf *mrep,
|
||||
caddr_t *dposp)
|
||||
{
|
||||
struct nfsm_info info;
|
||||
int error = 0;
|
||||
uint32_t *tl;
|
||||
|
||||
info.nmi_md = *mp;
|
||||
info.nmi_dpos = *dposp;
|
||||
info.nmi_mrep = mrep;
|
||||
info.nmi_errorp = &error;
|
||||
|
||||
tl = (uint32_t *)nfsm_dissect(&info, NFSX_UNSIGNED);
|
||||
if (tl == NULL)
|
||||
return error;
|
||||
if (*tl == nfs_true) {
|
||||
tl = (uint32_t *)nfsm_dissect(&info, NFSX_UNSIGNED);
|
||||
if (tl == NULL)
|
||||
return error;
|
||||
va->va_mode = nfstov_mode(*tl);
|
||||
}
|
||||
|
||||
tl = (uint32_t *)nfsm_dissect(&info, NFSX_UNSIGNED);
|
||||
if (tl == NULL)
|
||||
return error;
|
||||
if (*tl == nfs_true) {
|
||||
tl = (uint32_t *)nfsm_dissect(&info, NFSX_UNSIGNED);
|
||||
if (tl == NULL)
|
||||
return error;
|
||||
va->va_uid = fxdr_unsigned(uid_t, *tl);
|
||||
}
|
||||
|
||||
tl = (uint32_t *)nfsm_dissect(&info, NFSX_UNSIGNED);
|
||||
if (tl == NULL)
|
||||
return error;
|
||||
if (*tl == nfs_true) {
|
||||
tl = (uint32_t *)nfsm_dissect(&info, NFSX_UNSIGNED);
|
||||
if (tl == NULL)
|
||||
return error;
|
||||
va->va_gid = fxdr_unsigned(gid_t, *tl);
|
||||
}
|
||||
|
||||
tl = (uint32_t *)nfsm_dissect(&info, NFSX_UNSIGNED);
|
||||
if (tl == NULL)
|
||||
return error;
|
||||
if (*tl == nfs_true) {
|
||||
tl = (uint32_t *)nfsm_dissect(&info, 2 * NFSX_UNSIGNED);
|
||||
if (tl == NULL)
|
||||
return error;
|
||||
va->va_size = fxdr_hyper(tl);
|
||||
}
|
||||
|
||||
tl = (uint32_t *)nfsm_dissect(&info, NFSX_UNSIGNED);
|
||||
if (tl == NULL)
|
||||
return error;
|
||||
switch (fxdr_unsigned(int, *tl)) {
|
||||
case NFSV3SATTRTIME_TOCLIENT:
|
||||
va->va_vaflags |= VA_UTIMES_CHANGE;
|
||||
va->va_vaflags &= ~VA_UTIMES_NULL;
|
||||
tl = (uint32_t *)nfsm_dissect(&info, 2 * NFSX_UNSIGNED);
|
||||
if (tl == NULL)
|
||||
return error;
|
||||
fxdr_nfsv3time(tl, &va->va_atime);
|
||||
break;
|
||||
case NFSV3SATTRTIME_TOSERVER:
|
||||
va->va_vaflags |= VA_UTIMES_CHANGE;
|
||||
getnanotime(&va->va_atime);
|
||||
break;
|
||||
};
|
||||
|
||||
tl = (uint32_t *)nfsm_dissect(&info, NFSX_UNSIGNED);
|
||||
if (tl == NULL)
|
||||
return error;
|
||||
switch (fxdr_unsigned(int, *tl)) {
|
||||
case NFSV3SATTRTIME_TOCLIENT:
|
||||
va->va_vaflags |= VA_UTIMES_CHANGE;
|
||||
va->va_vaflags &= ~VA_UTIMES_NULL;
|
||||
tl = (uint32_t *)nfsm_dissect(&info, 2 * NFSX_UNSIGNED);
|
||||
if (tl == NULL)
|
||||
return error;
|
||||
fxdr_nfsv3time(tl, &va->va_mtime);
|
||||
break;
|
||||
case NFSV3SATTRTIME_TOSERVER:
|
||||
va->va_vaflags |= VA_UTIMES_CHANGE;
|
||||
getnanotime(&va->va_mtime);
|
||||
break;
|
||||
};
|
||||
|
||||
*dposp = info.nmi_dpos;
|
||||
*mp = info.nmi_md;
|
||||
return 0;
|
||||
}
|
@ -1,4 +1,4 @@
|
||||
/* $OpenBSD: nfs_subs.c,v 1.150 2024/09/04 07:54:53 mglocker Exp $ */
|
||||
/* $OpenBSD: nfs_subs.c,v 1.151 2024/09/09 03:50:14 jsg Exp $ */
|
||||
/* $NetBSD: nfs_subs.c,v 1.27.4.3 1996/07/08 20:34:24 jtc Exp $ */
|
||||
|
||||
/*
|
||||
@ -1163,334 +1163,6 @@ nfs_getattrcache(struct vnode *vp, struct vattr *vaper)
|
||||
}
|
||||
#endif /* NFSCLIENT */
|
||||
|
||||
/*
|
||||
* Set up nameidata for a lookup() call and do it
|
||||
*/
|
||||
int
|
||||
nfs_namei(struct nameidata *ndp, fhandle_t *fhp, int len,
|
||||
struct nfssvc_sock *slp, struct mbuf *nam, struct mbuf **mdp,
|
||||
caddr_t *dposp, struct vnode **retdirp, struct proc *p)
|
||||
{
|
||||
int i, rem;
|
||||
struct mbuf *md;
|
||||
char *fromcp, *tocp;
|
||||
struct vnode *dp;
|
||||
int error, rdonly;
|
||||
struct componentname *cnp = &ndp->ni_cnd;
|
||||
|
||||
*retdirp = NULL;
|
||||
cnp->cn_pnbuf = pool_get(&namei_pool, PR_WAITOK);
|
||||
/*
|
||||
* Copy the name from the mbuf list to ndp->ni_pnbuf
|
||||
* and set the various ndp fields appropriately.
|
||||
*/
|
||||
fromcp = *dposp;
|
||||
tocp = cnp->cn_pnbuf;
|
||||
md = *mdp;
|
||||
rem = mtod(md, caddr_t) + md->m_len - fromcp;
|
||||
for (i = 0; i < len; i++) {
|
||||
while (rem == 0) {
|
||||
md = md->m_next;
|
||||
if (md == NULL) {
|
||||
error = EBADRPC;
|
||||
goto out;
|
||||
}
|
||||
fromcp = mtod(md, caddr_t);
|
||||
rem = md->m_len;
|
||||
}
|
||||
if (*fromcp == '\0' || *fromcp == '/') {
|
||||
error = EACCES;
|
||||
goto out;
|
||||
}
|
||||
*tocp++ = *fromcp++;
|
||||
rem--;
|
||||
}
|
||||
*tocp = '\0';
|
||||
*mdp = md;
|
||||
*dposp = fromcp;
|
||||
len = nfsm_padlen(len);
|
||||
if (len > 0) {
|
||||
if (rem >= len)
|
||||
*dposp += len;
|
||||
else if ((error = nfs_adv(mdp, dposp, len, rem)) != 0)
|
||||
goto out;
|
||||
}
|
||||
ndp->ni_pathlen = tocp - cnp->cn_pnbuf;
|
||||
cnp->cn_nameptr = cnp->cn_pnbuf;
|
||||
/*
|
||||
* Extract and set starting directory.
|
||||
*/
|
||||
error = nfsrv_fhtovp(fhp, 0, &dp, ndp->ni_cnd.cn_cred, slp,
|
||||
nam, &rdonly);
|
||||
if (error)
|
||||
goto out;
|
||||
if (dp->v_type != VDIR) {
|
||||
vrele(dp);
|
||||
error = ENOTDIR;
|
||||
goto out;
|
||||
}
|
||||
vref(dp);
|
||||
*retdirp = dp;
|
||||
ndp->ni_startdir = dp;
|
||||
if (rdonly)
|
||||
cnp->cn_flags |= (NOCROSSMOUNT | RDONLY);
|
||||
else
|
||||
cnp->cn_flags |= NOCROSSMOUNT;
|
||||
|
||||
/*
|
||||
* And call lookup() to do the real work
|
||||
*/
|
||||
cnp->cn_proc = p;
|
||||
error = vfs_lookup(ndp);
|
||||
if (error)
|
||||
goto out;
|
||||
/*
|
||||
* Check for encountering a symbolic link
|
||||
*/
|
||||
if (cnp->cn_flags & ISSYMLINK) {
|
||||
if ((cnp->cn_flags & LOCKPARENT) && ndp->ni_pathlen == 1)
|
||||
vput(ndp->ni_dvp);
|
||||
else
|
||||
vrele(ndp->ni_dvp);
|
||||
vput(ndp->ni_vp);
|
||||
ndp->ni_vp = NULL;
|
||||
error = EINVAL;
|
||||
goto out;
|
||||
}
|
||||
/*
|
||||
* Check for saved name request
|
||||
*/
|
||||
if (cnp->cn_flags & (SAVENAME | SAVESTART)) {
|
||||
cnp->cn_flags |= HASBUF;
|
||||
return (0);
|
||||
}
|
||||
out:
|
||||
pool_put(&namei_pool, cnp->cn_pnbuf);
|
||||
return (error);
|
||||
}
|
||||
|
||||
/*
|
||||
* A fiddled version of m_adj() that ensures null fill to a long
|
||||
* boundary and only trims off the back end
|
||||
*/
|
||||
void
|
||||
nfsm_adj(struct mbuf *mp, int len, int nul)
|
||||
{
|
||||
struct mbuf *m;
|
||||
int count, i;
|
||||
char *cp;
|
||||
|
||||
/*
|
||||
* Trim from tail. Scan the mbuf chain,
|
||||
* calculating its length and finding the last mbuf.
|
||||
* If the adjustment only affects this mbuf, then just
|
||||
* adjust and return. Otherwise, rescan and truncate
|
||||
* after the remaining size.
|
||||
*/
|
||||
count = 0;
|
||||
m = mp;
|
||||
for (;;) {
|
||||
count += m->m_len;
|
||||
if (m->m_next == NULL)
|
||||
break;
|
||||
m = m->m_next;
|
||||
}
|
||||
if (m->m_len > len) {
|
||||
m->m_len -= len;
|
||||
if (nul > 0) {
|
||||
cp = mtod(m, caddr_t)+m->m_len-nul;
|
||||
for (i = 0; i < nul; i++)
|
||||
*cp++ = '\0';
|
||||
}
|
||||
return;
|
||||
}
|
||||
count -= len;
|
||||
if (count < 0)
|
||||
count = 0;
|
||||
/*
|
||||
* Correct length for chain is "count".
|
||||
* Find the mbuf with last data, adjust its length,
|
||||
* and toss data from remaining mbufs on chain.
|
||||
*/
|
||||
for (m = mp; m; m = m->m_next) {
|
||||
if (m->m_len >= count) {
|
||||
m->m_len = count;
|
||||
if (nul > 0) {
|
||||
cp = mtod(m, caddr_t)+m->m_len-nul;
|
||||
for (i = 0; i < nul; i++)
|
||||
*cp++ = '\0';
|
||||
}
|
||||
break;
|
||||
}
|
||||
count -= m->m_len;
|
||||
}
|
||||
for (m = m->m_next;m;m = m->m_next)
|
||||
m->m_len = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Make these non-inline functions, so that the kernel text size
|
||||
* doesn't get too big...
|
||||
*/
|
||||
void
|
||||
nfsm_srvwcc(struct nfsrv_descript *nfsd, int before_ret,
|
||||
struct vattr *before_vap, int after_ret, struct vattr *after_vap,
|
||||
struct nfsm_info *info)
|
||||
{
|
||||
u_int32_t *tl;
|
||||
|
||||
if (before_ret) {
|
||||
tl = nfsm_build(&info->nmi_mb, NFSX_UNSIGNED);
|
||||
*tl = nfs_false;
|
||||
} else {
|
||||
tl = nfsm_build(&info->nmi_mb, 7 * NFSX_UNSIGNED);
|
||||
*tl++ = nfs_true;
|
||||
txdr_hyper(before_vap->va_size, tl);
|
||||
tl += 2;
|
||||
txdr_nfsv3time(&(before_vap->va_mtime), tl);
|
||||
tl += 2;
|
||||
txdr_nfsv3time(&(before_vap->va_ctime), tl);
|
||||
}
|
||||
nfsm_srvpostop_attr(nfsd, after_ret, after_vap, info);
|
||||
}
|
||||
|
||||
void
|
||||
nfsm_srvpostop_attr(struct nfsrv_descript *nfsd, int after_ret,
|
||||
struct vattr *after_vap, struct nfsm_info *info)
|
||||
{
|
||||
u_int32_t *tl;
|
||||
struct nfs_fattr *fp;
|
||||
|
||||
if (after_ret) {
|
||||
tl = nfsm_build(&info->nmi_mb, NFSX_UNSIGNED);
|
||||
*tl = nfs_false;
|
||||
} else {
|
||||
tl = nfsm_build(&info->nmi_mb, NFSX_UNSIGNED + NFSX_V3FATTR);
|
||||
*tl++ = nfs_true;
|
||||
fp = (struct nfs_fattr *)tl;
|
||||
nfsm_srvfattr(nfsd, after_vap, fp);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
nfsm_srvfattr(struct nfsrv_descript *nfsd, struct vattr *vap,
|
||||
struct nfs_fattr *fp)
|
||||
{
|
||||
|
||||
fp->fa_nlink = txdr_unsigned(vap->va_nlink);
|
||||
fp->fa_uid = txdr_unsigned(vap->va_uid);
|
||||
fp->fa_gid = txdr_unsigned(vap->va_gid);
|
||||
if (nfsd->nd_flag & ND_NFSV3) {
|
||||
fp->fa_type = vtonfsv3_type(vap->va_type);
|
||||
fp->fa_mode = vtonfsv3_mode(vap->va_mode);
|
||||
txdr_hyper(vap->va_size, &fp->fa3_size);
|
||||
txdr_hyper(vap->va_bytes, &fp->fa3_used);
|
||||
fp->fa3_rdev.specdata1 = txdr_unsigned(major(vap->va_rdev));
|
||||
fp->fa3_rdev.specdata2 = txdr_unsigned(minor(vap->va_rdev));
|
||||
fp->fa3_fsid.nfsuquad[0] = 0;
|
||||
fp->fa3_fsid.nfsuquad[1] = txdr_unsigned(vap->va_fsid);
|
||||
txdr_hyper(vap->va_fileid, &fp->fa3_fileid);
|
||||
txdr_nfsv3time(&vap->va_atime, &fp->fa3_atime);
|
||||
txdr_nfsv3time(&vap->va_mtime, &fp->fa3_mtime);
|
||||
txdr_nfsv3time(&vap->va_ctime, &fp->fa3_ctime);
|
||||
} else {
|
||||
fp->fa_type = vtonfsv2_type(vap->va_type);
|
||||
fp->fa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
|
||||
fp->fa2_size = txdr_unsigned(vap->va_size);
|
||||
fp->fa2_blocksize = txdr_unsigned(vap->va_blocksize);
|
||||
if (vap->va_type == VFIFO)
|
||||
fp->fa2_rdev = 0xffffffff;
|
||||
else
|
||||
fp->fa2_rdev = txdr_unsigned(vap->va_rdev);
|
||||
fp->fa2_blocks = txdr_unsigned(vap->va_bytes / NFS_FABLKSIZE);
|
||||
fp->fa2_fsid = txdr_unsigned(vap->va_fsid);
|
||||
fp->fa2_fileid = txdr_unsigned((u_int32_t)vap->va_fileid);
|
||||
txdr_nfsv2time(&vap->va_atime, &fp->fa2_atime);
|
||||
txdr_nfsv2time(&vap->va_mtime, &fp->fa2_mtime);
|
||||
txdr_nfsv2time(&vap->va_ctime, &fp->fa2_ctime);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* nfsrv_fhtovp() - convert a fh to a vnode ptr (optionally locked)
|
||||
* - look up fsid in mount list (if not found ret error)
|
||||
* - get vp and export rights by calling VFS_FHTOVP() and VFS_CHECKEXP()
|
||||
* - if cred->cr_uid == 0 or MNT_EXPORTANON set it to credanon
|
||||
* - if not lockflag unlock it with VOP_UNLOCK()
|
||||
*/
|
||||
int
|
||||
nfsrv_fhtovp(fhandle_t *fhp, int lockflag, struct vnode **vpp,
|
||||
struct ucred *cred, struct nfssvc_sock *slp, struct mbuf *nam,
|
||||
int *rdonlyp)
|
||||
{
|
||||
struct mount *mp;
|
||||
int i;
|
||||
struct ucred *credanon;
|
||||
int error, exflags;
|
||||
struct sockaddr_in *saddr;
|
||||
|
||||
*vpp = NULL;
|
||||
mp = vfs_getvfs(&fhp->fh_fsid);
|
||||
|
||||
if (!mp)
|
||||
return (ESTALE);
|
||||
error = VFS_CHECKEXP(mp, nam, &exflags, &credanon);
|
||||
if (error)
|
||||
return (error);
|
||||
error = VFS_FHTOVP(mp, &fhp->fh_fid, vpp);
|
||||
if (error)
|
||||
return (error);
|
||||
|
||||
saddr = mtod(nam, struct sockaddr_in *);
|
||||
if (saddr->sin_family == AF_INET &&
|
||||
(ntohs(saddr->sin_port) >= IPPORT_RESERVED ||
|
||||
(slp->ns_so->so_type == SOCK_STREAM && ntohs(saddr->sin_port) == 20))) {
|
||||
vput(*vpp);
|
||||
return (NFSERR_AUTHERR | AUTH_TOOWEAK);
|
||||
}
|
||||
|
||||
/* Check/setup credentials. */
|
||||
if (cred->cr_uid == 0 || (exflags & MNT_EXPORTANON)) {
|
||||
cred->cr_uid = credanon->cr_uid;
|
||||
cred->cr_gid = credanon->cr_gid;
|
||||
for (i = 0; i < credanon->cr_ngroups && i < NGROUPS_MAX; i++)
|
||||
cred->cr_groups[i] = credanon->cr_groups[i];
|
||||
cred->cr_ngroups = i;
|
||||
}
|
||||
if (exflags & MNT_EXRDONLY)
|
||||
*rdonlyp = 1;
|
||||
else
|
||||
*rdonlyp = 0;
|
||||
if (!lockflag)
|
||||
VOP_UNLOCK(*vpp);
|
||||
|
||||
return (0);
|
||||
}
|
||||
|
||||
/*
|
||||
* This function compares two net addresses by family and returns non zero
|
||||
* if they are the same host, or if there is any doubt it returns 0.
|
||||
* The AF_INET family is handled as a special case so that address mbufs
|
||||
* don't need to be saved to store "struct in_addr", which is only 4 bytes.
|
||||
*/
|
||||
int
|
||||
netaddr_match(int family, union nethostaddr *haddr, struct mbuf *nam)
|
||||
{
|
||||
struct sockaddr_in *inetaddr;
|
||||
|
||||
switch (family) {
|
||||
case AF_INET:
|
||||
inetaddr = mtod(nam, struct sockaddr_in *);
|
||||
if (inetaddr->sin_family == AF_INET &&
|
||||
inetaddr->sin_addr.s_addr == haddr->had_inetaddr)
|
||||
return (1);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
};
|
||||
return (0);
|
||||
}
|
||||
|
||||
/*
|
||||
* The write verifier has changed (probably due to a server reboot), so all
|
||||
* B_NEEDCOMMIT blocks will have to be written again. Since they are on the
|
||||
@ -1821,100 +1493,6 @@ nfsm_srvfhtom(struct mbuf **mp, fhandle_t *f, int v3)
|
||||
}
|
||||
}
|
||||
|
||||
int
|
||||
nfsm_srvsattr(struct mbuf **mp, struct vattr *va, struct mbuf *mrep,
|
||||
caddr_t *dposp)
|
||||
{
|
||||
struct nfsm_info info;
|
||||
int error = 0;
|
||||
uint32_t *tl;
|
||||
|
||||
info.nmi_md = *mp;
|
||||
info.nmi_dpos = *dposp;
|
||||
info.nmi_mrep = mrep;
|
||||
info.nmi_errorp = &error;
|
||||
|
||||
tl = (uint32_t *)nfsm_dissect(&info, NFSX_UNSIGNED);
|
||||
if (tl == NULL)
|
||||
return error;
|
||||
if (*tl == nfs_true) {
|
||||
tl = (uint32_t *)nfsm_dissect(&info, NFSX_UNSIGNED);
|
||||
if (tl == NULL)
|
||||
return error;
|
||||
va->va_mode = nfstov_mode(*tl);
|
||||
}
|
||||
|
||||
tl = (uint32_t *)nfsm_dissect(&info, NFSX_UNSIGNED);
|
||||
if (tl == NULL)
|
||||
return error;
|
||||
if (*tl == nfs_true) {
|
||||
tl = (uint32_t *)nfsm_dissect(&info, NFSX_UNSIGNED);
|
||||
if (tl == NULL)
|
||||
return error;
|
||||
va->va_uid = fxdr_unsigned(uid_t, *tl);
|
||||
}
|
||||
|
||||
tl = (uint32_t *)nfsm_dissect(&info, NFSX_UNSIGNED);
|
||||
if (tl == NULL)
|
||||
return error;
|
||||
if (*tl == nfs_true) {
|
||||
tl = (uint32_t *)nfsm_dissect(&info, NFSX_UNSIGNED);
|
||||
if (tl == NULL)
|
||||
return error;
|
||||
va->va_gid = fxdr_unsigned(gid_t, *tl);
|
||||
}
|
||||
|
||||
tl = (uint32_t *)nfsm_dissect(&info, NFSX_UNSIGNED);
|
||||
if (tl == NULL)
|
||||
return error;
|
||||
if (*tl == nfs_true) {
|
||||
tl = (uint32_t *)nfsm_dissect(&info, 2 * NFSX_UNSIGNED);
|
||||
if (tl == NULL)
|
||||
return error;
|
||||
va->va_size = fxdr_hyper(tl);
|
||||
}
|
||||
|
||||
tl = (uint32_t *)nfsm_dissect(&info, NFSX_UNSIGNED);
|
||||
if (tl == NULL)
|
||||
return error;
|
||||
switch (fxdr_unsigned(int, *tl)) {
|
||||
case NFSV3SATTRTIME_TOCLIENT:
|
||||
va->va_vaflags |= VA_UTIMES_CHANGE;
|
||||
va->va_vaflags &= ~VA_UTIMES_NULL;
|
||||
tl = (uint32_t *)nfsm_dissect(&info, 2 * NFSX_UNSIGNED);
|
||||
if (tl == NULL)
|
||||
return error;
|
||||
fxdr_nfsv3time(tl, &va->va_atime);
|
||||
break;
|
||||
case NFSV3SATTRTIME_TOSERVER:
|
||||
va->va_vaflags |= VA_UTIMES_CHANGE;
|
||||
getnanotime(&va->va_atime);
|
||||
break;
|
||||
};
|
||||
|
||||
tl = (uint32_t *)nfsm_dissect(&info, NFSX_UNSIGNED);
|
||||
if (tl == NULL)
|
||||
return error;
|
||||
switch (fxdr_unsigned(int, *tl)) {
|
||||
case NFSV3SATTRTIME_TOCLIENT:
|
||||
va->va_vaflags |= VA_UTIMES_CHANGE;
|
||||
va->va_vaflags &= ~VA_UTIMES_NULL;
|
||||
tl = (uint32_t *)nfsm_dissect(&info, 2 * NFSX_UNSIGNED);
|
||||
if (tl == NULL)
|
||||
return error;
|
||||
fxdr_nfsv3time(tl, &va->va_mtime);
|
||||
break;
|
||||
case NFSV3SATTRTIME_TOSERVER:
|
||||
va->va_vaflags |= VA_UTIMES_CHANGE;
|
||||
getnanotime(&va->va_mtime);
|
||||
break;
|
||||
};
|
||||
|
||||
*dposp = info.nmi_dpos;
|
||||
*mp = info.nmi_md;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
txdr_nfsv2time(const struct timespec *from, struct nfsv2_time *to)
|
||||
{
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* $OpenBSD: nfs_var.h,v 1.65 2024/05/04 11:25:24 jsg Exp $ */
|
||||
/* $OpenBSD: nfs_var.h,v 1.66 2024/09/09 03:50:14 jsg Exp $ */
|
||||
/* $NetBSD: nfs_var.h,v 1.3 1996/02/18 11:53:54 fvdl Exp $ */
|
||||
|
||||
/*
|
||||
@ -142,6 +142,22 @@ int nfsrv_getcache(struct nfsrv_descript *, struct nfssvc_sock *,
|
||||
void nfsrv_updatecache(struct nfsrv_descript *, int, struct mbuf *);
|
||||
void nfsrv_cleancache(void);
|
||||
|
||||
/* nfs_srvsubs.c */
|
||||
int nfs_namei(struct nameidata *, fhandle_t *, int, struct nfssvc_sock *,
|
||||
struct mbuf *, struct mbuf **, caddr_t *, struct vnode **,
|
||||
struct proc *);
|
||||
void nfsm_adj(struct mbuf *, int, int);
|
||||
void nfsm_srvwcc(struct nfsrv_descript *, int, struct vattr *, int,
|
||||
struct vattr *, struct nfsm_info *);
|
||||
void nfsm_srvpostop_attr(struct nfsrv_descript *, int, struct vattr *,
|
||||
struct nfsm_info *);
|
||||
void nfsm_srvfattr(struct nfsrv_descript *, struct vattr *,
|
||||
struct nfs_fattr *);
|
||||
int nfsrv_fhtovp(fhandle_t *, int, struct vnode **, struct ucred *,
|
||||
struct nfssvc_sock *, struct mbuf *, int *);
|
||||
int netaddr_match(int, union nethostaddr *, struct mbuf *);
|
||||
int nfsm_srvsattr(struct mbuf **, struct vattr *, struct mbuf *, caddr_t *);
|
||||
|
||||
/* nfs_subs.c */
|
||||
struct mbuf *nfsm_reqhead(int);
|
||||
void nfsm_rpchead(struct nfsreq *, struct ucred *, int);
|
||||
@ -156,21 +172,8 @@ int nfs_attrtimeo(struct nfsnode *);
|
||||
int nfs_loadattrcache(struct vnode **, struct mbuf **, caddr_t *,
|
||||
struct vattr *);
|
||||
int nfs_getattrcache(struct vnode *, struct vattr *);
|
||||
int nfs_namei(struct nameidata *, fhandle_t *, int, struct nfssvc_sock *,
|
||||
struct mbuf *, struct mbuf **, caddr_t *, struct vnode **,
|
||||
struct proc *);
|
||||
void nfsm_v3attrbuild(struct mbuf **, struct vattr *, int);
|
||||
int nfsm_disct(struct mbuf **, caddr_t *, int, int, caddr_t *);
|
||||
void nfsm_adj(struct mbuf *, int, int);
|
||||
void nfsm_srvwcc(struct nfsrv_descript *, int, struct vattr *, int,
|
||||
struct vattr *, struct nfsm_info *);
|
||||
void nfsm_srvpostop_attr(struct nfsrv_descript *, int, struct vattr *,
|
||||
struct nfsm_info *);
|
||||
void nfsm_srvfattr(struct nfsrv_descript *, struct vattr *,
|
||||
struct nfs_fattr *);
|
||||
int nfsrv_fhtovp(fhandle_t *, int, struct vnode **, struct ucred *,
|
||||
struct nfssvc_sock *, struct mbuf *, int *);
|
||||
int netaddr_match(int, union nethostaddr *, struct mbuf *);
|
||||
void nfs_clearcommit(struct mount *);
|
||||
int nfs_in_committed_range(struct vnode *, struct buf *);
|
||||
int nfs_in_tobecommitted_range(struct vnode *, struct buf *);
|
||||
@ -180,7 +183,6 @@ void nfs_add_tobecommitted_range(struct vnode *, struct buf *);
|
||||
void nfs_del_tobecommitted_range(struct vnode *, struct buf *);
|
||||
void nfs_merge_commit_ranges(struct vnode *);
|
||||
int nfsrv_errmap(struct nfsrv_descript *, int);
|
||||
int nfsm_srvsattr(struct mbuf **, struct vattr *, struct mbuf *, caddr_t *);
|
||||
void nfsm_fhtom(struct nfsm_info *, struct vnode *, int);
|
||||
void nfsm_srvfhtom(struct mbuf **, fhandle_t *, int);
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* $OpenBSD: socketvar.h,v 1.133 2024/07/20 17:26:19 mvs Exp $ */
|
||||
/* $OpenBSD: socketvar.h,v 1.134 2024/09/09 07:38:45 mvs Exp $ */
|
||||
/* $NetBSD: socketvar.h,v 1.18 1996/02/09 18:25:38 christos Exp $ */
|
||||
|
||||
/*-
|
||||
@ -54,8 +54,10 @@ TAILQ_HEAD(soqhead, socket);
|
||||
/*
|
||||
* Locks used to protect global data and struct members:
|
||||
* I immutable after creation
|
||||
* a atomic
|
||||
* mr sb_mtx of so_rcv buffer
|
||||
* ms sb_mtx of so_snd buffer
|
||||
* m sb_mtx
|
||||
* br sblock() of so_rcv buffer
|
||||
* bs sblock() of so_snd buffer
|
||||
* s solock()
|
||||
@ -78,6 +80,40 @@ struct sosplice {
|
||||
struct task ssp_task; /* task for somove */
|
||||
};
|
||||
|
||||
/*
|
||||
* Variables for socket buffering.
|
||||
*/
|
||||
struct sockbuf {
|
||||
struct rwlock sb_lock;
|
||||
struct mutex sb_mtx;
|
||||
/* The following fields are all zeroed on flush. */
|
||||
#define sb_startzero sb_cc
|
||||
u_long sb_cc; /* [m] actual chars in buffer */
|
||||
u_long sb_datacc; /* [m] data only chars in buffer */
|
||||
u_long sb_hiwat; /* [m] max actual char count */
|
||||
u_long sb_wat; /* [m] default watermark */
|
||||
u_long sb_mbcnt; /* [m] chars of mbufs used */
|
||||
u_long sb_mbmax; /* [m] max chars of mbufs to use */
|
||||
long sb_lowat; /* [m] low water mark */
|
||||
struct mbuf *sb_mb; /* [m] the mbuf chain */
|
||||
struct mbuf *sb_mbtail; /* [m] the last mbuf in the chain */
|
||||
struct mbuf *sb_lastrecord; /* [m] first mbuf of last record in
|
||||
socket buffer */
|
||||
short sb_flags; /* [m] flags, see below */
|
||||
/* End area that is zeroed on flush. */
|
||||
#define sb_endzero sb_flags
|
||||
short sb_state; /* [m] socket state on sockbuf */
|
||||
uint64_t sb_timeo_nsecs; /* [m] timeout for read/write */
|
||||
struct klist sb_klist; /* [m] list of knotes */
|
||||
};
|
||||
|
||||
#define SB_MAX (2*1024*1024) /* default for max chars in sockbuf */
|
||||
#define SB_WAIT 0x0001 /* someone is waiting for data/space */
|
||||
#define SB_ASYNC 0x0002 /* ASYNC I/O, need signals */
|
||||
#define SB_SPLICE 0x0004 /* buffer is splice source or drain */
|
||||
#define SB_NOINTR 0x0008 /* operations not interruptible */
|
||||
#define SB_MTXLOCK 0x0010 /* sblock() doesn't need solock() */
|
||||
|
||||
/*
|
||||
* Kernel structure per socket.
|
||||
* Contains send and receive buffer queues,
|
||||
@ -85,14 +121,16 @@ struct sosplice {
|
||||
* private data and error information.
|
||||
*/
|
||||
struct socket {
|
||||
const struct protosw *so_proto; /* protocol handle */
|
||||
const struct protosw *so_proto; /* [I] protocol handle */
|
||||
struct rwlock so_lock; /* this socket lock */
|
||||
struct refcnt so_refcnt; /* references to this socket */
|
||||
void *so_pcb; /* protocol control block */
|
||||
u_int so_state; /* internal state flags SS_*, below */
|
||||
short so_type; /* generic type, see socket.h */
|
||||
short so_options; /* from socket call, see socket.h */
|
||||
short so_linger; /* time to linger while closing */
|
||||
void *so_pcb; /* [s] protocol control block */
|
||||
u_int so_state; /* [s] internal state flags SS_*,
|
||||
see below */
|
||||
short so_type; /* [I] generic type, see socket.h */
|
||||
short so_options; /* [s] from socket call, see
|
||||
socket.h */
|
||||
short so_linger; /* [s] time to linger while closing */
|
||||
/*
|
||||
* Variables for connection queueing.
|
||||
* Socket where accepts occur is so_head in all subsidiary sockets.
|
||||
@ -103,59 +141,37 @@ struct socket {
|
||||
* it has to be pulled out of either so_q0 or so_q.
|
||||
* We allow connections to queue up based on current queue lengths
|
||||
* and limit on number of queued connections for this socket.
|
||||
*
|
||||
* Connections queue relies on both socket locks of listening and
|
||||
* unaccepted sockets. Socket lock of listening socket should be
|
||||
* always taken first.
|
||||
*/
|
||||
struct socket *so_head; /* back pointer to accept socket */
|
||||
struct soqhead *so_onq; /* queue (q or q0) that we're on */
|
||||
struct soqhead so_q0; /* queue of partial connections */
|
||||
struct soqhead so_q; /* queue of incoming connections */
|
||||
struct socket *so_head; /* [s] back pointer to accept socket */
|
||||
struct soqhead *so_onq; /* [s] queue (q or q0) that we're on */
|
||||
struct soqhead so_q0; /* [s] queue of partial connections */
|
||||
struct soqhead so_q; /* [s] queue of incoming connections */
|
||||
struct sigio_ref so_sigio; /* async I/O registration */
|
||||
TAILQ_ENTRY(socket) so_qe; /* our queue entry (q or q0) */
|
||||
short so_q0len; /* partials on so_q0 */
|
||||
short so_qlen; /* number of connections on so_q */
|
||||
short so_qlimit; /* max number queued connections */
|
||||
short so_timeo; /* connection timeout */
|
||||
u_long so_oobmark; /* chars to oob mark */
|
||||
u_int so_error; /* error affecting connection */
|
||||
TAILQ_ENTRY(socket) so_qe; /* [s] our queue entry (q or q0) */
|
||||
short so_q0len; /* [s] partials on so_q0 */
|
||||
short so_qlen; /* [s] number of connections on so_q */
|
||||
short so_qlimit; /* [s] max number queued connections */
|
||||
short so_timeo; /* [s] connection timeout */
|
||||
u_long so_oobmark; /* [mr] chars to oob mark */
|
||||
u_int so_error; /* [a] error affecting connection */
|
||||
|
||||
struct sosplice *so_sp; /* [s br] */
|
||||
/*
|
||||
* Variables for socket buffering.
|
||||
*/
|
||||
struct sockbuf {
|
||||
struct rwlock sb_lock;
|
||||
struct mutex sb_mtx;
|
||||
/* The following fields are all zeroed on flush. */
|
||||
#define sb_startzero sb_cc
|
||||
u_long sb_cc; /* actual chars in buffer */
|
||||
u_long sb_datacc; /* data only chars in buffer */
|
||||
u_long sb_hiwat; /* max actual char count */
|
||||
u_long sb_wat; /* default watermark */
|
||||
u_long sb_mbcnt; /* chars of mbufs used */
|
||||
u_long sb_mbmax; /* max chars of mbufs to use */
|
||||
long sb_lowat; /* low water mark */
|
||||
struct mbuf *sb_mb; /* the mbuf chain */
|
||||
struct mbuf *sb_mbtail; /* the last mbuf in the chain */
|
||||
struct mbuf *sb_lastrecord;/* first mbuf of last record in
|
||||
socket buffer */
|
||||
short sb_flags; /* flags, see below */
|
||||
/* End area that is zeroed on flush. */
|
||||
#define sb_endzero sb_flags
|
||||
short sb_state; /* socket state on sockbuf */
|
||||
uint64_t sb_timeo_nsecs;/* timeout for read/write */
|
||||
struct klist sb_klist; /* process selecting read/write */
|
||||
} so_rcv, so_snd;
|
||||
#define SB_MAX (2*1024*1024) /* default for max chars in sockbuf */
|
||||
#define SB_WAIT 0x0001 /* someone is waiting for data/space */
|
||||
#define SB_ASYNC 0x0002 /* ASYNC I/O, need signals */
|
||||
#define SB_SPLICE 0x0004 /* buffer is splice source or drain */
|
||||
#define SB_NOINTR 0x0008 /* operations not interruptible */
|
||||
#define SB_MTXLOCK 0x0010 /* sblock() doesn't need solock() */
|
||||
|
||||
void (*so_upcall)(struct socket *so, caddr_t arg, int waitf);
|
||||
caddr_t so_upcallarg; /* Arg for above */
|
||||
uid_t so_euid, so_ruid; /* who opened the socket */
|
||||
gid_t so_egid, so_rgid;
|
||||
pid_t so_cpid; /* pid of process that opened socket */
|
||||
struct sockbuf so_rcv;
|
||||
struct sockbuf so_snd;
|
||||
|
||||
void (*so_upcall)(struct socket *, caddr_t, int); /* [s] */
|
||||
caddr_t so_upcallarg; /* [s] Arg for above */
|
||||
uid_t so_euid; /* [I] who opened the socket */
|
||||
uid_t so_ruid; /* [I] */
|
||||
gid_t so_egid; /* [I] */
|
||||
gid_t so_rgid; /* [I] */
|
||||
pid_t so_cpid; /* [I] pid of process that opened
|
||||
socket */
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -1,4 +1,4 @@
|
||||
# $OpenBSD: Makefile.inc,v 1.95 2024/09/02 12:13:56 djm Exp $
|
||||
# $OpenBSD: Makefile.inc,v 1.96 2024/09/09 02:39:57 djm Exp $
|
||||
|
||||
.include <bsd.own.mk>
|
||||
|
||||
@ -38,8 +38,6 @@ WARNINGS=yes
|
||||
OPENSSL?= yes
|
||||
ZLIB?= yes
|
||||
DSAKEY?= no
|
||||
# NB. experimental; Internet-draft subject to change.
|
||||
MLKEM?= no
|
||||
|
||||
.if (${OPENSSL:L} == "yes")
|
||||
CFLAGS+= -DWITH_OPENSSL
|
||||
@ -53,10 +51,6 @@ CFLAGS+= -DWITH_ZLIB
|
||||
CFLAGS+= -DWITH_DSA
|
||||
.endif
|
||||
|
||||
.if (${MLKEM:L} == "yes")
|
||||
CFLAGS+= -DWITH_MLKEM
|
||||
.endif
|
||||
|
||||
CFLAGS+= -DENABLE_PKCS11
|
||||
.ifndef NOPIC
|
||||
CFLAGS+= -DHAVE_DLOPEN
|
||||
@ -86,9 +80,7 @@ SRCS_KEX+= smult_curve25519_ref.c
|
||||
SRCS_KEX+= kexgen.c
|
||||
SRCS_KEX+= kexsntrup761x25519.c
|
||||
SRCS_KEX+= sntrup761.c
|
||||
.if (${MLKEM:L} == "yes")
|
||||
SRCS_KEX+= kexmlkem768x25519.c
|
||||
.endif
|
||||
|
||||
SRCS_KEY+= sshkey.c
|
||||
SRCS_KEY+= cipher.c
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* $OpenBSD: kex-names.c,v 1.3 2024/09/02 12:13:56 djm Exp $ */
|
||||
/* $OpenBSD: kex-names.c,v 1.4 2024/09/09 02:39:57 djm Exp $ */
|
||||
/*
|
||||
* Copyright (c) 2000, 2001 Markus Friedl. All rights reserved.
|
||||
*
|
||||
@ -72,10 +72,8 @@ static const struct kexalg kexalgs[] = {
|
||||
SSH_DIGEST_SHA512 },
|
||||
{ KEX_SNTRUP761X25519_SHA512_OLD, KEX_KEM_SNTRUP761X25519_SHA512, 0,
|
||||
SSH_DIGEST_SHA512 },
|
||||
#ifdef WITH_MLKEM
|
||||
{ KEX_MLKEM768X25519_SHA256, KEX_KEM_MLKEM768X25519_SHA256, 0,
|
||||
SSH_DIGEST_SHA256 },
|
||||
#endif
|
||||
{ NULL, 0, -1, -1},
|
||||
};
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* $OpenBSD: kexgen.c,v 1.9 2024/09/02 12:13:56 djm Exp $ */
|
||||
/* $OpenBSD: kexgen.c,v 1.10 2024/09/09 02:39:57 djm Exp $ */
|
||||
/*
|
||||
* Copyright (c) 2019 Markus Friedl. All rights reserved.
|
||||
*
|
||||
@ -117,11 +117,9 @@ kex_gen_client(struct ssh *ssh)
|
||||
case KEX_KEM_SNTRUP761X25519_SHA512:
|
||||
r = kex_kem_sntrup761x25519_keypair(kex);
|
||||
break;
|
||||
#ifdef WITH_MLKEM
|
||||
case KEX_KEM_MLKEM768X25519_SHA256:
|
||||
r = kex_kem_mlkem768x25519_keypair(kex);
|
||||
break;
|
||||
#endif
|
||||
default:
|
||||
r = SSH_ERR_INVALID_ARGUMENT;
|
||||
break;
|
||||
@ -194,12 +192,10 @@ input_kex_gen_reply(int type, u_int32_t seq, struct ssh *ssh)
|
||||
r = kex_kem_sntrup761x25519_dec(kex, server_blob,
|
||||
&shared_secret);
|
||||
break;
|
||||
#ifdef WITH_MLKEM
|
||||
case KEX_KEM_MLKEM768X25519_SHA256:
|
||||
r = kex_kem_mlkem768x25519_dec(kex, server_blob,
|
||||
&shared_secret);
|
||||
break;
|
||||
#endif
|
||||
default:
|
||||
r = SSH_ERR_INVALID_ARGUMENT;
|
||||
break;
|
||||
@ -320,12 +316,10 @@ input_kex_gen_init(int type, u_int32_t seq, struct ssh *ssh)
|
||||
r = kex_kem_sntrup761x25519_enc(kex, client_pubkey,
|
||||
&server_pubkey, &shared_secret);
|
||||
break;
|
||||
#ifdef WITH_MLKEM
|
||||
case KEX_KEM_MLKEM768X25519_SHA256:
|
||||
r = kex_kem_mlkem768x25519_enc(kex, client_pubkey,
|
||||
&server_pubkey, &shared_secret);
|
||||
break;
|
||||
#endif
|
||||
default:
|
||||
r = SSH_ERR_INVALID_ARGUMENT;
|
||||
break;
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* $OpenBSD: monitor.c,v 1.241 2024/09/02 12:13:56 djm Exp $ */
|
||||
/* $OpenBSD: monitor.c,v 1.242 2024/09/09 02:39:57 djm Exp $ */
|
||||
/*
|
||||
* Copyright 2002 Niels Provos <provos@citi.umich.edu>
|
||||
* Copyright 2002 Markus Friedl <markus@openbsd.org>
|
||||
@ -1456,9 +1456,7 @@ monitor_apply_keystate(struct ssh *ssh, struct monitor *pmonitor)
|
||||
#endif
|
||||
kex->kex[KEX_C25519_SHA256] = kex_gen_server;
|
||||
kex->kex[KEX_KEM_SNTRUP761X25519_SHA512] = kex_gen_server;
|
||||
#ifdef WITH_MLKEM
|
||||
kex->kex[KEX_KEM_MLKEM768X25519_SHA256] = kex_gen_server;
|
||||
#endif
|
||||
kex->load_host_public_key=&get_hostkey_public_by_type;
|
||||
kex->load_host_private_key=&get_hostkey_private_by_type;
|
||||
kex->host_key_index=&get_hostkey_index;
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* $OpenBSD: myproposal.h,v 1.72 2024/08/22 23:11:30 djm Exp $ */
|
||||
/* $OpenBSD: myproposal.h,v 1.73 2024/09/09 02:39:57 djm Exp $ */
|
||||
|
||||
/*
|
||||
* Copyright (c) 2000 Markus Friedl. All rights reserved.
|
||||
@ -27,6 +27,7 @@
|
||||
#define KEX_SERVER_KEX \
|
||||
"sntrup761x25519-sha512," \
|
||||
"sntrup761x25519-sha512@openssh.com," \
|
||||
"mlkem768x25519-sha256," \
|
||||
"curve25519-sha256," \
|
||||
"curve25519-sha256@libssh.org," \
|
||||
"ecdh-sha2-nistp256," \
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* $OpenBSD: ssh-keyscan.c,v 1.160 2024/09/04 05:33:34 djm Exp $ */
|
||||
/* $OpenBSD: ssh-keyscan.c,v 1.161 2024/09/09 02:39:57 djm Exp $ */
|
||||
/*
|
||||
* Copyright 1995, 1996 by David Mazieres <dm@lcs.mit.edu>.
|
||||
*
|
||||
@ -277,9 +277,7 @@ keygrab_ssh2(con *c)
|
||||
#endif
|
||||
c->c_ssh->kex->kex[KEX_C25519_SHA256] = kex_gen_client;
|
||||
c->c_ssh->kex->kex[KEX_KEM_SNTRUP761X25519_SHA512] = kex_gen_client;
|
||||
#ifdef WITH_MLKEM
|
||||
c->c_ssh->kex->kex[KEX_KEM_MLKEM768X25519_SHA256] = kex_gen_client;
|
||||
#endif
|
||||
ssh_set_verify_host_key_callback(c->c_ssh, key_print_wrapper);
|
||||
/*
|
||||
* do the key-exchange until an error occurs or until
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* $OpenBSD: ssh_api.c,v 1.30 2024/09/02 12:13:56 djm Exp $ */
|
||||
/* $OpenBSD: ssh_api.c,v 1.31 2024/09/09 02:39:57 djm Exp $ */
|
||||
/*
|
||||
* Copyright (c) 2012 Markus Friedl. All rights reserved.
|
||||
*
|
||||
@ -130,9 +130,7 @@ ssh_init(struct ssh **sshp, int is_server, struct kex_params *kex_params)
|
||||
#endif /* WITH_OPENSSL */
|
||||
ssh->kex->kex[KEX_C25519_SHA256] = kex_gen_server;
|
||||
ssh->kex->kex[KEX_KEM_SNTRUP761X25519_SHA512] = kex_gen_server;
|
||||
#ifdef WITH_MLKEM
|
||||
ssh->kex->kex[KEX_KEM_MLKEM768X25519_SHA256] = kex_gen_server;
|
||||
#endif
|
||||
ssh->kex->load_host_public_key=&_ssh_host_public_key;
|
||||
ssh->kex->load_host_private_key=&_ssh_host_private_key;
|
||||
ssh->kex->sign=&_ssh_host_key_sign;
|
||||
@ -149,9 +147,7 @@ ssh_init(struct ssh **sshp, int is_server, struct kex_params *kex_params)
|
||||
#endif /* WITH_OPENSSL */
|
||||
ssh->kex->kex[KEX_C25519_SHA256] = kex_gen_client;
|
||||
ssh->kex->kex[KEX_KEM_SNTRUP761X25519_SHA512] = kex_gen_client;
|
||||
#ifdef WITH_MLKEM
|
||||
ssh->kex->kex[KEX_KEM_MLKEM768X25519_SHA256] = kex_gen_client;
|
||||
#endif
|
||||
ssh->kex->verify_host_key =&_ssh_verify_host_key;
|
||||
}
|
||||
*sshp = ssh;
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* $OpenBSD: sshconnect2.c,v 1.374 2024/09/02 12:13:56 djm Exp $ */
|
||||
/* $OpenBSD: sshconnect2.c,v 1.375 2024/09/09 02:39:57 djm Exp $ */
|
||||
/*
|
||||
* Copyright (c) 2000 Markus Friedl. All rights reserved.
|
||||
* Copyright (c) 2008 Damien Miller. All rights reserved.
|
||||
@ -267,9 +267,7 @@ ssh_kex2(struct ssh *ssh, char *host, struct sockaddr *hostaddr, u_short port,
|
||||
#endif
|
||||
ssh->kex->kex[KEX_C25519_SHA256] = kex_gen_client;
|
||||
ssh->kex->kex[KEX_KEM_SNTRUP761X25519_SHA512] = kex_gen_client;
|
||||
#ifdef WITH_MLKEM
|
||||
ssh->kex->kex[KEX_KEM_MLKEM768X25519_SHA256] = kex_gen_client;
|
||||
#endif
|
||||
ssh->kex->verify_host_key=&verify_host_key_callback;
|
||||
|
||||
ssh_dispatch_run_fatal(ssh, DISPATCH_BLOCK, &ssh->kex->done);
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* $OpenBSD: sshd-session.c,v 1.8 2024/09/02 12:18:35 djm Exp $ */
|
||||
/* $OpenBSD: sshd-session.c,v 1.9 2024/09/09 02:39:57 djm Exp $ */
|
||||
/*
|
||||
* SSH2 implementation:
|
||||
* Privilege Separation:
|
||||
@ -1334,9 +1334,7 @@ do_ssh2_kex(struct ssh *ssh)
|
||||
#endif
|
||||
kex->kex[KEX_C25519_SHA256] = kex_gen_server;
|
||||
kex->kex[KEX_KEM_SNTRUP761X25519_SHA512] = kex_gen_server;
|
||||
#ifdef WITH_MLKEM
|
||||
kex->kex[KEX_KEM_MLKEM768X25519_SHA256] = kex_gen_server;
|
||||
#endif
|
||||
kex->load_host_public_key=&get_hostkey_public_by_type;
|
||||
kex->load_host_private_key=&get_hostkey_private_by_type;
|
||||
kex->host_key_index=&get_hostkey_index;
|
||||
|
@ -1,4 +1,4 @@
/* $OpenBSD: rde.c,v 1.629 2024/08/28 13:21:39 claudio Exp $ */
/* $OpenBSD: rde.c,v 1.630 2024/09/09 12:59:49 claudio Exp $ */

/*
* Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
@ -3199,7 +3199,7 @@ rde_dump_mrt_new(struct mrt *mrt, pid_t pid, int fd)
return;
}
memcpy(&ctx->mrt, mrt, sizeof(struct mrt));
TAILQ_INIT(&ctx->mrt.wbuf.bufs);
msgbuf_init(&ctx->mrt.wbuf);
ctx->mrt.wbuf.fd = fd;
ctx->mrt.state = MRT_STATE_RUNNING;
rid = rib_find(ctx->mrt.rib);

@ -1,4 +1,4 @@
/* $OpenBSD: session.c,v 1.481 2024/08/20 11:59:39 claudio Exp $ */
/* $OpenBSD: session.c,v 1.482 2024/09/09 12:59:49 claudio Exp $ */

/*
* Copyright (c) 2003, 2004, 2005 Henning Brauer <henning@openbsd.org>
@ -3159,13 +3159,13 @@ session_dispatch_imsg(struct imsgbuf *imsgbuf, int idx, u_int *listener_cnt)
if (mrt == NULL)
fatal("session_dispatch_imsg");
memcpy(mrt, &xmrt, sizeof(struct mrt));
TAILQ_INIT(&mrt->wbuf.bufs);
msgbuf_init(&mrt->wbuf);
LIST_INSERT_HEAD(&mrthead, mrt, entry);
} else {
/* old dump reopened */
close(mrt->wbuf.fd);
mrt->wbuf.fd = xmrt.wbuf.fd;
}
mrt->wbuf.fd = xmrt.wbuf.fd;
break;
case IMSG_MRT_CLOSE:
if (idx != PFD_PIPE_MAIN)