sync with OpenBSD -current

This commit is contained in:
purplerain 2024-05-06 16:20:26 +00:00
parent f05839c6d3
commit a6677bfd36
Signed by: purplerain
GPG Key ID: F42C07F07E2E35B7
29 changed files with 703 additions and 123 deletions

View File

@ -1,4 +1,4 @@
/* $OpenBSD: lhash.c,v 1.22 2024/03/02 11:11:11 tb Exp $ */
/* $OpenBSD: lhash.c,v 1.24 2024/05/06 14:38:20 jsing Exp $ */
/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
* All rights reserved.
*
@ -294,7 +294,9 @@ doall_util_fn(_LHASH *lh, int use_arg, LHASH_DOALL_FN_TYPE func,
/* Restore down load factor and trigger contraction. */
lh->down_load = down_load;
contract(lh);
if ((lh->num_nodes > MIN_NODES) &&
(lh->down_load >= (lh->num_items * LH_LOAD_MULT / lh->num_nodes)))
contract(lh);
}
void
@ -397,7 +399,8 @@ contract(_LHASH *lh)
}
}
static LHASH_NODE **getrn(_LHASH *lh, const void *data, unsigned long *rhash)
static LHASH_NODE **
getrn(_LHASH *lh, const void *data, unsigned long *rhash)
{
LHASH_NODE **ret, *n1;
unsigned long hash, nn;

View File

@ -1,4 +1,4 @@
# $OpenBSD: Makefile,v 1.57 2024/04/25 14:27:29 jsing Exp $
# $OpenBSD: Makefile,v 1.58 2024/05/06 14:37:26 jsing Exp $
SUBDIR += aead
SUBDIR += aes
@ -32,6 +32,7 @@ SUBDIR += hmac
SUBDIR += idea
SUBDIR += ige
SUBDIR += init
SUBDIR += lhash
SUBDIR += md
SUBDIR += objects
SUBDIR += pbkdf2

View File

@ -0,0 +1,12 @@
# $OpenBSD: Makefile,v 1.1 2024/05/06 14:31:25 jsing Exp $
PROG = lhash_test
DPADD+= ${LIBCRYPTO}
WARNINGS= Yes
LDFLAGS+= -lcrypto
CFLAGS+= -DLIBRESSL_INTERNAL
CFLAGS+= -Wall -Wundef -Werror
CFLAGS+= -I${.CURDIR}/../../../../lib/libcrypto
.include <bsd.regress.mk>

View File

@ -0,0 +1,59 @@
/* $OpenBSD: lhash_test.c,v 1.1 2024/05/06 14:31:25 jsing Exp $ */
/*
* Copyright (c) 2024 Joel Sing <jsing@openbsd.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <openssl/lhash.h>
/*
 * No-op LHASH doall callback; exists only so that lh_doall_arg() has
 * a function to invoke.  Both arguments are ignored.
 */
static void
test_doall_fn(void *arg1, void *arg2)
{
}
/*
 * Regression test: calling lh_doall_arg() repeatedly on an empty
 * linked hash must be safe.  Returns 0 on success, 1 on failure.
 */
static int
test_lhash_doall(void)
{
	_LHASH *lh;
	int round;

	lh = lh_new(NULL, NULL);
	if (lh == NULL)
		return 1;

	/* Run doall over the empty linked hash, many times over. */
	for (round = 0; round < 100; round++)
		lh_doall_arg(lh, test_doall_fn, NULL);

	lh_free(lh);

	return 0;
}
/*
 * Test driver: exit status is nonzero if any test failed.
 */
int
main(int argc, char **argv)
{
	return test_lhash_doall();
}

View File

@ -1,4 +1,4 @@
.\" $OpenBSD: efi.4,v 1.2 2024/04/30 05:05:23 jmc Exp $
.\" $OpenBSD: efi.4,v 1.3 2024/05/05 19:13:13 kn Exp $
.\"
.\" Copyright (c) 2018 Mark Kettenis <kettenis@openbsd.org>
.\" Copyright (c) 2024 Klemens Nanni <kn@openbsd.org>
@ -15,7 +15,7 @@
.\" ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
.\" OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
.\"
.Dd $Mdocdate: April 30 2024 $
.Dd $Mdocdate: May 5 2024 $
.Dt EFI 4
.Os
.Sh NAME
@ -35,6 +35,49 @@ firmware implementations.
It can read and write the Real Time Clock and provides userland applications
.Xr ioctl 2
access to the System Resource Table and to read and write variables.
.Sh IOCTLS
.Nm
supports the following
.Xr ioctl 2
commands, available through
.In dev/efi/efi.h
.In dev/efi/efiio.h :
.Bl -tag -width xxxx
.It Dv EFIIOC_GET_TABLE Fa "struct efi_get_table_ioc *"
.Bd -literal
struct efi_get_table_ioc {
void *buf; /* Pointer to userspace buffer */
struct uuid uuid; /* UUID to look up */
size_t table_len; /* Table size */
size_t buf_len; /* Size of the buffer */
};
.Ed
.Pp
Read a table.
.It Dv EFIIOC_VAR_GET Fa "struct efi_var_ioc *"
.Bd -literal
struct efi_var_ioc {
uint16_t *name; /* User pointer to name, in UCS2 chars */
size_t namesize; /* Number of *bytes* in the name
including terminator */
struct uuid vendor; /* Vendor's UUID for variable */
uint32_t attrib; /* Attributes */
void *data; /* User pointer to value */
size_t datasize; /* Number of *bytes* in the value */
};
.Ed
.Pp
Read a variable.
.It Dv EFIIOC_VAR_NEXT Fa "struct efi_var_ioc *"
Enumerate all variables.
The first command must use the empty name.
Further ones must denote the previous variable's name and UUID.
.It Dv EFIIOC_VAR_SET Fa "struct efi_var_ioc *"
Write a variable.
Delete it if
.Va datasize
is zero.
.El
.Sh FILES
.Bl -tag -width /dev/efi -compact
.It Pa /dev/efi

View File

@ -1,4 +1,4 @@
/* $OpenBSD: dwqe.c,v 1.20 2024/05/03 13:02:18 stsp Exp $ */
/* $OpenBSD: dwqe.c,v 1.21 2024/05/06 09:54:38 stsp Exp $ */
/*
* Copyright (c) 2008, 2019 Mark Kettenis <kettenis@openbsd.org>
* Copyright (c) 2017, 2022 Patrick Wildt <patrick@blueri.se>
@ -93,6 +93,12 @@ void dwqe_dmamem_free(struct dwqe_softc *, struct dwqe_dmamem *);
struct mbuf *dwqe_alloc_mbuf(struct dwqe_softc *, bus_dmamap_t);
void dwqe_fill_rx_ring(struct dwqe_softc *);
/*
 * Returns nonzero if the MAC advertises Tx checksum offload support
 * (TXCOESEL bit in hardware feature register 0).
 */
int
dwqe_have_tx_csum_offload(struct dwqe_softc *sc)
{
	return (sc->sc_hw_feature[0] & GMAC_MAC_HW_FEATURE0_TXCOESEL);
}
int
dwqe_attach(struct dwqe_softc *sc)
{
@ -121,6 +127,11 @@ dwqe_attach(struct dwqe_softc *sc)
bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
ifp->if_capabilities = IFCAP_VLAN_MTU;
if (dwqe_have_tx_csum_offload(sc)) {
ifp->if_capabilities |= (IFCAP_CSUM_IPv4 |
IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4 |
IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6);
}
sc->sc_mii.mii_ifp = ifp;
sc->sc_mii.mii_readreg = dwqe_mii_readreg;
@ -1077,6 +1088,25 @@ dwqe_iff(struct dwqe_softc *sc)
dwqe_write(sc, GMAC_MAC_PACKET_FILTER, reg);
}
/*
 * Request hardware checksum insertion for an outgoing packet by
 * setting the checksum-insertion-control bits in the Tx descriptor.
 * Does nothing when the hardware lacks Tx checksum offload, when the
 * descriptor is not the first of the frame, or when TSO is enabled
 * for this descriptor.
 */
void
dwqe_tx_csum(struct dwqe_softc *sc, struct mbuf *m, struct dwqe_desc *txd)
{
	if (!dwqe_have_tx_csum_offload(sc))
		return;

	/* Checksum flags are valid only on first descriptor. */
	if ((txd->sd_tdes3 & TDES3_FS) == 0)
		return;

	/* TSO and Tx checksum offloading are incompatible. */
	if (txd->sd_tdes3 & TDES3_TSO_EN)
		return;

	/* Any requested offload enables IP header + payload + pseudo-hdr. */
	if (m->m_pkthdr.csum_flags & (M_IPV4_CSUM_OUT |
	    M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
		txd->sd_tdes3 |= TDES3_CSUM_IPHDR_PAYLOAD_PSEUDOHDR;
}
int
dwqe_encap(struct dwqe_softc *sc, struct mbuf *m, int *idx, int *used)
{
@ -1107,8 +1137,10 @@ dwqe_encap(struct dwqe_softc *sc, struct mbuf *m, int *idx, int *used)
txd->sd_tdes1 = (uint32_t)(map->dm_segs[i].ds_addr >> 32);
txd->sd_tdes2 = map->dm_segs[i].ds_len;
txd->sd_tdes3 = m->m_pkthdr.len;
if (i == 0)
if (i == 0) {
txd->sd_tdes3 |= TDES3_FS;
dwqe_tx_csum(sc, m, txd);
}
if (i == (map->dm_nsegs - 1)) {
txd->sd_tdes2 |= TDES2_IC;
txd->sd_tdes3 |= TDES3_LS;

View File

@ -1,4 +1,4 @@
/* $OpenBSD: dwqereg.h,v 1.8 2024/05/03 13:02:18 stsp Exp $ */
/* $OpenBSD: dwqereg.h,v 1.9 2024/05/06 09:54:38 stsp Exp $ */
/*
* Copyright (c) 2008, 2019 Mark Kettenis <kettenis@openbsd.org>
* Copyright (c) 2017, 2022 Patrick Wildt <patrick@blueri.se>
@ -230,14 +230,37 @@ struct dwqe_desc {
uint32_t sd_tdes3;
};
/* Tx bits */
/* Tx bits (read format; host to device) */
#define TDES2_HDR_LEN 0x000003ff /* if TSO is enabled */
#define TDES2_BUF1_LEN 0x00003fff /* if TSO is disabled */
#define TDES2_VLAN_TIR 0x0000c000
#define TDES2_NO_VLAN_TAGGING (0x0 << 14)
#define TDES2_VLAN_TAG_STRIP (0x1 << 14)
#define TDES2_VLAN_TAG_INSERT (0x2 << 14)
#define TDES2_VLAN_TAG_REPLACE (0x3 << 14)
#define TDES2_BUF2_LEN 0x3fff0000
#define TDES2_TX_TIMESTAMP_EN (1 << 30) /* if TSO is disabled */
#define TDES2_TSO_EXTMEM_DIS (1 << 30) /* if TSO is enabled */
#define TDES2_IC (1U << 31)
#define TDES3_ES (1 << 15)
#define TDES3_DE (1 << 23)
#define TDES3_TCP_PAYLOAD_LEN 0x0003ffff /* if TSO is enabled */
#define TDES3_FRAME_LEN 0x00007fff /* if TSO is disabled */
#define TDES3_CIC 0x00030000 /* if TSO is disabled */
#define TDES3_CSUM_DISABLE (0x0 << 16)
#define TDES3_CSUM_IPHDR (0x1 << 16)
#define TDES3_CSUM_IPHDR_PAYLOAD (0x2 << 16)
#define TDES3_CSUM_IPHDR_PAYLOAD_PSEUDOHDR (0x3 << 16)
#define TDES3_TSO_EN (1 << 18)
#define TDES3_LS (1 << 28)
#define TDES3_FS (1 << 29)
#define TDES3_OWN (1U << 31)
/* Tx bits (writeback format; device to host) */
#define TDES3_ES (1 << 15)
#define TDES3_DE (1 << 23)
/* Bit 28 is the LS bit, as in "read" format. */
/* Bit 29 is the FS bit, as in "read" format. */
/* Bit 31 is the OWN bit, as in "read" format. */
/* Rx bits (read format; host to device) */
#define RDES3_BUF1V (1 << 24)
#define RDES3_BUF2V (1 << 25)

View File

@ -1785,6 +1785,7 @@ err_node_allow:
err_bo_create:
amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags, xcp_id);
err_reserve_limit:
amdgpu_sync_free(&(*mem)->sync);
mutex_destroy(&(*mem)->lock);
if (gobj)
drm_gem_object_put(gobj);

View File

@ -819,7 +819,7 @@ retry:
p->bytes_moved += ctx.bytes_moved;
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
amdgpu_bo_in_cpu_visible_vram(bo))
amdgpu_res_cpu_visible(adev, bo->tbo.resource))
p->bytes_moved_vis += ctx.bytes_moved;
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {

View File

@ -107,6 +107,10 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
stats.requested_visible_vram/1024UL);
drm_printf(p, "amd-requested-gtt:\t%llu KiB\n",
stats.requested_gtt/1024UL);
drm_printf(p, "drm-shared-vram:\t%llu KiB\n", stats.vram_shared/1024UL);
drm_printf(p, "drm-shared-gtt:\t%llu KiB\n", stats.gtt_shared/1024UL);
drm_printf(p, "drm-shared-cpu:\t%llu KiB\n", stats.cpu_shared/1024UL);
for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
if (!usage[hw_ip])
continue;

View File

@ -627,8 +627,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
return r;
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
bo->tbo.resource->mem_type == TTM_PL_VRAM &&
amdgpu_bo_in_cpu_visible_vram(bo))
amdgpu_res_cpu_visible(adev, bo->tbo.resource))
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
ctx.bytes_moved);
else
@ -1302,26 +1301,39 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict)
void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
struct amdgpu_mem_stats *stats)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct ttm_resource *res = bo->tbo.resource;
uint64_t size = amdgpu_bo_size(bo);
struct drm_gem_object *obj;
unsigned int domain;
bool shared;
/* Abort if the BO doesn't currently have a backing store */
if (!bo->tbo.resource)
if (!res)
return;
domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
obj = &bo->tbo.base;
shared = drm_gem_object_is_shared_for_memory_stats(obj);
domain = amdgpu_mem_type_to_domain(res->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
stats->vram += size;
if (amdgpu_bo_in_cpu_visible_vram(bo))
if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
stats->visible_vram += size;
if (shared)
stats->vram_shared += size;
break;
case AMDGPU_GEM_DOMAIN_GTT:
stats->gtt += size;
if (shared)
stats->gtt_shared += size;
break;
case AMDGPU_GEM_DOMAIN_CPU:
default:
stats->cpu += size;
if (shared)
stats->cpu_shared += size;
break;
}
@ -1406,10 +1418,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* Remember that this BO was accessed by the CPU */
abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
if (bo->resource->mem_type != TTM_PL_VRAM)
return 0;
if (amdgpu_bo_in_cpu_visible_vram(abo))
if (amdgpu_res_cpu_visible(adev, bo->resource))
return 0;
/* Can't move a pinned BO to visible VRAM */
@ -1433,7 +1442,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* this should never happen */
if (bo->resource->mem_type == TTM_PL_VRAM &&
!amdgpu_bo_in_cpu_visible_vram(abo))
!amdgpu_res_cpu_visible(adev, bo->resource))
return VM_FAULT_SIGBUS;
ttm_bo_move_to_lru_tail_unlocked(bo);
@ -1593,6 +1602,7 @@ uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
*/
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct dma_buf_attachment *attachment;
struct dma_buf *dma_buf;
const char *placement;
@ -1601,10 +1611,11 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
if (dma_resv_trylock(bo->tbo.base.resv)) {
unsigned int domain;
domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
if (amdgpu_bo_in_cpu_visible_vram(bo))
if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
placement = "VRAM VISIBLE";
else
placement = "VRAM";

View File

@ -139,12 +139,18 @@ struct amdgpu_bo_vm {
struct amdgpu_mem_stats {
/* current VRAM usage, includes visible VRAM */
uint64_t vram;
/* current shared VRAM usage, includes visible VRAM */
uint64_t vram_shared;
/* current visible VRAM usage */
uint64_t visible_vram;
/* current GTT usage */
uint64_t gtt;
/* current shared GTT usage */
uint64_t gtt_shared;
/* current system memory usage */
uint64_t cpu;
/* current shared system memory usage */
uint64_t cpu_shared;
/* sum of evicted buffers, includes visible VRAM */
uint64_t evicted_vram;
/* sum of evicted buffers due to CPU access */
@ -245,28 +251,6 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}
/**
* amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
*/
static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_res_cursor cursor;
if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM)
return false;
amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
while (cursor.remaining) {
if (cursor.start < adev->gmc.visible_vram_size)
return true;
amdgpu_res_next(&cursor, cursor.size);
}
return false;
}
/**
* amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
*/

View File

@ -137,7 +137,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
!(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
amdgpu_bo_in_cpu_visible_vram(abo)) {
amdgpu_res_cpu_visible(adev, bo->resource)) {
/* Try evicting to the CPU inaccessible part of VRAM
* first, but only set GTT as busy placement, so this
@ -408,40 +408,55 @@ error:
return r;
}
/**
 * amdgpu_res_cpu_visible - Check that resource can be accessed by CPU
 * @adev: amdgpu device
 * @res: the resource to check
 *
 * Returns: true if the full resource is CPU visible, false otherwise.
 */
bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
			    struct ttm_resource *res)
{
	struct amdgpu_res_cursor cursor;

	/* No backing store means nothing for the CPU to access. */
	if (!res)
		return false;

	/* System-memory backed placements are always CPU accessible. */
	if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
	    res->mem_type == AMDGPU_PL_PREEMPT)
		return true;

	/* Any remaining non-VRAM placement is treated as not visible. */
	if (res->mem_type != TTM_PL_VRAM)
		return false;

	/*
	 * Walk every chunk of the VRAM resource; each chunk must end
	 * below the CPU-visible VRAM aperture limit.
	 */
	amdgpu_res_first(res, 0, res->size, &cursor);
	while (cursor.remaining) {
		if ((cursor.start + cursor.size) >= adev->gmc.visible_vram_size)
			return false;
		amdgpu_res_next(&cursor, cursor.size);
	}

	return true;
}
/*
* amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy
* amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy
*
* Called by amdgpu_bo_move()
*/
static bool amdgpu_mem_visible(struct amdgpu_device *adev,
struct ttm_resource *mem)
static bool amdgpu_res_copyable(struct amdgpu_device *adev,
struct ttm_resource *mem)
{
u64 mem_size = (u64)mem->size;
struct amdgpu_res_cursor cursor;
u64 end;
if (mem->mem_type == TTM_PL_SYSTEM ||
mem->mem_type == TTM_PL_TT)
return true;
if (mem->mem_type != TTM_PL_VRAM)
if (!amdgpu_res_cpu_visible(adev, mem))
return false;
amdgpu_res_first(mem, 0, mem_size, &cursor);
end = cursor.start + cursor.size;
while (cursor.remaining) {
amdgpu_res_next(&cursor, cursor.size);
/* ttm_resource_ioremap only supports contiguous memory */
if (mem->mem_type == TTM_PL_VRAM &&
!(mem->placement & TTM_PL_FLAG_CONTIGUOUS))
return false;
if (!cursor.remaining)
break;
/* ttm_resource_ioremap only supports contiguous memory */
if (end != cursor.start)
return false;
end = cursor.start + cursor.size;
}
return end <= adev->gmc.visible_vram_size;
return true;
}
/*
@ -534,8 +549,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
if (r) {
/* Check that all memory is CPU accessible */
if (!amdgpu_mem_visible(adev, old_mem) ||
!amdgpu_mem_visible(adev, new_mem)) {
if (!amdgpu_res_copyable(adev, old_mem) ||
!amdgpu_res_copyable(adev, new_mem)) {
pr_err("Move buffer fallback to memcpy unavailable\n");
return r;
}

View File

@ -140,6 +140,9 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
uint64_t start);
bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
struct ttm_resource *res);
int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,

View File

@ -365,7 +365,8 @@ static void sdma_v4_4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
u32 ref_and_mask = 0;
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0
<< (ring->me % adev->sdma.num_inst_per_aid);
sdma_v4_4_2_wait_reg_mem(ring, 0, 1,
adev->nbio.funcs->get_hdp_flush_done_offset(adev),

View File

@ -292,17 +292,21 @@ static void sdma_v5_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
u32 ref_and_mask = 0;
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
if (ring->me > 1) {
amdgpu_asic_flush_hdp(adev, ring);
} else {
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
amdgpu_ring_write(ring, ref_and_mask); /* reference */
amdgpu_ring_write(ring, ref_and_mask); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
amdgpu_ring_write(ring, ref_and_mask); /* reference */
amdgpu_ring_write(ring, ref_and_mask); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
}
/**

View File

@ -567,6 +567,19 @@ unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru,
int drm_gem_evict(struct drm_gem_object *obj);
/**
* drm_gem_object_is_shared_for_memory_stats - helper for shared memory stats
*
* This helper should only be used for fdinfo shared memory stats to determine
* if a GEM object is shared.
*
* @obj: obj in question
*/
static inline bool drm_gem_object_is_shared_for_memory_stats(struct drm_gem_object *obj)
{
return (obj->handle_count > 1) || obj->dma_buf;
}
#ifdef CONFIG_LOCKDEP
/**
* drm_gem_gpuva_set_lock() - Set the lock protecting accesses to the gpuva list.

View File

@ -81,7 +81,7 @@ struct ttm_pool {
bool use_dma32;
struct {
struct ttm_pool_type orders[MAX_ORDER + 1];
struct ttm_pool_type orders[NR_PAGE_ORDERS];
} caching[TTM_NUM_CACHING_TYPES];
};

View File

@ -7,6 +7,7 @@
#include <linux/nodemask.h>
#define MAX_ORDER 11
#define NR_PAGE_ORDERS (MAX_ORDER + 1)
#define pfn_to_nid(x) 0
#endif

View File

@ -175,7 +175,7 @@ static void ttm_device_init_pools(struct kunit *test)
if (params->pools_init_expected) {
for (int i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
for (int j = 0; j <= MAX_ORDER; ++j) {
for (int j = 0; j < NR_PAGE_ORDERS; ++j) {
pt = pool->caching[i].orders[j];
KUNIT_EXPECT_PTR_EQ(test, pt.pool, pool);
KUNIT_EXPECT_EQ(test, pt.caching, i);

View File

@ -70,11 +70,11 @@ module_param(page_pool_size, ulong, 0644);
static atomic_long_t allocated_pages;
static struct ttm_pool_type global_write_combined[MAX_ORDER + 1];
static struct ttm_pool_type global_uncached[MAX_ORDER + 1];
static struct ttm_pool_type global_write_combined[NR_PAGE_ORDERS];
static struct ttm_pool_type global_uncached[NR_PAGE_ORDERS];
static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER + 1];
static struct ttm_pool_type global_dma32_uncached[MAX_ORDER + 1];
static struct ttm_pool_type global_dma32_write_combined[NR_PAGE_ORDERS];
static struct ttm_pool_type global_dma32_uncached[NR_PAGE_ORDERS];
static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
@ -424,17 +424,23 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
enum ttm_caching caching,
unsigned int order)
{
if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE)
if (pool->use_dma_alloc)
return &pool->caching[caching].orders[order];
#ifdef CONFIG_X86
switch (caching) {
case ttm_write_combined:
if (pool->nid != NUMA_NO_NODE)
return &pool->caching[caching].orders[order];
if (pool->use_dma32)
return &global_dma32_write_combined[order];
return &global_write_combined[order];
case ttm_uncached:
if (pool->nid != NUMA_NO_NODE)
return &pool->caching[caching].orders[order];
if (pool->use_dma32)
return &global_dma32_uncached[order];
@ -710,11 +716,17 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
pool->use_dma_alloc = use_dma_alloc;
pool->use_dma32 = use_dma32;
if (use_dma_alloc || nid != NUMA_NO_NODE) {
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
for (j = 0; j <= MAX_ORDER; ++j)
ttm_pool_type_init(&pool->caching[i].orders[j],
pool, i, j);
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
for (j = 0; j < NR_PAGE_ORDERS; ++j) {
struct ttm_pool_type *pt;
/* Initialize only pool types which are actually used */
pt = ttm_pool_select_type(pool, i, j);
if (pt != &pool->caching[i].orders[j])
continue;
ttm_pool_type_init(pt, pool, i, j);
}
}
}
EXPORT_SYMBOL(ttm_pool_init);
@ -731,10 +743,16 @@ void ttm_pool_fini(struct ttm_pool *pool)
{
unsigned int i, j;
if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
for (j = 0; j <= MAX_ORDER; ++j)
ttm_pool_type_fini(&pool->caching[i].orders[j]);
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
for (j = 0; j < NR_PAGE_ORDERS; ++j) {
struct ttm_pool_type *pt;
pt = ttm_pool_select_type(pool, i, j);
if (pt != &pool->caching[i].orders[j])
continue;
ttm_pool_type_fini(pt);
}
}
/* We removed the pool types from the LRU, but we need to also make sure
@ -795,7 +813,7 @@ static void ttm_pool_debugfs_header(struct seq_file *m)
unsigned int i;
seq_puts(m, "\t ");
for (i = 0; i <= MAX_ORDER; ++i)
for (i = 0; i < NR_PAGE_ORDERS; ++i)
seq_printf(m, " ---%2u---", i);
seq_puts(m, "\n");
}
@ -806,7 +824,7 @@ static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
{
unsigned int i;
for (i = 0; i <= MAX_ORDER; ++i)
for (i = 0; i < NR_PAGE_ORDERS; ++i)
seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
seq_puts(m, "\n");
}
@ -915,7 +933,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
mtx_init(&shrinker_lock, IPL_NONE);
INIT_LIST_HEAD(&shrinker_list);
for (i = 0; i <= MAX_ORDER; ++i) {
for (i = 0; i < NR_PAGE_ORDERS; ++i) {
ttm_pool_type_init(&global_write_combined[i], NULL,
ttm_write_combined, i);
ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
@ -948,7 +966,7 @@ void ttm_pool_mgr_fini(void)
{
unsigned int i;
for (i = 0; i <= MAX_ORDER; ++i) {
for (i = 0; i < NR_PAGE_ORDERS; ++i) {
ttm_pool_type_fini(&global_write_combined[i]);
ttm_pool_type_fini(&global_uncached[i]);

View File

@ -1,4 +1,4 @@
/* $OpenBSD: if_igc.c,v 1.21 2024/05/04 13:35:26 mbuhl Exp $ */
/* $OpenBSD: if_igc.c,v 1.22 2024/05/06 04:25:52 dlg Exp $ */
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
@ -30,6 +30,7 @@
#include "bpfilter.h"
#include "vlan.h"
#include "kstat.h"
#include <sys/param.h>
#include <sys/systm.h>
@ -41,6 +42,7 @@
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/intrmap.h>
#include <sys/kstat.h>
#include <net/if.h>
#include <net/if_media.h>
@ -148,6 +150,10 @@ void igc_get_hw_control(struct igc_softc *);
void igc_release_hw_control(struct igc_softc *);
int igc_is_valid_ether_addr(uint8_t *);
#if NKSTAT > 0
void igc_kstat_attach(struct igc_softc *);
#endif
/*********************************************************************
* OpenBSD Device Interface Entry Points
*********************************************************************/
@ -282,6 +288,10 @@ igc_attach(struct device *parent, struct device *self, void *aux)
igc_get_hw_control(sc);
printf(", address %s\n", ether_sprintf(sc->hw.mac.addr));
#if NKSTAT > 0
igc_kstat_attach(sc);
#endif
return;
err_late:
@ -2482,3 +2492,328 @@ igc_is_valid_ether_addr(uint8_t *addr)
return 1;
}
#if NKSTAT > 0
/*
 * The counters below are read-to-clear, so they need to be accumulated
 * for userland to see counters. Periodically fetch the counters from a
 * timeout to avoid a 32-bit roll-over between kstat reads.
 */
/*
 * Indices into the kstat data array; one slot per accumulated
 * hardware statistics counter (igc_counters[] maps each index to its
 * register and display name).
 */
enum igc_stat {
	igc_stat_crcerrs,
	igc_stat_algnerrc,
	igc_stat_rxerrc,
	igc_stat_mpc,
	igc_stat_scc,
	igc_stat_ecol,
	igc_stat_mcc,
	igc_stat_latecol,
	igc_stat_colc,
	igc_stat_rerc,
	igc_stat_dc,
	igc_stat_tncrs,
	igc_stat_htdpmc,
	igc_stat_rlec,
	igc_stat_xonrxc,
	igc_stat_xontxc,
	igc_stat_xoffrxc,
	igc_stat_xofftxc,
	igc_stat_fcruc,
	igc_stat_prc64,
	igc_stat_prc127,
	igc_stat_prc255,
	igc_stat_prc511,
	igc_stat_prc1023,
	igc_stat_prc1522,
	igc_stat_gprc,
	igc_stat_bprc,
	igc_stat_mprc,
	igc_stat_gptc,
	igc_stat_gorc,
	igc_stat_gotc,
	igc_stat_rnbc,
	igc_stat_ruc,
	igc_stat_rfc,
	igc_stat_roc,
	igc_stat_rjc,
	igc_stat_mgtprc,
	igc_stat_mgtpdc,
	igc_stat_mgtptc,
	igc_stat_tor,
	igc_stat_tot,
	igc_stat_tpr,
	igc_stat_tpt,
	igc_stat_ptc64,
	igc_stat_ptc127,
	igc_stat_ptc255,
	igc_stat_ptc511,
	igc_stat_ptc1023,
	igc_stat_ptc1522,
	igc_stat_mptc,
	igc_stat_bptc,
	igc_stat_tsctc,
	igc_stat_iac,
	igc_stat_rpthc,
	igc_stat_tlpic,
	igc_stat_rlpic,
	igc_stat_hgptc,
	igc_stat_rxdmtc,
	igc_stat_hgorc,
	igc_stat_hgotc,
	igc_stat_lenerrs,

	igc_stat_count		/* number of counters, not a real stat */
};
/*
 * Static description of one hardware statistics counter.
 */
struct igc_counter {
	const char *name;		/* kstat display name */
	enum kstat_kv_unit unit;	/* unit reported to userland */
	uint32_t reg;			/* 32-bit stat register, or 0 when
					 * the counter is read specially */
};
/*
 * Register map for the accumulated statistics, indexed by enum
 * igc_stat.  Entries with reg == 0 (the byte counters) have no single
 * 32-bit register and are instead read from 64-bit low/high register
 * pairs in igc_stat_read().
 */
static const struct igc_counter igc_counters[igc_stat_count] = {
	[igc_stat_crcerrs] =
	    { "crc errs",		KSTAT_KV_U_NONE,	IGC_CRCERRS },
	[igc_stat_algnerrc] =
	    { "alignment errs",		KSTAT_KV_U_NONE,	IGC_ALGNERRC },
	[igc_stat_rxerrc] =
	    { "rx errs",		KSTAT_KV_U_NONE,	IGC_RXERRC },
	[igc_stat_mpc] =
	    { "missed pkts",		KSTAT_KV_U_NONE,	IGC_MPC },
	[igc_stat_scc] =
	    { "single colls",		KSTAT_KV_U_NONE,	IGC_SCC },
	[igc_stat_ecol] =
	    { "excessive colls",	KSTAT_KV_U_NONE,	IGC_ECOL },
	[igc_stat_mcc] =
	    { "multiple colls",		KSTAT_KV_U_NONE,	IGC_MCC },
	[igc_stat_latecol] =
	    { "late colls",		KSTAT_KV_U_NONE,	IGC_LATECOL },
	[igc_stat_colc] =
	    { "collisions",		KSTAT_KV_U_NONE,	IGC_COLC },
	[igc_stat_rerc] =
	    { "recv errs",		KSTAT_KV_U_NONE,	IGC_RERC },
	[igc_stat_dc] =
	    { "defers",			KSTAT_KV_U_NONE,	IGC_DC },
	[igc_stat_tncrs] =
	    { "tx no crs",		KSTAT_KV_U_NONE,	IGC_TNCRS},
	[igc_stat_htdpmc] =
	    { "host tx discards",	KSTAT_KV_U_NONE,	IGC_HTDPMC },
	[igc_stat_rlec] =
	    { "recv len errs",		KSTAT_KV_U_NONE,	IGC_RLEC },
	[igc_stat_xonrxc] =
	    { "xon rx",			KSTAT_KV_U_NONE,	IGC_XONRXC },
	[igc_stat_xontxc] =
	    { "xon tx",			KSTAT_KV_U_NONE,	IGC_XONTXC },
	[igc_stat_xoffrxc] =
	    { "xoff rx",		KSTAT_KV_U_NONE,	IGC_XOFFRXC },
	[igc_stat_xofftxc] =
	    { "xoff tx",		KSTAT_KV_U_NONE,	IGC_XOFFTXC },
	[igc_stat_fcruc] =
	    { "fc rx unsupp",		KSTAT_KV_U_NONE,	IGC_FCRUC },
	[igc_stat_prc64] =
	    { "rx 64B",			KSTAT_KV_U_PACKETS,	IGC_PRC64 },
	[igc_stat_prc127] =
	    { "rx 65-127B",		KSTAT_KV_U_PACKETS,	IGC_PRC127 },
	[igc_stat_prc255] =
	    { "rx 128-255B",		KSTAT_KV_U_PACKETS,	IGC_PRC255 },
	[igc_stat_prc511] =
	    { "rx 256-511B",		KSTAT_KV_U_PACKETS,	IGC_PRC511 },
	[igc_stat_prc1023] =
	    { "rx 512-1023B",		KSTAT_KV_U_PACKETS,	IGC_PRC1023 },
	[igc_stat_prc1522] =
	    { "rx 1024-maxB",		KSTAT_KV_U_PACKETS,	IGC_PRC1522 },
	[igc_stat_gprc] =
	    { "rx good",		KSTAT_KV_U_PACKETS,	IGC_GPRC },
	[igc_stat_bprc] =
	    { "rx bcast",		KSTAT_KV_U_PACKETS,	IGC_BPRC },
	[igc_stat_mprc] =
	    { "rx mcast",		KSTAT_KV_U_PACKETS,	IGC_MPRC },
	[igc_stat_gptc] =
	    { "tx good",		KSTAT_KV_U_PACKETS,	IGC_GPTC },
	[igc_stat_gorc] =
	    { "rx good bytes",		KSTAT_KV_U_BYTES,	0 },
	[igc_stat_gotc] =
	    { "tx good bytes",		KSTAT_KV_U_BYTES,	0 },
	[igc_stat_rnbc] =
	    { "rx no bufs",		KSTAT_KV_U_NONE,	IGC_RNBC },
	[igc_stat_ruc] =
	    { "rx undersize",		KSTAT_KV_U_NONE,	IGC_RUC },
	[igc_stat_rfc] =
	    { "rx frags",		KSTAT_KV_U_NONE,	IGC_RFC },
	[igc_stat_roc] =
	    { "rx oversize",		KSTAT_KV_U_NONE,	IGC_ROC },
	[igc_stat_rjc] =
	    { "rx jabbers",		KSTAT_KV_U_NONE,	IGC_RJC },
	[igc_stat_mgtprc] =
	    { "rx mgmt",		KSTAT_KV_U_PACKETS,	IGC_MGTPRC },
	[igc_stat_mgtpdc] =
	    { "rx mgmt drops",		KSTAT_KV_U_PACKETS,	IGC_MGTPDC },
	[igc_stat_mgtptc] =
	    { "tx mgmt",		KSTAT_KV_U_PACKETS,	IGC_MGTPTC },
	[igc_stat_tor] =
	    { "rx total bytes",		KSTAT_KV_U_BYTES,	0 },
	[igc_stat_tot] =
	    { "tx total bytes",		KSTAT_KV_U_BYTES,	0 },
	[igc_stat_tpr] =
	    { "rx total",		KSTAT_KV_U_PACKETS,	IGC_TPR },
	[igc_stat_tpt] =
	    { "tx total",		KSTAT_KV_U_PACKETS,	IGC_TPT },
	[igc_stat_ptc64] =
	    { "tx 64B",			KSTAT_KV_U_PACKETS,	IGC_PTC64 },
	[igc_stat_ptc127] =
	    { "tx 65-127B",		KSTAT_KV_U_PACKETS,	IGC_PTC127 },
	[igc_stat_ptc255] =
	    { "tx 128-255B",		KSTAT_KV_U_PACKETS,	IGC_PTC255 },
	[igc_stat_ptc511] =
	    { "tx 256-511B",		KSTAT_KV_U_PACKETS,	IGC_PTC511 },
	[igc_stat_ptc1023] =
	    { "tx 512-1023B",		KSTAT_KV_U_PACKETS,	IGC_PTC1023 },
	[igc_stat_ptc1522] =
	    { "tx 1024-maxB",		KSTAT_KV_U_PACKETS,	IGC_PTC1522 },
	[igc_stat_mptc] =
	    { "tx mcast",		KSTAT_KV_U_PACKETS,	IGC_MPTC },
	[igc_stat_bptc] =
	    { "tx bcast",		KSTAT_KV_U_PACKETS,	IGC_BPTC },
	[igc_stat_tsctc] =
	    { "tx tso ctx",		KSTAT_KV_U_NONE,	IGC_TSCTC },
	[igc_stat_iac] =
	    { "interrupts",		KSTAT_KV_U_NONE,	IGC_IAC },
	[igc_stat_rpthc] =
	    { "rx to host",		KSTAT_KV_U_PACKETS,	IGC_RPTHC },
	[igc_stat_tlpic] =
	    { "eee tx lpi",		KSTAT_KV_U_NONE,	IGC_TLPIC },
	[igc_stat_rlpic] =
	    { "eee rx lpi",		KSTAT_KV_U_NONE,	IGC_RLPIC },
	[igc_stat_hgptc] =
	    { "host rx",		KSTAT_KV_U_PACKETS,	IGC_HGPTC },
	[igc_stat_rxdmtc] =
	    { "rxd min thresh",		KSTAT_KV_U_NONE,	IGC_RXDMTC },
	[igc_stat_hgorc] =
	    { "host good rx",		KSTAT_KV_U_BYTES,	0 },
	[igc_stat_hgotc] =
	    { "host good tx",		KSTAT_KV_U_BYTES,	0 },
	[igc_stat_lenerrs] =
	    { "len errs",		KSTAT_KV_U_NONE,	IGC_LENERRS },
};
/*
 * Accumulate the hardware statistics registers into the kstat data.
 * Most counters are single 32-bit read-to-clear registers; the byte
 * counters (reg == 0 in igc_counters[]) are instead read from their
 * 64-bit low/high register pairs.
 */
static void
igc_stat_read(struct igc_softc *sc)
{
	struct igc_hw *hw = &sc->hw;
	struct kstat *ks = sc->ks;
	struct kstat_kv *kvs = ks->ks_data;
	uint32_t hi, lo;
	unsigned int i;

	/* Simple 32-bit counters, read straight from their register. */
	for (i = 0; i < nitems(igc_counters); i++) {
		const struct igc_counter *c = &igc_counters[i];
		if (c->reg == 0)
			continue;

		kstat_kv_u64(&kvs[i]) += IGC_READ_REG(hw, c->reg);
	}

	/* 64-bit counters, assembled from low/high register pairs. */
	lo = IGC_READ_REG(hw, IGC_GORCL);
	hi = IGC_READ_REG(hw, IGC_GORCH);
	kstat_kv_u64(&kvs[igc_stat_gorc]) +=
	    ((uint64_t)hi << 32) | ((uint64_t)lo << 0);

	lo = IGC_READ_REG(hw, IGC_GOTCL);
	hi = IGC_READ_REG(hw, IGC_GOTCH);
	kstat_kv_u64(&kvs[igc_stat_gotc]) +=
	    ((uint64_t)hi << 32) | ((uint64_t)lo << 0);

	lo = IGC_READ_REG(hw, IGC_TORL);
	hi = IGC_READ_REG(hw, IGC_TORH);
	kstat_kv_u64(&kvs[igc_stat_tor]) +=
	    ((uint64_t)hi << 32) | ((uint64_t)lo << 0);

	lo = IGC_READ_REG(hw, IGC_TOTL);
	hi = IGC_READ_REG(hw, IGC_TOTH);
	kstat_kv_u64(&kvs[igc_stat_tot]) +=
	    ((uint64_t)hi << 32) | ((uint64_t)lo << 0);

	lo = IGC_READ_REG(hw, IGC_HGORCL);
	hi = IGC_READ_REG(hw, IGC_HGORCH);
	kstat_kv_u64(&kvs[igc_stat_hgorc]) +=
	    ((uint64_t)hi << 32) | ((uint64_t)lo << 0);

	lo = IGC_READ_REG(hw, IGC_HGOTCL);
	hi = IGC_READ_REG(hw, IGC_HGOTCH);
	kstat_kv_u64(&kvs[igc_stat_hgotc]) +=
	    ((uint64_t)hi << 32) | ((uint64_t)lo << 0);
}
/*
 * Periodic (4 second) timeout that accumulates the read-to-clear
 * hardware counters so the 32-bit registers do not roll over unnoticed
 * between kstat reads.  If the kstat mutex is currently contended this
 * round is simply skipped; the timeout is always rearmed.
 */
static void
igc_kstat_tick(void *arg)
{
	struct igc_softc *sc = arg;

	if (mtx_enter_try(&sc->ks_mtx)) {
		igc_stat_read(sc);
		mtx_leave(&sc->ks_mtx);
	}

	timeout_add_sec(&sc->ks_tmo, 4);
}
/*
 * kstat read callback: refresh the counters on demand and stamp
 * the update time. Always reports success.
 */
static int
igc_kstat_read(struct kstat *ks)
{
	struct igc_softc *softc = ks->ks_softc;

	igc_stat_read(softc);
	nanouptime(&ks->ks_updated);

	return (0);
}
/*
 * Set up kstat support for the igc statistics counters: allocate one
 * kstat_kv slot per igc_counters entry, create the "igc-stats" kstat,
 * and start the periodic rollup timeout. Failure is non-fatal; the
 * device simply attaches without kstats.
 */
void
igc_kstat_attach(struct igc_softc *sc)
{
	struct kstat *ks;
	struct kstat_kv *kvs;
	size_t len;
	unsigned int i;

	mtx_init(&sc->ks_mtx, IPL_SOFTCLOCK);
	timeout_set(&sc->ks_tmo, igc_kstat_tick, sc);

	len = sizeof(*kvs) * nitems(igc_counters);

	/*
	 * mallocarray(9) takes (nmemb, size); the original call had the
	 * arguments swapped. The product is the same, but keep the
	 * documented order.
	 */
	kvs = mallocarray(nitems(igc_counters), sizeof(*kvs), M_DEVBUF,
	    M_WAITOK|M_ZERO|M_CANFAIL);
	if (kvs == NULL) {
		printf("%s: unable to allocate igc kstats\n", DEVNAME(sc));
		return;
	}

	ks = kstat_create(DEVNAME(sc), 0, "igc-stats", 0, KSTAT_T_KV, 0);
	if (ks == NULL) {
		printf("%s: unable to create igc kstats\n", DEVNAME(sc));
		free(kvs, M_DEVBUF, len);
		return;
	}

	/* One 64-bit counter per table entry, named after the table. */
	for (i = 0; i < nitems(igc_counters); i++) {
		const struct igc_counter *c = &igc_counters[i];

		kstat_kv_unit_init(&kvs[i], c->name,
		    KSTAT_KV_T_COUNTER64, c->unit);
	}

	ks->ks_softc = sc;
	ks->ks_data = kvs;
	ks->ks_datalen = len;
	ks->ks_read = igc_kstat_read;
	kstat_set_mutex(ks, &sc->ks_mtx);

	kstat_install(ks);
	sc->ks = ks;

	igc_kstat_tick(sc); /* let's gooo */
}
#endif /* NKSTAT > 0 */

View File

@ -1,4 +1,4 @@
/* $OpenBSD: if_igc.h,v 1.2 2022/01/09 05:42:50 jsg Exp $ */
/* $OpenBSD: if_igc.h,v 1.3 2024/05/06 04:25:52 dlg Exp $ */
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
@ -199,6 +199,7 @@
/* Forward declaration. */
struct igc_hw;
struct kstat;
struct igc_osdep {
bus_dma_tag_t os_dmat;
@ -320,6 +321,11 @@ struct igc_softc {
/* Multicast array memory */
uint8_t *mta;
/* Counters */
struct mutex ks_mtx;
struct timeout ks_tmo;
struct kstat *ks;
};
#define DEVNAME(_sc) ((_sc)->sc_dev.dv_xname)

View File

@ -1,4 +1,4 @@
/* $OpenBSD: igc_regs.h,v 1.2 2023/08/15 08:27:30 miod Exp $ */
/* $OpenBSD: igc_regs.h,v 1.3 2024/05/06 04:25:52 dlg Exp $ */
/*-
* Copyright 2021 Intel Corp
* Copyright 2021 Rubicon Communications, LLC (Netgate)
@ -164,6 +164,7 @@
/* Statistics Register Descriptions */
#define IGC_CRCERRS 0x04000 /* CRC Error Count - R/clr */
#define IGC_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
#define IGC_RXERRC 0x04004 /* Receive Error Count - R/clr */
#define IGC_MPC 0x04010 /* Missed Packet Count - R/clr */
#define IGC_SCC 0x04014 /* Single Collision Count - R/clr */
#define IGC_ECOL 0x04018 /* Excessive Collision Count - R/clr */
@ -218,7 +219,14 @@
#define IGC_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */
#define IGC_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */
#define IGC_IAC 0x04100 /* Interrupt Assertion Count */
#define IGC_RPTHC 0x04104 /* Rx Packets To Host */
#define IGC_HGPTC 0x04118 /* Host Good Packets Tx Count */
#define IGC_RXDMTC 0x04120 /* Rx Descriptor Minimum Threshold Count */
#define IGC_HGORCL 0x04128 /* Host Good Octets Received Count Low */
#define IGC_HGORCH 0x0412C /* Host Good Octets Received Count High */
#define IGC_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */
#define IGC_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */
#define IGC_LENERRS 0x04138 /* Length Errors Count */
#define IGC_VFGPRC 0x00F10
#define IGC_VFGORC 0x00F18
@ -229,11 +237,7 @@
#define IGC_VFGPTLBC 0x00F44
#define IGC_VFGORLBC 0x00F48
#define IGC_VFGPRLBC 0x00F40
#define IGC_HGORCL 0x04128 /* Host Good Octets Received Count Low */
#define IGC_HGORCH 0x0412C /* Host Good Octets Received Count High */
#define IGC_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */
#define IGC_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */
#define IGC_LENERRS 0x04138 /* Length Errors Count */
#define IGC_PCS_ANADV 0x04218 /* AN advertisement - RW */
#define IGC_PCS_LPAB 0x0421C /* Link Partner Ability - RW */
#define IGC_RXCSUM 0x05000 /* Rx Checksum Control - RW */

View File

@ -1,4 +1,4 @@
/* $OpenBSD: if_urtwn.c,v 1.108 2023/06/12 11:27:30 jsg Exp $ */
/* $OpenBSD: if_urtwn.c,v 1.109 2024/05/06 05:02:25 jsg Exp $ */
/*-
* Copyright (c) 2010 Damien Bergamini <damien.bergamini@free.fr>
@ -344,6 +344,7 @@ static const struct urtwn_type {
/* URTWN_RTL8192EU */
URTWN_DEV_8192EU(DLINK, DWA131E1),
URTWN_DEV_8192EU(REALTEK, RTL8192EU),
URTWN_DEV_8192EU(REALTEK, RTL8192EU_2),
URTWN_DEV_8192EU(TPLINK, RTL8192EU),
URTWN_DEV_8192EU(TPLINK, RTL8192EU_2),
URTWN_DEV_8192EU(TPLINK, RTL8192EU_3)

View File

@ -1,4 +1,4 @@
$OpenBSD: usbdevs,v 1.761 2024/04/12 00:43:32 kevlo Exp $
$OpenBSD: usbdevs,v 1.762 2024/05/06 05:00:19 jsg Exp $
/* $NetBSD: usbdevs,v 1.322 2003/05/10 17:47:14 hamajima Exp $ */
/*
@ -3808,6 +3808,7 @@ product REALTEK RTL8187 0x8187 RTL8187
product REALTEK RTL8187B_0 0x8189 RTL8187B
product REALTEK RTL8188CUS 0x818a RTL8188CUS
product REALTEK RTL8192EU 0x818b RTL8192EU
product REALTEK RTL8192EU_2 0x818c RTL8192EU
product REALTEK RTL8188CU_3 0x8191 RTL8188CU
product REALTEK RTL8192U 0x8192 RTL8192U
product REALTEK RTL8187B_1 0x8197 RTL8187B

View File

@ -1,10 +1,10 @@
/* $OpenBSD: usbdevs.h,v 1.773 2024/04/12 00:44:07 kevlo Exp $ */
/* $OpenBSD: usbdevs.h,v 1.774 2024/05/06 05:00:52 jsg Exp $ */
/*
* THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
*
* generated from:
* OpenBSD: usbdevs,v 1.761 2024/04/12 00:43:32 kevlo Exp
* OpenBSD: usbdevs,v 1.762 2024/05/06 05:00:19 jsg Exp
*/
/* $NetBSD: usbdevs,v 1.322 2003/05/10 17:47:14 hamajima Exp $ */
@ -3815,6 +3815,7 @@
#define USB_PRODUCT_REALTEK_RTL8187B_0 0x8189 /* RTL8187B */
#define USB_PRODUCT_REALTEK_RTL8188CUS 0x818a /* RTL8188CUS */
#define USB_PRODUCT_REALTEK_RTL8192EU 0x818b /* RTL8192EU */
#define USB_PRODUCT_REALTEK_RTL8192EU_2 0x818c /* RTL8192EU */
#define USB_PRODUCT_REALTEK_RTL8188CU_3 0x8191 /* RTL8188CU */
#define USB_PRODUCT_REALTEK_RTL8192U 0x8192 /* RTL8192U */
#define USB_PRODUCT_REALTEK_RTL8187B_1 0x8197 /* RTL8187B */

View File

@ -1,10 +1,10 @@
/* $OpenBSD: usbdevs_data.h,v 1.767 2024/04/12 00:44:07 kevlo Exp $ */
/* $OpenBSD: usbdevs_data.h,v 1.768 2024/05/06 05:00:52 jsg Exp $ */
/*
* THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
*
* generated from:
* OpenBSD: usbdevs,v 1.761 2024/04/12 00:43:32 kevlo Exp
* OpenBSD: usbdevs,v 1.762 2024/05/06 05:00:19 jsg Exp
*/
/* $NetBSD: usbdevs,v 1.322 2003/05/10 17:47:14 hamajima Exp $ */
@ -9517,6 +9517,10 @@ const struct usb_known_product usb_known_products[] = {
USB_VENDOR_REALTEK, USB_PRODUCT_REALTEK_RTL8192EU,
"RTL8192EU",
},
{
USB_VENDOR_REALTEK, USB_PRODUCT_REALTEK_RTL8192EU_2,
"RTL8192EU",
},
{
USB_VENDOR_REALTEK, USB_PRODUCT_REALTEK_RTL8188CU_3,
"RTL8188CU",

View File

@ -1,4 +1,4 @@
/* $OpenBSD: dev.c,v 1.112 2024/04/22 11:01:02 ratchov Exp $ */
/* $OpenBSD: dev.c,v 1.113 2024/05/06 05:37:26 ratchov Exp $ */
/*
* Copyright (c) 2008-2012 Alexandre Ratchov <alex@caoua.org>
*
@ -2450,8 +2450,8 @@ ctl_setval(struct ctl *c, int val)
c->curval = val;
return 1;
case CTL_OPT_DEV:
c->u.opt_dev.opt->alt_first = c->u.opt_dev.dev;
opt_setdev(c->u.opt_dev.opt, c->u.opt_dev.dev);
if (opt_setdev(c->u.opt_dev.opt, c->u.opt_dev.dev))
c->u.opt_dev.opt->alt_first = c->u.opt_dev.dev;
return 1;
default:
if (log_level >= 2) {