sync with OpenBSD -current

This commit is contained in:
purplerain 2024-01-31 03:34:50 +00:00
parent 4b5c843641
commit fe0bbab526
Signed by: purplerain
GPG Key ID: F42C07F07E2E35B7
22 changed files with 1045 additions and 594 deletions

View File

@ -1,4 +1,4 @@
# $OpenBSD: apnic.constraints,v 1.4 2023/12/26 13:36:18 job Exp $
# $OpenBSD: apnic.constraints,v 1.5 2024/01/30 03:40:01 job Exp $
# From https://www.iana.org/assignments/ipv6-unicast-address-assignments
allow 2001:200::/23
@ -13,7 +13,15 @@ allow 2400::/12
# IX Assignments
allow 2001:7fa::/32
# AFRINIC Internet Number Resources cannot be transferred
# LACNIC ASNs cannot be transferred to APNIC
# From https://www.iana.org/assignments/as-numbers/as-numbers.xhtml
deny 27648 - 28671
deny 52224 - 53247
deny 61440 - 61951
deny 64099 - 64197
deny 262144 - 273820
# AFRINIC IPv4 resources cannot be transferred to APNIC
# From https://www.iana.org/assignments/ipv4-address-space/
deny 41.0.0.0/8
deny 102.0.0.0/8
@ -58,6 +66,8 @@ deny 196.32.160.0 - 196.39.255.255
deny 196.40.96.0 - 196.41.255.255
deny 196.42.64.0 - 196.216.0.255
deny 196.216.2.0 - 197.255.255.255
# AFRINIC ASNs cannot be transferred to APNIC
# From https://www.iana.org/assignments/as-numbers/
deny 36864 - 37887
deny 327680 - 328703
@ -87,6 +97,6 @@ deny 65552 - 131071 # IANA Reserved
deny 4200000000 - 4294967294 # RFC 6996
deny 4294967295 # RFC 7300
# Allow the complement of what is denied
# APNIC supports IPv4 and ASN transfers: allow the complement of what is denied
allow 0.0.0.0/0
allow 1 - 4199999999
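Each RIR constraints file in this commit follows the same shape: explicit deny entries for resources that can never be transferred into that RIR, then a blanket allow of the complement. A minimal, hypothetical fragment in the same syntax (documentation resources only, not part of the shipped files):

# deny what can never appear under this trust anchor
deny 198.51.100.0/24
deny 64496 - 64511
# allow the complement of what is denied
allow 0.0.0.0/0
allow 1 - 4199999999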

View File

@ -1,4 +1,4 @@
# $OpenBSD: arin.constraints,v 1.3 2023/12/26 13:36:18 job Exp $
# $OpenBSD: arin.constraints,v 1.4 2024/01/30 03:40:01 job Exp $
# From https://www.iana.org/assignments/ipv6-unicast-address-assignments
allow 2001:400::/23
@ -9,7 +9,15 @@ allow 2610::/23
allow 2620::/23
allow 2630::/12
# AFRINIC Internet Number Resources cannot be transferred
# LACNIC ASNs cannot be transferred to ARIN
# From https://www.iana.org/assignments/as-numbers/as-numbers.xhtml
deny 27648 - 28671
deny 52224 - 53247
deny 61440 - 61951
deny 64099 - 64197
deny 262144 - 273820
# AFRINIC IPv4 resources cannot be transferred to ARIN
# From https://www.iana.org/assignments/ipv4-address-space/
deny 41.0.0.0/8
deny 102.0.0.0/8
@ -54,6 +62,8 @@ deny 196.32.160.0 - 196.39.255.255
deny 196.40.96.0 - 196.41.255.255
deny 196.42.64.0 - 196.216.0.255
deny 196.216.2.0 - 197.255.255.255
# AFRINIC ASNs cannot be transferred to ARIN
# From https://www.iana.org/assignments/as-numbers/
deny 36864 - 37887
deny 327680 - 328703
@ -83,6 +93,6 @@ deny 65552 - 131071 # IANA Reserved
deny 4200000000 - 4294967294 # RFC 6996
deny 4294967295 # RFC 7300
# Allow the complement of what is denied
# ARIN supports IPv4 and ASN transfers: allow the complement of what is denied
allow 0.0.0.0/0
allow 1 - 4199999999

View File

@ -1,9 +1,16 @@
# $OpenBSD: lacnic.constraints,v 1.3 2023/12/26 13:36:18 job Exp $
# $OpenBSD: lacnic.constraints,v 1.4 2024/01/30 03:40:01 job Exp $
# From https://www.iana.org/assignments/ipv6-unicast-address-assignments
allow 2001:1200::/23
allow 2800::/12
# From https://www.iana.org/assignments/as-numbers/
allow 27648 - 28671
allow 52224 - 53247
allow 61440 - 61951
allow 64099 - 64197
allow 262144 - 273820
# AFRINIC Internet Number Resources cannot be transferred
# From https://www.iana.org/assignments/ipv4-address-space/
deny 41.0.0.0/8
@ -49,10 +56,6 @@ deny 196.32.160.0 - 196.39.255.255
deny 196.40.96.0 - 196.41.255.255
deny 196.42.64.0 - 196.216.0.255
deny 196.216.2.0 - 197.255.255.255
# From https://www.iana.org/assignments/as-numbers/
deny 36864 - 37887
deny 327680 - 328703
deny 328704 - 329727
# Private use IPv4 & IPv6 addresses and ASNs
deny 0.0.0.0/8 # RFC 1122 Local Identification
@ -69,15 +72,6 @@ deny 198.51.100.0/24 # RFC 5737 TEST-NET-2
deny 203.0.113.0/24 # RFC 5737 TEST-NET-3
deny 224.0.0.0/4 # Multicast
deny 240.0.0.0/4 # Reserved
deny 23456 # RFC 4893 AS_TRANS
deny 64496 - 64511 # RFC 5398
deny 64512 - 65534 # RFC 6996
deny 65535 # RFC 7300
deny 65536 - 65551 # RFC 5398
deny 65552 - 131071 # IANA Reserved
deny 4200000000 - 4294967294 # RFC 6996
deny 4294967295 # RFC 7300
# Allow the complement of what is denied
# LACNIC supports only IPv4 transfers: allow the complement of what is denied
allow 0.0.0.0/0
allow 1 - 4199999999

View File

@ -1,4 +1,4 @@
# $OpenBSD: ripe.constraints,v 1.3 2023/12/26 13:36:18 job Exp $
# $OpenBSD: ripe.constraints,v 1.4 2024/01/30 03:40:01 job Exp $
# From https://www.iana.org/assignments/ipv6-unicast-address-assignments
allow 2001:600::/23
@ -16,7 +16,15 @@ allow 2003::/18
allow 2a00::/12
allow 2a10::/12
# AFRINIC Internet Number Resources cannot be transferred
# LACNIC ASNs cannot be transferred to RIPE NCC
# From https://www.iana.org/assignments/as-numbers/
deny 27648 - 28671
deny 52224 - 53247
deny 61440 - 61951
deny 64099 - 64197
deny 262144 - 273820
# AFRINIC IPv4 resources cannot be transferred to RIPE NCC
# From https://www.iana.org/assignments/ipv4-address-space/
deny 41.0.0.0/8
deny 102.0.0.0/8
@ -61,6 +69,8 @@ deny 196.32.160.0 - 196.39.255.255
deny 196.40.96.0 - 196.41.255.255
deny 196.42.64.0 - 196.216.0.255
deny 196.216.2.0 - 197.255.255.255
# AFRINIC ASNs cannot be transferred to RIPE NCC
# From https://www.iana.org/assignments/as-numbers/
deny 36864 - 37887
deny 327680 - 328703
@ -90,6 +100,6 @@ deny 65552 - 131071 # IANA Reserved
deny 4200000000 - 4294967294 # RFC 6996
deny 4294967295 # RFC 7300
# Allow the complement of what is denied
# RIPE NCC supports IPv4 and ASN transfers: allow the complement of what is denied
allow 0.0.0.0/0
allow 1 - 4199999999

View File

@ -1,4 +1,4 @@
/* $OpenBSD: cmac.c,v 1.21 2024/01/29 06:05:50 tb Exp $ */
/* $OpenBSD: cmac.c,v 1.22 2024/01/30 17:43:39 tb Exp $ */
/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL
* project.
*/
@ -131,8 +131,7 @@ LCRYPTO_ALIAS(CMAC_CTX_new);
void
CMAC_CTX_cleanup(CMAC_CTX *ctx)
{
if (ctx->cipher_ctx != NULL)
(void)EVP_CIPHER_CTX_reset(ctx->cipher_ctx);
(void)EVP_CIPHER_CTX_reset(ctx->cipher_ctx);
explicit_bzero(ctx->tbl, EVP_MAX_BLOCK_LENGTH);
explicit_bzero(ctx->k1, EVP_MAX_BLOCK_LENGTH);
explicit_bzero(ctx->k2, EVP_MAX_BLOCK_LENGTH);

View File

@ -1,4 +1,4 @@
/* $OpenBSD: evp_cipher.c,v 1.16 2024/01/07 15:21:04 tb Exp $ */
/* $OpenBSD: evp_cipher.c,v 1.17 2024/01/30 17:41:01 tb Exp $ */
/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
* All rights reserved.
*
@ -627,6 +627,9 @@ EVP_CIPHER_CTX_reset(EVP_CIPHER_CTX *ctx)
int
EVP_CIPHER_CTX_cleanup(EVP_CIPHER_CTX *ctx)
{
if (ctx == NULL)
return 1;
if (ctx->cipher != NULL) {
/* XXX - Avoid leaks, so ignore return value of cleanup()... */
if (ctx->cipher->cleanup != NULL)
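The added NULL check makes EVP_CIPHER_CTX_cleanup() tolerant of a NULL context, and EVP_CIPHER_CTX_reset() goes through it, which is why the caller-side guard in CMAC_CTX_cleanup() above could be dropped. A minimal sketch of the calling pattern this enables (hypothetical helper, not part of the diff):

#include <openssl/evp.h>

/* Teardown paths no longer need their own NULL guard. */
static void
cipher_teardown(EVP_CIPHER_CTX *cipher_ctx)
{
	/* A no-op when cipher_ctx is NULL after this change. */
	(void)EVP_CIPHER_CTX_reset(cipher_ctx);
}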

View File

@ -1,4 +1,4 @@
/* $OpenBSD: evp_digest.c,v 1.7 2023/12/29 07:22:47 tb Exp $ */
/* $OpenBSD: evp_digest.c,v 1.8 2024/01/30 17:41:01 tb Exp $ */
/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
* All rights reserved.
*
@ -258,10 +258,12 @@ EVP_MD_CTX_reset(EVP_MD_CTX *ctx)
return EVP_MD_CTX_cleanup(ctx);
}
/* This call frees resources associated with the context */
int
EVP_MD_CTX_cleanup(EVP_MD_CTX *ctx)
{
if (ctx == NULL)
return 1;
/*
* Don't assume ctx->md_data was cleaned in EVP_Digest_Final,
* because sometimes only copies of the context are ever finalised.
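The comment above names the invariant that motivates the cleanup work: a digest context may be copied and only the copy finalised, so EVP_MD_CTX_cleanup() must release md_data itself. A minimal sketch of that usage with the legacy stack-allocated API (SHA-256 and the helper are assumptions, not from the diff):

#include <openssl/evp.h>

int
digest_with_copy(const unsigned char *data, size_t len,
    unsigned char *out, unsigned int *outlen)
{
	EVP_MD_CTX base, tmp;
	int ok = 0;

	EVP_MD_CTX_init(&base);
	EVP_MD_CTX_init(&tmp);
	if (!EVP_DigestInit(&base, EVP_sha256()) ||
	    !EVP_DigestUpdate(&base, data, len) ||
	    !EVP_MD_CTX_copy(&tmp, &base))
		goto done;
	ok = EVP_DigestFinal(&tmp, out, outlen);
done:
	/* base was never finalised; cleanup must free its md_data. */
	EVP_MD_CTX_cleanup(&tmp);
	EVP_MD_CTX_cleanup(&base);
	return ok;
}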

View File

@ -1,4 +1,4 @@
/* $OpenBSD: tls13_legacy.c,v 1.43 2024/01/27 14:34:28 jsing Exp $ */
/* $OpenBSD: tls13_legacy.c,v 1.44 2024/01/30 14:50:50 jsing Exp $ */
/*
* Copyright (c) 2018, 2019 Joel Sing <jsing@openbsd.org>
*
@ -501,6 +501,7 @@ tls13_legacy_shutdown(SSL *ssl)
return -1;
if (ret != TLS13_IO_SUCCESS)
return tls13_legacy_return_code(ssl, ret);
goto done;
}
ret = tls13_record_layer_send_pending(ctx->rl);
@ -524,6 +525,7 @@ tls13_legacy_shutdown(SSL *ssl)
}
}
done:
if (ssl->shutdown == (SSL_SENT_SHUTDOWN | SSL_RECEIVED_SHUTDOWN))
return 1;
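The added goto lets the TLSv1.3 path fall through to the shared shutdown-flag check below instead of returning early, preserving the legacy two-call convention that the new regress test exercises. A sketch of that caller pattern (assuming an established SSL *ssl):

int ret;

ret = SSL_shutdown(ssl);		/* 0: our close-notify was sent */
if (ret == 0)
	ret = SSL_shutdown(ssl);	/* 1: peer's close-notify received */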

View File

@ -1,4 +1,4 @@
/* $OpenBSD: shutdowntest.c,v 1.2 2024/01/27 14:35:13 jsing Exp $ */
/* $OpenBSD: shutdowntest.c,v 1.3 2024/01/30 14:46:46 jsing Exp $ */
/*
* Copyright (c) 2020, 2021, 2024 Joel Sing <jsing@openbsd.org>
*
@ -360,7 +360,7 @@ static const struct shutdown_test shutdown_tests[] = {
#define N_TLS_TESTS (sizeof(shutdown_tests) / sizeof(*shutdown_tests))
static int
shutdowntest(uint16_t ssl_version, const char *ssl_version_name,
shutdown_test(uint16_t ssl_version, const char *ssl_version_name,
const struct shutdown_test *st)
{
BIO *client_wbio = NULL, *server_wbio = NULL;
@ -479,6 +479,135 @@ shutdowntest(uint16_t ssl_version, const char *ssl_version_name,
return failed;
}
static int
shutdown_sequence_test(uint16_t ssl_version, const char *ssl_version_name)
{
BIO *client_wbio = NULL, *server_wbio = NULL;
SSL *client = NULL, *server = NULL;
int shutdown, ret;
int failed = 1;
fprintf(stderr, "\n== Testing %s, shutdown sequence... ==\n",
ssl_version_name);
if ((client_wbio = BIO_new(BIO_s_mem())) == NULL)
goto failure;
if (BIO_set_mem_eof_return(client_wbio, -1) <= 0)
goto failure;
if ((server_wbio = BIO_new(BIO_s_mem())) == NULL)
goto failure;
if (BIO_set_mem_eof_return(server_wbio, -1) <= 0)
goto failure;
if ((client = tls_client(server_wbio, client_wbio)) == NULL)
goto failure;
if (!SSL_set_min_proto_version(client, ssl_version))
goto failure;
if (!SSL_set_max_proto_version(client, ssl_version))
goto failure;
if ((server = tls_server(client_wbio, server_wbio)) == NULL)
goto failure;
if (!SSL_set_min_proto_version(server, ssl_version))
goto failure;
if (!SSL_set_max_proto_version(server, ssl_version))
goto failure;
if (!do_client_server_loop(client, do_connect, server, do_accept)) {
fprintf(stderr, "FAIL: client and server handshake failed\n");
goto failure;
}
if (!do_client_server_loop(client, do_write, server, do_read)) {
fprintf(stderr, "FAIL: client write and server read I/O failed\n");
goto failure;
}
if (!do_client_server_loop(client, do_read, server, do_write)) {
fprintf(stderr, "FAIL: client read and server write I/O failed\n");
goto failure;
}
/*
* Shutdown in lock step and check return value and shutdown flags.
*
* It is not documented; however, some software relies on SSL_shutdown()
* only sending a close-notify on the first call, and then indicating that
* a close-notify was received on a second (or later) call.
*/
if ((shutdown = SSL_get_shutdown(client)) != 0) {
fprintf(stderr, "FAIL: client shutdown flags = %x, want %x\n",
shutdown, 0);
goto failure;
}
if ((shutdown = SSL_get_shutdown(server)) != 0) {
fprintf(stderr, "FAIL: server shutdown flags = %x, want %x\n",
shutdown, 0);
goto failure;
}
if ((ret = SSL_shutdown(client)) != 0) {
fprintf(stderr, "FAIL: client SSL_shutdown() = %d, want %d\n",
ret, 0);
goto failure;
}
if ((shutdown = SSL_get_shutdown(client)) != SSL_SENT_SHUTDOWN) {
fprintf(stderr, "FAIL: client shutdown flags = %x, want %x\n",
shutdown, SSL_SENT_SHUTDOWN);
goto failure;
}
if ((ret = SSL_shutdown(server)) != 0) {
fprintf(stderr, "FAIL: server SSL_shutdown() = %d, want %d\n",
ret, 0);
goto failure;
}
if ((shutdown = SSL_get_shutdown(server)) != SSL_SENT_SHUTDOWN) {
fprintf(stderr, "FAIL: server shutdown flags = %x, want %x\n",
shutdown, SSL_SENT_SHUTDOWN);
goto failure;
}
if ((ret = SSL_shutdown(client)) != 1) {
fprintf(stderr, "FAIL: client SSL_shutdown() = %d, want %d\n",
ret, 1);
goto failure;
}
if ((shutdown = SSL_get_shutdown(client)) !=
(SSL_SENT_SHUTDOWN | SSL_RECEIVED_SHUTDOWN)) {
fprintf(stderr, "FAIL: client shutdown flags = %x, want %x\n",
shutdown, SSL_SENT_SHUTDOWN | SSL_RECEIVED_SHUTDOWN);
goto failure;
}
if ((ret = SSL_shutdown(server)) != 1) {
fprintf(stderr, "FAIL: server SSL_shutdown() = %d, want %d\n",
ret, 1);
goto failure;
}
if ((shutdown = SSL_get_shutdown(server)) !=
(SSL_SENT_SHUTDOWN | SSL_RECEIVED_SHUTDOWN)) {
fprintf(stderr, "FAIL: server shutdown flags = %x, want %x\n",
shutdown, SSL_SENT_SHUTDOWN | SSL_RECEIVED_SHUTDOWN);
goto failure;
}
fprintf(stderr, "INFO: Done!\n");
failed = 0;
failure:
BIO_free(client_wbio);
BIO_free(server_wbio);
SSL_free(client);
SSL_free(server);
return failed;
}
struct ssl_version {
uint16_t version;
const char *name;
@ -517,9 +646,10 @@ main(int argc, char **argv)
for (i = 0; i < N_SSL_VERSIONS; i++) {
sv = &ssl_versions[i];
for (j = 0; j < N_TLS_TESTS; j++) {
failed |= shutdowntest(sv->version, sv->name,
failed |= shutdown_test(sv->version, sv->name,
&shutdown_tests[j]);
}
failed |= shutdown_sequence_test(sv->version, sv->name);
}
return failed;

View File

@ -1,4 +1,4 @@
/* $OpenBSD: qwx.c,v 1.10 2024/01/29 16:06:45 stsp Exp $ */
/* $OpenBSD: qwx.c,v 1.13 2024/01/30 15:33:32 stsp Exp $ */
/*
* Copyright 2023 Stefan Sperling <stsp@openbsd.org>
@ -651,6 +651,49 @@ void qwx_init_wmi_config_qca6390(struct qwx_softc *sc,
config->flag1 |= WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64;
}
void
qwx_hw_ipq8074_reo_setup(struct qwx_softc *sc)
{
uint32_t reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
uint32_t val;
/* Each hash entry uses three bits to map to a particular ring. */
uint32_t ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
HAL_HASH_ROUTING_RING_SW2 << 3 |
HAL_HASH_ROUTING_RING_SW3 << 6 |
HAL_HASH_ROUTING_RING_SW4 << 9 |
HAL_HASH_ROUTING_RING_SW1 << 12 |
HAL_HASH_ROUTING_RING_SW2 << 15 |
HAL_HASH_ROUTING_RING_SW3 << 18 |
HAL_HASH_ROUTING_RING_SW4 << 21;
val = sc->ops.read32(sc, reo_base + HAL_REO1_GEN_ENABLE);
val &= ~HAL_REO1_GEN_ENABLE_FRAG_DST_RING;
val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_FRAG_DST_RING,
HAL_SRNG_RING_ID_REO2SW1) |
FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
sc->ops.write32(sc, reo_base + HAL_REO1_GEN_ENABLE, val);
sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_0(sc),
HAL_DEFAULT_REO_TIMEOUT_USEC);
sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_1(sc),
HAL_DEFAULT_REO_TIMEOUT_USEC);
sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_2(sc),
HAL_DEFAULT_REO_TIMEOUT_USEC);
sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_3(sc),
HAL_DEFAULT_REO_TIMEOUT_USEC);
sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0,
FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP, ring_hash_map));
sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_1,
FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP, ring_hash_map));
sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP, ring_hash_map));
sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP, ring_hash_map));
}
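A worked example of the packing above, using the ring IDs added to qwxreg.h in this commit:

/*
 * With SW1..SW4 = 1..4 and three bits per entry,
 *	1<<0 | 2<<3 | 3<<6 | 4<<9 | 1<<12 | 2<<15 | 3<<18 | 4<<21
 * packs to 0x008d18d1 (the 12-bit pattern 0x8d1 repeated), which
 * FIELD_PREP() then shifts into HAL_REO_DEST_RING_CTRL_HASH_RING_MAP.
 */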
void qwx_init_wmi_config_ipq8074(struct qwx_softc *sc,
struct target_resource_config *config)
{
@ -710,6 +753,90 @@ void qwx_init_wmi_config_ipq8074(struct qwx_softc *sc,
config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt;
}
void
qwx_hw_wcn6855_reo_setup(struct qwx_softc *sc)
{
uint32_t reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
uint32_t val;
/* Each hash entry uses four bits to map to a particular ring. */
uint32_t ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
HAL_HASH_ROUTING_RING_SW2 << 4 |
HAL_HASH_ROUTING_RING_SW3 << 8 |
HAL_HASH_ROUTING_RING_SW4 << 12 |
HAL_HASH_ROUTING_RING_SW1 << 16 |
HAL_HASH_ROUTING_RING_SW2 << 20 |
HAL_HASH_ROUTING_RING_SW3 << 24 |
HAL_HASH_ROUTING_RING_SW4 << 28;
val = sc->ops.read32(sc, reo_base + HAL_REO1_GEN_ENABLE);
val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
sc->ops.write32(sc, reo_base + HAL_REO1_GEN_ENABLE, val);
val = sc->ops.read32(sc, reo_base + HAL_REO1_MISC_CTL(sc));
val &= ~HAL_REO1_MISC_CTL_FRAGMENT_DST_RING;
val |= FIELD_PREP(HAL_REO1_MISC_CTL_FRAGMENT_DST_RING,
HAL_SRNG_RING_ID_REO2SW1);
sc->ops.write32(sc, reo_base + HAL_REO1_MISC_CTL(sc), val);
sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_0(sc),
HAL_DEFAULT_REO_TIMEOUT_USEC);
sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_1(sc),
HAL_DEFAULT_REO_TIMEOUT_USEC);
sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_2(sc),
HAL_DEFAULT_REO_TIMEOUT_USEC);
sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_3(sc),
HAL_DEFAULT_REO_TIMEOUT_USEC);
sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
ring_hash_map);
sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
ring_hash_map);
}
void
qwx_hw_ipq5018_reo_setup(struct qwx_softc *sc)
{
uint32_t reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
uint32_t val;
/* Each hash entry uses four bits to map to a particular ring. */
uint32_t ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
HAL_HASH_ROUTING_RING_SW2 << 4 |
HAL_HASH_ROUTING_RING_SW3 << 8 |
HAL_HASH_ROUTING_RING_SW4 << 12 |
HAL_HASH_ROUTING_RING_SW1 << 16 |
HAL_HASH_ROUTING_RING_SW2 << 20 |
HAL_HASH_ROUTING_RING_SW3 << 24 |
HAL_HASH_ROUTING_RING_SW4 << 28;
val = sc->ops.read32(sc, reo_base + HAL_REO1_GEN_ENABLE);
val &= ~HAL_REO1_GEN_ENABLE_FRAG_DST_RING;
val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_FRAG_DST_RING,
HAL_SRNG_RING_ID_REO2SW1) |
FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
sc->ops.write32(sc, reo_base + HAL_REO1_GEN_ENABLE, val);
sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_0(sc),
HAL_DEFAULT_REO_TIMEOUT_USEC);
sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_1(sc),
HAL_DEFAULT_REO_TIMEOUT_USEC);
sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_2(sc),
HAL_DEFAULT_REO_TIMEOUT_USEC);
sc->ops.write32(sc, reo_base + HAL_REO1_AGING_THRESH_IX_3(sc),
HAL_DEFAULT_REO_TIMEOUT_USEC);
sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0,
ring_hash_map);
sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_1,
ring_hash_map);
sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
ring_hash_map);
sc->ops.write32(sc, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
ring_hash_map);
}
int
qwx_hw_mac_id_to_pdev_id_ipq8074(struct ath11k_hw_params *hw, int mac_id)
{
@ -769,7 +896,9 @@ const struct ath11k_hw_ops ipq8074_ops = {
.rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len,
.rx_desc_get_attention = ath11k_hw_ipq8074_rx_desc_get_attention,
.rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
.reo_setup = ath11k_hw_ipq8074_reo_setup,
#endif
.reo_setup = qwx_hw_ipq8074_reo_setup,
#ifdef notyet
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
@ -813,7 +942,9 @@ const struct ath11k_hw_ops ipq6018_ops = {
.rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len,
.rx_desc_get_attention = ath11k_hw_ipq8074_rx_desc_get_attention,
.rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
.reo_setup = ath11k_hw_ipq8074_reo_setup,
#endif
.reo_setup = qwx_hw_ipq8074_reo_setup,
#ifdef notyet
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
@ -857,7 +988,9 @@ const struct ath11k_hw_ops qca6390_ops = {
.rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len,
.rx_desc_get_attention = ath11k_hw_ipq8074_rx_desc_get_attention,
.rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
.reo_setup = ath11k_hw_ipq8074_reo_setup,
#endif
.reo_setup = qwx_hw_ipq8074_reo_setup,
#ifdef notyet
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
@ -901,7 +1034,9 @@ const struct ath11k_hw_ops qcn9074_ops = {
.rx_desc_set_msdu_len = ath11k_hw_qcn9074_rx_desc_set_msdu_len,
.rx_desc_get_attention = ath11k_hw_qcn9074_rx_desc_get_attention,
.rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload,
.reo_setup = ath11k_hw_ipq8074_reo_setup,
#endif
.reo_setup = qwx_hw_ipq8074_reo_setup,
#ifdef notyet
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2,
@ -945,7 +1080,9 @@ const struct ath11k_hw_ops wcn6855_ops = {
.rx_desc_set_msdu_len = ath11k_hw_wcn6855_rx_desc_set_msdu_len,
.rx_desc_get_attention = ath11k_hw_wcn6855_rx_desc_get_attention,
.rx_desc_get_msdu_payload = ath11k_hw_wcn6855_rx_desc_get_msdu_payload,
.reo_setup = ath11k_hw_wcn6855_reo_setup,
#endif
.reo_setup = qwx_hw_wcn6855_reo_setup,
#ifdef notyet
.mpdu_info_get_peerid = ath11k_hw_wcn6855_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_wcn6855_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_wcn6855_rx_desc_mpdu_start_addr2,
@ -989,7 +1126,9 @@ const struct ath11k_hw_ops wcn6750_ops = {
.rx_desc_set_msdu_len = ath11k_hw_qcn9074_rx_desc_set_msdu_len,
.rx_desc_get_attention = ath11k_hw_qcn9074_rx_desc_get_attention,
.rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload,
.reo_setup = ath11k_hw_wcn6855_reo_setup,
#endif
.reo_setup = qwx_hw_wcn6855_reo_setup,
#ifdef notyet
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2,
@ -9145,12 +9284,11 @@ qwx_dp_srng_common_setup(struct qwx_softc *sc)
sc->sc_dev.dv_xname, ret);
goto err;
}
#ifdef notyet
/* When hash based routing of rx packets is enabled, 32 entries to map
* the hash values to the ring will be configured.
*/
sc->hw_params.hw_ops->reo_setup(sc);
#endif
return 0;
err:
@ -16249,9 +16387,9 @@ qwx_core_pdev_create(struct qwx_softc *sc)
ath11k_err(ab, "failed to init spectral %d\n", ret);
goto err_thermal_unregister;
}
#endif
return 0;
#if 0
err_thermal_unregister:
ath11k_thermal_unregister(ab);
err_mac_unregister:
@ -21344,12 +21482,17 @@ qwx_run(struct qwx_softc *sc)
return ret;
}
/* Enable "ext" IRQs for datapath. */
sc->ops.irq_enable(sc);
return 0;
}
int
qwx_run_stop(struct qwx_softc *sc)
{
sc->ops.irq_disable(sc);
printf("%s: not implemented\n", __func__);
return ENOTSUP;
}

View File

@ -1,4 +1,4 @@
/* $OpenBSD: qwxreg.h,v 1.3 2024/01/28 22:30:39 stsp Exp $ */
/* $OpenBSD: qwxreg.h,v 1.4 2024/01/30 15:32:04 stsp Exp $ */
/*
* Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc.
@ -9593,6 +9593,14 @@ enum hal_reo_exec_status {
#define HAL_REO_STATUS_HDR_INFO0_EXEC_TIME GENMASK(25, 16)
#define HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS GENMASK(27, 26)
#define HAL_HASH_ROUTING_RING_TCL 0
#define HAL_HASH_ROUTING_RING_SW1 1
#define HAL_HASH_ROUTING_RING_SW2 2
#define HAL_HASH_ROUTING_RING_SW3 3
#define HAL_HASH_ROUTING_RING_SW4 4
#define HAL_HASH_ROUTING_RING_REL 5
#define HAL_HASH_ROUTING_RING_FW 6
struct hal_reo_status_hdr {
uint32_t info0;
uint32_t timestamp;

View File

@ -1,4 +1,4 @@
/* $OpenBSD: qwxvar.h,v 1.7 2024/01/29 16:06:45 stsp Exp $ */
/* $OpenBSD: qwxvar.h,v 1.8 2024/01/30 15:32:04 stsp Exp $ */
/*
* Copyright (c) 2018-2019 The Linux Foundation.
@ -247,7 +247,9 @@ struct ath11k_hw_ops {
void (*rx_desc_set_msdu_len)(struct hal_rx_desc *desc, uint16_t len);
struct rx_attention *(*rx_desc_get_attention)(struct hal_rx_desc *desc);
uint8_t *(*rx_desc_get_msdu_payload)(struct hal_rx_desc *desc);
void (*reo_setup)(struct ath11k_base *ab);
#endif
void (*reo_setup)(struct qwx_softc *);
#ifdef notyet
uint16_t (*mpdu_info_get_peerid)(uint8_t *tlv_data);
bool (*rx_desc_mac_addr2_valid)(struct hal_rx_desc *desc);
uint8_t* (*rx_desc_mpdu_start_addr2)(struct hal_rx_desc *desc);

View File

@ -1,4 +1,4 @@
/* $OpenBSD: bgpctl.c,v 1.302 2024/01/25 09:54:21 claudio Exp $ */
/* $OpenBSD: bgpctl.c,v 1.303 2024/01/30 13:51:13 claudio Exp $ */
/*
* Copyright (c) 2003 Henning Brauer <henning@openbsd.org>
@ -471,9 +471,7 @@ show(struct imsg *imsg, struct parse_result *res)
struct ctl_show_rib rib;
struct rde_memstats stats;
struct ibuf ibuf;
u_char *asdata;
u_int rescode, ilen;
size_t aslen;
switch (imsg->hdr.type) {
case IMSG_CTL_SHOW_NEIGHBOR:
@ -528,16 +526,13 @@ show(struct imsg *imsg, struct parse_result *res)
output->fib_table(&kt);
break;
case IMSG_CTL_SHOW_RIB:
if (imsg->hdr.len < IMSG_HEADER_SIZE + sizeof(rib))
errx(1, "wrong imsg len");
if (output->rib == NULL)
break;
/* XXX */
memcpy(&rib, imsg->data, sizeof(rib));
aslen = imsg->hdr.len - IMSG_HEADER_SIZE - sizeof(rib);
asdata = imsg->data;
asdata += sizeof(rib);
output->rib(&rib, asdata, aslen, res);
if (imsg_get_ibuf(imsg, &ibuf) == -1)
err(1, "imsg_get_ibuf");
if (ibuf_get(&ibuf, &rib, sizeof(rib)) == -1)
err(1, "imsg_get_ibuf");
output->rib(&rib, &ibuf, res);
break;
case IMSG_CTL_SHOW_RIB_COMMUNITIES:
if (output->communities == NULL)
@ -1231,6 +1226,7 @@ show_mrt_dump(struct mrt_rib *mr, struct mrt_peer *mp, void *arg)
struct parse_result res;
struct ctl_show_rib_request *req = arg;
struct mrt_rib_entry *mre;
struct ibuf ibuf;
time_t now;
uint16_t i, j;
@ -1296,7 +1292,8 @@ show_mrt_dump(struct mrt_rib *mr, struct mrt_peer *mp, void *arg)
!match_aspath(mre->aspath, mre->aspath_len, &req->as))
continue;
output->rib(&ctl, mre->aspath, mre->aspath_len, &res);
ibuf_from_buffer(&ibuf, mre->aspath, mre->aspath_len);
output->rib(&ctl, &ibuf, &res);
if (req->flags & F_CTL_DETAIL) {
for (j = 0; j < mre->nattrs; j++)
output->attr(mre->attrs[j].attr,
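Both call sites now hand output->rib() a struct ibuf instead of a raw pointer/length pair. The consuming side follows the usual imsg ibuf pattern, sketched here with the same calls as above (surrounding code is illustrative):

struct ibuf ibuf;
struct ctl_show_rib rib;

if (imsg_get_ibuf(imsg, &ibuf) == -1)	/* payload as a bounded buffer */
	err(1, "imsg_get_ibuf");
if (ibuf_get(&ibuf, &rib, sizeof(rib)) == -1)	/* fixed header, advances */
	err(1, "ibuf_get");
/* ibuf now covers only the trailing AS path bytes. */
output->rib(&rib, &ibuf, res);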

View File

@ -1,4 +1,4 @@
/* $OpenBSD: bgpctl.h,v 1.22 2024/01/25 09:54:21 claudio Exp $ */
/* $OpenBSD: bgpctl.h,v 1.23 2024/01/30 13:51:13 claudio Exp $ */
/*
* Copyright (c) 2019 Claudio Jeker <claudio@openbsd.org>
@ -29,7 +29,7 @@ struct output {
void (*interface)(struct ctl_show_interface *);
void (*attr)(u_char *, size_t, int, int);
void (*communities)(struct ibuf *, struct parse_result *);
void (*rib)(struct ctl_show_rib *, u_char *, size_t,
void (*rib)(struct ctl_show_rib *, struct ibuf *,
struct parse_result *);
void (*rib_mem)(struct rde_memstats *);
void (*set)(struct ctl_show_set *);

View File

@ -1,4 +1,4 @@
/* $OpenBSD: output.c,v 1.48 2024/01/25 09:54:21 claudio Exp $ */
/* $OpenBSD: output.c,v 1.49 2024/01/30 13:51:13 claudio Exp $ */
/*
* Copyright (c) 2003 Henning Brauer <henning@openbsd.org>
@ -769,10 +769,9 @@ show_ext_community(u_char *data, uint16_t len)
static void
show_attr(u_char *data, size_t len, int reqflags, int addpath)
{
u_char *path;
struct in_addr id;
struct bgpd_addr prefix;
struct ibuf ibuf, *buf = &ibuf;
struct ibuf ibuf, *buf = &ibuf, asbuf, *path = NULL;
char *aspath;
uint32_t as, pathid;
uint16_t alen, ioff, short_as, afi;
@ -820,25 +819,27 @@ show_attr(u_char *data, size_t len, int reqflags, int addpath)
break;
case ATTR_ASPATH:
case ATTR_AS4_PATH:
ibuf_from_buffer(buf, data, alen);
/* prefer 4-byte AS here */
e4 = aspath_verify(data, alen, 1, 0);
e2 = aspath_verify(data, alen, 0, 0);
e4 = aspath_verify(buf, 1, 0);
e2 = aspath_verify(buf, 0, 0);
if (e4 == 0 || e4 == AS_ERR_SOFT) {
path = data;
ibuf_from_ibuf(&asbuf, buf);
} else if (e2 == 0 || e2 == AS_ERR_SOFT) {
path = aspath_inflate(data, alen, &alen);
if (path == NULL)
errx(1, "aspath_inflate failed");
if ((path = aspath_inflate(buf)) == NULL) {
printf("aspath_inflate failed");
break;
}
ibuf_from_ibuf(&asbuf, path);
} else {
printf("bad AS-Path");
break;
}
if (aspath_asprint(&aspath, path, alen) == -1)
if (aspath_asprint(&aspath, &asbuf) == -1)
err(1, NULL);
printf("%s", aspath);
free(aspath);
if (path != data)
free(path);
ibuf_free(path);
break;
case ATTR_NEXTHOP:
if (alen == 4) {
@ -1013,7 +1014,7 @@ show_attr(u_char *data, size_t len, int reqflags, int addpath)
}
static void
show_rib_brief(struct ctl_show_rib *r, u_char *asdata, size_t aslen)
show_rib_brief(struct ctl_show_rib *r, struct ibuf *asbuf)
{
char *p, *aspath;
@ -1025,7 +1026,7 @@ show_rib_brief(struct ctl_show_rib *r, u_char *asdata, size_t aslen)
log_addr(&r->exit_nexthop), r->local_pref, r->med);
free(p);
if (aspath_asprint(&aspath, asdata, aslen) == -1)
if (aspath_asprint(&aspath, asbuf) == -1)
err(1, NULL);
if (strlen(aspath) > 0)
printf("%s ", aspath);
@ -1035,8 +1036,7 @@ show_rib_brief(struct ctl_show_rib *r, u_char *asdata, size_t aslen)
}
static void
show_rib_detail(struct ctl_show_rib *r, u_char *asdata, size_t aslen,
int flag0)
show_rib_detail(struct ctl_show_rib *r, struct ibuf *asbuf, int flag0)
{
struct in_addr id;
char *aspath, *s;
@ -1045,7 +1045,7 @@ show_rib_detail(struct ctl_show_rib *r, u_char *asdata, size_t aslen,
log_addr(&r->prefix), r->prefixlen,
EOL0(flag0));
if (aspath_asprint(&aspath, asdata, aslen) == -1)
if (aspath_asprint(&aspath, asbuf) == -1)
err(1, NULL);
if (strlen(aspath) > 0)
printf(" %s%c", aspath, EOL0(flag0));
@ -1072,13 +1072,12 @@ show_rib_detail(struct ctl_show_rib *r, u_char *asdata, size_t aslen,
}
static void
show_rib(struct ctl_show_rib *r, u_char *asdata, size_t aslen,
struct parse_result *res)
show_rib(struct ctl_show_rib *r, struct ibuf *aspath, struct parse_result *res)
{
if (res->flags & F_CTL_DETAIL)
show_rib_detail(r, asdata, aslen, res->flags);
show_rib_detail(r, aspath, res->flags);
else
show_rib_brief(r, asdata, aslen);
show_rib_brief(r, aspath);
}
static void

View File

@ -1,4 +1,4 @@
/* $OpenBSD: output_json.c,v 1.40 2024/01/25 09:54:21 claudio Exp $ */
/* $OpenBSD: output_json.c,v 1.41 2024/01/30 13:51:13 claudio Exp $ */
/*
* Copyright (c) 2020 Claudio Jeker <claudio@openbsd.org>
@ -589,9 +589,8 @@ json_attr(u_char *data, size_t len, int reqflags, int addpath)
{
struct bgpd_addr prefix;
struct in_addr id;
struct ibuf ibuf, *buf = &ibuf;
struct ibuf ibuf, *buf = &ibuf, asbuf, *path = NULL;
char *aspath;
u_char *path;
uint32_t as, pathid;
uint16_t alen, afi, off, short_as;
uint8_t flags, type, safi, aid, prefixlen;
@ -645,25 +644,28 @@ json_attr(u_char *data, size_t len, int reqflags, int addpath)
break;
case ATTR_ASPATH:
case ATTR_AS4_PATH:
ibuf_from_buffer(buf, data, alen);
/* prefer 4-byte AS here */
e4 = aspath_verify(data, alen, 1, 0);
e2 = aspath_verify(data, alen, 0, 0);
e4 = aspath_verify(buf, 1, 0);
e2 = aspath_verify(buf, 0, 0);
if (e4 == 0 || e4 == AS_ERR_SOFT) {
path = data;
ibuf_from_ibuf(&asbuf, buf);
} else if (e2 == 0 || e2 == AS_ERR_SOFT) {
path = aspath_inflate(data, alen, &alen);
if (path == NULL)
errx(1, "aspath_inflate failed");
if ((path = aspath_inflate(buf)) == NULL) {
json_do_string("error",
"aspath_inflate failed");
break;
}
ibuf_from_ibuf(&asbuf, path);
} else {
json_do_string("error", "bad AS-Path");
break;
}
if (aspath_asprint(&aspath, path, alen) == -1)
if (aspath_asprint(&aspath, &asbuf) == -1)
err(1, NULL);
json_do_string("aspath", aspath);
free(aspath);
if (path != data)
free(path);
ibuf_free(path);
break;
case ATTR_NEXTHOP:
if (alen == 4) {
@ -841,8 +843,7 @@ bad_len:
}
static void
json_rib(struct ctl_show_rib *r, u_char *asdata, size_t aslen,
struct parse_result *res)
json_rib(struct ctl_show_rib *r, struct ibuf *asbuf, struct parse_result *res)
{
struct in_addr id;
char *aspath;
@ -853,7 +854,7 @@ json_rib(struct ctl_show_rib *r, u_char *asdata, size_t aslen,
json_do_printf("prefix", "%s/%u", log_addr(&r->prefix), r->prefixlen);
if (aspath_asprint(&aspath, asdata, aslen) == -1)
if (aspath_asprint(&aspath, asbuf) == -1)
err(1, NULL);
json_do_string("aspath", aspath);
free(aspath);

View File

@ -1,4 +1,4 @@
/* $OpenBSD: bgpd.h,v 1.483 2024/01/23 16:13:35 claudio Exp $ */
/* $OpenBSD: bgpd.h,v 1.484 2024/01/30 13:50:08 claudio Exp $ */
/*
* Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
@ -1542,20 +1542,19 @@ const char *log_as(uint32_t);
const char *log_rd(uint64_t);
const char *log_ext_subtype(int, uint8_t);
const char *log_reason(const char *);
const char *log_aspath_error(int);
const char *log_roa(struct roa *);
const char *log_aspa(struct aspa_set *);
const char *log_rtr_error(enum rtr_error);
const char *log_policy(enum role);
int aspath_snprint(char *, size_t, void *, uint16_t);
int aspath_asprint(char **, void *, uint16_t);
size_t aspath_strlen(void *, uint16_t);
int aspath_asprint(char **, struct ibuf *);
uint32_t aspath_extract(const void *, int);
int aspath_verify(void *, uint16_t, int, int);
int aspath_verify(struct ibuf *, int, int);
#define AS_ERR_LEN -1
#define AS_ERR_TYPE -2
#define AS_ERR_BAD -3
#define AS_ERR_SOFT -4
u_char *aspath_inflate(void *, uint16_t, uint16_t *);
struct ibuf *aspath_inflate(struct ibuf *);
int extract_prefix(const u_char *, int, void *, uint8_t, uint8_t);
int nlri_get_prefix(struct ibuf *, struct bgpd_addr *, uint8_t *);
int nlri_get_prefix6(struct ibuf *, struct bgpd_addr *, uint8_t *);

View File

@ -1,4 +1,4 @@
/* $OpenBSD: rde.c,v 1.619 2024/01/25 11:13:35 claudio Exp $ */
/* $OpenBSD: rde.c,v 1.620 2024/01/30 13:50:09 claudio Exp $ */
/*
* Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
@ -1916,13 +1916,6 @@ rde_update_withdraw(struct rde_peer *peer, uint32_t path_id,
*/
/* attribute parser specific macros */
#define UPD_READ(t, p, plen, n) \
do { \
memcpy(t, p, n); \
p += n; \
plen += n; \
} while (0)
#define CHECK_FLAGS(s, t, m) \
(((s) & ~(ATTR_DEFMASK | (m))) == (t))
@ -1932,12 +1925,10 @@ rde_attr_parse(struct ibuf *buf, struct rde_peer *peer,
{
struct bgpd_addr nexthop;
struct rde_aspath *a = &state->aspath;
struct ibuf attrbuf, tmpbuf;
u_char *p, *npath;
struct ibuf attrbuf, tmpbuf, *npath = NULL;
size_t alen, hlen;
uint32_t tmp32, zero = 0;
int error;
uint16_t nlen;
size_t attr_len, hlen, plen;
uint8_t flags, type;
ibuf_from_ibuf(&attrbuf, buf);
@ -1945,30 +1936,26 @@ rde_attr_parse(struct ibuf *buf, struct rde_peer *peer,
ibuf_get_n8(&attrbuf, &type) == -1)
goto bad_list;
if (flags & ATTR_EXTLEN) {
uint16_t alen;
if (ibuf_get_n16(&attrbuf, &alen) == -1)
uint16_t attr_len;
if (ibuf_get_n16(&attrbuf, &attr_len) == -1)
goto bad_list;
attr_len = alen;
alen = attr_len;
hlen = 4;
} else {
uint8_t alen;
if (ibuf_get_n8(&attrbuf, &alen) == -1)
uint8_t attr_len;
if (ibuf_get_n8(&attrbuf, &attr_len) == -1)
goto bad_list;
attr_len = alen;
alen = attr_len;
hlen = 3;
}
if (ibuf_truncate(&attrbuf, attr_len) == -1)
if (ibuf_truncate(&attrbuf, alen) == -1)
goto bad_list;
/* consume the attribute in buf before moving forward */
if (ibuf_skip(buf, hlen + attr_len) == -1)
if (ibuf_skip(buf, hlen + alen) == -1)
goto bad_list;
p = ibuf_data(&attrbuf);
plen = ibuf_size(&attrbuf);
switch (type) {
case ATTR_UNDEF:
/* ignore and drop path attributes with a type code of 0 */
@ -1998,44 +1985,43 @@ rde_attr_parse(struct ibuf *buf, struct rde_peer *peer,
case ATTR_ASPATH:
if (!CHECK_FLAGS(flags, ATTR_WELL_KNOWN, 0))
goto bad_flags;
error = aspath_verify(p, attr_len, peer_has_as4byte(peer),
if (a->flags & F_ATTR_ASPATH)
goto bad_list;
error = aspath_verify(&attrbuf, peer_has_as4byte(peer),
peer_accept_no_as_set(peer));
if (error != 0 && error != AS_ERR_SOFT) {
log_peer_warnx(&peer->conf, "bad ASPATH, %s",
log_aspath_error(error));
rde_update_err(peer, ERR_UPDATE, ERR_UPD_ASPATH,
NULL);
return (-1);
}
if (peer_has_as4byte(peer)) {
ibuf_from_ibuf(&tmpbuf, &attrbuf);
} else {
if ((npath = aspath_inflate(&attrbuf)) == NULL)
fatal("aspath_inflate");
ibuf_from_ibuf(&tmpbuf, npath);
}
if (error == AS_ERR_SOFT) {
char *str;
/*
* soft errors like unexpected segment types are
* not considered fatal and the path is just
* marked invalid.
*/
a->flags |= F_ATTR_PARSE_ERR;
} else if (error != 0) {
rde_update_err(peer, ERR_UPDATE, ERR_UPD_ASPATH,
NULL);
return (-1);
}
if (a->flags & F_ATTR_ASPATH)
goto bad_list;
if (peer_has_as4byte(peer)) {
npath = p;
nlen = attr_len;
} else {
npath = aspath_inflate(p, attr_len, &nlen);
if (npath == NULL)
fatal("aspath_inflate");
}
if (error == AS_ERR_SOFT) {
char *str;
aspath_asprint(&str, npath, nlen);
aspath_asprint(&str, &tmpbuf);
log_peer_warnx(&peer->conf, "bad ASPATH %s, "
"path invalidated and prefix withdrawn",
str ? str : "(bad aspath)");
free(str);
}
a->flags |= F_ATTR_ASPATH;
a->aspath = aspath_get(npath, nlen);
if (npath != p)
free(npath);
plen += attr_len;
a->aspath = aspath_get(ibuf_data(&tmpbuf), ibuf_size(&tmpbuf));
ibuf_free(npath);
break;
case ATTR_NEXTHOP:
if (!CHECK_FLAGS(flags, ATTR_WELL_KNOWN, 0))
@ -2050,7 +2036,6 @@ rde_attr_parse(struct ibuf *buf, struct rde_peer *peer,
nexthop.aid = AID_INET;
if (ibuf_get_h32(&attrbuf, &nexthop.v4.s_addr) == -1)
goto bad_len;
/*
* Check if the nexthop is a valid IP address. We consider
* multicast addresses as invalid.
@ -2245,12 +2230,11 @@ rde_attr_parse(struct ibuf *buf, struct rde_peer *peer,
if (!CHECK_FLAGS(flags, ATTR_OPTIONAL|ATTR_TRANSITIVE,
ATTR_PARTIAL))
goto bad_flags;
if ((error = aspath_verify(p, attr_len, 1,
if ((error = aspath_verify(&attrbuf, 1,
peer_accept_no_as_set(peer))) != 0) {
/* As per RFC6793 use "attribute discard" here. */
log_peer_warnx(&peer->conf, "bad AS4_PATH, "
"attribute discarded");
plen += attr_len;
break;
}
a->flags |= F_ATTR_AS4BYTE_NEW;
@ -2295,7 +2279,6 @@ rde_attr_parse(struct ibuf *buf, struct rde_peer *peer,
break;
}
(void)plen; /* XXX make compiler happy for now */
return (0);
bad_len:
@ -2309,7 +2292,6 @@ rde_attr_parse(struct ibuf *buf, struct rde_peer *peer,
return (-1);
}
#undef UPD_READ
#undef CHECK_FLAGS
int

View File

@ -1,4 +1,4 @@
/* $OpenBSD: util.c,v 1.79 2024/01/23 16:13:35 claudio Exp $ */
/* $OpenBSD: util.c,v 1.80 2024/01/30 13:50:09 claudio Exp $ */
/*
* Copyright (c) 2006 Claudio Jeker <claudio@openbsd.org>
@ -32,8 +32,6 @@
#include "rde.h"
#include "log.h"
const char *aspath_delim(uint8_t, int);
const char *
log_addr(const struct bgpd_addr *addr)
{
@ -235,6 +233,26 @@ log_aspa(struct aspa_set *aspa)
return errbuf;
}
const char *
log_aspath_error(int error)
{
static char buf[20];
switch (error) {
case AS_ERR_LEN:
return "inconsitent lenght";
case AS_ERR_TYPE:
return "unknown segment type";
case AS_ERR_BAD:
return "invalid encoding";
case AS_ERR_SOFT:
return "soft failure";
default:
snprintf(buf, sizeof(buf), "unknown %d", error);
return buf;
}
}
const char *
log_rtr_error(enum rtr_error err)
{
@ -286,7 +304,7 @@ log_policy(enum role role)
}
}
const char *
static const char *
aspath_delim(uint8_t seg_type, int closing)
{
static char db[8];
@ -318,42 +336,38 @@ aspath_delim(uint8_t seg_type, int closing)
}
}
int
aspath_snprint(char *buf, size_t size, void *data, uint16_t len)
static int
aspath_snprint(char *buf, size_t size, struct ibuf *in)
{
#define UPDATE() \
do { \
if (r < 0) \
return (-1); \
total_size += r; \
if ((unsigned int)r < size) { \
size -= r; \
buf += r; \
} else { \
buf += size; \
size = 0; \
} \
#define UPDATE() \
do { \
if (r < 0 || (unsigned int)r >= size) \
return (-1); \
size -= r; \
buf += r; \
} while (0)
uint8_t *seg;
int r, total_size;
uint16_t seg_size;
uint8_t i, seg_type, seg_len;
total_size = 0;
seg = data;
for (; len > 0; len -= seg_size, seg += seg_size) {
seg_type = seg[0];
seg_len = seg[1];
seg_size = 2 + sizeof(uint32_t) * seg_len;
struct ibuf data;
uint32_t as;
int r, n = 0;
uint8_t i, seg_type, seg_len;
r = snprintf(buf, size, "%s%s",
total_size != 0 ? " " : "",
ibuf_from_ibuf(&data, in);
while (ibuf_size(&data) > 0) {
if (ibuf_get_n8(&data, &seg_type) == -1 ||
ibuf_get_n8(&data, &seg_len) == -1 ||
seg_len == 0)
return (-1);
r = snprintf(buf, size, "%s%s", n++ != 0 ? " " : "",
aspath_delim(seg_type, 0));
UPDATE();
for (i = 0; i < seg_len; i++) {
r = snprintf(buf, size, "%s",
log_as(aspath_extract(seg, i)));
if (ibuf_get_n32(&data, &as) == -1)
return -1;
r = snprintf(buf, size, "%s", log_as(as));
UPDATE();
if (i + 1 < seg_len) {
r = snprintf(buf, size, " ");
@ -364,73 +378,68 @@ aspath_snprint(char *buf, size_t size, void *data, uint16_t len)
UPDATE();
}
/* ensure that we have a valid C-string especially for empty as path */
if (size > 0)
*buf = '\0';
return (total_size);
*buf = '\0';
return (0);
#undef UPDATE
}
int
aspath_asprint(char **ret, void *data, uint16_t len)
static ssize_t
aspath_strsize(struct ibuf *in)
{
size_t slen;
int plen;
slen = aspath_strlen(data, len) + 1;
*ret = malloc(slen);
if (*ret == NULL)
return (-1);
plen = aspath_snprint(*ret, slen, data, len);
if (plen == -1) {
free(*ret);
*ret = NULL;
return (-1);
}
return (0);
}
size_t
aspath_strlen(void *data, uint16_t len)
{
uint8_t *seg;
int total_size;
struct ibuf buf;
ssize_t total_size = 0;
uint32_t as;
uint16_t seg_size;
uint8_t i, seg_type, seg_len;
total_size = 0;
seg = data;
for (; len > 0; len -= seg_size, seg += seg_size) {
seg_type = seg[0];
seg_len = seg[1];
seg_size = 2 + sizeof(uint32_t) * seg_len;
ibuf_from_ibuf(&buf, in);
while (ibuf_size(&buf) > 0) {
if (ibuf_get_n8(&buf, &seg_type) == -1 ||
ibuf_get_n8(&buf, &seg_len) == -1 ||
seg_len == 0)
return (-1);
if (seg_type == AS_SET)
if (total_size != 0)
total_size += 3;
else
total_size += 2;
else if (total_size != 0)
if (total_size != 0)
total_size += 1;
total_size += strlen(aspath_delim(seg_type, 0));
for (i = 0; i < seg_len; i++) {
as = aspath_extract(seg, i);
if (ibuf_get_n32(&buf, &as) == -1)
return (-1);
do {
total_size++;
} while ((as = as / 10) != 0);
if (i + 1 < seg_len)
total_size += 1;
}
total_size += seg_len - 1;
if (seg_type == AS_SET)
total_size += 2;
total_size += strlen(aspath_delim(seg_type, 1));
}
return (total_size);
return (total_size + 1);
}
int
aspath_asprint(char **ret, struct ibuf *data)
{
ssize_t slen;
if ((slen = aspath_strsize(data)) == -1) {
*ret = NULL;
errno = EINVAL;
return (-1);
}
*ret = malloc(slen);
if (*ret == NULL)
return (-1);
if (aspath_snprint(*ret, slen, data) == -1) {
free(*ret);
*ret = NULL;
errno = EINVAL;
return (-1);
}
return (0);
}
/*
@ -456,32 +465,31 @@ aspath_extract(const void *seg, int pos)
* Verify that the aspath is correctly encoded.
*/
int
aspath_verify(void *data, uint16_t len, int as4byte, int noset)
aspath_verify(struct ibuf *in, int as4byte, int noset)
{
uint8_t *seg = data;
uint16_t seg_size, as_size = 2;
struct ibuf buf;
int pos, error = 0;
uint8_t seg_len, seg_type;
int error = 0;
if (len & 1)
ibuf_from_ibuf(&buf, in);
if (ibuf_size(&buf) & 1) {
/* odd length aspath are invalid */
return (AS_ERR_BAD);
error = AS_ERR_BAD;
goto done;
}
if (as4byte)
as_size = 4;
while (ibuf_size(&buf) > 0) {
if (ibuf_get_n8(&buf, &seg_type) == -1 ||
ibuf_get_n8(&buf, &seg_len) == -1) {
error = AS_ERR_LEN;
goto done;
}
for (; len > 0; len -= seg_size, seg += seg_size) {
const uint8_t *ptr;
int pos;
if (len < 2) /* header length check */
return (AS_ERR_BAD);
seg_type = seg[0];
seg_len = seg[1];
if (seg_len == 0)
if (seg_len == 0) {
/* empty aspath segments are not allowed */
return (AS_ERR_BAD);
error = AS_ERR_BAD;
goto done;
}
/*
* BGP confederations should not show up but consider them
@ -497,70 +505,75 @@ aspath_verify(void *data, uint16_t len, int as4byte, int noset)
if (noset && seg_type == AS_SET)
error = AS_ERR_SOFT;
if (seg_type != AS_SET && seg_type != AS_SEQUENCE &&
seg_type != AS_CONFED_SEQUENCE && seg_type != AS_CONFED_SET)
return (AS_ERR_TYPE);
seg_size = 2 + as_size * seg_len;
if (seg_size > len)
return (AS_ERR_LEN);
seg_type != AS_CONFED_SEQUENCE &&
seg_type != AS_CONFED_SET) {
error = AS_ERR_TYPE;
goto done;
}
/* RFC 7607 - AS 0 is considered malformed */
ptr = seg + 2;
for (pos = 0; pos < seg_len; pos++) {
uint32_t as;
memcpy(&as, ptr, as_size);
if (as4byte) {
if (ibuf_get_n32(&buf, &as) == -1) {
error = AS_ERR_LEN;
goto done;
}
} else {
uint16_t tmp;
if (ibuf_get_n16(&buf, &tmp) == -1) {
error = AS_ERR_LEN;
goto done;
}
as = tmp;
}
if (as == 0)
error = AS_ERR_SOFT;
ptr += as_size;
}
}
done:
return (error); /* aspath is valid but probably not loop free */
}
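For reference, a worked example of the wire encoding these reads enforce:

/*
 * The 4-byte AS_SEQUENCE "64512 65536" encodes as the 10-byte segment
 *	02 02 00 00 fc 00 00 01 00 00
 * with seg_type = 2 (AS_SEQUENCE), seg_len = 2, then two 32-bit AS
 * numbers. Every segment is 2 + 4 * seg_len bytes, so a valid 4-byte
 * path always has even length, which is the first check above.
 */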
/*
* convert a 2 byte aspath to a 4 byte one.
*/
u_char *
aspath_inflate(void *data, uint16_t len, uint16_t *newlen)
struct ibuf *
aspath_inflate(struct ibuf *in)
{
uint8_t *seg, *nseg, *ndata;
uint16_t seg_size, olen, nlen;
uint8_t seg_len;
struct ibuf *out;
uint16_t short_as;
uint8_t seg_type, seg_len;
/* first calculate the length of the aspath */
seg = data;
nlen = 0;
for (olen = len; olen > 0; olen -= seg_size, seg += seg_size) {
seg_len = seg[1];
seg_size = 2 + sizeof(uint16_t) * seg_len;
nlen += 2 + sizeof(uint32_t) * seg_len;
if (seg_size > olen) {
errno = ERANGE;
return (NULL);
}
}
*newlen = nlen;
if ((ndata = malloc(nlen)) == NULL)
/* allocate enough space for the worst case */
if ((out = ibuf_open(ibuf_size(in) * 2)) == NULL)
return (NULL);
/* then copy the aspath */
seg = data;
for (nseg = ndata; nseg < ndata + nlen; ) {
*nseg++ = *seg++;
*nseg++ = seg_len = *seg++;
while (ibuf_size(in) > 0) {
if (ibuf_get_n8(in, &seg_type) == -1 ||
ibuf_get_n8(in, &seg_len) == -1 ||
seg_len == 0)
goto fail;
if (ibuf_add_n8(out, seg_type) == -1 ||
ibuf_add_n8(out, seg_len) == -1)
goto fail;
for (; seg_len > 0; seg_len--) {
*nseg++ = 0;
*nseg++ = 0;
*nseg++ = *seg++;
*nseg++ = *seg++;
if (ibuf_get_n16(in, &short_as) == -1)
goto fail;
if (ibuf_add_n32(out, short_as) == -1)
goto fail;
}
}
return (ndata);
return (out);
fail:
ibuf_free(out);
return (NULL);
}
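A worked example of the inflation and of the worst-case size bound:

/*
 * The 6-byte 2-byte-AS segment 02 02 fc 00 ff ff (AS_SEQUENCE of
 * 64512 65535) inflates to 02 02 00 00 fc 00 00 00 ff ff: only the
 * AS numbers double in size, never the segment headers, so
 * ibuf_size(in) * 2 is a safe upper bound for the output buffer.
 */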
static const u_char addrmask[] = { 0x00, 0x80, 0xc0, 0xe0, 0xf0,

View File

@ -1,4 +1,4 @@
/* $OpenBSD: http.c,v 1.78 2023/06/28 17:36:09 op Exp $ */
/* $OpenBSD: http.c,v 1.80 2024/01/30 11:15:05 claudio Exp $ */
/*
* Copyright (c) 2020 Nils Fisher <nils_fisher@hotmail.com>
* Copyright (c) 2020 Claudio Jeker <claudio@openbsd.org>
@ -1836,6 +1836,8 @@ http_close(struct http_connection *conn)
assert(conn->state == STATE_IDLE || conn->state == STATE_CLOSE);
conn->state = STATE_CLOSE;
LIST_REMOVE(conn, entry);
LIST_INSERT_HEAD(&active, conn, entry);
if (conn->tls != NULL) {
switch (tls_close(conn->tls)) {
@ -1985,6 +1987,8 @@ http_handle(struct http_connection *conn)
return http_close(conn);
case STATE_IDLE:
conn->state = STATE_RESPONSE_HEADER;
LIST_REMOVE(conn, entry);
LIST_INSERT_HEAD(&active, conn, entry);
return http_read(conn);
case STATE_FREE:
errx(1, "bad http state");
@ -2156,8 +2160,10 @@ proc_http(char *bind_addr, int fd)
LIST_FOREACH_SAFE(conn, &idle, entry, nc) {
if (conn->pfd != NULL && conn->pfd->revents != 0)
http_do(conn, http_handle);
else if (conn->idle_time <= now)
else if (conn->idle_time <= now) {
conn->io_time = 0;
http_do(conn, http_close);
}
if (conn->state == STATE_FREE)
http_free(conn);
@ -2168,7 +2174,7 @@ proc_http(char *bind_addr, int fd)
/* check if event is ready */
if (conn->pfd != NULL && conn->pfd->revents != 0)
http_do(conn, http_handle);
else if (conn->io_time <= now) {
else if (conn->io_time != 0 && conn->io_time <= now) {
conn->io_time = 0;
if (conn->state == STATE_CONNECT) {
warnx("%s: connect timeout",

View File

@ -1,4 +1,4 @@
/* $OpenBSD: vionet.c,v 1.7 2024/01/03 03:14:16 dv Exp $ */
/* $OpenBSD: vionet.c,v 1.8 2024/01/30 23:01:49 dv Exp $ */
/*
* Copyright (c) 2023 Dave Voutila <dv@openbsd.org>
@ -16,9 +16,8 @@
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/mman.h>
#include <sys/param.h> /* PAGE_SIZE */
#include <sys/socket.h>
#include <sys/types.h>
#include <dev/pci/virtio_pcireg.h>
#include <dev/pv/virtioreg.h>
@ -26,7 +25,6 @@
#include <net/if.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <errno.h>
#include <event.h>
@ -36,7 +34,6 @@
#include <unistd.h>
#include "atomicio.h"
#include "pci.h"
#include "virtio.h"
#include "vmd.h"
@ -47,20 +44,36 @@
extern char *__progname;
extern struct vmd_vm *current_vm;
/* Device Globals */
struct event ev_tap;
struct packet {
uint8_t *buf;
size_t len;
};
static int vionet_rx(struct vionet_dev *);
static int vionet_rx(struct vionet_dev *, int);
static ssize_t vionet_rx_copy(struct vionet_dev *, int, const struct iovec *,
int, size_t);
static ssize_t vionet_rx_zerocopy(struct vionet_dev *, int,
const struct iovec *, int);
static void vionet_rx_event(int, short, void *);
static uint32_t handle_io_read(struct viodev_msg *, struct virtio_dev *,
int8_t *);
static int handle_io_write(struct viodev_msg *, struct virtio_dev *);
void vionet_notify_rx(struct virtio_dev *);
int vionet_notifyq(struct virtio_dev *);
static int vionet_notify_tx(struct virtio_dev *);
static int vionet_notifyq(struct virtio_dev *);
static void dev_dispatch_vm(int, short, void *);
static void handle_sync_io(int, short, void *);
/* Device Globals */
struct event ev_tap;
struct event ev_inject;
int pipe_inject[2];
#define READ 0
#define WRITE 1
struct iovec iov_rx[VIONET_QUEUE_SIZE];
struct iovec iov_tx[VIONET_QUEUE_SIZE];
__dead void
vionet_main(int fd, int fd_vmm)
{
@ -79,6 +92,10 @@ vionet_main(int fd, int fd_vmm)
if (pledge("stdio vmm proc", NULL) == -1)
fatal("pledge");
/* Initialize iovec arrays. */
memset(iov_rx, 0, sizeof(iov_rx));
memset(iov_tx, 0, sizeof(iov_tx));
/* Receive our vionet_dev, mostly preconfigured. */
sz = atomicio(read, fd, &dev, sizeof(dev));
if (sz != sizeof(dev)) {
@ -153,6 +170,12 @@ vionet_main(int fd, int fd_vmm)
}
}
/* Initialize our packet injection pipe. */
if (pipe2(pipe_inject, O_NONBLOCK) == -1) {
log_warn("%s: injection pipe", __func__);
goto fail;
}
/* Initialize libevent so we can start wiring event handlers. */
event_init();
@ -171,6 +194,12 @@ vionet_main(int fd, int fd_vmm)
event_set(&ev_tap, vionet->data_fd, EV_READ | EV_PERSIST,
vionet_rx_event, &dev);
/* Add an event for injected packets. */
log_debug("%s: wiring in packet injection handler (fd=%d)", __func__,
pipe_inject[READ]);
event_set(&ev_inject, pipe_inject[READ], EV_READ | EV_PERSIST,
vionet_rx_event, &dev);
/* Configure our sync channel event handler. */
log_debug("%s: wiring in sync channel handler (fd=%d)", __func__,
dev.sync_fd);
@ -209,6 +238,8 @@ vionet_main(int fd, int fd_vmm)
close_fd(dev.sync_fd);
close_fd(dev.async_fd);
close_fd(vionet->data_fd);
close_fd(pipe_inject[READ]);
close_fd(pipe_inject[WRITE]);
_exit(ret);
/* NOTREACHED */
}
@ -223,6 +254,8 @@ fail:
close_fd(dev.sync_fd);
close_fd(dev.async_fd);
close_fd(pipe_inject[READ]);
close_fd(pipe_inject[WRITE]);
if (vionet != NULL)
close_fd(vionet->data_fd);
@ -232,7 +265,7 @@ fail:
/*
* Update the gpa and hva of the virtqueue.
*/
void
static void
vionet_update_qa(struct vionet_dev *dev)
{
struct virtio_vq_info *vq_info;
@ -259,7 +292,7 @@ vionet_update_qa(struct vionet_dev *dev)
/*
* Update the queue size.
*/
void
static void
vionet_update_qs(struct vionet_dev *dev)
{
struct virtio_vq_info *vq_info;
@ -280,33 +313,27 @@ vionet_update_qs(struct vionet_dev *dev)
}
/*
* vionet_enq_rx
* vionet_rx
*
* Take a given packet from the host-side tap and copy it into the guest's
* buffers utilizing the rx virtio ring. If the packet length is invalid
* (too small or too large) or if there are not enough buffers available,
* the packet is dropped.
* Pull packets from the provided fd and fill the receive-side virtqueue. We
* selectively use zero-copy approaches when possible.
*
* Returns 1 if guest notification is needed. Otherwise, returns -1 on failure
* or 0 if no notification is needed.
*/
int
vionet_enq_rx(struct vionet_dev *dev, char *pkt, size_t sz, int *spc)
static int
vionet_rx(struct vionet_dev *dev, int fd)
{
uint16_t dxx, idx, hdr_desc_idx, chain_hdr_idx;
uint16_t idx, hdr_idx;
char *vr = NULL;
size_t bufsz = 0, off = 0, pkt_offset = 0, chunk_size = 0;
size_t chain_len = 0;
struct vring_desc *desc, *pkt_desc, *hdr_desc;
size_t chain_len = 0, iov_cnt;
struct vring_desc *desc, *table;
struct vring_avail *avail;
struct vring_used *used;
struct virtio_vq_info *vq_info;
struct virtio_net_hdr hdr;
size_t hdr_sz;
if (sz < VIONET_MIN_TXLEN || sz > VIONET_MAX_TXLEN) {
log_warnx("%s: invalid packet size", __func__);
return (0);
}
hdr_sz = sizeof(hdr);
struct iovec *iov;
int notify = 0;
ssize_t sz;
if (!(dev->cfg.device_status
& VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK)) {
@ -315,178 +342,287 @@ vionet_enq_rx(struct vionet_dev *dev, char *pkt, size_t sz, int *spc)
}
vq_info = &dev->vq[RXQ];
idx = vq_info->last_avail;
vr = vq_info->q_hva;
if (vr == NULL)
fatalx("%s: vr == NULL", __func__);
/* Compute offsets in ring of descriptors, avail ring, and used ring */
desc = (struct vring_desc *)(vr);
table = (struct vring_desc *)(vr);
avail = (struct vring_avail *)(vr + vq_info->vq_availoffset);
used = (struct vring_used *)(vr + vq_info->vq_usedoffset);
used->flags |= VRING_USED_F_NO_NOTIFY;
idx = vq_info->last_avail & VIONET_QUEUE_MASK;
if ((vq_info->notified_avail & VIONET_QUEUE_MASK) == idx) {
log_debug("%s: insufficient available buffer capacity, "
"dropping packet.", __func__);
return (0);
}
while (idx != avail->idx) {
hdr_idx = avail->ring[idx & VIONET_QUEUE_MASK];
desc = &table[hdr_idx & VIONET_QUEUE_MASK];
if (!DESC_WRITABLE(desc)) {
log_warnx("%s: invalid descriptor state", __func__);
goto reset;
}
hdr_desc_idx = avail->ring[idx] & VIONET_QUEUE_MASK;
hdr_desc = &desc[hdr_desc_idx];
iov = &iov_rx[0];
iov_cnt = 1;
dxx = hdr_desc_idx;
chain_hdr_idx = dxx;
chain_len = 0;
/*
* First descriptor should be at least as large as the
* virtio_net_hdr. It's not technically required, but in
* legacy devices it should be safe to assume.
*/
iov->iov_len = desc->len;
if (iov->iov_len < sizeof(struct virtio_net_hdr)) {
log_warnx("%s: invalid descriptor length", __func__);
goto reset;
}
/* Process the descriptor and walk any potential chain. */
do {
off = 0;
pkt_desc = &desc[dxx];
if (!(pkt_desc->flags & VRING_DESC_F_WRITE)) {
log_warnx("%s: invalid descriptor, not writable",
/*
* Insert the virtio_net_hdr and adjust len/base. We do the
* pointer math here before it's a void*.
*/
iov->iov_base = hvaddr_mem(desc->addr, iov->iov_len);
if (iov->iov_base == NULL)
goto reset;
memset(iov->iov_base, 0, sizeof(struct virtio_net_hdr));
/* Tweak the iovec to account for the virtio_net_hdr. */
iov->iov_len -= sizeof(struct virtio_net_hdr);
iov->iov_base = hvaddr_mem(desc->addr +
sizeof(struct virtio_net_hdr), iov->iov_len);
if (iov->iov_base == NULL)
goto reset;
chain_len = iov->iov_len;
/*
* Walk the remaining chain and collect remaining addresses
* and lengths.
*/
while (desc->flags & VRING_DESC_F_NEXT) {
desc = &table[desc->next & VIONET_QUEUE_MASK];
if (!DESC_WRITABLE(desc)) {
log_warnx("%s: invalid descriptor state",
__func__);
goto reset;
}
/* Collect our IO information. Translate gpa's. */
iov = &iov_rx[iov_cnt];
iov->iov_len = desc->len;
iov->iov_base = hvaddr_mem(desc->addr, iov->iov_len);
if (iov->iov_base == NULL)
goto reset;
chain_len += iov->iov_len;
/* Guard against infinitely looping chains. */
if (++iov_cnt >= nitems(iov_rx)) {
log_warnx("%s: infinite chain detected",
__func__);
goto reset;
}
}
/* Make sure the driver gave us the bare minimum buffers. */
if (chain_len < VIONET_MIN_TXLEN) {
log_warnx("%s: insufficient buffers provided",
__func__);
return (0);
goto reset;
}
/* How much data do we get to write? */
if (sz - bufsz > pkt_desc->len)
chunk_size = pkt_desc->len;
/*
* If we're enforcing hardware address or handling an injected
* packet, we need to use a copy-based approach.
*/
if (dev->lockedmac || fd != dev->data_fd)
sz = vionet_rx_copy(dev, fd, iov_rx, iov_cnt,
chain_len);
else
chunk_size = sz - bufsz;
sz = vionet_rx_zerocopy(dev, fd, iov_rx, iov_cnt);
if (sz == -1)
goto reset;
if (sz == 0) /* No packets, so bail out for now. */
break;
if (chain_len == 0) {
off = hdr_sz;
if (chunk_size == pkt_desc->len)
chunk_size -= off;
}
/*
* Account for the prefixed header since it wasn't included
* in the copy or zerocopy operations.
*/
sz += sizeof(struct virtio_net_hdr);
/* Write a chunk of data if we need to */
if (chunk_size && write_mem(pkt_desc->addr + off,
pkt + pkt_offset, chunk_size)) {
log_warnx("%s: failed to write to buffer 0x%llx",
__func__, pkt_desc->addr);
return (0);
}
chain_len += chunk_size + off;
bufsz += chunk_size;
pkt_offset += chunk_size;
dxx = pkt_desc->next & VIONET_QUEUE_MASK;
} while (bufsz < sz && pkt_desc->flags & VRING_DESC_F_NEXT);
/* Move our marker in the ring...*/
vq_info->last_avail = (vq_info->last_avail + 1) &
VIONET_QUEUE_MASK;
/* Prepend the virtio net header in the first buffer. */
memset(&hdr, 0, sizeof(hdr));
hdr.hdr_len = hdr_sz;
if (write_mem(hdr_desc->addr, &hdr, hdr_sz)) {
log_warnx("vionet: rx enq header write_mem error @ 0x%llx",
hdr_desc->addr);
return (0);
/* Mark our buffers as used. */
used->ring[used->idx & VIONET_QUEUE_MASK].id = hdr_idx;
used->ring[used->idx & VIONET_QUEUE_MASK].len = sz;
__sync_synchronize();
used->idx++;
idx++;
}
/* Update the index field in the used ring. This must be done last. */
dev->cfg.isr_status = 1;
*spc = (vq_info->notified_avail - vq_info->last_avail)
& VIONET_QUEUE_MASK;
/* Update the list of used buffers. */
used->ring[used->idx & VIONET_QUEUE_MASK].id = chain_hdr_idx;
used->ring[used->idx & VIONET_QUEUE_MASK].len = chain_len;
__sync_synchronize();
used->idx++;
if (idx != vq_info->last_avail &&
!(avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
notify = 1;
dev->cfg.isr_status |= 1;
}
vq_info->last_avail = idx;
return (notify);
reset:
log_warnx("%s: requesting device reset", __func__);
dev->cfg.device_status |= DEVICE_NEEDS_RESET;
dev->cfg.isr_status |= VIRTIO_CONFIG_ISR_CONFIG_CHANGE;
return (1);
}
/*
* vionet_rx
* vionet_rx_copy
*
* Enqueue data that was received on a tap file descriptor
* to the vionet device queue.
* Read a packet off the provided file descriptor, validating packet
* characteristics, and copy into the provided buffers in the iovec array.
*
* It's assumed that the provided iovec array contains validated host virtual
* address translations and not guest physical addresses.
*
* Returns the number of bytes copied on success, 0 if the packet is dropped,
* and -1 on error.
*/
static int
vionet_rx(struct vionet_dev *dev)
ssize_t
vionet_rx_copy(struct vionet_dev *dev, int fd, const struct iovec *iov,
int iov_cnt, size_t chain_len)
{
char buf[PAGE_SIZE];
int num_enq = 0, spc = 0;
struct ether_header *eh;
ssize_t sz;
static uint8_t buf[VIONET_HARD_MTU];
struct packet *pkt = NULL;
struct ether_header *eh = NULL;
uint8_t *payload = buf;
size_t i, chunk, nbytes, copied = 0;
ssize_t sz;
do {
sz = read(dev->data_fd, buf, sizeof(buf));
if (sz == -1) {
/*
* If we get EAGAIN, no data is currently available;
* do not treat this as an error.
*/
if (errno != EAGAIN)
log_warn("%s: read error", __func__);
} else if (sz > 0) {
eh = (struct ether_header *)buf;
if (!dev->lockedmac ||
ETHER_IS_MULTICAST(eh->ether_dhost) ||
memcmp(eh->ether_dhost, dev->mac,
sizeof(eh->ether_dhost)) == 0)
num_enq += vionet_enq_rx(dev, buf, sz, &spc);
} else if (sz == 0) {
log_debug("%s: no data", __func__);
break;
/* If reading from the tap(4), try to right-size the read. */
if (fd == dev->data_fd)
nbytes = MIN(chain_len, VIONET_HARD_MTU);
else if (fd == pipe_inject[READ])
nbytes = sizeof(struct packet);
else {
log_warnx("%s: invalid fd: %d", __func__, fd);
return (-1);
}
/*
* Try to pull a packet. The fd should be non-blocking and we don't
* care if we under-read (i.e. sz != nbytes) as we may not have a
* packet large enough to fill the buffer.
*/
sz = read(fd, buf, nbytes);
if (sz == -1) {
if (errno != EAGAIN) {
log_warn("%s: error reading packet", __func__);
return (-1);
}
} while (spc > 0 && sz > 0);
return (0);
} else if (fd == dev->data_fd && sz < VIONET_MIN_TXLEN) {
/* If reading the tap(4), we should get valid ethernet. */
log_warnx("%s: invalid packet size", __func__);
return (0);
} else if (sz != sizeof(struct packet)) {
log_warnx("%s: invalid injected packet object", __func__);
return (0);
}
return (num_enq);
/* Decompose an injected packet, if that's what we're working with. */
if (fd == pipe_inject[READ]) {
pkt = (struct packet *)buf;
if (pkt->buf == NULL) {
log_warnx("%s: invalid injected packet, no buffer",
__func__);
return (0);
}
if (pkt->len < VIONET_MIN_TXLEN || pkt->len > VIONET_MAX_TXLEN) {
log_warnx("%s: invalid injected packet size", __func__);
goto drop;
}
payload = pkt->buf;
sz = (ssize_t)pkt->len;
}
/* Validate the ethernet header, if required. */
if (dev->lockedmac) {
eh = (struct ether_header *)(payload);
if (!ETHER_IS_MULTICAST(eh->ether_dhost) &&
memcmp(eh->ether_dhost, dev->mac,
sizeof(eh->ether_dhost)) != 0)
goto drop;
}
/* Truncate one last time to the chain length, if shorter. */
sz = MIN(chain_len, (size_t)sz);
/*
* Copy the packet into the provided buffers. We can use memcpy(3)
* here as the gpa was validated and translated to an hva previously.
*/
for (i = 0; (int)i < iov_cnt && (size_t)sz > copied; i++) {
chunk = MIN(iov[i].iov_len, (size_t)(sz - copied));
memcpy(iov[i].iov_base, payload + copied, chunk);
copied += chunk;
}
drop:
/* Free any injected packet buffer. */
if (pkt != NULL)
free(pkt->buf);
return (copied);
}
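/*
* The copy loop above is the heart of vionet_rx_copy(). The same
* pattern, isolated as a hypothetical helper for illustration (a
* sketch, not part of this commit; assumes <sys/param.h> for MIN,
* <sys/uio.h> and <string.h>):
*/
static size_t
scatter_copy(const struct iovec *iov, int iov_cnt, const uint8_t *payload,
    size_t sz)
{
	size_t copied = 0, chunk;
	int i;

	/* Fill each buffer in turn until the payload or the iovecs run out. */
	for (i = 0; i < iov_cnt && copied < sz; i++) {
		chunk = MIN(iov[i].iov_len, sz - copied);
		memcpy(iov[i].iov_base, payload + copied, chunk);
		copied += chunk;
	}
	return (copied);
}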
/*
* vionet_rx_zerocopy
*
* Perform a vectorized read from the given fd into the guest physical memory
* pointed to by iovecs.
*
* Returns the number of bytes read on success, -1 on error, or 0 if EAGAIN
* was returned by readv(2).
*/
static ssize_t
vionet_rx_zerocopy(struct vionet_dev *dev, int fd, const struct iovec *iov,
int iov_cnt)
{
ssize_t sz;
if (dev->lockedmac) {
log_warnx("%s: zerocopy not available for locked lladdr",
__func__);
return (-1);
}
sz = readv(fd, iov, iov_cnt);
if (sz == -1 && errno == EAGAIN)
return (0);
return (sz);
}
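/*
* The EAGAIN-to-0 mapping above relies on the fd having been marked
* non-blocking beforehand, e.g. (a sketch, not from this commit):
*
*	flags = fcntl(fd, F_GETFL);
*	if (flags == -1 || fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1)
*		log_warn("fcntl");
*/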
/*
* vionet_rx_event
*
* Called when new data can be received on the tap fd of a vionet device.
*/
static void
vionet_rx_event(int fd, short kind, void *arg)
vionet_rx_event(int fd, short event, void *arg)
{
struct virtio_dev *dev = (struct virtio_dev *)arg;
if (vionet_rx(&dev->vionet) > 0)
if (!(event & EV_READ))
fatalx("%s: invalid event type", __func__);
if (vionet_rx(&dev->vionet, fd) > 0)
virtio_assert_pic_irq(dev, 0);
}
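/*
* The handler is registered with the libevent 1.4 API used throughout
* vmd. A minimal sketch of the wiring, assuming ev_tap and the device's
* tap fd (the actual call site is outside this hunk):
*
*	event_set(&ev_tap, dev->vionet.data_fd, EV_READ | EV_PERSIST,
*	    vionet_rx_event, dev);
*	event_add(&ev_tap, NULL);
*/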
void
vionet_notify_rx(struct virtio_dev *dev)
{
struct vionet_dev *vionet = &dev->vionet;
struct vring_avail *avail;
struct virtio_vq_info *vq_info;
char *vr;
vq_info = &vionet->vq[RXQ];
vr = vq_info->q_hva;
if (vr == NULL)
fatalx("%s: vr == NULL", __func__);
/* Compute offset into avail ring */
avail = (struct vring_avail *)(vr + vq_info->vq_availoffset);
vq_info->notified_avail = avail->idx - 1;
}
int
static int
vionet_notifyq(struct virtio_dev *dev)
{
struct vionet_dev *vionet = &dev->vionet;
int ret = 0;
switch (vionet->cfg.queue_notify) {
case RXQ:
vionet_notify_rx(dev);
break;
case TXQ:
ret = vionet_notify_tx(dev);
break;
case TXQ:
return vionet_notify_tx(dev);
default:
/*
* Catch the unimplemented queue ID 2 (control queue) as
@@ -497,23 +633,25 @@ vionet_notifyq(struct virtio_dev *dev)
break;
}
return (ret);
return (0);
}
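/*
* Dispatch context: the guest kicks a queue by writing its index to the
* legacy VIRTIO_CONFIG_QUEUE_NOTIFY register; the i/o write handler
* presumably latches that value into cfg.queue_notify before calling
* vionet_notifyq(), which is how the switch above selects RXQ or TXQ.
*/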
int
static int
vionet_notify_tx(struct virtio_dev *dev)
{
uint16_t idx, pkt_desc_idx, hdr_desc_idx, dxx, cnt;
size_t pktsz, chunk_size = 0;
ssize_t dhcpsz = 0;
int ofs, spc = 0;
char *vr = NULL, *pkt = NULL, *dhcppkt = NULL;
uint16_t idx, hdr_idx;
size_t chain_len, iov_cnt;
ssize_t dhcpsz = 0, sz;
int notify = 0;
char *vr = NULL, *dhcppkt = NULL;
struct vionet_dev *vionet = &dev->vionet;
struct vring_desc *desc, *pkt_desc, *hdr_desc;
struct vring_desc *desc, *table;
struct vring_avail *avail;
struct vring_used *used;
struct virtio_vq_info *vq_info;
struct ether_header *eh;
struct iovec *iov;
struct packet pkt;
if (!(vionet->cfg.device_status
& VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK)) {
@@ -522,155 +660,161 @@ vionet_notify_tx(struct virtio_dev *dev)
}
vq_info = &vionet->vq[TXQ];
idx = vq_info->last_avail;
vr = vq_info->q_hva;
if (vr == NULL)
fatalx("%s: vr == NULL", __func__);
/* Compute offsets in ring of descriptors, avail ring, and used ring */
desc = (struct vring_desc *)(vr);
table = (struct vring_desc *)(vr);
avail = (struct vring_avail *)(vr + vq_info->vq_availoffset);
used = (struct vring_used *)(vr + vq_info->vq_usedoffset);
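/*
* In the legacy split-ring layout the descriptor table, avail ring and
* used ring sit in the same guest allocation, so each is reached at a
* fixed offset from the queue's base hva computed at queue setup.
*/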
idx = vq_info->last_avail & VIONET_QUEUE_MASK;
while (idx != avail->idx) {
hdr_idx = avail->ring[idx & VIONET_QUEUE_MASK];
desc = &table[hdr_idx & VIONET_QUEUE_MASK];
if (DESC_WRITABLE(desc)) {
log_warnx("%s: invalid descriptor state", __func__);
goto reset;
}
if ((avail->idx & VIONET_QUEUE_MASK) == idx)
return (0);
iov = &iov_tx[0];
iov_cnt = 0;
chain_len = 0;
while ((avail->idx & VIONET_QUEUE_MASK) != idx) {
hdr_desc_idx = avail->ring[idx] & VIONET_QUEUE_MASK;
hdr_desc = &desc[hdr_desc_idx];
pktsz = 0;
/*
* As a legacy device, we most likely will receive a lead
* descriptor sized to the virtio_net_hdr. However, the framing
* is not guaranteed, so check for packet data.
*/
iov->iov_len = desc->len;
if (iov->iov_len < sizeof(struct virtio_net_hdr)) {
log_warnx("%s: invalid descriptor length", __func__);
goto reset;
} else if (iov->iov_len > sizeof(struct virtio_net_hdr)) {
/* Chop off the virtio header, leaving packet data. */
iov->iov_len -= sizeof(struct virtio_net_hdr);
chain_len += iov->iov_len;
iov->iov_base = hvaddr_mem(desc->addr +
sizeof(struct virtio_net_hdr), iov->iov_len);
if (iov->iov_base == NULL)
goto reset;
iov_cnt++;
}
cnt = 0;
dxx = hdr_desc_idx;
do {
pktsz += desc[dxx].len;
dxx = desc[dxx].next & VIONET_QUEUE_MASK;
/*
* Virtio 1.0, cs04, section 2.4.5:
* "The number of descriptors in the table is defined
* by the queue size for this virtqueue: this is the
* maximum possible descriptor chain length."
*/
if (++cnt >= VIONET_QUEUE_SIZE) {
log_warnx("%s: descriptor table invalid",
/*
* Walk the chain and collect remaining addresses and lengths.
*/
while (desc->flags & VRING_DESC_F_NEXT) {
desc = &table[desc->next & VIONET_QUEUE_MASK];
if (DESC_WRITABLE(desc)) {
log_warnx("%s: invalid descriptor state",
__func__);
goto out;
}
} while (desc[dxx].flags & VRING_DESC_F_NEXT);
pktsz += desc[dxx].len;
/* Remove virtio header descriptor len */
pktsz -= hdr_desc->len;
/* Drop packets violating device MTU-based limits */
if (pktsz < VIONET_MIN_TXLEN || pktsz > VIONET_MAX_TXLEN) {
log_warnx("%s: invalid packet size %lu", __func__,
pktsz);
goto drop_packet;
}
pkt = malloc(pktsz);
if (pkt == NULL) {
log_warn("malloc error alloc packet buf");
goto out;
}
ofs = 0;
pkt_desc_idx = hdr_desc->next & VIONET_QUEUE_MASK;
pkt_desc = &desc[pkt_desc_idx];
while (pkt_desc->flags & VRING_DESC_F_NEXT) {
/* must be not writable */
if (pkt_desc->flags & VRING_DESC_F_WRITE) {
log_warnx("unexpected writable tx desc "
"%d", pkt_desc_idx);
goto out;
goto reset;
}
/* Check we don't read beyond allocated pktsz */
if (pkt_desc->len > pktsz - ofs) {
log_warnx("%s: descriptor len past pkt len",
/* Collect our IO information, translating gpa's. */
iov = &iov_tx[iov_cnt];
iov->iov_len = desc->len;
iov->iov_base = hvaddr_mem(desc->addr, iov->iov_len);
if (iov->iov_base == NULL)
goto reset;
chain_len += iov->iov_len;
/* Guard against infinitely looping chains. */
if (++iov_cnt >= nitems(iov_tx)) {
log_warnx("%s: infinite chain detected",
__func__);
chunk_size = pktsz - ofs;
} else
chunk_size = pkt_desc->len;
/* Read packet from descriptor ring */
if (read_mem(pkt_desc->addr, pkt + ofs, chunk_size)) {
log_warnx("vionet: packet read_mem error "
"@ 0x%llx", pkt_desc->addr);
goto out;
goto reset;
}
ofs += pkt_desc->len;
pkt_desc_idx = pkt_desc->next & VIONET_QUEUE_MASK;
pkt_desc = &desc[pkt_desc_idx];
}
/* Now handle tail descriptor - must be not writable */
if (pkt_desc->flags & VRING_DESC_F_WRITE) {
log_warnx("unexpected writable tx descriptor %d",
pkt_desc_idx);
goto out;
/* Check if we've got a minimum viable amount of data. */
if (chain_len < VIONET_MIN_TXLEN) {
sz = chain_len;
goto drop;
}
/* Check we don't read beyond allocated pktsz */
if (pkt_desc->len > pktsz - ofs) {
log_warnx("%s: descriptor len past pkt len", __func__);
chunk_size = pktsz - ofs;
} else
chunk_size = pkt_desc->len;
/* Read packet from descriptor ring */
if (read_mem(pkt_desc->addr, pkt + ofs, chunk_size)) {
log_warnx("vionet: packet read_mem error @ "
"0x%llx", pkt_desc->addr);
goto out;
/*
* Inspect the ethernet header: on a "local" interface, look for a
* possible DHCP request; with a locked lladdr, validate the source
* address.
*
* To help preserve zero-copy semantics, we require that the first
* descriptor with packet data contains a buffer large enough for
* this inspection.
*/
iov = &iov_tx[0];
if (vionet->lockedmac) {
if (iov->iov_len < ETHER_HDR_LEN) {
log_warnx("%s: insufficient header data",
__func__);
goto drop;
}
eh = (struct ether_header *)iov->iov_base;
if (memcmp(eh->ether_shost, vionet->mac,
sizeof(eh->ether_shost)) != 0) {
log_warnx("%s: bad source address %s",
__func__, ether_ntoa((struct ether_addr *)
eh->ether_shost));
sz = chain_len;
goto drop;
}
}
if (vionet->local) {
dhcpsz = dhcp_request(dev, iov->iov_base, iov->iov_len,
&dhcppkt);
log_debug("%s: detected dhcp request of %zu bytes",
__func__, dhcpsz);
}
/* reject other source addresses */
if (vionet->lockedmac && pktsz >= ETHER_HDR_LEN &&
(eh = (struct ether_header *)pkt) &&
memcmp(eh->ether_shost, vionet->mac,
sizeof(eh->ether_shost)) != 0)
log_debug("vionet: wrong source address %s for vm %d",
ether_ntoa((struct ether_addr *)
eh->ether_shost), dev->vm_id);
else if (vionet->local &&
(dhcpsz = dhcp_request(dev, pkt, pktsz, &dhcppkt)) != -1) {
log_debug("vionet: dhcp request,"
" local response size %zd", dhcpsz);
/* XXX signed vs unsigned here, funky cast */
} else if (write(vionet->data_fd, pkt, pktsz) != (int)pktsz) {
log_warnx("vionet: tx failed writing to tap: "
"%d", errno);
goto out;
/* Write our packet to the tap(4). */
sz = writev(vionet->data_fd, iov_tx, iov_cnt);
if (sz == -1 && errno != ENOBUFS) {
log_warn("%s", __func__);
goto reset;
}
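/*
* ENOBUFS (tap queue full) is deliberately tolerated: the frame is
* dropped, but the chain is still returned to the guest below instead
* of wedging the device with a reset.
*/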
drop_packet:
vionet->cfg.isr_status = 1;
used->ring[used->idx & VIONET_QUEUE_MASK].id = hdr_desc_idx;
used->ring[used->idx & VIONET_QUEUE_MASK].len = hdr_desc->len;
sz += sizeof(struct virtio_net_hdr);
drop:
used->ring[used->idx & VIONET_QUEUE_MASK].id = hdr_idx;
used->ring[used->idx & VIONET_QUEUE_MASK].len = sz;
__sync_synchronize();
used->idx++;
idx++;
vq_info->last_avail = avail->idx & VIONET_QUEUE_MASK;
idx = (idx + 1) & VIONET_QUEUE_MASK;
free(pkt);
pkt = NULL;
/* Facilitate DHCP reply injection, if needed. */
if (dhcpsz > 0) {
pkt.buf = dhcppkt;
pkt.len = dhcpsz;
sz = write(pipe_inject[WRITE], &pkt, sizeof(pkt));
if (sz == -1 && errno != EAGAIN) {
log_warn("%s: packet injection", __func__);
free(pkt.buf);
} else if (sz == -1 && errno == EAGAIN) {
log_debug("%s: dropping dhcp reply", __func__);
free(pkt.buf);
} else if (sz != sizeof(pkt)) {
log_warnx("%s: failed packet injection",
__func__);
free(pkt.buf);
}
log_debug("%s: injected dhcp reply with %ld bytes",
__func__, sz);
}
}
if (dhcpsz > 0)
vionet_enq_rx(vionet, dhcppkt, dhcpsz, &spc);
out:
free(pkt);
free(dhcppkt);
if (idx != vq_info->last_avail &&
!(avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
notify = 1;
vionet->cfg.isr_status |= 1;
}
vq_info->last_avail = idx;
return (notify);
reset:
log_warnx("%s: requesting device reset", __func__);
vionet->cfg.device_status |= DEVICE_NEEDS_RESET;
vionet->cfg.isr_status |= VIRTIO_CONFIG_ISR_CONFIG_CHANGE;
return (1);
}
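/*
* The injection pipe carries struct packet values between the tx path
* above (writer) and vionet_rx_copy() (reader). Presumably the type is
* little more than a pointer/length pair; a sketch (the real
* declaration lives outside this hunk):
*
*	struct packet {
*		uint8_t	*buf;
*		size_t	 len;
*	};
*/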
@@ -728,12 +872,15 @@ dev_dispatch_vm(int fd, short event, void *arg)
case IMSG_VMDOP_PAUSE_VM:
log_debug("%s: pausing", __func__);
event_del(&ev_tap);
event_del(&ev_inject);
break;
case IMSG_VMDOP_UNPAUSE_VM:
log_debug("%s: unpausing", __func__);
if (vionet->cfg.device_status
& VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK)
& VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK) {
event_add(&ev_tap, NULL);
event_add(&ev_inject, NULL);
}
break;
case IMSG_CTL_VERBOSE:
IMSG_SIZE_CHECK(&imsg, &verbose);
@@ -821,6 +968,7 @@ handle_sync_io(int fd, short event, void *arg)
case VIODEV_MSG_SHUTDOWN:
event_del(&dev->sync_iev.ev);
event_del(&ev_tap);
event_del(&ev_inject);
event_loopbreak();
return;
default:
@@ -881,11 +1029,11 @@ handle_io_write(struct viodev_msg *msg, struct virtio_dev *dev)
virtio_deassert_pic_irq(dev, msg->vcpu);
}
event_del(&ev_tap);
event_del(&ev_inject);
if (vionet->cfg.device_status
& VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK) {
if (event_add(&ev_tap, NULL))
log_warn("%s: could not initialize virtio tap "
"event handler", __func__);
event_add(&ev_tap, NULL);
event_add(&ev_inject, NULL);
}
break;
default:


@@ -1,4 +1,4 @@
/* $OpenBSD: virtio.h,v 1.49 2023/09/26 01:53:54 dv Exp $ */
/* $OpenBSD: virtio.h,v 1.50 2024/01/30 23:01:49 dv Exp $ */
/*
* Copyright (c) 2015 Mike Larkin <mlarkin@openbsd.org>
@@ -366,13 +366,6 @@ int vioblk_restore(int, struct vmd_vm *, int[][VM_MAX_BASE_PER_DISK]);
int vionet_dump(int);
int vionet_restore(int, struct vmd_vm *, int *);
void vionet_update_qs(struct vionet_dev *);
void vionet_update_qa(struct vionet_dev *);
int vionet_notifyq(struct virtio_dev *);
void vionet_notify_rx(struct virtio_dev *);
int vionet_notify_tx(struct virtio_dev *);
void vionet_process_rx(uint32_t);
int vionet_enq_rx(struct vionet_dev *, char *, size_t, int *);
void vionet_set_hostmac(struct vmd_vm *, unsigned int, uint8_t *);
int vmmci_io(int, uint16_t, uint32_t *, uint8_t *, void *, uint8_t);