diff --git a/lib/libcrypto/crypto_init.c b/lib/libcrypto/crypto_init.c index ed92c410c..56f4460e6 100644 --- a/lib/libcrypto/crypto_init.c +++ b/lib/libcrypto/crypto_init.c @@ -1,4 +1,4 @@ -/* $OpenBSD: crypto_init.c,v 1.17 2024/01/13 17:04:29 tb Exp $ */ +/* $OpenBSD: crypto_init.c,v 1.18 2024/01/25 12:22:31 tb Exp $ */ /* * Copyright (c) 2018 Bob Beck * @@ -81,7 +81,6 @@ OPENSSL_cleanup(void) CRYPTO_cleanup_all_ex_data(); EVP_cleanup(); - X509V3_EXT_cleanup(); X509_VERIFY_PARAM_table_cleanup(); x509_issuer_cache_free(); diff --git a/lib/libcrypto/pkcs12/p12_add.c b/lib/libcrypto/pkcs12/p12_add.c index 93c7c7221..8ce1fede7 100644 --- a/lib/libcrypto/pkcs12/p12_add.c +++ b/lib/libcrypto/pkcs12/p12_add.c @@ -1,4 +1,4 @@ -/* $OpenBSD: p12_add.c,v 1.22 2023/02/16 08:38:17 tb Exp $ */ +/* $OpenBSD: p12_add.c,v 1.23 2024/01/25 13:44:08 tb Exp $ */ /* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL * project 1999. */ @@ -124,11 +124,15 @@ LCRYPTO_ALIAS(PKCS12_pack_p7data); STACK_OF(PKCS12_SAFEBAG) * PKCS12_unpack_p7data(PKCS7 *p7) { + ASN1_OCTET_STRING *aos; + if (!PKCS7_type_is_data(p7)) { PKCS12error(PKCS12_R_CONTENT_TYPE_NOT_DATA); return NULL; } - return ASN1_item_unpack(p7->d.data, &PKCS12_SAFEBAGS_it); + if ((aos = PKCS7_get_octet_string(p7)) == NULL) + return NULL; + return ASN1_item_unpack(aos, &PKCS12_SAFEBAGS_it); } LCRYPTO_ALIAS(PKCS12_unpack_p7data); @@ -182,11 +186,16 @@ LCRYPTO_ALIAS(PKCS12_pack_p7encdata); STACK_OF(PKCS12_SAFEBAG) * PKCS12_unpack_p7encdata(PKCS7 *p7, const char *pass, int passlen) { + PKCS7_ENC_CONTENT *content; + if (!PKCS7_type_is_encrypted(p7)) return NULL; - return PKCS12_item_decrypt_d2i(p7->d.encrypted->enc_data->algorithm, - &PKCS12_SAFEBAGS_it, pass, passlen, - p7->d.encrypted->enc_data->enc_data, 1); + if (p7->d.encrypted == NULL) + return NULL; + if ((content = p7->d.encrypted->enc_data) == NULL) + return NULL; + return PKCS12_item_decrypt_d2i(content->algorithm, &PKCS12_SAFEBAGS_it, + pass, passlen, content->enc_data, 1); } LCRYPTO_ALIAS(PKCS12_unpack_p7encdata); @@ -210,11 +219,14 @@ LCRYPTO_ALIAS(PKCS12_pack_authsafes); STACK_OF(PKCS7) * PKCS12_unpack_authsafes(const PKCS12 *p12) { + ASN1_OCTET_STRING *aos; + if (!PKCS7_type_is_data(p12->authsafes)) { PKCS12error(PKCS12_R_CONTENT_TYPE_NOT_DATA); return NULL; } - return ASN1_item_unpack(p12->authsafes->d.data, - &PKCS12_AUTHSAFES_it); + if ((aos = PKCS7_get_octet_string(p12->authsafes)) == NULL) + return NULL; + return ASN1_item_unpack(aos, &PKCS12_AUTHSAFES_it); } LCRYPTO_ALIAS(PKCS12_unpack_authsafes); diff --git a/lib/libcrypto/pkcs12/p12_mutl.c b/lib/libcrypto/pkcs12/p12_mutl.c index f0e6df9eb..c71ed735e 100644 --- a/lib/libcrypto/pkcs12/p12_mutl.c +++ b/lib/libcrypto/pkcs12/p12_mutl.c @@ -1,4 +1,4 @@ -/* $OpenBSD: p12_mutl.c,v 1.35 2023/02/16 08:38:17 tb Exp $ */ +/* $OpenBSD: p12_mutl.c,v 1.36 2024/01/25 13:44:08 tb Exp $ */ /* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL * project 1999. 
*/ @@ -115,6 +115,7 @@ PKCS12_gen_mac(PKCS12 *p12, const char *pass, int passlen, { const EVP_MD *md_type; HMAC_CTX *hmac = NULL; + ASN1_OCTET_STRING *aos; unsigned char key[EVP_MAX_MD_SIZE], *salt; int saltlen, iter; int md_size; @@ -124,6 +125,10 @@ PKCS12_gen_mac(PKCS12 *p12, const char *pass, int passlen, PKCS12error(PKCS12_R_CONTENT_TYPE_NOT_DATA); goto err; } + if ((aos = PKCS7_get_octet_string(p12->authsafes)) == NULL) { + PKCS12error(PKCS12_R_DECODE_ERROR); + goto err; + } salt = p12->mac->salt->data; saltlen = p12->mac->salt->length; @@ -155,8 +160,7 @@ PKCS12_gen_mac(PKCS12 *p12, const char *pass, int passlen, goto err; if (!HMAC_Init_ex(hmac, key, md_size, md_type, NULL)) goto err; - if (!HMAC_Update(hmac, p12->authsafes->d.data->data, - p12->authsafes->d.data->length)) + if (!HMAC_Update(hmac, aos->data, aos->length)) goto err; if (!HMAC_Final(hmac, mac, maclen)) goto err; diff --git a/lib/libcrypto/pkcs12/p12_npas.c b/lib/libcrypto/pkcs12/p12_npas.c index 30dd2ef8c..6d3b43ce2 100644 --- a/lib/libcrypto/pkcs12/p12_npas.c +++ b/lib/libcrypto/pkcs12/p12_npas.c @@ -1,4 +1,4 @@ -/* $OpenBSD: p12_npas.c,v 1.18 2023/02/16 08:38:17 tb Exp $ */ +/* $OpenBSD: p12_npas.c,v 1.27 2024/01/25 15:33:35 tb Exp $ */ /* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL * project 1999. */ @@ -68,130 +68,68 @@ /* PKCS#12 password change routine */ -static int newpass_p12(PKCS12 *p12, const char *oldpass, const char *newpass); -static int newpass_bags(STACK_OF(PKCS12_SAFEBAG) *bags, const char *oldpass, - const char *newpass); -static int newpass_bag(PKCS12_SAFEBAG *bag, const char *oldpass, - const char *newpass); -static int alg_get(X509_ALGOR *alg, int *pnid, int *piter, int *psaltlen); - -/* - * Change the password on a PKCS#12 structure. 
- */ - -int -PKCS12_newpass(PKCS12 *p12, const char *oldpass, const char *newpass) -{ - /* Check for NULL PKCS12 structure */ - - if (!p12) { - PKCS12error(PKCS12_R_INVALID_NULL_PKCS12_POINTER); - return 0; - } - - /* Check the mac */ - - if (!PKCS12_verify_mac(p12, oldpass, -1)) { - PKCS12error(PKCS12_R_MAC_VERIFY_FAILURE); - return 0; - } - - if (!newpass_p12(p12, oldpass, newpass)) { - PKCS12error(PKCS12_R_PARSE_ERROR); - return 0; - } - - return 1; -} -LCRYPTO_ALIAS(PKCS12_newpass); - -/* Parse the outer PKCS#12 structure */ - static int -newpass_p12(PKCS12 *p12, const char *oldpass, const char *newpass) +alg_get(X509_ALGOR *alg, int *nid, int *iter, int *salt_len) { - STACK_OF(PKCS7) *asafes, *newsafes; - STACK_OF(PKCS12_SAFEBAG) *bags; - int i, bagnid, pbe_nid = 0, pbe_iter = 0, pbe_saltlen = 0; - PKCS7 *p7, *p7new; - ASN1_OCTET_STRING *p12_data_tmp = NULL, *macnew = NULL; - unsigned char mac[EVP_MAX_MD_SIZE]; - unsigned int maclen; + const ASN1_OBJECT *aobj; + int param_type; + const void *param; + PBEPARAM *pbe = NULL; + int ret = 0; - if (!(asafes = PKCS12_unpack_authsafes(p12))) - return 0; - if (!(newsafes = sk_PKCS7_new_null())) - return 0; - for (i = 0; i < sk_PKCS7_num(asafes); i++) { - p7 = sk_PKCS7_value(asafes, i); - bagnid = OBJ_obj2nid(p7->type); - if (bagnid == NID_pkcs7_data) { - bags = PKCS12_unpack_p7data(p7); - } else if (bagnid == NID_pkcs7_encrypted) { - bags = PKCS12_unpack_p7encdata(p7, oldpass, -1); - if (!alg_get(p7->d.encrypted->enc_data->algorithm, - &pbe_nid, &pbe_iter, &pbe_saltlen)) { - sk_PKCS12_SAFEBAG_pop_free(bags, - PKCS12_SAFEBAG_free); - bags = NULL; - } - } else - continue; - if (bags == NULL) - goto err; - if (!newpass_bags(bags, oldpass, newpass)) { - sk_PKCS12_SAFEBAG_pop_free(bags, PKCS12_SAFEBAG_free); - goto err; - } - /* Repack bag in same form with new password */ - if (bagnid == NID_pkcs7_data) - p7new = PKCS12_pack_p7data(bags); - else - p7new = PKCS12_pack_p7encdata(pbe_nid, newpass, -1, - NULL, pbe_saltlen, pbe_iter, bags); - sk_PKCS12_SAFEBAG_pop_free(bags, PKCS12_SAFEBAG_free); - if (p7new == NULL) - goto err; - if (sk_PKCS7_push(newsafes, p7new) == 0) - goto err; - } - sk_PKCS7_pop_free(asafes, PKCS7_free); + *nid = *iter = *salt_len = 0; - /* Repack safe: save old safe in case of error */ - - p12_data_tmp = p12->authsafes->d.data; - if (!(p12->authsafes->d.data = ASN1_OCTET_STRING_new())) { - p12->authsafes->d.data = p12_data_tmp; + X509_ALGOR_get0(&aobj, ¶m_type, ¶m, alg); + if (param_type != V_ASN1_SEQUENCE) + goto err; + if ((pbe = ASN1_item_unpack(param, &PBEPARAM_it)) == NULL) goto err; - } - if (!PKCS12_pack_authsafes(p12, newsafes)) - goto saferr; - if (!PKCS12_gen_mac(p12, newpass, -1, mac, &maclen)) - goto saferr; - if (!(macnew = ASN1_OCTET_STRING_new())) - goto saferr; - if (!ASN1_OCTET_STRING_set(macnew, mac, maclen)) - goto saferr; - ASN1_OCTET_STRING_free(p12->mac->dinfo->digest); - p12->mac->dinfo->digest = macnew; - ASN1_OCTET_STRING_free(p12_data_tmp); + /* XXX - can we validate these somehow? 
*/ + *nid = OBJ_obj2nid(alg->algorithm); + *iter = ASN1_INTEGER_get(pbe->iter); + *salt_len = pbe->salt->length; - return 1; + ret = 1; -saferr: - /* Restore old safe */ - ASN1_OCTET_STRING_free(p12->authsafes->d.data); - ASN1_OCTET_STRING_free(macnew); - p12->authsafes->d.data = p12_data_tmp; - return 0; + err: + PBEPARAM_free(pbe); -err: - sk_PKCS7_pop_free(asafes, PKCS7_free); - sk_PKCS7_pop_free(newsafes, PKCS7_free); - return 0; + return ret; } +/* Change password of safebag: only needs handle shrouded keybags */ +static int +newpass_bag(PKCS12_SAFEBAG *bag, const char *oldpass, const char *newpass) +{ + PKCS8_PRIV_KEY_INFO *p8 = NULL; + X509_SIG *keybag; + int nid, salt_len, iter; + int ret = 0; + + if (OBJ_obj2nid(bag->type) != NID_pkcs8ShroudedKeyBag) + goto done; + + if ((p8 = PKCS8_decrypt(bag->value.shkeybag, oldpass, -1)) == NULL) + goto err; + if (!alg_get(bag->value.shkeybag->algor, &nid, &iter, &salt_len)) + goto err; + + if ((keybag = PKCS8_encrypt(nid, NULL, newpass, -1, NULL, salt_len, + iter, p8)) == NULL) + goto err; + + X509_SIG_free(bag->value.shkeybag); + bag->value.shkeybag = keybag; + + done: + ret = 1; + + err: + PKCS8_PRIV_KEY_INFO_free(p8); + + return ret; +} static int newpass_bags(STACK_OF(PKCS12_SAFEBAG) *bags, const char *oldpass, @@ -200,50 +138,163 @@ newpass_bags(STACK_OF(PKCS12_SAFEBAG) *bags, const char *oldpass, int i; for (i = 0; i < sk_PKCS12_SAFEBAG_num(bags); i++) { - if (!newpass_bag(sk_PKCS12_SAFEBAG_value(bags, i), - oldpass, newpass)) + PKCS12_SAFEBAG *bag = sk_PKCS12_SAFEBAG_value(bags, i); + + if (!newpass_bag(bag, oldpass, newpass)) return 0; } - return 1; -} -/* Change password of safebag: only needs handle shrouded keybags */ - -static int -newpass_bag(PKCS12_SAFEBAG *bag, const char *oldpass, const char *newpass) -{ - PKCS8_PRIV_KEY_INFO *p8; - X509_SIG *p8new; - int p8_nid, p8_saltlen, p8_iter; - - if (OBJ_obj2nid(bag->type) != NID_pkcs8ShroudedKeyBag) - return 1; - - if (!(p8 = PKCS8_decrypt(bag->value.shkeybag, oldpass, -1))) - return 0; - if (!alg_get(bag->value.shkeybag->algor, &p8_nid, &p8_iter, - &p8_saltlen)) - return 0; - if (!(p8new = PKCS8_encrypt(p8_nid, NULL, newpass, -1, NULL, p8_saltlen, - p8_iter, p8))) return 0; - X509_SIG_free(bag->value.shkeybag); - bag->value.shkeybag = p8new; return 1; } static int -alg_get(X509_ALGOR *alg, int *pnid, int *piter, int *psaltlen) +pkcs7_repack_data(PKCS7 *pkcs7, STACK_OF(PKCS7) *safes, const char *oldpass, + const char *newpass) { - PBEPARAM *pbe; - const unsigned char *p; + STACK_OF(PKCS12_SAFEBAG) *bags; + PKCS7 *data = NULL; + int ret = 0; - p = alg->parameter->value.sequence->data; - pbe = d2i_PBEPARAM(NULL, &p, alg->parameter->value.sequence->length); - if (!pbe) - return 0; - *pnid = OBJ_obj2nid(alg->algorithm); - *piter = ASN1_INTEGER_get(pbe->iter); - *psaltlen = pbe->salt->length; - PBEPARAM_free(pbe); - return 1; + if ((bags = PKCS12_unpack_p7data(pkcs7)) == NULL) + goto err; + if (!newpass_bags(bags, oldpass, newpass)) + goto err; + if ((data = PKCS12_pack_p7data(bags)) == NULL) + goto err; + if (sk_PKCS7_push(safes, data) == 0) + goto err; + data = NULL; + + ret = 1; + + err: + sk_PKCS12_SAFEBAG_pop_free(bags, PKCS12_SAFEBAG_free); + PKCS7_free(data); + + return ret; } + +static int +pkcs7_repack_encdata(PKCS7 *pkcs7, STACK_OF(PKCS7) *safes, const char *oldpass, + const char *newpass) +{ + STACK_OF(PKCS12_SAFEBAG) *bags; + int nid, iter, salt_len; + PKCS7 *data = NULL; + int ret = 0; + + if ((bags = PKCS12_unpack_p7encdata(pkcs7, oldpass, -1)) == NULL) + goto err; + if 
(!alg_get(pkcs7->d.encrypted->enc_data->algorithm, &nid, + &iter, &salt_len)) + goto err; + if (!newpass_bags(bags, oldpass, newpass)) + goto err; + if ((data = PKCS12_pack_p7encdata(nid, newpass, -1, NULL, salt_len, + iter, bags)) == NULL) + goto err; + if (!sk_PKCS7_push(safes, data)) + goto err; + data = NULL; + + ret = 1; + + err: + sk_PKCS12_SAFEBAG_pop_free(bags, PKCS12_SAFEBAG_free); + PKCS7_free(data); + + return ret; +} + +static int +pkcs12_repack_authsafes(PKCS12 *pkcs12, STACK_OF(PKCS7) *safes, + const char *newpass) +{ + ASN1_OCTET_STRING *old_data; + ASN1_OCTET_STRING *new_mac = NULL; + unsigned char mac[EVP_MAX_MD_SIZE]; + unsigned int mac_len; + int ret = 0; + + if ((old_data = pkcs12->authsafes->d.data) == NULL) + goto err; + if ((pkcs12->authsafes->d.data = ASN1_OCTET_STRING_new()) == NULL) + goto err; + if (!PKCS12_pack_authsafes(pkcs12, safes)) + goto err; + if (!PKCS12_gen_mac(pkcs12, newpass, -1, mac, &mac_len)) + goto err; + if ((new_mac = ASN1_OCTET_STRING_new()) == NULL) + goto err; + if (!ASN1_OCTET_STRING_set(new_mac, mac, mac_len)) + goto err; + + ASN1_OCTET_STRING_free(pkcs12->mac->dinfo->digest); + pkcs12->mac->dinfo->digest = new_mac; + new_mac = NULL; + + ASN1_OCTET_STRING_free(old_data); + old_data = NULL; + + ret = 1; + + err: + if (old_data != NULL) { + ASN1_OCTET_STRING_free(pkcs12->authsafes->d.data); + pkcs12->authsafes->d.data = old_data; + } + explicit_bzero(mac, sizeof(mac)); + ASN1_OCTET_STRING_free(new_mac); + + return ret; +} + +int +PKCS12_newpass(PKCS12 *pkcs12, const char *oldpass, const char *newpass) +{ + STACK_OF(PKCS7) *authsafes = NULL, *safes = NULL; + int i; + int ret = 0; + + if (pkcs12 == NULL) { + PKCS12error(PKCS12_R_INVALID_NULL_PKCS12_POINTER); + goto err; + } + + if (!PKCS12_verify_mac(pkcs12, oldpass, -1)) { + PKCS12error(PKCS12_R_MAC_VERIFY_FAILURE); + goto err; + } + + if ((authsafes = PKCS12_unpack_authsafes(pkcs12)) == NULL) + goto err; + if ((safes = sk_PKCS7_new_null()) == NULL) + goto err; + + for (i = 0; i < sk_PKCS7_num(authsafes); i++) { + PKCS7 *pkcs7 = sk_PKCS7_value(authsafes, i); + + switch (OBJ_obj2nid(pkcs7->type)) { + case NID_pkcs7_data: + if (!pkcs7_repack_data(pkcs7, safes, oldpass, newpass)) + goto err; + break; + case NID_pkcs7_encrypted: + if (!pkcs7_repack_encdata(pkcs7, safes, oldpass, newpass)) + goto err; + break; + } + } + + if (!pkcs12_repack_authsafes(pkcs12, safes, newpass)) + goto err; + + ret = 1; + + err: + sk_PKCS7_pop_free(authsafes, PKCS7_free); + sk_PKCS7_pop_free(safes, PKCS7_free); + + return ret; +} +LCRYPTO_ALIAS(PKCS12_newpass); diff --git a/lib/libcrypto/pkcs12/pkcs12_local.h b/lib/libcrypto/pkcs12/pkcs12_local.h index 1d6f0558e..8d82d2f46 100644 --- a/lib/libcrypto/pkcs12/pkcs12_local.h +++ b/lib/libcrypto/pkcs12/pkcs12_local.h @@ -1,4 +1,4 @@ -/* $OpenBSD: pkcs12_local.h,v 1.3 2022/11/26 17:23:18 tb Exp $ */ +/* $OpenBSD: pkcs12_local.h,v 1.4 2024/01/25 13:44:08 tb Exp $ */ /* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL * project 1999. */ @@ -96,6 +96,9 @@ struct pkcs12_bag_st { } value; }; +/* XXX - should go into pkcs7_local.h.
*/ +ASN1_OCTET_STRING *PKCS7_get_octet_string(PKCS7 *p7); + __END_HIDDEN_DECLS #endif /* !HEADER_PKCS12_LOCAL_H */ diff --git a/lib/libcrypto/pkcs7/pk7_doit.c b/lib/libcrypto/pkcs7/pk7_doit.c index 755badf41..607daea1a 100644 --- a/lib/libcrypto/pkcs7/pk7_doit.c +++ b/lib/libcrypto/pkcs7/pk7_doit.c @@ -1,4 +1,4 @@ -/* $OpenBSD: pk7_doit.c,v 1.54 2023/11/15 00:55:43 tb Exp $ */ +/* $OpenBSD: pk7_doit.c,v 1.55 2024/01/25 13:44:08 tb Exp $ */ /* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) * All rights reserved. * @@ -92,7 +92,7 @@ PKCS7_type_is_other(PKCS7* p7) } -static ASN1_OCTET_STRING * +ASN1_OCTET_STRING * PKCS7_get_octet_string(PKCS7 *p7) { if (PKCS7_type_is_data(p7)) diff --git a/lib/libcrypto/pkcs7/pk7_mime.c b/lib/libcrypto/pkcs7/pk7_mime.c index f00e18c7e..381335589 100644 --- a/lib/libcrypto/pkcs7/pk7_mime.c +++ b/lib/libcrypto/pkcs7/pk7_mime.c @@ -1,4 +1,4 @@ -/* $OpenBSD: pk7_mime.c,v 1.19 2023/05/02 09:56:12 tb Exp $ */ +/* $OpenBSD: pk7_mime.c,v 1.20 2024/01/25 13:44:08 tb Exp $ */ /* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL * project. */ @@ -89,8 +89,11 @@ SMIME_write_PKCS7(BIO *bio, PKCS7 *p7, BIO *data, int flags) STACK_OF(X509_ALGOR) *mdalgs = NULL; int ctype_nid; - if ((ctype_nid = OBJ_obj2nid(p7->type)) == NID_pkcs7_signed) + if ((ctype_nid = OBJ_obj2nid(p7->type)) == NID_pkcs7_signed) { + if (p7->d.sign == NULL) + return 0; mdalgs = p7->d.sign->md_algs; + } flags ^= SMIME_OLDMIME; diff --git a/lib/libcrypto/x509/x509_lib.c b/lib/libcrypto/x509/x509_lib.c index 93f8dc207..5ddfc3761 100644 --- a/lib/libcrypto/x509/x509_lib.c +++ b/lib/libcrypto/x509/x509_lib.c @@ -1,4 +1,4 @@ -/* $OpenBSD: x509_lib.c,v 1.14 2023/04/25 10:56:58 tb Exp $ */ +/* $OpenBSD: x509_lib.c,v 1.16 2024/01/25 15:09:22 tb Exp $ */ /* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL * project 1999. */ @@ -65,8 +65,6 @@ #include "x509_local.h" -static STACK_OF(X509V3_EXT_METHOD) *ext_list = NULL; - extern const X509V3_EXT_METHOD v3_bcons, v3_nscert, v3_key_usage, v3_ext_ku; extern const X509V3_EXT_METHOD v3_pkey_usage_period, v3_info, v3_sinfo; extern const X509V3_EXT_METHOD v3_ns_ia5_list[], v3_alt[], v3_skey_id, v3_akey_id; @@ -80,10 +78,6 @@ extern const X509V3_EXT_METHOD v3_name_constraints, v3_inhibit_anyp, v3_idp; extern const X509V3_EXT_METHOD v3_addr, v3_asid; extern const X509V3_EXT_METHOD v3_ct_scts[3]; -/* - * This table needs to be sorted by increasing ext_nid values for OBJ_bsearch_. 
- */ - static const X509V3_EXT_METHOD *standard_exts[] = { &v3_nscert, &v3_ns_ia5_list[0], @@ -142,62 +136,17 @@ static const X509V3_EXT_METHOD *standard_exts[] = { #define STANDARD_EXTENSION_COUNT (sizeof(standard_exts) / sizeof(standard_exts[0])) -static int -ext_cmp(const X509V3_EXT_METHOD * const *a, const X509V3_EXT_METHOD * const *b) -{ - return ((*a)->ext_nid - (*b)->ext_nid); -} - -int -X509V3_EXT_add(X509V3_EXT_METHOD *ext) -{ - if (!ext_list && !(ext_list = sk_X509V3_EXT_METHOD_new(ext_cmp))) { - X509V3error(ERR_R_MALLOC_FAILURE); - return 0; - } - if (!sk_X509V3_EXT_METHOD_push(ext_list, ext)) { - X509V3error(ERR_R_MALLOC_FAILURE); - return 0; - } - return 1; -} -LCRYPTO_ALIAS(X509V3_EXT_add); - -static int -ext_cmp_BSEARCH_CMP_FN(const void *a_, const void *b_) -{ - const X509V3_EXT_METHOD * const *a = a_; - const X509V3_EXT_METHOD * const *b = b_; - return ext_cmp(a, b); -} - -static const X509V3_EXT_METHOD ** -OBJ_bsearch_ext(const X509V3_EXT_METHOD **key, - const X509V3_EXT_METHOD *const *base, int num) -{ - return (const X509V3_EXT_METHOD **)OBJ_bsearch_(key, base, num, - sizeof(const X509V3_EXT_METHOD *), ext_cmp_BSEARCH_CMP_FN); -} - const X509V3_EXT_METHOD * X509V3_EXT_get_nid(int nid) { - X509V3_EXT_METHOD tmp; - const X509V3_EXT_METHOD *t = &tmp, * const *ret; - int idx; + size_t i; - if (nid < 0) - return NULL; - tmp.ext_nid = nid; - ret = OBJ_bsearch_ext(&t, standard_exts, STANDARD_EXTENSION_COUNT); - if (ret) - return *ret; - if (!ext_list) - return NULL; - idx = sk_X509V3_EXT_METHOD_find(ext_list, &tmp); - if (idx == -1) - return NULL; - return sk_X509V3_EXT_METHOD_value(ext_list, idx); + for (i = 0; i < STANDARD_EXTENSION_COUNT; i++) { + if (standard_exts[i]->ext_nid == nid) + return standard_exts[i]; + } + + return NULL; } LCRYPTO_ALIAS(X509V3_EXT_get_nid); @@ -212,56 +161,6 @@ X509V3_EXT_get(X509_EXTENSION *ext) } LCRYPTO_ALIAS(X509V3_EXT_get); -int -X509V3_EXT_add_list(X509V3_EXT_METHOD *extlist) -{ - for (; extlist->ext_nid!=-1; extlist++) - if (!X509V3_EXT_add(extlist)) - return 0; - return 1; -} -LCRYPTO_ALIAS(X509V3_EXT_add_list); - -int -X509V3_EXT_add_alias(int nid_to, int nid_from) -{ - const X509V3_EXT_METHOD *ext; - X509V3_EXT_METHOD *tmpext; - - if (!(ext = X509V3_EXT_get_nid(nid_from))) { - X509V3error(X509V3_R_EXTENSION_NOT_FOUND); - return 0; - } - if (!(tmpext = malloc(sizeof(X509V3_EXT_METHOD)))) { - X509V3error(ERR_R_MALLOC_FAILURE); - return 0; - } - *tmpext = *ext; - tmpext->ext_nid = nid_to; - tmpext->ext_flags |= X509V3_EXT_DYNAMIC; - if (!X509V3_EXT_add(tmpext)) { - free(tmpext); - return 0; - } - return 1; -} -LCRYPTO_ALIAS(X509V3_EXT_add_alias); - -static void -ext_list_free(X509V3_EXT_METHOD *ext) -{ - if (ext->ext_flags & X509V3_EXT_DYNAMIC) - free(ext); -} - -void -X509V3_EXT_cleanup(void) -{ - sk_X509V3_EXT_METHOD_pop_free(ext_list, ext_list_free); - ext_list = NULL; -} -LCRYPTO_ALIAS(X509V3_EXT_cleanup); - int X509V3_add_standard_extensions(void) { @@ -434,3 +333,37 @@ err: return 0; } LCRYPTO_ALIAS(X509V3_add1_i2d); + +/* + * XXX - remove all the functions below in the next major bump. 
+ */ + +int +X509V3_EXT_add(X509V3_EXT_METHOD *ext) +{ + X509V3error(ERR_R_DISABLED); + return 0; +} +LCRYPTO_ALIAS(X509V3_EXT_add); + +int +X509V3_EXT_add_list(X509V3_EXT_METHOD *extlist) +{ + X509V3error(ERR_R_DISABLED); + return 0; +} +LCRYPTO_ALIAS(X509V3_EXT_add_list); + +int +X509V3_EXT_add_alias(int nid_to, int nid_from) +{ + X509V3error(ERR_R_DISABLED); + return 0; +} +LCRYPTO_ALIAS(X509V3_EXT_add_alias); + +void +X509V3_EXT_cleanup(void) +{ +} +LCRYPTO_ALIAS(X509V3_EXT_cleanup); diff --git a/sys/dev/ic/qwx.c b/sys/dev/ic/qwx.c index 2cc510274..2eaac1414 100644 --- a/sys/dev/ic/qwx.c +++ b/sys/dev/ic/qwx.c @@ -1,4 +1,4 @@ -/* $OpenBSD: qwx.c,v 1.2 2024/01/02 17:39:08 stsp Exp $ */ +/* $OpenBSD: qwx.c,v 1.8 2024/01/25 17:00:20 stsp Exp $ */ /* * Copyright 2023 Stefan Sperling @@ -85,6 +85,7 @@ #endif #define GENMASK(h, l) (((~0UL) >> (BITS_PER_LONG - (h) - 1)) & ((~0UL) << (l))) #define __bf_shf(x) (__builtin_ffsll(x) - 1) +#define ffz(x) ffs(~(x)) #define FIELD_GET(_m, _v) ((typeof(_m))(((_v) & (_m)) >> __bf_shf(_m))) #define FIELD_PREP(_m, _v) (((typeof(_m))(_v) << __bf_shf(_m)) & (_m)) #define BIT(x) (1UL << (x)) @@ -122,13 +123,20 @@ int qwx_ce_send(struct qwx_softc *, struct mbuf *, uint8_t, uint16_t); int qwx_htc_connect_service(struct qwx_htc *, struct qwx_htc_svc_conn_req *, struct qwx_htc_svc_conn_resp *); void qwx_hal_srng_shadow_update_hp_tp(struct qwx_softc *, struct hal_srng *); -int qwx_dp_service_srng(struct qwx_softc *); void qwx_wmi_free_dbring_caps(struct qwx_softc *); +int qwx_wmi_set_peer_param(struct qwx_softc *, uint8_t *, uint32_t, + uint32_t, uint32_t, uint32_t); +int qwx_wmi_peer_rx_reorder_queue_setup(struct qwx_softc *, int, int, + uint8_t *, uint64_t, uint8_t, uint8_t, uint32_t); +const void **qwx_wmi_tlv_parse_alloc(struct qwx_softc *, const void *, size_t); int qwx_core_init(struct qwx_softc *); int qwx_qmi_event_server_arrive(struct qwx_softc *); int qwx_mac_register(struct qwx_softc *); int qwx_mac_start(struct qwx_softc *); void qwx_mac_scan_finish(struct qwx_softc *); +int qwx_dp_tx_send_reo_cmd(struct qwx_softc *, struct dp_rx_tid *, + enum hal_reo_cmd_type , struct ath11k_hal_reo_cmd *, + void (*func)(struct qwx_dp *, void *, enum hal_reo_cmd_status)); int qwx_scan(struct qwx_softc *); void qwx_scan_abort(struct qwx_softc *); @@ -142,7 +150,11 @@ int qwx_run_stop(struct qwx_softc *); struct ieee80211_node * qwx_node_alloc(struct ieee80211com *ic) { - return malloc(sizeof(struct qwx_node), M_DEVBUF, M_NOWAIT | M_ZERO); + struct qwx_node *nq; + + nq = malloc(sizeof(struct qwx_node), M_DEVBUF, M_NOWAIT | M_ZERO); + nq->peer.peer_id = HAL_INVALID_PEERID; + return (struct ieee80211_node *)nq; } int @@ -272,6 +284,7 @@ qwx_stop(struct ifnet *ifp) sc->ns_nstate = IEEE80211_S_INIT; sc->scan.state = ATH11K_SCAN_IDLE; sc->vdev_id_11d_scan = QWX_11D_INVALID_VDEV_ID; + sc->pdevs_active = 0; /* power off hardware */ qwx_core_deinit(sc); @@ -624,14 +637,37 @@ void qwx_init_wmi_config_ipq8074(struct qwx_softc *sc, config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt; } +int +qwx_hw_mac_id_to_pdev_id_ipq8074(struct ath11k_hw_params *hw, int mac_id) +{ + return mac_id; +} + +int +qwx_hw_mac_id_to_srng_id_ipq8074(struct ath11k_hw_params *hw, int mac_id) +{ + return 0; +} + +int qwx_hw_mac_id_to_pdev_id_qca6390(struct ath11k_hw_params *hw, int mac_id) +{ + return 0; +} + +int +qwx_hw_mac_id_to_srng_id_qca6390(struct ath11k_hw_params *hw, int mac_id) +{ + return mac_id; +} + const struct ath11k_hw_ops ipq8074_ops = { #if notyet .get_hw_mac_from_pdev_id = 
ath11k_hw_ipq8074_mac_from_pdev_id, #endif .wmi_init_config = qwx_init_wmi_config_ipq8074, + .mac_id_to_pdev_id = qwx_hw_mac_id_to_pdev_id_ipq8074, + .mac_id_to_srng_id = qwx_hw_mac_id_to_srng_id_ipq8074, #if notyet - .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074, - .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074, .tx_mesh_enable = ath11k_hw_ipq8074_tx_mesh_enable, .rx_desc_get_first_msdu = ath11k_hw_ipq8074_rx_desc_get_first_msdu, .rx_desc_get_last_msdu = ath11k_hw_ipq8074_rx_desc_get_last_msdu, @@ -673,9 +709,9 @@ const struct ath11k_hw_ops ipq6018_ops = { .get_hw_mac_from_pdev_id = ath11k_hw_ipq6018_mac_from_pdev_id, #endif .wmi_init_config = qwx_init_wmi_config_ipq8074, + .mac_id_to_pdev_id = qwx_hw_mac_id_to_pdev_id_ipq8074, + .mac_id_to_srng_id = qwx_hw_mac_id_to_srng_id_ipq8074, #if notyet - .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074, - .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074, .tx_mesh_enable = ath11k_hw_ipq8074_tx_mesh_enable, .rx_desc_get_first_msdu = ath11k_hw_ipq8074_rx_desc_get_first_msdu, .rx_desc_get_last_msdu = ath11k_hw_ipq8074_rx_desc_get_last_msdu, @@ -717,9 +753,9 @@ const struct ath11k_hw_ops qca6390_ops = { .get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id, #endif .wmi_init_config = qwx_init_wmi_config_qca6390, + .mac_id_to_pdev_id = qwx_hw_mac_id_to_pdev_id_qca6390, + .mac_id_to_srng_id = qwx_hw_mac_id_to_srng_id_qca6390, #if notyet - .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_qca6390, - .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_qca6390, .tx_mesh_enable = ath11k_hw_ipq8074_tx_mesh_enable, .rx_desc_get_first_msdu = ath11k_hw_ipq8074_rx_desc_get_first_msdu, .rx_desc_get_last_msdu = ath11k_hw_ipq8074_rx_desc_get_last_msdu, @@ -761,9 +797,9 @@ const struct ath11k_hw_ops qcn9074_ops = { .get_hw_mac_from_pdev_id = ath11k_hw_ipq6018_mac_from_pdev_id, #endif .wmi_init_config = qwx_init_wmi_config_ipq8074, + .mac_id_to_pdev_id = qwx_hw_mac_id_to_pdev_id_ipq8074, + .mac_id_to_srng_id = qwx_hw_mac_id_to_srng_id_ipq8074, #ifdef notyet - .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074, - .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074, .tx_mesh_enable = ath11k_hw_qcn9074_tx_mesh_enable, .rx_desc_get_first_msdu = ath11k_hw_qcn9074_rx_desc_get_first_msdu, .rx_desc_get_last_msdu = ath11k_hw_qcn9074_rx_desc_get_last_msdu, @@ -805,9 +841,9 @@ const struct ath11k_hw_ops wcn6855_ops = { .get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id, #endif .wmi_init_config = qwx_init_wmi_config_qca6390, + .mac_id_to_pdev_id = qwx_hw_mac_id_to_pdev_id_qca6390, + .mac_id_to_srng_id = qwx_hw_mac_id_to_srng_id_qca6390, #ifdef notyet - .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_qca6390, - .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_qca6390, .tx_mesh_enable = ath11k_hw_wcn6855_tx_mesh_enable, .rx_desc_get_first_msdu = ath11k_hw_wcn6855_rx_desc_get_first_msdu, .rx_desc_get_last_msdu = ath11k_hw_wcn6855_rx_desc_get_last_msdu, @@ -849,9 +885,9 @@ const struct ath11k_hw_ops wcn6750_ops = { .get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id, #endif .wmi_init_config = qwx_init_wmi_config_qca6390, + .mac_id_to_pdev_id = qwx_hw_mac_id_to_pdev_id_qca6390, + .mac_id_to_srng_id = qwx_hw_mac_id_to_srng_id_qca6390, #if notyet - .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_qca6390, - .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_qca6390, .tx_mesh_enable = ath11k_hw_qcn9074_tx_mesh_enable, .rx_desc_get_first_msdu = ath11k_hw_qcn9074_rx_desc_get_first_msdu, .rx_desc_get_last_msdu = 
ath11k_hw_qcn9074_rx_desc_get_last_msdu, @@ -2079,7 +2115,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .hw_rev = ATH11K_HW_IPQ8074, .name = "ipq8074 hw2.0", .fw = { - .dir = "IPQ8074/hw2.0", + .dir = "ipq8074-hw2.0", .board_size = 256 * 1024, .cal_offset = 128 * 1024, }, @@ -2169,7 +2205,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .hw_rev = ATH11K_HW_IPQ6018_HW10, .name = "ipq6018 hw1.0", .fw = { - .dir = "IPQ6018/hw1.0", + .dir = "ipq6018-hw1.0", .board_size = 256 * 1024, .cal_offset = 128 * 1024, }, @@ -2256,7 +2292,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .name = "qca6390 hw2.0", .hw_rev = ATH11K_HW_QCA6390_HW20, .fw = { - .dir = "QCA6390/hw2.0", + .dir = "qca6390-hw2.0", .board_size = 256 * 1024, .cal_offset = 128 * 1024, }, @@ -2345,7 +2381,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .name = "qcn9074 hw1.0", .hw_rev = ATH11K_HW_QCN9074_HW10, .fw = { - .dir = "QCN9074/hw1.0", + .dir = "qcn9074-hw1.0", .board_size = 256 * 1024, .cal_offset = 128 * 1024, }, @@ -2433,7 +2469,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .name = "wcn6855 hw2.0", .hw_rev = ATH11K_HW_WCN6855_HW20, .fw = { - .dir = "WCN6855/hw2.0", + .dir = "wcn6855-hw2.0", .board_size = 256 * 1024, .cal_offset = 128 * 1024, }, @@ -2522,7 +2558,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .name = "wcn6855 hw2.1", .hw_rev = ATH11K_HW_WCN6855_HW21, .fw = { - .dir = "WCN6855/hw2.1", + .dir = "wcn6855-hw2.1", .board_size = 256 * 1024, .cal_offset = 128 * 1024, }, @@ -2610,7 +2646,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .name = "wcn6750 hw1.0", .hw_rev = ATH11K_HW_WCN6750_HW10, .fw = { - .dir = "WCN6750/hw1.0", + .dir = "wcn6750-hw1.0", .board_size = 256 * 1024, .cal_offset = 128 * 1024, }, @@ -4743,23 +4779,34 @@ const struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = { }; int -qwx_intr(struct qwx_softc *sc) +qwx_ce_intr(void *arg) { - int ret = 0, i; + struct qwx_ce_pipe *pipe = arg; + struct qwx_softc *sc = pipe->sc; - if (test_bit(ATH11K_FLAG_CE_IRQ_ENABLED, sc->sc_flags)) { - for (i = 0; i < sc->hw_params.ce_count; i++) { - if (qwx_ce_per_engine_service(sc, i)) - ret = 1; - } + if (!test_bit(ATH11K_FLAG_CE_IRQ_ENABLED, sc->sc_flags) || + ((sc->msi_ce_irqmask & (1 << pipe->pipe_num)) == 0)) { + DPRINTF("%s: unexpected interrupt on pipe %d\n", + __func__, pipe->pipe_num); + return 1; } - if (test_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, sc->sc_flags)) { - if (qwx_dp_service_srng(sc)) - ret = 1; + return qwx_ce_per_engine_service(sc, pipe->pipe_num); +} + +int +qwx_ext_intr(void *arg) +{ + struct qwx_ext_irq_grp *irq_grp = arg; + struct qwx_softc *sc = irq_grp->sc; + + if (!test_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, sc->sc_flags)) { + DPRINTF("%s: unexpected interrupt for ext group %d\n", + __func__, irq_grp->grp_id); + return 1; } - return ret; + return qwx_dp_service_srng(sc, irq_grp->grp_id); } const char *qmi_data_type_name[QMI_NUM_DATA_TYPES] = { @@ -7249,7 +7296,7 @@ qwx_core_fetch_bdf(struct qwx_softc *sc, u_char **data, size_t *len, char boardname[200]; int ret; - ret = snprintf(path, sizeof(path), "%s/%s/%s", + ret = snprintf(path, sizeof(path), "%s-%s-%s", ATH11K_FW_DIR, sc->hw_params.fw.dir, filename); if (ret < 0 || ret >= sizeof(path)) return ENOSPC; @@ -7537,7 +7584,7 @@ qwx_qmi_m3_load(struct qwx_softc *sc) char path[PATH_MAX]; int ret; - ret = snprintf(path, sizeof(path), "%s/%s/%s", + ret = snprintf(path, sizeof(path), "%s-%s-%s", ATH11K_FW_DIR, sc->hw_params.fw.dir, 
ATH11K_M3_FILE); if (ret < 0 || ret >= sizeof(path)) return ENOSPC; @@ -7709,6 +7756,19 @@ qwx_hal_srng_src_get_next_reaped(struct qwx_softc *sc, struct hal_srng *srng) return desc; } +uint32_t * +qwx_hal_srng_src_peek(struct qwx_softc *sc, struct hal_srng *srng) +{ +#ifdef notyet + lockdep_assert_held(&srng->lock); +#endif + if (((srng->u.src_ring.hp + srng->entry_size) % srng->ring_size) == + srng->u.src_ring.cached_tp) + return NULL; + + return srng->ring_base_vaddr + srng->u.src_ring.hp; +} + void qwx_get_msi_address(struct qwx_softc *sc, uint32_t *addr_lo, uint32_t *addr_hi) @@ -8663,6 +8723,247 @@ qwx_hal_reo_init_cmd_ring(struct qwx_softc *sc, struct hal_srng *srng) } } +int +qwx_hal_reo_cmd_queue_stats(struct hal_tlv_hdr *tlv, struct ath11k_hal_reo_cmd *cmd) +{ + struct hal_reo_get_queue_stats *desc; + + tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_GET_QUEUE_STATS) | + FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc)); + + desc = (struct hal_reo_get_queue_stats *)tlv->value; + + desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED; + if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS) + desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED; + + desc->queue_addr_lo = cmd->addr_lo; + desc->info0 = FIELD_PREP(HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI, + cmd->addr_hi); + if (cmd->flag & HAL_REO_CMD_FLG_STATS_CLEAR) + desc->info0 |= HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS; + + return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0); +} + +int +qwx_hal_reo_cmd_flush_cache(struct ath11k_hal *hal, struct hal_tlv_hdr *tlv, + struct ath11k_hal_reo_cmd *cmd) +{ + struct hal_reo_flush_cache *desc; + uint8_t avail_slot = ffz(hal->avail_blk_resource); + + if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) { + if (avail_slot >= HAL_MAX_AVAIL_BLK_RES) + return ENOSPC; + + hal->current_blk_index = avail_slot; + } + + tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_FLUSH_CACHE) | + FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc)); + + desc = (struct hal_reo_flush_cache *)tlv->value; + + desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED; + if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS) + desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED; + + desc->cache_addr_lo = cmd->addr_lo; + desc->info0 = FIELD_PREP(HAL_REO_FLUSH_CACHE_INFO0_CACHE_ADDR_HI, + cmd->addr_hi); + + if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS) + desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FWD_ALL_MPDUS; + + if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) { + desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_BLOCK_CACHE_USAGE; + desc->info0 |= + FIELD_PREP(HAL_REO_FLUSH_CACHE_INFO0_BLOCK_RESRC_IDX, + avail_slot); + } + + if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_NO_INVAL) + desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FLUSH_WO_INVALIDATE; + + if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_ALL) + desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL; + + return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0); +} + +int +qwx_hal_reo_cmd_update_rx_queue(struct hal_tlv_hdr *tlv, + struct ath11k_hal_reo_cmd *cmd) +{ + struct hal_reo_update_rx_queue *desc; + + tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_UPDATE_RX_REO_QUEUE) | + FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc)); + + desc = (struct hal_reo_update_rx_queue *)tlv->value; + + desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED; + if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS) + desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED; + + desc->queue_addr_lo = cmd->addr_lo; + desc->info0 = + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_QUEUE_ADDR_HI, + cmd->addr_hi) | + 
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RX_QUEUE_NUM, + !!(cmd->upd0 & HAL_REO_CMD_UPD0_RX_QUEUE_NUM)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_VLD, + !!(cmd->upd0 & HAL_REO_CMD_UPD0_VLD)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_ASSOC_LNK_DESC_CNT, + !!(cmd->upd0 & HAL_REO_CMD_UPD0_ALDC)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_DIS_DUP_DETECTION, + !!(cmd->upd0 & HAL_REO_CMD_UPD0_DIS_DUP_DETECTION)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SOFT_REORDER_EN, + !!(cmd->upd0 & HAL_REO_CMD_UPD0_SOFT_REORDER_EN)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_AC, + !!(cmd->upd0 & HAL_REO_CMD_UPD0_AC)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BAR, + !!(cmd->upd0 & HAL_REO_CMD_UPD0_BAR)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RETRY, + !!(cmd->upd0 & HAL_REO_CMD_UPD0_RETRY)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_CHECK_2K_MODE, + !!(cmd->upd0 & HAL_REO_CMD_UPD0_CHECK_2K_MODE)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_OOR_MODE, + !!(cmd->upd0 & HAL_REO_CMD_UPD0_OOR_MODE)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BA_WINDOW_SIZE, + !!(cmd->upd0 & HAL_REO_CMD_UPD0_BA_WINDOW_SIZE)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_CHECK, + !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_CHECK)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_EVEN_PN, + !!(cmd->upd0 & HAL_REO_CMD_UPD0_EVEN_PN)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_UNEVEN_PN, + !!(cmd->upd0 & HAL_REO_CMD_UPD0_UNEVEN_PN)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_HANDLE_ENABLE, + !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_SIZE, + !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_SIZE)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_IGNORE_AMPDU_FLG, + !!(cmd->upd0 & HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SVLD, + !!(cmd->upd0 & HAL_REO_CMD_UPD0_SVLD)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SSN, + !!(cmd->upd0 & HAL_REO_CMD_UPD0_SSN)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SEQ_2K_ERR, + !!(cmd->upd0 & HAL_REO_CMD_UPD0_SEQ_2K_ERR)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_VALID, + !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_VALID)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN, + !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN)); + + desc->info1 = + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_RX_QUEUE_NUMBER, + cmd->rx_queue_num) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_VLD, + !!(cmd->upd1 & HAL_REO_CMD_UPD1_VLD)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_ASSOC_LNK_DESC_COUNTER, + FIELD_GET(HAL_REO_CMD_UPD1_ALDC, cmd->upd1)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_DIS_DUP_DETECTION, + !!(cmd->upd1 & HAL_REO_CMD_UPD1_DIS_DUP_DETECTION)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_SOFT_REORDER_EN, + !!(cmd->upd1 & HAL_REO_CMD_UPD1_SOFT_REORDER_EN)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_AC, + FIELD_GET(HAL_REO_CMD_UPD1_AC, cmd->upd1)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_BAR, + !!(cmd->upd1 & HAL_REO_CMD_UPD1_BAR)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_CHECK_2K_MODE, + !!(cmd->upd1 & HAL_REO_CMD_UPD1_CHECK_2K_MODE)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_RETRY, + !!(cmd->upd1 & HAL_REO_CMD_UPD1_RETRY)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_OOR_MODE, + !!(cmd->upd1 & HAL_REO_CMD_UPD1_OOR_MODE)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_PN_CHECK, + !!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_CHECK)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_EVEN_PN, + !!(cmd->upd1 & HAL_REO_CMD_UPD1_EVEN_PN)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_UNEVEN_PN, + !!(cmd->upd1 & HAL_REO_CMD_UPD1_UNEVEN_PN)) | + 
FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE, + !!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG, + !!(cmd->upd1 & HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG)); + + if (cmd->pn_size == 24) + cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_24; + else if (cmd->pn_size == 48) + cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_48; + else if (cmd->pn_size == 128) + cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_128; + + if (cmd->ba_window_size < 1) + cmd->ba_window_size = 1; + + if (cmd->ba_window_size == 1) + cmd->ba_window_size++; + + desc->info2 = FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE, + cmd->ba_window_size - 1) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE, cmd->pn_size) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SVLD, + !!(cmd->upd2 & HAL_REO_CMD_UPD2_SVLD)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SSN, + FIELD_GET(HAL_REO_CMD_UPD2_SSN, cmd->upd2)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR, + !!(cmd->upd2 & HAL_REO_CMD_UPD2_SEQ_2K_ERR)) | + FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR, + !!(cmd->upd2 & HAL_REO_CMD_UPD2_PN_ERR)); + + return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0); +} + +int +qwx_hal_reo_cmd_send(struct qwx_softc *sc, struct hal_srng *srng, + enum hal_reo_cmd_type type, struct ath11k_hal_reo_cmd *cmd) +{ + struct hal_tlv_hdr *reo_desc; + int ret; +#ifdef notyet + spin_lock_bh(&srng->lock); +#endif + qwx_hal_srng_access_begin(sc, srng); + reo_desc = (struct hal_tlv_hdr *)qwx_hal_srng_src_get_next_entry(sc, srng); + if (!reo_desc) { + ret = ENOBUFS; + goto out; + } + + switch (type) { + case HAL_REO_CMD_GET_QUEUE_STATS: + ret = qwx_hal_reo_cmd_queue_stats(reo_desc, cmd); + break; + case HAL_REO_CMD_FLUSH_CACHE: + ret = qwx_hal_reo_cmd_flush_cache(&sc->hal, reo_desc, cmd); + break; + case HAL_REO_CMD_UPDATE_RX_QUEUE: + ret = qwx_hal_reo_cmd_update_rx_queue(reo_desc, cmd); + break; + case HAL_REO_CMD_FLUSH_QUEUE: + case HAL_REO_CMD_UNBLOCK_CACHE: + case HAL_REO_CMD_FLUSH_TIMEOUT_LIST: + printf("%s: unsupported reo command %d\n", + sc->sc_dev.dv_xname, type); + ret = ENOTSUP; + break; + default: + printf("%s: unknown reo command %d\n", + sc->sc_dev.dv_xname, type); + ret = EINVAL; + break; + } + + qwx_dp_shadow_start_timer(sc, srng, &sc->dp.reo_cmd_timer); +out: + qwx_hal_srng_access_end(sc, srng); +#ifdef notyet + spin_unlock_bh(&srng->lock); +#endif + return ret; +} int qwx_dp_srng_common_setup(struct qwx_softc *sc) { @@ -10178,6 +10479,142 @@ qwx_ready_event(struct qwx_softc *sc, struct mbuf *m) wakeup(&sc->wmi.unified_ready); } +int +qwx_pull_peer_del_resp_ev(struct qwx_softc *sc, struct mbuf *m, + struct wmi_peer_delete_resp_event *peer_del_resp) +{ + const void **tb; + const struct wmi_peer_delete_resp_event *ev; + int ret; + + tb = qwx_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len); + if (tb == NULL) { + ret = ENOMEM; + printf("%s: failed to parse tlv: %d\n", + sc->sc_dev.dv_xname, ret); + return ret; + } + + ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT]; + if (!ev) { + printf("%s: failed to fetch peer delete resp ev\n", + sc->sc_dev.dv_xname); + free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb)); + return EPROTO; + } + + memset(peer_del_resp, 0, sizeof(*peer_del_resp)); + + peer_del_resp->vdev_id = ev->vdev_id; + IEEE80211_ADDR_COPY(peer_del_resp->peer_macaddr.addr, + ev->peer_macaddr.addr); + + free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb)); + return 0; +} + +void +qwx_peer_delete_resp_event(struct qwx_softc *sc, struct mbuf *m) +{ + struct wmi_peer_delete_resp_event peer_del_resp; + 
+ if (qwx_pull_peer_del_resp_ev(sc, m, &peer_del_resp) != 0) { + printf("%s: failed to extract peer delete resp", + sc->sc_dev.dv_xname); + return; + } + + sc->peer_delete_done = 1; + wakeup(&sc->peer_delete_done); + + DNPRINTF(QWX_D_WMI, "%s: peer delete resp for vdev id %d addr %s\n", + __func__, peer_del_resp.vdev_id, + ether_sprintf(peer_del_resp.peer_macaddr.addr)); +} + +const char * +qwx_wmi_vdev_resp_print(uint32_t vdev_resp_status) +{ + switch (vdev_resp_status) { + case WMI_VDEV_START_RESPONSE_INVALID_VDEVID: + return "invalid vdev id"; + case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED: + return "not supported"; + case WMI_VDEV_START_RESPONSE_DFS_VIOLATION: + return "dfs violation"; + case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN: + return "invalid regdomain"; + default: + return "unknown"; + } +} + +int +qwx_pull_vdev_start_resp_tlv(struct qwx_softc *sc, struct mbuf *m, + struct wmi_vdev_start_resp_event *vdev_rsp) +{ + const void **tb; + const struct wmi_vdev_start_resp_event *ev; + int ret; + + tb = qwx_wmi_tlv_parse_alloc(sc, mtod(m, void *), m->m_pkthdr.len); + if (tb == NULL) { + ret = ENOMEM; + printf("%s: failed to parse tlv: %d\n", + sc->sc_dev.dv_xname, ret); + return ret; + } + + ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT]; + if (!ev) { + printf("%s: failed to fetch vdev start resp ev\n", + sc->sc_dev.dv_xname); + free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb)); + return EPROTO; + } + + memset(vdev_rsp, 0, sizeof(*vdev_rsp)); + + vdev_rsp->vdev_id = ev->vdev_id; + vdev_rsp->requestor_id = ev->requestor_id; + vdev_rsp->resp_type = ev->resp_type; + vdev_rsp->status = ev->status; + vdev_rsp->chain_mask = ev->chain_mask; + vdev_rsp->smps_mode = ev->smps_mode; + vdev_rsp->mac_id = ev->mac_id; + vdev_rsp->cfgd_tx_streams = ev->cfgd_tx_streams; + vdev_rsp->cfgd_rx_streams = ev->cfgd_rx_streams; + + free(tb, M_DEVBUF, WMI_TAG_MAX * sizeof(*tb)); + return 0; +} + +void +qwx_vdev_start_resp_event(struct qwx_softc *sc, struct mbuf *m) +{ + struct wmi_vdev_start_resp_event vdev_start_resp; + uint32_t status; + + if (qwx_pull_vdev_start_resp_tlv(sc, m, &vdev_start_resp) != 0) { + printf("%s: failed to extract vdev start resp", + sc->sc_dev.dv_xname); + return; + } + + status = vdev_start_resp.status; + if (status) { + printf("%s: vdev start resp error status %d (%s)\n", + sc->sc_dev.dv_xname, status, + qwx_wmi_vdev_resp_print(status)); + } + + sc->vdev_setup_done = 1; + wakeup(&sc->vdev_setup_done); + + DNPRINTF(QWX_D_WMI, "%s: vdev start resp for vdev id %d", __func__, + vdev_start_resp.vdev_id); +} + int qwx_wmi_tlv_iter_parse(struct qwx_softc *sc, uint16_t tag, uint16_t len, const void *ptr, void *data) @@ -10198,7 +10635,6 @@ qwx_wmi_tlv_parse(struct qwx_softc *sc, const void **tb, (void *)tb); } - const void ** qwx_wmi_tlv_parse_alloc(struct qwx_softc *sc, const void *ptr, size_t len) { @@ -11241,13 +11677,13 @@ qwx_wmi_tlv_op_rx(struct qwx_softc *sc, struct mbuf *m) case WMI_READY_EVENTID: qwx_ready_event(sc, m); break; -#if 0 case WMI_PEER_DELETE_RESP_EVENTID: - ath11k_peer_delete_resp_event(ab, skb); + qwx_peer_delete_resp_event(sc, m); break; case WMI_VDEV_START_RESP_EVENTID: - ath11k_vdev_start_resp_event(ab, skb); + qwx_vdev_start_resp_event(sc, m); break; +#if 0 case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID: ath11k_bcn_tx_status_event(ab, skb); break; @@ -11257,12 +11693,8 @@ qwx_wmi_tlv_op_rx(struct qwx_softc *sc, struct mbuf *m) #endif case WMI_MGMT_RX_EVENTID: DPRINTF("%s: 0x%x: mgmt rx event\n", __func__, id); -#if 0 qwx_mgmt_rx_event(sc, m); /* mgmt_rx_event() owns the skb now! 
*/ -#else - m_freem(m); -#endif return; #if 0 case WMI_MGMT_TX_COMPLETION_EVENTID: @@ -12053,6 +12485,60 @@ qwx_dp_htt_htc_tx_complete(struct qwx_softc *sc, struct mbuf *m) m_freem(m); } +static inline void +qwx_dp_get_mac_addr(uint32_t addr_l32, uint16_t addr_h16, uint8_t *addr) +{ +#if 0 /* Not needed on OpenBSD? We do swapping in sofware... */ + if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) { + addr_l32 = swab32(addr_l32); + addr_h16 = swab16(addr_h16); + } +#endif + uint32_t val32; + uint16_t val16; + + val32 = le32toh(addr_l32); + memcpy(addr, &val32, 4); + val16 = le16toh(addr_h16); + memcpy(addr + 4, &val16, IEEE80211_ADDR_LEN - 4); +} + +void +qwx_peer_map_event(struct qwx_softc *sc, uint8_t vdev_id, uint16_t peer_id, + uint8_t *mac_addr, uint16_t ast_hash, uint16_t hw_peer_id) +{ + struct ieee80211com *ic = &sc->sc_ic; + struct ieee80211_node *ni; + struct qwx_node *nq; + struct ath11k_peer *peer; +#ifdef notyet + spin_lock_bh(&ab->base_lock); +#endif + ni = ieee80211_find_node(ic, mac_addr); + if (ni == NULL) + return; + nq = (struct qwx_node *)ni; + peer = &nq->peer; + + peer->vdev_id = vdev_id; + peer->peer_id = peer_id; + peer->ast_hash = ast_hash; + peer->hw_peer_id = hw_peer_id; +#if 0 + ether_addr_copy(peer->addr, mac_addr); + list_add(&peer->list, &ab->peers); +#endif + sc->peer_mapped = 1; + wakeup(&sc->peer_mapped); + + DNPRINTF(QWX_D_HTT, "%s: peer map vdev %d peer %s id %d\n", + __func__, vdev_id, ether_sprintf(mac_addr), peer_id); +#ifdef notyet + spin_unlock_bh(&ab->base_lock); +#endif +} + + void qwx_dp_htt_htc_t2h_msg_handler(struct qwx_softc *sc, struct mbuf *m) { @@ -12060,14 +12546,13 @@ qwx_dp_htt_htc_t2h_msg_handler(struct qwx_softc *sc, struct mbuf *m) struct htt_resp_msg *resp = mtod(m, struct htt_resp_msg *); enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(uint32_t *)resp); -#if 0 uint16_t peer_id; uint8_t vdev_id; uint8_t mac_addr[IEEE80211_ADDR_LEN]; uint16_t peer_mac_h16; uint16_t ast_hash; uint16_t hw_peer_id; -#endif + DPRINTF("%s: dp_htt rx msg type: 0x%0x\n", __func__, type); switch (type) { @@ -12079,34 +12564,34 @@ qwx_dp_htt_htc_t2h_msg_handler(struct qwx_softc *sc, struct mbuf *m) dp->htt_tgt_version_received = 1; wakeup(&dp->htt_tgt_version_received); break; -#if 0 case HTT_T2H_MSG_TYPE_PEER_MAP: vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID, - resp->peer_map_ev.info); + resp->peer_map_ev.info); peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID, - resp->peer_map_ev.info); + resp->peer_map_ev.info); peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16, - resp->peer_map_ev.info1); - ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32, - peer_mac_h16, mac_addr); - ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0); + resp->peer_map_ev.info1); + qwx_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32, + peer_mac_h16, mac_addr); + qwx_peer_map_event(sc, vdev_id, peer_id, mac_addr, 0, 0); break; case HTT_T2H_MSG_TYPE_PEER_MAP2: vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID, - resp->peer_map_ev.info); + resp->peer_map_ev.info); peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID, - resp->peer_map_ev.info); + resp->peer_map_ev.info); peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16, - resp->peer_map_ev.info1); - ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32, - peer_mac_h16, mac_addr); + resp->peer_map_ev.info1); + qwx_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32, + peer_mac_h16, mac_addr); ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL, - resp->peer_map_ev.info2); + resp->peer_map_ev.info2); 
hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID, resp->peer_map_ev.info1); - ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash, - hw_peer_id); + qwx_peer_map_event(sc, vdev_id, peer_id, mac_addr, ast_hash, + hw_peer_id); break; +#if 0 case HTT_T2H_MSG_TYPE_PEER_UNMAP: case HTT_T2H_MSG_TYPE_PEER_UNMAP2: peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID, @@ -12214,7 +12699,9 @@ int qwx_dp_rx_pdev_srng_alloc(struct qwx_softc *sc) { struct qwx_pdev_dp *dp = &sc->pdev_dp; +#if 0 struct dp_srng *srng = NULL; +#endif int i; int ret; @@ -12249,7 +12736,7 @@ qwx_dp_rx_pdev_srng_alloc(struct qwx_softc *sc) return ret; } } - +#if 0 for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) { srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring; ret = qwx_dp_srng_setup(sc, srng, HAL_RXDMA_MONITOR_STATUS, 0, @@ -12261,7 +12748,7 @@ qwx_dp_rx_pdev_srng_alloc(struct qwx_softc *sc) return ret; } } - +#endif /* if rxdma1_enable is false, then it doesn't need * to setup rxdam_mon_buf_ring, rxdma_mon_dst_ring * and rxdma_mon_desc_ring. @@ -12368,6 +12855,19 @@ qwx_hal_rx_buf_addr_info_set(void *desc, uint64_t paddr, uint32_t cookie, FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, manager); } +void +qwx_hal_rx_buf_addr_info_get(void *desc, uint64_t *paddr, uint32_t *cookie, + uint8_t *rbm) +{ + struct ath11k_buffer_addr *binfo = (struct ath11k_buffer_addr *)desc; + + *paddr = (((uint64_t)FIELD_GET(BUFFER_ADDR_INFO1_ADDR, + binfo->info1)) << 32) | + FIELD_GET(BUFFER_ADDR_INFO0_ADDR, binfo->info0); + *cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, binfo->info1); + *rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, binfo->info1); +} + /* Returns number of Rx buffers replenished */ int qwx_dp_rxbufs_replenish(struct qwx_softc *sc, int mac_id, @@ -12495,7 +12995,10 @@ qwx_dp_rxdma_pdev_buf_setup(struct qwx_softc *sc) { struct qwx_pdev_dp *dp = &sc->pdev_dp; struct dp_rxdma_ring *rx_ring; - int ret, i; + int ret; +#if 0 + int i; +#endif rx_ring = &dp->rx_refill_buf_ring; ret = qwx_dp_rxdma_ring_buf_setup(sc, rx_ring, HAL_RXDMA_BUF); @@ -12509,7 +13012,7 @@ qwx_dp_rxdma_pdev_buf_setup(struct qwx_softc *sc) if (ret) return ret; } - +#if 0 for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) { rx_ring = &dp->rx_mon_status_refill_ring[i]; ret = qwx_dp_rxdma_ring_buf_setup(sc, rx_ring, @@ -12517,7 +13020,7 @@ qwx_dp_rxdma_pdev_buf_setup(struct qwx_softc *sc) if (ret) return ret; } - +#endif return 0; } @@ -12920,6 +13423,7 @@ qwx_dp_rx_pdev_alloc(struct qwx_softc *sc, int mac_id) } #endif config_refill_ring: +#if 0 for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) { ret = qwx_dp_tx_htt_srng_setup(sc, dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id, @@ -12931,7 +13435,7 @@ config_refill_ring: return ret; } } - +#endif return 0; } @@ -13198,10 +13702,285 @@ exit: return total_msdu_reaped; } -int -qwx_dp_rx_process_mon_rings(struct qwx_softc *sc) +struct mbuf * +qwx_dp_rx_alloc_mon_status_buf(struct qwx_softc *sc, + struct dp_rxdma_ring *rx_ring, int *buf_idx) { - return 0; + struct mbuf *m; + struct qwx_rx_data *rx_data; + const size_t size = DP_RX_BUFFER_SIZE; + int ret; + + m = m_gethdr(M_DONTWAIT, MT_DATA); + if (m == NULL) + return NULL; + + if (size <= MCLBYTES) + MCLGET(m, M_DONTWAIT); + else + MCLGETL(m, M_DONTWAIT, size); + if ((m->m_flags & M_EXT) == 0) + goto fail_free_mbuf; + + m->m_len = m->m_pkthdr.len = size; + rx_data = &rx_ring->rx_data[rx_ring->cur]; + if (rx_data->m != NULL) + goto fail_free_mbuf; + + if (rx_data->map == NULL) { + ret = bus_dmamap_create(sc->sc_dmat, size, 
1, + size, 0, BUS_DMA_NOWAIT, &rx_data->map); + if (ret) + goto fail_free_mbuf; + } + + ret = bus_dmamap_load_mbuf(sc->sc_dmat, rx_data->map, m, + BUS_DMA_READ | BUS_DMA_NOWAIT); + if (ret) { + printf("%s: can't map mbuf (error %d)\n", + sc->sc_dev.dv_xname, ret); + goto fail_free_mbuf; + } + + *buf_idx = rx_ring->cur; + rx_data->m = m; + return m; + +fail_free_mbuf: + m_freem(m); + return NULL; +} + +int +qwx_dp_rx_reap_mon_status_ring(struct qwx_softc *sc, int mac_id, + struct mbuf_list *ml) +{ + const struct ath11k_hw_hal_params *hal_params; + struct qwx_pdev_dp *dp; + struct dp_rxdma_ring *rx_ring; + struct qwx_mon_data *pmon; + struct hal_srng *srng; + void *rx_mon_status_desc; + struct mbuf *m; + struct qwx_rx_data *rx_data; + struct hal_tlv_hdr *tlv; + uint32_t cookie; + int buf_idx, srng_id; + uint64_t paddr; + uint8_t rbm; + int num_buffs_reaped = 0; + + dp = &sc->pdev_dp; + pmon = &dp->mon_data; + + srng_id = sc->hw_params.hw_ops->mac_id_to_srng_id(&sc->hw_params, + mac_id); + rx_ring = &dp->rx_mon_status_refill_ring[srng_id]; + + srng = &sc->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; +#ifdef notyet + spin_lock_bh(&srng->lock); +#endif + qwx_hal_srng_access_begin(sc, srng); + while (1) { + rx_mon_status_desc = qwx_hal_srng_src_peek(sc, srng); + if (!rx_mon_status_desc) { + pmon->buf_state = DP_MON_STATUS_REPLINISH; + break; + } + + qwx_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr, + &cookie, &rbm); + if (paddr) { + buf_idx = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie); + + rx_data = &rx_ring->rx_data[buf_idx]; + if (rx_data->m == NULL) { + printf("%s: rx monitor status with invalid " + "buf_idx %d\n", __func__, buf_idx); + pmon->buf_state = DP_MON_STATUS_REPLINISH; + goto move_next; + } + + bus_dmamap_sync(sc->sc_dmat, rx_data->map, 0, + rx_data->m->m_pkthdr.len, BUS_DMASYNC_POSTREAD); + + tlv = mtod(rx_data->m, struct hal_tlv_hdr *); + if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != + HAL_RX_STATUS_BUFFER_DONE) { + printf("%s: mon status DONE not set %lx, " + "buf_idx %d\n", __func__, + FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl), + buf_idx); + /* If done status is missing, hold onto status + * ring until status is done for this status + * ring buffer. + * Keep HP in mon_status_ring unchanged, + * and break from here.
+ * Check status for same buffer for next time + */ + pmon->buf_state = DP_MON_STATUS_NO_DMA; + break; + } + + bus_dmamap_unload(sc->sc_dmat, rx_data->map); + m = rx_data->m; + rx_data->m = NULL; +#if 0 + if (ab->hw_params.full_monitor_mode) { + ath11k_dp_rx_mon_update_status_buf_state(pmon, tlv); + if (paddr == pmon->mon_status_paddr) + pmon->buf_state = DP_MON_STATUS_MATCH; + } +#endif + ml_enqueue(ml, m); + } else { + pmon->buf_state = DP_MON_STATUS_REPLINISH; + } +move_next: + m = qwx_dp_rx_alloc_mon_status_buf(sc, rx_ring, &buf_idx); + if (!m) { + hal_params = sc->hw_params.hal_params; + qwx_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0, + hal_params->rx_buf_rbm); + num_buffs_reaped++; + break; + } + rx_data = &rx_ring->rx_data[buf_idx]; + KASSERT(rx_data->m == NULL); + + cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | + FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_idx); + + paddr = rx_data->map->dm_segs[0].ds_addr; + qwx_hal_rx_buf_addr_info_set(rx_mon_status_desc, paddr, + cookie, sc->hw_params.hal_params->rx_buf_rbm); + qwx_hal_srng_src_get_next_entry(sc, srng); + num_buffs_reaped++; + } + qwx_hal_srng_access_end(sc, srng); +#ifdef notyet + spin_unlock_bh(&srng->lock); +#endif + return num_buffs_reaped; +} + +enum hal_rx_mon_status +qwx_hal_rx_parse_mon_status(struct qwx_softc *sc, + struct hal_rx_mon_ppdu_info *ppdu_info, struct mbuf *m) +{ + /* TODO */ + return HAL_RX_MON_STATUS_PPDU_NOT_DONE; +} + +int +qwx_dp_rx_process_mon_status(struct qwx_softc *sc, int mac_id) +{ + enum hal_rx_mon_status hal_status; + struct mbuf *m; + struct mbuf_list ml = MBUF_LIST_INITIALIZER(); +#if 0 + struct ath11k_peer *peer; + struct ath11k_sta *arsta; +#endif + int num_buffs_reaped = 0; +#if 0 + uint32_t rx_buf_sz; + uint16_t log_type; +#endif + struct qwx_mon_data *pmon = (struct qwx_mon_data *)&sc->pdev_dp.mon_data; +#if 0 + struct qwx_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats; +#endif + struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info; + + num_buffs_reaped = qwx_dp_rx_reap_mon_status_ring(sc, mac_id, &ml); + printf("%s: processing %d packets\n", __func__, num_buffs_reaped); + if (!num_buffs_reaped) + goto exit; + + memset(ppdu_info, 0, sizeof(*ppdu_info)); + ppdu_info->peer_id = HAL_INVALID_PEERID; + + while ((m = ml_dequeue(&ml))) { +#if 0 + if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) { + log_type = ATH11K_PKTLOG_TYPE_LITE_RX; + rx_buf_sz = DP_RX_BUFFER_SIZE_LITE; + } else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) { + log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF; + rx_buf_sz = DP_RX_BUFFER_SIZE; + } else { + log_type = ATH11K_PKTLOG_TYPE_INVALID; + rx_buf_sz = 0; + } + + if (log_type != ATH11K_PKTLOG_TYPE_INVALID) + trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz); +#endif + + memset(ppdu_info, 0, sizeof(*ppdu_info)); + ppdu_info->peer_id = HAL_INVALID_PEERID; + hal_status = qwx_hal_rx_parse_mon_status(sc, ppdu_info, m); +#if 0 + if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) && + pmon->mon_ppdu_status == DP_PPDU_STATUS_START && + hal_status == HAL_TLV_STATUS_PPDU_DONE) { + rx_mon_stats->status_ppdu_done++; + pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE; + ath11k_dp_rx_mon_dest_process(ar, mac_id, budget, napi); + pmon->mon_ppdu_status = DP_PPDU_STATUS_START; + } +#endif + if (ppdu_info->peer_id == HAL_INVALID_PEERID || + hal_status != HAL_RX_MON_STATUS_PPDU_DONE) { + m_freem(m); + continue; + } +#if 0 + rcu_read_lock(); + spin_lock_bh(&ab->base_lock); + peer = ath11k_peer_find_by_id(ab, ppdu_info->peer_id); + + if (!peer 
|| !peer->sta) { + ath11k_dbg(ab, ATH11K_DBG_DATA, + "failed to find the peer with peer_id %d\n", + ppdu_info->peer_id); + goto next_skb; + } + + arsta = (struct ath11k_sta *)peer->sta->drv_priv; + ath11k_dp_rx_update_peer_stats(arsta, ppdu_info); + + if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr)) + trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz); + +next_skb: + spin_unlock_bh(&ab->base_lock); + rcu_read_unlock(); + + dev_kfree_skb_any(skb); + memset(ppdu_info, 0, sizeof(*ppdu_info)); + ppdu_info->peer_id = HAL_INVALID_PEERID; +#endif + } +exit: + return num_buffs_reaped; +} + +int +qwx_dp_rx_process_mon_rings(struct qwx_softc *sc, int mac_id) +{ + int ret = 0; +#if 0 + if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) && + ab->hw_params.full_monitor_mode) + ret = ath11k_dp_full_mon_process_rx(ab, mac_id, napi, budget); + else +#endif + ret = qwx_dp_rx_process_mon_status(sc, mac_id); + + return ret; } int @@ -13217,38 +13996,67 @@ qwx_dp_process_reo_status(struct qwx_softc *sc) } int -qwx_dp_service_srng(struct qwx_softc *sc) +qwx_dp_service_srng(struct qwx_softc *sc, int grp_id) { struct qwx_pdev_dp *dp = &sc->pdev_dp; - int i, ret = 0; + int i, j, ret = 0; for (i = 0; i < sc->hw_params.max_tx_ring; i++) { - if (qwx_dp_tx_completion_handler(sc, i)) + const struct ath11k_hw_tcl2wbm_rbm_map *map; + + map = &sc->hw_params.hal_params->tcl2wbm_rbm_map[i]; + if ((sc->hw_params.ring_mask->tx[grp_id]) & + (1 << (map->wbm_ring_num)) && + qwx_dp_tx_completion_handler(sc, i)) ret = 1; } - if (qwx_dp_process_rx_err(sc)) + if (sc->hw_params.ring_mask->rx_err[grp_id] && + qwx_dp_process_rx_err(sc)) ret = 1; - if (qwx_dp_rx_process_wbm_err(sc)) + if (sc->hw_params.ring_mask->rx_wbm_rel[grp_id] && + qwx_dp_rx_process_wbm_err(sc)) ret = 1; - for (i = 0; i < DP_REO_DST_RING_MAX; i++) { + if (sc->hw_params.ring_mask->rx[grp_id]) { + i = fls(sc->hw_params.ring_mask->rx[grp_id]) - 1; if (qwx_dp_process_rx(sc, i)) ret = 1; } - if (qwx_dp_rx_process_mon_rings(sc)) + for (i = 0; i < sc->num_radios; i++) { + for (j = 0; j < sc->hw_params.num_rxmda_per_pdev; j++) { + int id = i * sc->hw_params.num_rxmda_per_pdev + j; + + if ((sc->hw_params.ring_mask->rx_mon_status[grp_id] & + (1 << id)) == 0) + continue; + + if (qwx_dp_rx_process_mon_rings(sc, id)) + ret = 1; + } + } + + if (sc->hw_params.ring_mask->reo_status[grp_id] && + qwx_dp_process_reo_status(sc)) ret = 1; - if (qwx_dp_process_reo_status(sc)) - ret = 1; + for (i = 0; i < sc->num_radios; i++) { + for (j = 0; j < sc->hw_params.num_rxmda_per_pdev; j++) { + int id = i * sc->hw_params.num_rxmda_per_pdev + j; - if (qwx_dp_process_rxdma_err(sc)) - ret = 1; + if ((sc->hw_params.ring_mask->rxdma2host[grp_id] & + (1 << (id))) == 0) + continue; - qwx_dp_rxbufs_replenish(sc, dp->mac_id, &dp->rx_refill_buf_ring, 0, - sc->hw_params.hal_params->rx_buf_rbm); + if (qwx_dp_process_rxdma_err(sc)) + ret = 1; + + qwx_dp_rxbufs_replenish(sc, id, &dp->rx_refill_buf_ring, + 0, sc->hw_params.hal_params->rx_buf_rbm); + } + } return ret; } @@ -14059,6 +14867,77 @@ qwx_wmi_send_scan_stop_cmd(struct qwx_softc *sc, return ret; } +int +qwx_wmi_send_peer_create_cmd(struct qwx_softc *sc, uint8_t pdev_id, + struct peer_create_params *param) +{ + struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id]; + struct wmi_peer_create_cmd *cmd; + struct mbuf *m; + int ret; + + m = qwx_wmi_alloc_mbuf(sizeof(*cmd)); + if (!m) + return ENOMEM; + + cmd = (struct wmi_peer_create_cmd *)(mtod(m, uint8_t *) + + sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr)); + 
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_CREATE_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + + IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, param->peer_addr); + cmd->peer_type = param->peer_type; + cmd->vdev_id = param->vdev_id; + + ret = qwx_wmi_cmd_send(wmi, m, WMI_PEER_CREATE_CMDID); + if (ret) { + printf("%s: failed to submit WMI_PEER_CREATE cmd\n", + sc->sc_dev.dv_xname); + m_freem(m); + return ret; + } + + DNPRINTF(QWX_D_WMI, "%s: cmd peer create vdev_id %d peer_addr %s\n", + __func__, param->vdev_id, ether_sprintf(param->peer_addr)); + + return ret; +} + +int +qwx_wmi_send_peer_delete_cmd(struct qwx_softc *sc, const uint8_t *peer_addr, + uint8_t vdev_id, uint8_t pdev_id) +{ + struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id]; + struct wmi_peer_delete_cmd *cmd; + struct mbuf *m; + int ret; + + m = qwx_wmi_alloc_mbuf(sizeof(*cmd)); + if (!m) + return ENOMEM; + + cmd = (struct wmi_peer_delete_cmd *)(mtod(m, uint8_t *) + + sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr)); + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_DELETE_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + + IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, peer_addr); + cmd->vdev_id = vdev_id; + + ret = qwx_wmi_cmd_send(wmi, m, WMI_PEER_DELETE_CMDID); + if (ret) { + printf("%s: failed to send WMI_PEER_DELETE cmd\n", + sc->sc_dev.dv_xname); + m_freem(m); + return ret; + } + + DNPRINTF(QWX_D_WMI, "%s: cmd peer delete vdev_id %d peer_addr %pM\n", + __func__, vdev_id, peer_addr); + + return 0; +} + void qwx_wmi_copy_resource_config(struct wmi_resource_config *wmi_cfg, struct target_resource_config *tg_cfg) @@ -14486,6 +15365,149 @@ qwx_wmi_vdev_set_param_cmd(struct qwx_softc *sc, uint32_t vdev_id, return 0; } +void +qwx_wmi_put_wmi_channel(struct wmi_channel *chan, + struct wmi_vdev_start_req_arg *arg) +{ + uint32_t center_freq1 = arg->channel.band_center_freq1; + + memset(chan, 0, sizeof(*chan)); + + chan->mhz = arg->channel.freq; + chan->band_center_freq1 = arg->channel.band_center_freq1; + + if (arg->channel.mode == MODE_11AX_HE160) { + if (arg->channel.freq > arg->channel.band_center_freq1) + chan->band_center_freq1 = center_freq1 + 40; + else + chan->band_center_freq1 = center_freq1 - 40; + + chan->band_center_freq2 = arg->channel.band_center_freq1; + } else if ((arg->channel.mode == MODE_11AC_VHT80_80) || + (arg->channel.mode == MODE_11AX_HE80_80)) { + chan->band_center_freq2 = arg->channel.band_center_freq2; + } else + chan->band_center_freq2 = 0; + + chan->info |= FIELD_PREP(WMI_CHAN_INFO_MODE, arg->channel.mode); + if (arg->channel.passive) + chan->info |= WMI_CHAN_INFO_PASSIVE; + if (arg->channel.allow_ibss) + chan->info |= WMI_CHAN_INFO_ADHOC_ALLOWED; + if (arg->channel.allow_ht) + chan->info |= WMI_CHAN_INFO_ALLOW_HT; + if (arg->channel.allow_vht) + chan->info |= WMI_CHAN_INFO_ALLOW_VHT; + if (arg->channel.allow_he) + chan->info |= WMI_CHAN_INFO_ALLOW_HE; + if (arg->channel.ht40plus) + chan->info |= WMI_CHAN_INFO_HT40_PLUS; + if (arg->channel.chan_radar) + chan->info |= WMI_CHAN_INFO_DFS; + if (arg->channel.freq2_radar) + chan->info |= WMI_CHAN_INFO_DFS_FREQ2; + + chan->reg_info_1 = FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR, + arg->channel.max_power) | + FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR, + arg->channel.max_reg_power); + + chan->reg_info_2 = FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX, + arg->channel.max_antenna_gain) | + FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR, + arg->channel.max_power); +} + +int +qwx_wmi_vdev_start(struct qwx_softc *sc, struct 
wmi_vdev_start_req_arg *arg, + int pdev_id, int restart) +{ + struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id]; + struct wmi_vdev_start_request_cmd *cmd; + struct mbuf *m; + struct wmi_channel *chan; + struct wmi_tlv *tlv; + void *ptr; + int ret, len; + + if (arg->ssid_len > sizeof(cmd->ssid.ssid)) + return EINVAL; + + len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE; + + m = qwx_wmi_alloc_mbuf(len); + if (!m) + return ENOMEM; + + cmd = (struct wmi_vdev_start_request_cmd *)(mtod(m, uint8_t *) + + sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr)); + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_VDEV_START_REQUEST_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + cmd->vdev_id = arg->vdev_id; + cmd->beacon_interval = arg->bcn_intval; + cmd->bcn_tx_rate = arg->bcn_tx_rate; + cmd->dtim_period = arg->dtim_period; + cmd->num_noa_descriptors = arg->num_noa_descriptors; + cmd->preferred_rx_streams = arg->pref_rx_streams; + cmd->preferred_tx_streams = arg->pref_tx_streams; + cmd->cac_duration_ms = arg->cac_duration_ms; + cmd->regdomain = arg->regdomain; + cmd->he_ops = arg->he_ops; + cmd->mbssid_flags = arg->mbssid_flags; + cmd->mbssid_tx_vdev_id = arg->mbssid_tx_vdev_id; + + if (!restart) { + if (arg->ssid) { + cmd->ssid.ssid_len = arg->ssid_len; + memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len); + } + if (arg->hidden_ssid) + cmd->flags |= WMI_VDEV_START_HIDDEN_SSID; + if (arg->pmf_enabled) + cmd->flags |= WMI_VDEV_START_PMF_ENABLED; + } + + cmd->flags |= WMI_VDEV_START_LDPC_RX_ENABLED; + if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, sc->sc_flags)) + cmd->flags |= WMI_VDEV_START_HW_ENCRYPTION_DISABLED; + + ptr = mtod(m, void *) + sizeof(struct ath11k_htc_hdr) + + sizeof(struct wmi_cmd_hdr) + sizeof(*cmd); + chan = ptr; + + qwx_wmi_put_wmi_channel(chan, arg); + + chan->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_CHANNEL) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*chan) - TLV_HDR_SIZE); + ptr += sizeof(*chan); + + tlv = ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | + FIELD_PREP(WMI_TLV_LEN, 0); + + /* Note: This is a nested TLV containing: + * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv].. + */ + + ptr += sizeof(*tlv); + + ret = qwx_wmi_cmd_send(wmi, m, restart ? + WMI_VDEV_RESTART_REQUEST_CMDID : WMI_VDEV_START_REQUEST_CMDID); + if (ret) { + printf("%s: failed to submit vdev_%s cmd\n", + sc->sc_dev.dv_xname, restart ? "restart" : "start"); + m_freem(m); + return ret; + } + + DNPRINTF(QWX_D_WMI, "%s: cmd vdev %s id 0x%x freq %u mode 0x%x\n", + __func__, restart ? 
"restart" : "start", arg->vdev_id, + arg->channel.freq, arg->channel.mode); + + return ret; +} + int qwx_core_start(struct qwx_softc *sc) { @@ -17171,10 +18193,12 @@ qwx_reg_update_chan_list(struct qwx_softc *sc, uint8_t pdev_id) ch->antennamax = 0; /* TODO: Use appropriate phymodes */ - if (IEEE80211_IS_CHAN_2GHZ(channel)) + if (IEEE80211_IS_CHAN_A(channel)) + ch->phy_mode = MODE_11A; + else if (IEEE80211_IS_CHAN_G(channel)) ch->phy_mode = MODE_11G; else - ch->phy_mode = MODE_11A; + ch->phy_mode = MODE_11B; #ifdef notyet if (channel->band == NL80211_BAND_6GHZ && cfg80211_channel_is_psc(channel)) @@ -17226,12 +18250,15 @@ int qwx_mac_config_mon_status_default(struct qwx_softc *sc, int enable) { struct htt_rx_ring_tlv_filter tlv_filter = { 0 }; - int i, ret = 0; + int ret = 0; +#if 0 + int i; struct dp_rxdma_ring *ring; +#endif if (enable) tlv_filter = qwx_mac_mon_status_filter_default; - +#if 0 for (i = 0; i < sc->hw_params.num_rxmda_per_pdev; i++) { ring = &sc->pdev_dp.rx_mon_status_refill_ring[i]; ret = qwx_dp_tx_htt_rx_filter_setup(sc, @@ -17240,6 +18267,7 @@ qwx_mac_config_mon_status_default(struct qwx_softc *sc, int enable) if (ret) return ret; } +#endif #if 0 if (enable && !ar->ab->hw_params.rxdma1_enable) mod_timer(&ar->ab->mon_reap_timer, jiffies + @@ -17409,11 +18437,7 @@ qwx_mac_op_start(struct qwx_pdev *pdev) #ifdef notyet mutex_unlock(&ar->conf_mutex); #endif -#if 0 - rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], - &ab->pdevs[ar->pdev_idx]); -#endif - + sc->pdevs_active |= (1 << pdev->pdev_id); return 0; err: #ifdef notyet @@ -17516,6 +18540,176 @@ qwx_mac_vdev_delete(struct qwx_softc *sc, struct qwx_vif *arvif) printf("%s: not implemented\n", __func__); } +int +qwx_mac_vdev_setup_sync(struct qwx_softc *sc) +{ + int ret; + +#ifdef notyet + lockdep_assert_held(&ar->conf_mutex); +#endif + if (test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags)) + return ESHUTDOWN; + + while (!sc->vdev_setup_done) { + ret = tsleep_nsec(&sc->vdev_setup_done, 0, "qwxvdev", + SEC_TO_NSEC(1)); + if (ret) { + printf("%s: vdev start timeout\n", + sc->sc_dev.dv_xname); + return ret; + } + } + + return 0; +} + +int +qwx_mac_set_txbf_conf(struct qwx_vif *arvif) +{ + /* TX beamforming is not yet supported. 
*/ + return 0; +} + +int +qwx_mac_vdev_start_restart(struct qwx_softc *sc, struct qwx_vif *arvif, + int pdev_id, int restart) +{ + struct ieee80211com *ic = &sc->sc_ic; + struct ieee80211_channel *chan = ic->ic_bss->ni_chan; + struct wmi_vdev_start_req_arg arg = {}; + int ret = 0; +#ifdef notyet + lockdep_assert_held(&ar->conf_mutex); +#endif +#if 0 + reinit_completion(&ar->vdev_setup_done); +#endif + arg.vdev_id = arvif->vdev_id; + arg.dtim_period = ic->ic_dtim_period; + arg.bcn_intval = ic->ic_lintval; + + arg.channel.freq = chan->ic_freq; + arg.channel.band_center_freq1 = chan->ic_freq; + arg.channel.band_center_freq2 = chan->ic_freq; + + switch (ic->ic_curmode) { + case IEEE80211_MODE_11A: + arg.channel.mode = MODE_11A; + break; + case IEEE80211_MODE_11B: + arg.channel.mode = MODE_11B; + break; + case IEEE80211_MODE_11G: + arg.channel.mode = MODE_11G; + break; + default: + printf("%s: unsupported phy mode %d\n", + sc->sc_dev.dv_xname, ic->ic_curmode); + return ENOTSUP; + } + + arg.channel.min_power = 0; + arg.channel.max_power = 20; /* XXX */ + arg.channel.max_reg_power = 20; /* XXX */ + arg.channel.max_antenna_gain = 0; /* XXX */ + + arg.pref_tx_streams = 1; + arg.pref_rx_streams = 1; + + arg.mbssid_flags = 0; + arg.mbssid_tx_vdev_id = 0; + if (isset(sc->wmi.svc_map, + WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT)) { + ret = qwx_mac_setup_vdev_params_mbssid(arvif, + &arg.mbssid_flags, &arg.mbssid_tx_vdev_id); + if (ret) + return ret; + } +#if 0 + if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { + arg.ssid = arvif->u.ap.ssid; + arg.ssid_len = arvif->u.ap.ssid_len; + arg.hidden_ssid = arvif->u.ap.hidden_ssid; + + /* For now allow DFS for AP mode */ + arg.channel.chan_radar = + !!(chandef->chan->flags & IEEE80211_CHAN_RADAR); + + arg.channel.freq2_radar = ctx->radar_enabled; + + arg.channel.passive = arg.channel.chan_radar; + + spin_lock_bh(&ab->base_lock); + arg.regdomain = ar->ab->dfs_region; + spin_unlock_bh(&ab->base_lock); + } +#endif + /* XXX */ + arg.channel.passive |= !!(ieee80211_chan2ieee(ic, chan) >= 52); + + DNPRINTF(QWX_D_MAC, "%s: vdev %d start center_freq %d phymode %s\n", + __func__, arg.vdev_id, arg.channel.freq, + qwx_wmi_phymode_str(arg.channel.mode)); + + sc->vdev_setup_done = 0; + ret = qwx_wmi_vdev_start(sc, &arg, pdev_id, restart); + if (ret) { + printf("%s: failed to %s WMI vdev %i\n", sc->sc_dev.dv_xname, + restart ? "restart" : "start", arg.vdev_id); + return ret; + } + + ret = qwx_mac_vdev_setup_sync(sc); + if (ret) { + printf("%s: failed to synchronize setup for vdev %i %s: %d\n", + sc->sc_dev.dv_xname, arg.vdev_id, + restart ? "restart" : "start", ret); + return ret; + } + + if (!restart) + sc->num_started_vdevs++; + + DNPRINTF(QWX_D_MAC, "%s: vdev %d started\n", __func__, arvif->vdev_id); + + /* Enable CAC Flag in the driver by checking the channel DFS cac time, + * i.e dfs_cac_ms value which will be valid only for radar channels + * and state as NL80211_DFS_USABLE which indicates CAC needs to be + * done before channel usage. This flags is used to drop rx packets. + * during CAC. 
+ */ + /* TODO Set the flag for other interface types as required */ +#if 0 + if (arvif->vdev_type == WMI_VDEV_TYPE_AP && + chandef->chan->dfs_cac_ms && + chandef->chan->dfs_state == NL80211_DFS_USABLE) { + set_bit(ATH11K_CAC_RUNNING, &ar->dev_flags); + ath11k_dbg(ab, ATH11K_DBG_MAC, + "CAC Started in chan_freq %d for vdev %d\n", + arg.channel.freq, arg.vdev_id); + } +#endif + ret = qwx_mac_set_txbf_conf(arvif); + if (ret) + printf("%s: failed to set txbf conf for vdev %d: %d\n", + sc->sc_dev.dv_xname, arvif->vdev_id, ret); + + return 0; +} + +int +qwx_mac_vdev_restart(struct qwx_softc *sc, struct qwx_vif *arvif, int pdev_id) +{ + return qwx_mac_vdev_start_restart(sc, arvif, pdev_id, 1); +} + +int +qwx_mac_vdev_start(struct qwx_softc *sc, struct qwx_vif *arvif, int pdev_id) +{ + return qwx_mac_vdev_start_restart(sc, arvif, pdev_id, 0); +} + int qwx_mac_op_add_interface(struct qwx_pdev *pdev) { @@ -17574,7 +18768,13 @@ qwx_mac_op_add_interface(struct qwx_pdev *pdev) } #endif - bit = ffs(sc->free_vdev_map); + if (sc->free_vdev_map == 0) { + printf("%s: cannot add interface; all vdevs are busy\n", + sc->sc_dev.dv_xname); + ret = EBUSY; + goto err; + } + bit = ffs(sc->free_vdev_map) - 1; arvif->vdev_id = bit; arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE; @@ -17901,6 +19101,812 @@ qwx_mac_scan_finish(struct qwx_softc *sc) } } +int +qwx_mac_get_rate_hw_value(struct ieee80211com *ic, + struct ieee80211_node *ni, int bitrate) +{ + uint32_t preamble; + uint16_t hw_value; + int shortpre = 0; + + if (IEEE80211_IS_CHAN_CCK(ni->ni_chan)) + preamble = WMI_RATE_PREAMBLE_CCK; + else + preamble = WMI_RATE_PREAMBLE_OFDM; + + if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && + IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) + shortpre = 1; + + switch (bitrate) { + case 2: + hw_value = ATH11K_HW_RATE_CCK_LP_1M; + break; + case 4: + if (shortpre) + hw_value = ATH11K_HW_RATE_CCK_SP_2M; + else + hw_value = ATH11K_HW_RATE_CCK_LP_2M; + break; + case 11: + if (shortpre) + hw_value = ATH11K_HW_RATE_CCK_SP_5_5M; + else + hw_value = ATH11K_HW_RATE_CCK_LP_5_5M; + break; + case 22: + if (shortpre) + hw_value = ATH11K_HW_RATE_CCK_SP_11M; + else + hw_value = ATH11K_HW_RATE_CCK_LP_11M; + break; + case 12: + hw_value = ATH11K_HW_RATE_OFDM_6M; + break; + case 18: + hw_value = ATH11K_HW_RATE_OFDM_9M; + break; + case 24: + hw_value = ATH11K_HW_RATE_OFDM_12M; + break; + case 36: + hw_value = ATH11K_HW_RATE_OFDM_18M; + break; + case 48: + hw_value = ATH11K_HW_RATE_OFDM_24M; + break; + case 72: + hw_value = ATH11K_HW_RATE_OFDM_36M; + break; + case 96: + hw_value = ATH11K_HW_RATE_OFDM_48M; + break; + case 108: + hw_value = ATH11K_HW_RATE_OFDM_54M; + break; + default: + return -1; + } + + return ATH11K_HW_RATE_CODE(hw_value, 0, preamble); +} + +int +qwx_peer_delete(struct qwx_softc *sc, uint32_t vdev_id, uint8_t pdev_id, + uint8_t *addr) +{ + int ret; + + sc->peer_delete_done = 0; + ret = qwx_wmi_send_peer_delete_cmd(sc, addr, vdev_id, pdev_id); + if (ret) { + printf("%s: failed to delete peer vdev_id %d addr %s ret %d\n", + sc->sc_dev.dv_xname, vdev_id, ether_sprintf(addr), ret); + return ret; + } + + while (!sc->peer_delete_done) { + ret = tsleep_nsec(&sc->peer_delete_done, 0, "qwxpeerd", + SEC_TO_NSEC(3)); + if (ret) { + printf("%s: peer delete command timeout\n", + sc->sc_dev.dv_xname); + return ret; + } + } + + sc->num_peers--; + return 0; +} + +int +qwx_peer_create(struct qwx_softc *sc, struct qwx_vif *arvif, uint8_t pdev_id, + struct ieee80211_node *ni, struct peer_create_params *param) +{ + struct qwx_node *nq = (struct qwx_node *)ni; + 
struct ath11k_peer *peer; + int ret; +#ifdef notyet + lockdep_assert_held(&ar->conf_mutex); +#endif + if (sc->num_peers > (TARGET_NUM_PEERS_PDEV(sc) - 1)) { + DPRINTF("%s: failed to create peer due to insufficient " + "peer entry resource in firmware\n", __func__); + return ENOBUFS; + } +#ifdef notyet + mutex_lock(&ar->ab->tbl_mtx_lock); + spin_lock_bh(&ar->ab->base_lock); +#endif + peer = &nq->peer; + if (peer) { + if (peer->peer_id != HAL_INVALID_PEERID && + peer->vdev_id == param->vdev_id) { +#ifdef notyet + spin_unlock_bh(&ar->ab->base_lock); + mutex_unlock(&ar->ab->tbl_mtx_lock); +#endif + return EINVAL; + } +#if 0 + /* Assume sta is transitioning to another band. + * Remove here the peer from rhash. + */ + ath11k_peer_rhash_delete(ar->ab, peer); +#endif + } +#ifdef notyet + spin_unlock_bh(&ar->ab->base_lock); + mutex_unlock(&ar->ab->tbl_mtx_lock); +#endif + sc->peer_mapped = 0; + + ret = qwx_wmi_send_peer_create_cmd(sc, pdev_id, param); + if (ret) { + printf("%s: failed to send peer create vdev_id %d ret %d\n", + sc->sc_dev.dv_xname, param->vdev_id, ret); + return ret; + } + + while (!sc->peer_mapped) { + ret = tsleep_nsec(&sc->peer_mapped, 0, "qwxpeer", + SEC_TO_NSEC(3)); + if (ret) { + printf("%s: peer create command timeout\n", + sc->sc_dev.dv_xname); + return ret; + } + } + +#ifdef notyet + mutex_lock(&ar->ab->tbl_mtx_lock); + spin_lock_bh(&ar->ab->base_lock); +#endif +#if 0 + peer = ath11k_peer_find(ar->ab, param->vdev_id, param->peer_addr); + if (!peer) { + spin_unlock_bh(&ar->ab->base_lock); + mutex_unlock(&ar->ab->tbl_mtx_lock); + ath11k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n", + param->peer_addr, param->vdev_id); + + ret = -ENOENT; + goto cleanup; + } + + ret = ath11k_peer_rhash_add(ar->ab, peer); + if (ret) { + spin_unlock_bh(&ar->ab->base_lock); + mutex_unlock(&ar->ab->tbl_mtx_lock); + goto cleanup; + } +#endif + peer->pdev_id = pdev_id; +#if 0 + peer->sta = sta; + + if (arvif->vif->type == NL80211_IFTYPE_STATION) { + arvif->ast_hash = peer->ast_hash; + arvif->ast_idx = peer->hw_peer_id; + } + peer->sec_type = HAL_ENCRYPT_TYPE_OPEN; + peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN; + + if (sta) { + struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + arsta->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 0) | + FIELD_PREP(HTT_TCL_META_DATA_PEER_ID, + peer->peer_id); + + /* set HTT extension valid bit to 0 by default */ + arsta->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT; + } +#endif + sc->num_peers++; +#ifdef notyet + spin_unlock_bh(&ar->ab->base_lock); + mutex_unlock(&ar->ab->tbl_mtx_lock); +#endif + return 0; +#if 0 +cleanup: + int fbret = qwx_peer_delete(sc, param->vdev_id, param->peer_addr); + if (fbret) { + printf("%s: failed peer %s delete vdev_id %d fallback ret %d\n", + sc->sc_dev.dv_xname, ether_sprintf(ni->ni_macaddr), + param->vdev_id, fbret); + } + + return ret; +#endif +} + +int +qwx_dp_tx_send_reo_cmd(struct qwx_softc *sc, struct dp_rx_tid *rx_tid, + enum hal_reo_cmd_type type, struct ath11k_hal_reo_cmd *cmd, + void (*cb)(struct qwx_dp *, void *, enum hal_reo_cmd_status)) +{ + struct qwx_dp *dp = &sc->dp; + struct dp_reo_cmd *dp_cmd; + struct hal_srng *cmd_ring; + int cmd_num; + + if (test_bit(ATH11K_FLAG_CRASH_FLUSH, sc->sc_flags)) + return ESHUTDOWN; + + cmd_ring = &sc->hal.srng_list[dp->reo_cmd_ring.ring_id]; + cmd_num = qwx_hal_reo_cmd_send(sc, cmd_ring, type, cmd); + /* cmd_num should start from 1, during failure return the error code */ + if (cmd_num < 0) + return cmd_num; + + /* reo cmd ring descriptors has 
cmd_num starting from 1 */ + if (cmd_num == 0) + return EINVAL; + + if (!cb) + return 0; + + /* Can this be optimized so that we keep the pending command list only + * for tid delete command to free up the resource on the command status + * indication? + */ + dp_cmd = malloc(sizeof(*dp_cmd), M_DEVBUF, M_ZERO | M_NOWAIT); + if (!dp_cmd) + return ENOMEM; + + memcpy(&dp_cmd->data, rx_tid, sizeof(struct dp_rx_tid)); + dp_cmd->cmd_num = cmd_num; + dp_cmd->handler = cb; +#ifdef notyet + spin_lock_bh(&dp->reo_cmd_lock); +#endif + TAILQ_INSERT_TAIL(&dp->reo_cmd_list, dp_cmd, entry); +#ifdef notyet + spin_unlock_bh(&dp->reo_cmd_lock); +#endif + return 0; +} + +uint32_t +qwx_hal_reo_qdesc_size(uint32_t ba_window_size, uint8_t tid) +{ + uint32_t num_ext_desc; + + if (ba_window_size <= 1) { + if (tid != HAL_DESC_REO_NON_QOS_TID) + num_ext_desc = 1; + else + num_ext_desc = 0; + } else if (ba_window_size <= 105) { + num_ext_desc = 1; + } else if (ba_window_size <= 210) { + num_ext_desc = 2; + } else { + num_ext_desc = 3; + } + + return sizeof(struct hal_rx_reo_queue) + + (num_ext_desc * sizeof(struct hal_rx_reo_queue_ext)); +} + +void +qwx_hal_reo_set_desc_hdr(struct hal_desc_header *hdr, uint8_t owner, uint8_t buffer_type, uint32_t magic) +{ + hdr->info0 = FIELD_PREP(HAL_DESC_HDR_INFO0_OWNER, owner) | + FIELD_PREP(HAL_DESC_HDR_INFO0_BUF_TYPE, buffer_type); + + /* Magic pattern in reserved bits for debugging */ + hdr->info0 |= FIELD_PREP(HAL_DESC_HDR_INFO0_DBG_RESERVED, magic); +} + +void +qwx_hal_reo_qdesc_setup(void *vaddr, int tid, uint32_t ba_window_size, + uint32_t start_seq, enum hal_pn_type type) +{ + struct hal_rx_reo_queue *qdesc = (struct hal_rx_reo_queue *)vaddr; + struct hal_rx_reo_queue_ext *ext_desc; + + memset(qdesc, 0, sizeof(*qdesc)); + + qwx_hal_reo_set_desc_hdr(&qdesc->desc_hdr, HAL_DESC_REO_OWNED, + HAL_DESC_REO_QUEUE_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0); + + qdesc->rx_queue_num = FIELD_PREP(HAL_RX_REO_QUEUE_RX_QUEUE_NUMBER, tid); + + qdesc->info0 = FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_VLD, 1) | + FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_ASSOC_LNK_DESC_COUNTER, 1) | + FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_AC, qwx_tid_to_ac(tid)); + + if (ba_window_size < 1) + ba_window_size = 1; + + if (ba_window_size == 1 && tid != HAL_DESC_REO_NON_QOS_TID) + ba_window_size++; + + if (ba_window_size == 1) + qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_RETRY, 1); + + qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_BA_WINDOW_SIZE, + ba_window_size - 1); + switch (type) { + case HAL_PN_TYPE_NONE: + case HAL_PN_TYPE_WAPI_EVEN: + case HAL_PN_TYPE_WAPI_UNEVEN: + break; + case HAL_PN_TYPE_WPA: + qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_PN_CHECK, 1) | + FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_PN_SIZE, + HAL_RX_REO_QUEUE_PN_SIZE_48); + break; + } + + /* TODO: Set Ignore ampdu flags based on BA window size and/or + * AMPDU capabilities + */ + qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_IGNORE_AMPDU_FLG, 1); + + qdesc->info1 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO1_SVLD, 0); + + if (start_seq <= 0xfff) + qdesc->info1 = FIELD_PREP(HAL_RX_REO_QUEUE_INFO1_SSN, + start_seq); + + if (tid == HAL_DESC_REO_NON_QOS_TID) + return; + + ext_desc = qdesc->ext_desc; + + /* TODO: HW queue descriptors are currently allocated for max BA + * window size for all QOS TIDs so that same descriptor can be used + * later when ADDBA request is received. 
This should be changed to + * allocate HW queue descriptors based on BA window size being + * negotiated (0 for non BA cases), and reallocate when BA window + * size changes and also send WMI message to FW to change the REO + * queue descriptor in Rx peer entry as part of dp_rx_tid_update. + */ + memset(ext_desc, 0, sizeof(*ext_desc)); + qwx_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED, + HAL_DESC_REO_QUEUE_EXT_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1); + ext_desc++; + memset(ext_desc, 0, sizeof(*ext_desc)); + qwx_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED, + HAL_DESC_REO_QUEUE_EXT_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2); + ext_desc++; + memset(ext_desc, 0, sizeof(*ext_desc)); + qwx_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED, + HAL_DESC_REO_QUEUE_EXT_DESC, REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3); +} + +void +qwx_dp_reo_cmd_free(struct qwx_dp *dp, void *ctx, + enum hal_reo_cmd_status status) +{ + struct qwx_softc *sc = dp->sc; + struct dp_rx_tid *rx_tid = ctx; + + if (status != HAL_REO_CMD_SUCCESS) + printf("%s: failed to flush rx tid hw desc, tid %d status %d\n", + sc->sc_dev.dv_xname, rx_tid->tid, status); + + if (rx_tid->mem) { + qwx_dmamem_free(sc->sc_dmat, rx_tid->mem); + rx_tid->mem = NULL; + rx_tid->vaddr = NULL; + rx_tid->paddr = 0ULL; + rx_tid->size = 0; + } +} + +void +qwx_dp_reo_cache_flush(struct qwx_softc *sc, struct dp_rx_tid *rx_tid) +{ + struct ath11k_hal_reo_cmd cmd = {0}; + unsigned long tot_desc_sz, desc_sz; + int ret; + + tot_desc_sz = rx_tid->size; + desc_sz = qwx_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID); + + while (tot_desc_sz > desc_sz) { + tot_desc_sz -= desc_sz; + cmd.addr_lo = (rx_tid->paddr + tot_desc_sz) & 0xffffffff; + cmd.addr_hi = rx_tid->paddr >> 32; + ret = qwx_dp_tx_send_reo_cmd(sc, rx_tid, + HAL_REO_CMD_FLUSH_CACHE, &cmd, NULL); + if (ret) { + printf("%s: failed to send HAL_REO_CMD_FLUSH_CACHE, " + "tid %d (%d)\n", sc->sc_dev.dv_xname, rx_tid->tid, + ret); + } + } + + memset(&cmd, 0, sizeof(cmd)); + cmd.addr_lo = rx_tid->paddr & 0xffffffff; + cmd.addr_hi = rx_tid->paddr >> 32; + cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS; + ret = qwx_dp_tx_send_reo_cmd(sc, rx_tid, HAL_REO_CMD_FLUSH_CACHE, + &cmd, qwx_dp_reo_cmd_free); + if (ret) { + printf("%s: failed to send HAL_REO_CMD_FLUSH_CACHE cmd, " + "tid %d (%d)\n", sc->sc_dev.dv_xname, rx_tid->tid, ret); + if (rx_tid->mem) { + qwx_dmamem_free(sc->sc_dmat, rx_tid->mem); + rx_tid->mem = NULL; + rx_tid->vaddr = NULL; + rx_tid->paddr = 0ULL; + rx_tid->size = 0; + } + } +} + +void +qwx_dp_rx_tid_del_func(struct qwx_dp *dp, void *ctx, + enum hal_reo_cmd_status status) +{ + struct qwx_softc *sc = dp->sc; + struct dp_rx_tid *rx_tid = ctx; + struct dp_reo_cache_flush_elem *elem, *tmp; + time_t now; + + if (status == HAL_REO_CMD_DRAIN) { + goto free_desc; + } else if (status != HAL_REO_CMD_SUCCESS) { + /* Shouldn't happen! Cleanup in case of other failure? 
*/ + printf("%s: failed to delete rx tid %d hw descriptor %d\n", + sc->sc_dev.dv_xname, rx_tid->tid, status); + return; + } + + elem = malloc(sizeof(*elem), M_DEVBUF, M_ZERO | M_NOWAIT); + if (!elem) + goto free_desc; + + now = gettime(); + elem->ts = now; + memcpy(&elem->data, rx_tid, sizeof(*rx_tid)); +#ifdef notyet + spin_lock_bh(&dp->reo_cmd_lock); +#endif + TAILQ_INSERT_TAIL(&dp->reo_cmd_cache_flush_list, elem, entry); + dp->reo_cmd_cache_flush_count++; + + /* Flush and invalidate aged REO desc from HW cache */ + TAILQ_FOREACH_SAFE(elem, &dp->reo_cmd_cache_flush_list, entry, tmp) { + if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD || + now < elem->ts + DP_REO_DESC_FREE_TIMEOUT_MS) { + TAILQ_REMOVE(&dp->reo_cmd_cache_flush_list, elem, entry); + dp->reo_cmd_cache_flush_count--; +#ifdef notyet + spin_unlock_bh(&dp->reo_cmd_lock); +#endif + qwx_dp_reo_cache_flush(sc, &elem->data); + free(elem, M_DEVBUF, sizeof(*elem)); +#ifdef notyet + spin_lock_bh(&dp->reo_cmd_lock); +#endif + } + } +#ifdef notyet + spin_unlock_bh(&dp->reo_cmd_lock); +#endif + return; +free_desc: + if (rx_tid->mem) { + qwx_dmamem_free(sc->sc_dmat, rx_tid->mem); + rx_tid->mem = NULL; + rx_tid->vaddr = NULL; + rx_tid->paddr = 0ULL; + rx_tid->size = 0; + } +} + +void +qwx_peer_rx_tid_delete(struct qwx_softc *sc, struct ath11k_peer *peer, + uint8_t tid) +{ + struct ath11k_hal_reo_cmd cmd = {0}; + struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; + int ret; + + if (!rx_tid->active) + return; + + rx_tid->active = 0; + + cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; + cmd.addr_lo = rx_tid->paddr & 0xffffffff; + cmd.addr_hi = rx_tid->paddr >> 32; + cmd.upd0 |= HAL_REO_CMD_UPD0_VLD; + ret = qwx_dp_tx_send_reo_cmd(sc, rx_tid, HAL_REO_CMD_UPDATE_RX_QUEUE, + &cmd, qwx_dp_rx_tid_del_func); + if (ret && ret != ESHUTDOWN) { + printf("%s: failed to send " + "HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n", + sc->sc_dev.dv_xname, tid, ret); + } + + if (rx_tid->mem) { + qwx_dmamem_free(sc->sc_dmat, rx_tid->mem); + rx_tid->mem = NULL; + rx_tid->vaddr = NULL; + rx_tid->paddr = 0ULL; + rx_tid->size = 0; + } +} + +int +qwx_peer_rx_tid_reo_update(struct qwx_softc *sc, struct ath11k_peer *peer, + struct dp_rx_tid *rx_tid, uint32_t ba_win_sz, uint16_t ssn, + int update_ssn) +{ + struct ath11k_hal_reo_cmd cmd = {0}; + int ret; + + cmd.addr_lo = rx_tid->paddr & 0xffffffff; + cmd.addr_hi = rx_tid->paddr >> 32; + cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; + cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE; + cmd.ba_window_size = ba_win_sz; + + if (update_ssn) { + cmd.upd0 |= HAL_REO_CMD_UPD0_SSN; + cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn); + } + + ret = qwx_dp_tx_send_reo_cmd(sc, rx_tid, HAL_REO_CMD_UPDATE_RX_QUEUE, + &cmd, NULL); + if (ret) { + printf("%s: failed to update rx tid queue, tid %d (%d)\n", + sc->sc_dev.dv_xname, rx_tid->tid, ret); + return ret; + } + + rx_tid->ba_win_sz = ba_win_sz; + + return 0; +} + +void +qwx_dp_rx_tid_mem_free(struct qwx_softc *sc, struct ieee80211_node *ni, + int vdev_id, uint8_t tid) +{ + struct qwx_node *nq = (struct qwx_node *)ni; + struct ath11k_peer *peer = &nq->peer; + struct dp_rx_tid *rx_tid; +#ifdef notyet + spin_lock_bh(&ab->base_lock); +#endif + rx_tid = &peer->rx_tid[tid]; + + if (rx_tid->mem) { + qwx_dmamem_free(sc->sc_dmat, rx_tid->mem); + rx_tid->mem = NULL; + rx_tid->vaddr = NULL; + rx_tid->paddr = 0ULL; + rx_tid->size = 0; + } + + rx_tid->active = 0; +#ifdef notyet + spin_unlock_bh(&ab->base_lock); +#endif +} + +int +qwx_peer_rx_tid_setup(struct qwx_softc *sc, struct ieee80211_node *ni, + int 
vdev_id, int pdev_id, uint8_t tid, uint32_t ba_win_sz, uint16_t ssn, + enum hal_pn_type pn_type) +{ + struct qwx_node *nq = (struct qwx_node *)ni; + struct ath11k_peer *peer = &nq->peer; + struct dp_rx_tid *rx_tid; + uint32_t hw_desc_sz; + void *vaddr; + uint64_t paddr; + int ret; +#ifdef notyet + spin_lock_bh(&ab->base_lock); +#endif + rx_tid = &peer->rx_tid[tid]; + /* Update the tid queue if it is already setup */ + if (rx_tid->active) { + paddr = rx_tid->paddr; + ret = qwx_peer_rx_tid_reo_update(sc, peer, rx_tid, + ba_win_sz, ssn, 1); +#ifdef notyet + spin_unlock_bh(&ab->base_lock); +#endif + if (ret) { + printf("%s: failed to update reo for peer %s " + "rx tid %d\n: %d", sc->sc_dev.dv_xname, + ether_sprintf(ni->ni_macaddr), tid, ret); + return ret; + } + + ret = qwx_wmi_peer_rx_reorder_queue_setup(sc, vdev_id, + pdev_id, ni->ni_macaddr, paddr, tid, 1, ba_win_sz); + if (ret) + printf("%s: failed to send wmi rx reorder queue " + "for peer %s tid %d: %d\n", sc->sc_dev.dv_xname, + ether_sprintf(ni->ni_macaddr), tid, ret); + return ret; + } + + rx_tid->tid = tid; + + rx_tid->ba_win_sz = ba_win_sz; + + /* TODO: Optimize the memory allocation for qos tid based on + * the actual BA window size in REO tid update path. + */ + if (tid == HAL_DESC_REO_NON_QOS_TID) + hw_desc_sz = qwx_hal_reo_qdesc_size(ba_win_sz, tid); + else + hw_desc_sz = qwx_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid); + + rx_tid->mem = qwx_dmamem_alloc(sc->sc_dmat, hw_desc_sz, + HAL_LINK_DESC_ALIGN); + if (rx_tid->mem == NULL) { +#ifdef notyet + spin_unlock_bh(&ab->base_lock); +#endif + return ENOMEM; + } + + vaddr = QWX_DMA_KVA(rx_tid->mem); + + qwx_hal_reo_qdesc_setup(vaddr, tid, ba_win_sz, ssn, pn_type); + + paddr = QWX_DMA_DVA(rx_tid->mem); + + rx_tid->vaddr = vaddr; + rx_tid->paddr = paddr; + rx_tid->size = hw_desc_sz; + rx_tid->active = 1; +#ifdef notyet + spin_unlock_bh(&ab->base_lock); +#endif + ret = qwx_wmi_peer_rx_reorder_queue_setup(sc, vdev_id, pdev_id, + ni->ni_macaddr, paddr, tid, 1, ba_win_sz); + if (ret) { + printf("%s: failed to setup rx reorder queue for peer %s " + "tid %d: %d\n", sc->sc_dev.dv_xname, + ether_sprintf(ni->ni_macaddr), tid, ret); + qwx_dp_rx_tid_mem_free(sc, ni, vdev_id, tid); + } + + return ret; +} + +int +qwx_peer_rx_frag_setup(struct qwx_softc *sc, struct ieee80211_node *ni, + int vdev_id) +{ + struct qwx_node *nq = (struct qwx_node *)ni; + struct ath11k_peer *peer = &nq->peer; + struct dp_rx_tid *rx_tid; + int i; +#ifdef notyet + spin_lock_bh(&ab->base_lock); +#endif + for (i = 0; i <= nitems(peer->rx_tid); i++) { + rx_tid = &peer->rx_tid[i]; +#if 0 + rx_tid->ab = ab; + timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0); +#endif + } +#if 0 + peer->dp_setup_done = true; +#endif +#ifdef notyet + spin_unlock_bh(&ab->base_lock); +#endif + return 0; +} + +int +qwx_dp_peer_setup(struct qwx_softc *sc, int vdev_id, int pdev_id, + struct ieee80211_node *ni) +{ + struct qwx_node *nq = (struct qwx_node *)ni; + struct ath11k_peer *peer = &nq->peer; + uint32_t reo_dest; + int ret = 0, tid; + + /* reo_dest ring id starts from 1 unlike mac_id which starts from 0 */ + reo_dest = sc->pdev_dp.mac_id + 1; + ret = qwx_wmi_set_peer_param(sc, ni->ni_macaddr, vdev_id, pdev_id, + WMI_PEER_SET_DEFAULT_ROUTING, DP_RX_HASH_ENABLE | (reo_dest << 1)); + if (ret) { + printf("%s: failed to set default routing %d peer %s " + "vdev_id %d\n", sc->sc_dev.dv_xname, ret, + ether_sprintf(ni->ni_macaddr), vdev_id); + return ret; + } + + for (tid = 0; tid < IEEE80211_NUM_TID; tid++) { + ret = qwx_peer_rx_tid_setup(sc, 
ni, vdev_id, pdev_id, + tid, 1, 0, HAL_PN_TYPE_NONE); + if (ret) { + printf("%s: failed to setup rxd tid queue for tid %d: %d\n", + sc->sc_dev.dv_xname, tid, ret); + goto peer_clean; + } + } + + ret = qwx_peer_rx_frag_setup(sc, ni, vdev_id); + if (ret) { + printf("%s: failed to setup rx defrag context\n", + sc->sc_dev.dv_xname); + tid--; + goto peer_clean; + } + + /* TODO: Setup other peer specific resource used in data path */ + + return 0; + +peer_clean: +#ifdef notyet + spin_lock_bh(&ab->base_lock); +#endif +#if 0 + peer = ath11k_peer_find(ab, vdev_id, addr); + if (!peer) { + ath11k_warn(ab, "failed to find the peer to del rx tid\n"); + spin_unlock_bh(&ab->base_lock); + return -ENOENT; + } +#endif + for (; tid >= 0; tid--) + qwx_peer_rx_tid_delete(sc, peer, tid); +#ifdef notyet + spin_unlock_bh(&ab->base_lock); +#endif + return ret; +} + +int +qwx_mac_station_add(struct qwx_softc *sc, struct qwx_vif *arvif, + uint8_t pdev_id, struct ieee80211_node *ni) +{ + struct peer_create_params peer_param; + int ret; +#ifdef notyet + lockdep_assert_held(&ar->conf_mutex); +#endif + peer_param.vdev_id = arvif->vdev_id; + peer_param.peer_addr = ni->ni_macaddr; + peer_param.peer_type = WMI_PEER_TYPE_DEFAULT; + + ret = qwx_peer_create(sc, arvif, pdev_id, ni, &peer_param); + if (ret) { + printf("%s: Failed to add peer: %s for VDEV: %d\n", + sc->sc_dev.dv_xname, ether_sprintf(ni->ni_macaddr), + arvif->vdev_id); + return ret; + } + + DNPRINTF(QWX_D_MAC, "%s: Added peer: %s for VDEV: %d\n", __func__, + ether_sprintf(ni->ni_macaddr), arvif->vdev_id); + + ret = qwx_dp_peer_setup(sc, arvif->vdev_id, pdev_id, ni); + if (ret) { + printf("%s: failed to setup dp for peer %s on vdev %d (%d)\n", + sc->sc_dev.dv_xname, ether_sprintf(ni->ni_macaddr), + arvif->vdev_id, ret); + goto free_peer; + } + + return 0; + +free_peer: + qwx_peer_delete(sc, arvif->vdev_id, pdev_id, ni->ni_macaddr); + return ret; +} + void qwx_wmi_start_scan_init(struct qwx_softc *sc, struct scan_req_params *arg) { @@ -17940,6 +19946,85 @@ qwx_wmi_start_scan_init(struct qwx_softc *sc, struct scan_req_params *arg) IEEE80211_ADDR_COPY(arg->bssid_list[0].addr, etheranyaddr); } +int +qwx_wmi_set_peer_param(struct qwx_softc *sc, uint8_t *peer_addr, + uint32_t vdev_id, uint32_t pdev_id, uint32_t param_id, uint32_t param_val) +{ + struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id]; + struct wmi_peer_set_param_cmd *cmd; + struct mbuf *m; + int ret; + + m = qwx_wmi_alloc_mbuf(sizeof(*cmd)); + if (!m) + return ENOMEM; + + cmd = (struct wmi_peer_set_param_cmd *)(mtod(m, uint8_t *) + + sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr)); + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_SET_PARAM_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, peer_addr); + cmd->vdev_id = vdev_id; + cmd->param_id = param_id; + cmd->param_value = param_val; + + ret = qwx_wmi_cmd_send(wmi, m, WMI_PEER_SET_PARAM_CMDID); + if (ret) { + printf("%s: failed to send WMI_PEER_SET_PARAM cmd\n", + sc->sc_dev.dv_xname); + m_freem(m); + return ret; + } + + DNPRINTF(QWX_D_WMI, "%s: cmd peer set param vdev %d peer %s " + "set param %d value %d\n", __func__, vdev_id, + ether_sprintf(peer_addr), param_id, param_val); + + return 0; +} + +int +qwx_wmi_peer_rx_reorder_queue_setup(struct qwx_softc *sc, int vdev_id, + int pdev_id, uint8_t *addr, uint64_t paddr, uint8_t tid, + uint8_t ba_window_size_valid, uint32_t ba_window_size) +{ + struct qwx_pdev_wmi *wmi = &sc->wmi.wmi[pdev_id]; + struct wmi_peer_reorder_queue_setup_cmd 
*cmd; + struct mbuf *m; + int ret; + + m = qwx_wmi_alloc_mbuf(sizeof(*cmd)); + if (!m) + return ENOMEM; + + cmd = (struct wmi_peer_reorder_queue_setup_cmd *)(mtod(m, uint8_t *) + + sizeof(struct ath11k_htc_hdr) + sizeof(struct wmi_cmd_hdr)); + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_REORDER_QUEUE_SETUP_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + + IEEE80211_ADDR_COPY(cmd->peer_macaddr.addr, addr); + cmd->vdev_id = vdev_id; + cmd->tid = tid; + cmd->queue_ptr_lo = paddr & 0xffffffff; + cmd->queue_ptr_hi = paddr >> 32; + cmd->queue_no = tid; + cmd->ba_window_size_valid = ba_window_size_valid; + cmd->ba_window_size = ba_window_size; + + ret = qwx_wmi_cmd_send(wmi, m, WMI_PEER_REORDER_QUEUE_SETUP_CMDID); + if (ret) { + printf("%s: failed to send WMI_PEER_REORDER_QUEUE_SETUP\n", + sc->sc_dev.dv_xname); + m_freem(m); + } + + DNPRINTF(QWX_D_WMI, "%s: cmd peer reorder queue setup addr %s " + "vdev_id %d tid %d\n", __func__, ether_sprintf(addr), vdev_id, tid); + + return ret; +} + enum ath11k_spectral_mode qwx_spectral_get_mode(struct qwx_softc *sc) { @@ -18269,11 +20354,128 @@ qwx_scan_abort(struct qwx_softc *sc) #endif } +/* + * Find a pdev which corresponds to a given channel. + * This doesn't exactly match the semantics of the Linux driver + * but because OpenBSD does not (yet) implement multi-bss mode + * we can assume that only one PHY will be active in either the + * 2 GHz or the 5 GHz band. + */ +struct qwx_pdev * +qwx_get_pdev_for_chan(struct qwx_softc *sc, struct ieee80211_channel *chan) +{ + struct qwx_pdev *pdev; + int i; + + for (i = 0; i < sc->num_radios; i++) { + if ((sc->pdevs_active & (1 << i)) == 0) + continue; + + pdev = &sc->pdevs[i]; + if (IEEE80211_IS_CHAN_2GHZ(chan) && + (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP)) + return pdev; + if (IEEE80211_IS_CHAN_5GHZ(chan) && + (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP)) + return pdev; + } + + return NULL; +} + +void +qwx_recalculate_mgmt_rate(struct qwx_softc *sc, struct ieee80211_node *ni, + uint32_t vdev_id, uint32_t pdev_id) +{ + struct ieee80211com *ic = &sc->sc_ic; + int hw_rate_code; + uint32_t vdev_param; + int bitrate; + int ret; +#ifdef notyet + lockdep_assert_held(&ar->conf_mutex); +#endif + bitrate = ieee80211_min_basic_rate(ic); + hw_rate_code = qwx_mac_get_rate_hw_value(ic, ni, bitrate); + if (hw_rate_code < 0) { + DPRINTF("%s: bitrate not supported %d\n", + sc->sc_dev.dv_xname, bitrate); + return; + } + + vdev_param = WMI_VDEV_PARAM_MGMT_RATE; + ret = qwx_wmi_vdev_set_param_cmd(sc, vdev_id, pdev_id, + vdev_param, hw_rate_code); + if (ret) + printf("%s: failed to set mgmt tx rate\n", + sc->sc_dev.dv_xname); +#if 0 + /* For WCN6855, firmware will clear this param when vdev starts, hence + * cache it here so that we can reconfigure it once vdev starts. 
+ */ + ab->hw_rate_code = hw_rate_code; +#endif + vdev_param = WMI_VDEV_PARAM_BEACON_RATE; + ret = qwx_wmi_vdev_set_param_cmd(sc, vdev_id, pdev_id, vdev_param, + hw_rate_code); + if (ret) + printf("%s: failed to set beacon tx rate\n", + sc->sc_dev.dv_xname); +} + int qwx_auth(struct qwx_softc *sc) { - printf("%s: not implemented\n", __func__); - return ENOTSUP; + struct ieee80211com *ic = &sc->sc_ic; + struct ieee80211_node *ni = ic->ic_bss; + uint32_t param_id; + struct qwx_vif *arvif; + struct qwx_pdev *pdev; + int ret; + + arvif = TAILQ_FIRST(&sc->vif_list); + if (arvif == NULL) { + printf("%s: no vdev found\n", sc->sc_dev.dv_xname); + return EINVAL; + } + + pdev = qwx_get_pdev_for_chan(sc, ni->ni_chan); + if (pdev == NULL) { + printf("%s: no pdev found for channel %d\n", + sc->sc_dev.dv_xname, ieee80211_chan2ieee(ic, ni->ni_chan)); + return EINVAL; + } + + param_id = WMI_VDEV_PARAM_BEACON_INTERVAL; + ret = qwx_wmi_vdev_set_param_cmd(sc, arvif->vdev_id, pdev->pdev_id, + param_id, ni->ni_intval); + if (ret) { + printf("%s: failed to set beacon interval for VDEV: %d\n", + sc->sc_dev.dv_xname, arvif->vdev_id); + return ret; + } + + qwx_recalculate_mgmt_rate(sc, ni, arvif->vdev_id, pdev->pdev_id); + + ret = qwx_mac_station_add(sc, arvif, pdev->pdev_id, ni); + if (ret) + return ret; + + /* Start vdev. */ + ret = qwx_mac_vdev_start(sc, arvif, pdev->pdev_id); + if (ret) { + printf("%s: failed to start MAC for VDEV: %d\n", + sc->sc_dev.dv_xname, arvif->vdev_id); + return ret; + } + + /* + * WCN6855 firmware clears basic-rate parameters when vdev starts. + * Set it once more. + */ + qwx_recalculate_mgmt_rate(sc, ni, arvif->vdev_id, pdev->pdev_id); + + return ret; } int diff --git a/sys/dev/ic/qwxreg.h b/sys/dev/ic/qwxreg.h index 40b81cb2d..4ab0422b9 100644 --- a/sys/dev/ic/qwxreg.h +++ b/sys/dev/ic/qwxreg.h @@ -1,4 +1,4 @@ -/* $OpenBSD: qwxreg.h,v 1.1 2023/12/28 17:36:29 stsp Exp $ */ +/* $OpenBSD: qwxreg.h,v 1.2 2024/01/25 10:11:04 stsp Exp $ */ /* * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. 
@@ -2911,7 +2911,7 @@ struct wmi_vdev_start_req_arg { }; struct peer_create_params { - const uint8_t *peer_addr; + uint8_t *peer_addr; uint32_t peer_type; uint32_t vdev_id; }; @@ -8257,6 +8257,85 @@ struct hal_reo_cmd_hdr { uint32_t info0; } __packed; + +#define HAL_SRNG_DESC_LOOP_CNT 0xf0000000 + +#define HAL_REO_CMD_FLG_NEED_STATUS BIT(0) +#define HAL_REO_CMD_FLG_STATS_CLEAR BIT(1) +#define HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER BIT(2) +#define HAL_REO_CMD_FLG_FLUSH_RELEASE_BLOCKING BIT(3) +#define HAL_REO_CMD_FLG_FLUSH_NO_INVAL BIT(4) +#define HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS BIT(5) +#define HAL_REO_CMD_FLG_FLUSH_ALL BIT(6) +#define HAL_REO_CMD_FLG_UNBLK_RESOURCE BIT(7) +#define HAL_REO_CMD_FLG_UNBLK_CACHE BIT(8) + +/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* feilds */ +#define HAL_REO_CMD_UPD0_RX_QUEUE_NUM BIT(8) +#define HAL_REO_CMD_UPD0_VLD BIT(9) +#define HAL_REO_CMD_UPD0_ALDC BIT(10) +#define HAL_REO_CMD_UPD0_DIS_DUP_DETECTION BIT(11) +#define HAL_REO_CMD_UPD0_SOFT_REORDER_EN BIT(12) +#define HAL_REO_CMD_UPD0_AC BIT(13) +#define HAL_REO_CMD_UPD0_BAR BIT(14) +#define HAL_REO_CMD_UPD0_RETRY BIT(15) +#define HAL_REO_CMD_UPD0_CHECK_2K_MODE BIT(16) +#define HAL_REO_CMD_UPD0_OOR_MODE BIT(17) +#define HAL_REO_CMD_UPD0_BA_WINDOW_SIZE BIT(18) +#define HAL_REO_CMD_UPD0_PN_CHECK BIT(19) +#define HAL_REO_CMD_UPD0_EVEN_PN BIT(20) +#define HAL_REO_CMD_UPD0_UNEVEN_PN BIT(21) +#define HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE BIT(22) +#define HAL_REO_CMD_UPD0_PN_SIZE BIT(23) +#define HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG BIT(24) +#define HAL_REO_CMD_UPD0_SVLD BIT(25) +#define HAL_REO_CMD_UPD0_SSN BIT(26) +#define HAL_REO_CMD_UPD0_SEQ_2K_ERR BIT(27) +#define HAL_REO_CMD_UPD0_PN_ERR BIT(28) +#define HAL_REO_CMD_UPD0_PN_VALID BIT(29) +#define HAL_REO_CMD_UPD0_PN BIT(30) + +/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO1_* feilds */ +#define HAL_REO_CMD_UPD1_VLD BIT(16) +#define HAL_REO_CMD_UPD1_ALDC GENMASK(18, 17) +#define HAL_REO_CMD_UPD1_DIS_DUP_DETECTION BIT(19) +#define HAL_REO_CMD_UPD1_SOFT_REORDER_EN BIT(20) +#define HAL_REO_CMD_UPD1_AC GENMASK(22, 21) +#define HAL_REO_CMD_UPD1_BAR BIT(23) +#define HAL_REO_CMD_UPD1_RETRY BIT(24) +#define HAL_REO_CMD_UPD1_CHECK_2K_MODE BIT(25) +#define HAL_REO_CMD_UPD1_OOR_MODE BIT(26) +#define HAL_REO_CMD_UPD1_PN_CHECK BIT(27) +#define HAL_REO_CMD_UPD1_EVEN_PN BIT(28) +#define HAL_REO_CMD_UPD1_UNEVEN_PN BIT(29) +#define HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE BIT(30) +#define HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG BIT(31) + +/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO2_* feilds */ +#define HAL_REO_CMD_UPD2_SVLD BIT(10) +#define HAL_REO_CMD_UPD2_SSN GENMASK(22, 11) +#define HAL_REO_CMD_UPD2_SEQ_2K_ERR BIT(23) +#define HAL_REO_CMD_UPD2_PN_ERR BIT(24) + +#define HAL_REO_DEST_RING_CTRL_HASH_RING_MAP GENMASK(31, 8) + +struct ath11k_hal_reo_cmd { + uint32_t addr_lo; + uint32_t flag; + uint32_t upd0; + uint32_t upd1; + uint32_t upd2; + uint32_t pn[4]; + uint16_t rx_queue_num; + uint16_t min_rel; + uint16_t min_fwd; + uint8_t addr_hi; + uint8_t ac_list; + uint8_t blocking_idx; + uint16_t ba_window_size; + uint8_t pn_size; +}; + #define HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI GENMASK(7, 0) #define HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS BIT(8) @@ -9862,6 +9941,11 @@ struct hal_reo_desc_thresh_reached_status { * entries into this Ring has looped around the ring. 
*/ +#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0 0xDDBEEF +#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1 0xADBEEF +#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2 0xBDBEEF +#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3 0xCDBEEF + #define HAL_TX_ADDRX_EN 1 #define HAL_TX_ADDRY_EN 2 diff --git a/sys/dev/ic/qwxvar.h b/sys/dev/ic/qwxvar.h index 62773a2cf..f0afa3bb0 100644 --- a/sys/dev/ic/qwxvar.h +++ b/sys/dev/ic/qwxvar.h @@ -1,4 +1,4 @@ -/* $OpenBSD: qwxvar.h,v 1.1 2023/12/28 17:36:29 stsp Exp $ */ +/* $OpenBSD: qwxvar.h,v 1.5 2024/01/25 17:00:21 stsp Exp $ */ /* * Copyright (c) 2018-2019 The Linux Foundation. @@ -69,12 +69,12 @@ struct ath11k_hw_ring_mask { #define ATH11K_FW_DIR "qwx" #define ATH11K_BOARD_MAGIC "QCA-ATH11K-BOARD" -#define ATH11K_BOARD_API2_FILE "board-2.bin" -#define ATH11K_DEFAULT_BOARD_FILE "board.bin" -#define ATH11K_DEFAULT_CAL_FILE "caldata.bin" -#define ATH11K_AMSS_FILE "amss.bin" -#define ATH11K_M3_FILE "m3.bin" -#define ATH11K_REGDB_FILE "regdb.bin" +#define ATH11K_BOARD_API2_FILE "board-2" +#define ATH11K_DEFAULT_BOARD_FILE "board" +#define ATH11K_DEFAULT_CAL_FILE "caldata" +#define ATH11K_AMSS_FILE "amss" +#define ATH11K_M3_FILE "m3" +#define ATH11K_REGDB_FILE "regdb" #define QWX_FW_BUILD_ID_MASK "QC_IMAGE_VERSION_STRING=" @@ -214,9 +214,9 @@ struct ath11k_hw_ops { #endif void (*wmi_init_config)(struct qwx_softc *sc, struct target_resource_config *config); -#if notyet int (*mac_id_to_pdev_id)(struct ath11k_hw_params *hw, int mac_id); int (*mac_id_to_srng_id)(struct ath11k_hw_params *hw, int mac_id); +#if notyet void (*tx_mesh_enable)(struct ath11k_base *ab, struct hal_tcl_data_cmd *tcl_cmd); bool (*rx_desc_get_first_msdu)(struct hal_rx_desc *desc); @@ -645,6 +645,13 @@ struct ath11k_hal { #endif }; +enum hal_pn_type { + HAL_PN_TYPE_NONE, + HAL_PN_TYPE_WPA, + HAL_PN_TYPE_WAPI_EVEN, + HAL_PN_TYPE_WAPI_UNEVEN, +}; + enum hal_ce_desc { HAL_CE_DESC_SRC, HAL_CE_DESC_DST, @@ -839,8 +846,9 @@ struct qwx_hp_update_timer { struct dp_rx_tid { uint8_t tid; + struct qwx_dmamem *mem; uint32_t *vaddr; - bus_addr_t paddr; + uint64_t paddr; uint32_t size; uint32_t ba_win_sz; int active; @@ -1267,6 +1275,177 @@ struct dp_rxdma_ring { int bufs_max; }; +enum hal_rx_mon_status { + HAL_RX_MON_STATUS_PPDU_NOT_DONE, + HAL_RX_MON_STATUS_PPDU_DONE, + HAL_RX_MON_STATUS_BUF_DONE, +}; + +struct hal_rx_user_status { + uint32_t mcs:4, + nss:3, + ofdma_info_valid:1, + dl_ofdma_ru_start_index:7, + dl_ofdma_ru_width:7, + dl_ofdma_ru_size:8; + uint32_t ul_ofdma_user_v0_word0; + uint32_t ul_ofdma_user_v0_word1; + uint32_t ast_index; + uint32_t tid; + uint16_t tcp_msdu_count; + uint16_t udp_msdu_count; + uint16_t other_msdu_count; + uint16_t frame_control; + uint8_t frame_control_info_valid; + uint8_t data_sequence_control_info_valid; + uint16_t first_data_seq_ctrl; + uint32_t preamble_type; + uint16_t ht_flags; + uint16_t vht_flags; + uint16_t he_flags; + uint8_t rs_flags; + uint32_t mpdu_cnt_fcs_ok; + uint32_t mpdu_cnt_fcs_err; + uint32_t mpdu_fcs_ok_bitmap[8]; + uint32_t mpdu_ok_byte_count; + uint32_t mpdu_err_byte_count; +}; + +#define HAL_INVALID_PEERID 0xffff +#define VHT_SIG_SU_NSS_MASK 0x7 + +#define HAL_RX_MAX_MCS 12 +#define HAL_RX_MAX_NSS 8 + +#define HAL_TLV_STATUS_PPDU_NOT_DONE HAL_RX_MON_STATUS_PPDU_NOT_DONE +#define HAL_TLV_STATUS_PPDU_DONE HAL_RX_MON_STATUS_PPDU_DONE +#define HAL_TLV_STATUS_BUF_DONE HAL_RX_MON_STATUS_BUF_DONE + +struct hal_rx_mon_ppdu_info { + uint32_t ppdu_id; + uint32_t ppdu_ts; + uint32_t num_mpdu_fcs_ok; + uint32_t num_mpdu_fcs_err; + uint32_t preamble_type; + uint16_t 
chan_num; + uint16_t tcp_msdu_count; + uint16_t tcp_ack_msdu_count; + uint16_t udp_msdu_count; + uint16_t other_msdu_count; + uint16_t peer_id; + uint8_t rate; + uint8_t mcs; + uint8_t nss; + uint8_t bw; + uint8_t vht_flag_values1; + uint8_t vht_flag_values2; + uint8_t vht_flag_values3[4]; + uint8_t vht_flag_values4; + uint8_t vht_flag_values5; + uint16_t vht_flag_values6; + uint8_t is_stbc; + uint8_t gi; + uint8_t ldpc; + uint8_t beamformed; + uint8_t rssi_comb; + uint8_t rssi_chain_pri20[HAL_RX_MAX_NSS]; + uint8_t tid; + uint16_t ht_flags; + uint16_t vht_flags; + uint16_t he_flags; + uint16_t he_mu_flags; + uint8_t dcm; + uint8_t ru_alloc; + uint8_t reception_type; + uint64_t tsft; + uint64_t rx_duration; + uint16_t frame_control; + uint32_t ast_index; + uint8_t rs_fcs_err; + uint8_t rs_flags; + uint8_t cck_flag; + uint8_t ofdm_flag; + uint8_t ulofdma_flag; + uint8_t frame_control_info_valid; + uint16_t he_per_user_1; + uint16_t he_per_user_2; + uint8_t he_per_user_position; + uint8_t he_per_user_known; + uint16_t he_flags1; + uint16_t he_flags2; + uint8_t he_RU[4]; + uint16_t he_data1; + uint16_t he_data2; + uint16_t he_data3; + uint16_t he_data4; + uint16_t he_data5; + uint16_t he_data6; + uint32_t ppdu_len; + uint32_t prev_ppdu_id; + uint32_t device_id; + uint16_t first_data_seq_ctrl; + uint8_t monitor_direct_used; + uint8_t data_sequence_control_info_valid; + uint8_t ltf_size; + uint8_t rxpcu_filter_pass; + char rssi_chain[8][8]; + struct hal_rx_user_status userstats; +}; + +enum dp_mon_status_buf_state { + /* PPDU id matches in dst ring and status ring */ + DP_MON_STATUS_MATCH, + /* status ring dma is not done */ + DP_MON_STATUS_NO_DMA, + /* status ring is lagging, reap status ring */ + DP_MON_STATUS_LAG, + /* status ring is leading, reap dst ring and drop */ + DP_MON_STATUS_LEAD, + /* replinish monitor status ring */ + DP_MON_STATUS_REPLINISH, +}; + +struct qwx_pdev_mon_stats { + uint32_t status_ppdu_state; + uint32_t status_ppdu_start; + uint32_t status_ppdu_end; + uint32_t status_ppdu_compl; + uint32_t status_ppdu_start_mis; + uint32_t status_ppdu_end_mis; + uint32_t status_ppdu_done; + uint32_t dest_ppdu_done; + uint32_t dest_mpdu_done; + uint32_t dest_mpdu_drop; + uint32_t dup_mon_linkdesc_cnt; + uint32_t dup_mon_buf_cnt; + uint32_t dest_mon_stuck; + uint32_t dest_mon_not_reaped; +}; + +struct qwx_mon_data { + struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX]; + struct hal_rx_mon_ppdu_info mon_ppdu_info; + + uint32_t mon_ppdu_status; + uint32_t mon_last_buf_cookie; + uint64_t mon_last_linkdesc_paddr; + uint16_t chan_noise_floor; + bool hold_mon_dst_ring; + enum dp_mon_status_buf_state buf_state; + bus_addr_t mon_status_paddr; + struct dp_full_mon_mpdu *mon_mpdu; +#ifdef notyet + struct hal_sw_mon_ring_entries sw_mon_entries; +#endif + struct qwx_pdev_mon_stats rx_mon_stats; +#ifdef notyet + /* lock for monitor data */ + spinlock_t mon_lock; + struct sk_buff_head rx_status_q; +#endif +}; + + #define MAX_RXDMA_PER_PDEV 2 struct qwx_pdev_dp { @@ -1285,8 +1464,8 @@ struct qwx_pdev_dp { struct dp_rxdma_ring rx_mon_status_refill_ring[MAX_RXDMA_PER_PDEV]; #if 0 struct ieee80211_rx_status rx_status; - struct ath11k_mon_data mon_data; #endif + struct qwx_mon_data mon_data; }; struct qwx_vif { @@ -1341,8 +1520,8 @@ struct qwx_vif { bool wpaie_present; bool bcca_zero_sent; bool do_not_send_tmpl; + struct ieee80211_channel *chan; #if 0 - struct ieee80211_chanctx_conf chanctx; struct ath11k_arp_ns_offload arp_ns_offload; struct ath11k_rekey_data rekey_data; #endif @@ -1359,6 
+1538,22 @@ struct qwx_survey_info { uint64_t time_busy; }; +#define ATH11K_IRQ_NUM_MAX 52 +#define ATH11K_EXT_IRQ_NUM_MAX 16 + +struct qwx_ext_irq_grp { + struct qwx_softc *sc; + uint32_t irqs[ATH11K_EXT_IRQ_NUM_MAX]; + uint32_t num_irq; + uint32_t grp_id; + uint64_t timestamp; +#if 0 + bool napi_enabled; + struct napi_struct napi; + struct net_device napi_ndev; +#endif +}; + struct qwx_softc { struct device sc_dev; struct ieee80211com sc_ic; @@ -1410,6 +1605,8 @@ struct qwx_softc { enum ath11k_crypt_mode crypto_mode; enum ath11k_hw_txrx_mode frame_mode; + struct qwx_ext_irq_grp ext_irq_grp[ATH11K_EXT_IRQ_GRP_NUM_MAX]; + uint16_t qmi_txn_id; int qmi_cal_done; struct qwx_qmi_ce_cfg qmi_ce_cfg; @@ -1426,6 +1623,9 @@ struct qwx_softc { uint32_t allocated_vdev_map; uint32_t free_vdev_map; int num_peers; + int peer_mapped; + int peer_delete_done; + int vdev_setup_done; struct qwx_dbring_cap *db_caps; uint32_t num_db_cap; @@ -1443,7 +1643,7 @@ struct qwx_softc { uint32_t pdev_id; } target_pdev_ids[MAX_RADIOS]; uint8_t target_pdev_count; - struct qwx_pdev *pdevs_active[MAX_RADIOS]; + uint32_t pdevs_active; int pdevs_macaddr_valid; struct ath11k_hal_reg_capabilities_ext hal_reg_cap[MAX_RADIOS]; @@ -1470,15 +1670,19 @@ struct qwx_softc { enum ath11k_hw_rev sc_hw_rev; struct qwx_device_id id; char sc_bus_str[4]; /* "pci" or "ahb" */ + int num_msivec; uint32_t msi_addr_lo; uint32_t msi_addr_hi; uint32_t msi_data_start; const struct qwx_msi_config *msi_cfg; + uint32_t msi_ce_irqmask; struct qmi_wlanfw_request_mem_ind_msg_v01 *sc_req_mem_ind; }; -int qwx_intr(struct qwx_softc *); +int qwx_ce_intr(void *); +int qwx_ext_intr(void *); +int qwx_dp_service_srng(struct qwx_softc *, int); int qwx_init_hw_params(struct qwx_softc *); int qwx_attach(struct qwx_softc *); @@ -1495,8 +1699,46 @@ void qwx_init_task(void *); int qwx_newstate(struct ieee80211com *, enum ieee80211_state, int); void qwx_newstate_task(void *); +struct ath11k_peer { +#if 0 + struct list_head list; + struct ieee80211_sta *sta; +#endif + int vdev_id; +#if 0 + u8 addr[ETH_ALEN]; +#endif + int peer_id; + uint16_t ast_hash; + uint8_t pdev_id; + uint16_t hw_peer_id; +#if 0 + /* protected by ab->data_lock */ + struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1]; +#endif + struct dp_rx_tid rx_tid[IEEE80211_NUM_TID + 1]; +#if 0 + /* peer id based rhashtable list pointer */ + struct rhash_head rhash_id; + /* peer addr based rhashtable list pointer */ + struct rhash_head rhash_addr; + + /* Info used in MMIC verification of + * RX fragments + */ + struct crypto_shash *tfm_mmic; + u8 mcast_keyidx; + u8 ucast_keyidx; + u16 sec_type; + u16 sec_type_grp; + bool is_authorized; + bool dp_setup_done; +#endif +}; + struct qwx_node { struct ieee80211_node ni; + struct ath11k_peer peer; }; struct ieee80211_node *qwx_node_alloc(struct ieee80211com *); @@ -1506,6 +1748,7 @@ void qwx_qrtr_recv_msg(struct qwx_softc *, struct mbuf *); int qwx_hal_srng_init(struct qwx_softc *); int qwx_ce_alloc_pipes(struct qwx_softc *); +void qwx_ce_free_pipes(struct qwx_softc *); void qwx_ce_rx_post_buf(struct qwx_softc *); void qwx_ce_get_shadow_config(struct qwx_softc *, uint32_t **, uint32_t *); @@ -1521,3 +1764,11 @@ qwx_ce_get_attr_flags(struct qwx_softc *sc, int ce_id) KASSERT(ce_id < sc->hw_params.ce_count); return sc->hw_params.host_ce_config[ce_id].flags; } + +static inline enum ieee80211_edca_ac qwx_tid_to_ac(uint32_t tid) +{ + return (((tid == 0) || (tid == 3)) ? EDCA_AC_BE : + ((tid == 1) || (tid == 2)) ? EDCA_AC_BK : + ((tid == 4) || (tid == 5)) ? 
EDCA_AC_VI : + EDCA_AC_VO); +} diff --git a/sys/dev/pci/drm/amd/amdgpu/amdgpu_device.c b/sys/dev/pci/drm/amd/amdgpu/amdgpu_device.c index 5dc2ffd13..ad0e8f4f0 100644 --- a/sys/dev/pci/drm/amd/amdgpu/amdgpu_device.c +++ b/sys/dev/pci/drm/amd/amdgpu/amdgpu_device.c @@ -3653,10 +3653,12 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->rmmio_base = pci_resource_start(adev->pdev, 2); adev->rmmio_size = pci_resource_len(adev->pdev, 2); } +#endif for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++) atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN); +#ifdef __linux__ adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size); if (!adev->rmmio) return -ENOMEM; diff --git a/sys/dev/pci/drm/amd/amdgpu/amdgpu_drv.c b/sys/dev/pci/drm/amd/amdgpu/amdgpu_drv.c index 8e2b7deb1..46dde765f 100644 --- a/sys/dev/pci/drm/amd/amdgpu/amdgpu_drv.c +++ b/sys/dev/pci/drm/amd/amdgpu/amdgpu_drv.c @@ -3478,63 +3478,9 @@ amdgpu_attachhook(struct device *self) struct drm_gem_object *obj; struct amdgpu_bo *rbo; - /* from amdgpu_driver_load_kms() */ - - /* amdgpu_device_init should report only fatal error - * like memory allocation failure or iomapping failure, - * or memory manager initialization failure, it must - * properly initialize the GPU MC controller and permit - * VRAM allocation - */ - r = amdgpu_device_init(adev, adev->flags); - if (r) { - dev_err(&dev->pdev->dev, "Fatal error during GPU init\n"); + r = amdgpu_driver_load_kms(adev, adev->flags); + if (r) goto out; - } - - adev->pm.rpm_mode = AMDGPU_RUNPM_NONE; - if (amdgpu_device_supports_px(dev) && - (amdgpu_runtime_pm != 0)) { /* enable PX as runtime mode */ - adev->pm.rpm_mode = AMDGPU_RUNPM_PX; - dev_info(adev->dev, "Using ATPX for runtime pm\n"); - } else if (amdgpu_device_supports_boco(dev) && - (amdgpu_runtime_pm != 0)) { /* enable boco as runtime mode */ - adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO; - dev_info(adev->dev, "Using BOCO for runtime pm\n"); - } else if (amdgpu_device_supports_baco(dev) && - (amdgpu_runtime_pm != 0)) { - switch (adev->asic_type) { - case CHIP_VEGA20: - case CHIP_ARCTURUS: - /* enable BACO as runpm mode if runpm=1 */ - if (amdgpu_runtime_pm > 0) - adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; - break; - case CHIP_VEGA10: - /* enable BACO as runpm mode if noretry=0 */ - if (!adev->gmc.noretry) - adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; - break; - default: - /* enable BACO as runpm mode on CI+ */ - adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; - break; - } - - if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) - dev_info(adev->dev, "Using BACO for runtime pm\n"); - } - - /* Call ACPI methods: require modeset init - * but failure is not fatal - */ - - acpi_status = amdgpu_acpi_init(adev); - if (acpi_status) - dev_dbg(dev->dev, "Error during ACPI methods call\n"); - - if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_LOAD)) - DRM_WARN("smart shift update failed\n"); /* * 1. don't init fbdev on hw without DCE diff --git a/sys/dev/pci/drm/amd/amdgpu/amdgpu_kms.c b/sys/dev/pci/drm/amd/amdgpu/amdgpu_kms.c index 7e82a802c..e47abc6c4 100644 --- a/sys/dev/pci/drm/amd/amdgpu/amdgpu_kms.c +++ b/sys/dev/pci/drm/amd/amdgpu/amdgpu_kms.c @@ -69,7 +69,6 @@ void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev) mutex_unlock(&mgpu_info.mutex); } -#ifdef __linux__ /** * amdgpu_driver_unload_kms - Main unload function for KMS. 
* @@ -96,7 +95,6 @@ void amdgpu_driver_unload_kms(struct drm_device *dev) amdgpu_acpi_fini(adev); amdgpu_device_fini_hw(adev); } -#endif /* __linux__ */ void amdgpu_register_gpu_instance(struct amdgpu_device *adev) { @@ -123,7 +121,6 @@ void amdgpu_register_gpu_instance(struct amdgpu_device *adev) mutex_unlock(&mgpu_info.mutex); } -#ifdef __linux__ /** * amdgpu_driver_load_kms - Main load function for KMS. * @@ -202,7 +199,6 @@ out: return r; } -#endif /* __linux__ */ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info, struct drm_amdgpu_query_fw *query_fw, diff --git a/sys/dev/pci/if_qwx_pci.c b/sys/dev/pci/if_qwx_pci.c index e11e6873b..5c560dc9f 100644 --- a/sys/dev/pci/if_qwx_pci.c +++ b/sys/dev/pci/if_qwx_pci.c @@ -1,4 +1,4 @@ -/* $OpenBSD: if_qwx_pci.c,v 1.2 2024/01/11 09:52:19 stsp Exp $ */ +/* $OpenBSD: if_qwx_pci.c,v 1.4 2024/01/25 17:00:21 stsp Exp $ */ /* * Copyright 2023 Stefan Sperling @@ -330,6 +330,8 @@ struct qwx_mhi_newstate { int queued; }; +#define QWX_NUM_MSI_VEC 32 + struct qwx_pci_softc { struct qwx_softc sc_sc; pci_chipset_tag_t sc_pc; @@ -337,7 +339,10 @@ struct qwx_pci_softc { int sc_cap_off; int sc_msi_off; pcireg_t sc_msi_cap; - void *sc_ih; + void *sc_ih[QWX_NUM_MSI_VEC]; + char sc_ivname[QWX_NUM_MSI_VEC][16]; + struct qwx_ext_irq_grp ext_irq_grp[ATH11K_EXT_IRQ_GRP_NUM_MAX]; + int mhi_irq[2]; bus_space_tag_t sc_st; bus_space_handle_t sc_sh; bus_addr_t sc_map; @@ -414,6 +419,7 @@ void qwx_pcic_write32(struct qwx_softc *, uint32_t, uint32_t); void qwx_pcic_ext_irq_enable(struct qwx_softc *); void qwx_pcic_ext_irq_disable(struct qwx_softc *); +int qwx_pcic_config_irq(struct qwx_softc *, struct pci_attach_args *); int qwx_pci_start(struct qwx_softc *); void qwx_pci_stop(struct qwx_softc *); @@ -475,6 +481,8 @@ void qwx_pci_intr_data_event_tx(struct qwx_pci_softc *, struct qwx_mhi_ring_element *); int qwx_pci_intr_data_event(struct qwx_pci_softc *, struct qwx_pci_event_ring *); +int qwx_pci_intr_mhi_ctrl(void *); +int qwx_pci_intr_mhi_data(void *); int qwx_pci_intr(void *); struct qwx_pci_ops { @@ -555,6 +563,89 @@ const struct qwx_msi_config qwx_msi_config_one_msi = { }, }; +const struct qwx_msi_config qwx_msi_config[] = { + { + .total_vectors = 32, + .total_users = 4, + .users = (struct qwx_msi_user[]) { + { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, + { .name = "CE", .num_vectors = 10, .base_vector = 3 }, + { .name = "WAKE", .num_vectors = 1, .base_vector = 13 }, + { .name = "DP", .num_vectors = 18, .base_vector = 14 }, + }, + .hw_rev = ATH11K_HW_QCA6390_HW20, + }, + { + .total_vectors = 16, + .total_users = 3, + .users = (struct qwx_msi_user[]) { + { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, + { .name = "CE", .num_vectors = 5, .base_vector = 3 }, + { .name = "DP", .num_vectors = 8, .base_vector = 8 }, + }, + .hw_rev = ATH11K_HW_QCN9074_HW10, + }, + { + .total_vectors = 32, + .total_users = 4, + .users = (struct qwx_msi_user[]) { + { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, + { .name = "CE", .num_vectors = 10, .base_vector = 3 }, + { .name = "WAKE", .num_vectors = 1, .base_vector = 13 }, + { .name = "DP", .num_vectors = 18, .base_vector = 14 }, + }, + .hw_rev = ATH11K_HW_WCN6855_HW20, + }, + { + .total_vectors = 32, + .total_users = 4, + .users = (struct qwx_msi_user[]) { + { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, + { .name = "CE", .num_vectors = 10, .base_vector = 3 }, + { .name = "WAKE", .num_vectors = 1, .base_vector = 13 }, + { .name = "DP", .num_vectors = 18, .base_vector = 14 }, + }, + 
.hw_rev = ATH11K_HW_WCN6855_HW21, + }, + { + .total_vectors = 28, + .total_users = 2, + .users = (struct qwx_msi_user[]) { + { .name = "CE", .num_vectors = 10, .base_vector = 0 }, + { .name = "DP", .num_vectors = 18, .base_vector = 10 }, + }, + .hw_rev = ATH11K_HW_WCN6750_HW10, + }, +}; + +int +qwx_pcic_init_msi_config(struct qwx_softc *sc) +{ + const struct qwx_msi_config *msi_config; + int i; + + if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags)) { + sc->msi_cfg = &qwx_msi_config_one_msi; + return 0; + } + for (i = 0; i < nitems(qwx_msi_config); i++) { + msi_config = &qwx_msi_config[i]; + + if (msi_config->hw_rev == sc->sc_hw_rev) + break; + } + + if (i == nitems(qwx_msi_config)) { + printf("%s: failed to fetch msi config, " + "unsupported hw version: 0x%x\n", + sc->sc_dev.dv_xname, sc->sc_hw_rev); + return EINVAL; + } + + sc->msi_cfg = msi_config; + return 0; +} + int qwx_pci_alloc_msi(struct qwx_softc *sc) { @@ -562,11 +653,6 @@ qwx_pci_alloc_msi(struct qwx_softc *sc) uint64_t addr; pcireg_t data; - /* - * OpenBSD only supports one MSI vector at present. - * Mulitple vectors are only supported with MSI-X. - */ - if (psc->sc_msi_cap & PCI_MSI_MC_C64) { uint64_t addr_hi; pcireg_t addr_lo; @@ -592,7 +678,6 @@ qwx_pci_alloc_msi(struct qwx_softc *sc) DPRINTF("%s: MSI addr: 0x%llx MSI data: 0x%x\n", sc->sc_dev.dv_xname, addr, data); - sc->msi_cfg = &qwx_msi_config_one_msi; return 0; } @@ -661,6 +746,7 @@ qwx_pcic_get_user_msi_vector(struct qwx_softc *sc, char *user_name, DPRINTF("%s: Failed to find MSI assignment for %s\n", sc->sc_dev.dv_xname, user_name); + return EINVAL; } @@ -732,15 +818,31 @@ qwx_pci_attach(struct device *parent, struct device *self, void *aux) sc->mem = psc->sc_map; - if (pci_intr_map_msi(pa, &ih)) { - printf(": can't map interrupt\n"); - return; + sc->num_msivec = 32; + if (pci_intr_enable_msivec(pa, sc->num_msivec) != 0) { + sc->num_msivec = 1; + if (pci_intr_map_msi(pa, &ih) != 0) { + printf(": can't map interrupt\n"); + return; + } + clear_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags); + } else { + if (pci_intr_map_msivec(pa, 0, &ih) != 0 && + pci_intr_map_msi(pa, &ih) != 0) { + printf(": can't map interrupt\n"); + return; + } + set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags); + psc->mhi_irq[MHI_ER_CTRL] = 1; + psc->mhi_irq[MHI_ER_DATA] = 2; } intrstr = pci_intr_string(psc->sc_pc, ih); - psc->sc_ih = pci_intr_establish(psc->sc_pc, ih, IPL_NET, - qwx_pci_intr, psc, sc->sc_dev.dv_xname); - if (psc->sc_ih == NULL) { + snprintf(psc->sc_ivname[0], sizeof(psc->sc_ivname[0]), "%s:bhi", + sc->sc_dev.dv_xname); + psc->sc_ih[0] = pci_intr_establish(psc->sc_pc, ih, IPL_NET, + qwx_pci_intr, psc, psc->sc_ivname[0]); + if (psc->sc_ih[0] == NULL) { printf(": can't establish interrupt"); if (intrstr != NULL) printf(" at %s", intrstr); @@ -749,6 +851,46 @@ qwx_pci_attach(struct device *parent, struct device *self, void *aux) } printf(": %s\n", intrstr); + if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags)) { + int msivec; + + msivec = psc->mhi_irq[MHI_ER_CTRL]; + if (pci_intr_map_msivec(pa, msivec, &ih) != 0 && + pci_intr_map_msi(pa, &ih) != 0) { + printf(": can't map interrupt\n"); + return; + } + snprintf(psc->sc_ivname[msivec], + sizeof(psc->sc_ivname[msivec]), + "%s:mhic", sc->sc_dev.dv_xname); + psc->sc_ih[msivec] = pci_intr_establish(psc->sc_pc, ih, + IPL_NET, qwx_pci_intr_mhi_ctrl, psc, + psc->sc_ivname[msivec]); + if (psc->sc_ih[msivec] == NULL) { + printf("%s: can't establish interrupt\n", + sc->sc_dev.dv_xname); + return; + } + + msivec = 
psc->mhi_irq[MHI_ER_DATA]; + if (pci_intr_map_msivec(pa, msivec, &ih) != 0 && + pci_intr_map_msi(pa, &ih) != 0) { + printf(": can't map interrupt\n"); + return; + } + snprintf(psc->sc_ivname[msivec], + sizeof(psc->sc_ivname[msivec]), + "%s:mhid", sc->sc_dev.dv_xname); + psc->sc_ih[msivec] = pci_intr_establish(psc->sc_pc, ih, + IPL_NET, qwx_pci_intr_mhi_data, psc, + psc->sc_ivname[msivec]); + if (psc->sc_ih[msivec] == NULL) { + printf("%s: can't establish interrupt\n", + sc->sc_dev.dv_xname); + return; + } + } + pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0); switch (PCI_PRODUCT(pa->pa_id)) { @@ -810,16 +952,10 @@ unsupported_wcn6855_soc: /* register PCI ops */ psc->sc_pci_ops = pci_ops; - /* init MSI config */ - clear_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags); - -#if notyet - ret = ath11k_pcic_init_msi_config(ab); - if (ret) { - ath11k_err(ab, "failed to init msi config: %d\n", ret); + error = qwx_pcic_init_msi_config(sc); + if (error) goto err_pci_free_region; - } -#endif + error = qwx_pci_alloc_msi(sc); if (error) { printf("%s: failed to enable msi: %d\n", sc->sc_dev.dv_xname, @@ -891,17 +1027,17 @@ unsupported_wcn6855_soc: sc->sc_nswq = taskq_create("qwxns", 1, IPL_NET, 0); if (sc->sc_nswq == NULL) - goto err_hal_srng_deinit; + goto err_ce_free; qwx_pci_init_qmi_ce_config(sc); -#if notyet - ret = ath11k_pcic_config_irq(ab); - if (ret) { - ath11k_err(ab, "failed to config irq: %d\n", ret); + error = qwx_pcic_config_irq(sc, pa); + if (error) { + printf("%s: failed to config irq: %d\n", + sc->sc_dev.dv_xname, error); goto err_ce_free; } - +#if notyet ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0)); if (ret) { ath11k_err(ab, "failed to set irq affinity %d\n", ret); @@ -978,6 +1114,8 @@ unsupported_wcn6855_soc: config_mountroot(self, qwx_pci_attach_hook); return; +err_ce_free: + qwx_ce_free_pipes(sc); err_hal_srng_deinit: err_mhi_unregister: err_pci_free_cmd_ring: @@ -997,7 +1135,7 @@ err_pci_free_chan_ctxt: psc->chan_ctxt = NULL; err_pci_disable_msi: err_pci_free_region: - pci_intr_disestablish(psc->sc_pc, psc->sc_ih); + pci_intr_disestablish(psc->sc_pc, psc->sc_ih[0]); return; } @@ -1007,9 +1145,9 @@ qwx_pci_detach(struct device *self, int flags) struct qwx_pci_softc *psc = (struct qwx_pci_softc *)self; struct qwx_softc *sc = &psc->sc_sc; - if (psc->sc_ih) { - pci_intr_disestablish(psc->sc_pc, psc->sc_ih); - psc->sc_ih = NULL; + if (psc->sc_ih[0]) { + pci_intr_disestablish(psc->sc_pc, psc->sc_ih[0]); + psc->sc_ih[0] = NULL; } qwx_detach(sc); @@ -1289,12 +1427,12 @@ qwx_pci_alloc_event_rings(struct qwx_pci_softc *psc) int ret; ret = qwx_pci_alloc_event_ring(sc, &psc->event_rings[0], - MHI_ER_CTRL, 0, 0, 32); + MHI_ER_CTRL, psc->mhi_irq[MHI_ER_CTRL], 0, 32); if (ret) goto fail; ret = qwx_pci_alloc_event_ring(sc, &psc->event_rings[1], - MHI_ER_DATA, 0, 1, 256); + MHI_ER_DATA, psc->mhi_irq[MHI_ER_DATA], 1, 256); if (ret) goto fail; @@ -1449,7 +1587,8 @@ qwx_pcic_ce_irq_enable(struct qwx_softc *sc, uint16_t ce_id) if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags)) return; - printf("%s not implemented\n", __func__); + /* OpenBSD PCI stack does not yet implement MSI interrupt masking. */ + sc->msi_ce_irqmask |= (1U << ce_id); } void @@ -1461,7 +1600,145 @@ qwx_pcic_ce_irq_disable(struct qwx_softc *sc, uint16_t ce_id) if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags)) return; - printf("%s not implemented\n", __func__); + /* OpenBSD PCI stack does not yet implement MSI interrupt masking. 
*/ + sc->msi_ce_irqmask &= ~(1U << ce_id); +} + +void +qwx_pcic_ext_grp_disable(struct qwx_ext_irq_grp *irq_grp) +{ + struct qwx_softc *sc = irq_grp->sc; + + /* In case of one MSI vector, we handle irq enable/disable + * in a uniform way since we only have one irq + */ + if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags)) + return; +} + +int +qwx_pcic_ext_irq_config(struct qwx_softc *sc, struct pci_attach_args *pa) +{ + struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc; + int i, ret, num_vectors = 0; + uint32_t msi_data_start = 0; + uint32_t base_vector = 0; + + if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags)) + return 0; + + ret = qwx_pcic_get_user_msi_vector(sc, "DP", &num_vectors, + &msi_data_start, &base_vector); + if (ret < 0) + return ret; + + for (i = 0; i < nitems(sc->ext_irq_grp); i++) { + struct qwx_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i]; + uint32_t num_irq = 0; + + irq_grp->sc = sc; + irq_grp->grp_id = i; +#if 0 + init_dummy_netdev(&irq_grp->napi_ndev); + netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi, + ath11k_pcic_ext_grp_napi_poll); +#endif + if (sc->hw_params.ring_mask->tx[i] || + sc->hw_params.ring_mask->rx[i] || + sc->hw_params.ring_mask->rx_err[i] || + sc->hw_params.ring_mask->rx_wbm_rel[i] || + sc->hw_params.ring_mask->reo_status[i] || + sc->hw_params.ring_mask->rxdma2host[i] || + sc->hw_params.ring_mask->host2rxdma[i] || + sc->hw_params.ring_mask->rx_mon_status[i]) { + num_irq = 1; + } + + irq_grp->num_irq = num_irq; + irq_grp->irqs[0] = ATH11K_PCI_IRQ_DP_OFFSET + i; + + if (num_irq) { + int irq_idx = irq_grp->irqs[0]; + pci_intr_handle_t ih; + + if (pci_intr_map_msivec(pa, irq_idx, &ih) != 0 && + pci_intr_map(pa, &ih) != 0) { + printf("%s: can't map interrupt\n", + sc->sc_dev.dv_xname); + return EIO; + } + + snprintf(psc->sc_ivname[irq_idx], sizeof(psc->sc_ivname[0]), + "%s:ex%d", sc->sc_dev.dv_xname, i); + psc->sc_ih[irq_idx] = pci_intr_establish(psc->sc_pc, ih, + IPL_NET, qwx_ext_intr, irq_grp, psc->sc_ivname[irq_idx]); + if (psc->sc_ih[irq_idx] == NULL) { + printf("%s: failed to request irq %d\n", + sc->sc_dev.dv_xname, irq_idx); + return EIO; + } + } + + qwx_pcic_ext_grp_disable(irq_grp); + } + + return 0; +} + +int +qwx_pcic_config_irq(struct qwx_softc *sc, struct pci_attach_args *pa) +{ + struct qwx_pci_softc *psc = (struct qwx_pci_softc *)sc; + struct qwx_ce_pipe *ce_pipe; + uint32_t msi_data_start; + uint32_t msi_data_count, msi_data_idx; + uint32_t msi_irq_start; + int i, ret, irq_idx; + pci_intr_handle_t ih; + + if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags)) + return 0; + + ret = qwx_pcic_get_user_msi_vector(sc, "CE", &msi_data_count, + &msi_data_start, &msi_irq_start); + if (ret) + return ret; + + /* Configure CE irqs */ + for (i = 0, msi_data_idx = 0; i < sc->hw_params.ce_count; i++) { + if (qwx_ce_get_attr_flags(sc, i) & CE_ATTR_DIS_INTR) + continue; + + ce_pipe = &sc->ce.ce_pipe[i]; + irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i; + + if (pci_intr_map_msivec(pa, irq_idx, &ih) != 0 && + pci_intr_map(pa, &ih) != 0) { + printf("%s: can't map interrupt\n", + sc->sc_dev.dv_xname); + return EIO; + } + + snprintf(psc->sc_ivname[irq_idx], sizeof(psc->sc_ivname[0]), + "%s:ce%d", sc->sc_dev.dv_xname, ce_pipe->pipe_num); + psc->sc_ih[irq_idx] = pci_intr_establish(psc->sc_pc, ih, + IPL_NET, qwx_ce_intr, ce_pipe, psc->sc_ivname[irq_idx]); + if (psc->sc_ih[irq_idx] == NULL) { + printf("%s: failed to request irq %d\n", + sc->sc_dev.dv_xname, irq_idx); + return EIO; + } + + msi_data_idx++; + + qwx_pcic_ce_irq_disable(sc, i); + } + + 
ret = qwx_pcic_ext_irq_config(sc, pa); + if (ret) + return ret; + + return 0; } void @@ -2747,7 +3024,7 @@ qwx_mhi_fw_load_handler(struct qwx_pci_softc *psc) u_char *data; size_t len; - ret = snprintf(amss_path, sizeof(amss_path), "%s/%s/%s", + ret = snprintf(amss_path, sizeof(amss_path), "%s-%s-%s", ATH11K_FW_DIR, sc->hw_params.fw.dir, ATH11K_AMSS_FILE); if (ret < 0 || ret >= sizeof(amss_path)) return ENOSPC; @@ -3813,6 +4090,28 @@ qwx_pci_intr_data_event(struct qwx_pci_softc *psc, struct qwx_pci_event_ring *ri return 1; } +int +qwx_pci_intr_mhi_ctrl(void *arg) +{ + struct qwx_pci_softc *psc = arg; + + if (qwx_pci_intr_ctrl_event(psc, &psc->event_rings[0])) + return 1; + + return 0; +} + +int +qwx_pci_intr_mhi_data(void *arg) +{ + struct qwx_pci_softc *psc = arg; + + if (qwx_pci_intr_data_event(psc, &psc->event_rings[1])) + return 1; + + return 0; +} + int qwx_pci_intr(void *arg) { @@ -3834,7 +4133,7 @@ qwx_pci_intr(void *arg) MHI_STATUS_MHISTATE_SHFT; DNPRINTF(QWX_D_MHI, - "%s: MHI interrupt with EE: 0x%x -> 0x%x state: 0x%x -> 0x%x\n", + "%s: BHI interrupt with EE: 0x%x -> 0x%x state: 0x%x -> 0x%x\n", sc->sc_dev.dv_xname, psc->bhi_ee, ee, psc->mhi_state, state); if (ee == MHI_EE_RDDM) { @@ -3860,13 +4159,26 @@ qwx_pci_intr(void *arg) ret = 1; } - if (qwx_pci_intr_ctrl_event(psc, &psc->event_rings[0])) - ret = 1; - if (qwx_pci_intr_data_event(psc, &psc->event_rings[1])) - ret = 1; + if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, sc->sc_flags)) { + int i; - if (qwx_intr(sc)) - ret = 1; + if (qwx_pci_intr_ctrl_event(psc, &psc->event_rings[0])) + ret = 1; + if (qwx_pci_intr_data_event(psc, &psc->event_rings[1])) + ret = 1; + + for (i = 0; i < sc->hw_params.ce_count; i++) { + struct qwx_ce_pipe *ce_pipe = &sc->ce.ce_pipe[i]; + + if (qwx_ce_intr(ce_pipe)) + ret = 1; + } + + for (i = 0; i < nitems(sc->ext_irq_grp); i++) { + if (qwx_dp_service_srng(sc, i)) + ret = 1; + } + } return ret; } diff --git a/usr.bin/awk/FIXES b/usr.bin/awk/FIXES index d77bec29c..3b059250d 100644 --- a/usr.bin/awk/FIXES +++ b/usr.bin/awk/FIXES @@ -25,11 +25,22 @@ THIS SOFTWARE. This file lists all bug fixes, changes, etc., made since the second edition of the AWK book was published in September 2023. +Jan 22, 2024: + Restore the ability to compile with g++. Thanks to + Arnold Robbins. + +Dec 24, 2023: + matchop dereference after free problem fix when the first + argument is a function call. thanks to Oguz Ismail Uysal. + Fix inconsistent handling of --csv and FS set in the + command line. Thanks to Wilbert van der Poel. + casting changes to int for is* functions. + Nov 27, 2023: Fix exit status of system on MacOS. update to REGRESS. Thanks to Arnold Robbins. Fix inconsistent handling of -F and --csv, and loss of csv - mode when FS is set. Thanks to Wilbert van der Poel. + mode when FS is set. 
Nov 24, 2023: Fix issue #199: gototab improvements to dynamically resize the diff --git a/usr.bin/awk/b.c b/usr.bin/awk/b.c index 523e3ee19..bc3f06fd3 100644 --- a/usr.bin/awk/b.c +++ b/usr.bin/awk/b.c @@ -1,4 +1,4 @@ -/* $OpenBSD: b.c,v 1.49 2023/11/25 16:31:33 millert Exp $ */ +/* $OpenBSD: b.c,v 1.50 2024/01/25 16:40:51 millert Exp $ */ /**************************************************************** Copyright (C) Lucent Technologies 1997 All Rights Reserved @@ -117,7 +117,7 @@ static int entry_cmp(const void *l, const void *r); static int get_gototab(fa*, int, int); static int set_gototab(fa*, int, int, int); static void clear_gototab(fa*, int); -extern int u8_rune(int *, const uschar *); +extern int u8_rune(int *, const char *); static int * intalloc(size_t n, const char *f) @@ -422,7 +422,7 @@ int *cclenter(const char *argp) /* add a character class */ FATAL("out of space for character class [%.10s...] 1", p); bp = buf; for (i = 0; *p != 0; ) { - n = u8_rune(&c, p); + n = u8_rune(&c, (const char *) p); p += n; if (c == '\\') { c = quoted(&p); @@ -430,7 +430,7 @@ int *cclenter(const char *argp) /* add a character class */ if (*p != 0) { c = bp[-1]; /* c2 = *p++; */ - n = u8_rune(&c2, p); + n = u8_rune(&c2, (const char *) p); p += n; if (c2 == '\\') c2 = quoted(&p); /* BUG: sets p, has to be u8 size */ @@ -624,7 +624,7 @@ static int get_gototab(fa *f, int state, int ch) /* hide gototab inplementation key.ch = ch; key.state = 0; /* irrelevant */ - item = bsearch(& key, f->gototab[state].entries, + item = (gtte *) bsearch(& key, f->gototab[state].entries, f->gototab[state].inuse, sizeof(gtte), entry_cmp); @@ -668,7 +668,7 @@ static int set_gototab(fa *f, int state, int ch, int val) /* hide gototab inplem key.ch = ch; key.state = 0; /* irrelevant */ - item = bsearch(& key, f->gototab[state].entries, + item = (gtte *) bsearch(& key, f->gototab[state].entries, f->gototab[state].inuse, sizeof(gtte), entry_cmp); @@ -716,7 +716,7 @@ int match(fa *f, const char *p0) /* shortest match ? 
*/ return(1); do { /* assert(*p < NCHARS); */ - n = u8_rune(&rune, p); + n = u8_rune(&rune, (const char *) p); if ((ns = get_gototab(f, s, rune)) != 0) s = ns; else @@ -749,7 +749,7 @@ int pmatch(fa *f, const char *p0) /* longest match, for sub */ if (f->out[s]) /* final state */ patlen = q-p; /* assert(*q < NCHARS); */ - n = u8_rune(&rune, q); + n = u8_rune(&rune, (const char *) q); if ((ns = get_gototab(f, s, rune)) != 0) s = ns; else @@ -780,7 +780,7 @@ int pmatch(fa *f, const char *p0) /* longest match, for sub */ s = 2; if (*p == 0) break; - n = u8_rune(&rune, p); + n = u8_rune(&rune, (const char *) p); p += n; } while (1); /* was *p++ */ return (0); @@ -805,7 +805,7 @@ int nematch(fa *f, const char *p0) /* non-empty match, for sub */ if (f->out[s]) /* final state */ patlen = q-p; /* assert(*q < NCHARS); */ - n = u8_rune(&rune, q); + n = u8_rune(&rune, (const char *) q); if ((ns = get_gototab(f, s, rune)) != 0) s = ns; else @@ -893,7 +893,7 @@ bool fnematch(fa *pfa, FILE *f, char **pbuf, int *pbufsize, int quantum) } } - j += u8_rune(&c, (uschar *)j); + j += u8_rune(&c, j); if ((ns = get_gototab(pfa, s, c)) != 0) s = ns; @@ -913,7 +913,7 @@ bool fnematch(fa *pfa, FILE *f, char **pbuf, int *pbufsize, int quantum) break; /* best match found */ /* no match at origin i, next i and start over */ - i += u8_rune(&c, (uschar *)i); + i += u8_rune(&c, i); if (c == 0) break; /* no match */ j = i; @@ -1234,8 +1234,6 @@ static int repeat(const uschar *reptok, int reptoklen, const uschar *atom, return 0; } -extern int u8_rune(int *, const uschar *); /* run.c; should be in header file */ - int relex(void) /* lexical analyzer for reparse */ { int c, n; @@ -1253,7 +1251,7 @@ int relex(void) /* lexical analyzer for reparse */ rescan: starttok = prestr; - if ((n = u8_rune(&rlxval, prestr)) > 1) { + if ((n = u8_rune(&rlxval, (const char *) prestr)) > 1) { prestr += n; starttok = prestr; return CHAR; @@ -1300,7 +1298,7 @@ rescan: if (!adjbuf((char **) &buf, &bufsz, n, n, (char **) &bp, "relex1")) FATAL("out of space for reg expr %.10s...", lastre); for (; ; ) { - if ((n = u8_rune(&rlxval, prestr)) > 1) { + if ((n = u8_rune(&rlxval, (const char *) prestr)) > 1) { for (i = 0; i < n; i++) *bp++ = *prestr++; continue; diff --git a/usr.bin/awk/main.c b/usr.bin/awk/main.c index 3ec64332f..481e98b07 100644 --- a/usr.bin/awk/main.c +++ b/usr.bin/awk/main.c @@ -1,4 +1,4 @@ -/* $OpenBSD: main.c,v 1.67 2023/11/28 20:54:38 millert Exp $ */ +/* $OpenBSD: main.c,v 1.68 2024/01/25 16:40:51 millert Exp $ */ /**************************************************************** Copyright (C) Lucent Technologies 1997 All Rights Reserved @@ -23,7 +23,7 @@ ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ****************************************************************/ -const char *version = "version 20231127"; +const char *version = "version 20240122"; #define DEBUG #include @@ -180,8 +180,6 @@ int main(int argc, char *argv[]) } if (strcmp(argv[1], "--csv") == 0) { /* turn on csv input processing */ CSV = true; - if (fs) - WARNING("danger: don't set FS when --csv is in effect"); argc--; argv++; continue; @@ -203,8 +201,6 @@ int main(int argc, char *argv[]) break; case 'F': /* set field separator */ fs = setfs(getarg(&argc, &argv, "no field separator")); - if (CSV) - WARNING("danger: don't set FS when --csv is in effect"); break; case 'v': /* -v a=1 to be done NOW. 
one -v for each */ vn = getarg(&argc, &argv, "no variable name"); @@ -238,6 +234,9 @@ int main(int argc, char *argv[]) } } + if (CSV && (fs != NULL || lookup("FS", symtab) != NULL)) + WARNING("danger: don't set FS when --csv is in effect"); + /* argv[1] is now the first argument */ if (npfile == 0) { /* no -f; first argument is program */ if (argc <= 1) { diff --git a/usr.bin/awk/run.c b/usr.bin/awk/run.c index e2c58b9cb..4c74cddbd 100644 --- a/usr.bin/awk/run.c +++ b/usr.bin/awk/run.c @@ -1,4 +1,4 @@ -/* $OpenBSD: run.c,v 1.83 2023/11/28 20:54:38 millert Exp $ */ +/* $OpenBSD: run.c,v 1.84 2024/01/25 16:40:51 millert Exp $ */ /**************************************************************** Copyright (C) Lucent Technologies 1997 All Rights Reserved @@ -796,7 +796,7 @@ int runetochar(char *str, int c) Cell *matchop(Node **a, int n) /* ~ and match() */ { - Cell *x, *y; + Cell *x, *y, *z; char *s, *t; int i; int cstart, cpatlen, len; @@ -818,7 +818,7 @@ Cell *matchop(Node **a, int n) /* ~ and match() */ i = (*mf)(pfa, s); tempfree(y); } - tempfree(x); + z = x; if (n == MATCHFCN) { int start = patbeg - s + 1; /* origin 1 */ if (patlen < 0) { @@ -840,11 +840,13 @@ Cell *matchop(Node **a, int n) /* ~ and match() */ x = gettemp(); x->tval = NUM; x->fval = start; - return x; } else if ((n == MATCH && i == 1) || (n == NOTMATCH && i == 0)) - return(True); + x = True; else - return(False); + x = False; + + tempfree(z); + return x; } @@ -1299,7 +1301,8 @@ int format(char **pbuf, int *pbufsize, const char *s, Node *a) /* printf-like co if (bs == NULL) { // invalid character // use unicode invalid character, 0xFFFD - bs = "\357\277\275"; + static char invalid_char[] = "\357\277\275"; + bs = invalid_char; count = 3; } t = bs; @@ -2567,7 +2570,7 @@ Cell *dosub(Node **a, int subop) /* sub and gsub */ start = getsval(x); while (pmatch(pfa, start)) { if (buf == NULL) { - if ((pb = buf = malloc(bufsz)) == NULL) + if ((pb = buf = (char *) malloc(bufsz)) == NULL) FATAL("out of memory in dosub"); tempstat = pfa->initstat; pfa->initstat = 2; @@ -2672,7 +2675,7 @@ Cell *gensub(Node **a, int nnn) /* global selective substitute */ int mflag, tempstat, num, whichm; int bufsz = recsize; - if ((buf = malloc(bufsz)) == NULL) + if ((buf = (char *) malloc(bufsz)) == NULL) FATAL("out of memory in gensub"); mflag = 0; /* if mflag == 0, can replace empty string */ num = 0; diff --git a/usr.sbin/bgpctl/bgpctl.c b/usr.sbin/bgpctl/bgpctl.c index 90e262cbd..014b3000a 100644 --- a/usr.sbin/bgpctl/bgpctl.c +++ b/usr.sbin/bgpctl/bgpctl.c @@ -1,4 +1,4 @@ -/* $OpenBSD: bgpctl.c,v 1.301 2024/01/23 16:16:15 claudio Exp $ */ +/* $OpenBSD: bgpctl.c,v 1.302 2024/01/25 09:54:21 claudio Exp $ */ /* * Copyright (c) 2003 Henning Brauer @@ -470,6 +470,7 @@ show(struct imsg *imsg, struct parse_result *res) struct flowspec f; struct ctl_show_rib rib; struct rde_memstats stats; + struct ibuf ibuf; u_char *asdata; u_int rescode, ilen; size_t aslen; @@ -539,14 +540,11 @@ show(struct imsg *imsg, struct parse_result *res) output->rib(&rib, asdata, aslen, res); break; case IMSG_CTL_SHOW_RIB_COMMUNITIES: - ilen = imsg->hdr.len - IMSG_HEADER_SIZE; - if (ilen % sizeof(struct community)) { - warnx("bad IMSG_CTL_SHOW_RIB_COMMUNITIES received"); - break; - } if (output->communities == NULL) break; - output->communities(imsg->data, ilen, res); + if (imsg_get_ibuf(imsg, &ibuf) == -1) + err(1, "imsg_get_ibuf"); + output->communities(&ibuf, res); break; case IMSG_CTL_SHOW_RIB_ATTR: ilen = imsg->hdr.len - IMSG_HEADER_SIZE; @@ -1044,53 +1042,47 @@ 
fmt_large_community(uint32_t d1, uint32_t d2, uint32_t d3) } const char * -fmt_ext_community(uint8_t *data) +fmt_ext_community(uint64_t ext) { static char buf[32]; - uint64_t ext; struct in_addr ip; uint32_t as4, u32; uint16_t as2, u16; uint8_t type, subtype; - type = data[0]; - subtype = data[1]; + type = ext >> 56; + subtype = ext >> 48; switch (type) { case EXT_COMMUNITY_TRANS_TWO_AS: case EXT_COMMUNITY_GEN_TWO_AS: - memcpy(&as2, data + 2, sizeof(as2)); - memcpy(&u32, data + 4, sizeof(u32)); + as2 = ext >> 32; + u32 = ext; snprintf(buf, sizeof(buf), "%s %s:%u", - log_ext_subtype(type, subtype), - log_as(ntohs(as2)), ntohl(u32)); + log_ext_subtype(type, subtype), log_as(as2), u32); return buf; case EXT_COMMUNITY_TRANS_IPV4: case EXT_COMMUNITY_GEN_IPV4: - memcpy(&ip, data + 2, sizeof(ip)); - memcpy(&u16, data + 6, sizeof(u16)); + ip.s_addr = htonl(ext >> 16); + u16 = ext; snprintf(buf, sizeof(buf), "%s %s:%hu", - log_ext_subtype(type, subtype), - inet_ntoa(ip), ntohs(u16)); + log_ext_subtype(type, subtype), inet_ntoa(ip), u16); return buf; case EXT_COMMUNITY_TRANS_FOUR_AS: case EXT_COMMUNITY_GEN_FOUR_AS: - memcpy(&as4, data + 2, sizeof(as4)); - memcpy(&u16, data + 6, sizeof(u16)); + as4 = ext >> 16; + u16 = ext; snprintf(buf, sizeof(buf), "%s %s:%hu", - log_ext_subtype(type, subtype), - log_as(ntohl(as4)), ntohs(u16)); + log_ext_subtype(type, subtype), log_as(as4), u16); return buf; case EXT_COMMUNITY_TRANS_OPAQUE: case EXT_COMMUNITY_TRANS_EVPN: - memcpy(&ext, data, sizeof(ext)); - ext = be64toh(ext) & 0xffffffffffffLL; + ext &= 0xffffffffffffULL; snprintf(buf, sizeof(buf), "%s 0x%llx", log_ext_subtype(type, subtype), (unsigned long long)ext); return buf; case EXT_COMMUNITY_NON_TRANS_OPAQUE: - memcpy(&ext, data, sizeof(ext)); - ext = be64toh(ext) & 0xffffffffffffLL; + ext &= 0xffffffffffffULL; if (subtype == EXT_COMMUNITY_SUBTYPE_OVS) { switch (ext) { case EXT_COMMUNITY_OVS_VALID: @@ -1119,10 +1111,7 @@ fmt_ext_community(uint8_t *data) } break; default: - memcpy(&ext, data, sizeof(ext)); - snprintf(buf, sizeof(buf), "%s 0x%llx", - log_ext_subtype(type, subtype), - (unsigned long long)be64toh(ext)); + snprintf(buf, sizeof(buf), "0x%llx", (unsigned long long)ext); return buf; } } diff --git a/usr.sbin/bgpctl/bgpctl.h b/usr.sbin/bgpctl/bgpctl.h index cd15723e1..db9e206c1 100644 --- a/usr.sbin/bgpctl/bgpctl.h +++ b/usr.sbin/bgpctl/bgpctl.h @@ -1,4 +1,4 @@ -/* $OpenBSD: bgpctl.h,v 1.21 2023/04/20 14:01:50 claudio Exp $ */ +/* $OpenBSD: bgpctl.h,v 1.22 2024/01/25 09:54:21 claudio Exp $ */ /* * Copyright (c) 2019 Claudio Jeker @@ -28,7 +28,7 @@ struct output { void (*nexthop)(struct ctl_show_nexthop *); void (*interface)(struct ctl_show_interface *); void (*attr)(u_char *, size_t, int, int); - void (*communities)(u_char *, size_t, struct parse_result *); + void (*communities)(struct ibuf *, struct parse_result *); void (*rib)(struct ctl_show_rib *, u_char *, size_t, struct parse_result *); void (*rib_mem)(struct rde_memstats *); @@ -57,7 +57,7 @@ const char *fmt_errstr(uint8_t, uint8_t); const char *fmt_attr(uint8_t, int); const char *fmt_community(uint16_t, uint16_t); const char *fmt_large_community(uint32_t, uint32_t, uint32_t); -const char *fmt_ext_community(uint8_t *); +const char *fmt_ext_community(uint64_t); const char *fmt_set_type(struct ctl_show_set *); #define MPLS_LABEL_OFFSET 12 diff --git a/usr.sbin/bgpctl/output.c b/usr.sbin/bgpctl/output.c index 3eddeaff8..3a5e59cf4 100644 --- a/usr.sbin/bgpctl/output.c +++ b/usr.sbin/bgpctl/output.c @@ -1,4 +1,4 @@ -/* $OpenBSD: output.c,v 1.47 
2024/01/23 16:16:15 claudio Exp $ */ +/* $OpenBSD: output.c,v 1.48 2024/01/25 09:54:21 claudio Exp $ */ /* * Copyright (c) 2003 Henning Brauer @@ -646,18 +646,17 @@ show_interface(struct ctl_show_interface *iface) } static void -show_communities(u_char *data, size_t len, struct parse_result *res) +show_communities(struct ibuf *data, struct parse_result *res) { struct community c; - size_t i; uint64_t ext; uint8_t type = 0; - if (len % sizeof(c)) - return; - - for (i = 0; i < len; i += sizeof(c)) { - memcpy(&c, data + i, sizeof(c)); + while (ibuf_size(data) != 0) { + if (ibuf_get(data, &c, sizeof(c)) == -1) { + warn("communities"); + break; + } if (type != c.flags) { if (type != 0) @@ -690,9 +689,7 @@ show_communities(u_char *data, size_t len, struct parse_result *res) ext |= (uint64_t)c.data2 & 0xffff; break; } - ext = htobe64(ext); - - printf(" %s", fmt_ext_community((void *)&ext)); + printf(" %s", fmt_ext_community(ext)); break; } } @@ -751,6 +748,7 @@ show_large_community(u_char *data, uint16_t len) static void show_ext_community(u_char *data, uint16_t len) { + uint64_t ext; uint16_t i; if (len & 0x7) { @@ -759,7 +757,9 @@ show_ext_community(u_char *data, uint16_t len) } for (i = 0; i < len; i += 8) { - printf("%s", fmt_ext_community(data + i)); + memcpy(&ext, data + i, sizeof(ext)); + ext = be64toh(ext); + printf("%s", fmt_ext_community(ext)); if (i + 8 < len) printf(" "); diff --git a/usr.sbin/bgpctl/output_json.c b/usr.sbin/bgpctl/output_json.c index f04a41be8..1ebd27b40 100644 --- a/usr.sbin/bgpctl/output_json.c +++ b/usr.sbin/bgpctl/output_json.c @@ -1,4 +1,4 @@ -/* $OpenBSD: output_json.c,v 1.39 2024/01/23 16:16:15 claudio Exp $ */ +/* $OpenBSD: output_json.c,v 1.40 2024/01/25 09:54:21 claudio Exp $ */ /* * Copyright (c) 2020 Claudio Jeker @@ -465,19 +465,17 @@ json_interface(struct ctl_show_interface *iface) } static void -json_communities(u_char *data, size_t len, struct parse_result *res) +json_communities(struct ibuf *data, struct parse_result *res) { struct community c; - size_t i; uint64_t ext; - if (len % sizeof(c)) { - warnx("communities: bad size"); - return; - } - for (i = 0; i < len; i += sizeof(c)) { - memcpy(&c, data + i, sizeof(c)); + while (ibuf_size(data) != 0) { + if (ibuf_get(data, &c, sizeof(c)) == -1) { + warn("communities"); + return; + } switch (c.flags) { case COMMUNITY_TYPE_BASIC: @@ -505,11 +503,9 @@ json_communities(u_char *data, size_t len, struct parse_result *res) ext |= (uint64_t)c.data2 & 0xffff; break; } - ext = htobe64(ext); json_do_array("extended_communities"); - json_do_string("community", - fmt_ext_community((void *)&ext)); + json_do_string("community", fmt_ext_community(ext)); break; } } @@ -569,6 +565,7 @@ json_do_large_community(u_char *data, uint16_t len) static void json_do_ext_community(u_char *data, uint16_t len) { + uint64_t ext; uint16_t i; if (len & 0x7) { @@ -578,8 +575,11 @@ json_do_ext_community(u_char *data, uint16_t len) json_do_array("extended_communities"); - for (i = 0; i < len; i += 8) - json_do_string("community", fmt_ext_community(data + i)); + for (i = 0; i < len; i += 8) { + memcpy(&ext, data + i, sizeof(ext)); + ext = be64toh(ext); + json_do_string("community", fmt_ext_community(ext)); + } json_do_end(); } diff --git a/usr.sbin/bgpd/rde.c b/usr.sbin/bgpd/rde.c index 5517ca99b..c357ea67a 100644 --- a/usr.sbin/bgpd/rde.c +++ b/usr.sbin/bgpd/rde.c @@ -1,4 +1,4 @@ -/* $OpenBSD: rde.c,v 1.617 2024/01/24 14:51:11 claudio Exp $ */ +/* $OpenBSD: rde.c,v 1.619 2024/01/25 11:13:35 claudio Exp $ */ /* * Copyright (c) 2003, 2004 
Henning Brauer @@ -1932,7 +1932,7 @@ rde_attr_parse(struct ibuf *buf, struct rde_peer *peer, { struct bgpd_addr nexthop; struct rde_aspath *a = &state->aspath; - struct ibuf attrbuf; + struct ibuf attrbuf, tmpbuf; u_char *p, *npath; uint32_t tmp32, zero = 0; int error; @@ -1974,20 +1974,25 @@ rde_attr_parse(struct ibuf *buf, struct rde_peer *peer, /* ignore and drop path attributes with a type code of 0 */ break; case ATTR_ORIGIN: - if (attr_len != 1) - goto bad_len; - if (!CHECK_FLAGS(flags, ATTR_WELL_KNOWN, 0)) goto bad_flags; - - UPD_READ(&a->origin, p, plen, 1); - if (a->origin > ORIGIN_INCOMPLETE) { - rde_update_err(peer, ERR_UPDATE, ERR_UPD_ORIGIN, - &attrbuf); - return (-1); - } + if (ibuf_size(&attrbuf) != 1) + goto bad_len; if (a->flags & F_ATTR_ORIGIN) goto bad_list; + if (ibuf_get_n8(&attrbuf, &a->origin) == -1) + goto bad_len; + if (a->origin > ORIGIN_INCOMPLETE) { + /* + * mark update as bad and withdraw all routes as per + * RFC 7606 + */ + a->flags |= F_ATTR_PARSE_ERR; + log_peer_warnx(&peer->conf, "bad ORIGIN %u, " + "path invalidated and prefix withdrawn", + a->origin); + return (-1); + } a->flags |= F_ATTR_ORIGIN; break; case ATTR_ASPATH: @@ -2033,17 +2038,19 @@ rde_attr_parse(struct ibuf *buf, struct rde_peer *peer, plen += attr_len; break; case ATTR_NEXTHOP: - if (attr_len != 4) - goto bad_len; if (!CHECK_FLAGS(flags, ATTR_WELL_KNOWN, 0)) goto bad_flags; + if (ibuf_size(&attrbuf) != 4) + goto bad_len; if (a->flags & F_ATTR_NEXTHOP) goto bad_list; a->flags |= F_ATTR_NEXTHOP; memset(&nexthop, 0, sizeof(nexthop)); nexthop.aid = AID_INET; - UPD_READ(&nexthop.v4.s_addr, p, plen, 4); + if (ibuf_get_h32(&attrbuf, &nexthop.v4.s_addr) == -1) + goto bad_len; + /* * Check if the nexthop is a valid IP address. We consider * multicast addresses as invalid. 
@@ -2058,80 +2065,77 @@ rde_attr_parse(struct ibuf *buf, struct rde_peer *peer, state->nexthop = nexthop_get(&nexthop); break; case ATTR_MED: - if (attr_len != 4) - goto bad_len; if (!CHECK_FLAGS(flags, ATTR_OPTIONAL, 0)) goto bad_flags; + if (ibuf_size(&attrbuf) != 4) + goto bad_len; if (a->flags & F_ATTR_MED) goto bad_list; + if (ibuf_get_n32(&attrbuf, &a->med) == -1) + goto bad_len; a->flags |= F_ATTR_MED; - - UPD_READ(&tmp32, p, plen, 4); - a->med = ntohl(tmp32); break; case ATTR_LOCALPREF: - if (attr_len != 4) - goto bad_len; if (!CHECK_FLAGS(flags, ATTR_WELL_KNOWN, 0)) goto bad_flags; + if (ibuf_size(&attrbuf) != 4) + goto bad_len; if (peer->conf.ebgp) { /* ignore local-pref attr on non ibgp peers */ - plen += attr_len; break; } if (a->flags & F_ATTR_LOCALPREF) goto bad_list; + if (ibuf_get_n32(&attrbuf, &a->lpref) == -1) + goto bad_len; a->flags |= F_ATTR_LOCALPREF; - - UPD_READ(&tmp32, p, plen, 4); - a->lpref = ntohl(tmp32); break; case ATTR_ATOMIC_AGGREGATE: - if (attr_len != 0) - goto bad_len; if (!CHECK_FLAGS(flags, ATTR_WELL_KNOWN, 0)) goto bad_flags; + if (ibuf_size(&attrbuf) != 0) + goto bad_len; goto optattr; case ATTR_AGGREGATOR: - if ((!peer_has_as4byte(peer) && attr_len != 6) || - (peer_has_as4byte(peer) && attr_len != 8)) { + if (!CHECK_FLAGS(flags, ATTR_OPTIONAL|ATTR_TRANSITIVE, + ATTR_PARTIAL)) + goto bad_flags; + if ((!peer_has_as4byte(peer) && ibuf_size(&attrbuf) != 6) || + (peer_has_as4byte(peer) && ibuf_size(&attrbuf) != 8)) { /* * ignore attribute in case of error as per * RFC 7606 */ log_peer_warnx(&peer->conf, "bad AGGREGATOR, " "attribute discarded"); - plen += attr_len; break; } - if (!CHECK_FLAGS(flags, ATTR_OPTIONAL|ATTR_TRANSITIVE, - ATTR_PARTIAL)) - goto bad_flags; if (!peer_has_as4byte(peer)) { /* need to inflate aggregator AS to 4-byte */ u_char t[8]; t[0] = t[1] = 0; - UPD_READ(&t[2], p, plen, 2); - UPD_READ(&t[4], p, plen, 4); + if (ibuf_get(&attrbuf, &t[2], 6) == -1) + goto bad_list; if (memcmp(t, &zero, sizeof(uint32_t)) == 0) { /* As per RFC7606 use "attribute discard". */ log_peer_warnx(&peer->conf, "bad AGGREGATOR, " "AS 0 not allowed, attribute discarded"); break; } - if (attr_optadd(a, flags, type, t, - sizeof(t)) == -1) + if (attr_optadd(a, flags, type, t, sizeof(t)) == -1) goto bad_list; break; } /* 4-byte ready server take the default route */ - if (memcmp(p, &zero, sizeof(uint32_t)) == 0) { + ibuf_from_ibuf(&tmpbuf, &attrbuf); + if (ibuf_get_n32(&tmpbuf, &tmp32) == -1) + goto bad_len; + if (tmp32 == 0) { /* As per RFC7606 use "attribute discard" here. 
*/ char *pfmt = log_fmt_peer(&peer->conf); log_debug("%s: bad AGGREGATOR, " "AS 0 not allowed, attribute discarded", pfmt); free(pfmt); - plen += attr_len; break; } goto optattr; @@ -2181,22 +2185,22 @@ rde_attr_parse(struct ibuf *buf, struct rde_peer *peer, } break; case ATTR_ORIGINATOR_ID: - if (attr_len != 4) - goto bad_len; if (!CHECK_FLAGS(flags, ATTR_OPTIONAL, 0)) goto bad_flags; + if (ibuf_size(&attrbuf) != 4) + goto bad_len; goto optattr; case ATTR_CLUSTER_LIST: - if (attr_len % 4 != 0) - goto bad_len; if (!CHECK_FLAGS(flags, ATTR_OPTIONAL, 0)) goto bad_flags; + if (ibuf_size(&attrbuf) % 4 != 0) + goto bad_len; goto optattr; case ATTR_MP_REACH_NLRI: - if (attr_len < 5) - goto bad_len; if (!CHECK_FLAGS(flags, ATTR_OPTIONAL, 0)) goto bad_flags; + if (ibuf_size(&attrbuf) < 5) + goto bad_len; /* the validity is checked in rde_update_dispatch() */ if (a->flags & F_ATTR_MP_REACH) goto bad_list; @@ -2205,10 +2209,10 @@ rde_attr_parse(struct ibuf *buf, struct rde_peer *peer, *reach = attrbuf; break; case ATTR_MP_UNREACH_NLRI: - if (attr_len < 3) - goto bad_len; if (!CHECK_FLAGS(flags, ATTR_OPTIONAL, 0)) goto bad_flags; + if (ibuf_size(&attrbuf) < 3) + goto bad_len; /* the validity is checked in rde_update_dispatch() */ if (a->flags & F_ATTR_MP_UNREACH) goto bad_list; @@ -2217,21 +2221,22 @@ rde_attr_parse(struct ibuf *buf, struct rde_peer *peer, *unreach = attrbuf; break; case ATTR_AS4_AGGREGATOR: - if (attr_len != 8) { - /* see ATTR_AGGREGATOR ... */ - log_peer_warnx(&peer->conf, "bad AS4_AGGREGATOR, " - "attribute discarded"); - plen += attr_len; - break; - } if (!CHECK_FLAGS(flags, ATTR_OPTIONAL|ATTR_TRANSITIVE, ATTR_PARTIAL)) goto bad_flags; - if (memcmp(p, &zero, sizeof(uint32_t)) == 0) { + if (ibuf_size(&attrbuf) != 8) { + /* see ATTR_AGGREGATOR ... */ + log_peer_warnx(&peer->conf, "bad AS4_AGGREGATOR, " + "attribute discarded"); + break; + } + ibuf_from_ibuf(&tmpbuf, &attrbuf); + if (ibuf_get_n32(&tmpbuf, &tmp32) == -1) + goto bad_len; + if (tmp32 == 0) { /* As per RFC6793 use "attribute discard" here. */ log_peer_warnx(&peer->conf, "bad AS4_AGGREGATOR, " "AS 0 not allowed, attribute discarded"); - plen += attr_len; break; } a->flags |= F_ATTR_AS4BYTE_NEW; @@ -2251,25 +2256,24 @@ rde_attr_parse(struct ibuf *buf, struct rde_peer *peer, a->flags |= F_ATTR_AS4BYTE_NEW; goto optattr; case ATTR_OTC: - if (attr_len != 4) { + if (!CHECK_FLAGS(flags, ATTR_OPTIONAL|ATTR_TRANSITIVE, + ATTR_PARTIAL)) + goto bad_flags; + if (ibuf_size(&attrbuf) != 4) { /* treat-as-withdraw */ a->flags |= F_ATTR_PARSE_ERR; log_peer_warnx(&peer->conf, "bad OTC, " "path invalidated and prefix withdrawn"); - plen += attr_len; break; } - if (!CHECK_FLAGS(flags, ATTR_OPTIONAL|ATTR_TRANSITIVE, - ATTR_PARTIAL)) - goto bad_flags; switch (peer->role) { case ROLE_PROVIDER: case ROLE_RS: a->flags |= F_ATTR_OTC_LEAK; break; case ROLE_PEER: - memcpy(&tmp32, p, sizeof(tmp32)); - tmp32 = ntohl(tmp32); + if (ibuf_get_n32(&attrbuf, &tmp32) == -1) + goto bad_len; if (tmp32 != peer->conf.remote_as) a->flags |= F_ATTR_OTC_LEAK; break; @@ -2285,10 +2289,9 @@ rde_attr_parse(struct ibuf *buf, struct rde_peer *peer, return (-1); } optattr: - if (attr_optadd(a, flags, type, p, attr_len) == -1) + if (attr_optadd(a, flags, type, ibuf_data(&attrbuf), + ibuf_size(&attrbuf)) == -1) goto bad_list; - - plen += attr_len; break; }
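
The rde_attr_parse() hunks above replace UPD_READ() pointer arithmetic with
length-checked ibuf accessors: each fixed-size attribute is first validated
with ibuf_size() and then read through ibuf_get_n8()/ibuf_get_n32(), so a
truncated attribute fails the read instead of walking past the buffer. Below
is a minimal sketch of that pattern in plain C; the attr_reader type and its
helpers are hypothetical stand-ins for the real ibuf API and are not part of
the commit.

#include <stdint.h>
#include <stdio.h>

struct attr_reader {		/* hypothetical stand-in for struct ibuf */
	const uint8_t	*data;
	size_t		 len;
	size_t		 off;
};

static size_t
reader_left(const struct attr_reader *r)
{
	return r->len - r->off;
}

/* bounds-checked big-endian read, in the spirit of ibuf_get_n32() */
static int
reader_get_n32(struct attr_reader *r, uint32_t *v)
{
	if (reader_left(r) < 4)
		return -1;
	*v = (uint32_t)r->data[r->off] << 24 |
	    (uint32_t)r->data[r->off + 1] << 16 |
	    (uint32_t)r->data[r->off + 2] << 8 |
	    (uint32_t)r->data[r->off + 3];
	r->off += 4;
	return 0;
}

int
main(void)
{
	/* wire form of a MULTI_EXIT_DISC payload: one 32-bit value */
	const uint8_t wire[] = { 0x00, 0x00, 0x01, 0x2c };
	struct attr_reader r = { wire, sizeof(wire), 0 };
	uint32_t med;

	/* length check first, as the ATTR_MED hunk does with ibuf_size() */
	if (reader_left(&r) != 4 || reader_get_n32(&r, &med) == -1) {
		fprintf(stderr, "bad MED length\n");
		return 1;
	}
	printf("MED %u, %zu bytes left\n", med, reader_left(&r));
	return 0;
}

The payoff of this style is visible in the ORIGIN, MED and OTC cases above: a
short or malformed attribute now takes the same bad_len or treat-as-withdraw
paths described by RFC 7606, rather than relying on the caller having checked
attr_len before the raw reads.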