ena: Support LLQ entry size recommendation from device
This commit adds support for receiving an LLQ entry size recommendation
from the device. The driver will use the recommended entry size unless
the user explicitly chooses the regular or large LLQ entry size.
The enum ena_llq_header_size_policy_t and the llq_policy field were also
added to support the new feature.
Approved by: cperciva (mentor)
Sponsored by: Amazon, Inc.
(cherry picked from commit b1c38df05d)
--- a/share/man/man4/ena.4
+++ b/share/man/man4/ena.4
@@ -137,17 +137,17 @@ This can further lead to OS instability, together with ENA driver reset and NVMe
 timeouts.
 If network performance is critical and memory capacity is sufficient, the 9k
 mbufs can be used.
-.It Va hw.ena.force_large_llq_headers
-Force the driver to use large LLQ headers (224 bytes).
-The default is 0.
+.It Va hw.ena.force_large_llq_header
+Force the driver to use large (224 bytes) or regular (96 bytes) LLQ header size.
+The default value is 2 and the recommended LLQ header size will be used.
 If the node value is set to 0, the regular size LLQ header will be used, which
 is 96B.
 In some cases, the packet header can be bigger than this (for example -
 IPv6 with multiple extensions).
-In such a situation, the large LLQ headers should be used by setting this node
-value to 1.
-This will take effect only if the device supports both LLQ and large LLQ
-headers.
+In such a situation, the large LLQ header size which is 224B should be used,
+and can be forced by setting this node value to 1.
+Using large LLQ header size will take effect only if the device supports
+both LLQ and large LLQ headers.
 Otherwise, it will fallback to the no LLQ mode or regular header size.
 .Pp
 Increasing LLQ header size reduces the size of the Tx queue by half, so it may
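For orientation, the policy described above can be condensed into a standalone sketch. This is an illustration only: the resolve_llq_entry_size() helper, its boolean parameters, and the simplified enum are hypothetical stand-ins, not driver code, though the three policy values mirror enum ena_llq_header_size_policy_t added by this commit.

#include <stdbool.h>
#include <stdio.h>

/* Mirrors enum ena_llq_header_size_policy_t introduced by this commit. */
enum llq_policy {
	LLQ_POLICY_REGULAR,	/* value 0: force regular 128B entries */
	LLQ_POLICY_LARGE,	/* value 1: force large 256B entries */
	LLQ_POLICY_DEFAULT	/* value 2: follow the device recommendation */
};

/*
 * Hypothetical helper: choose the LLQ ring entry size (in bytes) from the
 * device capabilities, the device recommendation, and the user policy.
 */
static unsigned
resolve_llq_entry_size(bool dev_supports_256b, bool dev_recommends_256b,
    enum llq_policy policy)
{
	if (dev_supports_256b &&
	    (policy == LLQ_POLICY_LARGE ||
	    (policy == LLQ_POLICY_DEFAULT && dev_recommends_256b)))
		return (256);
	/* No 256B support, or the user explicitly chose regular entries. */
	return (128);
}

int
main(void)
{
	/* Device recommends large entries and the user keeps the default. */
	printf("%u\n", resolve_llq_entry_size(true, true, LLQ_POLICY_DEFAULT));
	/* The user explicitly forces regular entries. */
	printf("%u\n", resolve_llq_entry_size(true, true, LLQ_POLICY_REGULAR));
	/* Large entries cannot be forced on a device without 256B support. */
	printf("%u\n", resolve_llq_entry_size(false, true, LLQ_POLICY_LARGE));
	return (0);
}

Since the sysctl is created with CTLFLAG_RDTUN, the policy is effectively chosen at boot, for example with hw.ena.force_large_llq_header="1" in /boot/loader.conf.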
--- a/sys/dev/ena/ena.c
+++ b/sys/dev/ena/ena.c
@@ -156,7 +156,7 @@ static int ena_set_queues_placement_policy(device_t, struct ena_com_dev *,
 static int ena_map_llq_mem_bar(device_t, struct ena_com_dev *);
 static uint32_t ena_calc_max_io_queue_num(device_t, struct ena_com_dev *,
     struct ena_com_dev_get_features_ctx *);
-static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *);
+static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *, struct ena_adapter *);
 static void ena_config_host_info(struct ena_com_dev *, device_t);
 static int ena_attach(device_t);
 static int ena_detach(device_t);
@@ -2757,27 +2757,32 @@ ena_map_llq_mem_bar(device_t pdev, struct ena_com_dev *ena_dev)
 }
 
 static inline void
-set_default_llq_configurations(struct ena_llq_configurations *llq_config,
-    struct ena_admin_feature_llq_desc *llq)
+ena_set_llq_configurations(struct ena_llq_configurations *llq_config,
+    struct ena_admin_feature_llq_desc *llq, struct ena_adapter *adapter)
 {
 	llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
 	llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
 	llq_config->llq_num_decs_before_header =
 	    ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
-	if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) !=
-	    0 && ena_force_large_llq_header) {
-		llq_config->llq_ring_entry_size =
-		    ENA_ADMIN_LIST_ENTRY_SIZE_256B;
-		llq_config->llq_ring_entry_size_value = 256;
+	if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) != 0) {
+		if ((ena_force_large_llq_header == ENA_LLQ_HEADER_SIZE_POLICY_LARGE) ||
+		    (ena_force_large_llq_header == ENA_LLQ_HEADER_SIZE_POLICY_DEFAULT &&
+		    llq->entry_size_recommended == ENA_ADMIN_LIST_ENTRY_SIZE_256B)) {
+			llq_config->llq_ring_entry_size =
+			    ENA_ADMIN_LIST_ENTRY_SIZE_256B;
+			llq_config->llq_ring_entry_size_value = 256;
+			adapter->llq_policy = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
+		}
 	} else {
 		llq_config->llq_ring_entry_size =
 		    ENA_ADMIN_LIST_ENTRY_SIZE_128B;
 		llq_config->llq_ring_entry_size_value = 128;
+		adapter->llq_policy = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
 	}
 }
 
 static int
-ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
+ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx, struct ena_adapter *adapter)
 {
 	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
 	struct ena_com_dev *ena_dev = ctx->ena_dev;
@@ -2832,22 +2837,20 @@ ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
 	max_rx_queue_size = 1 << (flsl(max_rx_queue_size) - 1);
 
 	/*
-	 * When forcing large headers, we multiply the entry size by 2,
+	 * When using large headers, we multiply the entry size by 2,
 	 * and therefore divide the queue size by 2, leaving the amount
 	 * of memory used by the queues unchanged.
 	 */
-	if (ena_force_large_llq_header) {
-		if ((llq->entry_size_ctrl_supported &
-		    ENA_ADMIN_LIST_ENTRY_SIZE_256B) != 0 &&
-		    ena_dev->tx_mem_queue_type ==
+	if (adapter->llq_policy == ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
+		if (ena_dev->tx_mem_queue_type ==
 		    ENA_ADMIN_PLACEMENT_POLICY_DEV) {
 			max_tx_queue_size /= 2;
 			ena_log(ctx->pdev, INFO,
-			    "Forcing large headers and decreasing maximum Tx queue size to %d\n",
+			    "Using large headers and decreasing maximum Tx queue size to %d\n",
 			    max_tx_queue_size);
 		} else {
 			ena_log(ctx->pdev, WARN,
-			    "Forcing large headers failed: LLQ is disabled or device does not support large headers\n");
+			    "Using large headers failed: LLQ is disabled or device does not support large headers\n");
 		}
 	}
 
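The divide-by-two in this hunk implements the memory-neutral trade-off described in the comment: doubling the entry size while halving the queue depth leaves the LLQ memory footprint unchanged. A minimal standalone check, using a made-up 1024-entry depth rather than any value from the driver:

#include <assert.h>
#include <stdio.h>

int
main(void)
{
	unsigned regular_entry = 128;	/* regular LLQ entry size, bytes */
	unsigned large_entry = 256;	/* large LLQ entry size, bytes */
	unsigned tx_queue_size = 1024;	/* hypothetical max Tx queue depth */

	unsigned mem_regular = tx_queue_size * regular_entry;
	unsigned mem_large = (tx_queue_size / 2) * large_entry;

	/* 1024 * 128 == 512 * 256 == 131072 bytes either way. */
	assert(mem_regular == mem_large);
	printf("both layouts use %u bytes\n", mem_regular);
	return (0);
}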
@@ -3003,7 +3006,7 @@ ena_device_init(struct ena_adapter *adapter, device_t pdev,
 
 	*wd_active = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
 
-	set_default_llq_configurations(&llq_config, &get_feat_ctx->llq);
+	ena_set_llq_configurations(&llq_config, &get_feat_ctx->llq, adapter);
 
 	rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
 	    &llq_config);
@@ -3862,7 +3865,7 @@ ena_attach(device_t pdev)
 	/* Calculate initial and maximum IO queue number and size */
 	max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev,
 	    &get_feat_ctx);
-	rc = ena_calc_io_queue_size(&calc_queue_ctx);
+	rc = ena_calc_io_queue_size(&calc_queue_ctx, adapter);
 	if (unlikely((rc != 0) || (max_num_io_queues <= 0))) {
 		rc = EFAULT;
 		goto err_com_free;
--- a/sys/dev/ena/ena.h
+++ b/sys/dev/ena/ena.h
@@ -173,6 +173,15 @@ enum ena_flags_t {
 	ENA_FLAGS_NUMBER = ENA_FLAG_RSS_ACTIVE
 };
 
+enum ena_llq_header_size_policy_t {
+	/* Policy for Regular LLQ entry size (128B) */
+	ENA_LLQ_HEADER_SIZE_POLICY_REGULAR,
+	/* Policy for Large LLQ entry size (256B) */
+	ENA_LLQ_HEADER_SIZE_POLICY_LARGE,
+	/* Policy for device recommended LLQ entry size */
+	ENA_LLQ_HEADER_SIZE_POLICY_DEFAULT
+};
+
 BITSET_DEFINE(_ena_state, ENA_FLAGS_NUMBER);
 typedef struct _ena_state ena_state_t;
 
@@ -459,6 +468,8 @@ struct ena_adapter {
 	uint8_t mac_addr[ETHER_ADDR_LEN];
 	/* mdio and phy*/
 
+	uint8_t llq_policy;
+
 	ena_state_t flags;
 
 	/* IRQ CPU affinity */
--- a/sys/dev/ena/ena_sysctl.c
+++ b/sys/dev/ena/ena_sysctl.c
@@ -148,17 +148,17 @@ SYSCTL_INT(_hw_ena, OID_AUTO, enable_9k_mbufs, CTLFLAG_RDTUN,
     &ena_enable_9k_mbufs, 0, "Use 9 kB mbufs for Rx descriptors");
 
 /*
- * Force the driver to use large LLQ (Low Latency Queue) header. Defaults to
- * false. This option may be important for platforms, which often handle packet
- * headers on Tx with total header size greater than 96B, as it may
- * reduce the latency.
+ * Force the driver to use large or regular LLQ (Low Latency Queue) header size.
+ * Defaults to ENA_LLQ_HEADER_SIZE_POLICY_DEFAULT. This option may be
+ * important for platforms, which often handle packet headers on Tx with total
+ * header size greater than 96B, as it may reduce the latency.
  * It also reduces the maximum Tx queue size by half, so it may cause more Tx
  * packet drops.
  */
-bool ena_force_large_llq_header = false;
-SYSCTL_BOOL(_hw_ena, OID_AUTO, force_large_llq_header, CTLFLAG_RDTUN,
+int ena_force_large_llq_header = ENA_LLQ_HEADER_SIZE_POLICY_DEFAULT;
+SYSCTL_INT(_hw_ena, OID_AUTO, force_large_llq_header, CTLFLAG_RDTUN,
     &ena_force_large_llq_header, 0,
-    "Increases maximum supported header size in LLQ mode to 224 bytes, while reducing the maximum Tx queue size by half.\n");
+    "Change default LLQ entry size received from the device\n");
 
 int ena_rss_table_size = ENA_RX_RSS_TABLE_SIZE;
 
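With the knob now an int policy instead of a bool, its value is one of 0 (regular, 128B entries), 1 (large, 256B entries), or 2 (follow the device recommendation). As a rough illustration, assuming a FreeBSD system with the ena(4) driver present, the current value can be read back from userland with sysctlbyname(3):

#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdio.h>

int
main(void)
{
	int policy;
	size_t len = sizeof(policy);

	/* CTLFLAG_RDTUN: readable at runtime, settable only as a tunable. */
	if (sysctlbyname("hw.ena.force_large_llq_header", &policy, &len,
	    NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("LLQ header size policy: %d\n", policy);
	return (0);
}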
--- a/sys/dev/ena/ena_sysctl.h
+++ b/sys/dev/ena/ena_sysctl.h
@@ -46,6 +46,6 @@ extern int ena_enable_9k_mbufs;
 #define ena_mbuf_sz (ena_enable_9k_mbufs ? MJUM9BYTES : MJUMPAGESIZE)
 
 /* Force the driver to use large LLQ (Low Latency Queue) headers. */
-extern bool ena_force_large_llq_header;
+extern int ena_force_large_llq_header;
 
 #endif /* !(ENA_SYSCTL_H) */