Diffstat (limited to 'drivers/net/ethernet/amazon')
-rw-r--r--	drivers/net/ethernet/amazon/ena/ena_eth_com.h	|   4
-rw-r--r--	drivers/net/ethernet/amazon/ena/ena_ethtool.c	|  66
-rw-r--r--	drivers/net/ethernet/amazon/ena/ena_netdev.c	| 261
-rw-r--r--	drivers/net/ethernet/amazon/ena/ena_netdev.h	|  15
4 files changed, 243 insertions(+), 103 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 689313ee25a8..372b259279ec 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -10,6 +10,10 @@
 /* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
 #define ENA_COMP_HEAD_THRESH 4
+/* we allow 2 DMA descriptors per LLQ entry */
+#define ENA_LLQ_ENTRY_DESC_CHUNK_SIZE	(2 * sizeof(struct ena_eth_io_tx_desc))
+#define ENA_LLQ_HEADER		(128UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)
+#define ENA_LLQ_LARGE_HEADER	(256UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)
 
 struct ena_com_tx_ctx {
 	struct ena_com_tx_meta ena_meta;
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 1d4f2f4d10f2..d671df4b76bc 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -476,6 +476,21 @@ static void ena_get_ringparam(struct net_device *netdev,
 	ring->tx_max_pending = adapter->max_tx_ring_size;
 	ring->rx_max_pending = adapter->max_rx_ring_size;
+	if (adapter->ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+		bool large_llq_supported = adapter->large_llq_header_supported;
+
+		kernel_ring->tx_push = true;
+		kernel_ring->tx_push_buf_len = adapter->ena_dev->tx_max_header_size;
+		if (large_llq_supported)
+			kernel_ring->tx_push_buf_max_len = ENA_LLQ_LARGE_HEADER;
+		else
+			kernel_ring->tx_push_buf_max_len = ENA_LLQ_HEADER;
+	} else {
+		kernel_ring->tx_push = false;
+		kernel_ring->tx_push_buf_max_len = 0;
+		kernel_ring->tx_push_buf_len = 0;
+	}
+
 	ring->tx_pending = adapter->tx_ring[0].ring_size;
 	ring->rx_pending = adapter->rx_ring[0].ring_size;
 }
@@ -486,7 +501,8 @@ static int ena_set_ringparam(struct net_device *netdev,
 			     struct netlink_ext_ack *extack)
 {
 	struct ena_adapter *adapter = netdev_priv(netdev);
-	u32 new_tx_size, new_rx_size;
+	u32 new_tx_size, new_rx_size, new_tx_push_buf_len;
+	bool changed = false;
 
 	new_tx_size = ring->tx_pending < ENA_MIN_RING_SIZE ?
 		ENA_MIN_RING_SIZE : ring->tx_pending;
@@ -496,11 +512,51 @@ static int ena_set_ringparam(struct net_device *netdev,
 		ENA_MIN_RING_SIZE : ring->rx_pending;
 	new_rx_size = rounddown_pow_of_two(new_rx_size);
 
-	if (new_tx_size == adapter->requested_tx_ring_size &&
-	    new_rx_size == adapter->requested_rx_ring_size)
+	changed |= new_tx_size != adapter->requested_tx_ring_size ||
+		   new_rx_size != adapter->requested_rx_ring_size;
+
+	/* This value is ignored if LLQ is not supported */
+	new_tx_push_buf_len = adapter->ena_dev->tx_max_header_size;
+
+	if ((adapter->ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) !=
+	    kernel_ring->tx_push) {
+		NL_SET_ERR_MSG_MOD(extack, "Push mode state cannot be modified");
+		return -EINVAL;
+	}
+
+	/* Validate that the push buffer is supported on the underlying device */
+	if (kernel_ring->tx_push_buf_len) {
+		enum ena_admin_placement_policy_type placement;
+
+		new_tx_push_buf_len = kernel_ring->tx_push_buf_len;
+
+		placement = adapter->ena_dev->tx_mem_queue_type;
+		if (placement == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+			return -EOPNOTSUPP;
+
+		if (new_tx_push_buf_len != ENA_LLQ_HEADER &&
+		    new_tx_push_buf_len != ENA_LLQ_LARGE_HEADER) {
+			bool large_llq_sup = adapter->large_llq_header_supported;
+			char large_llq_size_str[40];
+
+			snprintf(large_llq_size_str, 40, ", %lu", ENA_LLQ_LARGE_HEADER);
+
+			NL_SET_ERR_MSG_FMT_MOD(extack,
+					       "Supported tx push buff values: [%lu%s]",
+					       ENA_LLQ_HEADER,
+					       large_llq_sup ?
+					       large_llq_size_str : "");
+
+			return -EINVAL;
+		}
+
+		changed |= new_tx_push_buf_len != adapter->ena_dev->tx_max_header_size;
+	}
+
+	if (!changed)
 		return 0;
 
-	return ena_update_queue_sizes(adapter, new_tx_size, new_rx_size);
+	return ena_update_queue_params(adapter, new_tx_size, new_rx_size,
+				       new_tx_push_buf_len);
 }
 
 static u32 ena_flow_hash_to_flow_type(u16 hash_fields)
@@ -909,6 +965,8 @@ static int ena_set_tunable(struct net_device *netdev,
 static const struct ethtool_ops ena_ethtool_ops = {
 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
 				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
+	.supported_ring_params	= ETHTOOL_RING_USE_TX_PUSH_BUF_LEN |
+				  ETHTOOL_RING_USE_TX_PUSH,
 	.get_link_ksettings	= ena_get_link_ksettings,
 	.get_drvinfo		= ena_get_drvinfo,
 	.get_msglevel		= ena_get_msglevel,
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index cbfe7f977270..e6a6efaeb87c 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1898,7 +1898,6 @@ static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
 {
 	u32 total_done = 0;
 	u16 next_to_clean;
-	u32 tx_bytes = 0;
 	int tx_pkts = 0;
 	u16 req_id;
 	int rc;
@@ -1936,7 +1935,6 @@ static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
 			  "tx_poll: q %d skb %p completed\n", xdp_ring->qid,
 			  xdpf);
 
-		tx_bytes += xdpf->len;
 		tx_pkts++;
 		total_done += tx_info->tx_descs;
@@ -2809,11 +2807,13 @@ static int ena_close(struct net_device *netdev)
 	return 0;
 }
 
-int ena_update_queue_sizes(struct ena_adapter *adapter,
-			   u32 new_tx_size,
-			   u32 new_rx_size)
+int ena_update_queue_params(struct ena_adapter *adapter,
+			    u32 new_tx_size,
+			    u32 new_rx_size,
+			    u32 new_llq_header_len)
 {
-	bool dev_was_up;
+	bool dev_was_up, large_llq_changed = false;
+	int rc = 0;
 
 	dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
 	ena_close(adapter->netdev);
@@ -2823,7 +2823,21 @@ int ena_update_queue_sizes(struct ena_adapter *adapter,
 			  0,
 			  adapter->xdp_num_queues +
 			  adapter->num_io_queues);
-	return dev_was_up ? ena_up(adapter) : 0;
+
+	large_llq_changed = adapter->ena_dev->tx_mem_queue_type ==
+			    ENA_ADMIN_PLACEMENT_POLICY_DEV;
+	large_llq_changed &=
+		new_llq_header_len != adapter->ena_dev->tx_max_header_size;
+
+	/* a check that the configuration is valid is done by caller */
+	if (large_llq_changed) {
+		adapter->large_llq_header_enabled = !adapter->large_llq_header_enabled;
+
+		ena_destroy_device(adapter, false);
+		rc = ena_restore_device(adapter);
+	}
+
+	return dev_was_up && !rc ? ena_up(adapter) : rc;
 }
 
 int ena_set_rx_copybreak(struct ena_adapter *adapter, u32 rx_copybreak)
@@ -3364,6 +3378,98 @@ static const struct net_device_ops ena_netdev_ops = {
 	.ndo_xdp_xmit		= ena_xdp_xmit,
 };
 
+static void ena_calc_io_queue_size(struct ena_adapter *adapter,
+				   struct ena_com_dev_get_features_ctx *get_feat_ctx)
+{
+	struct ena_admin_feature_llq_desc *llq = &get_feat_ctx->llq;
+	struct ena_com_dev *ena_dev = adapter->ena_dev;
+	u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
+	u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
+	u32 max_tx_queue_size;
+	u32 max_rx_queue_size;
+
+	/* If this function is called after driver load, the ring sizes have already
+	 * been configured. Take it into account when recalculating ring size.
+	 */
+	if (adapter->tx_ring->ring_size)
+		tx_queue_size = adapter->tx_ring->ring_size;
+
+	if (adapter->rx_ring->ring_size)
+		rx_queue_size = adapter->rx_ring->ring_size;
+
+	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
+		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
+			&get_feat_ctx->max_queue_ext.max_queue_ext;
+		max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
+					  max_queue_ext->max_rx_sq_depth);
+		max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
+
+		if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+			max_tx_queue_size = min_t(u32, max_tx_queue_size,
+						  llq->max_llq_depth);
+		else
+			max_tx_queue_size = min_t(u32, max_tx_queue_size,
+						  max_queue_ext->max_tx_sq_depth);
+
+		adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+						 max_queue_ext->max_per_packet_tx_descs);
+		adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+						 max_queue_ext->max_per_packet_rx_descs);
+	} else {
+		struct ena_admin_queue_feature_desc *max_queues =
+			&get_feat_ctx->max_queues;
+		max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
+					  max_queues->max_sq_depth);
+		max_tx_queue_size = max_queues->max_cq_depth;
+
+		if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+			max_tx_queue_size = min_t(u32, max_tx_queue_size,
+						  llq->max_llq_depth);
+		else
+			max_tx_queue_size = min_t(u32, max_tx_queue_size,
+						  max_queues->max_sq_depth);
+
+		adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+						 max_queues->max_packet_tx_descs);
+		adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+						 max_queues->max_packet_rx_descs);
+	}
+
+	max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
+	max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
+
+	/* When forcing large headers, we multiply the entry size by 2, and therefore divide
+	 * the queue size by 2, leaving the amount of memory used by the queues unchanged.
+	 */
+	if (adapter->large_llq_header_enabled) {
+		if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&
+		    ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+			max_tx_queue_size /= 2;
+			dev_info(&adapter->pdev->dev,
+				 "Forcing large headers and decreasing maximum TX queue size to %d\n",
+				 max_tx_queue_size);
+		} else {
+			dev_err(&adapter->pdev->dev,
+				"Forcing large headers failed: LLQ is disabled or device does not support large headers\n");
+
+			adapter->large_llq_header_enabled = false;
+		}
+	}
+
+	tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
+				  max_tx_queue_size);
+	rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
+				  max_rx_queue_size);
+
+	tx_queue_size = rounddown_pow_of_two(tx_queue_size);
+	rx_queue_size = rounddown_pow_of_two(rx_queue_size);
+
+	adapter->max_tx_ring_size = max_tx_queue_size;
+	adapter->max_rx_ring_size = max_rx_queue_size;
+	adapter->requested_tx_ring_size = tx_queue_size;
+	adapter->requested_rx_ring_size = rx_queue_size;
+}
+
 static int ena_device_validate_params(struct ena_adapter *adapter,
 				      struct ena_com_dev_get_features_ctx *get_feat_ctx)
 {
@@ -3387,13 +3493,30 @@ static int ena_device_validate_params(struct ena_adapter *adapter,
 	return 0;
 }
 
-static void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
+static void set_default_llq_configurations(struct ena_adapter *adapter,
+					   struct ena_llq_configurations *llq_config,
+					   struct ena_admin_feature_llq_desc *llq)
 {
+	struct ena_com_dev *ena_dev = adapter->ena_dev;
+
 	llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
 	llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
 	llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
-	llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
-	llq_config->llq_ring_entry_size_value = 128;
+
+	adapter->large_llq_header_supported =
+		!!(ena_dev->supported_features & BIT(ENA_ADMIN_LLQ));
+	adapter->large_llq_header_supported &=
+		!!(llq->entry_size_ctrl_supported &
+		   ENA_ADMIN_LIST_ENTRY_SIZE_256B);
+
+	if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&
+	    adapter->large_llq_header_enabled) {
+		llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
+		llq_config->llq_ring_entry_size_value = 256;
+	} else {
+		llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
+		llq_config->llq_ring_entry_size_value = 128;
+	}
 }
 
 static int ena_set_queues_placement_policy(struct pci_dev *pdev,
@@ -3412,6 +3535,13 @@ static int ena_set_queues_placement_policy(struct pci_dev *pdev,
 		return 0;
 	}
 
+	if (!ena_dev->mem_bar) {
+		netdev_err(ena_dev->net_device,
+			   "LLQ is advertised as supported but device doesn't expose mem bar\n");
+		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+		return 0;
+	}
+
 	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
 	if (unlikely(rc)) {
 		dev_err(&pdev->dev,
@@ -3427,15 +3557,8 @@ static int ena_map_llq_mem_bar(struct pci_dev *pdev, struct ena_com_dev *ena_dev
 {
 	bool has_mem_bar = !!(bars & BIT(ENA_MEM_BAR));
 
-	if (!has_mem_bar) {
-		if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
-			dev_err(&pdev->dev,
-				"ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
-			ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
-		}
-
+	if (!has_mem_bar)
 		return 0;
-	}
 
 	ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
 					   pci_resource_start(pdev, ENA_MEM_BAR),
@@ -3447,10 +3570,11 @@ static int ena_map_llq_mem_bar(struct pci_dev *pdev, struct ena_com_dev *ena_dev
 	return 0;
 }
 
-static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
+static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev,
 			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
 			   bool *wd_state)
 {
+	struct ena_com_dev *ena_dev = adapter->ena_dev;
 	struct ena_llq_configurations llq_config;
 	struct device *dev = &pdev->dev;
 	bool readless_supported;
@@ -3535,7 +3659,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
 
 	*wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
 
-	set_default_llq_configurations(&llq_config);
+	set_default_llq_configurations(adapter, &llq_config, &get_feat_ctx->llq);
 
 	rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
 					     &llq_config);
@@ -3544,6 +3668,8 @@
 		goto err_admin_init;
 	}
 
+	ena_calc_io_queue_size(adapter, get_feat_ctx);
+
 	return 0;
 
 err_admin_init:
@@ -3638,17 +3764,25 @@ static int ena_restore_device(struct ena_adapter *adapter)
 	struct ena_com_dev_get_features_ctx get_feat_ctx;
 	struct ena_com_dev *ena_dev = adapter->ena_dev;
 	struct pci_dev *pdev = adapter->pdev;
+	struct ena_ring *txr;
+	int rc, count, i;
 	bool wd_state;
-	int rc;
 
 	set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
-	rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
+	rc = ena_device_init(adapter, adapter->pdev, &get_feat_ctx, &wd_state);
 	if (rc) {
 		dev_err(&pdev->dev, "Can not initialize device\n");
 		goto err;
 	}
 	adapter->wd_state = wd_state;
 
+	count = adapter->xdp_num_queues + adapter->num_io_queues;
+	for (i = 0 ; i < count; i++) {
+		txr = &adapter->tx_ring[i];
+		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
+		txr->tx_max_header_size = ena_dev->tx_max_header_size;
+	}
+
 	rc = ena_device_validate_params(adapter, &get_feat_ctx);
 	if (rc) {
 		dev_err(&pdev->dev, "Validation of device parameters failed\n");
@@ -4162,72 +4296,6 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
 	pci_release_selected_regions(pdev, release_bars);
 }
 
-
-static void ena_calc_io_queue_size(struct ena_adapter *adapter,
-				   struct ena_com_dev_get_features_ctx *get_feat_ctx)
-{
-	struct ena_admin_feature_llq_desc *llq = &get_feat_ctx->llq;
-	struct ena_com_dev *ena_dev = adapter->ena_dev;
-	u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
-	u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
-	u32 max_tx_queue_size;
-	u32 max_rx_queue_size;
-
-	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
-		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
-			&get_feat_ctx->max_queue_ext.max_queue_ext;
-		max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
-					  max_queue_ext->max_rx_sq_depth);
-		max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
-
-		if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
-			max_tx_queue_size = min_t(u32, max_tx_queue_size,
-						  llq->max_llq_depth);
-		else
-			max_tx_queue_size = min_t(u32, max_tx_queue_size,
-						  max_queue_ext->max_tx_sq_depth);
-
-		adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
-						 max_queue_ext->max_per_packet_tx_descs);
-		adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
-						 max_queue_ext->max_per_packet_rx_descs);
-	} else {
-		struct ena_admin_queue_feature_desc *max_queues =
-			&get_feat_ctx->max_queues;
-		max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
-					  max_queues->max_sq_depth);
-		max_tx_queue_size = max_queues->max_cq_depth;
-
-		if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
-			max_tx_queue_size = min_t(u32, max_tx_queue_size,
-						  llq->max_llq_depth);
-		else
-			max_tx_queue_size = min_t(u32, max_tx_queue_size,
-						  max_queues->max_sq_depth);
-
-		adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
-						 max_queues->max_packet_tx_descs);
-		adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
-						 max_queues->max_packet_rx_descs);
-	}
-
-	max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
-	max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
-
-	tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
-				  max_tx_queue_size);
-	rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
-				  max_rx_queue_size);
-
-	tx_queue_size = rounddown_pow_of_two(tx_queue_size);
-	rx_queue_size = rounddown_pow_of_two(rx_queue_size);
-
-	adapter->max_tx_ring_size = max_tx_queue_size;
-	adapter->max_rx_ring_size = max_rx_queue_size;
-	adapter->requested_tx_ring_size = tx_queue_size;
-	adapter->requested_rx_ring_size = rx_queue_size;
-}
-
 /* ena_probe - Device Initialization Routine
  * @pdev: PCI device information struct
  * @ent: entry in ena_pci_tbl
@@ -4310,7 +4378,13 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	pci_set_drvdata(pdev, adapter);
 
-	rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
+	rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
+	if (rc) {
+		dev_err(&pdev->dev, "ENA LLQ bar mapping failed\n");
+		goto err_netdev_destroy;
+	}
+
+	rc = ena_device_init(adapter, pdev, &get_feat_ctx, &wd_state);
 	if (rc) {
 		dev_err(&pdev->dev, "ENA device init failed\n");
 		if (rc == -ETIME)
@@ -4318,12 +4392,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			goto err_netdev_destroy;
 	}
 
-	rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
-	if (rc) {
-		dev_err(&pdev->dev, "ENA llq bar mapping failed\n");
-		goto err_device_destroy;
-	}
-
 	/* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
 	 * Updated during device initialization with the real granularity
 	 */
@@ -4331,7 +4399,6 @@
 	ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
 	ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
 	max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx);
-	ena_calc_io_queue_size(adapter, &get_feat_ctx);
 	if (unlikely(!max_num_io_queues)) {
 		rc = -EFAULT;
 		goto err_device_destroy;
@@ -4364,6 +4431,7 @@
 			"Failed to query interrupt moderation feature\n");
 		goto err_device_destroy;
 	}
+
 	ena_init_io_rings(adapter,
 			  0,
 			  adapter->xdp_num_queues +
@@ -4488,6 +4556,7 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
 	rtnl_lock(); /* lock released inside the below if-else block */
 	adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN;
 	ena_destroy_device(adapter, true);
+
 	if (shutdown) {
 		netif_device_detach(netdev);
 		dev_close(netdev);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 2cb141079474..5a0d4ee76172 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -334,6 +334,14 @@ struct ena_adapter {
 
 	u32 msg_enable;
 
+	/* large_llq_header_enabled is used for two purposes:
+	 * 1. Indicates that large LLQ has been requested.
+	 * 2. Indicates whether large LLQ is set or not after device
+	 *    initialization / configuration.
+	 */
+	bool large_llq_header_enabled;
+	bool large_llq_header_supported;
+
 	u16 max_tx_sgl_size;
 	u16 max_rx_sgl_size;
 
@@ -388,9 +396,10 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
 
 int ena_update_hw_stats(struct ena_adapter *adapter);
 
-int ena_update_queue_sizes(struct ena_adapter *adapter,
-			   u32 new_tx_size,
-			   u32 new_rx_size);
+int ena_update_queue_params(struct ena_adapter *adapter,
+			    u32 new_tx_size,
+			    u32 new_rx_size,
+			    u32 new_llq_header_len);
 
 int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count);
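
Note: the two tx_push_buf_len values accepted by the new ena_set_ringparam() logic follow directly from the defines added to ena_eth_com.h above. The sketch below (plain user-space C, not driver code) works through the arithmetic; it assumes the TX descriptor is 16 bytes, matching the four 32-bit words of the upstream struct ena_eth_io_tx_desc, and the stand-in struct here is illustrative only.

#include <stdio.h>

/* Stand-in for the driver's TX descriptor; assumed to be 16 bytes
 * (4 x 32-bit words), as in the upstream ena_eth_io_tx_desc layout.
 */
struct ena_eth_io_tx_desc {
	unsigned int len_ctrl;
	unsigned int meta_ctrl;
	unsigned int buff_addr_lo;
	unsigned int buff_addr_hi_hdr_sz;
};

/* Mirrors the defines added to ena_eth_com.h: each LLQ entry reserves
 * room for two DMA descriptors, and the remainder of the 128B/256B
 * entry carries the packet header pushed inline to the device.
 */
#define ENA_LLQ_ENTRY_DESC_CHUNK_SIZE	(2 * sizeof(struct ena_eth_io_tx_desc))
#define ENA_LLQ_HEADER		(128UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)
#define ENA_LLQ_LARGE_HEADER	(256UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)

int main(void)
{
	/* With 16-byte descriptors this prints 96 and 224, the only
	 * values ena_set_ringparam() accepts for tx_push_buf_len.
	 */
	printf("tx_push_buf_len (128B entry): %lu\n", ENA_LLQ_HEADER);
	printf("tx_push_buf_len (256B entry): %lu\n", ENA_LLQ_LARGE_HEADER);

	/* Doubling the entry size halves the usable LLQ depth, which is
	 * why ena_calc_io_queue_size() divides max_tx_queue_size by 2
	 * when large headers are forced: a 1024-entry LLQ of 128B
	 * entries becomes a 512-entry LLQ of 256B entries, so the BAR
	 * memory footprint stays the same.
	 */
	return 0;
}

From user space the parameter travels over the ethtool ring API; with an ethtool build that understands the new attribute, something like `ethtool -G <iface> tx-push-buf-len 224` (option name assumed from the ETHTOOL_RING_USE_TX_PUSH_BUF_LEN support added above) should switch the driver to 256B LLQ entries via ena_update_queue_params().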