Diffstat (limited to 'drivers/net/ethernet/intel/i40evf')
-rw-r--r--  drivers/net/ethernet/intel/i40evf/Makefile           |    5
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.c      |    4
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.h      |    2
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h  |  104
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_common.c      |  258
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_devids.h      |    2
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_prototype.h   |   23
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_trace.h       |  229
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c        |  628
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h        |  131
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_type.h        |   83
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h    |  414
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf.h           |   78
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_client.c    |  564
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_client.h    |  166
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c   |  135
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c      |  326
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c  |  335
18 files changed, 2429 insertions(+), 1058 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40evf/Makefile b/drivers/net/ethernet/intel/i40evf/Makefile
index 3a423836a565..a393f4a07f06 100644
--- a/drivers/net/ethernet/intel/i40evf/Makefile
+++ b/drivers/net/ethernet/intel/i40evf/Makefile
@@ -29,8 +29,11 @@
#
#
+ccflags-y += -I$(src)
+subdir-ccflags-y += -I$(src)
+
obj-$(CONFIG_I40EVF) += i40evf.o
i40evf-objs := i40evf_main.o i40evf_ethtool.o i40evf_virtchnl.o \
- i40e_txrx.o i40e_common.o i40e_adminq.o
+ i40e_txrx.o i40e_common.o i40e_adminq.o i40evf_client.o
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index 96385156b824..8b0d4b255dea 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -797,8 +797,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
*/
if (i40evf_asq_done(hw))
break;
- usleep_range(1000, 2000);
- total_delay++;
+ udelay(50);
+ total_delay += 50;
} while (total_delay < hw->aq.asq_cmd_timeout);
}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index 1f9b3b5d946d..e0bfaa3d4a21 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -151,7 +151,7 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
/* general information */
#define I40E_AQ_LARGE_BUF 512
-#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */
+#define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */
void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
u16 opcode);
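The two hunks above work together: the send-command poll loop now advances total_delay in microseconds, so the timeout constant must also be expressed in microseconds to keep the same 250 ms budget while checking the queue every 50 us instead of every 1-2 ms. A minimal sketch of the resulting loop semantics (paraphrasing the hunks above, not verbatim driver code):

    u32 total_delay = 0;

    do {
            if (i40evf_asq_done(hw))        /* descriptor written back */
                    break;
            udelay(50);                     /* busy-wait 50 microseconds */
            total_delay += 50;
    } while (total_delay < hw->aq.asq_cmd_timeout); /* 250000 us = 250 ms */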
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index eeb9864bc5b1..83e63e55c4b4 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -132,6 +132,10 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_list_func_capabilities = 0x000A,
i40e_aqc_opc_list_dev_capabilities = 0x000B,
+ /* Proxy commands */
+ i40e_aqc_opc_set_proxy_config = 0x0104,
+ i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105,
+
/* LAA */
i40e_aqc_opc_mac_address_read = 0x0107,
i40e_aqc_opc_mac_address_write = 0x0108,
@@ -139,6 +143,10 @@ enum i40e_admin_queue_opc {
/* PXE */
i40e_aqc_opc_clear_pxe_mode = 0x0110,
+ /* WoL commands */
+ i40e_aqc_opc_set_wol_filter = 0x0120,
+ i40e_aqc_opc_get_wake_reason = 0x0121,
+
/* internal switch commands */
i40e_aqc_opc_get_switch_config = 0x0200,
i40e_aqc_opc_add_statistics = 0x0201,
@@ -177,10 +185,15 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_remove_control_packet_filter = 0x025B,
i40e_aqc_opc_add_cloud_filters = 0x025C,
i40e_aqc_opc_remove_cloud_filters = 0x025D,
+ i40e_aqc_opc_clear_wol_switch_filters = 0x025E,
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
+ /* Pipeline Personalization Profile */
+ i40e_aqc_opc_write_personalization_profile = 0x0270,
+ i40e_aqc_opc_get_personalization_profile_list = 0x0271,
+
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
@@ -515,7 +528,7 @@ struct i40e_aqc_mac_address_read {
#define I40E_AQC_PORT_ADDR_VALID 0x40
#define I40E_AQC_WOL_ADDR_VALID 0x80
#define I40E_AQC_MC_MAG_EN_VALID 0x100
-#define I40E_AQC_ADDR_VALID_MASK 0x1F0
+#define I40E_AQC_ADDR_VALID_MASK 0x3F0
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
@@ -558,6 +571,57 @@ struct i40e_aqc_clear_pxe {
I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
+/* Set WoL Filter (0x0120) */
+
+struct i40e_aqc_set_wol_filter {
+ __le16 filter_index;
+#define I40E_AQC_MAX_NUM_WOL_FILTERS 8
+#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15
+#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \
+ I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT)
+
+#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0
+#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \
+ I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT)
+ __le16 cmd_flags;
+#define I40E_AQC_SET_WOL_FILTER 0x8000
+#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
+#define I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR 0x2000
+#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0
+#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1
+ __le16 valid_flags;
+#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000
+#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000
+ u8 reserved[2];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter);
+
+struct i40e_aqc_set_wol_filter_data {
+ u8 filter[128];
+ u8 mask[16];
+};
+
+I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data);
+
+/* Get Wake Reason (0x0121) */
+
+struct i40e_aqc_get_wake_reason_completion {
+ u8 reserved_1[2];
+ __le16 wake_reason;
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \
+ I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT)
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \
+ I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT)
+ u8 reserved_2[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion);
+
/* Switch configuration commands (0x02xx) */
/* Used by many indirect commands that only pass an seid and a buffer in the
@@ -640,6 +704,8 @@ struct i40e_aqc_set_port_parameters {
#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
__le16 bad_frame_vsi;
+#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0
+#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF
__le16 default_seid; /* reserved for command */
u8 reserved[10];
};
@@ -691,6 +757,7 @@ I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
/* Set Switch Configuration (direct 0x0205) */
struct i40e_aqc_set_switch_config {
__le16 flags;
+/* flags used for both fields below */
#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
__le16 valid_flags;
@@ -1364,6 +1431,36 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
+/* Pipeline Personalization Profile */
+struct i40e_aqc_write_personalization_profile {
+ u8 flags;
+ u8 reserved[3];
+ __le32 profile_track_id;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile);
+
+struct i40e_aqc_write_ppp_resp {
+ __le32 error_offset;
+ __le32 error_info;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct i40e_aqc_get_applied_profiles {
+ u8 flags;
+#define I40E_AQC_GET_PPP_GET_CONF 0x1
+#define I40E_AQC_GET_PPP_GET_RDPU_CONF 0x2
+ u8 rsv[3];
+ __le32 reserved;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_applied_profiles);
+
/* DCB 0x03xx*/
/* PFC Ignore (direct 0x0301)
@@ -1839,11 +1936,12 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
#define I40E_AQ_CONFIG_CRC_ENA 0x04
#define I40E_AQ_CONFIG_PACING_MASK 0x78
- u8 external_power_ability;
+ u8 power_desc;
#define I40E_AQ_LINK_POWER_CLASS_1 0x00
#define I40E_AQ_LINK_POWER_CLASS_2 0x01
#define I40E_AQ_LINK_POWER_CLASS_3 0x02
#define I40E_AQ_LINK_POWER_CLASS_4 0x03
+#define I40E_AQ_PWR_CLASS_MASK 0x03
u8 reserved[4];
};
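The new Set WoL Filter defines above are plain shift/mask values. A hypothetical caller (not part of this patch) might compose the descriptor fields like this, installing a magic-packet filter in slot 3 and marking the action bits valid:

    struct i40e_aqc_set_wol_filter cmd = {};

    /* filter_index packs the filter type and slot number */
    cmd.filter_index = cpu_to_le16(
            I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK |
            ((3 << I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT) &
             I40E_AQC_SET_WOL_FILTER_INDEX_MASK));
    cmd.cmd_flags = cpu_to_le16(I40E_AQC_SET_WOL_FILTER |
                                I40E_AQC_SET_WOL_FILTER_ACTION_SET);
    cmd.valid_flags = cpu_to_le16(I40E_AQC_SET_WOL_FILTER_ACTION_VALID);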
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index aa63b7fb993d..1dd1938f594f 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -27,7 +27,7 @@
#include "i40e_type.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"
-#include "i40e_virtchnl.h"
+#include <linux/avf/virtchnl.h>
/**
* i40e_set_mac_type - Sets MAC type
@@ -64,11 +64,11 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
hw->mac.type = I40E_MAC_X722;
break;
case I40E_DEV_ID_X722_VF:
- case I40E_DEV_ID_X722_VF_HV:
hw->mac.type = I40E_MAC_X722_VF;
break;
case I40E_DEV_ID_VF:
case I40E_DEV_ID_VF_HV:
+ case I40E_DEV_ID_ADAPTIVE_VF:
hw->mac.type = I40E_MAC_VF;
break;
default:
@@ -305,7 +305,6 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
{
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
u8 *buf = (u8 *)buffer;
- u16 i = 0;
if ((!(mask & hw->debug_mask)) || (desc == NULL))
return;
@@ -333,12 +332,18 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
if (buf_len < len)
len = buf_len;
/* write the full 16-byte chunks */
- for (i = 0; i < (len - 16); i += 16)
- i40e_debug(hw, mask, "\t0x%04X %16ph\n", i, buf + i);
- /* write whatever's left over without overrunning the buffer */
- if (i < len)
- i40e_debug(hw, mask, "\t0x%04X %*ph\n",
- i, len - i, buf + i);
+ if (hw->debug_mask & mask) {
+ char prefix[20];
+
+ snprintf(prefix, 20,
+ "i40evf %02x:%02x.%x: \t0x",
+ hw->bus.bus_id,
+ hw->bus.device,
+ hw->bus.func);
+
+ print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
+ 16, 1, buf, len, false);
+ }
}
}
@@ -954,7 +959,9 @@ u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
int retry = 5;
u32 val = 0;
- use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+ use_register = (((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver < 5)) ||
+ (hw->mac.type == I40E_MAC_X722));
if (!use_register) {
do_retry:
status = i40evf_aq_rx_ctl_read_register(hw, reg_addr,
@@ -1015,7 +1022,9 @@ void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
bool use_register;
int retry = 5;
- use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+ use_register = (((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver < 5)) ||
+ (hw->mac.type == I40E_MAC_X722));
if (!use_register) {
do_retry:
status = i40evf_aq_rx_ctl_write_register(hw, reg_addr,
@@ -1046,7 +1055,7 @@ do_retry:
* completion before returning.
**/
i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
- enum i40e_virtchnl_ops v_opcode,
+ enum virtchnl_ops v_opcode,
i40e_status v_retval,
u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details)
@@ -1084,9 +1093,9 @@ i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
* with appropriate information.
**/
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
- struct i40e_virtchnl_vf_resource *msg)
+ struct virtchnl_vf_resource *msg)
{
- struct i40e_virtchnl_vsi_resource *vsi_res;
+ struct virtchnl_vsi_resource *vsi_res;
int i;
vsi_res = &msg->vsi_res[0];
@@ -1096,11 +1105,10 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw,
hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
hw->dev_caps.dcb = msg->vf_offload_flags &
- I40E_VIRTCHNL_VF_OFFLOAD_L2;
- hw->dev_caps.fcoe = (msg->vf_offload_flags &
- I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0;
+ VIRTCHNL_VF_OFFLOAD_L2;
+ hw->dev_caps.fcoe = 0;
for (i = 0; i < msg->num_vsis; i++) {
- if (vsi_res->vsi_type == I40E_VSI_SRIOV) {
+ if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) {
ether_addr_copy(hw->mac.perm_addr,
vsi_res->default_mac_addr);
ether_addr_copy(hw->mac.addr,
@@ -1120,6 +1128,218 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw,
**/
i40e_status i40e_vf_reset(struct i40e_hw *hw)
{
- return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF,
+ return i40e_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
0, NULL, 0, NULL);
}
+
+/**
+ * i40evf_aq_write_ppp - Write pipeline personalization profile (ppp)
+ * @hw: pointer to the hw struct
+ * @buff: command buffer (size in bytes = buff_size)
+ * @buff_size: buffer size in bytes
+ * @track_id: package tracking id
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum
+i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_write_personalization_profile *cmd =
+ (struct i40e_aqc_write_personalization_profile *)
+ &desc.params.raw;
+ struct i40e_aqc_write_ppp_resp *resp;
+ i40e_status status;
+
+ i40evf_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_write_personalization_profile);
+
+ desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ desc.datalen = cpu_to_le16(buff_size);
+
+ cmd->profile_track_id = cpu_to_le32(track_id);
+
+ status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ resp = (struct i40e_aqc_write_ppp_resp *)&desc.params.raw;
+ if (error_offset)
+ *error_offset = le32_to_cpu(resp->error_offset);
+ if (error_info)
+ *error_info = le32_to_cpu(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
+ * i40evf_aq_get_ppp_list - Read pipeline personalization profile (ppp)
+ * @hw: pointer to the hw struct
+ * @buff: command buffer (size in bytes = buff_size)
+ * @buff_size: buffer size in bytes
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum
+i40e_status_code i40evf_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u8 flags,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_applied_profiles *cmd =
+ (struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
+ i40e_status status;
+
+ i40evf_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_personalization_profile_list);
+
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = cpu_to_le16(buff_size);
+
+ cmd->flags = flags;
+
+ status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40evf_find_segment_in_package
+ * @segment_type: the segment type to search for (i.e., SEGMENT_TYPE_I40E)
+ * @pkg_hdr: pointer to the package header to be searched
+ *
+ * This function searches a package file for a particular segment type. On
+ * success it returns a pointer to the segment header, otherwise it will
+ * return NULL.
+ **/
+struct i40e_generic_seg_header *
+i40evf_find_segment_in_package(u32 segment_type,
+ struct i40e_package_header *pkg_hdr)
+{
+ struct i40e_generic_seg_header *segment;
+ u32 i;
+
+ /* Search all package segments for the requested segment type */
+ for (i = 0; i < pkg_hdr->segment_count; i++) {
+ segment =
+ (struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
+ pkg_hdr->segment_offset[i]);
+
+ if (segment->type == segment_type)
+ return segment;
+ }
+
+ return NULL;
+}
+
+/**
+ * i40evf_write_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be downloaded
+ * @track_id: package tracking id
+ *
+ * Handles the download of a complete package.
+ */
+enum i40e_status_code
+i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u32 track_id)
+{
+ i40e_status status = 0;
+ struct i40e_section_table *sec_tbl;
+ struct i40e_profile_section_header *sec = NULL;
+ u32 dev_cnt;
+ u32 vendor_dev_id;
+ u32 *nvm;
+ u32 section_size = 0;
+ u32 offset = 0, info = 0;
+ u32 i;
+
+ if (!track_id) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0.");
+ return I40E_NOT_SUPPORTED;
+ }
+
+ dev_cnt = profile->device_table_count;
+
+ for (i = 0; i < dev_cnt; i++) {
+ vendor_dev_id = profile->device_table[i].vendor_dev_id;
+ if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL)
+ if (hw->device_id == (vendor_dev_id & 0xFFFF))
+ break;
+ }
+ if (i == dev_cnt) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support PPP");
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ nvm = (u32 *)&profile->device_table[dev_cnt];
+ sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1];
+
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec = (struct i40e_profile_section_header *)((u8 *)profile +
+ sec_tbl->section_offset[i]);
+
+ /* Skip 'AQ', 'note' and 'name' sections */
+ if (sec->section.type != SECTION_TYPE_MMIO)
+ continue;
+
+ section_size = sec->section.size +
+ sizeof(struct i40e_profile_section_header);
+
+ /* Write profile */
+ status = i40evf_aq_write_ppp(hw, (void *)sec, (u16)section_size,
+ track_id, &offset, &info, NULL);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Failed to write profile: offset %d, info %d",
+ offset, info);
+ break;
+ }
+ }
+ return status;
+}
+
+/**
+ * i40evf_add_pinfo_to_list
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package
+ * @profile_info_sec: buffer for information section
+ * @track_id: package tracking id
+ *
+ * Register a profile to the list of loaded profiles.
+ */
+enum i40e_status_code
+i40evf_add_pinfo_to_list(struct i40e_hw *hw,
+ struct i40e_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id)
+{
+ i40e_status status = 0;
+ struct i40e_profile_section_header *sec = NULL;
+ struct i40e_profile_info *pinfo;
+ u32 offset = 0, info = 0;
+
+ sec = (struct i40e_profile_section_header *)profile_info_sec;
+ sec->tbl_size = 1;
+ sec->data_end = sizeof(struct i40e_profile_section_header) +
+ sizeof(struct i40e_profile_info);
+ sec->section.type = SECTION_TYPE_INFO;
+ sec->section.offset = sizeof(struct i40e_profile_section_header);
+ sec->section.size = sizeof(struct i40e_profile_info);
+ pinfo = (struct i40e_profile_info *)(profile_info_sec +
+ sec->section.offset);
+ pinfo->track_id = track_id;
+ pinfo->version = profile->version;
+ pinfo->op = I40E_PPP_ADD_TRACKID;
+ memcpy(pinfo->name, profile->name, I40E_PPP_NAME_SIZE);
+
+ status = i40evf_aq_write_ppp(hw, (void *)sec, sec->data_end,
+ track_id, &offset, &info, NULL);
+ return status;
+}
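Together the new helpers implement the full pipeline personalization profile (PPP) download flow: locate the profile segment in a package image, write it to the device, then register it in the list of applied profiles. A hedged caller sketch, assuming pkg_hdr points at a package image already loaded into memory (error handling condensed; this caller is not part of the patch):

    struct i40e_generic_seg_header *seg;
    struct i40e_profile_segment *profile;
    u8 pinfo_sec[sizeof(struct i40e_profile_section_header) +
                 sizeof(struct i40e_profile_info)];
    u32 track_id = 0x8000000A;      /* any nonzero tracking id */
    enum i40e_status_code status;

    seg = i40evf_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
    if (!seg)
            return I40E_ERR_PARAM;  /* no I40E segment in this package */

    profile = (struct i40e_profile_segment *)seg;
    status = i40evf_write_profile(hw, profile, track_id);
    if (!status)
            status = i40evf_add_pinfo_to_list(hw, profile, pinfo_sec,
                                              track_id);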
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
index 21dcaee1ad1d..0469e4bfd3ec 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
@@ -43,12 +43,12 @@
#define I40E_DEV_ID_25G_SFP28 0x158B
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
+#define I40E_DEV_ID_ADAPTIVE_VF 0x1889
#define I40E_DEV_ID_SFP_X722 0x37D0
#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
#define I40E_DEV_ID_SFP_I_X722 0x37D3
#define I40E_DEV_ID_X722_VF 0x37CD
-#define I40E_DEV_ID_X722_VF_HV 0x37D9
#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
(d) == I40E_DEV_ID_QSFP_B || \
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index ba6c6bda0e22..c9836bba487d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -29,7 +29,7 @@
#include "i40e_type.h"
#include "i40e_alloc.h"
-#include "i40e_virtchnl.h"
+#include <linux/avf/virtchnl.h>
/* Prototypes for shared code functions that are not in
* the standard function pointer structures. These are
@@ -87,10 +87,10 @@ static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
/* i40e_common for VF drivers*/
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
- struct i40e_virtchnl_vf_resource *msg);
+ struct virtchnl_vf_resource *msg);
i40e_status i40e_vf_reset(struct i40e_hw *hw);
i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
- enum i40e_virtchnl_ops v_opcode,
+ enum virtchnl_ops v_opcode,
i40e_status v_retval,
u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details);
@@ -122,4 +122,21 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
+i40e_status i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40evf_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u8 flags,
+ struct i40e_asq_cmd_details *cmd_details);
+struct i40e_generic_seg_header *
+i40evf_find_segment_in_package(u32 segment_type,
+ struct i40e_package_header *pkg_header);
+enum i40e_status_code
+i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
+ u32 track_id);
+enum i40e_status_code
+i40evf_add_pinfo_to_list(struct i40e_hw *hw,
+ struct i40e_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id);
#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_trace.h b/drivers/net/ethernet/intel/i40evf/i40e_trace.h
new file mode 100644
index 000000000000..9a5100b2b7c7
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_trace.h
@@ -0,0 +1,229 @@
+/*******************************************************************************
+ *
+ * Intel(R) 40-10 Gigabit Ethernet Virtual Function Driver
+ * Copyright(c) 2013 - 2017 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+/* Modeled on trace-events-sample.h */
+
+/* The trace subsystem name for i40evf will be "i40evf".
+ *
+ * This file is named i40e_trace.h.
+ *
+ * Since this include file's name is different from the trace
+ * subsystem name, we'll have to define TRACE_INCLUDE_FILE at the end
+ * of this file.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM i40evf
+
+/* See trace-events-sample.h for a detailed description of why this
+ * guard clause is different from most normal include files.
+ */
+#if !defined(_I40E_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _I40E_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+/**
+ * i40e_trace() macro enables shared code to refer to trace points
+ * like:
+ *
+ * trace_i40e{,vf}_example(args...)
+ *
+ * ... as:
+ *
+ * i40e_trace(example, args...)
+ *
+ * ... to resolve to the PF or VF version of the tracepoint without
+ * ifdefs, and to allow tracepoints to be disabled entirely at build
+ * time.
+ *
+ * Trace point should always be referred to in the driver via this
+ * macro.
+ *
+ * Similarly, i40e_trace_enabled(trace_name) wraps references to
+ * trace_i40e{,vf}_<trace_name>_enabled() functions.
+ */
+#define _I40E_TRACE_NAME(trace_name) (trace_ ## i40evf ## _ ## trace_name)
+#define I40E_TRACE_NAME(trace_name) _I40E_TRACE_NAME(trace_name)
+
+#define i40e_trace(trace_name, args...) I40E_TRACE_NAME(trace_name)(args)
+
+#define i40e_trace_enabled(trace_name) I40E_TRACE_NAME(trace_name##_enabled)()
+
+/* Events common to PF and VF. Corresponding versions will be defined
+ * for both, named trace_i40e_* and trace_i40evf_*. The i40e_trace()
+ * macro above will select the right trace point name for the driver
+ * being built from shared code.
+ */
+
+/* Events related to a vsi & ring */
+DECLARE_EVENT_CLASS(
+ i40evf_tx_template,
+
+ TP_PROTO(struct i40e_ring *ring,
+ struct i40e_tx_desc *desc,
+ struct i40e_tx_buffer *buf),
+
+ TP_ARGS(ring, desc, buf),
+
+ /* The convention here is to make the first fields in the
+ * TP_STRUCT match the TP_PROTO exactly. This enables the use
+ * of the args struct generated by the tplist tool (from the
+ * bcc-tools package) to be used for those fields. To access
+ * fields other than the tracepoint args will require the
+ * tplist output to be adjusted.
+ */
+ TP_STRUCT__entry(
+ __field(void*, ring)
+ __field(void*, desc)
+ __field(void*, buf)
+ __string(devname, ring->netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->ring = ring;
+ __entry->desc = desc;
+ __entry->buf = buf;
+ __assign_str(devname, ring->netdev->name);
+ ),
+
+ TP_printk(
+ "netdev: %s ring: %p desc: %p buf %p",
+ __get_str(devname), __entry->ring,
+ __entry->desc, __entry->buf)
+);
+
+DEFINE_EVENT(
+ i40evf_tx_template, i40evf_clean_tx_irq,
+ TP_PROTO(struct i40e_ring *ring,
+ struct i40e_tx_desc *desc,
+ struct i40e_tx_buffer *buf),
+
+ TP_ARGS(ring, desc, buf));
+
+DEFINE_EVENT(
+ i40evf_tx_template, i40evf_clean_tx_irq_unmap,
+ TP_PROTO(struct i40e_ring *ring,
+ struct i40e_tx_desc *desc,
+ struct i40e_tx_buffer *buf),
+
+ TP_ARGS(ring, desc, buf));
+
+DECLARE_EVENT_CLASS(
+ i40evf_rx_template,
+
+ TP_PROTO(struct i40e_ring *ring,
+ union i40e_32byte_rx_desc *desc,
+ struct sk_buff *skb),
+
+ TP_ARGS(ring, desc, skb),
+
+ TP_STRUCT__entry(
+ __field(void*, ring)
+ __field(void*, desc)
+ __field(void*, skb)
+ __string(devname, ring->netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->ring = ring;
+ __entry->desc = desc;
+ __entry->skb = skb;
+ __assign_str(devname, ring->netdev->name);
+ ),
+
+ TP_printk(
+ "netdev: %s ring: %p desc: %p skb %p",
+ __get_str(devname), __entry->ring,
+ __entry->desc, __entry->skb)
+);
+
+DEFINE_EVENT(
+ i40evf_rx_template, i40evf_clean_rx_irq,
+ TP_PROTO(struct i40e_ring *ring,
+ union i40e_32byte_rx_desc *desc,
+ struct sk_buff *skb),
+
+ TP_ARGS(ring, desc, skb));
+
+DEFINE_EVENT(
+ i40evf_rx_template, i40evf_clean_rx_irq_rx,
+ TP_PROTO(struct i40e_ring *ring,
+ union i40e_32byte_rx_desc *desc,
+ struct sk_buff *skb),
+
+ TP_ARGS(ring, desc, skb));
+
+DECLARE_EVENT_CLASS(
+ i40evf_xmit_template,
+
+ TP_PROTO(struct sk_buff *skb,
+ struct i40e_ring *ring),
+
+ TP_ARGS(skb, ring),
+
+ TP_STRUCT__entry(
+ __field(void*, skb)
+ __field(void*, ring)
+ __string(devname, ring->netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->skb = skb;
+ __entry->ring = ring;
+ __assign_str(devname, ring->netdev->name);
+ ),
+
+ TP_printk(
+ "netdev: %s skb: %p ring: %p",
+ __get_str(devname), __entry->skb,
+ __entry->ring)
+);
+
+DEFINE_EVENT(
+ i40evf_xmit_template, i40evf_xmit_frame_ring,
+ TP_PROTO(struct sk_buff *skb,
+ struct i40e_ring *ring),
+
+ TP_ARGS(skb, ring));
+
+DEFINE_EVENT(
+ i40evf_xmit_template, i40evf_xmit_frame_ring_drop,
+ TP_PROTO(struct sk_buff *skb,
+ struct i40e_ring *ring),
+
+ TP_ARGS(skb, ring));
+
+/* Events unique to the VF. */
+
+#endif /* _I40E_TRACE_H_ */
+/* This must be outside ifdef _I40E_TRACE_H */
+
+/* This trace include file is not located in the .../include/trace
+ * with the kernel tracepoint definitions, because we're a loadable
+ * module.
+ */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE i40e_trace
+#include <trace/define_trace.h>
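As the comment block explains, shared Tx/Rx code never calls trace_i40evf_* directly; it goes through i40e_trace() so the same source builds in both the PF driver (trace subsystem "i40e") and here (trace subsystem "i40evf"). For example, the call added later in this patch in i40e_txrx.c:

    i40e_trace(xmit_frame_ring, skb, tx_ring);

expands to trace_i40evf_xmit_frame_ring(skb, tx_ring) in this driver, and to trace_i40e_xmit_frame_ring(skb, tx_ring) in the PF build of the same shared code.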
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index df67ef37b7f3..12b02e530503 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -28,6 +28,7 @@
#include <net/busy_poll.h>
#include "i40evf.h"
+#include "i40e_trace.h"
#include "i40e_prototype.h"
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
@@ -137,10 +138,7 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
{
u32 head, tail;
- if (!in_sw)
- head = i40e_get_head(ring);
- else
- head = ring->next_to_clean;
+ head = ring->next_to_clean;
tail = readl(ring->tail);
if (head != tail)
@@ -165,7 +163,6 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
{
u16 i = tx_ring->next_to_clean;
struct i40e_tx_buffer *tx_buf;
- struct i40e_tx_desc *tx_head;
struct i40e_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0;
unsigned int budget = vsi->work_limit;
@@ -174,8 +171,6 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
tx_desc = I40E_TX_DESC(tx_ring, i);
i -= tx_ring->count;
- tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
-
do {
struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
@@ -186,8 +181,10 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
/* prevent any other reads prior to eop_desc */
read_barrier_depends();
- /* we have caught up to head, no work left to do */
- if (tx_head == tx_desc)
+ i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
+ /* if the descriptor isn't done, no work yet to do */
+ if (!(eop_desc->cmd_type_offset_bsz &
+ cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
break;
/* clear next_to_watch to prevent false hangs */
@@ -212,6 +209,8 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
/* unmap remaining buffers */
while (tx_desc != eop_desc) {
+ i40e_trace(clean_tx_irq_unmap,
+ tx_ring, tx_desc, tx_buf);
tx_buf++;
tx_desc++;
@@ -267,7 +266,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
if (budget &&
((j / WB_STRIDE) == 0) && (j > 0) &&
- !test_bit(__I40E_DOWN, &vsi->state) &&
+ !test_bit(__I40E_VSI_DOWN, vsi->state) &&
(I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true;
}
@@ -285,7 +284,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
smp_mb();
if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring->queue_index) &&
- !test_bit(__I40E_DOWN, &vsi->state)) {
+ !test_bit(__I40E_VSI_DOWN, vsi->state)) {
netif_wake_subqueue(tx_ring->netdev,
tx_ring->queue_index);
++tx_ring->tx_stats.restart_queue;
@@ -464,10 +463,6 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
/* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
- /* add u32 for head writeback, align after this takes care of
- * guaranteeing this is at least one cache line in size
- */
- tx_ring->size += sizeof(u32);
tx_ring->size = ALIGN(tx_ring->size, 4096);
tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
@@ -493,7 +488,6 @@ err:
**/
void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
{
- struct device *dev = rx_ring->dev;
unsigned long bi_size;
u16 i;
@@ -501,19 +495,34 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
if (!rx_ring->rx_bi)
return;
+ if (rx_ring->skb) {
+ dev_kfree_skb(rx_ring->skb);
+ rx_ring->skb = NULL;
+ }
+
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
- if (rx_bi->skb) {
- dev_kfree_skb(rx_bi->skb);
- rx_bi->skb = NULL;
- }
if (!rx_bi->page)
continue;
- dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
- __free_pages(rx_bi->page, 0);
+ /* Invalidate cache lines that may have been written to by
+ * device so that we avoid corrupting memory.
+ */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_bi->dma,
+ rx_bi->page_offset,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+
+ /* free resources associated with mapping */
+ dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
+ i40e_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ I40E_RX_DMA_ATTR);
+
+ __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
rx_bi->page = NULL;
rx_bi->page_offset = 0;
@@ -614,6 +623,17 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
}
/**
+ * i40e_rx_offset - Return expected offset into page to access data
+ * @rx_ring: Ring we are requesting offset of
+ *
+ * Returns the offset value for ring into the data buffer.
+ */
+static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
+{
+ return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
+}
+
+/**
* i40e_alloc_mapped_page - recycle or make a new page
* @rx_ring: ring to use
* @bi: rx_buffer struct to modify
@@ -634,27 +654,33 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
}
/* alloc new page for storage */
- page = dev_alloc_page();
+ page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_page_failed++;
return false;
}
/* map page for use */
- dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+ i40e_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ I40E_RX_DMA_ATTR);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
*/
if (dma_mapping_error(rx_ring->dev, dma)) {
- __free_pages(page, 0);
+ __free_pages(page, i40e_rx_pg_order(rx_ring));
rx_ring->rx_stats.alloc_page_failed++;
return false;
}
bi->dma = dma;
bi->page = page;
- bi->page_offset = 0;
+ bi->page_offset = i40e_rx_offset(rx_ring);
+
+ /* initialize pagecnt_bias to 1 representing we fully own page */
+ bi->pagecnt_bias = 1;
return true;
}
@@ -701,6 +727,12 @@ bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
if (!i40e_alloc_mapped_page(rx_ring, bi))
goto no_buffers;
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
@@ -741,8 +773,6 @@ no_buffers:
* @vsi: the VSI we care about
* @skb: skb currently being received and modified
* @rx_desc: the receive descriptor
- *
- * skb->protocol must be set before this function is called
**/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
struct sk_buff *skb,
@@ -805,13 +835,6 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
- /* If there is an outer header present that might contain a checksum
- * we need to bump the checksum level by 1 to reflect the fact that
- * we are indicating we validated the inner checksum.
- */
- if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
- skb->csum_level = 1;
-
/* Only report checksum unnecessary for TCP, UDP, or SCTP */
switch (decoded.inner_prot) {
case I40E_RX_PTYPE_INNER_PROT_TCP:
@@ -894,51 +917,12 @@ void i40evf_process_skb_fields(struct i40e_ring *rx_ring,
{
i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
- /* modifies the skb - consumes the enet header */
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
-
i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
skb_record_rx_queue(skb, rx_ring->queue_index);
-}
-
-/**
- * i40e_pull_tail - i40e specific version of skb_pull_tail
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @skb: pointer to current skb being adjusted
- *
- * This function is an i40e specific version of __pskb_pull_tail. The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
- */
-static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
-{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned char *va;
- unsigned int pull_len;
-
- /* it is valid to use page_address instead of kmap since we are
- * working with pages allocated out of the lomem pool per
- * alloc_page(GFP_ATOMIC)
- */
- va = skb_frag_address(frag);
-
- /* we need the header to contain the greater of either ETH_HLEN or
- * 60 bytes if the skb->len is less than 60 for skb_pad.
- */
- pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
-
- /* align pull length to size of long to optimize memcpy performance */
- skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
- /* update all of the pointers */
- skb_frag_size_sub(frag, pull_len);
- frag->page_offset += pull_len;
- skb->data_len -= pull_len;
- skb->tail += pull_len;
+ /* modifies the skb - consumes the enet header */
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
/**
@@ -956,10 +940,6 @@ static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
**/
static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
{
- /* place header in linear portion of buffer */
- if (skb_is_nonlinear(skb))
- i40e_pull_tail(rx_ring, skb);
-
/* if eth_skb_pad returns an error the skb was freed */
if (eth_skb_pad(skb))
return true;
@@ -987,169 +967,284 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
/* transfer page from old buffer to new buffer */
- *new_buff = *old_buff;
+ new_buff->dma = old_buff->dma;
+ new_buff->page = old_buff->page;
+ new_buff->page_offset = old_buff->page_offset;
+ new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
/**
- * i40e_page_is_reserved - check if reuse is possible
+ * i40e_page_is_reusable - check if any reuse is possible
* @page: page struct to check
+ *
+ * A page is not reusable if it was allocated under low memory
+ * conditions, or it's not in the same NUMA node as this CPU.
*/
-static inline bool i40e_page_is_reserved(struct page *page)
+static inline bool i40e_page_is_reusable(struct page *page)
{
- return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+ return (page_to_nid(page) == numa_mem_id()) &&
+ !page_is_pfmemalloc(page);
+}
+
+/**
+ * i40e_can_reuse_rx_page - Determine if this page can be reused by
+ * the adapter for another receive
+ *
+ * @rx_buffer: buffer containing the page
+ *
+ * If page is reusable, rx_buffer->page_offset is adjusted to point to
+ * an unused region in the page.
+ *
+ * For small pages, @truesize will be a constant value, half the size
+ * of the memory at page. We'll attempt to alternate between high and
+ * low halves of the page, with one half ready for use by the hardware
+ * and the other half being consumed by the stack. We use the page
+ * ref count to determine whether the stack has finished consuming the
+ * portion of this page that was passed up with a previous packet. If
+ * the page ref count is >1, we'll assume the "other" half page is
+ * still busy, and this page cannot be reused.
+ *
+ * For larger pages, @truesize will be the actual space used by the
+ * received packet (adjusted upward to an even multiple of the cache
+ * line size). This will advance through the page by the amount
+ * actually consumed by the received packets while there is still
+ * space for a buffer. Each region of larger pages will be used at
+ * most once, after which the page will not be reused.
+ *
+ * In either case, if the page is reusable its refcount is increased.
+ **/
+static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
+{
+ unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+ struct page *page = rx_buffer->page;
+
+ /* Is any reuse possible? */
+ if (unlikely(!i40e_page_is_reusable(page)))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely((page_count(page) - pagecnt_bias) > 1))
+ return false;
+#else
+#define I40E_LAST_OFFSET \
+ (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
+ if (rx_buffer->page_offset > I40E_LAST_OFFSET)
+ return false;
+#endif
+
+ /* If we have drained the page fragment pool we need to update
+ * the pagecnt_bias and page count so that we fully restock the
+ * number of references the driver holds.
+ */
+ if (unlikely(!pagecnt_bias)) {
+ page_ref_add(page, USHRT_MAX);
+ rx_buffer->pagecnt_bias = USHRT_MAX;
+ }
+
+ return true;
}
/**
* i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: buffer containing page to add
- * @rx_desc: descriptor containing length of buffer written by hardware
* @skb: sk_buff to place the data into
+ * @size: packet length from rx_desc
*
* This function will add the data contained in rx_buffer->page to the skb.
- * This is done either through a direct copy if the data in the buffer is
- * less than the skb header size, otherwise it will just attach the page as
- * a frag to the skb.
+ * It will just attach the page as a frag to the skb.
*
- * The function will then update the page offset if necessary and return
- * true if the buffer can be reused by the adapter.
+ * The function will then update the page offset.
**/
-static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
+static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer,
- union i40e_rx_desc *rx_desc,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ unsigned int size)
{
- struct page *page = rx_buffer->page;
- u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
#if (PAGE_SIZE < 8192)
- unsigned int truesize = I40E_RXBUFFER_2048;
+ unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
- unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
- unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
+ unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
#endif
- /* will the data fit in the skb we allocated? if so, just
- * copy it as it is pretty small anyway
- */
- if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
- unsigned char *va = page_address(page) + rx_buffer->page_offset;
-
- memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
-
- /* page is not reserved, we can reuse buffer as-is */
- if (likely(!i40e_page_is_reserved(page)))
- return true;
-
- /* this page cannot be reused so discard it */
- __free_pages(page, 0);
- return false;
- }
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
rx_buffer->page_offset, size, truesize);
- /* avoid re-using remote pages */
- if (unlikely(i40e_page_is_reserved(page)))
- return false;
-
+ /* page is being used so we must update the page offset */
#if (PAGE_SIZE < 8192)
- /* if we are only owner of page we can reuse it */
- if (unlikely(page_count(page) != 1))
- return false;
-
- /* flip page offset to other buffer */
rx_buffer->page_offset ^= truesize;
#else
- /* move offset up to the next cache line */
rx_buffer->page_offset += truesize;
-
- if (rx_buffer->page_offset > last_offset)
- return false;
#endif
-
- /* Even if we own the page, we are not allowed to use atomic_set()
- * This would break get_page_unless_zero() users.
- */
- get_page(rx_buffer->page);
-
- return true;
}
/**
- * i40evf_fetch_rx_buffer - Allocate skb and populate it
+ * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
* @rx_ring: rx descriptor ring to transact packets on
- * @rx_desc: descriptor containing info written by hardware
+ * @size: size of buffer to add to skb
*
- * This function allocates an skb on the fly, and populates it with the page
- * data from the current receive descriptor, taking care to set up the skb
- * correctly, as well as handling calling the page recycle function if
- * necessary.
+ * This function will pull an Rx buffer from the ring and synchronize it
+ * for use by the CPU.
*/
-static inline
-struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc)
+static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
+ const unsigned int size)
{
struct i40e_rx_buffer *rx_buffer;
- struct sk_buff *skb;
- struct page *page;
rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
- page = rx_buffer->page;
- prefetchw(page);
+ prefetchw(rx_buffer->page);
- skb = rx_buffer->skb;
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ size,
+ DMA_FROM_DEVICE);
- if (likely(!skb)) {
- void *page_addr = page_address(page) + rx_buffer->page_offset;
+ /* We have pulled a buffer for use, so decrement pagecnt_bias */
+ rx_buffer->pagecnt_bias--;
- /* prefetch first cache line of first page */
- prefetch(page_addr);
+ return rx_buffer;
+}
+
+/**
+ * i40e_construct_skb - Allocate skb and populate it
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: rx buffer to pull data from
+ * @size: size of buffer to add to skb
+ *
+ * This function allocates an skb. It then populates it with the page
+ * data from the current receive descriptor, taking care to set up the
+ * skb correctly.
+ */
+static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer,
+ unsigned int size)
+{
+ void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(size);
+#endif
+ unsigned int headlen;
+ struct sk_buff *skb;
+
+ /* prefetch first cache line of first page */
+ prefetch(va);
#if L1_CACHE_BYTES < 128
- prefetch(page_addr + L1_CACHE_BYTES);
+ prefetch(va + L1_CACHE_BYTES);
#endif
- /* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- I40E_RX_HDR_SIZE,
- GFP_ATOMIC | __GFP_NOWARN);
- if (unlikely(!skb)) {
- rx_ring->rx_stats.alloc_buff_failed++;
- return NULL;
- }
+ /* allocate a skb to store the frags */
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+ I40E_RX_HDR_SIZE,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb))
+ return NULL;
- /* we will be copying header into skb->data in
- * pskb_may_pull so it is in our interest to prefetch
- * it now to avoid a possible cache miss
- */
- prefetchw(skb->data);
+ /* Determine available headroom for copy */
+ headlen = size;
+ if (headlen > I40E_RX_HDR_SIZE)
+ headlen = eth_get_headlen(va, I40E_RX_HDR_SIZE);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+
+ /* update all of the pointers */
+ size -= headlen;
+ if (size) {
+ skb_add_rx_frag(skb, 0, rx_buffer->page,
+ rx_buffer->page_offset + headlen,
+ size, truesize);
+
+ /* buffer is used by skb, update page_offset */
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
} else {
- rx_buffer->skb = NULL;
+ /* buffer is unused, reset bias back to rx_buffer */
+ rx_buffer->pagecnt_bias++;
}
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_buffer->dma,
- rx_buffer->page_offset,
- I40E_RXBUFFER_2048,
- DMA_FROM_DEVICE);
+ return skb;
+}
+
+/**
+ * i40e_build_skb - Build skb around an existing buffer
+ * @rx_ring: Rx descriptor ring to transact packets on
+ * @rx_buffer: Rx buffer to pull data from
+ * @size: size of buffer to add to skb
+ *
+ * This function builds an skb around an existing Rx buffer, taking care
+ * to set up the skb correctly and avoid any memcpy overhead.
+ */
+static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer,
+ unsigned int size)
+{
+ void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+ SKB_DATA_ALIGN(I40E_SKB_PAD + size);
+#endif
+ struct sk_buff *skb;
+
+ /* prefetch first cache line of first page */
+ prefetch(va);
+#if L1_CACHE_BYTES < 128
+ prefetch(va + L1_CACHE_BYTES);
+#endif
+ /* build an skb around the page buffer */
+ skb = build_skb(va - I40E_SKB_PAD, truesize);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, I40E_SKB_PAD);
+ __skb_put(skb, size);
+
+ /* buffer is used by skb, update page_offset */
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
+
+ return skb;
+}
- /* pull page into skb */
- if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+/**
+ * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: rx buffer to pull data from
+ *
+ * This function will clean up the contents of the rx_buffer. It will
+ * either recycle the buffer or unmap it and free the associated resources.
+ */
+static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer)
+{
+ if (i40e_can_reuse_rx_page(rx_buffer)) {
/* hand second half of page back to the ring */
i40e_reuse_rx_page(rx_ring, rx_buffer);
rx_ring->rx_stats.page_reuse_count++;
} else {
/* we are not reusing the buffer so unmap it */
- dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
- DMA_FROM_DEVICE);
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+ i40e_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
+ __page_frag_cache_drain(rx_buffer->page,
+ rx_buffer->pagecnt_bias);
}
/* clear contents of buffer_info */
rx_buffer->page = NULL;
-
- return skb;
}
/**
@@ -1180,8 +1275,6 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
return false;
- /* place skb in next buffer to be received */
- rx_ring->rx_bi[ntc].skb = skb;
rx_ring->rx_stats.non_eop_descs++;
return true;
@@ -1202,12 +1295,14 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ struct sk_buff *skb = rx_ring->skb;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
bool failure = false;
while (likely(total_rx_packets < budget)) {
+ struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc;
- struct sk_buff *skb;
+ unsigned int size;
u16 vlan_tag;
u8 rx_ptype;
u64 qword;
@@ -1224,22 +1319,40 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* status_error_len will always be zero for unused descriptors
* because it's cleared in cleanup, and overlaps with hdr_addr
* which is always zero because packet split isn't used, if the
- * hardware wrote DD then it will be non-zero
+ * hardware wrote DD then the length will be non-zero
*/
- if (!i40e_test_staterr(rx_desc,
- BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
- break;
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
/* This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc until we know the
- * DD bit is set.
+ * any other fields out of the rx_desc until we have
+ * verified the descriptor has been written back.
*/
dma_rmb();
- skb = i40evf_fetch_rx_buffer(rx_ring, rx_desc);
- if (!skb)
+ size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ if (!size)
+ break;
+
+ i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
+ rx_buffer = i40e_get_rx_buffer(rx_ring, size);
+
+ /* retrieve a buffer from the ring */
+ if (skb)
+ i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
+ else if (ring_uses_build_skb(rx_ring))
+ skb = i40e_build_skb(rx_ring, rx_buffer, size);
+ else
+ skb = i40e_construct_skb(rx_ring, rx_buffer, size);
+
+ /* exit if we failed to retrieve a buffer */
+ if (!skb) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ rx_buffer->pagecnt_bias++;
break;
+ }
+ i40e_put_rx_buffer(rx_ring, rx_buffer);
cleaned_count++;
if (i40e_is_non_eop(rx_ring, rx_desc, skb))
@@ -1252,11 +1365,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
*/
if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
dev_kfree_skb_any(skb);
+ skb = NULL;
continue;
}
- if (i40e_cleanup_headers(rx_ring, skb))
+ if (i40e_cleanup_headers(rx_ring, skb)) {
+ skb = NULL;
continue;
+ }
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
@@ -1272,12 +1388,16 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
+ i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
i40e_receive_skb(rx_ring, skb, vlan_tag);
+ skb = NULL;
/* update budget accounting */
total_rx_packets++;
}
+ rx_ring->skb = skb;
+
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
rx_ring->stats.bytes += total_rx_bytes;
@@ -1305,18 +1425,18 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr)
/* a small macro to shorten up some long lines */
#define INTREG I40E_VFINT_DYN_CTLN1
-static inline int get_rx_itr_enabled(struct i40e_vsi *vsi, int idx)
+static inline int get_rx_itr(struct i40e_vsi *vsi, int idx)
{
struct i40evf_adapter *adapter = vsi->back;
- return !!(adapter->rx_rings[idx].rx_itr_setting);
+ return adapter->rx_rings[idx].rx_itr_setting;
}
-static inline int get_tx_itr_enabled(struct i40e_vsi *vsi, int idx)
+static inline int get_tx_itr(struct i40e_vsi *vsi, int idx)
{
struct i40evf_adapter *adapter = vsi->back;
- return !!(adapter->tx_rings[idx].tx_itr_setting);
+ return adapter->tx_rings[idx].tx_itr_setting;
}
/**
@@ -1342,8 +1462,8 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*/
rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
- rx_itr_setting = get_rx_itr_enabled(vsi, idx);
- tx_itr_setting = get_tx_itr_enabled(vsi, idx);
+ rx_itr_setting = get_rx_itr(vsi, idx);
+ tx_itr_setting = get_tx_itr(vsi, idx);
if (q_vector->itr_countdown > 0 ||
(!ITR_IS_DYNAMIC(rx_itr_setting) &&
@@ -1389,7 +1509,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
}
enable_int:
- if (!test_bit(__I40E_DOWN, &vsi->state))
+ if (!test_bit(__I40E_VSI_DOWN, vsi->state))
wr32(hw, INTREG(vector - 1), txval);
if (q_vector->itr_countdown)
@@ -1418,7 +1538,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
int budget_per_ring;
int work_done = 0;
- if (test_bit(__I40E_DOWN, &vsi->state)) {
+ if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
napi_complete(napi);
return 0;
}
@@ -1549,14 +1669,16 @@ out:
/**
* i40e_tso - set up the tso context descriptor
- * @skb: ptr to the skb we're sending
+ * @first: pointer to first Tx buffer for xmit
* @hdr_len: ptr to the size of the packet header
* @cd_type_cmd_tso_mss: Quad Word 1
*
* Returns 0 if no TSO can happen, 1 if tso is going, or error
**/
-static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
+static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
+ u64 *cd_type_cmd_tso_mss)
{
+ struct sk_buff *skb = first->skb;
u64 cd_cmd, cd_tso_len, cd_mss;
union {
struct iphdr *v4;
@@ -1569,6 +1691,7 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
unsigned char *hdr;
} l4;
u32 paylen, l4_offset;
+ u16 gso_segs, gso_size;
int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -1607,7 +1730,8 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
/* remove payload length from outer checksum */
paylen = skb->len - l4_offset;
- csum_replace_by_diff(&l4.udp->check, htonl(paylen));
+ csum_replace_by_diff(&l4.udp->check,
+ (__force __wsum)htonl(paylen));
}
/* reset pointers to inner headers */
@@ -1628,15 +1752,23 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
/* remove payload length from inner checksum */
paylen = skb->len - l4_offset;
- csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
+ csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
/* compute length of segmentation header */
*hdr_len = (l4.tcp->doff * 4) + l4_offset;
+ /* pull values out of skb_shinfo */
+ gso_size = skb_shinfo(skb)->gso_size;
+ gso_segs = skb_shinfo(skb)->gso_segs;
+
+ /* update GSO size and bytecount with header size */
+ first->gso_segs = gso_segs;
+ first->bytecount += (first->gso_segs - 1) * *hdr_len;
+
/* find the field values */
cd_cmd = I40E_TX_CTX_DESC_TSO;
cd_tso_len = skb->len - *hdr_len;
- cd_mss = skb_shinfo(skb)->gso_size;
+ cd_mss = gso_size;
*cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
(cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
(cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
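This hunk moves the GSO byte accounting out of i40evf_tx_map() and into
i40e_tso(): first->bytecount starts out as skb->len (set in the
i40e_xmit_frame_ring hunk further down) and is then grown by one header
length per additional segment. A worked example with assumed values, not
taken from the patch:

	u16 gso_segs = 20;		/* skb_shinfo(skb)->gso_segs */
	u8 hdr_len = 54;		/* Ethernet + IPv4 + TCP headers */
	unsigned int bytecount = 29014;	/* skb->len: 54 + 20 * 1448 bytes */

	bytecount += (gso_segs - 1) * hdr_len;	/* 29014 + 19 * 54 = 30040 */
	/* i.e. 20 wire frames of 1502 bytes each */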
@@ -1949,8 +2081,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
u16 i = tx_ring->next_to_use;
u32 td_tag = 0;
dma_addr_t dma;
- u16 gso_segs;
- u16 desc_count = 1;
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
@@ -1958,15 +2088,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
I40E_TX_FLAGS_VLAN_SHIFT;
}
- if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
- gso_segs = skb_shinfo(skb)->gso_segs;
- else
- gso_segs = 1;
-
- /* multiply data chunks by size of headers */
- first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
- first->gso_segs = gso_segs;
- first->skb = skb;
first->tx_flags = tx_flags;
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
@@ -1995,7 +2116,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_desc++;
i++;
- desc_count++;
if (i == tx_ring->count) {
tx_desc = I40E_TX_DESC(tx_ring, 0);
@@ -2017,7 +2137,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_desc++;
i++;
- desc_count++;
if (i == tx_ring->count) {
tx_desc = I40E_TX_DESC(tx_ring, 0);
@@ -2043,46 +2162,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
- /* write last descriptor with EOP bit */
- td_cmd |= I40E_TX_DESC_CMD_EOP;
-
- /* We can OR these values together as they both are checked against
- * 4 below and at this point desc_count will be used as a boolean value
- * after this if/else block.
- */
- desc_count |= ++tx_ring->packet_stride;
-
- /* Algorithm to optimize tail and RS bit setting:
- * if queue is stopped
- * mark RS bit
- * reset packet counter
- * else if xmit_more is supported and is true
- * advance packet counter to 4
- * reset desc_count to 0
- *
- * if desc_count >= 4
- * mark RS bit
- * reset packet counter
- * if desc_count > 0
- * update tail
- *
- * Note: If there are less than 4 descriptors
- * pending and interrupts were disabled the service task will
- * trigger a force WB.
- */
- if (netif_xmit_stopped(txring_txq(tx_ring))) {
- goto do_rs;
- } else if (skb->xmit_more) {
- /* set stride to arm on next packet and reset desc_count */
- tx_ring->packet_stride = WB_STRIDE;
- desc_count = 0;
- } else if (desc_count >= WB_STRIDE) {
-do_rs:
- /* write last descriptor with RS bit set */
- td_cmd |= I40E_TX_DESC_CMD_RS;
- tx_ring->packet_stride = 0;
- }
-
+ /* write last descriptor with RS and EOP bits */
+ td_cmd |= I40E_TXD_CMD;
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset, size, td_tag);
@@ -2098,7 +2179,7 @@ do_rs:
first->next_to_watch = tx_desc;
/* notify HW of packet */
- if (desc_count) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
writel(i, tx_ring->tail);
/* we need this if more than one processor can write to our tail
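Together with the block removed above, this hunk replaces the RS-bit/tail
batching heuristic (WB_STRIDE plus packet_stride bookkeeping) with a simpler
rule: every packet's last descriptor carries RS | EOP, and the doorbell is
only rung once the stack has stopped batching. A condensed sketch using the
driver's own names:

	td_cmd |= I40E_TXD_CMD;	/* I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS */
	tx_desc->cmd_type_offset_bsz =
			build_ctob(td_cmd, td_offset, size, td_tag);

	/* write the tail only when no further frames are queued behind us */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more)
		writel(i, tx_ring->tail);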
@@ -2149,10 +2230,14 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
/* prefetch the data, we'll need it later */
prefetch(skb->data);
+ i40e_trace(xmit_frame_ring, skb, tx_ring);
+
count = i40e_xmit_descriptor_count(skb);
if (i40e_chk_linearize(skb, count)) {
- if (__skb_linearize(skb))
- goto out_drop;
+ if (__skb_linearize(skb)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
count = i40e_txd_use_count(skb->len);
tx_ring->tx_stats.tx_linearize++;
}
@@ -2168,6 +2253,12 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_bi[tx_ring->next_to_use];
+ first->skb = skb;
+ first->bytecount = skb->len;
+ first->gso_segs = 1;
+
/* prepare the xmit flags */
if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
goto out_drop;
@@ -2175,16 +2266,13 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
/* obtain protocol of skb */
protocol = vlan_get_protocol(skb);
- /* record the location of the first descriptor for this packet */
- first = &tx_ring->tx_bi[tx_ring->next_to_use];
-
/* setup IPv4/IPv6 offloads */
if (protocol == htons(ETH_P_IP))
tx_flags |= I40E_TX_FLAGS_IPV4;
else if (protocol == htons(ETH_P_IPV6))
tx_flags |= I40E_TX_FLAGS_IPV6;
- tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss);
+ tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
if (tso < 0)
goto out_drop;
@@ -2211,7 +2299,9 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_OK;
out_drop:
- dev_kfree_skb_any(skb);
+ i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
+ dev_kfree_skb_any(first->skb);
+ first->skb = NULL;
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index a5fc789f78eb..901282c87cf6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -104,10 +104,9 @@ enum i40e_dyn_idx_t {
/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256 256
+#define I40E_RXBUFFER_1536 1536 /* 128B aligned standard Ethernet frame */
#define I40E_RXBUFFER_2048 2048
-#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */
-#define I40E_RXBUFFER_4096 4096
-#define I40E_RXBUFFER_8192 8192
+#define I40E_RXBUFFER_3072 3072 /* Used for large frames w/ padding */
#define I40E_MAX_RXBUFFER 9728 /* largest size for single descriptor */
/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
@@ -120,6 +119,61 @@ enum i40e_dyn_idx_t {
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define i40e_rx_desc i40e_32byte_rx_desc
+#define I40E_RX_DMA_ATTR \
+ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
+/* Attempt to maximize the headroom available for incoming frames. We
+ * use a 2K buffer for receives and need 1536/1534 to store the data for
+ * the frame. This leaves us with 512 bytes of room. From that we need
+ * to deduct the space needed for the shared info and the padding needed
+ * to IP align the frame.
+ *
+ * Note: For cache line sizes 256 or larger this value is going to end
+ * up negative. In these cases we should fall back to the legacy
+ * receive path.
+ */
+#if (PAGE_SIZE < 8192)
+#define I40E_2K_TOO_SMALL_WITH_PADDING \
+((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))
+
+static inline int i40e_compute_pad(int rx_buf_len)
+{
+ int page_size, pad_size;
+
+ page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
+ pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
+
+ return pad_size;
+}
+
+static inline int i40e_skb_pad(void)
+{
+ int rx_buf_len;
+
+ /* If a 2K buffer cannot handle a standard Ethernet frame then
+ * optimize padding for a 3K buffer instead of a 1.5K buffer.
+ *
+ * For a 3K buffer we need to add enough padding to allow for
+ * tailroom due to NET_IP_ALIGN possibly shifting us out of
+ * cache-line alignment.
+ */
+ if (I40E_2K_TOO_SMALL_WITH_PADDING)
+ rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
+ else
+ rx_buf_len = I40E_RXBUFFER_1536;
+
+ /* if needed make room for NET_IP_ALIGN */
+ rx_buf_len -= NET_IP_ALIGN;
+
+ return i40e_compute_pad(rx_buf_len);
+}
+
+#define I40E_SKB_PAD i40e_skb_pad()
+#else
+#define I40E_2K_TOO_SMALL_WITH_PADDING false
+#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#endif
+
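/* Worked example for the padding logic above, a sketch with assumed values
 * (PAGE_SIZE 4096, 64-byte cache lines, NET_SKB_PAD 64, NET_IP_ALIGN 2,
 * sizeof(struct skb_shared_info) == 320, as on typical x86_64 builds):
 *
 *   SKB_WITH_OVERHEAD(2048)          = 2048 - 320 = 1728
 *   NET_SKB_PAD + I40E_RXBUFFER_1536 = 64 + 1536  = 1600 <= 1728
 *     => I40E_2K_TOO_SMALL_WITH_PADDING is false, a 2K buffer suffices
 *   rx_buf_len   = 1536 - 2          = 1534
 *   page_size    = ALIGN(1534, 2048) = 2048
 *   I40E_SKB_PAD = 1728 - 1534       = 194 bytes of headroom
 */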
/**
* i40e_test_staterr - tests bits in Rx descriptor status and error fields
* @rx_desc: pointer to receive descriptor (in le64 format)
@@ -239,10 +293,14 @@ struct i40e_tx_buffer {
};
struct i40e_rx_buffer {
- struct sk_buff *skb;
dma_addr_t dma;
struct page *page;
- unsigned int page_offset;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+ __u32 page_offset;
+#else
+ __u16 page_offset;
+#endif
+ __u16 pagecnt_bias;
};
struct i40e_queue_stats {
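The skb pointer leaves the per-buffer struct (it moves onto the ring in the
i40e_ring hunk below) and each Rx buffer instead carries a pagecnt_bias. A
minimal sketch of the bias scheme, assumed from the matching i40e PF driver
rather than quoted from this patch: the page refcount is inflated once, and
handing a buffer to the stack then costs only a non-atomic decrement of the
local bias.

	static void i40e_rx_bias_sketch(struct i40e_rx_buffer *bi)
	{
		/* give one reference to the stack without touching the
		 * atomic page refcount
		 */
		bi->pagecnt_bias--;

		/* refill both counters only once the bias is exhausted */
		if (unlikely(!bi->pagecnt_bias)) {
			page_ref_add(bi->page, USHRT_MAX);
			bi->pagecnt_bias = USHRT_MAX;
		}
	}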
@@ -322,7 +380,8 @@ struct i40e_ring {
u8 packet_stride;
u16 flags;
-#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
+#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
+#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1)
/* stats structs */
struct i40e_queue_stats stats;
@@ -340,8 +399,31 @@ struct i40e_ring {
struct rcu_head rcu; /* to avoid race on free */
u16 next_to_alloc;
+ struct sk_buff *skb; /* When i40evf_clean_rx_ring_irq() must
+ * return before it sees the EOP for
+ * the current packet, we save that skb
+ * here and resume receiving this
+ * packet the next time
+ * i40evf_clean_rx_ring_irq() is called
+ * for this ring.
+ */
} ____cacheline_internodealigned_in_smp;
+static inline bool ring_uses_build_skb(struct i40e_ring *ring)
+{
+ return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
+}
+
+static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
+{
+ ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
+}
+
+static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
+{
+ ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
+}
+
enum i40e_latency_range {
I40E_LOWEST_LATENCY = 0,
I40E_LOW_LATENCY = 1,
@@ -363,6 +445,17 @@ struct i40e_ring_container {
#define i40e_for_each_ring(pos, head) \
for (pos = (head).ring; pos != NULL; pos = pos->next)
+static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring->rx_buf_len > (PAGE_SIZE / 2))
+ return 1;
+#endif
+ return 0;
+}
+
+#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))
+
bool i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
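A quick worked example for i40e_rx_pg_order()/i40e_rx_pg_size() above:

	/* assuming PAGE_SIZE 4096:
	 *   rx_buf_len = 2048 -> order 0, pg_size = 4096 (two buffers/page)
	 *   rx_buf_len = 3072 -> order 1, pg_size = 8192 (two buffers/page)
	 * with PAGE_SIZE >= 8192 the order is always 0
	 */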
@@ -378,20 +471,6 @@ int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40evf_chk_linearize(struct sk_buff *skb);
/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring: Tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
- void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
- return le32_to_cpu(*(volatile __le32 *)head);
-}
-
-/**
* i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
* @skb: send buffer
* @tx_ring: ring to send buffer on
@@ -453,19 +532,7 @@ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
/* we can support up to 8 data buffers for a single send */
return count != I40E_MAX_BUFFER_TXD;
}
-
-/**
- * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE
- * @ptype: the packet type field from Rx descriptor write-back
- **/
-static inline bool i40e_rx_is_fcoe(u16 ptype)
-{
- return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
- (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
-}
-
/**
- * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
* @ring: Tx ring to find the netdev equivalent of
**/
static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index c85e8a31c072..bde7f24af1c6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -78,6 +78,7 @@ enum i40e_debug_mask {
I40E_DEBUG_DCB = 0x00000400,
I40E_DEBUG_DIAG = 0x00000800,
I40E_DEBUG_FD = 0x00001000,
+ I40E_DEBUG_PACKAGE = 0x00002000,
I40E_DEBUG_AQ_MESSAGE = 0x01000000,
I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
@@ -100,7 +101,6 @@ enum i40e_debug_mask {
*/
enum i40e_mac_type {
I40E_MAC_UNKNOWN = 0,
- I40E_MAC_X710,
I40E_MAC_XL710,
I40E_MAC_VF,
I40E_MAC_X722,
@@ -159,6 +159,7 @@ struct i40e_link_status {
enum i40e_aq_link_speed link_speed;
u8 link_info;
u8 an_info;
+ u8 fec_info;
u8 ext_info;
u8 loopback;
/* is Link Status Event notification to SW enabled */
@@ -443,6 +444,7 @@ struct i40e_bus_info {
u16 func;
u16 device;
u16 lan_id;
+ u16 bus_id;
};
/* Flow control (FC) parameters */
@@ -1395,4 +1397,83 @@ enum i40e_reset_type {
#define I40E_FD_INSET_FLEX_WORD57_SHIFT 10
#define I40E_FD_INSET_FLEX_WORD57_MASK (0x1ULL << \
I40E_FD_INSET_FLEX_WORD57_SHIFT)
+
+/* Version format for PPP (Pipeline Personalization Profile) */
+struct i40e_ppp_version {
+ u8 major;
+ u8 minor;
+ u8 update;
+ u8 draft;
+};
+
+#define I40E_PPP_NAME_SIZE 32
+
+/* Package header */
+struct i40e_package_header {
+ struct i40e_ppp_version version;
+ u32 segment_count;
+ u32 segment_offset[1];
+};
+
+/* Generic segment header */
+struct i40e_generic_seg_header {
+#define SEGMENT_TYPE_METADATA 0x00000001
+#define SEGMENT_TYPE_NOTES 0x00000002
+#define SEGMENT_TYPE_I40E 0x00000011
+#define SEGMENT_TYPE_X722 0x00000012
+ u32 type;
+ struct i40e_ppp_version version;
+ u32 size;
+ char name[I40E_PPP_NAME_SIZE];
+};
+
+struct i40e_metadata_segment {
+ struct i40e_generic_seg_header header;
+ struct i40e_ppp_version version;
+ u32 track_id;
+ char name[I40E_PPP_NAME_SIZE];
+};
+
+struct i40e_device_id_entry {
+ u32 vendor_dev_id;
+ u32 sub_vendor_dev_id;
+};
+
+struct i40e_profile_segment {
+ struct i40e_generic_seg_header header;
+ struct i40e_ppp_version version;
+ char name[I40E_PPP_NAME_SIZE];
+ u32 device_table_count;
+ struct i40e_device_id_entry device_table[1];
+};
+
+struct i40e_section_table {
+ u32 section_count;
+ u32 section_offset[1];
+};
+
+struct i40e_profile_section_header {
+ u16 tbl_size;
+ u16 data_end;
+ struct {
+#define SECTION_TYPE_INFO 0x00000010
+#define SECTION_TYPE_MMIO 0x00000800
+#define SECTION_TYPE_AQ 0x00000801
+#define SECTION_TYPE_NOTE 0x80000000
+#define SECTION_TYPE_NAME 0x80000001
+ u32 type;
+ u32 offset;
+ u32 size;
+ } section;
+};
+
+struct i40e_profile_info {
+ u32 track_id;
+ struct i40e_ppp_version version;
+ u8 op;
+#define I40E_PPP_ADD_TRACKID 0x01
+#define I40E_PPP_REMOVE_TRACKID 0x02
+ u8 reserved[7];
+ u8 name[I40E_PPP_NAME_SIZE];
+};
#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
deleted file mode 100644
index fc374f833aa9..000000000000
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ /dev/null
@@ -1,414 +0,0 @@
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <[email protected]>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
-
-#ifndef _I40E_VIRTCHNL_H_
-#define _I40E_VIRTCHNL_H_
-
-#include "i40e_type.h"
-
-/* Description:
- * This header file describes the VF-PF communication protocol used
- * by the various i40e drivers.
- *
- * Admin queue buffer usage:
- * desc->opcode is always i40e_aqc_opc_send_msg_to_pf
- * flags, retval, datalen, and data addr are all used normally.
- * Firmware copies the cookie fields when sending messages between the PF and
- * VF, but uses all other fields internally. Due to this limitation, we
- * must send all messages as "indirect", i.e. using an external buffer.
- *
- * All the vsi indexes are relative to the VF. Each VF can have maximum of
- * three VSIs. All the queue indexes are relative to the VSI. Each VF can
- * have a maximum of sixteen queues for all of its VSIs.
- *
- * The PF is required to return a status code in v_retval for all messages
- * except RESET_VF, which does not require any response. The return value is of
- * i40e_status_code type, defined in the i40e_type.h.
- *
- * In general, VF driver initialization should roughly follow the order of these
- * opcodes. The VF driver must first validate the API version of the PF driver,
- * then request a reset, then get resources, then configure queues and
- * interrupts. After these operations are complete, the VF driver may start
- * its queues, optionally add MAC and VLAN filters, and process traffic.
- */
-
-/* Opcodes for VF-PF communication. These are placed in the v_opcode field
- * of the virtchnl_msg structure.
- */
-enum i40e_virtchnl_ops {
-/* The PF sends status change events to VFs using
- * the I40E_VIRTCHNL_OP_EVENT opcode.
- * VFs send requests to the PF using the other ops.
- */
- I40E_VIRTCHNL_OP_UNKNOWN = 0,
- I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
- I40E_VIRTCHNL_OP_RESET_VF = 2,
- I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3,
- I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
- I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
- I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
- I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8,
- I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9,
- I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10,
- I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11,
- I40E_VIRTCHNL_OP_ADD_VLAN = 12,
- I40E_VIRTCHNL_OP_DEL_VLAN = 13,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
- I40E_VIRTCHNL_OP_GET_STATS = 15,
- I40E_VIRTCHNL_OP_FCOE = 16,
- I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
- I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
- I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
- I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
- I40E_VIRTCHNL_OP_SET_RSS_HENA = 26,
-
-};
-
-/* Virtual channel message descriptor. This overlays the admin queue
- * descriptor. All other data is passed in external buffers.
- */
-
-struct i40e_virtchnl_msg {
- u8 pad[8]; /* AQ flags/opcode/len/retval fields */
- enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
- i40e_status v_retval; /* ditto for desc->retval */
- u32 vfid; /* used by PF when sending to VF */
-};
-
-/* Message descriptions and data structures.*/
-
-/* I40E_VIRTCHNL_OP_VERSION
- * VF posts its version number to the PF. PF responds with its version number
- * in the same format, along with a return code.
- * Reply from PF has its major/minor versions also in param0 and param1.
- * If there is a major version mismatch, then the VF cannot operate.
- * If there is a minor version mismatch, then the VF can operate but should
- * add a warning to the system log.
- *
- * This enum element MUST always be specified as == 1, regardless of other
- * changes in the API. The PF must always respond to this message without
- * error regardless of version mismatch.
- */
-#define I40E_VIRTCHNL_VERSION_MAJOR 1
-#define I40E_VIRTCHNL_VERSION_MINOR 1
-#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
-
-struct i40e_virtchnl_version_info {
- u32 major;
- u32 minor;
-};
-
-/* I40E_VIRTCHNL_OP_RESET_VF
- * VF sends this request to PF with no parameters
- * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
- * until reset completion is indicated. The admin queue must be reinitialized
- * after this operation.
- *
- * When reset is complete, PF must ensure that all queues in all VSIs associated
- * with the VF are stopped, all queue configurations in the HMC are set to 0,
- * and all MAC and VLAN filters (except the default MAC address) on all VSIs
- * are cleared.
- */
-
-/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
- * Version 1.0 VF sends this request to PF with no parameters
- * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
- * PF responds with an indirect message containing
- * i40e_virtchnl_vf_resource and one or more
- * i40e_virtchnl_vsi_resource structures.
- */
-
-struct i40e_virtchnl_vsi_resource {
- u16 vsi_id;
- u16 num_queue_pairs;
- enum i40e_vsi_type vsi_type;
- u16 qset_handle;
- u8 default_mac_addr[ETH_ALEN];
-};
-/* VF offload flags */
-#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
-#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
-#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
-#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
-#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
-#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
-#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00100000
-
-#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \
- I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
-
-struct i40e_virtchnl_vf_resource {
- u16 num_vsis;
- u16 num_queue_pairs;
- u16 max_vectors;
- u16 max_mtu;
-
- u32 vf_offload_flags;
- u32 rss_key_size;
- u32 rss_lut_size;
-
- struct i40e_virtchnl_vsi_resource vsi_res[1];
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
- * VF sends this message to set up parameters for one TX queue.
- * External data buffer contains one instance of i40e_virtchnl_txq_info.
- * PF configures requested queue and returns a status code.
- */
-
-/* Tx queue config info */
-struct i40e_virtchnl_txq_info {
- u16 vsi_id;
- u16 queue_id;
- u16 ring_len; /* number of descriptors, multiple of 8 */
- u16 headwb_enabled;
- u64 dma_ring_addr;
- u64 dma_headwb_addr;
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
- * VF sends this message to set up parameters for one RX queue.
- * External data buffer contains one instance of i40e_virtchnl_rxq_info.
- * PF configures requested queue and returns a status code.
- */
-
-/* Rx queue config info */
-struct i40e_virtchnl_rxq_info {
- u16 vsi_id;
- u16 queue_id;
- u32 ring_len; /* number of descriptors, multiple of 32 */
- u16 hdr_size;
- u16 splithdr_enabled;
- u32 databuffer_size;
- u32 max_pkt_size;
- u64 dma_ring_addr;
- enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
- * VF sends this message to set parameters for all active TX and RX queues
- * associated with the specified VSI.
- * PF configures queues and returns status.
- * If the number of queues specified is greater than the number of queues
- * associated with the VSI, an error is returned and no queues are configured.
- */
-struct i40e_virtchnl_queue_pair_info {
- /* NOTE: vsi_id and queue_id should be identical for both queues. */
- struct i40e_virtchnl_txq_info txq;
- struct i40e_virtchnl_rxq_info rxq;
-};
-
-struct i40e_virtchnl_vsi_queue_config_info {
- u16 vsi_id;
- u16 num_queue_pairs;
- struct i40e_virtchnl_queue_pair_info qpair[1];
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
- * VF uses this message to map vectors to queues.
- * The rxq_map and txq_map fields are bitmaps used to indicate which queues
- * are to be associated with the specified vector.
- * The "other" causes are always mapped to vector 0.
- * PF configures interrupt mapping and returns status.
- */
-struct i40e_virtchnl_vector_map {
- u16 vsi_id;
- u16 vector_id;
- u16 rxq_map;
- u16 txq_map;
- u16 rxitr_idx;
- u16 txitr_idx;
-};
-
-struct i40e_virtchnl_irq_map_info {
- u16 num_vectors;
- struct i40e_virtchnl_vector_map vecmap[1];
-};
-
-/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
- * I40E_VIRTCHNL_OP_DISABLE_QUEUES
- * VF sends these message to enable or disable TX/RX queue pairs.
- * The queues fields are bitmaps indicating which queues to act upon.
- * (Currently, we only support 16 queues per VF, but we make the field
- * u32 to allow for expansion.)
- * PF performs requested action and returns status.
- */
-struct i40e_virtchnl_queue_select {
- u16 vsi_id;
- u16 pad;
- u32 rx_queues;
- u32 tx_queues;
-};
-
-/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
- * VF sends this message in order to add one or more unicast or multicast
- * address filters for the specified VSI.
- * PF adds the filters and returns status.
- */
-
-/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
- * VF sends this message in order to remove one or more unicast or multicast
- * filters for the specified VSI.
- * PF removes the filters and returns status.
- */
-
-struct i40e_virtchnl_ether_addr {
- u8 addr[ETH_ALEN];
- u8 pad[2];
-};
-
-struct i40e_virtchnl_ether_addr_list {
- u16 vsi_id;
- u16 num_elements;
- struct i40e_virtchnl_ether_addr list[1];
-};
-
-/* I40E_VIRTCHNL_OP_ADD_VLAN
- * VF sends this message to add one or more VLAN tag filters for receives.
- * PF adds the filters and returns status.
- * If a port VLAN is configured by the PF, this operation will return an
- * error to the VF.
- */
-
-/* I40E_VIRTCHNL_OP_DEL_VLAN
- * VF sends this message to remove one or more VLAN tag filters for receives.
- * PF removes the filters and returns status.
- * If a port VLAN is configured by the PF, this operation will return an
- * error to the VF.
- */
-
-struct i40e_virtchnl_vlan_filter_list {
- u16 vsi_id;
- u16 num_elements;
- u16 vlan_id[1];
-};
-
-/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
- * VF sends VSI id and flags.
- * PF returns status code in retval.
- * Note: we assume that broadcast accept mode is always enabled.
- */
-struct i40e_virtchnl_promisc_info {
- u16 vsi_id;
- u16 flags;
-};
-
-#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001
-#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002
-
-/* I40E_VIRTCHNL_OP_GET_STATS
- * VF sends this message to request stats for the selected VSI. VF uses
- * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
- * field is ignored by the PF.
- *
- * PF replies with struct i40e_eth_stats in an external buffer.
- */
-
-/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY
- * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT
- * VF sends these messages to configure RSS. Only supported if both PF
- * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
- * configuration negotiation. If this is the case, then the RSS fields in
- * the VF resource struct are valid.
- * Both the key and LUT are initialized to 0 by the PF, meaning that
- * RSS is effectively disabled until set up by the VF.
- */
-struct i40e_virtchnl_rss_key {
- u16 vsi_id;
- u16 key_len;
- u8 key[1]; /* RSS hash key, packed bytes */
-};
-
-struct i40e_virtchnl_rss_lut {
- u16 vsi_id;
- u16 lut_entries;
- u8 lut[1]; /* RSS lookup table*/
-};
-
-/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS
- * I40E_VIRTCHNL_OP_SET_RSS_HENA
- * VF sends these messages to get and set the hash filter enable bits for RSS.
- * By default, the PF sets these to all possible traffic types that the
- * hardware supports. The VF can query this value if it wants to change the
- * traffic types that are hashed by the hardware.
- * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h
- */
-struct i40e_virtchnl_rss_hena {
- u64 hena;
-};
-
-/* I40E_VIRTCHNL_OP_EVENT
- * PF sends this message to inform the VF driver of events that may affect it.
- * No direct response is expected from the VF, though it may generate other
- * messages in response to this one.
- */
-enum i40e_virtchnl_event_codes {
- I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
- I40E_VIRTCHNL_EVENT_LINK_CHANGE,
- I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
- I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
-};
-#define I40E_PF_EVENT_SEVERITY_INFO 0
-#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255
-
-struct i40e_virtchnl_pf_event {
- enum i40e_virtchnl_event_codes event;
- union {
- struct {
- enum i40e_aq_link_speed link_speed;
- bool link_status;
- } link_event;
- } event_data;
-
- int severity;
-};
-
-/* VF reset states - these are written into the RSTAT register:
- * I40E_VFGEN_RSTAT1 on the PF
- * I40E_VFGEN_RSTAT on the VF
- * When the PF initiates a reset, it writes 0
- * When the reset is complete, it writes 1
- * When the PF detects that the VF has recovered, it writes 2
- * VF checks this register periodically to determine if a reset has occurred,
- * then polls it to know when the reset is complete.
- * If either the PF or VF reads the register while the hardware
- * is in a reset state, it will return DEADBEEF, which, when masked
- * will result in 3.
- */
-enum i40e_vfr_states {
- I40E_VFR_INPROGRESS = 0,
- I40E_VFR_COMPLETED,
- I40E_VFR_VFACTIVE,
- I40E_VFR_UNKNOWN,
-};
-
-#endif /* _I40E_VIRTCHNL_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index fffe4cf2c20b..6cc92089fecb 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -43,12 +43,19 @@
#include <net/udp.h>
#include "i40e_type.h"
-#include "i40e_virtchnl.h"
+#include <linux/avf/virtchnl.h>
#include "i40e_txrx.h"
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
#define PFX "i40evf: "
+/* VSI state flags shared with common code */
+enum i40evf_vsi_state_t {
+ __I40E_VSI_DOWN,
+ /* This must be last as it determines the size of the BITMAP */
+ __I40E_VSI_STATE_SIZE__,
+};
+
/* dummy struct to make common code less painful */
struct i40e_vsi {
struct i40evf_adapter *back;
@@ -56,10 +63,11 @@ struct i40e_vsi {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u16 seid;
u16 id;
- unsigned long state;
+ DECLARE_BITMAP(state, __I40E_VSI_STATE_SIZE__);
int base_vector;
u16 work_limit;
u16 qs_handle;
+ void *priv; /* client driver data reference. */
};
/* How many Rx Buffers do we bundle into one write to the hardware ? */
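The move from a plain unsigned long to DECLARE_BITMAP() is what drives the
test_bit() changes in the i40e_txrx.c hunks above: a bitmap is an array, so
it is passed by name instead of by address. In sketch form:

	/* old: a single word, passed by address */
	unsigned long state;
	if (test_bit(__I40E_DOWN, &vsi->state))
		return;

	/* new: a bitmap sized by the final enum entry, passed as an array */
	DECLARE_BITMAP(state, __I40E_VSI_STATE_SIZE__);
	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;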
@@ -71,10 +79,6 @@ struct i40e_vsi {
#define I40EVF_MAX_RXD 4096
#define I40EVF_MIN_RXD 64
#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32
-
-/* Supported Rx Buffer Sizes */
-#define I40EVF_RXBUFFER_2048 2048
-#define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
#define I40EVF_MAX_AQ_BUF_SIZE 4096
#define I40EVF_AQ_LEN 32
#define I40EVF_AQ_MAX_ERR 20 /* times to try before resetting AQ */
@@ -169,15 +173,15 @@ enum i40evf_state_t {
enum i40evf_critical_section_t {
__I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */
+ __I40EVF_IN_CLIENT_TASK,
};
-/* make common code happy */
-#define __I40E_DOWN __I40EVF_DOWN
/* board specific private data structure */
struct i40evf_adapter {
struct timer_list watchdog_timer;
struct work_struct reset_task;
struct work_struct adminq_task;
+ struct delayed_work client_task;
struct delayed_work init_task;
struct i40e_q_vector *q_vectors;
struct list_head vlan_filter_list;
@@ -195,14 +199,16 @@ struct i40evf_adapter {
u64 hw_csum_rx_error;
u32 rx_desc_count;
int num_msix_vectors;
+ int num_iwarp_msix;
+ int iwarp_base_vector;
+ u32 client_pending;
+ struct i40e_client_instance *cinst;
struct msix_entry *msix_entries;
u32 flags;
#define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0)
-#define I40EVF_FLAG_IN_NETPOLL BIT(4)
#define I40EVF_FLAG_IMIR_ENABLED BIT(5)
#define I40EVF_FLAG_MQ_CAPABLE BIT(6)
-#define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7)
#define I40EVF_FLAG_PF_COMMS_FAILED BIT(8)
#define I40EVF_FLAG_RESET_PENDING BIT(9)
#define I40EVF_FLAG_RESET_NEEDED BIT(10)
@@ -210,15 +216,18 @@ struct i40evf_adapter {
#define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(12)
#define I40EVF_FLAG_ADDR_SET_BY_PF BIT(13)
#define I40EVF_FLAG_SERVICE_CLIENT_REQUESTED BIT(14)
-#define I40EVF_FLAG_PROMISC_ON BIT(15)
-#define I40EVF_FLAG_ALLMULTI_ON BIT(16)
+#define I40EVF_FLAG_CLIENT_NEEDS_OPEN BIT(15)
+#define I40EVF_FLAG_CLIENT_NEEDS_CLOSE BIT(16)
+#define I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS BIT(17)
+#define I40EVF_FLAG_PROMISC_ON BIT(18)
+#define I40EVF_FLAG_ALLMULTI_ON BIT(19)
+#define I40EVF_FLAG_LEGACY_RX BIT(20)
/* duplicates for common code */
-#define I40E_FLAG_FDIR_ATR_ENABLED 0
#define I40E_FLAG_DCB_ENABLED 0
-#define I40E_FLAG_IN_NETPOLL I40EVF_FLAG_IN_NETPOLL
#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED
#define I40E_FLAG_WB_ON_ITR_CAPABLE I40EVF_FLAG_WB_ON_ITR_CAPABLE
#define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE
+#define I40E_FLAG_LEGACY_RX I40EVF_FLAG_LEGACY_RX
/* flags for admin queue service task */
u32 aq_required;
#define I40EVF_FLAG_AQ_ENABLE_QUEUES BIT(0)
@@ -245,7 +254,6 @@ struct i40evf_adapter {
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
- struct net_device_stats net_stats;
struct i40e_hw hw; /* defined in i40e_type.h */
@@ -255,25 +263,26 @@ struct i40evf_adapter {
struct work_struct watchdog_task;
bool netdev_registered;
bool link_up;
- enum i40e_aq_link_speed link_speed;
- enum i40e_virtchnl_ops current_op;
-#define CLIENT_ENABLED(_a) ((_a)->vf_res ? \
+ enum virtchnl_link_speed link_speed;
+ enum virtchnl_ops current_op;
+#define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \
(_a)->vf_res->vf_offload_flags & \
- I40E_VIRTCHNL_VF_OFFLOAD_IWARP : \
+ VIRTCHNL_VF_OFFLOAD_IWARP : \
0)
+#define CLIENT_ENABLED(_a) ((_a)->cinst)
/* RSS by the PF should be preferred over RSS via other methods. */
#define RSS_PF(_a) ((_a)->vf_res->vf_offload_flags & \
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
+ VIRTCHNL_VF_OFFLOAD_RSS_PF)
#define RSS_AQ(_a) ((_a)->vf_res->vf_offload_flags & \
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+ VIRTCHNL_VF_OFFLOAD_RSS_AQ)
#define RSS_REG(_a) (!((_a)->vf_res->vf_offload_flags & \
- (I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ | \
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)))
+ (VIRTCHNL_VF_OFFLOAD_RSS_AQ | \
+ VIRTCHNL_VF_OFFLOAD_RSS_PF)))
#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_offload_flags & \
- I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
- struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
- struct i40e_virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
- struct i40e_virtchnl_version_info pf_version;
+ VIRTCHNL_VF_OFFLOAD_VLAN)
+ struct virtchnl_vf_resource *vf_res; /* incl. all VSIs */
+ struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
+ struct virtchnl_version_info pf_version;
#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \
((_a)->pf_version.minor == 1))
u16 msg_enable;
@@ -291,6 +300,12 @@ struct i40evf_adapter {
/* Ethtool Private Flags */
+/* lan device */
+struct i40e_device {
+ struct list_head list;
+ struct i40evf_adapter *vf;
+};
+
/* needed by i40evf_ethtool.c */
extern char i40evf_driver_name[];
extern const char i40evf_driver_version[];
@@ -333,7 +348,14 @@ void i40evf_set_hena(struct i40evf_adapter *adapter);
void i40evf_set_rss_key(struct i40evf_adapter *adapter);
void i40evf_set_rss_lut(struct i40evf_adapter *adapter);
void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
- enum i40e_virtchnl_ops v_opcode,
+ enum virtchnl_ops v_opcode,
i40e_status v_retval, u8 *msg, u16 msglen);
int i40evf_config_rss(struct i40evf_adapter *adapter);
+int i40evf_lan_add_device(struct i40evf_adapter *adapter);
+int i40evf_lan_del_device(struct i40evf_adapter *adapter);
+void i40evf_client_subtask(struct i40evf_adapter *adapter);
+void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len);
+void i40evf_notify_client_l2_params(struct i40e_vsi *vsi);
+void i40evf_notify_client_open(struct i40e_vsi *vsi);
+void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset);
#endif /* _I40EVF_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.c b/drivers/net/ethernet/intel/i40evf/i40evf_client.c
new file mode 100644
index 000000000000..93cf5fd17d91
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.c
@@ -0,0 +1,564 @@
+#include <linux/list.h>
+#include <linux/errno.h>
+
+#include "i40evf.h"
+#include "i40e_prototype.h"
+#include "i40evf_client.h"
+
+static
+const char i40evf_client_interface_version_str[] = I40EVF_CLIENT_VERSION_STR;
+static struct i40e_client *vf_registered_client;
+static LIST_HEAD(i40evf_devices);
+static DEFINE_MUTEX(i40evf_device_mutex);
+
+static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u8 *msg, u16 len);
+
+static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
+ struct i40e_client *client,
+ struct i40e_qvlist_info *qvlist_info);
+
+static struct i40e_ops i40evf_lan_ops = {
+ .virtchnl_send = i40evf_client_virtchnl_send,
+ .setup_qvlist = i40evf_client_setup_qvlist,
+};
+
+/**
+ * i40evf_notify_client_message - call the client message receive callback
+ * @vsi: the VSI associated with this client
+ * @msg: message buffer
+ * @len: length of message
+ *
+ * If there is a client to this VSI, call the client
+ **/
+void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len)
+{
+ struct i40e_client_instance *cinst;
+
+ if (!vsi)
+ return;
+
+ cinst = vsi->back->cinst;
+ if (!cinst || !cinst->client || !cinst->client->ops ||
+ !cinst->client->ops->virtchnl_receive) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance virtchnl_receive function\n");
+ return;
+ }
+ cinst->client->ops->virtchnl_receive(&cinst->lan_info, cinst->client,
+ msg, len);
+}
+
+/**
+ * i40evf_notify_client_l2_params - call the client notify callback
+ * @vsi: the VSI with l2 param changes
+ *
+ * If there is a client to this VSI, call the client
+ **/
+void i40evf_notify_client_l2_params(struct i40e_vsi *vsi)
+{
+ struct i40e_client_instance *cinst;
+ struct i40e_params params;
+
+ if (!vsi)
+ return;
+
+ cinst = vsi->back->cinst;
+ memset(&params, 0, sizeof(params));
+ params.mtu = vsi->netdev->mtu;
+ params.link_up = vsi->back->link_up;
+ params.qos.prio_qos[0].qs_handle = vsi->qs_handle;
+
+ if (!cinst || !cinst->client || !cinst->client->ops ||
+ !cinst->client->ops->l2_param_change) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance l2_param_change function\n");
+ return;
+ }
+ cinst->client->ops->l2_param_change(&cinst->lan_info, cinst->client,
+ &params);
+}
+
+/**
+ * i40evf_notify_client_open - call the client open callback
+ * @vsi: the VSI with netdev opened
+ *
+ * If there is a client to this netdev, call the client with open
+ **/
+void i40evf_notify_client_open(struct i40e_vsi *vsi)
+{
+ struct i40evf_adapter *adapter = vsi->back;
+ struct i40e_client_instance *cinst = adapter->cinst;
+ int ret;
+
+ if (!cinst || !cinst->client || !cinst->client->ops ||
+ !cinst->client->ops->open) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance open function\n");
+ return;
+ }
+ if (!(test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state))) {
+ ret = cinst->client->ops->open(&cinst->lan_info, cinst->client);
+ if (!ret)
+ set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
+ }
+}
+
+/**
+ * i40evf_client_release_qvlist - send a message to the PF to release iwarp qv map
+ * @ldev: pointer to L2 context.
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40evf_client_release_qvlist(struct i40e_info *ldev)
+{
+ struct i40evf_adapter *adapter = ldev->vf;
+ i40e_status err;
+
+ if (adapter->aq_required)
+ return -EAGAIN;
+
+ err = i40e_aq_send_msg_to_pf(&adapter->hw,
+ VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
+ I40E_SUCCESS, NULL, 0, NULL);
+
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Unable to send iWarp vector release message to PF, error %d, aq status %d\n",
+ err, adapter->hw.aq.asq_last_status);
+
+ return err;
+}
+
+/**
+ * i40evf_notify_client_close - call the client close callback
+ * @vsi: the VSI with netdev closed
+ * @reset: true when close called due to reset pending
+ *
+ * If there is a client to this netdev, call the client with close
+ **/
+void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset)
+{
+ struct i40evf_adapter *adapter = vsi->back;
+ struct i40e_client_instance *cinst = adapter->cinst;
+
+ if (!cinst || !cinst->client || !cinst->client->ops ||
+ !cinst->client->ops->close) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance close function\n");
+ return;
+ }
+ cinst->client->ops->close(&cinst->lan_info, cinst->client, reset);
+ i40evf_client_release_qvlist(&cinst->lan_info);
+ clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
+}
+
+/**
+ * i40evf_client_add_instance - add a client instance to the instance list
+ * @adapter: pointer to the board struct
+ *
+ * Returns cinst ptr on success, NULL on failure
+ **/
+static struct i40e_client_instance *
+i40evf_client_add_instance(struct i40evf_adapter *adapter)
+{
+ struct i40e_client_instance *cinst = NULL;
+ struct netdev_hw_addr *mac = NULL;
+ struct i40e_vsi *vsi = &adapter->vsi;
+ int i;
+
+ if (!vf_registered_client)
+ goto out;
+
+ if (adapter->cinst) {
+ cinst = adapter->cinst;
+ goto out;
+ }
+
+ cinst = kzalloc(sizeof(*cinst), GFP_KERNEL);
+ if (!cinst)
+ goto out;
+
+ cinst->lan_info.vf = (void *)adapter;
+ cinst->lan_info.netdev = vsi->netdev;
+ cinst->lan_info.pcidev = adapter->pdev;
+ cinst->lan_info.fid = 0;
+ cinst->lan_info.ftype = I40E_CLIENT_FTYPE_VF;
+ cinst->lan_info.hw_addr = adapter->hw.hw_addr;
+ cinst->lan_info.ops = &i40evf_lan_ops;
+ cinst->lan_info.version.major = I40EVF_CLIENT_VERSION_MAJOR;
+ cinst->lan_info.version.minor = I40EVF_CLIENT_VERSION_MINOR;
+ cinst->lan_info.version.build = I40EVF_CLIENT_VERSION_BUILD;
+ set_bit(__I40E_CLIENT_INSTANCE_NONE, &cinst->state);
+
+ cinst->lan_info.msix_count = adapter->num_iwarp_msix;
+ cinst->lan_info.msix_entries =
+ &adapter->msix_entries[adapter->iwarp_base_vector];
+
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ cinst->lan_info.params.qos.prio_qos[i].tc = 0;
+ cinst->lan_info.params.qos.prio_qos[i].qs_handle =
+ vsi->qs_handle;
+ }
+
+ mac = list_first_entry(&cinst->lan_info.netdev->dev_addrs.list,
+ struct netdev_hw_addr, list);
+ if (mac)
+ ether_addr_copy(cinst->lan_info.lanmac, mac->addr);
+ else
+ dev_err(&adapter->pdev->dev, "MAC address list is empty!\n");
+
+ cinst->client = vf_registered_client;
+ adapter->cinst = cinst;
+out:
+ return cinst;
+}
+
+/**
+ * i40evf_client_del_instance - removes a client instance from the list
+ * @adapter: pointer to the board struct
+ *
+ **/
+static
+void i40evf_client_del_instance(struct i40evf_adapter *adapter)
+{
+ kfree(adapter->cinst);
+ adapter->cinst = NULL;
+}
+
+/**
+ * i40evf_client_subtask - client maintenance work
+ * @adapter: board private structure
+ **/
+void i40evf_client_subtask(struct i40evf_adapter *adapter)
+{
+ struct i40e_client *client = vf_registered_client;
+ struct i40e_client_instance *cinst;
+ int ret = 0;
+
+ if (adapter->state < __I40EVF_DOWN)
+ return;
+
+ /* first check client is registered */
+ if (!client)
+ return;
+
+ /* Add the client instance to the instance list */
+ cinst = i40evf_client_add_instance(adapter);
+ if (!cinst)
+ return;
+
+ dev_info(&adapter->pdev->dev, "Added instance of Client %s\n",
+ client->name);
+
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state)) {
+ /* Send an Open request to the client */
+
+ if (client->ops && client->ops->open)
+ ret = client->ops->open(&cinst->lan_info, client);
+ if (!ret)
+ set_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cinst->state);
+ else
+ /* remove client instance */
+ i40evf_client_del_instance(adapter);
+ }
+}
+
+/**
+ * i40evf_lan_add_device - add a lan device struct to the list of lan devices
+ * @adapter: pointer to the board struct
+ *
+ * Returns 0 on success or non-zero on error
+ **/
+int i40evf_lan_add_device(struct i40evf_adapter *adapter)
+{
+ struct i40e_device *ldev;
+ int ret = 0;
+
+ mutex_lock(&i40evf_device_mutex);
+ list_for_each_entry(ldev, &i40evf_devices, list) {
+ if (ldev->vf == adapter) {
+ ret = -EEXIST;
+ goto out;
+ }
+ }
+ ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
+ if (!ldev) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ldev->vf = adapter;
+ INIT_LIST_HEAD(&ldev->list);
+ list_add(&ldev->list, &i40evf_devices);
+ dev_info(&adapter->pdev->dev, "Added LAN device bus=0x%02x dev=0x%02x func=0x%02x\n",
+ adapter->hw.bus.bus_id, adapter->hw.bus.device,
+ adapter->hw.bus.func);
+
+ /* Since a client may have registered before this device was added,
+ * schedule a subtask to initialize any already-registered clients.
+ */
+ adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
+
+out:
+ mutex_unlock(&i40evf_device_mutex);
+ return ret;
+}
+
+/**
+ * i40evf_lan_del_device - removes a lan device from the device list
+ * @adapter: pointer to the board struct
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+int i40evf_lan_del_device(struct i40evf_adapter *adapter)
+{
+ struct i40e_device *ldev, *tmp;
+ int ret = -ENODEV;
+
+ mutex_lock(&i40evf_device_mutex);
+ list_for_each_entry_safe(ldev, tmp, &i40evf_devices, list) {
+ if (ldev->vf == adapter) {
+ dev_info(&adapter->pdev->dev,
+ "Deleted LAN device bus=0x%02x dev=0x%02x func=0x%02x\n",
+ adapter->hw.bus.bus_id, adapter->hw.bus.device,
+ adapter->hw.bus.func);
+ list_del(&ldev->list);
+ kfree(ldev);
+ ret = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&i40evf_device_mutex);
+ return ret;
+}
+
+/**
+ * i40evf_client_release - release client specific resources
+ * @client: pointer to the registered client
+ *
+ **/
+static void i40evf_client_release(struct i40e_client *client)
+{
+ struct i40e_client_instance *cinst;
+ struct i40e_device *ldev;
+ struct i40evf_adapter *adapter;
+
+ mutex_lock(&i40evf_device_mutex);
+ list_for_each_entry(ldev, &i40evf_devices, list) {
+ adapter = ldev->vf;
+ cinst = adapter->cinst;
+ if (!cinst)
+ continue;
+ if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state)) {
+ if (client->ops && client->ops->close)
+ client->ops->close(&cinst->lan_info, client,
+ false);
+ i40evf_client_release_qvlist(&cinst->lan_info);
+ clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
+
+ dev_warn(&adapter->pdev->dev,
+ "Client %s instance closed\n", client->name);
+ }
+ /* delete the client instance */
+ i40evf_client_del_instance(adapter);
+ dev_info(&adapter->pdev->dev, "Deleted client instance of Client %s\n",
+ client->name);
+ }
+ mutex_unlock(&i40evf_device_mutex);
+}
+
+/**
+ * i40evf_client_prepare - prepare client specific resources
+ * @client: pointer to the registered client
+ *
+ **/
+static void i40evf_client_prepare(struct i40e_client *client)
+{
+ struct i40e_device *ldev;
+ struct i40evf_adapter *adapter;
+
+ mutex_lock(&i40evf_device_mutex);
+ list_for_each_entry(ldev, &i40evf_devices, list) {
+ adapter = ldev->vf;
+ /* Signal the watchdog to service the client */
+ adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
+ }
+ mutex_unlock(&i40evf_device_mutex);
+}
+
+/**
+ * i40evf_client_virtchnl_send - send a message to the PF instance
+ * @ldev: pointer to L2 context.
+ * @client: Client pointer.
+ * @msg: pointer to message buffer
+ * @len: message length
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u8 *msg, u16 len)
+{
+ struct i40evf_adapter *adapter = ldev->vf;
+ i40e_status err;
+
+ if (adapter->aq_required)
+ return -EAGAIN;
+
+ err = i40e_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_IWARP,
+ I40E_SUCCESS, msg, len, NULL);
+ if (err)
+ dev_err(&adapter->pdev->dev, "Unable to send iWarp message to PF, error %d, aq status %d\n",
+ err, adapter->hw.aq.asq_last_status);
+
+ return err;
+}
+
+/**
+ * i40evf_client_setup_qvlist - send a message to the PF to setup iwarp qv map
+ * @ldev: pointer to L2 context.
+ * @client: Client pointer.
+ * @qvlist_info: queue and vector list
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
+ struct i40e_client *client,
+ struct i40e_qvlist_info *qvlist_info)
+{
+ struct virtchnl_iwarp_qvlist_info *v_qvlist_info;
+ struct i40evf_adapter *adapter = ldev->vf;
+ struct i40e_qv_info *qv_info;
+ i40e_status err;
+ u32 v_idx, i;
+ u32 msg_size;
+
+ if (adapter->aq_required)
+ return -EAGAIN;
+
+ /* A quick check on whether the vectors belong to the client */
+ for (i = 0; i < qvlist_info->num_vectors; i++) {
+ qv_info = &qvlist_info->qv_info[i];
+ if (!qv_info)
+ continue;
+ v_idx = qv_info->v_idx;
+ if ((v_idx >=
+ (adapter->iwarp_base_vector + adapter->num_iwarp_msix)) ||
+ (v_idx < adapter->iwarp_base_vector))
+ return -EINVAL;
+ }
+
+ v_qvlist_info = (struct virtchnl_iwarp_qvlist_info *)qvlist_info;
+ msg_size = sizeof(struct virtchnl_iwarp_qvlist_info) +
+ (sizeof(struct virtchnl_iwarp_qv_info) *
+ (v_qvlist_info->num_vectors - 1));
+
+ adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP);
+ err = i40e_aq_send_msg_to_pf(&adapter->hw,
+ VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
+ I40E_SUCCESS, (u8 *)v_qvlist_info, msg_size, NULL);
+
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Unable to send iWarp vector config message to PF, error %d, aq status %d\n",
+ err, adapter->hw.aq.asq_last_status);
+ goto out;
+ }
+
+ err = -EBUSY;
+ for (i = 0; i < 5; i++) {
+ msleep(100);
+ if (!(adapter->client_pending &
+ BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP))) {
+ err = 0;
+ break;
+ }
+ }
+out:
+ return err;
+}
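/* The sizing above relies on the one-element trailing array idiom used by
 * struct virtchnl_iwarp_qvlist_info: its sizeof already includes one
 * virtchnl_iwarp_qv_info, so only (num_vectors - 1) extra entries are
 * appended. Worked example, assuming a three-vector list:
 */
	u32 num_vectors = 3;
	u32 msg_size = sizeof(struct virtchnl_iwarp_qvlist_info) +
		       (sizeof(struct virtchnl_iwarp_qv_info) *
			(num_vectors - 1));	/* header + 2 extra entries */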
+
+/**
+ * i40evf_register_client - Register an i40e client driver with the L2 driver
+ * @client: pointer to the i40e_client struct
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+int i40evf_register_client(struct i40e_client *client)
+{
+ int ret = 0;
+
+ if (!client) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (strlen(client->name) == 0) {
+ pr_info("i40evf: Failed to register client with no name\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ if (vf_registered_client) {
+ pr_info("i40evf: Client %s has already been registered!\n",
+ client->name);
+ ret = -EEXIST;
+ goto out;
+ }
+
+ if ((client->version.major != I40EVF_CLIENT_VERSION_MAJOR) ||
+ (client->version.minor != I40EVF_CLIENT_VERSION_MINOR)) {
+ pr_info("i40evf: Failed to register client %s due to mismatched client interface version\n",
+ client->name);
+ pr_info("Client is using version: %02d.%02d.%02d while LAN driver supports %s\n",
+ client->version.major, client->version.minor,
+ client->version.build,
+ i40evf_client_interface_version_str);
+ ret = -EIO;
+ goto out;
+ }
+
+ vf_registered_client = client;
+
+ i40evf_client_prepare(client);
+
+ pr_info("i40evf: Registered client %s with return code %d\n",
+ client->name, ret);
+out:
+ return ret;
+}
+EXPORT_SYMBOL(i40evf_register_client);
+
+/**
+ * i40evf_unregister_client - Unregister an i40e client driver from the L2 driver
+ * @client: pointer to the i40e_client struct
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+int i40evf_unregister_client(struct i40e_client *client)
+{
+ int ret = 0;
+
+ /* When an unregister request comes through, a close must be sent
+ * for each of the client instances that were opened.
+ * i40evf_client_release() handles this.
+ */
+ i40evf_client_release(client);
+
+ if (vf_registered_client != client) {
+ pr_info("i40evf: Client %s has not been registered\n",
+ client->name);
+ ret = -ENODEV;
+ goto out;
+ }
+ vf_registered_client = NULL;
+ pr_info("i40evf: Unregistered client %s\n", client->name);
+out:
+ return ret;
+}
+EXPORT_SYMBOL(i40evf_unregister_client);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.h b/drivers/net/ethernet/intel/i40evf/i40evf_client.h
new file mode 100644
index 000000000000..7d283c7506a5
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.h
@@ -0,0 +1,166 @@
+#ifndef _I40E_CLIENT_H_
+#define _I40E_CLIENT_H_
+
+#define I40EVF_CLIENT_STR_LENGTH 10
+
+/* Client interface version should be updated anytime there is a change in the
+ * existing APIs or data structures.
+ */
+#define I40EVF_CLIENT_VERSION_MAJOR 0
+#define I40EVF_CLIENT_VERSION_MINOR 01
+#define I40EVF_CLIENT_VERSION_BUILD 00
+#define I40EVF_CLIENT_VERSION_STR \
+ __stringify(I40EVF_CLIENT_VERSION_MAJOR) "." \
+ __stringify(I40EVF_CLIENT_VERSION_MINOR) "." \
+ __stringify(I40EVF_CLIENT_VERSION_BUILD)
+
+struct i40e_client_version {
+ u8 major;
+ u8 minor;
+ u8 build;
+ u8 rsvd;
+};
+
+enum i40e_client_state {
+ __I40E_CLIENT_NULL,
+ __I40E_CLIENT_REGISTERED
+};
+
+enum i40e_client_instance_state {
+ __I40E_CLIENT_INSTANCE_NONE,
+ __I40E_CLIENT_INSTANCE_OPENED,
+};
+
+struct i40e_ops;
+struct i40e_client;
+
+/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
+ * In order for us to keep the interface simple, SW will define a
+ * unique type value for AEQ.
+ */
+#define I40E_QUEUE_TYPE_PE_AEQ 0x80
+#define I40E_QUEUE_INVALID_IDX 0xFFFF
+
+struct i40e_qv_info {
+ u32 v_idx; /* msix_vector */
+ u16 ceq_idx;
+ u16 aeq_idx;
+ u8 itr_idx;
+};
+
+struct i40e_qvlist_info {
+ u32 num_vectors;
+ struct i40e_qv_info qv_info[1];
+};
+
+#define I40E_CLIENT_MSIX_ALL 0xFFFFFFFF
+
+/* set of LAN parameters useful for clients managed by LAN */
+
+/* Struct to hold per priority info */
+struct i40e_prio_qos_params {
+ u16 qs_handle; /* qs handle for prio */
+ u8 tc; /* TC mapped to prio */
+ u8 reserved;
+};
+
+#define I40E_CLIENT_MAX_USER_PRIORITY 8
+/* Struct to hold Client QoS */
+struct i40e_qos_params {
+ struct i40e_prio_qos_params prio_qos[I40E_CLIENT_MAX_USER_PRIORITY];
+};
+
+struct i40e_params {
+ struct i40e_qos_params qos;
+ u16 mtu;
+ u16 link_up; /* boolean */
+};
+
+/* Structure to hold LAN device info for a client device */
+struct i40e_info {
+ struct i40e_client_version version;
+ u8 lanmac[6];
+ struct net_device *netdev;
+ struct pci_dev *pcidev;
+ u8 __iomem *hw_addr;
+ u8 fid; /* function id, PF id or VF id */
+#define I40E_CLIENT_FTYPE_PF 0
+#define I40E_CLIENT_FTYPE_VF 1
+ u8 ftype; /* function type, PF or VF */
+ void *vf; /* cast to i40evf_adapter */
+
+ /* All L2 params that could change during the life span of the device
+ * and needs to be communicated to the client when they change
+ */
+ struct i40e_params params;
+ struct i40e_ops *ops;
+
+ u16 msix_count; /* number of msix vectors*/
+ /* Array down below will be dynamically allocated based on msix_count */
+ struct msix_entry *msix_entries;
+ u16 itr_index; /* Which ITR index the PE driver is supposed to use */
+};
+
+struct i40e_ops {
+ /* setup_q_vector_list enables queues with a particular vector */
+ int (*setup_qvlist)(struct i40e_info *ldev, struct i40e_client *client,
+ struct i40e_qvlist_info *qv_info);
+
+ u32 (*virtchnl_send)(struct i40e_info *ldev, struct i40e_client *client,
+ u8 *msg, u16 len);
+
+ /* If the PE Engine is unresponsive, RDMA driver can request a reset.*/
+ void (*request_reset)(struct i40e_info *ldev,
+ struct i40e_client *client);
+};
+
+struct i40e_client_ops {
+ /* Should be called from register_client() or whenever the driver is
+ * ready to create a specific client instance.
+ */
+ int (*open)(struct i40e_info *ldev, struct i40e_client *client);
+
+ /* Should be closed when netdev is unavailable or when unregister
+ * call comes in. If the close happens due to a reset, set the reset
+ * bit to true.
+ */
+ void (*close)(struct i40e_info *ldev, struct i40e_client *client,
+ bool reset);
+
+ /* called when some l2 managed parameters change - mss */
+ void (*l2_param_change)(struct i40e_info *ldev,
+ struct i40e_client *client,
+ struct i40e_params *params);
+
+ /* called when a message is received from the PF */
+ int (*virtchnl_receive)(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u8 *msg, u16 len);
+};
+
+/* Client device */
+struct i40e_client_instance {
+ struct list_head list;
+ struct i40e_info lan_info;
+ struct i40e_client *client;
+ unsigned long state;
+};
+
+struct i40e_client {
+ struct list_head list; /* list of registered clients */
+ char name[I40EVF_CLIENT_STR_LENGTH];
+ struct i40e_client_version version;
+ unsigned long state; /* client state */
+ atomic_t ref_cnt; /* Count of all the client devices of this kind */
+ u32 flags;
+#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0)
+#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2)
+ u8 type;
+#define I40E_CLIENT_IWARP 0
+ struct i40e_client_ops *ops; /* client ops provided by the client */
+};
+
+/* used by clients */
+int i40evf_register_client(struct i40e_client *client);
+int i40evf_unregister_client(struct i40e_client *client);
+#endif /* _I40E_CLIENT_H_ */
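For reference, a minimal sketch of a client module built against this
interface; the example_* names are hypothetical, only the types and the
register/unregister entry points come from the header above:

	static int example_open(struct i40e_info *ldev,
				struct i40e_client *client)
	{
		/* set up client resources here; returning 0 lets the LAN
		 * driver mark the instance __I40E_CLIENT_INSTANCE_OPENED
		 */
		return 0;
	}

	static void example_close(struct i40e_info *ldev,
				  struct i40e_client *client, bool reset)
	{
		/* tear down client resources here */
	}

	static struct i40e_client_ops example_ops = {
		.open = example_open,
		.close = example_close,
	};

	static struct i40e_client example_client = {
		.name = "example",	/* must fit I40EVF_CLIENT_STR_LENGTH */
		.version = {
			.major = I40EVF_CLIENT_VERSION_MAJOR,
			.minor = I40EVF_CLIENT_VERSION_MINOR,
			.build = I40EVF_CLIENT_VERSION_BUILD,
		},
		.type = I40E_CLIENT_IWARP,
		.ops = &example_ops,
	};

	static int __init example_client_init(void)
	{
		/* version major/minor must match or registration fails */
		return i40evf_register_client(&example_client);
	}
	module_init(example_client_init);

	static void __exit example_client_exit(void)
	{
		i40evf_unregister_client(&example_client);
	}
	module_exit(example_client_exit);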
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index 272d600c1ed0..9bb2cc7dd4e4 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -63,52 +63,74 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
#define I40EVF_STATS_LEN(_dev) \
(I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
+/* For now we have one and only one private flag and it is only defined
+ * when we have support for the SKIP_CPU_SYNC DMA attribute. Instead
+ * of leaving all this code sitting around empty we will strip it unless
+ * our one private flag is actually available.
+ */
+struct i40evf_priv_flags {
+ char flag_string[ETH_GSTRING_LEN];
+ u32 flag;
+ bool read_only;
+};
+
+#define I40EVF_PRIV_FLAG(_name, _flag, _read_only) { \
+ .flag_string = _name, \
+ .flag = _flag, \
+ .read_only = _read_only, \
+}
+
+static const struct i40evf_priv_flags i40evf_gstrings_priv_flags[] = {
+ I40EVF_PRIV_FLAG("legacy-rx", I40EVF_FLAG_LEGACY_RX, 0),
+};
+
+#define I40EVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40evf_gstrings_priv_flags)
+
/**
- * i40evf_get_settings - Get Link Speed and Duplex settings
+ * i40evf_get_link_ksettings - Get Link Speed and Duplex settings
* @netdev: network interface device structure
- * @ecmd: ethtool command
+ * @cmd: ethtool command
*
* Reports speed/duplex settings. Because this is a VF, we don't know what
* kind of link we really have, so we fake it.
**/
-static int i40evf_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int i40evf_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
- ecmd->supported = 0;
- ecmd->autoneg = AUTONEG_DISABLE;
- ecmd->transceiver = XCVR_DUMMY1;
- ecmd->port = PORT_NONE;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.port = PORT_NONE;
/* Set speed and duplex */
switch (adapter->link_speed) {
case I40E_LINK_SPEED_40GB:
- ethtool_cmd_speed_set(ecmd, SPEED_40000);
+ cmd->base.speed = SPEED_40000;
break;
case I40E_LINK_SPEED_25GB:
#ifdef SPEED_25000
- ethtool_cmd_speed_set(ecmd, SPEED_25000);
+ cmd->base.speed = SPEED_25000;
#else
netdev_info(netdev,
"Speed is 25G, display not supported by this version of ethtool.\n");
#endif
break;
case I40E_LINK_SPEED_20GB:
- ethtool_cmd_speed_set(ecmd, SPEED_20000);
+ cmd->base.speed = SPEED_20000;
break;
case I40E_LINK_SPEED_10GB:
- ethtool_cmd_speed_set(ecmd, SPEED_10000);
+ cmd->base.speed = SPEED_10000;
break;
case I40E_LINK_SPEED_1GB:
- ethtool_cmd_speed_set(ecmd, SPEED_1000);
+ cmd->base.speed = SPEED_1000;
break;
case I40E_LINK_SPEED_100MB:
- ethtool_cmd_speed_set(ecmd, SPEED_100);
+ cmd->base.speed = SPEED_100;
break;
default:
break;
}
- ecmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
return 0;
}
@@ -125,6 +147,8 @@ static int i40evf_get_sset_count(struct net_device *netdev, int sset)
{
if (sset == ETH_SS_STATS)
return I40EVF_STATS_LEN(netdev);
+ else if (sset == ETH_SS_PRIV_FLAGS)
+ return I40EVF_PRIV_FLAGS_STR_LEN;
else
return -EINVAL;
}
@@ -190,7 +214,83 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i);
p += ETH_GSTRING_LEN;
}
+ } else if (sset == ETH_SS_PRIV_FLAGS) {
+ for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "%s",
+ i40evf_gstrings_priv_flags[i].flag_string);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+}
+
+/**
+ * i40evf_get_priv_flags - report device private flags
+ * @netdev: network interface device structure
+ *
+ * The number of strings reported for ETH_SS_PRIV_FLAGS and the strings
+ * themselves must match the flags returned here. Add a new string to the
+ * i40evf_gstrings_priv_flags array for each new flag.
+ *
+ * Returns a u32 bitmap of flags.
+ **/
+static u32 i40evf_get_priv_flags(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ u32 i, ret_flags = 0;
+
+ for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
+ const struct i40evf_priv_flags *priv_flags;
+
+ priv_flags = &i40evf_gstrings_priv_flags[i];
+
+ if (priv_flags->flag & adapter->flags)
+ ret_flags |= BIT(i);
+ }
+
+ return ret_flags;
+}
+
+/**
+ * i40evf_set_priv_flags - set private flags
+ * @netdev: network interface device structure
+ * @flags: bit flags to be set
+ **/
+static int i40evf_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ u64 changed_flags;
+ u32 i;
+
+ changed_flags = adapter->flags;
+
+ for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
+ const struct i40evf_priv_flags *priv_flags;
+
+ priv_flags = &i40evf_gstrings_priv_flags[i];
+
+ if (priv_flags->read_only)
+ continue;
+
+ if (flags & BIT(i))
+ adapter->flags |= priv_flags->flag;
+ else
+ adapter->flags &= ~(priv_flags->flag);
+ }
+
+ /* check for flags that changed */
+ changed_flags ^= adapter->flags;
+
+ /* Process any additional changes needed as a result of flag changes. */
+
+ /* issue a reset to force legacy-rx change to take effect */
+ if (changed_flags & I40EVF_FLAG_LEGACY_RX) {
+ if (netif_running(netdev)) {
+ adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
+ schedule_work(&adapter->reset_task);
+ }
}
+
+ return 0;
}
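[Illustrative note: table entry i maps to bit i of the ethtool private-flags bitmap, so with the single "legacy-rx" entry the round trip looks like the hypothetical session below; the interface name is an assumption.]

	/* # ethtool --show-priv-flags <vf-netdev>
	 *   legacy-rx : off
	 * # ethtool --set-priv-flags <vf-netdev> legacy-rx on
	 *
	 * If the interface is up, i40evf_set_priv_flags() above schedules a
	 * reset so the new Rx path actually takes effect.
	 */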
/**
@@ -239,6 +339,7 @@ static void i40evf_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->version, i40evf_driver_version, 32);
strlcpy(drvinfo->fw_version, "N/A", 4);
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ drvinfo->n_priv_flags = I40EVF_PRIV_FLAGS_STR_LEN;
}
/**
@@ -643,7 +744,6 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
}
static const struct ethtool_ops i40evf_ethtool_ops = {
- .get_settings = i40evf_get_settings,
.get_drvinfo = i40evf_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = i40evf_get_ringparam,
@@ -651,6 +751,8 @@ static const struct ethtool_ops i40evf_ethtool_ops = {
.get_strings = i40evf_get_strings,
.get_ethtool_stats = i40evf_get_ethtool_stats,
.get_sset_count = i40evf_get_sset_count,
+ .get_priv_flags = i40evf_get_priv_flags,
+ .set_priv_flags = i40evf_set_priv_flags,
.get_msglevel = i40evf_get_msglevel,
.set_msglevel = i40evf_set_msglevel,
.get_coalesce = i40evf_get_coalesce,
@@ -663,6 +765,7 @@ static const struct ethtool_ops i40evf_ethtool_ops = {
.set_rxfh = i40evf_set_rxfh,
.get_channels = i40evf_get_channels,
.get_rxfh_key_size = i40evf_get_rxfh_key_size,
+ .get_link_ksettings = i40evf_get_link_ksettings,
};
/**
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index c0fc53361800..7c213a347909 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -26,6 +26,14 @@
#include "i40evf.h"
#include "i40e_prototype.h"
+#include "i40evf_client.h"
+/* All i40evf tracepoints are defined by the include below, which must
+ * be included exactly once across the whole kernel with
+ * CREATE_TRACE_POINTS defined
+ */
+#define CREATE_TRACE_POINTS
+#include "i40e_trace.h"
+
static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
static int i40evf_close(struct net_device *netdev);
@@ -36,9 +44,9 @@ static const char i40evf_driver_string[] =
#define DRV_KERN "-k"
-#define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 6
-#define DRV_VERSION_BUILD 25
+#define DRV_VERSION_MAJOR 3
+#define DRV_VERSION_MINOR 0
+#define DRV_VERSION_BUILD 0
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
@@ -59,7 +67,7 @@ static const struct pci_device_id i40evf_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0},
- {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF_HV), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_ADAPTIVE_VF), 0},
/* required last entry */
{0, }
};
@@ -490,7 +498,7 @@ static void i40evf_netpoll(struct net_device *netdev)
int i;
/* if interface is down do nothing */
- if (test_bit(__I40E_DOWN, &adapter->vsi.state))
+ if (test_bit(__I40E_VSI_DOWN, adapter->vsi.state))
return;
for (i = 0; i < q_vectors; i++)
@@ -686,12 +694,39 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter)
**/
static void i40evf_configure_rx(struct i40evf_adapter *adapter)
{
+ unsigned int rx_buf_len = I40E_RXBUFFER_2048;
struct i40e_hw *hw = &adapter->hw;
int i;
+ /* Legacy Rx will always default to a 2048 buffer size. */
+#if (PAGE_SIZE < 8192)
+ if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX)) {
+ struct net_device *netdev = adapter->netdev;
+
+ /* For jumbo frames on systems with 4K pages we have to use
+ * an order 1 page, so we might as well increase the size
+ * of our Rx buffer to make better use of the available space
+ */
+ rx_buf_len = I40E_RXBUFFER_3072;
+
+ /* We use a 1536 buffer size for configurations with
+ * standard Ethernet mtu. On x86 this gives us enough room
+ * for shared info and 192 bytes of padding.
+ */
+ if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
+ (netdev->mtu <= ETH_DATA_LEN))
+ rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+ }
+#endif
+
for (i = 0; i < adapter->num_active_queues; i++) {
adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i);
- adapter->rx_rings[i].rx_buf_len = I40EVF_RXBUFFER_2048;
+ adapter->rx_rings[i].rx_buf_len = rx_buf_len;
+
+ if (adapter->flags & I40EVF_FLAG_LEGACY_RX)
+ clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
+ else
+ set_ring_build_skb_enabled(&adapter->rx_rings[i]);
}
}
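[Editorial summary of the buffer-size selection above, assuming a system with pages smaller than 8K; the values come straight from the constants used in this hunk.]

	/* Resulting rx_buf_len (PAGE_SIZE < 8192):
	 *   legacy-rx on                      -> I40E_RXBUFFER_2048
	 *   legacy-rx off, mtu >  ETH_DATA_LEN -> I40E_RXBUFFER_3072
	 *   legacy-rx off, mtu <= ETH_DATA_LEN,
	 *   and 2K not too small with padding  -> I40E_RXBUFFER_1536 - NET_IP_ALIGN
	 */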
@@ -1054,11 +1089,13 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
static void i40evf_up_complete(struct i40evf_adapter *adapter)
{
adapter->state = __I40EVF_RUNNING;
- clear_bit(__I40E_DOWN, &adapter->vsi.state);
+ clear_bit(__I40E_VSI_DOWN, adapter->vsi.state);
i40evf_napi_enable_all(adapter);
adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
+ if (CLIENT_ENABLED(adapter))
+ adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_OPEN;
mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}
@@ -1095,7 +1132,7 @@ void i40evf_down(struct i40evf_adapter *adapter)
if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
adapter->state != __I40EVF_RESETTING) {
/* cancel any current operation */
- adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
/* Schedule operations to close down the HW. Don't wait
* here for this to complete. The watchdog is still running
* and it will take care of this.
@@ -1161,6 +1198,7 @@ static void i40evf_free_queues(struct i40evf_adapter *adapter)
{
if (!adapter->vsi_res)
return;
+ adapter->num_active_queues = 0;
kfree(adapter->tx_rings);
adapter->tx_rings = NULL;
kfree(adapter->rx_rings);
@@ -1177,18 +1215,22 @@ static void i40evf_free_queues(struct i40evf_adapter *adapter)
**/
static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
{
- int i;
+ int i, num_active_queues;
+
+ num_active_queues = min_t(int,
+ adapter->vsi_res->num_queue_pairs,
+ (int)(num_online_cpus()));
- adapter->tx_rings = kcalloc(adapter->num_active_queues,
+ adapter->tx_rings = kcalloc(num_active_queues,
sizeof(struct i40e_ring), GFP_KERNEL);
if (!adapter->tx_rings)
goto err_out;
- adapter->rx_rings = kcalloc(adapter->num_active_queues,
+ adapter->rx_rings = kcalloc(num_active_queues,
sizeof(struct i40e_ring), GFP_KERNEL);
if (!adapter->rx_rings)
goto err_out;
- for (i = 0; i < adapter->num_active_queues; i++) {
+ for (i = 0; i < num_active_queues; i++) {
struct i40e_ring *tx_ring;
struct i40e_ring *rx_ring;
@@ -1210,6 +1252,8 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
rx_ring->rx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF);
}
+ adapter->num_active_queues = num_active_queues;
+
return 0;
err_out:
@@ -1236,13 +1280,13 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
}
pairs = adapter->num_active_queues;
- /* It's easy to be greedy for MSI-X vectors, but it really
- * doesn't do us much good if we have a lot more vectors
- * than CPU's. So let's be conservative and only ask for
- * (roughly) twice the number of vectors as there are CPU's.
+ /* It's easy to be greedy for MSI-X vectors, but it really doesn't do
+ * us much good if we have more vectors than CPUs. However, we already
+ * limit the total number of queues by the number of CPUs so we do not
+ * need any further limiting here.
*/
- v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
- v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors);
+ v_budget = min_t(int, pairs + NONQ_VECS,
+ (int)adapter->vf_res->max_vectors);
adapter->msix_entries = kcalloc(v_budget,
sizeof(struct msix_entry), GFP_KERNEL);
@@ -1275,7 +1319,7 @@ static int i40evf_config_rss_aq(struct i40evf_adapter *adapter)
struct i40e_hw *hw = &adapter->hw;
int ret = 0;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
adapter->current_op);
@@ -1374,7 +1418,7 @@ static int i40evf_init_rss(struct i40evf_adapter *adapter)
if (!RSS_PF(adapter)) {
/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
if (adapter->vf_res->vf_offload_flags &
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+ VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
else
adapter->hena = I40E_DEFAULT_RSS_HENA;
@@ -1473,6 +1517,13 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
{
int err;
+ err = i40evf_alloc_queues(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Unable to allocate memory for queues\n");
+ goto err_alloc_queues;
+ }
+
rtnl_lock();
err = i40evf_set_interrupt_capability(adapter);
rtnl_unlock();
@@ -1489,23 +1540,16 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
goto err_alloc_q_vectors;
}
- err = i40evf_alloc_queues(adapter);
- if (err) {
- dev_err(&adapter->pdev->dev,
- "Unable to allocate memory for queues\n");
- goto err_alloc_queues;
- }
-
dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
(adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
adapter->num_active_queues);
return 0;
-err_alloc_queues:
- i40evf_free_q_vectors(adapter);
err_alloc_q_vectors:
i40evf_reset_interrupt_capability(adapter);
err_set_interrupt:
+ i40evf_free_queues(adapter);
+err_alloc_queues:
return err;
}
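[A minimal sketch of the goto-unwind pattern this hunk restores: allocate in one order, free in reverse on error. The example struct and the alloc_a/alloc_b/free_a helpers are hypothetical.]

	static int example_init(struct example *ex)
	{
		int err;

		err = alloc_a(ex);		/* allocated first... */
		if (err)
			goto err_a;
		err = alloc_b(ex);		/* depends on a */
		if (err)
			goto err_b;
		return 0;

	err_b:
		free_a(ex);			/* ...freed last */
	err_a:
		return err;
	}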
@@ -1552,8 +1596,8 @@ static void i40evf_watchdog_task(struct work_struct *work)
if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if ((reg_val == I40E_VFR_VFACTIVE) ||
- (reg_val == I40E_VFR_COMPLETED)) {
+ if ((reg_val == VIRTCHNL_VFR_VFACTIVE) ||
+ (reg_val == VIRTCHNL_VFR_COMPLETED)) {
/* A chance for redemption! */
dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
adapter->state = __I40EVF_STARTUP;
@@ -1569,7 +1613,7 @@ static void i40evf_watchdog_task(struct work_struct *work)
return;
}
adapter->aq_required = 0;
- adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
goto watchdog_done;
}
@@ -1585,7 +1629,7 @@ static void i40evf_watchdog_task(struct work_struct *work)
dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
schedule_work(&adapter->reset_task);
adapter->aq_required = 0;
- adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
goto watchdog_done;
}
@@ -1671,13 +1715,13 @@ static void i40evf_watchdog_task(struct work_struct *work)
}
if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) {
- i40evf_set_promiscuous(adapter, I40E_FLAG_VF_UNICAST_PROMISC |
- I40E_FLAG_VF_MULTICAST_PROMISC);
+ i40evf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
+ FLAG_VF_MULTICAST_PROMISC);
goto watchdog_done;
}
if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) {
- i40evf_set_promiscuous(adapter, I40E_FLAG_VF_MULTICAST_PROMISC);
+ i40evf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
goto watchdog_done;
}
@@ -1686,6 +1730,7 @@ static void i40evf_watchdog_task(struct work_struct *work)
i40evf_set_promiscuous(adapter, 0);
goto watchdog_done;
}
+ schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
if (adapter->state == __I40EVF_RUNNING)
i40evf_request_stats(adapter);
@@ -1717,7 +1762,7 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
if (netif_running(adapter->netdev)) {
- set_bit(__I40E_DOWN, &adapter->vsi.state);
+ set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
netif_carrier_off(adapter->netdev);
netif_tx_disable(adapter->netdev);
adapter->link_up = false;
@@ -1774,10 +1819,17 @@ static void i40evf_reset_task(struct work_struct *work)
u32 reg_val;
int i = 0, err;
- while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK,
&adapter->crit_section))
usleep_range(500, 1000);
-
+ if (CLIENT_ENABLED(adapter)) {
+ adapter->flags &= ~(I40EVF_FLAG_CLIENT_NEEDS_OPEN |
+ I40EVF_FLAG_CLIENT_NEEDS_CLOSE |
+ I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
+ I40EVF_FLAG_SERVICE_CLIENT_REQUESTED);
+ cancel_delayed_work_sync(&adapter->client_task);
+ i40evf_notify_client_close(&adapter->vsi, true);
+ }
i40evf_misc_irq_disable(adapter);
if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) {
adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED;
@@ -1810,7 +1862,7 @@ static void i40evf_reset_task(struct work_struct *work)
reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if (reg_val == I40E_VFR_VFACTIVE)
+ if (reg_val == VIRTCHNL_VFR_VFACTIVE)
break;
}
@@ -1820,6 +1872,7 @@ static void i40evf_reset_task(struct work_struct *work)
dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
reg_val);
i40evf_disable_vf(adapter);
+ clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
return; /* Do not attempt to reinit. It's dead, Jim. */
}
@@ -1843,7 +1896,7 @@ continue_reset:
/* kill and reinit the admin queue */
i40evf_shutdown_adminq(hw);
- adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
err = i40evf_init_adminq(hw);
if (err)
dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
@@ -1862,9 +1915,8 @@ continue_reset:
}
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
- /* Open RDMA Client again */
- adapter->aq_required |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
i40evf_misc_irq_enable(adapter);
mod_timer(&adapter->watchdog_timer, jiffies + 2);
@@ -1905,7 +1957,7 @@ static void i40evf_adminq_task(struct work_struct *work)
container_of(work, struct i40evf_adapter, adminq_task);
struct i40e_hw *hw = &adapter->hw;
struct i40e_arq_event_info event;
- struct i40e_virtchnl_msg *v_msg;
+ struct virtchnl_msg *v_msg;
i40e_status ret;
u32 val, oldval;
u16 pending;
@@ -1918,14 +1970,15 @@ static void i40evf_adminq_task(struct work_struct *work)
if (!event.msg_buf)
goto out;
- v_msg = (struct i40e_virtchnl_msg *)&event.desc;
+ v_msg = (struct virtchnl_msg *)&event.desc;
do {
ret = i40evf_clean_arq_element(hw, &event, &pending);
if (ret || !v_msg->v_opcode)
break; /* No event to process or error cleaning ARQ */
i40evf_virtchnl_completion(adapter, v_msg->v_opcode,
- v_msg->v_retval, event.msg_buf,
+ (i40e_status)v_msg->v_retval,
+ event.msg_buf,
event.msg_len);
if (pending != 0)
memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
@@ -1981,6 +2034,48 @@ out:
}
/**
+ * i40evf_client_task - worker thread to perform client work
+ * @work: pointer to work_struct containing our data
+ *
+ * This task handles client interactions. Because client calls can be
+ * reentrant, we can't handle them in the watchdog.
+ **/
+static void i40evf_client_task(struct work_struct *work)
+{
+ struct i40evf_adapter *adapter =
+ container_of(work, struct i40evf_adapter, client_task.work);
+
+ /* If we can't get the client bit, just give up. We'll be rescheduled
+ * later.
+ */
+
+ if (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section))
+ return;
+
+ if (adapter->flags & I40EVF_FLAG_SERVICE_CLIENT_REQUESTED) {
+ i40evf_client_subtask(adapter);
+ adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
+ goto out;
+ }
+ if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) {
+ i40evf_notify_client_close(&adapter->vsi, false);
+ adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
+ goto out;
+ }
+ if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) {
+ i40evf_notify_client_open(&adapter->vsi);
+ adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN;
+ goto out;
+ }
+ if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
+ i40evf_notify_client_l2_params(&adapter->vsi);
+ adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
+ }
+out:
+ clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
+}
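[Editorial note: each branch above services one flag and jumps to out, so the task handles at most one client event per invocation; the watchdog reschedules it every cycle (see the schedule_delayed_work() call earlier in this patch), so remaining flags are picked up on later passes. The non-blocking lock it uses is the standard bit pattern below; the bit name is hypothetical.]

	if (test_and_set_bit(__MY_TASK_BIT, &state))
		return;			/* already held; retry on reschedule */
	/* ... do the work ... */
	clear_bit(__MY_TASK_BIT, &state);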
+
+/**
* i40evf_free_all_tx_resources - Free Tx Resources for All Queues
* @adapter: board private structure
*
@@ -2148,31 +2243,23 @@ static int i40evf_close(struct net_device *netdev)
return 0;
- set_bit(__I40E_DOWN, &adapter->vsi.state);
+ set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
+ if (CLIENT_ENABLED(adapter))
+ adapter->flags |= I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
i40evf_down(adapter);
adapter->state = __I40EVF_DOWN_PENDING;
i40evf_free_traffic_irqs(adapter);
+ /* We explicitly don't free resources here because the hardware is
+ * still active and can DMA into memory. Resources are cleared in
+ * i40evf_virtchnl_completion() after we get confirmation from the PF
+ * driver that the rings have been stopped.
+ */
return 0;
}
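[Editor's note on where the deferred teardown described in the comment above actually happens.]

	/* The matching free lands in i40evf_virtchnl_completion(): on the
	 * VIRTCHNL_OP_DISABLE_QUEUES reply the driver frees all Tx/Rx
	 * resources and moves __I40EVF_DOWN_PENDING to __I40EVF_DOWN
	 * (see the i40evf_virtchnl.c hunk later in this diff).
	 */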
/**
- * i40evf_get_stats - Get System Network Statistics
- * @netdev: network interface device structure
- *
- * Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
- **/
-static struct net_device_stats *i40evf_get_stats(struct net_device *netdev)
-{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
-
- /* only return the current stats */
- return &adapter->net_stats;
-}
-
-/**
* i40evf_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -2184,6 +2271,10 @@ static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
struct i40evf_adapter *adapter = netdev_priv(netdev);
netdev->mtu = new_mtu;
+ if (CLIENT_ENABLED(adapter)) {
+ i40evf_notify_client_l2_params(&adapter->vsi);
+ adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
+ }
adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
schedule_work(&adapter->reset_task);
@@ -2265,7 +2356,7 @@ static netdev_features_t i40evf_fix_features(struct net_device *netdev,
struct i40evf_adapter *adapter = netdev_priv(netdev);
features &= ~I40EVF_VLAN_FEATURES;
- if (adapter->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
+ if (adapter->vf_res->vf_offload_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
features |= I40EVF_VLAN_FEATURES;
return features;
}
@@ -2274,7 +2365,6 @@ static const struct net_device_ops i40evf_netdev_ops = {
.ndo_open = i40evf_open,
.ndo_stop = i40evf_close,
.ndo_start_xmit = i40evf_xmit_frame,
- .ndo_get_stats = i40evf_get_stats,
.ndo_set_rx_mode = i40evf_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = i40evf_set_mac,
@@ -2303,8 +2393,8 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw)
for (i = 0; i < 100; i++) {
rstat = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if ((rstat == I40E_VFR_VFACTIVE) ||
- (rstat == I40E_VFR_COMPLETED))
+ if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
+ (rstat == VIRTCHNL_VFR_COMPLETED))
return 0;
usleep_range(10, 20);
}
@@ -2320,14 +2410,16 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw)
**/
int i40evf_process_config(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_vf_resource *vfres = adapter->vf_res;
+ struct virtchnl_vf_resource *vfres = adapter->vf_res;
struct net_device *netdev = adapter->netdev;
struct i40e_vsi *vsi = &adapter->vsi;
int i;
+ netdev_features_t hw_enc_features;
+ netdev_features_t hw_features;
/* got VF config message back from PF, now we can parse it */
for (i = 0; i < vfres->num_vsis; i++) {
- if (vfres->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
+ if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
adapter->vsi_res = &vfres->vsi_res[i];
}
if (!adapter->vsi_res) {
@@ -2335,46 +2427,52 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
return -ENODEV;
}
- netdev->hw_enc_features |= NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_HIGHDMA |
- NETIF_F_SOFT_FEATURES |
- NETIF_F_TSO |
- NETIF_F_TSO_ECN |
- NETIF_F_TSO6 |
+ hw_enc_features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_HIGHDMA |
+ NETIF_F_SOFT_FEATURES |
+ NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_RXHASH |
+ NETIF_F_RXCSUM |
+ 0;
+
+	/* advertise to stack only if offloads for encapsulated packets are
+ * supported
+ */
+ if (vfres->vf_offload_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
+ hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM |
NETIF_F_GSO_IPXIP4 |
NETIF_F_GSO_IPXIP6 |
- NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_GSO_PARTIAL |
- NETIF_F_SCTP_CRC |
- NETIF_F_RXHASH |
- NETIF_F_RXCSUM |
0;
- if (!(adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE))
- netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
-
- netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+ if (!(vfres->vf_offload_flags &
+ VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
+ netdev->gso_partial_features |=
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+ netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
+ netdev->hw_enc_features |= hw_enc_features;
+ }
/* record features VLANs can make use of */
- netdev->vlan_features |= netdev->hw_enc_features |
- NETIF_F_TSO_MANGLEID;
+ netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
/* Write features and hw_features separately to avoid polluting
- * with, or dropping, features that are set when we registgered.
+ * with, or dropping, features that are set when we registered.
*/
- netdev->hw_features |= netdev->hw_enc_features;
+ hw_features = hw_enc_features;
- netdev->features |= netdev->hw_enc_features | I40EVF_VLAN_FEATURES;
- netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
+ netdev->hw_features |= hw_features;
- /* disable VLAN features if not supported */
- if (!(vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN))
- netdev->features ^= I40EVF_VLAN_FEATURES;
+ netdev->features |= hw_features | I40EVF_VLAN_FEATURES;
adapter->vsi.id = adapter->vsi_res->vsi_id;
@@ -2383,7 +2481,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
vsi->netdev = adapter->netdev;
vsi->qs_handle = adapter->vsi_res->qset_handle;
- if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ if (vfres->vf_offload_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
adapter->rss_key_size = vfres->rss_key_size;
adapter->rss_lut_size = vfres->rss_lut_size;
} else {
@@ -2469,8 +2567,8 @@ static void i40evf_init_task(struct work_struct *work)
dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
adapter->pf_version.major,
adapter->pf_version.minor,
- I40E_VIRTCHNL_VERSION_MAJOR,
- I40E_VIRTCHNL_VERSION_MINOR);
+ VIRTCHNL_VERSION_MAJOR,
+ VIRTCHNL_VERSION_MINOR);
goto err;
}
err = i40evf_send_vf_config_msg(adapter);
@@ -2484,9 +2582,9 @@ static void i40evf_init_task(struct work_struct *work)
case __I40EVF_INIT_GET_RESOURCES:
/* aq msg sent, awaiting reply */
if (!adapter->vf_res) {
- bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
+ bufsz = sizeof(struct virtchnl_vf_resource) +
(I40E_MAX_VF_VSI *
- sizeof(struct i40e_virtchnl_vsi_resource));
+ sizeof(struct virtchnl_vsi_resource));
adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
if (!adapter->vf_res)
goto err;
@@ -2515,12 +2613,9 @@ static void i40evf_init_task(struct work_struct *work)
goto err_alloc;
}
- if (hw->mac.type == I40E_MAC_X722_VF)
- adapter->flags |= I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE;
-
if (i40evf_process_config(adapter))
goto err_alloc;
- adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
@@ -2548,9 +2643,6 @@ static void i40evf_init_task(struct work_struct *work)
adapter->watchdog_timer.data = (unsigned long)adapter;
mod_timer(&adapter->watchdog_timer, jiffies + 1);
- adapter->num_active_queues = min_t(int,
- adapter->vsi_res->num_queue_pairs,
- (int)(num_online_cpus()));
adapter->tx_desc_count = I40EVF_DEFAULT_TXD;
adapter->rx_desc_count = I40EVF_DEFAULT_RXD;
err = i40evf_init_interrupt_scheme(adapter);
@@ -2558,7 +2650,7 @@ static void i40evf_init_task(struct work_struct *work)
goto err_sw_init;
i40evf_map_rings_to_vectors(adapter);
if (adapter->vf_res->vf_offload_flags &
- I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+ VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE;
err = i40evf_request_misc_irq(adapter);
@@ -2577,13 +2669,19 @@ static void i40evf_init_task(struct work_struct *work)
adapter->netdev_registered = true;
netif_tx_stop_all_queues(netdev);
+ if (CLIENT_ALLOWED(adapter)) {
+ err = i40evf_lan_add_device(adapter);
+ if (err)
+ dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
+ err);
+ }
dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
if (netdev->features & NETIF_F_GRO)
dev_info(&pdev->dev, "GRO is enabled\n");
adapter->state = __I40EVF_DOWN;
- set_bit(__I40E_DOWN, &adapter->vsi.state);
+ set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
i40evf_misc_irq_enable(adapter);
adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
@@ -2727,6 +2825,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->subsystem_device_id = pdev->subsystem_device;
hw->bus.device = PCI_SLOT(pdev->devfn);
hw->bus.func = PCI_FUNC(pdev->devfn);
+ hw->bus.bus_id = pdev->bus->number;
/* set up the locks for the AQ, do this only once in probe
* and destroy them only once in remove
@@ -2740,6 +2839,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&adapter->reset_task, i40evf_reset_task);
INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task);
+ INIT_DELAYED_WORK(&adapter->client_task, i40evf_client_task);
INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task);
schedule_delayed_work(&adapter->init_task,
msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
@@ -2852,14 +2952,21 @@ static void i40evf_remove(struct pci_dev *pdev)
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40evf_mac_filter *f, *ftmp;
struct i40e_hw *hw = &adapter->hw;
+ int err;
cancel_delayed_work_sync(&adapter->init_task);
cancel_work_sync(&adapter->reset_task);
-
+ cancel_delayed_work_sync(&adapter->client_task);
if (adapter->netdev_registered) {
unregister_netdev(netdev);
adapter->netdev_registered = false;
}
+ if (CLIENT_ALLOWED(adapter)) {
+ err = i40evf_lan_del_device(adapter);
+ if (err)
+ dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
+ err);
+ }
/* Shut down all the garbage mashers on the detention level */
adapter->state = __I40EVF_REMOVE;
@@ -2871,7 +2978,8 @@ static void i40evf_remove(struct pci_dev *pdev)
i40evf_request_reset(adapter);
msleep(50);
}
-
+ i40evf_free_all_tx_resources(adapter);
+ i40evf_free_all_rx_resources(adapter);
i40evf_misc_irq_disable(adapter);
i40evf_free_misc_irq(adapter);
i40evf_reset_interrupt_capability(adapter);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 2059a8e88908..d2bb250a71af 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -26,6 +26,7 @@
#include "i40evf.h"
#include "i40e_prototype.h"
+#include "i40evf_client.h"
/* busy wait delay in msec */
#define I40EVF_BUSY_WAIT_DELAY 10
@@ -41,7 +42,7 @@
* Send message to PF and print status if failure.
**/
static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
- enum i40e_virtchnl_ops op, u8 *msg, u16 len)
+ enum virtchnl_ops op, u8 *msg, u16 len)
{
struct i40e_hw *hw = &adapter->hw;
i40e_status err;
@@ -67,12 +68,12 @@ static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
**/
int i40evf_send_api_ver(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_version_info vvi;
+ struct virtchnl_version_info vvi;
- vvi.major = I40E_VIRTCHNL_VERSION_MAJOR;
- vvi.minor = I40E_VIRTCHNL_VERSION_MINOR;
+ vvi.major = VIRTCHNL_VERSION_MAJOR;
+ vvi.minor = VIRTCHNL_VERSION_MINOR;
- return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_VERSION, (u8 *)&vvi,
+ return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi,
sizeof(vvi));
}
@@ -87,10 +88,10 @@ int i40evf_send_api_ver(struct i40evf_adapter *adapter)
**/
int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_version_info *pf_vvi;
+ struct virtchnl_version_info *pf_vvi;
struct i40e_hw *hw = &adapter->hw;
struct i40e_arq_event_info event;
- enum i40e_virtchnl_ops op;
+ enum virtchnl_ops op;
i40e_status err;
event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
@@ -108,8 +109,8 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
if (err)
goto out_alloc;
op =
- (enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
- if (op == I40E_VIRTCHNL_OP_VERSION)
+ (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
+ if (op == VIRTCHNL_OP_VERSION)
break;
}
@@ -118,19 +119,19 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
if (err)
goto out_alloc;
- if (op != I40E_VIRTCHNL_OP_VERSION) {
+ if (op != VIRTCHNL_OP_VERSION) {
dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n",
op);
err = -EIO;
goto out_alloc;
}
- pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
+ pf_vvi = (struct virtchnl_version_info *)event.msg_buf;
adapter->pf_version = *pf_vvi;
- if ((pf_vvi->major > I40E_VIRTCHNL_VERSION_MAJOR) ||
- ((pf_vvi->major == I40E_VIRTCHNL_VERSION_MAJOR) &&
- (pf_vvi->minor > I40E_VIRTCHNL_VERSION_MINOR)))
+ if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) ||
+ ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) &&
+ (pf_vvi->minor > VIRTCHNL_VERSION_MINOR)))
err = -EIO;
out_alloc:
@@ -151,24 +152,25 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
{
u32 caps;
- adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
- adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
- caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
- I40E_VIRTCHNL_VF_OFFLOAD_VLAN |
- I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
-
- adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
+ caps = VIRTCHNL_VF_OFFLOAD_L2 |
+ VIRTCHNL_VF_OFFLOAD_RSS_PF |
+ VIRTCHNL_VF_OFFLOAD_RSS_AQ |
+ VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ VIRTCHNL_VF_OFFLOAD_VLAN |
+ VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
+ VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
+ VIRTCHNL_VF_OFFLOAD_ENCAP |
+ VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
+
+ adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
if (PF_IS_V11(adapter))
return i40evf_send_pf_msg(adapter,
- I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ VIRTCHNL_OP_GET_VF_RESOURCES,
(u8 *)&caps, sizeof(caps));
else
return i40evf_send_pf_msg(adapter,
- I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ VIRTCHNL_OP_GET_VF_RESOURCES,
NULL, 0);
}
@@ -186,12 +188,12 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter)
{
struct i40e_hw *hw = &adapter->hw;
struct i40e_arq_event_info event;
- enum i40e_virtchnl_ops op;
+ enum virtchnl_ops op;
i40e_status err;
u16 len;
- len = sizeof(struct i40e_virtchnl_vf_resource) +
- I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
+ len = sizeof(struct virtchnl_vf_resource) +
+ I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
event.buf_len = len;
event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf) {
@@ -207,8 +209,8 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter)
if (err)
goto out_alloc;
op =
- (enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
- if (op == I40E_VIRTCHNL_OP_GET_VF_RESOURCES)
+ (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
+ if (op == VIRTCHNL_OP_GET_VF_RESOURCES)
break;
}
@@ -230,24 +232,29 @@ out:
**/
void i40evf_configure_queues(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_vsi_queue_config_info *vqci;
- struct i40e_virtchnl_queue_pair_info *vqpi;
+ struct virtchnl_vsi_queue_config_info *vqci;
+ struct virtchnl_queue_pair_info *vqpi;
int pairs = adapter->num_active_queues;
- int i, len;
+ int i, len, max_frame = I40E_MAX_RXBUFFER;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
adapter->current_op);
return;
}
- adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
- len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
- (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
+ adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
+ len = sizeof(struct virtchnl_vsi_queue_config_info) +
+ (sizeof(struct virtchnl_queue_pair_info) * pairs);
vqci = kzalloc(len, GFP_KERNEL);
if (!vqci)
return;
+	/* Limit maximum frame size when jumbo frames are not enabled */
+ if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX) &&
+ (adapter->netdev->mtu <= ETH_DATA_LEN))
+ max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+
vqci->vsi_id = adapter->vsi_res->vsi_id;
vqci->num_queue_pairs = pairs;
vqpi = vqci->qpair;
@@ -259,22 +266,19 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
vqpi->txq.queue_id = i;
vqpi->txq.ring_len = adapter->tx_rings[i].count;
vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
- vqpi->txq.headwb_enabled = 1;
- vqpi->txq.dma_headwb_addr = vqpi->txq.dma_ring_addr +
- (vqpi->txq.ring_len * sizeof(struct i40e_tx_desc));
-
vqpi->rxq.vsi_id = vqci->vsi_id;
vqpi->rxq.queue_id = i;
vqpi->rxq.ring_len = adapter->rx_rings[i].count;
vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
- vqpi->rxq.max_pkt_size = adapter->netdev->mtu
- + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
- vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
+ vqpi->rxq.max_pkt_size = max_frame;
+ vqpi->rxq.databuffer_size =
+ ALIGN(adapter->rx_rings[i].rx_buf_len,
+ BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
vqpi++;
}
adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
(u8 *)vqci, len);
kfree(vqci);
}
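[Worked example of the databuffer_size rounding above. The shift value is an assumption taken from the i40e register layout, where the Rx-queue context expresses buffer sizes in 128-byte units.]

	/* With I40E_RXQ_CTX_DBUFF_SHIFT == 7 (128-byte units) and
	 * rx_buf_len == I40E_RXBUFFER_1536 - NET_IP_ALIGN == 1534
	 * (on an arch where NET_IP_ALIGN == 2):
	 *   ALIGN(1534, BIT_ULL(7)) == ALIGN(1534, 128) == 1536
	 * so the hardware sees a 128-byte-aligned buffer size.
	 */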
@@ -287,20 +291,20 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
**/
void i40evf_enable_queues(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_queue_select vqs;
+ struct virtchnl_queue_select vqs;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
adapter->current_op);
return;
}
- adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
+ adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES;
vqs.vsi_id = adapter->vsi_res->vsi_id;
vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
vqs.rx_queues = vqs.tx_queues;
adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES,
(u8 *)&vqs, sizeof(vqs));
}
@@ -312,20 +316,20 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter)
**/
void i40evf_disable_queues(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_queue_select vqs;
+ struct virtchnl_queue_select vqs;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
adapter->current_op);
return;
}
- adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
+ adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES;
vqs.vsi_id = adapter->vsi_res->vsi_id;
vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
vqs.rx_queues = vqs.tx_queues;
adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES,
(u8 *)&vqs, sizeof(vqs));
}
@@ -338,23 +342,23 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter)
**/
void i40evf_map_queues(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_irq_map_info *vimi;
+ struct virtchnl_irq_map_info *vimi;
int v_idx, q_vectors, len;
struct i40e_q_vector *q_vector;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
adapter->current_op);
return;
}
- adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
+ adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;
q_vectors = adapter->num_msix_vectors - NONQ_VECS;
- len = sizeof(struct i40e_virtchnl_irq_map_info) +
+ len = sizeof(struct virtchnl_irq_map_info) +
(adapter->num_msix_vectors *
- sizeof(struct i40e_virtchnl_vector_map));
+ sizeof(struct virtchnl_vector_map));
vimi = kzalloc(len, GFP_KERNEL);
if (!vimi)
return;
@@ -375,7 +379,7 @@ void i40evf_map_queues(struct i40evf_adapter *adapter)
vimi->vecmap[v_idx].rxq_map = 0;
adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP,
(u8 *)vimi, len);
kfree(vimi);
}
@@ -390,12 +394,12 @@ void i40evf_map_queues(struct i40evf_adapter *adapter)
**/
void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_ether_addr_list *veal;
+ struct virtchnl_ether_addr_list *veal;
int len, i = 0, count = 0;
struct i40evf_mac_filter *f;
bool more = false;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
adapter->current_op);
@@ -409,17 +413,17 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
return;
}
- adapter->current_op = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;
+ adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;
- len = sizeof(struct i40e_virtchnl_ether_addr_list) +
- (count * sizeof(struct i40e_virtchnl_ether_addr));
+ len = sizeof(struct virtchnl_ether_addr_list) +
+ (count * sizeof(struct virtchnl_ether_addr));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
count = (I40EVF_MAX_AQ_BUF_SIZE -
- sizeof(struct i40e_virtchnl_ether_addr_list)) /
- sizeof(struct i40e_virtchnl_ether_addr);
- len = sizeof(struct i40e_virtchnl_ether_addr_list) +
- (count * sizeof(struct i40e_virtchnl_ether_addr));
+ sizeof(struct virtchnl_ether_addr_list)) /
+ sizeof(struct virtchnl_ether_addr);
+ len = sizeof(struct virtchnl_ether_addr_list) +
+ (count * sizeof(struct virtchnl_ether_addr));
more = true;
}
@@ -440,7 +444,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR,
(u8 *)veal, len);
kfree(veal);
}
@@ -455,12 +459,12 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
**/
void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_ether_addr_list *veal;
+ struct virtchnl_ether_addr_list *veal;
struct i40evf_mac_filter *f, *ftmp;
int len, i = 0, count = 0;
bool more = false;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
adapter->current_op);
@@ -474,17 +478,17 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
return;
}
- adapter->current_op = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
+ adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;
- len = sizeof(struct i40e_virtchnl_ether_addr_list) +
- (count * sizeof(struct i40e_virtchnl_ether_addr));
+ len = sizeof(struct virtchnl_ether_addr_list) +
+ (count * sizeof(struct virtchnl_ether_addr));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
count = (I40EVF_MAX_AQ_BUF_SIZE -
- sizeof(struct i40e_virtchnl_ether_addr_list)) /
- sizeof(struct i40e_virtchnl_ether_addr);
- len = sizeof(struct i40e_virtchnl_ether_addr_list) +
- (count * sizeof(struct i40e_virtchnl_ether_addr));
+ sizeof(struct virtchnl_ether_addr_list)) /
+ sizeof(struct virtchnl_ether_addr);
+ len = sizeof(struct virtchnl_ether_addr_list) +
+ (count * sizeof(struct virtchnl_ether_addr));
more = true;
}
veal = kzalloc(len, GFP_KERNEL);
@@ -505,7 +509,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR,
(u8 *)veal, len);
kfree(veal);
}
@@ -520,12 +524,12 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
**/
void i40evf_add_vlans(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_vlan_filter_list *vvfl;
+ struct virtchnl_vlan_filter_list *vvfl;
int len, i = 0, count = 0;
struct i40evf_vlan_filter *f;
bool more = false;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
adapter->current_op);
@@ -540,16 +544,16 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
return;
}
- adapter->current_op = I40E_VIRTCHNL_OP_ADD_VLAN;
+ adapter->current_op = VIRTCHNL_OP_ADD_VLAN;
- len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ len = sizeof(struct virtchnl_vlan_filter_list) +
(count * sizeof(u16));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
count = (I40EVF_MAX_AQ_BUF_SIZE -
- sizeof(struct i40e_virtchnl_vlan_filter_list)) /
+ sizeof(struct virtchnl_vlan_filter_list)) /
sizeof(u16);
- len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ len = sizeof(struct virtchnl_vlan_filter_list) +
(count * sizeof(u16));
more = true;
}
@@ -570,7 +574,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
kfree(vvfl);
}
@@ -584,12 +588,12 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
**/
void i40evf_del_vlans(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_vlan_filter_list *vvfl;
+ struct virtchnl_vlan_filter_list *vvfl;
struct i40evf_vlan_filter *f, *ftmp;
int len, i = 0, count = 0;
bool more = false;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n",
adapter->current_op);
@@ -604,16 +608,16 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
return;
}
- adapter->current_op = I40E_VIRTCHNL_OP_DEL_VLAN;
+ adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
- len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ len = sizeof(struct virtchnl_vlan_filter_list) +
(count * sizeof(u16));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
count = (I40EVF_MAX_AQ_BUF_SIZE -
- sizeof(struct i40e_virtchnl_vlan_filter_list)) /
+ sizeof(struct virtchnl_vlan_filter_list)) /
sizeof(u16);
- len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ len = sizeof(struct virtchnl_vlan_filter_list) +
(count * sizeof(u16));
more = true;
}
@@ -635,7 +639,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
}
if (!more)
adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
kfree(vvfl);
}
@@ -648,25 +652,25 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
**/
void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
{
- struct i40e_virtchnl_promisc_info vpi;
+ struct virtchnl_promisc_info vpi;
int promisc_all;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
adapter->current_op);
return;
}
- promisc_all = I40E_FLAG_VF_UNICAST_PROMISC |
- I40E_FLAG_VF_MULTICAST_PROMISC;
+ promisc_all = FLAG_VF_UNICAST_PROMISC |
+ FLAG_VF_MULTICAST_PROMISC;
if ((flags & promisc_all) == promisc_all) {
adapter->flags |= I40EVF_FLAG_PROMISC_ON;
adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_PROMISC;
dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
}
- if (flags & I40E_FLAG_VF_MULTICAST_PROMISC) {
+ if (flags & FLAG_VF_MULTICAST_PROMISC) {
adapter->flags |= I40EVF_FLAG_ALLMULTI_ON;
adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
@@ -678,10 +682,10 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
}
- adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
+ adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
vpi.vsi_id = adapter->vsi_res->vsi_id;
vpi.flags = flags;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
(u8 *)&vpi, sizeof(vpi));
}
@@ -693,19 +697,19 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
**/
void i40evf_request_stats(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_queue_select vqs;
+ struct virtchnl_queue_select vqs;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* no error message, this isn't crucial */
return;
}
- adapter->current_op = I40E_VIRTCHNL_OP_GET_STATS;
+ adapter->current_op = VIRTCHNL_OP_GET_STATS;
vqs.vsi_id = adapter->vsi_res->vsi_id;
/* queue maps are ignored for this message - only the vsi is used */
- if (i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_STATS,
+ if (i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS,
(u8 *)&vqs, sizeof(vqs)))
/* if the request failed, don't lock out others */
- adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}
/**
@@ -716,15 +720,15 @@ void i40evf_request_stats(struct i40evf_adapter *adapter)
**/
void i40evf_get_hena(struct i40evf_adapter *adapter)
{
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n",
adapter->current_op);
return;
}
- adapter->current_op = I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS;
+ adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS;
adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_HENA;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
NULL, 0);
}
@@ -736,18 +740,18 @@ void i40evf_get_hena(struct i40evf_adapter *adapter)
**/
void i40evf_set_hena(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_rss_hena vrh;
+ struct virtchnl_rss_hena vrh;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n",
adapter->current_op);
return;
}
vrh.hena = adapter->hena;
- adapter->current_op = I40E_VIRTCHNL_OP_SET_RSS_HENA;
+ adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA;
adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_HENA;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_SET_RSS_HENA,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA,
(u8 *)&vrh, sizeof(vrh));
}
@@ -759,16 +763,16 @@ void i40evf_set_hena(struct i40evf_adapter *adapter)
**/
void i40evf_set_rss_key(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_rss_key *vrk;
+ struct virtchnl_rss_key *vrk;
int len;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
adapter->current_op);
return;
}
- len = sizeof(struct i40e_virtchnl_rss_key) +
+ len = sizeof(struct virtchnl_rss_key) +
(adapter->rss_key_size * sizeof(u8)) - 1;
vrk = kzalloc(len, GFP_KERNEL);
if (!vrk)
@@ -777,9 +781,9 @@ void i40evf_set_rss_key(struct i40evf_adapter *adapter)
vrk->key_len = adapter->rss_key_size;
memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);
- adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_RSS_KEY;
+ adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_KEY;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY,
(u8 *)vrk, len);
kfree(vrk);
}
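[Hedged note on the length math above, assuming virtchnl_rss_key ends in a one-byte flexible key[1] member as in the virtchnl header.]

	/* len = sizeof(struct virtchnl_rss_key) + key_len - 1;
	 * the "- 1" stops the key[1] placeholder byte inside the struct
	 * from being counted twice. The same pattern appears just below
	 * for virtchnl_rss_lut and its lut entries.
	 */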
@@ -792,16 +796,16 @@ void i40evf_set_rss_key(struct i40evf_adapter *adapter)
**/
void i40evf_set_rss_lut(struct i40evf_adapter *adapter)
{
- struct i40e_virtchnl_rss_lut *vrl;
+ struct virtchnl_rss_lut *vrl;
int len;
- if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n",
adapter->current_op);
return;
}
- len = sizeof(struct i40e_virtchnl_rss_lut) +
+ len = sizeof(struct virtchnl_rss_lut) +
(adapter->rss_lut_size * sizeof(u8)) - 1;
vrl = kzalloc(len, GFP_KERNEL);
if (!vrl)
@@ -809,9 +813,9 @@ void i40evf_set_rss_lut(struct i40evf_adapter *adapter)
vrl->vsi_id = adapter->vsi.id;
vrl->lut_entries = adapter->rss_lut_size;
memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);
- adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_RSS_LUT;
+ adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_LUT;
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT,
(u8 *)vrl, len);
kfree(vrl);
}
@@ -867,8 +871,8 @@ static void i40evf_print_link_message(struct i40evf_adapter *adapter)
void i40evf_request_reset(struct i40evf_adapter *adapter)
{
/* Don't check CURRENT_OP - this is always higher priority */
- i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
- adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}
/**
@@ -884,17 +888,17 @@ void i40evf_request_reset(struct i40evf_adapter *adapter)
* This function handles the reply messages.
**/
void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
- enum i40e_virtchnl_ops v_opcode,
+ enum virtchnl_ops v_opcode,
i40e_status v_retval,
u8 *msg, u16 msglen)
{
struct net_device *netdev = adapter->netdev;
- if (v_opcode == I40E_VIRTCHNL_OP_EVENT) {
- struct i40e_virtchnl_pf_event *vpe =
- (struct i40e_virtchnl_pf_event *)msg;
+ if (v_opcode == VIRTCHNL_OP_EVENT) {
+ struct virtchnl_pf_event *vpe =
+ (struct virtchnl_pf_event *)msg;
switch (vpe->event) {
- case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
+ case VIRTCHNL_EVENT_LINK_CHANGE:
adapter->link_speed =
vpe->event_data.link_event.link_speed;
if (adapter->link_up !=
@@ -911,7 +915,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
i40evf_print_link_message(adapter);
}
break;
- case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
+ case VIRTCHNL_EVENT_RESET_IMPENDING:
dev_info(&adapter->pdev->dev, "PF reset warning received\n");
if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
adapter->flags |= I40EVF_FLAG_RESET_PENDING;
@@ -928,19 +932,19 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
}
if (v_retval) {
switch (v_opcode) {
- case I40E_VIRTCHNL_OP_ADD_VLAN:
+ case VIRTCHNL_OP_ADD_VLAN:
dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
i40evf_stat_str(&adapter->hw, v_retval));
break;
- case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
i40evf_stat_str(&adapter->hw, v_retval));
break;
- case I40E_VIRTCHNL_OP_DEL_VLAN:
+ case VIRTCHNL_OP_DEL_VLAN:
dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n",
i40evf_stat_str(&adapter->hw, v_retval));
break;
- case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n",
i40evf_stat_str(&adapter->hw, v_retval));
break;
@@ -952,27 +956,27 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
}
}
switch (v_opcode) {
- case I40E_VIRTCHNL_OP_GET_STATS: {
+ case VIRTCHNL_OP_GET_STATS: {
struct i40e_eth_stats *stats =
(struct i40e_eth_stats *)msg;
- adapter->net_stats.rx_packets = stats->rx_unicast +
- stats->rx_multicast +
- stats->rx_broadcast;
- adapter->net_stats.tx_packets = stats->tx_unicast +
- stats->tx_multicast +
- stats->tx_broadcast;
- adapter->net_stats.rx_bytes = stats->rx_bytes;
- adapter->net_stats.tx_bytes = stats->tx_bytes;
- adapter->net_stats.tx_errors = stats->tx_errors;
- adapter->net_stats.rx_dropped = stats->rx_discards;
- adapter->net_stats.tx_dropped = stats->tx_discards;
+ netdev->stats.rx_packets = stats->rx_unicast +
+ stats->rx_multicast +
+ stats->rx_broadcast;
+ netdev->stats.tx_packets = stats->tx_unicast +
+ stats->tx_multicast +
+ stats->tx_broadcast;
+ netdev->stats.rx_bytes = stats->rx_bytes;
+ netdev->stats.tx_bytes = stats->tx_bytes;
+ netdev->stats.tx_errors = stats->tx_errors;
+ netdev->stats.rx_dropped = stats->rx_discards;
+ netdev->stats.tx_dropped = stats->tx_discards;
adapter->current_stats = *stats;
}
break;
- case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: {
- u16 len = sizeof(struct i40e_virtchnl_vf_resource) +
+ case VIRTCHNL_OP_GET_VF_RESOURCES: {
+ u16 len = sizeof(struct virtchnl_vf_resource) +
I40E_MAX_VF_VSI *
- sizeof(struct i40e_virtchnl_vsi_resource);
+ sizeof(struct virtchnl_vsi_resource);
memcpy(adapter->vf_res, msg, min(msglen, len));
i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
/* restore current mac address */
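
Aside (simplified sketch, not part of the patch): dropping the
driver-private net_stats copy in favor of netdev->stats works because the
core falls back to the counters embedded in struct net_device when a driver
provides no stats callback. Roughly the shape of dev_get_stats() in
net/core/dev.c:

	if (ops->ndo_get_stats64)
		ops->ndo_get_stats64(dev, storage);
	else if (ops->ndo_get_stats)
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
	else
		netdev_stats_to_stats64(storage, &dev->stats);
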
@@ -980,18 +984,18 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
i40evf_process_config(adapter);
}
break;
- case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ case VIRTCHNL_OP_ENABLE_QUEUES:
/* enable transmits */
i40evf_irq_enable(adapter, true);
break;
- case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ case VIRTCHNL_OP_DISABLE_QUEUES:
i40evf_free_all_tx_resources(adapter);
i40evf_free_all_rx_resources(adapter);
if (adapter->state == __I40EVF_DOWN_PENDING)
adapter->state = __I40EVF_DOWN;
break;
- case I40E_VIRTCHNL_OP_VERSION:
- case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ case VIRTCHNL_OP_VERSION:
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
/* Don't display an error if we get these out of sequence.
* If the firmware needed to get kicked, we'll get these and
* it's no problem.
@@ -999,9 +1003,22 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
if (v_opcode != adapter->current_op)
return;
break;
- case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
- struct i40e_virtchnl_rss_hena *vrh =
- (struct i40e_virtchnl_rss_hena *)msg;
+ case VIRTCHNL_OP_IWARP:
+ /* Gobble zero-length replies from the PF. They indicate that
+ * a previous message was received OK, and the client doesn't
+ * care about that.
+ */
+ if (msglen && CLIENT_ENABLED(adapter))
+ i40evf_notify_client_message(&adapter->vsi,
+ msg, msglen);
+ break;
+
+ case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
+ adapter->client_pending &=
+ ~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP));
+ break;
+ case VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
+ struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
if (msglen == sizeof(*vrh))
adapter->hena = vrh->hena;
else
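
Aside (illustrative sketch, not part of the patch; the hunk above is cut
short by the next hunk): client_pending acts as a bitmask of iWARP opcodes
still awaiting a PF reply, set when the request is forwarded and cleared in
the completion handler above. Assuming a sender shaped like:

	/* sender side (sketch): mark the op as awaiting an ack */
	adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP);

	/* completion side (as in the hunk): the PF acknowledged it */
	adapter->client_pending &= ~BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP);
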
@@ -1010,10 +1027,10 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
}
break;
default:
- if (v_opcode != adapter->current_op)
+ if (adapter->current_op && (v_opcode != adapter->current_op))
dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
adapter->current_op, v_opcode);
break;
} /* switch v_opcode */
- adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}
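
Aside (illustrative, not part of the patch): VIRTCHNL_OP_UNKNOWN is defined
as 0, so the extra truth test added to the default case above is shorthand
for "only warn about a mismatched reply when a command is actually
outstanding":

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN &&
	    v_opcode != adapter->current_op)
		dev_warn(&adapter->pdev->dev,
			 "Expected response %d from PF, received %d\n",
			 adapter->current_op, v_opcode);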