Diffstat (limited to 'drivers'): 124 files changed, 6205 insertions, 2715 deletions
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 7fde4d5c2b28..3c45358844eb 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -1870,8 +1870,6 @@ static void ad_marker_info_received(struct bond_marker *marker_info, static void ad_marker_response_received(struct bond_marker *marker, struct port *port) { - marker = NULL; - port = NULL; /* DO NOTHING, SINCE WE DECIDED NOT TO IMPLEMENT THIS FEATURE FOR NOW */ } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index e1ccefce9a9d..0c627b4733ca 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3779,7 +3779,6 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave) struct slave *slave; struct list_head *iter; struct bond_up_slave *new_arr, *old_arr; - int slaves_in_agg; int agg_id = 0; int ret = 0; @@ -3810,7 +3809,6 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave) } goto out; } - slaves_in_agg = ad_info.ports; agg_id = ad_info.aggregator_id; } bond_for_each_slave(bond, slave, iter) { diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 7ad0a4d8e475..4c483d937481 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -46,13 +46,13 @@ config NET_DSA_MV88E6171 ethernet switches chips. config NET_DSA_MV88E6352 - tristate "Marvell 88E6172/88E6176/88E6352 ethernet switch chip support" + tristate "Marvell 88E6172/6176/6320/6321/6352 ethernet switch chip support" depends on NET_DSA select NET_DSA_MV88E6XXX select NET_DSA_TAG_EDSA ---help--- - This enables support for the Marvell 88E6172, 88E6176 and 88E6352 - ethernet switch chips. + This enables support for the Marvell 88E6172, 88E6176, 88E6320, + 88E6321 and 88E6352 ethernet switch chips. 
config NET_DSA_BCM_SF2 tristate "Broadcom Starfighter 2 Ethernet switch support" diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 079897b3a955..289e20443d83 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -901,15 +901,11 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, struct fixed_phy_status *status) { struct bcm_sf2_priv *priv = ds_to_priv(ds); - u32 duplex, pause, speed; + u32 duplex, pause; u32 reg; duplex = core_readl(priv, CORE_DUPSTS); pause = core_readl(priv, CORE_PAUSESTS); - speed = core_readl(priv, CORE_SPDSTS); - - speed >>= (port * SPDSTS_SHIFT); - speed &= SPDSTS_MASK; status->link = 0; @@ -944,18 +940,6 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, reg &= ~LINK_STS; core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port)); - switch (speed) { - case SPDSTS_10: - status->speed = SPEED_10; - break; - case SPDSTS_100: - status->speed = SPEED_100; - break; - case SPDSTS_1000: - status->speed = SPEED_1000; - break; - } - if ((pause & (1 << port)) && (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) { status->asym_pause = 1; diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index 632815c10a40..cfece5ae9d5f 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -36,6 +36,18 @@ static char *mv88e6352_probe(struct device *host_dev, int sw_addr) return "Marvell 88E6172"; if ((ret & 0xfff0) == PORT_SWITCH_ID_6176) return "Marvell 88E6176"; + if (ret == PORT_SWITCH_ID_6320_A1) + return "Marvell 88E6320 (A1)"; + if (ret == PORT_SWITCH_ID_6320_A2) + return "Marvell 88e6320 (A2)"; + if ((ret & 0xfff0) == PORT_SWITCH_ID_6320) + return "Marvell 88E6320"; + if (ret == PORT_SWITCH_ID_6321_A1) + return "Marvell 88E6321 (A1)"; + if (ret == PORT_SWITCH_ID_6321_A2) + return "Marvell 88e6321 (A2)"; + if ((ret & 0xfff0) == PORT_SWITCH_ID_6321) + return "Marvell 88E6321"; if (ret == PORT_SWITCH_ID_6352_A0) return "Marvell 88E6352 (A0)"; if (ret == PORT_SWITCH_ID_6352_A1) @@ -84,11 +96,12 @@ static int mv88e6352_setup_global(struct dsa_switch *ds) static int mv88e6352_get_temp(struct dsa_switch *ds, int *temp) { + int phy = mv88e6xxx_6320_family(ds) ? 3 : 0; int ret; *temp = 0; - ret = mv88e6xxx_phy_page_read(ds, 0, 6, 27); + ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27); if (ret < 0) return ret; @@ -99,11 +112,12 @@ static int mv88e6352_get_temp(struct dsa_switch *ds, int *temp) static int mv88e6352_get_temp_limit(struct dsa_switch *ds, int *temp) { + int phy = mv88e6xxx_6320_family(ds) ? 3 : 0; int ret; *temp = 0; - ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26); + ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26); if (ret < 0) return ret; @@ -114,23 +128,25 @@ static int mv88e6352_get_temp_limit(struct dsa_switch *ds, int *temp) static int mv88e6352_set_temp_limit(struct dsa_switch *ds, int temp) { + int phy = mv88e6xxx_6320_family(ds) ? 3 : 0; int ret; - ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26); + ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26); if (ret < 0) return ret; temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f); - return mv88e6xxx_phy_page_write(ds, 0, 6, 26, + return mv88e6xxx_phy_page_write(ds, phy, 6, 26, (ret & 0xe0ff) | (temp << 8)); } static int mv88e6352_get_temp_alarm(struct dsa_switch *ds, bool *alarm) { + int phy = mv88e6xxx_6320_family(ds) ? 
3 : 0; int ret; *alarm = false; - ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26); + ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26); if (ret < 0) return ret; @@ -394,5 +410,8 @@ struct dsa_switch_driver mv88e6352_switch_driver = { .fdb_getnext = mv88e6xxx_port_fdb_getnext, }; -MODULE_ALIAS("platform:mv88e6352"); MODULE_ALIAS("platform:mv88e6172"); +MODULE_ALIAS("platform:mv88e6176"); +MODULE_ALIAS("platform:mv88e6320"); +MODULE_ALIAS("platform:mv88e6321"); +MODULE_ALIAS("platform:mv88e6352"); diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 561342466076..5158375b7abd 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -517,6 +517,18 @@ static bool mv88e6xxx_6185_family(struct dsa_switch *ds) return false; } +bool mv88e6xxx_6320_family(struct dsa_switch *ds) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + + switch (ps->id) { + case PORT_SWITCH_ID_6320: + case PORT_SWITCH_ID_6321: + return true; + } + return false; +} + static bool mv88e6xxx_6351_family(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); @@ -565,7 +577,7 @@ static int _mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port) { int ret; - if (mv88e6xxx_6352_family(ds)) + if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds)) port = (port + 1) << 5; /* Snapshot the hardware statistics counters for this port. */ @@ -1377,7 +1389,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) || - mv88e6xxx_6065_family(ds)) { + mv88e6xxx_6065_family(ds) || mv88e6xxx_6320_family(ds)) { /* MAC Forcing register: don't force link, speed, * duplex or flow control state to any particular * values on physical ports, but force the CPU port @@ -1423,7 +1435,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) || - mv88e6xxx_6185_family(ds)) + mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) reg = PORT_CONTROL_IGMP_MLD_SNOOP | PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP | PORT_CONTROL_STATE_FORWARDING; @@ -1431,7 +1443,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) reg |= PORT_CONTROL_DSA_TAG; if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || - mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) { + mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || + mv88e6xxx_6320_family(ds)) { if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA) reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA; else @@ -1441,14 +1454,15 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) || - mv88e6xxx_6185_family(ds)) { + mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) { if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA) reg |= PORT_CONTROL_EGRESS_ADD_TAG; } } if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || - mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds)) { + mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) || + mv88e6xxx_6320_family(ds)) { 
if (ds->dsa_port_mask & (1 << port)) reg |= PORT_CONTROL_FRAME_MODE_DSA; if (port == dsa_upstream_port(ds)) @@ -1473,11 +1487,11 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) reg = 0; if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || - mv88e6xxx_6095_family(ds)) + mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds)) reg = PORT_CONTROL_2_MAP_DA; if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || - mv88e6xxx_6165_family(ds)) + mv88e6xxx_6165_family(ds) || mv88e6xxx_6320_family(ds)) reg |= PORT_CONTROL_2_JUMBO_10240; if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) { @@ -1514,7 +1528,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) goto abort; if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || - mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) { + mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || + mv88e6xxx_6320_family(ds)) { /* Do not limit the period of time that this port can * be paused for by the remote end or the period of * time that this port can pause the remote end. @@ -1564,7 +1579,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || - mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds)) { + mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) || + mv88e6xxx_6320_family(ds)) { /* Rate Control: disable ingress rate limiting. */ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_RATE_CONTROL, 0x0001); @@ -1976,7 +1992,8 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds) (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT)); if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || - mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) { + mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || + mv88e6xxx_6320_family(ds)) { /* Send all frames with destination addresses matching * 01:80:c2:00:00:2x to the CPU port. */ @@ -1995,7 +2012,8 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds) if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || - mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds)) { + mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) || + mv88e6xxx_6320_family(ds)) { /* Disable ingress rate limiting by resetting all * ingress rate limit registers to their initial * state. 
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index a650b2656de9..64786cb89a93 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -89,7 +89,12 @@ #define PORT_SWITCH_ID_6182 0x1a60 #define PORT_SWITCH_ID_6185 0x1a70 #define PORT_SWITCH_ID_6240 0x2400 -#define PORT_SWITCH_ID_6320 0x1250 +#define PORT_SWITCH_ID_6320 0x1150 +#define PORT_SWITCH_ID_6320_A1 0x1151 +#define PORT_SWITCH_ID_6320_A2 0x1152 +#define PORT_SWITCH_ID_6321 0x3100 +#define PORT_SWITCH_ID_6321_A1 0x3101 +#define PORT_SWITCH_ID_6321_A2 0x3102 #define PORT_SWITCH_ID_6350 0x3710 #define PORT_SWITCH_ID_6351 0x3750 #define PORT_SWITCH_ID_6352 0x3520 @@ -410,6 +415,7 @@ int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port, int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg); int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page, int reg, int val); +bool mv88e6xxx_6320_family(struct dsa_switch *ds); extern struct dsa_switch_driver mv88e6131_switch_driver; extern struct dsa_switch_driver mv88e6123_61_65_switch_driver; extern struct dsa_switch_driver mv88e6352_switch_driver; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index cd4ae76bbff2..a1f9785f0209 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1,6 +1,8 @@ -/* bnx2x.h: Broadcom Everest network driver. +/* bnx2x.h: QLogic Everest network driver. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -30,7 +32,7 @@ * (you will need to reboot afterwards) */ /* #define BNX2X_STOP_ON_ERROR */ -#define DRV_MODULE_VERSION "1.710.51-0" +#define DRV_MODULE_VERSION "1.712.30-0" #define DRV_MODULE_RELDATE "2014/02/10" #define BNX2X_BC_VER 0x040200 @@ -1422,6 +1424,7 @@ enum { SUB_MF_MODE_UNKNOWN = 0, SUB_MF_MODE_UFP, SUB_MF_MODE_NPAR1_DOT_5, + SUB_MF_MODE_BD, }; struct bnx2x { @@ -1636,6 +1639,8 @@ struct bnx2x { u8 mf_sub_mode; #define IS_MF_UFP(bp) (IS_MF_SD(bp) && \ bp->mf_sub_mode == SUB_MF_MODE_UFP) +#define IS_MF_BD(bp) (IS_MF_SD(bp) && \ + bp->mf_sub_mode == SUB_MF_MODE_BD) u8 wol; @@ -2577,6 +2582,8 @@ void bnx2x_set_local_cmng(struct bnx2x *bp); void bnx2x_update_mng_version(struct bnx2x *bp); +void bnx2x_update_mfw_dump(struct bnx2x *bp); + #define MCPR_SCRATCH_BASE(bp) \ (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index a90d7364334f..6088c86c4872 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -1,6 +1,8 @@ -/* bnx2x_cmn.c: Broadcom Everest network driver. +/* bnx2x_cmn.c: QLogic Everest network driver. 
* * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -2103,9 +2105,14 @@ int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, if (rss_obj->udp_rss_v6) __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags); - if (!CHIP_IS_E1x(bp)) + if (!CHIP_IS_E1x(bp)) { + /* valid only for TUNN_MODE_VXLAN tunnel mode */ + __set_bit(BNX2X_RSS_IPV4_VXLAN, ¶ms.rss_flags); + __set_bit(BNX2X_RSS_IPV6_VXLAN, ¶ms.rss_flags); + /* valid only for TUNN_MODE_GRE tunnel mode */ - __set_bit(BNX2X_RSS_GRE_INNER_HDRS, ¶ms.rss_flags); + __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, ¶ms.rss_flags); + } } else { __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags); } @@ -2510,6 +2517,20 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index) fp->mode = TPA_MODE_DISABLED; } +void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state) +{ + u32 cur; + + if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp)) + return; + + cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]); + DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n", + cur, state); + + SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state); +} + int bnx2x_load_cnic(struct bnx2x *bp) { int i, rc, port = BP_PORT(bp); @@ -2873,6 +2894,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) /* mark driver is loaded in shmem2 */ u32 val; val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]); + val &= ~DRV_FLAGS_MTU_MASK; + val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT); SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED | DRV_FLAGS_CAPABILITIES_LOADED_L2); @@ -2885,10 +2908,17 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) return -EBUSY; } + /* Update driver data for On-Chip MFW dump. 
*/ + if (IS_PF(bp)) + bnx2x_update_mfw_dump(bp); + /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */ if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG)) bnx2x_dcbx_init(bp, false); + if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) + bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE); + DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n"); return 0; @@ -2956,6 +2986,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) DP(NETIF_MSG_IFUP, "Starting NIC unload\n"); + if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) + bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED); + /* mark driver is unloaded in shmem2 */ if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { u32 val; @@ -3677,7 +3710,7 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, pbd2->fw_ip_hdr_to_payload_w = hlen_w - ((sizeof(struct ipv6hdr)) >> 1); pbd_e2->data.tunnel_data.flags |= - ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER; + ETH_TUNNEL_DATA_IPV6_OUTER; } pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq); @@ -4184,6 +4217,41 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } +void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default) +{ + int mfw_vn = BP_FW_MB_IDX(bp); + u32 tmp; + + /* If the shmem shouldn't affect configuration, reflect */ + if (!IS_MF_BD(bp)) { + int i; + + for (i = 0; i < BNX2X_MAX_PRIORITY; i++) + c2s_map[i] = i; + *c2s_default = 0; + + return; + } + + tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]); + tmp = (__force u32)be32_to_cpu((__force __be32)tmp); + c2s_map[0] = tmp & 0xff; + c2s_map[1] = (tmp >> 8) & 0xff; + c2s_map[2] = (tmp >> 16) & 0xff; + c2s_map[3] = (tmp >> 24) & 0xff; + + tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]); + tmp = (__force u32)be32_to_cpu((__force __be32)tmp); + c2s_map[4] = tmp & 0xff; + c2s_map[5] = (tmp >> 8) & 0xff; + c2s_map[6] = (tmp >> 16) & 0xff; + c2s_map[7] = (tmp >> 24) & 0xff; + + tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]); + tmp = (__force u32)be32_to_cpu((__force __be32)tmp); + *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff; +} + /** * bnx2x_setup_tc - routine to configure net_device for multi tc * @@ -4194,8 +4262,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) */ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) { - int cos, prio, count, offset; struct bnx2x *bp = netdev_priv(dev); + u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def; + int cos, prio, count, offset; /* setup tc must be called under rtnl lock */ ASSERT_RTNL(); @@ -4219,12 +4288,16 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) return -EINVAL; } + bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def); + /* configure priority to traffic class mapping */ for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) { - netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]); + int outer_prio = c2s_map[prio]; + + netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]); DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, "mapping priority %d to tc %d\n", - prio, bp->prio_to_cos[prio]); + outer_prio, bp->prio_to_cos[outer_prio]); } /* Use this configuration to differentiate tc0 from other COSes @@ -4278,6 +4351,9 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p) if (netif_running(dev)) rc = bnx2x_set_eth_mac(bp, true); + if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg)) + SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS); + return rc; } @@ -4831,6 +4907,9 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu) */ dev->mtu = new_mtu; + if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg)) + 
SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS); + return bnx2x_reload_if_running(dev); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 03b7404d5b9b..821346ce50eb 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -1,6 +1,8 @@ -/* bnx2x_cmn.h: Broadcom Everest network driver. +/* bnx2x_cmn.h: QLogic Everest network driver. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -620,6 +622,14 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features); */ void bnx2x_tx_timeout(struct net_device *dev); +/** bnx2x_get_c2s_mapping - read inner-to-outer vlan configuration + * c2s_map should have BNX2X_MAX_PRIORITY entries. + * @bp: driver handle + * @c2s_map: should have BNX2X_MAX_PRIORITY entries for mapping + * @c2s_default: entry for non-tagged configuration + */ +void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default); + /*********************** Inlines **********************************/ /*********************** Fast path ********************************/ static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) @@ -931,14 +941,33 @@ static inline int bnx2x_func_start(struct bnx2x *bp) start_params->mf_mode = bp->mf_mode; start_params->sd_vlan_tag = bp->mf_ov; + /* Configure Ethertype for BD mode */ + if (IS_MF_BD(bp)) { + DP(NETIF_MSG_IFUP, "Configuring ethertype 0x88a8 for BD\n"); + start_params->sd_vlan_eth_type = ETH_P_8021AD; + REG_WR(bp, PRS_REG_VLAN_TYPE_0, ETH_P_8021AD); + REG_WR(bp, PBF_REG_VLAN_TYPE_0, ETH_P_8021AD); + REG_WR(bp, NIG_REG_LLH_E1HOV_TYPE_1, ETH_P_8021AD); + + bnx2x_get_c2s_mapping(bp, start_params->c2s_pri, + &start_params->c2s_pri_default); + start_params->c2s_pri_valid = 1; + + DP(NETIF_MSG_IFUP, + "Inner-to-Outer priority: %02x %02x %02x %02x %02x %02x %02x %02x [Default %02x]\n", + start_params->c2s_pri[0], start_params->c2s_pri[1], + start_params->c2s_pri[2], start_params->c2s_pri[3], + start_params->c2s_pri[4], start_params->c2s_pri[5], + start_params->c2s_pri[6], start_params->c2s_pri[7], + start_params->c2s_pri_default); + } + if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) start_params->network_cos_mode = STATIC_COS; else /* CHIP_IS_E1X */ start_params->network_cos_mode = FW_WRR; - start_params->tunnel_mode = TUNN_MODE_GRE; - start_params->gre_tunnel_type = IPGRE_TUNNEL; - start_params->inner_gre_rss_en = 1; + start_params->inner_rss = 1; if (IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) { start_params->class_fail_ethtype = ETH_P_FIP; @@ -1339,4 +1368,11 @@ void bnx2x_squeeze_objects(struct bnx2x *bp); void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag, u32 verbose); +/** + * bnx2x_set_os_driver_state - write driver state for management FW usage + * + * @bp: driver handle + * @state: OS_DRIVER_STATE_* value reflecting current driver state + */ +void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state); #endif /* BNX2X_CMN_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 6e4294ed1fc9..7ccf6684e0a3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -1,15 +1,17 @@ -/* bnx2x_dcb.c: Broadcom Everest network driver. 
+/* bnx2x_dcb.c: QLogic Everest network driver. * * Copyright 2009-2013 Broadcom Corporation + * Copyright 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and QLogic execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). * * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written + * software in any way with any other QLogic software provided under a + * license other than the GPL, without QLogic's express prior written * consent. * * Maintained by: Ariel Elior <ariel.elior@qlogic.com> @@ -1850,6 +1852,8 @@ static void bnx2x_dcbx_fw_struct(struct bnx2x *bp, if (bp->dcbx_port_params.ets.cos_params[cos]. pri_bitmask & pri_bit) tt2cos[pri].cos = cos; + + pfc_fw_cfg->dcb_outer_pri[pri] = ttp[pri]; } /* we never want the FW to add a 0 vlan tag */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h index c6939ecb02c5..9a9517c0f703 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h @@ -1,15 +1,17 @@ -/* bnx2x_dcb.h: Broadcom Everest network driver. +/* bnx2x_dcb.h: QLogic Everest network driver. * * Copyright 2009-2013 Broadcom Corporation + * Copyright 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and QLogic execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). * * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written + * software in any way with any other QLogic software provided under a + * license other than the GPL, without QLogic's express prior written * consent. * * Maintained by: Ariel Elior <ariel.elior@qlogic.com> diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h index 741aa130c19f..eccfa13b0f2d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h @@ -1,15 +1,17 @@ -/* bnx2x_dump.h: Broadcom Everest network driver. +/* bnx2x_dump.h: QLogic Everest network driver. * * Copyright (c) 2012-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and QLogic execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). 
* * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written + * software in any way with any other QLogic software provided under a + * license other than the GPL, without QLogic's express prior written * consent. */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 76b9052a961c..6b2050a198df 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -1,6 +1,8 @@ -/* bnx2x_ethtool.c: Broadcom Everest network driver. +/* bnx2x_ethtool.c: QLogic Everest network driver. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -1129,6 +1131,9 @@ static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) } else bp->wol = 0; + if (SHMEM2_HAS(bp, curr_cfg)) + SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS); + return 0; } @@ -3562,17 +3567,8 @@ static int bnx2x_get_ts_info(struct net_device *dev, info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ); + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); info->tx_types = (1 << HWTSTAMP_TX_OFF)|(1 << HWTSTAMP_TX_ON); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h index 7636e3c18771..226ab29f4cb6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h @@ -1,6 +1,8 @@ -/* bnx2x_fw_defs.h: Broadcom Everest network driver. +/* bnx2x_fw_defs.h: Qlogic Everest network driver. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -372,7 +374,7 @@ #define MAX_COS_NUMBER 4 #define MAX_TRAFFIC_TYPES 8 #define MAX_PFC_PRIORITIES 8 - +#define MAX_VLAN_PRIORITIES 8 /* used by array traffic_type_to_priority[] to mark traffic type \ that is not mapped to priority*/ #define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h index 8aafd9b5d6a2..9e3b5a1e9f4f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h @@ -1,6 +1,8 @@ /* bnx2x_fw_file_hdr.h: FW binary file header structure. 
* * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 058bc7328220..08a08fa49caa 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -1,6 +1,8 @@ -/* bnx2x_hsi.h: Broadcom Everest network driver. +/* bnx2x_hsi.h: Qlogic Everest network driver. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -729,6 +731,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8722 0x00000f00 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54616 0x00001000 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84834 0x00001100 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84858 0x00001200 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00 @@ -786,6 +789,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722 0x00000f00 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616 0x00001000 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834 0x00001100 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858 0x00001200 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC 0x0000fc00 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00 @@ -864,6 +868,7 @@ struct shared_feat_cfg { /* NVRAM Offset */ #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200 #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300 #define SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE 0x00000400 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE 0x00000500 #define SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE 0x00000600 #define SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE 0x00000700 @@ -2064,6 +2069,26 @@ struct ncsi_oem_fcoe_features { #define FCOE_FEATURES4_FEATURE_SETTINGS_OFFSET 0 }; +enum curr_cfg_method_e { + CURR_CFG_MET_NONE = 0, /* default config */ + CURR_CFG_MET_OS = 1, + CURR_CFG_MET_VENDOR_SPEC = 2,/* e.g. 
Option ROM, NPAR, O/S Cfg Utils */ +}; + +struct mdump_driver_info { + u32 epoc; + u32 drv_ver; + u32 fw_ver; + + u32 valid_dump; + #define FIRST_DUMP_VALID (1 << 0) + #define SECOND_DUMP_VALID (1 << 1) + + u32 flags; + #define ENABLE_ALL_TRIGGERS (0x7fffffff) + #define TRIGGER_MDUMP_ONCE (1 << 31) +}; + struct ncsi_oem_data { u32 driver_version[4]; struct ncsi_oem_fcoe_features ncsi_oem_fcoe_features; @@ -2187,6 +2212,8 @@ struct shmem2_region { #define DRV_FLAGS_CAPABILITIES_LOADED_L2 0x00000002 #define DRV_FLAGS_CAPABILITIES_LOADED_FCOE 0x00000004 #define DRV_FLAGS_CAPABILITIES_LOADED_ISCSI 0x00000008 +#define DRV_FLAGS_MTU_MASK 0xffff0000 +#define DRV_FLAGS_MTU_SHIFT 16 u32 extended_dev_info_shared_cfg_size; @@ -2251,6 +2278,7 @@ struct shmem2_region { u32 reserved4; /* Offset 0x150 */ u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */ #define LINK_ATTR_SYNC_KR2_ENABLE 0x00000001 + #define LINK_ATTR_84858 0x00000002 #define LINK_SFP_EEPROM_COMP_CODE_MASK 0x0000ff00 #define LINK_SFP_EEPROM_COMP_CODE_SHIFT 8 #define LINK_SFP_EEPROM_COMP_CODE_SR 0x00001000 @@ -2268,6 +2296,74 @@ struct shmem2_region { /* We use indication for each PF (0..3) */ #define MFW_DRV_IND_READ_DONE_OFFSET(_pf_) (1 << (_pf_)) + union { /* For various OEMs */ /* Offset 0x1a0 */ + u8 storage_boot_prog[E2_FUNC_MAX]; + #define STORAGE_BOOT_PROG_MASK 0x000000FF + #define STORAGE_BOOT_PROG_NONE 0x00000000 + #define STORAGE_BOOT_PROG_ISCSI_IP_ACQUIRED 0x00000002 + #define STORAGE_BOOT_PROG_FCOE_FABRIC_LOGIN_SUCCESS 0x00000002 + #define STORAGE_BOOT_PROG_TARGET_FOUND 0x00000004 + #define STORAGE_BOOT_PROG_ISCSI_CHAP_SUCCESS 0x00000008 + #define STORAGE_BOOT_PROG_FCOE_LUN_FOUND 0x00000008 + #define STORAGE_BOOT_PROG_LOGGED_INTO_TGT 0x00000010 + #define STORAGE_BOOT_PROG_IMG_DOWNLOADED 0x00000020 + #define STORAGE_BOOT_PROG_OS_HANDOFF 0x00000040 + #define STORAGE_BOOT_PROG_COMPLETED 0x00000080 + + u32 oem_i2c_data_addr; + }; + + /* 9 entires for the C2S PCP map for each inner VLAN PCP + 1 default */ + /* For PCP values 0-3 use the map lower */ + /* 0xFF000000 - PCP 0, 0x00FF0000 - PCP 1, + * 0x0000FF00 - PCP 2, 0x000000FF PCP 3 + */ + u32 c2s_pcp_map_lower[E2_FUNC_MAX]; /* 0x1a4 */ + + /* For PCP values 4-7 use the map upper */ + /* 0xFF000000 - PCP 4, 0x00FF0000 - PCP 5, + * 0x0000FF00 - PCP 6, 0x000000FF PCP 7 + */ + u32 c2s_pcp_map_upper[E2_FUNC_MAX]; /* 0x1b4 */ + + /* For PCP default value get the MSB byte of the map default */ + u32 c2s_pcp_map_default[E2_FUNC_MAX]; /* 0x1c4 */ + + /* FC_NPIV table offset in NVRAM */ + u32 fc_npiv_nvram_tbl_addr[PORT_MAX]; /* 0x1d4 */ + + /* Shows last method that changed configuration of this device */ + enum curr_cfg_method_e curr_cfg; /* 0x1dc */ + + /* Storm FW version, shold be kept in the format 0xMMmmbbdd: + * MM - Major, mm - Minor, bb - Build ,dd - Drop + */ + u32 netproc_fw_ver; /* 0x1e0 */ + + /* Option ROM SMASH CLP version */ + u32 clp_ver; /* 0x1e4 */ + + u32 pcie_bus_num; /* 0x1e8 */ + + u32 sriov_switch_mode; /* 0x1ec */ + #define SRIOV_SWITCH_MODE_NONE 0x0 + #define SRIOV_SWITCH_MODE_VEB 0x1 + #define SRIOV_SWITCH_MODE_VEPA 0x2 + + u8 rsrv2[E2_FUNC_MAX]; /* 0x1f0 */ + + u32 img_inv_table_addr; /* Address to INV_TABLE_P */ /* 0x1f4 */ + + u32 mtu_size[E2_FUNC_MAX]; /* 0x1f8 */ + + u32 os_driver_state[E2_FUNC_MAX]; /* 0x208 */ + #define OS_DRIVER_STATE_NOT_LOADED 0 /* not installed */ + #define OS_DRIVER_STATE_LOADING 1 /* transition state */ + #define OS_DRIVER_STATE_DISABLED 2 /* installed but disabled */ + #define OS_DRIVER_STATE_ACTIVE 3 /* installed and active */ + + 
/* mini dump driver info */ + struct mdump_driver_info drv_info; /* 0x218 */ }; @@ -2898,8 +2994,8 @@ struct afex_stats { }; #define BCM_5710_FW_MAJOR_VERSION 7 -#define BCM_5710_FW_MINOR_VERSION 10 -#define BCM_5710_FW_REVISION_VERSION 51 +#define BCM_5710_FW_MINOR_VERSION 12 +#define BCM_5710_FW_REVISION_VERSION 30 #define BCM_5710_FW_ENGINEERING_VERSION 0 #define BCM_5710_FW_COMPILE_FLAGS 1 @@ -3901,7 +3997,11 @@ struct eth_fast_path_rx_cqe { __le16 len_on_bd; struct parsing_flags pars_flags; union eth_sgl_or_raw_data sgl_or_raw_data; - __le32 reserved1[7]; + u8 tunn_type; + u8 tunn_inner_hdrs_offset; + __le16 reserved1; + __le32 tunn_tenant_id; + __le32 padding[5]; u32 marker; }; @@ -4012,8 +4112,8 @@ struct eth_tunnel_data { __le16 pseudo_csum; u8 ip_hdr_start_inner_w; u8 flags; -#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER (0x1<<0) -#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER_SHIFT 0 +#define ETH_TUNNEL_DATA_IPV6_OUTER (0x1<<0) +#define ETH_TUNNEL_DATA_IPV6_OUTER_SHIFT 0 #define ETH_TUNNEL_DATA_RESERVED (0x7F<<1) #define ETH_TUNNEL_DATA_RESERVED_SHIFT 1 }; @@ -4120,16 +4220,12 @@ struct eth_rss_update_ramrod_data { #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 6 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY (0x1<<7) #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY_SHIFT 7 -#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<8) -#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 8 -#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY (0x1<<9) -#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY_SHIFT 9 -#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY (0x1<<10) -#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY_SHIFT 10 -#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<11) -#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 11 -#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED (0xF<<12) -#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED_SHIFT 12 +#define ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY (0x1<<8) +#define ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY_SHIFT 8 +#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<9) +#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 9 +#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED (0x3F<<10) +#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED_SHIFT 10 u8 rss_result_mask; u8 reserved3; __le16 reserved4; @@ -4314,6 +4410,18 @@ enum eth_tunnel_non_lso_csum_location { MAX_ETH_TUNNEL_NON_LSO_CSUM_LOCATION }; +enum eth_tunn_type { + TUNN_TYPE_NONE, + TUNN_TYPE_VXLAN, + TUNN_TYPE_L2_GRE, + TUNN_TYPE_IPV4_GRE, + TUNN_TYPE_IPV6_GRE, + TUNN_TYPE_L2_GENEVE, + TUNN_TYPE_IPV4_GENEVE, + TUNN_TYPE_IPV6_GENEVE, + MAX_ETH_TUNN_TYPE +}; + /* * Tx regular BD structure */ @@ -4758,6 +4866,9 @@ struct afex_vif_list_ramrod_data { __le16 reserved1; }; +struct c2s_pri_trans_table_entry { + u8 val[MAX_VLAN_PRIORITIES]; +}; /* * cfc delete event data @@ -5246,6 +5357,7 @@ struct flow_control_configuration { u8 dont_add_pri_0_en; u8 reserved1; __le32 reserved2; + u8 dcb_outer_pri[MAX_TRAFFIC_TYPES]; }; @@ -5260,18 +5372,25 @@ struct function_start_data { u8 path_id; u8 network_cos_mode; u8 dmae_cmd_id; - u8 tunnel_mode; - u8 gre_tunnel_type; - u8 tunn_clss_en; - u8 inner_gre_rss_en; - u8 sd_accept_mf_clss_fail; + u8 no_added_tags; + __le16 reserved0; + __le32 reserved1; + u8 inner_clss_vxlan; + u8 inner_clss_l2gre; + u8 inner_clss_l2geneve; + u8 inner_rss; __le16 vxlan_dst_port; + __le16 geneve_dst_port; + u8 sd_accept_mf_clss_fail; + u8 
sd_accept_mf_clss_fail_match_ethtype; __le16 sd_accept_mf_clss_fail_ethtype; __le16 sd_vlan_eth_type; u8 sd_vlan_force_pri_flg; u8 sd_vlan_force_pri_val; - u8 sd_accept_mf_clss_fail_match_ethtype; - u8 no_added_tags; + u8 c2s_pri_tt_valid; + u8 c2s_pri_default; + u8 reserved2[6]; + struct c2s_pri_trans_table_entry c2s_pri_trans_table; }; struct function_update_data { @@ -5289,11 +5408,12 @@ struct function_update_data { u8 tx_switch_suspend; u8 echo; u8 update_tunn_cfg_flg; - u8 tunnel_mode; - u8 gre_tunnel_type; - u8 tunn_clss_en; - u8 inner_gre_rss_en; + u8 inner_clss_vxlan; + u8 inner_clss_l2gre; + u8 inner_clss_l2geneve; + u8 inner_rss; __le16 vxlan_dst_port; + __le16 geneve_dst_port; u8 sd_vlan_force_pri_change_flg; u8 sd_vlan_force_pri_flg; u8 sd_vlan_force_pri_val; @@ -5302,6 +5422,8 @@ struct function_update_data { u8 reserved1; __le16 sd_vlan_tag; __le16 sd_vlan_eth_type; + __le16 reserved0; + __le32 reserved2; }; /* @@ -5330,15 +5452,6 @@ struct fw_version { #define __FW_VERSION_RESERVED_SHIFT 4 }; - -/* GRE Tunnel Mode */ -enum gre_tunnel_type { - NVGRE_TUNNEL, - L2GRE_TUNNEL, - IPGRE_TUNNEL, - MAX_GRE_TUNNEL_TYPE -}; - /* * Dynamic Host-Coalescing - Driver(host) counters */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h index d6e1975b7b69..46ee2c01f4c5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h @@ -1,7 +1,9 @@ -/* bnx2x_init.h: Broadcom Everest network driver. +/* bnx2x_init.h: Qlogic Everest network driver. * Structures and macroes needed during the initialization. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h index 5669ed2e87d0..1835d2e451c0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h @@ -1,8 +1,10 @@ -/* bnx2x_init_ops.h: Broadcom Everest network driver. +/* bnx2x_init_ops.h: Qlogic Everest network driver. * Static functions needed during the initialization. * This file is "included" in bnx2x_main.c. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index a0b03c27e0a3..d946bba43726 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -1,13 +1,15 @@ /* Copyright 2008-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and QLogic execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available - * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL"). 
* * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written + * software in any way with any other Qlogic software provided under a + * license other than the GPL, without Qlogic's express prior written * consent. * * Written by Yaniv Rosner @@ -9652,6 +9654,13 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy, /******************************************************************/ /* BCM8481/BCM84823/BCM84833 PHY SECTION */ /******************************************************************/ +static int bnx2x_is_8483x_8485x(struct bnx2x_phy *phy) +{ + return ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || + (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) || + (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858)); +} + static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, struct bnx2x *bp, u8 port) @@ -9666,8 +9675,7 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, }; u16 fw_ver1; - if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || - (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) { + if (bnx2x_is_8483x_8485x(phy)) { bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1); bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff, phy->ver_addr); @@ -9749,8 +9757,7 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp, bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, reg_set[i].val); - if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || - (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) + if (bnx2x_is_8483x_8485x(phy)) offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1; else offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1; @@ -9768,8 +9775,7 @@ static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; switch (action) { case PHY_INIT: - if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) && - (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) { + if (!bnx2x_is_8483x_8485x(phy)) { /* Save spirom version */ bnx2x_save_848xx_spirom_version(phy, bp, params->port); } @@ -9901,8 +9907,7 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, /* Always write this if this is not 84833/4. * For 84833/4, write it only when it's a forced speed. */ - if (((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) && - (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) || + if (!bnx2x_is_8483x_8485x(phy) || ((autoneg_val & (1<<12)) == 0)) bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, @@ -9949,8 +9954,86 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy, return bnx2x_848xx_cmn_config_init(phy, params, vars); } -#define PHY84833_CMDHDLR_WAIT 300 -#define PHY84833_CMDHDLR_MAX_ARGS 5 +#define PHY848xx_CMDHDLR_WAIT 300 +#define PHY848xx_CMDHDLR_MAX_ARGS 5 + +static int bnx2x_84858_cmd_hdlr(struct bnx2x_phy *phy, + struct link_params *params, + u16 fw_cmd, + u16 cmd_args[], int argc) +{ + int idx; + u16 val; + struct bnx2x *bp = params->bp; + + /* Step 1: Poll the STATUS register to see whether the previous command + * is in progress or the system is busy (CMD_IN_PROGRESS or + * SYSTEM_BUSY). 
If previous command is in progress or system is busy, + * check again until the previous command finishes execution and the + * system is available for taking command + */ + + for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) { + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_STATUS, &val); + if ((val != PHY84858_STATUS_CMD_IN_PROGRESS) && + (val != PHY84858_STATUS_CMD_SYSTEM_BUSY)) + break; + usleep_range(1000, 2000); + } + if (idx >= PHY848xx_CMDHDLR_WAIT) { + DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n"); + return -EINVAL; + } + + /* Step2: If any parameters are required for the function, write them + * to the required DATA registers + */ + + for (idx = 0; idx < argc; idx++) { + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_DATA1 + idx, + cmd_args[idx]); + } + + /* Step3: When the firmware is ready for commands, write the 'Command + * code' to the CMD register + */ + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd); + + /* Step4: Once the command has been written, poll the STATUS register + * to check whether the command has completed (CMD_COMPLETED_PASS/ + * CMD_FOR_CMDS or CMD_COMPLETED_ERROR). + */ + + for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) { + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_STATUS, &val); + if ((val == PHY84858_STATUS_CMD_COMPLETE_PASS) || + (val == PHY84858_STATUS_CMD_COMPLETE_ERROR)) + break; + usleep_range(1000, 2000); + } + if ((idx >= PHY848xx_CMDHDLR_WAIT) || + (val == PHY84858_STATUS_CMD_COMPLETE_ERROR)) { + DP(NETIF_MSG_LINK, "FW cmd failed.\n"); + return -EINVAL; + } + /* Step5: Once the command has completed, read the specficied DATA + * registers for any saved results for the command, if applicable + */ + + /* Gather returning data */ + for (idx = 0; idx < argc; idx++) { + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + MDIO_848xx_CMD_HDLR_DATA1 + idx, + &cmd_args[idx]); + } + + return 0; +} + static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, struct link_params *params, u16 fw_cmd, u16 cmd_args[], int argc) @@ -9960,16 +10043,16 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; /* Write CMD_OPEN_OVERRIDE to STATUS reg */ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_CMD_HDLR_STATUS, + MDIO_848xx_CMD_HDLR_STATUS, PHY84833_STATUS_CMD_OPEN_OVERRIDE); - for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) { + for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) { bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_CMD_HDLR_STATUS, &val); + MDIO_848xx_CMD_HDLR_STATUS, &val); if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS) break; usleep_range(1000, 2000); } - if (idx >= PHY84833_CMDHDLR_WAIT) { + if (idx >= PHY848xx_CMDHDLR_WAIT) { DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n"); return -EINVAL; } @@ -9977,42 +10060,62 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, /* Prepare argument(s) and issue command */ for (idx = 0; idx < argc; idx++) { bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_CMD_HDLR_DATA1 + idx, + MDIO_848xx_CMD_HDLR_DATA1 + idx, cmd_args[idx]); } bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_CMD_HDLR_COMMAND, fw_cmd); - for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) { + MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd); + for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) { bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_CMD_HDLR_STATUS, &val); + MDIO_848xx_CMD_HDLR_STATUS, &val); if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) || - (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) + (val == 
PHY84833_STATUS_CMD_COMPLETE_ERROR)) break; usleep_range(1000, 2000); } - if ((idx >= PHY84833_CMDHDLR_WAIT) || - (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) { + if ((idx >= PHY848xx_CMDHDLR_WAIT) || + (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) { DP(NETIF_MSG_LINK, "FW cmd failed.\n"); return -EINVAL; } /* Gather returning data */ for (idx = 0; idx < argc; idx++) { bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_CMD_HDLR_DATA1 + idx, + MDIO_848xx_CMD_HDLR_DATA1 + idx, &cmd_args[idx]); } bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_84833_CMD_HDLR_STATUS, + MDIO_848xx_CMD_HDLR_STATUS, PHY84833_STATUS_CMD_CLEAR_COMPLETE); return 0; } -static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) +static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy, + struct link_params *params, + u16 fw_cmd, + u16 cmd_args[], int argc) +{ + struct bnx2x *bp = params->bp; + + if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) || + (REG_RD(bp, params->shmem2_base + + offsetof(struct shmem2_region, + link_attr_sync[params->port])) & + LINK_ATTR_84858)) { + return bnx2x_84858_cmd_hdlr(phy, params, fw_cmd, cmd_args, + argc); + } else { + return bnx2x_84833_cmd_hdlr(phy, params, fw_cmd, cmd_args, + argc); + } +} + +static int bnx2x_848xx_pair_swap_cfg(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) { u32 pair_swap; - u16 data[PHY84833_CMDHDLR_MAX_ARGS]; + u16 data[PHY848xx_CMDHDLR_MAX_ARGS]; int status; struct bnx2x *bp = params->bp; @@ -10028,8 +10131,9 @@ static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy, /* Only the second argument is used for this command */ data[1] = (u16)pair_swap; - status = bnx2x_84833_cmd_hdlr(phy, params, - PHY84833_CMD_SET_PAIR_SWAP, data, PHY84833_CMDHDLR_MAX_ARGS); + status = bnx2x_848xx_cmd_hdlr(phy, params, + PHY848xx_CMD_SET_PAIR_SWAP, data, + PHY848xx_CMDHDLR_MAX_ARGS); if (status == 0) DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]); @@ -10118,8 +10222,8 @@ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n"); /* Prevent Phy from working in EEE and advertising it */ - rc = bnx2x_84833_cmd_hdlr(phy, params, - PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1); + rc = bnx2x_848xx_cmd_hdlr(phy, params, + PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1); if (rc) { DP(NETIF_MSG_LINK, "EEE disable failed.\n"); return rc; @@ -10136,8 +10240,8 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; u16 cmd_args = 1; - rc = bnx2x_84833_cmd_hdlr(phy, params, - PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1); + rc = bnx2x_848xx_cmd_hdlr(phy, params, + PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1); if (rc) { DP(NETIF_MSG_LINK, "EEE enable failed.\n"); return rc; @@ -10155,7 +10259,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, u8 port, initialize = 1; u16 val; u32 actual_phy_selection; - u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS]; + u16 cmd_args[PHY848xx_CMDHDLR_MAX_ARGS]; int rc = 0; usleep_range(1000, 2000); @@ -10180,8 +10284,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, /* Wait for GPHY to come out of reset */ msleep(50); - if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) && - (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) { + if (!bnx2x_is_8483x_8485x(phy)) { /* BCM84823 requires that XGXS links up first @ 10G for normal * behavior. 
*/ @@ -10192,7 +10295,19 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, bnx2x_program_serdes(¶ms->phy[INT_PHY], params, vars); vars->line_speed = temp; } + /* Check if this is actually BCM84858 */ + if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) { + u16 hw_rev; + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_848xx_ID_MSB, &hw_rev); + if (hw_rev == BCM84858_PHY_ID) { + params->link_attr_sync |= LINK_ATTR_84858; + bnx2x_update_link_attr(params, params->link_attr_sync); + } + } + + /* Set dual-media configuration according to configuration */ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, MDIO_CTL_REG_84823_MEDIA, &val); val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK | @@ -10237,18 +10352,17 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n", params->multi_phy_config, val); - if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || - (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) { - bnx2x_84833_pair_swap_cfg(phy, params, vars); + if (bnx2x_is_8483x_8485x(phy)) { + bnx2x_848xx_pair_swap_cfg(phy, params, vars); /* Keep AutogrEEEn disabled. */ cmd_args[0] = 0x0; cmd_args[1] = 0x0; cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1; cmd_args[3] = PHY84833_CONSTANT_LATENCY; - rc = bnx2x_84833_cmd_hdlr(phy, params, - PHY84833_CMD_SET_EEE_MODE, cmd_args, - PHY84833_CMDHDLR_MAX_ARGS); + rc = bnx2x_848xx_cmd_hdlr(phy, params, + PHY848xx_CMD_SET_EEE_MODE, cmd_args, + PHY848xx_CMDHDLR_MAX_ARGS); if (rc) DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n"); } @@ -10302,8 +10416,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK; } - if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || - (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) { + if (bnx2x_is_8483x_8485x(phy)) { /* Bring PHY out of super isolate mode as the final step. 
*/ bnx2x_cl45_read_and_write(bp, phy, MDIO_CTL_DEVAD, @@ -10435,8 +10548,7 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy, LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; /* Determine if EEE was negotiated */ - if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || - (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) + if (bnx2x_is_8483x_8485x(phy)) bnx2x_eee_an_resolve(phy, params, vars); } @@ -11842,6 +11954,40 @@ static const struct bnx2x_phy phy_84834 = { .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func }; +static const struct bnx2x_phy phy_84858 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858, + .addr = 0xff, + .def_md_devad = 0, + .flags = FLAGS_FAN_FAILURE_DET_REQ | + FLAGS_REARM_LATCH_SIGNAL, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_TP | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)bnx2x_848x3_config_init, + .read_status = (read_status_t)bnx2x_848xx_read_status, + .link_reset = (link_reset_t)bnx2x_848x3_link_reset, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, + .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy, + .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, + .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func +}; + static const struct bnx2x_phy phy_54618se = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE, .addr = 0xff, @@ -12128,6 +12274,9 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp, case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834: *phy = phy_84834; break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858: + *phy = phy_84858; + break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616: case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE: *phy = phy_54618se; @@ -12184,9 +12333,7 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp, } phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port); - if (((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || - (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) && - (phy->ver_addr)) { + if (bnx2x_is_8483x_8485x(phy) && (phy->ver_addr)) { /* Remove 100Mb link supported for BCM84833/4 when phy fw * version lower than or equal to 1.39 */ @@ -13281,6 +13428,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833: case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858: /* GPIO3's are linked, and so both need to be toggled * to obtain required 2us pulse. 
*/ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h index d9cce4c3899b..b7d251108c19 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h @@ -1,13 +1,15 @@ /* Copyright 2008-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and QLogic execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available - * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL"). * * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written + * software in any way with any other Qlogic software provided under a + * license other than the GPL, without Qlogic's express prior written * consent. * * Written by Yaniv Rosner diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index c27af12314ed..78e55fe616e3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -1,6 +1,8 @@ -/* bnx2x_main.c: Broadcom Everest network driver. +/* bnx2x_main.c: QLogic Everest network driver. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -81,11 +83,11 @@ #define TX_TIMEOUT (5*HZ) static char version[] = - "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver " + "QLogic 5771x/578xx 10/20-Gigabit Ethernet Driver " DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; MODULE_AUTHOR("Eliezer Tamir"); -MODULE_DESCRIPTION("Broadcom NetXtreme II " +MODULE_DESCRIPTION("QLogic " "BCM57710/57711/57711E/" "57712/57712_MF/57800/57800_MF/57810/57810_MF/" "57840/57840_MF Driver"); @@ -163,27 +165,27 @@ enum bnx2x_board_type { static struct { char *name; } board_info[] = { - [BCM57710] = { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" }, - [BCM57711] = { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" }, - [BCM57711E] = { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" }, - [BCM57712] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" }, - [BCM57712_MF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" }, - [BCM57712_VF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" }, - [BCM57800] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" }, - [BCM57800_MF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" }, - [BCM57800_VF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" }, - [BCM57810] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" }, - [BCM57810_MF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" }, - [BCM57810_VF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Virtual Function" }, - [BCM57840_4_10] = { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" }, - [BCM57840_2_20] = { "Broadcom NetXtreme II BCM57840 20 
Gigabit Ethernet" }, - [BCM57840_MF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" }, - [BCM57840_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" }, - [BCM57811] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" }, - [BCM57811_MF] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" }, - [BCM57840_O] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" }, - [BCM57840_MFO] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" }, - [BCM57811_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" } + [BCM57710] = { "QLogic BCM57710 10 Gigabit PCIe [Everest]" }, + [BCM57711] = { "QLogic BCM57711 10 Gigabit PCIe" }, + [BCM57711E] = { "QLogic BCM57711E 10 Gigabit PCIe" }, + [BCM57712] = { "QLogic BCM57712 10 Gigabit Ethernet" }, + [BCM57712_MF] = { "QLogic BCM57712 10 Gigabit Ethernet Multi Function" }, + [BCM57712_VF] = { "QLogic BCM57712 10 Gigabit Ethernet Virtual Function" }, + [BCM57800] = { "QLogic BCM57800 10 Gigabit Ethernet" }, + [BCM57800_MF] = { "QLogic BCM57800 10 Gigabit Ethernet Multi Function" }, + [BCM57800_VF] = { "QLogic BCM57800 10 Gigabit Ethernet Virtual Function" }, + [BCM57810] = { "QLogic BCM57810 10 Gigabit Ethernet" }, + [BCM57810_MF] = { "QLogic BCM57810 10 Gigabit Ethernet Multi Function" }, + [BCM57810_VF] = { "QLogic BCM57810 10 Gigabit Ethernet Virtual Function" }, + [BCM57840_4_10] = { "QLogic BCM57840 10 Gigabit Ethernet" }, + [BCM57840_2_20] = { "QLogic BCM57840 20 Gigabit Ethernet" }, + [BCM57840_MF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" }, + [BCM57840_VF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" }, + [BCM57811] = { "QLogic BCM57811 10 Gigabit Ethernet" }, + [BCM57811_MF] = { "QLogic BCM57811 10 Gigabit Ethernet Multi Function" }, + [BCM57840_O] = { "QLogic BCM57840 10/20 Gigabit Ethernet" }, + [BCM57840_MFO] = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" }, + [BCM57811_VF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" } }; #ifndef PCI_DEVICE_ID_NX2_57710 @@ -2916,7 +2918,7 @@ static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp) func_params.f_obj = &bp->func_obj; func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE; - if (IS_MF_UFP(bp)) { + if (IS_MF_UFP(bp) || IS_MF_BD(bp)) { int func = BP_ABS_FUNC(bp); u32 val; @@ -2943,16 +2945,16 @@ static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp) BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n", bp->mf_ov); goto fail; + } else { + DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n", + bp->mf_ov); } - - DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n", bp->mf_ov); - - bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0); - - return; + } else { + goto fail; } - /* not supported by SW yet */ + bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0); + return; fail: bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0); } @@ -3707,6 +3709,34 @@ out: ethver, iscsiver, fcoever); } +void bnx2x_update_mfw_dump(struct bnx2x *bp) +{ + struct timeval epoc; + u32 drv_ver; + u32 valid_dump; + + if (!SHMEM2_HAS(bp, drv_info)) + return; + + /* Update Driver load time */ + do_gettimeofday(&epoc); + SHMEM2_WR(bp, drv_info.epoc, epoc.tv_sec); + + drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true); + SHMEM2_WR(bp, drv_info.drv_ver, drv_ver); + + SHMEM2_WR(bp, drv_info.fw_ver, REG_RD(bp, XSEM_REG_PRAM)); + + /* Check & notify On-Chip dump. 
*/ + valid_dump = SHMEM2_RD(bp, drv_info.valid_dump); + + if (valid_dump & FIRST_DUMP_VALID) + DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 1st partition\n"); + + if (valid_dump & SECOND_DUMP_VALID) + DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 2nd partition\n"); +} + static void bnx2x_oem_event(struct bnx2x *bp, u32 event) { u32 cmd_ok, cmd_fail; @@ -5568,6 +5598,8 @@ static void bnx2x_eq_int(struct bnx2x *bp) BNX2X_STATE_OPEN): case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BNX2X_STATE_OPENING_WAIT4_PORT): + case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | + BNX2X_STATE_CLOSING_WAIT4_HALT): cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK; DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n", @@ -7429,6 +7461,9 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) } else BNX2X_ERR("Bootcode is missing - can not initialize link\n"); + if (SHMEM2_HAS(bp, netproc_fw_ver)) + SHMEM2_WR(bp, netproc_fw_ver, REG_RD(bp, XSEM_REG_PRAM)); + return 0; } @@ -11678,7 +11713,7 @@ static void validate_set_si_mode(struct bnx2x *bp) static int bnx2x_get_hwinfo(struct bnx2x *bp) { int /*abs*/func = BP_ABS_FUNC(bp); - int vn; + int vn, mfw_vn; u32 val = 0, val2 = 0; int rc = 0; @@ -11768,6 +11803,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) bp->mf_mode = 0; bp->mf_sub_mode = 0; vn = BP_VN(bp); + mfw_vn = BP_FW_MB_IDX(bp); if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n", @@ -11824,6 +11860,31 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) } else BNX2X_DEV_INFO("illegal OV for SD\n"); break; + case SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE: + bp->mf_mode = MULTI_FUNCTION_SD; + bp->mf_sub_mode = SUB_MF_MODE_BD; + bp->mf_config[vn] = + MF_CFG_RD(bp, + func_mf_config[func].config); + + if (SHMEM2_HAS(bp, mtu_size)) { + int mtu_idx = BP_FW_MB_IDX(bp); + u16 mtu_size; + u32 mtu; + + mtu = SHMEM2_RD(bp, mtu_size[mtu_idx]); + mtu_size = (u16)mtu; + DP(NETIF_MSG_IFUP, "Read MTU size %04x [%08x]\n", + mtu_size, mtu); + + /* if valid: update device mtu */ + if (((mtu_size + ETH_HLEN) >= + ETH_MIN_PACKET_SIZE) && + (mtu_size <= + ETH_MAX_JUMBO_PACKET_SIZE)) + bp->dev->mtu = mtu_size; + } + break; case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE: bp->mf_mode = MULTI_FUNCTION_SD; bp->mf_sub_mode = SUB_MF_MODE_UFP; @@ -11871,9 +11932,10 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n", func, bp->mf_ov, bp->mf_ov); - } else if (bp->mf_sub_mode == SUB_MF_MODE_UFP) { + } else if ((bp->mf_sub_mode == SUB_MF_MODE_UFP) || + (bp->mf_sub_mode == SUB_MF_MODE_BD)) { dev_err(&bp->pdev->dev, - "Unexpected - no valid MF OV for func %d in UFP mode\n", + "Unexpected - no valid MF OV for func %d in UFP/BD mode\n", func); bp->path_has_ovlan = true; } else { @@ -13561,6 +13623,9 @@ static int bnx2x_init_one(struct pci_dev *pdev, bnx2x_register_phc(bp); + if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) + bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED); + return 0; init_one_exit: @@ -13623,6 +13688,7 @@ static void __bnx2x_remove(struct pci_dev *pdev, /* Power on: we can't let PCI layer write to us while we are in D3 */ if (IS_PF(bp)) { bnx2x_set_power_state(bp, PCI_D0); + bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_NOT_LOADED); /* Set endianity registers to reset values in case next driver * boots in different endianty environment. 
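Worth calling out from the bnx2x_main.c hunk above: in the new BD multi-function mode the management firmware can hand the driver an MTU through shmem2 (mtu_size[]), and the value is applied only after a sanity check against the minimum frame size and the jumbo limit. A small standalone sketch of that check; ETH_MIN_PACKET_SIZE and ETH_MAX_JUMBO_PACKET_SIZE are written out here with illustrative values (60 and 9600), the driver uses its own definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_HLEN                   14    /* standard Ethernet header */
#define ETH_MIN_PACKET_SIZE        60    /* illustrative */
#define ETH_MAX_JUMBO_PACKET_SIZE  9600  /* illustrative */

/* The shmem word carries the MTU in its low 16 bits; only a value that
 * still yields a minimum-sized frame and does not exceed the jumbo limit
 * is pushed to the netdev. */
static bool mfw_mtu_is_valid(uint32_t shmem_word, uint16_t *mtu_out)
{
	uint16_t mtu_size = (uint16_t)shmem_word;

	if ((mtu_size + ETH_HLEN) >= ETH_MIN_PACKET_SIZE &&
	    mtu_size <= ETH_MAX_JUMBO_PACKET_SIZE) {
		*mtu_out = mtu_size;
		return true;
	}
	return false;
}

int main(void)
{
	unsigned int samples[] = { 1500, 9000, 42, 20000 };
	uint16_t mtu;

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		if (mfw_mtu_is_valid(samples[i], &mtu))
			printf("%u -> apply MTU %u\n", samples[i], mtu);
		else
			printf("%u -> ignore\n", samples[i]);
	}
	return 0;
}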
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h index caf1aef651eb..a91ccbf36345 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h @@ -1,6 +1,8 @@ -/* bnx2x_mfw_req.h: Broadcom Everest network driver. +/* bnx2x_mfw_req.h: Qlogic Everest network driver. * * Copyright (c) 2012-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 49d511092c82..4dead49bd5cb 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -1,6 +1,8 @@ -/* bnx2x_reg.h: Broadcom Everest network driver. +/* bnx2x_reg.h: Qlogic Everest network driver. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -2137,6 +2139,10 @@ /* [RW 1] When this bit is set; the LLH will expect all packets to be with e1hov */ #define NIG_REG_LLH_E1HOV_MODE 0x160d8 +/* [RW 16] Outer VLAN type identifier for multi-function mode. In non + * multi-function mode; it will hold the inner VLAN type. Typically 0x8100. + */ +#define NIG_REG_LLH_E1HOV_TYPE_1 0x16028 /* [RW 1] When this bit is set; the LLH will classify the packet before sending it to the BRB or calculating WoL on it. */ #define NIG_REG_LLH_MF_MODE 0x16024 @@ -2953,7 +2959,12 @@ #define PBF_REG_TQ_OCCUPANCY_Q0 0x1403ac /* [R 13] Number of 8 bytes lines occupied in the task queue of queue 1. */ #define PBF_REG_TQ_OCCUPANCY_Q1 0x1403b0 -#define PB_REG_CONTROL 0 +/* [RW 16] One of 8 values that should be compared to type in Ethernet + * parsing. If there is a match; the field after Ethernet is the first VLAN. + * Reset value is 0x8100 which is the standard VLAN type. Note that when + * checking second VLAN; type is compared only to 0x8100. + */ +#define PBF_REG_VLAN_TYPE_0 0x15c06c /* [RW 2] Interrupt mask register #0 read/write */ #define PB_REG_PB_INT_MASK 0x28 /* [R 2] Interrupt register #0 read */ @@ -3372,6 +3383,12 @@ #define PRS_REG_TCM_CURRENT_CREDIT 0x40160 /* [R 8] debug only: TSDM current credit. Transaction based. */ #define PRS_REG_TSDM_CURRENT_CREDIT 0x4015c +/* [RW 16] One of 8 values that should be compared to type in Ethernet + * parsing. If there is a match; the field after Ethernet is the first VLAN. + * Reset value is 0x8100 which is the standard VLAN type. Note that when + * checking second VLAN; type is compared only to 0x8100. 
+ */ +#define PRS_REG_VLAN_TYPE_0 0x401a8 #define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT (0x1<<19) #define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF (0x1<<20) #define PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN (0x1<<22) @@ -7240,6 +7257,9 @@ Theotherbitsarereservedandshouldbezero*/ #define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0 #define MDIO_AN_REG_8481_MII_CTRL_FORCE_1G 0x40 #define MDIO_AN_REG_8481_LEGACY_MII_STATUS 0xffe1 +#define MDIO_AN_REG_848xx_ID_MSB 0xffe2 +#define BCM84858_PHY_ID 0x600d +#define MDIO_AN_REG_848xx_ID_LSB 0xffe3 #define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4 #define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION 0xffe6 #define MDIO_AN_REG_8481_1000T_CTRL 0xffe9 @@ -7283,31 +7303,31 @@ Theotherbitsarereservedandshouldbezero*/ #define MDIO_84833_TOP_CFG_FW_NO_EEE 0x1f81 #define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a #define MDIO_84833_SUPER_ISOLATE 0x8000 -/* These are mailbox register set used by 84833. */ -#define MDIO_84833_TOP_CFG_SCRATCH_REG0 0x4005 -#define MDIO_84833_TOP_CFG_SCRATCH_REG1 0x4006 -#define MDIO_84833_TOP_CFG_SCRATCH_REG2 0x4007 -#define MDIO_84833_TOP_CFG_SCRATCH_REG3 0x4008 -#define MDIO_84833_TOP_CFG_SCRATCH_REG4 0x4009 -#define MDIO_84833_TOP_CFG_SCRATCH_REG26 0x4037 -#define MDIO_84833_TOP_CFG_SCRATCH_REG27 0x4038 -#define MDIO_84833_TOP_CFG_SCRATCH_REG28 0x4039 -#define MDIO_84833_TOP_CFG_SCRATCH_REG29 0x403a -#define MDIO_84833_TOP_CFG_SCRATCH_REG30 0x403b -#define MDIO_84833_TOP_CFG_SCRATCH_REG31 0x403c -#define MDIO_84833_CMD_HDLR_COMMAND MDIO_84833_TOP_CFG_SCRATCH_REG0 -#define MDIO_84833_CMD_HDLR_STATUS MDIO_84833_TOP_CFG_SCRATCH_REG26 -#define MDIO_84833_CMD_HDLR_DATA1 MDIO_84833_TOP_CFG_SCRATCH_REG27 -#define MDIO_84833_CMD_HDLR_DATA2 MDIO_84833_TOP_CFG_SCRATCH_REG28 -#define MDIO_84833_CMD_HDLR_DATA3 MDIO_84833_TOP_CFG_SCRATCH_REG29 -#define MDIO_84833_CMD_HDLR_DATA4 MDIO_84833_TOP_CFG_SCRATCH_REG30 -#define MDIO_84833_CMD_HDLR_DATA5 MDIO_84833_TOP_CFG_SCRATCH_REG31 +/* These are mailbox register set used by 84833/84858. */ +#define MDIO_848xx_TOP_CFG_SCRATCH_REG0 0x4005 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG1 0x4006 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG2 0x4007 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG3 0x4008 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG4 0x4009 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG26 0x4037 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG27 0x4038 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG28 0x4039 +#define MDIO_848xx_TOP_CFG_SCRATCH_REG29 0x403a +#define MDIO_848xx_TOP_CFG_SCRATCH_REG30 0x403b +#define MDIO_848xx_TOP_CFG_SCRATCH_REG31 0x403c +#define MDIO_848xx_CMD_HDLR_COMMAND (MDIO_848xx_TOP_CFG_SCRATCH_REG0) +#define MDIO_848xx_CMD_HDLR_STATUS (MDIO_848xx_TOP_CFG_SCRATCH_REG26) +#define MDIO_848xx_CMD_HDLR_DATA1 (MDIO_848xx_TOP_CFG_SCRATCH_REG27) +#define MDIO_848xx_CMD_HDLR_DATA2 (MDIO_848xx_TOP_CFG_SCRATCH_REG28) +#define MDIO_848xx_CMD_HDLR_DATA3 (MDIO_848xx_TOP_CFG_SCRATCH_REG29) +#define MDIO_848xx_CMD_HDLR_DATA4 (MDIO_848xx_TOP_CFG_SCRATCH_REG30) +#define MDIO_848xx_CMD_HDLR_DATA5 (MDIO_848xx_TOP_CFG_SCRATCH_REG31) -/* Mailbox command set used by 84833. */ -#define PHY84833_CMD_SET_PAIR_SWAP 0x8001 -#define PHY84833_CMD_GET_EEE_MODE 0x8008 -#define PHY84833_CMD_SET_EEE_MODE 0x8009 -/* Mailbox status set used by 84833. 
*/ +/* Mailbox command set used by 84833/84858 */ +#define PHY848xx_CMD_SET_PAIR_SWAP 0x8001 +#define PHY848xx_CMD_GET_EEE_MODE 0x8008 +#define PHY848xx_CMD_SET_EEE_MODE 0x8009 +/* Mailbox status set used by 84833 only */ #define PHY84833_STATUS_CMD_RECEIVED 0x0001 #define PHY84833_STATUS_CMD_IN_PROGRESS 0x0002 #define PHY84833_STATUS_CMD_COMPLETE_PASS 0x0004 @@ -7318,6 +7338,13 @@ Theotherbitsarereservedandshouldbezero*/ #define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080 #define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5 +/* Mailbox status set used by 84858 only */ +#define PHY84858_STATUS_CMD_RECEIVED 0x0001 +#define PHY84858_STATUS_CMD_IN_PROGRESS 0x0002 +#define PHY84858_STATUS_CMD_COMPLETE_PASS 0x0004 +#define PHY84858_STATUS_CMD_COMPLETE_ERROR 0x0008 +#define PHY84858_STATUS_CMD_SYSTEM_BUSY 0xbbbb + /* Warpcore clause 45 addressing */ #define MDIO_WC_DEVAD 0x3 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 4ad415ac8cfe..265fe0a90adc 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -1,15 +1,17 @@ -/* bnx2x_sp.c: Broadcom Everest network driver. +/* bnx2x_sp.c: Qlogic Everest network driver. * - * Copyright (c) 2011-2013 Broadcom Corporation + * Copyright 2011-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and Qlogic execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available - * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL"). * * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written + * software in any way with any other Qlogic software provided under a + * license other than the GPL, without Qlogic's express prior written * consent. 
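The mailbox rework above renames the 84833 scratch-register map and command set to 848xx so the 84858 can share the same command handler, while each part keeps its own status words (the 84858 adds a 0xbbbb "system busy" code). A rough sketch of how a caller might interpret the completion word; the real handler also arbitrates access to the mailbox and writes the command arguments first, which is omitted here, and only the numeric constants are taken from the patch.

#include <stdint.h>
#include <stdio.h>

#define PHY84833_STATUS_CMD_COMPLETE_PASS   0x0004
#define PHY84833_STATUS_CMD_COMPLETE_ERROR  0x0008
#define PHY84858_STATUS_CMD_SYSTEM_BUSY     0xbbbb

enum cmd_result { CMD_PENDING, CMD_OK, CMD_FAILED };

/* Both families report pass/fail the same way; the 84858 can additionally
 * report "system busy", which simply means poll again later. */
static enum cmd_result decode_848xx_status(int is_84858, uint16_t status)
{
	if (is_84858 && status == PHY84858_STATUS_CMD_SYSTEM_BUSY)
		return CMD_PENDING;
	if (status == PHY84833_STATUS_CMD_COMPLETE_PASS)
		return CMD_OK;
	if (status == PHY84833_STATUS_CMD_COMPLETE_ERROR)
		return CMD_FAILED;
	return CMD_PENDING;	/* RECEIVED / IN_PROGRESS and friends */
}

int main(void)
{
	printf("busy=%d pass=%d fail=%d\n",
	       decode_848xx_status(1, 0xbbbb),
	       decode_848xx_status(0, 0x0004),
	       decode_848xx_status(0, 0x0008));
	return 0;
}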
* * Maintained by: Ariel Elior <ariel.elior@qlogic.com> @@ -4060,8 +4062,14 @@ static int bnx2x_setup_rss(struct bnx2x *bp, if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags)) caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY; - if (test_bit(BNX2X_RSS_GRE_INNER_HDRS, &p->rss_flags)) - caps |= ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY; + if (test_bit(BNX2X_RSS_IPV4_VXLAN, &p->rss_flags)) + caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY; + + if (test_bit(BNX2X_RSS_IPV6_VXLAN, &p->rss_flags)) + caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY; + + if (test_bit(BNX2X_RSS_TUNN_INNER_HDRS, &p->rss_flags)) + caps |= ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY; /* RSS keys */ if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) { @@ -5669,10 +5677,14 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp, rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag); rdata->path_id = BP_PATH(bp); rdata->network_cos_mode = start_params->network_cos_mode; - rdata->tunnel_mode = start_params->tunnel_mode; - rdata->gre_tunnel_type = start_params->gre_tunnel_type; - rdata->inner_gre_rss_en = start_params->inner_gre_rss_en; - rdata->vxlan_dst_port = cpu_to_le16(4789); + + rdata->vxlan_dst_port = cpu_to_le16(start_params->vxlan_dst_port); + rdata->geneve_dst_port = cpu_to_le16(start_params->geneve_dst_port); + rdata->inner_clss_l2gre = start_params->inner_clss_l2gre; + rdata->inner_clss_l2geneve = start_params->inner_clss_l2geneve; + rdata->inner_clss_vxlan = start_params->inner_clss_vxlan; + rdata->inner_rss = start_params->inner_rss; + rdata->sd_accept_mf_clss_fail = start_params->class_fail; if (start_params->class_fail_ethtype) { rdata->sd_accept_mf_clss_fail_match_ethtype = 1; @@ -5690,6 +5702,14 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp, cpu_to_le16(0x8100); rdata->no_added_tags = start_params->no_added_tags; + + rdata->c2s_pri_tt_valid = start_params->c2s_pri_valid; + if (rdata->c2s_pri_tt_valid) { + memcpy(rdata->c2s_pri_trans_table.val, + start_params->c2s_pri, + MAX_VLAN_PRIORITIES); + rdata->c2s_pri_default = start_params->c2s_pri_default; + } /* No need for an explicit memory barrier here as long we would * need to ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory @@ -5750,15 +5770,22 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp, if (test_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG, &switch_update_params->changes)) { rdata->update_tunn_cfg_flg = 1; - if (test_bit(BNX2X_F_UPDATE_TUNNEL_CLSS_EN, + if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE, + &switch_update_params->changes)) + rdata->inner_clss_l2gre = 1; + if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN, + &switch_update_params->changes)) + rdata->inner_clss_vxlan = 1; + if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE, &switch_update_params->changes)) - rdata->tunn_clss_en = 1; - if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN, + rdata->inner_clss_l2geneve = 1; + if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS, &switch_update_params->changes)) - rdata->inner_gre_rss_en = 1; - rdata->tunnel_mode = switch_update_params->tunnel_mode; - rdata->gre_tunnel_type = switch_update_params->gre_tunnel_type; - rdata->vxlan_dst_port = cpu_to_le16(4789); + rdata->inner_rss = 1; + rdata->vxlan_dst_port = + cpu_to_le16(switch_update_params->vxlan_dst_port); + rdata->geneve_dst_port = + cpu_to_le16(switch_update_params->geneve_dst_port); } rdata->echo = SWITCH_UPDATE; @@ -5885,6 +5912,8 @@ static inline int 
bnx2x_func_send_tx_start(struct bnx2x *bp, rdata->traffic_type_to_priority_cos[i] = tx_start_params->traffic_type_to_priority_cos[i]; + for (i = 0; i < MAX_TRAFFIC_TYPES; i++) + rdata->dcb_outer_pri[i] = tx_start_params->dcb_outer_pri[i]; /* No need for an explicit memory barrier here as long as we * ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 86baecb7c60c..324e9f986314 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -1,15 +1,17 @@ -/* bnx2x_sp.h: Broadcom Everest network driver. +/* bnx2x_sp.h: Qlogic Everest network driver. * - * Copyright (c) 2011-2013 Broadcom Corporation + * Copyright 2011-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and Qlogic execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available - * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL"). * * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written + * software in any way with any other Qlogic software provided under a + * license other than the GPL, without Qlogic's express prior written * consent. * * Maintained by: Ariel Elior <ariel.elior@qlogic.com> @@ -711,7 +713,10 @@ enum { BNX2X_RSS_IPV6, BNX2X_RSS_IPV6_TCP, BNX2X_RSS_IPV6_UDP, - BNX2X_RSS_GRE_INNER_HDRS, + + BNX2X_RSS_IPV4_VXLAN, + BNX2X_RSS_IPV6_VXLAN, + BNX2X_RSS_TUNN_INNER_HDRS, }; struct bnx2x_config_rss_params { @@ -1105,8 +1110,10 @@ enum { BNX2X_F_UPDATE_VLAN_FORCE_PRIO_CHNG, BNX2X_F_UPDATE_VLAN_FORCE_PRIO_FLAG, BNX2X_F_UPDATE_TUNNEL_CFG_CHNG, - BNX2X_F_UPDATE_TUNNEL_CLSS_EN, - BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN, + BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE, + BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN, + BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE, + BNX2X_F_UPDATE_TUNNEL_INNER_RSS, }; /* Allowed Function states */ @@ -1171,19 +1178,23 @@ struct bnx2x_func_start_params { /* Function cos mode */ u8 network_cos_mode; - /* TUNN_MODE_NONE/TUNN_MODE_VXLAN/TUNN_MODE_GRE */ - u8 tunnel_mode; + /* UDP dest port for VXLAN */ + u16 vxlan_dst_port; - /* tunneling classification enablement */ - u8 tunn_clss_en; + /* UDP dest port for Geneve */ + u16 geneve_dst_port; - /* NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */ - u8 gre_tunnel_type; + /* Enable inner Rx classifications for L2GRE packets */ + u8 inner_clss_l2gre; - /* Enables Inner GRE RSS on the function, depends on the client RSS - * capailities - */ - u8 inner_gre_rss_en; + /* Enable inner Rx classifications for L2-Geneve packets */ + u8 inner_clss_l2geneve; + + /* Enable inner Rx classification for vxlan packets */ + u8 inner_clss_vxlan; + + /* Enable RSS according to inner header */ + u8 inner_rss; /* Allows accepting of packets failing MF classification, possibly * only matching a given ethertype @@ -1200,6 +1211,11 @@ struct bnx2x_func_start_params { /* Prevent inner vlans from being added by FW */ u8 no_added_tags; + + /* Inner-to-Outer vlan priority mapping */ + 
u8 c2s_pri[MAX_VLAN_PRIORITIES]; + u8 c2s_pri_default; + u8 c2s_pri_valid; }; struct bnx2x_func_switch_update_params { @@ -1207,8 +1223,8 @@ struct bnx2x_func_switch_update_params { u16 vlan; u16 vlan_eth_type; u8 vlan_force_prio; - u8 tunnel_mode; - u8 gre_tunnel_type; + u16 vxlan_dst_port; + u16 geneve_dst_port; }; struct bnx2x_func_afex_update_params { @@ -1229,6 +1245,7 @@ struct bnx2x_func_tx_start_params { u8 dcb_enabled; u8 dcb_version; u8 dont_add_pri_0_en; + u8 dcb_outer_pri[MAX_TRAFFIC_TYPES]; }; struct bnx2x_func_set_timesync_params { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index f67348d16966..5b243bcec3fa 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1,15 +1,17 @@ -/* bnx2x_sriov.c: Broadcom Everest network driver. +/* bnx2x_sriov.c: QLogic Everest network driver. * * Copyright 2009-2013 Broadcom Corporation + * Copyright 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and QLogic execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). * * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written + * software in any way with any other QLogic software provided under a + * license other than the GPL, without QLogic's express prior written * consent. * * Maintained by: Ariel Elior <ariel.elior@qlogic.com> @@ -1338,6 +1340,9 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, mutex_init(&bp->vfdb->bulletin_mutex); + if (SHMEM2_HAS(bp, sriov_switch_mode)) + SHMEM2_WR(bp, sriov_switch_mode, SRIOV_SWITCH_MODE_VEB); + return 0; failed: DP(BNX2X_MSG_IOV, "Failed err=%d\n", err); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index 66ee62a0401a..2011205ec8d4 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -1,15 +1,17 @@ -/* bnx2x_sriov.h: Broadcom Everest network driver. +/* bnx2x_sriov.h: QLogic Everest network driver. * * Copyright 2009-2013 Broadcom Corporation + * Copyright 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and QLogic execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). * * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written + * software in any way with any other QLogic software provided under a + * license other than the GPL, without QLogic's express prior written * consent. 
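The bnx2x_func_start_params rework above drops the single tunnel_mode/gre_tunnel_type pair in favour of explicit per-encapsulation fields: UDP destination ports for VXLAN and Geneve plus separate inner-classification and inner-RSS enables. A cut-down illustration of the resulting shape; the field names follow the patch, while the port numbers (4789, 6081) are just the usual IANA defaults and are the caller's choice in the real API.

#include <stdint.h>
#include <stdio.h>

/* Cut-down copy of the reworked tunnel fields; the real structure has
 * many more members. */
struct tunnel_cfg {
	uint16_t vxlan_dst_port;	/* UDP dest port for VXLAN */
	uint16_t geneve_dst_port;	/* UDP dest port for Geneve */
	uint8_t  inner_clss_l2gre;	/* classify inner headers of L2GRE */
	uint8_t  inner_clss_l2geneve;
	uint8_t  inner_clss_vxlan;
	uint8_t  inner_rss;		/* RSS on inner headers */
};

int main(void)
{
	struct tunnel_cfg cfg = {
		.vxlan_dst_port   = 4789,
		.geneve_dst_port  = 6081,
		.inner_clss_vxlan = 1,
		.inner_rss        = 1,
	};

	printf("vxlan %u geneve %u inner_rss %u\n",
	       cfg.vxlan_dst_port, cfg.geneve_dst_port, cfg.inner_rss);
	return 0;
}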
* * Maintained by: Ariel Elior <ariel.elior@qlogic.com> diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 69d699f0730a..7e0919aa450e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -1,6 +1,8 @@ -/* bnx2x_stats.c: Broadcom Everest network driver. +/* bnx2x_stats.c: QLogic Everest network driver. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h index 965539a9dabe..b2644ed13d06 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h @@ -1,6 +1,8 @@ -/* bnx2x_stats.h: Broadcom Everest network driver. +/* bnx2x_stats.h: QLogic Everest network driver. * * Copyright (c) 2007-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 06b8c0d8fd3b..31b79bd13292 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -1,15 +1,17 @@ -/* bnx2x_vfpf.c: Broadcom Everest network driver. +/* bnx2x_vfpf.c: QLogic Everest network driver. * * Copyright 2009-2013 Broadcom Corporation + * Copyright 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and QLogic execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). * * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written + * software in any way with any other QLogic software provided under a + * license other than the GPL, without QLogic's express prior written * consent. * * Maintained by: Ariel Elior <ariel.elior@qlogic.com> diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h index b86479fc0d2f..f0110f4bd0a3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h @@ -1,16 +1,22 @@ -/* bnx2x_vfpf.h: Broadcom Everest network driver. +/* bnx2x_vfpf.h: Qlogic Everest network driver. * * Copyright (c) 2011-2013 Broadcom Corporation + * Copyright (c) 2014 QLogic Corporation + * All rights reserved * - * Unless you and Broadcom execute a separate written software license + * Unless you and Qlogic execute a separate written software license * agreement governing use of this software, this software is licensed to you - * under the terms of the GNU General Public License version 2, available - * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). 
+ * under the terms of the GNU General Public License version 2 (the “GPL”), + * available at http://www.gnu.org/licenses/gpl-2.0.html, with the following + * added to such license: * - * Notwithstanding the above, under no circumstances may you combine this - * software in any way with any other Broadcom software provided under a - * license other than the GPL, without Broadcom's express prior written - * consent. + * As a special exception, the copyright holders of this software give you + * permission to link this software with independent modules, and to copy and + * distribute the resulting executable under terms of your choice, provided that + * you also meet, for each linked independent module, the terms and conditions + * of the license of that module. An independent module is a module which is + * not derived from this software. The special exception does not apply to any + * modifications of the software. * * Maintained by: Ariel Elior <ariel.elior@qlogic.com> * Written by: Ariel Elior <ariel.elior@qlogic.com> diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 64c1e9db6b0b..5bf7ce0ae221 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -907,9 +907,8 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv, } bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); - if (mode == GENET_POWER_PASSIVE) - bcmgenet_mii_reset(priv->dev); + bcmgenet_phy_power_set(priv->dev, true); } /* ioctl handle special commands that are not present in ethtool. */ @@ -1725,7 +1724,7 @@ static int init_umac(struct bcmgenet_priv *priv) int0_enable |= UMAC_IRQ_TXDMA_DONE; /* Monitor cable plug/unplugged event for internal PHY */ - if (phy_is_internal(priv->phydev)) { + if (priv->internal_phy) { int0_enable |= UMAC_IRQ_LINK_EVENT; } else if (priv->ext_phy) { int0_enable |= UMAC_IRQ_LINK_EVENT; @@ -2632,7 +2631,7 @@ static int bcmgenet_open(struct net_device *dev) /* If this is an internal GPHY, power it back on now, before UniMAC is * brought out of reset as absolutely no UniMAC activity is allowed */ - if (phy_is_internal(priv->phydev)) + if (priv->internal_phy) bcmgenet_power_up(priv, GENET_POWER_PASSIVE); /* take MAC out of reset */ @@ -2651,7 +2650,7 @@ static int bcmgenet_open(struct net_device *dev) bcmgenet_set_hw_addr(priv, dev->dev_addr); - if (phy_is_internal(priv->phydev)) { + if (priv->internal_phy) { reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); reg |= EXT_ENERGY_DET_MASK; bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); @@ -2687,18 +2686,20 @@ static int bcmgenet_open(struct net_device *dev) goto err_irq0; } - /* Re-configure the port multiplexer towards the PHY device */ - bcmgenet_mii_config(priv->dev, false); - - phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup, - priv->phy_interface); + ret = bcmgenet_mii_probe(dev); + if (ret) { + netdev_err(dev, "failed to connect to PHY\n"); + goto err_irq1; + } bcmgenet_netif_start(dev); return 0; +err_irq1: + free_irq(priv->irq1, priv); err_irq0: - free_irq(priv->irq0, dev); + free_irq(priv->irq0, priv); err_fini_dma: bcmgenet_fini_dma(priv); err_clk_disable: @@ -2757,7 +2758,7 @@ static int bcmgenet_close(struct net_device *dev) free_irq(priv->irq0, priv); free_irq(priv->irq1, priv); - if (phy_is_internal(priv->phydev)) + if (priv->internal_phy) ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); if (!IS_ERR(priv->clk)) @@ -3319,7 +3320,7 @@ static int bcmgenet_suspend(struct device *d) if 
(device_may_wakeup(d) && priv->wolopts) { ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC); clk_prepare_enable(priv->clk_wol); - } else if (phy_is_internal(priv->phydev)) { + } else if (priv->internal_phy) { ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); } @@ -3348,7 +3349,7 @@ static int bcmgenet_resume(struct device *d) /* If this is an internal GPHY, power it back on now, before UniMAC is * brought out of reset as absolutely no UniMAC activity is allowed */ - if (phy_is_internal(priv->phydev)) + if (priv->internal_phy) bcmgenet_power_up(priv, GENET_POWER_PASSIVE); bcmgenet_umac_reset(priv); @@ -3363,14 +3364,14 @@ static int bcmgenet_resume(struct device *d) phy_init_hw(priv->phydev); /* Speed settings must be restored */ - bcmgenet_mii_config(priv->dev, false); + bcmgenet_mii_config(priv->dev); /* disable ethernet MAC while updating its registers */ umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); bcmgenet_set_hw_addr(priv, dev->dev_addr); - if (phy_is_internal(priv->phydev)) { + if (priv->internal_phy) { reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); reg |= EXT_ENERGY_DET_MASK; bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 6159deab8c98..7299d1075422 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -593,6 +593,7 @@ struct bcmgenet_priv { /* MDIO bus variables */ wait_queue_head_t wq; struct phy_device *phydev; + bool internal_phy; struct device_node *phy_dn; struct device_node *mdio_dn; struct mii_bus *mii_bus; @@ -670,9 +671,9 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF); /* MDIO routines */ int bcmgenet_mii_init(struct net_device *dev); -int bcmgenet_mii_config(struct net_device *dev, bool init); +int bcmgenet_mii_config(struct net_device *dev); +int bcmgenet_mii_probe(struct net_device *dev); void bcmgenet_mii_exit(struct net_device *dev); -void bcmgenet_mii_reset(struct net_device *dev); void bcmgenet_phy_power_set(struct net_device *dev, bool enable); void bcmgenet_mii_setup(struct net_device *dev); diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index adf23d2ac488..0802cd9d2424 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -163,16 +163,6 @@ void bcmgenet_mii_setup(struct net_device *dev) phy_print_status(phydev); } -void bcmgenet_mii_reset(struct net_device *dev) -{ - struct bcmgenet_priv *priv = netdev_priv(dev); - - if (priv->phydev) { - phy_init_hw(priv->phydev); - phy_start_aneg(priv->phydev); - } -} - void bcmgenet_phy_power_set(struct net_device *dev, bool enable) { struct bcmgenet_priv *priv = netdev_priv(dev); @@ -215,7 +205,6 @@ static void bcmgenet_internal_phy_setup(struct net_device *dev) reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); reg |= EXT_PWR_DN_EN_LD; bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); - bcmgenet_mii_reset(dev); } static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) @@ -228,7 +217,7 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL); } -int bcmgenet_mii_config(struct net_device *dev, bool init) +int bcmgenet_mii_config(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); struct phy_device *phydev = priv->phydev; @@ -238,10 +227,10 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) u32 port_ctrl; u32 reg; - priv->ext_phy = 
!phy_is_internal(priv->phydev) && + priv->ext_phy = !priv->internal_phy && (priv->phy_interface != PHY_INTERFACE_MODE_MOCA); - if (phy_is_internal(priv->phydev)) + if (priv->internal_phy) priv->phy_interface = PHY_INTERFACE_MODE_NA; switch (priv->phy_interface) { @@ -259,7 +248,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL); - if (phy_is_internal(priv->phydev)) { + if (priv->internal_phy) { phy_name = "internal PHY"; bcmgenet_internal_phy_setup(dev); } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { @@ -321,13 +310,12 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); } - if (init) - dev_info(kdev, "configuring instance for %s\n", phy_name); + dev_info_once(kdev, "configuring instance for %s\n", phy_name); return 0; } -static int bcmgenet_mii_probe(struct net_device *dev) +int bcmgenet_mii_probe(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); struct device_node *dn = priv->pdev->dev.of_node; @@ -345,22 +333,6 @@ static int bcmgenet_mii_probe(struct net_device *dev) priv->old_pause = -1; if (dn) { - if (priv->phydev) { - pr_info("PHY already attached\n"); - return 0; - } - - /* In the case of a fixed PHY, the DT node associated - * to the PHY is the Ethernet MAC DT node. - */ - if (!priv->phy_dn && of_phy_is_fixed_link(dn)) { - ret = of_phy_register_fixed_link(dn); - if (ret) - return ret; - - priv->phy_dn = of_node_get(dn); - } - phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, phy_flags, priv->phy_interface); if (!phydev) { @@ -386,7 +358,7 @@ static int bcmgenet_mii_probe(struct net_device *dev) * PHY speed which is needed for bcmgenet_mii_config() to configure * things appropriately. */ - ret = bcmgenet_mii_config(dev, true); + ret = bcmgenet_mii_config(dev); if (ret) { phy_disconnect(priv->phydev); return ret; @@ -397,14 +369,11 @@ static int bcmgenet_mii_probe(struct net_device *dev) /* The internal PHY has its link interrupts routed to the * Ethernet MAC ISRs */ - if (phy_is_internal(priv->phydev)) + if (priv->internal_phy) priv->mii_bus->irq[phydev->addr] = PHY_IGNORE_INTERRUPT; else priv->mii_bus->irq[phydev->addr] = PHY_POLL; - pr_info("attached PHY at address %d [%s]\n", - phydev->addr, phydev->drv->name); - return 0; } @@ -490,7 +459,9 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv) { struct device_node *dn = priv->pdev->dev.of_node; struct device *kdev = &priv->pdev->dev; + const char *phy_mode_str = NULL; char *compat; + int phy_mode; int ret; compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version); @@ -513,8 +484,36 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv) /* Fetch the PHY phandle */ priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0); + /* In the case of a fixed PHY, the DT node associated + * to the PHY is the Ethernet MAC DT node. + */ + if (!priv->phy_dn && of_phy_is_fixed_link(dn)) { + ret = of_phy_register_fixed_link(dn); + if (ret) + return ret; + + priv->phy_dn = of_node_get(dn); + } + /* Get the link mode */ - priv->phy_interface = of_get_phy_mode(dn); + phy_mode = of_get_phy_mode(dn); + priv->phy_interface = phy_mode; + + /* We need to specifically look up whether this PHY interface is internal + * or not *before* we even try to probe the PHY driver over MDIO as we + * may have shut down the internal PHY for power saving purposes. 
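As the comment above explains, GENET must know whether the PHY is internal before any MDIO traffic happens, and "internal" is not one of the phy-mode strings of_get_phy_mode() recognises here, so the fallback just below re-reads the property as a string. A standalone sketch of that classification; the list of "known" modes is illustrative, only the strcasecmp("internal") fallback mirrors the hunk.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>	/* strcasecmp */

/* Stand-in for the DT lookup: standard mode strings resolve normally,
 * anything else falls back to a string compare against "internal". */
static int classify_phy_mode(const char *phy_mode_str, bool *internal_phy)
{
	static const char * const known[] = { "mii", "rev-mii", "rgmii", "rgmii-txid", "moca" };

	*internal_phy = false;
	for (unsigned i = 0; i < sizeof(known) / sizeof(known[0]); i++)
		if (!strcmp(phy_mode_str, known[i]))
			return (int)i;		/* a normal interface mode */

	if (!strcasecmp(phy_mode_str, "internal")) {
		*internal_phy = true;
		return -1;			/* PHY_INTERFACE_MODE_NA stand-in */
	}
	return -2;				/* invalid phy-mode property */
}

int main(void)
{
	const char *samples[] = { "rgmii", "internal", "bogus" };
	bool internal;

	for (unsigned i = 0; i < 3; i++) {
		int mode = classify_phy_mode(samples[i], &internal);

		printf("%-9s -> mode %d, internal=%d\n", samples[i], mode, (int)internal);
	}
	return 0;
}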
+ */ + if (phy_mode < 0) { + ret = of_property_read_string(dn, "phy-mode", &phy_mode_str); + if (ret < 0) { + dev_err(kdev, "invalid PHY mode property\n"); + return ret; + } + + priv->phy_interface = PHY_INTERFACE_MODE_NA; + if (!strcasecmp(phy_mode_str, "internal")) + priv->internal_phy = true; + } return 0; } @@ -615,10 +614,6 @@ int bcmgenet_mii_init(struct net_device *dev) ret = bcmgenet_mii_bus_init(priv); if (ret) - goto out_free; - - ret = bcmgenet_mii_probe(dev); - if (ret) goto out; return 0; @@ -626,7 +621,6 @@ int bcmgenet_mii_init(struct net_device *dev) out: of_node_put(priv->phy_dn); mdiobus_unregister(priv->mii_bus); -out_free: kfree(priv->mii_bus->irq); mdiobus_free(priv->mii_bus); return ret; diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index caeb39561567..a4e3f8655cb8 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -2741,8 +2741,7 @@ static const struct macb_config emac_config = { static const struct macb_config zynqmp_config = { - .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE | - MACB_CAPS_JUMBO, + .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO, .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, @@ -2750,8 +2749,7 @@ static const struct macb_config zynqmp_config = { }; static const struct macb_config zynq_config = { - .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE | - MACB_CAPS_NO_GIGABIT_HALF, + .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF, .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 629f75d70353..58de4443eac0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -767,6 +767,7 @@ struct adapter { bool tid_release_task_busy; struct dentry *debugfs_root; + u32 use_bd; /* Use SGE Back Door intfc for reading SGE Contexts */ spinlock_t stats_lock; spinlock_t win0_lock ____cacheline_aligned_in_smp; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c index 6074680bc985..052c660aca80 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c @@ -31,6 +31,15 @@ static const char * const dcb_ver_array[] = { "Auto Negotiated" }; +static inline bool cxgb4_dcb_state_synced(enum cxgb4_dcb_state state) +{ + if (state == CXGB4_DCB_STATE_FW_ALLSYNCED || + state == CXGB4_DCB_STATE_HOST) + return true; + else + return false; +} + /* Initialize a port's Data Center Bridging state. Typically used after a * Link Down event. 
*/ @@ -603,7 +612,7 @@ static void cxgb4_getpfccfg(struct net_device *dev, int priority, u8 *pfccfg) struct port_info *pi = netdev2pinfo(dev); struct port_dcb_info *dcb = &pi->dcb; - if (dcb->state != CXGB4_DCB_STATE_FW_ALLSYNCED || + if (!cxgb4_dcb_state_synced(dcb->state) || priority >= CXGB4_MAX_PRIORITY) *pfccfg = 0; else @@ -620,7 +629,7 @@ static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg) struct adapter *adap = pi->adapter; int err; - if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED || + if (!cxgb4_dcb_state_synced(pi->dcb.state) || priority >= CXGB4_MAX_PRIORITY) return; @@ -732,7 +741,7 @@ static u8 cxgb4_getpfcstate(struct net_device *dev) { struct port_info *pi = netdev2pinfo(dev); - if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED) + if (!cxgb4_dcb_state_synced(pi->dcb.state)) return false; return pi->dcb.pfcen != 0; @@ -756,7 +765,7 @@ static int __cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id, struct adapter *adap = pi->adapter; int i; - if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED) + if (!cxgb4_dcb_state_synced(pi->dcb.state)) return 0; for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) { @@ -794,7 +803,9 @@ static int __cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id, */ static int cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id) { - return __cxgb4_getapp(dev, app_idtype, app_id, 0); + /* Convert app_idtype to firmware format before querying */ + return __cxgb4_getapp(dev, app_idtype == DCB_APP_IDTYPE_ETHTYPE ? + app_idtype : 3, app_id, 0); } /* Write a new Application User Priority Map for the specified Application ID @@ -808,7 +819,7 @@ static int __cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id, int i, err; - if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED) + if (!cxgb4_dcb_state_synced(pi->dcb.state)) return -EINVAL; /* DCB info gets thrown away on link up */ @@ -896,10 +907,11 @@ cxgb4_ieee_negotiation_complete(struct net_device *dev, struct port_info *pi = netdev2pinfo(dev); struct port_dcb_info *dcb = &pi->dcb; - if (dcb_subtype && !(dcb->msgs & dcb_subtype)) - return 0; + if (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED) + if (dcb_subtype && !(dcb->msgs & dcb_subtype)) + return 0; - return (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED && + return (cxgb4_dcb_state_synced(dcb->state) && (dcb->supported & DCB_CAP_DCBX_VER_IEEE)); } @@ -1057,7 +1069,7 @@ static u8 cxgb4_setdcbx(struct net_device *dev, u8 dcb_request) /* Can't enable DCB if we haven't successfully negotiated it. 
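One subtle point in these DCB hunks: the CEE getapp path above maps anything other than DCB_APP_IDTYPE_ETHTYPE to the literal 3 before querying the firmware, and the peer-app table a bit further below converts back with sel_field + 1. That is consistent with the firmware encoding app selectors as the IEEE 802.1Qaz selector minus one, which is an inference from this patch rather than something it states. A sketch of the two conversions under that assumption:

#include <stdio.h>

/* dcbnl CEE app id types (uapi/linux/dcbnl.h). */
#define DCB_APP_IDTYPE_ETHTYPE  0
#define DCB_APP_IDTYPE_PORTNUM  1

/* Assumption: firmware selector = IEEE selector - 1, so 0 is Ethertype
 * and 3 is "any L4 port". */
static int cee_idtype_to_fw_sel(int app_idtype)
{
	return app_idtype == DCB_APP_IDTYPE_ETHTYPE ? app_idtype : 3;
}

static int fw_sel_to_ieee_sel(int fw_sel)
{
	return fw_sel + 1;	/* 0 -> 1 (Ethertype), 3 -> 4 (any port) */
}

int main(void)
{
	printf("CEE ethtype -> fw %d\n", cee_idtype_to_fw_sel(DCB_APP_IDTYPE_ETHTYPE));
	printf("CEE portnum -> fw %d\n", cee_idtype_to_fw_sel(DCB_APP_IDTYPE_PORTNUM));
	printf("fw 0 -> IEEE %d, fw 3 -> IEEE %d\n",
	       fw_sel_to_ieee_sel(0), fw_sel_to_ieee_sel(3));
	return 0;
}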
*/ - if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED) + if (!cxgb4_dcb_state_synced(pi->dcb.state)) return 1; /* There's currently no mechanism to allow for the firmware DCBX @@ -1080,7 +1092,7 @@ static int cxgb4_getpeer_app(struct net_device *dev, struct adapter *adap = pi->adapter; int i, err = 0; - if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED) + if (!cxgb4_dcb_state_synced(pi->dcb.state)) return 1; info->willing = 0; @@ -1114,7 +1126,7 @@ static int cxgb4_getpeerapp_tbl(struct net_device *dev, struct dcb_app *table) struct adapter *adap = pi->adapter; int i, err = 0; - if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED) + if (!cxgb4_dcb_state_synced(pi->dcb.state)) return 1; for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) { @@ -1133,7 +1145,7 @@ static int cxgb4_getpeerapp_tbl(struct net_device *dev, struct dcb_app *table) if (!pcmd.u.dcb.app_priority.protocolid) break; - table[i].selector = pcmd.u.dcb.app_priority.sel_field; + table[i].selector = (pcmd.u.dcb.app_priority.sel_field + 1); table[i].protocol = be16_to_cpu(pcmd.u.dcb.app_priority.protocolid); table[i].priority = @@ -1181,6 +1193,8 @@ static int cxgb4_cee_peer_getpg(struct net_device *dev, struct cee_pg *pg) for (i = 0; i < CXGB4_MAX_PRIORITY; i++) pg->pg_bw[i] = pcmd.u.dcb.pgrate.pgrate[i]; + pg->tcs_supported = pcmd.u.dcb.pgrate.num_tcs_supported; + return 0; } @@ -1198,6 +1212,8 @@ static int cxgb4_cee_peer_getpfc(struct net_device *dev, struct cee_pfc *pfc) */ pfc->pfc_en = bitswap_1(pi->dcb.pfcen); + pfc->tcs_supported = pi->dcb.pfc_num_tcs_supported; + return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index a11485fbb33f..f701a6f20c6a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -151,6 +151,45 @@ static int cim_la_show_3in1(struct seq_file *seq, void *v, int idx) return 0; } +static int cim_la_show_t6(struct seq_file *seq, void *v, int idx) +{ + if (v == SEQ_START_TOKEN) { + seq_puts(seq, "Status Inst Data PC LS0Stat " + "LS0Addr LS0Data LS1Stat LS1Addr LS1Data\n"); + } else { + const u32 *p = v; + + seq_printf(seq, " %02x %04x%04x %04x%04x %04x%04x %08x %08x %08x %08x %08x %08x\n", + (p[9] >> 16) & 0xff, /* Status */ + p[9] & 0xffff, p[8] >> 16, /* Inst */ + p[8] & 0xffff, p[7] >> 16, /* Data */ + p[7] & 0xffff, p[6] >> 16, /* PC */ + p[2], p[1], p[0], /* LS0 Stat, Addr and Data */ + p[5], p[4], p[3]); /* LS1 Stat, Addr and Data */ + } + return 0; +} + +static int cim_la_show_pc_t6(struct seq_file *seq, void *v, int idx) +{ + if (v == SEQ_START_TOKEN) { + seq_puts(seq, "Status Inst Data PC\n"); + } else { + const u32 *p = v; + + seq_printf(seq, " %02x %08x %08x %08x\n", + p[3] & 0xff, p[2], p[1], p[0]); + seq_printf(seq, " %02x %02x%06x %02x%06x %02x%06x\n", + (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8, + p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8); + seq_printf(seq, " %02x %04x%04x %04x%04x %04x%04x\n", + (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16, + p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff, + p[6] >> 16); + } + return 0; +} + static int cim_la_open(struct inode *inode, struct file *file) { int ret; @@ -162,9 +201,18 @@ static int cim_la_open(struct inode *inode, struct file *file) if (ret) return ret; - p = seq_open_tab(file, adap->params.cim_la_size / 8, 8 * sizeof(u32), 1, - cfg & UPDBGLACAPTPCONLY_F ? 
- cim_la_show_3in1 : cim_la_show); + if (is_t6(adap->params.chip)) { + /* +1 to account for integer division of CIMLA_SIZE/10 */ + p = seq_open_tab(file, (adap->params.cim_la_size / 10) + 1, + 10 * sizeof(u32), 1, + cfg & UPDBGLACAPTPCONLY_F ? + cim_la_show_pc_t6 : cim_la_show_t6); + } else { + p = seq_open_tab(file, adap->params.cim_la_size / 8, + 8 * sizeof(u32), 1, + cfg & UPDBGLACAPTPCONLY_F ? cim_la_show_3in1 : + cim_la_show); + } if (!p) return -ENOMEM; @@ -2340,6 +2388,8 @@ int t4_setup_debugfs(struct adapter *adap) de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap, &flash_debugfs_fops, adap->params.sf_size); + debugfs_create_bool("use_backdoor", S_IWUSR | S_IRUSR, + adap->debugfs_root, &adap->use_bd); return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 351f3b1bf800..d582e175dfb6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -4757,7 +4757,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) */ cfg_queues(adapter); - adapter->l2t = t4_init_l2t(); + adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end); if (!adapter->l2t) { /* We tolerate a lack of L2T, giving up some functionality */ dev_warn(&pdev->dev, "could not allocate L2T, continuing\n"); diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index 252efc29321f..ac27898c6ab0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -51,24 +51,17 @@ #define VLAN_NONE 0xfff /* identifies sync vs async L2T_WRITE_REQs */ -#define F_SYNC_WR (1 << 12) - -enum { - L2T_STATE_VALID, /* entry is up to date */ - L2T_STATE_STALE, /* entry may be used but needs revalidation */ - L2T_STATE_RESOLVING, /* entry needs address resolution */ - L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */ - - /* when state is one of the below the entry is not hashed */ - L2T_STATE_SWITCHING, /* entry is being used by a switching filter */ - L2T_STATE_UNUSED /* entry not in use */ -}; +#define SYNC_WR_S 12 +#define SYNC_WR_V(x) ((x) << SYNC_WR_S) +#define SYNC_WR_F SYNC_WR_V(1) struct l2t_data { + unsigned int l2t_start; /* start index of our piece of the L2T */ + unsigned int l2t_size; /* number of entries in l2tab */ rwlock_t lock; atomic_t nfree; /* number of free entries */ struct l2t_entry *rover; /* starting point for next allocation */ - struct l2t_entry l2tab[L2T_SIZE]; + struct l2t_entry l2tab[0]; /* MUST BE LAST */ }; static inline unsigned int vlan_prio(const struct l2t_entry *e) @@ -85,29 +78,36 @@ static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e) /* * To avoid having to check address families we do not allow v4 and v6 * neighbors to be on the same hash chain. We keep v4 entries in the first - * half of available hash buckets and v6 in the second. + * half of available hash buckets and v6 in the second. We need at least two + * entries in our L2T for this scheme to work. 
*/ enum { - L2T_SZ_HALF = L2T_SIZE / 2, - L2T_HASH_MASK = L2T_SZ_HALF - 1 + L2T_MIN_HASH_BUCKETS = 2, }; -static inline unsigned int arp_hash(const u32 *key, int ifindex) +static inline unsigned int arp_hash(struct l2t_data *d, const u32 *key, + int ifindex) { - return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK; + unsigned int l2t_size_half = d->l2t_size / 2; + + return jhash_2words(*key, ifindex, 0) % l2t_size_half; } -static inline unsigned int ipv6_hash(const u32 *key, int ifindex) +static inline unsigned int ipv6_hash(struct l2t_data *d, const u32 *key, + int ifindex) { + unsigned int l2t_size_half = d->l2t_size / 2; u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3]; - return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK); + return (l2t_size_half + + (jhash_2words(xor, ifindex, 0) % l2t_size_half)); } -static unsigned int addr_hash(const u32 *addr, int addr_len, int ifindex) +static unsigned int addr_hash(struct l2t_data *d, const u32 *addr, + int addr_len, int ifindex) { - return addr_len == 4 ? arp_hash(addr, ifindex) : - ipv6_hash(addr, ifindex); + return addr_len == 4 ? arp_hash(d, addr, ifindex) : + ipv6_hash(d, addr, ifindex); } /* @@ -139,6 +139,8 @@ static void neigh_replace(struct l2t_entry *e, struct neighbour *n) */ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync) { + struct l2t_data *d = adap->l2t; + unsigned int l2t_idx = e->idx + d->l2t_start; struct sk_buff *skb; struct cpl_l2t_write_req *req; @@ -150,10 +152,10 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync) INIT_TP_WR(req, 0); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, - e->idx | (sync ? F_SYNC_WR : 0) | + l2t_idx | (sync ? SYNC_WR_F : 0) | TID_QID_V(adap->sge.fw_evtq.abs_id))); req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync)); - req->l2t_idx = htons(e->idx); + req->l2t_idx = htons(l2t_idx); req->vlan = htons(e->vlan); if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK)) memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac)); @@ -190,18 +192,19 @@ static void send_pending(struct adapter *adap, struct l2t_entry *e) */ void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl) { + struct l2t_data *d = adap->l2t; unsigned int tid = GET_TID(rpl); - unsigned int idx = tid & (L2T_SIZE - 1); + unsigned int l2t_idx = tid % L2T_SIZE; if (unlikely(rpl->status != CPL_ERR_NONE)) { dev_err(adap->pdev_dev, "Unexpected L2T_WRITE_RPL status %u for entry %u\n", - rpl->status, idx); + rpl->status, l2t_idx); return; } - if (tid & F_SYNC_WR) { - struct l2t_entry *e = &adap->l2t->l2tab[idx]; + if (tid & SYNC_WR_F) { + struct l2t_entry *e = &d->l2tab[l2t_idx - d->l2t_start]; spin_lock(&e->lock); if (e->state != L2T_STATE_SWITCHING) { @@ -276,7 +279,7 @@ static struct l2t_entry *alloc_l2e(struct l2t_data *d) return NULL; /* there's definitely a free entry */ - for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e) + for (e = d->rover, end = &d->l2tab[d->l2t_size]; e != end; ++e) if (atomic_read(&e->refcnt) == 0) goto found; @@ -368,7 +371,7 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, int addr_len = neigh->tbl->key_len; u32 *addr = (u32 *)neigh->primary_key; int ifidx = neigh->dev->ifindex; - int hash = addr_hash(addr, addr_len, ifidx); + int hash = addr_hash(d, addr, addr_len, ifidx); if (neigh->dev->flags & IFF_LOOPBACK) lport = netdev2pinfo(physdev)->tx_chan + 4; @@ -481,7 +484,7 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh) int addr_len = neigh->tbl->key_len; 
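The L2T hashing above now works on a table whose size is only known at init time, so the old power-of-two mask becomes a modulo over half the table, with IPv4 keys landing in the lower half and IPv6 keys in the upper half. A self-contained sketch of the bucket selection; hash2() is a stand-in for jhash_2words(), which is all that matters for seeing the split.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for jhash_2words(); only the bucket-splitting logic matters
 * here, not the hash quality. */
static unsigned int hash2(uint32_t a, uint32_t b)
{
	return a * 2654435761u ^ b * 40503u;
}

/* IPv4 entries hash into [0, size/2), IPv6 into [size/2, size), so the
 * two families never share a chain. */
static unsigned int arp_hash(unsigned int l2t_size, uint32_t key, int ifindex)
{
	return hash2(key, ifindex) % (l2t_size / 2);
}

static unsigned int ipv6_hash(unsigned int l2t_size, const uint32_t key[4], int ifindex)
{
	uint32_t x = key[0] ^ key[1] ^ key[2] ^ key[3];

	return l2t_size / 2 + hash2(x, ifindex) % (l2t_size / 2);
}

int main(void)
{
	uint32_t v6[4] = { 0x20010db8, 0, 0, 1 };
	unsigned int size = 64;		/* any even size >= 2 works */

	printf("v4 bucket %u of %u\n", arp_hash(size, 0xc0a80001, 2), size);
	printf("v6 bucket %u of %u\n", ipv6_hash(size, v6, 2), size);
	return 0;
}

Splitting by half-table means a lookup never has to check the address family of entries on a chain; it is also why the reworked t4_init_l2t() refuses tables smaller than L2T_MIN_HASH_BUCKETS.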
u32 *addr = (u32 *) neigh->primary_key; int ifidx = neigh->dev->ifindex; - int hash = addr_hash(addr, addr_len, ifidx); + int hash = addr_hash(d, addr, addr_len, ifidx); read_lock_bh(&d->lock); for (e = d->l2tab[hash].first; e; e = e->next) @@ -554,20 +557,30 @@ int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, return write_l2e(adap, e, 0); } -struct l2t_data *t4_init_l2t(void) +struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end) { + unsigned int l2t_size; int i; struct l2t_data *d; - d = t4_alloc_mem(sizeof(*d)); + if (l2t_start >= l2t_end || l2t_end >= L2T_SIZE) + return NULL; + l2t_size = l2t_end - l2t_start + 1; + if (l2t_size < L2T_MIN_HASH_BUCKETS) + return NULL; + + d = t4_alloc_mem(sizeof(*d) + l2t_size * sizeof(struct l2t_entry)); if (!d) return NULL; + d->l2t_start = l2t_start; + d->l2t_size = l2t_size; + d->rover = d->l2tab; - atomic_set(&d->nfree, L2T_SIZE); + atomic_set(&d->nfree, l2t_size); rwlock_init(&d->lock); - for (i = 0; i < L2T_SIZE; ++i) { + for (i = 0; i < d->l2t_size; ++i) { d->l2tab[i].idx = i; d->l2tab[i].state = L2T_STATE_UNUSED; spin_lock_init(&d->l2tab[i].lock); @@ -578,9 +591,9 @@ struct l2t_data *t4_init_l2t(void) static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos) { - struct l2t_entry *l2tab = seq->private; + struct l2t_data *d = seq->private; - return pos >= L2T_SIZE ? NULL : &l2tab[pos]; + return pos >= d->l2t_size ? NULL : &d->l2tab[pos]; } static void *l2t_seq_start(struct seq_file *seq, loff_t *pos) @@ -620,6 +633,7 @@ static int l2t_seq_show(struct seq_file *seq, void *v) "Ethernet address VLAN/P LP State Users Port\n"); else { char ip[60]; + struct l2t_data *d = seq->private; struct l2t_entry *e = v; spin_lock_bh(&e->lock); @@ -628,7 +642,7 @@ static int l2t_seq_show(struct seq_file *seq, void *v) else sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr); seq_printf(seq, "%4u %-25s %17pM %4d %u %2u %c %5u %s\n", - e->idx, ip, e->dmac, + e->idx + d->l2t_start, ip, e->dmac, e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport, l2e_state(e), atomic_read(&e->refcnt), e->neigh ? 
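To make the t4_init_l2t() change above concrete: the table header and its entry array are now allocated in one block sized from the [l2t_start, l2t_end] range, after the same sanity checks. Below is a minimal userspace sketch of that pattern using a C99 flexible array member and plain malloc() in place of t4_alloc_mem(); the struct and function names are illustrative, not the driver's.

#include <stdio.h>
#include <stdlib.h>

#define L2T_SIZE             4096   /* full hardware table, as in l2t.h */
#define L2T_MIN_HASH_BUCKETS 2

struct toy_l2t_entry {
        unsigned int idx;             /* index within the in-memory table */
        int state;
};

struct toy_l2t_data {
        unsigned int l2t_start;       /* first hardware index owned by us */
        unsigned int l2t_size;        /* number of entries we manage */
        struct toy_l2t_entry l2tab[]; /* allocated together with the header */
};

static struct toy_l2t_data *toy_init_l2t(unsigned int start, unsigned int end)
{
        struct toy_l2t_data *d;
        unsigned int size, i;

        /* same sanity checks as the driver: a valid, non-degenerate range */
        if (start >= end || end >= L2T_SIZE)
                return NULL;
        size = end - start + 1;
        if (size < L2T_MIN_HASH_BUCKETS)
                return NULL;

        d = malloc(sizeof(*d) + size * sizeof(d->l2tab[0]));
        if (!d)
                return NULL;

        d->l2t_start = start;
        d->l2t_size = size;
        for (i = 0; i < size; i++) {
                d->l2tab[i].idx = i;  /* hardware index = idx + l2t_start */
                d->l2tab[i].state = 0;
        }
        return d;
}

int main(void)
{
        struct toy_l2t_data *d = toy_init_l2t(1024, 1535);

        if (d)
                printf("managing %u entries starting at hw index %u\n",
                       d->l2t_size, d->l2t_start);
        free(d);
        return 0;
}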
e->neigh->dev->name : ""); @@ -652,7 +666,7 @@ static int l2t_seq_open(struct inode *inode, struct file *file) struct adapter *adap = inode->i_private; struct seq_file *seq = file->private_data; - seq->private = adap->l2t->l2tab; + seq->private = adap->l2t; } return rc; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h index a30126ce90cb..b38dc526aad5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h @@ -39,6 +39,20 @@ #include <linux/if_ether.h> #include <linux/atomic.h> +enum { L2T_SIZE = 4096 }; /* # of L2T entries */ + +enum { + L2T_STATE_VALID, /* entry is up to date */ + L2T_STATE_STALE, /* entry may be used but needs revalidation */ + L2T_STATE_RESOLVING, /* entry needs address resolution */ + L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */ + L2T_STATE_NOARP, /* Netdev down or removed*/ + + /* when state is one of the below the entry is not hashed */ + L2T_STATE_SWITCHING, /* entry is being used by a switching filter */ + L2T_STATE_UNUSED /* entry not in use */ +}; + struct adapter; struct l2t_data; struct neighbour; @@ -56,7 +70,7 @@ struct cpl_l2t_write_rpl; */ struct l2t_entry { u16 state; /* entry state */ - u16 idx; /* entry index */ + u16 idx; /* entry index within in-memory table */ u32 addr[4]; /* next hop IP or IPv6 address */ int ifindex; /* neighbor's net_device's ifindex */ struct neighbour *neigh; /* associated neighbour */ @@ -104,7 +118,7 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh); struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d); int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, u8 port, u8 *eth_addr); -struct l2t_data *t4_init_l2t(void); +struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end); void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl); extern const struct file_operations t4_l2t_fops; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 942db078f33a..d4248d74f560 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -1137,7 +1137,7 @@ cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap, */ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev) { - u32 wr_mid; + u32 wr_mid, ctrl0; u64 cntrl, *end; int qidx, credits; unsigned int flits, ndesc; @@ -1274,9 +1274,15 @@ out_free: dev_kfree_skb_any(skb); #endif /* CONFIG_CHELSIO_T4_FCOE */ } - cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT_XT) | - TXPKT_INTF_V(pi->tx_chan) | - TXPKT_PF_V(adap->pf)); + ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) | + TXPKT_PF_V(adap->pf); +#ifdef CONFIG_CHELSIO_T4_DCB + if (is_t4(adap->params.chip)) + ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio); + else + ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio); +#endif + cpl->ctrl0 = htonl(ctrl0); cpl->pack = htons(0); cpl->len = htons(skb->len); cpl->ctrl1 = cpu_to_be64(cntrl); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 2b52aae7ec86..800bd489dd75 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -1345,9 +1345,9 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x5a80, 0x5a9c, 0x5b94, 0x5bfc, 0x5c10, 0x5ec0, - 0x5ec8, 0x5ec8, + 0x5ec8, 0x5ecc, 0x6000, 0x6040, - 0x6058, 0x6154, + 0x6058, 0x615c, 0x7700, 0x7798, 0x77c0, 0x7880, 0x78cc, 0x78fc, @@ 
-1371,20 +1371,22 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x9f00, 0x9f6c, 0x9f80, 0xa020, 0xd004, 0xd03c, + 0xd100, 0xd118, + 0xd200, 0xd31c, 0xdfc0, 0xdfe0, 0xe000, 0xf008, 0x11000, 0x11014, 0x11048, 0x11110, 0x11118, 0x1117c, - 0x11190, 0x11260, + 0x11190, 0x11264, 0x11300, 0x1130c, - 0x12000, 0x1205c, + 0x12000, 0x1206c, 0x19040, 0x1906c, 0x19078, 0x19080, 0x1908c, 0x19124, 0x19150, 0x191b0, 0x191d0, 0x191e8, - 0x19238, 0x192b8, + 0x19238, 0x192bc, 0x193f8, 0x19474, 0x19490, 0x194cc, 0x194f0, 0x194f8, @@ -1466,7 +1468,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x30200, 0x30318, 0x30400, 0x3052c, 0x30540, 0x3061c, - 0x30800, 0x3088c, + 0x30800, 0x30890, 0x308c0, 0x30908, 0x30910, 0x309b8, 0x30a00, 0x30a04, @@ -1544,7 +1546,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x34200, 0x34318, 0x34400, 0x3452c, 0x34540, 0x3461c, - 0x34800, 0x3488c, + 0x34800, 0x34890, 0x348c0, 0x34908, 0x34910, 0x349b8, 0x34a00, 0x34a04, @@ -3687,6 +3689,11 @@ int t4_read_rss(struct adapter *adapter, u16 *map) return 0; } +static unsigned int t4_use_ldst(struct adapter *adap) +{ + return (adap->flags & FW_OK) || !adap->use_bd; +} + /** * t4_fw_tp_pio_rw - Access TP PIO through LDST * @adap: the adapter @@ -3730,7 +3737,7 @@ static void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs, */ void t4_read_rss_key(struct adapter *adap, u32 *key) { - if (adap->flags & FW_OK) + if (t4_use_ldst(adap)) t4_fw_tp_pio_rw(adap, key, 10, TP_RSS_SECRET_KEY0_A, 1); else t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10, @@ -3760,7 +3767,7 @@ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx) (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3)) rss_key_addr_cnt = 32; - if (adap->flags & FW_OK) + if (t4_use_ldst(adap)) t4_fw_tp_pio_rw(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, 0); else t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10, @@ -3789,7 +3796,7 @@ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx) void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp) { - if (adapter->flags & FW_OK) + if (t4_use_ldst(adapter)) t4_fw_tp_pio_rw(adapter, valp, 1, TP_RSS_PF0_CONFIG_A + index, 1); else @@ -3829,7 +3836,7 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index, /* Grab the VFL/VFH values ... 
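The t4_use_ldst() helper introduced above gives every RSS accessor one place to decide between the firmware LDST path and direct indirect-register reads. Here is a small userspace sketch of that dispatch; the two back-end functions are dummies standing in for t4_fw_tp_pio_rw() and t4_read_indirect(), and the field names are placeholders.

#include <stdio.h>
#include <stdbool.h>

struct toy_adapter {
        bool fw_ok;          /* firmware is up and can service LDST */
        bool use_backdoor;   /* direct (backdoor) register access requested */
};

/* dummy back-ends standing in for t4_fw_tp_pio_rw() and t4_read_indirect() */
static unsigned int read_via_firmware(unsigned int reg)
{
        printf("LDST read of reg %#x via firmware\n", reg);
        return 0;
}

static unsigned int read_indirect(unsigned int reg)
{
        printf("indirect PIO read of reg %#x\n", reg);
        return 0;
}

/* same shape as t4_use_ldst(): take the firmware path when firmware is up,
 * or when backdoor register access was not requested
 */
static bool toy_use_ldst(const struct toy_adapter *adap)
{
        return adap->fw_ok || !adap->use_backdoor;
}

static unsigned int toy_read_tp_reg(const struct toy_adapter *adap,
                                    unsigned int reg)
{
        return toy_use_ldst(adap) ? read_via_firmware(reg) : read_indirect(reg);
}

int main(void)
{
        struct toy_adapter a = { .fw_ok = true,  .use_backdoor = false };
        struct toy_adapter b = { .fw_ok = false, .use_backdoor = true };

        toy_read_tp_reg(&a, 0x40);
        toy_read_tp_reg(&b, 0x40);
        return 0;
}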
*/ - if (adapter->flags & FW_OK) { + if (t4_use_ldst(adapter)) { t4_fw_tp_pio_rw(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, 1); t4_fw_tp_pio_rw(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, 1); } else { @@ -3850,7 +3857,7 @@ u32 t4_read_rss_pf_map(struct adapter *adapter) { u32 pfmap; - if (adapter->flags & FW_OK) + if (t4_use_ldst(adapter)) t4_fw_tp_pio_rw(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, 1); else t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, @@ -3868,7 +3875,7 @@ u32 t4_read_rss_pf_mask(struct adapter *adapter) { u32 pfmask; - if (adapter->flags & FW_OK) + if (t4_use_ldst(adapter)) t4_fw_tp_pio_rw(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, 1); else t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, @@ -3924,43 +3931,25 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, */ void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st) { - /* T6 and later has 2 channels */ - if (adap->params.arch.nchan == NCHAN) { - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->mac_in_errs, 12, TP_MIB_MAC_IN_ERR_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->tnl_cong_drops, 8, - TP_MIB_TNL_CNG_DROP_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->tnl_tx_drops, 4, - TP_MIB_TNL_DROP_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->ofld_vlan_drops, 4, - TP_MIB_OFD_VLN_DROP_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->tcp6_in_errs, 4, - TP_MIB_TCP_V6IN_ERR_0_A); - } else { - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->mac_in_errs, 2, TP_MIB_MAC_IN_ERR_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->hdr_in_errs, 2, TP_MIB_HDR_IN_ERR_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->tcp_in_errs, 2, TP_MIB_TCP_IN_ERR_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->tnl_cong_drops, 2, - TP_MIB_TNL_CNG_DROP_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->ofld_chan_drops, 2, - TP_MIB_OFD_CHN_DROP_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->tnl_tx_drops, 2, TP_MIB_TNL_DROP_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->ofld_vlan_drops, 2, - TP_MIB_OFD_VLN_DROP_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->tcp6_in_errs, 2, TP_MIB_TCP_V6IN_ERR_0_A); - } + int nchan = adap->params.arch.nchan; + + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->tnl_cong_drops, nchan, TP_MIB_TNL_CNG_DROP_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->ofld_chan_drops, nchan, TP_MIB_OFD_CHN_DROP_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->ofld_vlan_drops, nchan, TP_MIB_OFD_VLN_DROP_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->tcp6_in_errs, nchan, TP_MIB_TCP_V6IN_ERR_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A); } @@ -3974,16 +3963,13 @@ void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st) */ void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st) { - /* T6 and later 
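The consolidation of t4_tp_get_err_stats() above replaces two near-identical per-chip branches with one sequence of reads driven by adap->params.arch.nchan. The toy below shows the same shape in userspace; the MIB "registers" are faked and the base addresses are arbitrary.

#include <stdio.h>

#define MAX_NCHAN 4

/* fake MIB: pretend each register holds its address times ten */
static unsigned int read_mib(unsigned int addr)
{
        return addr * 10;
}

/* read 'nchan' consecutive counters starting at 'base', like the repeated
 * t4_read_indirect(..., nchan, TP_MIB_..._0_A) calls above
 */
static void read_per_chan(unsigned int *dst, int nchan, unsigned int base)
{
        int i;

        for (i = 0; i < nchan; i++)
                dst[i] = read_mib(base + i);
}

int main(void)
{
        unsigned int mac_in_errs[MAX_NCHAN];
        unsigned int tnl_cong_drops[MAX_NCHAN];
        int nchan = 2;                      /* e.g. a two-channel chip */

        read_per_chan(mac_in_errs, nchan, 0x100);
        read_per_chan(tnl_cong_drops, nchan, 0x200);

        printf("chan0 mac_in_errs=%u tnl_cong_drops=%u\n",
               mac_in_errs[0], tnl_cong_drops[0]);
        return 0;
}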
has 2 channels */ - if (adap->params.arch.nchan == NCHAN) { - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req, - 8, TP_MIB_CPL_IN_REQ_0_A); - } else { - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req, - 2, TP_MIB_CPL_IN_REQ_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp, - 2, TP_MIB_CPL_OUT_RSP_0_A); - } + int nchan = adap->params.arch.nchan; + + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req, + nchan, TP_MIB_CPL_IN_REQ_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp, + nchan, TP_MIB_CPL_OUT_RSP_0_A); + } /** @@ -6294,7 +6280,7 @@ int t4_init_tp_params(struct adapter *adap) /* Cache the adapter's Compressed Filter Mode and global Incress * Configuration. */ - if (adap->flags & FW_OK) { + if (t4_use_ldst(adap)) { t4_fw_tp_pio_rw(adap, &adap->params.tp.vlan_pri_map, 1, TP_VLAN_PRI_MAP_A, 1); t4_fw_tp_pio_rw(adap, &adap->params.tp.ingress_config, 1, diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h index c8488f430d19..640369df8b3a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h @@ -47,7 +47,6 @@ enum { TCB_SIZE = 128, /* TCB size */ NMTUS = 16, /* size of MTU table */ NCCTRL_WIN = 32, /* # of congestion control windows */ - L2T_SIZE = 4096, /* # of L2T entries */ PM_NSTATS = 5, /* # of PM stats */ MBOX_LEN = 64, /* mailbox size in bytes */ TRACE_LEN = 112, /* length of trace data and mask */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index 132cb8fc0bf7..b99144afd4ec 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -660,6 +660,9 @@ struct cpl_tx_pkt { #define TXPKT_OVLAN_IDX_S 12 #define TXPKT_OVLAN_IDX_V(x) ((x) << TXPKT_OVLAN_IDX_S) +#define TXPKT_T5_OVLAN_IDX_S 12 +#define TXPKT_T5_OVLAN_IDX_V(x) ((x) << TXPKT_T5_OVLAN_IDX_S) + #define TXPKT_INTF_S 16 #define TXPKT_INTF_V(x) ((x) << TXPKT_INTF_S) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index d7ca106927b0..8353a6cbfcc2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -142,6 +142,8 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x5013), /* T580-chr */ CH_PCI_ID_TABLE_FENTRY(0x5014), /* T580-so */ CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */ + CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */ + CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */ CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */ CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */ CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */ @@ -155,6 +157,22 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */ CH_PCI_ID_TABLE_FENTRY(0x5091), /* Custom T522-CR */ CH_PCI_ID_TABLE_FENTRY(0x5092), /* Custom T520-CR */ + + /* T6 adapters: + */ + CH_PCI_ID_TABLE_FENTRY(0x6001), + CH_PCI_ID_TABLE_FENTRY(0x6002), + CH_PCI_ID_TABLE_FENTRY(0x6003), + CH_PCI_ID_TABLE_FENTRY(0x6004), + CH_PCI_ID_TABLE_FENTRY(0x6005), + CH_PCI_ID_TABLE_FENTRY(0x6006), + CH_PCI_ID_TABLE_FENTRY(0x6007), + CH_PCI_ID_TABLE_FENTRY(0x6009), + CH_PCI_ID_TABLE_FENTRY(0x600d), + CH_PCI_ID_TABLE_FENTRY(0x6010), + CH_PCI_ID_TABLE_FENTRY(0x6011), + CH_PCI_ID_TABLE_FENTRY(0x6014), + CH_PCI_ID_TABLE_FENTRY(0x6015), CH_PCI_DEVICE_ID_TABLE_DEFINE_END; #endif /* __T4_PCI_ID_TBL_H__ */ diff --git 
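The new TXPKT_T5_OVLAN_IDX_S/_V definitions above follow the driver's usual convention of a <FIELD>_S shift plus a <FIELD>_V(x) value macro for packing fields into a CPL control word, as used when building ctrl0 earlier in this patch. A self-contained sketch of that convention with made-up field names, shifts and opcode:

#include <stdio.h>

/* made-up stand-ins for the TXPKT_* field macros */
#define TOY_OPCODE_S      24
#define TOY_OPCODE_V(x)   ((unsigned int)(x) << TOY_OPCODE_S)

#define TOY_INTF_S        16
#define TOY_INTF_V(x)     ((unsigned int)(x) << TOY_INTF_S)

#define TOY_VLAN_IDX_S    12
#define TOY_VLAN_IDX_V(x) ((unsigned int)(x) << TOY_VLAN_IDX_S)

int main(void)
{
        int dcb_enabled = 1;             /* compile-time option in the driver */
        unsigned int tx_chan = 1, dcb_prio = 5;
        unsigned int ctrl0;

        /* build the control word once, OR in optional fields, write it once,
         * the same shape as the ctrl0 change earlier in this patch
         */
        ctrl0 = TOY_OPCODE_V(0xb4) | TOY_INTF_V(tx_chan);
        if (dcb_enabled)
                ctrl0 |= TOY_VLAN_IDX_V(dcb_prio);

        printf("ctrl0 = %#010x\n", ctrl0);
        return 0;
}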
a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index ad53e5ad2acd..1d5e77a566e1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -1898,7 +1898,10 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) rspq->unhandled_irqs++; val = CIDXINC_V(work_done) | SEINTARM_V(intr_params); - if (is_t4(rspq->adapter->params.chip)) { + /* If we don't have access to the new User GTS (T5+), use the old + * doorbell mechanism; otherwise use the new BAR2 mechanism. + */ + if (unlikely(!rspq->bar2_addr)) { t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, val | INGRESSQID_V((u32)rspq->cntxt_id)); @@ -1998,10 +2001,13 @@ static unsigned int process_intrq(struct adapter *adapter) } val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params); - if (is_t4(adapter->params.chip)) + /* If we don't have access to the new User GTS (T5+), use the old + * doorbell mechanism; otherwise use the new BAR2 mechanism. + */ + if (unlikely(!intrq->bar2_addr)) { t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, val | INGRESSQID_V(intrq->cntxt_id)); - else { + } else { writel(val | INGRESSQID_V(intrq->bar2_qid), intrq->bar2_addr + SGE_UDB_GTS); wmb(); diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index f3f1601a76f3..f44a39c40642 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -224,7 +224,8 @@ static int enic_get_coalesce(struct net_device *netdev, struct enic *enic = netdev_priv(netdev); struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting; - ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs; + if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) + ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs; ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs; if (rxcoal->use_adaptive_rx_coalesce) ecmd->use_adaptive_rx_coalesce = 1; @@ -234,6 +235,53 @@ static int enic_get_coalesce(struct net_device *netdev, return 0; } +static int enic_coalesce_valid(struct enic *enic, + struct ethtool_coalesce *ec) +{ + u32 coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev); + u32 rx_coalesce_usecs_high = min_t(u32, coalesce_usecs_max, + ec->rx_coalesce_usecs_high); + u32 rx_coalesce_usecs_low = min_t(u32, coalesce_usecs_max, + ec->rx_coalesce_usecs_low); + + if (ec->rx_max_coalesced_frames || + ec->rx_coalesce_usecs_irq || + ec->rx_max_coalesced_frames_irq || + ec->tx_max_coalesced_frames || + ec->tx_coalesce_usecs_irq || + ec->tx_max_coalesced_frames_irq || + ec->stats_block_coalesce_usecs || + ec->use_adaptive_tx_coalesce || + ec->pkt_rate_low || + ec->rx_max_coalesced_frames_low || + ec->tx_coalesce_usecs_low || + ec->tx_max_coalesced_frames_low || + ec->pkt_rate_high || + ec->rx_max_coalesced_frames_high || + ec->tx_coalesce_usecs_high || + ec->tx_max_coalesced_frames_high || + ec->rate_sample_interval) + return -EINVAL; + + if ((vnic_dev_get_intr_mode(enic->vdev) != VNIC_DEV_INTR_MODE_MSIX) && + ec->tx_coalesce_usecs) + return -EINVAL; + + if ((ec->tx_coalesce_usecs > coalesce_usecs_max) || + (ec->rx_coalesce_usecs > coalesce_usecs_max) || + (ec->rx_coalesce_usecs_low > coalesce_usecs_max) || + (ec->rx_coalesce_usecs_high > coalesce_usecs_max)) + netdev_info(enic->netdev, "ethtool_set_coalesce: adaptor supports max coalesce value of %d. 
Setting max value.\n", + coalesce_usecs_max); + + if (ec->rx_coalesce_usecs_high && + (rx_coalesce_usecs_high < + rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF)) + return -EINVAL; + + return 0; +} + static int enic_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd) { @@ -244,8 +292,12 @@ static int enic_set_coalesce(struct net_device *netdev, u32 rx_coalesce_usecs_high; u32 coalesce_usecs_max; unsigned int i, intr; + int ret; struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting; + ret = enic_coalesce_valid(enic, ecmd); + if (ret) + return ret; coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev); tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs, coalesce_usecs_max); @@ -257,59 +309,24 @@ static int enic_set_coalesce(struct net_device *netdev, rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high, coalesce_usecs_max); - switch (vnic_dev_get_intr_mode(enic->vdev)) { - case VNIC_DEV_INTR_MODE_INTX: - if (tx_coalesce_usecs != rx_coalesce_usecs) - return -EINVAL; - if (ecmd->use_adaptive_rx_coalesce || - ecmd->rx_coalesce_usecs_low || - ecmd->rx_coalesce_usecs_high) - return -EINVAL; - - intr = enic_legacy_io_intr(); - vnic_intr_coalescing_timer_set(&enic->intr[intr], - tx_coalesce_usecs); - break; - case VNIC_DEV_INTR_MODE_MSI: - if (tx_coalesce_usecs != rx_coalesce_usecs) - return -EINVAL; - if (ecmd->use_adaptive_rx_coalesce || - ecmd->rx_coalesce_usecs_low || - ecmd->rx_coalesce_usecs_high) - return -EINVAL; - - vnic_intr_coalescing_timer_set(&enic->intr[0], - tx_coalesce_usecs); - break; - case VNIC_DEV_INTR_MODE_MSIX: - if (ecmd->rx_coalesce_usecs_high && - (rx_coalesce_usecs_high < - rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF)) - return -EINVAL; - + if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) { for (i = 0; i < enic->wq_count; i++) { intr = enic_msix_wq_intr(enic, i); vnic_intr_coalescing_timer_set(&enic->intr[intr], - tx_coalesce_usecs); - } - - rxcoal->use_adaptive_rx_coalesce = - !!ecmd->use_adaptive_rx_coalesce; - if (!rxcoal->use_adaptive_rx_coalesce) - enic_intr_coal_set_rx(enic, rx_coalesce_usecs); - - if (ecmd->rx_coalesce_usecs_high) { - rxcoal->range_end = rx_coalesce_usecs_high; - rxcoal->small_pkt_range_start = rx_coalesce_usecs_low; - rxcoal->large_pkt_range_start = rx_coalesce_usecs_low + - ENIC_AIC_LARGE_PKT_DIFF; + tx_coalesce_usecs); } - break; - default: - break; + enic->tx_coalesce_usecs = tx_coalesce_usecs; + } + rxcoal->use_adaptive_rx_coalesce = !!ecmd->use_adaptive_rx_coalesce; + if (!rxcoal->use_adaptive_rx_coalesce) + enic_intr_coal_set_rx(enic, rx_coalesce_usecs); + if (ecmd->rx_coalesce_usecs_high) { + rxcoal->range_end = rx_coalesce_usecs_high; + rxcoal->small_pkt_range_start = rx_coalesce_usecs_low; + rxcoal->large_pkt_range_start = rx_coalesce_usecs_low + + ENIC_AIC_LARGE_PKT_DIFF; } - enic->tx_coalesce_usecs = tx_coalesce_usecs; enic->rx_coalesce_usecs = rx_coalesce_usecs; return 0; diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 918a8e42139b..8f646e4e968b 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1149,6 +1149,64 @@ static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, return 0; } +static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq) +{ + unsigned int intr = enic_msix_rq_intr(enic, rq->index); + struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; + u32 timer = cq->tobe_rx_coal_timeval; + + if 
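enic_coalesce_valid() above rejects every knob the adapter cannot honour, requires MSI-X for a tx usecs value, and clamps the rx usec values to the device maximum before checking the adaptive range. The sketch below models a reduced validate-then-clamp version of that logic in userspace; the structure, limit and field names are made up.

#include <stdio.h>
#include <errno.h>

struct toy_coal_req {
        unsigned int rx_usecs;
        unsigned int rx_usecs_low;
        unsigned int rx_usecs_high;
        unsigned int unsupported_knob;  /* stands for the rejected fields */
};

#define TOY_COAL_USECS_MAX 1000         /* made-up adapter limit */
#define TOY_LARGE_PKT_DIFF 4            /* stands for ENIC_AIC_LARGE_PKT_DIFF */

static unsigned int clamp_u(unsigned int v, unsigned int max)
{
        return v > max ? max : v;
}

static int toy_coalesce_valid(const struct toy_coal_req *req)
{
        unsigned int hi = clamp_u(req->rx_usecs_high, TOY_COAL_USECS_MAX);
        unsigned int lo = clamp_u(req->rx_usecs_low, TOY_COAL_USECS_MAX);

        /* anything the device cannot do at all is an error ... */
        if (req->unsupported_knob)
                return -EINVAL;

        /* ... while too-large values are only clamped (with a warning in the
         * driver), except that the adaptive range must still make sense
         */
        if (req->rx_usecs_high && hi < lo + TOY_LARGE_PKT_DIFF)
                return -EINVAL;

        return 0;
}

int main(void)
{
        struct toy_coal_req ok  = { .rx_usecs = 50, .rx_usecs_low = 10,
                                    .rx_usecs_high = 100 };
        struct toy_coal_req bad = { .rx_usecs = 50, .unsupported_knob = 1 };

        printf("ok:  %d\n", toy_coalesce_valid(&ok));
        printf("bad: %d\n", toy_coalesce_valid(&bad));
        return 0;
}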
(cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) { + vnic_intr_coalescing_timer_set(&enic->intr[intr], timer); + cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval; + } +} + +static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq) +{ + struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting; + struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; + struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter; + int index; + u32 timer; + u32 range_start; + u32 traffic; + u64 delta; + ktime_t now = ktime_get(); + + delta = ktime_us_delta(now, cq->prev_ts); + if (delta < ENIC_AIC_TS_BREAK) + return; + cq->prev_ts = now; + + traffic = pkt_size_counter->large_pkt_bytes_cnt + + pkt_size_counter->small_pkt_bytes_cnt; + /* The table takes Mbps + * traffic *= 8 => bits + * traffic *= (10^6 / delta) => bps + * traffic /= 10^6 => Mbps + * + * Combining, traffic *= (8 / delta) + */ + + traffic <<= 3; + traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta; + + for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++) + if (traffic < mod_table[index].rx_rate) + break; + range_start = (pkt_size_counter->small_pkt_bytes_cnt > + pkt_size_counter->large_pkt_bytes_cnt << 1) ? + rx_coal->small_pkt_range_start : + rx_coal->large_pkt_range_start; + timer = range_start + ((rx_coal->range_end - range_start) * + mod_table[index].range_percent / 100); + /* Damping */ + cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1; + + pkt_size_counter->large_pkt_bytes_cnt = 0; + pkt_size_counter->small_pkt_bytes_cnt = 0; +} + static int enic_poll(struct napi_struct *napi, int budget) { struct net_device *netdev = napi->dev; @@ -1199,6 +1257,11 @@ static int enic_poll(struct napi_struct *napi, int budget) if (err) rq_work_done = rq_work_to_do; + if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) + /* Call the function which refreshes the intr coalescing timer + * value based on the traffic. + */ + enic_calc_int_moderation(enic, &enic->rq[0]); if (rq_work_done < rq_work_to_do) { @@ -1207,70 +1270,14 @@ static int enic_poll(struct napi_struct *napi, int budget) */ napi_complete(napi); + if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) + enic_set_int_moderation(enic, &enic->rq[0]); vnic_intr_unmask(&enic->intr[intr]); } return rq_work_done; } -static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq) -{ - unsigned int intr = enic_msix_rq_intr(enic, rq->index); - struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; - u32 timer = cq->tobe_rx_coal_timeval; - - if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) { - vnic_intr_coalescing_timer_set(&enic->intr[intr], timer); - cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval; - } -} - -static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq) -{ - struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting; - struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; - struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter; - int index; - u32 timer; - u32 range_start; - u32 traffic; - u64 delta; - ktime_t now = ktime_get(); - - delta = ktime_us_delta(now, cq->prev_ts); - if (delta < ENIC_AIC_TS_BREAK) - return; - cq->prev_ts = now; - - traffic = pkt_size_counter->large_pkt_bytes_cnt + - pkt_size_counter->small_pkt_bytes_cnt; - /* The table takes Mbps - * traffic *= 8 => bits - * traffic *= (10^6 / delta) => bps - * traffic /= 10^6 => Mbps - * - * Combining, traffic *= (8 / delta) - */ - - traffic <<= 3; - traffic = delta > UINT_MAX ? 
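The arithmetic in enic_calc_int_moderation() above reduces to: Mbps = bytes * 8 / delta_us, pick a range percentage from the rate table, interpolate a timer inside [range_start, range_end], then damp it by averaging with the previous target. A worked userspace version of just that math (the small/large packet range selection is left out, and the rate table and ranges are invented):

#include <stdio.h>

struct toy_mod_entry {
        unsigned int rx_rate_mbps;   /* upper bound for this row */
        unsigned int range_percent;  /* where in [start, end] the timer lands */
};

/* invented equivalent of mod_table[]: slower traffic, smaller percentage */
static const struct toy_mod_entry mod_table[] = {
        { 1000, 0 }, { 4000, 25 }, { 7000, 50 }, { 10000, 100 },
};

static unsigned int toy_calc_timer(unsigned long bytes, unsigned long delta_us,
                                   unsigned int range_start,
                                   unsigned int range_end,
                                   unsigned int prev_timer)
{
        /* bytes * 8 gives bits; bits per microsecond is exactly Mbps */
        unsigned long mbps = bytes * 8 / delta_us;
        unsigned int timer, i;

        for (i = 0; i < sizeof(mod_table) / sizeof(mod_table[0]) - 1; i++)
                if (mbps < mod_table[i].rx_rate_mbps)
                        break;

        timer = range_start +
                (range_end - range_start) * mod_table[i].range_percent / 100;

        /* damping, as in the driver: average with the previous target */
        return (timer + prev_timer) / 2;
}

int main(void)
{
        /* 1.5 MB over 2000 us is 6000 Mbps, which selects the 50% row */
        printf("new timer: %u us\n",
               toy_calc_timer(1500000, 2000, 10, 90, 30));
        return 0;
}

With 1.5 MB observed over 2000 us the sketch computes 6000 Mbps, picks the 50% row, interpolates 50 us within [10, 90] and damps it against the previous 30 us, printing 40 us.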
0 : traffic / (u32)delta; - - for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++) - if (traffic < mod_table[index].rx_rate) - break; - range_start = (pkt_size_counter->small_pkt_bytes_cnt > - pkt_size_counter->large_pkt_bytes_cnt << 1) ? - rx_coal->small_pkt_range_start : - rx_coal->large_pkt_range_start; - timer = range_start + ((rx_coal->range_end - range_start) * - mod_table[index].range_percent / 100); - /* Damping */ - cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1; - - pkt_size_counter->large_pkt_bytes_cnt = 0; - pkt_size_counter->small_pkt_bytes_cnt = 0; -} - #ifdef CONFIG_RFS_ACCEL static void enic_free_rx_cpu_rmap(struct enic *enic) { @@ -1407,10 +1414,8 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) if (err) work_done = work_to_do; if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) - /* Call the function which refreshes - * the intr coalescing timer value based on - * the traffic. This is supported only in - * the case of MSI-x mode + /* Call the function which refreshes the intr coalescing timer + * value based on the traffic. */ enic_calc_int_moderation(enic, &enic->rq[rq]); @@ -1569,12 +1574,6 @@ static void enic_set_rx_coal_setting(struct enic *enic) int index = -1; struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting; - /* If intr mode is not MSIX, do not do adaptive coalescing */ - if (VNIC_DEV_INTR_MODE_MSIX != vnic_dev_get_intr_mode(enic->vdev)) { - netdev_info(enic->netdev, "INTR mode is not MSIX, Not initializing adaptive coalescing"); - return; - } - /* 1. Read the link speed from fw * 2. Pick the default range for the speed * 3. Update it in enic->rx_coalesce_setting diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c index d1017509b08a..f7b42483921c 100644 --- a/drivers/net/ethernet/ec_bhf.c +++ b/drivers/net/ethernet/ec_bhf.c @@ -604,19 +604,7 @@ static struct pci_driver pci_driver = { .probe = ec_bhf_probe, .remove = ec_bhf_remove, }; - -static int __init ec_bhf_init(void) -{ - return pci_register_driver(&pci_driver); -} - -static void __exit ec_bhf_exit(void) -{ - pci_unregister_driver(&pci_driver); -} - -module_init(ec_bhf_init); -module_exit(ec_bhf_exit); +module_pci_driver(pci_driver); module_param(polling_frequency, long, S_IRUGO); MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns"); diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 8d12b41b3b19..cb5777bb7429 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -37,7 +37,7 @@ #include "be_hw.h" #include "be_roce.h" -#define DRV_VER "10.6.0.2" +#define DRV_VER "10.6.0.3" #define DRV_NAME "be2net" #define BE_NAME "Emulex BladeEngine2" #define BE3_NAME "Emulex BladeEngine3" diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 9eac3227d2ca..ecad46f79653 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -88,19 +88,21 @@ static inline void *embedded_payload(struct be_mcc_wrb *wrb) return wrb->payload.embedded_payload; } -static void be_mcc_notify(struct be_adapter *adapter) +static int be_mcc_notify(struct be_adapter *adapter) { struct be_queue_info *mccq = &adapter->mcc_obj.q; u32 val = 0; if (be_check_error(adapter, BE_ERROR_ANY)) - return; + return -EIO; val |= mccq->id & DB_MCCQ_RING_ID_MASK; val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; wmb(); iowrite32(val, adapter->db + DB_MCCQ_OFFSET); + + return 0; } /* To 
check if valid bit is set, check the entire word as we don't know @@ -170,6 +172,12 @@ static void be_async_cmd_process(struct be_adapter *adapter, return; } + if (opcode == OPCODE_LOWLEVEL_SET_LOOPBACK_MODE && + subsystem == CMD_SUBSYSTEM_LOWLEVEL) { + complete(&adapter->et_cmd_compl); + return; + } + if ((opcode == OPCODE_COMMON_WRITE_FLASHROM || opcode == OPCODE_COMMON_WRITE_OBJECT) && subsystem == CMD_SUBSYSTEM_COMMON) { @@ -541,7 +549,9 @@ static int be_mcc_notify_wait(struct be_adapter *adapter) resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1); - be_mcc_notify(adapter); + status = be_mcc_notify(adapter); + if (status) + goto out; status = be_mcc_wait_compl(adapter); if (status == -EIO) @@ -1547,7 +1557,10 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) else hdr->version = 2; - be_mcc_notify(adapter); + status = be_mcc_notify(adapter); + if (status) + goto err; + adapter->stats_cmd_sent = true; err: @@ -1583,7 +1596,10 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter, req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num); req->cmd_params.params.reset_stats = 0; - be_mcc_notify(adapter); + status = be_mcc_notify(adapter); + if (status) + goto err; + adapter->stats_cmd_sent = true; err: @@ -1687,8 +1703,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter) OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req), wrb, NULL); - be_mcc_notify(adapter); - + status = be_mcc_notify(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; @@ -1860,7 +1875,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter, cpu_to_le32(set_eqd[i].delay_multiplier); } - be_mcc_notify(adapter); + status = be_mcc_notify(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; @@ -1953,7 +1968,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN); } - status = be_mcc_notify_wait(adapter); + status = be_mcc_notify(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; @@ -2320,7 +2335,10 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma + sizeof(struct lancer_cmd_req_write_object))); - be_mcc_notify(adapter); + status = be_mcc_notify(adapter); + if (status) + goto err_unlock; + spin_unlock_bh(&adapter->mcc_lock); if (!wait_for_completion_timeout(&adapter->et_cmd_compl, @@ -2491,7 +2509,10 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, req->params.op_code = cpu_to_le32(flash_opcode); req->params.data_buf_size = cpu_to_le32(buf_size); - be_mcc_notify(adapter); + status = be_mcc_notify(adapter); + if (status) + goto err_unlock; + spin_unlock_bh(&adapter->mcc_lock); if (!wait_for_completion_timeout(&adapter->et_cmd_compl, @@ -2585,7 +2606,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; - goto err; + goto err_unlock; } req = embedded_payload(wrb); @@ -2599,8 +2620,19 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, req->loopback_type = loopback_type; req->loopback_state = enable; - status = be_mcc_notify_wait(adapter); -err: + status = be_mcc_notify(adapter); + if (status) + goto err_unlock; + + spin_unlock_bh(&adapter->mcc_lock); + + if (!wait_for_completion_timeout(&adapter->et_cmd_compl, + msecs_to_jiffies(SET_LB_MODE_TIMEOUT))) + status = -ETIMEDOUT; + + return status; + +err_unlock: 
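be_cmd_set_loopback() above now posts the command, drops the MCC lock and waits on adapter->et_cmd_compl with SET_LB_MODE_TIMEOUT, relying on the async completion added to be_async_cmd_process(). The userspace analogue below uses a pthread condition variable to model completion plus wait_for_completion_timeout(); it is a sketch of the pattern, not the driver's code.

#include <stdio.h>
#include <pthread.h>
#include <time.h>
#include <errno.h>
#include <unistd.h>

struct toy_completion {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int done;
};

static void toy_complete(struct toy_completion *c)
{
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
}

/* returns 0 on completion, -ETIMEDOUT otherwise */
static int toy_wait_timeout(struct toy_completion *c, int timeout_ms)
{
        struct timespec ts;
        int ret = 0;

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += timeout_ms / 1000;
        ts.tv_nsec += (timeout_ms % 1000) * 1000000L;
        if (ts.tv_nsec >= 1000000000L) {
                ts.tv_sec++;
                ts.tv_nsec -= 1000000000L;
        }

        pthread_mutex_lock(&c->lock);
        while (!c->done && ret == 0)
                ret = pthread_cond_timedwait(&c->cond, &c->lock, &ts);
        pthread_mutex_unlock(&c->lock);

        return c->done ? 0 : -ETIMEDOUT;
}

static struct toy_completion cmd_compl = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
};

/* stands in for the async completion delivered by be_async_cmd_process() */
static void *firmware_thread(void *arg)
{
        (void)arg;
        usleep(100 * 1000);              /* firmware "processes" the command */
        toy_complete(&cmd_compl);
        return NULL;
}

int main(void)
{
        pthread_t fw;

        pthread_create(&fw, NULL, firmware_thread, NULL);

        /* post the command, then wait for the async completion with a
         * timeout instead of blocking on the MCC queue with the lock held
         */
        printf("wait: %d\n", toy_wait_timeout(&cmd_compl, 12000));

        pthread_join(fw, NULL);
        return 0;
}

The driver-side benefit is the same shape as here: the MCC lock is not held across the firmware round trip, and a missing completion surfaces as -ETIMEDOUT rather than a stuck command.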
spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2636,7 +2668,9 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, req->num_pkts = cpu_to_le32(num_pkts); req->loopback_type = cpu_to_le32(loopback_type); - be_mcc_notify(adapter); + status = be_mcc_notify(adapter); + if (status) + goto err; spin_unlock_bh(&adapter->mcc_lock); diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 2716e6f30d9a..a4479f7488d3 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -1495,6 +1495,8 @@ struct be_cmd_resp_acpi_wol_magic_config_v1 { #define BE_PME_D3COLD_CAP 0x80 /********************** LoopBack test *********************/ +#define SET_LB_MODE_TIMEOUT 12000 + struct be_cmd_req_loopback_test { struct be_cmd_req_hdr hdr; u32 loopback_type; @@ -1758,6 +1760,7 @@ struct be_cmd_req_set_mac_list { /*********************** HSW Config ***********************/ #define PORT_FWD_TYPE_VEPA 0x3 #define PORT_FWD_TYPE_VEB 0x2 +#define PORT_FWD_TYPE_PASSTHRU 0x1 #define ENABLE_MAC_SPOOFCHK 0x2 #define DISABLE_MAC_SPOOFCHK 0x3 diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index b2476dbfd103..d20ff054c1f7 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -847,10 +847,21 @@ err: static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type, u64 *status) { - be_cmd_set_loopback(adapter, adapter->hba_port_num, loopback_type, 1); + int ret; + + ret = be_cmd_set_loopback(adapter, adapter->hba_port_num, + loopback_type, 1); + if (ret) + return ret; + *status = be_cmd_loopback_test(adapter, adapter->hba_port_num, loopback_type, 1500, 2, 0xabc); - be_cmd_set_loopback(adapter, adapter->hba_port_num, BE_NO_LOOPBACK, 1); + + ret = be_cmd_set_loopback(adapter, adapter->hba_port_num, + BE_NO_LOOPBACK, 1); + if (ret) + return ret; + return *status; } diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 6f642426308c..c996dd76f546 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1254,7 +1254,7 @@ static bool be_send_pkt_to_bmc(struct be_adapter *adapter, if (is_udp_pkt((*skb))) { struct udphdr *udp = udp_hdr((*skb)); - switch (udp->dest) { + switch (ntohs(udp->dest)) { case DHCP_CLIENT_PORT: os2bmc = is_dhcp_client_filt_enabled(adapter); goto done; @@ -3529,15 +3529,15 @@ err: static int be_setup_wol(struct be_adapter *adapter, bool enable) { + struct device *dev = &adapter->pdev->dev; struct be_dma_mem cmd; - int status = 0; u8 mac[ETH_ALEN]; + int status; eth_zero_addr(mac); cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); - cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, - GFP_KERNEL); + cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL); if (!cmd.va) return -ENOMEM; @@ -3546,24 +3546,18 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable) PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK); if (status) { - dev_err(&adapter->pdev->dev, - "Could not enable Wake-on-lan\n"); - dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, - cmd.dma); - return status; + dev_err(dev, "Could not enable Wake-on-lan\n"); + goto err; } - status = be_cmd_enable_magic_wol(adapter, - adapter->netdev->dev_addr, - &cmd); - pci_enable_wake(adapter->pdev, PCI_D3hot, 1); - pci_enable_wake(adapter->pdev, 
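The switch (ntohs(udp->dest)) change above matters because UDP header fields are big-endian on the wire, so comparing them raw against host-order constants such as DHCP_CLIENT_PORT only happens to work on big-endian CPUs. A minimal demonstration, assuming nothing about the driver beyond that:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>   /* htons()/ntohs() */

#define DHCP_CLIENT_PORT 68

int main(void)
{
        /* what would sit in udp->dest on the wire for port 68 */
        uint16_t wire_dest = htons(DHCP_CLIENT_PORT);

        /* wrong on little-endian hosts: compares 0x4400 against 68 */
        printf("raw compare matches:   %s\n",
               wire_dest == DHCP_CLIENT_PORT ? "yes" : "no");

        /* correct everywhere: convert to host order first */
        printf("ntohs compare matches: %s\n",
               ntohs(wire_dest) == DHCP_CLIENT_PORT ? "yes" : "no");
        return 0;
}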
PCI_D3cold, 1); } else { - status = be_cmd_enable_magic_wol(adapter, mac, &cmd); - pci_enable_wake(adapter->pdev, PCI_D3hot, 0); - pci_enable_wake(adapter->pdev, PCI_D3cold, 0); + ether_addr_copy(mac, adapter->netdev->dev_addr); } - dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); + status = be_cmd_enable_magic_wol(adapter, mac, &cmd); + pci_enable_wake(adapter->pdev, PCI_D3hot, enable); + pci_enable_wake(adapter->pdev, PCI_D3cold, enable); +err: + dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma); return status; } @@ -4924,7 +4918,7 @@ static bool be_check_ufi_compatibility(struct be_adapter *adapter, { if (!fhdr) { dev_err(&adapter->pdev->dev, "Invalid FW UFI file"); - return -1; + return false; } /* First letter of the build version is used to identify @@ -5079,9 +5073,6 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, int status = 0; u8 hsw_mode; - if (!sriov_enabled(adapter)) - return 0; - /* BE and Lancer chips support VEB mode only */ if (BEx_chip(adapter) || lancer_chip(adapter)) { hsw_mode = PORT_FWD_TYPE_VEB; @@ -5091,6 +5082,9 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, NULL); if (status) return 0; + + if (hsw_mode == PORT_FWD_TYPE_PASSTHRU) + return 0; } return ndo_dflt_bridge_getlink(skb, pid, seq, dev, @@ -5813,7 +5807,6 @@ static int be_pci_resume(struct pci_dev *pdev) if (status) return status; - pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); status = be_resume(adapter); @@ -5893,7 +5886,6 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev) return PCI_ERS_RESULT_DISCONNECT; pci_set_master(pdev); - pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); /* Check if card is ok and fw is ready */ diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index a15663ad7f5e..7a8386a8244d 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -506,12 +506,6 @@ int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr) break; default: - /* - * register RXMTRL must be set in order to do V1 packets, - * therefore it is not possible to time stamp both V1 Sync and - * Delay_Req messages and hardware does not support - * timestamping all packets => return error - */ fep->hwts_rx_en = 1; config.rx_filter = HWTSTAMP_FILTER_ALL; break; diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index ff875028fdff..648ca85c5859 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -109,15 +109,15 @@ #define TX_TIMEOUT (1*HZ) -const char gfar_driver_version[] = "1.3"; +const char gfar_driver_version[] = "2.0"; static int gfar_enet_open(struct net_device *dev); static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); static void gfar_reset_task(struct work_struct *work); static void gfar_timeout(struct net_device *dev); static int gfar_close(struct net_device *dev); -static struct sk_buff *gfar_new_skb(struct net_device *dev, - dma_addr_t *bufaddr); +static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue, + int alloc_cnt); static int gfar_set_mac_address(struct net_device *dev); static int gfar_change_mtu(struct net_device *dev, int new_mtu); static irqreturn_t gfar_error(int irq, void *dev_id); @@ -141,8 +141,7 @@ static void gfar_netpoll(struct net_device *dev); #endif int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); static void gfar_clean_tx_ring(struct 
gfar_priv_tx_q *tx_queue); -static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, - int amount_pull, struct napi_struct *napi); +static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb); static void gfar_halt_nodisable(struct gfar_private *priv); static void gfar_clear_exact_match(struct net_device *dev); static void gfar_set_mac_for_addr(struct net_device *dev, int num, @@ -169,17 +168,15 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, bdp->lstatus = cpu_to_be32(lstatus); } -static int gfar_init_bds(struct net_device *ndev) +static void gfar_init_bds(struct net_device *ndev) { struct gfar_private *priv = netdev_priv(ndev); struct gfar __iomem *regs = priv->gfargrp[0].regs; struct gfar_priv_tx_q *tx_queue = NULL; struct gfar_priv_rx_q *rx_queue = NULL; struct txbd8 *txbdp; - struct rxbd8 *rxbdp; u32 __iomem *rfbptr; int i, j; - dma_addr_t bufaddr; for (i = 0; i < priv->num_tx_queues; i++) { tx_queue = priv->tx_queue[i]; @@ -207,40 +204,26 @@ static int gfar_init_bds(struct net_device *ndev) rfbptr = ®s->rfbptr0; for (i = 0; i < priv->num_rx_queues; i++) { rx_queue = priv->rx_queue[i]; - rx_queue->cur_rx = rx_queue->rx_bd_base; - rx_queue->skb_currx = 0; - rxbdp = rx_queue->rx_bd_base; - for (j = 0; j < rx_queue->rx_ring_size; j++) { - struct sk_buff *skb = rx_queue->rx_skbuff[j]; + rx_queue->next_to_clean = 0; + rx_queue->next_to_use = 0; + rx_queue->next_to_alloc = 0; - if (skb) { - bufaddr = be32_to_cpu(rxbdp->bufPtr); - } else { - skb = gfar_new_skb(ndev, &bufaddr); - if (!skb) { - netdev_err(ndev, "Can't allocate RX buffers\n"); - return -ENOMEM; - } - rx_queue->rx_skbuff[j] = skb; - } - - gfar_init_rxbdp(rx_queue, rxbdp, bufaddr); - rxbdp++; - } + /* make sure next_to_clean != next_to_use after this + * by leaving at least 1 unused descriptor + */ + gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue)); rx_queue->rfbptr = rfbptr; rfbptr += 2; } - - return 0; } static int gfar_alloc_skb_resources(struct net_device *ndev) { void *vaddr; dma_addr_t addr; - int i, j, k; + int i, j; struct gfar_private *priv = netdev_priv(ndev); struct device *dev = priv->dev; struct gfar_priv_tx_q *tx_queue = NULL; @@ -279,7 +262,8 @@ static int gfar_alloc_skb_resources(struct net_device *ndev) rx_queue = priv->rx_queue[i]; rx_queue->rx_bd_base = vaddr; rx_queue->rx_bd_dma_base = addr; - rx_queue->dev = ndev; + rx_queue->ndev = ndev; + rx_queue->dev = dev; addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; } @@ -294,25 +278,20 @@ static int gfar_alloc_skb_resources(struct net_device *ndev) if (!tx_queue->tx_skbuff) goto cleanup; - for (k = 0; k < tx_queue->tx_ring_size; k++) - tx_queue->tx_skbuff[k] = NULL; + for (j = 0; j < tx_queue->tx_ring_size; j++) + tx_queue->tx_skbuff[j] = NULL; } for (i = 0; i < priv->num_rx_queues; i++) { rx_queue = priv->rx_queue[i]; - rx_queue->rx_skbuff = - kmalloc_array(rx_queue->rx_ring_size, - sizeof(*rx_queue->rx_skbuff), - GFP_KERNEL); - if (!rx_queue->rx_skbuff) + rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size, + sizeof(*rx_queue->rx_buff), + GFP_KERNEL); + if (!rx_queue->rx_buff) goto cleanup; - - for (j = 0; j < rx_queue->rx_ring_size; j++) - rx_queue->rx_skbuff[j] = NULL; } - if (gfar_init_bds(ndev)) - goto cleanup; + gfar_init_bds(ndev); return 0; @@ -354,10 +333,8 @@ static void gfar_init_rqprm(struct gfar_private *priv) } } -static void gfar_rx_buff_size_config(struct gfar_private *priv) +static void 
gfar_rx_offload_en(struct gfar_private *priv) { - int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN; - /* set this when rx hw offload (TOE) functions are being used */ priv->uses_rxfcb = 0; @@ -366,16 +343,6 @@ static void gfar_rx_buff_size_config(struct gfar_private *priv) if (priv->hwts_rx_en) priv->uses_rxfcb = 1; - - if (priv->uses_rxfcb) - frame_size += GMAC_FCB_LEN; - - frame_size += priv->padding; - - frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) + - INCREMENTAL_BUFFER_SIZE; - - priv->rx_buffer_size = frame_size; } static void gfar_mac_rx_config(struct gfar_private *priv) @@ -609,9 +576,8 @@ static int gfar_alloc_rx_queues(struct gfar_private *priv) if (!priv->rx_queue[i]) return -ENOMEM; - priv->rx_queue[i]->rx_skbuff = NULL; priv->rx_queue[i]->qindex = i; - priv->rx_queue[i]->dev = priv->ndev; + priv->rx_queue[i]->ndev = priv->ndev; } return 0; } @@ -1203,12 +1169,11 @@ void gfar_mac_reset(struct gfar_private *priv) udelay(3); - /* Compute rx_buff_size based on config flags */ - gfar_rx_buff_size_config(priv); + gfar_rx_offload_en(priv); /* Initialize the max receive frame/buffer lengths */ - gfar_write(®s->maxfrm, priv->rx_buffer_size); - gfar_write(®s->mrblr, priv->rx_buffer_size); + gfar_write(®s->maxfrm, GFAR_JUMBO_FRAME_SIZE); + gfar_write(®s->mrblr, GFAR_RXB_SIZE); /* Initialize the Minimum Frame Length Register */ gfar_write(®s->minflr, MINFLR_INIT_SETTINGS); @@ -1216,12 +1181,11 @@ void gfar_mac_reset(struct gfar_private *priv) /* Initialize MACCFG2. */ tempval = MACCFG2_INIT_SETTINGS; - /* If the mtu is larger than the max size for standard - * ethernet frames (ie, a jumbo frame), then set maccfg2 - * to allow huge frames, and to check the length + /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1 + * are marked as truncated. Avoid this by MACCFG2[Huge Frame]=1, + * and by checking RxBD[LG] and discarding larger than MAXFRM. 
*/ - if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE || - gfar_has_errata(priv, GFAR_ERRATA_74)) + if (gfar_has_errata(priv, GFAR_ERRATA_74)) tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK; gfar_write(®s->maccfg2, tempval); @@ -1432,8 +1396,6 @@ static int gfar_probe(struct platform_device *ofdev) priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) dev->needed_headroom = GMAC_FCB_LEN; - priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; - /* Initializing some of the rx/tx queue level parameters */ for (i = 0; i < priv->num_tx_queues; i++) { priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; @@ -1639,10 +1601,7 @@ static int gfar_restore(struct device *dev) return 0; } - if (gfar_init_bds(ndev)) { - free_skb_resources(priv); - return -ENOMEM; - } + gfar_init_bds(ndev); gfar_mac_reset(priv); @@ -1933,26 +1892,32 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) { - struct rxbd8 *rxbdp; - struct gfar_private *priv = netdev_priv(rx_queue->dev); int i; - rxbdp = rx_queue->rx_bd_base; + struct rxbd8 *rxbdp = rx_queue->rx_bd_base; + + if (rx_queue->skb) + dev_kfree_skb(rx_queue->skb); for (i = 0; i < rx_queue->rx_ring_size; i++) { - if (rx_queue->rx_skbuff[i]) { - dma_unmap_single(priv->dev, be32_to_cpu(rxbdp->bufPtr), - priv->rx_buffer_size, - DMA_FROM_DEVICE); - dev_kfree_skb_any(rx_queue->rx_skbuff[i]); - rx_queue->rx_skbuff[i] = NULL; - } + struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i]; + rxbdp->lstatus = 0; rxbdp->bufPtr = 0; rxbdp++; + + if (!rxb->page) + continue; + + dma_unmap_single(rx_queue->dev, rxb->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + __free_page(rxb->page); + + rxb->page = NULL; } - kfree(rx_queue->rx_skbuff); - rx_queue->rx_skbuff = NULL; + + kfree(rx_queue->rx_buff); + rx_queue->rx_buff = NULL; } /* If there are any tx skbs or rx skbs still around, free them. @@ -1977,7 +1942,7 @@ static void free_skb_resources(struct gfar_private *priv) for (i = 0; i < priv->num_rx_queues; i++) { rx_queue = priv->rx_queue[i]; - if (rx_queue->rx_skbuff) + if (rx_queue->rx_buff) free_skb_rx_queue(rx_queue); } @@ -2535,7 +2500,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu) struct gfar_private *priv = netdev_priv(dev); int frame_size = new_mtu + ETH_HLEN; - if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { + if ((frame_size < 64) || (frame_size > GFAR_JUMBO_FRAME_SIZE)) { netif_err(priv, drv, dev, "Invalid MTU setting\n"); return -EINVAL; } @@ -2589,15 +2554,6 @@ static void gfar_timeout(struct net_device *dev) schedule_work(&priv->reset_task); } -static void gfar_align_skb(struct sk_buff *skb) -{ - /* We need the data buffer to be aligned properly. 
We will reserve - * as many bytes as needed to align the data properly - */ - skb_reserve(skb, RXBUF_ALIGNMENT - - (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1))); -} - /* Interrupt Handler for Transmit complete */ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) { @@ -2704,49 +2660,85 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) netdev_tx_completed_queue(txq, howmany, bytes_sent); } -static struct sk_buff *gfar_alloc_skb(struct net_device *dev) +static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb) { - struct gfar_private *priv = netdev_priv(dev); - struct sk_buff *skb; + struct page *page; + dma_addr_t addr; - skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT); - if (!skb) - return NULL; + page = dev_alloc_page(); + if (unlikely(!page)) + return false; - gfar_align_skb(skb); + addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(rxq->dev, addr))) { + __free_page(page); - return skb; + return false; + } + + rxb->dma = addr; + rxb->page = page; + rxb->page_offset = 0; + + return true; } -static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr) +static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue) { - struct gfar_private *priv = netdev_priv(dev); - struct sk_buff *skb; - dma_addr_t addr; + struct gfar_private *priv = netdev_priv(rx_queue->ndev); + struct gfar_extra_stats *estats = &priv->extra_stats; - skb = gfar_alloc_skb(dev); - if (!skb) - return NULL; + netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n"); + atomic64_inc(&estats->rx_alloc_err); +} - addr = dma_map_single(priv->dev, skb->data, - priv->rx_buffer_size, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(priv->dev, addr))) { - dev_kfree_skb_any(skb); - return NULL; +static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue, + int alloc_cnt) +{ + struct rxbd8 *bdp; + struct gfar_rx_buff *rxb; + int i; + + i = rx_queue->next_to_use; + bdp = &rx_queue->rx_bd_base[i]; + rxb = &rx_queue->rx_buff[i]; + + while (alloc_cnt--) { + /* try reuse page */ + if (unlikely(!rxb->page)) { + if (unlikely(!gfar_new_page(rx_queue, rxb))) { + gfar_rx_alloc_err(rx_queue); + break; + } + } + + /* Setup the new RxBD */ + gfar_init_rxbdp(rx_queue, bdp, + rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT); + + /* Update to the next pointer */ + bdp++; + rxb++; + + if (unlikely(++i == rx_queue->rx_ring_size)) { + i = 0; + bdp = rx_queue->rx_bd_base; + rxb = rx_queue->rx_buff; + } } - *bufaddr = addr; - return skb; + rx_queue->next_to_use = i; + rx_queue->next_to_alloc = i; } -static inline void count_errors(unsigned short status, struct net_device *dev) +static void count_errors(u32 lstatus, struct net_device *ndev) { - struct gfar_private *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; + struct gfar_private *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; struct gfar_extra_stats *estats = &priv->extra_stats; /* If the packet was truncated, none of the other errors matter */ - if (status & RXBD_TRUNCATED) { + if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) { stats->rx_length_errors++; atomic64_inc(&estats->rx_trunc); @@ -2754,25 +2746,25 @@ static inline void count_errors(unsigned short status, struct net_device *dev) return; } /* Count the errors, if there were any */ - if (status & (RXBD_LARGE | RXBD_SHORT)) { + if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) { stats->rx_length_errors++; - if (status & RXBD_LARGE) + if (lstatus & 
BD_LFLAG(RXBD_LARGE)) atomic64_inc(&estats->rx_large); else atomic64_inc(&estats->rx_short); } - if (status & RXBD_NONOCTET) { + if (lstatus & BD_LFLAG(RXBD_NONOCTET)) { stats->rx_frame_errors++; atomic64_inc(&estats->rx_nonoctet); } - if (status & RXBD_CRCERR) { + if (lstatus & BD_LFLAG(RXBD_CRCERR)) { atomic64_inc(&estats->rx_crcerr); stats->rx_crc_errors++; } - if (status & RXBD_OVERRUN) { + if (lstatus & BD_LFLAG(RXBD_OVERRUN)) { atomic64_inc(&estats->rx_overrun); - stats->rx_crc_errors++; + stats->rx_over_errors++; } } @@ -2823,6 +2815,93 @@ static irqreturn_t gfar_transmit(int irq, void *grp_id) return IRQ_HANDLED; } +static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus, + struct sk_buff *skb, bool first) +{ + unsigned int size = lstatus & BD_LENGTH_MASK; + struct page *page = rxb->page; + + /* Remove the FCS from the packet length */ + if (likely(lstatus & BD_LFLAG(RXBD_LAST))) + size -= ETH_FCS_LEN; + + if (likely(first)) + skb_put(skb, size); + else + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rxb->page_offset + RXBUF_ALIGNMENT, + size, GFAR_RXB_TRUESIZE); + + /* try reuse page */ + if (unlikely(page_count(page) != 1)) + return false; + + /* change offset to the other half */ + rxb->page_offset ^= GFAR_RXB_TRUESIZE; + + atomic_inc(&page->_count); + + return true; +} + +static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq, + struct gfar_rx_buff *old_rxb) +{ + struct gfar_rx_buff *new_rxb; + u16 nta = rxq->next_to_alloc; + + new_rxb = &rxq->rx_buff[nta]; + + /* find next buf that can reuse a page */ + nta++; + rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0; + + /* copy page reference */ + *new_rxb = *old_rxb; + + /* sync for use by the device */ + dma_sync_single_range_for_device(rxq->dev, old_rxb->dma, + old_rxb->page_offset, + GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE); +} + +static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue, + u32 lstatus, struct sk_buff *skb) +{ + struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean]; + struct page *page = rxb->page; + bool first = false; + + if (likely(!skb)) { + void *buff_addr = page_address(page) + rxb->page_offset; + + skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE); + if (unlikely(!skb)) { + gfar_rx_alloc_err(rx_queue); + return NULL; + } + skb_reserve(skb, RXBUF_ALIGNMENT); + first = true; + } + + dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset, + GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE); + + if (gfar_add_rx_frag(rxb, lstatus, skb, first)) { + /* reuse the free half of the page */ + gfar_reuse_rx_page(rx_queue, rxb); + } else { + /* page cannot be reused, unmap it */ + dma_unmap_page(rx_queue->dev, rxb->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + } + + /* clear rxb content */ + rxb->page = NULL; + + return skb; +} + static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) { /* If valid headers were found, and valid sums @@ -2837,10 +2916,9 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) } /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. 
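gfar_add_rx_frag() and gfar_reuse_rx_page() above treat each page as two half-page buffers and recycle the other half whenever the driver still holds the only reference to the page. The sketch below models only that bookkeeping (refcount test plus offset flip); a plain integer stands in for page->_count, and the half-page size is assumed to match GFAR_RXB_TRUESIZE, as the offset XOR suggests.

#include <stdio.h>
#include <stdbool.h>

#define TOY_PAGE_SIZE 4096
#define TOY_TRUESIZE  (TOY_PAGE_SIZE / 2)   /* half-page buffers */

struct toy_page {
        int refcount;                       /* stands in for page->_count */
};

struct toy_rx_buff {
        struct toy_page *page;
        unsigned int page_offset;           /* 0 or TOY_TRUESIZE */
};

/* called after the received half has been handed to the stack; returns true
 * if the other half of the page can be recycled into the ring
 */
static bool toy_try_reuse(struct toy_rx_buff *rxb)
{
        /* someone else (the stack) still references the page: give it up */
        if (rxb->page->refcount != 1)
                return false;

        /* flip to the other half and take a new reference for the ring */
        rxb->page_offset ^= TOY_TRUESIZE;
        rxb->page->refcount++;
        return true;
}

int main(void)
{
        struct toy_page page = { .refcount = 1 };
        struct toy_rx_buff rxb = { .page = &page, .page_offset = 0 };

        if (toy_try_reuse(&rxb))
                printf("reused page at offset %u, refcount %d\n",
                       rxb.page_offset, page.refcount);

        /* pretend the stack still holds its reference: no reuse this time */
        if (!toy_try_reuse(&rxb))
                printf("page busy (refcount %d), allocate a fresh one\n",
                       page.refcount);
        return 0;
}

When the page cannot be reused, the driver unmaps it and allocates a fresh page on the next refill, which is the else branch of gfar_get_next_rxbuff() above.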
*/ -static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, - int amount_pull, struct napi_struct *napi) +static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb) { - struct gfar_private *priv = netdev_priv(dev); + struct gfar_private *priv = netdev_priv(ndev); struct rxfcb *fcb = NULL; /* fcb is at the beginning if exists */ @@ -2849,10 +2927,8 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, /* Remove the FCB from the skb * Remove the padded bytes, if there are any */ - if (amount_pull) { - skb_record_rx_queue(skb, fcb->rq); - skb_pull(skb, amount_pull); - } + if (priv->uses_rxfcb) + skb_pull(skb, GMAC_FCB_LEN); /* Get receive timestamp from the skb */ if (priv->hwts_rx_en) { @@ -2866,24 +2942,20 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, if (priv->padding) skb_pull(skb, priv->padding); - if (dev->features & NETIF_F_RXCSUM) + if (ndev->features & NETIF_F_RXCSUM) gfar_rx_checksum(skb, fcb); /* Tell the skb what kind of packet this is */ - skb->protocol = eth_type_trans(skb, dev); + skb->protocol = eth_type_trans(skb, ndev); /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here. * Even if vlan rx accel is disabled, on some chips * RXFCB_VLN is pseudo randomly set. */ - if (dev->features & NETIF_F_HW_VLAN_CTAG_RX && + if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX && be16_to_cpu(fcb->flags) & RXFCB_VLN) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(fcb->vlctl)); - - /* Send the packet up the stack */ - napi_gro_receive(napi, skb); - } /* gfar_clean_rx_ring() -- Processes each frame in the rx ring @@ -2892,91 +2964,88 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, */ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) { - struct net_device *dev = rx_queue->dev; - struct rxbd8 *bdp, *base; - struct sk_buff *skb; - int pkt_len; - int amount_pull; - int howmany = 0; - struct gfar_private *priv = netdev_priv(dev); + struct net_device *ndev = rx_queue->ndev; + struct gfar_private *priv = netdev_priv(ndev); + struct rxbd8 *bdp; + int i, howmany = 0; + struct sk_buff *skb = rx_queue->skb; + int cleaned_cnt = gfar_rxbd_unused(rx_queue); + unsigned int total_bytes = 0, total_pkts = 0; /* Get the first full descriptor */ - bdp = rx_queue->cur_rx; - base = rx_queue->rx_bd_base; + i = rx_queue->next_to_clean; - amount_pull = priv->uses_rxfcb ? 
GMAC_FCB_LEN : 0; + while (rx_work_limit--) { + u32 lstatus; - while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY) && rx_work_limit--) { - struct sk_buff *newskb; - dma_addr_t bufaddr; + if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) { + gfar_alloc_rx_buffs(rx_queue, cleaned_cnt); + cleaned_cnt = 0; + } + + bdp = &rx_queue->rx_bd_base[i]; + lstatus = be32_to_cpu(bdp->lstatus); + if (lstatus & BD_LFLAG(RXBD_EMPTY)) + break; + /* order rx buffer descriptor reads */ rmb(); - /* Add another skb for the future */ - newskb = gfar_new_skb(dev, &bufaddr); + /* fetch next to clean buffer from the ring */ + skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb); + if (unlikely(!skb)) + break; - skb = rx_queue->rx_skbuff[rx_queue->skb_currx]; + cleaned_cnt++; + howmany++; - dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr), - priv->rx_buffer_size, DMA_FROM_DEVICE); - - if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_ERR) && - be16_to_cpu(bdp->length) > priv->rx_buffer_size)) - bdp->status = cpu_to_be16(RXBD_LARGE); - - /* We drop the frame if we failed to allocate a new buffer */ - if (unlikely(!newskb || - !(be16_to_cpu(bdp->status) & RXBD_LAST) || - be16_to_cpu(bdp->status) & RXBD_ERR)) { - count_errors(be16_to_cpu(bdp->status), dev); - - if (unlikely(!newskb)) { - newskb = skb; - bufaddr = be32_to_cpu(bdp->bufPtr); - } else if (skb) - dev_kfree_skb(skb); - } else { - /* Increment the number of packets */ - rx_queue->stats.rx_packets++; - howmany++; - - if (likely(skb)) { - pkt_len = be16_to_cpu(bdp->length) - - ETH_FCS_LEN; - /* Remove the FCS from the packet length */ - skb_put(skb, pkt_len); - rx_queue->stats.rx_bytes += pkt_len; - skb_record_rx_queue(skb, rx_queue->qindex); - gfar_process_frame(dev, skb, amount_pull, - &rx_queue->grp->napi_rx); + if (unlikely(++i == rx_queue->rx_ring_size)) + i = 0; - } else { - netif_warn(priv, rx_err, dev, "Missing skb!\n"); - rx_queue->stats.rx_dropped++; - atomic64_inc(&priv->extra_stats.rx_skbmissing); - } + rx_queue->next_to_clean = i; + + /* fetch next buffer if not the last in frame */ + if (!(lstatus & BD_LFLAG(RXBD_LAST))) + continue; + if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) { + count_errors(lstatus, ndev); + + /* discard faulty buffer */ + dev_kfree_skb(skb); + skb = NULL; + rx_queue->stats.rx_dropped++; + continue; } - rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb; + /* Increment the number of packets */ + total_pkts++; + total_bytes += skb->len; - /* Setup the new bdp */ - gfar_init_rxbdp(rx_queue, bdp, bufaddr); + skb_record_rx_queue(skb, rx_queue->qindex); - /* Update Last Free RxBD pointer for LFC */ - if (unlikely(rx_queue->rfbptr && priv->tx_actual_en)) - gfar_write(rx_queue->rfbptr, (u32)bdp); + gfar_process_frame(ndev, skb); - /* Update to the next pointer */ - bdp = next_bd(bdp, base, rx_queue->rx_ring_size); + /* Send the packet up the stack */ + napi_gro_receive(&rx_queue->grp->napi_rx, skb); - /* update to point at the next skb */ - rx_queue->skb_currx = (rx_queue->skb_currx + 1) & - RX_RING_MOD_MASK(rx_queue->rx_ring_size); + skb = NULL; } - /* Update the current rxbd pointer to be the next one */ - rx_queue->cur_rx = bdp; + /* Store incomplete frames for completion */ + rx_queue->skb = skb; + + rx_queue->stats.rx_packets += total_pkts; + rx_queue->stats.rx_bytes += total_bytes; + + if (cleaned_cnt) + gfar_alloc_rx_buffs(rx_queue, cleaned_cnt); + + /* Update Last Free RxBD pointer for LFC */ + if (unlikely(priv->tx_actual_en)) { + bdp = gfar_rxbd_lastfree(rx_queue); + gfar_write(rx_queue->rfbptr, (u32)bdp); + } return howmany; } @@ -3552,14 
+3621,8 @@ static noinline void gfar_update_link_state(struct gfar_private *priv) if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) { for (i = 0; i < priv->num_rx_queues; i++) { rx_queue = priv->rx_queue[i]; - bdp = rx_queue->cur_rx; - /* skip to previous bd */ - bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1, - rx_queue->rx_bd_base, - rx_queue->rx_ring_size); - - if (rx_queue->rfbptr) - gfar_write(rx_queue->rfbptr, (u32)bdp); + bdp = gfar_rxbd_lastfree(rx_queue); + gfar_write(rx_queue->rfbptr, (u32)bdp); } priv->tx_actual_en = 1; diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index daa1d37de642..44021243c187 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -71,11 +71,6 @@ struct ethtool_rx_list { /* Number of bytes to align the rx bufs to */ #define RXBUF_ALIGNMENT 64 -/* The number of bytes which composes a unit for the purpose of - * allocating data buffers. ie-for any given MTU, the data buffer - * will be the next highest multiple of 512 bytes. */ -#define INCREMENTAL_BUFFER_SIZE 512 - #define PHY_INIT_TIMEOUT 100000 #define DRV_NAME "gfar-enet" @@ -92,6 +87,8 @@ extern const char gfar_driver_version[]; #define DEFAULT_TX_RING_SIZE 256 #define DEFAULT_RX_RING_SIZE 256 +#define GFAR_RX_BUFF_ALLOC 16 + #define GFAR_RX_MAX_RING_SIZE 256 #define GFAR_TX_MAX_RING_SIZE 256 @@ -103,11 +100,14 @@ extern const char gfar_driver_version[]; #define DEFAULT_RX_LFC_THR 16 #define DEFAULT_LFC_PTVVAL 4 -#define DEFAULT_RX_BUFFER_SIZE 1536 +#define GFAR_RXB_SIZE 1536 +#define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \ + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#define GFAR_RXB_TRUESIZE 2048 + #define TX_RING_MOD_MASK(size) (size-1) #define RX_RING_MOD_MASK(size) (size-1) -#define JUMBO_BUFFER_SIZE 9728 -#define JUMBO_FRAME_SIZE 9600 +#define GFAR_JUMBO_FRAME_SIZE 9600 #define DEFAULT_FIFO_TX_THR 0x100 #define DEFAULT_FIFO_TX_STARVE 0x40 @@ -640,6 +640,7 @@ struct rmon_mib }; struct gfar_extra_stats { + atomic64_t rx_alloc_err; atomic64_t rx_large; atomic64_t rx_short; atomic64_t rx_nonoctet; @@ -651,7 +652,6 @@ struct gfar_extra_stats { atomic64_t eberr; atomic64_t tx_babt; atomic64_t tx_underrun; - atomic64_t rx_skbmissing; atomic64_t tx_timeout; }; @@ -1012,34 +1012,42 @@ struct rx_q_stats { unsigned long rx_dropped; }; +struct gfar_rx_buff { + dma_addr_t dma; + struct page *page; + unsigned int page_offset; +}; + /** * struct gfar_priv_rx_q - per rx queue structure - * @rx_skbuff: skb pointers - * @skb_currx: currently use skb pointer + * @rx_buff: Array of buffer info metadata structs * @rx_bd_base: First rx buffer descriptor - * @cur_rx: Next free rx ring entry + * @next_to_use: index of the next buffer to be alloc'd + * @next_to_clean: index of the next buffer to be cleaned * @qindex: index of this queue - * @dev: back pointer to the dev structure + * @ndev: back pointer to net_device * @rx_ring_size: Rx ring size * @rxcoalescing: enable/disable rx-coalescing * @rxic: receive interrupt coalescing vlaue */ struct gfar_priv_rx_q { - struct sk_buff **rx_skbuff __aligned(SMP_CACHE_BYTES); - dma_addr_t rx_bd_dma_base; + struct gfar_rx_buff *rx_buff __aligned(SMP_CACHE_BYTES); struct rxbd8 *rx_bd_base; - struct rxbd8 *cur_rx; - struct net_device *dev; - struct gfar_priv_grp *grp; + struct net_device *ndev; + struct device *dev; + u16 rx_ring_size; + u16 qindex; + struct gfar_priv_grp *grp; + u16 next_to_clean; + u16 next_to_use; + u16 next_to_alloc; + struct sk_buff *skb; 
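The reworked gfar_clean_rx_ring() refills the ring in batches of GFAR_RX_BUFF_ALLOC descriptors and tracks the ring with next_to_clean/next_to_use indices instead of an skb array. The sketch below models the "unused descriptors" arithmetic that drives that batching (the same calculation as the gfar_rxbd_unused() helper added further down in gianfar.h); the ring size, index values and function names here are illustrative.

/* Model of the rx ring accounting: one slot is always left empty so
 * that next_to_use == next_to_clean means "nothing free to refill".
 * Names are illustrative, not the driver's API. */
#include <assert.h>
#include <stdio.h>

#define RING_SIZE   256   /* DEFAULT_RX_RING_SIZE in the patch */
#define BATCH       16    /* GFAR_RX_BUFF_ALLOC in the patch */

static unsigned int ring_unused(unsigned int next_to_clean,
                                unsigned int next_to_use)
{
    if (next_to_clean > next_to_use)
        return next_to_clean - next_to_use - 1;
    return RING_SIZE + next_to_clean - next_to_use - 1;
}

int main(void)
{
    unsigned int clean = 10, use = 250;

    /* wrapped case: 10 + 256 - 250 - 1 = 15 free slots */
    assert(ring_unused(clean, use) == 15);

    /* refill only once a whole batch of descriptors is free */
    if (ring_unused(clean, use) >= BATCH)
        printf("refill %u descriptors\n", ring_unused(clean, use));
    else
        printf("defer refill (%u free)\n", ring_unused(clean, use));
    return 0;
}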
struct rx_q_stats stats; - u16 skb_currx; - u16 qindex; - unsigned int rx_ring_size; - /* RX Coalescing values */ + u32 __iomem *rfbptr; unsigned char rxcoalescing; unsigned long rxic; - u32 __iomem *rfbptr; + dma_addr_t rx_bd_dma_base; }; enum gfar_irqinfo_id { @@ -1109,7 +1117,6 @@ struct gfar_private { struct device *dev; struct net_device *ndev; enum gfar_errata errata; - unsigned int rx_buffer_size; u16 uses_rxfcb; u16 padding; @@ -1295,6 +1302,23 @@ static inline void gfar_clear_txbd_status(struct txbd8 *bdp) bdp->lstatus = cpu_to_be32(lstatus); } +static inline int gfar_rxbd_unused(struct gfar_priv_rx_q *rxq) +{ + if (rxq->next_to_clean > rxq->next_to_use) + return rxq->next_to_clean - rxq->next_to_use - 1; + + return rxq->rx_ring_size + rxq->next_to_clean - rxq->next_to_use - 1; +} + +static inline struct rxbd8 *gfar_rxbd_lastfree(struct gfar_priv_rx_q *rxq) +{ + int i; + + i = rxq->next_to_use ? rxq->next_to_use - 1 : rxq->rx_ring_size - 1; + + return &rxq->rx_bd_base[i]; +} + irqreturn_t gfar_receive(int irq, void *dev_id); int startup_gfar(struct net_device *dev); void stop_gfar(struct net_device *dev); diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index fda12fb32ec7..3020aaabf0e4 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -61,6 +61,8 @@ static void gfar_gdrvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo); static const char stat_gstrings[][ETH_GSTRING_LEN] = { + /* extra stats */ + "rx-allocation-errors", "rx-large-frame-errors", "rx-short-frame-errors", "rx-non-octet-errors", @@ -72,8 +74,8 @@ static const char stat_gstrings[][ETH_GSTRING_LEN] = { "ethernet-bus-error", "tx-babbling-errors", "tx-underrun-errors", - "rx-skb-missing-errors", "tx-timeout-errors", + /* rmon stats */ "tx-rx-64-frames", "tx-rx-65-127-frames", "tx-rx-128-255-frames", diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index d49bee38cd31..cc2d8b4b18e3 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -965,7 +965,6 @@ static struct platform_driver hip04_mac_driver = { .remove = hip04_remove, .driver = { .name = DRV_NAME, - .owner = THIS_MODULE, .of_match_table = hip04_mac_match, }, }; diff --git a/drivers/net/ethernet/hisilicon/hip04_mdio.c b/drivers/net/ethernet/hisilicon/hip04_mdio.c index b3bac25db99c..fca0a5be1f0f 100644 --- a/drivers/net/ethernet/hisilicon/hip04_mdio.c +++ b/drivers/net/ethernet/hisilicon/hip04_mdio.c @@ -174,7 +174,6 @@ static struct platform_driver hip04_mdio_driver = { .remove = hip04_mdio_remove, .driver = { .name = "hip04-mdio", - .owner = THIS_MODULE, .of_match_table = hip04_mdio_match, }, }; diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 29bbb628d712..7af870a3c549 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -79,6 +79,11 @@ static unsigned int rx_flush __read_mostly = 0; module_param(rx_flush, uint, 0644); MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use"); +static bool old_large_send __read_mostly; +module_param(old_large_send, bool, S_IRUGO); +MODULE_PARM_DESC(old_large_send, + "Use old large send method on firmware that supports the new method"); + struct ibmveth_stat { char name[ETH_GSTRING_LEN]; int offset; @@ -101,7 +106,8 @@ struct ibmveth_stat ibmveth_stats[] = { { "fw_enabled_ipv4_csum", 
IBMVETH_STAT_OFF(fw_ipv4_csum_support) }, { "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) }, { "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) }, - { "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) } + { "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) }, + { "fw_enabled_large_send", IBMVETH_STAT_OFF(fw_large_send_support) } }; /* simple methods of getting data from the current rxq entry */ @@ -848,25 +854,91 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data) return rc1 ? rc1 : rc2; } +static int ibmveth_set_tso(struct net_device *dev, u32 data) +{ + struct ibmveth_adapter *adapter = netdev_priv(dev); + unsigned long set_attr, clr_attr, ret_attr; + long ret1, ret2; + int rc1 = 0, rc2 = 0; + int restart = 0; + + if (netif_running(dev)) { + restart = 1; + adapter->pool_config = 1; + ibmveth_close(dev); + adapter->pool_config = 0; + } + + set_attr = 0; + clr_attr = 0; + + if (data) + set_attr = IBMVETH_ILLAN_LRG_SR_ENABLED; + else + clr_attr = IBMVETH_ILLAN_LRG_SR_ENABLED; + + ret1 = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr); + + if (ret1 == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) && + !old_large_send) { + ret2 = h_illan_attributes(adapter->vdev->unit_address, clr_attr, + set_attr, &ret_attr); + + if (ret2 != H_SUCCESS) { + netdev_err(dev, "unable to change tso settings. %d rc=%ld\n", + data, ret2); + + h_illan_attributes(adapter->vdev->unit_address, + set_attr, clr_attr, &ret_attr); + + if (data == 1) + dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); + rc1 = -EIO; + + } else { + adapter->fw_large_send_support = data; + adapter->large_send = data; + } + } else { + /* Older firmware version of large send offload does not + * support tcp6/ipv6 + */ + if (data == 1) { + dev->features &= ~NETIF_F_TSO6; + netdev_info(dev, "TSO feature requires all partitions to have updated driver"); + } + adapter->large_send = data; + } + + if (restart) + rc2 = ibmveth_open(dev); + + return rc1 ? rc1 : rc2; +} + static int ibmveth_set_features(struct net_device *dev, netdev_features_t features) { struct ibmveth_adapter *adapter = netdev_priv(dev); int rx_csum = !!(features & NETIF_F_RXCSUM); - int rc; - netdev_features_t changed = features ^ dev->features; - - if (features & NETIF_F_TSO & changed) - netdev_info(dev, "TSO feature requires all partitions to have updated driver"); + int large_send = !!(features & (NETIF_F_TSO | NETIF_F_TSO6)); + int rc1 = 0, rc2 = 0; - if (rx_csum == adapter->rx_csum) - return 0; + if (rx_csum != adapter->rx_csum) { + rc1 = ibmveth_set_csum_offload(dev, rx_csum); + if (rc1 && !adapter->rx_csum) + dev->features = + features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM); + } - rc = ibmveth_set_csum_offload(dev, rx_csum); - if (rc && !adapter->rx_csum) - dev->features = features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM); + if (large_send != adapter->large_send) { + rc2 = ibmveth_set_tso(dev, large_send); + if (rc2 && !adapter->large_send) + dev->features = + features & ~(NETIF_F_TSO | NETIF_F_TSO6); + } - return rc; + return rc1 ? 
rc1 : rc2; } static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data) @@ -917,7 +989,7 @@ static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) #define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1)) static int ibmveth_send(struct ibmveth_adapter *adapter, - union ibmveth_buf_desc *descs) + union ibmveth_buf_desc *descs, unsigned long mss) { unsigned long correlator; unsigned int retry_count; @@ -934,7 +1006,8 @@ static int ibmveth_send(struct ibmveth_adapter *adapter, descs[0].desc, descs[1].desc, descs[2].desc, descs[3].desc, descs[4].desc, descs[5].desc, - correlator, &correlator); + correlator, &correlator, mss, + adapter->fw_large_send_support); } while ((ret == H_BUSY) && (retry_count--)); if (ret != H_SUCCESS && ret != H_DROPPED) { @@ -955,6 +1028,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb, int last, i; int force_bounce = 0; dma_addr_t dma_addr; + unsigned long mss = 0; /* * veth handles a maximum of 6 segments including the header, so @@ -980,6 +1054,9 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb, desc_flags = IBMVETH_BUF_VALID; + if (skb_is_gso(skb) && adapter->fw_large_send_support) + desc_flags |= IBMVETH_BUF_LRG_SND; + if (skb->ip_summed == CHECKSUM_PARTIAL) { unsigned char *buf = skb_transport_header(skb) + skb->csum_offset; @@ -1007,7 +1084,7 @@ retry_bounce: descs[0].fields.flags_len = desc_flags | skb->len; descs[0].fields.address = adapter->bounce_buffer_dma; - if (ibmveth_send(adapter, descs)) { + if (ibmveth_send(adapter, descs, 0)) { adapter->tx_send_failed++; netdev->stats.tx_dropped++; } else { @@ -1041,16 +1118,23 @@ retry_bounce: descs[i+1].fields.address = dma_addr; } - if (skb_is_gso(skb) && !skb_is_gso_v6(skb)) { - /* Put -1 in the IP checksum to tell phyp it - * is a largesend packet and put the mss in the TCP checksum. - */ - ip_hdr(skb)->check = 0xffff; - tcp_hdr(skb)->check = cpu_to_be16(skb_shinfo(skb)->gso_size); - adapter->tx_large_packets++; + if (skb_is_gso(skb)) { + if (adapter->fw_large_send_support) { + mss = (unsigned long)skb_shinfo(skb)->gso_size; + adapter->tx_large_packets++; + } else if (!skb_is_gso_v6(skb)) { + /* Put -1 in the IP checksum to tell phyp it + * is a largesend packet. Put the mss in + * the TCP checksum. 
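The ibmveth transmit changes support two ways of telling the hypervisor the MSS of a large-send frame: older firmware expects -1 in the IP checksum field and the MSS in the TCP checksum field, while firmware that advertises IBMVETH_ILLAN_LRG_SND_SUPPORT takes the MSS as an extra h_send_logical_lan argument. A schematic sketch of that branch follows; the frame structure and demo_send() are made-up placeholders, not the real descriptor layout or hcall interface, and byte-order handling is omitted.

/* Illustrative only: shows which field carries the MSS in the old and
 * new ibmveth large-send schemes. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_frame {
    uint16_t ip_check;    /* IPv4 header checksum field */
    uint16_t tcp_check;   /* TCP checksum field */
    uint16_t gso_size;    /* MSS requested by the stack */
};

static void demo_send(struct demo_frame *f, bool fw_large_send, bool is_v6)
{
    unsigned long mss = 0;

    if (fw_large_send) {
        /* new scheme: MSS travels as an hcall parameter */
        mss = f->gso_size;
    } else if (!is_v6) {
        /* old scheme: flag the frame through the checksum fields */
        f->ip_check = 0xffff;
        f->tcp_check = f->gso_size;
    }

    printf("send: mss arg=%lu ip_check=0x%04x tcp_check=0x%04x\n",
           mss, f->ip_check, f->tcp_check);
}

int main(void)
{
    struct demo_frame f = { .gso_size = 1448 };

    demo_send(&f, true, false);   /* firmware with large-send support */
    demo_send(&f, false, false);  /* legacy IPv4-only method */
    return 0;
}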
+ */ + ip_hdr(skb)->check = 0xffff; + tcp_hdr(skb)->check = + cpu_to_be16(skb_shinfo(skb)->gso_size); + adapter->tx_large_packets++; + } } - if (ibmveth_send(adapter, descs)) { + if (ibmveth_send(adapter, descs, mss)) { adapter->tx_send_failed++; netdev->stats.tx_dropped++; } else { @@ -1401,6 +1485,8 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) struct ibmveth_adapter *adapter; unsigned char *mac_addr_p; unsigned int *mcastFilterSize_p; + long ret; + unsigned long ret_attr; dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n", dev->unit_address); @@ -1449,10 +1535,19 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) SET_NETDEV_DEV(netdev, &dev->dev); netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + netdev->features |= netdev->hw_features; - /* TSO is disabled by default */ - netdev->hw_features |= NETIF_F_TSO; + ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr); + + /* If running older firmware, TSO should not be enabled by default */ + if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) && + !old_large_send) { + netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; + netdev->features |= netdev->hw_features; + } else { + netdev->hw_features |= NETIF_F_TSO; + } memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN); diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h index 41dedb1fb2ae..4eade67fe30c 100644 --- a/drivers/net/ethernet/ibm/ibmveth.h +++ b/drivers/net/ethernet/ibm/ibmveth.h @@ -40,6 +40,8 @@ #define IbmVethMcastRemoveFilter 0x2UL #define IbmVethMcastClearFilterTable 0x3UL +#define IBMVETH_ILLAN_LRG_SR_ENABLED 0x0000000000010000UL +#define IBMVETH_ILLAN_LRG_SND_SUPPORT 0x0000000000008000UL #define IBMVETH_ILLAN_PADDED_PKT_CSUM 0x0000000000002000UL #define IBMVETH_ILLAN_TRUNK_PRI_MASK 0x0000000000000F00UL #define IBMVETH_ILLAN_IPV6_TCP_CSUM 0x0000000000000004UL @@ -59,13 +61,20 @@ static inline long h_send_logical_lan(unsigned long unit_address, unsigned long desc1, unsigned long desc2, unsigned long desc3, unsigned long desc4, unsigned long desc5, unsigned long desc6, - unsigned long corellator_in, unsigned long *corellator_out) + unsigned long corellator_in, unsigned long *corellator_out, + unsigned long mss, unsigned long large_send_support) { long rc; unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; - rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address, desc1, - desc2, desc3, desc4, desc5, desc6, corellator_in); + if (large_send_support) + rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address, + desc1, desc2, desc3, desc4, desc5, desc6, + corellator_in, mss); + else + rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address, + desc1, desc2, desc3, desc4, desc5, desc6, + corellator_in); *corellator_out = retbuf[0]; @@ -147,11 +156,13 @@ struct ibmveth_adapter { struct ibmveth_rx_q rx_queue; int pool_config; int rx_csum; + int large_send; void *bounce_buffer; dma_addr_t bounce_buffer_dma; u64 fw_ipv6_csum_support; u64 fw_ipv4_csum_support; + u64 fw_large_send_support; /* adapter specific stats */ u64 replenish_task_cycles; u64 replenish_no_mem; @@ -182,6 +193,7 @@ struct ibmveth_buf_desc_fields { #endif #define IBMVETH_BUF_VALID 0x80000000 #define IBMVETH_BUF_TOGGLE 0x40000000 +#define IBMVETH_BUF_LRG_SND 0x04000000 #define IBMVETH_BUF_NO_CSUM 0x02000000 #define IBMVETH_BUF_CSUM_GOOD 0x01000000 #define IBMVETH_BUF_LEN_MASK 0x00FFFFFF diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h 
b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 929e3d72a01e..9101f5c00f37 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -34,7 +34,7 @@ */ #define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR 0x0002 +#define I40E_FW_API_VERSION_MINOR 0x0004 struct i40e_aq_desc { __le16 flags; @@ -132,12 +132,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_list_func_capabilities = 0x000A, i40e_aqc_opc_list_dev_capabilities = 0x000B, - i40e_aqc_opc_set_cppm_configuration = 0x0103, - i40e_aqc_opc_set_arp_proxy_entry = 0x0104, - i40e_aqc_opc_set_ns_proxy_entry = 0x0105, - /* LAA */ - i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ i40e_aqc_opc_mac_address_read = 0x0107, i40e_aqc_opc_mac_address_write = 0x0108, @@ -262,7 +257,6 @@ enum i40e_admin_queue_opc { /* Tunnel commands */ i40e_aqc_opc_add_udp_tunnel = 0x0B00, i40e_aqc_opc_del_udp_tunnel = 0x0B01, - i40e_aqc_opc_tunnel_key_structure = 0x0B10, /* Async Events */ i40e_aqc_opc_event_lan_overflow = 0x1001, @@ -274,8 +268,6 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_oem_ocbb_initialize = 0xFE03, /* debug commands */ - i40e_aqc_opc_debug_get_deviceid = 0xFF00, - i40e_aqc_opc_debug_set_mode = 0xFF01, i40e_aqc_opc_debug_read_reg = 0xFF03, i40e_aqc_opc_debug_write_reg = 0xFF04, i40e_aqc_opc_debug_modify_reg = 0xFF07, @@ -509,7 +501,8 @@ struct i40e_aqc_mac_address_read { #define I40E_AQC_SAN_ADDR_VALID 0x20 #define I40E_AQC_PORT_ADDR_VALID 0x40 #define I40E_AQC_WOL_ADDR_VALID 0x80 -#define I40E_AQC_ADDR_VALID_MASK 0xf0 +#define I40E_AQC_MC_MAG_EN_VALID 0x100 +#define I40E_AQC_ADDR_VALID_MASK 0x1F0 u8 reserved[6]; __le32 addr_high; __le32 addr_low; @@ -532,7 +525,9 @@ struct i40e_aqc_mac_address_write { #define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 #define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 #define I40E_AQC_WRITE_TYPE_PORT 0x8000 -#define I40E_AQC_WRITE_TYPE_MASK 0xc000 +#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000 +#define I40E_AQC_WRITE_TYPE_MASK 0xC000 + __le16 mac_sah; __le32 mac_sal; u8 reserved[8]; @@ -1068,6 +1063,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes { __le16 seid; #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF __le16 vlan_tag; +#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF #define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 u8 reserved[8]; }; @@ -2064,6 +2060,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); #define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT) #define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8 #define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT) +#define I40E_AQC_CEE_FCOE_STATUS_SHIFT 0x8 +#define I40E_AQC_CEE_FCOE_STATUS_MASK (0x7 << I40E_AQC_CEE_FCOE_STATUS_SHIFT) +#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT 0xA +#define I40E_AQC_CEE_ISCSI_STATUS_MASK (0x7 << I40E_AQC_CEE_ISCSI_STATUS_SHIFT) +#define I40E_AQC_CEE_FIP_STATUS_SHIFT 0x10 +#define I40E_AQC_CEE_FIP_STATUS_MASK (0x7 << I40E_AQC_CEE_FIP_STATUS_SHIFT) struct i40e_aqc_get_cee_dcb_cfg_v1_resp { u8 reserved1; u8 oper_num_tc; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 0bae22da014d..07032229ee60 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -2391,7 +2391,7 @@ i40e_aq_erase_nvm_exit: #define I40E_DEV_FUNC_CAP_MSIX_VF 0x44 #define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR 0x45 #define I40E_DEV_FUNC_CAP_IEEE_1588 0x46 -#define I40E_DEV_FUNC_CAP_MFP_MODE_1 0xF1 +#define I40E_DEV_FUNC_CAP_FLEX10 0xF1 #define 
I40E_DEV_FUNC_CAP_CEM 0xF2 #define I40E_DEV_FUNC_CAP_IWARP 0x51 #define I40E_DEV_FUNC_CAP_LED 0x61 @@ -2416,6 +2416,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, u32 valid_functions, num_functions; u32 number, logical_id, phys_id; struct i40e_hw_capabilities *p; + u8 major_rev; u32 i = 0; u16 id; @@ -2433,6 +2434,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, number = le32_to_cpu(cap->number); logical_id = le32_to_cpu(cap->logical_id); phys_id = le32_to_cpu(cap->phys_id); + major_rev = cap->major_rev; switch (id) { case I40E_DEV_FUNC_CAP_SWITCH_MODE: @@ -2507,9 +2509,21 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, case I40E_DEV_FUNC_CAP_MSIX_VF: p->num_msix_vectors_vf = number; break; - case I40E_DEV_FUNC_CAP_MFP_MODE_1: - if (number == 1) - p->mfp_mode_1 = true; + case I40E_DEV_FUNC_CAP_FLEX10: + if (major_rev == 1) { + if (number == 1) { + p->flex10_enable = true; + p->flex10_capable = true; + } + } else { + /* Capability revision >= 2 */ + if (number & 1) + p->flex10_enable = true; + if (number & 2) + p->flex10_capable = true; + } + p->flex10_mode = logical_id; + p->flex10_status = phys_id; break; case I40E_DEV_FUNC_CAP_CEM: if (number == 1) @@ -2557,7 +2571,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, /* Software override ensuring FCoE is disabled if npar or mfp * mode because it is not supported in these modes. */ - if (p->npar_enable || p->mfp_mode_1) + if (p->npar_enable || p->flex10_enable) p->fcoe = false; /* count the enabled ports (aka the "not disabled" ports) */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 9a68c65b17ea..f2075d5b800c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -148,7 +148,9 @@ static struct i40e_stats i40e_gstrings_stats[] = { I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt), I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match), I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match), + I40E_PF_STAT("fdir_atr_status", stats.fd_atr_status), I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match), + I40E_PF_STAT("fdir_sb_status", stats.fd_sb_status), /* LPI stats */ I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status), @@ -1465,17 +1467,8 @@ static int i40e_get_ts_info(struct net_device *dev, info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ); + (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); return 0; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c index 9b987ccc9e82..b89856a5e313 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c @@ -116,6 +116,7 @@ exit: * @hw: pointer to our HW structure * @hmc_info: pointer to the HMC configuration information structure * @pd_index: which page descriptor index to manipulate + * 
@rsrc_pg: if not NULL, use preallocated page instead of allocating new one. * * This function: * 1. Initializes the pd entry @@ -129,12 +130,14 @@ exit: **/ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, - u32 pd_index) + u32 pd_index, + struct i40e_dma_mem *rsrc_pg) { i40e_status ret_code = 0; struct i40e_hmc_pd_table *pd_table; struct i40e_hmc_pd_entry *pd_entry; struct i40e_dma_mem mem; + struct i40e_dma_mem *page = &mem; u32 sd_idx, rel_pd_idx; u64 *pd_addr; u64 page_desc; @@ -155,18 +158,24 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; pd_entry = &pd_table->pd_entry[rel_pd_idx]; if (!pd_entry->valid) { - /* allocate a 4K backing page */ - ret_code = i40e_allocate_dma_mem(hw, &mem, i40e_mem_bp, - I40E_HMC_PAGED_BP_SIZE, - I40E_HMC_PD_BP_BUF_ALIGNMENT); - if (ret_code) - goto exit; + if (rsrc_pg) { + pd_entry->rsrc_pg = true; + page = rsrc_pg; + } else { + /* allocate a 4K backing page */ + ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp, + I40E_HMC_PAGED_BP_SIZE, + I40E_HMC_PD_BP_BUF_ALIGNMENT); + if (ret_code) + goto exit; + pd_entry->rsrc_pg = false; + } - pd_entry->bp.addr = mem; + pd_entry->bp.addr = *page; pd_entry->bp.sd_pd_index = pd_index; pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED; /* Set page address and valid bit */ - page_desc = mem.pa | 0x1; + page_desc = page->pa | 0x1; pd_addr = (u64 *)pd_table->pd_page_addr.va; pd_addr += rel_pd_idx; @@ -240,7 +249,8 @@ i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx); /* free memory here */ - ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr)); + if (!pd_entry->rsrc_pg) + ret_code = i40e_free_dma_mem(hw, &pd_entry->bp.addr); if (ret_code) goto exit; if (!pd_table->ref_cnt) diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h index 732a02660330..386416bf7267 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h @@ -62,6 +62,7 @@ struct i40e_hmc_bp { struct i40e_hmc_pd_entry { struct i40e_hmc_bp bp; u32 sd_index; + bool rsrc_pg; bool valid; }; @@ -218,7 +219,8 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw, i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, - u32 pd_index); + u32 pd_index, + struct i40e_dma_mem *rsrc_pg); i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, u32 idx); diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c index 0079ad7bcd0e..d399eaf5aad5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c @@ -387,7 +387,7 @@ static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw, /* update the pd table entry */ ret_code = i40e_add_pd_table_entry(hw, info->hmc_info, - i); + i, NULL); if (ret_code) { pd_error = true; break; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 48a52b35b614..6ce9086e558a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -39,7 +39,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 3 -#define DRV_VERSION_BUILD 4 +#define DRV_VERSION_BUILD 6 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." 
\ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -1123,6 +1123,18 @@ static void i40e_update_pf_stats(struct i40e_pf *pf) pf->stat_offsets_loaded, &osd->rx_lpi_count, &nsd->rx_lpi_count); + if (pf->flags & I40E_FLAG_FD_SB_ENABLED && + !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) + nsd->fd_sb_status = true; + else + nsd->fd_sb_status = false; + + if (pf->flags & I40E_FLAG_FD_ATR_ENABLED && + !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) + nsd->fd_atr_status = true; + else + nsd->fd_atr_status = false; + pf->stat_offsets_loaded = true; } @@ -7673,7 +7685,7 @@ static int i40e_sw_init(struct i40e_pf *pf) } /* MFP mode enabled */ - if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) { + if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) { pf->flags |= I40E_FLAG_MFP_ENABLED; dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); if (i40e_get_npar_bw_setting(pf)) @@ -7981,7 +7993,6 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return err; } -#ifdef HAVE_BRIDGE_ATTRIBS /** * i40e_ndo_bridge_setlink - Set the hardware bridge mode * @dev: the netdev being configured @@ -7995,7 +8006,8 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], * bridge mode enabled. **/ static int i40e_ndo_bridge_setlink(struct net_device *dev, - struct nlmsghdr *nlh) + struct nlmsghdr *nlh, + u16 flags) { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; @@ -8066,14 +8078,9 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, * Return the mode in which the hardware bridge is operating in * i.e VEB or VEPA. **/ -#ifdef HAVE_BRIDGE_FILTER static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, u32 filter_mask, int nlflags) -#else -static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, - struct net_device *dev, int nlflags) -#endif /* HAVE_BRIDGE_FILTER */ { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; @@ -8097,7 +8104,25 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode, nlflags, 0, 0, filter_mask, NULL); } -#endif /* HAVE_BRIDGE_ATTRIBS */ + +#define I40E_MAX_TUNNEL_HDR_LEN 80 +/** + * i40e_features_check - Validate encapsulated packet conforms to limits + * @skb: skb buff + * @netdev: This physical port's netdev + * @features: Offload features that the stack believes apply + **/ +static netdev_features_t i40e_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + if (skb->encapsulation && + (skb_inner_mac_header(skb) - skb_transport_header(skb) > + I40E_MAX_TUNNEL_HDR_LEN)) + return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); + + return features; +} static const struct net_device_ops i40e_netdev_ops = { .ndo_open = i40e_open, @@ -8133,10 +8158,9 @@ static const struct net_device_ops i40e_netdev_ops = { #endif .ndo_get_phys_port_id = i40e_get_phys_port_id, .ndo_fdb_add = i40e_ndo_fdb_add, -#ifdef HAVE_BRIDGE_ATTRIBS + .ndo_features_check = i40e_features_check, .ndo_bridge_getlink = i40e_ndo_bridge_getlink, .ndo_bridge_setlink = i40e_ndo_bridge_setlink, -#endif /* HAVE_BRIDGE_ATTRIBS */ }; /** @@ -9743,7 +9767,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = i40e_init_shared_code(hw); if (err) { - dev_info(&pdev->dev, "init_shared_code failed: %d\n", err); + dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n", + err); goto 
err_pf_reset; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 9a4f2bc70cd2..1fe230d2be5d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2616,6 +2616,8 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index))) writel(i, tx_ring->tail); + else + prefetchw(tx_desc + 1); return; diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 9a5a75b1e2bc..220371ece7c4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -213,7 +213,17 @@ struct i40e_hw_capabilities { bool dcb; bool fcoe; bool iscsi; /* Indicates iSCSI enabled */ - bool mfp_mode_1; + bool flex10_enable; + bool flex10_capable; + u32 flex10_mode; +#define I40E_FLEX10_MODE_UNKNOWN 0x0 +#define I40E_FLEX10_MODE_DCC 0x1 +#define I40E_FLEX10_MODE_DCI 0x2 + + u32 flex10_status; +#define I40E_FLEX10_STATUS_DCC_ERROR 0x1 +#define I40E_FLEX10_STATUS_VC_MODE 0x2 + bool mgmt_cem; bool ieee_1588; bool iwarp; @@ -1134,6 +1144,8 @@ struct i40e_hw_port_stats { u64 fd_atr_match; u64 fd_sb_match; u64 fd_atr_tunnel_match; + u32 fd_atr_status; + u32 fd_sb_status; /* EEE LPI */ u32 tx_lpi_status; u32 rx_lpi_status; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 23f95cdbdfcc..fdd7f5e3a66b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -160,13 +160,8 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf) **/ static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf) { - struct i40e_hw *hw = &pf->hw; - u32 reg; - - reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); - reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; - wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); - i40e_flush(hw); + i40e_vc_notify_vf_reset(vf); + i40e_reset_vf(vf, false); } /** @@ -542,11 +537,13 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) if (vf->port_vlan_id) i40e_vsi_add_pvid(vsi, vf->port_vlan_id); f = i40e_add_filter(vsi, vf->default_lan_addr.addr, - vf->port_vlan_id, true, false); + vf->port_vlan_id ? vf->port_vlan_id : -1, + true, false); if (!f) dev_info(&pf->pdev->dev, "Could not allocate VF MAC addr\n"); - f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id, + f = i40e_add_filter(vsi, brdcast, + vf->port_vlan_id ? vf->port_vlan_id : -1, true, false); if (!f) dev_info(&pf->pdev->dev, @@ -835,6 +832,7 @@ complete_reset: i40e_alloc_vf_res(vf); i40e_enable_vf_mappings(vf); set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); + clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); /* tell the VF the reset is done */ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); @@ -2023,7 +2021,8 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) } /* delete the temporary mac address */ - i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id, + i40e_del_filter(vsi, vf->default_lan_addr.addr, + vf->port_vlan_id ? 
vf->port_vlan_id : -1, true, false); /* Delete all the filters for this VSI - we're going to kill it @@ -2088,6 +2087,10 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, goto error_pvid; } + if (vsi->info.pvid == (vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT))) + /* duplicate request, so just return success */ + goto error_pvid; + if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) { dev_err(&pf->pdev->dev, "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n", diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index e715bccfb5d2..d5bd6f066921 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -34,7 +34,7 @@ */ #define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR 0x0002 +#define I40E_FW_API_VERSION_MINOR 0x0004 #define I40E_FW_API_VERSION_A0_MINOR 0x0000 struct i40e_aq_desc { @@ -133,12 +133,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_list_func_capabilities = 0x000A, i40e_aqc_opc_list_dev_capabilities = 0x000B, - i40e_aqc_opc_set_cppm_configuration = 0x0103, - i40e_aqc_opc_set_arp_proxy_entry = 0x0104, - i40e_aqc_opc_set_ns_proxy_entry = 0x0105, - /* LAA */ - i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ i40e_aqc_opc_mac_address_read = 0x0107, i40e_aqc_opc_mac_address_write = 0x0108, @@ -260,7 +255,6 @@ enum i40e_admin_queue_opc { /* Tunnel commands */ i40e_aqc_opc_add_udp_tunnel = 0x0B00, i40e_aqc_opc_del_udp_tunnel = 0x0B01, - i40e_aqc_opc_tunnel_key_structure = 0x0B10, /* Async Events */ i40e_aqc_opc_event_lan_overflow = 0x1001, @@ -272,8 +266,6 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_oem_ocbb_initialize = 0xFE03, /* debug commands */ - i40e_aqc_opc_debug_get_deviceid = 0xFF00, - i40e_aqc_opc_debug_set_mode = 0xFF01, i40e_aqc_opc_debug_read_reg = 0xFF03, i40e_aqc_opc_debug_write_reg = 0xFF04, i40e_aqc_opc_debug_modify_reg = 0xFF07, @@ -507,7 +499,8 @@ struct i40e_aqc_mac_address_read { #define I40E_AQC_SAN_ADDR_VALID 0x20 #define I40E_AQC_PORT_ADDR_VALID 0x40 #define I40E_AQC_WOL_ADDR_VALID 0x80 -#define I40E_AQC_ADDR_VALID_MASK 0xf0 +#define I40E_AQC_MC_MAG_EN_VALID 0x100 +#define I40E_AQC_ADDR_VALID_MASK 0x1F0 u8 reserved[6]; __le32 addr_high; __le32 addr_low; @@ -530,7 +523,9 @@ struct i40e_aqc_mac_address_write { #define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 #define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 #define I40E_AQC_WRITE_TYPE_PORT 0x8000 -#define I40E_AQC_WRITE_TYPE_MASK 0xc000 +#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000 +#define I40E_AQC_WRITE_TYPE_MASK 0xC000 + __le16 mac_sah; __le32 mac_sal; u8 reserved[8]; @@ -1066,6 +1061,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes { __le16 seid; #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF __le16 vlan_tag; +#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF #define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 u8 reserved[8]; }; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h index 931c88044300..adc6f71f40a8 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h @@ -62,6 +62,7 @@ struct i40e_hmc_bp { struct i40e_hmc_pd_entry { struct i40e_hmc_bp bp; u32 sd_index; + bool rsrc_pg; bool valid; }; @@ -218,7 +219,8 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw, i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, struct i40e_hmc_info 
*hmc_info, - u32 pd_index); + u32 pd_index, + struct i40e_dma_mem *rsrc_pg); i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, struct i40e_hmc_info *hmc_info, u32 idx); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 395f32f226c0..0f0e185b5c66 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1841,6 +1841,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index))) writel(i, tx_ring->tail); + else + prefetchw(tx_desc + 1); return; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index c463ec41579c..3969c6548af0 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -213,7 +213,17 @@ struct i40e_hw_capabilities { bool dcb; bool fcoe; bool iscsi; /* Indicates iSCSI enabled */ - bool mfp_mode_1; + bool flex10_enable; + bool flex10_capable; + u32 flex10_mode; +#define I40E_FLEX10_MODE_UNKNOWN 0x0 +#define I40E_FLEX10_MODE_DCC 0x1 +#define I40E_FLEX10_MODE_DCI 0x2 + + u32 flex10_status; +#define I40E_FLEX10_STATUS_DCC_ERROR 0x1 +#define I40E_FLEX10_STATUS_VC_MODE 0x2 + bool mgmt_cem; bool ieee_1588; bool iwarp; @@ -1109,6 +1119,8 @@ struct i40e_hw_port_stats { u64 fd_atr_match; u64 fd_sb_match; u64 fd_atr_tunnel_match; + u32 fd_atr_status; + u32 fd_sb_status; /* EEE LPI */ u32 tx_lpi_status; u32 rx_lpi_status; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 4ab4ebba07a1..c698523923e4 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -34,7 +34,7 @@ char i40evf_driver_name[] = "i40evf"; static const char i40evf_driver_string[] = "Intel(R) XL710/X710 Virtual Function Network Driver"; -#define DRV_VERSION "1.2.25" +#define DRV_VERSION "1.3.2" const char i40evf_driver_version[] = DRV_VERSION; static const char i40evf_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation."; @@ -406,7 +406,7 @@ static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter) /* The ideal configuration... * We have enough vectors to map one per queue. 
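i40evf_map_rings_to_vectors() spreads the rx and tx rings across the available MSI-X q_vectors; with the change in this hunk (== relaxed to >=) the one-ring-per-vector fast path is taken whenever there are at least as many vectors as rings, not only when the counts match exactly. A simplified round-robin model of that mapping is sketched here; map_ring_to_vector(), the combined rx+tx test and the ring counts are illustrative, not the driver's exact logic.

/* Simplified model of distributing rx/tx rings over MSI-X vectors,
 * in the spirit of i40evf_map_rings_to_vectors(); names are made up. */
#include <stdio.h>

static void map_ring_to_vector(const char *kind, int ring, int vector)
{
    printf("%s ring %d -> vector %d\n", kind, ring, vector);
}

static void map_rings(int q_vectors, int rx_rings, int tx_rings)
{
    int v = 0, r;

    if (q_vectors >= rx_rings + tx_rings) {
        /* enough vectors: one ring per vector */
        for (r = 0; r < rx_rings; r++, v++)
            map_ring_to_vector("rx", r, v);
        for (r = 0; r < tx_rings; r++, v++)
            map_ring_to_vector("tx", r, v);
        return;
    }

    /* otherwise spread rings round-robin over the vectors we have */
    for (r = 0; r < rx_rings; r++)
        map_ring_to_vector("rx", r, r % q_vectors);
    for (r = 0; r < tx_rings; r++)
        map_ring_to_vector("tx", r, (rx_rings + r) % q_vectors);
}

int main(void)
{
    map_rings(4, 2, 2);   /* fast path: one ring per vector */
    map_rings(2, 3, 3);   /* shared vectors */
    return 0;
}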
*/ - if (q_vectors == (rxr_remaining * 2)) { + if (q_vectors >= (rxr_remaining * 2)) { for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) i40evf_map_vector_to_rxq(adapter, v_start, rxr_idx); @@ -892,8 +892,10 @@ static void i40evf_set_rx_mode(struct net_device *netdev) break; } } + if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) + found = true; } - if (found) { + if (!found) { f->remove = true; adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; } @@ -1856,6 +1858,7 @@ static int i40evf_open(struct net_device *netdev) if (err) goto err_req_irq; + i40evf_add_filter(adapter, adapter->hw.mac.addr); i40evf_configure(adapter); err = i40evf_up_complete(adapter); @@ -1996,7 +1999,6 @@ static void i40evf_init_task(struct work_struct *work) struct i40evf_adapter, init_task.work); struct net_device *netdev = adapter->netdev; - struct i40evf_mac_filter *f; struct i40e_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; int i, err, bufsz; @@ -2130,16 +2132,6 @@ static void i40evf_init_task(struct work_struct *work) ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); - f = kzalloc(sizeof(*f), GFP_ATOMIC); - if (!f) - goto err_sw_init; - - ether_addr_copy(f->macaddr, adapter->hw.mac.addr); - f->add = true; - adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; - - list_add(&f->list, &adapter->mac_filter_list); - init_timer(&adapter->watchdog_timer); adapter->watchdog_timer.function = &i40evf_watchdog_timer; adapter->watchdog_timer.data = (unsigned long)adapter; diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index b0182dd31346..d19256994e5c 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -139,10 +139,6 @@ static s32 igb_check_for_link_media_swap(struct e1000_hw *hw) if (ret_val) return ret_val; - /* reset page to 0 */ - ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); - if (ret_val) - return ret_val; if (data & E1000_M88E1112_STATUS_LINK) port = E1000_MEDIA_PORT_OTHER; @@ -151,8 +147,20 @@ static s32 igb_check_for_link_media_swap(struct e1000_hw *hw) if (port && (hw->dev_spec._82575.media_port != port)) { hw->dev_spec._82575.media_port = port; hw->dev_spec._82575.media_changed = true; + } + + if (port == E1000_MEDIA_PORT_COPPER) { + /* reset page to 0 */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + igb_check_for_link_82575(hw); } else { - ret_val = igb_check_for_link_82575(hw); + igb_check_for_link_82575(hw); + /* reset page to 0 */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; } return 0; diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index d5673eb90c54..109cad928e71 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2396,10 +2396,6 @@ static int igb_get_ts_info(struct net_device *dev, info->rx_filters |= (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); return 0; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 2f70a9b152bd..fc7729e78f3d 100644 --- 
a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6621,22 +6621,25 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring, struct sk_buff *skb) { struct page *page = rx_buffer->page; + unsigned char *va = page_address(page) + rx_buffer->page_offset; unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); #if (PAGE_SIZE < 8192) unsigned int truesize = IGB_RX_BUFSZ; #else - unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); + unsigned int truesize = SKB_DATA_ALIGN(size); #endif + unsigned int pull_len; - if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) { - unsigned char *va = page_address(page) + rx_buffer->page_offset; + if (unlikely(skb_is_nonlinear(skb))) + goto add_tail_frag; - if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { - igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); - va += IGB_TS_HDR_LEN; - size -= IGB_TS_HDR_LEN; - } + if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) { + igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); + va += IGB_TS_HDR_LEN; + size -= IGB_TS_HDR_LEN; + } + if (likely(size <= IGB_RX_HDR_LEN)) { memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); /* page is not reserved, we can reuse buffer as-is */ @@ -6648,8 +6651,21 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring, return false; } + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + va += pull_len; + size -= pull_len; + +add_tail_frag: skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - rx_buffer->page_offset, size, truesize); + (unsigned long)va & ~PAGE_MASK, size, truesize); return igb_can_reuse_rx_page(rx_buffer, page, truesize); } @@ -6791,62 +6807,6 @@ static bool igb_is_non_eop(struct igb_ring *rx_ring, } /** - * igb_pull_tail - igb specific version of skb_pull_tail - * @rx_ring: rx descriptor ring packet is being transacted on - * @rx_desc: pointer to the EOP Rx descriptor - * @skb: pointer to current skb being adjusted - * - * This function is an igb specific version of __pskb_pull_tail. The - * main difference between this version and the original function is that - * this function can make several assumptions about the state of things - * that allow for significant optimizations versus the standard function. - * As a result we can do things like drop a frag and maintain an accurate - * truesize for the skb. 
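With igb_pull_tail() removed, igb_add_rx_frag() now copies just the packet headers into the skb's linear area (sized by eth_get_headlen() and rounded up to a long for the memcpy) and attaches the remaining payload as a page fragment. The sketch below shows that split on a plain buffer; header_len() is a stand-in for eth_get_headlen() and the sizes are arbitrary.

/* Sketch of the "pull headers, frag the rest" split done in
 * igb_add_rx_frag(); header_len() stands in for eth_get_headlen(). */
#include <stdio.h>
#include <string.h>

#define RX_HDR_LEN 256   /* upper bound on pulled header bytes */

/* Pretend the first 54 bytes are Ethernet + IPv4 + TCP headers. */
static unsigned int header_len(const unsigned char *data, unsigned int max)
{
    (void)data;
    return 54 < max ? 54 : max;
}

int main(void)
{
    unsigned char rx_buffer[2048] = { 0 };   /* received frame */
    unsigned char linear[RX_HDR_LEN];        /* skb linear area */
    unsigned int size = 1500;                /* frame length */
    unsigned int pull = header_len(rx_buffer, RX_HDR_LEN);

    /* round the copy up to sizeof(long), as the driver does for memcpy */
    unsigned int copy = (pull + sizeof(long) - 1) & ~(sizeof(long) - 1);
    memcpy(linear, rx_buffer, copy);

    printf("linear: %u bytes of header, frag: %u bytes of payload\n",
           pull, size - pull);
    return 0;
}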
- */ -static void igb_pull_tail(struct igb_ring *rx_ring, - union e1000_adv_rx_desc *rx_desc, - struct sk_buff *skb) -{ - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; - unsigned char *va; - unsigned int pull_len; - - /* it is valid to use page_address instead of kmap since we are - * working with pages allocated out of the lomem pool per - * alloc_page(GFP_ATOMIC) - */ - va = skb_frag_address(frag); - - if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { - /* retrieve timestamp from buffer */ - igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); - - /* update pointers to remove timestamp header */ - skb_frag_size_sub(frag, IGB_TS_HDR_LEN); - frag->page_offset += IGB_TS_HDR_LEN; - skb->data_len -= IGB_TS_HDR_LEN; - skb->len -= IGB_TS_HDR_LEN; - - /* move va to start of packet data */ - va += IGB_TS_HDR_LEN; - } - - /* we need the header to contain the greater of either ETH_HLEN or - * 60 bytes if the skb->len is less than 60 for skb_pad. - */ - pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN); - - /* align pull length to size of long to optimize memcpy performance */ - skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); - - /* update all of the pointers */ - skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; - skb->data_len -= pull_len; - skb->tail += pull_len; -} - -/** * igb_cleanup_headers - Correct corrupted or empty headers * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor @@ -6873,10 +6833,6 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring, } } - /* place header in linear portion of buffer */ - if (skb_is_nonlinear(skb)) - igb_pull_tail(rx_ring, rx_desc, skb); - /* if eth_skb_pad returns an error the skb was freed */ if (eth_skb_pad(skb)) return true; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 6b87d9634614..b1e364d26aa7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1394,14 +1394,12 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl) /* * Continue setup of fdirctrl register bits: * Turn perfect match filtering on - * Report hash in RSS field of Rx wb descriptor * Initialize the drop queue * Move the flexible bytes to use the ethertype - shift 6 words * Set the maximum length per hash bucket to 0xA filters * Send interrupt when 64 (0x4 * 16) filters are left */ fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH | - IXGBE_FDIRCTRL_REPORT_STATUS | (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) | (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index ec7b2324b77b..f7aeb560a504 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -2938,14 +2938,6 @@ static int ixgbe_get_ts_info(struct net_device *dev, (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); break; default: diff --git 
a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 9aa6104e34ea..3e6a9319c718 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1360,14 +1360,31 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) } #endif /* CONFIG_IXGBE_DCA */ + +#define IXGBE_RSS_L4_TYPES_MASK \ + ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP)) + static inline void ixgbe_rx_hash(struct ixgbe_ring *ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { - if (ring->netdev->features & NETIF_F_RXHASH) - skb_set_hash(skb, - le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), - PKT_HASH_TYPE_L3); + u16 rss_type; + + if (!(ring->netdev->features & NETIF_F_RXHASH)) + return; + + rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & + IXGBE_RXDADV_RSSTYPE_MASK; + + if (!rss_type) + return; + + skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? + PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); } #ifdef IXGBE_FCOE diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h index 770e21a64388..58434584b16d 100644 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -161,6 +161,18 @@ typedef u32 ixgbe_link_speed; #define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 #define IXGBE_RXDADV_SPH 0x8000 +/* RSS Hash results */ +#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000 +#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 +#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 +#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004 +#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 +#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 +#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 +#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 + #define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ IXGBE_RXD_ERR_CE | \ IXGBE_RXD_ERR_LE | \ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index e71cdde9cb01..b2c86f1b8a9f 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -457,6 +457,32 @@ static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector, napi_gro_receive(&q_vector->napi, skb); } +#define IXGBE_RSS_L4_TYPES_MASK \ + ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP)) + +static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u16 rss_type; + + if (!(ring->netdev->features & NETIF_F_RXHASH)) + return; + + rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & + IXGBE_RXDADV_RSSTYPE_MASK; + + if (!rss_type) + return; + + skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? 
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); +} + /** * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum * @ring: structure containig ring specific data @@ -506,6 +532,7 @@ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { + ixgbevf_rx_hash(rx_ring, rx_desc, skb); ixgbevf_rx_checksum(rx_ring, rx_desc, skb); if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { @@ -649,46 +676,6 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, } /** - * ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail - * @rx_ring: rx descriptor ring packet is being transacted on - * @skb: pointer to current skb being adjusted - * - * This function is an ixgbevf specific version of __pskb_pull_tail. The - * main difference between this version and the original function is that - * this function can make several assumptions about the state of things - * that allow for significant optimizations versus the standard function. - * As a result we can do things like drop a frag and maintain an accurate - * truesize for the skb. - **/ -static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring, - struct sk_buff *skb) -{ - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; - unsigned char *va; - unsigned int pull_len; - - /* it is valid to use page_address instead of kmap since we are - * working with pages allocated out of the lomem pool per - * alloc_page(GFP_ATOMIC) - */ - va = skb_frag_address(frag); - - /* we need the header to contain the greater of either ETH_HLEN or - * 60 bytes if the skb->len is less than 60 for skb_pad. - */ - pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE); - - /* align pull length to size of long to optimize memcpy performance */ - skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); - - /* update all of the pointers */ - skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; - skb->data_len -= pull_len; - skb->tail += pull_len; -} - -/** * ixgbevf_cleanup_headers - Correct corrupted or empty headers * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor @@ -721,10 +708,6 @@ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, } } - /* place header in linear portion of buffer */ - if (skb_is_nonlinear(skb)) - ixgbevf_pull_tail(rx_ring, skb); - /* if eth_skb_pad returns an error the skb was freed */ if (eth_skb_pad(skb)) return true; @@ -789,16 +772,19 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, struct sk_buff *skb) { struct page *page = rx_buffer->page; + unsigned char *va = page_address(page) + rx_buffer->page_offset; unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); #if (PAGE_SIZE < 8192) unsigned int truesize = IXGBEVF_RX_BUFSZ; #else unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); #endif + unsigned int pull_len; - if ((size <= IXGBEVF_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) { - unsigned char *va = page_address(page) + rx_buffer->page_offset; + if (unlikely(skb_is_nonlinear(skb))) + goto add_tail_frag; + if (likely(size <= IXGBEVF_RX_HDR_SIZE)) { memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); /* page is not reserved, we can reuse buffer as is */ @@ -810,8 +796,21 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, return false; } + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. 
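The new ixgbevf_rx_hash() (and its ixgbe twin) reports PKT_HASH_TYPE_L4 only when the descriptor's RSS type is one of the TCP/UDP types in IXGBE_RSS_L4_TYPES_MASK, and falls back to an L3 hash otherwise. The classification is sketched below with a subset of the RSS type values from the defines added in this patch; the hash-type enum is a local stand-in for the kernel's PKT_HASH_TYPE_* values.

/* Classify an RSS type value as an L3 or L4 hash, mirroring the
 * IXGBE_RSS_L4_TYPES_MASK test; enum names here are local. */
#include <stdio.h>

#define RSSTYPE_IPV4_TCP 0x1
#define RSSTYPE_IPV4     0x2
#define RSSTYPE_IPV6_TCP 0x3
#define RSSTYPE_IPV6     0x5
#define RSSTYPE_IPV4_UDP 0x7
#define RSSTYPE_IPV6_UDP 0x8

#define RSS_L4_TYPES_MASK \
    ((1ul << RSSTYPE_IPV4_TCP) | (1ul << RSSTYPE_IPV4_UDP) | \
     (1ul << RSSTYPE_IPV6_TCP) | (1ul << RSSTYPE_IPV6_UDP))

enum demo_hash_type { HASH_NONE, HASH_L3, HASH_L4 };

static enum demo_hash_type classify(unsigned int rss_type)
{
    if (!rss_type)
        return HASH_NONE;
    return (RSS_L4_TYPES_MASK & (1ul << rss_type)) ? HASH_L4 : HASH_L3;
}

int main(void)
{
    printf("ipv4/tcp -> %d (L4=2)\n", classify(RSSTYPE_IPV4_TCP));
    printf("plain ipv6 -> %d (L3=1)\n", classify(RSSTYPE_IPV6));
    printf("no rss -> %d\n", classify(0));
    return 0;
}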
+ */ + pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + va += pull_len; + size -= pull_len; + +add_tail_frag: skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - rx_buffer->page_offset, size, truesize); + (unsigned long)va & ~PAGE_MASK, size, truesize); /* avoid re-using remote pages */ if (unlikely(ixgbevf_page_is_reserved(page))) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 62e48bc0cb23..fe2299ac4f5c 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -3027,8 +3027,8 @@ static int mvneta_probe(struct platform_device *pdev) const char *dt_mac_addr; char hw_mac_addr[ETH_ALEN]; const char *mac_from; + const char *managed; int phy_mode; - int fixed_phy = 0; int err; /* Our multiqueue support is not complete, so for now, only @@ -3062,7 +3062,6 @@ static int mvneta_probe(struct platform_device *pdev) dev_err(&pdev->dev, "cannot register fixed PHY\n"); goto err_free_irq; } - fixed_phy = 1; /* In the case of a fixed PHY, the DT node associated * to the PHY is the Ethernet MAC DT node. @@ -3086,8 +3085,10 @@ static int mvneta_probe(struct platform_device *pdev) pp = netdev_priv(dev); pp->phy_node = phy_node; pp->phy_interface = phy_mode; - pp->use_inband_status = (phy_mode == PHY_INTERFACE_MODE_SGMII) && - fixed_phy; + + err = of_property_read_string(dn, "managed", &managed); + pp->use_inband_status = (err == 0 && + strcmp(managed, "in-band-status") == 0); pp->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(pp->clk)) { diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 7a4f20bb7fcb..12c65e1ad6a9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -917,7 +917,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud if (dev->features & NETIF_F_RXHASH) skb_set_hash(gro_skb, be32_to_cpu(cqe->immed_rss_invalid), - PKT_HASH_TYPE_L3); + (ip_summed == CHECKSUM_UNNECESSARY) ? + PKT_HASH_TYPE_L4 : + PKT_HASH_TYPE_L3); skb_record_rx_queue(gro_skb, cq->ring); skb_mark_napi_id(gro_skb, &cq->napi); @@ -963,7 +965,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud if (dev->features & NETIF_F_RXHASH) skb_set_hash(skb, be32_to_cpu(cqe->immed_rss_invalid), - PKT_HASH_TYPE_L3); + (ip_summed == CHECKSUM_UNNECESSARY) ? + PKT_HASH_TYPE_L4 : + PKT_HASH_TYPE_L3); if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_VLAN_PRESENT_MASK) && diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 12fbfcb44d8a..d76f4257e305 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -2907,6 +2907,8 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, { u64 dev_flags = dev->flags; int err = 0; + int fw_enabled_sriov_vfs = min(pci_sriov_get_totalvfs(pdev), + MLX4_MAX_NUM_VF); if (reset_flow) { dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs), @@ -2932,6 +2934,12 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, } if (!(dev->flags & MLX4_FLAG_SRIOV)) { + if (total_vfs > fw_enabled_sriov_vfs) { + mlx4_err(dev, "requested vfs (%d) > available vfs (%d). 
Continuing without SR_IOV\n", + total_vfs, fw_enabled_sriov_vfs); + err = -ENOMEM; + goto disable_sriov; + } mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs); err = pci_enable_sriov(pdev, total_vfs); } @@ -3413,20 +3421,20 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, goto err_disable_pdev; } } - if (total_vfs >= MLX4_MAX_NUM_VF) { + if (total_vfs > MLX4_MAX_NUM_VF) { dev_err(&pdev->dev, - "Requested more VF's (%d) than allowed (%d)\n", - total_vfs, MLX4_MAX_NUM_VF - 1); + "Requested more VF's (%d) than allowed by hw (%d)\n", + total_vfs, MLX4_MAX_NUM_VF); err = -EINVAL; goto err_disable_pdev; } for (i = 0; i < MLX4_MAX_PORTS; i++) { - if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) { + if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) { dev_err(&pdev->dev, - "Requested more VF's (%d) for port (%d) than allowed (%d)\n", + "Requested more VF's (%d) for port (%d) than allowed by driver (%d)\n", nvfs[i] + nvfs[2], i + 1, - MLX4_MAX_NUM_VF_P_PORT - 1); + MLX4_MAX_NUM_VF_P_PORT); err = -EINVAL; goto err_disable_pdev; } diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index c28111749e1f..2d1b94274079 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -8226,31 +8226,7 @@ static void s2io_rem_nic(struct pci_dev *pdev) pci_disable_device(pdev); } -/** - * s2io_starter - Entry point for the driver - * Description: This function is the entry point for the driver. It verifies - * the module loadable parameters and initializes PCI configuration space. - */ - -static int __init s2io_starter(void) -{ - return pci_register_driver(&s2io_driver); -} - -/** - * s2io_closer - Cleanup routine for the driver - * Description: This function is the cleanup routine for the driver. It - * unregisters the driver. 
- */ - -static __exit void s2io_closer(void) -{ - pci_unregister_driver(&s2io_driver); - DBG_PRINT(INIT_DBG, "cleanup done\n"); -} - -module_init(s2io_starter); -module_exit(s2io_closer); +module_pci_driver(s2io_driver); static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip, struct tcphdr **tcp, struct RxD_t *rxdp, diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h index d89b6ed82c51..6c5997dc8afc 100644 --- a/drivers/net/ethernet/neterion/s2io.h +++ b/drivers/net/ethernet/neterion/s2io.h @@ -1085,8 +1085,6 @@ static void s2io_txpic_intr_handle(struct s2io_nic *sp); static void tx_intr_handler(struct fifo_info *fifo_data); static void s2io_handle_errors(void * dev_id); -static int s2io_starter(void); -static void s2io_closer(void); static void s2io_tx_watchdog(struct net_device *dev); static void s2io_set_multicast(struct net_device *dev); static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 2f6cc423ab1d..7dbab3c20db5 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -2403,7 +2403,6 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter, qlcnic_free_tx_rings(adapter); return -ENOMEM; } - memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring)); tx_ring->cmd_buf_arr = cmd_buf_arr; spin_lock_init(&tx_ring->tx_clean_lock); } diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 78849dd4ef8e..779bb58a068e 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -223,9 +223,9 @@ static void ravb_ring_free(struct net_device *ndev, int q) static void ravb_ring_format(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); - struct ravb_ex_rx_desc *rx_desc = NULL; - struct ravb_tx_desc *tx_desc = NULL; - struct ravb_desc *desc = NULL; + struct ravb_ex_rx_desc *rx_desc; + struct ravb_tx_desc *tx_desc; + struct ravb_desc *desc; int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q]; int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q]; dma_addr_t dma_addr; @@ -439,7 +439,7 @@ static int ravb_tx_free(struct net_device *ndev, int q) struct net_device_stats *stats = &priv->stats[q]; struct ravb_tx_desc *desc; int free_num = 0; - int entry = 0; + int entry; u32 size; for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) { @@ -512,8 +512,8 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q) struct sk_buff *skb; dma_addr_t dma_addr; struct timespec64 ts; - u16 pkt_len = 0; u8 desc_status; + u16 pkt_len; int limit; boguscnt = min(boguscnt, *quota); @@ -1277,8 +1277,8 @@ static void ravb_tx_timeout_work(struct work_struct *work) static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); - struct ravb_tstamp_skb *ts_skb = NULL; u16 q = skb_get_queue_mapping(skb); + struct ravb_tstamp_skb *ts_skb; struct ravb_tx_desc *desc; unsigned long flags; u32 dma_addr; diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index 2d8578cade03..7b4c3474acfe 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -202,6 +202,7 @@ enum { ROCKER_CTRL_IPV4_MCAST, ROCKER_CTRL_IPV6_MCAST, ROCKER_CTRL_DFLT_BRIDGING, + ROCKER_CTRL_DFLT_OVS, ROCKER_CTRL_MAX, }; @@ -321,9 +322,21 
@@ static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port, return ntohs(vlan_id); } +static bool rocker_port_is_slave(const struct rocker_port *rocker_port, + const char *kind) +{ + return rocker_port->bridge_dev && + !strcmp(rocker_port->bridge_dev->rtnl_link_ops->kind, kind); +} + static bool rocker_port_is_bridged(const struct rocker_port *rocker_port) { - return !!rocker_port->bridge_dev; + return rocker_port_is_slave(rocker_port, "bridge"); +} + +static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port) +{ + return rocker_port_is_slave(rocker_port, "openvswitch"); } #define ROCKER_OP_FLAG_REMOVE BIT(0) @@ -1818,6 +1831,30 @@ rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port, } static int +rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + int mtu = *(int *)priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU, + mtu)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port, struct rocker_desc_info *desc_info, void *priv) @@ -1874,6 +1911,14 @@ static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port, macaddr, NULL, NULL); } +static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port, + int mtu) +{ + return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0, + rocker_cmd_set_port_settings_mtu_prep, + &mtu, NULL, NULL); +} + static int rocker_port_set_learning(struct rocker_port *rocker_port, enum switchdev_trans trans) { @@ -3243,6 +3288,12 @@ static struct rocker_ctrl { .bridge = true, .copy_to_cpu = true, }, + [ROCKER_CTRL_DFLT_OVS] = { + /* pass all pkts up to CPU */ + .eth_dst = zero_mac, + .eth_dst_mask = zero_mac, + .acl = true, + }, }; static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port, @@ -3755,11 +3806,14 @@ static int rocker_port_stp_update(struct rocker_port *rocker_port, break; case BR_STATE_LEARNING: case BR_STATE_FORWARDING: - want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true; + if (!rocker_port_is_ovsed(rocker_port)) + want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true; want[ROCKER_CTRL_IPV4_MCAST] = true; want[ROCKER_CTRL_IPV6_MCAST] = true; if (rocker_port_is_bridged(rocker_port)) want[ROCKER_CTRL_DFLT_BRIDGING] = true; + else if (rocker_port_is_ovsed(rocker_port)) + want[ROCKER_CTRL_DFLT_OVS] = true; else want[ROCKER_CTRL_LOCAL_ARP] = true; break; @@ -3983,7 +4037,8 @@ static int rocker_port_open(struct net_device *dev) napi_enable(&rocker_port->napi_tx); napi_enable(&rocker_port->napi_rx); - rocker_port_set_enable(rocker_port, true); + if (!dev->proto_down) + rocker_port_set_enable(rocker_port, true); netif_start_queue(dev); return 0; @@ -4152,6 +4207,34 @@ static int rocker_port_set_mac_address(struct net_device *dev, void *p) return 0; } +static int rocker_port_change_mtu(struct net_device *dev, int new_mtu) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int running = netif_running(dev); + int err; + +#define ROCKER_PORT_MIN_MTU 68 +#define ROCKER_PORT_MAX_MTU 9000 + + if 
(new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU) + return -EINVAL; + + if (running) + rocker_port_stop(dev); + + netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu); + dev->mtu = new_mtu; + + err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu); + if (err) + return err; + + if (running) + err = rocker_port_open(dev); + + return err; +} + static int rocker_port_get_phys_port_name(struct net_device *dev, char *buf, size_t len) { @@ -4167,11 +4250,23 @@ static int rocker_port_get_phys_port_name(struct net_device *dev, return err ? -EOPNOTSUPP : 0; } +static int rocker_port_change_proto_down(struct net_device *dev, + bool proto_down) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + if (rocker_port->dev->flags & IFF_UP) + rocker_port_set_enable(rocker_port, !proto_down); + rocker_port->dev->proto_down = proto_down; + return 0; +} + static const struct net_device_ops rocker_port_netdev_ops = { .ndo_open = rocker_port_open, .ndo_stop = rocker_port_stop, .ndo_start_xmit = rocker_port_xmit, .ndo_set_mac_address = rocker_port_set_mac_address, + .ndo_change_mtu = rocker_port_change_mtu, .ndo_bridge_getlink = switchdev_port_bridge_getlink, .ndo_bridge_setlink = switchdev_port_bridge_setlink, .ndo_bridge_dellink = switchdev_port_bridge_dellink, @@ -4179,6 +4274,7 @@ static const struct net_device_ops rocker_port_netdev_ops = { .ndo_fdb_del = switchdev_port_fdb_del, .ndo_fdb_dump = switchdev_port_fdb_dump, .ndo_get_phys_port_name = rocker_port_get_phys_port_name, + .ndo_change_proto_down = rocker_port_change_proto_down, }; /******************** @@ -4726,6 +4822,7 @@ static int rocker_port_rx_proc(const struct rocker *rocker, const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1]; struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info); size_t rx_len; + u16 rx_flags = 0; if (!skb) return -ENOENT; @@ -4733,6 +4830,8 @@ static int rocker_port_rx_proc(const struct rocker *rocker, rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info); if (!attrs[ROCKER_TLV_RX_FRAG_LEN]) return -EINVAL; + if (attrs[ROCKER_TLV_RX_FLAGS]) + rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]); rocker_dma_rx_ring_skb_unmap(rocker, attrs); @@ -4740,6 +4839,9 @@ static int rocker_port_rx_proc(const struct rocker *rocker, skb_put(skb, rx_len); skb->protocol = eth_type_trans(skb, rocker_port->dev); + if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD) + skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark; + rocker_port->dev->stats.rx_packets++; rocker_port->dev->stats.rx_bytes += skb->len; @@ -4877,6 +4979,8 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) } rocker->ports[port_number] = rocker_port; + switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false); + rocker_port_set_learning(rocker_port, SWITCHDEV_TRANS_NONE); err = rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, 0); @@ -5156,6 +5260,7 @@ static int rocker_port_bridge_join(struct rocker_port *rocker_port, rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex); rocker_port->bridge_dev = bridge; + switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true); return rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE, untagged_vid, 0); @@ -5176,6 +5281,8 @@ static int rocker_port_bridge_leave(struct rocker_port *rocker_port) rocker_port_internal_vlan_id_get(rocker_port, rocker_port->dev->ifindex); + switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev, + false); rocker_port->bridge_dev = NULL; err = rocker_port_vlan_add(rocker_port, 
SWITCHDEV_TRANS_NONE, @@ -5190,23 +5297,39 @@ static int rocker_port_bridge_leave(struct rocker_port *rocker_port) return err; } + +static int rocker_port_ovs_changed(struct rocker_port *rocker_port, + struct net_device *master) +{ + int err; + + rocker_port->bridge_dev = master; + + err = rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE, 0); + if (err) + return err; + err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE, 0); + + return err; +} + static int rocker_port_master_changed(struct net_device *dev) { struct rocker_port *rocker_port = netdev_priv(dev); struct net_device *master = netdev_master_upper_dev_get(dev); int err = 0; - /* There are currently three cases handled here: - * 1. Joining a bridge - * 2. Leaving a previously joined bridge - * 3. Other, e.g. being added to or removed from a bond or openvswitch, - * in which case nothing is done - */ - if (master && master->rtnl_link_ops && - !strcmp(master->rtnl_link_ops->kind, "bridge")) - err = rocker_port_bridge_join(rocker_port, master); - else if (rocker_port_is_bridged(rocker_port)) + /* N.B: Do nothing if the type of master is not supported */ + if (master && master->rtnl_link_ops) { + if (!strcmp(master->rtnl_link_ops->kind, "bridge")) + err = rocker_port_bridge_join(rocker_port, master); + else if (!strcmp(master->rtnl_link_ops->kind, "openvswitch")) + err = rocker_port_ovs_changed(rocker_port, master); + } else if (rocker_port_is_bridged(rocker_port)) { err = rocker_port_bridge_leave(rocker_port); + } else if (rocker_port_is_ovsed(rocker_port)) { + err = rocker_port_ovs_changed(rocker_port, NULL); + } return err; } diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h index c61fbf968036..12490b2f6504 100644 --- a/drivers/net/ethernet/rocker/rocker.h +++ b/drivers/net/ethernet/rocker/rocker.h @@ -159,6 +159,7 @@ enum { ROCKER_TLV_CMD_PORT_SETTINGS_MODE, /* u8 */ ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, /* u8 */ ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME, /* binary */ + ROCKER_TLV_CMD_PORT_SETTINGS_MTU, /* u16 */ __ROCKER_TLV_CMD_PORT_SETTINGS_MAX, ROCKER_TLV_CMD_PORT_SETTINGS_MAX = @@ -245,6 +246,7 @@ enum { #define ROCKER_RX_FLAGS_TCP BIT(5) #define ROCKER_RX_FLAGS_UDP BIT(6) #define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD BIT(7) +#define ROCKER_RX_FLAGS_FWD_OFFLOAD BIT(8) enum { ROCKER_TLV_TX_UNSPEC, diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 605cc8948594..8505d82290cb 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -49,6 +49,12 @@ enum { */ #define HUNT_FILTER_TBL_ROWS 8192 +#define EFX_EF10_FILTER_ID_INVALID 0xffff +struct efx_ef10_dev_addr { + u8 addr[ETH_ALEN]; + u16 id; +}; + struct efx_ef10_filter_table { /* The RX match field masks supported by this fw & hw, in order of priority */ enum efx_filter_match_flags rx_match_flags[ @@ -69,13 +75,14 @@ struct efx_ef10_filter_table { /* Shadow of net_device address lists, guarded by mac_lock */ #define EFX_EF10_FILTER_DEV_UC_MAX 32 #define EFX_EF10_FILTER_DEV_MC_MAX 256 - struct { - u8 addr[ETH_ALEN]; - u16 id; - } dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX], - dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX]; - int dev_uc_count; /* negative for PROMISC */ - int dev_mc_count; /* negative for PROMISC/ALLMULTI */ + struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX]; + struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX]; + int dev_uc_count; + int dev_mc_count; +/* Indices (like efx_ef10_dev_addr.id) for promisc/allmulti filters */ + u16 
ucdef_id; + u16 bcast_id; + u16 mcdef_id; }; /* An arbitrary search limit for the software hash table */ @@ -387,7 +394,7 @@ static int efx_ef10_probe(struct efx_nic *efx) * First try to enable it, then if we get EPERM, just * ask if it's already enabled */ - rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true); + rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true, NULL); if (rc == 0) { nic_data->workaround_35388 = true; } else if (rc == -EPERM) { @@ -2197,6 +2204,29 @@ static int efx_ef10_ev_probe(struct efx_channel *channel) GFP_KERNEL); } +static void efx_ef10_ev_fini(struct efx_channel *channel) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN); + MCDI_DECLARE_BUF_ERR(outbuf); + struct efx_nic *efx = channel->efx; + size_t outlen; + int rc; + + MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + + if (rc && rc != -EALREADY) + goto fail; + + return; + +fail: + efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN, + outbuf, outlen, rc); +} + static int efx_ef10_ev_init(struct efx_channel *channel) { MCDI_DECLARE_BUF(inbuf, @@ -2208,6 +2238,7 @@ static int efx_ef10_ev_init(struct efx_channel *channel) struct efx_ef10_nic_data *nic_data; bool supports_rx_merge; size_t inlen, outlen; + unsigned int enabled, implemented; dma_addr_t dma_addr; int rc; int i; @@ -2248,30 +2279,52 @@ static int efx_ef10_ev_init(struct efx_channel *channel) rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen, outbuf, sizeof(outbuf), &outlen); /* IRQ return is ignored */ - return rc; -} - -static void efx_ef10_ev_fini(struct efx_channel *channel) -{ - MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN); - MCDI_DECLARE_BUF_ERR(outbuf); - struct efx_nic *efx = channel->efx; - size_t outlen; - int rc; - - MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel); - - rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), &outlen); + if (channel->channel || rc) + return rc; - if (rc && rc != -EALREADY) + /* Successfully created event queue on channel 0 */ + rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled); + if (rc == -ENOSYS) { + /* GET_WORKAROUNDS was implemented before the bug26807 + * workaround, thus the latter must be unavailable in this fw + */ + nic_data->workaround_26807 = false; + rc = 0; + } else if (rc) { goto fail; + } else { + nic_data->workaround_26807 = + !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807); + + if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 && + !nic_data->workaround_26807) { + unsigned int flags; + + rc = efx_mcdi_set_workaround(efx, + MC_CMD_WORKAROUND_BUG26807, + true, &flags); + + if (!rc) { + if (flags & + 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) { + netif_info(efx, drv, efx->net_dev, + "other functions on NIC have been reset\n"); + /* MC's boot count has incremented */ + ++nic_data->warm_boot_count; + } + nic_data->workaround_26807 = true; + } else if (rc == -EPERM) { + rc = 0; + } + } + } - return; + if (!rc) + return 0; fail: - efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN, - outbuf, outlen, rc); + efx_ef10_ev_fini(channel); + return rc; } static void efx_ef10_ev_remove(struct efx_channel *channel) @@ -3225,6 +3278,19 @@ static int efx_ef10_filter_remove_safe(struct efx_nic *efx, filter_id, false); } +static u32 efx_ef10_filter_get_unsafe_id(struct efx_nic *efx, u32 filter_id) +{ + return filter_id % HUNT_FILTER_TBL_ROWS; +} + 
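/* Editor's note -- illustrative sketch only, not part of the patch above.
 * The new EFX_EF10_FILTER_ID_INVALID marker plus the *_unsafe helpers added
 * here let the address-list bookkeeping record, per entry, whether an auto
 * filter was actually inserted, and remove it again by its table-row index.
 * A hypothetical helper built on the same convention (the function name is
 * invented for illustration):
 */
static void efx_ef10_filter_forget_addr(struct efx_nic *efx,
					struct efx_ef10_dev_addr *addr)
{
	/* Nothing to remove if insertion failed or was never attempted */
	if (addr->id == EFX_EF10_FILTER_ID_INVALID)
		return;

	/* addr->id already holds the table-row form of the filter ID,
	 * which is exactly what efx_ef10_filter_remove_unsafe() expects.
	 */
	efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, addr->id);
	addr->id = EFX_EF10_FILTER_ID_INVALID;
}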
+static int efx_ef10_filter_remove_unsafe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id) +{ + return efx_ef10_filter_remove_internal(efx, 1U << priority, + filter_id, true); +} + static int efx_ef10_filter_get_safe(struct efx_nic *efx, enum efx_filter_priority priority, u32 filter_id, struct efx_filter_spec *spec) @@ -3598,6 +3664,10 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx) goto fail; } + table->ucdef_id = EFX_EF10_FILTER_ID_INVALID; + table->bcast_id = EFX_EF10_FILTER_ID_INVALID; + table->mcdef_id = EFX_EF10_FILTER_ID_INVALID; + efx->filter_state = table; init_waitqueue_head(&table->waitq); return 0; @@ -3700,145 +3770,233 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx) kfree(table); } -/* Caller must hold efx->filter_sem for read if race against - * efx_ef10_filter_table_remove() is possible - */ -static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) +#define EFX_EF10_FILTER_DO_MARK_OLD(id) \ + if (id != EFX_EF10_FILTER_ID_INVALID) { \ + filter_idx = efx_ef10_filter_get_unsafe_id(efx, id); \ + WARN_ON(!table->entry[filter_idx].spec); \ + table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; \ + } +static void efx_ef10_filter_mark_old(struct efx_nic *efx) { struct efx_ef10_filter_table *table = efx->filter_state; - struct net_device *net_dev = efx->net_dev; - struct efx_filter_spec spec; - bool remove_failed = false; - struct netdev_hw_addr *uc; - struct netdev_hw_addr *mc; - unsigned int filter_idx; - int i, n, rc; - - if (!efx_dev_registered(efx)) - return; + unsigned int filter_idx, i; if (!table) return; /* Mark old filters that may need to be removed */ spin_lock_bh(&efx->filter_lock); - n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count; - for (i = 0; i < n; i++) { - filter_idx = table->dev_uc_list[i].id % HUNT_FILTER_TBL_ROWS; - table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; - } - n = table->dev_mc_count < 0 ? 
1 : table->dev_mc_count; - for (i = 0; i < n; i++) { - filter_idx = table->dev_mc_list[i].id % HUNT_FILTER_TBL_ROWS; - table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; - } + for (i = 0; i < table->dev_uc_count; i++) + EFX_EF10_FILTER_DO_MARK_OLD(table->dev_uc_list[i].id); + for (i = 0; i < table->dev_mc_count; i++) + EFX_EF10_FILTER_DO_MARK_OLD(table->dev_mc_list[i].id); + EFX_EF10_FILTER_DO_MARK_OLD(table->ucdef_id); + EFX_EF10_FILTER_DO_MARK_OLD(table->bcast_id); + EFX_EF10_FILTER_DO_MARK_OLD(table->mcdef_id); spin_unlock_bh(&efx->filter_lock); +} +#undef EFX_EF10_FILTER_DO_MARK_OLD - /* Copy/convert the address lists; add the primary station - * address and broadcast address - */ - netif_addr_lock_bh(net_dev); - if (net_dev->flags & IFF_PROMISC || - netdev_uc_count(net_dev) >= EFX_EF10_FILTER_DEV_UC_MAX) { - table->dev_uc_count = -1; - } else { - table->dev_uc_count = 1 + netdev_uc_count(net_dev); - ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr); - i = 1; - netdev_for_each_uc_addr(uc, net_dev) { - ether_addr_copy(table->dev_uc_list[i].addr, uc->addr); - i++; +static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx, bool *promisc) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct net_device *net_dev = efx->net_dev; + struct netdev_hw_addr *uc; + int addr_count; + unsigned int i; + + table->ucdef_id = EFX_EF10_FILTER_ID_INVALID; + addr_count = netdev_uc_count(net_dev); + if (net_dev->flags & IFF_PROMISC) + *promisc = true; + table->dev_uc_count = 1 + addr_count; + ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr); + i = 1; + netdev_for_each_uc_addr(uc, net_dev) { + if (i >= EFX_EF10_FILTER_DEV_UC_MAX) { + *promisc = true; + break; } + ether_addr_copy(table->dev_uc_list[i].addr, uc->addr); + table->dev_uc_list[i].id = EFX_EF10_FILTER_ID_INVALID; + i++; } - if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) || - netdev_mc_count(net_dev) >= EFX_EF10_FILTER_DEV_MC_MAX) { - table->dev_mc_count = -1; - } else { - table->dev_mc_count = 1 + netdev_mc_count(net_dev); - eth_broadcast_addr(table->dev_mc_list[0].addr); - i = 1; - netdev_for_each_mc_addr(mc, net_dev) { - ether_addr_copy(table->dev_mc_list[i].addr, mc->addr); - i++; +} + +static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx, bool *promisc) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct net_device *net_dev = efx->net_dev; + struct netdev_hw_addr *mc; + unsigned int i, addr_count; + + table->mcdef_id = EFX_EF10_FILTER_ID_INVALID; + table->bcast_id = EFX_EF10_FILTER_ID_INVALID; + if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) + *promisc = true; + + addr_count = netdev_mc_count(net_dev); + i = 0; + netdev_for_each_mc_addr(mc, net_dev) { + if (i >= EFX_EF10_FILTER_DEV_MC_MAX) { + *promisc = true; + break; } + ether_addr_copy(table->dev_mc_list[i].addr, mc->addr); + table->dev_mc_list[i].id = EFX_EF10_FILTER_ID_INVALID; + i++; } - netif_addr_unlock_bh(net_dev); - /* Insert/renew unicast filters */ - if (table->dev_uc_count >= 0) { - for (i = 0; i < table->dev_uc_count; i++) { - efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, - EFX_FILTER_FLAG_RX_RSS, - 0); - efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, - table->dev_uc_list[i].addr); - rc = efx_ef10_filter_insert(efx, &spec, true); - if (rc < 0) { - /* Fall back to unicast-promisc */ - while (i--) - efx_ef10_filter_remove_safe( + table->dev_mc_count = i; +} + +static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, + bool multicast, bool rollback) +{ + struct 
efx_ef10_filter_table *table = efx->filter_state; + struct efx_ef10_dev_addr *addr_list; + struct efx_filter_spec spec; + u8 baddr[ETH_ALEN]; + unsigned int i, j; + int addr_count; + int rc; + + if (multicast) { + addr_list = table->dev_mc_list; + addr_count = table->dev_mc_count; + } else { + addr_list = table->dev_uc_list; + addr_count = table->dev_uc_count; + } + + /* Insert/renew filters */ + for (i = 0; i < addr_count; i++) { + efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, + EFX_FILTER_FLAG_RX_RSS, + 0); + efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, + addr_list[i].addr); + rc = efx_ef10_filter_insert(efx, &spec, true); + if (rc < 0) { + if (rollback) { + netif_info(efx, drv, efx->net_dev, + "efx_ef10_filter_insert failed rc=%d\n", + rc); + /* Fall back to promiscuous */ + for (j = 0; j < i; j++) { + if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID) + continue; + efx_ef10_filter_remove_unsafe( efx, EFX_FILTER_PRI_AUTO, - table->dev_uc_list[i].id); - table->dev_uc_count = -1; - break; + addr_list[j].id); + addr_list[j].id = EFX_EF10_FILTER_ID_INVALID; + } + return rc; + } else { + /* mark as not inserted, and carry on */ + rc = EFX_EF10_FILTER_ID_INVALID; } - table->dev_uc_list[i].id = rc; } + addr_list[i].id = efx_ef10_filter_get_unsafe_id(efx, rc); } - if (table->dev_uc_count < 0) { + + if (multicast && rollback) { + /* Also need an Ethernet broadcast filter */ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, EFX_FILTER_FLAG_RX_RSS, 0); - efx_filter_set_uc_def(&spec); + eth_broadcast_addr(baddr); + efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr); rc = efx_ef10_filter_insert(efx, &spec, true); if (rc < 0) { - WARN_ON(1); - table->dev_uc_count = 0; + netif_warn(efx, drv, efx->net_dev, + "Broadcast filter insert failed rc=%d\n", rc); + /* Fall back to promiscuous */ + for (j = 0; j < i; j++) { + if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID) + continue; + efx_ef10_filter_remove_unsafe( + efx, EFX_FILTER_PRI_AUTO, + addr_list[j].id); + addr_list[j].id = EFX_EF10_FILTER_ID_INVALID; + } + return rc; } else { - table->dev_uc_list[0].id = rc; + table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc); } } - /* Insert/renew multicast filters */ - if (table->dev_mc_count >= 0) { - for (i = 0; i < table->dev_mc_count; i++) { + return 0; +} + +static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast, + bool rollback) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct efx_filter_spec spec; + u8 baddr[ETH_ALEN]; + int rc; + + efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, + EFX_FILTER_FLAG_RX_RSS, + 0); + + if (multicast) + efx_filter_set_mc_def(&spec); + else + efx_filter_set_uc_def(&spec); + + rc = efx_ef10_filter_insert(efx, &spec, true); + if (rc < 0) { + netif_warn(efx, drv, efx->net_dev, + "%scast mismatch filter insert failed rc=%d\n", + multicast ? 
"Multi" : "Uni", rc); + } else if (multicast) { + table->mcdef_id = efx_ef10_filter_get_unsafe_id(efx, rc); + if (!nic_data->workaround_26807) { + /* Also need an Ethernet broadcast filter */ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, EFX_FILTER_FLAG_RX_RSS, 0); + eth_broadcast_addr(baddr); efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, - table->dev_mc_list[i].addr); + baddr); rc = efx_ef10_filter_insert(efx, &spec, true); if (rc < 0) { - /* Fall back to multicast-promisc */ - while (i--) - efx_ef10_filter_remove_safe( - efx, EFX_FILTER_PRI_AUTO, - table->dev_mc_list[i].id); - table->dev_mc_count = -1; - break; + netif_warn(efx, drv, efx->net_dev, + "Broadcast filter insert failed rc=%d\n", + rc); + if (rollback) { + /* Roll back the mc_def filter */ + efx_ef10_filter_remove_unsafe( + efx, EFX_FILTER_PRI_AUTO, + table->mcdef_id); + table->mcdef_id = EFX_EF10_FILTER_ID_INVALID; + return rc; + } + } else { + table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc); } - table->dev_mc_list[i].id = rc; - } - } - if (table->dev_mc_count < 0) { - efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, - EFX_FILTER_FLAG_RX_RSS, - 0); - efx_filter_set_mc_def(&spec); - rc = efx_ef10_filter_insert(efx, &spec, true); - if (rc < 0) { - WARN_ON(1); - table->dev_mc_count = 0; - } else { - table->dev_mc_list[0].id = rc; } + rc = 0; + } else { + table->ucdef_id = rc; + rc = 0; } + return rc; +} + +/* Remove filters that weren't renewed. Since nothing else changes the AUTO_OLD + * flag or removes these filters, we don't need to hold the filter_lock while + * scanning for these filters. + */ +static void efx_ef10_filter_remove_old(struct efx_nic *efx) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + bool remove_failed = false; + int i; - /* Remove filters that weren't renewed. Since nothing else - * changes the AUTO_OLD flag or removes these filters, we - * don't need to hold the filter_lock while scanning for - * these filters. - */ for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { if (ACCESS_ONCE(table->entry[i].spec) & EFX_EF10_FILTER_FLAG_AUTO_OLD) { @@ -3917,6 +4075,87 @@ reset_nic: return rc ? rc : rc2; } +/* Caller must hold efx->filter_sem for read if race against + * efx_ef10_filter_table_remove() is possible + */ +static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct net_device *net_dev = efx->net_dev; + bool uc_promisc = false, mc_promisc = false; + + if (!efx_dev_registered(efx)) + return; + + if (!table) + return; + + efx_ef10_filter_mark_old(efx); + + /* Copy/convert the address lists; add the primary station + * address and broadcast address + */ + netif_addr_lock_bh(net_dev); + efx_ef10_filter_uc_addr_list(efx, &uc_promisc); + efx_ef10_filter_mc_addr_list(efx, &mc_promisc); + netif_addr_unlock_bh(net_dev); + + /* Insert/renew unicast filters */ + if (uc_promisc) { + efx_ef10_filter_insert_def(efx, false, false); + efx_ef10_filter_insert_addr_list(efx, false, false); + } else { + /* If any of the filters failed to insert, fall back to + * promiscuous mode - add in the uc_def filter. But keep + * our individual unicast filters. 
+ */ + if (efx_ef10_filter_insert_addr_list(efx, false, false)) + efx_ef10_filter_insert_def(efx, false, false); + } + + /* Insert/renew multicast filters */ + /* If changing promiscuous state with cascaded multicast filters, remove + * old filters first, so that packets are dropped rather than duplicated + */ + if (nic_data->workaround_26807 && efx->mc_promisc != mc_promisc) + efx_ef10_filter_remove_old(efx); + if (mc_promisc) { + if (nic_data->workaround_26807) { + /* If we failed to insert promiscuous filters, rollback + * and fall back to individual multicast filters + */ + if (efx_ef10_filter_insert_def(efx, true, true)) { + /* Changing promisc state, so remove old filters */ + efx_ef10_filter_remove_old(efx); + efx_ef10_filter_insert_addr_list(efx, true, false); + } + } else { + /* If we failed to insert promiscuous filters, don't + * rollback. Regardless, also insert the mc_list + */ + efx_ef10_filter_insert_def(efx, true, false); + efx_ef10_filter_insert_addr_list(efx, true, false); + } + } else { + /* If any filters failed to insert, rollback and fall back to + * promiscuous mode - mc_def filter and maybe broadcast. If + * that fails, roll back again and insert as many of our + * individual multicast filters as we can. + */ + if (efx_ef10_filter_insert_addr_list(efx, true, true)) { + /* Changing promisc state, so remove old filters */ + if (nic_data->workaround_26807) + efx_ef10_filter_remove_old(efx); + if (efx_ef10_filter_insert_def(efx, true, true)) + efx_ef10_filter_insert_addr_list(efx, true, false); + } + } + + efx_ef10_filter_remove_old(efx); + efx->mc_promisc = mc_promisc; +} + static int efx_ef10_set_mac_address(struct efx_nic *efx) { MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN); diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 81640f8bb811..98d172b04f71 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -1779,15 +1779,31 @@ int efx_mcdi_wol_filter_reset(struct efx_nic *efx) return rc; } -int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled) +int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled, + unsigned int *flags) { MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_WORKAROUND_EXT_OUT_LEN); + size_t outlen; + int rc; BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0); MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type); MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled); - return efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf), - NULL, 0, NULL); + rc = efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + + if (!flags) + return 0; + + if (outlen >= MC_CMD_WORKAROUND_EXT_OUT_LEN) + *flags = MCDI_DWORD(outbuf, WORKAROUND_EXT_OUT_FLAGS); + else + *flags = 0; + + return 0; } int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out, @@ -1816,7 +1832,11 @@ int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out, return 0; fail: - netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); + /* Older firmware lacks GET_WORKAROUNDS and this isn't especially + * terrifying. The call site will have to deal with it though. + */ + netif_printk(efx, hw, rc == -ENOSYS ? 
KERN_DEBUG : KERN_ERR, + efx->net_dev, "%s: failed rc=%d\n", __func__, rc); return rc; } diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 1838afe2da92..025d504c472b 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -346,7 +346,8 @@ void efx_mcdi_mac_pull_stats(struct efx_nic *efx); bool efx_mcdi_mac_check_fault(struct efx_nic *efx); enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason); int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method); -int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled); +int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled, + unsigned int *flags); int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out, unsigned int *enabled_out); diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index 45fca9fc66b7..4cc772164a79 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -26,6 +26,10 @@ * Unlike a warm boot, assume DMEM has been reloaded, so that * the MC persistent data must be reinitialised. */ #define MC_FW_TEPID_BOOT_OK (16) +/* We have entered the main firmware via recovery mode. This + * means that MC persistent data must be reinitialised, but that + * we shouldn't touch PCIe config. */ +#define MC_FW_RECOVERY_MODE_PCIE_INIT_OK (32) /* BIST state has been initialized */ #define MC_FW_BIST_INIT_OK (128) @@ -169,6 +173,8 @@ #define MC_CMD_ERR_EINTR 4 /* I/O failure */ #define MC_CMD_ERR_EIO 5 +/* Already exists */ +#define MC_CMD_ERR_EEXIST 6 /* Try again */ #define MC_CMD_ERR_EAGAIN 11 /* Out of memory */ @@ -181,6 +187,10 @@ #define MC_CMD_ERR_ENODEV 19 /* Invalid argument to target */ #define MC_CMD_ERR_EINVAL 22 +/* Broken pipe */ +#define MC_CMD_ERR_EPIPE 32 +/* Read-only */ +#define MC_CMD_ERR_EROFS 30 /* Out of range */ #define MC_CMD_ERR_ERANGE 34 /* Non-recursive resource is already acquired */ @@ -226,6 +236,43 @@ #define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a /* The datapath is disabled. */ #define MC_CMD_ERR_DATAPATH_DISABLED 0x100b +/* The requesting client is not a function */ +#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c +/* The requested operation might require the + command to be passed between MCs, and the + transport doesn't support that. Should + only ever been seen over the UART. */ +#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d +/* VLAN tag(s) exists */ +#define MC_CMD_ERR_VLAN_EXIST 0x100e +/* No MAC address assigned to an EVB port */ +#define MC_CMD_ERR_NO_MAC_ADDR 0x100f +/* Notifies the driver that the request has been relayed + * to an admin function for authorization. The driver should + * wait for a PROXY_RESPONSE event and then resend its request. + * This error code is followed by a 32-bit handle that + * helps matching it with the respective PROXY_RESPONSE event. */ +#define MC_CMD_ERR_PROXY_PENDING 0x1010 +#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4 +/* The request cannot be passed for authorization because + * another request from the same function is currently being + * authorized. The drvier should try again later. */ +#define MC_CMD_ERR_PROXY_INPROGRESS 0x1011 +/* Returned by MC_CMD_PROXY_COMPLETE if the caller is not the function + * that has enabled proxying or BLOCK_INDEX points to a function that + * doesn't await an authorization. */ +#define MC_CMD_ERR_PROXY_UNEXPECTED 0x1012 +/* This code is currently only used internally in FW. Its meaning is that + * an operation failed due to lack of SR-IOV privilege. 
+ * Normally it is translated to EPERM by send_cmd_err(), + * but it may also be used to trigger some special mechanism + * for handling such case, e.g. to relay the failed request + * to a designated admin function for authorization. */ +#define MC_CMD_ERR_NO_PRIVILEGE 0x1013 +/* Workaround 26807 could not be turned on/off because some functions + * have already installed filters. See the comment at + * MC_CMD_WORKAROUND_BUG26807. */ +#define MC_CMD_ERR_FILTERS_PRESENT 0x1014 #define MC_CMD_ERR_CODE_OFST 0 @@ -275,6 +322,11 @@ MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST + \ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN) +/* This may be ORed with an EVB_PORT_ID_xxx constant to pass a non-default + * stack ID (which must be in the range 1-255) along with an EVB port ID. + */ +#define EVB_STACK_ID(n) (((n) & 0xff) << 16) + /* Version 2 adds an optional argument to error returns: the errno value * may be followed by the (0-based) number of the first argument that @@ -394,6 +446,8 @@ #define MCDI_EVENT_AOE_BYTEBLASTER 0x9 /* enum: DDR ECC status update */ #define MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa +/* enum: PTP status update */ +#define MCDI_EVENT_AOE_PTP_STATUS 0xb #define MCDI_EVENT_AOE_ERR_DATA_LBN 8 #define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8 #define MCDI_EVENT_RX_ERR_RXQ_LBN 0 @@ -408,6 +462,16 @@ #define MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12 #define MCDI_EVENT_MC_REBOOT_COUNT_LBN 0 #define MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16 +#define MCDI_EVENT_MUM_ERR_TYPE_LBN 0 +#define MCDI_EVENT_MUM_ERR_TYPE_WIDTH 8 +/* enum: MUM failed to load - no valid image? */ +#define MCDI_EVENT_MUM_NO_LOAD 0x1 +/* enum: MUM f/w reported an exception */ +#define MCDI_EVENT_MUM_ASSERT 0x2 +/* enum: MUM not kicking watchdog */ +#define MCDI_EVENT_MUM_WATCHDOG 0x3 +#define MCDI_EVENT_MUM_ERR_DATA_LBN 8 +#define MCDI_EVENT_MUM_ERR_DATA_WIDTH 8 #define MCDI_EVENT_DATA_LBN 0 #define MCDI_EVENT_DATA_WIDTH 32 #define MCDI_EVENT_SRC_LBN 36 @@ -416,6 +480,8 @@ #define MCDI_EVENT_EV_CODE_WIDTH 4 #define MCDI_EVENT_CODE_LBN 44 #define MCDI_EVENT_CODE_WIDTH 8 +/* enum: Event generated by host software */ +#define MCDI_EVENT_SW_EVENT 0x0 /* enum: Bad assert. */ #define MCDI_EVENT_CODE_BADSSERT 0x1 /* enum: PM Notice. */ @@ -470,6 +536,14 @@ #define MCDI_EVENT_CODE_MC_BIST 0x19 /* enum: PTP tick event providing current NIC time */ #define MCDI_EVENT_CODE_PTP_TIME 0x1a +/* enum: MUM fault */ +#define MCDI_EVENT_CODE_MUM 0x1b +/* enum: notify the designated PF of a new authorization request */ +#define MCDI_EVENT_CODE_PROXY_REQUEST 0x1c +/* enum: notify a function that awaits an authorization that its request has + * been processed and it may now resend the command + */ +#define MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d /* enum: Artificial event generated by host and posted via MC for test * purposes. 
*/ @@ -537,6 +611,33 @@ /* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */ #define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36 #define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8 +/* For CODE_PTP_TIME events where report sync status is enabled, indicates + * whether the NIC clock has ever been set + */ +#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_LBN 36 +#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_WIDTH 1 +/* For CODE_PTP_TIME events where report sync status is enabled, indicates + * whether the NIC and System clocks are in sync + */ +#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_LBN 37 +#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_WIDTH 1 +/* For CODE_PTP_TIME events where report sync status is enabled, bits 21-26 of + * the minor value of the PTP clock + */ +#define MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38 +#define MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6 +#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0 +#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0 +#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32 +#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0 +#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0 +#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32 +/* Zero means that the request has been completed or authorized, and the driver + * should resend it. A non-zero value means that the authorization has been + * denied, and gives the reason. Typically it will be EPERM. + */ +#define MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36 +#define MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8 /* FCDI_EVENT structuredef */ #define FCDI_EVENT_LEN 8 @@ -581,6 +682,10 @@ #define FCDI_EVENT_CODE_PTP_TICK 0x7 /* enum: ECC error counters */ #define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8 +/* enum: Current status of PTP */ +#define FCDI_EVENT_CODE_PTP_STATUS 0x9 +/* enum: Port id config to map MC-FC port idx */ +#define FCDI_EVENT_CODE_PORT_CONFIG 0xa #define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0 #define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0 #define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32 @@ -594,11 +699,24 @@ #define FCDI_EVENT_LINK_STATE_DATA_OFST 0 #define FCDI_EVENT_LINK_STATE_DATA_LBN 0 #define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32 +#define FCDI_EVENT_PTP_STATE_OFST 0 +#define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */ +#define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */ +#define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */ +#define FCDI_EVENT_PTP_STATE_LBN 0 +#define FCDI_EVENT_PTP_STATE_WIDTH 32 #define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36 #define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8 #define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0 #define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0 #define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32 +/* Index of MC port being referred to */ +#define FCDI_EVENT_PORT_CONFIG_SRC_LBN 36 +#define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8 +/* FC Port index that matches the MC port index in SRC */ +#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0 +#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0 +#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32 /* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events * to the MC. Note that this structure | is overlayed over a normal FCDI event @@ -631,6 +749,90 @@ #define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64 #define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64 +/* MUM_EVENT structuredef */ +#define MUM_EVENT_LEN 8 +#define MUM_EVENT_CONT_LBN 32 +#define MUM_EVENT_CONT_WIDTH 1 +#define MUM_EVENT_LEVEL_LBN 33 +#define MUM_EVENT_LEVEL_WIDTH 3 +/* enum: Info. */ +#define MUM_EVENT_LEVEL_INFO 0x0 +/* enum: Warning. 
*/ +#define MUM_EVENT_LEVEL_WARN 0x1 +/* enum: Error. */ +#define MUM_EVENT_LEVEL_ERR 0x2 +/* enum: Fatal. */ +#define MUM_EVENT_LEVEL_FATAL 0x3 +#define MUM_EVENT_DATA_OFST 0 +#define MUM_EVENT_SENSOR_ID_LBN 0 +#define MUM_EVENT_SENSOR_ID_WIDTH 8 +/* Enum values, see field(s): */ +/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */ +#define MUM_EVENT_SENSOR_STATE_LBN 8 +#define MUM_EVENT_SENSOR_STATE_WIDTH 8 +#define MUM_EVENT_PORT_PHY_READY_LBN 0 +#define MUM_EVENT_PORT_PHY_READY_WIDTH 1 +#define MUM_EVENT_PORT_PHY_LINK_UP_LBN 1 +#define MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1 +#define MUM_EVENT_PORT_PHY_TX_LOL_LBN 2 +#define MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1 +#define MUM_EVENT_PORT_PHY_RX_LOL_LBN 3 +#define MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1 +#define MUM_EVENT_PORT_PHY_TX_LOS_LBN 4 +#define MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1 +#define MUM_EVENT_PORT_PHY_RX_LOS_LBN 5 +#define MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1 +#define MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6 +#define MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1 +#define MUM_EVENT_DATA_LBN 0 +#define MUM_EVENT_DATA_WIDTH 32 +#define MUM_EVENT_SRC_LBN 36 +#define MUM_EVENT_SRC_WIDTH 8 +#define MUM_EVENT_EV_CODE_LBN 60 +#define MUM_EVENT_EV_CODE_WIDTH 4 +#define MUM_EVENT_CODE_LBN 44 +#define MUM_EVENT_CODE_WIDTH 8 +/* enum: The MUM was rebooted. */ +#define MUM_EVENT_CODE_REBOOT 0x1 +/* enum: Bad assert. */ +#define MUM_EVENT_CODE_ASSERT 0x2 +/* enum: Sensor failure. */ +#define MUM_EVENT_CODE_SENSOR 0x3 +/* enum: Link fault has been asserted, or has cleared. */ +#define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4 +#define MUM_EVENT_SENSOR_DATA_OFST 0 +#define MUM_EVENT_SENSOR_DATA_LBN 0 +#define MUM_EVENT_SENSOR_DATA_WIDTH 32 +#define MUM_EVENT_PORT_PHY_FLAGS_OFST 0 +#define MUM_EVENT_PORT_PHY_FLAGS_LBN 0 +#define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32 +#define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0 +#define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0 +#define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32 +#define MUM_EVENT_PORT_PHY_CAPS_OFST 0 +#define MUM_EVENT_PORT_PHY_CAPS_LBN 0 +#define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32 +#define MUM_EVENT_PORT_PHY_TECH_OFST 0 +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE_EQUALIZED 0x3 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LIMITING 0x4 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LINEAR 0x5 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_BASE_T 0x6 /* enum */ +#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_LOOPBACK_PASSIVE 0x7 /* enum */ +#define MUM_EVENT_PORT_PHY_TECH_LBN 0 +#define MUM_EVENT_PORT_PHY_TECH_WIDTH 32 +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_LBN 36 +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_WIDTH 4 +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_FLAGS 0x0 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_COPPER_LEN 0x1 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_CAPS 0x2 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_TECH 0x3 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_MAX 0x4 /* enum */ +#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_LBN 40 +#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_WIDTH 4 + /***********************************/ /* MC_CMD_READ32 @@ -687,24 +889,34 @@ /* MC_CMD_COPYCODE_IN msgrequest */ #define MC_CMD_COPYCODE_IN_LEN 16 -/* Source address */ -#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0 -/* 
enum: The main image should be entered via a copy of a single word from and - * to this address when none of the other magic behaviours are required. +/* Source address + * + * The main image should be entered via a copy of a single word from and to a + * magic address, which controls various aspects of the boot. The magic address + * is a bitfield, with each bit as documented below. */ +#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0 +/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */ #define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000 -/* enum: Entering the main image via a copy of a single word from and to this - * address indicates that it should not attempt to start the datapath CPUs. - * This is useful for certain soft rebooting scenarios. (Huntington only) +/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and + * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED (see below) */ #define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0 -/* enum: Entering the main image via a copy of a single word from and to this - * address indicates that it should not attempt to parse any configuration from - * flash. (In addition, the datapath CPUs will not be started, as for - * MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR above.) This is useful for - * certain soft rebooting scenarios. (Huntington only) +/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT, + * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED and BOOT_MAGIC_IGNORE_CONFIG (see + * below) */ #define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1 /* Destination address */ #define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4 #define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8 @@ -795,6 +1007,10 @@ #define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8 #define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4 #define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31 +/* enum: A magic value hinting that the value in this register at the time of + * the failure has likely been lost. + */ +#define MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057 /* Failing thread address */ #define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132 #define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136 @@ -802,7 +1018,8 @@ /***********************************/ /* MC_CMD_LOG_CTRL - * Configure the output stream for various events and messages. + * Configure the output stream for log events such as link state changes, + * sensor notifications and MCDI completions */ #define MC_CMD_LOG_CTRL 0x7 @@ -816,6 +1033,7 @@ #define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1 /* enum: Event queue. */ #define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2 +/* Legacy argument. Must be zero. */ #define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4 /* MC_CMD_LOG_CTRL_OUT msgresponse */ @@ -955,8 +1173,12 @@ * input on the same NIC. */ #define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a +/* enum: Set the PTP sync status. Status is used by firmware to report to event + * subscribers. 
+ */ +#define MC_CMD_PTP_OP_SET_SYNC_STATUS 0x1b /* enum: Above this for future use. */ -#define MC_CMD_PTP_OP_MAX 0x1b +#define MC_CMD_PTP_OP_MAX 0x1c /* MC_CMD_PTP_IN_ENABLE msgrequest */ #define MC_CMD_PTP_IN_ENABLE_LEN 16 @@ -1191,8 +1413,12 @@ #define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12 /* MC_CMD_PTP_IN_CMD_OFST 0 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ -/* Event queue to send PTP time events to */ +/* Original field containing queue ID. Now extended to include flags. */ #define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_WIDTH 1 /* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */ #define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16 @@ -1214,6 +1440,23 @@ /* 1 to enable PPS test mode, 0 to disable and return result. */ #define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8 +/* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */ +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* NIC - Host System Clock Synchronization status */ +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8 +/* enum: Host System clock and NIC clock are not in sync */ +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0 +/* enum: Host System clock and NIC clock are synchronized */ +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC 0x1 +/* If synchronized, number of seconds until clocks should be considered to be + * no longer in sync. + */ +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16 +#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20 + /* MC_CMD_PTP_OUT msgresponse */ #define MC_CMD_PTP_OUT_LEN 0 @@ -1375,7 +1618,7 @@ #define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2 /* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */ -#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 8 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 24 /* Time format required/used by for this NIC. Applies to all PTP MCDI * operations that pass times between the host and firmware. If this operation * is not supported (older firmware) a format of seconds and nanoseconds should @@ -1396,6 +1639,13 @@ * end and start times minus the time that the MC waited for host end. 
*/ #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4 +/* Various PTP capabilities */ +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20 /* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16 @@ -1415,6 +1665,9 @@ /* Enum values, see field(s): */ /* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */ +/* MC_CMD_PTP_OUT_SET_SYNC_STATUS msgresponse */ +#define MC_CMD_PTP_OUT_SET_SYNC_STATUS_LEN 0 + /***********************************/ /* MC_CMD_CSR_READ32 @@ -1915,6 +2168,14 @@ #define MC_CMD_FW_FULL_FEATURED 0x0 /* enum: Prefer to use firmware with fewer features but lower latency */ #define MC_CMD_FW_LOW_LATENCY 0x1 +/* enum: Prefer to use firmware for SolarCapture packed stream mode */ +#define MC_CMD_FW_PACKED_STREAM 0x2 +/* enum: Prefer to use firmware with fewer features and simpler TX event + * batching but higher TX packet rate + */ +#define MC_CMD_FW_HIGH_TX_RATE 0x3 +/* enum: Reserved value */ +#define MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4 /* enum: Only this option is allowed for non-admin functions */ #define MC_CMD_FW_DONT_CARE 0xffffffff @@ -2481,6 +2742,12 @@ #define MC_CMD_LOOPBACK_SD_FES_WS 0x22 /* enum: Near side of AOE Siena side port */ #define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 +/* enum: Medford Wireside datapath loopback */ +#define MC_CMD_LOOPBACK_DATA_WS 0x24 +/* enum: Force link up without setting up any physical loopback (snapper use + * only) + */ +#define MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 /* Supported loopbacks. */ #define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8 #define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8 @@ -2552,12 +2819,8 @@ #define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1 /* This returns the negotiated flow control value. */ #define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20 -/* enum: Flow control is off. */ -#define MC_CMD_FCNTL_OFF 0x0 -/* enum: Respond to flow control. */ -#define MC_CMD_FCNTL_RESPOND 0x1 -/* enum: Respond to and Issue flow control. */ -#define MC_CMD_FCNTL_BIDIR 0x2 +/* Enum values, see field(s): */ +/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */ #define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24 #define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 #define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 @@ -2632,7 +2895,7 @@ #define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_LINK /* MC_CMD_SET_MAC_IN msgrequest */ -#define MC_CMD_SET_MAC_IN_LEN 24 +#define MC_CMD_SET_MAC_IN_LEN 28 /* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of * EtherII, VLAN, bug16011 padding). */ @@ -2649,13 +2912,20 @@ #define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1 #define MC_CMD_SET_MAC_IN_FCNTL_OFST 20 /* enum: Flow control is off. */ -/* MC_CMD_FCNTL_OFF 0x0 */ +#define MC_CMD_FCNTL_OFF 0x0 /* enum: Respond to flow control. */ -/* MC_CMD_FCNTL_RESPOND 0x1 */ +#define MC_CMD_FCNTL_RESPOND 0x1 /* enum: Respond to and Issue flow control. */ -/* MC_CMD_FCNTL_BIDIR 0x2 */ +#define MC_CMD_FCNTL_BIDIR 0x2 /* enum: Auto neg flow control. */ #define MC_CMD_FCNTL_AUTO 0x3 +/* enum: Priority flow control (eftest builds only). */ +#define MC_CMD_FCNTL_QBB 0x4 +/* enum: Issue flow control. 
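The GET_ATTRIBUTES response above grows from 8 to 24 bytes and gains a CAPABILITIES word at offset 8, whose bit 0 advertises sync-status reporting. A small sketch of testing that bit in a raw little-endian response buffer, assuming a shorter response means older firmware without the capability; mcdi_dword() and ptp_reports_sync_status() are illustrative names, not driver API:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Mirrored from MC_CMD_PTP_OUT_GET_ATTRIBUTES above. */
#define GET_ATTRIBUTES_CAPABILITIES_OFST 8
#define REPORT_SYNC_STATUS_LBN           0

/* Read a little-endian dword at a byte offset of an MCDI response. */
static uint32_t mcdi_dword(const uint8_t *buf, size_t ofst)
{
        return (uint32_t)buf[ofst] |
               (uint32_t)buf[ofst + 1] << 8 |
               (uint32_t)buf[ofst + 2] << 16 |
               (uint32_t)buf[ofst + 3] << 24;
}

static bool ptp_reports_sync_status(const uint8_t *resp, size_t resp_len)
{
        /* Older firmware returns the short (8-byte) response and so cannot
         * advertise the capability at all.
         */
        if (resp_len < GET_ATTRIBUTES_CAPABILITIES_OFST + 4)
                return false;

        return mcdi_dword(resp, GET_ATTRIBUTES_CAPABILITIES_OFST) &
               (1u << REPORT_SYNC_STATUS_LBN);
}

int main(void)
{
        uint8_t resp[24] = { 0 };

        resp[8] = 0x01; /* CAPABILITIES bit 0: REPORT_SYNC_STATUS */
        return ptp_reports_sync_status(resp, sizeof(resp)) ? 0 : 1;
}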
*/ +#define MC_CMD_FCNTL_GENERATE 0x5 +#define MC_CMD_SET_MAC_IN_FLAGS_OFST 24 +#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0 +#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1 /* MC_CMD_SET_MAC_OUT msgresponse */ #define MC_CMD_SET_MAC_OUT_LEN 0 @@ -2748,7 +3018,8 @@ * guarantee consistent results. If the DMA_ADDR is 0, then no DMA is * performed, and the statistics may be read from the message response. If * DMA_ADDR != 0, then the statistics are dmad to that (page-aligned location). - * Locks required: None. Returns: 0, ETIME + * Locks required: None. The PERIODIC_CLEAR option is not used and now has no + * effect. Returns: 0, ETIME */ #define MC_CMD_MAC_STATS 0x2e @@ -2791,6 +3062,7 @@ #define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4 #define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS #define MC_CMD_MAC_GENERATION_START 0x0 /* enum */ +#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */ #define MC_CMD_MAC_TX_PKTS 0x1 /* enum */ #define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */ #define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */ @@ -2890,8 +3162,8 @@ * PM_AND_RXDP_COUNTERS capability only. */ #define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46 -/* enum: RXDP counter: Number of times an emergency descriptor fetch was - * performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. +/* enum: RXDP counter: Number of times an hlb descriptor fetch was performed. + * Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. */ #define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47 /* enum: RXDP counter: Number of times the DPCPU waited for an existing @@ -3213,6 +3485,8 @@ #define MC_CMD_NVRAM_TYPE_LICENSE 0x12 /* enum: FC Log. */ #define MC_CMD_NVRAM_TYPE_FC_LOG 0x13 +/* enum: Additional flash on FPGA. */ +#define MC_CMD_NVRAM_TYPE_FC_EXTRA 0x14 /***********************************/ @@ -3407,6 +3681,8 @@ */ #define MC_CMD_SCHEDINFO 0x3e +#define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN + /* MC_CMD_SCHEDINFO_IN msgrequest */ #define MC_CMD_SCHEDINFO_IN_LEN 0 @@ -3593,6 +3869,68 @@ #define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c /* enum: Hotpoint temperature: degC */ #define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d +/* enum: Port 0 PHY power switch over-current: bool */ +#define MC_CMD_SENSOR_PHY_POWER_PORT0 0x2e +/* enum: Port 1 PHY power switch over-current: bool */ +#define MC_CMD_SENSOR_PHY_POWER_PORT1 0x2f +/* enum: Mop-up microcontroller reference voltage (millivolts) */ +#define MC_CMD_SENSOR_MUM_VCC 0x30 +/* enum: 0.9v power phase A voltage: mV */ +#define MC_CMD_SENSOR_IN_0V9_A 0x31 +/* enum: 0.9v power phase A current: mA */ +#define MC_CMD_SENSOR_IN_I0V9_A 0x32 +/* enum: 0.9V voltage regulator phase A temperature: degC */ +#define MC_CMD_SENSOR_VREG_0V9_A_TEMP 0x33 +/* enum: 0.9v power phase B voltage: mV */ +#define MC_CMD_SENSOR_IN_0V9_B 0x34 +/* enum: 0.9v power phase B current: mA */ +#define MC_CMD_SENSOR_IN_I0V9_B 0x35 +/* enum: 0.9V voltage regulator phase B temperature: degC */ +#define MC_CMD_SENSOR_VREG_0V9_B_TEMP 0x36 +/* enum: CCOM AVREG 1v2 supply (interval ADC): mV */ +#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY 0x37 +/* enum: CCOM AVREG 1v2 supply (external ADC): mV */ +#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC 0x38 +/* enum: CCOM AVREG 1v8 supply (interval ADC): mV */ +#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39 +/* enum: CCOM AVREG 1v8 supply (external ADC): mV */ +#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a +/* enum: Not a sensor: reserved for the next page flag */ +#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f +/* enum: controller 
internal temperature sensor voltage on master core + * (internal ADC): mV + */ +#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT 0x40 +/* enum: controller internal temperature on master core (internal ADC): degC */ +#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP 0x41 +/* enum: controller internal temperature sensor voltage on master core + * (external ADC): mV + */ +#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC 0x42 +/* enum: controller internal temperature on master core (external ADC): degC */ +#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC 0x43 +/* enum: controller internal temperature on slave core sensor voltage (internal + * ADC): mV + */ +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT 0x44 +/* enum: controller internal temperature on slave core (internal ADC): degC */ +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP 0x45 +/* enum: controller internal temperature on slave core sensor voltage (external + * ADC): mV + */ +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC 0x46 +/* enum: controller internal temperature on slave core (external ADC): degC */ +#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC 0x47 +/* enum: Voltage supplied to the SODIMMs from their power supply: mV */ +#define MC_CMD_SENSOR_SODIMM_VOUT 0x49 +/* enum: Temperature of SODIMM 0 (if installed): degC */ +#define MC_CMD_SENSOR_SODIMM_0_TEMP 0x4a +/* enum: Temperature of SODIMM 1 (if installed): degC */ +#define MC_CMD_SENSOR_SODIMM_1_TEMP 0x4b +/* enum: Voltage supplied to the QSFP #0 from their power supply: mV */ +#define MC_CMD_SENSOR_PHY0_VCC 0x4c +/* enum: Voltage supplied to the QSFP #1 from their power supply: mV */ +#define MC_CMD_SENSOR_PHY1_VCC 0x4d /* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */ #define MC_CMD_SENSOR_ENTRY_OFST 4 #define MC_CMD_SENSOR_ENTRY_LEN 8 @@ -3701,6 +4039,8 @@ #define MC_CMD_SENSOR_STATE_BROKEN 0x3 /* enum: Sensor is working but does not currently have a reading. */ #define MC_CMD_SENSOR_STATE_NO_READING 0x4 +/* enum: Sensor initialisation failed. */ +#define MC_CMD_SENSOR_STATE_INIT_FAILED 0x5 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8 #define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3 @@ -3870,6 +4210,7 @@ /* MC_CMD_WORKAROUND_IN msgrequest */ #define MC_CMD_WORKAROUND_IN_LEN 8 +/* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */ #define MC_CMD_WORKAROUND_IN_TYPE_OFST 0 /* enum: Bug 17230 work around. */ #define MC_CMD_WORKAROUND_BUG17230 0x1 @@ -3877,11 +4218,38 @@ #define MC_CMD_WORKAROUND_BUG35388 0x2 /* enum: Bug35017 workaround (A64 tables must be identity map) */ #define MC_CMD_WORKAROUND_BUG35017 0x3 +/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */ +#define MC_CMD_WORKAROUND_BUG41750 0x4 +/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution + * - before adding code that queries this workaround, remember that there's + * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008, + * and will hence (incorrectly) report that the bug doesn't exist. + */ +#define MC_CMD_WORKAROUND_BUG42008 0x5 +/* enum: Bug 26807 features present in firmware (multicast filter chaining) + * This feature cannot be turned on/off while there are any filters already + * present. The behaviour in such case depends on the acting client's privilege + * level. If the client has the admin privilege, then all functions that have + * filters installed will be FLRed and the FLR_DONE flag will be set. 
Otherwise + * the command will fail with MC_CMD_ERR_FILTERS_PRESENT. + */ +#define MC_CMD_WORKAROUND_BUG26807 0x6 +/* 0 = disable the workaround indicated by TYPE; any non-zero value = enable + * the workaround + */ #define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4 /* MC_CMD_WORKAROUND_OUT msgresponse */ #define MC_CMD_WORKAROUND_OUT_LEN 0 +/* MC_CMD_WORKAROUND_EXT_OUT msgresponse: This response format will be used + * when (TYPE == MC_CMD_WORKAROUND_BUG26807) + */ +#define MC_CMD_WORKAROUND_EXT_OUT_LEN 4 +#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0 +#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0 +#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1 + /***********************************/ /* MC_CMD_GET_PHY_MEDIA_INFO @@ -4093,7 +4461,7 @@ /***********************************/ /* MC_CMD_GET_MAC_ADDRESSES - * Returns the base MAC, count and stride for the requestiong function + * Returns the base MAC, count and stride for the requesting function */ #define MC_CMD_GET_MAC_ADDRESSES 0x55 @@ -4115,6 +4483,527 @@ /* Spacing of allocated MAC addresses */ #define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12 + +/***********************************/ +/* MC_CMD_CLP + * Perform a CLP related operation + */ +#define MC_CMD_CLP 0x56 + +#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_CLP_IN msgrequest */ +#define MC_CMD_CLP_IN_LEN 4 +/* Sub operation */ +#define MC_CMD_CLP_IN_OP_OFST 0 +/* enum: Return to factory default settings */ +#define MC_CMD_CLP_OP_DEFAULT 0x1 +/* enum: Set MAC address */ +#define MC_CMD_CLP_OP_SET_MAC 0x2 +/* enum: Get MAC address */ +#define MC_CMD_CLP_OP_GET_MAC 0x3 +/* enum: Set UEFI/GPXE boot mode */ +#define MC_CMD_CLP_OP_SET_BOOT 0x4 +/* enum: Get UEFI/GPXE boot mode */ +#define MC_CMD_CLP_OP_GET_BOOT 0x5 + +/* MC_CMD_CLP_OUT msgresponse */ +#define MC_CMD_CLP_OUT_LEN 0 + +/* MC_CMD_CLP_IN_DEFAULT msgrequest */ +#define MC_CMD_CLP_IN_DEFAULT_LEN 4 +/* MC_CMD_CLP_IN_OP_OFST 0 */ + +/* MC_CMD_CLP_OUT_DEFAULT msgresponse */ +#define MC_CMD_CLP_OUT_DEFAULT_LEN 0 + +/* MC_CMD_CLP_IN_SET_MAC msgrequest */ +#define MC_CMD_CLP_IN_SET_MAC_LEN 12 +/* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MAC address assigned to port */ +#define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4 +#define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6 +/* Padding */ +#define MC_CMD_CLP_IN_SET_MAC_RESERVED_OFST 10 +#define MC_CMD_CLP_IN_SET_MAC_RESERVED_LEN 2 + +/* MC_CMD_CLP_OUT_SET_MAC msgresponse */ +#define MC_CMD_CLP_OUT_SET_MAC_LEN 0 + +/* MC_CMD_CLP_IN_GET_MAC msgrequest */ +#define MC_CMD_CLP_IN_GET_MAC_LEN 4 +/* MC_CMD_CLP_IN_OP_OFST 0 */ + +/* MC_CMD_CLP_OUT_GET_MAC msgresponse */ +#define MC_CMD_CLP_OUT_GET_MAC_LEN 8 +/* MAC address assigned to port */ +#define MC_CMD_CLP_OUT_GET_MAC_ADDR_OFST 0 +#define MC_CMD_CLP_OUT_GET_MAC_ADDR_LEN 6 +/* Padding */ +#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_OFST 6 +#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_LEN 2 + +/* MC_CMD_CLP_IN_SET_BOOT msgrequest */ +#define MC_CMD_CLP_IN_SET_BOOT_LEN 5 +/* MC_CMD_CLP_IN_OP_OFST 0 */ +/* Boot flag */ +#define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4 +#define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1 + +/* MC_CMD_CLP_OUT_SET_BOOT msgresponse */ +#define MC_CMD_CLP_OUT_SET_BOOT_LEN 0 + +/* MC_CMD_CLP_IN_GET_BOOT msgrequest */ +#define MC_CMD_CLP_IN_GET_BOOT_LEN 4 +/* MC_CMD_CLP_IN_OP_OFST 0 */ + +/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */ +#define MC_CMD_CLP_OUT_GET_BOOT_LEN 4 +/* Boot flag */ +#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_OFST 0 +#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_LEN 1 +/* Padding */ +#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_OFST 1 +#define 
MC_CMD_CLP_OUT_GET_BOOT_RESERVED_LEN 3 + + +/***********************************/ +/* MC_CMD_MUM + * Perform a MUM operation + */ +#define MC_CMD_MUM 0x57 + +#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_MUM_IN msgrequest */ +#define MC_CMD_MUM_IN_LEN 4 +#define MC_CMD_MUM_IN_OP_HDR_OFST 0 +#define MC_CMD_MUM_IN_OP_LBN 0 +#define MC_CMD_MUM_IN_OP_WIDTH 8 +/* enum: NULL MCDI command to MUM */ +#define MC_CMD_MUM_OP_NULL 0x1 +/* enum: Get MUM version */ +#define MC_CMD_MUM_OP_GET_VERSION 0x2 +/* enum: Issue raw I2C command to MUM */ +#define MC_CMD_MUM_OP_RAW_CMD 0x3 +/* enum: Read from registers on devices connected to MUM. */ +#define MC_CMD_MUM_OP_READ 0x4 +/* enum: Write to registers on devices connected to MUM. */ +#define MC_CMD_MUM_OP_WRITE 0x5 +/* enum: Control UART logging. */ +#define MC_CMD_MUM_OP_LOG 0x6 +/* enum: Operations on MUM GPIO lines */ +#define MC_CMD_MUM_OP_GPIO 0x7 +/* enum: Get sensor readings from MUM */ +#define MC_CMD_MUM_OP_READ_SENSORS 0x8 +/* enum: Initiate clock programming on the MUM */ +#define MC_CMD_MUM_OP_PROGRAM_CLOCKS 0x9 +/* enum: Initiate FPGA load from flash on the MUM */ +#define MC_CMD_MUM_OP_FPGA_LOAD 0xa +/* enum: Request sensor reading from MUM ADC resulting from earlier request via + * MUM ATB + */ +#define MC_CMD_MUM_OP_READ_ATB_SENSOR 0xb +/* enum: Send commands relating to the QSFP ports via the MUM for PHY + * operations + */ +#define MC_CMD_MUM_OP_QSFP 0xc + +/* MC_CMD_MUM_IN_NULL msgrequest */ +#define MC_CMD_MUM_IN_NULL_LEN 4 +/* MUM cmd header */ +#define MC_CMD_MUM_IN_CMD_OFST 0 + +/* MC_CMD_MUM_IN_GET_VERSION msgrequest */ +#define MC_CMD_MUM_IN_GET_VERSION_LEN 4 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ + +/* MC_CMD_MUM_IN_READ msgrequest */ +#define MC_CMD_MUM_IN_READ_LEN 16 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* ID of (device connected to MUM) to read from registers of */ +#define MC_CMD_MUM_IN_READ_DEVICE_OFST 4 +/* enum: Hittite HMC1035 clock generator on Sorrento board */ +#define MC_CMD_MUM_DEV_HITTITE 0x1 +/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */ +#define MC_CMD_MUM_DEV_HITTITE_NIC 0x2 +/* 32-bit address to read from */ +#define MC_CMD_MUM_IN_READ_ADDR_OFST 8 +/* Number of words to read. 
*/ +#define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12 + +/* MC_CMD_MUM_IN_WRITE msgrequest */ +#define MC_CMD_MUM_IN_WRITE_LENMIN 16 +#define MC_CMD_MUM_IN_WRITE_LENMAX 252 +#define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num)) +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* ID of (device connected to MUM) to write to registers of */ +#define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4 +/* enum: Hittite HMC1035 clock generator on Sorrento board */ +/* MC_CMD_MUM_DEV_HITTITE 0x1 */ +/* 32-bit address to write to */ +#define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8 +/* Words to write */ +#define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12 +#define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4 +#define MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1 +#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60 + +/* MC_CMD_MUM_IN_RAW_CMD msgrequest */ +#define MC_CMD_MUM_IN_RAW_CMD_LENMIN 17 +#define MC_CMD_MUM_IN_RAW_CMD_LENMAX 252 +#define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num)) +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MUM I2C cmd code */ +#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4 +/* Number of bytes to write */ +#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8 +/* Number of bytes to read */ +#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12 +/* Bytes to write */ +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16 +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1 +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1 +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236 + +/* MC_CMD_MUM_IN_LOG msgrequest */ +#define MC_CMD_MUM_IN_LOG_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_LOG_OP_OFST 4 +#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */ + +/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */ +#define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* MC_CMD_MUM_IN_LOG_OP_OFST 4 */ +/* Enable/disable debug output to UART */ +#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8 + +/* MC_CMD_MUM_IN_GPIO msgrequest */ +#define MC_CMD_MUM_IN_GPIO_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0 +#define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8 +#define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE 0x1 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_READ 0x2 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE 0x3 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ 0x4 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP 0x5 /* enum */ + +/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4 + +/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4 +/* The first 32-bit word to be written to the GPIO OUT register. */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8 +/* The second 32-bit word to be written to the GPIO OUT register. */ +#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12 + +/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4 + +/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4 +/* The first 32-bit word to be written to the GPIO OUT ENABLE register. 
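Variable-length requests such as MC_CMD_MUM_IN_WRITE above express their size as a LEN(num) macro bounded by LENMIN/LENMAX and a MAXNUM word count. A short sketch of the sizing arithmetic, with the constants mirrored from the hunk and mum_write_req_len() as an illustrative helper only:

#include <stdio.h>

/* Mirrored from MC_CMD_MUM_IN_WRITE above. */
#define MUM_IN_WRITE_LEN(num)      (12 + 4 * (num))
#define MUM_IN_WRITE_BUFFER_MAXNUM 60

/* Request length in bytes for 'num' data words, or -1 if out of range. */
static int mum_write_req_len(unsigned int num)
{
        if (num < 1 || num > MUM_IN_WRITE_BUFFER_MAXNUM)
                return -1;
        return MUM_IN_WRITE_LEN(num); /* 12-byte header + 4 bytes per word */
}

int main(void)
{
        printf("1 word   -> %d bytes\n", mum_write_req_len(1));  /* 16 = LENMIN */
        printf("60 words -> %d bytes\n", mum_write_req_len(60)); /* 252 = LENMAX */
        printf("61 words -> %d\n", mum_write_req_len(61));       /* rejected */
        return 0;
}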
*/ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8 +/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12 + +/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4 + +/* MC_CMD_MUM_IN_GPIO_OP msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8 +#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* enum */ +#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16 +#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8 + +/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8 + +/* MC_CMD_MUM_IN_READ_SENSORS msgrequest */ +#define MC_CMD_MUM_IN_READ_SENSORS_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4 +#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0 +#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8 +#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8 +#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8 + +/* MC_CMD_MUM_IN_PROGRAM_CLOCKS msgrequest */ +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* Bit-mask of clocks to be programmed */ +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4 +#define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */ +#define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */ +#define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */ +/* Control flags for clock programming */ +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1 + +/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */ +#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +/* Enable/Disable FPGA config from flash */ +#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4 + +/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */ +#define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ + +/* MC_CMD_MUM_IN_QSFP msgrequest */ +#define MC_CMD_MUM_IN_QSFP_LEN 12 +/* MUM cmd header */ +/* 
MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_QSFP_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0 +#define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4 +#define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */ +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE 0x1 /* enum */ +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP 0x2 /* enum */ +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO 0x3 /* enum */ +#define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */ +#define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */ +#define MC_CMD_MUM_IN_QSFP_IDX_OFST 8 + +/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */ +#define MC_CMD_MUM_IN_QSFP_INIT_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12 + +/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */ +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16 +#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20 + +/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */ +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8 + +/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */ +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8 +#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12 + +/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */ +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8 + +/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */ +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12 +/* MC_CMD_MUM_IN_CMD_OFST 0 */ +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4 +#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8 + +/* MC_CMD_MUM_OUT msgresponse */ +#define MC_CMD_MUM_OUT_LEN 0 + +/* MC_CMD_MUM_OUT_NULL msgresponse */ +#define MC_CMD_MUM_OUT_NULL_LEN 0 + +/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */ +#define MC_CMD_MUM_OUT_GET_VERSION_LEN 12 +#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4 +#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8 + +/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */ +#define MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1 +#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252 +#define MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num)) +/* returned data */ +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0 +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1 +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1 +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252 + +/* MC_CMD_MUM_OUT_READ msgresponse */ +#define MC_CMD_MUM_OUT_READ_LENMIN 4 +#define MC_CMD_MUM_OUT_READ_LENMAX 252 +#define MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num)) +#define MC_CMD_MUM_OUT_READ_BUFFER_OFST 0 +#define MC_CMD_MUM_OUT_READ_BUFFER_LEN 4 +#define MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1 +#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63 + +/* MC_CMD_MUM_OUT_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_LOG msgresponse */ +#define 
MC_CMD_MUM_OUT_LOG_LEN 0 + +/* MC_CMD_MUM_OUT_LOG_OP_UART msgresponse */ +#define MC_CMD_MUM_OUT_LOG_OP_UART_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_IN_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8 +/* The first 32-bit word read from the GPIO IN register. */ +#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0 +/* The second 32-bit word read from the GPIO IN register. */ +#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4 + +/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OUT_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8 +/* The first 32-bit word read from the GPIO OUT register. */ +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0 +/* The second 32-bit word read from the GPIO OUT register. */ +#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4 + +/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8 +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0 +#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4 +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG_LEN 0 + +/* MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE msgresponse */ +#define MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE_LEN 0 + +/* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */ +#define MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4 +#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252 +#define MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num)) +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0 +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4 +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1 +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63 +#define MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0 +#define MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16 +#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16 +#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8 +#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24 +#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8 + +/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */ +#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4 +#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0 + +/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */ +#define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0 + +/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */ +#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4 +#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0 + +/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0 + +/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1 + +/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4 +#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0 + +/* 
MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num)) +/* in bytes */ +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248 + +/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8 +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0 +#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4 + +/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */ +#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4 +#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0 + /* MC_CMD_RESOURCE_SPECIFIER enum */ /* enum: Any */ #define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff @@ -4203,6 +5092,30 @@ #define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00 /* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */ #define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff +/* enum: Primary FPGA partition */ +#define NVRAM_PARTITION_TYPE_FPGA 0xb00 +/* enum: Secondary FPGA partition */ +#define NVRAM_PARTITION_TYPE_FPGA_BACKUP 0xb01 +/* enum: FC firmware partition */ +#define NVRAM_PARTITION_TYPE_FC_FIRMWARE 0xb02 +/* enum: FC License partition */ +#define NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03 +/* enum: Non-volatile log output partition for FC */ +#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04 +/* enum: MUM firmware partition */ +#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00 +/* enum: MUM Non-volatile log output partition. */ +#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01 +/* enum: MUM Application table partition. */ +#define NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02 +/* enum: MUM boot rom partition. */ +#define NVRAM_PARTITION_TYPE_MUM_BOOT_ROM 0xc03 +/* enum: MUM production signatures & calibration rom partition. */ +#define NVRAM_PARTITION_TYPE_MUM_PROD_ROM 0xc04 +/* enum: MUM user signatures & calibration rom partition. */ +#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05 +/* enum: MUM fuses and lockbits partition. */ +#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06 /* enum: Start of reserved value range (firmware may use for any purpose) */ #define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00 /* enum: End of reserved value range (firmware may use for any purpose) */ @@ -4218,66 +5131,69 @@ #define LICENSED_APP_ID_LEN 4 #define LICENSED_APP_ID_ID_OFST 0 /* enum: OpenOnload */ -#define LICENSED_APP_ID_ONLOAD 0x1 +#define LICENSED_APP_ID_ONLOAD 0x1 /* enum: PTP timestamping */ -#define LICENSED_APP_ID_PTP 0x2 +#define LICENSED_APP_ID_PTP 0x2 /* enum: SolarCapture Pro */ -#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4 +#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4 +/* enum: SolarSecure filter engine */ +#define LICENSED_APP_ID_SOLARSECURE 0x8 +/* enum: Performance monitor */ +#define LICENSED_APP_ID_PERF_MONITOR 0x10 +/* enum: SolarCapture Live */ +#define LICENSED_APP_ID_SOLARCAPTURE_LIVE 0x20 +/* enum: Capture SolarSystem */ +#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM 0x40 +/* enum: Network Access Control */ +#define LICENSED_APP_ID_NETWORK_ACCESS_CONTROL 0x80 #define LICENSED_APP_ID_ID_LBN 0 #define LICENSED_APP_ID_ID_WIDTH 32 - -/***********************************/ -/* MC_CMD_GET_WORKAROUNDS - * Read the list of all implemented and all currently enabled workarounds. 
The - * enums here must correspond with those in MC_CMD_WORKAROUND. - */ -#define MC_CMD_GET_WORKAROUNDS 0x59 - -/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */ -#define MC_CMD_GET_WORKAROUNDS_OUT_LEN 8 -/* Each workaround is represented by a single bit according to the enums below. - */ -#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0 -#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4 -/* enum: Bug 17230 work around. */ -#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2 -/* enum: Bug 35388 work around (unsafe EVQ writes). */ -#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4 -/* enum: Bug35017 workaround (A64 tables must be identity map) */ -#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8 - - -/***********************************/ -/* MC_CMD_LINK_STATE_MODE - * Read/set link state mode of a VF - */ -#define MC_CMD_LINK_STATE_MODE 0x5c - -#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_LINK_STATE_MODE_IN msgrequest */ -#define MC_CMD_LINK_STATE_MODE_IN_LEN 8 -/* The target function to have its link state mode read or set, must be a VF - * e.g. VF 1,3 = 0x00030001 - */ -#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0 -#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0 -#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16 -#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16 -#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16 -/* New link state mode to be set */ -#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4 -#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */ -#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */ -#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */ -/* enum: Use this value to just read the existing setting without modifying it. - */ -#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff - -/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */ -#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4 -#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0 +/* TX_TIMESTAMP_EVENT structuredef */ +#define TX_TIMESTAMP_EVENT_LEN 6 +/* lower 16 bits of timestamp data */ +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_OFST 0 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LEN 2 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LBN 0 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_WIDTH 16 +/* Type of TX event, ordinary TX completion, low or high part of TX timestamp + */ +#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_OFST 3 +#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1 +/* enum: This is a TX completion event, not a timestamp */ +#define TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0 +/* enum: This is the low part of a TX timestamp event */ +#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51 +/* enum: This is the high part of a TX timestamp event */ +#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI 0x52 +#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LBN 24 +#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_WIDTH 8 +/* upper 16 bits of timestamp data */ +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_OFST 4 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LEN 2 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LBN 32 +#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_WIDTH 16 + +/* RSS_MODE structuredef */ +#define RSS_MODE_LEN 1 +/* The RSS mode for a particular packet type is a value from 0 - 15 which can + * be considered as 4 bits selecting which fields are included in the hash. (A + * value 0 effectively disables RSS spreading for the packet type.) The YAML + * generation tools require this structure to be a whole number of bytes wide, + * but only 4 bits are relevant. 
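The RSS_MODE comment above describes a 4-bit hash selector per packet type, with 0 disabling spreading for that type. A small sketch composing common selectors from the HASH_* bit assignments that follow; the shorthand macro names here are illustrative abbreviations of those definitions:

#include <stdint.h>
#include <stdio.h>

/* Mirrored from the RSS_MODE bit assignments below. */
#define HASH_SRC_ADDR (1u << 0)
#define HASH_DST_ADDR (1u << 1)
#define HASH_SRC_PORT (1u << 2)
#define HASH_DST_PORT (1u << 3)

int main(void)
{
        /* Classic 2-tuple (addresses only) and 4-tuple selectors. */
        uint8_t two_tuple  = HASH_SRC_ADDR | HASH_DST_ADDR;             /* 0x3 */
        uint8_t four_tuple = two_tuple | HASH_SRC_PORT | HASH_DST_PORT; /* 0xf */
        uint8_t disabled   = 0; /* no RSS spreading for this packet type */

        printf("2-tuple %#x, 4-tuple %#x, disabled %#x\n",
               two_tuple, four_tuple, disabled);
        return 0;
}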
+ */ +#define RSS_MODE_HASH_SELECTOR_OFST 0 +#define RSS_MODE_HASH_SELECTOR_LEN 1 +#define RSS_MODE_HASH_SRC_ADDR_LBN 0 +#define RSS_MODE_HASH_SRC_ADDR_WIDTH 1 +#define RSS_MODE_HASH_DST_ADDR_LBN 1 +#define RSS_MODE_HASH_DST_ADDR_WIDTH 1 +#define RSS_MODE_HASH_SRC_PORT_LBN 2 +#define RSS_MODE_HASH_SRC_PORT_WIDTH 1 +#define RSS_MODE_HASH_DST_PORT_LBN 3 +#define RSS_MODE_HASH_DST_PORT_WIDTH 1 +#define RSS_MODE_HASH_SELECTOR_LBN 0 +#define RSS_MODE_HASH_SELECTOR_WIDTH 8 /***********************************/ @@ -4413,7 +5329,9 @@ #define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL -/* MC_CMD_INIT_RXQ_IN msgrequest */ +/* MC_CMD_INIT_RXQ_IN msgrequest: Legacy RXQ_INIT request. Use extended version + * in new code. + */ #define MC_CMD_INIT_RXQ_IN_LENMIN 36 #define MC_CMD_INIT_RXQ_IN_LENMAX 252 #define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num)) @@ -4456,9 +5374,73 @@ #define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1 #define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28 +/* MC_CMD_INIT_RXQ_EXT_IN msgrequest: Extended RXQ_INIT with additional mode + * flags + */ +#define MC_CMD_INIT_RXQ_EXT_IN_LEN 544 +/* Size, in entries */ +#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0 +/* The EVQ to send events to. This is an index originally specified to INIT_EVQ + */ +#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4 +/* The value to put in the event data. Check hardware spec. for valid range. */ +#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12 +/* There will be more flags here. */ +#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_LBN 2 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_LBN 3 +#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_LBN 7 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_LBN 8 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_LBN 9 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN 10 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH 4 +/* enum: One packet per descriptor (for normal networking) */ +#define MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET 0x0 +/* enum: Pack multiple packets into large descriptors (for SolarCapture) */ +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM 0x1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_LBN 14 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN 15 +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3 +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K 0x1 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20 +/* The port ID associated with 
the v-adaptor which should contain this DMAQ. */ +#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64 +/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */ +#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540 + /* MC_CMD_INIT_RXQ_OUT msgresponse */ #define MC_CMD_INIT_RXQ_OUT_LEN 0 +/* MC_CMD_INIT_RXQ_EXT_OUT msgresponse */ +#define MC_CMD_INIT_RXQ_EXT_OUT_LEN 0 + /***********************************/ /* MC_CMD_INIT_TXQ @@ -4467,7 +5449,9 @@ #define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL -/* MC_CMD_INIT_TXQ_IN msgrequest */ +/* MC_CMD_INIT_TXQ_IN msgrequest: Legacy INIT_TXQ request. Use extended version + * in new code. + */ #define MC_CMD_INIT_TXQ_IN_LENMIN 36 #define MC_CMD_INIT_TXQ_IN_LENMAX 252 #define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num)) @@ -4499,6 +5483,10 @@ #define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1 #define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9 #define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_LBN 10 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ #define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20 /* The port ID associated with the v-adaptor which should contain this DMAQ. */ @@ -4511,6 +5499,60 @@ #define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1 #define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28 +/* MC_CMD_INIT_TXQ_EXT_IN msgrequest: Extended INIT_TXQ with additional mode + * flags + */ +#define MC_CMD_INIT_TXQ_EXT_IN_LEN 544 +/* Size, in entries */ +#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. + */ +#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4 +/* The value to put in the event data. Check hardware spec. for valid range. */ +#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12 +/* There will be more flags here. 
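The extended INIT_RXQ request above packs its mode bits into a single FLAGS dword, including the DMA_MODE and packed-stream buffer-size fields. A hedged sketch of composing that word for packed-stream mode with 256K buffers and scatter disabled, purely as an example of the field layout; the constants mirror the MC_CMD_INIT_RXQ_EXT_IN definitions above and nothing here is taken from the driver itself:

#include <stdint.h>
#include <stdio.h>

/* Mirrored from MC_CMD_INIT_RXQ_EXT_IN above. */
#define FLAG_DISABLE_SCATTER_LBN    9
#define DMA_MODE_LBN                10
#define DMA_MODE_PACKED_STREAM      0x1
#define PACKED_STREAM_BUFF_SIZE_LBN 15
#define PS_BUFF_256K                0x2

int main(void)
{
        uint32_t flags = 0;

        flags |= DMA_MODE_PACKED_STREAM << DMA_MODE_LBN;
        flags |= PS_BUFF_256K << PACKED_STREAM_BUFF_SIZE_LBN;
        flags |= 1u << FLAG_DISABLE_SCATTER_LBN;

        printf("INIT_RXQ_EXT FLAGS = 0x%08x\n", flags);
        return 0;
}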
*/ +#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_LBN 2 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_LBN 3 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_LBN 4 +#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_LBN 8 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_LBN 9 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_LBN 10 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20 +/* The port ID associated with the v-adaptor which should contain this DMAQ. */ +#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 1 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64 +/* Flags related to Qbb flow control mode. */ +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_WIDTH 3 + /* MC_CMD_INIT_TXQ_OUT msgresponse */ #define MC_CMD_INIT_TXQ_OUT_LEN 0 @@ -4617,6 +5659,132 @@ /* MC_CMD_PROXY_CMD_OUT msgresponse */ #define MC_CMD_PROXY_CMD_OUT_LEN 0 +/* MC_PROXY_STATUS_BUFFER structuredef: Host memory status buffer used to + * manage proxied requests + */ +#define MC_PROXY_STATUS_BUFFER_LEN 16 +/* Handle allocated by the firmware for this proxy transaction */ +#define MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0 +/* enum: An invalid handle. */ +#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0 +#define MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0 +#define MC_PROXY_STATUS_BUFFER_HANDLE_WIDTH 32 +/* The requesting physical function number */ +#define MC_PROXY_STATUS_BUFFER_PF_OFST 4 +#define MC_PROXY_STATUS_BUFFER_PF_LEN 2 +#define MC_PROXY_STATUS_BUFFER_PF_LBN 32 +#define MC_PROXY_STATUS_BUFFER_PF_WIDTH 16 +/* The requesting virtual function number. Set to VF_NULL if the target is a + * PF. + */ +#define MC_PROXY_STATUS_BUFFER_VF_OFST 6 +#define MC_PROXY_STATUS_BUFFER_VF_LEN 2 +#define MC_PROXY_STATUS_BUFFER_VF_LBN 48 +#define MC_PROXY_STATUS_BUFFER_VF_WIDTH 16 +/* The target function RID. */ +#define MC_PROXY_STATUS_BUFFER_RID_OFST 8 +#define MC_PROXY_STATUS_BUFFER_RID_LEN 2 +#define MC_PROXY_STATUS_BUFFER_RID_LBN 64 +#define MC_PROXY_STATUS_BUFFER_RID_WIDTH 16 +/* The status of the proxy as described in MC_CMD_PROXY_COMPLETE. 
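The MC_PROXY_STATUS_BUFFER structuredef above is a plain 16-byte little-endian record in host memory. A hypothetical host-side struct view of it, assuming a little-endian ABI with natural alignment; this struct is not taken from the driver, and the final two fields correspond to the STATUS and GRANTED_PRIVILEGES definitions that follow just below:

#include <assert.h>
#include <stdint.h>

/* Host-side view of MC_PROXY_STATUS_BUFFER; offsets mirror the _OFST values. */
struct mc_proxy_status_buffer {
        uint32_t handle;             /* OFST 0: 0 means invalid handle      */
        uint16_t pf;                 /* OFST 4: requesting PF number        */
        uint16_t vf;                 /* OFST 6: VF number, or VF_NULL       */
        uint16_t rid;                /* OFST 8: target function RID         */
        uint16_t status;             /* OFST 10: MC_CMD_PROXY_COMPLETE status */
        uint32_t granted_privileges; /* OFST 12: elevated privilege mask    */
};

/* The layout packs to 16 bytes without padding on common ABIs; check it. */
static_assert(sizeof(struct mc_proxy_status_buffer) == 16,
              "unexpected padding in proxy status buffer view");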
*/ +#define MC_PROXY_STATUS_BUFFER_STATUS_OFST 10 +#define MC_PROXY_STATUS_BUFFER_STATUS_LEN 2 +#define MC_PROXY_STATUS_BUFFER_STATUS_LBN 80 +#define MC_PROXY_STATUS_BUFFER_STATUS_WIDTH 16 +/* If a request is authorized rather than carried out by the host, this is the + * elevated privilege mask granted to the requesting function. + */ +#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12 +#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96 +#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32 + + +/***********************************/ +/* MC_CMD_PROXY_CONFIGURE + * Enable/disable authorization of MCDI requests from unprivileged functions by + * a designated admin function + */ +#define MC_CMD_PROXY_CONFIGURE 0x58 + +#define MC_CMD_0x58_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_PROXY_CONFIGURE_IN msgrequest */ +#define MC_CMD_PROXY_CONFIGURE_IN_LEN 108 +#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0 +#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0 +#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1 +/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS + * of blocks, each of the size REQUEST_BLOCK_SIZE. + */ +#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_OFST 4 +#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LEN 8 +#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_OFST 4 +#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8 +/* Must be a power of 2 */ +#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12 +/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS + * of blocks, each of the size REPLY_BLOCK_SIZE. + */ +#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_OFST 16 +#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LEN 8 +#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_OFST 16 +#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20 +/* Must be a power of 2 */ +#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24 +/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS + * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if + * host intends to complete proxied operations by using MC_CMD_PROXY_CMD. + */ +#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_OFST 28 +#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LEN 8 +#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_OFST 28 +#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32 +/* Must be a power of 2, or zero if this buffer is not provided */ +#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36 +/* Applies to all three buffers */ +#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40 +/* A bit mask defining which MCDI operations may be proxied */ +#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44 +#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64 + +/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */ +#define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_PROXY_COMPLETE + * Tells FW that a requested proxy operation has either been completed (by + * using MC_CMD_PROXY_CMD) or authorized/declined. May only be sent by the + * function that enabled proxying/authorization (by using + * MC_CMD_PROXY_CONFIGURE). 
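The PROXY_CONFIGURE request above asks the host for three contiguous buffers, each NUM_BLOCKS blocks of a power-of-two block size (with the reply buffer optionally absent). A small sketch of that sizing arithmetic under example block sizes; the helper names and the sample sizes in main() are hypothetical, not values required by the firmware:

#include <stdbool.h>
#include <stddef.h>

/* Power-of-two check for the PROXY_CONFIGURE block sizes described above. */
static bool is_pow2(size_t x)
{
        return x && (x & (x - 1)) == 0;
}

/* Bytes the host must provide for one proxy buffer: NUM_BLOCKS contiguous
 * blocks of block_size bytes.  Returns 0 for an invalid size; the reply
 * buffer alone may legitimately use a block size of zero ("not provided").
 */
static size_t proxy_buf_bytes(size_t block_size, unsigned int num_blocks)
{
        if (!is_pow2(block_size))
                return 0;
        return block_size * num_blocks;
}

int main(void)
{
        /* e.g. 16 blocks: 16-byte status blocks, 256-byte request blocks */
        size_t status_bytes  = proxy_buf_bytes(16, 16);
        size_t request_bytes = proxy_buf_bytes(256, 16);

        return !(status_bytes == 256 && request_bytes == 4096);
}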
+ */ +#define MC_CMD_PROXY_COMPLETE 0x5f + +#define MC_CMD_0x5f_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_PROXY_COMPLETE_IN msgrequest */ +#define MC_CMD_PROXY_COMPLETE_IN_LEN 12 +#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0 +#define MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4 +/* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply + * is stored in the REPLY_BUFF. + */ +#define MC_CMD_PROXY_COMPLETE_IN_COMPLETE 0x0 +/* enum: The operation has been authorized. The originating function may now + * try again. + */ +#define MC_CMD_PROXY_COMPLETE_IN_AUTHORIZED 0x1 +/* enum: The operation has been declined. */ +#define MC_CMD_PROXY_COMPLETE_IN_DECLINED 0x2 +/* enum: The authorization failed because the relevant application did not + * respond in time. + */ +#define MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3 +#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8 + +/* MC_CMD_PROXY_COMPLETE_OUT msgresponse */ +#define MC_CMD_PROXY_COMPLETE_OUT_LEN 0 + /***********************************/ /* MC_CMD_ALLOC_BUFTBL_CHUNK @@ -4688,6 +5856,44 @@ /* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */ #define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0 +/* PORT_CONFIG_ENTRY structuredef */ +#define PORT_CONFIG_ENTRY_LEN 16 +/* External port number (label) */ +#define PORT_CONFIG_ENTRY_EXT_NUMBER_OFST 0 +#define PORT_CONFIG_ENTRY_EXT_NUMBER_LEN 1 +#define PORT_CONFIG_ENTRY_EXT_NUMBER_LBN 0 +#define PORT_CONFIG_ENTRY_EXT_NUMBER_WIDTH 8 +/* Port core location */ +#define PORT_CONFIG_ENTRY_CORE_OFST 1 +#define PORT_CONFIG_ENTRY_CORE_LEN 1 +#define PORT_CONFIG_ENTRY_STANDALONE 0x0 /* enum */ +#define PORT_CONFIG_ENTRY_MASTER 0x1 /* enum */ +#define PORT_CONFIG_ENTRY_SLAVE 0x2 /* enum */ +#define PORT_CONFIG_ENTRY_CORE_LBN 8 +#define PORT_CONFIG_ENTRY_CORE_WIDTH 8 +/* Internal number (HW resource) relative to the core */ +#define PORT_CONFIG_ENTRY_INT_NUMBER_OFST 2 +#define PORT_CONFIG_ENTRY_INT_NUMBER_LEN 1 +#define PORT_CONFIG_ENTRY_INT_NUMBER_LBN 16 +#define PORT_CONFIG_ENTRY_INT_NUMBER_WIDTH 8 +/* Reserved */ +#define PORT_CONFIG_ENTRY_RSVD_OFST 3 +#define PORT_CONFIG_ENTRY_RSVD_LEN 1 +#define PORT_CONFIG_ENTRY_RSVD_LBN 24 +#define PORT_CONFIG_ENTRY_RSVD_WIDTH 8 +/* Bitmask of KR lanes used by the port */ +#define PORT_CONFIG_ENTRY_LANES_OFST 4 +#define PORT_CONFIG_ENTRY_LANES_LBN 32 +#define PORT_CONFIG_ENTRY_LANES_WIDTH 32 +/* Port capabilities (MC_CMD_PHY_CAP_*) */ +#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_OFST 8 +#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_LBN 64 +#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_WIDTH 32 +/* Reserved (align to 16 bytes) */ +#define PORT_CONFIG_ENTRY_RSVD2_OFST 12 +#define PORT_CONFIG_ENTRY_RSVD2_LBN 96 +#define PORT_CONFIG_ENTRY_RSVD2_WIDTH 32 + /***********************************/ /* MC_CMD_FILTER_OP @@ -4759,9 +5965,9 @@ #define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1 /* enum: receive to MC */ #define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2 -/* enum: loop back to port 0 TX MAC */ +/* enum: loop back to TXDP 0 */ #define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3 -/* enum: loop back to port 1 TX MAC */ +/* enum: loop back to TXDP 1 */ #define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4 /* receive queue handle (for multiple queue modes, this is the base queue) */ #define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24 @@ -4778,9 +5984,7 @@ #define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 /* RSS context (for RX_MODE_RSS) or .1p mapping handle (for * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or - * MC_CMD_DOT1P_MAPPING_ALLOC. 
Note that these handles should be considered - * opaque to the host, although a value of 0xFFFFFFFF is guaranteed never to be - * a valid handle. + * MC_CMD_DOT1P_MAPPING_ALLOC. */ #define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32 /* transmit domain (reserved; set to 0) */ @@ -4835,6 +6039,235 @@ #define MC_CMD_FILTER_OP_IN_DST_IP_OFST 92 #define MC_CMD_FILTER_OP_IN_DST_IP_LEN 16 +/* MC_CMD_FILTER_OP_EXT_IN msgrequest: Extension to MC_CMD_FILTER_OP_IN to + * include handling of VXLAN/NVGRE encapsulated frame filtering (which is + * supported on Medford only). + */ +#define MC_CMD_FILTER_OP_EXT_IN_LEN 172 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_IN/OP */ +/* filter handle (for remove / unsubscribe operations) */ +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_OFST 8 +/* The port ID associated with the v-adaptor which should contain this filter. + */ +#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12 +/* fields to include in match criteria */ +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_LBN 2 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_LBN 3 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_LBN 4 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_LBN 5 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN 6 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_LBN 7 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_LBN 8 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN 9 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_LBN 10 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN 11 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_LBN 12 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_LBN 13 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_LBN 14 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_LBN 15 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_LBN 16 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_LBN 17 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_LBN 18 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_LBN 19 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1 +#define 
MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_LBN 20 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_LBN 21 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_LBN 22 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_LBN 23 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1 +/* receive destination */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20 +/* enum: drop packets */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0 +/* enum: receive to host */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST 0x1 +/* enum: receive to MC */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC 0x2 +/* enum: loop back to TXDP 0 */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0 0x3 +/* enum: loop back to TXDP 1 */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4 +/* receive queue handle (for multiple queue modes, this is the base queue) */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24 +/* receive mode */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28 +/* enum: receive to just the specified queue */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0 +/* enum: receive to multiple queues using RSS context */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS 0x1 +/* enum: receive to multiple queues using .1p mapping */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING 0x2 +/* enum: install a filter entry that will never match; for test purposes only + */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000 +/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for + * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or + * MC_CMD_DOT1P_MAPPING_ALLOC. 
+ */ +#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32 +/* transmit domain (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36 +/* transmit destination (either set the MAC and/or PM bits for explicit + * control, or set this field to TX_DEST_DEFAULT for sensible default + * behaviour) + */ +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40 +/* enum: request default behaviour (based on filter type) */ +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_LBN 1 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_WIDTH 1 +/* source MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_OFST 44 +#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_LEN 6 +/* source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_OFST 50 +#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_LEN 2 +/* destination MAC address to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_OFST 52 +#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_LEN 6 +/* destination port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_OFST 58 +#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_LEN 2 +/* Ethernet type to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_OFST 60 +#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_LEN 2 +/* Inner VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_OFST 62 +#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_LEN 2 +/* Outer VLAN tag to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_OFST 64 +#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_LEN 2 +/* IP protocol to match (in low byte; set high byte to 0) */ +#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_OFST 66 +#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2 +/* Firmware defined register 0 to match (reserved; set to 0) */ +#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68 +/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP + * protocol is GRE) to match (as bytes in network order; set last byte to 0 for + * VXLAN/NVGRE, or 1 for Geneve) + */ +#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH 8 +/* enum: Match VXLAN traffic with this VNI */ +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN 0x0 +/* enum: Match Geneve traffic with this VNI */ +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1 +/* enum: Reserved for experimental development use */ +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe +#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_LBN 0 +#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_WIDTH 24 +#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_LBN 24 +#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_WIDTH 8 +/* enum: Match NVGRE traffic with this VSID */ +#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE 0x0 +/* source IP address to match (as bytes in network order; set last 12 bytes to + * 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_OFST 76 +#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN 16 +/* destination IP address to match (as bytes in network order; set last 12 + * bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_OFST 92 +#define 
MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN 16 +/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network + * order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_OFST 108 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_LEN 6 +/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_OFST 114 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_LEN 2 +/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in + * network order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_OFST 116 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_LEN 6 +/* VXLAN/NVGRE inner frame destination port to match (as bytes in network + * order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_OFST 122 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_LEN 2 +/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_OFST 124 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_LEN 2 +/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_OFST 126 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_LEN 2 +/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_OFST 128 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_LEN 2 +/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to + * 0) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_OFST 130 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_LEN 2 +/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set + * to 0) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132 +/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set + * to 0) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136 +/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network + * order; set last 12 bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_OFST 140 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_LEN 16 +/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network + * order; set last 12 bytes to 0 for IPv4 address) + */ +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_OFST 156 +#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16 + /* MC_CMD_FILTER_OP_OUT msgresponse */ #define MC_CMD_FILTER_OP_OUT_LEN 12 /* identifies the type of operation requested */ @@ -4849,6 +6282,27 @@ #define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8 #define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4 #define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8 +/* enum: guaranteed invalid filter handle (low 32 bits) */ +#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff +/* enum: guaranteed invalid filter handle (high 32 bits) */ +#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID 0xffffffff + +/* MC_CMD_FILTER_OP_EXT_OUT msgresponse */ +#define MC_CMD_FILTER_OP_EXT_OUT_LEN 12 +/* identifies the type of operation requested */ +#define MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_EXT_IN/OP */ +/* Returned filter handle (for insert / subscribe operations). Note that these + * handles should be considered opaque to the host, although a value of + * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle. 
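 * A minimal sketch of testing a returned handle against that invalid value,
 * assuming the response payload is available as a byte buffer (the name
 * "outbuf" and the surrounding variables are illustrative, not part of the
 * MCDI API; the all-ones comparison is byte-order independent):
 *
 *   bool valid;
 *   uint32_t lo, hi;
 *
 *   memcpy(&lo, outbuf + MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST, 4);
 *   memcpy(&hi, outbuf + MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST, 4);
 *   valid = !(lo == MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID &&
 *             hi == MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID);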
+ */ +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_OFST 4 +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LEN 8 +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST 4 +#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST 8 +/* Enum values, see field(s): */ +/* MC_CMD_FILTER_OP_OUT/HANDLE */ /***********************************/ @@ -4865,6 +6319,10 @@ #define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0 /* enum: read the list of supported RX filter matches */ #define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1 +/* enum: read flags indicating restrictions on filter insertion for the calling + * client + */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2 /* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */ #define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8 @@ -4884,6 +6342,17 @@ #define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0 #define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61 +/* MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT msgresponse */ +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8 +/* identifies the type of operation requested */ +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */ +/* bitfield of filter insertion restrictions */ +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4 +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0 +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1 + /***********************************/ /* MC_CMD_PARSER_DISP_RW @@ -4901,8 +6370,10 @@ #define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0 /* enum: TX dispatcher CPU */ #define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1 -/* enum: Lookup engine */ +/* enum: Lookup engine (with original metadata format) */ #define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2 +/* enum: Lookup engine (with requested metadata format) */ +#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3 /* identifies the type of operation requested */ #define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4 /* enum: read a word of DICPU DMEM or a LUE entry */ @@ -4919,6 +6390,8 @@ #define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12 /* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */ #define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16 +/* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */ +#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12 /* value to write (for LUE writes) */ #define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12 #define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20 @@ -5019,7 +6492,9 @@ /* The maximum number of VIs that would be useful */ #define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4 -/* MC_CMD_ALLOC_VIS_OUT msgresponse */ +/* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC request. + * Use extended version in new code. + */ #define MC_CMD_ALLOC_VIS_OUT_LEN 8 /* The number of VIs allocated on this function */ #define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0 @@ -5028,6 +6503,17 @@ */ #define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4 +/* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12 +/* The number of VIs allocated on this function */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0 +/* The base absolute VI number allocated to this function. Required to + * correctly interpret wakeup events. 
+ */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4 +/* Function's port vi_shift value (always 0 on Huntington) */ +#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8 + /***********************************/ /* MC_CMD_FREE_VIS @@ -5114,13 +6600,15 @@ #define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0 /* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */ -#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 8 +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12 /* The number of VIs allocated on this function */ #define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0 /* The base absolute VI number allocated to this function. Required to * correctly interpret wakeup events. */ #define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4 +/* Function's port vi_shift value (always 0 on Huntington) */ +#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8 /***********************************/ @@ -5575,6 +7063,7 @@ #define MC_CMD_GET_CAPABILITIES 0xbe #define MC_CMD_0xbe_PRIVILEGE_CTG SRIOV_CTG_GENERAL + /* MC_CMD_GET_CAPABILITIES_IN msgrequest */ #define MC_CMD_GET_CAPABILITIES_IN_LEN 0 @@ -5582,6 +7071,20 @@ #define MC_CMD_GET_CAPABILITIES_OUT_LEN 20 /* First word of flags. */ #define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19 #define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20 @@ -5600,8 +7103,14 @@ #define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27 #define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30 #define MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_WIDTH 1 /* RxDPCPU firmware id. 
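 * This is a 2-byte field; the RXDP_* enum values below identify the firmware
 * variant. A minimal sketch of reading it from a GET_CAPABILITIES response on
 * a little-endian host (the names "outbuf" and "low_latency_rx" are
 * illustrative, not part of the MCDI API):
 *
 *   bool low_latency_rx;
 *   uint16_t rx_dpcpu_fw_id;
 *
 *   memcpy(&rx_dpcpu_fw_id,
 *          outbuf + MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST, 2);
 *   low_latency_rx =
 *           (rx_dpcpu_fw_id == MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY);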
*/ #define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4 #define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2 @@ -5609,6 +7118,10 @@ #define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0 /* enum: Low latency RXDP firmware */ #define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a /* enum: RXDP Test firmware image 1 */ #define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 /* enum: RXDP Test firmware image 2 */ @@ -5632,6 +7145,10 @@ #define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0 /* enum: Low latency TXDP firmware */ #define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d /* enum: TXDP Test firmware image 1 */ #define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 /* enum: TXDP Test firmware image 2 */ @@ -5642,22 +7159,69 @@ #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12 #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12 #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */ -#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Virtual switching (full feature) RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10 #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2 #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0 #define 
MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12 #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12 #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum */ -#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */ +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Virtual switching (full feature) TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* Hardware capabilities of NIC */ #define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12 /* Licensed capabilities */ @@ -5735,6 +7299,15 @@ /* the rate in mbps */ #define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4 +/* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */ +#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12 +/* the bucket id */ +#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0 +/* the rate in mbps */ +#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4 +/* the desired maximum fill level */ +#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8 + /* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */ #define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0 @@ -5753,8 +7326,14 @@ #define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0 /* the static priority associated with the txq */ #define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4 -/* bitmask of the priority queues this txq is inserted into */ +/* bitmask of the priority queues this txq is inserted into when inserted. 
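 * The PQ_FLAG_* bits below select which of the guaranteed/normal/low priority
 * queues are meant. A minimal sketch of composing the bitmask from the _LBN
 * definitions (illustrative only, not taken from any driver):
 *
 *   uint32_t pq_flags =
 *           (1u << MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN) |
 *           (1u << MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN);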
*/ #define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_WIDTH 1 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_LBN 2 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1 /* the reaction point (RP) bucket */ #define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12 /* an already reserved bucket (typically set to bucket associated with outer @@ -5768,6 +7347,35 @@ /* the min bucket (typically for ETS/minimum bandwidth) */ #define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24 +/* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32 +/* the txq id */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0 +/* the static priority associated with the txq */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4 +/* bitmask of the priority queues this txq is inserted into when inserted. */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_WIDTH 1 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_LBN 2 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1 +/* the reaction point (RP) bucket */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12 +/* an already reserved bucket (typically set to bucket associated with outer + * vswitch) + */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16 +/* an already reserved bucket (typically set to bucket associated with inner + * vswitch) + */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20 +/* the min bucket (typically for ETS/minimum bandwidth) */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24 +/* the static priority associated with the txq */ +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28 + /* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */ #define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0 @@ -5826,13 +7434,23 @@ #define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1 /* enum: VEB */ #define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2 -/* enum: VEPA */ +/* enum: VEPA (obsolete) */ #define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3 +/* enum: MUX */ +#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX 0x4 +/* enum: Snapper specific; semantics TBD */ +#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5 /* Flags controlling v-port creation */ #define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8 #define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0 #define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1 -/* The number of VLAN tags to support. */ +/* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators, + * this must be one or greater, and the attached v-ports must have exactly this + * number of tags. For other v-switch types, this must be zero or greater, and + * is an upper limit on the number of VLAN tags for attached v-ports. An error + * will be returned if existing configuration means we can't support attached + * v-ports with this number of tags. + */ #define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12 /* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */ @@ -5892,7 +7510,10 @@ #define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8 #define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0 #define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1 -/* The number of VLAN tags to insert/remove.
*/ +/* The number of VLAN tags to insert/remove. An error will be returned if + * incompatible with the number of VLAN tags specified for the upstream + * v-switch. + */ #define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12 /* The actual VLAN tags to insert/remove */ #define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16 @@ -6136,8 +7757,13 @@ /* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */ #define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4 -/* The handle of the new RSS context */ +/* The handle of the new RSS context. This should be considered opaque to the + * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid + * handle. + */ #define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0 +/* enum: guaranteed invalid RSS context handle value */ +#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff /***********************************/ @@ -6249,7 +7875,11 @@ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8 /* The handle of the RSS context */ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0 -/* Hash control flags */ +/* Hash control flags. The _EN bits are always supported. The _MODE bits only + * work when the firmware reports ADDITIONAL_RSS_MODES in + * MC_CMD_GET_CAPABILITIES and override the _EN bits if any of them are not 0. + * See the RSS_MODE structure for the meaning of the mode bits. + */ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1 @@ -6259,6 +7889,20 @@ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_LBN 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN 8 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_LBN 12 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN 16 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN 20 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_LBN 24 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN 28 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH 4 /* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0 @@ -6279,7 +7923,12 @@ /* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */ #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8 -/* Hash control flags */ +/* Hash control flags. If any _MODE bits are non-zero (which will only be true + * when the firmware reports ADDITIONAL_RSS_MODES) then the _EN bits should be + * disregarded (but are guaranteed to be consistent with the _MODE bits if + * RSS_CONTEXT_SET_FLAGS has never been called for this context since it was + * allocated). 
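 * A minimal sketch of the fallback described above, assuming the flags word
 * has already been copied out of the response into a local "flags" variable
 * (the variable names are illustrative, not part of the MCDI API; the
 * TCP_IPV4 _MODE field is the lowest-numbered of the mode nibbles):
 *
 *   uint32_t modes = flags >>
 *           MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN;
 *   bool use_en_bits = (modes == 0);
 *
 * If use_en_bits is true the hash configuration is described by the four _EN
 * bits; otherwise each 4-bit _MODE field below should be consulted instead.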
+ */ #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1 @@ -6289,6 +7938,20 @@ #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_LBN 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN 8 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN 12 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN 16 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN 20 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN 24 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN 28 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_WIDTH 4 /***********************************/ @@ -6311,8 +7974,13 @@ /* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */ #define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4 -/* The handle of the new .1p mapping */ +/* The handle of the new .1p mapping. This should be considered opaque to the + * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid + * handle. + */ #define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0 +/* enum: guaranteed invalid .1p mapping handle value */ +#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff /***********************************/ @@ -6421,375 +8089,6 @@ /***********************************/ -/* MC_CMD_RMON_RX_CLASS_STATS - * Retrieve rmon rx class statistics - */ -#define MC_CMD_RMON_RX_CLASS_STATS 0xc3 - -/* MC_CMD_RMON_RX_CLASS_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_CLASS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_CLASS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_LBN 0 -#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_WIDTH 8 -#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_LBN 8 -#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_CLASS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_CLASS_STATS - * Retrieve rmon tx class statistics - */ -#define MC_CMD_RMON_TX_CLASS_STATS 0xc4 - -/* MC_CMD_RMON_TX_CLASS_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_CLASS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_CLASS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_LBN 0 -#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_WIDTH 8 -#define MC_CMD_RMON_TX_CLASS_STATS_IN_RST_LBN 8 -#define MC_CMD_RMON_TX_CLASS_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_CLASS_STATS_OUT msgresponse */ -#define 
MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_SUPER_CLASS_STATS - * Retrieve rmon rx super_class statistics - */ -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS 0xc5 - -/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_LBN 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_SUPER_CLASS_STATS - * Retrieve rmon tx super_class statistics - */ -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS 0xc6 - -/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_LBN 4 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS - * Add qid to class for statistics collection - */ -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS 0xc7 - -/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN msgrequest */ -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_LEN 12 -/* class */ -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0 -/* qid */ -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_QID_OFST 4 -/* flags */ -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8 -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0 -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4 -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4 -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4 -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_LBN 8 -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14 - -/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT msgresponse */ -#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT_LEN 0 - - 
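/* Throughout this header, _LBN/_WIDTH pairs give a bitfield's low bit number
 * and width. A minimal sketch of a generic helper for extracting such a field
 * when it lies entirely within one little-endian 32-bit word (the helper name
 * is illustrative, not part of the MCDI API; fields whose LBN is relative to a
 * larger structure need their dword offset handled separately):
 */
#include <stdint.h>

static inline uint32_t mcdi_dword_field(uint32_t dword, unsigned int lbn,
					unsigned int width)
{
	/* Build a mask of 'width' ones; guard width == 32, where a plain
	 * shift would be undefined behaviour.
	 */
	uint32_t mask = (width >= 32) ? 0xffffffffu : ((1u << width) - 1u);

	return (dword >> lbn) & mask;
}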
-/***********************************/ -/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS - * Add qid to class for statistics collection - */ -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS 0xc8 - -/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN msgrequest */ -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_LEN 12 -/* class */ -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0 -/* qid */ -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_QID_OFST 4 -/* flags */ -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8 -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0 -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4 -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4 -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4 -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_LBN 8 -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14 - -/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT msgresponse */ -#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS - * Add qid to class for statistics collection - */ -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS 0xc9 - -/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN msgrequest */ -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_LEN 12 -/* class */ -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_CLASS_OFST 0 -/* qid */ -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_QID_OFST 4 -/* flags */ -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8 -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0 -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4 -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4 -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4 -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_LBN 8 -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14 - -/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT msgresponse */ -#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_RMON_ALLOC_CLASS - * Allocate an rmon class - */ -#define MC_CMD_RMON_ALLOC_CLASS 0xca - -/* MC_CMD_RMON_ALLOC_CLASS_IN msgrequest */ -#define MC_CMD_RMON_ALLOC_CLASS_IN_LEN 0 - -/* MC_CMD_RMON_ALLOC_CLASS_OUT msgresponse */ -#define MC_CMD_RMON_ALLOC_CLASS_OUT_LEN 4 -/* class */ -#define MC_CMD_RMON_ALLOC_CLASS_OUT_CLASS_OFST 0 - - -/***********************************/ -/* MC_CMD_RMON_DEALLOC_CLASS - * Deallocate an rmon class - */ -#define MC_CMD_RMON_DEALLOC_CLASS 0xcb - -/* MC_CMD_RMON_DEALLOC_CLASS_IN msgrequest */ -#define MC_CMD_RMON_DEALLOC_CLASS_IN_LEN 4 -/* class */ -#define MC_CMD_RMON_DEALLOC_CLASS_IN_CLASS_OFST 0 - -/* MC_CMD_RMON_DEALLOC_CLASS_OUT msgresponse */ -#define MC_CMD_RMON_DEALLOC_CLASS_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_RMON_ALLOC_SUPER_CLASS - * Allocate an rmon super_class - */ -#define MC_CMD_RMON_ALLOC_SUPER_CLASS 0xcc - -/* MC_CMD_RMON_ALLOC_SUPER_CLASS_IN msgrequest */ -#define MC_CMD_RMON_ALLOC_SUPER_CLASS_IN_LEN 0 - -/* MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT msgresponse */ -#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_LEN 4 -/* super_class */ -#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_SUPER_CLASS_OFST 0 - - -/***********************************/ -/* MC_CMD_RMON_DEALLOC_SUPER_CLASS - * Deallocate an rmon tx super_class - */ -#define MC_CMD_RMON_DEALLOC_SUPER_CLASS 0xcd - -/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN msgrequest */ -#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_LEN 4 -/* super_class */ -#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_SUPER_CLASS_OFST 0 - -/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT msgresponse */ -#define 
MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_RMON_RX_UP_CONV_STATS - * Retrieve up converter statistics - */ -#define MC_CMD_RMON_RX_UP_CONV_STATS 0xce - -/* MC_CMD_RMON_RX_UP_CONV_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_LBN 0 -#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_WIDTH 2 -#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_LBN 2 -#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_UP_CONV_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_IPI_STATS - * Retrieve rx ipi stats - */ -#define MC_CMD_RMON_RX_IPI_STATS 0xcf - -/* MC_CMD_RMON_RX_IPI_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_IPI_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_IPI_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_LBN 0 -#define MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_WIDTH 5 -#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_LBN 5 -#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_IPI_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_IPI_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS - * Retrieve rx ipsec cntxt_ptr indexed stats - */ -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS 0xd0 - -/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_IPSEC_PORT_STATS - * Retrieve rx ipsec port indexed stats - */ -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS 0xd1 - -/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_LBN 0 -#define 
MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2 -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_LBN 2 -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS - * Retrieve tx ipsec overflow - */ -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS 0xd2 - -/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0 -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2 -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_LBN 2 -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ /* MC_CMD_VPORT_ADD_MAC_ADDRESS * Add a MAC address to a v-port */ @@ -6877,7 +8176,7 @@ #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12 #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252 #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num)) -/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */ +/* Raw buffer table entries, layed out as BUFTBL_ENTRY. 
*/ #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0 #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12 #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1 @@ -6921,354 +8220,6 @@ /***********************************/ -/* MC_CMD_RMON_RX_CLASS_DROPS_STATS - * Retrieve rx class drop stats - */ -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS 0xd3 - -/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_LBN 0 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_WIDTH 8 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_LBN 8 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS - * Retrieve rx super class drop stats - */ -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS 0xd4 - -/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_LBN 0 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_WIDTH 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_LBN 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_ERRORS_STATS - * Retrieve rxdp errors - */ -#define MC_CMD_RMON_RX_ERRORS_STATS 0xd5 - -/* MC_CMD_RMON_RX_ERRORS_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_ERRORS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_ERRORS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_LBN 0 -#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_WIDTH 11 -#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_LBN 11 -#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_ERRORS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_RX_OVERFLOW_STATS - * Retrieve rxdp overflow - */ -#define 
MC_CMD_RMON_RX_OVERFLOW_STATS 0xd6 - -/* MC_CMD_RMON_RX_OVERFLOW_STATS_IN msgrequest */ -#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_LBN 0 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_WIDTH 8 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_LBN 8 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_RX_OVERFLOW_STATS_OUT msgresponse */ -#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_IPI_STATS - * Retrieve tx ipi stats - */ -#define MC_CMD_RMON_TX_IPI_STATS 0xd7 - -/* MC_CMD_RMON_TX_IPI_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_IPI_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_IPI_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_LBN 0 -#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_WIDTH 5 -#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_LBN 5 -#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_IPI_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_IPI_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS - * Retrieve tx ipsec counters by cntxt_ptr - */ -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS 0xd8 - -/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_IPSEC_PORT_STATS - * Retrieve tx ipsec counters by port - */ -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS 0xd9 - -/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_LBN 0 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_LBN 2 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_WIDTH 
1 - -/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS - * Retrieve tx ipsec overflow - */ -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS 0xda - -/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_LBN 2 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_NOWHERE_STATS - * Retrieve tx nowhere stats - */ -#define MC_CMD_RMON_TX_NOWHERE_STATS 0xdb - -/* MC_CMD_RMON_TX_NOWHERE_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_LBN 0 -#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_WIDTH 8 -#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_LBN 8 -#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_NOWHERE_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS - * Retrieve tx nowhere qbb stats - */ -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS 0xdc - -/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_LBN 0 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_WIDTH 3 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_LBN 3 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_LEN 4 -#define 
MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_ERRORS_STATS - * Retrieve rxdp errors - */ -#define MC_CMD_RMON_TX_ERRORS_STATS 0xdd - -/* MC_CMD_RMON_TX_ERRORS_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_ERRORS_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_ERRORS_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_LBN 0 -#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_WIDTH 11 -#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_LBN 11 -#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_ERRORS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_TX_OVERFLOW_STATS - * Retrieve rxdp overflow - */ -#define MC_CMD_RMON_TX_OVERFLOW_STATS 0xde - -/* MC_CMD_RMON_TX_OVERFLOW_STATS_IN msgrequest */ -#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_LEN 4 -/* flags */ -#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_FLAGS_OFST 0 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_LBN 0 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_WIDTH 8 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_LBN 8 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_TX_OVERFLOW_STATS_OUT msgresponse */ -#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMIN 4 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMAX 252 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num)) -/* Array of stats */ -#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_OFST 0 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_LEN 4 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1 -#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63 - - -/***********************************/ -/* MC_CMD_RMON_COLLECT_CLASS_STATS - * Explicitly collect class stats at the specified evb port - */ -#define MC_CMD_RMON_COLLECT_CLASS_STATS 0xdf - -/* MC_CMD_RMON_COLLECT_CLASS_STATS_IN msgrequest */ -#define MC_CMD_RMON_COLLECT_CLASS_STATS_IN_LEN 4 -/* The port id associated with the vport/pport at which to collect class stats - */ -#define MC_CMD_RMON_COLLECT_CLASS_STATS_IN_PORT_ID_OFST 0 - -/* MC_CMD_RMON_COLLECT_CLASS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_LEN 4 -/* class */ -#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_CLASS_OFST 0 - - -/***********************************/ -/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS - * Explicitly collect class stats at the specified evb port - */ -#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS 0xe0 - -/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN msgrequest */ -#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_LEN 4 -/* The port id associated with the vport/pport at which to collect class stats - */ -#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_PORT_ID_OFST 0 - -/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT msgresponse */ -#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_LEN 4 -/* super_class */ -#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_SUPER_CLASS_OFST 0 - - -/***********************************/ /* MC_CMD_GET_CLOCK * Return the system and PDCPU clock frequencies. 
*/ @@ -7296,22 +8247,66 @@ #define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN /* MC_CMD_SET_CLOCK_IN msgrequest */ -#define MC_CMD_SET_CLOCK_IN_LEN 12 -/* Requested system frequency in MHz; 0 leaves unchanged. */ +#define MC_CMD_SET_CLOCK_IN_LEN 28 +/* Requested frequency in MHz for system clock domain */ #define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0 -/* Requested inter-core frequency in MHz; 0 leaves unchanged. */ +/* enum: Leave the system clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for inter-core clock domain */ #define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4 -/* Request DPCPU frequency in MHz; 0 leaves unchanged. */ +/* enum: Leave the inter-core clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for DPCPU clock domain */ #define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8 +/* enum: Leave the DPCPU clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for PCS clock domain */ +#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12 +/* enum: Leave the PCS clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for MC clock domain */ +#define MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16 +/* enum: Leave the MC clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for rmon clock domain */ +#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20 +/* enum: Leave the rmon clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0 +/* Requested frequency in MHz for vswitch clock domain */ +#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24 +/* enum: Leave the vswitch clock domain frequency unchanged */ +#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0 /* MC_CMD_SET_CLOCK_OUT msgresponse */ -#define MC_CMD_SET_CLOCK_OUT_LEN 12 +#define MC_CMD_SET_CLOCK_OUT_LEN 28 /* Resulting system frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0 +/* enum: The system clock domain doesn't exist */ +#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0 /* Resulting inter-core frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4 +/* enum: The inter-core clock domain doesn't exist / isn't used */ +#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0 /* Resulting DPCPU frequency in MHz */ #define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8 +/* enum: The dpcpu clock domain doesn't exist */ +#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0 +/* Resulting PCS frequency in MHz */ +#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12 +/* enum: The PCS clock domain doesn't exist / isn't controlled */ +#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0 +/* Resulting MC frequency in MHz */ +#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16 +/* enum: The MC clock domain doesn't exist / isn't controlled */ +#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0 +/* Resulting rmon frequency in MHz */ +#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20 +/* enum: The rmon clock domain doesn't exist / isn't controlled */ +#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0 +/* Resulting vswitch frequency in MHz */ +#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24 +/* enum: The vswitch clock domain doesn't exist / isn't controlled */ +#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0 /***********************************/ @@ -7325,12 
+8320,22 @@ /* MC_CMD_DPCPU_RPC_IN msgrequest */ #define MC_CMD_DPCPU_RPC_IN_LEN 36 #define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0 -/* enum: RxDPCPU */ -#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x0 +/* enum: RxDPCPU0 */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0 /* enum: TxDPCPU0 */ #define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1 /* enum: TxDPCPU1 */ #define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2 +/* enum: RxDPCPU1 (Medford only) */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3 +/* enum: RxDPCPU (will be for the calling function; for now, just an alias of + * DPCPU_RX0) + */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80 +/* enum: TxDPCPU (will be for the calling function; for now, just an alias of + * DPCPU_TX0) + */ +#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81 /* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be * initialised to zero */ @@ -7418,6 +8423,25 @@ /***********************************/ +/* MC_CMD_SHMBOOT_OP + * Special operations to support (for now) shmboot. + */ +#define MC_CMD_SHMBOOT_OP 0xe6 + +#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_SHMBOOT_OP_IN msgrequest */ +#define MC_CMD_SHMBOOT_OP_IN_LEN 4 +/* Identifies the operation to perform */ +#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0 +/* enum: Copy slave_data section to the slave core. (Greenport only) */ +#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0 + +/* MC_CMD_SHMBOOT_OP_OUT msgresponse */ +#define MC_CMD_SHMBOOT_OP_OUT_LEN 0 + + +/***********************************/ /* MC_CMD_CAP_BLK_READ * Read multiple 64bit words from capture block memory */ @@ -7730,6 +8754,8 @@ * more data is returned. */ #define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6 +/* enum: Read Figure Of Merit (eye quality, higher is better). */ +#define MC_CMD_KR_TUNE_IN_READ_FOM 0x7 /* Align the arguments to 32 bits */ #define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1 #define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3 @@ -7762,20 +8788,32 @@ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8 -/* enum: Attenuation (0-15) */ +/* enum: Attenuation (0-15, TBD for Medford) */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0 -/* enum: CTLE Boost (0-15) */ +/* enum: CTLE Boost (0-15, TBD for Medford) */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1 -/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */ +/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive, TBD + * for Medford) + */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2 -/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */ +/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive, TBD for + * Medford) + */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3 -/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */ +/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive, TBD for + * Medford) + */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4 -/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */ +/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive, TBD for + * Medford) + */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5 -/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */ +/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive, TBD for + * Medford) + */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6 +/* enum: Edge DFE DLEV (TBD for Medford) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7 #define 
MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ @@ -7865,6 +8903,8 @@ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7 /* enum: TX Slew Rate Fine control */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8 +/* enum: TX Termination Impedance control */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */ @@ -7955,6 +8995,20 @@ #define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0 #define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126 +/* MC_CMD_KR_TUNE_READ_FOM_IN msgrequest */ +#define MC_CMD_KR_TUNE_READ_FOM_IN_LEN 8 +/* Requested operation */ +#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_OFST 0 +#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_OFST 1 +#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4 + +/* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */ +#define MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4 +#define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0 + /***********************************/ /* MC_CMD_PCIE_TUNE @@ -8224,6 +9278,8 @@ #define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4 /* enum: validate application */ #define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0 +/* enum: mask application */ +#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1 /* arguments specific to this particular operation */ #define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8 #define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4 @@ -8258,10 +9314,22 @@ #define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4 #define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64 +/* MC_CMD_LICENSED_APP_OP_MASK_IN msgrequest */ +#define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12 +/* application ID */ +#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0 +/* the type of operation requested */ +#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4 +/* flag */ +#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8 + +/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */ +#define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0 + /***********************************/ /* MC_CMD_SET_PORT_SNIFF_CONFIG - * Configure port sniffing for the physical port associated with the calling + * Configure RX port sniffing for the physical port associated with the calling * function. Only a privileged function may change the port sniffing * configuration. A copy of all traffic delivered to the host (non-promiscuous * mode) or all traffic arriving at the port (promiscuous mode) may be @@ -8299,7 +9367,7 @@ /***********************************/ /* MC_CMD_GET_PORT_SNIFF_CONFIG - * Obtain the current port sniffing configuration for the physical port + * Obtain the current RX port sniffing configuration for the physical port * associated with the calling function. Only a privileged function may read * the configuration. */ @@ -8330,4 +9398,673 @@ #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12 +/***********************************/ +/* MC_CMD_SET_PARSER_DISP_CONFIG + * Change configuration related to the parser-dispatcher subsystem. 
+ */ +#define MC_CMD_SET_PARSER_DISP_CONFIG 0xf9 + +#define MC_CMD_0xf9_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_SET_PARSER_DISP_CONFIG_IN msgrequest */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMIN 12 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX 252 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num)) +/* the type of configuration setting to change */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0 +/* enum: Per-TXQ enable for multicast UDP destination lookup for possible + * internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.) + */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN 0x0 +/* enum: Per-v-adaptor enable for suppression of self-transmissions on the + * internal loopback path. (ENTITY is an EVB_PORT_ID, VALUE is a single + * boolean.) + */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX 0x1 +/* handle for the entity to update: queue handle, EVB port ID, etc. depending + * on the type of configuration setting being changed + */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4 +/* new value: the details depend on the type of configuration setting being + * changed + */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_OFST 8 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_LEN 4 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MINNUM 1 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM 61 + +/* MC_CMD_SET_PARSER_DISP_CONFIG_OUT msgresponse */ +#define MC_CMD_SET_PARSER_DISP_CONFIG_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_PARSER_DISP_CONFIG + * Read configuration related to the parser-dispatcher subsystem. + */ +#define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa + +#define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PARSER_DISP_CONFIG_IN msgrequest */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8 +/* the type of configuration setting to read */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */ +/* handle for the entity to query: queue handle, EVB port ID, etc. depending on + * the type of configuration setting being read + */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4 + +/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num)) +/* current value: the details depend on the type of configuration setting being + * read + */ +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_OFST 0 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63 + + +/***********************************/ +/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG + * Configure TX port sniffing for the physical port associated with the calling + * function. Only a privileged function may change the port sniffing + * configuration. A copy of all traffic transmitted through the port may be + * delivered to a specific queue, or a set of queues with RSS. Note that these + * packets are delivered with transmit timestamps in the packet prefix, not + * receive timestamps, so it is likely that the queue(s) will need to be + * dedicated as TX sniff receivers. 
+ */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG 0xfb + +#define MC_CMD_0xfb_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN msgrequest */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16 +/* configuration flags */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1 +/* receive queue handle (for RSS mode, this is the base queue) */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4 +/* receive mode */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8 +/* enum: receive to just the specified queue */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0 +/* enum: receive to multiple queues using RSS context */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1 +/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note + * that these handles should be considered opaque to the host, although a value + * of 0xFFFFFFFF is guaranteed never to be a valid handle. + */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12 + +/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */ +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG + * Obtain the current TX port sniffing configuration for the physical port + * associated with the calling function. Only a privileged function may read + * the configuration. + */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc + +#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN msgrequest */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN_LEN 0 + +/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16 +/* configuration flags */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1 +/* receiving queue handle (for RSS mode, this is the base queue) */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4 +/* receive mode */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8 +/* enum: receiving to just the specified queue */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0 +/* enum: receiving to multiple queues using RSS context */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1 +/* RSS context (for RX_MODE_RSS) */ +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12 + + +/***********************************/ +/* MC_CMD_RMON_STATS_RX_ERRORS + * Per queue rx error stats. + */ +#define MC_CMD_RMON_STATS_RX_ERRORS 0xfe + +#define MC_CMD_0xfe_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RMON_STATS_RX_ERRORS_IN msgrequest */ +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8 +/* The rx queue to get stats for. 
*/ +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0 +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4 +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0 +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1 + +/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */ +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8 +#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12 + + +/***********************************/ +/* MC_CMD_GET_PCIE_RESOURCE_INFO + * Find out about available PCIE resources + */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd + +/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0 + +/* MC_CMD_GET_PCIE_RESOURCE_INFO_OUT msgresponse */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28 +/* The maximum number of PFs the device can expose */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0 +/* The maximum number of VFs the device can expose in total */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4 +/* The maximum number of MSI-X vectors the device can provide in total */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8 +/* the number of MSI-X vectors the device will allocate by default to each PF + */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12 +/* the number of MSI-X vectors the device will allocate by default to each VF + */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16 +/* the maximum number of MSI-X vectors the device can allocate to any one PF */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20 +/* the maximum number of MSI-X vectors the device can allocate to any one VF */ +#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24 + + +/***********************************/ +/* MC_CMD_GET_PORT_MODES + * Find out about available port modes + */ +#define MC_CMD_GET_PORT_MODES 0xff + +#define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_PORT_MODES_IN msgrequest */ +#define MC_CMD_GET_PORT_MODES_IN_LEN 0 + +/* MC_CMD_GET_PORT_MODES_OUT msgresponse */ +#define MC_CMD_GET_PORT_MODES_OUT_LEN 12 +/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) */ +#define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0 +/* Default (canonical) board mode */ +#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4 +/* Current board mode */ +#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8 + + +/***********************************/ +/* MC_CMD_READ_ATB + * Sample voltages on the ATB + */ +#define MC_CMD_READ_ATB 0x100 + +#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_READ_ATB_IN msgrequest */ +#define MC_CMD_READ_ATB_IN_LEN 16 +#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0 +#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */ +#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */ +#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */ +#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4 +#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8 +#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12 + +/* MC_CMD_READ_ATB_OUT msgresponse */ +#define MC_CMD_READ_ATB_OUT_LEN 4 +#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0 + + +/***********************************/ +/* MC_CMD_GET_WORKAROUNDS + * Read the list of all implemented and all currently enabled workarounds. The + * enums here must correspond with those in MC_CMD_WORKAROUND. 
+ */ +#define MC_CMD_GET_WORKAROUNDS 0x59 + +#define MC_CMD_0x59_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */ +#define MC_CMD_GET_WORKAROUNDS_OUT_LEN 8 +/* Each workaround is represented by a single bit according to the enums below. + */ +#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0 +#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4 +/* enum: Bug 17230 work around. */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2 +/* enum: Bug 35388 work around (unsafe EVQ writes). */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4 +/* enum: Bug35017 workaround (A64 tables must be identity map) */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8 +/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG41750 0x10 +/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution + * - before adding code that queries this workaround, remember that there's + * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008, + * and will hence (incorrectly) report that the bug doesn't exist. + */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG42008 0x20 +/* enum: Bug 26807 features present in firmware (multicast filter chaining) */ +#define MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 0x40 + + +/***********************************/ +/* MC_CMD_PRIVILEGE_MASK + * Read/set privileges of an arbitrary PCIe function + */ +#define MC_CMD_PRIVILEGE_MASK 0x5a + +#define MC_CMD_0x5a_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_PRIVILEGE_MASK_IN msgrequest */ +#define MC_CMD_PRIVILEGE_MASK_IN_LEN 8 +/* The target function to have its mask read or set e.g. PF 0 = 0xFFFF0000, VF + * 1,3 = 0x00030001 + */ +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0 +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0 +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16 +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16 +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_WIDTH 16 +#define MC_CMD_PRIVILEGE_MASK_IN_VF_NULL 0xffff /* enum */ +/* New privilege mask to be set. The mask will only be changed if the MSB is + * set to 1. + */ +#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4 +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */ +/* enum: Set this bit to indicate that a new privilege mask is to be set, + * otherwise the command will only read the existing mask. + */ +#define MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE 0x80000000 + +/* MC_CMD_PRIVILEGE_MASK_OUT msgresponse */ +#define MC_CMD_PRIVILEGE_MASK_OUT_LEN 4 +/* For an admin function, always all the privileges are reported. 
*/ +#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0 + + +/***********************************/ +/* MC_CMD_LINK_STATE_MODE + * Read/set link state mode of a VF + */ +#define MC_CMD_LINK_STATE_MODE 0x5c + +#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_LINK_STATE_MODE_IN msgrequest */ +#define MC_CMD_LINK_STATE_MODE_IN_LEN 8 +/* The target function to have its link state mode read or set, must be a VF + * e.g. VF 1,3 = 0x00030001 + */ +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0 +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0 +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16 +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16 +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16 +/* New link state mode to be set */ +#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4 +#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */ +#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */ +#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */ +/* enum: Use this value to just read the existing setting without modifying it. + */ +#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff + +/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */ +#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4 +#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0 + + +/***********************************/ +/* MC_CMD_GET_SNAPSHOT_LENGTH + * Obtain the current range of allowable values for the SNAPSHOT_LENGTH + * parameter to MC_CMD_INIT_RXQ. + */ +#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101 + +#define MC_CMD_0x101_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_SNAPSHOT_LENGTH_IN msgrequest */ +#define MC_CMD_GET_SNAPSHOT_LENGTH_IN_LEN 0 + +/* MC_CMD_GET_SNAPSHOT_LENGTH_OUT msgresponse */ +#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8 +/* Minimum acceptable snapshot length. */ +#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0 +/* Maximum acceptable snapshot length. 
*/ +#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4 + + +/***********************************/ +/* MC_CMD_FUSE_DIAGS + * Additional fuse diagnostics + */ +#define MC_CMD_FUSE_DIAGS 0x102 + +#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_FUSE_DIAGS_IN msgrequest */ +#define MC_CMD_FUSE_DIAGS_IN_LEN 0 + +/* MC_CMD_FUSE_DIAGS_OUT msgresponse */ +#define MC_CMD_FUSE_DIAGS_OUT_LEN 48 +/* Total number of mismatched bits between pairs in area 0 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0 +/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4 +/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8 +/* Checksum of data after logical OR of pairs in area 0 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12 +/* Total number of mismatched bits between pairs in area 1 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16 +/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20 +/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24 +/* Checksum of data after logical OR of pairs in area 1 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28 +/* Total number of mismatched bits between pairs in area 2 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32 +/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36 +/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40 +/* Checksum of data after logical OR of pairs in area 2 */ +#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44 + + +/***********************************/ +/* MC_CMD_PRIVILEGE_MODIFY + * Modify the privileges of a set of PCIe functions. Note that this operation + * only affects non-admin functions unless the admin privilege itself is + * included in one of the masks provided. + */ +#define MC_CMD_PRIVILEGE_MODIFY 0x60 + +#define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_PRIVILEGE_MODIFY_IN msgrequest */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16 +/* The groups of functions to have their privilege masks modified. */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0 +#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */ +/* For VFS_OF_PF specify the PF, for ONE specify the target function */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16 +/* Privileges to be added to the target functions. For privilege definitions + * refer to the command MC_CMD_PRIVILEGE_MASK + */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8 +/* Privileges to be removed from the target functions. 
For privilege + * definitions refer to the command MC_CMD_PRIVILEGE_MASK + */ +#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12 + +/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */ +#define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_XPM_READ_BYTES + * Read XPM memory + */ +#define MC_CMD_XPM_READ_BYTES 0x103 + +#define MC_CMD_0x103_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_READ_BYTES_IN msgrequest */ +#define MC_CMD_XPM_READ_BYTES_IN_LEN 8 +/* Start address (byte) */ +#define MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0 +/* Count (bytes) */ +#define MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4 + +/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */ +#define MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0 +#define MC_CMD_XPM_READ_BYTES_OUT_LENMAX 252 +#define MC_CMD_XPM_READ_BYTES_OUT_LEN(num) (0+1*(num)) +/* Data */ +#define MC_CMD_XPM_READ_BYTES_OUT_DATA_OFST 0 +#define MC_CMD_XPM_READ_BYTES_OUT_DATA_LEN 1 +#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MINNUM 0 +#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM 252 + + +/***********************************/ +/* MC_CMD_XPM_WRITE_BYTES + * Write XPM memory + */ +#define MC_CMD_XPM_WRITE_BYTES 0x104 + +#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */ +#define MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8 +#define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX 252 +#define MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num)) +/* Start address (byte) */ +#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0 +/* Count (bytes) */ +#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4 +/* Data */ +#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8 +#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1 +#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MINNUM 0 +#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM 244 + +/* MC_CMD_XPM_WRITE_BYTES_OUT msgresponse */ +#define MC_CMD_XPM_WRITE_BYTES_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_XPM_READ_SECTOR + * Read XPM sector + */ +#define MC_CMD_XPM_READ_SECTOR 0x105 + +#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */ +#define MC_CMD_XPM_READ_SECTOR_IN_LEN 8 +/* Sector index */ +#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0 +/* Sector size */ +#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4 + +/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */ +#define MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4 +#define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX 36 +#define MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num)) +/* Sector type */ +#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0 +#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */ +#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */ +#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */ +#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */ +/* Sector data */ +#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4 +#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1 +#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MINNUM 0 +#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM 32 + + +/***********************************/ +/* MC_CMD_XPM_WRITE_SECTOR + * Write XPM sector + */ +#define MC_CMD_XPM_WRITE_SECTOR 0x106 + +#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */ +#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12 +#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX 44 +#define MC_CMD_XPM_WRITE_SECTOR_IN_LEN(num) (12+1*(num)) +/* If writing fails due to an uncorrectable error, try up to RETRIES following + * sectors (or until no more space available). If 0, only one write attempt is + * made. 
Note that uncorrectable errors are unlikely, thanks to XPM self-repair + * mechanism. + */ +#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_OFST 0 +#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_LEN 1 +#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_OFST 1 +#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3 +/* Sector type */ +#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4 +/* Enum values, see field(s): */ +/* MC_CMD_XPM_READ_SECTOR_OUT/TYPE */ +/* Sector size */ +#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8 +/* Sector data */ +#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12 +#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1 +#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MINNUM 0 +#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM 32 + +/* MC_CMD_XPM_WRITE_SECTOR_OUT msgresponse */ +#define MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4 +/* New sector index */ +#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0 + + +/***********************************/ +/* MC_CMD_XPM_INVALIDATE_SECTOR + * Invalidate XPM sector + */ +#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107 + +#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */ +#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4 +/* Sector index */ +#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0 + +/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */ +#define MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_XPM_BLANK_CHECK + * Blank-check XPM memory and report bad locations + */ +#define MC_CMD_XPM_BLANK_CHECK 0x108 + +#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */ +#define MC_CMD_XPM_BLANK_CHECK_IN_LEN 8 +/* Start address (byte) */ +#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0 +/* Count (bytes) */ +#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4 + +/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */ +#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4 +#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX 252 +#define MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num)) +/* Total number of bad (non-blank) locations */ +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0 +/* Addresses of bad locations (may be less than BAD_COUNT, if all cannot fit + * into MCDI response) + */ +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST 4 +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN 2 +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MINNUM 0 +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM 124 + + +/***********************************/ +/* MC_CMD_XPM_REPAIR + * Blank-check and repair XPM memory + */ +#define MC_CMD_XPM_REPAIR 0x109 + +#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_REPAIR_IN msgrequest */ +#define MC_CMD_XPM_REPAIR_IN_LEN 8 +/* Start address (byte) */ +#define MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0 +/* Count (bytes) */ +#define MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4 + +/* MC_CMD_XPM_REPAIR_OUT msgresponse */ +#define MC_CMD_XPM_REPAIR_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_XPM_DECODER_TEST + * Test XPM memory address decoders for gross manufacturing defects. Can only + * be performed on an unprogrammed part. + */ +#define MC_CMD_XPM_DECODER_TEST 0x10a + +#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */ +#define MC_CMD_XPM_DECODER_TEST_IN_LEN 0 + +/* MC_CMD_XPM_DECODER_TEST_OUT msgresponse */ +#define MC_CMD_XPM_DECODER_TEST_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_XPM_WRITE_TEST + * XPM memory write test. 
Test XPM write logic for gross manufacturing defects + * by writing to a dedicated test row. There are 16 locations in the test row + * and the test can only be performed on locations that have not been + * previously used (i.e. can be run at most 16 times). The test will pick the + * first available location to use, or fail with ENOSPC if none left. + */ +#define MC_CMD_XPM_WRITE_TEST 0x10b + +#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */ +#define MC_CMD_XPM_WRITE_TEST_IN_LEN 0 + +/* MC_CMD_XPM_WRITE_TEST_OUT msgresponse */ +#define MC_CMD_XPM_WRITE_TEST_OUT_LEN 0 + + #endif /* MCDI_PCOL_H */ diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 47d1e3a96522..4d35313a239d 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -925,6 +925,7 @@ struct vfdi_status; * @stats_lock: Statistics update lock. Must be held when calling * efx_nic_type::{update,start,stop}_stats. * @n_rx_noskb_drops: Count of RX packets dropped due to failure to allocate an skb + * @mc_promisc: Whether in multicast promiscuous mode when last changed * * This is stored in the private area of the &struct net_device. */ @@ -1072,6 +1073,7 @@ struct efx_nic { int last_irq_cpu; spinlock_t stats_lock; atomic_t n_rx_noskb_drops; + bool mc_promisc; }; static inline int efx_dev_registered(struct efx_nic *efx) diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 31ff9084d9a4..0b536e27d3b2 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -506,6 +506,7 @@ enum { * @rx_rss_context_exclusive: Whether our RSS context is exclusive or shared * @stats: Hardware statistics * @workaround_35388: Flag: firmware supports workaround for bug 35388 + * @workaround_26807: Flag: firmware supports workaround for bug 26807 * @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated * after MC reboot * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of @@ -535,6 +536,7 @@ struct efx_ef10_nic_data { bool rx_rss_context_exclusive; u64 stats[EF10_STAT_COUNT]; bool workaround_35388; + bool workaround_26807; bool must_check_datapath_caps; u32 datapath_caps; unsigned int rx_dpcpu_fw_id; diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index b323b9167526..b2f886d90429 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -1042,9 +1042,5 @@ const struct efx_nic_type siena_a0_nic_type = { .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS, .hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE | 1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT | - 1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC | - 1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ | - 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT | - 1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC | - 1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ), + 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT), }; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c index cb888d3ebbdc..78e9d1861896 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c @@ -25,66 +25,53 @@ # define LPC18XX_CREG_CREG6_ETHMODE_MII 0x0 # define LPC18XX_CREG_CREG6_ETHMODE_RMII 0x4 -struct lpc18xx_dwmac_priv_data { +static int lpc18xx_dwmac_probe(struct platform_device *pdev) +{ + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; struct regmap *reg; - int interface; -}; + 
u8 ethmode; + int ret; -static void *lpc18xx_dwmac_setup(struct platform_device *pdev) -{ - struct lpc18xx_dwmac_priv_data *dwmac; + ret = stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return ret; - dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); - if (!dwmac) - return ERR_PTR(-ENOMEM); + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); - dwmac->interface = of_get_phy_mode(pdev->dev.of_node); - if (dwmac->interface < 0) - return ERR_PTR(dwmac->interface); + plat_dat->has_gmac = true; - dwmac->reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg"); - if (IS_ERR(dwmac->reg)) { - dev_err(&pdev->dev, "Syscon lookup failed\n"); - return dwmac->reg; + reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg"); + if (IS_ERR(reg)) { + dev_err(&pdev->dev, "syscon lookup failed\n"); + return PTR_ERR(reg); } - return dwmac; -} - -static int lpc18xx_dwmac_init(struct platform_device *pdev, void *priv) -{ - struct lpc18xx_dwmac_priv_data *dwmac = priv; - u8 ethmode; - - if (dwmac->interface == PHY_INTERFACE_MODE_MII) { + if (plat_dat->interface == PHY_INTERFACE_MODE_MII) { ethmode = LPC18XX_CREG_CREG6_ETHMODE_MII; - } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) { + } else if (plat_dat->interface == PHY_INTERFACE_MODE_RMII) { ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII; } else { dev_err(&pdev->dev, "Only MII and RMII mode supported\n"); return -EINVAL; } - regmap_update_bits(dwmac->reg, LPC18XX_CREG_CREG6, + regmap_update_bits(reg, LPC18XX_CREG_CREG6, LPC18XX_CREG_CREG6_ETHMODE_MASK, ethmode); - return 0; + return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); } -static const struct stmmac_of_data lpc18xx_dwmac_data = { - .has_gmac = 1, - .setup = lpc18xx_dwmac_setup, - .init = lpc18xx_dwmac_init, -}; - static const struct of_device_id lpc18xx_dwmac_match[] = { - { .compatible = "nxp,lpc1850-dwmac", .data = &lpc18xx_dwmac_data }, + { .compatible = "nxp,lpc1850-dwmac" }, { } }; MODULE_DEVICE_TABLE(of, lpc18xx_dwmac_match); static struct platform_driver lpc18xx_dwmac_driver = { - .probe = stmmac_pltfr_probe, + .probe = lpc18xx_dwmac_probe, .remove = stmmac_pltfr_remove, .driver = { .name = "lpc18xx-dwmac", diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c index 61a324a87d09..c1bac1912b37 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c @@ -47,36 +47,45 @@ static void meson6_dwmac_fix_mac_speed(void *priv, unsigned int speed) writel(val, dwmac->reg); } -static void *meson6_dwmac_setup(struct platform_device *pdev) +static int meson6_dwmac_probe(struct platform_device *pdev) { + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; struct meson_dwmac *dwmac; struct resource *res; + int ret; + + ret = stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return ret; + + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); if (!dwmac) - return ERR_PTR(-ENOMEM); + return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 1); dwmac->reg = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(dwmac->reg)) - return ERR_CAST(dwmac->reg); + return PTR_ERR(dwmac->reg); - return dwmac; -} + plat_dat->bsp_priv = dwmac; + plat_dat->fix_mac_speed = meson6_dwmac_fix_mac_speed; -static const struct stmmac_of_data 
meson6_dwmac_data = { - .setup = meson6_dwmac_setup, - .fix_mac_speed = meson6_dwmac_fix_mac_speed, -}; + return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); +} static const struct of_device_id meson6_dwmac_match[] = { - { .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data}, + { .compatible = "amlogic,meson6-dwmac" }, { } }; MODULE_DEVICE_TABLE(of, meson6_dwmac_match); static struct platform_driver meson6_dwmac_driver = { - .probe = stmmac_pltfr_probe, + .probe = meson6_dwmac_probe, .remove = stmmac_pltfr_remove, .driver = { .name = "meson6-dwmac", diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index f3918c7e7eeb..eca0eb845241 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -104,18 +104,20 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries) * this function is to read the driver parameters from device-tree and * set some private fields that will be used by the main at runtime. */ -static int stmmac_probe_config_dt(struct platform_device *pdev, - struct plat_stmmacenet_data *plat, - const char **mac) +struct plat_stmmacenet_data * +stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) { struct device_node *np = pdev->dev.of_node; + struct plat_stmmacenet_data *plat; + const struct stmmac_of_data *data; struct stmmac_dma_cfg *dma_cfg; - const struct of_device_id *device; - struct device *dev = &pdev->dev; - device = of_match_device(dev->driver->of_match_table, dev); - if (device->data) { - const struct stmmac_of_data *data = device->data; + plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); + if (!plat) + return ERR_PTR(-ENOMEM); + + data = of_device_get_match_data(&pdev->dev); + if (data) { plat->has_gmac = data->has_gmac; plat->enh_desc = data->enh_desc; plat->tx_coe = data->tx_coe; @@ -151,7 +153,7 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, /* If phy-handle is not specified, check if we have a fixed-phy */ if (!plat->phy_node && of_phy_is_fixed_link(np)) { if ((of_phy_register_fixed_link(np) < 0)) - return -ENODEV; + return ERR_PTR(-ENODEV); plat->phy_node = of_node_get(np); } @@ -182,6 +184,12 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, */ plat->maxmtu = JUMBO_LEN; + /* Set default value for multicast hash bins */ + plat->multicast_filter_bins = HASH_TABLE_SIZE; + + /* Set default value for unicast filter entries */ + plat->unicast_filter_entries = 1; + /* * Currently only the properties needed on SPEAr600 * are provided. All other properties should be added @@ -222,7 +230,7 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, GFP_KERNEL); if (!dma_cfg) { of_node_put(np); - return -ENOMEM; + return ERR_PTR(-ENOMEM); } plat->dma_cfg = dma_cfg; of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); @@ -240,44 +248,34 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set."); } - return 0; + return plat; } #else -static int stmmac_probe_config_dt(struct platform_device *pdev, - struct plat_stmmacenet_data *plat, - const char **mac) +struct plat_stmmacenet_data * +stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) { - return -ENOSYS; + return ERR_PTR(-ENOSYS); } #endif /* CONFIG_OF */ +EXPORT_SYMBOL_GPL(stmmac_probe_config_dt); -/** - * stmmac_pltfr_probe - platform driver probe. 
- * @pdev: platform device pointer - * Description: platform_device probe function. It is to allocate - * the necessary platform resources, invoke custom helper (if required) and - * invoke the main probe function. - */ -int stmmac_pltfr_probe(struct platform_device *pdev) +int stmmac_get_platform_resources(struct platform_device *pdev, + struct stmmac_resources *stmmac_res) { - struct stmmac_resources stmmac_res; - int ret = 0; struct resource *res; - struct device *dev = &pdev->dev; - struct plat_stmmacenet_data *plat_dat = NULL; - memset(&stmmac_res, 0, sizeof(stmmac_res)); + memset(stmmac_res, 0, sizeof(*stmmac_res)); /* Get IRQ information early to have an ability to ask for deferred * probe if needed before we went too far with resource allocation. */ - stmmac_res.irq = platform_get_irq_byname(pdev, "macirq"); - if (stmmac_res.irq < 0) { - if (stmmac_res.irq != -EPROBE_DEFER) { - dev_err(dev, + stmmac_res->irq = platform_get_irq_byname(pdev, "macirq"); + if (stmmac_res->irq < 0) { + if (stmmac_res->irq != -EPROBE_DEFER) { + dev_err(&pdev->dev, "MAC IRQ configuration information not found\n"); } - return stmmac_res.irq; + return stmmac_res->irq; } /* On some platforms e.g. SPEAr the wake up irq differs from the mac irq @@ -287,45 +285,61 @@ int stmmac_pltfr_probe(struct platform_device *pdev) * In case the wake up interrupt is not passed from the platform * so the driver will continue to use the mac irq (ndev->irq) */ - stmmac_res.wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); - if (stmmac_res.wol_irq < 0) { - if (stmmac_res.wol_irq == -EPROBE_DEFER) + stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); + if (stmmac_res->wol_irq < 0) { + if (stmmac_res->wol_irq == -EPROBE_DEFER) return -EPROBE_DEFER; - stmmac_res.wol_irq = stmmac_res.irq; + stmmac_res->wol_irq = stmmac_res->irq; } - stmmac_res.lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); - if (stmmac_res.lpi_irq == -EPROBE_DEFER) + stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); + if (stmmac_res->lpi_irq == -EPROBE_DEFER) return -EPROBE_DEFER; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - stmmac_res.addr = devm_ioremap_resource(dev, res); - if (IS_ERR(stmmac_res.addr)) - return PTR_ERR(stmmac_res.addr); - - plat_dat = dev_get_platdata(&pdev->dev); - - if (!plat_dat) - plat_dat = devm_kzalloc(&pdev->dev, - sizeof(struct plat_stmmacenet_data), - GFP_KERNEL); - if (!plat_dat) { - pr_err("%s: ERROR: no memory", __func__); - return -ENOMEM; - } + stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(stmmac_res->addr)) + return PTR_ERR(stmmac_res->addr); - /* Set default value for multicast hash bins */ - plat_dat->multicast_filter_bins = HASH_TABLE_SIZE; + return 0; +} +EXPORT_SYMBOL_GPL(stmmac_get_platform_resources); - /* Set default value for unicast filter entries */ - plat_dat->unicast_filter_entries = 1; +/** + * stmmac_pltfr_probe - platform driver probe. + * @pdev: platform device pointer + * Description: platform_device probe function. It is to allocate + * the necessary platform resources, invoke custom helper (if required) and + * invoke the main probe function. 
+ */ +int stmmac_pltfr_probe(struct platform_device *pdev) +{ + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; + int ret; + + ret = stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return ret; if (pdev->dev.of_node) { - ret = stmmac_probe_config_dt(pdev, plat_dat, &stmmac_res.mac); - if (ret) { - pr_err("%s: main dt probe failed", __func__); - return ret; + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + if (IS_ERR(plat_dat)) { + dev_err(&pdev->dev, "dt configuration failed\n"); + return PTR_ERR(plat_dat); + } + } else { + plat_dat = dev_get_platdata(&pdev->dev); + if (!plat_dat) { + dev_err(&pdev->dev, "no platform data provided\n"); + return -EINVAL; } + + /* Set default value for multicast hash bins */ + plat_dat->multicast_filter_bins = HASH_TABLE_SIZE; + + /* Set default value for unicast filter entries */ + plat_dat->unicast_filter_entries = 1; } /* Custom setup (if needed) */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h index 71da86d7bd00..84ceb5342686 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h @@ -19,6 +19,14 @@ #ifndef __STMMAC_PLATFORM_H__ #define __STMMAC_PLATFORM_H__ +#include "stmmac.h" + +struct plat_stmmacenet_data * +stmmac_probe_config_dt(struct platform_device *pdev, const char **mac); + +int stmmac_get_platform_resources(struct platform_device *pdev, + struct stmmac_resources *stmmac_res); + int stmmac_pltfr_probe(struct platform_device *pdev); int stmmac_pltfr_remove(struct platform_device *pdev); extern const struct dev_pm_ops stmmac_pltfr_pm_ops; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index ec8ed30196f3..3ca87f26582a 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -2142,7 +2142,6 @@ MODULE_DEVICE_TABLE(of, of_match); static struct platform_driver netcp_driver = { .driver = { .name = "netcp-1.0", - .owner = THIS_MODULE, .of_match_table = of_match, }, .probe = netcp_probe, diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index dd4544085db3..26cd14ccf4d5 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -589,6 +589,7 @@ struct nvsp_message { #define NETVSC_MTU 65536 +#define NETVSC_MTU_MIN 68 #define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*16) /* 16MB */ #define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY (1024*1024*15) /* 15MB */ diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 358475ed9b59..7b36d5fecc1f 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -106,7 +106,7 @@ static int netvsc_open(struct net_device *net) return ret; } - netif_tx_start_all_queues(net); + netif_tx_wake_all_queues(net); nvdev = hv_get_drvdata(device_obj); rdev = nvdev->extension; @@ -120,15 +120,56 @@ static int netvsc_close(struct net_device *net) { struct net_device_context *net_device_ctx = netdev_priv(net); struct hv_device *device_obj = net_device_ctx->device_ctx; + struct netvsc_device *nvdev = hv_get_drvdata(device_obj); int ret; + u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20; + struct vmbus_channel *chn; netif_tx_disable(net); /* Make sure netvsc_set_multicast_list doesn't re-enable filter! 
*/ cancel_work_sync(&net_device_ctx->work); ret = rndis_filter_close(device_obj); - if (ret != 0) + if (ret != 0) { netdev_err(net, "unable to close device (ret %d).\n", ret); + return ret; + } + + /* Ensure pending bytes in ring are read */ + while (true) { + aread = 0; + for (i = 0; i < nvdev->num_chn; i++) { + chn = nvdev->chn_table[i]; + if (!chn) + continue; + + hv_get_ringbuffer_availbytes(&chn->inbound, &aread, + &awrite); + + if (aread) + break; + + hv_get_ringbuffer_availbytes(&chn->outbound, &aread, + &awrite); + + if (aread) + break; + } + + retry++; + if (retry > retry_max || aread == 0) + break; + + msleep(msec); + + if (msec < 1000) + msec *= 2; + } + + if (aread) { + netdev_err(net, "Ring buffer not empty after closing rndis\n"); + ret = -ETIMEDOUT; + } return ret; } @@ -736,6 +777,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) struct netvsc_device *nvdev = hv_get_drvdata(hdev); struct netvsc_device_info device_info; int limit = ETH_DATA_LEN; + int ret = 0; if (nvdev == NULL || nvdev->destroy) return -ENODEV; @@ -743,13 +785,14 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2) limit = NETVSC_MTU - ETH_HLEN; - /* Hyper-V hosts don't support MTU < ETH_DATA_LEN (1500) */ - if (mtu < ETH_DATA_LEN || mtu > limit) + if (mtu < NETVSC_MTU_MIN || mtu > limit) return -EINVAL; + ret = netvsc_close(ndev); + if (ret) + goto out; + nvdev->start_remove = true; - cancel_work_sync(&ndevctx->work); - netif_tx_disable(ndev); rndis_filter_device_remove(hdev); ndev->mtu = mtu; @@ -759,9 +802,11 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) device_info.ring_size = ring_size; device_info.max_num_vrss_chns = max_num_vrss_chns; rndis_filter_device_add(hdev, &device_info); - netif_tx_wake_all_queues(ndev); - return 0; +out: + netvsc_open(ndev); + + return ret; } static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net, diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 236aeb76ef22..2e40417a8087 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1054,7 +1054,7 @@ int rndis_filter_device_add(struct hv_device *dev, ret = rndis_filter_query_device(rndis_device, RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE, &mtu, &size); - if (ret == 0 && size == sizeof(u32)) + if (ret == 0 && size == sizeof(u32) && mtu < net_device->ndev->mtu) net_device->ndev->mtu = mtu; /* Get the mac address */ diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 94570aace241..cc56fac3c3f8 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -38,69 +38,68 @@ #include <net/net_namespace.h> #define TX_Q_LIMIT 32 -struct ifb_private { +struct ifb_q_private { + struct net_device *dev; struct tasklet_struct ifb_tasklet; - int tasklet_pending; - - struct u64_stats_sync rsync; + int tasklet_pending; + int txqnum; struct sk_buff_head rq; - u64 rx_packets; - u64 rx_bytes; + u64 rx_packets; + u64 rx_bytes; + struct u64_stats_sync rsync; struct u64_stats_sync tsync; + u64 tx_packets; + u64 tx_bytes; struct sk_buff_head tq; - u64 tx_packets; - u64 tx_bytes; -}; +} ____cacheline_aligned_in_smp; -static int numifbs = 2; +struct ifb_dev_private { + struct ifb_q_private *tx_private; +}; -static void ri_tasklet(unsigned long dev); static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev); static int ifb_open(struct net_device *dev); static int ifb_close(struct net_device *dev); -static void ri_tasklet(unsigned long dev) +static void 
ifb_ri_tasklet(unsigned long _txp) { - struct net_device *_dev = (struct net_device *)dev; - struct ifb_private *dp = netdev_priv(_dev); + struct ifb_q_private *txp = (struct ifb_q_private *)_txp; struct netdev_queue *txq; struct sk_buff *skb; - txq = netdev_get_tx_queue(_dev, 0); - if ((skb = skb_peek(&dp->tq)) == NULL) { - if (__netif_tx_trylock(txq)) { - skb_queue_splice_tail_init(&dp->rq, &dp->tq); - __netif_tx_unlock(txq); - } else { - /* reschedule */ + txq = netdev_get_tx_queue(txp->dev, txp->txqnum); + skb = skb_peek(&txp->tq); + if (!skb) { + if (!__netif_tx_trylock(txq)) goto resched; - } + skb_queue_splice_tail_init(&txp->rq, &txp->tq); + __netif_tx_unlock(txq); } - while ((skb = __skb_dequeue(&dp->tq)) != NULL) { + while ((skb = __skb_dequeue(&txp->tq)) != NULL) { u32 from = G_TC_FROM(skb->tc_verd); skb->tc_verd = 0; skb->tc_verd = SET_TC_NCLS(skb->tc_verd); - u64_stats_update_begin(&dp->tsync); - dp->tx_packets++; - dp->tx_bytes += skb->len; - u64_stats_update_end(&dp->tsync); + u64_stats_update_begin(&txp->tsync); + txp->tx_packets++; + txp->tx_bytes += skb->len; + u64_stats_update_end(&txp->tsync); rcu_read_lock(); - skb->dev = dev_get_by_index_rcu(dev_net(_dev), skb->skb_iif); + skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif); if (!skb->dev) { rcu_read_unlock(); dev_kfree_skb(skb); - _dev->stats.tx_dropped++; - if (skb_queue_len(&dp->tq) != 0) + txp->dev->stats.tx_dropped++; + if (skb_queue_len(&txp->tq) != 0) goto resched; break; } rcu_read_unlock(); - skb->skb_iif = _dev->ifindex; + skb->skb_iif = txp->dev->ifindex; if (from & AT_EGRESS) { dev_queue_xmit(skb); @@ -112,10 +111,11 @@ static void ri_tasklet(unsigned long dev) } if (__netif_tx_trylock(txq)) { - if ((skb = skb_peek(&dp->rq)) == NULL) { - dp->tasklet_pending = 0; - if (netif_queue_stopped(_dev)) - netif_wake_queue(_dev); + skb = skb_peek(&txp->rq); + if (!skb) { + txp->tasklet_pending = 0; + if (netif_tx_queue_stopped(txq)) + netif_tx_wake_queue(txq); } else { __netif_tx_unlock(txq); goto resched; @@ -123,8 +123,8 @@ static void ri_tasklet(unsigned long dev) __netif_tx_unlock(txq); } else { resched: - dp->tasklet_pending = 1; - tasklet_schedule(&dp->ifb_tasklet); + txp->tasklet_pending = 1; + tasklet_schedule(&txp->ifb_tasklet); } } @@ -132,29 +132,58 @@ resched: static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { - struct ifb_private *dp = netdev_priv(dev); + struct ifb_dev_private *dp = netdev_priv(dev); + struct ifb_q_private *txp = dp->tx_private; unsigned int start; - - do { - start = u64_stats_fetch_begin_irq(&dp->rsync); - stats->rx_packets = dp->rx_packets; - stats->rx_bytes = dp->rx_bytes; - } while (u64_stats_fetch_retry_irq(&dp->rsync, start)); - - do { - start = u64_stats_fetch_begin_irq(&dp->tsync); - - stats->tx_packets = dp->tx_packets; - stats->tx_bytes = dp->tx_bytes; - - } while (u64_stats_fetch_retry_irq(&dp->tsync, start)); - + u64 packets, bytes; + int i; + + for (i = 0; i < dev->num_tx_queues; i++,txp++) { + do { + start = u64_stats_fetch_begin_irq(&txp->rsync); + packets = txp->rx_packets; + bytes = txp->rx_bytes; + } while (u64_stats_fetch_retry_irq(&txp->rsync, start)); + stats->rx_packets += packets; + stats->rx_bytes += bytes; + + do { + start = u64_stats_fetch_begin_irq(&txp->tsync); + packets = txp->tx_packets; + bytes = txp->tx_bytes; + } while (u64_stats_fetch_retry_irq(&txp->tsync, start)); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } stats->rx_dropped = dev->stats.rx_dropped; 
stats->tx_dropped = dev->stats.tx_dropped; return stats; } +static int ifb_dev_init(struct net_device *dev) +{ + struct ifb_dev_private *dp = netdev_priv(dev); + struct ifb_q_private *txp; + int i; + + txp = kcalloc(dev->num_tx_queues, sizeof(*txp), GFP_KERNEL); + if (!txp) + return -ENOMEM; + dp->tx_private = txp; + for (i = 0; i < dev->num_tx_queues; i++,txp++) { + txp->txqnum = i; + txp->dev = dev; + __skb_queue_head_init(&txp->rq); + __skb_queue_head_init(&txp->tq); + u64_stats_init(&txp->rsync); + u64_stats_init(&txp->tsync); + tasklet_init(&txp->ifb_tasklet, ifb_ri_tasklet, + (unsigned long)txp); + netif_tx_start_queue(netdev_get_tx_queue(dev, i)); + } + return 0; +} static const struct net_device_ops ifb_netdev_ops = { .ndo_open = ifb_open, @@ -162,6 +191,7 @@ static const struct net_device_ops ifb_netdev_ops = { .ndo_get_stats64 = ifb_stats64, .ndo_start_xmit = ifb_xmit, .ndo_validate_addr = eth_validate_addr, + .ndo_init = ifb_dev_init, }; #define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \ @@ -169,10 +199,24 @@ static const struct net_device_ops ifb_netdev_ops = { NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | \ NETIF_F_HW_VLAN_STAG_TX) +static void ifb_dev_free(struct net_device *dev) +{ + struct ifb_dev_private *dp = netdev_priv(dev); + struct ifb_q_private *txp = dp->tx_private; + int i; + + for (i = 0; i < dev->num_tx_queues; i++,txp++) { + tasklet_kill(&txp->ifb_tasklet); + __skb_queue_purge(&txp->rq); + __skb_queue_purge(&txp->tq); + } + kfree(dp->tx_private); + free_netdev(dev); +} + static void ifb_setup(struct net_device *dev) { /* Initialize the device structure. */ - dev->destructor = free_netdev; dev->netdev_ops = &ifb_netdev_ops; /* Fill in device structure with ethernet-generic values. */ @@ -188,17 +232,19 @@ static void ifb_setup(struct net_device *dev) dev->priv_flags &= ~IFF_TX_SKB_SHARING; netif_keep_dst(dev); eth_hw_addr_random(dev); + dev->destructor = ifb_dev_free; } static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev) { - struct ifb_private *dp = netdev_priv(dev); + struct ifb_dev_private *dp = netdev_priv(dev); u32 from = G_TC_FROM(skb->tc_verd); + struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb); - u64_stats_update_begin(&dp->rsync); - dp->rx_packets++; - dp->rx_bytes += skb->len; - u64_stats_update_end(&dp->rsync); + u64_stats_update_begin(&txp->rsync); + txp->rx_packets++; + txp->rx_bytes += skb->len; + u64_stats_update_end(&txp->rsync); if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->skb_iif) { dev_kfree_skb(skb); @@ -206,14 +252,13 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } - if (skb_queue_len(&dp->rq) >= dev->tx_queue_len) { - netif_stop_queue(dev); - } + if (skb_queue_len(&txp->rq) >= dev->tx_queue_len) + netif_tx_stop_queue(netdev_get_tx_queue(dev, txp->txqnum)); - __skb_queue_tail(&dp->rq, skb); - if (!dp->tasklet_pending) { - dp->tasklet_pending = 1; - tasklet_schedule(&dp->ifb_tasklet); + __skb_queue_tail(&txp->rq, skb); + if (!txp->tasklet_pending) { + txp->tasklet_pending = 1; + tasklet_schedule(&txp->ifb_tasklet); } return NETDEV_TX_OK; @@ -221,24 +266,13 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev) static int ifb_close(struct net_device *dev) { - struct ifb_private *dp = netdev_priv(dev); - - tasklet_kill(&dp->ifb_tasklet); - netif_stop_queue(dev); - __skb_queue_purge(&dp->rq); - __skb_queue_purge(&dp->tq); + netif_tx_stop_all_queues(dev); return 0; } static int ifb_open(struct net_device *dev) { 
- struct ifb_private *dp = netdev_priv(dev); - - tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev); - __skb_queue_head_init(&dp->rq); - __skb_queue_head_init(&dp->tq); - netif_start_queue(dev); - + netif_tx_start_all_queues(dev); return 0; } @@ -255,31 +289,30 @@ static int ifb_validate(struct nlattr *tb[], struct nlattr *data[]) static struct rtnl_link_ops ifb_link_ops __read_mostly = { .kind = "ifb", - .priv_size = sizeof(struct ifb_private), + .priv_size = sizeof(struct ifb_dev_private), .setup = ifb_setup, .validate = ifb_validate, }; -/* Number of ifb devices to be set up by this module. */ +/* Number of ifb devices to be set up by this module. + * Note that these legacy devices have one queue. + * Prefer something like : ip link add ifb10 numtxqueues 8 type ifb + */ +static int numifbs = 2; module_param(numifbs, int, 0); MODULE_PARM_DESC(numifbs, "Number of ifb devices"); static int __init ifb_init_one(int index) { struct net_device *dev_ifb; - struct ifb_private *dp; int err; - dev_ifb = alloc_netdev(sizeof(struct ifb_private), "ifb%d", + dev_ifb = alloc_netdev(sizeof(struct ifb_dev_private), "ifb%d", NET_NAME_UNKNOWN, ifb_setup); if (!dev_ifb) return -ENOMEM; - dp = netdev_priv(dev_ifb); - u64_stats_init(&dp->rsync); - u64_stats_init(&dp->tsync); - dev_ifb->rtnl_link_ops = &ifb_link_ops; err = register_netdevice(dev_ifb); if (err < 0) diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index cb86d7a01542..d6aff873803c 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -54,6 +54,11 @@ config VITESSE_PHY ---help--- Currently supports the vsc8244 +config TERANETICS_PHY + tristate "Drivers for the Teranetics PHYs" + ---help--- + Currently supports the Teranetics TN2020 + config SMSC_PHY tristate "Drivers for SMSC PHYs" ---help--- diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index fcc25a0c45cd..16aac1c3e703 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -9,6 +9,7 @@ obj-$(CONFIG_CICADA_PHY) += cicada.o obj-$(CONFIG_LXT_PHY) += lxt.o obj-$(CONFIG_QSEMI_PHY) += qsemi.o obj-$(CONFIG_SMSC_PHY) += smsc.o +obj-$(CONFIG_TERANETICS_PHY) += teranetics.o obj-$(CONFIG_VITESSE_PHY) += vitesse.o obj-$(CONFIG_BROADCOM_PHY) += broadcom.o obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 00cb41e71312..185b03c08e16 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -1449,17 +1449,9 @@ static int dp83640_ts_info(struct phy_device *dev, struct ethtool_ts_info *info) info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ); + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); return 0; } diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index 1960b46add65..479b93f9581c 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c @@ -52,6 +52,10 @@ static int fixed_phy_update_regs(struct fixed_phy *fp) u16 lpagb = 0; u16 lpa = 0; + if (!fp->status.link) + goto done; + bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE; + if 
(fp->status.duplex) { bmcr |= BMCR_FULLDPLX; @@ -96,15 +100,13 @@ static int fixed_phy_update_regs(struct fixed_phy *fp) } } - if (fp->status.link) - bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE; - if (fp->status.pause) lpa |= LPA_PAUSE_CAP; if (fp->status.asym_pause) lpa |= LPA_PAUSE_ASYM; +done: fp->regs[MII_PHYSID1] = 0; fp->regs[MII_PHYSID2] = 0; diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index f721444c2b0a..3320a179ee36 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -48,6 +48,8 @@ #define MII_M1011_IMASK_CLEAR 0x0000 #define MII_M1011_PHY_SCR 0x10 +#define MII_M1011_PHY_SCR_MDI 0x0000 +#define MII_M1011_PHY_SCR_MDI_X 0x0020 #define MII_M1011_PHY_SCR_AUTO_CROSS 0x0060 #define MII_M1145_PHY_EXT_SR 0x1b @@ -159,6 +161,43 @@ static int marvell_config_intr(struct phy_device *phydev) return err; } +static int marvell_set_polarity(struct phy_device *phydev, int polarity) +{ + int reg; + int err; + int val; + + /* get the current settings */ + reg = phy_read(phydev, MII_M1011_PHY_SCR); + if (reg < 0) + return reg; + + val = reg; + val &= ~MII_M1011_PHY_SCR_AUTO_CROSS; + switch (polarity) { + case ETH_TP_MDI: + val |= MII_M1011_PHY_SCR_MDI; + break; + case ETH_TP_MDI_X: + val |= MII_M1011_PHY_SCR_MDI_X; + break; + case ETH_TP_MDI_AUTO: + case ETH_TP_MDI_INVALID: + default: + val |= MII_M1011_PHY_SCR_AUTO_CROSS; + break; + } + + if (val != reg) { + /* Set the new polarity value in the register */ + err = phy_write(phydev, MII_M1011_PHY_SCR, val); + if (err) + return err; + } + + return 0; +} + static int marvell_config_aneg(struct phy_device *phydev) { int err; @@ -191,8 +230,7 @@ static int marvell_config_aneg(struct phy_device *phydev) if (err < 0) return err; - err = phy_write(phydev, MII_M1011_PHY_SCR, - MII_M1011_PHY_SCR_AUTO_CROSS); + err = marvell_set_polarity(phydev, phydev->mdix); if (err < 0) return err; diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index b2197b506acb..84b1fba58ac3 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -353,6 +353,8 @@ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd) phydev->duplex = cmd->duplex; + phydev->mdix = cmd->eth_tp_mdix_ctrl; + /* Restart the PHY */ phy_start_aneg(phydev); @@ -377,6 +379,7 @@ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd) cmd->transceiver = phy_is_internal(phydev) ? XCVR_INTERNAL : XCVR_EXTERNAL; cmd->autoneg = phydev->autoneg; + cmd->eth_tp_mdix_ctrl = phydev->mdix; return 0; } diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c new file mode 100644 index 000000000000..7dcb5aada1c4 --- /dev/null +++ b/drivers/net/phy/teranetics.c @@ -0,0 +1,128 @@ +/* + * Driver for Teranetics PHY + * + * Author: Shaohui Xie <Shaohui.Xie@freescale.com> + * + * Copyright 2015 Freescale Semiconductor, Inc. + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mii.h> +#include <linux/ethtool.h> +#include <linux/mdio.h> +#include <linux/phy.h> + +MODULE_DESCRIPTION("Teranetics PHY driver"); +MODULE_AUTHOR("Shaohui Xie <Shaohui.Xie@freescale.com>"); +MODULE_LICENSE("GPL v2"); + +#define PHY_ID_TN2020 0x00a19410 +#define MDIO_PHYXS_LNSTAT_SYNC0 0x0001 +#define MDIO_PHYXS_LNSTAT_SYNC1 0x0002 +#define MDIO_PHYXS_LNSTAT_SYNC2 0x0004 +#define MDIO_PHYXS_LNSTAT_SYNC3 0x0008 +#define MDIO_PHYXS_LNSTAT_ALIGN 0x1000 + +#define MDIO_PHYXS_LANE_READY (MDIO_PHYXS_LNSTAT_SYNC0 | \ + MDIO_PHYXS_LNSTAT_SYNC1 | \ + MDIO_PHYXS_LNSTAT_SYNC2 | \ + MDIO_PHYXS_LNSTAT_SYNC3 | \ + MDIO_PHYXS_LNSTAT_ALIGN) + +static int teranetics_config_init(struct phy_device *phydev) +{ + phydev->supported = SUPPORTED_10000baseT_Full; + phydev->advertising = SUPPORTED_10000baseT_Full; + + return 0; +} + +static int teranetics_soft_reset(struct phy_device *phydev) +{ + return 0; +} + +static int teranetics_aneg_done(struct phy_device *phydev) +{ + int reg; + + reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); + return (reg < 0) ? reg : (reg & BMSR_ANEGCOMPLETE); +} + +static int teranetics_config_aneg(struct phy_device *phydev) +{ + return 0; +} + +static int teranetics_read_status(struct phy_device *phydev) +{ + int reg; + + phydev->link = 1; + + phydev->speed = SPEED_10000; + phydev->duplex = DUPLEX_FULL; + + if (!phy_read_mmd(phydev, MDIO_MMD_VEND1, 93)) { + reg = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MDIO_PHYXS_LNSTAT); + if (reg < 0 || + !((reg & MDIO_PHYXS_LANE_READY) == MDIO_PHYXS_LANE_READY)) { + phydev->link = 0; + return 0; + } + + reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); + if (reg < 0 || !(reg & MDIO_STAT1_LSTATUS)) + phydev->link = 0; + } + + return 0; +} + +static int teranetics_match_phy_device(struct phy_device *phydev) +{ + return phydev->c45_ids.device_ids[3] == PHY_ID_TN2020; +} + +static struct phy_driver teranetics_driver[] = { +{ + .phy_id = PHY_ID_TN2020, + .phy_id_mask = 0xffffffff, + .name = "Teranetics TN2020", + .soft_reset = teranetics_soft_reset, + .aneg_done = teranetics_aneg_done, + .config_init = teranetics_config_init, + .config_aneg = teranetics_config_aneg, + .read_status = teranetics_read_status, + .match_phy_device = teranetics_match_phy_device, + .driver = { .owner = THIS_MODULE,}, +}, +}; + +static int __init teranetics_init(void) +{ + return phy_drivers_register(teranetics_driver, + ARRAY_SIZE(teranetics_driver)); +} + +static void __exit teranetics_exit(void) +{ + return phy_drivers_unregister(teranetics_driver, + ARRAY_SIZE(teranetics_driver)); +} + +module_init(teranetics_init); +module_exit(teranetics_exit); + +static struct mdio_device_id __maybe_unused teranetics_tbl[] = { + { PHY_ID_TN2020, 0xffffffff }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, teranetics_tbl); diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 9d43460ce3c7..1f7a7cd97e50 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -785,6 +785,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ + {QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ {QMI_FIXED_INTF(0x03f0, 0x581d, 4)}, /* HP lt4112 
LTE/HSPA+ Gobi 4G Module (Huawei me906e) */ /* 4. Gobi 1000 devices */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 7f6419ebb5e1..57b72ecb5455 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -622,6 +622,7 @@ enum rtl_version { RTL_VER_03, RTL_VER_04, RTL_VER_05, + RTL_VER_06, RTL_VER_MAX }; @@ -2560,7 +2561,10 @@ static void r8153_hw_phy_cfg(struct r8152 *tp) u32 ocp_data; u16 data; - ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L); + if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 || + tp->version == RTL_VER_05) + ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L); + data = r8152_mdio_read(tp, MII_BMCR); if (data & BMCR_PDOWN) { data &= ~BMCR_PDOWN; @@ -3274,6 +3278,13 @@ static void r8153_init(struct r8152 *tp) else ocp_data |= DYNAMIC_BURST; ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data); + } else if (tp->version == RTL_VER_06) { + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1); + if (ocp_read_word(tp, MCU_TYPE_USB, USB_BURST_SIZE) == 0) + ocp_data &= ~DYNAMIC_BURST; + else + ocp_data |= DYNAMIC_BURST; + ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data); } ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY2); @@ -3908,6 +3919,10 @@ static void r8152b_get_version(struct r8152 *tp) tp->version = RTL_VER_05; tp->mii.supports_gmii = 1; break; + case 0x5c30: + tp->version = RTL_VER_06; + tp->mii.supports_gmii = 1; + break; default: netif_info(tp, probe, tp->netdev, "Unknown version 0x%04x\n", version); @@ -3953,6 +3968,7 @@ static int rtl_ops_init(struct r8152 *tp) case RTL_VER_03: case RTL_VER_04: case RTL_VER_05: + case RTL_VER_06: ops->init = r8153_init; ops->enable = rtl8153_enable; ops->disable = rtl8153_disable; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 34c519eb1db5..e9feefb41f0b 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -49,15 +49,12 @@ #include <net/ip6_tunnel.h> #include <net/ip6_checksum.h> #endif +#include <net/dst_metadata.h> #define VXLAN_VERSION "0.1" #define PORT_HASH_BITS 8 #define PORT_HASH_SIZE (1<<PORT_HASH_BITS) -#define VNI_HASH_BITS 10 -#define VNI_HASH_SIZE (1<<VNI_HASH_BITS) -#define FDB_HASH_BITS 8 -#define FDB_HASH_SIZE (1<<FDB_HASH_BITS) #define FDB_AGE_DEFAULT 300 /* 5 min */ #define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */ @@ -74,9 +71,13 @@ module_param(log_ecn_error, bool, 0644); MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); static int vxlan_net_id; +static struct rtnl_link_ops vxlan_link_ops; static const u8 all_zeros_mac[ETH_ALEN]; +static struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, + bool no_share, u32 flags); + /* per-network namespace private data for this module */ struct vxlan_net { struct list_head vxlan_list; @@ -84,21 +85,6 @@ struct vxlan_net { spinlock_t sock_lock; }; -union vxlan_addr { - struct sockaddr_in sin; - struct sockaddr_in6 sin6; - struct sockaddr sa; -}; - -struct vxlan_rdst { - union vxlan_addr remote_ip; - __be16 remote_port; - u32 remote_vni; - u32 remote_ifindex; - struct list_head list; - struct rcu_head rcu; -}; - /* Forwarding table entry */ struct vxlan_fdb { struct hlist_node hlist; /* linked list of entries */ @@ -106,40 +92,21 @@ struct vxlan_fdb { unsigned long updated; /* jiffies */ unsigned long used; struct list_head remotes; + u8 eth_addr[ETH_ALEN]; u16 state; /* see ndm_state */ u8 flags; /* see ndm_flags */ - u8 eth_addr[ETH_ALEN]; -}; - -/* Pseudo network device */ -struct vxlan_dev { - struct hlist_node 
hlist; /* vni hash table */ - struct list_head next; /* vxlan's per namespace list */ - struct vxlan_sock *vn_sock; /* listening socket */ - struct net_device *dev; - struct net *net; /* netns for packet i/o */ - struct vxlan_rdst default_dst; /* default destination */ - union vxlan_addr saddr; /* source address */ - __be16 dst_port; - __u16 port_min; /* source port range */ - __u16 port_max; - __u8 tos; /* TOS override */ - __u8 ttl; - u32 flags; /* VXLAN_F_* in vxlan.h */ - - unsigned long age_interval; - struct timer_list age_timer; - spinlock_t hash_lock; - unsigned int addrcnt; - unsigned int addrmax; - - struct hlist_head fdb_head[FDB_HASH_SIZE]; }; /* salt for hash table */ static u32 vxlan_salt __read_mostly; static struct workqueue_struct *vxlan_wq; +static inline bool vxlan_collect_metadata(struct vxlan_sock *vs) +{ + return vs->flags & VXLAN_F_COLLECT_METADATA || + ip_tunnel_collect_metadata(); +} + #if IS_ENABLED(CONFIG_IPV6) static inline bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b) @@ -345,7 +312,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan, if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip)) goto nla_put_failure; - if (rdst->remote_port && rdst->remote_port != vxlan->dst_port && + if (rdst->remote_port && rdst->remote_port != vxlan->cfg.dst_port && nla_put_be16(skb, NDA_PORT, rdst->remote_port)) goto nla_put_failure; if (rdst->remote_vni != vxlan->default_dst.remote_vni && @@ -749,7 +716,8 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan, if (!(flags & NLM_F_CREATE)) return -ENOENT; - if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax) + if (vxlan->cfg.addrmax && + vxlan->addrcnt >= vxlan->cfg.addrmax) return -ENOSPC; /* Disallow replace to add a multicast entry */ @@ -835,7 +803,7 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, return -EINVAL; *port = nla_get_be16(tb[NDA_PORT]); } else { - *port = vxlan->dst_port; + *port = vxlan->cfg.dst_port; } if (tb[NDA_VNI]) { @@ -1021,7 +989,7 @@ static bool vxlan_snoop(struct net_device *dev, vxlan_fdb_create(vxlan, src_mac, src_ip, NUD_REACHABLE, NLM_F_EXCL|NLM_F_CREATE, - vxlan->dst_port, + vxlan->cfg.dst_port, vxlan->default_dst.remote_vni, 0, NTF_SELF); spin_unlock(&vxlan->hash_lock); @@ -1062,7 +1030,7 @@ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev) return false; } -void vxlan_sock_release(struct vxlan_sock *vs) +static void vxlan_sock_release(struct vxlan_sock *vs) { struct sock *sk = vs->sock->sk; struct net *net = sock_net(sk); @@ -1078,7 +1046,6 @@ void vxlan_sock_release(struct vxlan_sock *vs) queue_work(vxlan_wq, &vs->del_work); } -EXPORT_SYMBOL_GPL(vxlan_sock_release); /* Update multicast group membership when first VNI on * multicast address is brought up @@ -1161,13 +1128,112 @@ static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh, return vh; } +static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, + struct vxlan_metadata *md, u32 vni, + struct metadata_dst *tun_dst) +{ + struct iphdr *oip = NULL; + struct ipv6hdr *oip6 = NULL; + struct vxlan_dev *vxlan; + struct pcpu_sw_netstats *stats; + union vxlan_addr saddr; + int err = 0; + union vxlan_addr *remote_ip; + + /* For flow based devices, map all packets to VNI 0 */ + if (vs->flags & VXLAN_F_FLOW_BASED) + vni = 0; + + /* Is this VNI defined? 
*/ + vxlan = vxlan_vs_find_vni(vs, vni); + if (!vxlan) + goto drop; + + remote_ip = &vxlan->default_dst.remote_ip; + skb_reset_mac_header(skb); + skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev))); + skb->protocol = eth_type_trans(skb, vxlan->dev); + skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); + + /* Ignore packet loops (and multicast echo) */ + if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) + goto drop; + + /* Re-examine inner Ethernet packet */ + if (remote_ip->sa.sa_family == AF_INET) { + oip = ip_hdr(skb); + saddr.sin.sin_addr.s_addr = oip->saddr; + saddr.sa.sa_family = AF_INET; +#if IS_ENABLED(CONFIG_IPV6) + } else { + oip6 = ipv6_hdr(skb); + saddr.sin6.sin6_addr = oip6->saddr; + saddr.sa.sa_family = AF_INET6; +#endif + } + + if (tun_dst) { + skb_dst_set(skb, (struct dst_entry *)tun_dst); + tun_dst = NULL; + } + + if ((vxlan->flags & VXLAN_F_LEARN) && + vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source)) + goto drop; + + skb_reset_network_header(skb); + /* In flow-based mode, GBP is carried in dst_metadata */ + if (!(vs->flags & VXLAN_F_FLOW_BASED)) + skb->mark = md->gbp; + + if (oip6) + err = IP6_ECN_decapsulate(oip6, skb); + if (oip) + err = IP_ECN_decapsulate(oip, skb); + + if (unlikely(err)) { + if (log_ecn_error) { + if (oip6) + net_info_ratelimited("non-ECT from %pI6\n", + &oip6->saddr); + if (oip) + net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n", + &oip->saddr, oip->tos); + } + if (err > 1) { + ++vxlan->dev->stats.rx_frame_errors; + ++vxlan->dev->stats.rx_errors; + goto drop; + } + } + + stats = this_cpu_ptr(vxlan->dev->tstats); + u64_stats_update_begin(&stats->syncp); + stats->rx_packets++; + stats->rx_bytes += skb->len; + u64_stats_update_end(&stats->syncp); + + netif_rx(skb); + + return; +drop: + if (tun_dst) + dst_release((struct dst_entry *)tun_dst); + + /* Consume bad packet */ + kfree_skb(skb); +} + /* Callback from net/ipv4/udp.c to receive packets */ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) { + struct metadata_dst *tun_dst = NULL; + struct ip_tunnel_info *info; struct vxlan_sock *vs; struct vxlanhdr *vxh; u32 flags, vni; - struct vxlan_metadata md = {0}; + struct vxlan_metadata _md; + struct vxlan_metadata *md = &_md; /* Need Vxlan and inner Ethernet header to be present */ if (!pskb_may_pull(skb, VXLAN_HLEN)) @@ -1202,6 +1268,32 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) vni &= VXLAN_VNI_MASK; } + if (vxlan_collect_metadata(vs)) { + const struct iphdr *iph = ip_hdr(skb); + + tun_dst = metadata_dst_alloc(sizeof(*md), GFP_ATOMIC); + if (!tun_dst) + goto drop; + + info = &tun_dst->u.tun_info; + info->key.ipv4_src = iph->saddr; + info->key.ipv4_dst = iph->daddr; + info->key.ipv4_tos = iph->tos; + info->key.ipv4_ttl = iph->ttl; + info->key.tp_src = udp_hdr(skb)->source; + info->key.tp_dst = udp_hdr(skb)->dest; + + info->mode = IP_TUNNEL_INFO_RX; + info->key.tun_flags = TUNNEL_KEY; + info->key.tun_id = cpu_to_be64(vni >> 8); + if (udp_hdr(skb)->check != 0) + info->key.tun_flags |= TUNNEL_CSUM; + + md = ip_tunnel_info_opts(info, sizeof(*md)); + } else { + memset(md, 0, sizeof(*md)); + } + /* For backwards compatibility, only allow reserved fields to be * used by VXLAN extensions if explicitly requested. 
*/ @@ -1209,13 +1301,16 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) struct vxlanhdr_gbp *gbp; gbp = (struct vxlanhdr_gbp *)vxh; - md.gbp = ntohs(gbp->policy_id); + md->gbp = ntohs(gbp->policy_id); + + if (tun_dst) + info->key.tun_flags |= TUNNEL_VXLAN_OPT; if (gbp->dont_learn) - md.gbp |= VXLAN_GBP_DONT_LEARN; + md->gbp |= VXLAN_GBP_DONT_LEARN; if (gbp->policy_applied) - md.gbp |= VXLAN_GBP_POLICY_APPLIED; + md->gbp |= VXLAN_GBP_POLICY_APPLIED; flags &= ~VXLAN_GBP_USED_BITS; } @@ -1233,8 +1328,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) goto bad_flags; } - md.vni = vxh->vx_vni; - vs->rcv(vs, skb, &md); + vxlan_rcv(vs, skb, md, vni >> 8, tun_dst); return 0; drop: @@ -1247,93 +1341,13 @@ bad_flags: ntohl(vxh->vx_flags), ntohl(vxh->vx_vni)); error: + if (tun_dst) + dst_release((struct dst_entry *)tun_dst); + /* Return non vxlan pkt */ return 1; } -static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, - struct vxlan_metadata *md) -{ - struct iphdr *oip = NULL; - struct ipv6hdr *oip6 = NULL; - struct vxlan_dev *vxlan; - struct pcpu_sw_netstats *stats; - union vxlan_addr saddr; - __u32 vni; - int err = 0; - union vxlan_addr *remote_ip; - - vni = ntohl(md->vni) >> 8; - /* Is this VNI defined? */ - vxlan = vxlan_vs_find_vni(vs, vni); - if (!vxlan) - goto drop; - - remote_ip = &vxlan->default_dst.remote_ip; - skb_reset_mac_header(skb); - skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev))); - skb->protocol = eth_type_trans(skb, vxlan->dev); - skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); - - /* Ignore packet loops (and multicast echo) */ - if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) - goto drop; - - /* Re-examine inner Ethernet packet */ - if (remote_ip->sa.sa_family == AF_INET) { - oip = ip_hdr(skb); - saddr.sin.sin_addr.s_addr = oip->saddr; - saddr.sa.sa_family = AF_INET; -#if IS_ENABLED(CONFIG_IPV6) - } else { - oip6 = ipv6_hdr(skb); - saddr.sin6.sin6_addr = oip6->saddr; - saddr.sa.sa_family = AF_INET6; -#endif - } - - if ((vxlan->flags & VXLAN_F_LEARN) && - vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source)) - goto drop; - - skb_reset_network_header(skb); - skb->mark = md->gbp; - - if (oip6) - err = IP6_ECN_decapsulate(oip6, skb); - if (oip) - err = IP_ECN_decapsulate(oip, skb); - - if (unlikely(err)) { - if (log_ecn_error) { - if (oip6) - net_info_ratelimited("non-ECT from %pI6\n", - &oip6->saddr); - if (oip) - net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n", - &oip->saddr, oip->tos); - } - if (err > 1) { - ++vxlan->dev->stats.rx_frame_errors; - ++vxlan->dev->stats.rx_errors; - goto drop; - } - } - - stats = this_cpu_ptr(vxlan->dev->tstats); - u64_stats_update_begin(&stats->syncp); - stats->rx_packets++; - stats->rx_bytes += skb->len; - u64_stats_update_end(&stats->syncp); - - netif_rx(skb); - - return; -drop: - /* Consume bad packet */ - kfree_skb(skb); -} - static int arp_reduce(struct net_device *dev, struct sk_buff *skb) { struct vxlan_dev *vxlan = netdev_priv(dev); @@ -1672,7 +1686,7 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, struct net_device *dev, struct in6_addr *saddr, struct in6_addr *daddr, __u8 prio, __u8 ttl, - __be16 src_port, __be16 dst_port, + __be16 src_port, __be16 dst_port, __u32 vni, struct vxlan_metadata *md, bool xnet, u32 vxflags) { struct vxlanhdr *vxh; @@ -1722,7 +1736,7 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk, vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); 
vxh->vx_flags = htonl(VXLAN_HF_VNI); - vxh->vx_vni = md->vni; + vxh->vx_vni = vni; if (type & SKB_GSO_TUNNEL_REMCSUM) { u32 data = (skb_checksum_start_offset(skb) - hdrlen) >> @@ -1755,10 +1769,10 @@ err: } #endif -int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, - __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, - __be16 src_port, __be16 dst_port, - struct vxlan_metadata *md, bool xnet, u32 vxflags) +static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, + __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, + __be16 src_port, __be16 dst_port, __u32 vni, + struct vxlan_metadata *md, bool xnet, u32 vxflags) { struct vxlanhdr *vxh; int min_headroom; @@ -1801,7 +1815,7 @@ int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); vxh->vx_flags = htonl(VXLAN_HF_VNI); - vxh->vx_vni = md->vni; + vxh->vx_vni = vni; if (type & SKB_GSO_TUNNEL_REMCSUM) { u32 data = (skb_checksum_start_offset(skb) - hdrlen) >> @@ -1828,7 +1842,6 @@ int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, ttl, df, src_port, dst_port, xnet, !(vxflags & VXLAN_F_UDP_CSUM)); } -EXPORT_SYMBOL_GPL(vxlan_xmit_skb); /* Bypass encapsulation if the destination is local */ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, @@ -1878,22 +1891,43 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, struct vxlan_rdst *rdst, bool did_rsc) { + struct ip_tunnel_info *info; struct vxlan_dev *vxlan = netdev_priv(dev); struct sock *sk = vxlan->vn_sock->sock->sk; struct rtable *rt = NULL; const struct iphdr *old_iph; struct flowi4 fl4; union vxlan_addr *dst; - struct vxlan_metadata md; + union vxlan_addr remote_ip; + struct vxlan_metadata _md; + struct vxlan_metadata *md = &_md; __be16 src_port = 0, dst_port; u32 vni; __be16 df = 0; __u8 tos, ttl; int err; + u32 flags = vxlan->flags; + + /* FIXME: Support IPv6 */ + info = skb_tunnel_info(skb, AF_INET); + + if (rdst) { + dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; + vni = rdst->remote_vni; + dst = &rdst->remote_ip; + } else { + if (!info) { + WARN_ONCE(1, "%s: Missing encapsulation instructions\n", + dev->name); + goto drop; + } - dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port; - vni = rdst->remote_vni; - dst = &rdst->remote_ip; + dst_port = info->key.tp_dst ? 
: vxlan->cfg.dst_port; + vni = be64_to_cpu(info->key.tun_id); + remote_ip.sin.sin_family = AF_INET; + remote_ip.sin.sin_addr.s_addr = info->key.ipv4_dst; + dst = &remote_ip; + } if (vxlan_addr_any(dst)) { if (did_rsc) { @@ -1906,25 +1940,42 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, old_iph = ip_hdr(skb); - ttl = vxlan->ttl; + ttl = vxlan->cfg.ttl; if (!ttl && vxlan_addr_multicast(dst)) ttl = 1; - tos = vxlan->tos; + tos = vxlan->cfg.tos; if (tos == 1) tos = ip_tunnel_get_dsfield(old_iph, skb); - src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->port_min, - vxlan->port_max, true); + src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, + vxlan->cfg.port_max, true); if (dst->sa.sa_family == AF_INET) { + if (info) { + if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) + df = htons(IP_DF); + if (info->key.tun_flags & TUNNEL_CSUM) + flags |= VXLAN_F_UDP_CSUM; + else + flags &= ~VXLAN_F_UDP_CSUM; + + ttl = info->key.ipv4_ttl; + tos = info->key.ipv4_tos; + + if (info->options_len) + md = ip_tunnel_info_opts(info, sizeof(*md)); + } else { + md->gbp = skb->mark; + } + memset(&fl4, 0, sizeof(fl4)); - fl4.flowi4_oif = rdst->remote_ifindex; + fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0; fl4.flowi4_tos = RT_TOS(tos); fl4.flowi4_mark = skb->mark; fl4.flowi4_proto = IPPROTO_UDP; fl4.daddr = dst->sin.sin_addr.s_addr; - fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr; + fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr; rt = ip_route_output_key(vxlan->net, &fl4); if (IS_ERR(rt)) { @@ -1958,14 +2009,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, tos = ip_tunnel_ecn_encap(tos, old_iph, skb); ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); - md.vni = htonl(vni << 8); - md.gbp = skb->mark; - err = vxlan_xmit_skb(rt, sk, skb, fl4.saddr, dst->sin.sin_addr.s_addr, tos, ttl, df, - src_port, dst_port, &md, + src_port, dst_port, htonl(vni << 8), md, !net_eq(vxlan->net, dev_net(vxlan->dev)), - vxlan->flags); + flags); if (err < 0) { /* skb is already freed. */ skb = NULL; @@ -1980,9 +2028,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, u32 flags; memset(&fl6, 0, sizeof(fl6)); - fl6.flowi6_oif = rdst->remote_ifindex; + fl6.flowi6_oif = rdst ? rdst->remote_ifindex : 0; fl6.daddr = dst->sin6.sin6_addr; - fl6.saddr = vxlan->saddr.sin6.sin6_addr; + fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr; fl6.flowi6_mark = skb->mark; fl6.flowi6_proto = IPPROTO_UDP; @@ -2018,11 +2066,10 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } ttl = ttl ? 
: ip6_dst_hoplimit(ndst); - md.vni = htonl(vni << 8); - md.gbp = skb->mark; + md->gbp = skb->mark; err = vxlan6_xmit_skb(ndst, sk, skb, dev, &fl6.saddr, &fl6.daddr, - 0, ttl, src_port, dst_port, &md, + 0, ttl, src_port, dst_port, htonl(vni << 8), md, !net_eq(vxlan->net, dev_net(vxlan->dev)), vxlan->flags); #endif @@ -2051,11 +2098,15 @@ tx_free: static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) { struct vxlan_dev *vxlan = netdev_priv(dev); + const struct ip_tunnel_info *info; struct ethhdr *eth; bool did_rsc = false; struct vxlan_rdst *rdst, *fdst = NULL; struct vxlan_fdb *f; + /* FIXME: Support IPv6 */ + info = skb_tunnel_info(skb, AF_INET); + skb_reset_mac_header(skb); eth = eth_hdr(skb); @@ -2078,6 +2129,12 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) #endif } + if (vxlan->flags & VXLAN_F_FLOW_BASED && + info && info->mode == IP_TUNNEL_INFO_TX) { + vxlan_xmit_one(skb, dev, NULL, false); + return NETDEV_TX_OK; + } + f = vxlan_find_mac(vxlan, eth->h_dest); did_rsc = false; @@ -2143,7 +2200,7 @@ static void vxlan_cleanup(unsigned long arg) if (f->state & NUD_PERMANENT) continue; - timeout = f->used + vxlan->age_interval * HZ; + timeout = f->used + vxlan->cfg.age_interval * HZ; if (time_before_eq(timeout, jiffies)) { netdev_dbg(vxlan->dev, "garbage collect %pM\n", @@ -2207,8 +2264,8 @@ static int vxlan_open(struct net_device *dev) struct vxlan_sock *vs; int ret = 0; - vs = vxlan_sock_add(vxlan->net, vxlan->dst_port, vxlan_rcv, NULL, - false, vxlan->flags); + vs = vxlan_sock_add(vxlan->net, vxlan->cfg.dst_port, + vxlan->cfg.no_share, vxlan->flags); if (IS_ERR(vs)) return PTR_ERR(vs); @@ -2222,7 +2279,7 @@ static int vxlan_open(struct net_device *dev) } } - if (vxlan->age_interval) + if (vxlan->cfg.age_interval) mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL); return ret; @@ -2380,7 +2437,7 @@ static void vxlan_setup(struct net_device *dev) vxlan->age_timer.function = vxlan_cleanup; vxlan->age_timer.data = (unsigned long) vxlan; - vxlan->dst_port = htons(vxlan_port); + vxlan->cfg.dst_port = htons(vxlan_port); vxlan->dev = dev; @@ -2405,6 +2462,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = { [IFLA_VXLAN_RSC] = { .type = NLA_U8 }, [IFLA_VXLAN_L2MISS] = { .type = NLA_U8 }, [IFLA_VXLAN_L3MISS] = { .type = NLA_U8 }, + [IFLA_VXLAN_FLOWBASED] = { .type = NLA_U8 }, [IFLA_VXLAN_PORT] = { .type = NLA_U16 }, [IFLA_VXLAN_UDP_CSUM] = { .type = NLA_U8 }, [IFLA_VXLAN_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 }, @@ -2500,7 +2558,6 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6, /* Create new listen socket if needed */ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port, - vxlan_rcv_t *rcv, void *data, u32 flags) { struct vxlan_net *vn = net_generic(net, vxlan_net_id); @@ -2529,8 +2586,6 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port, vs->sock = sock; atomic_set(&vs->refcnt, 1); - vs->rcv = rcv; - vs->data = data; vs->flags = (flags & VXLAN_F_RCV_FLAGS); /* Initialize the vxlan udp offloads structure */ @@ -2554,9 +2609,8 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port, return vs; } -struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, - vxlan_rcv_t *rcv, void *data, - bool no_share, u32 flags) +static struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, + bool no_share, u32 flags) { struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_sock *vs; @@ -2566,7 +2620,7 @@ struct 
vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, spin_lock(&vn->sock_lock); vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port, flags); - if (vs && vs->rcv == rcv) { + if (vs) { if (!atomic_add_unless(&vs->refcnt, 1, 0)) vs = ERR_PTR(-EBUSY); spin_unlock(&vn->sock_lock); @@ -2575,58 +2629,38 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, spin_unlock(&vn->sock_lock); } - return vxlan_socket_create(net, port, rcv, data, flags); + return vxlan_socket_create(net, port, flags); } -EXPORT_SYMBOL_GPL(vxlan_sock_add); -static int vxlan_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[]) +static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, + struct vxlan_config *conf) { struct vxlan_net *vn = net_generic(src_net, vxlan_net_id); struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_rdst *dst = &vxlan->default_dst; - __u32 vni; int err; bool use_ipv6 = false; - - if (!data[IFLA_VXLAN_ID]) - return -EINVAL; + __be16 default_port = vxlan->cfg.dst_port; vxlan->net = src_net; - vni = nla_get_u32(data[IFLA_VXLAN_ID]); - dst->remote_vni = vni; - - /* Unless IPv6 is explicitly requested, assume IPv4 */ - dst->remote_ip.sa.sa_family = AF_INET; - if (data[IFLA_VXLAN_GROUP]) { - dst->remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]); - } else if (data[IFLA_VXLAN_GROUP6]) { - if (!IS_ENABLED(CONFIG_IPV6)) - return -EPFNOSUPPORT; + dst->remote_vni = conf->vni; - dst->remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]); - dst->remote_ip.sa.sa_family = AF_INET6; - use_ipv6 = true; - } + memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip)); - if (data[IFLA_VXLAN_LOCAL]) { - vxlan->saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]); - vxlan->saddr.sa.sa_family = AF_INET; - } else if (data[IFLA_VXLAN_LOCAL6]) { - if (!IS_ENABLED(CONFIG_IPV6)) - return -EPFNOSUPPORT; + /* Unless IPv6 is explicitly requested, assume IPv4 */ + if (!dst->remote_ip.sa.sa_family) + dst->remote_ip.sa.sa_family = AF_INET; - /* TODO: respect scope id */ - vxlan->saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]); - vxlan->saddr.sa.sa_family = AF_INET6; + if (dst->remote_ip.sa.sa_family == AF_INET6 || + vxlan->cfg.saddr.sa.sa_family == AF_INET6) use_ipv6 = true; - } - if (data[IFLA_VXLAN_LINK] && - (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) { + if (conf->remote_ifindex) { struct net_device *lowerdev - = __dev_get_by_index(src_net, dst->remote_ifindex); + = __dev_get_by_index(src_net, conf->remote_ifindex); + + dst->remote_ifindex = conf->remote_ifindex; if (!lowerdev) { pr_info("ifindex %d does not exist\n", dst->remote_ifindex); @@ -2644,7 +2678,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, } #endif - if (!tb[IFLA_MTU]) + if (!conf->mtu) dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); dev->needed_headroom = lowerdev->hard_header_len + @@ -2652,101 +2686,188 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, } else if (use_ipv6) vxlan->flags |= VXLAN_F_IPV6; + memcpy(&vxlan->cfg, conf, sizeof(*conf)); + if (!vxlan->cfg.dst_port) + vxlan->cfg.dst_port = default_port; + vxlan->flags |= conf->flags; + + if (!vxlan->cfg.age_interval) + vxlan->cfg.age_interval = FDB_AGE_DEFAULT; + + if (vxlan_find_vni(src_net, conf->vni, use_ipv6 ? 
AF_INET6 : AF_INET, + vxlan->cfg.dst_port, vxlan->flags)) + return -EEXIST; + + dev->ethtool_ops = &vxlan_ethtool_ops; + + /* create an fdb entry for a valid default destination */ + if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) { + err = vxlan_fdb_create(vxlan, all_zeros_mac, + &vxlan->default_dst.remote_ip, + NUD_REACHABLE|NUD_PERMANENT, + NLM_F_EXCL|NLM_F_CREATE, + vxlan->cfg.dst_port, + vxlan->default_dst.remote_vni, + vxlan->default_dst.remote_ifindex, + NTF_SELF); + if (err) + return err; + } + + err = register_netdevice(dev); + if (err) { + vxlan_fdb_delete_default(vxlan); + return err; + } + + list_add(&vxlan->next, &vn->vxlan_list); + + return 0; +} + +struct net_device *vxlan_dev_create(struct net *net, const char *name, + u8 name_assign_type, struct vxlan_config *conf) +{ + struct nlattr *tb[IFLA_MAX+1]; + struct net_device *dev; + int err; + + memset(&tb, 0, sizeof(tb)); + + dev = rtnl_create_link(net, name, name_assign_type, + &vxlan_link_ops, tb); + if (IS_ERR(dev)) + return dev; + + err = vxlan_dev_configure(net, dev, conf); + if (err < 0) { + free_netdev(dev); + return ERR_PTR(err); + } + + return dev; +} +EXPORT_SYMBOL_GPL(vxlan_dev_create); + +static int vxlan_newlink(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[]) +{ + struct vxlan_config conf; + int err; + + if (!data[IFLA_VXLAN_ID]) + return -EINVAL; + + memset(&conf, 0, sizeof(conf)); + conf.vni = nla_get_u32(data[IFLA_VXLAN_ID]); + + if (data[IFLA_VXLAN_GROUP]) { + conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]); + } else if (data[IFLA_VXLAN_GROUP6]) { + if (!IS_ENABLED(CONFIG_IPV6)) + return -EPFNOSUPPORT; + + conf.remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]); + conf.remote_ip.sa.sa_family = AF_INET6; + } + + if (data[IFLA_VXLAN_LOCAL]) { + conf.saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]); + conf.saddr.sa.sa_family = AF_INET; + } else if (data[IFLA_VXLAN_LOCAL6]) { + if (!IS_ENABLED(CONFIG_IPV6)) + return -EPFNOSUPPORT; + + /* TODO: respect scope id */ + conf.saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]); + conf.saddr.sa.sa_family = AF_INET6; + } + + if (data[IFLA_VXLAN_LINK]) + conf.remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]); + if (data[IFLA_VXLAN_TOS]) - vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]); + conf.tos = nla_get_u8(data[IFLA_VXLAN_TOS]); if (data[IFLA_VXLAN_TTL]) - vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]); + conf.ttl = nla_get_u8(data[IFLA_VXLAN_TTL]); if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING])) - vxlan->flags |= VXLAN_F_LEARN; + conf.flags |= VXLAN_F_LEARN; if (data[IFLA_VXLAN_AGEING]) - vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]); - else - vxlan->age_interval = FDB_AGE_DEFAULT; + conf.age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]); if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY])) - vxlan->flags |= VXLAN_F_PROXY; + conf.flags |= VXLAN_F_PROXY; if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC])) - vxlan->flags |= VXLAN_F_RSC; + conf.flags |= VXLAN_F_RSC; if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS])) - vxlan->flags |= VXLAN_F_L2MISS; + conf.flags |= VXLAN_F_L2MISS; if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS])) - vxlan->flags |= VXLAN_F_L3MISS; + conf.flags |= VXLAN_F_L3MISS; if (data[IFLA_VXLAN_LIMIT]) - vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]); + conf.addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]); + + if 
(data[IFLA_VXLAN_FLOWBASED] && + nla_get_u8(data[IFLA_VXLAN_FLOWBASED])) + conf.flags |= VXLAN_F_FLOW_BASED; if (data[IFLA_VXLAN_PORT_RANGE]) { const struct ifla_vxlan_port_range *p = nla_data(data[IFLA_VXLAN_PORT_RANGE]); - vxlan->port_min = ntohs(p->low); - vxlan->port_max = ntohs(p->high); + conf.port_min = ntohs(p->low); + conf.port_max = ntohs(p->high); } if (data[IFLA_VXLAN_PORT]) - vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]); + conf.dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]); if (data[IFLA_VXLAN_UDP_CSUM] && nla_get_u8(data[IFLA_VXLAN_UDP_CSUM])) - vxlan->flags |= VXLAN_F_UDP_CSUM; + conf.flags |= VXLAN_F_UDP_CSUM; if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] && nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX])) - vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_TX; + conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_TX; if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] && nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX])) - vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX; + conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_RX; if (data[IFLA_VXLAN_REMCSUM_TX] && nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX])) - vxlan->flags |= VXLAN_F_REMCSUM_TX; + conf.flags |= VXLAN_F_REMCSUM_TX; if (data[IFLA_VXLAN_REMCSUM_RX] && nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX])) - vxlan->flags |= VXLAN_F_REMCSUM_RX; + conf.flags |= VXLAN_F_REMCSUM_RX; if (data[IFLA_VXLAN_GBP]) - vxlan->flags |= VXLAN_F_GBP; + conf.flags |= VXLAN_F_GBP; if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) - vxlan->flags |= VXLAN_F_REMCSUM_NOPARTIAL; - - if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET, - vxlan->dst_port, vxlan->flags)) { - pr_info("duplicate VNI %u\n", vni); - return -EEXIST; - } + conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL; - dev->ethtool_ops = &vxlan_ethtool_ops; + err = vxlan_dev_configure(src_net, dev, &conf); + switch (err) { + case -ENODEV: + pr_info("ifindex %d does not exist\n", conf.remote_ifindex); + break; - /* create an fdb entry for a valid default destination */ - if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) { - err = vxlan_fdb_create(vxlan, all_zeros_mac, - &vxlan->default_dst.remote_ip, - NUD_REACHABLE|NUD_PERMANENT, - NLM_F_EXCL|NLM_F_CREATE, - vxlan->dst_port, - vxlan->default_dst.remote_vni, - vxlan->default_dst.remote_ifindex, - NTF_SELF); - if (err) - return err; - } + case -EPERM: + pr_info("IPv6 is disabled via sysctl\n"); + break; - err = register_netdevice(dev); - if (err) { - vxlan_fdb_delete_default(vxlan); - return err; + case -EEXIST: + pr_info("duplicate VNI %u\n", conf.vni); + break; } - list_add(&vxlan->next, &vn->vxlan_list); - - return 0; + return err; } static void vxlan_dellink(struct net_device *dev, struct list_head *head) @@ -2777,6 +2898,7 @@ static size_t vxlan_get_size(const struct net_device *dev) nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_RSC */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L2MISS */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L3MISS */ + nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_FLOWBASED */ nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */ nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */ nla_total_size(sizeof(struct ifla_vxlan_port_range)) + @@ -2794,8 +2916,8 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) const struct vxlan_dev *vxlan = netdev_priv(dev); const struct vxlan_rdst *dst = &vxlan->default_dst; struct ifla_vxlan_port_range ports = { - .low = htons(vxlan->port_min), - .high = htons(vxlan->port_max), + .low = htons(vxlan->cfg.port_min), + .high = htons(vxlan->cfg.port_max), }; if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni)) @@ 
-2818,22 +2940,22 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex)) goto nla_put_failure; - if (!vxlan_addr_any(&vxlan->saddr)) { - if (vxlan->saddr.sa.sa_family == AF_INET) { + if (!vxlan_addr_any(&vxlan->cfg.saddr)) { + if (vxlan->cfg.saddr.sa.sa_family == AF_INET) { if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL, - vxlan->saddr.sin.sin_addr.s_addr)) + vxlan->cfg.saddr.sin.sin_addr.s_addr)) goto nla_put_failure; #if IS_ENABLED(CONFIG_IPV6) } else { if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6, - &vxlan->saddr.sin6.sin6_addr)) + &vxlan->cfg.saddr.sin6.sin6_addr)) goto nla_put_failure; #endif } } - if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) || - nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) || + if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) || + nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) || nla_put_u8(skb, IFLA_VXLAN_LEARNING, !!(vxlan->flags & VXLAN_F_LEARN)) || nla_put_u8(skb, IFLA_VXLAN_PROXY, @@ -2843,9 +2965,11 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) !!(vxlan->flags & VXLAN_F_L2MISS)) || nla_put_u8(skb, IFLA_VXLAN_L3MISS, !!(vxlan->flags & VXLAN_F_L3MISS)) || - nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) || - nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax) || - nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port) || + nla_put_u8(skb, IFLA_VXLAN_FLOWBASED, + !!(vxlan->flags & VXLAN_F_FLOW_BASED)) || + nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) || + nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) || + nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) || nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM, !!(vxlan->flags & VXLAN_F_UDP_CSUM)) || nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX, diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 8a495b318b6f..c6cb85a85c89 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -325,9 +325,6 @@ static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue) queue->pending_prod + queue->pending_cons; } -/* Callback from stack when TX packet can be released */ -void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success); - irqreturn_t xenvif_interrupt(int irq, void *dev_id); extern bool separate_tx_rx_irq; diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index fdc60db60829..7c8c23cc6896 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -266,7 +266,8 @@ EXPORT_SYMBOL(of_phy_attach); bool of_phy_is_fixed_link(struct device_node *np) { struct device_node *dn; - int len; + int len, err; + const char *managed; /* New binding */ dn = of_get_child_by_name(np, "fixed-link"); @@ -275,6 +276,10 @@ bool of_phy_is_fixed_link(struct device_node *np) return true; } + err = of_property_read_string(np, "managed", &managed); + if (err == 0 && strcmp(managed, "auto") != 0) + return true; + /* Old binding */ if (of_get_property(np, "fixed-link", &len) && len == (5 * sizeof(__be32))) @@ -289,8 +294,18 @@ int of_phy_register_fixed_link(struct device_node *np) struct fixed_phy_status status = {}; struct device_node *fixed_link_node; const __be32 *fixed_link_prop; - int len; + int len, err; struct phy_device *phy; + const char *managed; + + err = of_property_read_string(np, "managed", &managed); + if (err == 0) { + if (strcmp(managed, "in-band-status") == 0) { + /* status is zeroed, namely its .link member */ + phy = fixed_phy_register(PHY_POLL, &status, np); + 
return IS_ERR(phy) ? PTR_ERR(phy) : 0; + } + } /* New binding */ fixed_link_node = of_get_child_by_name(np, "fixed-link");
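
The of_mdio/fixed_phy hunks above extend fixed-link handling to the "managed" device tree property, so that managed = "in-band-status" also ends up backed by a registered fixed PHY. As a rough illustration of how a MAC driver consumes these helpers, here is a minimal, hypothetical probe-time sketch; foo_attach_phy(), the adjust_link callback, the "phy-handle" lookup and the RGMII interface mode are illustrative assumptions and are not part of the patch above.

#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

/* Hypothetical helper: attach either a fixed-link/managed PHY or a
 * phy-handle referenced PHY to a net_device during probe.
 */
static int foo_attach_phy(struct net_device *ndev, struct device_node *np,
			  void (*adjust_link)(struct net_device *))
{
	struct device_node *phy_np;
	struct phy_device *phydev;
	int ret;

	if (of_phy_is_fixed_link(np)) {
		/* Covers the fixed-link binding and, with the change above,
		 * managed = "in-band-status" as well.
		 */
		ret = of_phy_register_fixed_link(np);
		if (ret)
			return ret;
		phy_np = of_node_get(np);
	} else {
		phy_np = of_parse_phandle(np, "phy-handle", 0);
		if (!phy_np)
			return -ENODEV;
	}

	phydev = of_phy_connect(ndev, phy_np, adjust_link, 0,
				PHY_INTERFACE_MODE_RGMII);
	of_node_put(phy_np);

	return phydev ? 0 : -ENODEV;
}

Usage note: in the in-band-status case the fixed PHY is registered with a zeroed status (link down), as the hunk's comment points out; the expectation is that the MAC driver later refreshes that status from its in-band link-state reporting rather than from fixed DT properties.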