63 files changed, 10700 insertions, 436 deletions
diff --git a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
index c388f7d9feb1..27e1b4cebfbd 100644
--- a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
+++ b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
@@ -10,8 +10,10 @@ Required properties:
 	- #size-cells: 0
 	- spi-max-frequency: Maximum frequency of the SPI bus the chip can
 	  operate at should be less than or equal to 18 MHz.
-	- data-ready-gpios: Interrupt GPIO for data and error reporting.
 	- device-wake-gpios: Wake up GPIO to wake up the TCAN device.
+	- interrupt-parent: the phandle to the interrupt controller which provides
+	  the interrupt.
+	- interrupts: interrupt specification for data-ready.
 
 See Documentation/devicetree/bindings/net/can/m_can.txt for additional
 required property details.
@@ -30,7 +32,8 @@ tcan4x5x: tcan4x5x@0 {
 		#size-cells = <1>;
 		spi-max-frequency = <10000000>;
 		bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>;
-		data-ready-gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
+		interrupt-parent = <&gpio1>;
+		interrupts = <14 GPIO_ACTIVE_LOW>;
 		device-state-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
 		device-wake-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
 		reset-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
diff --git a/Documentation/devicetree/bindings/net/dsa/mt7530.txt b/Documentation/devicetree/bindings/net/dsa/mt7530.txt
index 47aa205ee0bd..c5ed5d25f642 100644
--- a/Documentation/devicetree/bindings/net/dsa/mt7530.txt
+++ b/Documentation/devicetree/bindings/net/dsa/mt7530.txt
@@ -35,6 +35,42 @@ Required properties for the child nodes within ports container:
 - phy-mode: String, must be either "trgmii" or "rgmii" for port labeled
 	 "cpu".
 
+Port 5 of the switch is muxed between:
+1. GMAC5: GMAC5 can interface with another external MAC or PHY.
+2. PHY of port 0 or port 4: The PHY interfaces with an external MAC like the
+   2nd GMAC of the SOC. Used in many setups where port 0/4 becomes the WAN
+   port.
+   Note: On a MT7621 SOC with integrated switch, the 2nd GMAC can only be
+   connected to GMAC5 when the GPIOs for RGMII2 (GPIO 22-33) are not used and
+   not connected to an external component!
+
+Port 5 modes/configurations:
+1. Port 5 is disabled and isolated: An external phy can interface to the 2nd
+   GMAC of the SOC.
+   In the case of a built-in MT7530 switch, port 5 shares the RGMII bus with
+   the 2nd GMAC and an optional external phy. Mind the GPIO/pinctrl settings
+   of the SOC!
+2. Port 5 is muxed to the PHY of port 0/4: Port 0/4 interfaces with the 2nd
+   GMAC. It is a simple MAC-to-PHY interface; port 5 needs to be set up for
+   xMII mode and RGMII delay.
+3. Port 5 is muxed to GMAC5 and can interface to an external phy.
+   Port 5 becomes an extra switch port.
+   This only works on platforms where the external phy's TX<->RX lines are
+   swapped, like in the Ubiquiti ER-X-SFP.
+4. Port 5 is muxed to GMAC5 and interfaces with the 2nd GMAC as a 2nd CPU
+   port.
+   Currently a 2nd CPU port is not supported by the DSA code.
+
+Depending on how the external PHY is wired:
+1. normal: The PHY can only connect to the 2nd GMAC, but not to the switch.
+2. swapped: RGMII TX and RX are swapped; the external phy interfaces with the
+   switch as an Ethernet port, but can't interface to the 2nd GMAC.
+
+The port 5 mode is configured based on the DT.
+
+The driver tries to look up the phy-handle of the 2nd GMAC of the master
+device. When the phy-handle matches the PHY of port 0 or 4, port 5 is set up
+as mode 2 (a condensed sketch of this lookup follows the list below).
+phy-mode must be set, see also example 2 below!
+ * mt7621: phy-mode = "rgmii-txid";
+ * mt7623: phy-mode = "rgmii";
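[Editor's note] The phy-handle lookup described above is implemented in the mt7530_setup() hunk later in this diff. A condensed C sketch of that logic, with error handling trimmed (all names are taken from that hunk):

	priv->p5_intf_sel = P5_DISABLED;
	interface = PHY_INTERFACE_MODE_NA;

	if (!dsa_is_unused_port(ds, 5)) {
		/* Modes 3/4: port 5 is used as an extra switch port (GMAC5) */
		priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
		interface = of_get_phy_mode(ds->ports[5].dn);
	} else {
		/* Mode 2: find the 2nd GMAC (reg = <1>) and check whether
		 * its phy-handle points at PHY 0 or PHY 4 of the switch.
		 */
		for_each_child_of_node(dn, mac_np) {
			if (!of_device_is_compatible(mac_np, "mediatek,eth-mac"))
				continue;
			if (of_property_read_u32(mac_np, "reg", &id) < 0 || id != 1)
				continue;
			phy_node = of_parse_phandle(mac_np, "phy-handle", 0);
			if (phy_node->parent == priv->dev->of_node->parent) {
				interface = of_get_phy_mode(mac_np);
				id = of_mdio_parse_addr(ds->dev, phy_node);
				if (id == 0)
					priv->p5_intf_sel = P5_INTF_SEL_PHY_P0;
				if (id == 4)
					priv->p5_intf_sel = P5_INTF_SEL_PHY_P4;
			}
			of_node_put(phy_node);
			break;
		}
	}

	mt7530_setup_port5(ds, interface);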
+
 See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of
 additional required, optional properties and how the integrated switch
 subnodes must be specified.
@@ -94,3 +130,181 @@ Example:
 		};
 	};
 };
+
+Example 2: MT7621: Port 4 is WAN port: 2nd GMAC -> Port 5 -> PHY port 4.
+
+&eth {
+	gmac0: mac@0 {
+		compatible = "mediatek,eth-mac";
+		reg = <0>;
+		phy-mode = "rgmii";
+
+		fixed-link {
+			speed = <1000>;
+			full-duplex;
+			pause;
+		};
+	};
+
+	gmac1: mac@1 {
+		compatible = "mediatek,eth-mac";
+		reg = <1>;
+		phy-mode = "rgmii-txid";
+		phy-handle = <&phy4>;
+	};
+
+	mdio: mdio-bus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		/* Internal phy */
+		phy4: ethernet-phy@4 {
+			reg = <4>;
+		};
+
+		mt7530: switch@1f {
+			compatible = "mediatek,mt7621";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x1f>;
+			pinctrl-names = "default";
+			mediatek,mcm;
+
+			resets = <&rstctrl 2>;
+			reset-names = "mcm";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@0 {
+					reg = <0>;
+					label = "lan0";
+				};
+
+				port@1 {
+					reg = <1>;
+					label = "lan1";
+				};
+
+				port@2 {
+					reg = <2>;
+					label = "lan2";
+				};
+
+				port@3 {
+					reg = <3>;
+					label = "lan3";
+				};
+
+/* Commented out. Port 4 is handled by 2nd GMAC.
+				port@4 {
+					reg = <4>;
+					label = "lan4";
+				};
+*/
+
+				cpu_port0: port@6 {
+					reg = <6>;
+					label = "cpu";
+					ethernet = <&gmac0>;
+					phy-mode = "rgmii";
+
+					fixed-link {
+						speed = <1000>;
+						full-duplex;
+						pause;
+					};
+				};
+			};
+		};
+	};
+};
+
+Example 3: MT7621: Port 5 is connected to external PHY: Port 5 -> external PHY.
+
+&eth {
+	gmac0: mac@0 {
+		compatible = "mediatek,eth-mac";
+		reg = <0>;
+		phy-mode = "rgmii";
+
+		fixed-link {
+			speed = <1000>;
+			full-duplex;
+			pause;
+		};
+	};
+
+	mdio: mdio-bus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		/* External phy */
+		ephy5: ethernet-phy@7 {
+			reg = <7>;
+		};
+
+		mt7530: switch@1f {
+			compatible = "mediatek,mt7621";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x1f>;
+			pinctrl-names = "default";
+			mediatek,mcm;
+
+			resets = <&rstctrl 2>;
+			reset-names = "mcm";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@0 {
+					reg = <0>;
+					label = "lan0";
+				};
+
+				port@1 {
+					reg = <1>;
+					label = "lan1";
+				};
+
+				port@2 {
+					reg = <2>;
+					label = "lan2";
+				};
+
+				port@3 {
+					reg = <3>;
+					label = "lan3";
+				};
+
+				port@4 {
+					reg = <4>;
+					label = "lan4";
+				};
+
+				port@5 {
+					reg = <5>;
+					label = "lan5";
+					phy-mode = "rgmii";
+					phy-handle = <&ephy5>;
+				};
+
+				cpu_port0: port@6 {
+					reg = <6>;
+					label = "cpu";
+					ethernet = <&gmac0>;
+					phy-mode = "rgmii";
+
+					fixed-link {
+						speed = <1000>;
+						full-duplex;
+						pause;
+					};
+				};
+			};
+		};
+	};
+};
diff --git a/Documentation/networking/device_drivers/index.rst b/Documentation/networking/device_drivers/index.rst
index 2b7fefe72351..57fec66c5419 100644
--- a/Documentation/networking/device_drivers/index.rst
+++ b/Documentation/networking/device_drivers/index.rst
@@ -23,6 +23,7 @@ Contents:
    intel/ice
    google/gve
    mellanox/mlx5
+   pensando/ionic
 
 .. only:: subproject
 
diff --git a/Documentation/networking/device_drivers/pensando/ionic.rst b/Documentation/networking/device_drivers/pensando/ionic.rst
new file mode 100644
index 000000000000..67b6839d516b
--- /dev/null
+++ b/Documentation/networking/device_drivers/pensando/ionic.rst
@@ -0,0 +1,43 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+==========================================================
+Linux* Driver for the Pensando(R) Ethernet adapter family
+==========================================================
+
+Pensando Linux Ethernet driver.
+Copyright(c) 2019 Pensando Systems, Inc
+
+Contents
+========
+
+- Identifying the Adapter
+- Support
+
+Identifying the Adapter
+=======================
+
+To find if one or more Pensando PCI Ethernet devices are installed on the
+host, check for the PCI devices::
+
+  $ lspci -d 1dd8:
+  b5:00.0 Ethernet controller: Device 1dd8:1002
+  b6:00.0 Ethernet controller: Device 1dd8:1002
+
+If such devices are listed as above, then the ionic.ko driver should find
+and configure them for use. There should be log entries in the kernel
+messages such as these::
+
+  $ dmesg | grep ionic
+  ionic Pensando Ethernet NIC Driver, ver 0.15.0-k
+  ionic 0000:b5:00.0 enp181s0: renamed from eth0
+  ionic 0000:b6:00.0 enp182s0: renamed from eth0
+
+Support
+=======
+
+For general Linux networking support, please use the netdev mailing
+list, which is monitored by Pensando personnel::
+
+  netdev@vger.kernel.org
+
+For more specific support needs, please use the Pensando driver support
+email::
+
+  drivers@pensando.io
diff --git a/Documentation/networking/devlink-info-versions.rst b/Documentation/networking/devlink-info-versions.rst
index 4316342b7746..4914f581b1fd 100644
--- a/Documentation/networking/devlink-info-versions.rst
+++ b/Documentation/networking/devlink-info-versions.rst
@@ -14,11 +14,27 @@ board.rev
 
 Board design revision.
 
+asic.id
+=======
+
+ASIC design identifier.
+
+asic.rev
+========
+
+ASIC design revision.
+
 board.manufacture
 =================
 
 An identifier of the company or the facility which produced the part.
 
+fw
+==
+
+Overall firmware version, often representing the collection of
+fw.mgmt, fw.app, etc.
+
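[Editor's note] These names are reported from a driver's devlink .info_get() callback. A minimal sketch of how the fields added above might be filled in — the driver name and version strings are hypothetical; devlink_info_version_fixed_put() and devlink_info_version_running_put() are the existing devlink helpers of this kernel era:

	#include <net/devlink.h>

	/* Hypothetical .info_get() implementation for the fields above. */
	static int example_devlink_info_get(struct devlink *dl,
					    struct devlink_info_req *req,
					    struct netlink_ext_ack *extack)
	{
		int err;

		/* Attributes baked into the hardware are "fixed" versions */
		err = devlink_info_version_fixed_put(req, "asic.id", "0x0");
		if (err)
			return err;
		err = devlink_info_version_fixed_put(req, "asic.rev", "0x0");
		if (err)
			return err;

		/* The firmware bundle currently executing is "running" */
		return devlink_info_version_running_put(req, "fw", "1.0.0-example");
	}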
+ fw.mgmt ======= diff --git a/MAINTAINERS b/MAINTAINERS index 844f416437c4..84bb34727f81 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -12618,6 +12618,14 @@ L: [email protected] S: Maintained F: drivers/platform/x86/peaq-wmi.c +PENSANDO ETHERNET DRIVERS +M: Shannon Nelson <[email protected]> +M: Pensando Drivers <[email protected]> +S: Supported +F: Documentation/networking/device_drivers/pensando/ionic.rst +F: drivers/net/ethernet/pensando/ + PER-CPU MEMORY ALLOCATOR M: Dennis Zhou <[email protected]> M: Tejun Heo <[email protected]> diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c index 635bb4b447fb..e6df5b95ed47 100644 --- a/drivers/crypto/chelsio/chtls/chtls_main.c +++ b/drivers/crypto/chelsio/chtls/chtls_main.c @@ -474,7 +474,8 @@ static int chtls_getsockopt(struct sock *sk, int level, int optname, struct tls_context *ctx = tls_get_ctx(sk); if (level != SOL_TLS) - return ctx->getsockopt(sk, level, optname, optval, optlen); + return ctx->sk_proto->getsockopt(sk, level, + optname, optval, optlen); return do_chtls_getsockopt(sk, optval, optlen); } @@ -541,7 +542,8 @@ static int chtls_setsockopt(struct sock *sk, int level, int optname, struct tls_context *ctx = tls_get_ctx(sk); if (level != SOL_TLS) - return ctx->setsockopt(sk, level, optname, optval, optlen); + return ctx->sk_proto->setsockopt(sk, level, + optname, optval, optlen); return do_chtls_setsockopt(sk, optname, optval, optlen); } diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index b429550c4cc2..ac86be52b461 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2005 Marc Kleine-Budde, Pengutronix +/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix * Copyright (C) 2006 Andrey Volkov, Varma Electronics * Copyright (C) 2008-2009 Wolfgang Grandegger <[email protected]> */ @@ -63,8 +62,7 @@ EXPORT_SYMBOL_GPL(can_len2dlc); #define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */ #define CAN_CALC_SYNC_SEG 1 -/* - * Bit-timing calculation derived from: +/* Bit-timing calculation derived from: * * Code based on LinCAN sources and H8S2638 project * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz @@ -76,10 +74,11 @@ EXPORT_SYMBOL_GPL(can_len2dlc); * registers of the CAN controller. You can find more information * in the header file linux/can/netlink.h. 
*/ -static int can_update_sample_point(const struct can_bittiming_const *btc, - unsigned int sample_point_nominal, unsigned int tseg, - unsigned int *tseg1_ptr, unsigned int *tseg2_ptr, - unsigned int *sample_point_error_ptr) +static int +can_update_sample_point(const struct can_bittiming_const *btc, + unsigned int sample_point_nominal, unsigned int tseg, + unsigned int *tseg1_ptr, unsigned int *tseg2_ptr, + unsigned int *sample_point_error_ptr) { unsigned int sample_point_error, best_sample_point_error = UINT_MAX; unsigned int sample_point, best_sample_point = 0; @@ -87,7 +86,9 @@ static int can_update_sample_point(const struct can_bittiming_const *btc, int i; for (i = 0; i <= 1; i++) { - tseg2 = tseg + CAN_CALC_SYNC_SEG - (sample_point_nominal * (tseg + CAN_CALC_SYNC_SEG)) / 1000 - i; + tseg2 = tseg + CAN_CALC_SYNC_SEG - + (sample_point_nominal * (tseg + CAN_CALC_SYNC_SEG)) / + 1000 - i; tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max); tseg1 = tseg - tseg2; if (tseg1 > btc->tseg1_max) { @@ -95,10 +96,12 @@ static int can_update_sample_point(const struct can_bittiming_const *btc, tseg2 = tseg - tseg1; } - sample_point = 1000 * (tseg + CAN_CALC_SYNC_SEG - tseg2) / (tseg + CAN_CALC_SYNC_SEG); + sample_point = 1000 * (tseg + CAN_CALC_SYNC_SEG - tseg2) / + (tseg + CAN_CALC_SYNC_SEG); sample_point_error = abs(sample_point_nominal - sample_point); - if ((sample_point <= sample_point_nominal) && (sample_point_error < best_sample_point_error)) { + if (sample_point <= sample_point_nominal && + sample_point_error < best_sample_point_error) { best_sample_point = sample_point; best_sample_point_error = sample_point_error; *tseg1_ptr = tseg1; @@ -149,7 +152,7 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, /* choose brp step which is possible in system */ brp = (brp / btc->brp_inc) * btc->brp_inc; - if ((brp < btc->brp_min) || (brp > btc->brp_max)) + if (brp < btc->brp_min || brp > btc->brp_max) continue; bitrate = priv->clock.freq / (brp * tsegall); @@ -163,7 +166,8 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, if (bitrate_error < best_bitrate_error) best_sample_point_error = UINT_MAX; - can_update_sample_point(btc, sample_point_nominal, tseg / 2, &tseg1, &tseg2, &sample_point_error); + can_update_sample_point(btc, sample_point_nominal, tseg / 2, + &tseg1, &tseg2, &sample_point_error); if (sample_point_error > best_sample_point_error) continue; @@ -192,8 +196,9 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, } /* real sample point */ - bt->sample_point = can_update_sample_point(btc, sample_point_nominal, best_tseg, - &tseg1, &tseg2, NULL); + bt->sample_point = can_update_sample_point(btc, sample_point_nominal, + best_tseg, &tseg1, &tseg2, + NULL); v64 = (u64)best_brp * 1000 * 1000 * 1000; do_div(v64, priv->clock.freq); @@ -217,7 +222,8 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, bt->brp = best_brp; /* real bitrate */ - bt->bitrate = priv->clock.freq / (bt->brp * (CAN_CALC_SYNC_SEG + tseg1 + tseg2)); + bt->bitrate = priv->clock.freq / + (bt->brp * (CAN_CALC_SYNC_SEG + tseg1 + tseg2)); return 0; } @@ -230,8 +236,7 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, } #endif /* CONFIG_CAN_CALC_BITTIMING */ -/* - * Checks the validity of the specified bit-timing parameters prop_seg, +/* Checks the validity of the specified bit-timing parameters prop_seg, * phase_seg1, phase_seg2 and sjw and tries to determine the bitrate * 
prescaler value brp. You can find more information in the header * file linux/can/netlink.h. @@ -271,9 +276,10 @@ static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt, } /* Checks the validity of predefined bitrate settings */ -static int can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt, - const u32 *bitrate_const, - const unsigned int bitrate_const_cnt) +static int +can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt, + const u32 *bitrate_const, + const unsigned int bitrate_const_cnt) { struct can_priv *priv = netdev_priv(dev); unsigned int i; @@ -296,8 +302,7 @@ static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt, { int err; - /* - * Depending on the given can_bittiming parameter structure the CAN + /* Depending on the given can_bittiming parameter structure the CAN * timing parameters are calculated based on the provided bitrate OR * alternatively the CAN timing parameters (tq, prop_seg, etc.) are * provided directly which are then checked and fixed up. @@ -398,8 +403,7 @@ void can_change_state(struct net_device *dev, struct can_frame *cf, } EXPORT_SYMBOL_GPL(can_change_state); -/* - * Local echo of CAN messages +/* Local echo of CAN messages * * CAN network devices *should* support a local echo functionality * (see Documentation/networking/can.rst). To test the handling of CAN @@ -424,8 +428,7 @@ static void can_flush_echo_skb(struct net_device *dev) } } -/* - * Put the skb on the stack to be looped backed locally lateron +/* Put the skb on the stack to be looped backed locally lateron * * The function is typically called in the start_xmit function * of the device driver. The driver must protect access to @@ -447,7 +450,6 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, } if (!priv->echo_skb[idx]) { - skb = can_create_echo_skb(skb); if (!skb) return; @@ -467,7 +469,8 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, } EXPORT_SYMBOL_GPL(can_put_echo_skb); -struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr) +struct sk_buff * +__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr) { struct can_priv *priv = netdev_priv(dev); @@ -494,8 +497,7 @@ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 return NULL; } -/* - * Get the skb from the stack and loop it back locally +/* Get the skb from the stack and loop it back locally * * The function is typically called when the TX done interrupt * is handled in the device driver. The driver must protect @@ -516,11 +518,10 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx) } EXPORT_SYMBOL_GPL(can_get_echo_skb); -/* - * Remove the skb from the stack and free it. - * - * The function is typically called when TX failed. - */ +/* Remove the skb from the stack and free it. + * + * The function is typically called when TX failed. 
+ */ void can_free_echo_skb(struct net_device *dev, unsigned int idx) { struct can_priv *priv = netdev_priv(dev); @@ -534,9 +535,7 @@ void can_free_echo_skb(struct net_device *dev, unsigned int idx) } EXPORT_SYMBOL_GPL(can_free_echo_skb); -/* - * CAN device restart for bus-off recovery - */ +/* CAN device restart for bus-off recovery */ static void can_restart(struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); @@ -547,15 +546,14 @@ static void can_restart(struct net_device *dev) BUG_ON(netif_carrier_ok(dev)); - /* - * No synchronization needed because the device is bus-off and + /* No synchronization needed because the device is bus-off and * no messages can come in or go out. */ can_flush_echo_skb(dev); /* send restart message upstream */ skb = alloc_can_err_skb(dev, &cf); - if (skb == NULL) { + if (!skb) { err = -ENOMEM; goto restart; } @@ -581,7 +579,8 @@ restart: static void can_restart_work(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); - struct can_priv *priv = container_of(dwork, struct can_priv, restart_work); + struct can_priv *priv = container_of(dwork, struct can_priv, + restart_work); can_restart(priv->dev); } @@ -590,8 +589,7 @@ int can_restart_now(struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); - /* - * A manual restart is only permitted if automatic restart is + /* A manual restart is only permitted if automatic restart is * disabled and the device is in the bus-off state */ if (priv->restart_ms) @@ -605,8 +603,7 @@ int can_restart_now(struct net_device *dev) return 0; } -/* - * CAN bus-off +/* CAN bus-off * * This functions should be called when the device goes bus-off to * tell the netif layer that no more packets can be sent or received. @@ -709,9 +706,7 @@ struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf) } EXPORT_SYMBOL_GPL(alloc_can_err_skb); -/* - * Allocate and setup space for the CAN network device - */ +/* Allocate and setup space for the CAN network device */ struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max, unsigned int txqs, unsigned int rxqs) { @@ -762,18 +757,14 @@ struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max, } EXPORT_SYMBOL_GPL(alloc_candev_mqs); -/* - * Free space of the CAN network device - */ +/* Free space of the CAN network device */ void free_candev(struct net_device *dev) { free_netdev(dev); } EXPORT_SYMBOL_GPL(free_candev); -/* - * changing MTU and control mode for CAN/CANFD devices - */ +/* changing MTU and control mode for CAN/CANFD devices */ int can_change_mtu(struct net_device *dev, int new_mtu) { struct can_priv *priv = netdev_priv(dev); @@ -810,8 +801,7 @@ int can_change_mtu(struct net_device *dev, int new_mtu) } EXPORT_SYMBOL_GPL(can_change_mtu); -/* - * Common open function when the device gets opened. +/* Common open function when the device gets opened. * * This function should be called in the open function of the device * driver. 
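[Editor's note] The echo-skb helpers whose comments are cleaned up in this hunk follow a fixed usage pattern in CAN drivers; a minimal sketch of that pattern (hypothetical driver with a single TX mailbox at index 0):

	/* In ndo_start_xmit(): queue the frame for local echo before
	 * handing it to the hardware.
	 */
	static netdev_tx_t example_start_xmit(struct sk_buff *skb,
					      struct net_device *dev)
	{
		unsigned int idx = 0;	/* TX mailbox index, hypothetical */

		if (can_dropped_invalid_skb(dev, skb))
			return NETDEV_TX_OK;

		can_put_echo_skb(skb, dev, idx);
		/* ... write the frame to the controller's TX mailbox ... */
		return NETDEV_TX_OK;
	}

	/* In the TX-done handler: loop the frame back to the stack;
	 * can_get_echo_skb() returns the echoed frame's byte count.
	 */
	static void example_tx_done(struct net_device *dev)
	{
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += can_get_echo_skb(dev, 0);
	}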
@@ -828,7 +818,7 @@ int open_candev(struct net_device *dev) /* For CAN FD the data bitrate has to be >= the arbitration bitrate */ if ((priv->ctrlmode & CAN_CTRLMODE_FD) && (!priv->data_bittiming.bitrate || - (priv->data_bittiming.bitrate < priv->bittiming.bitrate))) { + priv->data_bittiming.bitrate < priv->bittiming.bitrate)) { netdev_err(dev, "incorrect/missing data bit-timing\n"); return -EINVAL; } @@ -864,8 +854,7 @@ void of_can_transceiver(struct net_device *dev) EXPORT_SYMBOL_GPL(of_can_transceiver); #endif -/* - * Common close function for cleanup before the device gets closed. +/* Common close function for cleanup before the device gets closed. * * This function should be called in the close function of the device * driver. @@ -879,9 +868,7 @@ void close_candev(struct net_device *dev) } EXPORT_SYMBOL_GPL(close_candev); -/* - * CAN netlink interface - */ +/* CAN netlink interface */ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = { [IFLA_CAN_STATE] = { .type = NLA_U32 }, [IFLA_CAN_CTRLMODE] = { .len = sizeof(struct can_ctrlmode) }, @@ -1225,7 +1212,6 @@ static int can_newlink(struct net *src_net, struct net_device *dev, static void can_dellink(struct net_device *dev, struct list_head *head) { - return; } static struct rtnl_link_ops can_link_ops __read_mostly = { @@ -1243,9 +1229,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = { .fill_xstats = can_fill_xstats, }; -/* - * Register the CAN network device - */ +/* Register the CAN network device */ int register_candev(struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); @@ -1271,22 +1255,19 @@ int register_candev(struct net_device *dev) } EXPORT_SYMBOL_GPL(register_candev); -/* - * Unregister the CAN network device - */ +/* Unregister the CAN network device */ void unregister_candev(struct net_device *dev) { unregister_netdev(dev); } EXPORT_SYMBOL_GPL(unregister_candev); -/* - * Test if a network device is a candev based device +/* Test if a network device is a candev based device * and return the can_priv* if so. 
*/ struct can_priv *safe_candev_priv(struct net_device *dev) { - if ((dev->type != ARPHRD_CAN) || (dev->rtnl_link_ops != &can_link_ops)) + if (dev->type != ARPHRD_CAN || dev->rtnl_link_ops != &can_link_ops) return NULL; return netdev_priv(dev); @@ -1301,7 +1282,7 @@ static __init int can_dev_init(void) err = rtnl_link_register(&can_link_ops); if (!err) - printk(KERN_INFO MOD_DESC "\n"); + pr_info(MOD_DESC "\n"); return err; } diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c index f9815fda8840..6f766918211a 100644 --- a/drivers/net/can/kvaser_pciefd.c +++ b/drivers/net/can/kvaser_pciefd.c @@ -65,6 +65,7 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices"); #define KVASER_PCIEFD_SYSID_BASE 0x1f020 #define KVASER_PCIEFD_SYSID_VERSION_REG (KVASER_PCIEFD_SYSID_BASE + 0x8) #define KVASER_PCIEFD_SYSID_CANFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0xc) +#define KVASER_PCIEFD_SYSID_BUSFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0x10) #define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14) /* Shared receive buffer registers */ #define KVASER_PCIEFD_SRB_BASE 0x1f200 @@ -268,6 +269,7 @@ struct kvaser_pciefd { struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS]; void *dma_data[KVASER_PCIEFD_DMA_COUNT]; u8 nr_channels; + u32 bus_freq; u32 freq; u32 freq_to_ticks_div; }; @@ -666,7 +668,7 @@ static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can) spin_lock_irqsave(&can->lock, irq); /* Set frequency to 500 KHz*/ - top = can->can.clock.freq / (2 * 500000) - 1; + top = can->kv_pcie->bus_freq / (2 * 500000) - 1; pwm_ctrl = top & 0xff; pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT; @@ -1119,6 +1121,8 @@ static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie) return -ENODEV; } + pcie->bus_freq = ioread32(pcie->reg_base + + KVASER_PCIEFD_SYSID_BUSFREQ_REG); pcie->freq = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_CANFREQ_REG); pcie->freq_to_ticks_div = pcie->freq / 1000000; if (pcie->freq_to_ticks_div == 0) diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c index a697996d81b4..3db619209fe1 100644 --- a/drivers/net/can/m_can/tcan4x5x.c +++ b/drivers/net/can/m_can/tcan4x5x.c @@ -117,7 +117,6 @@ struct tcan4x5x_priv { struct m_can_classdev *mcan_dev; struct gpio_desc *reset_gpio; - struct gpio_desc *interrupt_gpio; struct gpio_desc *device_wake_gpio; struct gpio_desc *device_state_gpio; struct regulator *power; @@ -236,8 +235,6 @@ static u32 tcan4x5x_read_reg(struct m_can_classdev *cdev, int reg) struct tcan4x5x_priv *priv = cdev->device_data; u32 val; - tcan4x5x_check_wake(priv); - regmap_read(priv->regmap, priv->reg_offset + reg, &val); return val; @@ -248,8 +245,6 @@ static u32 tcan4x5x_read_fifo(struct m_can_classdev *cdev, int addr_offset) struct tcan4x5x_priv *priv = cdev->device_data; u32 val; - tcan4x5x_check_wake(priv); - regmap_read(priv->regmap, priv->mram_start + addr_offset, &val); return val; @@ -259,8 +254,6 @@ static int tcan4x5x_write_reg(struct m_can_classdev *cdev, int reg, int val) { struct tcan4x5x_priv *priv = cdev->device_data; - tcan4x5x_check_wake(priv); - return regmap_write(priv->regmap, priv->reg_offset + reg, val); } @@ -269,8 +262,6 @@ static int tcan4x5x_write_fifo(struct m_can_classdev *cdev, { struct tcan4x5x_priv *priv = cdev->device_data; - tcan4x5x_check_wake(priv); - return regmap_write(priv->regmap, priv->mram_start + addr_offset, val); } @@ -290,18 +281,13 @@ static int tcan4x5x_write_tcan_reg(struct m_can_classdev *cdev, { struct tcan4x5x_priv *priv = 
cdev->device_data; - tcan4x5x_check_wake(priv); - return regmap_write(priv->regmap, reg, val); } static int tcan4x5x_clear_interrupts(struct m_can_classdev *cdev) { - struct tcan4x5x_priv *tcan4x5x = cdev->device_data; int ret; - tcan4x5x_check_wake(tcan4x5x); - ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_STATUS, TCAN4X5X_CLEAR_ALL_INT); if (ret) @@ -356,13 +342,6 @@ static int tcan4x5x_parse_config(struct m_can_classdev *cdev) { struct tcan4x5x_priv *tcan4x5x = cdev->device_data; - tcan4x5x->interrupt_gpio = devm_gpiod_get(cdev->dev, "data-ready", - GPIOD_IN); - if (IS_ERR(tcan4x5x->interrupt_gpio)) { - dev_err(cdev->dev, "data-ready gpio not defined\n"); - return -EINVAL; - } - tcan4x5x->device_wake_gpio = devm_gpiod_get(cdev->dev, "device-wake", GPIOD_OUT_HIGH); if (IS_ERR(tcan4x5x->device_wake_gpio)) { @@ -381,8 +360,6 @@ static int tcan4x5x_parse_config(struct m_can_classdev *cdev) if (IS_ERR(tcan4x5x->device_state_gpio)) tcan4x5x->device_state_gpio = NULL; - cdev->net->irq = gpiod_to_irq(tcan4x5x->interrupt_gpio); - tcan4x5x->power = devm_regulator_get_optional(cdev->dev, "vsup"); if (PTR_ERR(tcan4x5x->power) == -EPROBE_DEFER) @@ -447,6 +424,7 @@ static int tcan4x5x_can_probe(struct spi_device *spi) mcan_class->is_peripheral = true; mcan_class->bit_timing = &tcan4x5x_bittiming_const; mcan_class->data_timing = &tcan4x5x_data_bittiming_const; + mcan_class->net->irq = spi->irq; spi_set_drvdata(spi, priv); diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index 58992fd61cb9..bee9f7b8dad6 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -17,26 +17,6 @@ * - Sascha Hauer, Marc Kleine-Budde, Pengutronix * - Simon Kallweit, intefo AG * Copyright 2007 - * - * Your platform definition file should specify something like: - * - * static struct mcp251x_platform_data mcp251x_info = { - * .oscillator_frequency = 8000000, - * }; - * - * static struct spi_board_info spi_board_info[] = { - * { - * .modalias = "mcp2510", - * // "mcp2515" or "mcp25625" depending on your controller - * .platform_data = &mcp251x_info, - * .irq = IRQ_EINT13, - * .max_speed_hz = 2*1000*1000, - * .chip_select = 2, - * }, - * }; - * - * Please see mcp251x.h for a description of the fields in - * struct mcp251x_platform_data. 
*/ #include <linux/can/core.h> @@ -53,8 +33,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> -#include <linux/of.h> -#include <linux/of_device.h> +#include <linux/property.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/spi/spi.h> @@ -914,7 +893,7 @@ static int mcp251x_open(struct net_device *net) priv->tx_skb = NULL; priv->tx_len = 0; - if (!spi->dev.of_node) + if (!dev_fwnode(&spi->dev)) flags = IRQF_TRIGGER_FALLING; ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist, @@ -1006,23 +985,20 @@ MODULE_DEVICE_TABLE(spi, mcp251x_id_table); static int mcp251x_can_probe(struct spi_device *spi) { - const struct of_device_id *of_id = of_match_device(mcp251x_of_match, - &spi->dev); + const void *match = device_get_match_data(&spi->dev); struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev); struct net_device *net; struct mcp251x_priv *priv; struct clk *clk; int freq, ret; - clk = devm_clk_get(&spi->dev, NULL); - if (IS_ERR(clk)) { - if (pdata) - freq = pdata->oscillator_frequency; - else - return PTR_ERR(clk); - } else { - freq = clk_get_rate(clk); - } + clk = devm_clk_get_optional(&spi->dev, NULL); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + freq = clk_get_rate(clk); + if (freq == 0 && pdata) + freq = pdata->oscillator_frequency; /* Sanity check */ if (freq < 1000000 || freq > 25000000) @@ -1033,11 +1009,9 @@ static int mcp251x_can_probe(struct spi_device *spi) if (!net) return -ENOMEM; - if (!IS_ERR(clk)) { - ret = clk_prepare_enable(clk); - if (ret) - goto out_free; - } + ret = clk_prepare_enable(clk); + if (ret) + goto out_free; net->netdev_ops = &mcp251x_netdev_ops; net->flags |= IFF_ECHO; @@ -1048,8 +1022,8 @@ static int mcp251x_can_probe(struct spi_device *spi) priv->can.clock.freq = freq / 2; priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY; - if (of_id) - priv->model = (enum mcp251x_model)of_id->data; + if (match) + priv->model = (enum mcp251x_model)match; else priv->model = spi_get_device_id(spi)->driver_data; priv->net = net; @@ -1122,8 +1096,7 @@ error_probe: mcp251x_power_enable(priv->power, 0); out_clk: - if (!IS_ERR(clk)) - clk_disable_unprepare(clk); + clk_disable_unprepare(clk); out_free: free_candev(net); @@ -1141,8 +1114,7 @@ static int mcp251x_can_remove(struct spi_device *spi) mcp251x_power_enable(priv->power, 0); - if (!IS_ERR(priv->clk)) - clk_disable_unprepare(priv->clk); + clk_disable_unprepare(priv->clk); free_candev(net); @@ -1170,10 +1142,8 @@ static int __maybe_unused mcp251x_can_suspend(struct device *dev) priv->after_suspend = AFTER_SUSPEND_DOWN; } - if (!IS_ERR_OR_NULL(priv->power)) { - regulator_disable(priv->power); - priv->after_suspend |= AFTER_SUSPEND_POWER; - } + mcp251x_power_enable(priv->power, 0); + priv->after_suspend |= AFTER_SUSPEND_POWER; return 0; } diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index c48e29486b10..1d8d36de4d20 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -13,7 +13,7 @@ #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/of_platform.h> -#include <linux/phy.h> +#include <linux/phylink.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/reset.h> @@ -633,61 +633,75 @@ mt7530_get_sset_count(struct dsa_switch *ds, int port, int sset) return ARRAY_SIZE(mt7530_mib); } -static void mt7530_adjust_link(struct dsa_switch *ds, int port, - struct phy_device *phydev) +static void mt7530_setup_port5(struct dsa_switch 
*ds, phy_interface_t interface) { struct mt7530_priv *priv = ds->priv; + u8 tx_delay = 0; + int val; - if (phy_is_pseudo_fixed_link(phydev)) { - dev_dbg(priv->dev, "phy-mode for master device = %x\n", - phydev->interface); + mutex_lock(&priv->reg_mutex); - /* Setup TX circuit incluing relevant PAD and driving */ - mt7530_pad_clk_setup(ds, phydev->interface); + val = mt7530_read(priv, MT7530_MHWTRAP); - if (priv->id == ID_MT7530) { - /* Setup RX circuit, relevant PAD and driving on the - * host which must be placed after the setup on the - * device side is all finished. - */ - mt7623_pad_clk_setup(ds); - } - } else { - u16 lcl_adv = 0, rmt_adv = 0; - u8 flowctrl; - u32 mcr = PMCR_USERP_LINK | PMCR_FORCE_MODE; + val |= MHWTRAP_MANUAL | MHWTRAP_P5_MAC_SEL | MHWTRAP_P5_DIS; + val &= ~MHWTRAP_P5_RGMII_MODE & ~MHWTRAP_PHY0_SEL; - switch (phydev->speed) { - case SPEED_1000: - mcr |= PMCR_FORCE_SPEED_1000; - break; - case SPEED_100: - mcr |= PMCR_FORCE_SPEED_100; - break; - } + switch (priv->p5_intf_sel) { + case P5_INTF_SEL_PHY_P0: + /* MT7530_P5_MODE_GPHY_P0: 2nd GMAC -> P5 -> P0 */ + val |= MHWTRAP_PHY0_SEL; + /* fall through */ + case P5_INTF_SEL_PHY_P4: + /* MT7530_P5_MODE_GPHY_P4: 2nd GMAC -> P5 -> P4 */ + val &= ~MHWTRAP_P5_MAC_SEL & ~MHWTRAP_P5_DIS; + + /* Setup the MAC by default for the cpu port */ + mt7530_write(priv, MT7530_PMCR_P(5), 0x56300); + break; + case P5_INTF_SEL_GMAC5: + /* MT7530_P5_MODE_GMAC: P5 -> External phy or 2nd GMAC */ + val &= ~MHWTRAP_P5_DIS; + break; + case P5_DISABLED: + interface = PHY_INTERFACE_MODE_NA; + break; + default: + dev_err(ds->dev, "Unsupported p5_intf_sel %d\n", + priv->p5_intf_sel); + goto unlock_exit; + } - if (phydev->link) - mcr |= PMCR_FORCE_LNK; + /* Setup RGMII settings */ + if (phy_interface_mode_is_rgmii(interface)) { + val |= MHWTRAP_P5_RGMII_MODE; - if (phydev->duplex) { - mcr |= PMCR_FORCE_FDX; + /* P5 RGMII RX Clock Control: delay setting for 1000M */ + mt7530_write(priv, MT7530_P5RGMIIRXCR, CSR_RGMII_EDGE_ALIGN); - if (phydev->pause) - rmt_adv = LPA_PAUSE_CAP; - if (phydev->asym_pause) - rmt_adv |= LPA_PAUSE_ASYM; + /* Don't set delay in DSA mode */ + if (!dsa_is_dsa_port(priv->ds, 5) && + (interface == PHY_INTERFACE_MODE_RGMII_TXID || + interface == PHY_INTERFACE_MODE_RGMII_ID)) + tx_delay = 4; /* n * 0.5 ns */ - lcl_adv = linkmode_adv_to_lcl_adv_t( - phydev->advertising); - flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); + /* P5 RGMII TX Clock Control: delay x */ + mt7530_write(priv, MT7530_P5RGMIITXCR, + CSR_RGMII_TXC_CFG(0x10 + tx_delay)); - if (flowctrl & FLOW_CTRL_TX) - mcr |= PMCR_TX_FC_EN; - if (flowctrl & FLOW_CTRL_RX) - mcr |= PMCR_RX_FC_EN; - } - mt7530_write(priv, MT7530_PMCR_P(port), mcr); + /* reduce P5 RGMII Tx driving, 8mA */ + mt7530_write(priv, MT7530_IO_DRV_CR, + P5_IO_CLK_DRV(1) | P5_IO_DATA_DRV(1)); } + + mt7530_write(priv, MT7530_MHWTRAP, val); + + dev_dbg(ds->dev, "Setup P5, HWTRAP=0x%x, intf_sel=%s, phy-mode=%s\n", + val, p5_intf_modes(priv->p5_intf_sel), phy_modes(interface)); + + priv->p5_interface = interface; + +unlock_exit: + mutex_unlock(&priv->reg_mutex); } static int @@ -698,9 +712,6 @@ mt7530_cpu_port_enable(struct mt7530_priv *priv, mt7530_write(priv, MT7530_PVC_P(port), PORT_SPEC_TAG); - /* Setup the MAC by default for the cpu port */ - mt7530_write(priv, MT7530_PMCR_P(port), PMCR_CPUP_LINK); - /* Disable auto learning on the cpu port */ mt7530_set(priv, MT7530_PSC_P(port), SA_DIS); @@ -731,9 +742,6 @@ mt7530_port_enable(struct dsa_switch *ds, int port, mutex_lock(&priv->reg_mutex); - /* Setup 
the MAC for the user port */ - mt7530_write(priv, MT7530_PMCR_P(port), PMCR_USERP_LINK); - /* Allow the user port gets connected to the cpu port and also * restore the port matrix if the port is the member of a certain * bridge. @@ -742,7 +750,7 @@ mt7530_port_enable(struct dsa_switch *ds, int port, priv->ports[port].enable = true; mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK, priv->ports[port].pm); - mt7530_port_set_status(priv, port, 1); + mt7530_port_set_status(priv, port, 0); mutex_unlock(&priv->reg_mutex); @@ -1232,10 +1240,13 @@ static int mt7530_setup(struct dsa_switch *ds) { struct mt7530_priv *priv = ds->priv; - int ret, i; - u32 id, val; - struct device_node *dn; + struct device_node *phy_node; + struct device_node *mac_np; struct mt7530_dummy_poll p; + phy_interface_t interface; + struct device_node *dn; + u32 id, val; + int ret, i; /* The parent node of master netdev which holds the common system * controller also is the container for two GMACs nodes representing @@ -1305,6 +1316,8 @@ mt7530_setup(struct dsa_switch *ds) val |= MHWTRAP_MANUAL; mt7530_write(priv, MT7530_MHWTRAP, val); + priv->p6_interface = PHY_INTERFACE_MODE_NA; + /* Enable and reset MIB counters */ mt7530_mib_reset(ds); @@ -1321,6 +1334,40 @@ mt7530_setup(struct dsa_switch *ds) mt7530_port_disable(ds, i); } + /* Setup port 5 */ + priv->p5_intf_sel = P5_DISABLED; + interface = PHY_INTERFACE_MODE_NA; + + if (!dsa_is_unused_port(ds, 5)) { + priv->p5_intf_sel = P5_INTF_SEL_GMAC5; + interface = of_get_phy_mode(ds->ports[5].dn); + } else { + /* Scan the ethernet nodes. look for GMAC1, lookup used phy */ + for_each_child_of_node(dn, mac_np) { + if (!of_device_is_compatible(mac_np, + "mediatek,eth-mac")) + continue; + + ret = of_property_read_u32(mac_np, "reg", &id); + if (ret < 0 || id != 1) + continue; + + phy_node = of_parse_phandle(mac_np, "phy-handle", 0); + if (phy_node->parent == priv->dev->of_node->parent) { + interface = of_get_phy_mode(mac_np); + id = of_mdio_parse_addr(ds->dev, phy_node); + if (id == 0) + priv->p5_intf_sel = P5_INTF_SEL_PHY_P0; + if (id == 4) + priv->p5_intf_sel = P5_INTF_SEL_PHY_P4; + } + of_node_put(phy_node); + break; + } + } + + mt7530_setup_port5(ds, interface); + /* Flush the FDB table */ ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL); if (ret < 0) @@ -1329,6 +1376,216 @@ mt7530_setup(struct dsa_switch *ds) return 0; } +static void mt7530_phylink_mac_config(struct dsa_switch *ds, int port, + unsigned int mode, + const struct phylink_link_state *state) +{ + struct mt7530_priv *priv = ds->priv; + u32 mcr_cur, mcr_new; + + switch (port) { + case 0: /* Internal phy */ + case 1: + case 2: + case 3: + case 4: + if (state->interface != PHY_INTERFACE_MODE_GMII) + return; + break; + case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */ + if (priv->p5_interface == state->interface) + break; + if (!phy_interface_mode_is_rgmii(state->interface) && + state->interface != PHY_INTERFACE_MODE_MII && + state->interface != PHY_INTERFACE_MODE_GMII) + return; + + mt7530_setup_port5(ds, state->interface); + break; + case 6: /* 1st cpu port */ + if (priv->p6_interface == state->interface) + break; + + if (state->interface != PHY_INTERFACE_MODE_RGMII && + state->interface != PHY_INTERFACE_MODE_TRGMII) + return; + + /* Setup TX circuit incluing relevant PAD and driving */ + mt7530_pad_clk_setup(ds, state->interface); + + if (priv->id == ID_MT7530) { + /* Setup RX circuit, relevant PAD and driving on the + * host which must be placed after the setup on the + * device side is all 
finished. + */ + mt7623_pad_clk_setup(ds); + } + + priv->p6_interface = state->interface; + break; + default: + dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port); + return; + } + + if (phylink_autoneg_inband(mode)) { + dev_err(ds->dev, "%s: in-band negotiation unsupported\n", + __func__); + return; + } + + mcr_cur = mt7530_read(priv, MT7530_PMCR_P(port)); + mcr_new = mcr_cur; + mcr_new &= ~(PMCR_FORCE_SPEED_1000 | PMCR_FORCE_SPEED_100 | + PMCR_FORCE_FDX | PMCR_TX_FC_EN | PMCR_RX_FC_EN); + mcr_new |= PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | PMCR_BACKOFF_EN | + PMCR_BACKPR_EN | PMCR_FORCE_MODE | PMCR_FORCE_LNK; + + /* Are we connected to external phy */ + if (port == 5 && dsa_is_user_port(ds, 5)) + mcr_new |= PMCR_EXT_PHY; + + switch (state->speed) { + case SPEED_1000: + mcr_new |= PMCR_FORCE_SPEED_1000; + break; + case SPEED_100: + mcr_new |= PMCR_FORCE_SPEED_100; + break; + } + if (state->duplex == DUPLEX_FULL) { + mcr_new |= PMCR_FORCE_FDX; + if (state->pause & MLO_PAUSE_TX) + mcr_new |= PMCR_TX_FC_EN; + if (state->pause & MLO_PAUSE_RX) + mcr_new |= PMCR_RX_FC_EN; + } + + if (mcr_new != mcr_cur) + mt7530_write(priv, MT7530_PMCR_P(port), mcr_new); +} + +static void mt7530_phylink_mac_link_down(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface) +{ + struct mt7530_priv *priv = ds->priv; + + mt7530_port_set_status(priv, port, 0); +} + +static void mt7530_phylink_mac_link_up(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface, + struct phy_device *phydev) +{ + struct mt7530_priv *priv = ds->priv; + + mt7530_port_set_status(priv, port, 1); +} + +static void mt7530_phylink_validate(struct dsa_switch *ds, int port, + unsigned long *supported, + struct phylink_link_state *state) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + + switch (port) { + case 0: /* Internal phy */ + case 1: + case 2: + case 3: + case 4: + if (state->interface != PHY_INTERFACE_MODE_NA && + state->interface != PHY_INTERFACE_MODE_GMII) + goto unsupported; + break; + case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */ + if (state->interface != PHY_INTERFACE_MODE_NA && + !phy_interface_mode_is_rgmii(state->interface) && + state->interface != PHY_INTERFACE_MODE_MII && + state->interface != PHY_INTERFACE_MODE_GMII) + goto unsupported; + break; + case 6: /* 1st cpu port */ + if (state->interface != PHY_INTERFACE_MODE_NA && + state->interface != PHY_INTERFACE_MODE_RGMII && + state->interface != PHY_INTERFACE_MODE_TRGMII) + goto unsupported; + break; + default: + dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port); +unsupported: + linkmode_zero(supported); + return; + } + + phylink_set_port_modes(mask); + phylink_set(mask, Autoneg); + + if (state->interface == PHY_INTERFACE_MODE_TRGMII) { + phylink_set(mask, 1000baseT_Full); + } else { + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + + if (state->interface != PHY_INTERFACE_MODE_MII) { + phylink_set(mask, 1000baseT_Half); + phylink_set(mask, 1000baseT_Full); + if (port == 5) + phylink_set(mask, 1000baseX_Full); + } + } + + phylink_set(mask, Pause); + phylink_set(mask, Asym_Pause); + + linkmode_and(supported, supported, mask); + linkmode_and(state->advertising, state->advertising, mask); +} + +static int +mt7530_phylink_mac_link_state(struct dsa_switch *ds, int port, + struct phylink_link_state *state) +{ + struct mt7530_priv *priv = ds->priv; + u32 pmsr; + + if (port < 0 || port >= 
MT7530_NUM_PORTS) + return -EINVAL; + + pmsr = mt7530_read(priv, MT7530_PMSR_P(port)); + + state->link = (pmsr & PMSR_LINK); + state->an_complete = state->link; + state->duplex = !!(pmsr & PMSR_DPX); + + switch (pmsr & PMSR_SPEED_MASK) { + case PMSR_SPEED_10: + state->speed = SPEED_10; + break; + case PMSR_SPEED_100: + state->speed = SPEED_100; + break; + case PMSR_SPEED_1000: + state->speed = SPEED_1000; + break; + default: + state->speed = SPEED_UNKNOWN; + break; + } + + state->pause &= ~(MLO_PAUSE_RX | MLO_PAUSE_TX); + if (pmsr & PMSR_RX_FC) + state->pause |= MLO_PAUSE_RX; + if (pmsr & PMSR_TX_FC) + state->pause |= MLO_PAUSE_TX; + + return 1; +} + static const struct dsa_switch_ops mt7530_switch_ops = { .get_tag_protocol = mtk_get_tag_protocol, .setup = mt7530_setup, @@ -1337,7 +1594,6 @@ static const struct dsa_switch_ops mt7530_switch_ops = { .phy_write = mt7530_phy_write, .get_ethtool_stats = mt7530_get_ethtool_stats, .get_sset_count = mt7530_get_sset_count, - .adjust_link = mt7530_adjust_link, .port_enable = mt7530_port_enable, .port_disable = mt7530_port_disable, .port_stp_state_set = mt7530_stp_state_set, @@ -1350,6 +1606,11 @@ static const struct dsa_switch_ops mt7530_switch_ops = { .port_vlan_prepare = mt7530_port_vlan_prepare, .port_vlan_add = mt7530_port_vlan_add, .port_vlan_del = mt7530_port_vlan_del, + .phylink_validate = mt7530_phylink_validate, + .phylink_mac_link_state = mt7530_phylink_mac_link_state, + .phylink_mac_config = mt7530_phylink_mac_config, + .phylink_mac_link_down = mt7530_phylink_mac_link_down, + .phylink_mac_link_up = mt7530_phylink_mac_link_up, }; static const struct of_device_id mt7530_of_match[] = { diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index bfac90f48102..ccb9da8cad0d 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -186,6 +186,7 @@ enum mt7530_vlan_port_attr { /* Register for port MAC control register */ #define MT7530_PMCR_P(x) (0x3000 + ((x) * 0x100)) #define PMCR_IFG_XMIT(x) (((x) & 0x3) << 18) +#define PMCR_EXT_PHY BIT(17) #define PMCR_MAC_MODE BIT(16) #define PMCR_FORCE_MODE BIT(15) #define PMCR_TX_EN BIT(14) @@ -198,26 +199,20 @@ enum mt7530_vlan_port_attr { #define PMCR_FORCE_SPEED_100 BIT(2) #define PMCR_FORCE_FDX BIT(1) #define PMCR_FORCE_LNK BIT(0) -#define PMCR_COMMON_LINK (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \ - PMCR_BACKOFF_EN | PMCR_BACKPR_EN | \ - PMCR_TX_EN | PMCR_RX_EN | \ - PMCR_TX_FC_EN | PMCR_RX_FC_EN) -#define PMCR_CPUP_LINK (PMCR_COMMON_LINK | PMCR_FORCE_MODE | \ - PMCR_FORCE_SPEED_1000 | \ - PMCR_FORCE_FDX | \ - PMCR_FORCE_LNK) -#define PMCR_USERP_LINK PMCR_COMMON_LINK -#define PMCR_FIXED_LINK (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \ - PMCR_FORCE_MODE | PMCR_TX_EN | \ - PMCR_RX_EN | PMCR_BACKPR_EN | \ - PMCR_BACKOFF_EN | \ - PMCR_FORCE_SPEED_1000 | \ - PMCR_FORCE_FDX | \ - PMCR_FORCE_LNK) -#define PMCR_FIXED_LINK_FC (PMCR_FIXED_LINK | \ - PMCR_TX_FC_EN | PMCR_RX_FC_EN) +#define PMCR_SPEED_MASK (PMCR_FORCE_SPEED_100 | \ + PMCR_FORCE_SPEED_1000) #define MT7530_PMSR_P(x) (0x3008 + (x) * 0x100) +#define PMSR_EEE1G BIT(7) +#define PMSR_EEE100M BIT(6) +#define PMSR_RX_FC BIT(5) +#define PMSR_TX_FC BIT(4) +#define PMSR_SPEED_1000 BIT(3) +#define PMSR_SPEED_100 BIT(2) +#define PMSR_SPEED_10 0x00 +#define PMSR_SPEED_MASK (PMSR_SPEED_100 | PMSR_SPEED_1000) +#define PMSR_DPX BIT(1) +#define PMSR_LINK BIT(0) /* Register for MIB */ #define MT7530_PORT_MIB_COUNTER(x) (0x4000 + (x) * 0x100) @@ -251,6 +246,7 @@ enum mt7530_vlan_port_attr { /* Register for hw trap modification */ #define MT7530_MHWTRAP 
0x7804 +#define MHWTRAP_PHY0_SEL BIT(20) #define MHWTRAP_MANUAL BIT(16) #define MHWTRAP_P5_MAC_SEL BIT(13) #define MHWTRAP_P6_DIS BIT(8) @@ -408,6 +404,30 @@ struct mt7530_port { u16 pvid; }; +/* Port 5 interface select definitions */ +enum p5_interface_select { + P5_DISABLED = 0, + P5_INTF_SEL_PHY_P0, + P5_INTF_SEL_PHY_P4, + P5_INTF_SEL_GMAC5, +}; + +static const char *p5_intf_modes(unsigned int p5_interface) +{ + switch (p5_interface) { + case P5_DISABLED: + return "DISABLED"; + case P5_INTF_SEL_PHY_P0: + return "PHY P0"; + case P5_INTF_SEL_PHY_P4: + return "PHY P4"; + case P5_INTF_SEL_GMAC5: + return "GMAC5"; + default: + return "unknown"; + } +} + /* struct mt7530_priv - This is the main data structure for holding the state * of the driver * @dev: The device pointer @@ -423,6 +443,8 @@ struct mt7530_port { * @ports: Holding the state among ports * @reg_mutex: The lock for protecting among process accessing * registers + * @p6_interface Holding the current port 6 interface + * @p5_intf_sel: Holding the current port 5 interface select */ struct mt7530_priv { struct device *dev; @@ -435,6 +457,9 @@ struct mt7530_priv { struct gpio_desc *reset; unsigned int id; bool mcm; + phy_interface_t p6_interface; + phy_interface_t p5_interface; + unsigned int p5_intf_sel; struct mt7530_port ports[MT7530_NUM_PORTS]; /* protect among processes for registers access*/ diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 93a2d4deb27c..2830dc283ce6 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -168,6 +168,7 @@ config ETHOC source "drivers/net/ethernet/packetengines/Kconfig" source "drivers/net/ethernet/pasemi/Kconfig" +source "drivers/net/ethernet/pensando/Kconfig" source "drivers/net/ethernet/qlogic/Kconfig" source "drivers/net/ethernet/qualcomm/Kconfig" source "drivers/net/ethernet/rdc/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index fb9155cffcff..061edd22f507 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -97,3 +97,4 @@ obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/ obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/ obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/ obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/ +obj-$(CONFIG_NET_VENDOR_PENSANDO) += pensando/ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 5402867272be..162d7d8fb295 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -1348,7 +1348,7 @@ static u32 ingress_fq_count(struct dpaa2_eth_priv *priv) return total; } -static void wait_for_fq_empty(struct dpaa2_eth_priv *priv) +static void wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv) { int retries = 10; u32 pending; @@ -1360,6 +1360,31 @@ static void wait_for_fq_empty(struct dpaa2_eth_priv *priv) } while (pending && --retries); } +#define DPNI_TX_PENDING_VER_MAJOR 7 +#define DPNI_TX_PENDING_VER_MINOR 13 +static void wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv) +{ + union dpni_statistics stats; + int retries = 10; + int err; + + if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_TX_PENDING_VER_MAJOR, + DPNI_TX_PENDING_VER_MINOR) < 0) + goto out; + + do { + err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6, + &stats); + if (err) + goto out; + if (stats.page_6.tx_pending_frames == 0) + return; + } while (--retries); + +out: + msleep(500); +} + static int dpaa2_eth_stop(struct net_device *net_dev) { struct dpaa2_eth_priv *priv = 
netdev_priv(net_dev); @@ -1379,7 +1404,7 @@ static int dpaa2_eth_stop(struct net_device *net_dev) * on WRIOP. After it finishes, wait until all remaining frames on Rx * and Tx conf queues are consumed on NAPI poll. */ - msleep(500); + wait_for_egress_fq_empty(priv); do { dpni_disable(priv->mc_io, 0, priv->mc_token); @@ -1395,7 +1420,7 @@ static int dpaa2_eth_stop(struct net_device *net_dev) */ } - wait_for_fq_empty(priv); + wait_for_ingress_fq_empty(priv); disable_ch_napi(priv); /* Empty the buffer pool */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c index 93076fe01b45..0aa1c34019bb 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c @@ -28,6 +28,11 @@ static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = { "[hw] rx nobuffer discards", "[hw] tx discarded frames", "[hw] tx confirmed frames", + "[hw] tx dequeued bytes", + "[hw] tx dequeued frames", + "[hw] tx rejected bytes", + "[hw] tx rejected frames", + "[hw] tx pending frames", }; #define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats) @@ -188,27 +193,33 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, struct dpaa2_eth_priv *priv = netdev_priv(net_dev); struct dpaa2_eth_drv_stats *extras; struct dpaa2_eth_ch_stats *ch_stats; + int dpni_stats_page_size[DPNI_STATISTICS_CNT] = { + sizeof(dpni_stats.page_0), + sizeof(dpni_stats.page_1), + sizeof(dpni_stats.page_2), + sizeof(dpni_stats.page_3), + sizeof(dpni_stats.page_4), + sizeof(dpni_stats.page_5), + sizeof(dpni_stats.page_6), + }; memset(data, 0, sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS)); /* Print standard counters, from DPNI statistics */ - for (j = 0; j <= 2; j++) { + for (j = 0; j <= 6; j++) { + /* We're not interested in pages 4 & 5 for now */ + if (j == 4 || j == 5) + continue; err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, j, &dpni_stats); - if (err != 0) + if (err == -EINVAL) + /* Older firmware versions don't support all pages */ + memset(&dpni_stats, 0, sizeof(dpni_stats)); + else netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j); - switch (j) { - case 0: - num_cnt = sizeof(dpni_stats.page_0) / sizeof(u64); - break; - case 1: - num_cnt = sizeof(dpni_stats.page_1) / sizeof(u64); - break; - case 2: - num_cnt = sizeof(dpni_stats.page_2) / sizeof(u64); - break; - } + + num_cnt = dpni_stats_page_size[j] / sizeof(u64); for (k = 0; k < num_cnt; k++) *(data + i++) = dpni_stats.raw.counter[k]; } diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c index 05e30893dee6..dd54e6953aeb 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpni.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c @@ -1470,7 +1470,7 @@ int dpni_get_queue(struct fsl_mc_io *mc_io, * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPNI object * @page: Selects the statistics page to retrieve, see - * DPNI_GET_STATISTICS output. Pages are numbered 0 to 2. + * DPNI_GET_STATISTICS output. Pages are numbered 0 to 6. * @stat: Structure containing the statistics * * Return: '0' on Success; Error code otherwise. 
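[Editor's note] The new page 6 is what wait_for_egress_fq_empty() above polls; fetching any statistics page follows the same shape. A short sketch using the names from this diff (the surrounding driver plumbing, priv and net_dev, is assumed to be in scope):

	union dpni_statistics stats;
	int err;

	/* Page 6 carries a single counter: frames still queued on the
	 * egress FQs (needs DPNI firmware >= 7.13, per the version check
	 * added above).
	 */
	err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6, &stats);
	if (!err && stats.page_6.tx_pending_frames == 0)
		netdev_dbg(net_dev, "egress frame queues drained\n");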
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h index 3e8fc6c7a8da..fd583911b6c0 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpni.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h @@ -416,6 +416,26 @@ int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io, * lack of buffers * @page_2.egress_discarded_frames: Egress discarded frame count * @page_2.egress_confirmed_frames: Egress confirmed frame count + * @page3: Page_3 statistics structure + * @page_3.egress_dequeue_bytes: Cumulative count of the number of bytes + * dequeued from egress FQs + * @page_3.egress_dequeue_frames: Cumulative count of the number of frames + * dequeued from egress FQs + * @page_3.egress_reject_bytes: Cumulative count of the number of bytes in + * egress frames whose enqueue was rejected + * @page_3.egress_reject_frames: Cumulative count of the number of egress + * frames whose enqueue was rejected + * @page_4: Page_4 statistics structure: congestion points + * @page_4.cgr_reject_frames: number of rejected frames due to congestion point + * @page_4.cgr_reject_bytes: number of rejected bytes due to congestion point + * @page_5: Page_5 statistics structure: policer + * @page_5.policer_cnt_red: NUmber of red colored frames + * @page_5.policer_cnt_yellow: number of yellow colored frames + * @page_5.policer_cnt_green: number of green colored frames + * @page_5.policer_cnt_re_red: number of recolored red frames + * @page_5.policer_cnt_re_yellow: number of recolored yellow frames + * @page_6: Page_6 statistics structure + * @page_6.tx_pending_frames: total number of frames pending in egress FQs * @raw: raw statistics structure, used to index counters */ union dpni_statistics { @@ -443,6 +463,26 @@ union dpni_statistics { u64 egress_confirmed_frames; } page_2; struct { + u64 egress_dequeue_bytes; + u64 egress_dequeue_frames; + u64 egress_reject_bytes; + u64 egress_reject_frames; + } page_3; + struct { + u64 cgr_reject_frames; + u64 cgr_reject_bytes; + } page_4; + struct { + u64 policer_cnt_red; + u64 policer_cnt_yellow; + u64 policer_cnt_green; + u64 policer_cnt_re_red; + u64 policer_cnt_re_yellow; + } page_5; + struct { + u64 tx_pending_frames; + } page_6; + struct { u64 counter[DPNI_STATISTICS_CNT]; } raw; }; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index 31629fc7e820..113f6087c7c9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -960,11 +960,9 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) return 0; err_aead: - memset(xs->aead, 0, sizeof(*xs->aead)); - kfree(xs->aead); + kzfree(xs->aead); err_xs: - memset(xs, 0, sizeof(*xs)); - kfree(xs); + kzfree(xs); err_out: msgbuf[1] = err; return err; @@ -1049,8 +1047,7 @@ int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) ixgbe_ipsec_del_sa(xs); /* remove the xs that was made-up in the add request */ - memset(xs, 0, sizeof(*xs)); - kfree(xs); + kzfree(xs); return 0; } diff --git a/drivers/net/ethernet/pensando/Kconfig b/drivers/net/ethernet/pensando/Kconfig new file mode 100644 index 000000000000..5ea570be8379 --- /dev/null +++ b/drivers/net/ethernet/pensando/Kconfig @@ -0,0 +1,32 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2019 Pensando Systems, Inc +# +# Pensando device configuration +# + +config NET_VENDOR_PENSANDO + bool "Pensando devices" + default y + help + If you have a network (Ethernet) card belonging to this 
class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Pensando cards. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_PENSANDO + +config IONIC + tristate "Pensando Ethernet IONIC Support" + depends on 64BIT && PCI + help + This enables the support for the Pensando family of Ethernet + adapters. More specific information on this driver can be + found in + <file:Documentation/networking/device_drivers/pensando/ionic.rst>. + + To compile this driver as a module, choose M here. The module + will be called ionic. + +endif # NET_VENDOR_PENSANDO diff --git a/drivers/net/ethernet/pensando/Makefile b/drivers/net/ethernet/pensando/Makefile new file mode 100644 index 000000000000..21ce7499c122 --- /dev/null +++ b/drivers/net/ethernet/pensando/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Pensando network device drivers. +# + +obj-$(CONFIG_IONIC) += ionic/ diff --git a/drivers/net/ethernet/pensando/ionic/Makefile b/drivers/net/ethernet/pensando/ionic/Makefile new file mode 100644 index 000000000000..29f304d75261 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright(c) 2017 - 2019 Pensando Systems, Inc + +obj-$(CONFIG_IONIC) := ionic.o + +ionic-y := ionic_main.o ionic_bus_pci.o ionic_devlink.o ionic_dev.o \ + ionic_debugfs.o ionic_lif.o ionic_rx_filter.o ionic_ethtool.o \ + ionic_txrx.o ionic_stats.o diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h new file mode 100644 index 000000000000..7a7060677f15 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#ifndef _IONIC_H_ +#define _IONIC_H_ + +struct ionic_lif; + +#include "ionic_if.h" +#include "ionic_dev.h" +#include "ionic_devlink.h" + +#define IONIC_DRV_NAME "ionic" +#define IONIC_DRV_DESCRIPTION "Pensando Ethernet NIC Driver" +#define IONIC_DRV_VERSION "0.15.0-k" + +#define PCI_VENDOR_ID_PENSANDO 0x1dd8 + +#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF 0x1002 +#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003 + +#define IONIC_SUBDEV_ID_NAPLES_25 0x4000 +#define IONIC_SUBDEV_ID_NAPLES_100_4 0x4001 +#define IONIC_SUBDEV_ID_NAPLES_100_8 0x4002 + +#define DEVCMD_TIMEOUT 10 + +struct ionic { + struct pci_dev *pdev; + struct device *dev; + struct devlink_port dl_port; + struct ionic_dev idev; + struct mutex dev_cmd_lock; /* lock for dev_cmd operations */ + struct dentry *dentry; + struct ionic_dev_bar bars[IONIC_BARS_MAX]; + unsigned int num_bars; + struct ionic_identity ident; + struct list_head lifs; + struct ionic_lif *master_lif; + unsigned int nnqs_per_lif; + unsigned int neqs_per_lif; + unsigned int ntxqs_per_lif; + unsigned int nrxqs_per_lif; + DECLARE_BITMAP(lifbits, IONIC_LIFS_MAX); + unsigned int nintrs; + DECLARE_BITMAP(intrs, IONIC_INTR_CTRL_REGS_MAX); + struct work_struct nb_work; + struct notifier_block nb; +}; + +struct ionic_admin_ctx { + struct completion work; + union ionic_adminq_cmd cmd; + union ionic_adminq_comp comp; +}; + +int ionic_napi(struct napi_struct *napi, int budget, ionic_cq_cb cb, + ionic_cq_done_cb done_cb, void *done_arg); + +int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx); +int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long 
max_wait); +int ionic_set_dma_mask(struct ionic *ionic); +int ionic_setup(struct ionic *ionic); + +int ionic_identify(struct ionic *ionic); +int ionic_init(struct ionic *ionic); +int ionic_reset(struct ionic *ionic); + +int ionic_port_identify(struct ionic *ionic); +int ionic_port_init(struct ionic *ionic); +int ionic_port_reset(struct ionic *ionic); + +#endif /* _IONIC_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus.h b/drivers/net/ethernet/pensando/ionic/ionic_bus.h new file mode 100644 index 000000000000..2f4d08c64910 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_bus.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#ifndef _IONIC_BUS_H_ +#define _IONIC_BUS_H_ + +int ionic_bus_get_irq(struct ionic *ionic, unsigned int num); +const char *ionic_bus_info(struct ionic *ionic); +int ionic_bus_alloc_irq_vectors(struct ionic *ionic, unsigned int nintrs); +void ionic_bus_free_irq_vectors(struct ionic *ionic); +int ionic_bus_register_driver(void); +void ionic_bus_unregister_driver(void); +void __iomem *ionic_bus_map_dbpage(struct ionic *ionic, int page_num); +void ionic_bus_unmap_dbpage(struct ionic *ionic, void __iomem *page); + +#endif /* _IONIC_BUS_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c new file mode 100644 index 000000000000..9a9ab8cb2cb3 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c @@ -0,0 +1,292 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/pci.h> + +#include "ionic.h" +#include "ionic_bus.h" +#include "ionic_lif.h" +#include "ionic_debugfs.h" + +/* Supported devices */ +static const struct pci_device_id ionic_id_table[] = { + { PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF) }, + { PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF) }, + { 0, } /* end of table */ +}; +MODULE_DEVICE_TABLE(pci, ionic_id_table); + +int ionic_bus_get_irq(struct ionic *ionic, unsigned int num) +{ + return pci_irq_vector(ionic->pdev, num); +} + +const char *ionic_bus_info(struct ionic *ionic) +{ + return pci_name(ionic->pdev); +} + +int ionic_bus_alloc_irq_vectors(struct ionic *ionic, unsigned int nintrs) +{ + return pci_alloc_irq_vectors(ionic->pdev, nintrs, nintrs, + PCI_IRQ_MSIX); +} + +void ionic_bus_free_irq_vectors(struct ionic *ionic) +{ + pci_free_irq_vectors(ionic->pdev); +} + +static int ionic_map_bars(struct ionic *ionic) +{ + struct pci_dev *pdev = ionic->pdev; + struct device *dev = ionic->dev; + struct ionic_dev_bar *bars; + unsigned int i, j; + + bars = ionic->bars; + ionic->num_bars = 0; + + for (i = 0, j = 0; i < IONIC_BARS_MAX; i++) { + if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM)) + continue; + bars[j].len = pci_resource_len(pdev, i); + + /* only map the whole bar 0 */ + if (j > 0) { + bars[j].vaddr = NULL; + } else { + bars[j].vaddr = pci_iomap(pdev, i, bars[j].len); + if (!bars[j].vaddr) { + dev_err(dev, + "Cannot memory-map BAR %d, aborting\n", + i); + return -ENODEV; + } + } + + bars[j].bus_addr = pci_resource_start(pdev, i); + bars[j].res_index = i; + ionic->num_bars++; + j++; + } + + return 0; +} + +static void ionic_unmap_bars(struct ionic *ionic) +{ + struct ionic_dev_bar *bars = ionic->bars; + unsigned int i; + + for (i = 0; i < IONIC_BARS_MAX; i++) { + if (bars[i].vaddr) { + 
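+ /* Only BAR0 is fully mapped in ionic_map_bars(), so only
+ * entries with a non-NULL vaddr need to be handed back to
+ * iounmap() here.
+ */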
iounmap(bars[i].vaddr); + bars[i].bus_addr = 0; + bars[i].vaddr = NULL; + bars[i].len = 0; + } + } +} + +void __iomem *ionic_bus_map_dbpage(struct ionic *ionic, int page_num) +{ + return pci_iomap_range(ionic->pdev, + ionic->bars[IONIC_PCI_BAR_DBELL].res_index, + (u64)page_num << PAGE_SHIFT, PAGE_SIZE); +} + +void ionic_bus_unmap_dbpage(struct ionic *ionic, void __iomem *page) +{ + iounmap(page); +} + +static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct device *dev = &pdev->dev; + struct ionic *ionic; + int err; + + ionic = ionic_devlink_alloc(dev); + if (!ionic) + return -ENOMEM; + + ionic->pdev = pdev; + ionic->dev = dev; + pci_set_drvdata(pdev, ionic); + mutex_init(&ionic->dev_cmd_lock); + + /* Query system for DMA addressing limitation for the device. */ + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(IONIC_ADDR_LEN)); + if (err) { + dev_err(dev, "Unable to obtain 64-bit DMA for consistent allocations, aborting. err=%d\n", + err); + goto err_out_clear_drvdata; + } + + ionic_debugfs_add_dev(ionic); + + /* Setup PCI device */ + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(dev, "Cannot enable PCI device: %d, aborting\n", err); + goto err_out_debugfs_del_dev; + } + + err = pci_request_regions(pdev, IONIC_DRV_NAME); + if (err) { + dev_err(dev, "Cannot request PCI regions: %d, aborting\n", err); + goto err_out_pci_disable_device; + } + + pci_set_master(pdev); + + err = ionic_map_bars(ionic); + if (err) + goto err_out_pci_clear_master; + + /* Configure the device */ + err = ionic_setup(ionic); + if (err) { + dev_err(dev, "Cannot setup device: %d, aborting\n", err); + goto err_out_unmap_bars; + } + + err = ionic_identify(ionic); + if (err) { + dev_err(dev, "Cannot identify device: %d, aborting\n", err); + goto err_out_teardown; + } + + err = ionic_init(ionic); + if (err) { + dev_err(dev, "Cannot init device: %d, aborting\n", err); + goto err_out_teardown; + } + + /* Configure the ports */ + err = ionic_port_identify(ionic); + if (err) { + dev_err(dev, "Cannot identify port: %d, aborting\n", err); + goto err_out_reset; + } + + err = ionic_port_init(ionic); + if (err) { + dev_err(dev, "Cannot init port: %d, aborting\n", err); + goto err_out_reset; + } + + /* Configure LIFs */ + err = ionic_lif_identify(ionic, IONIC_LIF_TYPE_CLASSIC, + &ionic->ident.lif); + if (err) { + dev_err(dev, "Cannot identify LIFs: %d, aborting\n", err); + goto err_out_port_reset; + } + + err = ionic_lifs_size(ionic); + if (err) { + dev_err(dev, "Cannot size LIFs: %d, aborting\n", err); + goto err_out_port_reset; + } + + err = ionic_lifs_alloc(ionic); + if (err) { + dev_err(dev, "Cannot allocate LIFs: %d, aborting\n", err); + goto err_out_free_irqs; + } + + err = ionic_lifs_init(ionic); + if (err) { + dev_err(dev, "Cannot init LIFs: %d, aborting\n", err); + goto err_out_free_lifs; + } + + err = ionic_lifs_register(ionic); + if (err) { + dev_err(dev, "Cannot register LIFs: %d, aborting\n", err); + goto err_out_deinit_lifs; + } + + err = ionic_devlink_register(ionic); + if (err) { + dev_err(dev, "Cannot register devlink: %d\n", err); + goto err_out_deregister_lifs; + } + + return 0; + +err_out_deregister_lifs: + ionic_lifs_unregister(ionic); +err_out_deinit_lifs: + ionic_lifs_deinit(ionic); +err_out_free_lifs: + ionic_lifs_free(ionic); +err_out_free_irqs: + ionic_bus_free_irq_vectors(ionic); +err_out_port_reset: + ionic_port_reset(ionic); +err_out_reset: + ionic_reset(ionic); +err_out_teardown: + ionic_dev_teardown(ionic); +err_out_unmap_bars: + ionic_unmap_bars(ionic); 
+ pci_release_regions(pdev); +err_out_pci_clear_master: + pci_clear_master(pdev); +err_out_pci_disable_device: + pci_disable_device(pdev); +err_out_debugfs_del_dev: + ionic_debugfs_del_dev(ionic); +err_out_clear_drvdata: + mutex_destroy(&ionic->dev_cmd_lock); + ionic_devlink_free(ionic); + + return err; +} + +static void ionic_remove(struct pci_dev *pdev) +{ + struct ionic *ionic = pci_get_drvdata(pdev); + + if (!ionic) + return; + + ionic_devlink_unregister(ionic); + ionic_lifs_unregister(ionic); + ionic_lifs_deinit(ionic); + ionic_lifs_free(ionic); + ionic_bus_free_irq_vectors(ionic); + ionic_port_reset(ionic); + ionic_reset(ionic); + ionic_dev_teardown(ionic); + ionic_unmap_bars(ionic); + pci_release_regions(pdev); + pci_clear_master(pdev); + pci_disable_device(pdev); + ionic_debugfs_del_dev(ionic); + mutex_destroy(&ionic->dev_cmd_lock); + ionic_devlink_free(ionic); +} + +static struct pci_driver ionic_driver = { + .name = IONIC_DRV_NAME, + .id_table = ionic_id_table, + .probe = ionic_probe, + .remove = ionic_remove, +}; + +int ionic_bus_register_driver(void) +{ + return pci_register_driver(&ionic_driver); +} + +void ionic_bus_unregister_driver(void) +{ + pci_unregister_driver(&ionic_driver); +} diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c new file mode 100644 index 000000000000..7afc4a365b75 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c @@ -0,0 +1,248 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#include <linux/pci.h> +#include <linux/netdevice.h> + +#include "ionic.h" +#include "ionic_bus.h" +#include "ionic_lif.h" +#include "ionic_debugfs.h" + +#ifdef CONFIG_DEBUG_FS + +static struct dentry *ionic_dir; + +void ionic_debugfs_create(void) +{ + ionic_dir = debugfs_create_dir(IONIC_DRV_NAME, NULL); +} + +void ionic_debugfs_destroy(void) +{ + debugfs_remove_recursive(ionic_dir); +} + +void ionic_debugfs_add_dev(struct ionic *ionic) +{ + ionic->dentry = debugfs_create_dir(ionic_bus_info(ionic), ionic_dir); +} + +void ionic_debugfs_del_dev(struct ionic *ionic) +{ + debugfs_remove_recursive(ionic->dentry); + ionic->dentry = NULL; +} + +static int identity_show(struct seq_file *seq, void *v) +{ + struct ionic *ionic = seq->private; + struct ionic_identity *ident; + + ident = &ionic->ident; + + seq_printf(seq, "nlifs: %d\n", ident->dev.nlifs); + seq_printf(seq, "nintrs: %d\n", ident->dev.nintrs); + seq_printf(seq, "ndbpgs_per_lif: %d\n", ident->dev.ndbpgs_per_lif); + seq_printf(seq, "intr_coal_mult: %d\n", ident->dev.intr_coal_mult); + seq_printf(seq, "intr_coal_div: %d\n", ident->dev.intr_coal_div); + + seq_printf(seq, "max_ucast_filters: %d\n", ident->lif.eth.max_ucast_filters); + seq_printf(seq, "max_mcast_filters: %d\n", ident->lif.eth.max_mcast_filters); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(identity); + +void ionic_debugfs_add_ident(struct ionic *ionic) +{ + debugfs_create_file("identity", 0400, ionic->dentry, + ionic, &identity_fops) ? 
0 : -EOPNOTSUPP; +} + +void ionic_debugfs_add_sizes(struct ionic *ionic) +{ + debugfs_create_u32("nlifs", 0400, ionic->dentry, + (u32 *)&ionic->ident.dev.nlifs); + debugfs_create_u32("nintrs", 0400, ionic->dentry, &ionic->nintrs); + + debugfs_create_u32("ntxqs_per_lif", 0400, ionic->dentry, + (u32 *)&ionic->ident.lif.eth.config.queue_count[IONIC_QTYPE_TXQ]); + debugfs_create_u32("nrxqs_per_lif", 0400, ionic->dentry, + (u32 *)&ionic->ident.lif.eth.config.queue_count[IONIC_QTYPE_RXQ]); +} + +static int q_tail_show(struct seq_file *seq, void *v) +{ + struct ionic_queue *q = seq->private; + + seq_printf(seq, "%d\n", q->tail->index); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(q_tail); + +static int q_head_show(struct seq_file *seq, void *v) +{ + struct ionic_queue *q = seq->private; + + seq_printf(seq, "%d\n", q->head->index); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(q_head); + +static int cq_tail_show(struct seq_file *seq, void *v) +{ + struct ionic_cq *cq = seq->private; + + seq_printf(seq, "%d\n", cq->tail->index); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(cq_tail); + +static const struct debugfs_reg32 intr_ctrl_regs[] = { + { .name = "coal_init", .offset = 0, }, + { .name = "mask", .offset = 4, }, + { .name = "credits", .offset = 8, }, + { .name = "mask_on_assert", .offset = 12, }, + { .name = "coal_timer", .offset = 16, }, +}; + +void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq) +{ + struct dentry *q_dentry, *cq_dentry, *intr_dentry, *stats_dentry; + struct ionic_dev *idev = &lif->ionic->idev; + struct debugfs_regset32 *intr_ctrl_regset; + struct ionic_intr_info *intr = &qcq->intr; + struct debugfs_blob_wrapper *desc_blob; + struct device *dev = lif->ionic->dev; + struct ionic_queue *q = &qcq->q; + struct ionic_cq *cq = &qcq->cq; + + qcq->dentry = debugfs_create_dir(q->name, lif->dentry); + + debugfs_create_x32("total_size", 0400, qcq->dentry, &qcq->total_size); + debugfs_create_x64("base_pa", 0400, qcq->dentry, &qcq->base_pa); + + q_dentry = debugfs_create_dir("q", qcq->dentry); + + debugfs_create_u32("index", 0400, q_dentry, &q->index); + debugfs_create_x64("base_pa", 0400, q_dentry, &q->base_pa); + if (qcq->flags & IONIC_QCQ_F_SG) { + debugfs_create_x64("sg_base_pa", 0400, q_dentry, + &q->sg_base_pa); + debugfs_create_u32("sg_desc_size", 0400, q_dentry, + &q->sg_desc_size); + } + debugfs_create_u32("num_descs", 0400, q_dentry, &q->num_descs); + debugfs_create_u32("desc_size", 0400, q_dentry, &q->desc_size); + debugfs_create_u32("pid", 0400, q_dentry, &q->pid); + debugfs_create_u32("qid", 0400, q_dentry, &q->hw_index); + debugfs_create_u32("qtype", 0400, q_dentry, &q->hw_type); + debugfs_create_u64("drop", 0400, q_dentry, &q->drop); + debugfs_create_u64("stop", 0400, q_dentry, &q->stop); + debugfs_create_u64("wake", 0400, q_dentry, &q->wake); + + debugfs_create_file("tail", 0400, q_dentry, q, &q_tail_fops); + debugfs_create_file("head", 0400, q_dentry, q, &q_head_fops); + + desc_blob = devm_kzalloc(dev, sizeof(*desc_blob), GFP_KERNEL); + if (!desc_blob) + return; + desc_blob->data = q->base; + desc_blob->size = (unsigned long)q->num_descs * q->desc_size; + debugfs_create_blob("desc_blob", 0400, q_dentry, desc_blob); + + if (qcq->flags & IONIC_QCQ_F_SG) { + desc_blob = devm_kzalloc(dev, sizeof(*desc_blob), GFP_KERNEL); + if (!desc_blob) + return; + desc_blob->data = q->sg_base; + desc_blob->size = (unsigned long)q->num_descs * q->sg_desc_size; + debugfs_create_blob("sg_desc_blob", 0400, q_dentry, + desc_blob); + } + + cq_dentry = debugfs_create_dir("cq", qcq->dentry); + + 
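+ /* Mirror the q/ directory for the completion ring: geometry,
+ * the done_color phase bit, the current tail, and a raw blob of
+ * the descriptors for offline inspection.
+ */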
debugfs_create_x64("base_pa", 0400, cq_dentry, &cq->base_pa); + debugfs_create_u32("num_descs", 0400, cq_dentry, &cq->num_descs); + debugfs_create_u32("desc_size", 0400, cq_dentry, &cq->desc_size); + debugfs_create_u8("done_color", 0400, cq_dentry, + (u8 *)&cq->done_color); + + debugfs_create_file("tail", 0400, cq_dentry, cq, &cq_tail_fops); + + desc_blob = devm_kzalloc(dev, sizeof(*desc_blob), GFP_KERNEL); + if (!desc_blob) + return; + desc_blob->data = cq->base; + desc_blob->size = (unsigned long)cq->num_descs * cq->desc_size; + debugfs_create_blob("desc_blob", 0400, cq_dentry, desc_blob); + + if (qcq->flags & IONIC_QCQ_F_INTR) { + intr_dentry = debugfs_create_dir("intr", qcq->dentry); + + debugfs_create_u32("index", 0400, intr_dentry, + &intr->index); + debugfs_create_u32("vector", 0400, intr_dentry, + &intr->vector); + + intr_ctrl_regset = devm_kzalloc(dev, sizeof(*intr_ctrl_regset), + GFP_KERNEL); + if (!intr_ctrl_regset) + return; + intr_ctrl_regset->regs = intr_ctrl_regs; + intr_ctrl_regset->nregs = ARRAY_SIZE(intr_ctrl_regs); + intr_ctrl_regset->base = &idev->intr_ctrl[intr->index]; + + debugfs_create_regset32("intr_ctrl", 0400, intr_dentry, + intr_ctrl_regset); + } + + if (qcq->flags & IONIC_QCQ_F_NOTIFYQ) { + stats_dentry = debugfs_create_dir("notifyblock", qcq->dentry); + + debugfs_create_u64("eid", 0400, stats_dentry, + (u64 *)&lif->info->status.eid); + debugfs_create_u16("link_status", 0400, stats_dentry, + (u16 *)&lif->info->status.link_status); + debugfs_create_u32("link_speed", 0400, stats_dentry, + (u32 *)&lif->info->status.link_speed); + debugfs_create_u16("link_down_count", 0400, stats_dentry, + (u16 *)&lif->info->status.link_down_count); + } +} + +static int netdev_show(struct seq_file *seq, void *v) +{ + struct net_device *netdev = seq->private; + + seq_printf(seq, "%s\n", netdev->name); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(netdev); + +void ionic_debugfs_add_lif(struct ionic_lif *lif) +{ + lif->dentry = debugfs_create_dir(lif->name, lif->ionic->dentry); + debugfs_create_file("netdev", 0400, lif->dentry, + lif->netdev, &netdev_fops); +} + +void ionic_debugfs_del_lif(struct ionic_lif *lif) +{ + debugfs_remove_recursive(lif->dentry); + lif->dentry = NULL; +} + +void ionic_debugfs_del_qcq(struct ionic_qcq *qcq) +{ + debugfs_remove_recursive(qcq->dentry); + qcq->dentry = NULL; +} + +#endif diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.h b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.h new file mode 100644 index 000000000000..c44ebde170b6 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#ifndef _IONIC_DEBUGFS_H_ +#define _IONIC_DEBUGFS_H_ + +#include <linux/debugfs.h> + +#ifdef CONFIG_DEBUG_FS + +void ionic_debugfs_create(void); +void ionic_debugfs_destroy(void); +void ionic_debugfs_add_dev(struct ionic *ionic); +void ionic_debugfs_del_dev(struct ionic *ionic); +void ionic_debugfs_add_ident(struct ionic *ionic); +void ionic_debugfs_add_sizes(struct ionic *ionic); +void ionic_debugfs_add_lif(struct ionic_lif *lif); +void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq); +void ionic_debugfs_del_lif(struct ionic_lif *lif); +void ionic_debugfs_del_qcq(struct ionic_qcq *qcq); +#else +static inline void ionic_debugfs_create(void) { } +static inline void ionic_debugfs_destroy(void) { } +static inline void ionic_debugfs_add_dev(struct ionic *ionic) { } +static inline void ionic_debugfs_del_dev(struct 
ionic *ionic) { } +static inline void ionic_debugfs_add_ident(struct ionic *ionic) { } +static inline void ionic_debugfs_add_sizes(struct ionic *ionic) { } +static inline void ionic_debugfs_add_lif(struct ionic_lif *lif) { } +static inline void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq) { } +static inline void ionic_debugfs_del_lif(struct ionic_lif *lif) { } +static inline void ionic_debugfs_del_qcq(struct ionic_qcq *qcq) { } +#endif + +#endif /* _IONIC_DEBUGFS_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c new file mode 100644 index 000000000000..d168a6435322 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c @@ -0,0 +1,500 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/etherdevice.h> +#include "ionic.h" +#include "ionic_dev.h" +#include "ionic_lif.h" + +void ionic_init_devinfo(struct ionic *ionic) +{ + struct ionic_dev *idev = &ionic->idev; + + idev->dev_info.asic_type = ioread8(&idev->dev_info_regs->asic_type); + idev->dev_info.asic_rev = ioread8(&idev->dev_info_regs->asic_rev); + + memcpy_fromio(idev->dev_info.fw_version, + idev->dev_info_regs->fw_version, + IONIC_DEVINFO_FWVERS_BUFLEN); + + memcpy_fromio(idev->dev_info.serial_num, + idev->dev_info_regs->serial_num, + IONIC_DEVINFO_SERIAL_BUFLEN); + + idev->dev_info.fw_version[IONIC_DEVINFO_FWVERS_BUFLEN] = 0; + idev->dev_info.serial_num[IONIC_DEVINFO_SERIAL_BUFLEN] = 0; + + dev_dbg(ionic->dev, "fw_version %s\n", idev->dev_info.fw_version); +} + +int ionic_dev_setup(struct ionic *ionic) +{ + struct ionic_dev_bar *bar = ionic->bars; + unsigned int num_bars = ionic->num_bars; + struct ionic_dev *idev = &ionic->idev; + struct device *dev = ionic->dev; + u32 sig; + + /* BAR0: dev_cmd and interrupts */ + if (num_bars < 1) { + dev_err(dev, "No bars found, aborting\n"); + return -EFAULT; + } + + if (bar->len < IONIC_BAR0_SIZE) { + dev_err(dev, "Resource bar size %lu too small, aborting\n", + bar->len); + return -EFAULT; + } + + idev->dev_info_regs = bar->vaddr + IONIC_BAR0_DEV_INFO_REGS_OFFSET; + idev->dev_cmd_regs = bar->vaddr + IONIC_BAR0_DEV_CMD_REGS_OFFSET; + idev->intr_status = bar->vaddr + IONIC_BAR0_INTR_STATUS_OFFSET; + idev->intr_ctrl = bar->vaddr + IONIC_BAR0_INTR_CTRL_OFFSET; + + sig = ioread32(&idev->dev_info_regs->signature); + if (sig != IONIC_DEV_INFO_SIGNATURE) { + dev_err(dev, "Incompatible firmware signature %x", sig); + return -EFAULT; + } + + ionic_init_devinfo(ionic); + + /* BAR1: doorbells */ + bar++; + if (num_bars < 2) { + dev_err(dev, "Doorbell bar missing, aborting\n"); + return -EFAULT; + } + + idev->db_pages = bar->vaddr; + idev->phy_db_pages = bar->bus_addr; + + return 0; +} + +void ionic_dev_teardown(struct ionic *ionic) +{ + /* place holder */ +} + +/* Devcmd Interface */ +u8 ionic_dev_cmd_status(struct ionic_dev *idev) +{ + return ioread8(&idev->dev_cmd_regs->comp.comp.status); +} + +bool ionic_dev_cmd_done(struct ionic_dev *idev) +{ + return ioread32(&idev->dev_cmd_regs->done) & IONIC_DEV_CMD_DONE; +} + +void ionic_dev_cmd_comp(struct ionic_dev *idev, union ionic_dev_cmd_comp *comp) +{ + memcpy_fromio(comp, &idev->dev_cmd_regs->comp, sizeof(*comp)); +} + +void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd) +{ + memcpy_toio(&idev->dev_cmd_regs->cmd, cmd, sizeof(*cmd)); + iowrite32(0, 
&idev->dev_cmd_regs->done); + iowrite32(1, &idev->dev_cmd_regs->doorbell); +} + +/* Device commands */ +void ionic_dev_cmd_identify(struct ionic_dev *idev, u8 ver) +{ + union ionic_dev_cmd cmd = { + .identify.opcode = IONIC_CMD_IDENTIFY, + .identify.ver = ver, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_init(struct ionic_dev *idev) +{ + union ionic_dev_cmd cmd = { + .init.opcode = IONIC_CMD_INIT, + .init.type = 0, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_reset(struct ionic_dev *idev) +{ + union ionic_dev_cmd cmd = { + .reset.opcode = IONIC_CMD_RESET, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +/* Port commands */ +void ionic_dev_cmd_port_identify(struct ionic_dev *idev) +{ + union ionic_dev_cmd cmd = { + .port_init.opcode = IONIC_CMD_PORT_IDENTIFY, + .port_init.index = 0, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_port_init(struct ionic_dev *idev) +{ + union ionic_dev_cmd cmd = { + .port_init.opcode = IONIC_CMD_PORT_INIT, + .port_init.index = 0, + .port_init.info_pa = cpu_to_le64(idev->port_info_pa), + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_port_reset(struct ionic_dev *idev) +{ + union ionic_dev_cmd cmd = { + .port_reset.opcode = IONIC_CMD_PORT_RESET, + .port_reset.index = 0, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_port_state(struct ionic_dev *idev, u8 state) +{ + union ionic_dev_cmd cmd = { + .port_setattr.opcode = IONIC_CMD_PORT_SETATTR, + .port_setattr.index = 0, + .port_setattr.attr = IONIC_PORT_ATTR_STATE, + .port_setattr.state = state, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_port_speed(struct ionic_dev *idev, u32 speed) +{ + union ionic_dev_cmd cmd = { + .port_setattr.opcode = IONIC_CMD_PORT_SETATTR, + .port_setattr.index = 0, + .port_setattr.attr = IONIC_PORT_ATTR_SPEED, + .port_setattr.speed = cpu_to_le32(speed), + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_port_autoneg(struct ionic_dev *idev, u8 an_enable) +{ + union ionic_dev_cmd cmd = { + .port_setattr.opcode = IONIC_CMD_PORT_SETATTR, + .port_setattr.index = 0, + .port_setattr.attr = IONIC_PORT_ATTR_AUTONEG, + .port_setattr.an_enable = an_enable, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_port_fec(struct ionic_dev *idev, u8 fec_type) +{ + union ionic_dev_cmd cmd = { + .port_setattr.opcode = IONIC_CMD_PORT_SETATTR, + .port_setattr.index = 0, + .port_setattr.attr = IONIC_PORT_ATTR_FEC, + .port_setattr.fec_type = fec_type, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_port_pause(struct ionic_dev *idev, u8 pause_type) +{ + union ionic_dev_cmd cmd = { + .port_setattr.opcode = IONIC_CMD_PORT_SETATTR, + .port_setattr.index = 0, + .port_setattr.attr = IONIC_PORT_ATTR_PAUSE, + .port_setattr.pause_type = pause_type, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +/* LIF commands */ +void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, u8 type, u8 ver) +{ + union ionic_dev_cmd cmd = { + .lif_identify.opcode = IONIC_CMD_LIF_IDENTIFY, + .lif_identify.type = type, + .lif_identify.ver = ver, + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_lif_init(struct ionic_dev *idev, u16 lif_index, + dma_addr_t info_pa) +{ + union ionic_dev_cmd cmd = { + .lif_init.opcode = IONIC_CMD_LIF_INIT, + .lif_init.index = cpu_to_le16(lif_index), + .lif_init.info_pa = cpu_to_le64(info_pa), + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_lif_reset(struct ionic_dev *idev, u16 lif_index) +{ + union ionic_dev_cmd cmd = { + .lif_init.opcode = 
IONIC_CMD_LIF_RESET, + .lif_init.index = cpu_to_le16(lif_index), + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq, + u16 lif_index, u16 intr_index) +{ + struct ionic_queue *q = &qcq->q; + struct ionic_cq *cq = &qcq->cq; + + union ionic_dev_cmd cmd = { + .q_init.opcode = IONIC_CMD_Q_INIT, + .q_init.lif_index = cpu_to_le16(lif_index), + .q_init.type = q->type, + .q_init.index = cpu_to_le32(q->index), + .q_init.flags = cpu_to_le16(IONIC_QINIT_F_IRQ | + IONIC_QINIT_F_ENA), + .q_init.pid = cpu_to_le16(q->pid), + .q_init.intr_index = cpu_to_le16(intr_index), + .q_init.ring_size = ilog2(q->num_descs), + .q_init.ring_base = cpu_to_le64(q->base_pa), + .q_init.cq_ring_base = cpu_to_le64(cq->base_pa), + }; + + ionic_dev_cmd_go(idev, &cmd); +} + +int ionic_db_page_num(struct ionic_lif *lif, int pid) +{ + return (lif->hw_index * lif->dbid_count) + pid; +} + +int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq, + struct ionic_intr_info *intr, + unsigned int num_descs, size_t desc_size) +{ + struct ionic_cq_info *cur; + unsigned int ring_size; + unsigned int i; + + if (desc_size == 0 || !is_power_of_2(num_descs)) + return -EINVAL; + + ring_size = ilog2(num_descs); + if (ring_size < 2 || ring_size > 16) + return -EINVAL; + + cq->lif = lif; + cq->bound_intr = intr; + cq->num_descs = num_descs; + cq->desc_size = desc_size; + cq->tail = cq->info; + cq->done_color = 1; + + cur = cq->info; + + for (i = 0; i < num_descs; i++) { + if (i + 1 == num_descs) { + cur->next = cq->info; + cur->last = true; + } else { + cur->next = cur + 1; + } + cur->index = i; + cur++; + } + + return 0; +} + +void ionic_cq_map(struct ionic_cq *cq, void *base, dma_addr_t base_pa) +{ + struct ionic_cq_info *cur; + unsigned int i; + + cq->base = base; + cq->base_pa = base_pa; + + for (i = 0, cur = cq->info; i < cq->num_descs; i++, cur++) + cur->cq_desc = base + (i * cq->desc_size); +} + +void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q) +{ + cq->bound_q = q; +} + +unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do, + ionic_cq_cb cb, ionic_cq_done_cb done_cb, + void *done_arg) +{ + unsigned int work_done = 0; + + if (work_to_do == 0) + return 0; + + while (cb(cq, cq->tail)) { + if (cq->tail->last) + cq->done_color = !cq->done_color; + cq->tail = cq->tail->next; + DEBUG_STATS_CQE_CNT(cq); + + if (++work_done >= work_to_do) + break; + } + + if (work_done && done_cb) + done_cb(done_arg); + + return work_done; +} + +int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev, + struct ionic_queue *q, unsigned int index, const char *name, + unsigned int num_descs, size_t desc_size, + size_t sg_desc_size, unsigned int pid) +{ + struct ionic_desc_info *cur; + unsigned int ring_size; + unsigned int i; + + if (desc_size == 0 || !is_power_of_2(num_descs)) + return -EINVAL; + + ring_size = ilog2(num_descs); + if (ring_size < 2 || ring_size > 16) + return -EINVAL; + + q->lif = lif; + q->idev = idev; + q->index = index; + q->num_descs = num_descs; + q->desc_size = desc_size; + q->sg_desc_size = sg_desc_size; + q->tail = q->info; + q->head = q->tail; + q->pid = pid; + + snprintf(q->name, sizeof(q->name), "L%d-%s%u", lif->index, name, index); + + cur = q->info; + + for (i = 0; i < num_descs; i++) { + if (i + 1 == num_descs) + cur->next = q->info; + else + cur->next = cur + 1; + cur->index = i; + cur->left = num_descs - i; + cur++; + } + + return 0; +} + +void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa) +{ + 
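+ /* Bind the descriptor ring memory to the queue and point each
+ * desc_info entry at its corresponding hardware descriptor.
+ */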
struct ionic_desc_info *cur; + unsigned int i; + + q->base = base; + q->base_pa = base_pa; + + for (i = 0, cur = q->info; i < q->num_descs; i++, cur++) + cur->desc = base + (i * q->desc_size); +} + +void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa) +{ + struct ionic_desc_info *cur; + unsigned int i; + + q->sg_base = base; + q->sg_base_pa = base_pa; + + for (i = 0, cur = q->info; i < q->num_descs; i++, cur++) + cur->sg_desc = base + (i * q->sg_desc_size); +} + +void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb, + void *cb_arg) +{ + struct device *dev = q->lif->ionic->dev; + struct ionic_lif *lif = q->lif; + + q->head->cb = cb; + q->head->cb_arg = cb_arg; + q->head = q->head->next; + + dev_dbg(dev, "lif=%d qname=%s qid=%d qtype=%d p_index=%d ringdb=%d\n", + q->lif->index, q->name, q->hw_type, q->hw_index, + q->head->index, ring_doorbell); + + if (ring_doorbell) + ionic_dbell_ring(lif->kern_dbpage, q->hw_type, + q->dbval | q->head->index); +} + +static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos) +{ + unsigned int mask, tail, head; + + mask = q->num_descs - 1; + tail = q->tail->index; + head = q->head->index; + + return ((pos - tail) & mask) < ((head - tail) & mask); +} + +void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info, + unsigned int stop_index) +{ + struct ionic_desc_info *desc_info; + ionic_desc_cb cb; + void *cb_arg; + + /* check for empty queue */ + if (q->tail->index == q->head->index) + return; + + /* stop index must be for a descriptor that is not yet completed */ + if (unlikely(!ionic_q_is_posted(q, stop_index))) + dev_err(q->lif->ionic->dev, + "ionic stop is not posted %s stop %u tail %u head %u\n", + q->name, stop_index, q->tail->index, q->head->index); + + do { + desc_info = q->tail; + q->tail = desc_info->next; + + cb = desc_info->cb; + cb_arg = desc_info->cb_arg; + + desc_info->cb = NULL; + desc_info->cb_arg = NULL; + + if (cb) + cb(q, desc_info, cq_info, cb_arg); + } while (desc_info->index != stop_index); +} diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h new file mode 100644 index 000000000000..9610aeb7d5f4 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h @@ -0,0 +1,299 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#ifndef _IONIC_DEV_H_ +#define _IONIC_DEV_H_ + +#include <linux/mutex.h> +#include <linux/workqueue.h> + +#include "ionic_if.h" +#include "ionic_regs.h" + +#define IONIC_MIN_MTU ETH_MIN_MTU +#define IONIC_MAX_MTU 9194 +#define IONIC_MAX_TXRX_DESC 16384 +#define IONIC_MIN_TXRX_DESC 16 +#define IONIC_DEF_TXRX_DESC 4096 +#define IONIC_LIFS_MAX 1024 +#define IONIC_ITR_COAL_USEC_DEFAULT 64 + +#define IONIC_DEV_CMD_REG_VERSION 1 +#define IONIC_DEV_INFO_REG_COUNT 32 +#define IONIC_DEV_CMD_REG_COUNT 32 + +struct ionic_dev_bar { + void __iomem *vaddr; + phys_addr_t bus_addr; + unsigned long len; + int res_index; +}; + +/* Registers */ +static_assert(sizeof(struct ionic_intr) == 32); + +static_assert(sizeof(struct ionic_doorbell) == 8); +static_assert(sizeof(struct ionic_intr_status) == 8); +static_assert(sizeof(union ionic_dev_regs) == 4096); +static_assert(sizeof(union ionic_dev_info_regs) == 2048); +static_assert(sizeof(union ionic_dev_cmd_regs) == 2048); +static_assert(sizeof(struct ionic_lif_stats) == 1024); + +static_assert(sizeof(struct ionic_admin_cmd) == 64); +static_assert(sizeof(struct ionic_admin_comp) == 16); 
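+/* The sizes checked here are part of the device ABI: a failed assert
+ * flags a structure layout change at build time instead of a silently
+ * corrupted command mailbox at run time.
+ */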
+static_assert(sizeof(struct ionic_nop_cmd) == 64); +static_assert(sizeof(struct ionic_nop_comp) == 16); + +/* Device commands */ +static_assert(sizeof(struct ionic_dev_identify_cmd) == 64); +static_assert(sizeof(struct ionic_dev_identify_comp) == 16); +static_assert(sizeof(struct ionic_dev_init_cmd) == 64); +static_assert(sizeof(struct ionic_dev_init_comp) == 16); +static_assert(sizeof(struct ionic_dev_reset_cmd) == 64); +static_assert(sizeof(struct ionic_dev_reset_comp) == 16); +static_assert(sizeof(struct ionic_dev_getattr_cmd) == 64); +static_assert(sizeof(struct ionic_dev_getattr_comp) == 16); +static_assert(sizeof(struct ionic_dev_setattr_cmd) == 64); +static_assert(sizeof(struct ionic_dev_setattr_comp) == 16); + +/* Port commands */ +static_assert(sizeof(struct ionic_port_identify_cmd) == 64); +static_assert(sizeof(struct ionic_port_identify_comp) == 16); +static_assert(sizeof(struct ionic_port_init_cmd) == 64); +static_assert(sizeof(struct ionic_port_init_comp) == 16); +static_assert(sizeof(struct ionic_port_reset_cmd) == 64); +static_assert(sizeof(struct ionic_port_reset_comp) == 16); +static_assert(sizeof(struct ionic_port_getattr_cmd) == 64); +static_assert(sizeof(struct ionic_port_getattr_comp) == 16); +static_assert(sizeof(struct ionic_port_setattr_cmd) == 64); +static_assert(sizeof(struct ionic_port_setattr_comp) == 16); + +/* LIF commands */ +static_assert(sizeof(struct ionic_lif_init_cmd) == 64); +static_assert(sizeof(struct ionic_lif_init_comp) == 16); +static_assert(sizeof(struct ionic_lif_reset_cmd) == 64); +static_assert(sizeof(ionic_lif_reset_comp) == 16); +static_assert(sizeof(struct ionic_lif_getattr_cmd) == 64); +static_assert(sizeof(struct ionic_lif_getattr_comp) == 16); +static_assert(sizeof(struct ionic_lif_setattr_cmd) == 64); +static_assert(sizeof(struct ionic_lif_setattr_comp) == 16); + +static_assert(sizeof(struct ionic_q_init_cmd) == 64); +static_assert(sizeof(struct ionic_q_init_comp) == 16); +static_assert(sizeof(struct ionic_q_control_cmd) == 64); +static_assert(sizeof(ionic_q_control_comp) == 16); + +static_assert(sizeof(struct ionic_rx_mode_set_cmd) == 64); +static_assert(sizeof(ionic_rx_mode_set_comp) == 16); +static_assert(sizeof(struct ionic_rx_filter_add_cmd) == 64); +static_assert(sizeof(struct ionic_rx_filter_add_comp) == 16); +static_assert(sizeof(struct ionic_rx_filter_del_cmd) == 64); +static_assert(sizeof(ionic_rx_filter_del_comp) == 16); + +/* RDMA commands */ +static_assert(sizeof(struct ionic_rdma_reset_cmd) == 64); +static_assert(sizeof(struct ionic_rdma_queue_cmd) == 64); + +/* Events */ +static_assert(sizeof(struct ionic_notifyq_cmd) == 4); +static_assert(sizeof(union ionic_notifyq_comp) == 64); +static_assert(sizeof(struct ionic_notifyq_event) == 64); +static_assert(sizeof(struct ionic_link_change_event) == 64); +static_assert(sizeof(struct ionic_reset_event) == 64); +static_assert(sizeof(struct ionic_heartbeat_event) == 64); +static_assert(sizeof(struct ionic_log_event) == 64); + +/* I/O */ +static_assert(sizeof(struct ionic_txq_desc) == 16); +static_assert(sizeof(struct ionic_txq_sg_desc) == 128); +static_assert(sizeof(struct ionic_txq_comp) == 16); + +static_assert(sizeof(struct ionic_rxq_desc) == 16); +static_assert(sizeof(struct ionic_rxq_sg_desc) == 128); +static_assert(sizeof(struct ionic_rxq_comp) == 16); + +struct ionic_devinfo { + u8 asic_type; + u8 asic_rev; + char fw_version[IONIC_DEVINFO_FWVERS_BUFLEN + 1]; + char serial_num[IONIC_DEVINFO_SERIAL_BUFLEN + 1]; +}; + +struct ionic_dev { + union ionic_dev_info_regs __iomem 
*dev_info_regs; + union ionic_dev_cmd_regs __iomem *dev_cmd_regs; + + u64 __iomem *db_pages; + dma_addr_t phy_db_pages; + + struct ionic_intr __iomem *intr_ctrl; + u64 __iomem *intr_status; + + u32 port_info_sz; + struct ionic_port_info *port_info; + dma_addr_t port_info_pa; + + struct ionic_devinfo dev_info; +}; + +struct ionic_cq_info { + void *cq_desc; + struct ionic_cq_info *next; + unsigned int index; + bool last; +}; + +struct ionic_queue; +struct ionic_qcq; +struct ionic_desc_info; + +typedef void (*ionic_desc_cb)(struct ionic_queue *q, + struct ionic_desc_info *desc_info, + struct ionic_cq_info *cq_info, void *cb_arg); + +struct ionic_desc_info { + void *desc; + void *sg_desc; + struct ionic_desc_info *next; + unsigned int index; + unsigned int left; + ionic_desc_cb cb; + void *cb_arg; +}; + +#define QUEUE_NAME_MAX_SZ 32 + +struct ionic_queue { + u64 dbell_count; + u64 drop; + u64 stop; + u64 wake; + struct ionic_lif *lif; + struct ionic_desc_info *info; + struct ionic_desc_info *tail; + struct ionic_desc_info *head; + struct ionic_dev *idev; + unsigned int index; + unsigned int type; + unsigned int hw_index; + unsigned int hw_type; + u64 dbval; + void *base; + void *sg_base; + dma_addr_t base_pa; + dma_addr_t sg_base_pa; + unsigned int num_descs; + unsigned int desc_size; + unsigned int sg_desc_size; + unsigned int pid; + char name[QUEUE_NAME_MAX_SZ]; +}; + +#define INTR_INDEX_NOT_ASSIGNED -1 +#define INTR_NAME_MAX_SZ 32 + +struct ionic_intr_info { + char name[INTR_NAME_MAX_SZ]; + unsigned int index; + unsigned int vector; + u64 rearm_count; + unsigned int cpu; + cpumask_t affinity_mask; +}; + +struct ionic_cq { + void *base; + dma_addr_t base_pa; + struct ionic_lif *lif; + struct ionic_cq_info *info; + struct ionic_cq_info *tail; + struct ionic_queue *bound_q; + struct ionic_intr_info *bound_intr; + bool done_color; + unsigned int num_descs; + u64 compl_count; + unsigned int desc_size; +}; + +struct ionic; + +static inline void ionic_intr_init(struct ionic_dev *idev, + struct ionic_intr_info *intr, + unsigned long index) +{ + ionic_intr_clean(idev->intr_ctrl, index); + intr->index = index; +} + +static inline unsigned int ionic_q_space_avail(struct ionic_queue *q) +{ + unsigned int avail = q->tail->index; + + if (q->head->index >= avail) + avail += q->head->left - 1; + else + avail -= q->head->index + 1; + + return avail; +} + +static inline bool ionic_q_has_space(struct ionic_queue *q, unsigned int want) +{ + return ionic_q_space_avail(q) >= want; +} + +void ionic_init_devinfo(struct ionic *ionic); +int ionic_dev_setup(struct ionic *ionic); +void ionic_dev_teardown(struct ionic *ionic); + +void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd); +u8 ionic_dev_cmd_status(struct ionic_dev *idev); +bool ionic_dev_cmd_done(struct ionic_dev *idev); +void ionic_dev_cmd_comp(struct ionic_dev *idev, union ionic_dev_cmd_comp *comp); + +void ionic_dev_cmd_identify(struct ionic_dev *idev, u8 ver); +void ionic_dev_cmd_init(struct ionic_dev *idev); +void ionic_dev_cmd_reset(struct ionic_dev *idev); + +void ionic_dev_cmd_port_identify(struct ionic_dev *idev); +void ionic_dev_cmd_port_init(struct ionic_dev *idev); +void ionic_dev_cmd_port_reset(struct ionic_dev *idev); +void ionic_dev_cmd_port_state(struct ionic_dev *idev, u8 state); +void ionic_dev_cmd_port_speed(struct ionic_dev *idev, u32 speed); +void ionic_dev_cmd_port_autoneg(struct ionic_dev *idev, u8 an_enable); +void ionic_dev_cmd_port_fec(struct ionic_dev *idev, u8 fec_type); +void ionic_dev_cmd_port_pause(struct 
ionic_dev *idev, u8 pause_type); + +void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, u8 type, u8 ver); +void ionic_dev_cmd_lif_init(struct ionic_dev *idev, u16 lif_index, + dma_addr_t addr); +void ionic_dev_cmd_lif_reset(struct ionic_dev *idev, u16 lif_index); +void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq, + u16 lif_index, u16 intr_index); + +int ionic_db_page_num(struct ionic_lif *lif, int pid); + +int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq, + struct ionic_intr_info *intr, + unsigned int num_descs, size_t desc_size); +void ionic_cq_map(struct ionic_cq *cq, void *base, dma_addr_t base_pa); +void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q); +typedef bool (*ionic_cq_cb)(struct ionic_cq *cq, struct ionic_cq_info *cq_info); +typedef void (*ionic_cq_done_cb)(void *done_arg); +unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do, + ionic_cq_cb cb, ionic_cq_done_cb done_cb, + void *done_arg); + +int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev, + struct ionic_queue *q, unsigned int index, const char *name, + unsigned int num_descs, size_t desc_size, + size_t sg_desc_size, unsigned int pid); +void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa); +void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa); +void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb, + void *cb_arg); +void ionic_q_rewind(struct ionic_queue *q, struct ionic_desc_info *start); +void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info, + unsigned int stop_index); + +#endif /* _IONIC_DEV_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c new file mode 100644 index 000000000000..af1647afa4e8 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#include <linux/module.h> +#include <linux/netdevice.h> + +#include "ionic.h" +#include "ionic_bus.h" +#include "ionic_lif.h" +#include "ionic_devlink.h" + +static int ionic_dl_info_get(struct devlink *dl, struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct ionic *ionic = devlink_priv(dl); + struct ionic_dev *idev = &ionic->idev; + char buf[16]; + int err = 0; + + err = devlink_info_driver_name_put(req, IONIC_DRV_NAME); + if (err) + goto info_out; + + err = devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW, + idev->dev_info.fw_version); + if (err) + goto info_out; + + snprintf(buf, sizeof(buf), "0x%x", idev->dev_info.asic_type); + err = devlink_info_version_fixed_put(req, + DEVLINK_INFO_VERSION_GENERIC_ASIC_ID, + buf); + if (err) + goto info_out; + + snprintf(buf, sizeof(buf), "0x%x", idev->dev_info.asic_rev); + err = devlink_info_version_fixed_put(req, + DEVLINK_INFO_VERSION_GENERIC_ASIC_REV, + buf); + if (err) + goto info_out; + + err = devlink_info_serial_number_put(req, idev->dev_info.serial_num); + +info_out: + return err; +} + +static const struct devlink_ops ionic_dl_ops = { + .info_get = ionic_dl_info_get, +}; + +struct ionic *ionic_devlink_alloc(struct device *dev) +{ + struct devlink *dl; + + dl = devlink_alloc(&ionic_dl_ops, sizeof(struct ionic)); + + return devlink_priv(dl); +} + +void ionic_devlink_free(struct ionic *ionic) +{ + struct devlink *dl = priv_to_devlink(ionic); + + devlink_free(dl); +} + +int ionic_devlink_register(struct ionic *ionic) 
+{ + struct devlink *dl = priv_to_devlink(ionic); + int err; + + err = devlink_register(dl, ionic->dev); + if (err) { + dev_warn(ionic->dev, "devlink_register failed: %d\n", err); + return err; + } + + devlink_port_attrs_set(&ionic->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL, + 0, false, 0, NULL, 0); + err = devlink_port_register(dl, &ionic->dl_port, 0); + if (err) + dev_err(ionic->dev, "devlink_port_register failed: %d\n", err); + else + devlink_port_type_eth_set(&ionic->dl_port, + ionic->master_lif->netdev); + + return err; +} + +void ionic_devlink_unregister(struct ionic *ionic) +{ + struct devlink *dl = priv_to_devlink(ionic); + + devlink_port_unregister(&ionic->dl_port); + devlink_unregister(dl); +} diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.h b/drivers/net/ethernet/pensando/ionic/ionic_devlink.h new file mode 100644 index 000000000000..0690172fc57a --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#ifndef _IONIC_DEVLINK_H_ +#define _IONIC_DEVLINK_H_ + +#include <net/devlink.h> + +struct ionic *ionic_devlink_alloc(struct device *dev); +void ionic_devlink_free(struct ionic *ionic); +int ionic_devlink_register(struct ionic *ionic); +void ionic_devlink_unregister(struct ionic *ionic); + +#endif /* _IONIC_DEVLINK_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c new file mode 100644 index 000000000000..7d10265f782a --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c @@ -0,0 +1,779 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#include <linux/module.h> +#include <linux/netdevice.h> + +#include "ionic.h" +#include "ionic_bus.h" +#include "ionic_lif.h" +#include "ionic_ethtool.h" +#include "ionic_stats.h" + +static const char ionic_priv_flags_strings[][ETH_GSTRING_LEN] = { +#define PRIV_F_SW_DBG_STATS BIT(0) + "sw-dbg-stats", +}; +#define PRIV_FLAGS_COUNT ARRAY_SIZE(ionic_priv_flags_strings) + +static void ionic_get_stats_strings(struct ionic_lif *lif, u8 *buf) +{ + u32 i; + + for (i = 0; i < ionic_num_stats_grps; i++) + ionic_stats_groups[i].get_strings(lif, &buf); +} + +static void ionic_get_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *buf) +{ + struct ionic_lif *lif; + u32 i; + + lif = netdev_priv(netdev); + + memset(buf, 0, stats->n_stats * sizeof(*buf)); + for (i = 0; i < ionic_num_stats_grps; i++) + ionic_stats_groups[i].get_values(lif, &buf); +} + +static int ionic_get_stats_count(struct ionic_lif *lif) +{ + int i, num_stats = 0; + + for (i = 0; i < ionic_num_stats_grps; i++) + num_stats += ionic_stats_groups[i].get_count(lif); + + return num_stats; +} + +static int ionic_get_sset_count(struct net_device *netdev, int sset) +{ + struct ionic_lif *lif = netdev_priv(netdev); + int count = 0; + + switch (sset) { + case ETH_SS_STATS: + count = ionic_get_stats_count(lif); + break; + case ETH_SS_PRIV_FLAGS: + count = PRIV_FLAGS_COUNT; + break; + } + return count; +} + +static void ionic_get_strings(struct net_device *netdev, + u32 sset, u8 *buf) +{ + struct ionic_lif *lif = netdev_priv(netdev); + + switch (sset) { + case ETH_SS_STATS: + ionic_get_stats_strings(lif, buf); + break; + case ETH_SS_PRIV_FLAGS: + memcpy(buf, ionic_priv_flags_strings, + PRIV_FLAGS_COUNT * ETH_GSTRING_LEN); + break; + } +} + +static void ionic_get_drvinfo(struct net_device *netdev, + struct 
ethtool_drvinfo *drvinfo) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic *ionic = lif->ionic; + + strlcpy(drvinfo->driver, IONIC_DRV_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, IONIC_DRV_VERSION, sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, ionic->idev.dev_info.fw_version, + sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, ionic_bus_info(ionic), + sizeof(drvinfo->bus_info)); +} + +static int ionic_get_regs_len(struct net_device *netdev) +{ + return (IONIC_DEV_INFO_REG_COUNT + IONIC_DEV_CMD_REG_COUNT) * sizeof(u32); +} + +static void ionic_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *p) +{ + struct ionic_lif *lif = netdev_priv(netdev); + unsigned int size; + + regs->version = IONIC_DEV_CMD_REG_VERSION; + + size = IONIC_DEV_INFO_REG_COUNT * sizeof(u32); + memcpy_fromio(p, lif->ionic->idev.dev_info_regs->words, size); + + size = IONIC_DEV_CMD_REG_COUNT * sizeof(u32); + memcpy_fromio(p, lif->ionic->idev.dev_cmd_regs->words, size); +} + +static int ionic_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *ks) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_dev *idev = &lif->ionic->idev; + int copper_seen = 0; + + ethtool_link_ksettings_zero_link_mode(ks, supported); + + /* The port_info data is found in a DMA space that the NIC keeps + * up-to-date, so there's no need to request the data from the + * NIC, we already have it in our memory space. + */ + + switch (le16_to_cpu(idev->port_info->status.xcvr.pid)) { + /* Copper */ + case IONIC_XCVR_PID_QSFP_100G_CR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseCR4_Full); + copper_seen++; + break; + case IONIC_XCVR_PID_QSFP_40GBASE_CR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseCR4_Full); + copper_seen++; + break; + case IONIC_XCVR_PID_SFP_25GBASE_CR_S: + case IONIC_XCVR_PID_SFP_25GBASE_CR_L: + case IONIC_XCVR_PID_SFP_25GBASE_CR_N: + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseCR_Full); + copper_seen++; + break; + case IONIC_XCVR_PID_SFP_10GBASE_AOC: + case IONIC_XCVR_PID_SFP_10GBASE_CU: + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseCR_Full); + copper_seen++; + break; + + /* Fibre */ + case IONIC_XCVR_PID_QSFP_100G_SR4: + case IONIC_XCVR_PID_QSFP_100G_AOC: + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseSR4_Full); + break; + case IONIC_XCVR_PID_QSFP_100G_LR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseLR4_ER4_Full); + break; + case IONIC_XCVR_PID_QSFP_100G_ER4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseLR4_ER4_Full); + break; + case IONIC_XCVR_PID_QSFP_40GBASE_SR4: + case IONIC_XCVR_PID_QSFP_40GBASE_AOC: + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseSR4_Full); + break; + case IONIC_XCVR_PID_QSFP_40GBASE_LR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseLR4_Full); + break; + case IONIC_XCVR_PID_SFP_25GBASE_SR: + case IONIC_XCVR_PID_SFP_25GBASE_AOC: + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseSR_Full); + break; + case IONIC_XCVR_PID_SFP_10GBASE_SR: + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseSR_Full); + break; + case IONIC_XCVR_PID_SFP_10GBASE_LR: + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseLR_Full); + break; + case IONIC_XCVR_PID_SFP_10GBASE_LRM: + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseLRM_Full); + break; + case IONIC_XCVR_PID_SFP_10GBASE_ER: + 
ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseER_Full); + break; + case IONIC_XCVR_PID_UNKNOWN: + /* This means there's no module plugged in */ + break; + default: + dev_info(lif->ionic->dev, "unknown xcvr type pid=%d / 0x%x\n", + idev->port_info->status.xcvr.pid, + idev->port_info->status.xcvr.pid); + break; + } + + bitmap_copy(ks->link_modes.advertising, ks->link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS); + + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); + if (idev->port_info->config.fec_type == IONIC_PORT_FEC_TYPE_FC) + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_BASER); + else if (idev->port_info->config.fec_type == IONIC_PORT_FEC_TYPE_RS) + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); + + ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(ks, supported, Pause); + + if (idev->port_info->status.xcvr.phy == IONIC_PHY_TYPE_COPPER || + copper_seen) + ks->base.port = PORT_DA; + else if (idev->port_info->status.xcvr.phy == IONIC_PHY_TYPE_FIBER) + ks->base.port = PORT_FIBRE; + else + ks->base.port = PORT_NONE; + + if (ks->base.port != PORT_NONE) { + ks->base.speed = le32_to_cpu(lif->info->status.link_speed); + + if (le16_to_cpu(lif->info->status.link_status)) + ks->base.duplex = DUPLEX_FULL; + else + ks->base.duplex = DUPLEX_UNKNOWN; + + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + + if (idev->port_info->config.an_enable) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + Autoneg); + ks->base.autoneg = AUTONEG_ENABLE; + } + } + + return 0; +} + +static int ionic_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ks) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic *ionic = lif->ionic; + struct ionic_dev *idev; + u32 req_rs, req_fc; + u8 fec_type; + int err = 0; + + idev = &lif->ionic->idev; + fec_type = IONIC_PORT_FEC_TYPE_NONE; + + /* set autoneg */ + if (ks->base.autoneg != idev->port_info->config.an_enable) { + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_port_autoneg(idev, ks->base.autoneg); + err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); + mutex_unlock(&ionic->dev_cmd_lock); + if (err) + return err; + } + + /* set speed */ + if (ks->base.speed != le32_to_cpu(idev->port_info->config.speed)) { + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_port_speed(idev, ks->base.speed); + err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); + mutex_unlock(&ionic->dev_cmd_lock); + if (err) + return err; + } + + /* set FEC */ + req_rs = ethtool_link_ksettings_test_link_mode(ks, advertising, FEC_RS); + req_fc = ethtool_link_ksettings_test_link_mode(ks, advertising, FEC_BASER); + if (req_rs && req_fc) { + netdev_info(netdev, "Only select one FEC mode at a time\n"); + return -EINVAL; + } else if (req_fc) { + fec_type = IONIC_PORT_FEC_TYPE_FC; + } else if (req_rs) { + fec_type = IONIC_PORT_FEC_TYPE_RS; + } else if (!(req_rs | req_fc)) { + fec_type = IONIC_PORT_FEC_TYPE_NONE; + } + + if (fec_type != idev->port_info->config.fec_type) { + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_port_fec(idev, fec_type); + err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); + mutex_unlock(&ionic->dev_cmd_lock); + if (err) + return err; + } + + return 0; +} + +static void ionic_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ionic_lif *lif = netdev_priv(netdev); + u8 pause_type; + + pause->autoneg = 0; + + 
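+ /* Pause autonegotiation is not supported (ionic_set_pauseparam()
+ * rejects it), so report only the currently forced rx/tx settings.
+ */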
pause_type = lif->ionic->idev.port_info->config.pause_type; + if (pause_type) { + pause->rx_pause = pause_type & IONIC_PAUSE_F_RX ? 1 : 0; + pause->tx_pause = pause_type & IONIC_PAUSE_F_TX ? 1 : 0; + } +} + +static int ionic_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic *ionic = lif->ionic; + u32 requested_pause; + int err; + + if (pause->autoneg) + return -EOPNOTSUPP; + + /* change both at the same time */ + requested_pause = IONIC_PORT_PAUSE_TYPE_LINK; + if (pause->rx_pause) + requested_pause |= IONIC_PAUSE_F_RX; + if (pause->tx_pause) + requested_pause |= IONIC_PAUSE_F_TX; + + if (requested_pause == lif->ionic->idev.port_info->config.pause_type) + return 0; + + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_port_pause(&lif->ionic->idev, requested_pause); + err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); + mutex_unlock(&ionic->dev_cmd_lock); + if (err) + return err; + + return 0; +} + +static int ionic_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coalesce) +{ + struct ionic_lif *lif = netdev_priv(netdev); + + /* Tx uses Rx interrupt */ + coalesce->tx_coalesce_usecs = lif->rx_coalesce_usecs; + coalesce->rx_coalesce_usecs = lif->rx_coalesce_usecs; + + return 0; +} + +static int ionic_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coalesce) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_identity *ident; + struct ionic_qcq *qcq; + unsigned int i; + u32 usecs; + u32 coal; + + if (coalesce->rx_max_coalesced_frames || + coalesce->rx_coalesce_usecs_irq || + coalesce->rx_max_coalesced_frames_irq || + coalesce->tx_max_coalesced_frames || + coalesce->tx_coalesce_usecs_irq || + coalesce->tx_max_coalesced_frames_irq || + coalesce->stats_block_coalesce_usecs || + coalesce->use_adaptive_rx_coalesce || + coalesce->use_adaptive_tx_coalesce || + coalesce->pkt_rate_low || + coalesce->rx_coalesce_usecs_low || + coalesce->rx_max_coalesced_frames_low || + coalesce->tx_coalesce_usecs_low || + coalesce->tx_max_coalesced_frames_low || + coalesce->pkt_rate_high || + coalesce->rx_coalesce_usecs_high || + coalesce->rx_max_coalesced_frames_high || + coalesce->tx_coalesce_usecs_high || + coalesce->tx_max_coalesced_frames_high || + coalesce->rate_sample_interval) + return -EINVAL; + + ident = &lif->ionic->ident; + if (ident->dev.intr_coal_div == 0) { + netdev_warn(netdev, "bad HW value in dev.intr_coal_div = %d\n", + ident->dev.intr_coal_div); + return -EIO; + } + + /* Tx uses Rx interrupt, so only change Rx */ + if (coalesce->tx_coalesce_usecs != lif->rx_coalesce_usecs) { + netdev_warn(netdev, "only the rx-usecs can be changed\n"); + return -EINVAL; + } + + coal = ionic_coal_usec_to_hw(lif->ionic, coalesce->rx_coalesce_usecs); + + if (coal > IONIC_INTR_CTRL_COAL_MAX) + return -ERANGE; + + /* If they asked for non-zero and it resolved to zero, bump it up */ + if (!coal && coalesce->rx_coalesce_usecs) + coal = 1; + + /* Convert it back to get device resolution */ + usecs = ionic_coal_hw_to_usec(lif->ionic, coal); + + if (usecs != lif->rx_coalesce_usecs) { + lif->rx_coalesce_usecs = usecs; + + if (test_bit(IONIC_LIF_UP, lif->state)) { + for (i = 0; i < lif->nxqs; i++) { + qcq = lif->rxqcqs[i].qcq; + ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, + qcq->intr.index, coal); + } + } + } + + return 0; +} + +static void ionic_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ionic_lif *lif = netdev_priv(netdev); + + 
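+ /* Report the fixed device limits alongside the current per-LIF
+ * descriptor counts; new sizes must be powers of 2, see
+ * ionic_set_ringparam().
+ */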
ring->tx_max_pending = IONIC_MAX_TXRX_DESC; + ring->tx_pending = lif->ntxq_descs; + ring->rx_max_pending = IONIC_MAX_TXRX_DESC; + ring->rx_pending = lif->nrxq_descs; +} + +static int ionic_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ionic_lif *lif = netdev_priv(netdev); + bool running; + + if (ring->rx_mini_pending || ring->rx_jumbo_pending) { + netdev_info(netdev, "Changing jumbo or mini descriptors not supported\n"); + return -EINVAL; + } + + if (!is_power_of_2(ring->tx_pending) || + !is_power_of_2(ring->rx_pending)) { + netdev_info(netdev, "Descriptor count must be a power of 2\n"); + return -EINVAL; + } + + /* if nothing to do return success */ + if (ring->tx_pending == lif->ntxq_descs && + ring->rx_pending == lif->nrxq_descs) + return 0; + + if (!ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET)) + return -EBUSY; + + running = test_bit(IONIC_LIF_UP, lif->state); + if (running) + ionic_stop(netdev); + + lif->ntxq_descs = ring->tx_pending; + lif->nrxq_descs = ring->rx_pending; + + if (running) + ionic_open(netdev); + clear_bit(IONIC_LIF_QUEUE_RESET, lif->state); + + return 0; +} + +static void ionic_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct ionic_lif *lif = netdev_priv(netdev); + + /* report maximum channels */ + ch->max_combined = lif->ionic->ntxqs_per_lif; + + /* report current channels */ + ch->combined_count = lif->nxqs; +} + +static int ionic_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct ionic_lif *lif = netdev_priv(netdev); + bool running; + + if (!ch->combined_count || ch->other_count || + ch->rx_count || ch->tx_count) + return -EINVAL; + + if (ch->combined_count == lif->nxqs) + return 0; + + if (!ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET)) + return -EBUSY; + + running = test_bit(IONIC_LIF_UP, lif->state); + if (running) + ionic_stop(netdev); + + lif->nxqs = ch->combined_count; + + if (running) + ionic_open(netdev); + clear_bit(IONIC_LIF_QUEUE_RESET, lif->state); + + return 0; +} + +static u32 ionic_get_priv_flags(struct net_device *netdev) +{ + struct ionic_lif *lif = netdev_priv(netdev); + u32 priv_flags = 0; + + if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) + priv_flags |= PRIV_F_SW_DBG_STATS; + + return priv_flags; +} + +static int ionic_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct ionic_lif *lif = netdev_priv(netdev); + u32 flags = lif->flags; + + clear_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state); + if (priv_flags & PRIV_F_SW_DBG_STATS) + set_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state); + + if (flags != lif->flags) + lif->flags = flags; + + return 0; +} + +static int ionic_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *info, u32 *rules) +{ + struct ionic_lif *lif = netdev_priv(netdev); + int err = 0; + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = lif->nxqs; + break; + default: + netdev_err(netdev, "Command parameter %d is not supported\n", + info->cmd); + err = -EOPNOTSUPP; + } + + return err; +} + +static u32 ionic_get_rxfh_indir_size(struct net_device *netdev) +{ + struct ionic_lif *lif = netdev_priv(netdev); + + return le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); +} + +static u32 ionic_get_rxfh_key_size(struct net_device *netdev) +{ + return IONIC_RSS_HASH_KEY_SIZE; +} + +static int ionic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct ionic_lif *lif = netdev_priv(netdev); + unsigned int i, tbl_sz; + + if (indir) { + tbl_sz = 
le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); + for (i = 0; i < tbl_sz; i++) + indir[i] = lif->rss_ind_tbl[i]; + } + + if (key) + memcpy(key, lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE); + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + return 0; +} + +static int ionic_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct ionic_lif *lif = netdev_priv(netdev); + int err; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + err = ionic_lif_rss_config(lif, lif->rss_types, key, indir); + if (err) + return err; + + return 0; +} + +static int ionic_set_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, + const void *data) +{ + struct ionic_lif *lif = netdev_priv(dev); + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + lif->rx_copybreak = *(u32 *)data; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int ionic_get_tunable(struct net_device *netdev, + const struct ethtool_tunable *tuna, void *data) +{ + struct ionic_lif *lif = netdev_priv(netdev); + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + *(u32 *)data = lif->rx_copybreak; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int ionic_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) + +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_dev *idev = &lif->ionic->idev; + struct ionic_xcvr_status *xcvr; + + xcvr = &idev->port_info->status.xcvr; + + /* report the module data type and length */ + switch (xcvr->sprom[0]) { + case 0x03: /* SFP */ + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + break; + case 0x0D: /* QSFP */ + case 0x11: /* QSFP28 */ + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + break; + default: + netdev_info(netdev, "unknown xcvr type 0x%02x\n", + xcvr->sprom[0]); + break; + } + + return 0; +} + +static int ionic_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, + u8 *data) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_dev *idev = &lif->ionic->idev; + struct ionic_xcvr_status *xcvr; + char tbuf[sizeof(xcvr->sprom)]; + int count = 10; + u32 len; + + /* The NIC keeps the module prom up-to-date in the DMA space + * so we can simply copy the module bytes into the data buffer. 
+ */ + xcvr = &idev->port_info->status.xcvr; + len = min_t(u32, sizeof(xcvr->sprom), ee->len); + + do { + memcpy(data, xcvr->sprom, len); + memcpy(tbuf, xcvr->sprom, len); + + /* Let's make sure we got a consistent copy */ + if (!memcmp(data, tbuf, len)) + break; + + } while (--count); + + if (!count) + return -ETIMEDOUT; + + return 0; +} + +static int ionic_nway_reset(struct net_device *netdev) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic *ionic = lif->ionic; + int err = 0; + + /* flap the link to force auto-negotiation */ + + mutex_lock(&ionic->dev_cmd_lock); + + ionic_dev_cmd_port_state(&ionic->idev, IONIC_PORT_ADMIN_STATE_DOWN); + err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); + + if (!err) { + ionic_dev_cmd_port_state(&ionic->idev, IONIC_PORT_ADMIN_STATE_UP); + err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); + } + + mutex_unlock(&ionic->dev_cmd_lock); + + return err; +} + +static const struct ethtool_ops ionic_ethtool_ops = { + .get_drvinfo = ionic_get_drvinfo, + .get_regs_len = ionic_get_regs_len, + .get_regs = ionic_get_regs, + .get_link = ethtool_op_get_link, + .get_link_ksettings = ionic_get_link_ksettings, + .get_coalesce = ionic_get_coalesce, + .set_coalesce = ionic_set_coalesce, + .get_ringparam = ionic_get_ringparam, + .set_ringparam = ionic_set_ringparam, + .get_channels = ionic_get_channels, + .set_channels = ionic_set_channels, + .get_strings = ionic_get_strings, + .get_ethtool_stats = ionic_get_stats, + .get_sset_count = ionic_get_sset_count, + .get_priv_flags = ionic_get_priv_flags, + .set_priv_flags = ionic_set_priv_flags, + .get_rxnfc = ionic_get_rxnfc, + .get_rxfh_indir_size = ionic_get_rxfh_indir_size, + .get_rxfh_key_size = ionic_get_rxfh_key_size, + .get_rxfh = ionic_get_rxfh, + .set_rxfh = ionic_set_rxfh, + .get_tunable = ionic_get_tunable, + .set_tunable = ionic_set_tunable, + .get_module_info = ionic_get_module_info, + .get_module_eeprom = ionic_get_module_eeprom, + .get_pauseparam = ionic_get_pauseparam, + .set_pauseparam = ionic_set_pauseparam, + .set_link_ksettings = ionic_set_link_ksettings, + .nway_reset = ionic_nway_reset, +}; + +void ionic_ethtool_set_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &ionic_ethtool_ops; +} diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.h b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.h new file mode 100644 index 000000000000..38b91b1d70ae --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#ifndef _IONIC_ETHTOOL_H_ +#define _IONIC_ETHTOOL_H_ + +void ionic_ethtool_set_ops(struct net_device *netdev); + +#endif /* _IONIC_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h new file mode 100644 index 000000000000..5bfdda19f64d --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h @@ -0,0 +1,2482 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB OR BSD-2-Clause */ +/* Copyright (c) 2017-2019 Pensando Systems, Inc. All rights reserved. 
*/ + +#ifndef _IONIC_IF_H_ +#define _IONIC_IF_H_ + +#pragma pack(push, 1) + +#define IONIC_DEV_INFO_SIGNATURE 0x44455649 /* 'DEVI' */ +#define IONIC_DEV_INFO_VERSION 1 +#define IONIC_IFNAMSIZ 16 + +/** + * Commands + */ +enum ionic_cmd_opcode { + IONIC_CMD_NOP = 0, + + /* Device commands */ + IONIC_CMD_IDENTIFY = 1, + IONIC_CMD_INIT = 2, + IONIC_CMD_RESET = 3, + IONIC_CMD_GETATTR = 4, + IONIC_CMD_SETATTR = 5, + + /* Port commands */ + IONIC_CMD_PORT_IDENTIFY = 10, + IONIC_CMD_PORT_INIT = 11, + IONIC_CMD_PORT_RESET = 12, + IONIC_CMD_PORT_GETATTR = 13, + IONIC_CMD_PORT_SETATTR = 14, + + /* LIF commands */ + IONIC_CMD_LIF_IDENTIFY = 20, + IONIC_CMD_LIF_INIT = 21, + IONIC_CMD_LIF_RESET = 22, + IONIC_CMD_LIF_GETATTR = 23, + IONIC_CMD_LIF_SETATTR = 24, + + IONIC_CMD_RX_MODE_SET = 30, + IONIC_CMD_RX_FILTER_ADD = 31, + IONIC_CMD_RX_FILTER_DEL = 32, + + /* Queue commands */ + IONIC_CMD_Q_INIT = 40, + IONIC_CMD_Q_CONTROL = 41, + + /* RDMA commands */ + IONIC_CMD_RDMA_RESET_LIF = 50, + IONIC_CMD_RDMA_CREATE_EQ = 51, + IONIC_CMD_RDMA_CREATE_CQ = 52, + IONIC_CMD_RDMA_CREATE_ADMINQ = 53, + + /* QoS commands */ + IONIC_CMD_QOS_CLASS_IDENTIFY = 240, + IONIC_CMD_QOS_CLASS_INIT = 241, + IONIC_CMD_QOS_CLASS_RESET = 242, + + /* Firmware commands */ + IONIC_CMD_FW_DOWNLOAD = 254, + IONIC_CMD_FW_CONTROL = 255, +}; + +/** + * Command Return codes + */ +enum ionic_status_code { + IONIC_RC_SUCCESS = 0, /* Success */ + IONIC_RC_EVERSION = 1, /* Incorrect version for request */ + IONIC_RC_EOPCODE = 2, /* Invalid cmd opcode */ + IONIC_RC_EIO = 3, /* I/O error */ + IONIC_RC_EPERM = 4, /* Permission denied */ + IONIC_RC_EQID = 5, /* Bad qid */ + IONIC_RC_EQTYPE = 6, /* Bad qtype */ + IONIC_RC_ENOENT = 7, /* No such element */ + IONIC_RC_EINTR = 8, /* operation interrupted */ + IONIC_RC_EAGAIN = 9, /* Try again */ + IONIC_RC_ENOMEM = 10, /* Out of memory */ + IONIC_RC_EFAULT = 11, /* Bad address */ + IONIC_RC_EBUSY = 12, /* Device or resource busy */ + IONIC_RC_EEXIST = 13, /* object already exists */ + IONIC_RC_EINVAL = 14, /* Invalid argument */ + IONIC_RC_ENOSPC = 15, /* No space left or alloc failure */ + IONIC_RC_ERANGE = 16, /* Parameter out of range */ + IONIC_RC_BAD_ADDR = 17, /* Descriptor contains a bad ptr */ + IONIC_RC_DEV_CMD = 18, /* Device cmd attempted on AdminQ */ + IONIC_RC_ENOSUPP = 19, /* Operation not supported */ + IONIC_RC_ERROR = 29, /* Generic error */ + + IONIC_RC_ERDMA = 30, /* Generic RDMA error */ +}; + +enum ionic_notifyq_opcode { + IONIC_EVENT_LINK_CHANGE = 1, + IONIC_EVENT_RESET = 2, + IONIC_EVENT_HEARTBEAT = 3, + IONIC_EVENT_LOG = 4, +}; + +/** + * struct cmd - General admin command format + * @opcode: Opcode for the command + * @lif_index: LIF index + * @cmd_data: Opcode-specific command bytes + */ +struct ionic_admin_cmd { + u8 opcode; + u8 rsvd; + __le16 lif_index; + u8 cmd_data[60]; +}; + +/** + * struct admin_comp - General admin command completion format + * @status: The status of the command (enum status_code) + * @comp_index: The index in the descriptor ring for which this + * is the completion. + * @cmd_data: Command-specific bytes. + * @color: Color bit. (Always 0 for commands issued to the + * Device Cmd Registers.) 
+ */
+struct ionic_admin_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ u8 cmd_data[11];
+ u8 color;
+#define IONIC_COMP_COLOR_MASK 0x80
+};
+
+static inline u8 color_match(u8 color, u8 done_color)
+{
+ return (!!(color & IONIC_COMP_COLOR_MASK)) == done_color;
+}
+
+/**
+ * struct nop_cmd - NOP command
+ * @opcode: opcode
+ */
+struct ionic_nop_cmd {
+ u8 opcode;
+ u8 rsvd[63];
+};
+
+/**
+ * struct nop_comp - NOP command completion
+ * @status: The status of the command (enum status_code)
+ */
+struct ionic_nop_comp {
+ u8 status;
+ u8 rsvd[15];
+};
+
+/**
+ * struct dev_init_cmd - Device init command
+ * @opcode: opcode
+ * @type: device type
+ */
+struct ionic_dev_init_cmd {
+ u8 opcode;
+ u8 type;
+ u8 rsvd[62];
+};
+
+/**
+ * struct dev_init_comp - Device init command completion
+ * @status: The status of the command (enum status_code)
+ */
+struct ionic_dev_init_comp {
+ u8 status;
+ u8 rsvd[15];
+};
+
+/**
+ * struct dev_reset_cmd - Device reset command
+ * @opcode: opcode
+ */
+struct ionic_dev_reset_cmd {
+ u8 opcode;
+ u8 rsvd[63];
+};
+
+/**
+ * struct dev_reset_comp - Reset command completion
+ * @status: The status of the command (enum status_code)
+ */
+struct ionic_dev_reset_comp {
+ u8 status;
+ u8 rsvd[15];
+};
+
+#define IONIC_IDENTITY_VERSION_1 1
+
+/**
+ * struct dev_identify_cmd - Driver/device identify command
+ * @opcode: opcode
+ * @ver: Highest version of identify supported by driver
+ */
+struct ionic_dev_identify_cmd {
+ u8 opcode;
+ u8 ver;
+ u8 rsvd[62];
+};
+
+/**
+ * struct dev_identify_comp - Driver/device identify command completion
+ * @status: The status of the command (enum status_code)
+ * @ver: Version of identify returned by device
+ */
+struct ionic_dev_identify_comp {
+ u8 status;
+ u8 ver;
+ u8 rsvd[14];
+};
+
+enum ionic_os_type {
+ IONIC_OS_TYPE_LINUX = 1,
+ IONIC_OS_TYPE_WIN = 2,
+ IONIC_OS_TYPE_DPDK = 3,
+ IONIC_OS_TYPE_FREEBSD = 4,
+ IONIC_OS_TYPE_IPXE = 5,
+ IONIC_OS_TYPE_ESXI = 6,
+};
+
+/**
+ * union drv_identity - driver identity information
+ * @os_type: OS type (see enum os_type)
+ * @os_dist: OS distribution, numeric format
+ * @os_dist_str: OS distribution, string format
+ * @kernel_ver: Kernel version, numeric format
+ * @kernel_ver_str: Kernel version, string format
+ * @driver_ver_str: Driver version, string format
+ */
+union ionic_drv_identity {
+ struct {
+ __le32 os_type;
+ __le32 os_dist;
+ char os_dist_str[128];
+ __le32 kernel_ver;
+ char kernel_ver_str[32];
+ char driver_ver_str[32];
+ };
+ __le32 words[512];
+};
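The @color bit and color_match() above implement the usual producer/consumer scheme for these rings: the device writes each completion carrying the ring's current color, which flips every time the ring wraps, so the consumer can detect new entries without reading a hardware index. A minimal polling sketch, with a hypothetical example_cq holding the ring state (these fields are illustrative, not part of this patch):

	/* Illustrative only: check whether the next completion slot is new */
	struct example_cq {
		struct ionic_admin_comp *ring;	/* completion ring base */
		unsigned int tail_idx;		/* next slot to consume */
		u8 done_color;			/* flips on every ring wrap */
	};

	static bool example_cq_entry_ready(struct example_cq *cq)
	{
		struct ionic_admin_comp *comp = &cq->ring[cq->tail_idx];

		/* a matching color means the device has written this slot */
		return color_match(comp->color, cq->done_color);
	}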
+/**
+ * union dev_identity - device identity information
+ * @version: Version of device identify
+ * @type: Identify type (0 for now)
+ * @nports: Number of ports provisioned
+ * @nlifs: Number of LIFs provisioned
+ * @nintrs: Number of interrupts provisioned
+ * @ndbpgs_per_lif: Number of doorbell pages per LIF
+ * @intr_coal_mult: Interrupt coalescing multiplication factor.
+ * Scale user-supplied interrupt coalescing
+ * value in usecs to device units using:
+ * device units = usecs * mult / div
+ * @intr_coal_div: Interrupt coalescing division factor.
+ * Scale user-supplied interrupt coalescing
+ * value in usecs to device units using:
+ * device units = usecs * mult / div
+ */
+union ionic_dev_identity {
+ struct {
+ u8 version;
+ u8 type;
+ u8 rsvd[2];
+ u8 nports;
+ u8 rsvd2[3];
+ __le32 nlifs;
+ __le32 nintrs;
+ __le32 ndbpgs_per_lif;
+ __le32 intr_coal_mult;
+ __le32 intr_coal_div;
+ };
+ __le32 words[512];
+};
+
+enum ionic_lif_type {
+ IONIC_LIF_TYPE_CLASSIC = 0,
+ IONIC_LIF_TYPE_MACVLAN = 1,
+ IONIC_LIF_TYPE_NETQUEUE = 2,
+};
+
+/**
+ * struct lif_identify_cmd - lif identify command
+ * @opcode: opcode
+ * @type: lif type (enum lif_type)
+ * @ver: Highest version of identify supported by driver
+ */
+struct ionic_lif_identify_cmd {
+ u8 opcode;
+ u8 type;
+ u8 ver;
+ u8 rsvd[61];
+};
+
+/**
+ * struct lif_identify_comp - lif identify command completion
+ * @status: status of the command (enum status_code)
+ * @ver: version of identify returned by device
+ */
+struct ionic_lif_identify_comp {
+ u8 status;
+ u8 ver;
+ u8 rsvd2[14];
+};
+
+enum ionic_lif_capability {
+ IONIC_LIF_CAP_ETH = BIT(0),
+ IONIC_LIF_CAP_RDMA = BIT(1),
+};
+
+/**
+ * Logical Queue Types
+ */
+enum ionic_logical_qtype {
+ IONIC_QTYPE_ADMINQ = 0,
+ IONIC_QTYPE_NOTIFYQ = 1,
+ IONIC_QTYPE_RXQ = 2,
+ IONIC_QTYPE_TXQ = 3,
+ IONIC_QTYPE_EQ = 4,
+ IONIC_QTYPE_MAX = 16,
+};
+
+/**
+ * struct lif_logical_qtype - Descriptor of logical to hardware queue type.
+ * @qtype: Hardware Queue Type.
+ * @qid_count: Number of Queue IDs of the logical type.
+ * @qid_base: Minimum Queue ID of the logical type.
+ */
+struct ionic_lif_logical_qtype {
+ u8 qtype;
+ u8 rsvd[3];
+ __le32 qid_count;
+ __le32 qid_base;
+};
+
+enum ionic_lif_state {
+ IONIC_LIF_DISABLE = 0,
+ IONIC_LIF_ENABLE = 1,
+ IONIC_LIF_HANG_RESET = 2,
+};
+
+/**
+ * LIF configuration
+ * @state: lif state (enum lif_state)
+ * @name: lif name
+ * @mtu: mtu
+ * @mac: station mac address
+ * @features: features (enum eth_hw_features)
+ * @queue_count: queue counts per queue-type
+ */
+union ionic_lif_config {
+ struct {
+ u8 state;
+ u8 rsvd[3];
+ char name[IONIC_IFNAMSIZ];
+ __le32 mtu;
+ u8 mac[6];
+ u8 rsvd2[2];
+ __le64 features;
+ __le32 queue_count[IONIC_QTYPE_MAX];
+ };
+ __le32 words[64];
+};
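The intr_coal_mult/intr_coal_div words in dev_identity above define the usecs-to-device-units scaling that the ethtool coalescing code earlier in this patch depends on (ionic_coal_usec_to_hw()/ionic_coal_hw_to_usec()). A sketch of the forward conversion, assuming nothing beyond the documented formula (helper name illustrative):

	static u32 example_coal_usec_to_hw(u32 mult, u32 div, u32 usecs)
	{
		/* device units = usecs * mult / div; a zero divisor is
		 * rejected by ionic_set_coalesce() before conversion
		 */
		return div ? (usecs * mult) / div : 0;
	}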
+/**
+ * struct lif_identity - lif identity information (type-specific)
+ *
+ * @capabilities: LIF capabilities
+ *
+ * Ethernet:
+ * @version: Ethernet identify structure version.
+ * @features: Ethernet features supported on this lif type.
+ * @max_ucast_filters: Number of perfect unicast addresses supported.
+ * @max_mcast_filters: Number of perfect multicast addresses supported.
+ * @rss_ind_tbl_sz: Size of the RSS indirection table.
+ * @min_frame_size: Minimum size of frames to be sent
+ * @max_frame_size: Maximum size of frames to be sent
+ * @config: LIF config struct with features, mtu, mac, q counts
+ *
+ * RDMA:
+ * @version: RDMA version of opcodes and queue descriptors.
+ * @qp_opcodes: Number of rdma queue pair opcodes supported.
+ * @admin_opcodes: Number of rdma admin opcodes supported.
+ * @npts_per_lif: Page table size per lif
+ * @nmrs_per_lif: Number of memory regions per lif
+ * @nahs_per_lif: Number of address handles per lif
+ * @max_stride: Max work request stride.
+ * @cl_stride: Cache line stride.
+ * @pte_stride: Page table entry stride.
+ * @rrq_stride: Remote RQ work request stride.
+ * @rsq_stride: Remote SQ work request stride.
+ * @dcqcn_profiles: Number of DCQCN profiles
+ * @aq_qtype: RDMA Admin Qtype.
+ * @sq_qtype: RDMA Send Qtype.
+ * @rq_qtype: RDMA Receive Qtype.
+ * @cq_qtype: RDMA Completion Qtype.
+ * @eq_qtype: RDMA Event Qtype.
+ */
+union ionic_lif_identity {
+ struct {
+ __le64 capabilities;
+
+ struct {
+ u8 version;
+ u8 rsvd[3];
+ __le32 max_ucast_filters;
+ __le32 max_mcast_filters;
+ __le16 rss_ind_tbl_sz;
+ __le32 min_frame_size;
+ __le32 max_frame_size;
+ u8 rsvd2[106];
+ union ionic_lif_config config;
+ } eth;
+
+ struct {
+ u8 version;
+ u8 qp_opcodes;
+ u8 admin_opcodes;
+ u8 rsvd;
+ __le32 npts_per_lif;
+ __le32 nmrs_per_lif;
+ __le32 nahs_per_lif;
+ u8 max_stride;
+ u8 cl_stride;
+ u8 pte_stride;
+ u8 rrq_stride;
+ u8 rsq_stride;
+ u8 dcqcn_profiles;
+ u8 rsvd_dimensions[10];
+ struct ionic_lif_logical_qtype aq_qtype;
+ struct ionic_lif_logical_qtype sq_qtype;
+ struct ionic_lif_logical_qtype rq_qtype;
+ struct ionic_lif_logical_qtype cq_qtype;
+ struct ionic_lif_logical_qtype eq_qtype;
+ } rdma;
+ };
+ __le32 words[512];
+};
+
+/**
+ * struct lif_init_cmd - LIF init command
+ * @opcode: opcode
+ * @type: LIF type (enum lif_type)
+ * @index: LIF index
+ * @info_pa: destination address for lif info (struct lif_info)
+ */
+struct ionic_lif_init_cmd {
+ u8 opcode;
+ u8 type;
+ __le16 index;
+ __le32 rsvd;
+ __le64 info_pa;
+ u8 rsvd2[48];
+};
+
+/**
+ * struct lif_init_comp - LIF init command completion
+ * @status: The status of the command (enum status_code)
+ */
+struct ionic_lif_init_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 hw_index;
+ u8 rsvd2[12];
+};
+
+/**
+ * struct q_init_cmd - Queue init command
+ * @opcode: opcode
+ * @type: Logical queue type
+ * @ver: Queue version (defines opcode/descriptor scope)
+ * @lif_index: LIF index
+ * @index: (lif, qtype) relative admin queue index
+ * @intr_index: Interrupt control register index
+ * @pid: Process ID
+ * @flags:
+ * IRQ: Interrupt requested on completion
+ * ENA: Enable the queue. If ENA=0 the queue is initialized
+ * but remains disabled, to be later enabled with the
+ * Queue Enable command. If ENA=1, then queue is
+ * initialized and then enabled.
+ * SG: Enable Scatter-Gather on the queue.
+ * EQ: Enable the Event Queue
+ * @cos: Class of service for this queue.
+ * @ring_size: Queue ring size, encoded as a log2(size),
+ * in number of descs. The actual ring size is
+ * (1 << ring_size). For example, to
+ * select a ring size of 64 descriptors write
+ * ring_size = 6. The minimum ring_size value is 2
+ * for a ring size of 4 descriptors. The maximum
+ * ring_size value is 16 for a ring size of 64k
+ * descriptors. Values of ring_size <2 and >16 are
+ * reserved.
+ * @ring_base: Queue ring base address
+ * @cq_ring_base: Completion queue ring base address
+ * @sg_ring_base: Scatter/Gather ring base address
+ * @eq_index: Event queue index
+ */
+struct ionic_q_init_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 lif_index;
+ u8 type;
+ u8 ver;
+ u8 rsvd1[2];
+ __le32 index;
+ __le16 pid;
+ __le16 intr_index;
+ __le16 flags;
+#define IONIC_QINIT_F_IRQ 0x01 /* Request interrupt on completion */
+#define IONIC_QINIT_F_ENA 0x02 /* Enable the queue */
+#define IONIC_QINIT_F_SG 0x04 /* Enable scatter/gather on the queue */
+#define IONIC_QINIT_F_EQ 0x08 /* Enable event queue */
+#define IONIC_QINIT_F_DEBUG 0x80 /* Enable queue debugging */
+ u8 cos;
+ u8 ring_size;
+ __le64 ring_base;
+ __le64 cq_ring_base;
+ __le64 sg_ring_base;
+ __le32 eq_index;
+ u8 rsvd2[16];
+};
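Since @ring_size in q_init_cmd above is a log2 encoding, a driver turns a requested descriptor count into the wire value with ilog2(); a sketch under that reading (helper name illustrative):

	#include <linux/log2.h>

	static u8 example_encode_ring_size(u32 num_descs)
	{
		/* e.g. 64 descriptors -> 6; valid encodings are 2..16 */
		return (u8)ilog2(num_descs);
	}

This is also why ionic_set_ringparam() earlier in the patch insists on power-of-2 descriptor counts.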
+/**
+ * struct q_init_comp - Queue init command completion
+ * @status: The status of the command (enum status_code)
+ * @ver: Queue version (defines opcode/descriptor scope)
+ * @comp_index: The index in the descriptor ring for which this
+ * is the completion.
+ * @hw_index: Hardware Queue ID
+ * @hw_type: Hardware Queue type
+ * @color: Color
+ */
+struct ionic_q_init_comp {
+ u8 status;
+ u8 ver;
+ __le16 comp_index;
+ __le32 hw_index;
+ u8 hw_type;
+ u8 rsvd2[6];
+ u8 color;
+};
+
+/* the device's internal addressing uses up to 52 bits */
+#define IONIC_ADDR_LEN 52
+#define IONIC_ADDR_MASK (BIT_ULL(IONIC_ADDR_LEN) - 1)
+
+enum ionic_txq_desc_opcode {
+ IONIC_TXQ_DESC_OPCODE_CSUM_NONE = 0,
+ IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL = 1,
+ IONIC_TXQ_DESC_OPCODE_CSUM_HW = 2,
+ IONIC_TXQ_DESC_OPCODE_TSO = 3,
+};
+
+/**
+ * struct txq_desc - Ethernet Tx queue descriptor format
+ * @opcode: Tx operation, see TXQ_DESC_OPCODE_*:
+ *
+ * IONIC_TXQ_DESC_OPCODE_CSUM_NONE:
+ *
+ * Non-offload send. No segmentation,
+ * fragmentation or checksum calc/insertion is
+ * performed by device; packet is prepared
+ * to send by software stack and requires
+ * no further manipulation from device.
+ *
+ * IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL:
+ *
+ * Offload 16-bit L4 checksum
+ * calculation/insertion. The device will
+ * calculate the L4 checksum value and
+ * insert the result in the packet's L4
+ * header checksum field. The L4 checksum
+ * is calculated starting at @csum_start bytes
+ * into the packet to the end of the packet.
+ * The checksum insertion position is given
+ * in @csum_offset. This feature is only
+ * applicable to protocols such as TCP, UDP
+ * and ICMP where a standard (i.e. the
+ * 'IP-style' checksum) one's complement
+ * 16-bit checksum is used, using an IP
+ * pseudo-header to seed the calculation.
+ * Software will preload the L4 checksum
+ * field with the IP pseudo-header checksum.
+ *
+ * For tunnel encapsulation, @csum_start and
+ * @csum_offset refer to the inner L4
+ * header. Supported tunnels encapsulations
+ * are: IPIP, GRE, and UDP. If the @encap
+ * is clear, no further processing by the
+ * device is required; software will
+ * calculate the outer header checksums. If
+ * the @encap is set, the device will
+ * offload the outer header checksums using
+ * LCO (local checksum offload) (see
+ * Documentation/networking/checksum-offloads.txt
+ * for more info).
+ *
+ * IONIC_TXQ_DESC_OPCODE_CSUM_HW:
+ *
+ * Offload 16-bit checksum computation to hardware.
+ * If @csum_l3 is set then the packet's L3 checksum is
+ * updated. Similarly, if @csum_l4 is set then the L4
+ * checksum is updated. If @encap is set then encap header
+ * checksums are also updated.
+ *
+ * IONIC_TXQ_DESC_OPCODE_TSO:
+ *
+ * Device performs TCP segmentation offload
+ * (TSO). @hdr_len is the number of bytes
+ * to the end of TCP header (the offset to
+ * the TCP payload). @mss is the desired
+ * MSS, the TCP payload length for each
+ * segment. The device will calculate/
+ * insert IP (IPv4 only) and TCP checksums
+ * for each segment. In the first data
+ * buffer containing the header template,
+ * the driver will set IPv4 checksum to 0
+ * and preload TCP checksum with the IP
+ * pseudo header calculated with IP length = 0.
+ *
+ * Supported tunnel encapsulations are IPIP,
+ * layer-3 GRE, and UDP. @hdr_len includes
+ * both outer and inner headers. The driver
+ * will set IPv4 checksum to zero and
+ * preload TCP checksum with IP pseudo
+ * header on the inner header.
+ *
+ * TCP ECN offload is supported. The device
+ * will set CWR flag in the first segment if
+ * CWR is set in the template header, and
+ * clear CWR in remaining segments.
+ * @flags:
+ * vlan:
+ * Insert an L2 VLAN header using @vlan_tci.
+ * encap:
+ * Calculate encap header checksum.
+ * csum_l3:
+ * Compute L3 header checksum.
+ * csum_l4:
+ * Compute L4 header checksum.
+ * tso_sot:
+ * TSO start
+ * tso_eot:
+ * TSO end
+ * @num_sg_elems: Number of scatter-gather elements in SG
+ * descriptor
+ * @addr: First data buffer's DMA address.
+ * (Subsequent data buffers are on txq_sg_desc).
+ * @len: First data buffer's length, in bytes
+ * @vlan_tci: VLAN tag to insert in the packet (if requested
+ * by @V-bit). Includes .1p and .1q tags
+ * @hdr_len: Length of packet headers, including
+ * encapsulating outer header, if applicable.
+ * Valid for opcodes TXQ_DESC_OPCODE_CALC_CSUM and
+ * TXQ_DESC_OPCODE_TSO. Should be set to zero for
+ * all other modes. For
+ * TXQ_DESC_OPCODE_CALC_CSUM, @hdr_len is length
+ * of headers up to inner-most L4 header. For
+ * TXQ_DESC_OPCODE_TSO, @hdr_len is up to
+ * inner-most L4 payload, so inclusive of
+ * inner-most L4 header.
+ * @mss: Desired MSS value for TSO. Only applicable for
+ * TXQ_DESC_OPCODE_TSO.
+ * @csum_start: Offset into inner-most L3 header of checksum
+ * @csum_offset: Offset into inner-most L4 header of checksum
+ */
+
+#define IONIC_TXQ_DESC_OPCODE_MASK 0xf
+#define IONIC_TXQ_DESC_OPCODE_SHIFT 4
+#define IONIC_TXQ_DESC_FLAGS_MASK 0xf
+#define IONIC_TXQ_DESC_FLAGS_SHIFT 0
+#define IONIC_TXQ_DESC_NSGE_MASK 0xf
+#define IONIC_TXQ_DESC_NSGE_SHIFT 8
+#define IONIC_TXQ_DESC_ADDR_MASK (BIT_ULL(IONIC_ADDR_LEN) - 1)
+#define IONIC_TXQ_DESC_ADDR_SHIFT 12
+
+/* common flags */
+#define IONIC_TXQ_DESC_FLAG_VLAN 0x1
+#define IONIC_TXQ_DESC_FLAG_ENCAP 0x2
+
+/* flags for csum_hw opcode */
+#define IONIC_TXQ_DESC_FLAG_CSUM_L3 0x4
+#define IONIC_TXQ_DESC_FLAG_CSUM_L4 0x8
+
+/* flags for tso opcode */
+#define IONIC_TXQ_DESC_FLAG_TSO_SOT 0x4
+#define IONIC_TXQ_DESC_FLAG_TSO_EOT 0x8
+
+struct ionic_txq_desc {
+ __le64 cmd;
+ __le16 len;
+ union {
+ __le16 vlan_tci;
+ __le16 hword0;
+ };
+ union {
+ __le16 csum_start;
+ __le16 hdr_len;
+ __le16 hword1;
+ };
+ union {
+ __le16 csum_offset;
+ __le16 mss;
+ __le16 hword2;
+ };
+};
+
+static inline u64 encode_txq_desc_cmd(u8 opcode, u8 flags,
+ u8 nsge, u64 addr)
+{
+ u64 cmd;
+
+ cmd = (opcode & IONIC_TXQ_DESC_OPCODE_MASK) << IONIC_TXQ_DESC_OPCODE_SHIFT;
+ cmd |= (flags & IONIC_TXQ_DESC_FLAGS_MASK) << IONIC_TXQ_DESC_FLAGS_SHIFT;
+ cmd |= (nsge & IONIC_TXQ_DESC_NSGE_MASK) << IONIC_TXQ_DESC_NSGE_SHIFT;
+ cmd |= (addr & IONIC_TXQ_DESC_ADDR_MASK) << IONIC_TXQ_DESC_ADDR_SHIFT;
+
+ return cmd;
+}
+
+static inline void decode_txq_desc_cmd(u64 cmd, u8 *opcode, u8 *flags,
+ u8 *nsge, u64 *addr)
+{
+ *opcode = (cmd >> IONIC_TXQ_DESC_OPCODE_SHIFT) & IONIC_TXQ_DESC_OPCODE_MASK;
+ *flags = (cmd >> IONIC_TXQ_DESC_FLAGS_SHIFT) & IONIC_TXQ_DESC_FLAGS_MASK;
+ *nsge = (cmd >> IONIC_TXQ_DESC_NSGE_SHIFT) & IONIC_TXQ_DESC_NSGE_MASK;
+ *addr = (cmd >> IONIC_TXQ_DESC_ADDR_SHIFT) & IONIC_TXQ_DESC_ADDR_MASK;
+}
+
+#define IONIC_TX_MAX_SG_ELEMS 8
+#define IONIC_RX_MAX_SG_ELEMS 8
+
+/**
+ * struct txq_sg_desc - Transmit scatter-gather (SG) list
+ * @addr: DMA address of SG element data buffer
+ * @len: Length of SG element data buffer, in bytes
+ */
+struct ionic_txq_sg_desc {
+ struct ionic_txq_sg_elem {
+ __le64 addr;
+ __le16 len;
+ __le16 rsvd[3];
+ } elems[IONIC_TX_MAX_SG_ELEMS];
+};
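encode_txq_desc_cmd() above packs the opcode, flags, SG element count and 52-bit DMA address into the single __le64 @cmd word of the Tx descriptor. A sketch of filling a plain non-offload descriptor (values illustrative):

	static void example_fill_tx_desc(struct ionic_txq_desc *desc,
					 u64 dma_addr, u16 len)
	{
		/* no flags, no SG elements: a single linear buffer */
		desc->cmd = cpu_to_le64(encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
							    0, 0, dma_addr));
		desc->len = cpu_to_le16(len);
	}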
+/**
+ * struct txq_comp - Ethernet transmit queue completion descriptor
+ * @status: The status of the command (enum status_code)
+ * @comp_index: The index in the descriptor ring for which this
+ * is the completion.
+ * @color: Color bit.
+ */
+struct ionic_txq_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ u8 rsvd2[11];
+ u8 color;
+};
+
+enum ionic_rxq_desc_opcode {
+ IONIC_RXQ_DESC_OPCODE_SIMPLE = 0,
+ IONIC_RXQ_DESC_OPCODE_SG = 1,
+};
+
+/**
+ * struct rxq_desc - Ethernet Rx queue descriptor format
+ * @opcode: Rx operation, see RXQ_DESC_OPCODE_*:
+ *
+ * RXQ_DESC_OPCODE_SIMPLE:
+ *
+ * Receive full packet into data buffer
+ * starting at @addr. Results of
+ * receive, including actual bytes received,
+ * are recorded in Rx completion descriptor.
+ *
+ * @len: Data buffer's length, in bytes.
+ * @addr: Data buffer's DMA address
+ */
+struct ionic_rxq_desc {
+ u8 opcode;
+ u8 rsvd[5];
+ __le16 len;
+ __le64 addr;
+};
+
+/**
+ * struct rxq_sg_desc - Receive scatter-gather (SG) list
+ * @addr: DMA address of SG element data buffer
+ * @len: Length of SG element data buffer, in bytes
+ */
+struct ionic_rxq_sg_desc {
+ struct ionic_rxq_sg_elem {
+ __le64 addr;
+ __le16 len;
+ __le16 rsvd[3];
+ } elems[IONIC_RX_MAX_SG_ELEMS];
+};
+
+/**
+ * struct rxq_comp - Ethernet receive queue completion descriptor
+ * @status: The status of the command (enum status_code)
+ * @num_sg_elems: Number of SG elements used by this descriptor
+ * @comp_index: The index in the descriptor ring for which this
+ * is the completion.
+ * @rss_hash: 32-bit RSS hash
+ * @csum: 16-bit sum of the packet's L2 payload.
+ * If the packet's L2 payload is odd length, an extra
+ * zero-value byte is included in the @csum calculation but
+ * not included in @len.
+ * @vlan_tci: VLAN tag stripped from the packet. Valid if @VLAN is
+ * set. Includes .1p and .1q tags.
+ * @len: Received packet length, in bytes. Excludes FCS.
+ * @csum_calc: Whether the L2 payload checksum was computed
+ * @csum_tcp_ok: The TCP checksum calculated by the device
+ * matched the checksum in the receive packet's
+ * TCP header
+ * @csum_tcp_bad: The TCP checksum calculated by the device did
+ * not match the checksum in the receive packet's
+ * TCP header.
+ * @csum_udp_ok: The UDP checksum calculated by the device
+ * matched the checksum in the receive packet's
+ * UDP header
+ * @csum_udp_bad: The UDP checksum calculated by the device did
+ * not match the checksum in the receive packet's
+ * UDP header.
+ * @csum_ip_ok: The IPv4 checksum calculated by the device
+ * matched the checksum in the receive packet's
+ * first IPv4 header. If the receive packet
+ * contains both a tunnel IPv4 header and a
+ * transport IPv4 header, the device validates the
+ * checksum for both IPv4 headers.
+ * @csum_ip_bad: The IPv4 checksum calculated by the device did
+ * not match the checksum in the receive packet's
+ * first IPv4 header. If the receive packet
+ * contains both a tunnel IPv4 header and a
+ * transport IPv4 header, the device validates the
+ * checksum for both IP headers.
+ * @VLAN: VLAN header was stripped and placed in @vlan_tci.
+ * @pkt_type: Packet type
+ * @color: Color bit.
+ */ +struct ionic_rxq_comp { + u8 status; + u8 num_sg_elems; + __le16 comp_index; + __le32 rss_hash; + __le16 csum; + __le16 vlan_tci; + __le16 len; + u8 csum_flags; +#define IONIC_RXQ_COMP_CSUM_F_TCP_OK 0x01 +#define IONIC_RXQ_COMP_CSUM_F_TCP_BAD 0x02 +#define IONIC_RXQ_COMP_CSUM_F_UDP_OK 0x04 +#define IONIC_RXQ_COMP_CSUM_F_UDP_BAD 0x08 +#define IONIC_RXQ_COMP_CSUM_F_IP_OK 0x10 +#define IONIC_RXQ_COMP_CSUM_F_IP_BAD 0x20 +#define IONIC_RXQ_COMP_CSUM_F_VLAN 0x40 +#define IONIC_RXQ_COMP_CSUM_F_CALC 0x80 + u8 pkt_type_color; +#define IONIC_RXQ_COMP_PKT_TYPE_MASK 0x0f +}; + +enum ionic_pkt_type { + IONIC_PKT_TYPE_NON_IP = 0x000, + IONIC_PKT_TYPE_IPV4 = 0x001, + IONIC_PKT_TYPE_IPV4_TCP = 0x003, + IONIC_PKT_TYPE_IPV4_UDP = 0x005, + IONIC_PKT_TYPE_IPV6 = 0x008, + IONIC_PKT_TYPE_IPV6_TCP = 0x018, + IONIC_PKT_TYPE_IPV6_UDP = 0x028, +}; + +enum ionic_eth_hw_features { + IONIC_ETH_HW_VLAN_TX_TAG = BIT(0), + IONIC_ETH_HW_VLAN_RX_STRIP = BIT(1), + IONIC_ETH_HW_VLAN_RX_FILTER = BIT(2), + IONIC_ETH_HW_RX_HASH = BIT(3), + IONIC_ETH_HW_RX_CSUM = BIT(4), + IONIC_ETH_HW_TX_SG = BIT(5), + IONIC_ETH_HW_RX_SG = BIT(6), + IONIC_ETH_HW_TX_CSUM = BIT(7), + IONIC_ETH_HW_TSO = BIT(8), + IONIC_ETH_HW_TSO_IPV6 = BIT(9), + IONIC_ETH_HW_TSO_ECN = BIT(10), + IONIC_ETH_HW_TSO_GRE = BIT(11), + IONIC_ETH_HW_TSO_GRE_CSUM = BIT(12), + IONIC_ETH_HW_TSO_IPXIP4 = BIT(13), + IONIC_ETH_HW_TSO_IPXIP6 = BIT(14), + IONIC_ETH_HW_TSO_UDP = BIT(15), + IONIC_ETH_HW_TSO_UDP_CSUM = BIT(16), +}; + +/** + * struct q_control_cmd - Queue control command + * @opcode: opcode + * @type: Queue type + * @lif_index: LIF index + * @index: Queue index + * @oper: Operation (enum q_control_oper) + */ +struct ionic_q_control_cmd { + u8 opcode; + u8 type; + __le16 lif_index; + __le32 index; + u8 oper; + u8 rsvd[55]; +}; + +typedef struct ionic_admin_comp ionic_q_control_comp; + +enum q_control_oper { + IONIC_Q_DISABLE = 0, + IONIC_Q_ENABLE = 1, + IONIC_Q_HANG_RESET = 2, +}; + +/** + * Physical connection type + */ +enum ionic_phy_type { + IONIC_PHY_TYPE_NONE = 0, + IONIC_PHY_TYPE_COPPER = 1, + IONIC_PHY_TYPE_FIBER = 2, +}; + +/** + * Transceiver status + */ +enum ionic_xcvr_state { + IONIC_XCVR_STATE_REMOVED = 0, + IONIC_XCVR_STATE_INSERTED = 1, + IONIC_XCVR_STATE_PENDING = 2, + IONIC_XCVR_STATE_SPROM_READ = 3, + IONIC_XCVR_STATE_SPROM_READ_ERR = 4, +}; + +/** + * Supported link modes + */ +enum ionic_xcvr_pid { + IONIC_XCVR_PID_UNKNOWN = 0, + + /* CU */ + IONIC_XCVR_PID_QSFP_100G_CR4 = 1, + IONIC_XCVR_PID_QSFP_40GBASE_CR4 = 2, + IONIC_XCVR_PID_SFP_25GBASE_CR_S = 3, + IONIC_XCVR_PID_SFP_25GBASE_CR_L = 4, + IONIC_XCVR_PID_SFP_25GBASE_CR_N = 5, + + /* Fiber */ + IONIC_XCVR_PID_QSFP_100G_AOC = 50, + IONIC_XCVR_PID_QSFP_100G_ACC = 51, + IONIC_XCVR_PID_QSFP_100G_SR4 = 52, + IONIC_XCVR_PID_QSFP_100G_LR4 = 53, + IONIC_XCVR_PID_QSFP_100G_ER4 = 54, + IONIC_XCVR_PID_QSFP_40GBASE_ER4 = 55, + IONIC_XCVR_PID_QSFP_40GBASE_SR4 = 56, + IONIC_XCVR_PID_QSFP_40GBASE_LR4 = 57, + IONIC_XCVR_PID_QSFP_40GBASE_AOC = 58, + IONIC_XCVR_PID_SFP_25GBASE_SR = 59, + IONIC_XCVR_PID_SFP_25GBASE_LR = 60, + IONIC_XCVR_PID_SFP_25GBASE_ER = 61, + IONIC_XCVR_PID_SFP_25GBASE_AOC = 62, + IONIC_XCVR_PID_SFP_10GBASE_SR = 63, + IONIC_XCVR_PID_SFP_10GBASE_LR = 64, + IONIC_XCVR_PID_SFP_10GBASE_LRM = 65, + IONIC_XCVR_PID_SFP_10GBASE_ER = 66, + IONIC_XCVR_PID_SFP_10GBASE_AOC = 67, + IONIC_XCVR_PID_SFP_10GBASE_CU = 68, + IONIC_XCVR_PID_QSFP_100G_CWDM4 = 69, + IONIC_XCVR_PID_QSFP_100G_PSM4 = 70, +}; + +/** + * Port types + */ +enum ionic_port_type { + IONIC_PORT_TYPE_NONE = 0, /* port type not 
configured */
+ IONIC_PORT_TYPE_ETH = 1, /* port carries ethernet traffic (inband) */
+ IONIC_PORT_TYPE_MGMT = 2, /* port carries mgmt traffic (out-of-band) */
+};
+
+/**
+ * Port config state
+ */
+enum ionic_port_admin_state {
+ IONIC_PORT_ADMIN_STATE_NONE = 0, /* port admin state not configured */
+ IONIC_PORT_ADMIN_STATE_DOWN = 1, /* port is admin disabled */
+ IONIC_PORT_ADMIN_STATE_UP = 2, /* port is admin enabled */
+};
+
+/**
+ * Port operational status
+ */
+enum ionic_port_oper_status {
+ IONIC_PORT_OPER_STATUS_NONE = 0, /* port is disabled */
+ IONIC_PORT_OPER_STATUS_UP = 1, /* port is linked up */
+ IONIC_PORT_OPER_STATUS_DOWN = 2, /* port link status is down */
+};
+
+/**
+ * Ethernet Forward error correction (fec) modes
+ */
+enum ionic_port_fec_type {
+ IONIC_PORT_FEC_TYPE_NONE = 0, /* Disabled */
+ IONIC_PORT_FEC_TYPE_FC = 1, /* FireCode */
+ IONIC_PORT_FEC_TYPE_RS = 2, /* ReedSolomon */
+};
+
+/**
+ * Ethernet pause (flow control) modes
+ */
+enum ionic_port_pause_type {
+ IONIC_PORT_PAUSE_TYPE_NONE = 0, /* Disable Pause */
+ IONIC_PORT_PAUSE_TYPE_LINK = 1, /* Link level pause */
+ IONIC_PORT_PAUSE_TYPE_PFC = 2, /* Priority-Flow control */
+};
+
+/**
+ * Loopback modes
+ */
+enum ionic_port_loopback_mode {
+ IONIC_PORT_LOOPBACK_MODE_NONE = 0, /* Disable loopback */
+ IONIC_PORT_LOOPBACK_MODE_MAC = 1, /* MAC loopback */
+ IONIC_PORT_LOOPBACK_MODE_PHY = 2, /* PHY/Serdes loopback */
+};
+
+/**
+ * Transceiver Status information
+ * @state: Transceiver status (enum xcvr_state)
+ * @phy: Physical connection type (enum phy_type)
+ * @pid: Transceiver link mode (enum xcvr_pid)
+ * @sprom: Transceiver sprom contents
+ */
+struct ionic_xcvr_status {
+ u8 state;
+ u8 phy;
+ __le16 pid;
+ u8 sprom[256];
+};
+
+/**
+ * Port configuration
+ * @speed: port speed (in Mbps)
+ * @mtu: mtu
+ * @state: port admin state (enum port_admin_state)
+ * @an_enable: autoneg enable
+ * @fec_type: fec type (enum port_fec_type)
+ * @pause_type: pause type (enum port_pause_type)
+ * @loopback_mode: loopback mode (enum port_loopback_mode)
+ */
+union ionic_port_config {
+ struct {
+#define IONIC_SPEED_100G 100000 /* 100G in Mbps */
+#define IONIC_SPEED_50G 50000 /* 50G in Mbps */
+#define IONIC_SPEED_40G 40000 /* 40G in Mbps */
+#define IONIC_SPEED_25G 25000 /* 25G in Mbps */
+#define IONIC_SPEED_10G 10000 /* 10G in Mbps */
+#define IONIC_SPEED_1G 1000 /* 1G in Mbps */
+ __le32 speed;
+ __le32 mtu;
+ u8 state;
+ u8 an_enable;
+ u8 fec_type;
+#define IONIC_PAUSE_TYPE_MASK 0x0f
+#define IONIC_PAUSE_FLAGS_MASK 0xf0
+#define IONIC_PAUSE_F_TX 0x10
+#define IONIC_PAUSE_F_RX 0x20
+ u8 pause_type;
+ u8 loopback_mode;
+ };
+ __le32 words[64];
+};
+
+/**
+ * Port Status information
+ * @status: link status (enum port_oper_status)
+ * @id: port id
+ * @speed: link speed (in Mbps)
+ * @xcvr: transceiver status
+ */
+struct ionic_port_status {
+ __le32 id;
+ __le32 speed;
+ u8 status;
+ u8 rsvd[51];
+ struct ionic_xcvr_status xcvr;
+};
+
+/**
+ * struct port_identify_cmd - Port identify command
+ * @opcode: opcode
+ * @index: port index
+ * @ver: Highest version of identify supported by driver
+ */
+struct ionic_port_identify_cmd {
+ u8 opcode;
+ u8 index;
+ u8 ver;
+ u8 rsvd[61];
+};
+
+/**
+ * struct port_identify_comp - Port identify command completion
+ * @status: The status of the command (enum status_code)
+ * @ver: Version of identify returned by device
+ */
+struct ionic_port_identify_comp {
+ u8 status;
+ u8 ver;
+ u8 rsvd[14];
+};
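The pause_type byte of port_config above carries an enum ionic_port_pause_type in its low nibble (IONIC_PAUSE_TYPE_MASK) and the Tx/Rx enables in the high nibble, which is exactly how ionic_get_pauseparam() earlier in this patch reads it back. A sketch of the unpacking (helper name illustrative):

	static void example_unpack_pause_type(u8 pause_type,
					      u8 *type, bool *tx, bool *rx)
	{
		*type = pause_type & IONIC_PAUSE_TYPE_MASK;
		*tx = !!(pause_type & IONIC_PAUSE_F_TX);
		*rx = !!(pause_type & IONIC_PAUSE_F_RX);
	}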
+/**
+ * struct port_init_cmd - Port initialization command
+ * @opcode: opcode
+ * @index: port index
+ * @info_pa: destination address for port info (struct port_info)
+ */
+struct ionic_port_init_cmd {
+ u8 opcode;
+ u8 index;
+ u8 rsvd[6];
+ __le64 info_pa;
+ u8 rsvd2[48];
+};
+
+/**
+ * struct port_init_comp - Port initialization command completion
+ * @status: The status of the command (enum status_code)
+ */
+struct ionic_port_init_comp {
+ u8 status;
+ u8 rsvd[15];
+};
+
+/**
+ * struct port_reset_cmd - Port reset command
+ * @opcode: opcode
+ * @index: port index
+ */
+struct ionic_port_reset_cmd {
+ u8 opcode;
+ u8 index;
+ u8 rsvd[62];
+};
+
+/**
+ * struct port_reset_comp - Port reset command completion
+ * @status: The status of the command (enum status_code)
+ */
+struct ionic_port_reset_comp {
+ u8 status;
+ u8 rsvd[15];
+};
+
+/**
+ * enum stats_ctl_cmd - List of commands for stats control
+ */
+enum ionic_stats_ctl_cmd {
+ IONIC_STATS_CTL_RESET = 0,
+};
+
+/**
+ * enum ionic_port_attr - List of port attributes
+ */
+enum ionic_port_attr {
+ IONIC_PORT_ATTR_STATE = 0,
+ IONIC_PORT_ATTR_SPEED = 1,
+ IONIC_PORT_ATTR_MTU = 2,
+ IONIC_PORT_ATTR_AUTONEG = 3,
+ IONIC_PORT_ATTR_FEC = 4,
+ IONIC_PORT_ATTR_PAUSE = 5,
+ IONIC_PORT_ATTR_LOOPBACK = 6,
+ IONIC_PORT_ATTR_STATS_CTRL = 7,
+};
+
+/**
+ * struct port_setattr_cmd - Set port attributes on the NIC
+ * @opcode: Opcode
+ * @index: port index
+ * @attr: Attribute type (enum ionic_port_attr)
+ */
+struct ionic_port_setattr_cmd {
+ u8 opcode;
+ u8 index;
+ u8 attr;
+ u8 rsvd;
+ union {
+ u8 state;
+ __le32 speed;
+ __le32 mtu;
+ u8 an_enable;
+ u8 fec_type;
+ u8 pause_type;
+ u8 loopback_mode;
+ u8 stats_ctl;
+ u8 rsvd2[60];
+ };
+};
+
+/**
+ * struct port_setattr_comp - Port set attr command completion
+ * @status: The status of the command (enum status_code)
+ * @color: Color bit
+ */
+struct ionic_port_setattr_comp {
+ u8 status;
+ u8 rsvd[14];
+ u8 color;
+};
+
+/**
+ * struct port_getattr_cmd - Get port attributes from the NIC
+ * @opcode: Opcode
+ * @index: port index
+ * @attr: Attribute type (enum ionic_port_attr)
+ */
+struct ionic_port_getattr_cmd {
+ u8 opcode;
+ u8 index;
+ u8 attr;
+ u8 rsvd[61];
+};
+
+/**
+ * struct port_getattr_comp - Port get attr command completion
+ * @status: The status of the command (enum status_code)
+ * @color: Color bit
+ */
+struct ionic_port_getattr_comp {
+ u8 status;
+ u8 rsvd[3];
+ union {
+ u8 state;
+ __le32 speed;
+ __le32 mtu;
+ u8 an_enable;
+ u8 fec_type;
+ u8 pause_type;
+ u8 loopback_mode;
+ u8 rsvd2[11];
+ };
+ u8 color;
+};
+
+/**
+ * struct lif_status - Lif status register
+ * @eid: most recent NotifyQ event id
+ * @port_num: port the lif is connected to
+ * @link_status: port status (enum port_oper_status)
+ * @link_speed: speed of link in Mbps
+ * @link_down_count: number of times link status changes
+ */
+struct ionic_lif_status {
+ __le64 eid;
+ u8 port_num;
+ u8 rsvd;
+ __le16 link_status;
+ __le32 link_speed; /* units of 1Mbps: eg 10000 = 10Gbps */
+ __le16 link_down_count;
+ u8 rsvd2[46];
+};
+
+/**
+ * struct lif_reset_cmd - LIF reset command
+ * @opcode: opcode
+ * @index: LIF index
+ */
+struct ionic_lif_reset_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 index;
+ __le32 rsvd2[15];
+};
+
+typedef struct ionic_admin_comp ionic_lif_reset_comp;
+
+enum ionic_dev_state {
+ IONIC_DEV_DISABLE = 0,
+ IONIC_DEV_ENABLE = 1,
+ IONIC_DEV_HANG_RESET = 2,
+};
+
+/**
+ * enum dev_attr - List of device attributes
+ */
+enum ionic_dev_attr {
+ IONIC_DEV_ATTR_STATE = 0,
+ IONIC_DEV_ATTR_NAME = 1,
+ IONIC_DEV_ATTR_FEATURES = 2,
+};
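The setattr/getattr commands above all share one shape: an opcode, an attribute selector, and a union carrying the value. A sketch of building a port MTU change, using the union ionic_dev_cmd defined near the end of this header (helper name illustrative; the driver's real wrappers are the ionic_dev_cmd_port_*() functions seen earlier in the patch):

	static void example_port_set_mtu(union ionic_dev_cmd *cmd, u8 port, u32 mtu)
	{
		cmd->port_setattr.opcode = IONIC_CMD_PORT_SETATTR;
		cmd->port_setattr.index = port;
		cmd->port_setattr.attr = IONIC_PORT_ATTR_MTU;
		cmd->port_setattr.mtu = cpu_to_le32(mtu);
	}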
+/**
+ * struct dev_setattr_cmd - Set Device attributes on the NIC
+ * @opcode: Opcode
+ * @attr: Attribute type (enum dev_attr)
+ * @state: Device state (enum dev_state)
+ * @name: The bus info, e.g. PCI slot-device-function, 0 terminated
+ * @features: Device features
+ */
+struct ionic_dev_setattr_cmd {
+ u8 opcode;
+ u8 attr;
+ __le16 rsvd;
+ union {
+ u8 state;
+ char name[IONIC_IFNAMSIZ];
+ __le64 features;
+ u8 rsvd2[60];
+ };
+};
+
+/**
+ * struct dev_setattr_comp - Device set attr command completion
+ * @status: The status of the command (enum status_code)
+ * @features: Device features
+ * @color: Color bit
+ */
+struct ionic_dev_setattr_comp {
+ u8 status;
+ u8 rsvd[3];
+ union {
+ __le64 features;
+ u8 rsvd2[11];
+ };
+ u8 color;
+};
+
+/**
+ * struct dev_getattr_cmd - Get Device attributes from the NIC
+ * @opcode: opcode
+ * @attr: Attribute type (enum dev_attr)
+ */
+struct ionic_dev_getattr_cmd {
+ u8 opcode;
+ u8 attr;
+ u8 rsvd[62];
+};
+
+/**
+ * struct dev_getattr_comp - Device get attr command completion
+ * @status: The status of the command (enum status_code)
+ * @features: Device features
+ * @color: Color bit
+ */
+struct ionic_dev_getattr_comp {
+ u8 status;
+ u8 rsvd[3];
+ union {
+ __le64 features;
+ u8 rsvd2[11];
+ };
+ u8 color;
+};
+
+/**
+ * RSS parameters
+ */
+#define IONIC_RSS_HASH_KEY_SIZE 40
+
+enum ionic_rss_hash_types {
+ IONIC_RSS_TYPE_IPV4 = BIT(0),
+ IONIC_RSS_TYPE_IPV4_TCP = BIT(1),
+ IONIC_RSS_TYPE_IPV4_UDP = BIT(2),
+ IONIC_RSS_TYPE_IPV6 = BIT(3),
+ IONIC_RSS_TYPE_IPV6_TCP = BIT(4),
+ IONIC_RSS_TYPE_IPV6_UDP = BIT(5),
+};
+
+/**
+ * enum lif_attr - List of LIF attributes
+ */
+enum ionic_lif_attr {
+ IONIC_LIF_ATTR_STATE = 0,
+ IONIC_LIF_ATTR_NAME = 1,
+ IONIC_LIF_ATTR_MTU = 2,
+ IONIC_LIF_ATTR_MAC = 3,
+ IONIC_LIF_ATTR_FEATURES = 4,
+ IONIC_LIF_ATTR_RSS = 5,
+ IONIC_LIF_ATTR_STATS_CTRL = 6,
+};
+
+/**
+ * struct lif_setattr_cmd - Set LIF attributes on the NIC
+ * @opcode: Opcode
+ * @attr: Attribute type (enum lif_attr)
+ * @index: LIF index
+ * @state: lif state (enum lif_state)
+ * @name: The netdev name string, 0 terminated
+ * @mtu: Mtu
+ * @mac: Station mac
+ * @features: Features (enum eth_hw_features)
+ * @rss: RSS properties
+ * @types: The hash types to enable (see rss_hash_types).
+ * @key: The hash secret key.
+ * @addr: Address for the indirection table shared memory.
+ * @stats_ctl: stats control commands (enum stats_ctl_cmd)
+ */
+struct ionic_lif_setattr_cmd {
+ u8 opcode;
+ u8 attr;
+ __le16 index;
+ union {
+ u8 state;
+ char name[IONIC_IFNAMSIZ];
+ __le32 mtu;
+ u8 mac[6];
+ __le64 features;
+ struct {
+ __le16 types;
+ u8 key[IONIC_RSS_HASH_KEY_SIZE];
+ u8 rsvd[6];
+ __le64 addr;
+ } rss;
+ u8 stats_ctl;
+ u8 rsvd[60];
+ };
+};
+
+/**
+ * struct lif_setattr_comp - LIF set attr command completion
+ * @status: The status of the command (enum status_code)
+ * @comp_index: The index in the descriptor ring for which this
+ * is the completion.
+ * @features: features (enum eth_hw_features) + * @color: Color bit + */ +struct ionic_lif_setattr_comp { + u8 status; + u8 rsvd; + __le16 comp_index; + union { + __le64 features; + u8 rsvd2[11]; + }; + u8 color; +}; + +/** + * struct lif_getattr_cmd - Get LIF attributes from the NIC + * @opcode: Opcode + * @attr: Attribute type (enum lif_attr) + * @index: LIF index + */ +struct ionic_lif_getattr_cmd { + u8 opcode; + u8 attr; + __le16 index; + u8 rsvd[60]; +}; + +/** + * struct lif_getattr_comp - LIF get attr command completion + * @status: The status of the command (enum status_code) + * @comp_index: The index in the descriptor ring for which this + * is the completion. + * @state: lif state (enum lif_state) + * @name: The netdev name string, 0 terminated + * @mtu: Mtu + * @mac: Station mac + * @features: Features (enum eth_hw_features) + * @color: Color bit + */ +struct ionic_lif_getattr_comp { + u8 status; + u8 rsvd; + __le16 comp_index; + union { + u8 state; + __le32 mtu; + u8 mac[6]; + __le64 features; + u8 rsvd2[11]; + }; + u8 color; +}; + +enum ionic_rx_mode { + IONIC_RX_MODE_F_UNICAST = BIT(0), + IONIC_RX_MODE_F_MULTICAST = BIT(1), + IONIC_RX_MODE_F_BROADCAST = BIT(2), + IONIC_RX_MODE_F_PROMISC = BIT(3), + IONIC_RX_MODE_F_ALLMULTI = BIT(4), +}; + +/** + * struct rx_mode_set_cmd - Set LIF's Rx mode command + * @opcode: opcode + * @lif_index: LIF index + * @rx_mode: Rx mode flags: + * IONIC_RX_MODE_F_UNICAST: Accept known unicast packets. + * IONIC_RX_MODE_F_MULTICAST: Accept known multicast packets. + * IONIC_RX_MODE_F_BROADCAST: Accept broadcast packets. + * IONIC_RX_MODE_F_PROMISC: Accept any packets. + * IONIC_RX_MODE_F_ALLMULTI: Accept any multicast packets. + */ +struct ionic_rx_mode_set_cmd { + u8 opcode; + u8 rsvd; + __le16 lif_index; + __le16 rx_mode; + __le16 rsvd2[29]; +}; + +typedef struct ionic_admin_comp ionic_rx_mode_set_comp; + +enum ionic_rx_filter_match_type { + IONIC_RX_FILTER_MATCH_VLAN = 0, + IONIC_RX_FILTER_MATCH_MAC, + IONIC_RX_FILTER_MATCH_MAC_VLAN, +}; + +/** + * struct rx_filter_add_cmd - Add LIF Rx filter command + * @opcode: opcode + * @qtype: Queue type + * @lif_index: LIF index + * @qid: Queue ID + * @match: Rx filter match type. (See IONIC_RX_FILTER_MATCH_xxx) + * @vlan: VLAN ID + * @addr: MAC address (network-byte order) + */ +struct ionic_rx_filter_add_cmd { + u8 opcode; + u8 qtype; + __le16 lif_index; + __le32 qid; + __le16 match; + union { + struct { + __le16 vlan; + } vlan; + struct { + u8 addr[6]; + } mac; + struct { + __le16 vlan; + u8 addr[6]; + } mac_vlan; + u8 rsvd[54]; + }; +}; + +/** + * struct rx_filter_add_comp - Add LIF Rx filter command completion + * @status: The status of the command (enum status_code) + * @comp_index: The index in the descriptor ring for which this + * is the completion. + * @filter_id: Filter ID + * @color: Color bit. 
+ */ +struct ionic_rx_filter_add_comp { + u8 status; + u8 rsvd; + __le16 comp_index; + __le32 filter_id; + u8 rsvd2[7]; + u8 color; +}; + +/** + * struct rx_filter_del_cmd - Delete LIF Rx filter command + * @opcode: opcode + * @lif_index: LIF index + * @filter_id: Filter ID + */ +struct ionic_rx_filter_del_cmd { + u8 opcode; + u8 rsvd; + __le16 lif_index; + __le32 filter_id; + u8 rsvd2[56]; +}; + +typedef struct ionic_admin_comp ionic_rx_filter_del_comp; + +/** + * struct qos_identify_cmd - QoS identify command + * @opcode: opcode + * @ver: Highest version of identify supported by driver + * + */ +struct ionic_qos_identify_cmd { + u8 opcode; + u8 ver; + u8 rsvd[62]; +}; + +/** + * struct qos_identify_comp - QoS identify command completion + * @status: The status of the command (enum status_code) + * @ver: Version of identify returned by device + */ +struct ionic_qos_identify_comp { + u8 status; + u8 ver; + u8 rsvd[14]; +}; + +#define IONIC_QOS_CLASS_MAX 7 +#define IONIC_QOS_CLASS_NAME_SZ 32 +#define IONIC_QOS_DSCP_MAX_VALUES 64 + +/** + * enum qos_class + */ +enum ionic_qos_class { + IONIC_QOS_CLASS_DEFAULT = 0, + IONIC_QOS_CLASS_USER_DEFINED_1 = 1, + IONIC_QOS_CLASS_USER_DEFINED_2 = 2, + IONIC_QOS_CLASS_USER_DEFINED_3 = 3, + IONIC_QOS_CLASS_USER_DEFINED_4 = 4, + IONIC_QOS_CLASS_USER_DEFINED_5 = 5, + IONIC_QOS_CLASS_USER_DEFINED_6 = 6, +}; + +/** + * enum qos_class_type - Traffic classification criteria + */ +enum ionic_qos_class_type { + IONIC_QOS_CLASS_TYPE_NONE = 0, + IONIC_QOS_CLASS_TYPE_PCP = 1, /* Dot1Q pcp */ + IONIC_QOS_CLASS_TYPE_DSCP = 2, /* IP dscp */ +}; + +/** + * enum qos_sched_type - Qos class scheduling type + */ +enum ionic_qos_sched_type { + IONIC_QOS_SCHED_TYPE_STRICT = 0, /* Strict priority */ + IONIC_QOS_SCHED_TYPE_DWRR = 1, /* Deficit weighted round-robin */ +}; + +/** + * union qos_config - Qos configuration structure + * @flags: Configuration flags + * IONIC_QOS_CONFIG_F_ENABLE enable + * IONIC_QOS_CONFIG_F_DROP drop/nodrop + * IONIC_QOS_CONFIG_F_RW_DOT1Q_PCP enable dot1q pcp rewrite + * IONIC_QOS_CONFIG_F_RW_IP_DSCP enable ip dscp rewrite + * @sched_type: Qos class scheduling type (enum qos_sched_type) + * @class_type: Qos class type (enum qos_class_type) + * @pause_type: Qos pause type (enum qos_pause_type) + * @name: Qos class name + * @mtu: MTU of the class + * @pfc_dot1q_pcp: Pcp value for pause frames (valid iff F_NODROP) + * @dwrr_weight: Qos class scheduling weight + * @strict_rlmt: Rate limit for strict priority scheduling + * @rw_dot1q_pcp: Rewrite dot1q pcp to this value (valid iff F_RW_DOT1Q_PCP) + * @rw_ip_dscp: Rewrite ip dscp to this value (valid iff F_RW_IP_DSCP) + * @dot1q_pcp: Dot1q pcp value + * @ndscp: Number of valid dscp values in the ip_dscp field + * @ip_dscp: IP dscp values + */ +union ionic_qos_config { + struct { +#define IONIC_QOS_CONFIG_F_ENABLE BIT(0) +#define IONIC_QOS_CONFIG_F_DROP BIT(1) +#define IONIC_QOS_CONFIG_F_RW_DOT1Q_PCP BIT(2) +#define IONIC_QOS_CONFIG_F_RW_IP_DSCP BIT(3) + u8 flags; + u8 sched_type; + u8 class_type; + u8 pause_type; + char name[IONIC_QOS_CLASS_NAME_SZ]; + __le32 mtu; + /* flow control */ + u8 pfc_cos; + /* scheduler */ + union { + u8 dwrr_weight; + __le64 strict_rlmt; + }; + /* marking */ + union { + u8 rw_dot1q_pcp; + u8 rw_ip_dscp; + }; + /* classification */ + union { + u8 dot1q_pcp; + struct { + u8 ndscp; + u8 ip_dscp[IONIC_QOS_DSCP_MAX_VALUES]; + }; + }; + }; + __le32 words[64]; +}; + +/** + * union qos_identity - QoS identity structure + * @version: Version of the identify structure + * @type: QoS 
system type
+ * @nclasses: Number of usable QoS classes
+ * @config: Current configuration of classes
+ */
+union ionic_qos_identity {
+ struct {
+ u8 version;
+ u8 type;
+ u8 rsvd[62];
+ union ionic_qos_config config[IONIC_QOS_CLASS_MAX];
+ };
+ __le32 words[512];
+};
+
+/**
+ * struct qos_init_cmd - QoS config init command
+ * @opcode: Opcode
+ * @group: Qos class id
+ * @info_pa: destination address for qos info
+ */
+struct ionic_qos_init_cmd {
+ u8 opcode;
+ u8 group;
+ u8 rsvd[6];
+ __le64 info_pa;
+ u8 rsvd1[48];
+};
+
+typedef struct ionic_admin_comp ionic_qos_init_comp;
+
+/**
+ * struct qos_reset_cmd - Qos config reset command
+ * @opcode: Opcode
+ */
+struct ionic_qos_reset_cmd {
+ u8 opcode;
+ u8 group;
+ u8 rsvd[62];
+};
+
+typedef struct ionic_admin_comp ionic_qos_reset_comp;
+
+/**
+ * struct fw_download_cmd - Firmware download command
+ * @opcode: opcode
+ * @addr: dma address of the firmware buffer
+ * @offset: offset of the firmware buffer within the full image
+ * @length: number of valid bytes in the firmware buffer
+ */
+struct ionic_fw_download_cmd {
+ u8 opcode;
+ u8 rsvd[3];
+ __le32 offset;
+ __le64 addr;
+ __le32 length;
+};
+
+typedef struct ionic_admin_comp ionic_fw_download_comp;
+
+enum ionic_fw_control_oper {
+ IONIC_FW_RESET = 0, /* Reset firmware */
+ IONIC_FW_INSTALL = 1, /* Install firmware */
+ IONIC_FW_ACTIVATE = 2, /* Activate firmware */
+};
+
+/**
+ * struct fw_control_cmd - Firmware control command
+ * @opcode: opcode
+ * @oper: firmware control operation (enum fw_control_oper)
+ * @slot: slot to activate
+ */
+struct ionic_fw_control_cmd {
+ u8 opcode;
+ u8 rsvd[3];
+ u8 oper;
+ u8 slot;
+ u8 rsvd1[58];
+};
+
+/**
+ * struct fw_control_comp - Firmware control completion
+ * @status: The status of the command (enum status_code)
+ * @slot: slot where the firmware was installed
+ */
+struct ionic_fw_control_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ u8 slot;
+ u8 rsvd1[10];
+ u8 color;
+};
+
+/******************************************************************
+ ******************* RDMA Commands ********************************
+ ******************************************************************/
+
+/**
+ * struct rdma_reset_cmd - Reset RDMA LIF cmd
+ * @opcode: opcode
+ * @lif_index: lif index
+ *
+ * There is no rdma specific dev command completion struct. Completion uses
+ * the common struct admin_comp. Only the status is indicated. Nonzero status
+ * means the LIF does not support rdma.
+ **/
+struct ionic_rdma_reset_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 lif_index;
+ u8 rsvd2[60];
+};
+
+/**
+ * struct rdma_queue_cmd - Create RDMA Queue command
+ * @opcode: opcode, 52, 53
+ * @lif_index: lif index
+ * @qid_ver: (qid | (rdma version << 24))
+ * @cid: intr, eq_id, or cq_id
+ * @dbid: doorbell page id
+ * @depth_log2: log base two of queue depth
+ * @stride_log2: log base two of queue stride
+ * @dma_addr: address of the queue memory
+ * @xxx_table_index: temporary, but should not need pgtbl for contig. queues.
+ *
+ * The same command struct is used to create an rdma event queue, completion
+ * queue, or rdma admin queue. The cid is an interrupt number for an event
+ * queue, an event queue id for a completion queue, or a completion queue id
+ * for an rdma admin queue.
+ *
+ * The queue created via a dev command must be contiguous in dma space.
+ *
+ * The dev commands are intended only to be used during driver initialization,
+ * to create queues supporting the rdma admin queue.
Other queues, and other + * types of rdma resources like memory regions, will be created and registered + * via the rdma admin queue, and will support a more complete interface + * providing scatter gather lists for larger, scattered queue buffers and + * memory registration. + * + * There is no rdma specific dev command completion struct. Completion uses + * the common struct admin_comp. Only the status is indicated. + **/ +struct ionic_rdma_queue_cmd { + u8 opcode; + u8 rsvd; + __le16 lif_index; + __le32 qid_ver; + __le32 cid; + __le16 dbid; + u8 depth_log2; + u8 stride_log2; + __le64 dma_addr; + u8 rsvd2[36]; + __le32 xxx_table_index; +}; + +/****************************************************************** + ******************* Notify Events ******************************** + ******************************************************************/ + +/** + * struct notifyq_event + * @eid: event number + * @ecode: event code + * @data: unspecified data about the event + * + * This is the generic event report struct from which the other + * actual events will be formed. + */ +struct ionic_notifyq_event { + __le64 eid; + __le16 ecode; + u8 data[54]; +}; + +/** + * struct link_change_event + * @eid: event number + * @ecode: event code = EVENT_OPCODE_LINK_CHANGE + * @link_status: link up or down, with error bits (enum port_status) + * @link_speed: speed of the network link + * + * Sent when the network link state changes between UP and DOWN + */ +struct ionic_link_change_event { + __le64 eid; + __le16 ecode; + __le16 link_status; + __le32 link_speed; /* units of 1Mbps: e.g. 10000 = 10Gbps */ + u8 rsvd[48]; +}; + +/** + * struct reset_event + * @eid: event number + * @ecode: event code = EVENT_OPCODE_RESET + * @reset_code: reset type + * @state: 0=pending, 1=complete, 2=error + * + * Sent when the NIC or some subsystem is going to be or + * has been reset. + */ +struct ionic_reset_event { + __le64 eid; + __le16 ecode; + u8 reset_code; + u8 state; + u8 rsvd[52]; +}; + +/** + * struct heartbeat_event + * @eid: event number + * @ecode: event code = EVENT_OPCODE_HEARTBEAT + * + * Sent periodically by the NIC to indicate continued health + */ +struct ionic_heartbeat_event { + __le64 eid; + __le16 ecode; + u8 rsvd[54]; +}; + +/** + * struct log_event + * @eid: event number + * @ecode: event code = EVENT_OPCODE_LOG + * @data: log data + * + * Sent to notify the driver of an internal error. 
+ */ +struct ionic_log_event { + __le64 eid; + __le16 ecode; + u8 data[54]; +}; + +/** + * struct port_stats + */ +struct ionic_port_stats { + __le64 frames_rx_ok; + __le64 frames_rx_all; + __le64 frames_rx_bad_fcs; + __le64 frames_rx_bad_all; + __le64 octets_rx_ok; + __le64 octets_rx_all; + __le64 frames_rx_unicast; + __le64 frames_rx_multicast; + __le64 frames_rx_broadcast; + __le64 frames_rx_pause; + __le64 frames_rx_bad_length; + __le64 frames_rx_undersized; + __le64 frames_rx_oversized; + __le64 frames_rx_fragments; + __le64 frames_rx_jabber; + __le64 frames_rx_pripause; + __le64 frames_rx_stomped_crc; + __le64 frames_rx_too_long; + __le64 frames_rx_vlan_good; + __le64 frames_rx_dropped; + __le64 frames_rx_less_than_64b; + __le64 frames_rx_64b; + __le64 frames_rx_65b_127b; + __le64 frames_rx_128b_255b; + __le64 frames_rx_256b_511b; + __le64 frames_rx_512b_1023b; + __le64 frames_rx_1024b_1518b; + __le64 frames_rx_1519b_2047b; + __le64 frames_rx_2048b_4095b; + __le64 frames_rx_4096b_8191b; + __le64 frames_rx_8192b_9215b; + __le64 frames_rx_other; + __le64 frames_tx_ok; + __le64 frames_tx_all; + __le64 frames_tx_bad; + __le64 octets_tx_ok; + __le64 octets_tx_total; + __le64 frames_tx_unicast; + __le64 frames_tx_multicast; + __le64 frames_tx_broadcast; + __le64 frames_tx_pause; + __le64 frames_tx_pripause; + __le64 frames_tx_vlan; + __le64 frames_tx_less_than_64b; + __le64 frames_tx_64b; + __le64 frames_tx_65b_127b; + __le64 frames_tx_128b_255b; + __le64 frames_tx_256b_511b; + __le64 frames_tx_512b_1023b; + __le64 frames_tx_1024b_1518b; + __le64 frames_tx_1519b_2047b; + __le64 frames_tx_2048b_4095b; + __le64 frames_tx_4096b_8191b; + __le64 frames_tx_8192b_9215b; + __le64 frames_tx_other; + __le64 frames_tx_pri_0; + __le64 frames_tx_pri_1; + __le64 frames_tx_pri_2; + __le64 frames_tx_pri_3; + __le64 frames_tx_pri_4; + __le64 frames_tx_pri_5; + __le64 frames_tx_pri_6; + __le64 frames_tx_pri_7; + __le64 frames_rx_pri_0; + __le64 frames_rx_pri_1; + __le64 frames_rx_pri_2; + __le64 frames_rx_pri_3; + __le64 frames_rx_pri_4; + __le64 frames_rx_pri_5; + __le64 frames_rx_pri_6; + __le64 frames_rx_pri_7; + __le64 tx_pripause_0_1us_count; + __le64 tx_pripause_1_1us_count; + __le64 tx_pripause_2_1us_count; + __le64 tx_pripause_3_1us_count; + __le64 tx_pripause_4_1us_count; + __le64 tx_pripause_5_1us_count; + __le64 tx_pripause_6_1us_count; + __le64 tx_pripause_7_1us_count; + __le64 rx_pripause_0_1us_count; + __le64 rx_pripause_1_1us_count; + __le64 rx_pripause_2_1us_count; + __le64 rx_pripause_3_1us_count; + __le64 rx_pripause_4_1us_count; + __le64 rx_pripause_5_1us_count; + __le64 rx_pripause_6_1us_count; + __le64 rx_pripause_7_1us_count; + __le64 rx_pause_1us_count; + __le64 frames_tx_truncated; +}; + +struct ionic_mgmt_port_stats { + __le64 frames_rx_ok; + __le64 frames_rx_all; + __le64 frames_rx_bad_fcs; + __le64 frames_rx_bad_all; + __le64 octets_rx_ok; + __le64 octets_rx_all; + __le64 frames_rx_unicast; + __le64 frames_rx_multicast; + __le64 frames_rx_broadcast; + __le64 frames_rx_pause; + __le64 frames_rx_bad_length0; + __le64 frames_rx_undersized1; + __le64 frames_rx_oversized2; + __le64 frames_rx_fragments3; + __le64 frames_rx_jabber4; + __le64 frames_rx_64b5; + __le64 frames_rx_65b_127b6; + __le64 frames_rx_128b_255b7; + __le64 frames_rx_256b_511b8; + __le64 frames_rx_512b_1023b9; + __le64 frames_rx_1024b_1518b0; + __le64 frames_rx_gt_1518b1; + __le64 frames_rx_fifo_full2; + __le64 frames_tx_ok3; + __le64 frames_tx_all4; + __le64 frames_tx_bad5; + __le64 octets_tx_ok6; + __le64 
+	__le64 octets_tx_total;
+	__le64 frames_tx_unicast;
+	__le64 frames_tx_multicast;
+	__le64 frames_tx_broadcast;
+	__le64 frames_tx_pause;
+};
+
+/**
+ * struct port_identity - port identity structure
+ * @version: identity structure version
+ * @type: type of port (enum port_type)
+ * @num_lanes: number of lanes for the port
+ * @autoneg: autoneg supported
+ * @min_frame_size: minimum frame size supported
+ * @max_frame_size: maximum frame size supported
+ * @fec_type: supported fec types
+ * @pause_type: supported pause types
+ * @loopback_mode: supported loopback mode
+ * @speeds: supported speeds
+ * @config: current port configuration
+ */
+union ionic_port_identity {
+	struct {
+		u8 version;
+		u8 type;
+		u8 num_lanes;
+		u8 autoneg;
+		__le32 min_frame_size;
+		__le32 max_frame_size;
+		u8 fec_type[4];
+		u8 pause_type[2];
+		u8 loopback_mode[2];
+		__le32 speeds[16];
+		u8 rsvd2[44];
+		union ionic_port_config config;
+	};
+	__le32 words[512];
+};
+
+/**
+ * struct port_info - port info structure
+ * @config: port configuration
+ * @status: port status
+ * @stats: port stats
+ */
+struct ionic_port_info {
+	union ionic_port_config config;
+	struct ionic_port_status status;
+	struct ionic_port_stats stats;
+};
+
+/**
+ * struct lif_stats
+ */
+struct ionic_lif_stats {
+	/* RX */
+	__le64 rx_ucast_bytes;
+	__le64 rx_ucast_packets;
+	__le64 rx_mcast_bytes;
+	__le64 rx_mcast_packets;
+	__le64 rx_bcast_bytes;
+	__le64 rx_bcast_packets;
+	__le64 rsvd0;
+	__le64 rsvd1;
+	/* RX drops */
+	__le64 rx_ucast_drop_bytes;
+	__le64 rx_ucast_drop_packets;
+	__le64 rx_mcast_drop_bytes;
+	__le64 rx_mcast_drop_packets;
+	__le64 rx_bcast_drop_bytes;
+	__le64 rx_bcast_drop_packets;
+	__le64 rx_dma_error;
+	__le64 rsvd2;
+	/* TX */
+	__le64 tx_ucast_bytes;
+	__le64 tx_ucast_packets;
+	__le64 tx_mcast_bytes;
+	__le64 tx_mcast_packets;
+	__le64 tx_bcast_bytes;
+	__le64 tx_bcast_packets;
+	__le64 rsvd3;
+	__le64 rsvd4;
+	/* TX drops */
+	__le64 tx_ucast_drop_bytes;
+	__le64 tx_ucast_drop_packets;
+	__le64 tx_mcast_drop_bytes;
+	__le64 tx_mcast_drop_packets;
+	__le64 tx_bcast_drop_bytes;
+	__le64 tx_bcast_drop_packets;
+	__le64 tx_dma_error;
+	__le64 rsvd5;
+	/* Rx Queue/Ring drops */
+	__le64 rx_queue_disabled;
+	__le64 rx_queue_empty;
+	__le64 rx_queue_error;
+	__le64 rx_desc_fetch_error;
+	__le64 rx_desc_data_error;
+	__le64 rsvd6;
+	__le64 rsvd7;
+	__le64 rsvd8;
+	/* Tx Queue/Ring drops */
+	__le64 tx_queue_disabled;
+	__le64 tx_queue_error;
+	__le64 tx_desc_fetch_error;
+	__le64 tx_desc_data_error;
+	__le64 rsvd9;
+	__le64 rsvd10;
+	__le64 rsvd11;
+	__le64 rsvd12;
+
+	/* RDMA/ROCE TX */
+	__le64 tx_rdma_ucast_bytes;
+	__le64 tx_rdma_ucast_packets;
+	__le64 tx_rdma_mcast_bytes;
+	__le64 tx_rdma_mcast_packets;
+	__le64 tx_rdma_cnp_packets;
+	__le64 rsvd13;
+	__le64 rsvd14;
+	__le64 rsvd15;
+
+	/* RDMA/ROCE RX */
+	__le64 rx_rdma_ucast_bytes;
+	__le64 rx_rdma_ucast_packets;
+	__le64 rx_rdma_mcast_bytes;
+	__le64 rx_rdma_mcast_packets;
+	__le64 rx_rdma_cnp_packets;
+	__le64 rx_rdma_ecn_packets;
+	__le64 rsvd16;
+	__le64 rsvd17;
+
+	__le64 rsvd18;
+	__le64 rsvd19;
+	__le64 rsvd20;
+	__le64 rsvd21;
+	__le64 rsvd22;
+	__le64 rsvd23;
+	__le64 rsvd24;
+	__le64 rsvd25;
+
+	__le64 rsvd26;
+	__le64 rsvd27;
+	__le64 rsvd28;
+	__le64 rsvd29;
+	__le64 rsvd30;
+	__le64 rsvd31;
+	__le64 rsvd32;
+	__le64 rsvd33;
+
+	__le64 rsvd34;
+	__le64 rsvd35;
+	__le64 rsvd36;
+	__le64 rsvd37;
+	__le64 rsvd38;
+	__le64 rsvd39;
+	__le64 rsvd40;
+	__le64 rsvd41;
+
+	__le64 rsvd42;
+	__le64 rsvd43;
+	__le64 rsvd44;
+	__le64 rsvd45;
+	__le64 rsvd46;
+	__le64 rsvd47;
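+	/* (reserved words pad the stats block out to byte offset 768,
+	 * where the RDMA/ROCE debug counters below begin)
+	 */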
+	__le64 rsvd48;
+	__le64 rsvd49;
+
+	/* RDMA/ROCE REQ Error/Debugs (768 - 895) */
+	__le64 rdma_req_rx_pkt_seq_err;
+	__le64 rdma_req_rx_rnr_retry_err;
+	__le64 rdma_req_rx_remote_access_err;
+	__le64 rdma_req_rx_remote_inv_req_err;
+	__le64 rdma_req_rx_remote_oper_err;
+	__le64 rdma_req_rx_implied_nak_seq_err;
+	__le64 rdma_req_rx_cqe_err;
+	__le64 rdma_req_rx_cqe_flush_err;
+
+	__le64 rdma_req_rx_dup_responses;
+	__le64 rdma_req_rx_invalid_packets;
+	__le64 rdma_req_tx_local_access_err;
+	__le64 rdma_req_tx_local_oper_err;
+	__le64 rdma_req_tx_memory_mgmt_err;
+	__le64 rsvd52;
+	__le64 rsvd53;
+	__le64 rsvd54;
+
+	/* RDMA/ROCE RESP Error/Debugs (896 - 1023) */
+	__le64 rdma_resp_rx_dup_requests;
+	__le64 rdma_resp_rx_out_of_buffer;
+	__le64 rdma_resp_rx_out_of_seq_pkts;
+	__le64 rdma_resp_rx_cqe_err;
+	__le64 rdma_resp_rx_cqe_flush_err;
+	__le64 rdma_resp_rx_local_len_err;
+	__le64 rdma_resp_rx_inv_request_err;
+	__le64 rdma_resp_rx_local_qp_oper_err;
+
+	__le64 rdma_resp_rx_out_of_atomic_resource;
+	__le64 rdma_resp_tx_pkt_seq_err;
+	__le64 rdma_resp_tx_remote_inv_req_err;
+	__le64 rdma_resp_tx_remote_access_err;
+	__le64 rdma_resp_tx_remote_oper_err;
+	__le64 rdma_resp_tx_rnr_retry_err;
+	__le64 rsvd57;
+	__le64 rsvd58;
+};
+
+/**
+ * struct lif_info - lif info structure
+ */
+struct ionic_lif_info {
+	union ionic_lif_config config;
+	struct ionic_lif_status status;
+	struct ionic_lif_stats stats;
+};
+
+union ionic_dev_cmd {
+	u32 words[16];
+	struct ionic_admin_cmd cmd;
+	struct ionic_nop_cmd nop;
+
+	struct ionic_dev_identify_cmd identify;
+	struct ionic_dev_init_cmd init;
+	struct ionic_dev_reset_cmd reset;
+	struct ionic_dev_getattr_cmd getattr;
+	struct ionic_dev_setattr_cmd setattr;
+
+	struct ionic_port_identify_cmd port_identify;
+	struct ionic_port_init_cmd port_init;
+	struct ionic_port_reset_cmd port_reset;
+	struct ionic_port_getattr_cmd port_getattr;
+	struct ionic_port_setattr_cmd port_setattr;
+
+	struct ionic_lif_identify_cmd lif_identify;
+	struct ionic_lif_init_cmd lif_init;
+	struct ionic_lif_reset_cmd lif_reset;
+
+	struct ionic_qos_identify_cmd qos_identify;
+	struct ionic_qos_init_cmd qos_init;
+	struct ionic_qos_reset_cmd qos_reset;
+
+	struct ionic_q_init_cmd q_init;
+};
+
+union ionic_dev_cmd_comp {
+	u32 words[4];
+	u8 status;
+	struct ionic_admin_comp comp;
+	struct ionic_nop_comp nop;
+
+	struct ionic_dev_identify_comp identify;
+	struct ionic_dev_init_comp init;
+	struct ionic_dev_reset_comp reset;
+	struct ionic_dev_getattr_comp getattr;
+	struct ionic_dev_setattr_comp setattr;
+
+	struct ionic_port_identify_comp port_identify;
+	struct ionic_port_init_comp port_init;
+	struct ionic_port_reset_comp port_reset;
+	struct ionic_port_getattr_comp port_getattr;
+	struct ionic_port_setattr_comp port_setattr;
+
+	struct ionic_lif_identify_comp lif_identify;
+	struct ionic_lif_init_comp lif_init;
+	struct ionic_lif_reset_comp lif_reset;
+
+	struct ionic_qos_identify_comp qos_identify;
+	struct ionic_qos_init_comp qos_init;
+	struct ionic_qos_reset_comp qos_reset;
+
+	struct ionic_q_init_comp q_init;
+};
+
+/**
+ * union dev_info - Device info register format (read-only)
+ * @signature: Signature value of 0x44455649 ('DEVI').
+ * @version: Current version of info.
+ * @asic_type: Asic type.
+ * @asic_rev: Asic revision.
+ * @fw_status: Firmware status.
+ * @fw_heartbeat: Firmware heartbeat counter.
+ * @serial_num: Serial number.
+ * @fw_version: Firmware version.
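+ *
+ * The fw_heartbeat counter advances while firmware is running, so a
+ * driver can poll it (together with fw_status) to detect a wedged
+ * device.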
+ */
+union ionic_dev_info_regs {
+#define IONIC_DEVINFO_FWVERS_BUFLEN 32
+#define IONIC_DEVINFO_SERIAL_BUFLEN 32
+	struct {
+		u32 signature;
+		u8 version;
+		u8 asic_type;
+		u8 asic_rev;
+		u8 fw_status;
+		u32 fw_heartbeat;
+		char fw_version[IONIC_DEVINFO_FWVERS_BUFLEN];
+		char serial_num[IONIC_DEVINFO_SERIAL_BUFLEN];
+	};
+	u32 words[512];
+};
+
+/**
+ * union dev_cmd_regs - Device command register format (read-write)
+ * @doorbell: Device Cmd Doorbell, write-only.
+ *            Write a 1 to signal device to process cmd,
+ *            poll done for completion.
+ * @done: Done indicator, bit 0 == 1 when command is complete.
+ * @cmd: Opcode-specific command bytes
+ * @comp: Opcode-specific response bytes
+ * @data: Opcode-specific side-data
+ */
+union ionic_dev_cmd_regs {
+	struct {
+		u32 doorbell;
+		u32 done;
+		union ionic_dev_cmd cmd;
+		union ionic_dev_cmd_comp comp;
+		u8 rsvd[48];
+		u32 data[478];
+	};
+	u32 words[512];
+};
+
+/**
+ * union dev_regs - Device register format for bar 0 page 0
+ * @info: Device info registers
+ * @devcmd: Device command registers
+ */
+union ionic_dev_regs {
+	struct {
+		union ionic_dev_info_regs info;
+		union ionic_dev_cmd_regs devcmd;
+	};
+	__le32 words[1024];
+};
+
+union ionic_adminq_cmd {
+	struct ionic_admin_cmd cmd;
+	struct ionic_nop_cmd nop;
+	struct ionic_q_init_cmd q_init;
+	struct ionic_q_control_cmd q_control;
+	struct ionic_lif_setattr_cmd lif_setattr;
+	struct ionic_lif_getattr_cmd lif_getattr;
+	struct ionic_rx_mode_set_cmd rx_mode_set;
+	struct ionic_rx_filter_add_cmd rx_filter_add;
+	struct ionic_rx_filter_del_cmd rx_filter_del;
+	struct ionic_rdma_reset_cmd rdma_reset;
+	struct ionic_rdma_queue_cmd rdma_queue;
+	struct ionic_fw_download_cmd fw_download;
+	struct ionic_fw_control_cmd fw_control;
+};
+
+union ionic_adminq_comp {
+	struct ionic_admin_comp comp;
+	struct ionic_nop_comp nop;
+	struct ionic_q_init_comp q_init;
+	struct ionic_lif_setattr_comp lif_setattr;
+	struct ionic_lif_getattr_comp lif_getattr;
+	struct ionic_rx_filter_add_comp rx_filter_add;
+	struct ionic_fw_control_comp fw_control;
+};
+
+#define IONIC_BARS_MAX			6
+#define IONIC_PCI_BAR_DBELL		1
+
+/* BAR0 */
+#define IONIC_BAR0_SIZE			0x8000
+
+#define IONIC_BAR0_DEV_INFO_REGS_OFFSET	0x0000
+#define IONIC_BAR0_DEV_CMD_REGS_OFFSET	0x0800
+#define IONIC_BAR0_DEV_CMD_DATA_REGS_OFFSET	0x0c00
+#define IONIC_BAR0_INTR_STATUS_OFFSET	0x1000
+#define IONIC_BAR0_INTR_CTRL_OFFSET	0x2000
+#define IONIC_DEV_CMD_DONE		0x00000001
+
+#define IONIC_ASIC_TYPE_CAPRI		0
+
+/**
+ * struct doorbell - Doorbell register layout
+ * @p_index: Producer index
+ * @ring: Selects the specific ring of the queue to update.
+ *        Type-specific meaning:
+ *           ring=0: Default producer/consumer queue.
+ *           ring=1: (CQ, EQ) Re-Arm queue. RDMA CQs
+ *           send events to EQs when armed. EQs send
+ *           interrupts when armed.
+ * @qid: The queue id selects the queue destination for the
+ *       producer index and flags.
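+ *
+ * The qid is carried as a 24-bit value split across the qid_lo and
+ * qid_hi fields of struct ionic_doorbell below. A minimal sketch of
+ * ringing a doorbell, illustrative only (prod_idx, qid and db_page
+ * are placeholder names, not part of this patch); the 8-byte struct
+ * is written to the queue type's slot as a single 64-bit register:
+ *
+ *	struct ionic_doorbell db = {
+ *		.p_index = cpu_to_le16(prod_idx),
+ *		.ring = 0,
+ *		.qid_lo = qid & 0xff,
+ *		.qid_hi = cpu_to_le16(qid >> 8),
+ *	};
+ *	writeq(*(u64 *)&db, &db_page[q_hw_type]);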
+ */ +struct ionic_doorbell { + __le16 p_index; + u8 ring; + u8 qid_lo; + __le16 qid_hi; + u16 rsvd2; +}; + +struct ionic_intr_status { + u32 status[2]; +}; + +struct ionic_notifyq_cmd { + __le32 data; /* Not used but needed for qcq structure */ +}; + +union ionic_notifyq_comp { + struct ionic_notifyq_event event; + struct ionic_link_change_event link_change; + struct ionic_reset_event reset; + struct ionic_heartbeat_event heartbeat; + struct ionic_log_event log; +}; + +/* Deprecate */ +struct ionic_identity { + union ionic_drv_identity drv; + union ionic_dev_identity dev; + union ionic_lif_identity lif; + union ionic_port_identity port; + union ionic_qos_identity qos; +}; + +#pragma pack(pop) + +#endif /* _IONIC_IF_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c new file mode 100644 index 000000000000..db7c82742828 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -0,0 +1,2274 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/rtnetlink.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/cpumask.h> + +#include "ionic.h" +#include "ionic_bus.h" +#include "ionic_lif.h" +#include "ionic_txrx.h" +#include "ionic_ethtool.h" +#include "ionic_debugfs.h" + +static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode); +static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr); +static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr); +static void ionic_link_status_check(struct ionic_lif *lif); + +static void ionic_lif_deferred_work(struct work_struct *work) +{ + struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work); + struct ionic_deferred *def = &lif->deferred; + struct ionic_deferred_work *w = NULL; + + spin_lock_bh(&def->lock); + if (!list_empty(&def->list)) { + w = list_first_entry(&def->list, + struct ionic_deferred_work, list); + list_del(&w->list); + } + spin_unlock_bh(&def->lock); + + if (w) { + switch (w->type) { + case IONIC_DW_TYPE_RX_MODE: + ionic_lif_rx_mode(lif, w->rx_mode); + break; + case IONIC_DW_TYPE_RX_ADDR_ADD: + ionic_lif_addr_add(lif, w->addr); + break; + case IONIC_DW_TYPE_RX_ADDR_DEL: + ionic_lif_addr_del(lif, w->addr); + break; + case IONIC_DW_TYPE_LINK_STATUS: + ionic_link_status_check(lif); + break; + default: + break; + } + kfree(w); + schedule_work(&def->work); + } +} + +static void ionic_lif_deferred_enqueue(struct ionic_deferred *def, + struct ionic_deferred_work *work) +{ + spin_lock_bh(&def->lock); + list_add_tail(&work->list, &def->list); + spin_unlock_bh(&def->lock); + schedule_work(&def->work); +} + +static void ionic_link_status_check(struct ionic_lif *lif) +{ + struct net_device *netdev = lif->netdev; + u16 link_status; + bool link_up; + + link_status = le16_to_cpu(lif->info->status.link_status); + link_up = link_status == IONIC_PORT_OPER_STATUS_UP; + + /* filter out the no-change cases */ + if (link_up == netif_carrier_ok(netdev)) + goto link_out; + + if (link_up) { + netdev_info(netdev, "Link up - %d Gbps\n", + le32_to_cpu(lif->info->status.link_speed) / 1000); + + if (test_bit(IONIC_LIF_UP, lif->state)) { + netif_tx_wake_all_queues(lif->netdev); + netif_carrier_on(netdev); + } + } else { + netdev_info(netdev, "Link down\n"); + + /* carrier off first to avoid watchdog timeout */ + netif_carrier_off(netdev); + if (test_bit(IONIC_LIF_UP, lif->state)) + 
netif_tx_stop_all_queues(netdev); + } + +link_out: + clear_bit(IONIC_LIF_LINK_CHECK_REQUESTED, lif->state); +} + +static void ionic_link_status_check_request(struct ionic_lif *lif) +{ + struct ionic_deferred_work *work; + + /* we only need one request outstanding at a time */ + if (test_and_set_bit(IONIC_LIF_LINK_CHECK_REQUESTED, lif->state)) + return; + + if (in_interrupt()) { + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) + return; + + work->type = IONIC_DW_TYPE_LINK_STATUS; + ionic_lif_deferred_enqueue(&lif->deferred, work); + } else { + ionic_link_status_check(lif); + } +} + +static irqreturn_t ionic_isr(int irq, void *data) +{ + struct napi_struct *napi = data; + + napi_schedule_irqoff(napi); + + return IRQ_HANDLED; +} + +static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq) +{ + struct ionic_intr_info *intr = &qcq->intr; + struct device *dev = lif->ionic->dev; + struct ionic_queue *q = &qcq->q; + const char *name; + + if (lif->registered) + name = lif->netdev->name; + else + name = dev_name(dev); + + snprintf(intr->name, sizeof(intr->name), + "%s-%s-%s", IONIC_DRV_NAME, name, q->name); + + return devm_request_irq(dev, intr->vector, ionic_isr, + 0, intr->name, &qcq->napi); +} + +static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr) +{ + struct ionic *ionic = lif->ionic; + int index; + + index = find_first_zero_bit(ionic->intrs, ionic->nintrs); + if (index == ionic->nintrs) { + netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n", + __func__, index, ionic->nintrs); + return -ENOSPC; + } + + set_bit(index, ionic->intrs); + ionic_intr_init(&ionic->idev, intr, index); + + return 0; +} + +static void ionic_intr_free(struct ionic_lif *lif, int index) +{ + if (index != INTR_INDEX_NOT_ASSIGNED && index < lif->ionic->nintrs) + clear_bit(index, lif->ionic->intrs); +} + +static int ionic_qcq_enable(struct ionic_qcq *qcq) +{ + struct ionic_queue *q = &qcq->q; + struct ionic_lif *lif = q->lif; + struct ionic_dev *idev; + struct device *dev; + + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.q_control = { + .opcode = IONIC_CMD_Q_CONTROL, + .lif_index = cpu_to_le16(lif->index), + .type = q->type, + .index = cpu_to_le32(q->index), + .oper = IONIC_Q_ENABLE, + }, + }; + + idev = &lif->ionic->idev; + dev = lif->ionic->dev; + + dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n", + ctx.cmd.q_control.index, ctx.cmd.q_control.type); + + if (qcq->flags & IONIC_QCQ_F_INTR) { + irq_set_affinity_hint(qcq->intr.vector, + &qcq->intr.affinity_mask); + napi_enable(&qcq->napi); + ionic_intr_clean(idev->intr_ctrl, qcq->intr.index); + ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, + IONIC_INTR_MASK_CLEAR); + } + + return ionic_adminq_post_wait(lif, &ctx); +} + +static int ionic_qcq_disable(struct ionic_qcq *qcq) +{ + struct ionic_queue *q = &qcq->q; + struct ionic_lif *lif = q->lif; + struct ionic_dev *idev; + struct device *dev; + + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.q_control = { + .opcode = IONIC_CMD_Q_CONTROL, + .lif_index = cpu_to_le16(lif->index), + .type = q->type, + .index = cpu_to_le32(q->index), + .oper = IONIC_Q_DISABLE, + }, + }; + + idev = &lif->ionic->idev; + dev = lif->ionic->dev; + + dev_dbg(dev, "q_disable.index %d q_disable.qtype %d\n", + ctx.cmd.q_control.index, ctx.cmd.q_control.type); + + if (qcq->flags & IONIC_QCQ_F_INTR) { + ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, + IONIC_INTR_MASK_SET); + 
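+		/* wait out any in-flight handler and stop napi before
+		 * the Q_CONTROL disable is posted to the AdminQ below
+		 */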
synchronize_irq(qcq->intr.vector); + irq_set_affinity_hint(qcq->intr.vector, NULL); + napi_disable(&qcq->napi); + } + + return ionic_adminq_post_wait(lif, &ctx); +} + +static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq) +{ + struct ionic_dev *idev = &lif->ionic->idev; + struct device *dev = lif->ionic->dev; + + if (!qcq) + return; + + ionic_debugfs_del_qcq(qcq); + + if (!(qcq->flags & IONIC_QCQ_F_INITED)) + return; + + if (qcq->flags & IONIC_QCQ_F_INTR) { + ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, + IONIC_INTR_MASK_SET); + devm_free_irq(dev, qcq->intr.vector, &qcq->napi); + netif_napi_del(&qcq->napi); + } + + qcq->flags &= ~IONIC_QCQ_F_INITED; +} + +static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq) +{ + struct device *dev = lif->ionic->dev; + + if (!qcq) + return; + + dma_free_coherent(dev, qcq->total_size, qcq->base, qcq->base_pa); + qcq->base = NULL; + qcq->base_pa = 0; + + if (qcq->flags & IONIC_QCQ_F_INTR) + ionic_intr_free(lif, qcq->intr.index); + + devm_kfree(dev, qcq->cq.info); + qcq->cq.info = NULL; + devm_kfree(dev, qcq->q.info); + qcq->q.info = NULL; + devm_kfree(dev, qcq); +} + +static void ionic_qcqs_free(struct ionic_lif *lif) +{ + struct device *dev = lif->ionic->dev; + unsigned int i; + + if (lif->notifyqcq) { + ionic_qcq_free(lif, lif->notifyqcq); + lif->notifyqcq = NULL; + } + + if (lif->adminqcq) { + ionic_qcq_free(lif, lif->adminqcq); + lif->adminqcq = NULL; + } + + for (i = 0; i < lif->nxqs; i++) + if (lif->rxqcqs[i].stats) + devm_kfree(dev, lif->rxqcqs[i].stats); + + devm_kfree(dev, lif->rxqcqs); + lif->rxqcqs = NULL; + + for (i = 0; i < lif->nxqs; i++) + if (lif->txqcqs[i].stats) + devm_kfree(dev, lif->txqcqs[i].stats); + + devm_kfree(dev, lif->txqcqs); + lif->txqcqs = NULL; +} + +static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq, + struct ionic_qcq *n_qcq) +{ + if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) { + ionic_intr_free(n_qcq->cq.lif, n_qcq->intr.index); + n_qcq->flags &= ~IONIC_QCQ_F_INTR; + } + + n_qcq->intr.vector = src_qcq->intr.vector; + n_qcq->intr.index = src_qcq->intr.index; +} + +static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, + unsigned int index, + const char *name, unsigned int flags, + unsigned int num_descs, unsigned int desc_size, + unsigned int cq_desc_size, + unsigned int sg_desc_size, + unsigned int pid, struct ionic_qcq **qcq) +{ + struct ionic_dev *idev = &lif->ionic->idev; + u32 q_size, cq_size, sg_size, total_size; + struct device *dev = lif->ionic->dev; + void *q_base, *cq_base, *sg_base; + dma_addr_t cq_base_pa = 0; + dma_addr_t sg_base_pa = 0; + dma_addr_t q_base_pa = 0; + struct ionic_qcq *new; + int err; + + *qcq = NULL; + + q_size = num_descs * desc_size; + cq_size = num_descs * cq_desc_size; + sg_size = num_descs * sg_desc_size; + + total_size = ALIGN(q_size, PAGE_SIZE) + ALIGN(cq_size, PAGE_SIZE); + /* Note: aligning q_size/cq_size is not enough due to cq_base + * address aligning as q_base could be not aligned to the page. + * Adding PAGE_SIZE. 
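+	 * (Worst case: q_base starts just past a page boundary, so
+	 * rounding cq_base up to the next page adds nearly a full
+	 * PAGE_SIZE, which the extra page covers.)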
+ */ + total_size += PAGE_SIZE; + if (flags & IONIC_QCQ_F_SG) { + total_size += ALIGN(sg_size, PAGE_SIZE); + total_size += PAGE_SIZE; + } + + new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL); + if (!new) { + netdev_err(lif->netdev, "Cannot allocate queue structure\n"); + err = -ENOMEM; + goto err_out; + } + + new->flags = flags; + + new->q.info = devm_kzalloc(dev, sizeof(*new->q.info) * num_descs, + GFP_KERNEL); + if (!new->q.info) { + netdev_err(lif->netdev, "Cannot allocate queue info\n"); + err = -ENOMEM; + goto err_out; + } + + new->q.type = type; + + err = ionic_q_init(lif, idev, &new->q, index, name, num_descs, + desc_size, sg_desc_size, pid); + if (err) { + netdev_err(lif->netdev, "Cannot initialize queue\n"); + goto err_out; + } + + if (flags & IONIC_QCQ_F_INTR) { + err = ionic_intr_alloc(lif, &new->intr); + if (err) { + netdev_warn(lif->netdev, "no intr for %s: %d\n", + name, err); + goto err_out; + } + + err = ionic_bus_get_irq(lif->ionic, new->intr.index); + if (err < 0) { + netdev_warn(lif->netdev, "no vector for %s: %d\n", + name, err); + goto err_out_free_intr; + } + new->intr.vector = err; + ionic_intr_mask_assert(idev->intr_ctrl, new->intr.index, + IONIC_INTR_MASK_SET); + + new->intr.cpu = new->intr.index % num_online_cpus(); + if (cpu_online(new->intr.cpu)) + cpumask_set_cpu(new->intr.cpu, + &new->intr.affinity_mask); + } else { + new->intr.index = INTR_INDEX_NOT_ASSIGNED; + } + + new->cq.info = devm_kzalloc(dev, sizeof(*new->cq.info) * num_descs, + GFP_KERNEL); + if (!new->cq.info) { + netdev_err(lif->netdev, "Cannot allocate completion queue info\n"); + err = -ENOMEM; + goto err_out_free_intr; + } + + err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size); + if (err) { + netdev_err(lif->netdev, "Cannot initialize completion queue\n"); + goto err_out_free_intr; + } + + new->base = dma_alloc_coherent(dev, total_size, &new->base_pa, + GFP_KERNEL); + if (!new->base) { + netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n"); + err = -ENOMEM; + goto err_out_free_intr; + } + + new->total_size = total_size; + + q_base = new->base; + q_base_pa = new->base_pa; + + cq_base = (void *)ALIGN((uintptr_t)q_base + q_size, PAGE_SIZE); + cq_base_pa = ALIGN(q_base_pa + q_size, PAGE_SIZE); + + if (flags & IONIC_QCQ_F_SG) { + sg_base = (void *)ALIGN((uintptr_t)cq_base + cq_size, + PAGE_SIZE); + sg_base_pa = ALIGN(cq_base_pa + cq_size, PAGE_SIZE); + ionic_q_sg_map(&new->q, sg_base, sg_base_pa); + } + + ionic_q_map(&new->q, q_base, q_base_pa); + ionic_cq_map(&new->cq, cq_base, cq_base_pa); + ionic_cq_bind(&new->cq, &new->q); + + *qcq = new; + + return 0; + +err_out_free_intr: + ionic_intr_free(lif, new->intr.index); +err_out: + dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err); + return err; +} + +static int ionic_qcqs_alloc(struct ionic_lif *lif) +{ + struct device *dev = lif->ionic->dev; + unsigned int q_list_size; + unsigned int flags; + int err; + int i; + + flags = IONIC_QCQ_F_INTR; + err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags, + IONIC_ADMINQ_LENGTH, + sizeof(struct ionic_admin_cmd), + sizeof(struct ionic_admin_comp), + 0, lif->kern_pid, &lif->adminqcq); + if (err) + return err; + + if (lif->ionic->nnqs_per_lif) { + flags = IONIC_QCQ_F_NOTIFYQ; + err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq", + flags, IONIC_NOTIFYQ_LENGTH, + sizeof(struct ionic_notifyq_cmd), + sizeof(union ionic_notifyq_comp), + 0, lif->kern_pid, &lif->notifyqcq); + if (err) + goto err_out_free_adminqcq; + + /* Let the notifyq ride on the adminq 
interrupt */ + ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq); + } + + q_list_size = sizeof(*lif->txqcqs) * lif->nxqs; + err = -ENOMEM; + lif->txqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL); + if (!lif->txqcqs) + goto err_out_free_notifyqcq; + for (i = 0; i < lif->nxqs; i++) { + lif->txqcqs[i].stats = devm_kzalloc(dev, + sizeof(struct ionic_q_stats), + GFP_KERNEL); + if (!lif->txqcqs[i].stats) + goto err_out_free_tx_stats; + } + + lif->rxqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL); + if (!lif->rxqcqs) + goto err_out_free_tx_stats; + for (i = 0; i < lif->nxqs; i++) { + lif->rxqcqs[i].stats = devm_kzalloc(dev, + sizeof(struct ionic_q_stats), + GFP_KERNEL); + if (!lif->rxqcqs[i].stats) + goto err_out_free_rx_stats; + } + + return 0; + +err_out_free_rx_stats: + for (i = 0; i < lif->nxqs; i++) + if (lif->rxqcqs[i].stats) + devm_kfree(dev, lif->rxqcqs[i].stats); + devm_kfree(dev, lif->rxqcqs); + lif->rxqcqs = NULL; +err_out_free_tx_stats: + for (i = 0; i < lif->nxqs; i++) + if (lif->txqcqs[i].stats) + devm_kfree(dev, lif->txqcqs[i].stats); + devm_kfree(dev, lif->txqcqs); + lif->txqcqs = NULL; +err_out_free_notifyqcq: + if (lif->notifyqcq) { + ionic_qcq_free(lif, lif->notifyqcq); + lif->notifyqcq = NULL; + } +err_out_free_adminqcq: + ionic_qcq_free(lif, lif->adminqcq); + lif->adminqcq = NULL; + + return err; +} + +static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) +{ + struct device *dev = lif->ionic->dev; + struct ionic_queue *q = &qcq->q; + struct ionic_cq *cq = &qcq->cq; + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.q_init = { + .opcode = IONIC_CMD_Q_INIT, + .lif_index = cpu_to_le16(lif->index), + .type = q->type, + .index = cpu_to_le32(q->index), + .flags = cpu_to_le16(IONIC_QINIT_F_IRQ | + IONIC_QINIT_F_SG), + .intr_index = cpu_to_le16(lif->rxqcqs[q->index].qcq->intr.index), + .pid = cpu_to_le16(q->pid), + .ring_size = ilog2(q->num_descs), + .ring_base = cpu_to_le64(q->base_pa), + .cq_ring_base = cpu_to_le64(cq->base_pa), + .sg_ring_base = cpu_to_le64(q->sg_base_pa), + }, + }; + int err; + + dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid); + dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index); + dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); + dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + q->hw_type = ctx.comp.q_init.hw_type; + q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index); + q->dbval = IONIC_DBELL_QID(q->hw_index); + + dev_dbg(dev, "txq->hw_type %d\n", q->hw_type); + dev_dbg(dev, "txq->hw_index %d\n", q->hw_index); + + qcq->flags |= IONIC_QCQ_F_INITED; + + ionic_debugfs_add_qcq(lif, qcq); + + return 0; +} + +static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) +{ + struct device *dev = lif->ionic->dev; + struct ionic_queue *q = &qcq->q; + struct ionic_cq *cq = &qcq->cq; + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.q_init = { + .opcode = IONIC_CMD_Q_INIT, + .lif_index = cpu_to_le16(lif->index), + .type = q->type, + .index = cpu_to_le32(q->index), + .flags = cpu_to_le16(IONIC_QINIT_F_IRQ), + .intr_index = cpu_to_le16(cq->bound_intr->index), + .pid = cpu_to_le16(q->pid), + .ring_size = ilog2(q->num_descs), + .ring_base = cpu_to_le64(q->base_pa), + .cq_ring_base = cpu_to_le64(cq->base_pa), + }, + }; + int err; + + dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid); + dev_dbg(dev, "rxq_init.index 
%d\n", ctx.cmd.q_init.index); + dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); + dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + q->hw_type = ctx.comp.q_init.hw_type; + q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index); + q->dbval = IONIC_DBELL_QID(q->hw_index); + + dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type); + dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index); + + netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi, + NAPI_POLL_WEIGHT); + + err = ionic_request_irq(lif, qcq); + if (err) { + netif_napi_del(&qcq->napi); + return err; + } + + qcq->flags |= IONIC_QCQ_F_INITED; + + ionic_debugfs_add_qcq(lif, qcq); + + return 0; +} + +static bool ionic_notifyq_service(struct ionic_cq *cq, + struct ionic_cq_info *cq_info) +{ + union ionic_notifyq_comp *comp = cq_info->cq_desc; + struct net_device *netdev; + struct ionic_queue *q; + struct ionic_lif *lif; + u64 eid; + + q = cq->bound_q; + lif = q->info[0].cb_arg; + netdev = lif->netdev; + eid = le64_to_cpu(comp->event.eid); + + /* Have we run out of new completions to process? */ + if (eid <= lif->last_eid) + return false; + + lif->last_eid = eid; + + dev_dbg(lif->ionic->dev, "notifyq event:\n"); + dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1, + comp, sizeof(*comp), true); + + switch (le16_to_cpu(comp->event.ecode)) { + case IONIC_EVENT_LINK_CHANGE: + ionic_link_status_check_request(lif); + break; + case IONIC_EVENT_RESET: + netdev_info(netdev, "Notifyq IONIC_EVENT_RESET eid=%lld\n", + eid); + netdev_info(netdev, " reset_code=%d state=%d\n", + comp->reset.reset_code, + comp->reset.state); + break; + default: + netdev_warn(netdev, "Notifyq unknown event ecode=%d eid=%lld\n", + comp->event.ecode, eid); + break; + } + + return true; +} + +static int ionic_notifyq_clean(struct ionic_lif *lif, int budget) +{ + struct ionic_dev *idev = &lif->ionic->idev; + struct ionic_cq *cq = &lif->notifyqcq->cq; + u32 work_done; + + work_done = ionic_cq_service(cq, budget, ionic_notifyq_service, + NULL, NULL); + if (work_done) + ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index, + work_done, IONIC_INTR_CRED_RESET_COALESCE); + + return work_done; +} + +static bool ionic_adminq_service(struct ionic_cq *cq, + struct ionic_cq_info *cq_info) +{ + struct ionic_admin_comp *comp = cq_info->cq_desc; + + if (!color_match(comp->color, cq->done_color)) + return false; + + ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index)); + + return true; +} + +static int ionic_adminq_napi(struct napi_struct *napi, int budget) +{ + struct ionic_lif *lif = napi_to_cq(napi)->lif; + int n_work = 0; + int a_work = 0; + + if (likely(lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED)) + n_work = ionic_notifyq_clean(lif, budget); + a_work = ionic_napi(napi, budget, ionic_adminq_service, NULL, NULL); + + return max(n_work, a_work); +} + +static void ionic_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *ns) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_lif_stats *ls; + + memset(ns, 0, sizeof(*ns)); + ls = &lif->info->stats; + + ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) + + le64_to_cpu(ls->rx_mcast_packets) + + le64_to_cpu(ls->rx_bcast_packets); + + ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) + + le64_to_cpu(ls->tx_mcast_packets) + + le64_to_cpu(ls->tx_bcast_packets); + + ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) + + le64_to_cpu(ls->rx_mcast_bytes) + + 
le64_to_cpu(ls->rx_bcast_bytes); + + ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) + + le64_to_cpu(ls->tx_mcast_bytes) + + le64_to_cpu(ls->tx_bcast_bytes); + + ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) + + le64_to_cpu(ls->rx_mcast_drop_packets) + + le64_to_cpu(ls->rx_bcast_drop_packets); + + ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) + + le64_to_cpu(ls->tx_mcast_drop_packets) + + le64_to_cpu(ls->tx_bcast_drop_packets); + + ns->multicast = le64_to_cpu(ls->rx_mcast_packets); + + ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty); + + ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) + + le64_to_cpu(ls->rx_queue_disabled) + + le64_to_cpu(ls->rx_desc_fetch_error) + + le64_to_cpu(ls->rx_desc_data_error); + + ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) + + le64_to_cpu(ls->tx_queue_disabled) + + le64_to_cpu(ls->tx_desc_fetch_error) + + le64_to_cpu(ls->tx_desc_data_error); + + ns->rx_errors = ns->rx_over_errors + + ns->rx_missed_errors; + + ns->tx_errors = ns->tx_aborted_errors; +} + +static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr) +{ + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.rx_filter_add = { + .opcode = IONIC_CMD_RX_FILTER_ADD, + .lif_index = cpu_to_le16(lif->index), + .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC), + }, + }; + struct ionic_rx_filter *f; + int err; + + /* don't bother if we already have it */ + spin_lock_bh(&lif->rx_filters.lock); + f = ionic_rx_filter_by_addr(lif, addr); + spin_unlock_bh(&lif->rx_filters.lock); + if (f) + return 0; + + netdev_dbg(lif->netdev, "rx_filter add ADDR %pM (id %d)\n", addr, + ctx.comp.rx_filter_add.filter_id); + + memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN); + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx); +} + +static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) +{ + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.rx_filter_del = { + .opcode = IONIC_CMD_RX_FILTER_DEL, + .lif_index = cpu_to_le16(lif->index), + }, + }; + struct ionic_rx_filter *f; + int err; + + spin_lock_bh(&lif->rx_filters.lock); + f = ionic_rx_filter_by_addr(lif, addr); + if (!f) { + spin_unlock_bh(&lif->rx_filters.lock); + return -ENOENT; + } + + ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); + ionic_rx_filter_free(lif, f); + spin_unlock_bh(&lif->rx_filters.lock); + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr, + ctx.cmd.rx_filter_del.filter_id); + + return 0; +} + +static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add) +{ + struct ionic *ionic = lif->ionic; + struct ionic_deferred_work *work; + unsigned int nmfilters; + unsigned int nufilters; + + if (add) { + /* Do we have space for this filter? We test the counters + * here before checking the need for deferral so that we + * can return an overflow error to the stack. 
+ */ + nmfilters = le32_to_cpu(ionic->ident.lif.eth.max_mcast_filters); + nufilters = le32_to_cpu(ionic->ident.lif.eth.max_ucast_filters); + + if ((is_multicast_ether_addr(addr) && lif->nmcast < nmfilters)) + lif->nmcast++; + else if (!is_multicast_ether_addr(addr) && + lif->nucast < nufilters) + lif->nucast++; + else + return -ENOSPC; + } else { + if (is_multicast_ether_addr(addr) && lif->nmcast) + lif->nmcast--; + else if (!is_multicast_ether_addr(addr) && lif->nucast) + lif->nucast--; + } + + if (in_interrupt()) { + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + netdev_err(lif->netdev, "%s OOM\n", __func__); + return -ENOMEM; + } + work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD : + IONIC_DW_TYPE_RX_ADDR_DEL; + memcpy(work->addr, addr, ETH_ALEN); + netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n", + add ? "add" : "del", addr); + ionic_lif_deferred_enqueue(&lif->deferred, work); + } else { + netdev_dbg(lif->netdev, "rx_filter %s %pM\n", + add ? "add" : "del", addr); + if (add) + return ionic_lif_addr_add(lif, addr); + else + return ionic_lif_addr_del(lif, addr); + } + + return 0; +} + +static int ionic_addr_add(struct net_device *netdev, const u8 *addr) +{ + return ionic_lif_addr(netdev_priv(netdev), addr, true); +} + +static int ionic_addr_del(struct net_device *netdev, const u8 *addr) +{ + return ionic_lif_addr(netdev_priv(netdev), addr, false); +} + +static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode) +{ + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.rx_mode_set = { + .opcode = IONIC_CMD_RX_MODE_SET, + .lif_index = cpu_to_le16(lif->index), + .rx_mode = cpu_to_le16(rx_mode), + }, + }; + char buf[128]; + int err; + int i; +#define REMAIN(__x) (sizeof(buf) - (__x)) + + i = snprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:", + lif->rx_mode, rx_mode); + if (rx_mode & IONIC_RX_MODE_F_UNICAST) + i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST"); + if (rx_mode & IONIC_RX_MODE_F_MULTICAST) + i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST"); + if (rx_mode & IONIC_RX_MODE_F_BROADCAST) + i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST"); + if (rx_mode & IONIC_RX_MODE_F_PROMISC) + i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC"); + if (rx_mode & IONIC_RX_MODE_F_ALLMULTI) + i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI"); + netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf); + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n", + rx_mode, err); + else + lif->rx_mode = rx_mode; +} + +static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode) +{ + struct ionic_deferred_work *work; + + if (in_interrupt()) { + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + netdev_err(lif->netdev, "%s OOM\n", __func__); + return; + } + work->type = IONIC_DW_TYPE_RX_MODE; + work->rx_mode = rx_mode; + netdev_dbg(lif->netdev, "deferred: rx_mode\n"); + ionic_lif_deferred_enqueue(&lif->deferred, work); + } else { + ionic_lif_rx_mode(lif, rx_mode); + } +} + +static void ionic_set_rx_mode(struct net_device *netdev) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_identity *ident; + unsigned int nfilters; + unsigned int rx_mode; + + ident = &lif->ionic->ident; + + rx_mode = IONIC_RX_MODE_F_UNICAST; + rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0; + rx_mode |= (netdev->flags & IFF_BROADCAST) ? 
IONIC_RX_MODE_F_BROADCAST : 0; + rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0; + rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0; + + /* sync unicast addresses + * next check to see if we're in an overflow state + * if so, we track that we overflowed and enable NIC PROMISC + * else if the overflow is set and not needed + * we remove our overflow flag and check the netdev flags + * to see if we can disable NIC PROMISC + */ + __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del); + nfilters = le32_to_cpu(ident->lif.eth.max_ucast_filters); + if (netdev_uc_count(netdev) + 1 > nfilters) { + rx_mode |= IONIC_RX_MODE_F_PROMISC; + lif->uc_overflow = true; + } else if (lif->uc_overflow) { + lif->uc_overflow = false; + if (!(netdev->flags & IFF_PROMISC)) + rx_mode &= ~IONIC_RX_MODE_F_PROMISC; + } + + /* same for multicast */ + __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del); + nfilters = le32_to_cpu(ident->lif.eth.max_mcast_filters); + if (netdev_mc_count(netdev) > nfilters) { + rx_mode |= IONIC_RX_MODE_F_ALLMULTI; + lif->mc_overflow = true; + } else if (lif->mc_overflow) { + lif->mc_overflow = false; + if (!(netdev->flags & IFF_ALLMULTI)) + rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI; + } + + if (lif->rx_mode != rx_mode) + _ionic_lif_rx_mode(lif, rx_mode); +} + +static __le64 ionic_netdev_features_to_nic(netdev_features_t features) +{ + u64 wanted = 0; + + if (features & NETIF_F_HW_VLAN_CTAG_TX) + wanted |= IONIC_ETH_HW_VLAN_TX_TAG; + if (features & NETIF_F_HW_VLAN_CTAG_RX) + wanted |= IONIC_ETH_HW_VLAN_RX_STRIP; + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) + wanted |= IONIC_ETH_HW_VLAN_RX_FILTER; + if (features & NETIF_F_RXHASH) + wanted |= IONIC_ETH_HW_RX_HASH; + if (features & NETIF_F_RXCSUM) + wanted |= IONIC_ETH_HW_RX_CSUM; + if (features & NETIF_F_SG) + wanted |= IONIC_ETH_HW_TX_SG; + if (features & NETIF_F_HW_CSUM) + wanted |= IONIC_ETH_HW_TX_CSUM; + if (features & NETIF_F_TSO) + wanted |= IONIC_ETH_HW_TSO; + if (features & NETIF_F_TSO6) + wanted |= IONIC_ETH_HW_TSO_IPV6; + if (features & NETIF_F_TSO_ECN) + wanted |= IONIC_ETH_HW_TSO_ECN; + if (features & NETIF_F_GSO_GRE) + wanted |= IONIC_ETH_HW_TSO_GRE; + if (features & NETIF_F_GSO_GRE_CSUM) + wanted |= IONIC_ETH_HW_TSO_GRE_CSUM; + if (features & NETIF_F_GSO_IPXIP4) + wanted |= IONIC_ETH_HW_TSO_IPXIP4; + if (features & NETIF_F_GSO_IPXIP6) + wanted |= IONIC_ETH_HW_TSO_IPXIP6; + if (features & NETIF_F_GSO_UDP_TUNNEL) + wanted |= IONIC_ETH_HW_TSO_UDP; + if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM) + wanted |= IONIC_ETH_HW_TSO_UDP_CSUM; + + return cpu_to_le64(wanted); +} + +static int ionic_set_nic_features(struct ionic_lif *lif, + netdev_features_t features) +{ + struct device *dev = lif->ionic->dev; + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.lif_setattr = { + .opcode = IONIC_CMD_LIF_SETATTR, + .index = cpu_to_le16(lif->index), + .attr = IONIC_LIF_ATTR_FEATURES, + }, + }; + u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG | + IONIC_ETH_HW_VLAN_RX_STRIP | + IONIC_ETH_HW_VLAN_RX_FILTER; + int err; + + ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features); + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features & + ctx.comp.lif_setattr.features); + + if ((vlan_flags & features) && + !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features))) + dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n"); + + if (lif->hw_features & 
IONIC_ETH_HW_VLAN_TX_TAG) + dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n"); + if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP) + dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n"); + if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER) + dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n"); + if (lif->hw_features & IONIC_ETH_HW_RX_HASH) + dev_dbg(dev, "feature ETH_HW_RX_HASH\n"); + if (lif->hw_features & IONIC_ETH_HW_TX_SG) + dev_dbg(dev, "feature ETH_HW_TX_SG\n"); + if (lif->hw_features & IONIC_ETH_HW_TX_CSUM) + dev_dbg(dev, "feature ETH_HW_TX_CSUM\n"); + if (lif->hw_features & IONIC_ETH_HW_RX_CSUM) + dev_dbg(dev, "feature ETH_HW_RX_CSUM\n"); + if (lif->hw_features & IONIC_ETH_HW_TSO) + dev_dbg(dev, "feature ETH_HW_TSO\n"); + if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6) + dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n"); + if (lif->hw_features & IONIC_ETH_HW_TSO_ECN) + dev_dbg(dev, "feature ETH_HW_TSO_ECN\n"); + if (lif->hw_features & IONIC_ETH_HW_TSO_GRE) + dev_dbg(dev, "feature ETH_HW_TSO_GRE\n"); + if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM) + dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n"); + if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4) + dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n"); + if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6) + dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n"); + if (lif->hw_features & IONIC_ETH_HW_TSO_UDP) + dev_dbg(dev, "feature ETH_HW_TSO_UDP\n"); + if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM) + dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n"); + + return 0; +} + +static int ionic_init_nic_features(struct ionic_lif *lif) +{ + struct net_device *netdev = lif->netdev; + netdev_features_t features; + int err; + + /* set up what we expect to support by default */ + features = NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_RXHASH | + NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_RXCSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_TSO_ECN; + + err = ionic_set_nic_features(lif, features); + if (err) + return err; + + /* tell the netdev what we actually can support */ + netdev->features |= NETIF_F_HIGHDMA; + + if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG) + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; + if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP) + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER) + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + if (lif->hw_features & IONIC_ETH_HW_RX_HASH) + netdev->hw_features |= NETIF_F_RXHASH; + if (lif->hw_features & IONIC_ETH_HW_TX_SG) + netdev->hw_features |= NETIF_F_SG; + + if (lif->hw_features & IONIC_ETH_HW_TX_CSUM) + netdev->hw_enc_features |= NETIF_F_HW_CSUM; + if (lif->hw_features & IONIC_ETH_HW_RX_CSUM) + netdev->hw_enc_features |= NETIF_F_RXCSUM; + if (lif->hw_features & IONIC_ETH_HW_TSO) + netdev->hw_enc_features |= NETIF_F_TSO; + if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6) + netdev->hw_enc_features |= NETIF_F_TSO6; + if (lif->hw_features & IONIC_ETH_HW_TSO_ECN) + netdev->hw_enc_features |= NETIF_F_TSO_ECN; + if (lif->hw_features & IONIC_ETH_HW_TSO_GRE) + netdev->hw_enc_features |= NETIF_F_GSO_GRE; + if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM) + netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM; + if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4) + netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4; + if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6) + netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6; + if (lif->hw_features & IONIC_ETH_HW_TSO_UDP) + netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL; + if (lif->hw_features 
& IONIC_ETH_HW_TSO_UDP_CSUM) + netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; + + netdev->hw_features |= netdev->hw_enc_features; + netdev->features |= netdev->hw_features; + + netdev->priv_flags |= IFF_UNICAST_FLT; + + return 0; +} + +static int ionic_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct ionic_lif *lif = netdev_priv(netdev); + int err; + + netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n", + __func__, (u64)lif->netdev->features, (u64)features); + + err = ionic_set_nic_features(lif, features); + + return err; +} + +static int ionic_set_mac_address(struct net_device *netdev, void *sa) +{ + struct sockaddr *addr = sa; + u8 *mac; + int err; + + mac = (u8 *)addr->sa_data; + if (ether_addr_equal(netdev->dev_addr, mac)) + return 0; + + err = eth_prepare_mac_addr_change(netdev, addr); + if (err) + return err; + + if (!is_zero_ether_addr(netdev->dev_addr)) { + netdev_info(netdev, "deleting mac addr %pM\n", + netdev->dev_addr); + ionic_addr_del(netdev, netdev->dev_addr); + } + + eth_commit_mac_addr_change(netdev, addr); + netdev_info(netdev, "updating mac addr %pM\n", mac); + + return ionic_addr_add(netdev, mac); +} + +static int ionic_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.lif_setattr = { + .opcode = IONIC_CMD_LIF_SETATTR, + .index = cpu_to_le16(lif->index), + .attr = IONIC_LIF_ATTR_MTU, + .mtu = cpu_to_le32(new_mtu), + }, + }; + int err; + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + netdev->mtu = new_mtu; + err = ionic_reset_queues(lif); + + return err; +} + +static void ionic_tx_timeout_work(struct work_struct *ws) +{ + struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work); + + netdev_info(lif->netdev, "Tx Timeout recovery\n"); + + rtnl_lock(); + ionic_reset_queues(lif); + rtnl_unlock(); +} + +static void ionic_tx_timeout(struct net_device *netdev) +{ + struct ionic_lif *lif = netdev_priv(netdev); + + schedule_work(&lif->tx_timeout_work); +} + +static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, + u16 vid) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.rx_filter_add = { + .opcode = IONIC_CMD_RX_FILTER_ADD, + .lif_index = cpu_to_le16(lif->index), + .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN), + .vlan.vlan = cpu_to_le16(vid), + }, + }; + int err; + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + netdev_dbg(netdev, "rx_filter add VLAN %d (id %d)\n", vid, + ctx.comp.rx_filter_add.filter_id); + + return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx); +} + +static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, + u16 vid) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.rx_filter_del = { + .opcode = IONIC_CMD_RX_FILTER_DEL, + .lif_index = cpu_to_le16(lif->index), + }, + }; + struct ionic_rx_filter *f; + + spin_lock_bh(&lif->rx_filters.lock); + + f = ionic_rx_filter_by_vlan(lif, vid); + if (!f) { + spin_unlock_bh(&lif->rx_filters.lock); + return -ENOENT; + } + + netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", vid, + le32_to_cpu(ctx.cmd.rx_filter_del.filter_id)); + + ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); + ionic_rx_filter_free(lif, 
f); + spin_unlock_bh(&lif->rx_filters.lock); + + return ionic_adminq_post_wait(lif, &ctx); +} + +int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types, + const u8 *key, const u32 *indir) +{ + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.lif_setattr = { + .opcode = IONIC_CMD_LIF_SETATTR, + .attr = IONIC_LIF_ATTR_RSS, + .rss.types = cpu_to_le16(types), + .rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa), + }, + }; + unsigned int i, tbl_sz; + + lif->rss_types = types; + + if (key) + memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE); + + if (indir) { + tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); + for (i = 0; i < tbl_sz; i++) + lif->rss_ind_tbl[i] = indir[i]; + } + + memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key, + IONIC_RSS_HASH_KEY_SIZE); + + return ionic_adminq_post_wait(lif, &ctx); +} + +static int ionic_lif_rss_init(struct ionic_lif *lif) +{ + u8 rss_key[IONIC_RSS_HASH_KEY_SIZE]; + unsigned int tbl_sz; + unsigned int i; + + netdev_rss_key_fill(rss_key, IONIC_RSS_HASH_KEY_SIZE); + + lif->rss_types = IONIC_RSS_TYPE_IPV4 | + IONIC_RSS_TYPE_IPV4_TCP | + IONIC_RSS_TYPE_IPV4_UDP | + IONIC_RSS_TYPE_IPV6 | + IONIC_RSS_TYPE_IPV6_TCP | + IONIC_RSS_TYPE_IPV6_UDP; + + /* Fill indirection table with 'default' values */ + tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); + for (i = 0; i < tbl_sz; i++) + lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs); + + return ionic_lif_rss_config(lif, lif->rss_types, rss_key, NULL); +} + +static int ionic_lif_rss_deinit(struct ionic_lif *lif) +{ + return ionic_lif_rss_config(lif, 0x0, NULL, NULL); +} + +static void ionic_txrx_disable(struct ionic_lif *lif) +{ + unsigned int i; + + for (i = 0; i < lif->nxqs; i++) { + ionic_qcq_disable(lif->txqcqs[i].qcq); + ionic_qcq_disable(lif->rxqcqs[i].qcq); + } +} + +static void ionic_txrx_deinit(struct ionic_lif *lif) +{ + unsigned int i; + + for (i = 0; i < lif->nxqs; i++) { + ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq); + ionic_tx_flush(&lif->txqcqs[i].qcq->cq); + + ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq); + ionic_rx_flush(&lif->rxqcqs[i].qcq->cq); + ionic_rx_empty(&lif->rxqcqs[i].qcq->q); + } +} + +static void ionic_txrx_free(struct ionic_lif *lif) +{ + unsigned int i; + + for (i = 0; i < lif->nxqs; i++) { + ionic_qcq_free(lif, lif->txqcqs[i].qcq); + lif->txqcqs[i].qcq = NULL; + + ionic_qcq_free(lif, lif->rxqcqs[i].qcq); + lif->rxqcqs[i].qcq = NULL; + } +} + +static int ionic_txrx_alloc(struct ionic_lif *lif) +{ + unsigned int flags; + unsigned int i; + int err = 0; + u32 coal; + + flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG; + for (i = 0; i < lif->nxqs; i++) { + err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, + lif->ntxq_descs, + sizeof(struct ionic_txq_desc), + sizeof(struct ionic_txq_comp), + sizeof(struct ionic_txq_sg_desc), + lif->kern_pid, &lif->txqcqs[i].qcq); + if (err) + goto err_out; + + lif->txqcqs[i].qcq->stats = lif->txqcqs[i].stats; + } + + flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_INTR; + coal = ionic_coal_usec_to_hw(lif->ionic, lif->rx_coalesce_usecs); + for (i = 0; i < lif->nxqs; i++) { + err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, + lif->nrxq_descs, + sizeof(struct ionic_rxq_desc), + sizeof(struct ionic_rxq_comp), + 0, lif->kern_pid, &lif->rxqcqs[i].qcq); + if (err) + goto err_out; + + lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats; + + ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, + lif->rxqcqs[i].qcq->intr.index, coal); + 
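+		/* tx completions are serviced from the paired rx queue's
+		 * interrupt (see ionic_lif_txq_init), so share that vector
+		 * rather than allocating one per tx queue
+		 */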
ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq, + lif->txqcqs[i].qcq); + } + + return 0; + +err_out: + ionic_txrx_free(lif); + + return err; +} + +static int ionic_txrx_init(struct ionic_lif *lif) +{ + unsigned int i; + int err; + + for (i = 0; i < lif->nxqs; i++) { + err = ionic_lif_txq_init(lif, lif->txqcqs[i].qcq); + if (err) + goto err_out; + + err = ionic_lif_rxq_init(lif, lif->rxqcqs[i].qcq); + if (err) { + ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq); + goto err_out; + } + } + + if (lif->netdev->features & NETIF_F_RXHASH) + ionic_lif_rss_init(lif); + + ionic_set_rx_mode(lif->netdev); + + return 0; + +err_out: + while (i--) { + ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq); + ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq); + } + + return err; +} + +static int ionic_txrx_enable(struct ionic_lif *lif) +{ + int i, err; + + for (i = 0; i < lif->nxqs; i++) { + err = ionic_qcq_enable(lif->txqcqs[i].qcq); + if (err) + goto err_out; + + ionic_rx_fill(&lif->rxqcqs[i].qcq->q); + err = ionic_qcq_enable(lif->rxqcqs[i].qcq); + if (err) { + ionic_qcq_disable(lif->txqcqs[i].qcq); + goto err_out; + } + } + + return 0; + +err_out: + while (i--) { + ionic_qcq_disable(lif->rxqcqs[i].qcq); + ionic_qcq_disable(lif->txqcqs[i].qcq); + } + + return err; +} + +int ionic_open(struct net_device *netdev) +{ + struct ionic_lif *lif = netdev_priv(netdev); + int err; + + netif_carrier_off(netdev); + + err = ionic_txrx_alloc(lif); + if (err) + return err; + + err = ionic_txrx_init(lif); + if (err) + goto err_txrx_free; + + err = ionic_txrx_enable(lif); + if (err) + goto err_txrx_deinit; + + netif_set_real_num_tx_queues(netdev, lif->nxqs); + netif_set_real_num_rx_queues(netdev, lif->nxqs); + + set_bit(IONIC_LIF_UP, lif->state); + + ionic_link_status_check_request(lif); + if (netif_carrier_ok(netdev)) + netif_tx_wake_all_queues(netdev); + + return 0; + +err_txrx_deinit: + ionic_txrx_deinit(lif); +err_txrx_free: + ionic_txrx_free(lif); + return err; +} + +int ionic_stop(struct net_device *netdev) +{ + struct ionic_lif *lif = netdev_priv(netdev); + int err = 0; + + if (!test_bit(IONIC_LIF_UP, lif->state)) { + dev_dbg(lif->ionic->dev, "%s: %s state=DOWN\n", + __func__, lif->name); + return 0; + } + dev_dbg(lif->ionic->dev, "%s: %s state=UP\n", __func__, lif->name); + clear_bit(IONIC_LIF_UP, lif->state); + + /* carrier off before disabling queues to avoid watchdog timeout */ + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + netif_tx_disable(netdev); + + ionic_txrx_disable(lif); + ionic_txrx_deinit(lif); + ionic_txrx_free(lif); + + return err; +} + +static const struct net_device_ops ionic_netdev_ops = { + .ndo_open = ionic_open, + .ndo_stop = ionic_stop, + .ndo_start_xmit = ionic_start_xmit, + .ndo_get_stats64 = ionic_get_stats64, + .ndo_set_rx_mode = ionic_set_rx_mode, + .ndo_set_features = ionic_set_features, + .ndo_set_mac_address = ionic_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_tx_timeout = ionic_tx_timeout, + .ndo_change_mtu = ionic_change_mtu, + .ndo_vlan_rx_add_vid = ionic_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ionic_vlan_rx_kill_vid, +}; + +int ionic_reset_queues(struct ionic_lif *lif) +{ + bool running; + int err = 0; + + /* Put off the next watchdog timeout */ + netif_trans_update(lif->netdev); + + if (!ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET)) + return -EBUSY; + + running = netif_running(lif->netdev); + if (running) + err = ionic_stop(lif->netdev); + if (!err && running) + ionic_open(lif->netdev); + + clear_bit(IONIC_LIF_QUEUE_RESET, lif->state); + + return 
err;
+}
+
+static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index)
+{
+	struct device *dev = ionic->dev;
+	struct net_device *netdev;
+	struct ionic_lif *lif;
+	int tbl_sz;
+	u32 coal;
+	int err;
+
+	netdev = alloc_etherdev_mqs(sizeof(*lif),
+				    ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
+	if (!netdev) {
+		dev_err(dev, "Cannot allocate netdev, aborting\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	SET_NETDEV_DEV(netdev, dev);
+
+	lif = netdev_priv(netdev);
+	lif->netdev = netdev;
+	ionic->master_lif = lif;
+	netdev->netdev_ops = &ionic_netdev_ops;
+	ionic_ethtool_set_ops(netdev);
+
+	netdev->watchdog_timeo = 2 * HZ;
+	netdev->min_mtu = IONIC_MIN_MTU;
+	netdev->max_mtu = IONIC_MAX_MTU;
+
+	lif->neqs = ionic->neqs_per_lif;
+	lif->nxqs = ionic->ntxqs_per_lif;
+
+	lif->ionic = ionic;
+	lif->index = index;
+	lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
+	lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
+
+	/* Convert the default coalesce value to actual hw resolution */
+	coal = ionic_coal_usec_to_hw(lif->ionic, IONIC_ITR_COAL_USEC_DEFAULT);
+	lif->rx_coalesce_usecs = ionic_coal_hw_to_usec(lif->ionic, coal);
+
+	snprintf(lif->name, sizeof(lif->name), "lif%u", index);
+
+	spin_lock_init(&lif->adminq_lock);
+
+	spin_lock_init(&lif->deferred.lock);
+	INIT_LIST_HEAD(&lif->deferred.list);
+	INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);
+
+	/* allocate lif info */
+	lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
+	lif->info = dma_alloc_coherent(dev, lif->info_sz,
+				       &lif->info_pa, GFP_KERNEL);
+	if (!lif->info) {
+		dev_err(dev, "Failed to allocate lif info, aborting\n");
+		err = -ENOMEM;
+		goto err_out_free_netdev;
+	}
+
+	/* allocate queues */
+	err = ionic_qcqs_alloc(lif);
+	if (err)
+		goto err_out_free_lif_info;
+
+	/* allocate rss indirection table */
+	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
+	lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
+	lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
+					      &lif->rss_ind_tbl_pa,
+					      GFP_KERNEL);
+
+	if (!lif->rss_ind_tbl) {
+		dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
+		err = -ENOMEM;
+		goto err_out_free_qcqs;
+	}
+
+	list_add_tail(&lif->list, &ionic->lifs);
+
+	return lif;
+
+err_out_free_qcqs:
+	ionic_qcqs_free(lif);
+err_out_free_lif_info:
+	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
+	lif->info = NULL;
+	lif->info_pa = 0;
+err_out_free_netdev:
+	free_netdev(lif->netdev);
+	lif = NULL;
+
+	return ERR_PTR(err);
+}
+
+int ionic_lifs_alloc(struct ionic *ionic)
+{
+	struct ionic_lif *lif;
+
+	INIT_LIST_HEAD(&ionic->lifs);
+
+	/* only build the first lif, others are for later features */
+	set_bit(0, ionic->lifbits);
+	lif = ionic_lif_alloc(ionic, 0);
+
+	return PTR_ERR_OR_ZERO(lif);
+}
+
+static void ionic_lif_reset(struct ionic_lif *lif)
+{
+	struct ionic_dev *idev = &lif->ionic->idev;
+
+	mutex_lock(&lif->ionic->dev_cmd_lock);
+	ionic_dev_cmd_lif_reset(idev, lif->index);
+	ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
+	mutex_unlock(&lif->ionic->dev_cmd_lock);
+}
+
+static void ionic_lif_free(struct ionic_lif *lif)
+{
+	struct device *dev = lif->ionic->dev;
+
+	/* free rss indirection table */
+	dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
+			  lif->rss_ind_tbl_pa);
+	lif->rss_ind_tbl = NULL;
+	lif->rss_ind_tbl_pa = 0;
+
+	/* free queues */
+	ionic_qcqs_free(lif);
+	ionic_lif_reset(lif);
+
+	/* free lif info */
+	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
+	lif->info = NULL;
+	lif->info_pa = 0;
+
+	/* unmap doorbell page */
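+	/* (the kernel's page for reserved dbid 0, mapped in ionic_lif_init) */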
ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage); + lif->kern_dbpage = NULL; + kfree(lif->dbid_inuse); + lif->dbid_inuse = NULL; + + /* free netdev & lif */ + ionic_debugfs_del_lif(lif); + list_del(&lif->list); + free_netdev(lif->netdev); +} + +void ionic_lifs_free(struct ionic *ionic) +{ + struct list_head *cur, *tmp; + struct ionic_lif *lif; + + list_for_each_safe(cur, tmp, &ionic->lifs) { + lif = list_entry(cur, struct ionic_lif, list); + + ionic_lif_free(lif); + } +} + +static void ionic_lif_deinit(struct ionic_lif *lif) +{ + if (!test_bit(IONIC_LIF_INITED, lif->state)) + return; + + clear_bit(IONIC_LIF_INITED, lif->state); + + ionic_rx_filters_deinit(lif); + ionic_lif_rss_deinit(lif); + + napi_disable(&lif->adminqcq->napi); + ionic_lif_qcq_deinit(lif, lif->notifyqcq); + ionic_lif_qcq_deinit(lif, lif->adminqcq); + + ionic_lif_reset(lif); +} + +void ionic_lifs_deinit(struct ionic *ionic) +{ + struct list_head *cur, *tmp; + struct ionic_lif *lif; + + list_for_each_safe(cur, tmp, &ionic->lifs) { + lif = list_entry(cur, struct ionic_lif, list); + ionic_lif_deinit(lif); + } +} + +static int ionic_lif_adminq_init(struct ionic_lif *lif) +{ + struct device *dev = lif->ionic->dev; + struct ionic_q_init_comp comp; + struct ionic_dev *idev; + struct ionic_qcq *qcq; + struct ionic_queue *q; + int err; + + idev = &lif->ionic->idev; + qcq = lif->adminqcq; + q = &qcq->q; + + mutex_lock(&lif->ionic->dev_cmd_lock); + ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index); + err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); + ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp); + mutex_unlock(&lif->ionic->dev_cmd_lock); + if (err) { + netdev_err(lif->netdev, "adminq init failed %d\n", err); + return err; + } + + q->hw_type = comp.hw_type; + q->hw_index = le32_to_cpu(comp.hw_index); + q->dbval = IONIC_DBELL_QID(q->hw_index); + + dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type); + dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index); + + netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi, + NAPI_POLL_WEIGHT); + + err = ionic_request_irq(lif, qcq); + if (err) { + netdev_warn(lif->netdev, "adminq irq request failed %d\n", err); + netif_napi_del(&qcq->napi); + return err; + } + + napi_enable(&qcq->napi); + + if (qcq->flags & IONIC_QCQ_F_INTR) + ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, + IONIC_INTR_MASK_CLEAR); + + qcq->flags |= IONIC_QCQ_F_INITED; + + ionic_debugfs_add_qcq(lif, qcq); + + return 0; +} + +static int ionic_lif_notifyq_init(struct ionic_lif *lif) +{ + struct ionic_qcq *qcq = lif->notifyqcq; + struct device *dev = lif->ionic->dev; + struct ionic_queue *q = &qcq->q; + int err; + + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.q_init = { + .opcode = IONIC_CMD_Q_INIT, + .lif_index = cpu_to_le16(lif->index), + .type = q->type, + .index = cpu_to_le32(q->index), + .flags = cpu_to_le16(IONIC_QINIT_F_IRQ | + IONIC_QINIT_F_ENA), + .intr_index = cpu_to_le16(lif->adminqcq->intr.index), + .pid = cpu_to_le16(q->pid), + .ring_size = ilog2(q->num_descs), + .ring_base = cpu_to_le64(q->base_pa), + } + }; + + dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid); + dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index); + dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); + dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + q->hw_type = ctx.comp.q_init.hw_type; + q->hw_index = 
le32_to_cpu(ctx.comp.q_init.hw_index); + q->dbval = IONIC_DBELL_QID(q->hw_index); + + dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type); + dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index); + + /* preset the callback info */ + q->info[0].cb_arg = lif; + + qcq->flags |= IONIC_QCQ_F_INITED; + + ionic_debugfs_add_qcq(lif, qcq); + + return 0; +} + +static int ionic_station_set(struct ionic_lif *lif) +{ + struct net_device *netdev = lif->netdev; + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.lif_getattr = { + .opcode = IONIC_CMD_LIF_GETATTR, + .index = cpu_to_le16(lif->index), + .attr = IONIC_LIF_ATTR_MAC, + }, + }; + struct sockaddr addr; + int err; + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + return err; + + memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len); + addr.sa_family = AF_INET; + err = eth_prepare_mac_addr_change(netdev, &addr); + if (err) + return err; + + if (!is_zero_ether_addr(netdev->dev_addr)) { + netdev_dbg(lif->netdev, "deleting station MAC addr %pM\n", + netdev->dev_addr); + ionic_lif_addr(lif, netdev->dev_addr, false); + } + + eth_commit_mac_addr_change(netdev, &addr); + netdev_dbg(lif->netdev, "adding station MAC addr %pM\n", + netdev->dev_addr); + ionic_lif_addr(lif, netdev->dev_addr, true); + + return 0; +} + +static int ionic_lif_init(struct ionic_lif *lif) +{ + struct ionic_dev *idev = &lif->ionic->idev; + struct device *dev = lif->ionic->dev; + struct ionic_lif_init_comp comp; + int dbpage_num; + int err; + + ionic_debugfs_add_lif(lif); + + mutex_lock(&lif->ionic->dev_cmd_lock); + ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa); + err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT); + ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp); + mutex_unlock(&lif->ionic->dev_cmd_lock); + if (err) + return err; + + lif->hw_index = le16_to_cpu(comp.hw_index); + + /* now that we have the hw_index we can figure out our doorbell page */ + lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif); + if (!lif->dbid_count) { + dev_err(dev, "No doorbell pages, aborting\n"); + return -EINVAL; + } + + lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL); + if (!lif->dbid_inuse) { + dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n"); + return -ENOMEM; + } + + /* first doorbell id reserved for kernel (dbid aka pid == zero) */ + set_bit(0, lif->dbid_inuse); + lif->kern_pid = 0; + + dbpage_num = ionic_db_page_num(lif, lif->kern_pid); + lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num); + if (!lif->kern_dbpage) { + dev_err(dev, "Cannot map dbpage, aborting\n"); + err = -ENOMEM; + goto err_out_free_dbid; + } + + err = ionic_lif_adminq_init(lif); + if (err) + goto err_out_adminq_deinit; + + if (lif->ionic->nnqs_per_lif) { + err = ionic_lif_notifyq_init(lif); + if (err) + goto err_out_notifyq_deinit; + } + + err = ionic_init_nic_features(lif); + if (err) + goto err_out_notifyq_deinit; + + err = ionic_rx_filters_init(lif); + if (err) + goto err_out_notifyq_deinit; + + err = ionic_station_set(lif); + if (err) + goto err_out_notifyq_deinit; + + lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT; + + set_bit(IONIC_LIF_INITED, lif->state); + + INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work); + + return 0; + +err_out_notifyq_deinit: + ionic_lif_qcq_deinit(lif, lif->notifyqcq); +err_out_adminq_deinit: + ionic_lif_qcq_deinit(lif, lif->adminqcq); + ionic_lif_reset(lif); + ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage); + lif->kern_dbpage = NULL; +err_out_free_dbid: 
+ kfree(lif->dbid_inuse); + lif->dbid_inuse = NULL; + + return err; +} + +int ionic_lifs_init(struct ionic *ionic) +{ + struct list_head *cur, *tmp; + struct ionic_lif *lif; + int err; + + list_for_each_safe(cur, tmp, &ionic->lifs) { + lif = list_entry(cur, struct ionic_lif, list); + err = ionic_lif_init(lif); + if (err) + return err; + } + + return 0; +} + +static void ionic_lif_notify_work(struct work_struct *ws) +{ +} + +static void ionic_lif_set_netdev_info(struct ionic_lif *lif) +{ + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.lif_setattr = { + .opcode = IONIC_CMD_LIF_SETATTR, + .index = cpu_to_le16(lif->index), + .attr = IONIC_LIF_ATTR_NAME, + }, + }; + + strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name, + sizeof(ctx.cmd.lif_setattr.name)); + + ionic_adminq_post_wait(lif, &ctx); +} + +static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev) +{ + if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit) + return NULL; + + return netdev_priv(netdev); +} + +static int ionic_lif_notify(struct notifier_block *nb, + unsigned long event, void *info) +{ + struct net_device *ndev = netdev_notifier_info_to_dev(info); + struct ionic *ionic = container_of(nb, struct ionic, nb); + struct ionic_lif *lif = ionic_netdev_lif(ndev); + + if (!lif || lif->ionic != ionic) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_CHANGENAME: + ionic_lif_set_netdev_info(lif); + break; + } + + return NOTIFY_DONE; +} + +int ionic_lifs_register(struct ionic *ionic) +{ + int err; + + INIT_WORK(&ionic->nb_work, ionic_lif_notify_work); + + ionic->nb.notifier_call = ionic_lif_notify; + + err = register_netdevice_notifier(&ionic->nb); + if (err) + ionic->nb.notifier_call = NULL; + + /* only register LIF0 for now */ + err = register_netdev(ionic->master_lif->netdev); + if (err) { + dev_err(ionic->dev, "Cannot register net device, aborting\n"); + return err; + } + + ionic_link_status_check_request(ionic->master_lif); + ionic->master_lif->registered = true; + + return 0; +} + +void ionic_lifs_unregister(struct ionic *ionic) +{ + if (ionic->nb.notifier_call) { + unregister_netdevice_notifier(&ionic->nb); + cancel_work_sync(&ionic->nb_work); + ionic->nb.notifier_call = NULL; + } + + /* There is only one lif ever registered in the + * current model, so don't bother searching the + * ionic->lif for candidates to unregister + */ + cancel_work_sync(&ionic->master_lif->deferred.work); + cancel_work_sync(&ionic->master_lif->tx_timeout_work); + if (ionic->master_lif->netdev->reg_state == NETREG_REGISTERED) + unregister_netdev(ionic->master_lif->netdev); +} + +int ionic_lif_identify(struct ionic *ionic, u8 lif_type, + union ionic_lif_identity *lid) +{ + struct ionic_dev *idev = &ionic->idev; + size_t sz; + int err; + + sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data)); + + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1); + err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); + memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz); + mutex_unlock(&ionic->dev_cmd_lock); + if (err) + return (err); + + dev_dbg(ionic->dev, "capabilities 0x%llx\n", + le64_to_cpu(lid->capabilities)); + + dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n", + le32_to_cpu(lid->eth.max_ucast_filters)); + dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n", + le32_to_cpu(lid->eth.max_mcast_filters)); + dev_dbg(ionic->dev, "eth.features 0x%llx\n", + le64_to_cpu(lid->eth.config.features)); + dev_dbg(ionic->dev, 
"eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n", + le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ])); + dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n", + le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ])); + dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n", + le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ])); + dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n", + le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ])); + dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name); + dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac); + dev_dbg(ionic->dev, "eth.config.mtu %d\n", + le32_to_cpu(lid->eth.config.mtu)); + + return 0; +} + +int ionic_lifs_size(struct ionic *ionic) +{ + struct ionic_identity *ident = &ionic->ident; + unsigned int nintrs, dev_nintrs; + union ionic_lif_config *lc; + unsigned int ntxqs_per_lif; + unsigned int nrxqs_per_lif; + unsigned int neqs_per_lif; + unsigned int nnqs_per_lif; + unsigned int nxqs, neqs; + unsigned int min_intrs; + int err; + + lc = &ident->lif.eth.config; + dev_nintrs = le32_to_cpu(ident->dev.nintrs); + neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count); + nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]); + ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]); + nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]); + + nxqs = min(ntxqs_per_lif, nrxqs_per_lif); + nxqs = min(nxqs, num_online_cpus()); + neqs = min(neqs_per_lif, num_online_cpus()); + +try_again: + /* interrupt usage: + * 1 for master lif adminq/notifyq + * 1 for each CPU for master lif TxRx queue pairs + * whatever's left is for RDMA queues + */ + nintrs = 1 + nxqs + neqs; + min_intrs = 2; /* adminq + 1 TxRx queue pair */ + + if (nintrs > dev_nintrs) + goto try_fewer; + + err = ionic_bus_alloc_irq_vectors(ionic, nintrs); + if (err < 0 && err != -ENOSPC) { + dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err); + return err; + } + if (err == -ENOSPC) + goto try_fewer; + + if (err != nintrs) { + ionic_bus_free_irq_vectors(ionic); + goto try_fewer; + } + + ionic->nnqs_per_lif = nnqs_per_lif; + ionic->neqs_per_lif = neqs; + ionic->ntxqs_per_lif = nxqs; + ionic->nrxqs_per_lif = nxqs; + ionic->nintrs = nintrs; + + ionic_debugfs_add_sizes(ionic); + + return 0; + +try_fewer: + if (nnqs_per_lif > 1) { + nnqs_per_lif >>= 1; + goto try_again; + } + if (neqs > 1) { + neqs >>= 1; + goto try_again; + } + if (nxqs > 1) { + nxqs >>= 1; + goto try_again; + } + dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs); + return -ENOSPC; +} diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h new file mode 100644 index 000000000000..812190e729c2 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -0,0 +1,277 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#ifndef _IONIC_LIF_H_ +#define _IONIC_LIF_H_ + +#include <linux/pci.h> +#include "ionic_rx_filter.h" + +#define IONIC_ADMINQ_LENGTH 16 /* must be a power of two */ +#define IONIC_NOTIFYQ_LENGTH 64 /* must be a power of two */ + +#define IONIC_MAX_NUM_NAPI_CNTR (NAPI_POLL_WEIGHT + 1) +#define IONIC_MAX_NUM_SG_CNTR (IONIC_TX_MAX_SG_ELEMS + 1) +#define IONIC_RX_COPYBREAK_DEFAULT 256 + +struct ionic_tx_stats { + u64 dma_map_err; + u64 pkts; + u64 bytes; + u64 clean; + u64 linearize; + u64 no_csum; + u64 csum; + u64 crc32_csum; + u64 tso; + u64 frags; + u64 
sg_cntr[IONIC_MAX_NUM_SG_CNTR]; +}; + +struct ionic_rx_stats { + u64 dma_map_err; + u64 alloc_err; + u64 pkts; + u64 bytes; + u64 csum_none; + u64 csum_complete; + u64 csum_error; + u64 buffers_posted; +}; + +#define IONIC_QCQ_F_INITED BIT(0) +#define IONIC_QCQ_F_SG BIT(1) +#define IONIC_QCQ_F_INTR BIT(2) +#define IONIC_QCQ_F_TX_STATS BIT(3) +#define IONIC_QCQ_F_RX_STATS BIT(4) +#define IONIC_QCQ_F_NOTIFYQ BIT(5) + +struct ionic_napi_stats { + u64 poll_count; + u64 work_done_cntr[IONIC_MAX_NUM_NAPI_CNTR]; +}; + +struct ionic_q_stats { + union { + struct ionic_tx_stats tx; + struct ionic_rx_stats rx; + }; +}; + +struct ionic_qcq { + void *base; + dma_addr_t base_pa; + unsigned int total_size; + struct ionic_queue q; + struct ionic_cq cq; + struct ionic_intr_info intr; + struct napi_struct napi; + struct ionic_napi_stats napi_stats; + struct ionic_q_stats *stats; + unsigned int flags; + struct dentry *dentry; +}; + +struct ionic_qcqst { + struct ionic_qcq *qcq; + struct ionic_q_stats *stats; +}; + +#define q_to_qcq(q) container_of(q, struct ionic_qcq, q) +#define q_to_tx_stats(q) (&q_to_qcq(q)->stats->tx) +#define q_to_rx_stats(q) (&q_to_qcq(q)->stats->rx) +#define napi_to_qcq(napi) container_of(napi, struct ionic_qcq, napi) +#define napi_to_cq(napi) (&napi_to_qcq(napi)->cq) + +enum ionic_deferred_work_type { + IONIC_DW_TYPE_RX_MODE, + IONIC_DW_TYPE_RX_ADDR_ADD, + IONIC_DW_TYPE_RX_ADDR_DEL, + IONIC_DW_TYPE_LINK_STATUS, + IONIC_DW_TYPE_LIF_RESET, +}; + +struct ionic_deferred_work { + struct list_head list; + enum ionic_deferred_work_type type; + union { + unsigned int rx_mode; + u8 addr[ETH_ALEN]; + }; +}; + +struct ionic_deferred { + spinlock_t lock; /* lock for deferred work list */ + struct list_head list; + struct work_struct work; +}; + +struct ionic_lif_sw_stats { + u64 tx_packets; + u64 tx_bytes; + u64 rx_packets; + u64 rx_bytes; + u64 tx_tso; + u64 tx_no_csum; + u64 tx_csum; + u64 rx_csum_none; + u64 rx_csum_complete; + u64 rx_csum_error; +}; + +enum ionic_lif_state_flags { + IONIC_LIF_INITED, + IONIC_LIF_SW_DEBUG_STATS, + IONIC_LIF_UP, + IONIC_LIF_LINK_CHECK_REQUESTED, + IONIC_LIF_QUEUE_RESET, + + /* leave this as last */ + IONIC_LIF_STATE_SIZE +}; + +#define IONIC_LIF_NAME_MAX_SZ 32 +struct ionic_lif { + char name[IONIC_LIF_NAME_MAX_SZ]; + struct list_head list; + struct net_device *netdev; + DECLARE_BITMAP(state, IONIC_LIF_STATE_SIZE); + struct ionic *ionic; + bool registered; + unsigned int index; + unsigned int hw_index; + unsigned int kern_pid; + u64 __iomem *kern_dbpage; + spinlock_t adminq_lock; /* lock for AdminQ operations */ + struct ionic_qcq *adminqcq; + struct ionic_qcq *notifyqcq; + struct ionic_qcqst *txqcqs; + struct ionic_qcqst *rxqcqs; + u64 last_eid; + unsigned int neqs; + unsigned int nxqs; + unsigned int ntxq_descs; + unsigned int nrxq_descs; + u32 rx_copybreak; + unsigned int rx_mode; + u64 hw_features; + bool mc_overflow; + unsigned int nmcast; + bool uc_overflow; + unsigned int nucast; + + struct ionic_lif_info *info; + dma_addr_t info_pa; + u32 info_sz; + + u16 rss_types; + u8 rss_hash_key[IONIC_RSS_HASH_KEY_SIZE]; + u8 *rss_ind_tbl; + dma_addr_t rss_ind_tbl_pa; + u32 rss_ind_tbl_sz; + + struct ionic_rx_filters rx_filters; + struct ionic_deferred deferred; + unsigned long *dbid_inuse; + unsigned int dbid_count; + struct dentry *dentry; + u32 rx_coalesce_usecs; + u32 flags; + struct work_struct tx_timeout_work; +}; + +#define lif_to_txqcq(lif, i) ((lif)->txqcqs[i].qcq) +#define lif_to_rxqcq(lif, i) ((lif)->rxqcqs[i].qcq) +#define lif_to_txq(lif, i) 
(&lif_to_txqcq((lif), i)->q) +#define lif_to_rxq(lif, i) (&lif_to_rxqcq((lif), i)->q) + +static inline int ionic_wait_for_bit(struct ionic_lif *lif, int bitname) +{ + unsigned long tlimit = jiffies + HZ; + + while (test_and_set_bit(bitname, lif->state) && + time_before(jiffies, tlimit)) + usleep_range(100, 200); + + return test_bit(bitname, lif->state); +} + +static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs) +{ + u32 mult = le32_to_cpu(ionic->ident.dev.intr_coal_mult); + u32 div = le32_to_cpu(ionic->ident.dev.intr_coal_div); + + /* Div-by-zero should never be an issue, but check anyway */ + if (!div || !mult) + return 0; + + /* Round up in case usecs is close to the next hw unit */ + usecs += (div / mult) >> 1; + + /* Convert from usecs to device units */ + return (usecs * mult) / div; +} + +static inline u32 ionic_coal_hw_to_usec(struct ionic *ionic, u32 units) +{ + u32 mult = le32_to_cpu(ionic->ident.dev.intr_coal_mult); + u32 div = le32_to_cpu(ionic->ident.dev.intr_coal_div); + + /* Div-by-zero should never be an issue, but check anyway */ + if (!div || !mult) + return 0; + + /* Convert from device units to usec */ + return (units * div) / mult; +} + +int ionic_lifs_alloc(struct ionic *ionic); +void ionic_lifs_free(struct ionic *ionic); +void ionic_lifs_deinit(struct ionic *ionic); +int ionic_lifs_init(struct ionic *ionic); +int ionic_lifs_register(struct ionic *ionic); +void ionic_lifs_unregister(struct ionic *ionic); +int ionic_lif_identify(struct ionic *ionic, u8 lif_type, + union ionic_lif_identity *lif_ident); +int ionic_lifs_size(struct ionic *ionic); +int ionic_lif_rss_config(struct ionic_lif *lif, u16 types, + const u8 *key, const u32 *indir); + +int ionic_open(struct net_device *netdev); +int ionic_stop(struct net_device *netdev); +int ionic_reset_queues(struct ionic_lif *lif); + +static inline void debug_stats_txq_post(struct ionic_qcq *qcq, + struct ionic_txq_desc *desc, bool dbell) +{ + u8 num_sg_elems = ((le64_to_cpu(desc->cmd) >> IONIC_TXQ_DESC_NSGE_SHIFT) + & IONIC_TXQ_DESC_NSGE_MASK); + + qcq->q.dbell_count += dbell; + + if (num_sg_elems > (IONIC_MAX_NUM_SG_CNTR - 1)) + num_sg_elems = IONIC_MAX_NUM_SG_CNTR - 1; + + qcq->stats->tx.sg_cntr[num_sg_elems]++; +} + +static inline void debug_stats_napi_poll(struct ionic_qcq *qcq, + unsigned int work_done) +{ + qcq->napi_stats.poll_count++; + + if (work_done > (IONIC_MAX_NUM_NAPI_CNTR - 1)) + work_done = IONIC_MAX_NUM_NAPI_CNTR - 1; + + qcq->napi_stats.work_done_cntr[work_done]++; +} + +#define DEBUG_STATS_CQE_CNT(cq) ((cq)->compl_count++) +#define DEBUG_STATS_RX_BUFF_CNT(qcq) ((qcq)->stats->rx.buffers_posted++) +#define DEBUG_STATS_INTR_REARM(intr) ((intr)->rearm_count++) +#define DEBUG_STATS_TXQ_POST(qcq, txdesc, dbell) \ + debug_stats_txq_post(qcq, txdesc, dbell) +#define DEBUG_STATS_NAPI_POLL(qcq, work_done) \ + debug_stats_napi_poll(qcq, work_done) + +#endif /* _IONIC_LIF_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c new file mode 100644 index 000000000000..5ec67f3f1853 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c @@ -0,0 +1,549 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#include <linux/module.h> +#include <linux/version.h> +#include <linux/netdevice.h> +#include <linux/utsname.h> + +#include "ionic.h" +#include "ionic_bus.h" +#include "ionic_lif.h" +#include "ionic_debugfs.h" + +MODULE_DESCRIPTION(IONIC_DRV_DESCRIPTION); +MODULE_AUTHOR("Pensando 
Systems, Inc"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(IONIC_DRV_VERSION); + +static const char *ionic_error_to_str(enum ionic_status_code code) +{ + switch (code) { + case IONIC_RC_SUCCESS: + return "IONIC_RC_SUCCESS"; + case IONIC_RC_EVERSION: + return "IONIC_RC_EVERSION"; + case IONIC_RC_EOPCODE: + return "IONIC_RC_EOPCODE"; + case IONIC_RC_EIO: + return "IONIC_RC_EIO"; + case IONIC_RC_EPERM: + return "IONIC_RC_EPERM"; + case IONIC_RC_EQID: + return "IONIC_RC_EQID"; + case IONIC_RC_EQTYPE: + return "IONIC_RC_EQTYPE"; + case IONIC_RC_ENOENT: + return "IONIC_RC_ENOENT"; + case IONIC_RC_EINTR: + return "IONIC_RC_EINTR"; + case IONIC_RC_EAGAIN: + return "IONIC_RC_EAGAIN"; + case IONIC_RC_ENOMEM: + return "IONIC_RC_ENOMEM"; + case IONIC_RC_EFAULT: + return "IONIC_RC_EFAULT"; + case IONIC_RC_EBUSY: + return "IONIC_RC_EBUSY"; + case IONIC_RC_EEXIST: + return "IONIC_RC_EEXIST"; + case IONIC_RC_EINVAL: + return "IONIC_RC_EINVAL"; + case IONIC_RC_ENOSPC: + return "IONIC_RC_ENOSPC"; + case IONIC_RC_ERANGE: + return "IONIC_RC_ERANGE"; + case IONIC_RC_BAD_ADDR: + return "IONIC_RC_BAD_ADDR"; + case IONIC_RC_DEV_CMD: + return "IONIC_RC_DEV_CMD"; + case IONIC_RC_ERROR: + return "IONIC_RC_ERROR"; + case IONIC_RC_ERDMA: + return "IONIC_RC_ERDMA"; + default: + return "IONIC_RC_UNKNOWN"; + } +} + +static int ionic_error_to_errno(enum ionic_status_code code) +{ + switch (code) { + case IONIC_RC_SUCCESS: + return 0; + case IONIC_RC_EVERSION: + case IONIC_RC_EQTYPE: + case IONIC_RC_EQID: + case IONIC_RC_EINVAL: + return -EINVAL; + case IONIC_RC_EPERM: + return -EPERM; + case IONIC_RC_ENOENT: + return -ENOENT; + case IONIC_RC_EAGAIN: + return -EAGAIN; + case IONIC_RC_ENOMEM: + return -ENOMEM; + case IONIC_RC_EFAULT: + return -EFAULT; + case IONIC_RC_EBUSY: + return -EBUSY; + case IONIC_RC_EEXIST: + return -EEXIST; + case IONIC_RC_ENOSPC: + return -ENOSPC; + case IONIC_RC_ERANGE: + return -ERANGE; + case IONIC_RC_BAD_ADDR: + return -EFAULT; + case IONIC_RC_EOPCODE: + case IONIC_RC_EINTR: + case IONIC_RC_DEV_CMD: + case IONIC_RC_ERROR: + case IONIC_RC_ERDMA: + case IONIC_RC_EIO: + default: + return -EIO; + } +} + +static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode) +{ + switch (opcode) { + case IONIC_CMD_NOP: + return "IONIC_CMD_NOP"; + case IONIC_CMD_INIT: + return "IONIC_CMD_INIT"; + case IONIC_CMD_RESET: + return "IONIC_CMD_RESET"; + case IONIC_CMD_IDENTIFY: + return "IONIC_CMD_IDENTIFY"; + case IONIC_CMD_GETATTR: + return "IONIC_CMD_GETATTR"; + case IONIC_CMD_SETATTR: + return "IONIC_CMD_SETATTR"; + case IONIC_CMD_PORT_IDENTIFY: + return "IONIC_CMD_PORT_IDENTIFY"; + case IONIC_CMD_PORT_INIT: + return "IONIC_CMD_PORT_INIT"; + case IONIC_CMD_PORT_RESET: + return "IONIC_CMD_PORT_RESET"; + case IONIC_CMD_PORT_GETATTR: + return "IONIC_CMD_PORT_GETATTR"; + case IONIC_CMD_PORT_SETATTR: + return "IONIC_CMD_PORT_SETATTR"; + case IONIC_CMD_LIF_INIT: + return "IONIC_CMD_LIF_INIT"; + case IONIC_CMD_LIF_RESET: + return "IONIC_CMD_LIF_RESET"; + case IONIC_CMD_LIF_IDENTIFY: + return "IONIC_CMD_LIF_IDENTIFY"; + case IONIC_CMD_LIF_SETATTR: + return "IONIC_CMD_LIF_SETATTR"; + case IONIC_CMD_LIF_GETATTR: + return "IONIC_CMD_LIF_GETATTR"; + case IONIC_CMD_RX_MODE_SET: + return "IONIC_CMD_RX_MODE_SET"; + case IONIC_CMD_RX_FILTER_ADD: + return "IONIC_CMD_RX_FILTER_ADD"; + case IONIC_CMD_RX_FILTER_DEL: + return "IONIC_CMD_RX_FILTER_DEL"; + case IONIC_CMD_Q_INIT: + return "IONIC_CMD_Q_INIT"; + case IONIC_CMD_Q_CONTROL: + return "IONIC_CMD_Q_CONTROL"; + case IONIC_CMD_RDMA_RESET_LIF: + return 
"IONIC_CMD_RDMA_RESET_LIF"; + case IONIC_CMD_RDMA_CREATE_EQ: + return "IONIC_CMD_RDMA_CREATE_EQ"; + case IONIC_CMD_RDMA_CREATE_CQ: + return "IONIC_CMD_RDMA_CREATE_CQ"; + case IONIC_CMD_RDMA_CREATE_ADMINQ: + return "IONIC_CMD_RDMA_CREATE_ADMINQ"; + case IONIC_CMD_FW_DOWNLOAD: + return "IONIC_CMD_FW_DOWNLOAD"; + case IONIC_CMD_FW_CONTROL: + return "IONIC_CMD_FW_CONTROL"; + default: + return "DEVCMD_UNKNOWN"; + } +} + +static void ionic_adminq_flush(struct ionic_lif *lif) +{ + struct ionic_queue *adminq = &lif->adminqcq->q; + + spin_lock(&lif->adminq_lock); + + while (adminq->tail != adminq->head) { + memset(adminq->tail->desc, 0, sizeof(union ionic_adminq_cmd)); + adminq->tail->cb = NULL; + adminq->tail->cb_arg = NULL; + adminq->tail = adminq->tail->next; + } + spin_unlock(&lif->adminq_lock); +} + +static int ionic_adminq_check_err(struct ionic_lif *lif, + struct ionic_admin_ctx *ctx, + bool timeout) +{ + struct net_device *netdev = lif->netdev; + const char *opcode_str; + const char *status_str; + int err = 0; + + if (ctx->comp.comp.status || timeout) { + opcode_str = ionic_opcode_to_str(ctx->cmd.cmd.opcode); + status_str = ionic_error_to_str(ctx->comp.comp.status); + err = timeout ? -ETIMEDOUT : + ionic_error_to_errno(ctx->comp.comp.status); + + netdev_err(netdev, "%s (%d) failed: %s (%d)\n", + opcode_str, ctx->cmd.cmd.opcode, + timeout ? "TIMEOUT" : status_str, err); + + if (timeout) + ionic_adminq_flush(lif); + } + + return err; +} + +static void ionic_adminq_cb(struct ionic_queue *q, + struct ionic_desc_info *desc_info, + struct ionic_cq_info *cq_info, void *cb_arg) +{ + struct ionic_admin_ctx *ctx = cb_arg; + struct ionic_admin_comp *comp; + struct device *dev; + + if (!ctx) + return; + + comp = cq_info->cq_desc; + dev = &q->lif->netdev->dev; + + memcpy(&ctx->comp, comp, sizeof(*comp)); + + dev_dbg(dev, "comp admin queue command:\n"); + dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1, + &ctx->comp, sizeof(ctx->comp), true); + + complete_all(&ctx->work); +} + +static int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) +{ + struct ionic_queue *adminq = &lif->adminqcq->q; + int err = 0; + + WARN_ON(in_interrupt()); + + spin_lock(&lif->adminq_lock); + if (!ionic_q_has_space(adminq, 1)) { + err = -ENOSPC; + goto err_out; + } + + memcpy(adminq->head->desc, &ctx->cmd, sizeof(ctx->cmd)); + + dev_dbg(&lif->netdev->dev, "post admin queue command:\n"); + dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1, + &ctx->cmd, sizeof(ctx->cmd), true); + + ionic_q_post(adminq, true, ionic_adminq_cb, ctx); + +err_out: + spin_unlock(&lif->adminq_lock); + + return err; +} + +int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) +{ + struct net_device *netdev = lif->netdev; + unsigned long remaining; + const char *name; + int err; + + err = ionic_adminq_post(lif, ctx); + if (err) { + name = ionic_opcode_to_str(ctx->cmd.cmd.opcode); + netdev_err(netdev, "Posting of %s (%d) failed: %d\n", + name, ctx->cmd.cmd.opcode, err); + return err; + } + + remaining = wait_for_completion_timeout(&ctx->work, + HZ * (ulong)DEVCMD_TIMEOUT); + return ionic_adminq_check_err(lif, ctx, (remaining == 0)); +} + +int ionic_napi(struct napi_struct *napi, int budget, ionic_cq_cb cb, + ionic_cq_done_cb done_cb, void *done_arg) +{ + struct ionic_qcq *qcq = napi_to_qcq(napi); + struct ionic_cq *cq = &qcq->cq; + u32 work_done, flags = 0; + + work_done = ionic_cq_service(cq, budget, cb, done_cb, done_arg); + + if (work_done < budget && napi_complete_done(napi, work_done)) { + flags |= 
IONIC_INTR_CRED_UNMASK; + DEBUG_STATS_INTR_REARM(cq->bound_intr); + } + + if (work_done || flags) { + flags |= IONIC_INTR_CRED_RESET_COALESCE; + ionic_intr_credits(cq->lif->ionic->idev.intr_ctrl, + cq->bound_intr->index, + work_done, flags); + } + + DEBUG_STATS_NAPI_POLL(qcq, work_done); + + return work_done; +} + +int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds) +{ + struct ionic_dev *idev = &ionic->idev; + unsigned long start_time; + unsigned long max_wait; + unsigned long duration; + int opcode; + int done; + int err; + + WARN_ON(in_interrupt()); + + /* Wait for dev cmd to complete, retrying if we get EAGAIN, + * but don't wait any longer than max_seconds. + */ + max_wait = jiffies + (max_seconds * HZ); +try_again: + start_time = jiffies; + do { + done = ionic_dev_cmd_done(idev); + if (done) + break; + msleep(20); + } while (!done && time_before(jiffies, max_wait)); + duration = jiffies - start_time; + + opcode = idev->dev_cmd_regs->cmd.cmd.opcode; + dev_dbg(ionic->dev, "DEVCMD %s (%d) done=%d took %ld secs (%ld jiffies)\n", + ionic_opcode_to_str(opcode), opcode, + done, duration / HZ, duration); + + if (!done && !time_before(jiffies, max_wait)) { + dev_warn(ionic->dev, "DEVCMD %s (%d) timeout after %ld secs\n", + ionic_opcode_to_str(opcode), opcode, max_seconds); + return -ETIMEDOUT; + } + + err = ionic_dev_cmd_status(&ionic->idev); + if (err) { + if (err == IONIC_RC_EAGAIN && !time_after(jiffies, max_wait)) { + dev_err(ionic->dev, "DEV_CMD %s (%d) error, %s (%d) retrying...\n", + ionic_opcode_to_str(opcode), opcode, + ionic_error_to_str(err), err); + + msleep(1000); + iowrite32(0, &idev->dev_cmd_regs->done); + iowrite32(1, &idev->dev_cmd_regs->doorbell); + goto try_again; + } + + dev_err(ionic->dev, "DEV_CMD %s (%d) error, %s (%d) failed\n", + ionic_opcode_to_str(opcode), opcode, + ionic_error_to_str(err), err); + + return ionic_error_to_errno(err); + } + + return 0; +} + +int ionic_setup(struct ionic *ionic) +{ + int err; + + err = ionic_dev_setup(ionic); + if (err) + return err; + + return 0; +} + +int ionic_identify(struct ionic *ionic) +{ + struct ionic_identity *ident = &ionic->ident; + struct ionic_dev *idev = &ionic->idev; + size_t sz; + int err; + + memset(ident, 0, sizeof(*ident)); + + ident->drv.os_type = cpu_to_le32(IONIC_OS_TYPE_LINUX); + strncpy(ident->drv.driver_ver_str, IONIC_DRV_VERSION, + sizeof(ident->drv.driver_ver_str) - 1); + + mutex_lock(&ionic->dev_cmd_lock); + + sz = min(sizeof(ident->drv), sizeof(idev->dev_cmd_regs->data)); + memcpy_toio(&idev->dev_cmd_regs->data, &ident->drv, sz); + + ionic_dev_cmd_identify(idev, IONIC_IDENTITY_VERSION_1); + err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); + if (!err) { + sz = min(sizeof(ident->dev), sizeof(idev->dev_cmd_regs->data)); + memcpy_fromio(&ident->dev, &idev->dev_cmd_regs->data, sz); + } + + mutex_unlock(&ionic->dev_cmd_lock); + + if (err) + goto err_out_unmap; + + ionic_debugfs_add_ident(ionic); + + return 0; + +err_out_unmap: + return err; +} + +int ionic_init(struct ionic *ionic) +{ + struct ionic_dev *idev = &ionic->idev; + int err; + + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_init(idev); + err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); + mutex_unlock(&ionic->dev_cmd_lock); + + return err; +} + +int ionic_reset(struct ionic *ionic) +{ + struct ionic_dev *idev = &ionic->idev; + int err; + + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_reset(idev); + err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); + mutex_unlock(&ionic->dev_cmd_lock); + + return err; +} + +int 
ionic_port_identify(struct ionic *ionic) +{ + struct ionic_identity *ident = &ionic->ident; + struct ionic_dev *idev = &ionic->idev; + size_t sz; + int err; + + mutex_lock(&ionic->dev_cmd_lock); + + ionic_dev_cmd_port_identify(idev); + err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); + if (!err) { + sz = min(sizeof(ident->port), sizeof(idev->dev_cmd_regs->data)); + memcpy_fromio(&ident->port, &idev->dev_cmd_regs->data, sz); + } + + mutex_unlock(&ionic->dev_cmd_lock); + + return err; +} + +int ionic_port_init(struct ionic *ionic) +{ + struct ionic_identity *ident = &ionic->ident; + struct ionic_dev *idev = &ionic->idev; + size_t sz; + int err; + + if (idev->port_info) + return 0; + + idev->port_info_sz = ALIGN(sizeof(*idev->port_info), PAGE_SIZE); + idev->port_info = dma_alloc_coherent(ionic->dev, idev->port_info_sz, + &idev->port_info_pa, + GFP_KERNEL); + if (!idev->port_info) { + dev_err(ionic->dev, "Failed to allocate port info, aborting\n"); + return -ENOMEM; + } + + sz = min(sizeof(ident->port.config), sizeof(idev->dev_cmd_regs->data)); + + mutex_lock(&ionic->dev_cmd_lock); + + memcpy_toio(&idev->dev_cmd_regs->data, &ident->port.config, sz); + ionic_dev_cmd_port_init(idev); + err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); + + ionic_dev_cmd_port_state(&ionic->idev, IONIC_PORT_ADMIN_STATE_UP); + (void)ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); + + mutex_unlock(&ionic->dev_cmd_lock); + if (err) { + dev_err(ionic->dev, "Failed to init port\n"); + dma_free_coherent(ionic->dev, idev->port_info_sz, + idev->port_info, idev->port_info_pa); + idev->port_info = NULL; + idev->port_info_pa = 0; + } + + return err; +} + +int ionic_port_reset(struct ionic *ionic) +{ + struct ionic_dev *idev = &ionic->idev; + int err; + + if (!idev->port_info) + return 0; + + mutex_lock(&ionic->dev_cmd_lock); + ionic_dev_cmd_port_reset(idev); + err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT); + mutex_unlock(&ionic->dev_cmd_lock); + + dma_free_coherent(ionic->dev, idev->port_info_sz, + idev->port_info, idev->port_info_pa); + + idev->port_info = NULL; + idev->port_info_pa = 0; + + if (err) + dev_err(ionic->dev, "Failed to reset port\n"); + + return err; +} + +static int __init ionic_init_module(void) +{ + pr_info("%s %s, ver %s\n", + IONIC_DRV_NAME, IONIC_DRV_DESCRIPTION, IONIC_DRV_VERSION); + ionic_debugfs_create(); + return ionic_bus_register_driver(); +} + +static void __exit ionic_cleanup_module(void) +{ + ionic_bus_unregister_driver(); + ionic_debugfs_destroy(); + + pr_info("%s removed\n", IONIC_DRV_NAME); +} + +module_init(ionic_init_module); +module_exit(ionic_cleanup_module); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_regs.h b/drivers/net/ethernet/pensando/ionic/ionic_regs.h new file mode 100644 index 000000000000..03ee5a36472b --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_regs.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB OR BSD-2-Clause */ +/* Copyright (c) 2018-2019 Pensando Systems, Inc. All rights reserved. */ + +#ifndef IONIC_REGS_H +#define IONIC_REGS_H + +#include <linux/io.h> + +/** struct ionic_intr - interrupt control register set. + * @coal_init: coalesce timer initial value. + * @mask: interrupt mask value. + * @credits: interrupt credit count and return. + * @mask_assert: interrupt mask value on assert. + * @coal: coalesce timer time remaining. 
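+ * + * One such register block exists per interrupt; the accessors below take + * (intr_ctrl, intr_idx) and address the fields of intr_ctrl[intr_idx] + * (editor's note, inferred from the accessors and IONIC_INTR_CTRL_REGS_MAX).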
+ */ +struct ionic_intr { + u32 coal_init; + u32 mask; + u32 credits; + u32 mask_assert; + u32 coal; + u32 rsvd[3]; +}; + +#define IONIC_INTR_CTRL_REGS_MAX 2048 +#define IONIC_INTR_CTRL_COAL_MAX 0x3F + +/** enum ionic_intr_mask_vals - valid values for mask and mask_assert. + * @IONIC_INTR_MASK_CLEAR: unmask interrupt. + * @IONIC_INTR_MASK_SET: mask interrupt. + */ +enum ionic_intr_mask_vals { + IONIC_INTR_MASK_CLEAR = 0, + IONIC_INTR_MASK_SET = 1, +}; + +/** enum ionic_intr_credits_bits - bitwise composition of credits values. + * @IONIC_INTR_CRED_COUNT: bit mask of credit count, no shift needed. + * @IONIC_INTR_CRED_COUNT_SIGNED: bit mask of credit count, including sign bit. + * @IONIC_INTR_CRED_UNMASK: unmask the interrupt. + * @IONIC_INTR_CRED_RESET_COALESCE: reset the coalesce timer. + * @IONIC_INTR_CRED_REARM: unmask the interrupt and reset the coalesce timer. + */ +enum ionic_intr_credits_bits { + IONIC_INTR_CRED_COUNT = 0x7fffu, + IONIC_INTR_CRED_COUNT_SIGNED = 0xffffu, + IONIC_INTR_CRED_UNMASK = 0x10000u, + IONIC_INTR_CRED_RESET_COALESCE = 0x20000u, + IONIC_INTR_CRED_REARM = (IONIC_INTR_CRED_UNMASK | + IONIC_INTR_CRED_RESET_COALESCE), +}; + +static inline void ionic_intr_coal_init(struct ionic_intr __iomem *intr_ctrl, + int intr_idx, u32 coal) +{ + iowrite32(coal, &intr_ctrl[intr_idx].coal_init); +} + +static inline void ionic_intr_mask(struct ionic_intr __iomem *intr_ctrl, + int intr_idx, u32 mask) +{ + iowrite32(mask, &intr_ctrl[intr_idx].mask); +} + +static inline void ionic_intr_credits(struct ionic_intr __iomem *intr_ctrl, + int intr_idx, u32 cred, u32 flags) +{ + if (WARN_ON_ONCE(cred > IONIC_INTR_CRED_COUNT)) { + cred = ioread32(&intr_ctrl[intr_idx].credits); + cred &= IONIC_INTR_CRED_COUNT_SIGNED; + } + + iowrite32(cred | flags, &intr_ctrl[intr_idx].credits); +} + +static inline void ionic_intr_clean(struct ionic_intr __iomem *intr_ctrl, + int intr_idx) +{ + u32 cred; + + cred = ioread32(&intr_ctrl[intr_idx].credits); + cred &= IONIC_INTR_CRED_COUNT_SIGNED; + cred |= IONIC_INTR_CRED_RESET_COALESCE; + iowrite32(cred, &intr_ctrl[intr_idx].credits); +} + +static inline void ionic_intr_mask_assert(struct ionic_intr __iomem *intr_ctrl, + int intr_idx, u32 mask) +{ + iowrite32(mask, &intr_ctrl[intr_idx].mask_assert); +} + +/** enum ionic_dbell_bits - bitwise composition of dbell values. + * + * @IONIC_DBELL_QID_MASK: unshifted mask of valid queue id bits. + * @IONIC_DBELL_QID_SHIFT: queue id shift amount in dbell value. + * @IONIC_DBELL_QID: macro to build QID component of dbell value. + * + * @IONIC_DBELL_RING_MASK: unshifted mask of valid ring bits. + * @IONIC_DBELL_RING_SHIFT: ring shift amount in dbell value. + * @IONIC_DBELL_RING: macro to build ring component of dbell value. + * + * @IONIC_DBELL_RING_0: ring zero dbell component value. + * @IONIC_DBELL_RING_1: ring one dbell component value. + * @IONIC_DBELL_RING_2: ring two dbell component value. + * @IONIC_DBELL_RING_3: ring three dbell component value. + * + * @IONIC_DBELL_INDEX_MASK: bit mask of valid index bits, no shift needed. 
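+ * + * As an illustration (editor's example, not in the original patch): the + * doorbell value for queue id 5 on ring 0 at producer index 10 is built as + * IONIC_DBELL_QID(5) | IONIC_DBELL_RING_0 | (10 & IONIC_DBELL_INDEX_MASK).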
+ */ +enum ionic_dbell_bits { + IONIC_DBELL_QID_MASK = 0xffffff, + IONIC_DBELL_QID_SHIFT = 24, + +#define IONIC_DBELL_QID(n) \ + (((u64)(n) & IONIC_DBELL_QID_MASK) << IONIC_DBELL_QID_SHIFT) + + IONIC_DBELL_RING_MASK = 0x7, + IONIC_DBELL_RING_SHIFT = 16, + +#define IONIC_DBELL_RING(n) \ + (((u64)(n) & IONIC_DBELL_RING_MASK) << IONIC_DBELL_RING_SHIFT) + + IONIC_DBELL_RING_0 = 0, + IONIC_DBELL_RING_1 = IONIC_DBELL_RING(1), + IONIC_DBELL_RING_2 = IONIC_DBELL_RING(2), + IONIC_DBELL_RING_3 = IONIC_DBELL_RING(3), + + IONIC_DBELL_INDEX_MASK = 0xffff, +}; + +static inline void ionic_dbell_ring(u64 __iomem *db_page, int qtype, u64 val) +{ + writeq(val, &db_page[qtype]); +} + +#endif /* IONIC_REGS_H */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c new file mode 100644 index 000000000000..7a093f148ee5 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#include <linux/netdevice.h> +#include <linux/etherdevice.h> + +#include "ionic.h" +#include "ionic_lif.h" +#include "ionic_rx_filter.h" + +void ionic_rx_filter_free(struct ionic_lif *lif, struct ionic_rx_filter *f) +{ + struct device *dev = lif->ionic->dev; + + hlist_del(&f->by_id); + hlist_del(&f->by_hash); + devm_kfree(dev, f); +} + +int ionic_rx_filter_del(struct ionic_lif *lif, struct ionic_rx_filter *f) +{ + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.rx_filter_del = { + .opcode = IONIC_CMD_RX_FILTER_DEL, + .filter_id = cpu_to_le32(f->filter_id), + }, + }; + + return ionic_adminq_post_wait(lif, &ctx); +} + +int ionic_rx_filters_init(struct ionic_lif *lif) +{ + unsigned int i; + + spin_lock_init(&lif->rx_filters.lock); + + for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { + INIT_HLIST_HEAD(&lif->rx_filters.by_hash[i]); + INIT_HLIST_HEAD(&lif->rx_filters.by_id[i]); + } + + return 0; +} + +void ionic_rx_filters_deinit(struct ionic_lif *lif) +{ + struct ionic_rx_filter *f; + struct hlist_head *head; + struct hlist_node *tmp; + unsigned int i; + + for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { + head = &lif->rx_filters.by_id[i]; + hlist_for_each_entry_safe(f, tmp, head, by_id) + ionic_rx_filter_free(lif, f); + } +} + +int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, + u32 hash, struct ionic_admin_ctx *ctx) +{ + struct device *dev = lif->ionic->dev; + struct ionic_rx_filter_add_cmd *ac; + struct ionic_rx_filter *f; + struct hlist_head *head; + unsigned int key; + + ac = &ctx->cmd.rx_filter_add; + + switch (le16_to_cpu(ac->match)) { + case IONIC_RX_FILTER_MATCH_VLAN: + key = le16_to_cpu(ac->vlan.vlan); + break; + case IONIC_RX_FILTER_MATCH_MAC: + key = *(u32 *)ac->mac.addr; + break; + case IONIC_RX_FILTER_MATCH_MAC_VLAN: + key = le16_to_cpu(ac->mac_vlan.vlan); + break; + default: + return -EINVAL; + } + + f = devm_kzalloc(dev, sizeof(*f), GFP_KERNEL); + if (!f) + return -ENOMEM; + + f->flow_id = flow_id; + f->filter_id = le32_to_cpu(ctx->comp.rx_filter_add.filter_id); + f->rxq_index = rxq_index; + memcpy(&f->cmd, ac, sizeof(f->cmd)); + + INIT_HLIST_NODE(&f->by_hash); + INIT_HLIST_NODE(&f->by_id); + + spin_lock_bh(&lif->rx_filters.lock); + + key = hash_32(key, IONIC_RX_FILTER_HASH_BITS); + head = &lif->rx_filters.by_hash[key]; + hlist_add_head(&f->by_hash, head); + + key = f->filter_id & IONIC_RX_FILTER_HLISTS_MASK; + head = &lif->rx_filters.by_id[key]; + 
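+ /* the filter is reachable both ways: via the match-key hash (above) + * and via the device-returned filter_id (here) + */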
hlist_add_head(&f->by_id, head); + + spin_unlock_bh(&lif->rx_filters.lock); + + return 0; +} + +struct ionic_rx_filter *ionic_rx_filter_by_vlan(struct ionic_lif *lif, u16 vid) +{ + struct ionic_rx_filter *f; + struct hlist_head *head; + unsigned int key; + + key = hash_32(vid, IONIC_RX_FILTER_HASH_BITS); + head = &lif->rx_filters.by_hash[key]; + + hlist_for_each_entry(f, head, by_hash) { + if (le16_to_cpu(f->cmd.match) != IONIC_RX_FILTER_MATCH_VLAN) + continue; + if (le16_to_cpu(f->cmd.vlan.vlan) == vid) + return f; + } + + return NULL; +} + +struct ionic_rx_filter *ionic_rx_filter_by_addr(struct ionic_lif *lif, + const u8 *addr) +{ + struct ionic_rx_filter *f; + struct hlist_head *head; + unsigned int key; + + key = hash_32(*(u32 *)addr, IONIC_RX_FILTER_HASH_BITS); + head = &lif->rx_filters.by_hash[key]; + + hlist_for_each_entry(f, head, by_hash) { + if (le16_to_cpu(f->cmd.match) != IONIC_RX_FILTER_MATCH_MAC) + continue; + if (memcmp(addr, f->cmd.mac.addr, ETH_ALEN) == 0) + return f; + } + + return NULL; +} diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h new file mode 100644 index 000000000000..b6aec9c19918 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#ifndef _IONIC_RX_FILTER_H_ +#define _IONIC_RX_FILTER_H_ + +#define IONIC_RXQ_INDEX_ANY (0xFFFF) +struct ionic_rx_filter { + u32 flow_id; + u32 filter_id; + u16 rxq_index; + struct ionic_rx_filter_add_cmd cmd; + struct hlist_node by_hash; + struct hlist_node by_id; +}; + +#define IONIC_RX_FILTER_HASH_BITS 10 +#define IONIC_RX_FILTER_HLISTS BIT(IONIC_RX_FILTER_HASH_BITS) +#define IONIC_RX_FILTER_HLISTS_MASK (IONIC_RX_FILTER_HLISTS - 1) +struct ionic_rx_filters { + spinlock_t lock; /* filter list lock */ + struct hlist_head by_hash[IONIC_RX_FILTER_HLISTS]; /* by skb hash */ + struct hlist_head by_id[IONIC_RX_FILTER_HLISTS]; /* by filter_id */ +}; + +void ionic_rx_filter_free(struct ionic_lif *lif, struct ionic_rx_filter *f); +int ionic_rx_filter_del(struct ionic_lif *lif, struct ionic_rx_filter *f); +int ionic_rx_filters_init(struct ionic_lif *lif); +void ionic_rx_filters_deinit(struct ionic_lif *lif); +int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, + u32 hash, struct ionic_admin_ctx *ctx); +struct ionic_rx_filter *ionic_rx_filter_by_vlan(struct ionic_lif *lif, u16 vid); +struct ionic_rx_filter *ionic_rx_filter_by_addr(struct ionic_lif *lif, const u8 *addr); + +#endif /* _IONIC_RX_FILTER_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c new file mode 100644 index 000000000000..e2907884f843 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c @@ -0,0 +1,310 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#include <linux/kernel.h> +#include <linux/mutex.h> +#include <linux/netdevice.h> + +#include "ionic.h" +#include "ionic_lif.h" +#include "ionic_stats.h" + +static const struct ionic_stat_desc ionic_lif_stats_desc[] = { + IONIC_LIF_STAT_DESC(tx_packets), + IONIC_LIF_STAT_DESC(tx_bytes), + IONIC_LIF_STAT_DESC(rx_packets), + IONIC_LIF_STAT_DESC(rx_bytes), + IONIC_LIF_STAT_DESC(tx_tso), + IONIC_LIF_STAT_DESC(tx_no_csum), + IONIC_LIF_STAT_DESC(tx_csum), + IONIC_LIF_STAT_DESC(rx_csum_none), + IONIC_LIF_STAT_DESC(rx_csum_complete), + IONIC_LIF_STAT_DESC(rx_csum_error), +}; 
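/*
 * Editor's aside (not part of the patch): the descriptor tables above pair
 * an ethtool string with a byte offset into a stats struct, and
 * IONIC_READ_STAT64() in ionic_stats.h fetches the u64 found at that offset,
 * so one generic loop can export any group of counters. A minimal sketch of
 * the same offsetof() pattern, with invented names:
 */
struct example_stats {
	u64 pkts;
	u64 bytes;
};

static const struct ionic_stat_desc example_stats_desc[] = {
	{ .name = "pkts", .offset = offsetof(struct example_stats, pkts) },
	{ .name = "bytes", .offset = offsetof(struct example_stats, bytes) },
};

/* generic fetch: base pointer plus byte offset, as IONIC_READ_STAT64() does */
static inline u64 example_read_stat(const struct example_stats *s, int i)
{
	return *(const u64 *)((const u8 *)s + example_stats_desc[i].offset);
}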
+ +static const struct ionic_stat_desc ionic_tx_stats_desc[] = { + IONIC_TX_STAT_DESC(pkts), + IONIC_TX_STAT_DESC(bytes), + IONIC_TX_STAT_DESC(clean), + IONIC_TX_STAT_DESC(dma_map_err), + IONIC_TX_STAT_DESC(linearize), + IONIC_TX_STAT_DESC(frags), +}; + +static const struct ionic_stat_desc ionic_rx_stats_desc[] = { + IONIC_RX_STAT_DESC(pkts), + IONIC_RX_STAT_DESC(bytes), + IONIC_RX_STAT_DESC(dma_map_err), + IONIC_RX_STAT_DESC(alloc_err), + IONIC_RX_STAT_DESC(csum_none), + IONIC_RX_STAT_DESC(csum_complete), + IONIC_RX_STAT_DESC(csum_error), +}; + +static const struct ionic_stat_desc ionic_txq_stats_desc[] = { + IONIC_TX_Q_STAT_DESC(stop), + IONIC_TX_Q_STAT_DESC(wake), + IONIC_TX_Q_STAT_DESC(drop), + IONIC_TX_Q_STAT_DESC(dbell_count), +}; + +static const struct ionic_stat_desc ionic_dbg_cq_stats_desc[] = { + IONIC_CQ_STAT_DESC(compl_count), +}; + +static const struct ionic_stat_desc ionic_dbg_intr_stats_desc[] = { + IONIC_INTR_STAT_DESC(rearm_count), +}; + +static const struct ionic_stat_desc ionic_dbg_napi_stats_desc[] = { + IONIC_NAPI_STAT_DESC(poll_count), +}; + +#define IONIC_NUM_LIF_STATS ARRAY_SIZE(ionic_lif_stats_desc) +#define IONIC_NUM_TX_STATS ARRAY_SIZE(ionic_tx_stats_desc) +#define IONIC_NUM_RX_STATS ARRAY_SIZE(ionic_rx_stats_desc) +#define IONIC_NUM_TX_Q_STATS ARRAY_SIZE(ionic_txq_stats_desc) +#define IONIC_NUM_DBG_CQ_STATS ARRAY_SIZE(ionic_dbg_cq_stats_desc) +#define IONIC_NUM_DBG_INTR_STATS ARRAY_SIZE(ionic_dbg_intr_stats_desc) +#define IONIC_NUM_DBG_NAPI_STATS ARRAY_SIZE(ionic_dbg_napi_stats_desc) + +#define MAX_Q(lif) ((lif)->netdev->real_num_tx_queues) + +static void ionic_get_lif_stats(struct ionic_lif *lif, + struct ionic_lif_sw_stats *stats) +{ + struct ionic_tx_stats *tstats; + struct ionic_rx_stats *rstats; + struct ionic_qcq *txqcq; + struct ionic_qcq *rxqcq; + int q_num; + + memset(stats, 0, sizeof(*stats)); + + for (q_num = 0; q_num < MAX_Q(lif); q_num++) { + txqcq = lif_to_txqcq(lif, q_num); + if (txqcq && txqcq->stats) { + tstats = &txqcq->stats->tx; + stats->tx_packets += tstats->pkts; + stats->tx_bytes += tstats->bytes; + stats->tx_tso += tstats->tso; + stats->tx_no_csum += tstats->no_csum; + stats->tx_csum += tstats->csum; + } + + rxqcq = lif_to_rxqcq(lif, q_num); + if (rxqcq && rxqcq->stats) { + rstats = &rxqcq->stats->rx; + stats->rx_packets += rstats->pkts; + stats->rx_bytes += rstats->bytes; + stats->rx_csum_none += rstats->csum_none; + stats->rx_csum_complete += rstats->csum_complete; + stats->rx_csum_error += rstats->csum_error; + } + } +} + +static u64 ionic_sw_stats_get_count(struct ionic_lif *lif) +{ + u64 total = 0; + + /* lif stats */ + total += IONIC_NUM_LIF_STATS; + + /* tx stats */ + total += MAX_Q(lif) * IONIC_NUM_TX_STATS; + + /* rx stats */ + total += MAX_Q(lif) * IONIC_NUM_RX_STATS; + + if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) { + /* tx debug stats */ + total += MAX_Q(lif) * (IONIC_NUM_DBG_CQ_STATS + + IONIC_NUM_TX_Q_STATS + + IONIC_NUM_DBG_INTR_STATS + + IONIC_MAX_NUM_SG_CNTR); + + /* rx debug stats */ + total += MAX_Q(lif) * (IONIC_NUM_DBG_CQ_STATS + + IONIC_NUM_DBG_INTR_STATS + + IONIC_NUM_DBG_NAPI_STATS + + IONIC_MAX_NUM_NAPI_CNTR); + } + + return total; +} + +static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf) +{ + int i, q_num; + + for (i = 0; i < IONIC_NUM_LIF_STATS; i++) { + snprintf(*buf, ETH_GSTRING_LEN, ionic_lif_stats_desc[i].name); + *buf += ETH_GSTRING_LEN; + } + for (q_num = 0; q_num < MAX_Q(lif); q_num++) { + for (i = 0; i < IONIC_NUM_TX_STATS; i++) { + snprintf(*buf, ETH_GSTRING_LEN, "tx_%d_%s", 
+ q_num, ionic_tx_stats_desc[i].name); + *buf += ETH_GSTRING_LEN; + } + + if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) { + for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) { + snprintf(*buf, ETH_GSTRING_LEN, + "txq_%d_%s", + q_num, + ionic_txq_stats_desc[i].name); + *buf += ETH_GSTRING_LEN; + } + for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) { + snprintf(*buf, ETH_GSTRING_LEN, + "txq_%d_cq_%s", + q_num, + ionic_dbg_cq_stats_desc[i].name); + *buf += ETH_GSTRING_LEN; + } + for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) { + snprintf(*buf, ETH_GSTRING_LEN, + "txq_%d_intr_%s", + q_num, + ionic_dbg_intr_stats_desc[i].name); + *buf += ETH_GSTRING_LEN; + } + for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) { + snprintf(*buf, ETH_GSTRING_LEN, + "txq_%d_sg_cntr_%d", + q_num, i); + *buf += ETH_GSTRING_LEN; + } + } + } + for (q_num = 0; q_num < MAX_Q(lif); q_num++) { + for (i = 0; i < IONIC_NUM_RX_STATS; i++) { + snprintf(*buf, ETH_GSTRING_LEN, + "rx_%d_%s", + q_num, ionic_rx_stats_desc[i].name); + *buf += ETH_GSTRING_LEN; + } + + if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) { + for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) { + snprintf(*buf, ETH_GSTRING_LEN, + "rxq_%d_cq_%s", + q_num, + ionic_dbg_cq_stats_desc[i].name); + *buf += ETH_GSTRING_LEN; + } + for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) { + snprintf(*buf, ETH_GSTRING_LEN, + "rxq_%d_intr_%s", + q_num, + ionic_dbg_intr_stats_desc[i].name); + *buf += ETH_GSTRING_LEN; + } + for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) { + snprintf(*buf, ETH_GSTRING_LEN, + "rxq_%d_napi_%s", + q_num, + ionic_dbg_napi_stats_desc[i].name); + *buf += ETH_GSTRING_LEN; + } + for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) { + snprintf(*buf, ETH_GSTRING_LEN, + "rxq_%d_napi_work_done_%d", + q_num, i); + *buf += ETH_GSTRING_LEN; + } + } + } +} + +static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf) +{ + struct ionic_lif_sw_stats lif_stats; + struct ionic_qcq *txqcq, *rxqcq; + int i, q_num; + + ionic_get_lif_stats(lif, &lif_stats); + + for (i = 0; i < IONIC_NUM_LIF_STATS; i++) { + **buf = IONIC_READ_STAT64(&lif_stats, &ionic_lif_stats_desc[i]); + (*buf)++; + } + + for (q_num = 0; q_num < MAX_Q(lif); q_num++) { + txqcq = lif_to_txqcq(lif, q_num); + + for (i = 0; i < IONIC_NUM_TX_STATS; i++) { + **buf = IONIC_READ_STAT64(&txqcq->stats->tx, + &ionic_tx_stats_desc[i]); + (*buf)++; + } + + if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) { + for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) { + **buf = IONIC_READ_STAT64(&txqcq->q, + &ionic_txq_stats_desc[i]); + (*buf)++; + } + for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) { + **buf = IONIC_READ_STAT64(&txqcq->cq, + &ionic_dbg_cq_stats_desc[i]); + (*buf)++; + } + for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) { + **buf = IONIC_READ_STAT64(&txqcq->intr, + &ionic_dbg_intr_stats_desc[i]); + (*buf)++; + } + for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) { + **buf = txqcq->stats->tx.sg_cntr[i]; + (*buf)++; + } + } + } + + for (q_num = 0; q_num < MAX_Q(lif); q_num++) { + rxqcq = lif_to_rxqcq(lif, q_num); + + for (i = 0; i < IONIC_NUM_RX_STATS; i++) { + **buf = IONIC_READ_STAT64(&rxqcq->stats->rx, + &ionic_rx_stats_desc[i]); + (*buf)++; + } + + if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) { + for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) { + **buf = IONIC_READ_STAT64(&rxqcq->cq, + &ionic_dbg_cq_stats_desc[i]); + (*buf)++; + } + for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) { + **buf = IONIC_READ_STAT64(&rxqcq->intr, + &ionic_dbg_intr_stats_desc[i]); + (*buf)++; + } + for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) 
{ + **buf = IONIC_READ_STAT64(&rxqcq->napi_stats, + &ionic_dbg_napi_stats_desc[i]); + (*buf)++; + } + for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) { + **buf = rxqcq->napi_stats.work_done_cntr[i]; + (*buf)++; + } + } + } +} + +const struct ionic_stats_group_intf ionic_stats_groups[] = { + /* SW Stats group */ + { + .get_strings = ionic_sw_stats_get_strings, + .get_values = ionic_sw_stats_get_values, + .get_count = ionic_sw_stats_get_count, + }, + /* Add more stat groups here */ +}; + +const int ionic_num_stats_grps = ARRAY_SIZE(ionic_stats_groups); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.h b/drivers/net/ethernet/pensando/ionic/ionic_stats.h new file mode 100644 index 000000000000..d2c1122a2c6e --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#ifndef _IONIC_STATS_H_ +#define _IONIC_STATS_H_ + +#define IONIC_STAT_TO_OFFSET(type, stat_name) (offsetof(type, stat_name)) + +#define IONIC_STAT_DESC(type, stat_name) { \ + .name = #stat_name, \ + .offset = IONIC_STAT_TO_OFFSET(type, stat_name) \ +} + +#define IONIC_LIF_STAT_DESC(stat_name) \ + IONIC_STAT_DESC(struct ionic_lif_sw_stats, stat_name) + +#define IONIC_TX_STAT_DESC(stat_name) \ + IONIC_STAT_DESC(struct ionic_tx_stats, stat_name) + +#define IONIC_RX_STAT_DESC(stat_name) \ + IONIC_STAT_DESC(struct ionic_rx_stats, stat_name) + +#define IONIC_TX_Q_STAT_DESC(stat_name) \ + IONIC_STAT_DESC(struct ionic_queue, stat_name) + +#define IONIC_CQ_STAT_DESC(stat_name) \ + IONIC_STAT_DESC(struct ionic_cq, stat_name) + +#define IONIC_INTR_STAT_DESC(stat_name) \ + IONIC_STAT_DESC(struct ionic_intr_info, stat_name) + +#define IONIC_NAPI_STAT_DESC(stat_name) \ + IONIC_STAT_DESC(struct ionic_napi_stats, stat_name) + +/* Interface structure for a particular stats group */ +struct ionic_stats_group_intf { + void (*get_strings)(struct ionic_lif *lif, u8 **buf); + void (*get_values)(struct ionic_lif *lif, u64 **buf); + u64 (*get_count)(struct ionic_lif *lif); +}; + +extern const struct ionic_stats_group_intf ionic_stats_groups[]; +extern const int ionic_num_stats_grps; + +#define IONIC_READ_STAT64(base_ptr, desc_ptr) \ + (*((u64 *)(((u8 *)(base_ptr)) + (desc_ptr)->offset))) + +struct ionic_stat_desc { + char name[ETH_GSTRING_LEN]; + u64 offset; +}; + +#endif /* _IONIC_STATS_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c new file mode 100644 index 000000000000..ab6663d94f42 --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -0,0 +1,925 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/if_vlan.h> +#include <net/ip6_checksum.h> + +#include "ionic.h" +#include "ionic_lif.h" +#include "ionic_txrx.h" + +static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info, + struct ionic_cq_info *cq_info, void *cb_arg); + +static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell, + ionic_desc_cb cb_func, void *cb_arg) +{ + DEBUG_STATS_TXQ_POST(q_to_qcq(q), q->head->desc, ring_dbell); + + ionic_q_post(q, ring_dbell, cb_func, cb_arg); +} + +static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell, + ionic_desc_cb cb_func, void *cb_arg) +{ + ionic_q_post(q, ring_dbell, cb_func, cb_arg); + + DEBUG_STATS_RX_BUFF_CNT(q_to_qcq(q)); +} + +static inline struct netdev_queue 
*q_to_ndq(struct ionic_queue *q) +{ + return netdev_get_tx_queue(q->lif->netdev, q->index); +} + +static void ionic_rx_recycle(struct ionic_queue *q, struct ionic_desc_info *desc_info, + struct sk_buff *skb) +{ + struct ionic_rxq_desc *old = desc_info->desc; + struct ionic_rxq_desc *new = q->head->desc; + + new->addr = old->addr; + new->len = old->len; + + ionic_rxq_post(q, true, ionic_rx_clean, skb); +} + +static bool ionic_rx_copybreak(struct ionic_queue *q, struct ionic_desc_info *desc_info, + struct ionic_cq_info *cq_info, struct sk_buff **skb) +{ + struct ionic_rxq_comp *comp = cq_info->cq_desc; + struct ionic_rxq_desc *desc = desc_info->desc; + struct net_device *netdev = q->lif->netdev; + struct device *dev = q->lif->ionic->dev; + struct sk_buff *new_skb; + u16 clen, dlen; + + clen = le16_to_cpu(comp->len); + dlen = le16_to_cpu(desc->len); + if (clen > q->lif->rx_copybreak) { + dma_unmap_single(dev, (dma_addr_t)le64_to_cpu(desc->addr), + dlen, DMA_FROM_DEVICE); + return false; + } + + new_skb = netdev_alloc_skb_ip_align(netdev, clen); + if (!new_skb) { + dma_unmap_single(dev, (dma_addr_t)le64_to_cpu(desc->addr), + dlen, DMA_FROM_DEVICE); + return false; + } + + dma_sync_single_for_cpu(dev, (dma_addr_t)le64_to_cpu(desc->addr), + clen, DMA_FROM_DEVICE); + + memcpy(new_skb->data, (*skb)->data, clen); + + ionic_rx_recycle(q, desc_info, *skb); + *skb = new_skb; + + return true; +} + +static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info, + struct ionic_cq_info *cq_info, void *cb_arg) +{ + struct ionic_rxq_comp *comp = cq_info->cq_desc; + struct ionic_qcq *qcq = q_to_qcq(q); + struct sk_buff *skb = cb_arg; + struct ionic_rx_stats *stats; + struct net_device *netdev; + + stats = q_to_rx_stats(q); + netdev = q->lif->netdev; + + if (comp->status) { + ionic_rx_recycle(q, desc_info, skb); + return; + } + + if (unlikely(test_bit(IONIC_LIF_QUEUE_RESET, q->lif->state))) { + /* no packet processing while resetting */ + ionic_rx_recycle(q, desc_info, skb); + return; + } + + stats->pkts++; + stats->bytes += le16_to_cpu(comp->len); + + ionic_rx_copybreak(q, desc_info, cq_info, &skb); + + skb_put(skb, le16_to_cpu(comp->len)); + skb->protocol = eth_type_trans(skb, netdev); + + skb_record_rx_queue(skb, q->index); + + if (netdev->features & NETIF_F_RXHASH) { + switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) { + case IONIC_PKT_TYPE_IPV4: + case IONIC_PKT_TYPE_IPV6: + skb_set_hash(skb, le32_to_cpu(comp->rss_hash), + PKT_HASH_TYPE_L3); + break; + case IONIC_PKT_TYPE_IPV4_TCP: + case IONIC_PKT_TYPE_IPV6_TCP: + case IONIC_PKT_TYPE_IPV4_UDP: + case IONIC_PKT_TYPE_IPV6_UDP: + skb_set_hash(skb, le32_to_cpu(comp->rss_hash), + PKT_HASH_TYPE_L4); + break; + } + } + + if (netdev->features & NETIF_F_RXCSUM) { + if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) { + skb->ip_summed = CHECKSUM_COMPLETE; + skb->csum = (__wsum)le16_to_cpu(comp->csum); + stats->csum_complete++; + } + } else { + stats->csum_none++; + } + + if ((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) || + (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) || + (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)) + stats->csum_error++; + + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { + if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + le16_to_cpu(comp->vlan_tci)); + } + + napi_gro_receive(&qcq->napi, skb); +} + +static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info) +{ + struct ionic_rxq_comp *comp = cq_info->cq_desc; + 
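+ /* a completion entry is valid only while its color bit matches the + * CQ's done_color; ionic_rx_walk_cq() below flips done_color each + * time the CQ wraps + */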
struct ionic_queue *q = cq->bound_q; + struct ionic_desc_info *desc_info; + + if (!color_match(comp->pkt_type_color, cq->done_color)) + return false; + + /* check for empty queue */ + if (q->tail->index == q->head->index) + return false; + + desc_info = q->tail; + if (desc_info->index != le16_to_cpu(comp->comp_index)) + return false; + + q->tail = desc_info->next; + + /* clean the related q entry, only one per cq completion */ + ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg); + + desc_info->cb = NULL; + desc_info->cb_arg = NULL; + + return true; +} + +static u32 ionic_rx_walk_cq(struct ionic_cq *rxcq, u32 limit) +{ + u32 work_done = 0; + + while (ionic_rx_service(rxcq, rxcq->tail)) { + if (rxcq->tail->last) + rxcq->done_color = !rxcq->done_color; + rxcq->tail = rxcq->tail->next; + DEBUG_STATS_CQE_CNT(rxcq); + + if (++work_done >= limit) + break; + } + + return work_done; +} + +void ionic_rx_flush(struct ionic_cq *cq) +{ + struct ionic_dev *idev = &cq->lif->ionic->idev; + u32 work_done; + + work_done = ionic_rx_walk_cq(cq, cq->num_descs); + + if (work_done) + ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index, + work_done, IONIC_INTR_CRED_RESET_COALESCE); +} + +static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q, unsigned int len, + dma_addr_t *dma_addr) +{ + struct ionic_lif *lif = q->lif; + struct ionic_rx_stats *stats; + struct net_device *netdev; + struct sk_buff *skb; + struct device *dev; + + netdev = lif->netdev; + dev = lif->ionic->dev; + stats = q_to_rx_stats(q); + skb = netdev_alloc_skb_ip_align(netdev, len); + if (!skb) { + net_warn_ratelimited("%s: SKB alloc failed on %s!\n", + netdev->name, q->name); + stats->alloc_err++; + return NULL; + } + + *dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, *dma_addr)) { + dev_kfree_skb(skb); + net_warn_ratelimited("%s: DMA single map failed on %s!\n", + netdev->name, q->name); + stats->dma_map_err++; + return NULL; + } + + return skb; +} + +#define IONIC_RX_RING_DOORBELL_STRIDE ((1 << 2) - 1) + +void ionic_rx_fill(struct ionic_queue *q) +{ + struct net_device *netdev = q->lif->netdev; + struct ionic_rxq_desc *desc; + struct sk_buff *skb; + dma_addr_t dma_addr; + bool ring_doorbell; + unsigned int len; + unsigned int i; + + len = netdev->mtu + ETH_HLEN; + + for (i = ionic_q_space_avail(q); i; i--) { + skb = ionic_rx_skb_alloc(q, len, &dma_addr); + if (!skb) + return; + + desc = q->head->desc; + desc->addr = cpu_to_le64(dma_addr); + desc->len = cpu_to_le16(len); + desc->opcode = IONIC_RXQ_DESC_OPCODE_SIMPLE; + + ring_doorbell = ((q->head->index + 1) & + IONIC_RX_RING_DOORBELL_STRIDE) == 0; + + ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, skb); + } +} + +static void ionic_rx_fill_cb(void *arg) +{ + ionic_rx_fill(arg); +} + +void ionic_rx_empty(struct ionic_queue *q) +{ + struct device *dev = q->lif->ionic->dev; + struct ionic_desc_info *cur; + struct ionic_rxq_desc *desc; + + for (cur = q->tail; cur != q->head; cur = cur->next) { + desc = cur->desc; + dma_unmap_single(dev, le64_to_cpu(desc->addr), + le16_to_cpu(desc->len), DMA_FROM_DEVICE); + dev_kfree_skb(cur->cb_arg); + cur->cb_arg = NULL; + } +} + +int ionic_rx_napi(struct napi_struct *napi, int budget) +{ + struct ionic_qcq *qcq = napi_to_qcq(napi); + struct ionic_cq *rxcq = napi_to_cq(napi); + unsigned int qi = rxcq->bound_q->index; + struct ionic_dev *idev; + struct ionic_lif *lif; + struct ionic_cq *txcq; + u32 work_done = 0; + u32 flags = 0; + + lif = rxcq->bound_q->lif; + idev = &lif->ionic->idev; + txcq =
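ionic_rx_service() and ionic_rx_walk_cq() above rely on a color bit instead of a hardware index register: the device flips the color it writes on every lap of the completion ring, and the consumer keeps polling while an entry's color matches the one it expects, toggling the expectation when it passes the last slot. A self-contained sketch of the scheme; the ring size and demo_* names are illustrative.

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 4

struct demo_comp {
    uint8_t color;              /* producer flips this bit on every ring wrap */
    uint16_t data;
};

static struct demo_comp ring[RING_SIZE];
static unsigned int tail;               /* consumer index */
static uint8_t done_color = 1;          /* color expected for fresh entries */

/* Consume entries until we hit one the device has not written yet. */
static unsigned int demo_poll(unsigned int limit)
{
    unsigned int work = 0;

    while (work < limit && ring[tail].color == done_color) {
        printf("completion %u: data=%u\n", tail, ring[tail].data);
        if (tail == RING_SIZE - 1)
            done_color = !done_color;   /* next lap uses the other color */
        tail = (tail + 1) % RING_SIZE;
        work++;
    }
    return work;
}

int main(void)
{
    /* lap 1: "device" writes slots 0..2 with color 1 */
    for (unsigned int i = 0; i < 3; i++)
        ring[i] = (struct demo_comp){ .color = 1, .data = i };
    demo_poll(8);   /* consumes 0..2, stops at slot 3 (still color 0) */

    /* slot 3 finishes lap 1; slot 0 starts lap 2 with the flipped color */
    ring[3] = (struct demo_comp){ .color = 1, .data = 3 };
    ring[0] = (struct demo_comp){ .color = 0, .data = 4 };
    demo_poll(8);   /* consumes 3 and 4; expectation flips at the wrap */
    return 0;
}

The same file also batches doorbell writes: IONIC_RX_RING_DOORBELL_STRIDE makes ionic_rx_fill() ring the doorbell only on every fourth posted descriptor rather than per buffer.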
&lif->txqcqs[qi].qcq->cq; + + ionic_tx_flush(txcq); + + work_done = ionic_rx_walk_cq(rxcq, budget); + + if (work_done) + ionic_rx_fill_cb(rxcq->bound_q); + + if (work_done < budget && napi_complete_done(napi, work_done)) { + flags |= IONIC_INTR_CRED_UNMASK; + DEBUG_STATS_INTR_REARM(rxcq->bound_intr); + } + + if (work_done || flags) { + flags |= IONIC_INTR_CRED_RESET_COALESCE; + ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index, + work_done, flags); + } + + DEBUG_STATS_NAPI_POLL(qcq, work_done); + + return work_done; +} + +static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, void *data, size_t len) +{ + struct ionic_tx_stats *stats = q_to_tx_stats(q); + struct device *dev = q->lif->ionic->dev; + dma_addr_t dma_addr; + + dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_addr)) { + net_warn_ratelimited("%s: DMA single map failed on %s!\n", + q->lif->netdev->name, q->name); + stats->dma_map_err++; + return 0; + } + return dma_addr; +} + +static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, const skb_frag_t *frag, + size_t offset, size_t len) +{ + struct ionic_tx_stats *stats = q_to_tx_stats(q); + struct device *dev = q->lif->ionic->dev; + dma_addr_t dma_addr; + + dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_addr)) { + net_warn_ratelimited("%s: DMA frag map failed on %s!\n", + q->lif->netdev->name, q->name); + stats->dma_map_err++; + } + return dma_addr; +} + +static void ionic_tx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info, + struct ionic_cq_info *cq_info, void *cb_arg) +{ + struct ionic_txq_sg_desc *sg_desc = desc_info->sg_desc; + struct ionic_txq_sg_elem *elem = sg_desc->elems; + struct ionic_tx_stats *stats = q_to_tx_stats(q); + struct ionic_txq_desc *desc = desc_info->desc; + struct device *dev = q->lif->ionic->dev; + u8 opcode, flags, nsge; + u16 queue_index; + unsigned int i; + u64 addr; + + decode_txq_desc_cmd(le64_to_cpu(desc->cmd), + &opcode, &flags, &nsge, &addr); + + /* use unmap_single only if either this is not TSO, + * or this is first descriptor of a TSO + */ + if (opcode != IONIC_TXQ_DESC_OPCODE_TSO || + flags & IONIC_TXQ_DESC_FLAG_TSO_SOT) + dma_unmap_single(dev, (dma_addr_t)addr, + le16_to_cpu(desc->len), DMA_TO_DEVICE); + else + dma_unmap_page(dev, (dma_addr_t)addr, + le16_to_cpu(desc->len), DMA_TO_DEVICE); + + for (i = 0; i < nsge; i++, elem++) + dma_unmap_page(dev, (dma_addr_t)le64_to_cpu(elem->addr), + le16_to_cpu(elem->len), DMA_TO_DEVICE); + + if (cb_arg) { + struct sk_buff *skb = cb_arg; + u32 len = skb->len; + + queue_index = skb_get_queue_mapping(skb); + if (unlikely(__netif_subqueue_stopped(q->lif->netdev, + queue_index))) { + netif_wake_subqueue(q->lif->netdev, queue_index); + q->wake++; + } + dev_kfree_skb_any(skb); + stats->clean++; + netdev_tx_completed_queue(q_to_ndq(q), 1, len); + } +} + +void ionic_tx_flush(struct ionic_cq *cq) +{ + struct ionic_txq_comp *comp = cq->tail->cq_desc; + struct ionic_dev *idev = &cq->lif->ionic->idev; + struct ionic_queue *q = cq->bound_q; + struct ionic_desc_info *desc_info; + unsigned int work_done = 0; + + /* walk the completed cq entries */ + while (work_done < cq->num_descs && + color_match(comp->color, cq->done_color)) { + + /* clean the related q entries, there could be + * several q entries completed for each cq completion + */ + do { + desc_info = q->tail; + q->tail = desc_info->next; + ionic_tx_clean(q, desc_info, cq->tail, + desc_info->cb_arg); + desc_info->cb = NULL; + 
desc_info->cb_arg = NULL; + } while (desc_info->index != le16_to_cpu(comp->comp_index)); + + if (cq->tail->last) + cq->done_color = !cq->done_color; + + cq->tail = cq->tail->next; + comp = cq->tail->cq_desc; + DEBUG_STATS_CQE_CNT(cq); + + work_done++; + } + + if (work_done) + ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index, + work_done, 0); +} + +static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb) +{ + int err; + + err = skb_cow_head(skb, 0); + if (err) + return err; + + if (skb->protocol == cpu_to_be16(ETH_P_IP)) { + inner_ip_hdr(skb)->check = 0; + inner_tcp_hdr(skb)->check = + ~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr, + inner_ip_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) { + inner_tcp_hdr(skb)->check = + ~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr, + &inner_ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + } + + return 0; +} + +static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb) +{ + int err; + + err = skb_cow_head(skb, 0); + if (err) + return err; + + if (skb->protocol == cpu_to_be16(ETH_P_IP)) { + ip_hdr(skb)->check = 0; + tcp_hdr(skb)->check = + ~csum_tcpudp_magic(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) { + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + } + + return 0; +} + +static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc, + struct sk_buff *skb, + dma_addr_t addr, u8 nsge, u16 len, + unsigned int hdrlen, unsigned int mss, + bool outer_csum, + u16 vlan_tci, bool has_vlan, + bool start, bool done) +{ + u8 flags = 0; + u64 cmd; + + flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0; + flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0; + flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0; + flags |= done ? 
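The two pseudo-csum helpers above seed tcp->check with the ones'-complement pseudo-header sum computed with the length field forced to zero, so the device can fold each segment's real length into the checksum as it emits TSO segments. A rough userspace analogue of that seed computation follows; it works on plain host-order words and the demo_* names are invented, so treat it as a sketch of the arithmetic rather than the kernel's csum_tcpudp_magic().

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit ones'-complement accumulator down to 16 bits. */
static uint16_t csum_fold(uint32_t sum)
{
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)sum;
}

/* Pseudo-header checksum with a zero length term, complemented, ready to
 * be seeded into the TCP checksum field before segmentation offload. */
static uint16_t demo_pseudo_seed(uint32_t saddr, uint32_t daddr, uint8_t proto)
{
    uint32_t sum = 0;

    sum += saddr >> 16;
    sum += saddr & 0xffff;
    sum += daddr >> 16;
    sum += daddr & 0xffff;
    sum += proto;       /* the length contribution is deliberately zero */

    return (uint16_t)~csum_fold(sum);
}

int main(void)
{
    /* 192.0.2.1 -> 198.51.100.2, protocol 6 (TCP) */
    printf("seed = 0x%04x\n", demo_pseudo_seed(0xc0000201u, 0xc6336402u, 6));
    return 0;
}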
IONIC_TXQ_DESC_FLAG_TSO_EOT : 0; + + cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr); + desc->cmd = cpu_to_le64(cmd); + desc->len = cpu_to_le16(len); + desc->vlan_tci = cpu_to_le16(vlan_tci); + desc->hdr_len = cpu_to_le16(hdrlen); + desc->mss = cpu_to_le16(mss); + + if (done) { + skb_tx_timestamp(skb); + netdev_tx_sent_queue(q_to_ndq(q), skb->len); + ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb); + } else { + ionic_txq_post(q, false, ionic_tx_clean, NULL); + } +} + +static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q, + struct ionic_txq_sg_elem **elem) +{ + struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc; + struct ionic_txq_desc *desc = q->head->desc; + + *elem = sg_desc->elems; + return desc; +} + +static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb) +{ + struct ionic_tx_stats *stats = q_to_tx_stats(q); + struct ionic_desc_info *abort = q->head; + struct device *dev = q->lif->ionic->dev; + struct ionic_desc_info *rewind = abort; + struct ionic_txq_sg_elem *elem; + struct ionic_txq_desc *desc; + unsigned int frag_left = 0; + unsigned int offset = 0; + unsigned int len_left; + dma_addr_t desc_addr; + unsigned int hdrlen; + unsigned int nfrags; + unsigned int seglen; + u64 total_bytes = 0; + u64 total_pkts = 0; + unsigned int left; + unsigned int len; + unsigned int mss; + skb_frag_t *frag; + bool start, done; + bool outer_csum; + bool has_vlan; + u16 desc_len; + u8 desc_nsge; + u16 vlan_tci; + bool encap; + int err; + + mss = skb_shinfo(skb)->gso_size; + nfrags = skb_shinfo(skb)->nr_frags; + len_left = skb->len - skb_headlen(skb); + outer_csum = (skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM) || + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM); + has_vlan = !!skb_vlan_tag_present(skb); + vlan_tci = skb_vlan_tag_get(skb); + encap = skb->encapsulation; + + /* Preload inner-most TCP csum field with IP pseudo hdr + * calculated with IP length set to zero. HW will later + * add in length to each TCP segment resulting from the TSO. + */ + + if (encap) + err = ionic_tx_tcp_inner_pseudo_csum(skb); + else + err = ionic_tx_tcp_pseudo_csum(skb); + if (err) + return err; + + if (encap) + hdrlen = skb_inner_transport_header(skb) - skb->data + + inner_tcp_hdrlen(skb); + else + hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb); + + seglen = hdrlen + mss; + left = skb_headlen(skb); + + desc = ionic_tx_tso_next(q, &elem); + start = true; + + /* Chop skb->data up into desc segments */ + + while (left > 0) { + len = min(seglen, left); + frag_left = seglen - len; + desc_addr = ionic_tx_map_single(q, skb->data + offset, len); + if (dma_mapping_error(dev, desc_addr)) + goto err_out_abort; + desc_len = len; + desc_nsge = 0; + left -= len; + offset += len; + if (nfrags > 0 && frag_left > 0) + continue; + done = (nfrags == 0 && left == 0); + ionic_tx_tso_post(q, desc, skb, + desc_addr, desc_nsge, desc_len, + hdrlen, mss, + outer_csum, + vlan_tci, has_vlan, + start, done); + total_pkts++; + total_bytes += start ? 
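The head-chopping loop above emits one descriptor per wire segment: the first carries hdrlen plus one MSS of the linear area, and every later segment re-sends hdrlen bytes of replicated headers, which is why the byte accounting adds hdrlen for non-start segments. A small model of that arithmetic, with illustrative sizes:

#include <stdio.h>

/* Walk a linear payload the way the TSO loop does: the first chunk carries
 * the headers plus up to one MSS of data, later segments each replicate the
 * headers on the wire in front of up to one MSS. */
static void demo_tso_account(unsigned int skb_len, unsigned int hdrlen,
                             unsigned int mss)
{
    unsigned int left = skb_len;        /* headers + payload, like skb->len */
    unsigned int seglen = hdrlen + mss;
    unsigned long pkts = 0, bytes = 0;
    int start = 1;

    while (left > 0) {
        unsigned int len = left < seglen ? left : seglen;

        left -= len;
        pkts++;
        /* later segments carry hdrlen replicated bytes that are not
         * counted in skb->len a second time */
        bytes += start ? len : len + hdrlen;
        start = 0;
        seglen = mss;   /* only the first chunk includes the headers */
    }
    printf("wire packets=%lu wire bytes=%lu\n", pkts, bytes);
}

int main(void)
{
    demo_tso_account(54 + 3000, 54, 1448);  /* 54-byte headers, ~3000B payload */
    return 0;
}

With those inputs the model reports 3 wire packets and 3162 wire bytes: 1502 + 1502 + 158, each segment prefixed by its own copy of the headers.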
len : len + hdrlen; + desc = ionic_tx_tso_next(q, &elem); + start = false; + seglen = mss; + } + + /* Chop skb frags into desc segments */ + + for (frag = skb_shinfo(skb)->frags; len_left; frag++) { + offset = 0; + left = skb_frag_size(frag); + len_left -= left; + nfrags--; + stats->frags++; + + while (left > 0) { + if (frag_left > 0) { + len = min(frag_left, left); + frag_left -= len; + elem->addr = + cpu_to_le64(ionic_tx_map_frag(q, frag, + offset, len)); + if (dma_mapping_error(dev, elem->addr)) + goto err_out_abort; + elem->len = cpu_to_le16(len); + elem++; + desc_nsge++; + left -= len; + offset += len; + if (nfrags > 0 && frag_left > 0) + continue; + done = (nfrags == 0 && left == 0); + ionic_tx_tso_post(q, desc, skb, desc_addr, + desc_nsge, desc_len, + hdrlen, mss, outer_csum, + vlan_tci, has_vlan, + start, done); + total_pkts++; + total_bytes += start ? len : len + hdrlen; + desc = ionic_tx_tso_next(q, &elem); + start = false; + } else { + len = min(mss, left); + frag_left = mss - len; + desc_addr = ionic_tx_map_frag(q, frag, + offset, len); + if (dma_mapping_error(dev, desc_addr)) + goto err_out_abort; + desc_len = len; + desc_nsge = 0; + left -= len; + offset += len; + if (nfrags > 0 && frag_left > 0) + continue; + done = (nfrags == 0 && left == 0); + ionic_tx_tso_post(q, desc, skb, desc_addr, + desc_nsge, desc_len, + hdrlen, mss, outer_csum, + vlan_tci, has_vlan, + start, done); + total_pkts++; + total_bytes += start ? len : len + hdrlen; + desc = ionic_tx_tso_next(q, &elem); + start = false; + } + } + } + + stats->pkts += total_pkts; + stats->bytes += total_bytes; + stats->tso++; + + return 0; + +err_out_abort: + while (rewind->desc != q->head->desc) { + ionic_tx_clean(q, rewind, NULL, NULL); + rewind = rewind->next; + } + q->head = abort; + + return -ENOMEM; +} + +static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb) +{ + struct ionic_tx_stats *stats = q_to_tx_stats(q); + struct ionic_txq_desc *desc = q->head->desc; + struct device *dev = q->lif->ionic->dev; + dma_addr_t dma_addr; + bool has_vlan; + u8 flags = 0; + bool encap; + u64 cmd; + + has_vlan = !!skb_vlan_tag_present(skb); + encap = skb->encapsulation; + + dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb)); + if (dma_mapping_error(dev, dma_addr)) + return -ENOMEM; + + flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0; + flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0; + + cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL, + flags, skb_shinfo(skb)->nr_frags, dma_addr); + desc->cmd = cpu_to_le64(cmd); + desc->len = cpu_to_le16(skb_headlen(skb)); + desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb)); + desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb)); + desc->csum_offset = cpu_to_le16(skb->csum_offset); + + if (skb->csum_not_inet) + stats->crc32_csum++; + else + stats->csum++; + + return 0; +} + +static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb) +{ + struct ionic_tx_stats *stats = q_to_tx_stats(q); + struct ionic_txq_desc *desc = q->head->desc; + struct device *dev = q->lif->ionic->dev; + dma_addr_t dma_addr; + bool has_vlan; + u8 flags = 0; + bool encap; + u64 cmd; + + has_vlan = !!skb_vlan_tag_present(skb); + encap = skb->encapsulation; + + dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb)); + if (dma_mapping_error(dev, dma_addr)) + return -ENOMEM; + + flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0; + flags |= encap ? 
IONIC_TXQ_DESC_FLAG_ENCAP : 0; + + cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE, + flags, skb_shinfo(skb)->nr_frags, dma_addr); + desc->cmd = cpu_to_le64(cmd); + desc->len = cpu_to_le16(skb_headlen(skb)); + desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb)); + + stats->no_csum++; + + return 0; +} + +static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb) +{ + struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc; + unsigned int len_left = skb->len - skb_headlen(skb); + struct ionic_txq_sg_elem *elem = sg_desc->elems; + struct ionic_tx_stats *stats = q_to_tx_stats(q); + struct device *dev = q->lif->ionic->dev; + dma_addr_t dma_addr; + skb_frag_t *frag; + u16 len; + + for (frag = skb_shinfo(skb)->frags; len_left; frag++, elem++) { + len = skb_frag_size(frag); + elem->len = cpu_to_le16(len); + dma_addr = ionic_tx_map_frag(q, frag, 0, len); + if (dma_mapping_error(dev, dma_addr)) + return -ENOMEM; + elem->addr = cpu_to_le64(dma_addr); + len_left -= len; + stats->frags++; + } + + return 0; +} + +static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb) +{ + struct ionic_tx_stats *stats = q_to_tx_stats(q); + int err; + + /* set up the initial descriptor */ + if (skb->ip_summed == CHECKSUM_PARTIAL) + err = ionic_tx_calc_csum(q, skb); + else + err = ionic_tx_calc_no_csum(q, skb); + if (err) + return err; + + /* add frags */ + err = ionic_tx_skb_frags(q, skb); + if (err) + return err; + + skb_tx_timestamp(skb); + stats->pkts++; + stats->bytes += skb->len; + + netdev_tx_sent_queue(q_to_ndq(q), skb->len); + ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb); + + return 0; +} + +static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb) +{ + struct ionic_tx_stats *stats = q_to_tx_stats(q); + int err; + + /* If TSO, need roundup(skb->len/mss) descs */ + if (skb_is_gso(skb)) + return (skb->len / skb_shinfo(skb)->gso_size) + 1; + + /* If non-TSO, just need 1 desc and nr_frags sg elems */ + if (skb_shinfo(skb)->nr_frags <= IONIC_TX_MAX_SG_ELEMS) + return 1; + + /* Too many frags, so linearize */ + err = skb_linearize(skb); + if (err) + return err; + + stats->linearize++; + + /* Need 1 desc and zero sg elems */ + return 1; +} + +static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs) +{ + int stopped = 0; + + if (unlikely(!ionic_q_has_space(q, ndescs))) { + netif_stop_subqueue(q->lif->netdev, q->index); + q->stop++; + stopped = 1; + + /* Might race with ionic_tx_clean, check again */ + smp_rmb(); + if (ionic_q_has_space(q, ndescs)) { + netif_wake_subqueue(q->lif->netdev, q->index); + stopped = 0; + } + } + + return stopped; +} + +netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + u16 queue_index = skb_get_queue_mapping(skb); + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_queue *q; + int ndescs; + int err; + + if (unlikely(!test_bit(IONIC_LIF_UP, lif->state))) { + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + if (unlikely(!lif_to_txqcq(lif, queue_index))) + queue_index = 0; + q = lif_to_txq(lif, queue_index); + + ndescs = ionic_tx_descs_needed(q, skb); + if (ndescs < 0) + goto err_out_drop; + + if (unlikely(ionic_maybe_stop_tx(q, ndescs))) + return NETDEV_TX_BUSY; + + if (skb_is_gso(skb)) + err = ionic_tx_tso(q, skb); + else + err = ionic_tx(q, skb); + + if (err) + goto err_out_drop; + + /* Stop the queue if there aren't descriptors for the next packet. 
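ionic_maybe_stop_tx() above stops the subqueue and then re-checks for space, because a completion can free descriptors between the first check and the stop; without the re-check a queue could stay stopped forever with nobody left to wake it. A sketch of the same discipline using C11 seq_cst atomics in place of the kernel's smp_rmb() pairing; the demo_* names are assumptions.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define QUEUE_DEPTH 64

static atomic_uint used;        /* descriptors currently in flight */
static atomic_bool stopped;

static bool demo_has_space(unsigned int ndescs)
{
    return QUEUE_DEPTH - atomic_load(&used) >= ndescs;
}

/* Stop-then-recheck: if a completion freed descriptors while we were
 * deciding to stop, undo the stop immediately instead of waiting. */
static bool demo_maybe_stop(unsigned int ndescs)
{
    if (demo_has_space(ndescs))
        return false;

    atomic_store(&stopped, true);

    if (demo_has_space(ndescs)) {
        atomic_store(&stopped, false);  /* completion raced us: wake */
        return false;
    }
    return true;
}

int main(void)
{
    atomic_store(&used, QUEUE_DEPTH);           /* ring completely full */
    printf("stopped=%d\n", demo_maybe_stop(4)); /* 1: no space */
    atomic_fetch_sub(&used, 8);                 /* completions free 8 slots */
    printf("stopped=%d\n", demo_maybe_stop(4)); /* 0: space again */
    return 0;
}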
+ * Since our SG lists per descriptor take care of most of the possible + * fragmentation, we don't need to have many descriptors available. + */ + ionic_maybe_stop_tx(q, 4); + + return NETDEV_TX_OK; + +err_out_drop: + q->stop++; + q->drop++; + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h new file mode 100644 index 000000000000..53775c62c85a --- /dev/null +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ + +#ifndef _IONIC_TXRX_H_ +#define _IONIC_TXRX_H_ + +void ionic_rx_flush(struct ionic_cq *cq); +void ionic_tx_flush(struct ionic_cq *cq); + +void ionic_rx_fill(struct ionic_queue *q); +void ionic_rx_empty(struct ionic_queue *q); +int ionic_rx_napi(struct napi_struct *napi, int budget); +netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev); + +#endif /* _IONIC_TXRX_H_ */ diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 2c5d3f5b84dd..786b158bd305 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -2189,6 +2189,9 @@ static int rocker_router_fib_event(struct notifier_block *nb, struct rocker_fib_event_work *fib_work; struct fib_notifier_info *info = ptr; + if (!net_eq(info->net, &init_net)) + return NOTIFY_DONE; + if (info->family != AF_INET) return NOTIFY_DONE; diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index cb2ea8facd8d..3ab24fdccd3b 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1345,7 +1345,7 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[], info->key.u.ipv4.dst = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]); - if (IN_MULTICAST(ntohl(info->key.u.ipv4.dst))) { + if (ipv4_is_multicast(info->key.u.ipv4.dst)) { NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE], "Remote IPv4 address cannot be Multicast"); return -EINVAL; diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 778d27d1fb15..ec23c166e67b 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -757,6 +757,7 @@ struct r8152 { u32 msg_enable; u32 tx_qlen; u32 coalesce; + u32 advertising; u32 rx_buf_sz; u32 rx_copybreak; u32 rx_pending; @@ -790,6 +791,13 @@ enum tx_csum_stat { TX_CSUM_NONE }; +#define RTL_ADVERTISED_10_HALF BIT(0) +#define RTL_ADVERTISED_10_FULL BIT(1) +#define RTL_ADVERTISED_100_HALF BIT(2) +#define RTL_ADVERTISED_100_FULL BIT(3) +#define RTL_ADVERTISED_1000_HALF BIT(4) +#define RTL_ADVERTISED_1000_FULL BIT(5) + /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). * The RTL chips use a 64 element hash table based on the Ethernet CRC. 
*/ @@ -3801,90 +3809,117 @@ static void rtl8153b_disable(struct r8152 *tp) r8153b_aldps_en(tp, true); } -static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex) +static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u32 speed, u8 duplex, + u32 advertising) { - u16 bmcr, anar, gbcr; enum spd_duplex speed_duplex; + u16 bmcr; int ret = 0; - anar = r8152_mdio_read(tp, MII_ADVERTISE); - anar &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL | - ADVERTISE_100HALF | ADVERTISE_100FULL); - if (tp->mii.supports_gmii) { - gbcr = r8152_mdio_read(tp, MII_CTRL1000); - gbcr &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); - } else { - gbcr = 0; - } - if (autoneg == AUTONEG_DISABLE) { - if (speed == SPEED_10) { - bmcr = 0; - anar |= ADVERTISE_10HALF | ADVERTISE_10FULL; - speed_duplex = FORCE_10M_HALF; - } else if (speed == SPEED_100) { - bmcr = BMCR_SPEED100; - anar |= ADVERTISE_100HALF | ADVERTISE_100FULL; - speed_duplex = FORCE_100M_HALF; - } else if (speed == SPEED_1000 && tp->mii.supports_gmii) { - bmcr = BMCR_SPEED1000; - gbcr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF; - speed_duplex = NWAY_1000M_FULL; - } else { - ret = -EINVAL; - goto out; - } + if (duplex != DUPLEX_HALF && duplex != DUPLEX_FULL) + return -EINVAL; - if (duplex == DUPLEX_FULL) { - bmcr |= BMCR_FULLDPLX; - if (speed != SPEED_1000) - speed_duplex++; - } - } else { - if (speed == SPEED_10) { + switch (speed) { + case SPEED_10: + bmcr = BMCR_SPEED10; if (duplex == DUPLEX_FULL) { - anar |= ADVERTISE_10HALF | ADVERTISE_10FULL; - speed_duplex = NWAY_10M_FULL; + bmcr |= BMCR_FULLDPLX; + speed_duplex = FORCE_10M_FULL; } else { - anar |= ADVERTISE_10HALF; - speed_duplex = NWAY_10M_HALF; + speed_duplex = FORCE_10M_HALF; } - } else if (speed == SPEED_100) { + break; + case SPEED_100: + bmcr = BMCR_SPEED100; if (duplex == DUPLEX_FULL) { - anar |= ADVERTISE_10HALF | ADVERTISE_10FULL; - anar |= ADVERTISE_100HALF | ADVERTISE_100FULL; - speed_duplex = NWAY_100M_FULL; + bmcr |= BMCR_FULLDPLX; + speed_duplex = FORCE_100M_FULL; } else { - anar |= ADVERTISE_10HALF; - anar |= ADVERTISE_100HALF; - speed_duplex = NWAY_100M_HALF; + speed_duplex = FORCE_100M_HALF; } - } else if (speed == SPEED_1000 && tp->mii.supports_gmii) { - if (duplex == DUPLEX_FULL) { - anar |= ADVERTISE_10HALF | ADVERTISE_10FULL; - anar |= ADVERTISE_100HALF | ADVERTISE_100FULL; - gbcr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF; - } else { - anar |= ADVERTISE_10HALF; - anar |= ADVERTISE_100HALF; - gbcr |= ADVERTISE_1000HALF; + break; + case SPEED_1000: + if (tp->mii.supports_gmii) { + bmcr = BMCR_SPEED1000 | BMCR_FULLDPLX; + speed_duplex = NWAY_1000M_FULL; + break; } - speed_duplex = NWAY_1000M_FULL; - } else { + /* fall through */ + default: ret = -EINVAL; goto out; } + if (duplex == DUPLEX_FULL) + tp->mii.full_duplex = 1; + else + tp->mii.full_duplex = 0; + + tp->mii.force_media = 1; + } else { + u16 anar, tmp1; + u32 support; + + support = RTL_ADVERTISED_10_HALF | RTL_ADVERTISED_10_FULL | + RTL_ADVERTISED_100_HALF | RTL_ADVERTISED_100_FULL; + + if (tp->mii.supports_gmii) + support |= RTL_ADVERTISED_1000_FULL; + + if (!(advertising & support)) + return -EINVAL; + + anar = r8152_mdio_read(tp, MII_ADVERTISE); + tmp1 = anar & ~(ADVERTISE_10HALF | ADVERTISE_10FULL | + ADVERTISE_100HALF | ADVERTISE_100FULL); + if (advertising & RTL_ADVERTISED_10_HALF) { + tmp1 |= ADVERTISE_10HALF; + speed_duplex = NWAY_10M_HALF; + } + if (advertising & RTL_ADVERTISED_10_FULL) { + tmp1 |= ADVERTISE_10FULL; + speed_duplex = NWAY_10M_FULL; + } + + if (advertising & RTL_ADVERTISED_100_HALF) 
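The rewritten forced-mode path above maps (speed, duplex) directly onto BMCR bits and no longer rewrites the advertisement registers at all. A standalone sketch of that mapping; the register constants are redefined locally from the MII spec as I understand it, and rejecting gigabit half-duplex mirrors the driver's behavior rather than a hardware universal.

#include <stdint.h>
#include <stdio.h>

#define BMCR_SPEED10    0x0000
#define BMCR_FULLDPLX   0x0100
#define BMCR_SPEED100   0x2000
#define BMCR_SPEED1000  0x0040

/* Forced-mode encoding: no autoneg bits, just speed select plus duplex.
 * Returns -1 for combinations this PHY cannot do (e.g. gigabit half). */
static int demo_bmcr(unsigned int speed, int full, int gmii)
{
    uint16_t bmcr;

    switch (speed) {
    case 10:
        bmcr = BMCR_SPEED10;
        break;
    case 100:
        bmcr = BMCR_SPEED100;
        break;
    case 1000:
        if (!gmii || !full)
            return -1;          /* 1000 half not supported here */
        bmcr = BMCR_SPEED1000;
        break;
    default:
        return -1;
    }
    if (full)
        bmcr |= BMCR_FULLDPLX;
    return bmcr;
}

int main(void)
{
    printf("100/full  -> 0x%04x\n", demo_bmcr(100, 1, 1));  /* 0x2100 */
    printf("1000/half -> %d\n", demo_bmcr(1000, 0, 1));     /* -1 */
    return 0;
}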
{ + tmp1 |= ADVERTISE_100HALF; + speed_duplex = NWAY_100M_HALF; + } + if (advertising & RTL_ADVERTISED_100_FULL) { + tmp1 |= ADVERTISE_100FULL; + speed_duplex = NWAY_100M_FULL; + } + + if (anar != tmp1) { + r8152_mdio_write(tp, MII_ADVERTISE, tmp1); + tp->mii.advertising = tmp1; + } + + if (tp->mii.supports_gmii) { + u16 gbcr; + + gbcr = r8152_mdio_read(tp, MII_CTRL1000); + tmp1 = gbcr & ~(ADVERTISE_1000FULL | + ADVERTISE_1000HALF); + + if (advertising & RTL_ADVERTISED_1000_FULL) { + tmp1 |= ADVERTISE_1000FULL; + speed_duplex = NWAY_1000M_FULL; + } + + if (gbcr != tmp1) + r8152_mdio_write(tp, MII_CTRL1000, tmp1); + } + bmcr = BMCR_ANENABLE | BMCR_ANRESTART; + + tp->mii.force_media = 0; } if (test_and_clear_bit(PHY_RESET, &tp->flags)) bmcr |= BMCR_RESET; - if (tp->mii.supports_gmii) - r8152_mdio_write(tp, MII_CTRL1000, gbcr); - - r8152_mdio_write(tp, MII_ADVERTISE, anar); r8152_mdio_write(tp, MII_BMCR, bmcr); switch (tp->version) { @@ -4122,7 +4157,8 @@ static void rtl_hw_phy_work_func_t(struct work_struct *work) tp->rtl_ops.hw_phy_cfg(tp); - rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex); + rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex, + tp->advertising); mutex_unlock(&tp->control); @@ -4840,20 +4876,46 @@ static int rtl8152_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { struct r8152 *tp = netdev_priv(dev); + u32 advertising = 0; int ret; ret = usb_autopm_get_interface(tp->intf); if (ret < 0) goto out; + if (test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, + cmd->link_modes.advertising)) + advertising |= RTL_ADVERTISED_10_HALF; + + if (test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, + cmd->link_modes.advertising)) + advertising |= RTL_ADVERTISED_10_FULL; + + if (test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + cmd->link_modes.advertising)) + advertising |= RTL_ADVERTISED_100_HALF; + + if (test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + cmd->link_modes.advertising)) + advertising |= RTL_ADVERTISED_100_FULL; + + if (test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + cmd->link_modes.advertising)) + advertising |= RTL_ADVERTISED_1000_HALF; + + if (test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + cmd->link_modes.advertising)) + advertising |= RTL_ADVERTISED_1000_FULL; + mutex_lock(&tp->control); ret = rtl8152_set_speed(tp, cmd->base.autoneg, cmd->base.speed, - cmd->base.duplex); + cmd->base.duplex, advertising); if (!ret) { tp->autoneg = cmd->base.autoneg; tp->speed = cmd->base.speed; tp->duplex = cmd->base.duplex; + tp->advertising = advertising; } mutex_unlock(&tp->control); @@ -5568,7 +5630,13 @@ static int rtl8152_probe(struct usb_interface *intf, tp->mii.phy_id = R8152_PHY_ID; tp->autoneg = AUTONEG_ENABLE; - tp->speed = tp->mii.supports_gmii ? 
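For autoneg, the driver now derives its private RTL_ADVERTISED_* mask from the ethtool link-mode bitmap, and only afterwards touches ANAR/GBCR. A sketch of the bitmap-to-mask translation; the bit numbers follow the ethtool link-mode indices as I understand them, and all demo_* names are invented.

#include <stdint.h>
#include <stdio.h>

/* Driver-private advertising bits, mirroring RTL_ADVERTISED_* */
#define ADV_10_HALF     (1u << 0)
#define ADV_10_FULL     (1u << 1)
#define ADV_100_HALF    (1u << 2)
#define ADV_100_FULL    (1u << 3)
#define ADV_1000_FULL   (1u << 5)

struct demo_mode {
    unsigned int ethtool_bit;   /* e.g. ETHTOOL_LINK_MODE_*_BIT */
    uint32_t adv;
};

static const struct demo_mode demo_map[] = {
    { 0, ADV_10_HALF },  { 1, ADV_10_FULL },
    { 2, ADV_100_HALF }, { 3, ADV_100_FULL },
    { 5, ADV_1000_FULL },
};

/* Scan the kernel-style unsigned-long bitmap and build the private mask. */
static uint32_t demo_modes_to_adv(const unsigned long *linkmodes)
{
    uint32_t adv = 0;

    for (unsigned int i = 0; i < sizeof(demo_map) / sizeof(demo_map[0]); i++)
        if (linkmodes[demo_map[i].ethtool_bit / (8 * sizeof(long))] &
            (1ul << (demo_map[i].ethtool_bit % (8 * sizeof(long)))))
            adv |= demo_map[i].adv;
    return adv;
}

int main(void)
{
    unsigned long modes[1] = { (1ul << 3) | (1ul << 5) }; /* 100F + 1000F */

    printf("adv mask = 0x%x\n", demo_modes_to_adv(modes)); /* 0x28 */
    return 0;
}

Keeping the mask in driver-private bits is what lets rtl8152_set_speed() validate the request against what the MAC supports before any MDIO write happens.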
SPEED_1000 : SPEED_100; + tp->speed = SPEED_100; + tp->advertising = RTL_ADVERTISED_10_HALF | RTL_ADVERTISED_10_FULL | + RTL_ADVERTISED_100_HALF | RTL_ADVERTISED_100_FULL; + if (tp->mii.supports_gmii) { + tp->speed = SPEED_1000; + tp->advertising |= RTL_ADVERTISED_1000_FULL; + } tp->duplex = DUPLEX_FULL; tp->rx_copybreak = RTL8152_RXFG_HEADSZ; diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index f01623aef2f7..9b3c720a31b1 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -169,7 +169,8 @@ void can_change_state(struct net_device *dev, struct can_frame *cf, void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, unsigned int idx); -struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr); +struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, + u8 *len_ptr); unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx); void can_free_echo_skb(struct net_device *dev, unsigned int idx); diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h index 9daa1119ea42..01219f2902bf 100644 --- a/include/linux/can/rx-offload.h +++ b/include/linux/can/rx-offload.h @@ -15,7 +15,8 @@ struct can_rx_offload { struct net_device *dev; - unsigned int (*mailbox_read)(struct can_rx_offload *offload, struct can_frame *cf, + unsigned int (*mailbox_read)(struct can_rx_offload *offload, + struct can_frame *cf, u32 *timestamp, unsigned int mb); struct sk_buff_head skb_queue; @@ -29,9 +30,13 @@ struct can_rx_offload { bool inc; }; -int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload); -int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight); -int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 reg); +int can_rx_offload_add_timestamp(struct net_device *dev, + struct can_rx_offload *offload); +int can_rx_offload_add_fifo(struct net_device *dev, + struct can_rx_offload *offload, + unsigned int weight); +int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, + u64 reg); int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload); int can_rx_offload_queue_sorted(struct can_rx_offload *offload, struct sk_buff *skb, u32 timestamp); diff --git a/include/net/devlink.h b/include/net/devlink.h index 13523b0a0642..460bc629d1a4 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -458,6 +458,13 @@ enum devlink_param_generic_id { /* Maker of the board */ #define DEVLINK_INFO_VERSION_GENERIC_BOARD_MANUFACTURE "board.manufacture" +/* Part number, identifier of asic design */ +#define DEVLINK_INFO_VERSION_GENERIC_ASIC_ID "asic.id" +/* Revision of asic design */ +#define DEVLINK_INFO_VERSION_GENERIC_ASIC_REV "asic.rev" + +/* Overall FW version */ +#define DEVLINK_INFO_VERSION_GENERIC_FW "fw" /* Control processor FW version */ #define DEVLINK_INFO_VERSION_GENERIC_FW_MGMT "fw.mgmt" /* Data path microcode controlling high-speed packet processing */ diff --git a/include/net/tls.h b/include/net/tls.h index ec3c3ed2c6c3..c664e6dba0d1 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -275,16 +275,6 @@ struct tls_context { struct proto *sk_proto; void (*sk_destruct)(struct sock *sk); - void (*sk_proto_close)(struct sock *sk, long timeout); - - int (*setsockopt)(struct sock *sk, int level, - int optname, char __user *optval, - unsigned int optlen); - int (*getsockopt)(struct sock *sk, int level, - int optname, char __user *optval, - int __user *optlen); - int 
(*hash)(struct sock *sk); - void (*unhash)(struct sock *sk); union tls_crypto_context crypto_send; union tls_crypto_context crypto_recv; @@ -376,13 +366,9 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); -int tls_set_device_offload(struct sock *sk, struct tls_context *ctx); int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); int tls_device_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags); -void tls_device_free_resources_tx(struct sock *sk); -void tls_device_init(void); -void tls_device_cleanup(void); int tls_tx_records(struct sock *sk, int flags); struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context, @@ -659,7 +645,6 @@ int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg, unsigned char *record_type); void tls_register_device(struct tls_device *device); void tls_unregister_device(struct tls_device *device); -int tls_device_decrypted(struct sock *sk, struct sk_buff *skb); int decrypt_skb(struct sock *sk, struct sk_buff *skb, struct scatterlist *sgout); struct sk_buff *tls_encrypt_skb(struct sk_buff *skb); @@ -672,9 +657,40 @@ int tls_sw_fallback_init(struct sock *sk, struct tls_offload_context_tx *offload_ctx, struct tls_crypto_info *crypto_info); +#ifdef CONFIG_TLS_DEVICE +void tls_device_init(void); +void tls_device_cleanup(void); +int tls_set_device_offload(struct sock *sk, struct tls_context *ctx); +void tls_device_free_resources_tx(struct sock *sk); int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx); - void tls_device_offload_cleanup_rx(struct sock *sk); void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq); +int tls_device_decrypted(struct sock *sk, struct sk_buff *skb); +#else +static inline void tls_device_init(void) {} +static inline void tls_device_cleanup(void) {} +static inline int +tls_set_device_offload(struct sock *sk, struct tls_context *ctx) +{ + return -EOPNOTSUPP; +} + +static inline void tls_device_free_resources_tx(struct sock *sk) {} + +static inline int +tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx) +{ + return -EOPNOTSUPP; +} + +static inline void tls_device_offload_cleanup_rx(struct sock *sk) {} +static inline void +tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) {} + +static inline int tls_device_decrypted(struct sock *sk, struct sk_buff *skb) +{ + return 0; +} +#endif #endif /* _TLS_OFFLOAD_H */ diff --git a/include/net/vxlan.h b/include/net/vxlan.h index dc1583a1fb8a..335283dbe9b3 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -391,7 +391,7 @@ static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa) if (ipa->sa.sa_family == AF_INET6) return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr); else - return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr)); + return ipv4_is_multicast(ipa->sin.sin_addr.s_addr); } #else /* !IS_ENABLED(CONFIG_IPV6) */ @@ -403,7 +403,7 @@ static inline bool vxlan_addr_any(const union vxlan_addr *ipa) static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa) { - return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr)); + return ipv4_is_multicast(ipa->sin.sin_addr.s_addr); } #endif /* IS_ENABLED(CONFIG_IPV6) */ diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c index 4bb418313720..3286f9d527d3 100644 --- a/net/atm/mpoa_caches.c +++ b/net/atm/mpoa_caches.c @@ -180,8 +180,7 @@ static int cache_hit(in_cache_entry *entry, struct mpoa_client *mpc) static void 
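The tls.h hunk above is the standard kernel trick for compiling a feature out: when CONFIG_TLS_DEVICE is off, the header supplies static inline stubs that return -EOPNOTSUPP or do nothing, so tls_main.c and tls_sw.c can shed their #ifdef blocks at the call sites. A toy header-plus-user pair showing the pattern; CONFIG_DEMO_FEATURE and the demo names are hypothetical.

#include <stdio.h>

#define DEMO_EOPNOTSUPP 95

#ifdef CONFIG_DEMO_FEATURE
int demo_feature_enable(int arg);   /* real implementation elsewhere */
#else
/* Stubs keep call sites free of #ifdef: calls compile away or fail softly. */
static inline int demo_feature_enable(int arg)
{
    (void)arg;
    return -DEMO_EOPNOTSUPP;
}
#endif

int main(void)
{
    int rc = demo_feature_enable(1);

    if (rc)
        printf("feature unavailable: %d (falling back)\n", rc);
    return 0;
}

Built without CONFIG_DEMO_FEATURE the stub is used and the fallback path runs; built with it, the same call site links against the real function, no conditional compilation in the caller.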
in_cache_put(in_cache_entry *entry) { if (refcount_dec_and_test(&entry->use)) { - memset(entry, 0, sizeof(in_cache_entry)); - kfree(entry); + kzfree(entry); } } @@ -416,8 +415,7 @@ static eg_cache_entry *eg_cache_get_by_src_ip(__be32 ipaddr, static void eg_cache_put(eg_cache_entry *entry) { if (refcount_dec_and_test(&entry->use)) { - memset(entry, 0, sizeof(eg_cache_entry)); - kfree(entry); + kzfree(entry); } } diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c index 2977137c28eb..1a5bf3fa4578 100644 --- a/net/rds/af_rds.c +++ b/net/rds/af_rds.c @@ -559,7 +559,7 @@ static int rds_connect(struct socket *sock, struct sockaddr *uaddr, ret = -EDESTADDRREQ; break; } - if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr)) || + if (ipv4_is_multicast(sin->sin_addr.s_addr) || sin->sin_addr.s_addr == htonl(INADDR_BROADCAST)) { ret = -EINVAL; break; @@ -593,7 +593,7 @@ static int rds_connect(struct socket *sock, struct sockaddr *uaddr, addr4 = sin6->sin6_addr.s6_addr32[3]; if (addr4 == htonl(INADDR_ANY) || addr4 == htonl(INADDR_BROADCAST) || - IN_MULTICAST(ntohl(addr4))) { + ipv4_is_multicast(addr4)) { ret = -EPROTOTYPE; break; } diff --git a/net/rds/bind.c b/net/rds/bind.c index 0f4398e7f2a7..6dbb763bc1fd 100644 --- a/net/rds/bind.c +++ b/net/rds/bind.c @@ -181,7 +181,7 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) if (addr_len < sizeof(struct sockaddr_in) || sin->sin_addr.s_addr == htonl(INADDR_ANY) || sin->sin_addr.s_addr == htonl(INADDR_BROADCAST) || - IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) + ipv4_is_multicast(sin->sin_addr.s_addr)) return -EINVAL; ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &v6addr); binding_addr = &v6addr; @@ -206,7 +206,7 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) addr4 = sin6->sin6_addr.s6_addr32[3]; if (addr4 == htonl(INADDR_ANY) || addr4 == htonl(INADDR_BROADCAST) || - IN_MULTICAST(ntohl(addr4))) + ipv4_is_multicast(addr4)) return -EINVAL; } /* The scope ID must be specified for link local address. 
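The IN_MULTICAST(ntohl(x)) to ipv4_is_multicast(x) conversions in the geneve, vxlan, and rds hunks work because the 224.0.0.0/4 prefix test can be done on the network-byte-order word directly, comparing against constants that htonl() folds at compile time. A quick demonstration; demo_ipv4_is_multicast mimics what I understand the kernel helper to do.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* Match 224.0.0.0/4 on the big-endian value, no ntohl() round trip. */
static int demo_ipv4_is_multicast(uint32_t addr_be)
{
    return (addr_be & htonl(0xf0000000)) == htonl(0xe0000000);
}

int main(void)
{
    printf("224.1.2.3 -> %d\n", demo_ipv4_is_multicast(inet_addr("224.1.2.3")));
    printf("10.0.0.1  -> %d\n", demo_ipv4_is_multicast(inet_addr("10.0.0.1")));
    return 0;
}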
*/ diff --git a/net/rds/send.c b/net/rds/send.c index 9ce552abf9e9..82dcd8b84fe7 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -1144,7 +1144,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) case AF_INET: if (usin->sin_addr.s_addr == htonl(INADDR_ANY) || usin->sin_addr.s_addr == htonl(INADDR_BROADCAST) || - IN_MULTICAST(ntohl(usin->sin_addr.s_addr))) { + ipv4_is_multicast(usin->sin_addr.s_addr)) { ret = -EINVAL; goto out; } @@ -1175,7 +1175,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) addr4 = sin6->sin6_addr.s6_addr32[3]; if (addr4 == htonl(INADDR_ANY) || addr4 == htonl(INADDR_BROADCAST) || - IN_MULTICAST(ntohl(addr4))) { + ipv4_is_multicast(addr4)) { ret = -EINVAL; goto out; } diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c index 810645b5c086..93b58fde99b7 100644 --- a/net/sched/sch_cbs.c +++ b/net/sched/sch_cbs.c @@ -299,7 +299,7 @@ static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q) { struct ethtool_link_ksettings ecmd; int speed = SPEED_10; - int port_rate = -1; + int port_rate; int err; err = __ethtool_get_link_ksettings(dev, &ecmd); diff --git a/net/sunrpc/auth_gss/gss_krb5_keys.c b/net/sunrpc/auth_gss/gss_krb5_keys.c index 550fdf18d3b3..3b7f721c023b 100644 --- a/net/sunrpc/auth_gss/gss_krb5_keys.c +++ b/net/sunrpc/auth_gss/gss_krb5_keys.c @@ -228,14 +228,11 @@ u32 krb5_derive_key(const struct gss_krb5_enctype *gk5e, ret = 0; err_free_raw: - memset(rawkey, 0, keybytes); - kfree(rawkey); + kzfree(rawkey); err_free_out: - memset(outblockdata, 0, blocksize); - kfree(outblockdata); + kzfree(outblockdata); err_free_in: - memset(inblockdata, 0, blocksize); - kfree(inblockdata); + kzfree(inblockdata); err_free_cipher: crypto_free_sync_skcipher(cipher); err_return: diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index e188139f0464..41c106e45f01 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -159,12 +159,8 @@ static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq) spin_lock_irqsave(&ctx->lock, flags); info = ctx->retransmit_hint; - if (info && !before(acked_seq, info->end_seq)) { + if (info && !before(acked_seq, info->end_seq)) ctx->retransmit_hint = NULL; - list_del(&info->list); - destroy_record(info); - deleted_records++; - } list_for_each_entry_safe(info, temp, &ctx->records_list, list) { if (before(acked_seq, info->end_seq)) @@ -838,22 +834,18 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) struct net_device *netdev; char *iv, *rec_seq; struct sk_buff *skb; - int rc = -EINVAL; __be64 rcd_sn; + int rc; if (!ctx) - goto out; + return -EINVAL; - if (ctx->priv_ctx_tx) { - rc = -EEXIST; - goto out; - } + if (ctx->priv_ctx_tx) + return -EEXIST; start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL); - if (!start_marker_record) { - rc = -ENOMEM; - goto out; - } + if (!start_marker_record) + return -ENOMEM; offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL); if (!offload_ctx) { @@ -939,17 +931,11 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) if (skb) TCP_SKB_CB(skb)->eor = 1; - /* We support starting offload on multiple sockets - * concurrently, so we only need a read lock here. - * This lock must precede get_netdev_for_sock to prevent races between - * NETDEV_DOWN and setsockopt. 
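The mpoa and krb5 hunks above collapse memset()+kfree() pairs into kzfree(), which scrubs the memory (key material, cache entries) before handing it back to the allocator. A userspace sketch of the idea; unlike kzfree(), which derives the size via ksize(), this toy takes an explicit length, and explicit_bzero() would be the hardened choice since a plain memset before free can be elided by the optimizer.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Scrub-then-free for sensitive buffers. */
static void demo_kzfree(void *p, size_t len)
{
    if (!p)
        return;
    memset(p, 0, len);
    free(p);
}

int main(void)
{
    char *rawkey = malloc(32);

    if (!rawkey)
        return 1;
    memcpy(rawkey, "super secret key material.......", 32);
    demo_kzfree(rawkey, 32);    /* one call replaces memset() + kfree() */
    return 0;
}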
- */ - down_read(&device_offload_lock); netdev = get_netdev_for_sock(sk); if (!netdev) { pr_err_ratelimited("%s: netdev not found\n", __func__); rc = -EINVAL; - goto release_lock; + goto disable_cad; } if (!(netdev->features & NETIF_F_HW_TLS_TX)) { @@ -960,10 +946,15 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) /* Avoid offloading if the device is down * We don't want to offload new flows after * the NETDEV_DOWN event + * + * device_offload_lock is taken in tls_device's NETDEV_DOWN + * handler thus protecting from the device going down before + * ctx was added to tls_device_list. */ + down_read(&device_offload_lock); if (!(netdev->flags & IFF_UP)) { rc = -EINVAL; - goto release_netdev; + goto release_lock; } ctx->priv_ctx_tx = offload_ctx; @@ -971,9 +962,10 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) &ctx->crypto_send.info, tcp_sk(sk)->write_seq); if (rc) - goto release_netdev; + goto release_lock; tls_device_attach(ctx, sk, netdev); + up_read(&device_offload_lock); /* following this assignment tls_is_sk_tx_device_offloaded * will return true and the context might be accessed */ smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb); dev_put(netdev); - up_read(&device_offload_lock); - goto out; -release_netdev: - dev_put(netdev); + return 0; + release_lock: up_read(&device_offload_lock); +release_netdev: + dev_put(netdev); +disable_cad: clean_acked_data_disable(inet_csk(sk)); crypto_free_aead(offload_ctx->aead_send); free_rec_seq: @@ -999,7 +992,6 @@ free_offload_ctx: ctx->priv_ctx_tx = NULL; free_marker_record: kfree(start_marker_record); -out: return rc; } @@ -1012,17 +1004,10 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx) if (ctx->crypto_recv.info.version != TLS_1_2_VERSION) return -EOPNOTSUPP; - /* We support starting offload on multiple sockets - * concurrently, so we only need a read lock here. - * This lock must precede get_netdev_for_sock to prevent races between - * NETDEV_DOWN and setsockopt. - */ - down_read(&device_offload_lock); netdev = get_netdev_for_sock(sk); if (!netdev) { pr_err_ratelimited("%s: netdev not found\n", __func__); - rc = -EINVAL; - goto release_lock; + return -EINVAL; } if (!(netdev->features & NETIF_F_HW_TLS_RX)) { @@ -1033,16 +1018,21 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx) /* Avoid offloading if the device is down * We don't want to offload new flows after * the NETDEV_DOWN event + * + * device_offload_lock is taken in tls_device's NETDEV_DOWN + * handler thus protecting from the device going down before + * ctx was added to tls_device_list.
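The tls_device.c rework above narrows device_offload_lock: the netdev lookup moves out from under the lock, and the goto ladder is re-pointed so each failure path releases exactly what was acquired, in reverse order (lock before the netdev reference, with a new disable_cad step at the bottom). A toy version of that unwind discipline, with malloc/free standing in for the kernel primitives:

#include <stdio.h>
#include <stdlib.h>

static int demo_offload_setup(void)
{
    void *netdev, *offload_ctx;
    int rc = 0;

    netdev = malloc(16);        /* stands in for get_netdev_for_sock() */
    if (!netdev)
        return -1;              /* nothing acquired yet, plain return */

    /* the read lock would be taken here, *after* the netdev lookup */

    offload_ctx = malloc(16);
    if (!offload_ctx) {
        rc = -1;
        goto release_lock;      /* undo the lock first, then the netdev ref */
    }

    printf("offload attached\n");
    free(offload_ctx);
    /* success falls through the same teardown, in reverse order */

release_lock:
    /* up_read() would go here: release in reverse acquisition order */
    free(netdev);               /* dev_put() */
    return rc;
}

int main(void)
{
    return demo_offload_setup() < 0;
}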
*/ + down_read(&device_offload_lock); if (!(netdev->flags & IFF_UP)) { rc = -EINVAL; - goto release_netdev; + goto release_lock; } context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL); if (!context) { rc = -ENOMEM; - goto release_netdev; + goto release_lock; } context->resync_nh_reset = 1; @@ -1058,7 +1048,11 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx) goto free_sw_resources; tls_device_attach(ctx, sk, netdev); - goto release_netdev; + up_read(&device_offload_lock); + + dev_put(netdev); + + return 0; free_sw_resources: up_read(&device_offload_lock); @@ -1066,10 +1060,10 @@ free_sw_resources: down_read(&device_offload_lock); release_ctx: ctx->priv_ctx_rx = NULL; -release_netdev: - dev_put(netdev); release_lock: up_read(&device_offload_lock); +release_netdev: + dev_put(netdev); return rc; } diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 277f7c209fed..ac88877dcade 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -286,19 +286,14 @@ static void tls_sk_proto_cleanup(struct sock *sk, kfree(ctx->tx.rec_seq); kfree(ctx->tx.iv); tls_sw_release_resources_tx(sk); -#ifdef CONFIG_TLS_DEVICE } else if (ctx->tx_conf == TLS_HW) { tls_device_free_resources_tx(sk); -#endif } if (ctx->rx_conf == TLS_SW) tls_sw_release_resources_rx(sk); - -#ifdef CONFIG_TLS_DEVICE - if (ctx->rx_conf == TLS_HW) + else if (ctx->rx_conf == TLS_HW) tls_device_offload_cleanup_rx(sk); -#endif } static void tls_sk_proto_close(struct sock *sk, long timeout) @@ -331,7 +326,7 @@ static void tls_sk_proto_close(struct sock *sk, long timeout) tls_sw_strparser_done(ctx); if (ctx->rx_conf == TLS_SW) tls_sw_free_ctx_rx(ctx); - ctx->sk_proto_close(sk, timeout); + ctx->sk_proto->close(sk, timeout); if (free_ctx) tls_ctx_free(sk, ctx); @@ -451,7 +446,8 @@ static int tls_getsockopt(struct sock *sk, int level, int optname, struct tls_context *ctx = tls_get_ctx(sk); if (level != SOL_TLS) - return ctx->getsockopt(sk, level, optname, optval, optlen); + return ctx->sk_proto->getsockopt(sk, level, + optname, optval, optlen); return do_tls_getsockopt(sk, optname, optval, optlen); } @@ -536,26 +532,18 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval, } if (tx) { -#ifdef CONFIG_TLS_DEVICE rc = tls_set_device_offload(sk, ctx); conf = TLS_HW; if (rc) { -#else - { -#endif rc = tls_set_sw_offload(sk, ctx, 1); if (rc) goto err_crypto_info; conf = TLS_SW; } } else { -#ifdef CONFIG_TLS_DEVICE rc = tls_set_device_offload_rx(sk, ctx); conf = TLS_HW; if (rc) { -#else - { -#endif rc = tls_set_sw_offload(sk, ctx, 0); if (rc) goto err_crypto_info; @@ -609,7 +597,8 @@ static int tls_setsockopt(struct sock *sk, int level, int optname, struct tls_context *ctx = tls_get_ctx(sk); if (level != SOL_TLS) - return ctx->setsockopt(sk, level, optname, optval, optlen); + return ctx->sk_proto->setsockopt(sk, level, optname, optval, + optlen); return do_tls_setsockopt(sk, optname, optval, optlen); } @@ -624,10 +613,7 @@ static struct tls_context *create_ctx(struct sock *sk) return NULL; rcu_assign_pointer(icsk->icsk_ulp_data, ctx); - ctx->setsockopt = sk->sk_prot->setsockopt; - ctx->getsockopt = sk->sk_prot->getsockopt; - ctx->sk_proto_close = sk->sk_prot->close; - ctx->unhash = sk->sk_prot->unhash; + ctx->sk_proto = sk->sk_prot; return ctx; } @@ -683,9 +669,6 @@ static int tls_hw_prot(struct sock *sk) spin_unlock_bh(&device_spinlock); tls_build_proto(sk); - ctx->hash = sk->sk_prot->hash; - ctx->unhash = sk->sk_prot->unhash; - ctx->sk_proto_close = sk->sk_prot->close; ctx->sk_destruct = 
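tls_main.c above stops caching individual callbacks (setsockopt, getsockopt, close, hash, unhash) and instead keeps one ctx->sk_proto pointer to the original proto table, delegating everything it does not handle itself. A compact sketch of that interposition pattern; the demo_* names are invented, and 99 stands in for SOL_TLS.

#include <stdio.h>

struct demo_proto {
    int (*setsockopt)(int level, int val);
    void (*close)(void);
};

static int base_setsockopt(int level, int val)
{
    printf("base setsockopt level=%d val=%d\n", level, val);
    return 0;
}

static void base_close(void)
{
    printf("base close\n");
}

static const struct demo_proto base_proto = { base_setsockopt, base_close };

/* The interposer keeps one pointer to the whole original ops table
 * (like ctx->sk_proto) instead of caching each callback separately. */
struct demo_ctx {
    const struct demo_proto *sk_proto;
};

static struct demo_ctx ctx = { .sk_proto = &base_proto };

static int tls_like_setsockopt(int level, int val)
{
    if (level != 99 /* SOL_TLS stand-in */)
        return ctx.sk_proto->setsockopt(level, val);    /* delegate */
    printf("handled at the TLS layer\n");
    return 0;
}

int main(void)
{
    tls_like_setsockopt(1, 7);      /* falls through to the base proto */
    tls_like_setsockopt(99, 0);     /* consumed by the interposer */
    ctx.sk_proto->close();
    return 0;
}

One pointer also means one thing to update in tls_update() when the underlying proto is swapped, instead of a handful of callbacks that can drift out of sync.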
sk->sk_destruct; sk->sk_destruct = tls_hw_sk_destruct; ctx->rx_conf = TLS_HW_RECORD; @@ -717,7 +700,7 @@ static void tls_hw_unhash(struct sock *sk) } } spin_unlock_bh(&device_spinlock); - ctx->unhash(sk); + ctx->sk_proto->unhash(sk); } static int tls_hw_hash(struct sock *sk) @@ -726,7 +709,7 @@ static int tls_hw_hash(struct sock *sk) struct tls_device *dev; int err; - err = ctx->hash(sk); + err = ctx->sk_proto->hash(sk); spin_lock_bh(&device_spinlock); list_for_each_entry(dev, &device_list, dev_list) { if (dev->hash) { @@ -816,7 +799,6 @@ static int tls_init(struct sock *sk) ctx->tx_conf = TLS_BASE; ctx->rx_conf = TLS_BASE; - ctx->sk_proto = sk->sk_prot; update_sk_prot(sk, ctx); out: write_unlock_bh(&sk->sk_callback_lock); @@ -828,12 +810,10 @@ static void tls_update(struct sock *sk, struct proto *p) struct tls_context *ctx; ctx = tls_get_ctx(sk); - if (likely(ctx)) { - ctx->sk_proto_close = p->close; + if (likely(ctx)) ctx->sk_proto = p; - } else { + else sk->sk_prot = p; - } } static int tls_get_info(const struct sock *sk, struct sk_buff *skb) @@ -927,9 +907,7 @@ static int __init tls_register(void) tls_sw_proto_ops = inet_stream_ops; tls_sw_proto_ops.splice_read = tls_sw_splice_read; -#ifdef CONFIG_TLS_DEVICE tls_device_init(); -#endif tcp_register_ulp(&tcp_tls_ulp_ops); return 0; @@ -938,9 +916,7 @@ static int __init tls_register(void) static void __exit tls_unregister(void) { tcp_unregister_ulp(&tcp_tls_ulp_ops); -#ifdef CONFIG_TLS_DEVICE tls_device_cleanup(); -#endif } module_init(tls_register); diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 91d21b048a9b..c2b5e0d2ba1a 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -1489,13 +1489,12 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb, int pad, err = 0; if (!ctx->decrypted) { -#ifdef CONFIG_TLS_DEVICE if (tls_ctx->rx_conf == TLS_HW) { err = tls_device_decrypted(sk, skb); if (err < 0) return err; } -#endif + /* Still not decrypted after tls_device */ if (!ctx->decrypted) { err = decrypt_internal(sk, skb, dest, NULL, chunk, zc, @@ -2014,10 +2013,9 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb) ret = -EINVAL; goto read_failure; } -#ifdef CONFIG_TLS_DEVICE + tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE, TCP_SKB_CB(skb)->seq + rxm->offset); -#endif return data_len + TLS_HEADER_SIZE; read_failure: diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c index 94cc0fa3e848..5bb70c692b1e 100644 --- a/net/vmw_vsock/virtio_transport_common.c +++ b/net/vmw_vsock/virtio_transport_common.c @@ -307,8 +307,13 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk, spin_unlock_bh(&vvs->rx_lock); - /* We send a credit update only when the space available seen - * by the transmitter is less than VIRTIO_VSOCK_MAX_PKT_BUF_SIZE + /* To reduce the number of credit update messages, + * don't update credits as long as lots of space is available. + * Note: the limit chosen here is arbitrary. Setting the limit + * too high causes extra messages. Too low causes transmitter + * stalls. As stalls are in theory more expensive than extra + * messages, we set the limit to a high value. TODO: experiment + * with different values. */ if (free_space < VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) { virtio_transport_send_credit_update(vsk, |
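The virtio-vsock comment above describes the classic credit-update trade-off: advertise consumed space rarely enough to avoid one message per read, but early enough that the transmitter's view of free space never reaches zero and stalls. A little model of the thresholding; the buffer and limit values are arbitrary stand-ins, not the protocol's constants.

#include <stdint.h>
#include <stdio.h>

#define BUF_SIZE     (256u * 1024)
#define UPDATE_LIMIT (64u * 1024)   /* stand-in for VIRTIO_VSOCK_MAX_PKT_BUF_SIZE */

static uint32_t fwd_cnt;    /* bytes consumed locally */
static uint32_t last_told;  /* fwd_cnt value last advertised to the peer */

/* Called as the receiver drains data; returns 1 when a credit update
 * should be sent. The peer's view of free space only grows when we
 * advertise, so update before it can drop below UPDATE_LIMIT. */
static int demo_consume(uint32_t bytes)
{
    uint32_t free_space;

    fwd_cnt += bytes;
    free_space = BUF_SIZE - (fwd_cnt - last_told);

    if (free_space < UPDATE_LIMIT) {
        last_told = fwd_cnt;        /* emit/piggyback a credit update */
        return 1;
    }
    return 0;
}

int main(void)
{
    unsigned int updates = 0;

    for (unsigned int i = 0; i < 64; i++)   /* drain 64 x 4 KiB */
        updates += demo_consume(4096);
    printf("64 reads, %u credit update(s)\n", updates);
    return 0;
}

With these numbers, 64 reads generate a single credit update instead of 64, which is exactly the message reduction the rewritten comment is arguing for.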