77 files changed, 2450 insertions, 1957 deletions
diff --git a/Documentation/ABI/testing/sysfs-ptp b/Documentation/ABI/testing/sysfs-ptp index 2363ad810ddb..d378f57c1b73 100644 --- a/Documentation/ABI/testing/sysfs-ptp +++ b/Documentation/ABI/testing/sysfs-ptp @@ -33,6 +33,13 @@ Description: frequency adjustment value (a positive integer) in parts per billion. +What: /sys/class/ptp/ptpN/max_vclocks +Date: May 2021 +Contact: Yangbo Lu <[email protected]> +Description: + This file contains the maximum number of PTP vclocks. + Write an integer to reconfigure it. + What: /sys/class/ptp/ptpN/n_alarms Date: September 2010 Contact: Richard Cochran <[email protected]> @@ -61,6 +68,19 @@ Description: This file contains the number of programmable pins offered by the PTP hardware clock. +What: /sys/class/ptp/ptpN/n_vclocks +Date: May 2021 +Contact: Yangbo Lu <[email protected]> +Description: + This file contains the number of virtual PTP clocks in + use. By default, the value is 0 meaning that only the + physical clock is in use. Setting the value creates + the corresponding number of virtual clocks and causes + the physical clock to become free running. Setting the + value back to 0 deletes the virtual clocks and + switches the physical clock back to normal, adjustable + operation. + What: /sys/class/ptp/ptpN/pins Date: March 2014 Contact: Richard Cochran <[email protected]> diff --git a/Documentation/devicetree/bindings/net/gpmc-eth.txt b/Documentation/devicetree/bindings/net/gpmc-eth.txt index f7da3d73ca1b..32821066a85b 100644 --- a/Documentation/devicetree/bindings/net/gpmc-eth.txt +++ b/Documentation/devicetree/bindings/net/gpmc-eth.txt @@ -13,7 +13,7 @@ Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt For the properties relevant to the ethernet controller connected to the GPMC refer to the binding documentation of the device.
For example, the documentation -for the SMSC 911x is Documentation/devicetree/bindings/net/smsc911x.txt +for the SMSC 911x is Documentation/devicetree/bindings/net/smsc,lan9115.yaml Child nodes need to specify the GPMC bus address width using the "bank-width" property but is possible that an ethernet controller also has a property to diff --git a/Documentation/devicetree/bindings/net/smsc,lan9115.yaml b/Documentation/devicetree/bindings/net/smsc,lan9115.yaml new file mode 100644 index 000000000000..f86667cbcca8 --- /dev/null +++ b/Documentation/devicetree/bindings/net/smsc,lan9115.yaml @@ -0,0 +1,110 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/smsc,lan9115.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Smart Mixed-Signal Connectivity (SMSC) LAN911x/912x Controller + +maintainers: + - Shawn Guo <[email protected]> + +allOf: + - $ref: ethernet-controller.yaml# + +properties: + compatible: + oneOf: + - const: smsc,lan9115 + - items: + - enum: + - smsc,lan89218 + - smsc,lan9117 + - smsc,lan9118 + - smsc,lan9220 + - smsc,lan9221 + - const: smsc,lan9115 + + reg: + maxItems: 1 + + reg-shift: true + + reg-io-width: + enum: [ 2, 4 ] + default: 2 + + interrupts: + minItems: 1 + items: + - description: + LAN interrupt line + - description: + Optional PME (power management event) interrupt that is able to wake + up the host system with a 50ms pulse on network activity + + clocks: + maxItems: 1 + + phy-mode: true + + smsc,irq-active-high: + type: boolean + description: Indicates the IRQ polarity is active-high + + smsc,irq-push-pull: + type: boolean + description: Indicates the IRQ type is push-pull + + smsc,force-internal-phy: + type: boolean + description: Forces SMSC LAN controller to use internal PHY + + smsc,force-external-phy: + type: boolean + description: Forces SMSC LAN controller to use external PHY + + smsc,save-mac-address: + type: boolean + description: + Indicates that MAC address needs to be saved before resetting the + controller + + reset-gpios: + maxItems: 1 + description: + A GPIO line connected to the RESET (active low) signal of the device. + On many systems this is wired high so the device goes out of reset at + power-on, but if it is under program control, this optional GPIO can + wake up in response to it. + + vdd33a-supply: + description: 3.3V analog power supply + + vddvario-supply: + description: IO logic power supply + +required: + - compatible + - reg + - interrupts + +# There are lots of bus-specific properties ("qcom,*", "samsung,*", "fsl,*", +# "gpmc,*", ...) to be found, that actually depend on the compatible value of +# the parent node. 
+additionalProperties: true + +examples: + - | + #include <dt-bindings/gpio/gpio.h> + + ethernet@f4000000 { + compatible = "smsc,lan9220", "smsc,lan9115"; + reg = <0xf4000000 0x2000000>; + phy-mode = "mii"; + interrupt-parent = <&gpio1>; + interrupts = <31>, <32>; + reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>; + reg-io-width = <4>; + smsc,irq-push-pull; + }; diff --git a/Documentation/devicetree/bindings/net/smsc911x.txt b/Documentation/devicetree/bindings/net/smsc911x.txt deleted file mode 100644 index acfafc8e143c..000000000000 --- a/Documentation/devicetree/bindings/net/smsc911x.txt +++ /dev/null @@ -1,43 +0,0 @@ -* Smart Mixed-Signal Connectivity (SMSC) LAN911x/912x Controller - -Required properties: -- compatible : Should be "smsc,lan<model>", "smsc,lan9115" -- reg : Address and length of the io space for SMSC LAN -- interrupts : one or two interrupt specifiers - - The first interrupt is the SMSC LAN interrupt line - - The second interrupt (if present) is the PME (power - management event) interrupt that is able to wake up the host - system with a 50ms pulse on network activity -- phy-mode : See ethernet.txt file in the same directory - -Optional properties: -- reg-shift : Specify the quantity to shift the register offsets by -- reg-io-width : Specify the size (in bytes) of the IO accesses that - should be performed on the device. Valid value for SMSC LAN is - 2 or 4. If it's omitted or invalid, the size would be 2. -- smsc,irq-active-high : Indicates the IRQ polarity is active-high -- smsc,irq-push-pull : Indicates the IRQ type is push-pull -- smsc,force-internal-phy : Forces SMSC LAN controller to use - internal PHY -- smsc,force-external-phy : Forces SMSC LAN controller to use - external PHY -- smsc,save-mac-address : Indicates that mac address needs to be saved - before resetting the controller -- reset-gpios : a GPIO line connected to the RESET (active low) signal - of the device. On many systems this is wired high so the device goes - out of reset at power-on, but if it is under program control, this - optional GPIO can wake up in response to it. 
-- vdd33a-supply, vddvario-supply : 3.3V analog and IO logic power supplies - -Examples: - -lan9220@f4000000 { - compatible = "smsc,lan9220", "smsc,lan9115"; - reg = <0xf4000000 0x2000000>; - phy-mode = "mii"; - interrupt-parent = <&gpio1>; - interrupts = <31>, <32>; - reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>; - reg-io-width = <4>; - smsc,irq-push-pull; -}; diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst index 6ea91e41593f..c86628e6a235 100644 --- a/Documentation/networking/ethtool-netlink.rst +++ b/Documentation/networking/ethtool-netlink.rst @@ -212,6 +212,7 @@ Userspace to kernel: ``ETHTOOL_MSG_FEC_SET`` set FEC settings ``ETHTOOL_MSG_MODULE_EEPROM_GET`` read SFP module EEPROM ``ETHTOOL_MSG_STATS_GET`` get standard statistics + ``ETHTOOL_MSG_PHC_VCLOCKS_GET`` get PHC virtual clocks info ===================================== ================================ Kernel to userspace: @@ -250,6 +251,7 @@ Kernel to userspace: ``ETHTOOL_MSG_FEC_NTF`` FEC settings ``ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY`` read SFP module EEPROM ``ETHTOOL_MSG_STATS_GET_REPLY`` standard statistics + ``ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY`` PHC virtual clocks info ======================================== ================================= ``GET`` requests are sent by userspace applications to retrieve device @@ -1477,6 +1479,25 @@ Low and high bounds are inclusive, for example: etherStatsPkts512to1023Octets 512 1023 ============================= ==== ==== +PHC_VCLOCKS_GET +=============== + +Query device PHC virtual clocks information. + +Request contents: + + ==================================== ====== ========================== + ``ETHTOOL_A_PHC_VCLOCKS_HEADER`` nested request header + ==================================== ====== ========================== + +Kernel response contents: + + ==================================== ====== ========================== + ``ETHTOOL_A_PHC_VCLOCKS_HEADER`` nested reply header + ``ETHTOOL_A_PHC_VCLOCKS_NUM`` u32 PHC virtual clocks number + ``ETHTOOL_A_PHC_VCLOCKS_INDEX`` s32 PHC index array + ==================================== ====== ========================== + Request translation =================== @@ -1575,4 +1596,5 @@ are netlink only. n/a ``ETHTOOL_MSG_CABLE_TEST_ACT`` n/a ``ETHTOOL_MSG_CABLE_TEST_TDR_ACT`` n/a ``ETHTOOL_MSG_TUNNEL_INFO_GET`` + n/a ``ETHTOOL_MSG_PHC_VCLOCKS_GET`` =================================== ===================================== diff --git a/Documentation/networking/tipc.rst b/Documentation/networking/tipc.rst index 76775f24cdc8..ab63d298cca2 100644 --- a/Documentation/networking/tipc.rst +++ b/Documentation/networking/tipc.rst @@ -4,10 +4,125 @@ Linux Kernel TIPC ================= -TIPC (Transparent Inter Process Communication) is a protocol that is -specially designed for intra-cluster communication. +Introduction +============ -For more information about TIPC, see http://tipc.sourceforge.net. +TIPC (Transparent Inter Process Communication) is a protocol that is specially +designed for intra-cluster communication. It can be configured to transmit +messages either on UDP or directly across Ethernet. Message delivery is +sequence guaranteed, loss free and flow controlled. Latency times are shorter +than with any other known protocol, while maximal throughput is comparable to +that of TCP. + +TIPC Features +------------- + +- Cluster wide IPC service + + Have you ever wished you had the convenience of Unix Domain Sockets even when + transmitting data between cluster nodes? 
Where you yourself determine the + addresses you want to bind to and use? Where you don't have to perform DNS + lookups and worry about IP addresses? Where you don't have to start timers + to monitor the continuous existence of peer sockets? And yet without the + downsides of that socket type, such as the risk of lingering inodes? + + Welcome to the Transparent Inter Process Communication service, TIPC in short, + which gives you all of this, and a lot more. + +- Service Addressing + + A fundamental concept in TIPC is that of Service Addressing which makes it + possible for a programmer to choose their own address, bind it to a server + socket and let client programs use only that address for sending messages + (see the example sketch after this feature list). + +- Service Tracking + + A client wanting to wait for the availability of a server uses the Service + Tracking mechanism to subscribe for binding and unbinding/close events for + sockets with the associated service address. + + The service tracking mechanism can also be used for Cluster Topology Tracking, + i.e., subscribing for availability/non-availability of cluster nodes. + + Likewise, the service tracking mechanism can be used for Cluster Connectivity + Tracking, i.e., subscribing for up/down events for individual links between + cluster nodes. + +- Transmission Modes + + Using a service address, a client can send datagram messages to a server socket. + + Using the same address type, it can establish a connection towards an accepting + server socket. + + It can also use a service address to create and join a Communication Group, + which is the TIPC manifestation of a brokerless message bus. + + Multicast with very good performance and scalability is available both in + datagram mode and in communication group mode. + +- Inter Node Links + + Communication between any two nodes in a cluster is maintained by one or two + Inter Node Links, which both guarantee data traffic integrity and monitor + the peer node's availability. + +- Cluster Scalability + + By applying the Overlapping Ring Monitoring algorithm on the inter node links, + it is possible to scale TIPC clusters up to 1000 nodes with a maintained + neighbor failure discovery time of 1-2 seconds. For smaller clusters this + time can be made much shorter. + +- Neighbor Discovery + + Neighbor Node Discovery in the cluster is done by Ethernet broadcast or UDP + multicast, when either of those services is available. If not, configured peer + IP addresses can be used. + +- Configuration + + When running TIPC in single node mode, no configuration whatsoever is needed. + When running in cluster mode, TIPC must as a minimum be given a node address + (before Linux 4.17) and told which interface to attach to. The "tipc" + configuration tool makes it possible to add and maintain many more + configuration parameters. + +- Performance + + TIPC message transfer latency times are better than in any other known protocol. + Maximal byte throughput for inter-node connections is still somewhat lower than + for TCP, while it is superior for intra-node and inter-container throughput + on the same host. + +- Language Support + + The TIPC user API has support for C, Python, Perl, Ruby, D and Go.
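[Editor's note] The service addressing and datagram features described above map directly onto the BSD socket API through the AF_TIPC address family. The following is an illustrative sketch only, not part of this patch set; the service type and instance values are invented for the example, and it assumes the <linux/tipc.h> uapi header. It shows roughly how a client could send a datagram to a service address::

	/*
	 * Minimal sketch of TIPC service addressing (illustrative only;
	 * the service type/instance values below are arbitrary examples).
	 */
	#include <linux/tipc.h>
	#include <stdio.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main(void)
	{
		struct sockaddr_tipc srv = {
			.family   = AF_TIPC,
			.addrtype = TIPC_ADDR_NAME,     /* address by service name */
			.scope    = TIPC_CLUSTER_SCOPE, /* relevant when bind()ing */
		};
		const char msg[] = "hello";
		int sd;

		srv.addr.name.name.type     = 18888; /* example service type */
		srv.addr.name.name.instance = 17;    /* example instance */
		srv.addr.name.domain        = 0;     /* look up cluster-wide */

		/* SOCK_RDM gives reliable, connectionless datagram service. */
		sd = socket(AF_TIPC, SOCK_RDM, 0);
		if (sd < 0)
			return 1;

		/* A server would bind() to the same address and recvfrom(). */
		if (sendto(sd, msg, sizeof(msg), 0,
			   (struct sockaddr *)&srv, sizeof(srv)) < 0)
			perror("sendto");

		close(sd);
		return 0;
	}

A server binds a sockaddr_tipc of the same form (or TIPC_ADDR_NAMESEQ with a range of instances) and then receives on the socket; no IP address or DNS lookup is involved at any point, which is what the feature list above is getting at.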
+ +More Information +---------------- + +- How to set up TIPC: + + http://tipc.io/getting_started.html + +- How to program with TIPC: + + http://tipc.io/programming.html + +- How to contribute to TIPC: + + http://tipc.io/contacts.html + +- More details about TIPC specification: + + http://tipc.io/protocol.html + + +Implementation +============== + +TIPC is implemented as a kernel module in the net/tipc/ directory. TIPC Base Types --------------- diff --git a/MAINTAINERS b/MAINTAINERS index 88449b7a4c95..ca7287ee775b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -14890,6 +14890,13 @@ F: drivers/net/phy/dp83640* F: drivers/ptp/* F: include/linux/ptp_cl* +PTP VIRTUAL CLOCK SUPPORT +M: Yangbo Lu <[email protected]> +S: Maintained +F: drivers/ptp/ptp_vclock.c +F: net/ethtool/phc_vclocks.c + PTRACE SUPPORT M: Oleg Nesterov <[email protected]> S: Maintained diff --git a/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts b/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts index dace8ffeb991..0a4ffd10c484 100644 --- a/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts +++ b/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts @@ -581,7 +581,7 @@ * EBI2. This has a 25MHz chrystal next to it, so no * clocking is needed. */ - ethernet-ebi2@2,0 { + ethernet@2,0 { compatible = "smsc,lan9221", "smsc,lan9115"; reg = <2 0x0 0x100>; /* @@ -598,8 +598,6 @@ phy-mode = "mii"; reg-io-width = <2>; smsc,force-external-phy; - /* IRQ on edge falling = active low */ - smsc,irq-active-low; smsc,irq-push-pull; /* diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig index a77124bc1f4b..709660cb38f8 100644 --- a/drivers/net/caif/Kconfig +++ b/drivers/net/caif/Kconfig @@ -20,15 +20,6 @@ config CAIF_TTY identified as N_CAIF. When this ldisc is opened from user space it will redirect the TTY's traffic into the CAIF stack. -config CAIF_HSI - tristate "CAIF HSI transport driver" - depends on CAIF - default n - help - The CAIF low level driver for CAIF over HSI. - Be aware that if you enable this then you also need to - enable a low-level HSI driver. - config CAIF_VIRTIO tristate "CAIF virtio transport driver" depends on CAIF && HAS_DMA diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile index b1918c8c126c..97f664f8016c 100644 --- a/drivers/net/caif/Makefile +++ b/drivers/net/caif/Makefile @@ -4,8 +4,5 @@ ccflags-$(CONFIG_CAIF_DEBUG) := -DDEBUG # Serial interface obj-$(CONFIG_CAIF_TTY) += caif_serial.o -# HSI interface -obj-$(CONFIG_CAIF_HSI) += caif_hsi.o - # Virtio interface obj-$(CONFIG_CAIF_VIRTIO) += caif_virtio.o diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c deleted file mode 100644 index 3d63b15bbaa1..000000000000 --- a/drivers/net/caif/caif_hsi.c +++ /dev/null @@ -1,1454 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) ST-Ericsson AB 2010 - * Author: Daniel Martensson - * Dmitry.Tarnyagin / [email protected] - */ - -#define pr_fmt(fmt) KBUILD_MODNAME fmt - -#include <linux/init.h> -#include <linux/module.h> -#include <linux/device.h> -#include <linux/netdevice.h> -#include <linux/string.h> -#include <linux/list.h> -#include <linux/interrupt.h> -#include <linux/delay.h> -#include <linux/sched.h> -#include <linux/if_arp.h> -#include <linux/timer.h> -#include <net/rtnetlink.h> -#include <linux/pkt_sched.h> -#include <net/caif/caif_layer.h> -#include <net/caif/caif_hsi.h> - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Daniel Martensson"); -MODULE_DESCRIPTION("CAIF HSI driver"); - -/* Returns the number of padding bytes for alignment.
*/ -#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\ - (((pow)-((x)&((pow)-1))))) - -static const struct cfhsi_config hsi_default_config = { - - /* Inactivity timeout on HSI, ms */ - .inactivity_timeout = HZ, - - /* Aggregation timeout (ms) of zero means no aggregation is done*/ - .aggregation_timeout = 1, - - /* - * HSI link layer flow-control thresholds. - * Threshold values for the HSI packet queue. Flow-control will be - * asserted when the number of packets exceeds q_high_mark. It will - * not be de-asserted before the number of packets drops below - * q_low_mark. - * Warning: A high threshold value might increase throughput but it - * will at the same time prevent channel prioritization and increase - * the risk of flooding the modem. The high threshold should be above - * the low. - */ - .q_high_mark = 100, - .q_low_mark = 50, - - /* - * HSI padding options. - * Warning: must be a base of 2 (& operation used) and can not be zero ! - */ - .head_align = 4, - .tail_align = 4, -}; - -#define ON 1 -#define OFF 0 - -static LIST_HEAD(cfhsi_list); - -static void cfhsi_inactivity_tout(struct timer_list *t) -{ - struct cfhsi *cfhsi = from_timer(cfhsi, t, inactivity_timer); - - netdev_dbg(cfhsi->ndev, "%s.\n", - __func__); - - /* Schedule power down work queue. */ - if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) - queue_work(cfhsi->wq, &cfhsi->wake_down_work); -} - -static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi, - const struct sk_buff *skb, - int direction) -{ - struct caif_payload_info *info; - int hpad, tpad, len; - - info = (struct caif_payload_info *)&skb->cb; - hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align); - tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align); - len = skb->len + hpad + tpad; - - if (direction > 0) - cfhsi->aggregation_len += len; - else if (direction < 0) - cfhsi->aggregation_len -= len; -} - -static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi) -{ - int i; - - if (cfhsi->cfg.aggregation_timeout == 0) - return true; - - for (i = 0; i < CFHSI_PRIO_BEBK; ++i) { - if (cfhsi->qhead[i].qlen) - return true; - } - - /* TODO: Use aggregation_len instead */ - if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS) - return true; - - return false; -} - -static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi) -{ - struct sk_buff *skb; - int i; - - for (i = 0; i < CFHSI_PRIO_LAST; ++i) { - skb = skb_dequeue(&cfhsi->qhead[i]); - if (skb) - break; - } - - return skb; -} - -static int cfhsi_tx_queue_len(struct cfhsi *cfhsi) -{ - int i, len = 0; - for (i = 0; i < CFHSI_PRIO_LAST; ++i) - len += skb_queue_len(&cfhsi->qhead[i]); - return len; -} - -static void cfhsi_abort_tx(struct cfhsi *cfhsi) -{ - struct sk_buff *skb; - - for (;;) { - spin_lock_bh(&cfhsi->lock); - skb = cfhsi_dequeue(cfhsi); - if (!skb) - break; - - cfhsi->ndev->stats.tx_errors++; - cfhsi->ndev->stats.tx_dropped++; - cfhsi_update_aggregation_stats(cfhsi, skb, -1); - spin_unlock_bh(&cfhsi->lock); - kfree_skb(skb); - } - cfhsi->tx_state = CFHSI_TX_STATE_IDLE; - if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) - mod_timer(&cfhsi->inactivity_timer, - jiffies + cfhsi->cfg.inactivity_timeout); - spin_unlock_bh(&cfhsi->lock); -} - -static int cfhsi_flush_fifo(struct cfhsi *cfhsi) -{ - char buffer[32]; /* Any reasonable value */ - size_t fifo_occupancy; - int ret; - - netdev_dbg(cfhsi->ndev, "%s.\n", - __func__); - - do { - ret = cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops, - &fifo_occupancy); - if (ret) { - netdev_warn(cfhsi->ndev, - "%s: can't get FIFO occupancy: %d.\n", - 
__func__, ret); - break; - } else if (!fifo_occupancy) - /* No more data, exitting normally */ - break; - - fifo_occupancy = min(sizeof(buffer), fifo_occupancy); - set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits); - ret = cfhsi->ops->cfhsi_rx(buffer, fifo_occupancy, - cfhsi->ops); - if (ret) { - clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits); - netdev_warn(cfhsi->ndev, - "%s: can't read data: %d.\n", - __func__, ret); - break; - } - - ret = 5 * HZ; - ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait, - !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret); - - if (ret < 0) { - netdev_warn(cfhsi->ndev, - "%s: can't wait for flush complete: %d.\n", - __func__, ret); - break; - } else if (!ret) { - ret = -ETIMEDOUT; - netdev_warn(cfhsi->ndev, - "%s: timeout waiting for flush complete.\n", - __func__); - break; - } - } while (1); - - return ret; -} - -static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi) -{ - int nfrms = 0; - int pld_len = 0; - struct sk_buff *skb; - u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ; - - skb = cfhsi_dequeue(cfhsi); - if (!skb) - return 0; - - /* Clear offset. */ - desc->offset = 0; - - /* Check if we can embed a CAIF frame. */ - if (skb->len < CFHSI_MAX_EMB_FRM_SZ) { - struct caif_payload_info *info; - int hpad; - int tpad; - - /* Calculate needed head alignment and tail alignment. */ - info = (struct caif_payload_info *)&skb->cb; - - hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align); - tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align); - - /* Check if frame still fits with added alignment. */ - if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) { - u8 *pemb = desc->emb_frm; - desc->offset = CFHSI_DESC_SHORT_SZ; - *pemb = (u8)(hpad - 1); - pemb += hpad; - - /* Update network statistics. */ - spin_lock_bh(&cfhsi->lock); - cfhsi->ndev->stats.tx_packets++; - cfhsi->ndev->stats.tx_bytes += skb->len; - cfhsi_update_aggregation_stats(cfhsi, skb, -1); - spin_unlock_bh(&cfhsi->lock); - - /* Copy in embedded CAIF frame. */ - skb_copy_bits(skb, 0, pemb, skb->len); - - /* Consume the SKB */ - consume_skb(skb); - skb = NULL; - } - } - - /* Create payload CAIF frames. */ - while (nfrms < CFHSI_MAX_PKTS) { - struct caif_payload_info *info; - int hpad; - int tpad; - - if (!skb) - skb = cfhsi_dequeue(cfhsi); - - if (!skb) - break; - - /* Calculate needed head alignment and tail alignment. */ - info = (struct caif_payload_info *)&skb->cb; - - hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align); - tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align); - - /* Fill in CAIF frame length in descriptor. */ - desc->cffrm_len[nfrms] = hpad + skb->len + tpad; - - /* Fill head padding information. */ - *pfrm = (u8)(hpad - 1); - pfrm += hpad; - - /* Update network statistics. */ - spin_lock_bh(&cfhsi->lock); - cfhsi->ndev->stats.tx_packets++; - cfhsi->ndev->stats.tx_bytes += skb->len; - cfhsi_update_aggregation_stats(cfhsi, skb, -1); - spin_unlock_bh(&cfhsi->lock); - - /* Copy in CAIF frame. */ - skb_copy_bits(skb, 0, pfrm, skb->len); - - /* Update payload length. */ - pld_len += desc->cffrm_len[nfrms]; - - /* Update frame pointer. */ - pfrm += skb->len + tpad; - - /* Consume the SKB */ - consume_skb(skb); - skb = NULL; - - /* Update number of frames. */ - nfrms++; - } - - /* Unused length fields should be zero-filled (according to SPEC). */ - while (nfrms < CFHSI_MAX_PKTS) { - desc->cffrm_len[nfrms] = 0x0000; - nfrms++; - } - - /* Check if we can piggy-back another descriptor. 
*/ - if (cfhsi_can_send_aggregate(cfhsi)) - desc->header |= CFHSI_PIGGY_DESC; - else - desc->header &= ~CFHSI_PIGGY_DESC; - - return CFHSI_DESC_SZ + pld_len; -} - -static void cfhsi_start_tx(struct cfhsi *cfhsi) -{ - struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf; - int len, res; - - netdev_dbg(cfhsi->ndev, "%s.\n", __func__); - - if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) - return; - - do { - /* Create HSI frame. */ - len = cfhsi_tx_frm(desc, cfhsi); - if (!len) { - spin_lock_bh(&cfhsi->lock); - if (unlikely(cfhsi_tx_queue_len(cfhsi))) { - spin_unlock_bh(&cfhsi->lock); - res = -EAGAIN; - continue; - } - cfhsi->tx_state = CFHSI_TX_STATE_IDLE; - /* Start inactivity timer. */ - mod_timer(&cfhsi->inactivity_timer, - jiffies + cfhsi->cfg.inactivity_timeout); - spin_unlock_bh(&cfhsi->lock); - break; - } - - /* Set up new transfer. */ - res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops); - if (WARN_ON(res < 0)) - netdev_err(cfhsi->ndev, "%s: TX error %d.\n", - __func__, res); - } while (res < 0); -} - -static void cfhsi_tx_done(struct cfhsi *cfhsi) -{ - netdev_dbg(cfhsi->ndev, "%s.\n", __func__); - - if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) - return; - - /* - * Send flow on if flow off has been previously signalled - * and number of packets is below low water mark. - */ - spin_lock_bh(&cfhsi->lock); - if (cfhsi->flow_off_sent && - cfhsi_tx_queue_len(cfhsi) <= cfhsi->cfg.q_low_mark && - cfhsi->cfdev.flowctrl) { - - cfhsi->flow_off_sent = 0; - cfhsi->cfdev.flowctrl(cfhsi->ndev, ON); - } - - if (cfhsi_can_send_aggregate(cfhsi)) { - spin_unlock_bh(&cfhsi->lock); - cfhsi_start_tx(cfhsi); - } else { - mod_timer(&cfhsi->aggregation_timer, - jiffies + cfhsi->cfg.aggregation_timeout); - spin_unlock_bh(&cfhsi->lock); - } - - return; -} - -static void cfhsi_tx_done_cb(struct cfhsi_cb_ops *cb_ops) -{ - struct cfhsi *cfhsi; - - cfhsi = container_of(cb_ops, struct cfhsi, cb_ops); - netdev_dbg(cfhsi->ndev, "%s.\n", - __func__); - - if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) - return; - cfhsi_tx_done(cfhsi); -} - -static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi) -{ - int xfer_sz = 0; - int nfrms = 0; - u16 *plen = NULL; - u8 *pfrm = NULL; - - if ((desc->header & ~CFHSI_PIGGY_DESC) || - (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) { - netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n", - __func__); - return -EPROTO; - } - - /* Check for embedded CAIF frame. */ - if (desc->offset) { - struct sk_buff *skb; - int len = 0; - pfrm = ((u8 *)desc) + desc->offset; - - /* Remove offset padding. */ - pfrm += *pfrm + 1; - - /* Read length of CAIF frame (little endian). */ - len = *pfrm; - len |= ((*(pfrm+1)) << 8) & 0xFF00; - len += 2; /* Add FCS fields. */ - - /* Sanity check length of CAIF frame. */ - if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) { - netdev_err(cfhsi->ndev, "%s: Invalid length.\n", - __func__); - return -EPROTO; - } - - /* Allocate SKB (OK even in IRQ context). */ - skb = alloc_skb(len + 1, GFP_ATOMIC); - if (!skb) { - netdev_err(cfhsi->ndev, "%s: Out of memory !\n", - __func__); - return -ENOMEM; - } - caif_assert(skb != NULL); - - skb_put_data(skb, pfrm, len); - - skb->protocol = htons(ETH_P_CAIF); - skb_reset_mac_header(skb); - skb->dev = cfhsi->ndev; - - netif_rx_any_context(skb); - - /* Update network statistics. */ - cfhsi->ndev->stats.rx_packets++; - cfhsi->ndev->stats.rx_bytes += len; - } - - /* Calculate transfer length. 
*/ - plen = desc->cffrm_len; - while (nfrms < CFHSI_MAX_PKTS && *plen) { - xfer_sz += *plen; - plen++; - nfrms++; - } - - /* Check for piggy-backed descriptor. */ - if (desc->header & CFHSI_PIGGY_DESC) - xfer_sz += CFHSI_DESC_SZ; - - if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) { - netdev_err(cfhsi->ndev, - "%s: Invalid payload len: %d, ignored.\n", - __func__, xfer_sz); - return -EPROTO; - } - return xfer_sz; -} - -static int cfhsi_rx_desc_len(struct cfhsi_desc *desc) -{ - int xfer_sz = 0; - int nfrms = 0; - u16 *plen; - - if ((desc->header & ~CFHSI_PIGGY_DESC) || - (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) { - - pr_err("Invalid descriptor. %x %x\n", desc->header, - desc->offset); - return -EPROTO; - } - - /* Calculate transfer length. */ - plen = desc->cffrm_len; - while (nfrms < CFHSI_MAX_PKTS && *plen) { - xfer_sz += *plen; - plen++; - nfrms++; - } - - if (xfer_sz % 4) { - pr_err("Invalid payload len: %d, ignored.\n", xfer_sz); - return -EPROTO; - } - return xfer_sz; -} - -static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi) -{ - int rx_sz = 0; - int nfrms = 0; - u16 *plen = NULL; - u8 *pfrm = NULL; - - /* Sanity check header and offset. */ - if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) || - (desc->offset > CFHSI_MAX_EMB_FRM_SZ))) { - netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n", - __func__); - return -EPROTO; - } - - /* Set frame pointer to start of payload. */ - pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ; - plen = desc->cffrm_len; - - /* Skip already processed frames. */ - while (nfrms < cfhsi->rx_state.nfrms) { - pfrm += *plen; - rx_sz += *plen; - plen++; - nfrms++; - } - - /* Parse payload. */ - while (nfrms < CFHSI_MAX_PKTS && *plen) { - struct sk_buff *skb; - u8 *pcffrm = NULL; - int len; - - /* CAIF frame starts after head padding. */ - pcffrm = pfrm + *pfrm + 1; - - /* Read length of CAIF frame (little endian). */ - len = *pcffrm; - len |= ((*(pcffrm + 1)) << 8) & 0xFF00; - len += 2; /* Add FCS fields. */ - - /* Sanity check length of CAIF frames. */ - if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) { - netdev_err(cfhsi->ndev, "%s: Invalid length.\n", - __func__); - return -EPROTO; - } - - /* Allocate SKB (OK even in IRQ context). */ - skb = alloc_skb(len + 1, GFP_ATOMIC); - if (!skb) { - netdev_err(cfhsi->ndev, "%s: Out of memory !\n", - __func__); - cfhsi->rx_state.nfrms = nfrms; - return -ENOMEM; - } - caif_assert(skb != NULL); - - skb_put_data(skb, pcffrm, len); - - skb->protocol = htons(ETH_P_CAIF); - skb_reset_mac_header(skb); - skb->dev = cfhsi->ndev; - - netif_rx_any_context(skb); - - /* Update network statistics. */ - cfhsi->ndev->stats.rx_packets++; - cfhsi->ndev->stats.rx_bytes += len; - - pfrm += *plen; - rx_sz += *plen; - plen++; - nfrms++; - } - - return rx_sz; -} - -static void cfhsi_rx_done(struct cfhsi *cfhsi) -{ - int res; - int desc_pld_len = 0, rx_len, rx_state; - struct cfhsi_desc *desc = NULL; - u8 *rx_ptr, *rx_buf; - struct cfhsi_desc *piggy_desc = NULL; - - desc = (struct cfhsi_desc *)cfhsi->rx_buf; - - netdev_dbg(cfhsi->ndev, "%s\n", __func__); - - if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) - return; - - /* Update inactivity timer if pending. 
*/ - spin_lock_bh(&cfhsi->lock); - mod_timer_pending(&cfhsi->inactivity_timer, - jiffies + cfhsi->cfg.inactivity_timeout); - spin_unlock_bh(&cfhsi->lock); - - if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) { - desc_pld_len = cfhsi_rx_desc_len(desc); - - if (desc_pld_len < 0) - goto out_of_sync; - - rx_buf = cfhsi->rx_buf; - rx_len = desc_pld_len; - if (desc_pld_len > 0 && (desc->header & CFHSI_PIGGY_DESC)) - rx_len += CFHSI_DESC_SZ; - if (desc_pld_len == 0) - rx_buf = cfhsi->rx_flip_buf; - } else { - rx_buf = cfhsi->rx_flip_buf; - - rx_len = CFHSI_DESC_SZ; - if (cfhsi->rx_state.pld_len > 0 && - (desc->header & CFHSI_PIGGY_DESC)) { - - piggy_desc = (struct cfhsi_desc *) - (desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ + - cfhsi->rx_state.pld_len); - - cfhsi->rx_state.piggy_desc = true; - - /* Extract payload len from piggy-backed descriptor. */ - desc_pld_len = cfhsi_rx_desc_len(piggy_desc); - if (desc_pld_len < 0) - goto out_of_sync; - - if (desc_pld_len > 0) { - rx_len = desc_pld_len; - if (piggy_desc->header & CFHSI_PIGGY_DESC) - rx_len += CFHSI_DESC_SZ; - } - - /* - * Copy needed information from the piggy-backed - * descriptor to the descriptor in the start. - */ - memcpy(rx_buf, (u8 *)piggy_desc, - CFHSI_DESC_SHORT_SZ); - } - } - - if (desc_pld_len) { - rx_state = CFHSI_RX_STATE_PAYLOAD; - rx_ptr = rx_buf + CFHSI_DESC_SZ; - } else { - rx_state = CFHSI_RX_STATE_DESC; - rx_ptr = rx_buf; - rx_len = CFHSI_DESC_SZ; - } - - /* Initiate next read */ - if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) { - /* Set up new transfer. */ - netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", - __func__); - - res = cfhsi->ops->cfhsi_rx(rx_ptr, rx_len, - cfhsi->ops); - if (WARN_ON(res < 0)) { - netdev_err(cfhsi->ndev, "%s: RX error %d.\n", - __func__, res); - cfhsi->ndev->stats.rx_errors++; - cfhsi->ndev->stats.rx_dropped++; - } - } - - if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) { - /* Extract payload from descriptor */ - if (cfhsi_rx_desc(desc, cfhsi) < 0) - goto out_of_sync; - } else { - /* Extract payload */ - if (cfhsi_rx_pld(desc, cfhsi) < 0) - goto out_of_sync; - if (piggy_desc) { - /* Extract any payload in piggyback descriptor. 
*/ - if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0) - goto out_of_sync; - /* Mark no embedded frame after extracting it */ - piggy_desc->offset = 0; - } - } - - /* Update state info */ - memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state)); - cfhsi->rx_state.state = rx_state; - cfhsi->rx_ptr = rx_ptr; - cfhsi->rx_len = rx_len; - cfhsi->rx_state.pld_len = desc_pld_len; - cfhsi->rx_state.piggy_desc = desc->header & CFHSI_PIGGY_DESC; - - if (rx_buf != cfhsi->rx_buf) - swap(cfhsi->rx_buf, cfhsi->rx_flip_buf); - return; - -out_of_sync: - netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__); - print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE, - cfhsi->rx_buf, CFHSI_DESC_SZ); - schedule_work(&cfhsi->out_of_sync_work); -} - -static void cfhsi_rx_slowpath(struct timer_list *t) -{ - struct cfhsi *cfhsi = from_timer(cfhsi, t, rx_slowpath_timer); - - netdev_dbg(cfhsi->ndev, "%s.\n", - __func__); - - cfhsi_rx_done(cfhsi); -} - -static void cfhsi_rx_done_cb(struct cfhsi_cb_ops *cb_ops) -{ - struct cfhsi *cfhsi; - - cfhsi = container_of(cb_ops, struct cfhsi, cb_ops); - netdev_dbg(cfhsi->ndev, "%s.\n", - __func__); - - if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) - return; - - if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits)) - wake_up_interruptible(&cfhsi->flush_fifo_wait); - else - cfhsi_rx_done(cfhsi); -} - -static void cfhsi_wake_up(struct work_struct *work) -{ - struct cfhsi *cfhsi = NULL; - int res; - int len; - long ret; - - cfhsi = container_of(work, struct cfhsi, wake_up_work); - - if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) - return; - - if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) { - /* It happenes when wakeup is requested by - * both ends at the same time. */ - clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); - clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits); - return; - } - - /* Activate wake line. */ - cfhsi->ops->cfhsi_wake_up(cfhsi->ops); - - netdev_dbg(cfhsi->ndev, "%s: Start waiting.\n", - __func__); - - /* Wait for acknowledge. */ - ret = CFHSI_WAKE_TOUT; - ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait, - test_and_clear_bit(CFHSI_WAKE_UP_ACK, - &cfhsi->bits), ret); - if (unlikely(ret < 0)) { - /* Interrupted by signal. */ - netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n", - __func__, ret); - - clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); - cfhsi->ops->cfhsi_wake_down(cfhsi->ops); - return; - } else if (!ret) { - bool ca_wake = false; - size_t fifo_occupancy = 0; - - /* Wakeup timeout */ - netdev_dbg(cfhsi->ndev, "%s: Timeout.\n", - __func__); - - /* Check FIFO to check if modem has sent something. */ - WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops, - &fifo_occupancy)); - - netdev_dbg(cfhsi->ndev, "%s: Bytes in FIFO: %u.\n", - __func__, (unsigned) fifo_occupancy); - - /* Check if we misssed the interrupt. */ - WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops, - &ca_wake)); - - if (ca_wake) { - netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n", - __func__); - - /* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */ - clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits); - - /* Continue execution. */ - goto wake_ack; - } - - clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); - cfhsi->ops->cfhsi_wake_down(cfhsi->ops); - return; - } -wake_ack: - netdev_dbg(cfhsi->ndev, "%s: Woken.\n", - __func__); - - /* Clear power up bit. */ - set_bit(CFHSI_AWAKE, &cfhsi->bits); - clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); - - /* Resume read operation. 
*/ - netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__); - res = cfhsi->ops->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->ops); - - if (WARN_ON(res < 0)) - netdev_err(cfhsi->ndev, "%s: RX err %d.\n", __func__, res); - - /* Clear power up acknowledment. */ - clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits); - - spin_lock_bh(&cfhsi->lock); - - /* Resume transmit if queues are not empty. */ - if (!cfhsi_tx_queue_len(cfhsi)) { - netdev_dbg(cfhsi->ndev, "%s: Peer wake, start timer.\n", - __func__); - /* Start inactivity timer. */ - mod_timer(&cfhsi->inactivity_timer, - jiffies + cfhsi->cfg.inactivity_timeout); - spin_unlock_bh(&cfhsi->lock); - return; - } - - netdev_dbg(cfhsi->ndev, "%s: Host wake.\n", - __func__); - - spin_unlock_bh(&cfhsi->lock); - - /* Create HSI frame. */ - len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi); - - if (likely(len > 0)) { - /* Set up new transfer. */ - res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops); - if (WARN_ON(res < 0)) { - netdev_err(cfhsi->ndev, "%s: TX error %d.\n", - __func__, res); - cfhsi_abort_tx(cfhsi); - } - } else { - netdev_err(cfhsi->ndev, - "%s: Failed to create HSI frame: %d.\n", - __func__, len); - } -} - -static void cfhsi_wake_down(struct work_struct *work) -{ - long ret; - struct cfhsi *cfhsi = NULL; - size_t fifo_occupancy = 0; - int retry = CFHSI_WAKE_TOUT; - - cfhsi = container_of(work, struct cfhsi, wake_down_work); - netdev_dbg(cfhsi->ndev, "%s.\n", __func__); - - if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) - return; - - /* Deactivate wake line. */ - cfhsi->ops->cfhsi_wake_down(cfhsi->ops); - - /* Wait for acknowledge. */ - ret = CFHSI_WAKE_TOUT; - ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait, - test_and_clear_bit(CFHSI_WAKE_DOWN_ACK, - &cfhsi->bits), ret); - if (ret < 0) { - /* Interrupted by signal. */ - netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n", - __func__, ret); - return; - } else if (!ret) { - bool ca_wake = true; - - /* Timeout */ - netdev_err(cfhsi->ndev, "%s: Timeout.\n", __func__); - - /* Check if we misssed the interrupt. */ - WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops, - &ca_wake)); - if (!ca_wake) - netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n", - __func__); - } - - /* Check FIFO occupancy. */ - while (retry) { - WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops, - &fifo_occupancy)); - - if (!fifo_occupancy) - break; - - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(1); - retry--; - } - - if (!retry) - netdev_err(cfhsi->ndev, "%s: FIFO Timeout.\n", __func__); - - /* Clear AWAKE condition. */ - clear_bit(CFHSI_AWAKE, &cfhsi->bits); - - /* Cancel pending RX requests. */ - cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops); -} - -static void cfhsi_out_of_sync(struct work_struct *work) -{ - struct cfhsi *cfhsi = NULL; - - cfhsi = container_of(work, struct cfhsi, out_of_sync_work); - - rtnl_lock(); - dev_close(cfhsi->ndev); - rtnl_unlock(); -} - -static void cfhsi_wake_up_cb(struct cfhsi_cb_ops *cb_ops) -{ - struct cfhsi *cfhsi = NULL; - - cfhsi = container_of(cb_ops, struct cfhsi, cb_ops); - netdev_dbg(cfhsi->ndev, "%s.\n", - __func__); - - set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits); - wake_up_interruptible(&cfhsi->wake_up_wait); - - if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) - return; - - /* Schedule wake up work queue if the peer initiates. 
*/ - if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits)) - queue_work(cfhsi->wq, &cfhsi->wake_up_work); -} - -static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops) -{ - struct cfhsi *cfhsi = NULL; - - cfhsi = container_of(cb_ops, struct cfhsi, cb_ops); - netdev_dbg(cfhsi->ndev, "%s.\n", - __func__); - - /* Initiating low power is only permitted by the host (us). */ - set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits); - wake_up_interruptible(&cfhsi->wake_down_wait); -} - -static void cfhsi_aggregation_tout(struct timer_list *t) -{ - struct cfhsi *cfhsi = from_timer(cfhsi, t, aggregation_timer); - - netdev_dbg(cfhsi->ndev, "%s.\n", - __func__); - - cfhsi_start_tx(cfhsi); -} - -static netdev_tx_t cfhsi_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct cfhsi *cfhsi = NULL; - int start_xfer = 0; - int timer_active; - int prio; - - if (!dev) - return -EINVAL; - - cfhsi = netdev_priv(dev); - - switch (skb->priority) { - case TC_PRIO_BESTEFFORT: - case TC_PRIO_FILLER: - case TC_PRIO_BULK: - prio = CFHSI_PRIO_BEBK; - break; - case TC_PRIO_INTERACTIVE_BULK: - prio = CFHSI_PRIO_VI; - break; - case TC_PRIO_INTERACTIVE: - prio = CFHSI_PRIO_VO; - break; - case TC_PRIO_CONTROL: - default: - prio = CFHSI_PRIO_CTL; - break; - } - - spin_lock_bh(&cfhsi->lock); - - /* Update aggregation statistics */ - cfhsi_update_aggregation_stats(cfhsi, skb, 1); - - /* Queue the SKB */ - skb_queue_tail(&cfhsi->qhead[prio], skb); - - /* Sanity check; xmit should not be called after unregister_netdev */ - if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) { - spin_unlock_bh(&cfhsi->lock); - cfhsi_abort_tx(cfhsi); - return -EINVAL; - } - - /* Send flow off if number of packets is above high water mark. */ - if (!cfhsi->flow_off_sent && - cfhsi_tx_queue_len(cfhsi) > cfhsi->cfg.q_high_mark && - cfhsi->cfdev.flowctrl) { - cfhsi->flow_off_sent = 1; - cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF); - } - - if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) { - cfhsi->tx_state = CFHSI_TX_STATE_XFER; - start_xfer = 1; - } - - if (!start_xfer) { - /* Send aggregate if it is possible */ - bool aggregate_ready = - cfhsi_can_send_aggregate(cfhsi) && - del_timer(&cfhsi->aggregation_timer) > 0; - spin_unlock_bh(&cfhsi->lock); - if (aggregate_ready) - cfhsi_start_tx(cfhsi); - return NETDEV_TX_OK; - } - - /* Delete inactivity timer if started. */ - timer_active = del_timer_sync(&cfhsi->inactivity_timer); - - spin_unlock_bh(&cfhsi->lock); - - if (timer_active) { - struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf; - int len; - int res; - - /* Create HSI frame. */ - len = cfhsi_tx_frm(desc, cfhsi); - WARN_ON(!len); - - /* Set up new transfer. */ - res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops); - if (WARN_ON(res < 0)) { - netdev_err(cfhsi->ndev, "%s: TX error %d.\n", - __func__, res); - cfhsi_abort_tx(cfhsi); - } - } else { - /* Schedule wake up work queue if the we initiate. 
*/ - if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits)) - queue_work(cfhsi->wq, &cfhsi->wake_up_work); - } - - return NETDEV_TX_OK; -} - -static const struct net_device_ops cfhsi_netdevops; - -static void cfhsi_setup(struct net_device *dev) -{ - int i; - struct cfhsi *cfhsi = netdev_priv(dev); - dev->features = 0; - dev->type = ARPHRD_CAIF; - dev->flags = IFF_POINTOPOINT | IFF_NOARP; - dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ; - dev->priv_flags |= IFF_NO_QUEUE; - dev->needs_free_netdev = true; - dev->netdev_ops = &cfhsi_netdevops; - for (i = 0; i < CFHSI_PRIO_LAST; ++i) - skb_queue_head_init(&cfhsi->qhead[i]); - cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW; - cfhsi->cfdev.use_frag = false; - cfhsi->cfdev.use_stx = false; - cfhsi->cfdev.use_fcs = false; - cfhsi->ndev = dev; - cfhsi->cfg = hsi_default_config; -} - -static int cfhsi_open(struct net_device *ndev) -{ - struct cfhsi *cfhsi = netdev_priv(ndev); - int res; - - clear_bit(CFHSI_SHUTDOWN, &cfhsi->bits); - - /* Initialize state vaiables. */ - cfhsi->tx_state = CFHSI_TX_STATE_IDLE; - cfhsi->rx_state.state = CFHSI_RX_STATE_DESC; - - /* Set flow info */ - cfhsi->flow_off_sent = 0; - - /* - * Allocate a TX buffer with the size of a HSI packet descriptors - * and the necessary room for CAIF payload frames. - */ - cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL); - if (!cfhsi->tx_buf) { - res = -ENODEV; - goto err_alloc_tx; - } - - /* - * Allocate a RX buffer with the size of two HSI packet descriptors and - * the necessary room for CAIF payload frames. - */ - cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL); - if (!cfhsi->rx_buf) { - res = -ENODEV; - goto err_alloc_rx; - } - - cfhsi->rx_flip_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL); - if (!cfhsi->rx_flip_buf) { - res = -ENODEV; - goto err_alloc_rx_flip; - } - - /* Initialize aggregation timeout */ - cfhsi->cfg.aggregation_timeout = hsi_default_config.aggregation_timeout; - - /* Initialize recieve vaiables. */ - cfhsi->rx_ptr = cfhsi->rx_buf; - cfhsi->rx_len = CFHSI_DESC_SZ; - - /* Initialize spin locks. */ - spin_lock_init(&cfhsi->lock); - - /* Set up the driver. */ - cfhsi->cb_ops.tx_done_cb = cfhsi_tx_done_cb; - cfhsi->cb_ops.rx_done_cb = cfhsi_rx_done_cb; - cfhsi->cb_ops.wake_up_cb = cfhsi_wake_up_cb; - cfhsi->cb_ops.wake_down_cb = cfhsi_wake_down_cb; - - /* Initialize the work queues. */ - INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up); - INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down); - INIT_WORK(&cfhsi->out_of_sync_work, cfhsi_out_of_sync); - - /* Clear all bit fields. */ - clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits); - clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits); - clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); - clear_bit(CFHSI_AWAKE, &cfhsi->bits); - - /* Create work thread. */ - cfhsi->wq = alloc_ordered_workqueue(cfhsi->ndev->name, WQ_MEM_RECLAIM); - if (!cfhsi->wq) { - netdev_err(cfhsi->ndev, "%s: Failed to create work queue.\n", - __func__); - res = -ENODEV; - goto err_create_wq; - } - - /* Initialize wait queues. */ - init_waitqueue_head(&cfhsi->wake_up_wait); - init_waitqueue_head(&cfhsi->wake_down_wait); - init_waitqueue_head(&cfhsi->flush_fifo_wait); - - /* Setup the inactivity timer. */ - timer_setup(&cfhsi->inactivity_timer, cfhsi_inactivity_tout, 0); - /* Setup the slowpath RX timer. */ - timer_setup(&cfhsi->rx_slowpath_timer, cfhsi_rx_slowpath, 0); - /* Setup the aggregation timer. */ - timer_setup(&cfhsi->aggregation_timer, cfhsi_aggregation_tout, 0); - - /* Activate HSI interface. 
*/ - res = cfhsi->ops->cfhsi_up(cfhsi->ops); - if (res) { - netdev_err(cfhsi->ndev, - "%s: can't activate HSI interface: %d.\n", - __func__, res); - goto err_activate; - } - - /* Flush FIFO */ - res = cfhsi_flush_fifo(cfhsi); - if (res) { - netdev_err(cfhsi->ndev, "%s: Can't flush FIFO: %d.\n", - __func__, res); - goto err_net_reg; - } - return res; - - err_net_reg: - cfhsi->ops->cfhsi_down(cfhsi->ops); - err_activate: - destroy_workqueue(cfhsi->wq); - err_create_wq: - kfree(cfhsi->rx_flip_buf); - err_alloc_rx_flip: - kfree(cfhsi->rx_buf); - err_alloc_rx: - kfree(cfhsi->tx_buf); - err_alloc_tx: - return res; -} - -static int cfhsi_close(struct net_device *ndev) -{ - struct cfhsi *cfhsi = netdev_priv(ndev); - u8 *tx_buf, *rx_buf, *flip_buf; - - /* going to shutdown driver */ - set_bit(CFHSI_SHUTDOWN, &cfhsi->bits); - - /* Delete timers if pending */ - del_timer_sync(&cfhsi->inactivity_timer); - del_timer_sync(&cfhsi->rx_slowpath_timer); - del_timer_sync(&cfhsi->aggregation_timer); - - /* Cancel pending RX request (if any) */ - cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops); - - /* Destroy workqueue */ - destroy_workqueue(cfhsi->wq); - - /* Store bufferes: will be freed later. */ - tx_buf = cfhsi->tx_buf; - rx_buf = cfhsi->rx_buf; - flip_buf = cfhsi->rx_flip_buf; - /* Flush transmit queues. */ - cfhsi_abort_tx(cfhsi); - - /* Deactivate interface */ - cfhsi->ops->cfhsi_down(cfhsi->ops); - - /* Free buffers. */ - kfree(tx_buf); - kfree(rx_buf); - kfree(flip_buf); - return 0; -} - -static void cfhsi_uninit(struct net_device *dev) -{ - struct cfhsi *cfhsi = netdev_priv(dev); - ASSERT_RTNL(); - symbol_put(cfhsi_get_device); - list_del(&cfhsi->list); -} - -static const struct net_device_ops cfhsi_netdevops = { - .ndo_uninit = cfhsi_uninit, - .ndo_open = cfhsi_open, - .ndo_stop = cfhsi_close, - .ndo_start_xmit = cfhsi_xmit -}; - -static void cfhsi_netlink_parms(struct nlattr *data[], struct cfhsi *cfhsi) -{ - int i; - - if (!data) { - pr_debug("no params data found\n"); - return; - } - - i = __IFLA_CAIF_HSI_INACTIVITY_TOUT; - /* - * Inactivity timeout in millisecs. Lowest possible value is 1, - * and highest possible is NEXT_TIMER_MAX_DELTA. - */ - if (data[i]) { - u32 inactivity_timeout = nla_get_u32(data[i]); - /* Pre-calculate inactivity timeout. 
*/ - cfhsi->cfg.inactivity_timeout = inactivity_timeout * HZ / 1000; - if (cfhsi->cfg.inactivity_timeout == 0) - cfhsi->cfg.inactivity_timeout = 1; - else if (cfhsi->cfg.inactivity_timeout > NEXT_TIMER_MAX_DELTA) - cfhsi->cfg.inactivity_timeout = NEXT_TIMER_MAX_DELTA; - } - - i = __IFLA_CAIF_HSI_AGGREGATION_TOUT; - if (data[i]) - cfhsi->cfg.aggregation_timeout = nla_get_u32(data[i]); - - i = __IFLA_CAIF_HSI_HEAD_ALIGN; - if (data[i]) - cfhsi->cfg.head_align = nla_get_u32(data[i]); - - i = __IFLA_CAIF_HSI_TAIL_ALIGN; - if (data[i]) - cfhsi->cfg.tail_align = nla_get_u32(data[i]); - - i = __IFLA_CAIF_HSI_QHIGH_WATERMARK; - if (data[i]) - cfhsi->cfg.q_high_mark = nla_get_u32(data[i]); - - i = __IFLA_CAIF_HSI_QLOW_WATERMARK; - if (data[i]) - cfhsi->cfg.q_low_mark = nla_get_u32(data[i]); -} - -static int caif_hsi_changelink(struct net_device *dev, struct nlattr *tb[], - struct nlattr *data[], - struct netlink_ext_ack *extack) -{ - cfhsi_netlink_parms(data, netdev_priv(dev)); - netdev_state_change(dev); - return 0; -} - -static const struct nla_policy caif_hsi_policy[__IFLA_CAIF_HSI_MAX + 1] = { - [__IFLA_CAIF_HSI_INACTIVITY_TOUT] = { .type = NLA_U32, .len = 4 }, - [__IFLA_CAIF_HSI_AGGREGATION_TOUT] = { .type = NLA_U32, .len = 4 }, - [__IFLA_CAIF_HSI_HEAD_ALIGN] = { .type = NLA_U32, .len = 4 }, - [__IFLA_CAIF_HSI_TAIL_ALIGN] = { .type = NLA_U32, .len = 4 }, - [__IFLA_CAIF_HSI_QHIGH_WATERMARK] = { .type = NLA_U32, .len = 4 }, - [__IFLA_CAIF_HSI_QLOW_WATERMARK] = { .type = NLA_U32, .len = 4 }, -}; - -static size_t caif_hsi_get_size(const struct net_device *dev) -{ - int i; - size_t s = 0; - for (i = __IFLA_CAIF_HSI_UNSPEC + 1; i < __IFLA_CAIF_HSI_MAX; i++) - s += nla_total_size(caif_hsi_policy[i].len); - return s; -} - -static int caif_hsi_fill_info(struct sk_buff *skb, const struct net_device *dev) -{ - struct cfhsi *cfhsi = netdev_priv(dev); - - if (nla_put_u32(skb, __IFLA_CAIF_HSI_INACTIVITY_TOUT, - cfhsi->cfg.inactivity_timeout) || - nla_put_u32(skb, __IFLA_CAIF_HSI_AGGREGATION_TOUT, - cfhsi->cfg.aggregation_timeout) || - nla_put_u32(skb, __IFLA_CAIF_HSI_HEAD_ALIGN, - cfhsi->cfg.head_align) || - nla_put_u32(skb, __IFLA_CAIF_HSI_TAIL_ALIGN, - cfhsi->cfg.tail_align) || - nla_put_u32(skb, __IFLA_CAIF_HSI_QHIGH_WATERMARK, - cfhsi->cfg.q_high_mark) || - nla_put_u32(skb, __IFLA_CAIF_HSI_QLOW_WATERMARK, - cfhsi->cfg.q_low_mark)) - return -EMSGSIZE; - - return 0; -} - -static int caif_hsi_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], - struct netlink_ext_ack *extack) -{ - struct cfhsi *cfhsi = NULL; - struct cfhsi_ops *(*get_ops)(void); - - ASSERT_RTNL(); - - cfhsi = netdev_priv(dev); - cfhsi_netlink_parms(data, cfhsi); - - get_ops = symbol_get(cfhsi_get_ops); - if (!get_ops) { - pr_err("%s: failed to get the cfhsi_ops\n", __func__); - return -ENODEV; - } - - /* Assign the HSI device. */ - cfhsi->ops = (*get_ops)(); - if (!cfhsi->ops) { - pr_err("%s: failed to get the cfhsi_ops\n", __func__); - goto err; - } - - /* Assign the driver to this HSI device. */ - cfhsi->ops->cb_ops = &cfhsi->cb_ops; - if (register_netdevice(dev)) { - pr_warn("%s: caif_hsi device registration failed\n", __func__); - goto err; - } - /* Add CAIF HSI device to list. 
*/ - list_add_tail(&cfhsi->list, &cfhsi_list); - - return 0; -err: - symbol_put(cfhsi_get_ops); - return -ENODEV; -} - -static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = { - .kind = "cfhsi", - .priv_size = sizeof(struct cfhsi), - .setup = cfhsi_setup, - .maxtype = __IFLA_CAIF_HSI_MAX, - .policy = caif_hsi_policy, - .newlink = caif_hsi_newlink, - .changelink = caif_hsi_changelink, - .get_size = caif_hsi_get_size, - .fill_info = caif_hsi_fill_info, -}; - -static void __exit cfhsi_exit_module(void) -{ - struct list_head *list_node; - struct list_head *n; - struct cfhsi *cfhsi; - - rtnl_link_unregister(&caif_hsi_link_ops); - - rtnl_lock(); - list_for_each_safe(list_node, n, &cfhsi_list) { - cfhsi = list_entry(list_node, struct cfhsi, list); - unregister_netdevice(cfhsi->ndev); - } - rtnl_unlock(); -} - -static int __init cfhsi_init_module(void) -{ - return rtnl_link_register(&caif_hsi_link_ops); -} - -module_init(cfhsi_init_module); -module_exit(cfhsi_exit_module); diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 961fa6b75cad..beb41572d04e 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3583,6 +3583,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .port_set_speed_duplex = mv88e6341_port_set_speed_duplex, .port_max_speed_mode = mv88e6341_port_max_speed_mode, .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_policy = mv88e6352_port_set_policy, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_ucast_flood = mv88e6352_port_set_ucast_flood, .port_set_mcast_flood = mv88e6352_port_set_mcast_flood, @@ -3596,7 +3597,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .port_set_cmode = mv88e6341_port_set_cmode, .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6390_g1_stats_snapshot, - .stats_set_histogram = mv88e6095_g1_stats_set_histogram, + .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, .stats_get_strings = mv88e6320_stats_get_strings, .stats_get_stats = mv88e6390_stats_get_stats, @@ -3606,6 +3607,9 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6390_g1_rmu_disable, + .atu_get_hash = mv88e6165_g1_atu_get_hash, + .atu_set_hash = mv88e6165_g1_atu_set_hash, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, @@ -3619,6 +3623,11 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .serdes_irq_enable = mv88e6390_serdes_irq_enable, .serdes_irq_status = mv88e6390_serdes_irq_status, .gpio_ops = &mv88e6352_gpio_ops, + .serdes_get_sset_count = mv88e6390_serdes_get_sset_count, + .serdes_get_strings = mv88e6390_serdes_get_strings, + .serdes_get_stats = mv88e6390_serdes_get_stats, + .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, + .serdes_get_regs = mv88e6390_serdes_get_regs, .phylink_validate = mv88e6341_phylink_validate, }; @@ -4383,6 +4392,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .port_set_speed_duplex = mv88e6341_port_set_speed_duplex, .port_max_speed_mode = mv88e6341_port_max_speed_mode, .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_policy = mv88e6352_port_set_policy, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_ucast_flood = mv88e6352_port_set_ucast_flood, .port_set_mcast_flood = mv88e6352_port_set_mcast_flood, @@ -4396,7 
+4406,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .port_set_cmode = mv88e6341_port_set_cmode, .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6390_g1_stats_snapshot, - .stats_set_histogram = mv88e6095_g1_stats_set_histogram, + .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, .stats_get_strings = mv88e6320_stats_get_strings, .stats_get_stats = mv88e6390_stats_get_stats, @@ -4406,6 +4416,9 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6390_g1_rmu_disable, + .atu_get_hash = mv88e6165_g1_atu_get_hash, + .atu_set_hash = mv88e6165_g1_atu_set_hash, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, @@ -4421,6 +4434,11 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, + .serdes_get_sset_count = mv88e6390_serdes_get_sset_count, + .serdes_get_strings = mv88e6390_serdes_get_strings, + .serdes_get_stats = mv88e6390_serdes_get_stats, + .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, + .serdes_get_regs = mv88e6390_serdes_get_regs, .phylink_validate = mv88e6341_phylink_validate, }; diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index e4fbef81bc52..b1d46dd8eaab 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -722,7 +722,7 @@ static struct mv88e6390_serdes_hw_stat mv88e6390_serdes_hw_stats[] = { int mv88e6390_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port) { - if (mv88e6390_serdes_get_lane(chip, port) < 0) + if (mv88e6xxx_serdes_get_lane(chip, port) < 0) return 0; return ARRAY_SIZE(mv88e6390_serdes_hw_stats); @@ -734,7 +734,7 @@ int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip, struct mv88e6390_serdes_hw_stat *stat; int i; - if (mv88e6390_serdes_get_lane(chip, port) < 0) + if (mv88e6xxx_serdes_get_lane(chip, port) < 0) return 0; for (i = 0; i < ARRAY_SIZE(mv88e6390_serdes_hw_stats); i++) { @@ -770,7 +770,7 @@ int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port, int lane; int i; - lane = mv88e6390_serdes_get_lane(chip, port); + lane = mv88e6xxx_serdes_get_lane(chip, port); if (lane < 0) return 0; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 41f7f078cd27..35e9956e930c 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1640,7 +1640,8 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv, switch (mode) { case GENET_POWER_PASSIVE: - reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS); + reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS | + EXT_ENERGY_DET_MASK); if (GENET_IS_V5(priv)) { reg &= ~(EXT_PWR_DOWN_PHY_EN | EXT_PWR_DOWN_PHY_RD | @@ -3292,7 +3293,6 @@ static int bcmgenet_open(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); unsigned long dma_ctrl; - u32 reg; int ret; netif_dbg(priv, ifup, dev, "bcmgenet_open\n"); @@ -3318,12 +3318,6 @@ static int bcmgenet_open(struct net_device *dev) bcmgenet_set_hw_addr(priv, dev->dev_addr); - if (priv->internal_phy) { - reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); - reg |= EXT_ENERGY_DET_MASK; - bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); - } - /* Disable 
RX/TX DMA and flush TX queues */ dma_ctrl = bcmgenet_dma_disable(priv); @@ -4139,7 +4133,6 @@ static int bcmgenet_resume(struct device *d) struct bcmgenet_priv *priv = netdev_priv(dev); struct bcmgenet_rxnfc_rule *rule; unsigned long dma_ctrl; - u32 reg; int ret; if (!netif_running(dev)) @@ -4176,12 +4169,6 @@ static int bcmgenet_resume(struct device *d) if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) bcmgenet_hfb_create_rxnfc_filter(priv, rule); - if (priv->internal_phy) { - reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); - reg |= EXT_ENERGY_DET_MASK; - bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); - } - /* Disable RX/TX DMA and flush TX queues */ dma_ctrl = bcmgenet_dma_disable(priv); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index facde824bcaa..e31a5a397f11 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c @@ -186,12 +186,6 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, reg |= CMD_RX_EN; bcmgenet_umac_writel(priv, reg, UMAC_CMD); - if (priv->hw_params->flags & GENET_HAS_EXT) { - reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); - reg &= ~EXT_ENERGY_DET_MASK; - bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); - } - reg = UMAC_IRQ_MPD_R; if (hfb_enable) reg |= UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM; diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 374a75d4faea..ed77191d19f4 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -2420,9 +2420,10 @@ out: static void __ibmvnic_reset(struct work_struct *work) { - struct ibmvnic_rwi *rwi; struct ibmvnic_adapter *adapter; bool saved_state = false; + struct ibmvnic_rwi *tmprwi; + struct ibmvnic_rwi *rwi; unsigned long flags; u32 reset_state; int rc = 0; @@ -2489,7 +2490,7 @@ static void __ibmvnic_reset(struct work_struct *work) } else { rc = do_reset(adapter, rwi, reset_state); } - kfree(rwi); + tmprwi = rwi; adapter->last_reset_time = jiffies; if (rc) @@ -2497,8 +2498,23 @@ static void __ibmvnic_reset(struct work_struct *work) rwi = get_next_rwi(adapter); + /* + * If there is another reset queued, free the previous rwi + * and process the new reset even if previous reset failed + * (the previous reset could have failed because of a fail + * over for instance, so process the fail over). + * + * If there are no resets queued and the previous reset failed, + * the adapter would be in an undefined state. So retry the + * previous reset as a hard reset. 
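The ibmvnic comment above pins down the new requeue rule in __ibmvnic_reset(): a newly queued reset supersedes the finished rwi, which is then freed; a failure with nothing queued recycles the same rwi as a hard reset, so the adapter cannot be left in an undefined state. A compact userspace model of that decision, with free() standing in for kfree():

#include <stdio.h>
#include <stdlib.h>

struct rwi { int reason; };

static struct rwi *next_rwi(struct rwi *queued, struct rwi *done, int rc)
{
    if (queued) {
        free(done);          /* newer request wins, drop the old rwi */
        return queued;
    }
    if (rc)
        return done;         /* nothing queued and we failed: retry harder */
    free(done);
    return NULL;             /* all done */
}

int main(void)
{
    struct rwi *done = malloc(sizeof(*done));
    struct rwi *r = next_rwi(NULL, done, -1);

    printf("retrying? %s\n", r ? "yes" : "no");
    free(r);
    return 0;
}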
+ */ + if (rwi) + kfree(tmprwi); + else if (rc) + rwi = tmprwi; + if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER || - rwi->reset_reason == VNIC_RESET_MOBILITY)) + rwi->reset_reason == VNIC_RESET_MOBILITY || rc)) adapter->force_reset_recovery = true; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index fac6474ad694..9169849881bf 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -86,6 +86,22 @@ bool is_lmac_valid(struct cgx *cgx, int lmac_id) return test_bit(lmac_id, &cgx->lmac_bmap); } +/* Helper function to get sequential index + * given the enabled LMAC of a CGX + */ +static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id) +{ + int tmp, id = 0; + + for_each_set_bit(tmp, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) { + if (tmp == lmac_id) + break; + id++; + } + + return id; +} + struct mac_ops *get_mac_ops(void *cgxd) { if (!cgxd) @@ -211,37 +227,257 @@ static u64 mac2u64 (u8 *mac_addr) return mac; } +static void cfg2mac(u64 cfg, u8 *mac_addr) +{ + int i, index = 0; + + for (i = ETH_ALEN - 1; i >= 0; i--, index++) + mac_addr[i] = (cfg >> (8 * index)) & 0xFF; +} + int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr) { struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); struct mac_ops *mac_ops; + int index, id; u64 cfg; + /* access mac_ops to know csr_offset */ mac_ops = cgx_dev->mac_ops; + /* copy 6bytes from macaddr */ /* memcpy(&cfg, mac_addr, 6); */ cfg = mac2u64 (mac_addr); - cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)), + id = get_sequence_id_of_lmac(cgx_dev, lmac_id); + + index = id * lmac->mac_to_index_bmap.max; + + cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49)); cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); - cfg |= CGX_DMAC_CTL0_CAM_ENABLE; + cfg |= (CGX_DMAC_CTL0_CAM_ENABLE | CGX_DMAC_BCAST_MODE | + CGX_DMAC_MCAST_MODE); cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); return 0; } +u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id) +{ + struct mac_ops *mac_ops; + struct cgx *cgx = cgxd; + + if (!cgxd || !is_lmac_valid(cgxd, lmac_id)) + return 0; + + cgx = cgxd; + /* Get mac_ops to know csr offset */ + mac_ops = cgx->mac_ops; + + return cgx_read(cgxd, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); +} + +u64 cgx_read_dmac_entry(void *cgxd, int index) +{ + struct mac_ops *mac_ops; + struct cgx *cgx; + + if (!cgxd) + return 0; + + cgx = cgxd; + mac_ops = cgx->mac_ops; + return cgx_read(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 8))); +} + +int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); + struct mac_ops *mac_ops; + int index, idx; + u64 cfg = 0; + int id; + + if (!lmac) + return -ENODEV; + + mac_ops = cgx_dev->mac_ops; + /* Get available index where entry is to be installed */ + idx = rvu_alloc_rsrc(&lmac->mac_to_index_bmap); + if (idx < 0) + return idx; + + id = get_sequence_id_of_lmac(cgx_dev, lmac_id); + + index = id * lmac->mac_to_index_bmap.max + idx; + + cfg = mac2u64 (mac_addr); + cfg |= CGX_DMAC_CAM_ADDR_ENABLE; + cfg |= ((u64)lmac_id << 49); + cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg); + + cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); + cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_CAM_ACCEPT); + + if (is_multicast_ether_addr(mac_addr)) { + cfg &= 
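cgx_lmac_addr_set() above now derives the CAM slot from get_sequence_id_of_lmac() rather than from the raw LMAC id: the shared CAM is carved into equal slices per enabled LMAC, and the slice base is the sequence id times the per-LMAC quota. A standalone sketch of that indexing (bitmap and sizes invented):

#include <stdio.h>

#define MAX_LMAC_PER_CGX         4
#define MAX_DMAC_ENTRIES_PER_CGX 32

/* Model of get_sequence_id_of_lmac(): the slice index is the number of
 * enabled LMACs below this one in the bitmap, not the raw lmac_id. */
static int seq_id(unsigned long lmac_bmap, int lmac_id)
{
    int id = 0;

    for (int bit = 0; bit < MAX_LMAC_PER_CGX; bit++) {
        if (!(lmac_bmap & (1UL << bit)))
            continue;
        if (bit == lmac_id)
            return id;
        id++;
    }
    return -1;
}

int main(void)
{
    unsigned long bmap = 0x5;                     /* only LMACs 0 and 2 enabled */
    int per_lmac = MAX_DMAC_ENTRIES_PER_CGX / 2;  /* two bits set in the bitmap */
    int id = seq_id(bmap, 2);                     /* sequential id 1, not 2 */

    printf("lmac 2 -> slice %d, CAM base entry %d\n", id, id * per_lmac);
    return 0;
}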
~GENMASK_ULL(2, 1); + cfg |= CGX_DMAC_MCAST_MODE_CAM; + lmac->mcast_filters_count++; + } else if (!lmac->mcast_filters_count) { + cfg |= CGX_DMAC_MCAST_MODE; + } + + cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); + + return idx; +} + +int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); + struct mac_ops *mac_ops; + u8 index = 0, id; + u64 cfg; + + if (!lmac) + return -ENODEV; + + mac_ops = cgx_dev->mac_ops; + /* Restore index 0 to its default init value as done during + * cgx_lmac_init + */ + set_bit(0, lmac->mac_to_index_bmap.bmap); + + id = get_sequence_id_of_lmac(cgx_dev, lmac_id); + + index = id * lmac->mac_to_index_bmap.max + index; + cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0); + + /* Reset CGXX_CMRX_RX_DMAC_CTL0 register to default state */ + cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); + cfg &= ~CGX_DMAC_CAM_ACCEPT; + cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE); + cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); + + return 0; +} + +/* Allows caller to change macaddress associated with index + * in dmac filter table including index 0 reserved for + * interface mac address + */ +int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct mac_ops *mac_ops; + struct lmac *lmac; + u64 cfg; + int id; + + lmac = lmac_pdata(lmac_id, cgx_dev); + if (!lmac) + return -ENODEV; + + mac_ops = cgx_dev->mac_ops; + /* Validate the index */ + if (index >= lmac->mac_to_index_bmap.max) + return -EINVAL; + + /* ensure index is already set */ + if (!test_bit(index, lmac->mac_to_index_bmap.bmap)) + return -EINVAL; + + id = get_sequence_id_of_lmac(cgx_dev, lmac_id); + + index = id * lmac->mac_to_index_bmap.max + index; + + cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8))); + cfg &= ~CGX_RX_DMAC_ADR_MASK; + cfg |= mac2u64 (mac_addr); + + cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg); + return 0; +} + +int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); + struct mac_ops *mac_ops; + u8 mac[ETH_ALEN]; + u64 cfg; + int id; + + if (!lmac) + return -ENODEV; + + mac_ops = cgx_dev->mac_ops; + /* Validate the index */ + if (index >= lmac->mac_to_index_bmap.max) + return -EINVAL; + + /* Skip deletion for reserved index i.e. 
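mac2u64() and the new cfg2mac() are straightforward inverses, packing the six MAC bytes into the low 48 bits of a CAM word and back. The round trip, verified in isolation:

#include <stdio.h>
#include <stdint.h>

/* Models mac2u64()/cfg2mac() from the patch: byte 0 of the MAC ends up
 * in the most significant of the low 48 bits. */
static uint64_t mac_to_u64(const uint8_t mac[6])
{
    uint64_t cfg = 0;

    for (int i = 0; i < 6; i++)
        cfg = (cfg << 8) | mac[i];
    return cfg;
}

static void u64_to_mac(uint64_t cfg, uint8_t mac[6])
{
    for (int i = 5; i >= 0; i--, cfg >>= 8)
        mac[i] = cfg & 0xFF;
}

int main(void)
{
    uint8_t in[6] = { 0x02, 0x00, 0xde, 0xad, 0xbe, 0xef }, out[6];
    uint64_t cfg = mac_to_u64(in);

    u64_to_mac(cfg, out);
    printf("cfg=%012llx mac=%02x:%02x:%02x:%02x:%02x:%02x\n",
           (unsigned long long)cfg,
           out[0], out[1], out[2], out[3], out[4], out[5]);
    return 0;
}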
index 0 */ + if (index == 0) + return 0; + + rvu_free_rsrc(&lmac->mac_to_index_bmap, index); + + id = get_sequence_id_of_lmac(cgx_dev, lmac_id); + + index = id * lmac->mac_to_index_bmap.max + index; + + /* Read MAC address to check whether it is ucast or mcast */ + cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8))); + + cfg2mac(cfg, mac); + if (is_multicast_ether_addr(mac)) + lmac->mcast_filters_count--; + + if (!lmac->mcast_filters_count) { + cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); + cfg &= ~GENMASK_ULL(2, 1); + cfg |= CGX_DMAC_MCAST_MODE; + cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); + } + + cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0); + + return 0; +} + +int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); + + if (lmac) + return lmac->mac_to_index_bmap.max; + + return 0; +} + u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id) { struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); struct mac_ops *mac_ops; + int index; u64 cfg; + int id; mac_ops = cgx_dev->mac_ops; - cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8); + id = get_sequence_id_of_lmac(cgx_dev, lmac_id); + + index = id * lmac->mac_to_index_bmap.max; + + cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8); return cfg & CGX_RX_DMAC_ADR_MASK; } @@ -297,35 +533,51 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable) void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable) { struct cgx *cgx = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx); + u16 max_dmac = lmac->mac_to_index_bmap.max; struct mac_ops *mac_ops; + int index, i; u64 cfg = 0; + int id; if (!cgx) return; + id = get_sequence_id_of_lmac(cgx, lmac_id); + mac_ops = cgx->mac_ops; if (enable) { /* Enable promiscuous mode on LMAC */ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); - cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE); - cfg |= CGX_DMAC_BCAST_MODE; + cfg &= ~CGX_DMAC_CAM_ACCEPT; + cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE); cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); - cfg = cgx_read(cgx, 0, - (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8)); - cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE; - cgx_write(cgx, 0, - (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg); + for (i = 0; i < max_dmac; i++) { + index = id * max_dmac + i; + cfg = cgx_read(cgx, 0, + (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8)); + cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE; + cgx_write(cgx, 0, + (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8), cfg); + } } else { /* Disable promiscuous mode */ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE; cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); - cfg = cgx_read(cgx, 0, - (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8)); - cfg |= CGX_DMAC_CAM_ADDR_ENABLE; - cgx_write(cgx, 0, - (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg); + for (i = 0; i < max_dmac; i++) { + index = id * max_dmac + i; + cfg = cgx_read(cgx, 0, + (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8)); + if ((cfg & CGX_RX_DMAC_ADR_MASK) != 0) { + cfg |= CGX_DMAC_CAM_ADDR_ENABLE; + cgx_write(cgx, 0, + (CGXX_CMRX_RX_DMAC_CAM0 + + index * 0x8), + cfg); + } + } } } @@ -1234,6 +1486,15 @@ static int cgx_lmac_init(struct cgx *cgx) } lmac->cgx = cgx; + lmac->mac_to_index_bmap.max = + MAX_DMAC_ENTRIES_PER_CGX / cgx->lmac_count; + err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap); + if 
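cgx_lmac_promisc_config() above now toggles CGX_DMAC_CAM_ADDR_ENABLE across the LMAC's entire CAM slice rather than one fixed entry, and when promiscuous mode ends it only re-arms slots that still hold an address. The same logic on plain integers:

#include <stdio.h>
#include <stdint.h>

#define CAM_ADDR_ENABLE (1ULL << 48)
#define ADDR_MASK       ((1ULL << 48) - 1)
#define SLOTS 8  /* per-LMAC slice size in this model */

/* Model of cgx_lmac_promisc_config(): leaving promisc re-arms only
 * slots that still hold a MAC, so freed entries stay disabled. */
static void set_promisc(uint64_t cam[SLOTS], int enable)
{
    for (int i = 0; i < SLOTS; i++) {
        if (enable)
            cam[i] &= ~CAM_ADDR_ENABLE;
        else if (cam[i] & ADDR_MASK)
            cam[i] |= CAM_ADDR_ENABLE;
    }
}

int main(void)
{
    uint64_t cam[SLOTS] = { 0xdeadbeefULL | CAM_ADDR_ENABLE, 0 };

    set_promisc(cam, 1);
    set_promisc(cam, 0);
    printf("slot0 enabled=%d slot1 enabled=%d\n",
           !!(cam[0] & CAM_ADDR_ENABLE), !!(cam[1] & CAM_ADDR_ENABLE));
    return 0;
}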
(err) + return err; + + /* Reserve first entry for default MAC address */ + set_bit(0, lmac->mac_to_index_bmap.bmap); + init_waitqueue_head(&lmac->wq_cmd_cmplt); mutex_init(&lmac->cmd_lock); spin_lock_init(&lmac->event_cb_lock); @@ -1274,6 +1535,7 @@ static int cgx_lmac_exit(struct cgx *cgx) continue; cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false); cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true); + kfree(lmac->mac_to_index_bmap.bmap); kfree(lmac->name); kfree(lmac); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h index 12521262164a..237ba2b56210 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h @@ -23,6 +23,7 @@ #define CGX_ID_MASK 0x7 #define MAX_LMAC_PER_CGX 4 +#define MAX_DMAC_ENTRIES_PER_CGX 32 #define CGX_FIFO_LEN 65536 /* 64K for both Rx & Tx */ #define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX) @@ -46,10 +47,12 @@ #define CGXX_CMRX_RX_DMAC_CTL0 (0x1F8 + mac_ops->csr_offset) #define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3) #define CGX_DMAC_CAM_ACCEPT BIT_ULL(3) +#define CGX_DMAC_MCAST_MODE_CAM BIT_ULL(2) #define CGX_DMAC_MCAST_MODE BIT_ULL(1) #define CGX_DMAC_BCAST_MODE BIT_ULL(0) #define CGXX_CMRX_RX_DMAC_CAM0 (0x200 + mac_ops->csr_offset) #define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48) +#define CGX_DMAC_CAM_ENTRY_LMACID GENMASK_ULL(50, 49) #define CGXX_CMRX_RX_DMAC_CAM1 0x400 #define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0) #define CGXX_CMRX_TX_STAT0 0x700 @@ -139,7 +142,11 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat); int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable); int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable); int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr); +int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id); u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id); +int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr); +int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index); +int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id); void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable); void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable); int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable); @@ -165,4 +172,7 @@ u8 cgx_get_lmacid(void *cgxd, u8 lmac_index); unsigned long cgx_get_lmac_bmap(void *cgxd); void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val); u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset); +int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index); +u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id); +u64 cgx_read_dmac_entry(void *cgxd, int index); #endif /* CGX_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h index 45706fd87120..a8b7b1c7a1d5 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h @@ -10,17 +10,19 @@ #include "rvu.h" #include "cgx.h" /** - * struct lmac + * struct lmac - per lmac locks and properties * @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion * @cmd_lock: Lock to serialize the command interface * @resp: command response * @link_info: link related information + * @mac_to_index_bmap: Mac address to CGX table index mapping * @event_cb: callback for linkchange events * @event_cb_lock: lock for serializing callback with unregister - * @cmd_pend: flag set before new command is started - * flag 
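cgx_lmac_init() sizes mac_to_index_bmap as MAX_DMAC_ENTRIES_PER_CGX divided by the LMAC count and immediately pins bit 0 for the interface's default MAC; cgx_lmac_addr_add() then takes the first clear bit, rvu_alloc_rsrc()-style. A toy allocator showing the reserved slot:

#include <stdio.h>

/* Tiny model of the mac_to_index_bmap usage: bit 0 is reserved at
 * init, later filters take the first clear bit, delete clears it. */
static int alloc_idx(unsigned long *bmap, int max)
{
    for (int i = 0; i < max; i++) {
        if (!(*bmap & (1UL << i))) {
            *bmap |= 1UL << i;
            return i;
        }
    }
    return -1; /* table full */
}

int main(void)
{
    unsigned long bmap = 0;
    int max = 8;                 /* 32 CAM entries / 4 LMACs */

    bmap |= 1UL << 0;            /* reserve index 0 for the default MAC */
    printf("first filter -> idx %d\n", alloc_idx(&bmap, max)); /* 1 */
    bmap &= ~(1UL << 1);         /* delete the filter */
    printf("re-add       -> idx %d\n", alloc_idx(&bmap, max)); /* 1 again */
    return 0;
}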
cleared after command response is received * @cgx: parent cgx port + * @mcast_filters_count: Number of multicast filters installed * @lmac_id: lmac port id + * @cmd_pend: flag set before new command is started + * flag cleared after command response is received * @name: lmac port name */ struct lmac { @@ -29,12 +31,14 @@ struct lmac { struct mutex cmd_lock; u64 resp; struct cgx_link_user_info link_info; + struct rsrc_bmap mac_to_index_bmap; struct cgx_event_cb event_cb; /* lock for serializing callback with unregister */ spinlock_t event_cb_lock; - bool cmd_pend; struct cgx *cgx; + u8 mcast_filters_count; u8 lmac_id; + bool cmd_pend; char *name; }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index 770d86262838..f5ec39de026a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -134,6 +134,8 @@ M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \ M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \ M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \ M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \ +M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req, \ + msg_rsp) \ M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \ /* CGX mbox IDs (range 0x200 - 0x3FF) */ \ M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \ @@ -163,7 +165,15 @@ M(CGX_SET_LINK_MODE, 0x214, cgx_set_link_mode, cgx_set_link_mode_req,\ M(CGX_FEATURES_GET, 0x215, cgx_features_get, msg_req, \ cgx_features_info_msg) \ M(RPM_STATS, 0x216, rpm_stats, msg_req, rpm_stats_rsp) \ - /* NPA mbox IDs (range 0x400 - 0x5FF) */ \ +M(CGX_MAC_ADDR_ADD, 0x217, cgx_mac_addr_add, cgx_mac_addr_add_req, \ + cgx_mac_addr_add_rsp) \ +M(CGX_MAC_ADDR_DEL, 0x218, cgx_mac_addr_del, cgx_mac_addr_del_req, \ + msg_rsp) \ +M(CGX_MAC_MAX_ENTRIES_GET, 0x219, cgx_mac_max_entries_get, msg_req, \ + cgx_max_dmac_entries_get_rsp) \ +M(CGX_MAC_ADDR_RESET, 0x21A, cgx_mac_addr_reset, msg_req, msg_rsp) \ +M(CGX_MAC_ADDR_UPDATE, 0x21B, cgx_mac_addr_update, cgx_mac_addr_update_req, \ + msg_rsp) \ /* NPA mbox IDs (range 0x400 - 0x5FF) */ \ M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \ npa_lf_alloc_req, npa_lf_alloc_rsp) \ @@ -401,6 +411,38 @@ struct cgx_mac_addr_set_or_get { u8 mac_addr[ETH_ALEN]; }; +/* Structure for requesting the operation to + * add DMAC filter entry into CGX interface + */ +struct cgx_mac_addr_add_req { + struct mbox_msghdr hdr; + u8 mac_addr[ETH_ALEN]; +}; + +/* Structure for response against the operation to + * add DMAC filter entry into CGX interface + */ +struct cgx_mac_addr_add_rsp { + struct mbox_msghdr hdr; + u8 index; +}; + +/* Structure for requesting the operation to + * delete DMAC filter entry from CGX interface + */ +struct cgx_mac_addr_del_req { + struct mbox_msghdr hdr; + u8 index; +}; + +/* Structure for response against the operation to + * get maximum supported DMAC filter entries + */ +struct cgx_max_dmac_entries_get_rsp { + struct mbox_msghdr hdr; + u8 max_dmac_filters; +}; + struct cgx_link_user_info { uint64_t link_up:1; uint64_t full_duplex:1; @@ -499,6 +541,12 @@ struct cgx_set_link_mode_rsp { int status; }; +struct cgx_mac_addr_update_req { + struct mbox_msghdr hdr; + u8 mac_addr[ETH_ALEN]; + u8 index; +}; + #define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */ #define RVU_LMAC_FEAT_PTP BIT_ULL(1) /* precision time protocol */ #define RVU_MAC_VERSION BIT_ULL(2) @@ -1278,6 +1326,14 @@ struct set_vf_perm { u64 flags; }; +struct lmtst_tbl_setup_req { + struct 
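The new M() rows in mbox.h follow the header's X-macro scheme: one table row per message carries the numeric id plus the request/response types, and the table is expanded several ways elsewhere. A compressed illustration (entries cut down, payload types omitted):

#include <stdio.h>

/* Illustration of the M() X-macro pattern: one table, expanded once
 * into an enum of ids and again into name strings. */
#define MBOX_MESSAGES            \
    M(MAC_ADDR_ADD, 0x217)       \
    M(MAC_ADDR_DEL, 0x218)       \
    M(LMTST_TBL_SETUP, 0x00a)

enum mbox_id {
#define M(name, id) MBOX_MSG_##name = id,
    MBOX_MESSAGES
#undef M
};

static const char *mbox_name(int id)
{
#define M(name, _id) if (id == _id) return #name;
    MBOX_MESSAGES
#undef M
    return "unknown";
}

int main(void)
{
    printf("0x%x -> %s\n", MBOX_MSG_MAC_ADDR_ADD,
           mbox_name(MBOX_MSG_MAC_ADDR_ADD));
    return 0;
}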
mbox_msghdr hdr; + u16 base_pcifunc; + u8 use_local_lmt_region; + u64 lmt_iova; + u64 rsvd[4]; +}; + /* CPT mailbox error codes * Range 901 - 1000. */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 0b092949d7ac..10cddf1ac7b9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -2333,6 +2333,7 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc) rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW); rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO); rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA); + rvu_reset_lmt_map_tbl(rvu, pcifunc); rvu_detach_rsrcs(rvu, NULL, pcifunc); mutex_unlock(&rvu->flr_lock); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 9e5d9ba6f01e..10e58a5d5861 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -243,6 +243,7 @@ struct rvu_pfvf { u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */ u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */ u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */ + u64 lmt_base_addr; /* Preseving the pcifunc's lmtst base addr*/ unsigned long flags; }; @@ -656,6 +657,8 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable); int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start); int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, int index, int rxtxflag, u64 *stat); +void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc); + /* NPA APIs */ int rvu_npa_init(struct rvu *rvu); void rvu_npa_freemem(struct rvu *rvu); @@ -741,6 +744,7 @@ void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature); u32 rvu_cgx_get_fifolen(struct rvu *rvu); void *rvu_first_cgx_pdata(struct rvu *rvu); +int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id); int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf, int type); @@ -754,6 +758,9 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot); int rvu_set_channels_base(struct rvu *rvu); void rvu_program_channels(struct rvu *rvu); +/* CN10K RVU - LMT*/ +void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc); + #ifdef CONFIG_DEBUG_FS void rvu_dbg_init(struct rvu *rvu); void rvu_dbg_exit(struct rvu *rvu); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index 6e2bf4fcd29c..6cc8fbb7190c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -63,7 +63,7 @@ static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id) return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id]; } -static int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id) +int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id) { unsigned long pfmap; @@ -454,6 +454,31 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start) return 0; } +void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc) +{ + int pf = rvu_get_pf(pcifunc); + int i = 0, lmac_count = 0; + u8 max_dmac_filters; + u8 cgx_id, lmac_id; + void *cgx_dev; + + if (!is_cgx_config_permitted(rvu, pcifunc)) + return; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + cgx_dev = cgx_get_pdata(cgx_id); + lmac_count = cgx_get_lmac_cnt(cgx_dev); + max_dmac_filters = 
MAX_DMAC_ENTRIES_PER_CGX / lmac_count; + + for (i = 0; i < max_dmac_filters; i++) + cgx_lmac_addr_del(cgx_id, lmac_id, i); + + /* As cgx_lmac_addr_del does not clear entry for index 0 + * so it needs to be done explicitly + */ + cgx_lmac_addr_reset(cgx_id, lmac_id); +} + int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { @@ -557,6 +582,63 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu, return 0; } +int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu, + struct cgx_mac_addr_add_req *req, + struct cgx_mac_addr_add_rsp *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + int rc = 0; + + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) + return -EPERM; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr); + if (rc >= 0) { + rsp->index = rc; + return 0; + } + + return rc; +} + +int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu, + struct cgx_mac_addr_del_req *req, + struct msg_rsp *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) + return -EPERM; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + return cgx_lmac_addr_del(cgx_id, lmac_id, req->index); +} + +int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu, + struct msg_req *req, + struct cgx_max_dmac_entries_get_rsp + *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + + /* If msg is received from PFs(which are not mapped to CGX LMACs) + * or VF then no entries are allocated for DMAC filters at CGX level. + * So returning zero. + */ + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) { + rsp->max_dmac_filters = 0; + return 0; + } + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id); + return 0; +} + int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req, struct cgx_mac_addr_set_or_get *rsp) @@ -953,3 +1035,30 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu, rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac); return 0; } + +int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) + return -EPERM; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + return cgx_lmac_addr_reset(cgx_id, lmac_id); +} + +int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu, + struct cgx_mac_addr_update_req *req, + struct msg_rsp *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) + return -EPERM; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c index 7d9e71c6965f..8d48b64485c6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c @@ -10,6 +10,206 @@ #include "cgx.h" #include "rvu_reg.h" +/* RVU LMTST */ +#define LMT_TBL_OP_READ 0 +#define LMT_TBL_OP_WRITE 1 +#define LMT_MAP_TABLE_SIZE (128 * 1024) +#define LMT_MAPTBL_ENTRY_SIZE 16 + +/* Function to perform operations (read/write) on 
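Each rvu_mbox_handler_cgx_mac_addr_*() added above has the same skeleton: reject pcifuncs with no CGX LMAC mapping, translate PF to (cgx, lmac), delegate to the cgx layer, and copy any allocated index into the response. A schematic version with stub helpers (all names here are stand-ins):

#include <stdio.h>
#include <errno.h>

static int pf_has_lmac(int pf) { return pf < 4; }

static void pf_to_cgx(int pf, int *cgx, int *lmac)
{
    *cgx = pf / 2;
    *lmac = pf % 2;
}

static int cgx_addr_add(int cgx, int lmac) { return 1; /* CAM index */ }

static int handler_mac_addr_add(int pf, int *index)
{
    int cgx, lmac, rc;

    if (!pf_has_lmac(pf))
        return -EPERM;
    pf_to_cgx(pf, &cgx, &lmac);
    rc = cgx_addr_add(cgx, lmac);
    if (rc < 0)
        return rc;           /* allocation failure propagates */
    *index = rc;             /* success: CAM index goes in the rsp */
    return 0;
}

int main(void)
{
    int idx, rc = handler_mac_addr_add(1, &idx);

    printf("rc=%d idx=%d\n", rc, idx);
    return 0;
}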
lmtst map table */ +static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, + int lmt_tbl_op) +{ + void __iomem *lmt_map_base; + u64 tbl_base; + + tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); + + lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE); + if (!lmt_map_base) { + dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n"); + return -ENOMEM; + } + + if (lmt_tbl_op == LMT_TBL_OP_READ) { + *val = readq(lmt_map_base + index); + } else { + writeq((*val), (lmt_map_base + index)); + /* Flushing the AP interceptor cache to make APR_LMT_MAP_ENTRY_S + * changes effective. Write 1 for flush and read is being used as a + * barrier and sets up a data dependency. Write to 0 after a write + * to 1 to complete the flush. + */ + rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, BIT_ULL(0)); + rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CTL); + rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, 0x00); + } + + iounmap(lmt_map_base); + return 0; +} + +static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc) +{ + return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) + + (pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE; +} + +static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc, + u64 iova, u64 *lmt_addr) +{ + u64 pa, val, pf; + int err; + + if (!iova) { + dev_err(rvu->dev, "%s Requested Null address for transulation\n", __func__); + return -EINVAL; + } + + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova); + pf = rvu_get_pf(pcifunc) & 0x1F; + val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 | + ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF); + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val); + + err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS, BIT_ULL(0), false); + if (err) { + dev_err(rvu->dev, "%s LMTLINE iova transulation failed\n", __func__); + return err; + } + val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS); + if (val & ~0x1ULL) { + dev_err(rvu->dev, "%s LMTLINE iova transulation failed err:%llx\n", __func__, val); + return -EIO; + } + /* PA[51:12] = RVU_AF_SMMU_TLN_FLIT1[60:21] + * PA[11:0] = IOVA[11:0] + */ + pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT1) >> 21; + pa &= GENMASK_ULL(39, 0); + *lmt_addr = (pa << 12) | (iova & 0xFFF); + + return 0; +} + +static int rvu_update_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 lmt_addr) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + u32 tbl_idx; + int err = 0; + u64 val; + + /* Read the current lmt addr of pcifunc */ + tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc); + err = lmtst_map_table_ops(rvu, tbl_idx, &val, LMT_TBL_OP_READ); + if (err) { + dev_err(rvu->dev, + "Failed to read LMT map table: index 0x%x err %d\n", + tbl_idx, err); + return err; + } + + /* Storing the seondary's lmt base address as this needs to be + * reverted in FLR. Also making sure this default value doesn't + * get overwritten on multiple calls to this mailbox. 
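Two bits of arithmetic above deserve restating. rvu_get_lmtst_tbl_index() lays the map table out as one 16-byte entry per pcifunc at slot pf * total_vfs + func, and rvu_get_lmtaddr() splices the SMMU-walked frame number with the low 12 IOVA bits. Both reproduced in a standalone sketch:

#include <stdio.h>
#include <stdint.h>

#define LMT_MAPTBL_ENTRY_SIZE 16

/* Models rvu_get_lmtst_tbl_index(): each pcifunc owns one 16-byte
 * map entry, laid out as pf * total_vfs + func. */
static uint32_t lmt_tbl_index(int pf, int func, int total_vfs)
{
    return ((uint32_t)pf * total_vfs + func) * LMT_MAPTBL_ENTRY_SIZE;
}

/* Models the PA composition in rvu_get_lmtaddr():
 * PA[51:12] comes from the SMMU walk, PA[11:0] from the IOVA. */
static uint64_t compose_pa(uint64_t walked_pfn, uint64_t iova)
{
    return (walked_pfn << 12) | (iova & 0xFFF);
}

int main(void)
{
    printf("tbl offset: 0x%x\n", lmt_tbl_index(2, 3, 128));
    printf("lmt addr:   0x%llx\n",
           (unsigned long long)compose_pa(0x12345, 0xabc123));
    return 0;
}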
+ */ + if (!pfvf->lmt_base_addr) + pfvf->lmt_base_addr = val; + + /* Update the LMT table with new addr */ + err = lmtst_map_table_ops(rvu, tbl_idx, &lmt_addr, LMT_TBL_OP_WRITE); + if (err) { + dev_err(rvu->dev, + "Failed to update LMT map table: index 0x%x err %d\n", + tbl_idx, err); + return err; + } + return 0; +} + +int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu, + struct lmtst_tbl_setup_req *req, + struct msg_rsp *rsp) +{ + u64 lmt_addr, val; + u32 pri_tbl_idx; + int err = 0; + + /* Check if PF_FUNC wants to use it's own local memory as LMTLINE + * region, if so, convert that IOVA to physical address and + * populate LMT table with that address + */ + if (req->use_local_lmt_region) { + err = rvu_get_lmtaddr(rvu, req->hdr.pcifunc, + req->lmt_iova, &lmt_addr); + if (err < 0) + return err; + + /* Update the lmt addr for this PFFUNC in the LMT table */ + err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, lmt_addr); + if (err) + return err; + } + + /* Reconfiguring lmtst map table in lmt region shared mode i.e. make + * multiple PF_FUNCs to share an LMTLINE region, so primary/base + * pcifunc (which is passed as an argument to mailbox) is the one + * whose lmt base address will be shared among other secondary + * pcifunc (will be the one who is calling this mailbox). + */ + if (req->base_pcifunc) { + /* Calculating the LMT table index equivalent to primary + * pcifunc. + */ + pri_tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->base_pcifunc); + + /* Read the base lmt addr of the primary pcifunc */ + err = lmtst_map_table_ops(rvu, pri_tbl_idx, &val, + LMT_TBL_OP_READ); + if (err) { + dev_err(rvu->dev, + "Failed to read LMT map table: index 0x%x err %d\n", + pri_tbl_idx, err); + return err; + } + + /* Update the base lmt addr of secondary with primary's base + * lmt addr. + */ + err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, val); + if (err) + return err; + } + + return 0; +} + +/* Resetting the lmtst map table to original base addresses */ +void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + u32 tbl_idx; + int err; + + if (is_rvu_otx2(rvu)) + return; + + if (pfvf->lmt_base_addr) { + /* This corresponds to lmt map table index */ + tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc); + /* Reverting back original lmt base addr for respective + * pcifunc. 
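rvu_mbox_handler_lmtst_tbl_setup() exposes two independent knobs: point your own map entry at local memory (use_local_lmt_region), and/or copy in the entry of a base pcifunc so several functions share one LMTLINE region. With a plain array standing in for the map table:

#include <stdio.h>
#include <stdint.h>

static uint64_t map_tbl[8];

/* Decision flow of the setup handler, simplified: a requester may use
 * its own memory, inherit a base pcifunc's entry, or both (translate
 * first, then share). Indices and addresses are invented. */
static void tbl_setup(int self, int base, uint64_t local_pa)
{
    if (local_pa)
        map_tbl[self] = local_pa;        /* use own LMTLINE region */
    if (base)
        map_tbl[self] = map_tbl[base];   /* share the primary's region */
}

int main(void)
{
    map_tbl[1] = 0x1000;                 /* primary pcifunc */
    tbl_setup(2, 1, 0);                  /* secondary inherits */
    tbl_setup(3, 0, 0x2000);             /* uses local memory */
    printf("pf2 -> %#llx, pf3 -> %#llx\n",
           (unsigned long long)map_tbl[2],
           (unsigned long long)map_tbl[3]);
    return 0;
}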
+ */ + err = lmtst_map_table_ops(rvu, tbl_idx, &pfvf->lmt_base_addr, + LMT_TBL_OP_WRITE); + if (err) + dev_err(rvu->dev, + "Failed to update LMT map table: index 0x%x err %d\n", + tbl_idx, err); + pfvf->lmt_base_addr = 0; + } +} + int rvu_set_channels_base(struct rvu *rvu) { struct rvu_hwinfo *hw = rvu->hw; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index 3cc3c6fd1d84..370d4ca1e5ed 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -1971,10 +1971,9 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id) return err; } -static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused) +static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id) { struct dentry *current_dir; - int err, lmac_id; char *buf; current_dir = filp->file->f_path.dentry->d_parent; @@ -1982,17 +1981,87 @@ static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused) if (!buf) return -EINVAL; - err = kstrtoint(buf + 1, 10, &lmac_id); - if (!err) { - err = cgx_print_stats(filp, lmac_id); - if (err) - return err; - } + return kstrtoint(buf + 1, 10, lmac_id); +} + +static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused) +{ + int lmac_id, err; + + err = rvu_dbg_derive_lmacid(filp, &lmac_id); + if (!err) + return cgx_print_stats(filp, lmac_id); + return err; } RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL); +static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id) +{ + struct pci_dev *pdev = NULL; + void *cgxd = s->private; + char *bcast, *mcast; + u16 index, domain; + u8 dmac[ETH_ALEN]; + struct rvu *rvu; + u64 cfg, mac; + int pf; + + rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM, + PCI_DEVID_OCTEONTX2_RVU_AF, NULL)); + if (!rvu) + return -ENODEV; + + pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id); + domain = 2; + + pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0); + if (!pdev) + return 0; + + cfg = cgx_read_dmac_ctrl(cgxd, lmac_id); + bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT"; + mcast = cfg & CGX_DMAC_MCAST_MODE ? 
"ACCEPT" : "REJECT"; + + seq_puts(s, + "PCI dev RVUPF BROADCAST MULTICAST FILTER-MODE\n"); + seq_printf(s, "%s PF%d %9s %9s", + dev_name(&pdev->dev), pf, bcast, mcast); + if (cfg & CGX_DMAC_CAM_ACCEPT) + seq_printf(s, "%12s\n\n", "UNICAST"); + else + seq_printf(s, "%16s\n\n", "PROMISCUOUS"); + + seq_puts(s, "\nDMAC-INDEX ADDRESS\n"); + + for (index = 0 ; index < 32 ; index++) { + cfg = cgx_read_dmac_entry(cgxd, index); + /* Display enabled dmac entries associated with current lmac */ + if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) && + FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) { + mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg); + u64_to_ether_addr(mac, dmac); + seq_printf(s, "%7d %pM\n", index, dmac); + } + } + + return 0; +} + +static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused) +{ + int err, lmac_id; + + err = rvu_dbg_derive_lmacid(filp, &lmac_id); + if (!err) + return cgx_print_dmac_flt(filp, lmac_id); + + return err; +} + +RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL); + static void rvu_dbg_cgx_init(struct rvu *rvu) { struct mac_ops *mac_ops; @@ -2029,6 +2098,9 @@ static void rvu_dbg_cgx_init(struct rvu *rvu) debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac, cgx, &rvu_dbg_cgx_stat_fops); + debugfs_create_file("mac_filter", 0600, + rvu->rvu_dbg.lmac, cgx, + &rvu_dbg_cgx_dmac_flt_fops); } } } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index d6f8210652c5..aeae37704428 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -346,6 +346,9 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf) /* Free and disable any MCAM entries used by this NIX LF */ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); + + /* Disable DMAC filters used */ + rvu_cgx_disable_dmac_entries(rvu, pcifunc); } int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h index 76837d5e19c6..8b01ef6e2c99 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h @@ -49,6 +49,11 @@ #define RVU_AF_PFX_VF_BAR4_ADDR (0x5400 | (a) << 4) #define RVU_AF_PFX_VF_BAR4_CFG (0x5600 | (a) << 4) #define RVU_AF_PFX_LMTLINE_ADDR (0x5800 | (a) << 4) +#define RVU_AF_SMMU_ADDR_REQ (0x6000) +#define RVU_AF_SMMU_TXN_REQ (0x6008) +#define RVU_AF_SMMU_ADDR_RSP_STS (0x6010) +#define RVU_AF_SMMU_ADDR_TLN (0x6018) +#define RVU_AF_SMMU_TLN_FLIT1 (0x6030) /* Admin function's privileged PF/VF registers */ #define RVU_PRIV_CONST (0x8000000) @@ -692,4 +697,9 @@ #define LBK_LINK_CFG_ID_MASK GENMASK_ULL(11, 6) #define LBK_LINK_CFG_BASE_MASK GENMASK_ULL(5, 0) +/* APR */ +#define APR_AF_LMT_CFG (0x000ull) +#define APR_AF_LMT_MAP_BASE (0x008ull) +#define APR_AF_LMT_CTL (0x010ull) + #endif /* RVU_REG_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h index 14aa8e37ea41..5bbe6727d11d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h @@ -35,7 +35,8 @@ enum rvu_block_addr_e { BLKADDR_NDC_NPA0 = 0xeULL, BLKADDR_NDC_NIX1_RX = 0x10ULL, BLKADDR_NDC_NIX1_TX = 0x11ULL, - BLK_COUNT = 0x12ULL, + BLKADDR_APR = 0x16ULL, + BLK_COUNT = 0x17ULL, }; /* RVU Block Type Enumeration */ diff --git 
a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile index 457c94793e63..3254b02205ca 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile @@ -7,7 +7,7 @@ obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \ - otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o + otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o rvu_nicvf-y := otx2_vf.o ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c index 1b08896b46d2..184de9466286 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c @@ -22,69 +22,52 @@ static struct dev_hw_ops cn10k_hw_ops = { .refill_pool_ptrs = cn10k_refill_pool_ptrs, }; -int cn10k_pf_lmtst_init(struct otx2_nic *pf) +int cn10k_lmtst_init(struct otx2_nic *pfvf) { - int size, num_lines; - u64 base; - if (!test_bit(CN10K_LMTST, &pf->hw.cap_flag)) { - pf->hw_ops = &otx2_hw_ops; + struct lmtst_tbl_setup_req *req; + int qcount, err; + + if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) { + pfvf->hw_ops = &otx2_hw_ops; return 0; } - pf->hw_ops = &cn10k_hw_ops; - base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) + - (MBOX_SIZE * (pf->total_vfs + 1)); - - size = pci_resource_len(pf->pdev, PCI_MBOX_BAR_NUM) - - (MBOX_SIZE * (pf->total_vfs + 1)); - - pf->hw.lmt_base = ioremap(base, size); + pfvf->hw_ops = &cn10k_hw_ops; + qcount = pfvf->hw.max_queues; + /* LMTST lines allocation + * qcount = num_online_cpus(); + * NPA = TX + RX + XDP. + * NIX = TX * 32 (For Burst SQE flush). + */ + pfvf->tot_lmt_lines = (qcount * 3) + (qcount * 32); + pfvf->npa_lmt_lines = qcount * 3; + pfvf->nix_lmt_size = LMT_BURST_SIZE * LMT_LINE_SIZE; - if (!pf->hw.lmt_base) { - dev_err(pf->dev, "Unable to map PF LMTST region\n"); + mutex_lock(&pfvf->mbox.lock); + req = otx2_mbox_alloc_msg_lmtst_tbl_setup(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); return -ENOMEM; } - /* FIXME: Get the num of LMTST lines from LMT table */ - pf->tot_lmt_lines = size / LMT_LINE_SIZE; - num_lines = (pf->tot_lmt_lines - NIX_LMTID_BASE) / - pf->hw.tx_queues; - /* Number of LMT lines per SQ queues */ - pf->nix_lmt_lines = num_lines > 32 ? 32 : num_lines; - - pf->nix_lmt_size = pf->nix_lmt_lines * LMT_LINE_SIZE; - return 0; -} + req->use_local_lmt_region = true; -int cn10k_vf_lmtst_init(struct otx2_nic *vf) -{ - int size, num_lines; - - if (!test_bit(CN10K_LMTST, &vf->hw.cap_flag)) { - vf->hw_ops = &otx2_hw_ops; - return 0; + err = qmem_alloc(pfvf->dev, &pfvf->dync_lmt, pfvf->tot_lmt_lines, + LMT_LINE_SIZE); + if (err) { + mutex_unlock(&pfvf->mbox.lock); + return err; } + pfvf->hw.lmt_base = (u64 *)pfvf->dync_lmt->base; + req->lmt_iova = (u64)pfvf->dync_lmt->iova; - vf->hw_ops = &cn10k_hw_ops; - size = pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM); - vf->hw.lmt_base = ioremap_wc(pci_resource_start(vf->pdev, - PCI_MBOX_BAR_NUM), - size); - if (!vf->hw.lmt_base) { - dev_err(vf->dev, "Unable to map VF LMTST region\n"); - return -ENOMEM; - } + err = otx2_sync_mbox_msg(&pfvf->mbox); + mutex_unlock(&pfvf->mbox.lock); - vf->tot_lmt_lines = size / LMT_LINE_SIZE; - /* LMTST lines per SQ */ - num_lines = (vf->tot_lmt_lines - NIX_LMTID_BASE) / - vf->hw.tx_queues; - vf->nix_lmt_lines = num_lines > 32 ? 
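cn10k_lmtst_init() above drops the BAR-mapped LMT window in favour of driver-allocated memory registered through the new LMTST_TBL_SETUP mailbox, sized as three NPA lines per queue (TX, RX, XDP) plus a 32-line NIX burst window per send queue. The sizing arithmetic:

#include <stdio.h>

#define LMT_LINE_SIZE  128
#define LMT_BURST_SIZE 32

/* Reproduces the arithmetic in cn10k_lmtst_init(); qcount is a made-up
 * stand-in for hw.max_queues. */
int main(void)
{
    int qcount = 8;
    int npa_lines = qcount * 3;
    int tot_lines = npa_lines + qcount * LMT_BURST_SIZE;

    printf("npa lines: %d\n", npa_lines);
    printf("total lines: %d (%d bytes)\n",
           tot_lines, tot_lines * LMT_LINE_SIZE);
    /* SQ qidx gets lmt_id = npa_lines + qidx * LMT_BURST_SIZE */
    printf("sq 2 lmt_id: %d\n", npa_lines + 2 * LMT_BURST_SIZE);
    return 0;
}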
32 : num_lines; - vf->nix_lmt_size = vf->nix_lmt_lines * LMT_LINE_SIZE; return 0; } -EXPORT_SYMBOL(cn10k_vf_lmtst_init); +EXPORT_SYMBOL(cn10k_lmtst_init); int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) { @@ -93,9 +76,11 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) struct otx2_snd_queue *sq; sq = &pfvf->qset.sq[qidx]; - sq->lmt_addr = (__force u64 *)((u64)pfvf->hw.nix_lmt_base + + sq->lmt_addr = (u64 *)((u64)pfvf->hw.nix_lmt_base + (qidx * pfvf->nix_lmt_size)); + sq->lmt_id = pfvf->npa_lmt_lines + (qidx * LMT_BURST_SIZE); + /* Get memory to put this msg */ aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox); if (!aq) @@ -158,15 +143,13 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx) { - struct otx2_nic *pfvf = dev; - int lmt_id = NIX_LMTID_BASE + (qidx * pfvf->nix_lmt_lines); u64 val = 0, tar_addr = 0; /* FIXME: val[0:10] LMT_ID. * [12:15] no of LMTST - 1 in the burst. * [19:63] data size of each LMTST in the burst except first. */ - val = (lmt_id & 0x7FF); + val = (sq->lmt_id & 0x7FF); /* Target address for LMTST flush tells HW how many 128bit * words are present. * tar_addr[6:4] size of first LMTST - 1 in units of 128b. diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h index 71292a4cf1f3..1a1ae334477d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h @@ -12,8 +12,7 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx); int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura); -int cn10k_pf_lmtst_init(struct otx2_nic *pf); -int cn10k_vf_lmtst_init(struct otx2_nic *vf); +int cn10k_lmtst_init(struct otx2_nic *pfvf); int cn10k_free_all_ipolicers(struct otx2_nic *pfvf); int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf); int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index cf7875d51d87..7cccd802c4ed 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -210,6 +210,9 @@ int otx2_set_mac_address(struct net_device *netdev, void *p) /* update dmac field in vlan offload rule */ if (pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT) otx2_install_rxvlan_offload_flow(pfvf); + /* update dmac address in ntuple and DMAC filter list */ + if (pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT) + otx2_dmacflt_update_pfmac_flow(pfvf); } else { return -EPERM; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 234b330f3183..8fd58cd07f50 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -218,8 +218,8 @@ struct otx2_hw { unsigned long cap_flag; #define LMT_LINE_SIZE 128 -#define NIX_LMTID_BASE 72 /* RX + TX + XDP */ - void __iomem *lmt_base; +#define LMT_BURST_SIZE 32 /* 32 LMTST lines for burst SQE flush */ + u64 *lmt_base; u64 *npa_lmt_base; u64 *nix_lmt_base; }; @@ -288,6 +288,9 @@ struct otx2_flow_config { u16 tc_flower_offset; u16 ntuple_max_flows; u16 tc_max_flows; + u8 dmacflt_max_flows; + u8 *bmap_to_dmacindex; + unsigned long dmacflt_bmap; struct list_head flow_list; }; @@ -329,6 
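cn10k_sqe_flush() now reads the LMT id straight from the send queue; per the in-code comment, val[10:0] carries the id and val[15:12] the burst length minus one. A sketch of the packing (the single-LMTST case is the one used here):

#include <stdio.h>
#include <stdint.h>

/* Models the descriptor packing described by the comment in
 * cn10k_sqe_flush(); the burst field is shown only for illustration. */
static uint64_t lmtst_val(unsigned int lmt_id, unsigned int n_lmtst)
{
    uint64_t val = lmt_id & 0x7FF;

    if (n_lmtst > 1)
        val |= (uint64_t)((n_lmtst - 1) & 0xF) << 12;
    return val;
}

int main(void)
{
    printf("val=0x%llx\n", (unsigned long long)lmtst_val(24, 1));
    printf("val=0x%llx\n", (unsigned long long)lmtst_val(24, 4));
    return 0;
}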
+332,7 @@ struct otx2_nic { #define OTX2_FLAG_TC_FLOWER_SUPPORT BIT_ULL(11) #define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12) #define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13) +#define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14) u64 flags; struct otx2_qset qset; @@ -363,8 +367,9 @@ struct otx2_nic { /* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */ int nix_blkaddr; /* LMTST Lines info */ + struct qmem *dync_lmt; u16 tot_lmt_lines; - u16 nix_lmt_lines; + u16 npa_lmt_lines; u32 nix_lmt_size; struct otx2_ptp *ptp; @@ -833,4 +838,11 @@ int otx2_init_tc(struct otx2_nic *nic); void otx2_shutdown_tc(struct otx2_nic *nic); int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data); +/* CGX/RPM DMAC filters support */ +int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf); +int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos); +int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u8 bit_pos); +int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos); +void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf); +void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf); #endif /* OTX2_COMMON_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c new file mode 100644 index 000000000000..ffe3e94562d0 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Physcial Function ethernet driver + * + * Copyright (C) 2021 Marvell. + */ + +#include "otx2_common.h" + +static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac, + u8 *dmac_index) +{ + struct cgx_mac_addr_add_req *req; + struct cgx_mac_addr_add_rsp *rsp; + int err; + + mutex_lock(&pf->mbox.lock); + + req = otx2_mbox_alloc_msg_cgx_mac_addr_add(&pf->mbox); + if (!req) { + mutex_unlock(&pf->mbox.lock); + return -ENOMEM; + } + + ether_addr_copy(req->mac_addr, mac); + err = otx2_sync_mbox_msg(&pf->mbox); + + if (!err) { + rsp = (struct cgx_mac_addr_add_rsp *) + otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr); + *dmac_index = rsp->index; + } + + mutex_unlock(&pf->mbox.lock); + return err; +} + +static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf) +{ + struct cgx_mac_addr_set_or_get *req; + int err; + + mutex_lock(&pf->mbox.lock); + + req = otx2_mbox_alloc_msg_cgx_mac_addr_set(&pf->mbox); + if (!req) { + mutex_unlock(&pf->mbox.lock); + return -ENOMEM; + } + + ether_addr_copy(req->mac_addr, pf->netdev->dev_addr); + err = otx2_sync_mbox_msg(&pf->mbox); + + mutex_unlock(&pf->mbox.lock); + return err; +} + +int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos) +{ + u8 *dmacindex; + + /* Store dmacindex returned by CGX/RPM driver which will + * be used for macaddr update/remove + */ + dmacindex = &pf->flow_cfg->bmap_to_dmacindex[bit_pos]; + + if (ether_addr_equal(mac, pf->netdev->dev_addr)) + return otx2_dmacflt_add_pfmac(pf); + else + return otx2_dmacflt_do_add(pf, mac, dmacindex); +} + +static int otx2_dmacflt_do_remove(struct otx2_nic *pfvf, const u8 *mac, + u8 dmac_index) +{ + struct cgx_mac_addr_del_req *req; + int err; + + mutex_lock(&pfvf->mbox.lock); + req = otx2_mbox_alloc_msg_cgx_mac_addr_del(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + + req->index = dmac_index; + + err = otx2_sync_mbox_msg(&pfvf->mbox); + mutex_unlock(&pfvf->mbox.lock); + + return err; +} + +static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf) +{ + 
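Every helper in the new otx2_dmac_flt.c follows one transaction shape: take the mbox mutex, allocate the typed request (unlock and return -ENOMEM on failure), run the synchronous exchange, pull fields from the response, unlock. The shape, with stub primitives standing in for the real mbox API:

#include <stdio.h>
#include <errno.h>

static void mbox_lock(void) {}
static void mbox_unlock(void) {}

struct add_req { unsigned char mac[6]; };

static struct add_req *alloc_msg(void) { static struct add_req r; return &r; }
static int sync_msg(void) { return 0; }
static int rsp_index(void) { return 1; }

static int dmacflt_do_add(const unsigned char *mac, int *index)
{
    struct add_req *req;
    int err, i;

    mbox_lock();
    req = alloc_msg();
    if (!req) {
        mbox_unlock();          /* never return with the lock held */
        return -ENOMEM;
    }
    for (i = 0; i < 6; i++)
        req->mac[i] = mac[i];
    err = sync_msg();
    if (!err)
        *index = rsp_index();   /* CAM index chosen by the AF */
    mbox_unlock();
    return err;
}

int main(void)
{
    unsigned char mac[6] = { 0x02, 0, 0xde, 0xad, 0xbe, 0xef };
    int idx = -1, err = dmacflt_do_add(mac, &idx);

    printf("err=%d idx=%d\n", err, idx);
    return 0;
}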
struct msg_req *req; + int err; + + mutex_lock(&pf->mbox.lock); + req = otx2_mbox_alloc_msg_cgx_mac_addr_reset(&pf->mbox); + if (!req) { + mutex_unlock(&pf->mbox.lock); + return -ENOMEM; + } + + err = otx2_sync_mbox_msg(&pf->mbox); + + mutex_unlock(&pf->mbox.lock); + return err; +} + +int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, + u8 bit_pos) +{ + u8 dmacindex = pf->flow_cfg->bmap_to_dmacindex[bit_pos]; + + if (ether_addr_equal(mac, pf->netdev->dev_addr)) + return otx2_dmacflt_remove_pfmac(pf); + else + return otx2_dmacflt_do_remove(pf, mac, dmacindex); +} + +/* CGX/RPM blocks support max unicast entries of 32. + * on typical configuration MAC block associated + * with 4 lmacs, each lmac will have 8 dmac entries + */ +int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf) +{ + struct cgx_max_dmac_entries_get_rsp *rsp; + struct msg_req *msg; + int err; + + mutex_lock(&pf->mbox.lock); + msg = otx2_mbox_alloc_msg_cgx_mac_max_entries_get(&pf->mbox); + + if (!msg) { + mutex_unlock(&pf->mbox.lock); + return -ENOMEM; + } + + err = otx2_sync_mbox_msg(&pf->mbox); + if (err) + goto out; + + rsp = (struct cgx_max_dmac_entries_get_rsp *) + otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &msg->hdr); + pf->flow_cfg->dmacflt_max_flows = rsp->max_dmac_filters; + +out: + mutex_unlock(&pf->mbox.lock); + return err; +} + +int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos) +{ + struct cgx_mac_addr_update_req *req; + int rc; + + mutex_lock(&pf->mbox.lock); + + req = otx2_mbox_alloc_msg_cgx_mac_addr_update(&pf->mbox); + + if (!req) { + mutex_unlock(&pf->mbox.lock); + rc = -ENOMEM; + } + + ether_addr_copy(req->mac_addr, mac); + req->index = pf->flow_cfg->bmap_to_dmacindex[bit_pos]; + rc = otx2_sync_mbox_msg(&pf->mbox); + + mutex_unlock(&pf->mbox.lock); + return rc; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c index 8c97106bdd1c..4d9de525802d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c @@ -18,6 +18,12 @@ struct otx2_flow { bool is_vf; u8 rss_ctx_id; int vf; + bool dmac_filter; +}; + +enum dmac_req { + DMAC_ADDR_UPDATE, + DMAC_ADDR_DEL }; static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg) @@ -219,6 +225,22 @@ int otx2_mcam_flow_init(struct otx2_nic *pf) if (!pf->mac_table) return -ENOMEM; + otx2_dmacflt_get_max_cnt(pf); + + /* DMAC filters are not allocated */ + if (!pf->flow_cfg->dmacflt_max_flows) + return 0; + + pf->flow_cfg->bmap_to_dmacindex = + devm_kzalloc(pf->dev, sizeof(u8) * + pf->flow_cfg->dmacflt_max_flows, + GFP_KERNEL); + + if (!pf->flow_cfg->bmap_to_dmacindex) + return -ENOMEM; + + pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT; + return 0; } @@ -280,6 +302,12 @@ int otx2_add_macfilter(struct net_device *netdev, const u8 *mac) { struct otx2_nic *pf = netdev_priv(netdev); + if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap, + pf->flow_cfg->dmacflt_max_flows)) + netdev_warn(netdev, + "Add %pM to CGX/RPM DMAC filters list as well\n", + mac); + return otx2_do_add_macfilter(pf, mac); } @@ -351,12 +379,22 @@ static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow) list_add(&flow->list, head); } +static int otx2_get_maxflows(struct otx2_flow_config *flow_cfg) +{ + if (flow_cfg->nr_flows == flow_cfg->ntuple_max_flows || + bitmap_weight(&flow_cfg->dmacflt_bmap, + flow_cfg->dmacflt_max_flows)) + return flow_cfg->ntuple_max_flows + flow_cfg->dmacflt_max_flows; + else + 
return flow_cfg->ntuple_max_flows; +} + int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc, u32 location) { struct otx2_flow *iter; - if (location >= pfvf->flow_cfg->ntuple_max_flows) + if (location >= otx2_get_maxflows(pfvf->flow_cfg)) return -EINVAL; list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) { @@ -378,7 +416,7 @@ int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc, int idx = 0; int err = 0; - nfc->data = pfvf->flow_cfg->ntuple_max_flows; + nfc->data = otx2_get_maxflows(pfvf->flow_cfg); while ((!err || err == -ENOENT) && idx < rule_cnt) { err = otx2_get_flow(pfvf, nfc, location); if (!err) @@ -760,6 +798,32 @@ int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp, return 0; } +static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf, + struct ethtool_rx_flow_spec *fsp) +{ + struct ethhdr *eth_mask = &fsp->m_u.ether_spec; + struct ethhdr *eth_hdr = &fsp->h_u.ether_spec; + u64 ring_cookie = fsp->ring_cookie; + u32 flow_type; + + if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)) + return false; + + flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS); + + /* CGX/RPM block dmac filtering configured for white listing + * check for action other than DROP + */ + if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC && + !ethtool_get_flow_spec_ring_vf(ring_cookie)) { + if (is_zero_ether_addr(eth_mask->h_dest) && + is_valid_ether_addr(eth_hdr->h_dest)) + return true; + } + + return false; +} + static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow) { u64 ring_cookie = flow->flow_spec.ring_cookie; @@ -818,14 +882,46 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow) return err; } +static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf, + struct otx2_flow *flow) +{ + struct otx2_flow *pf_mac; + struct ethhdr *eth_hdr; + + pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL); + if (!pf_mac) + return -ENOMEM; + + pf_mac->entry = 0; + pf_mac->dmac_filter = true; + pf_mac->location = pfvf->flow_cfg->ntuple_max_flows; + memcpy(&pf_mac->flow_spec, &flow->flow_spec, + sizeof(struct ethtool_rx_flow_spec)); + pf_mac->flow_spec.location = pf_mac->location; + + /* Copy PF mac address */ + eth_hdr = &pf_mac->flow_spec.h_u.ether_spec; + ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr); + + /* Install DMAC filter with PF mac address */ + otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0); + + otx2_add_flow_to_list(pfvf, pf_mac); + pfvf->flow_cfg->nr_flows++; + set_bit(0, &pfvf->flow_cfg->dmacflt_bmap); + + return 0; +} + int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) { struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; struct ethtool_rx_flow_spec *fsp = &nfc->fs; struct otx2_flow *flow; + struct ethhdr *eth_hdr; bool new = false; + int err = 0; u32 ring; - int err; ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT)) @@ -834,16 +930,15 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC) return -EINVAL; - if (fsp->location >= flow_cfg->ntuple_max_flows) + if (fsp->location >= otx2_get_maxflows(flow_cfg)) return -EINVAL; flow = otx2_find_flow(pfvf, fsp->location); if (!flow) { - flow = kzalloc(sizeof(*flow), GFP_ATOMIC); + flow = kzalloc(sizeof(*flow), GFP_KERNEL); if (!flow) return -ENOMEM; flow->location = fsp->location; - flow->entry = flow_cfg->flow_ent[flow->location]; new = true; } /* struct copy */ @@ -852,7 +947,54 @@ 
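otx2_is_flow_rule_dmacfilter() above decides whether an ethtool rule can ride the CGX/RPM CAM instead of an NPC MCAM entry: it must be an accepting ETHER_FLOW, target no VF, and match only a fully specified destination MAC. A simplified model (the real code additionally requires is_valid_ether_addr(), i.e. a non-zero unicast destination):

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

struct rule {
    bool ether_flow;
    bool drop;
    bool has_vf;
    unsigned char dst[6], dst_mask[6];
};

static bool is_dmac_filter(const struct rule *r)
{
    static const unsigned char zero[6];

    if (!r->ether_flow || r->drop || r->has_vf)
        return false;
    /* destination fully matched (all-zero ethtool mask) and non-zero */
    return !memcmp(r->dst_mask, zero, 6) && memcmp(r->dst, zero, 6);
}

int main(void)
{
    struct rule r = { .ether_flow = true,
                      .dst = { 0x02, 0, 0xde, 0xad, 0xbe, 0xef } };

    printf("dmac filter? %d\n", is_dmac_filter(&r));
    return 0;
}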
int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) if (fsp->flow_type & FLOW_RSS) flow->rss_ctx_id = nfc->rss_context; - err = otx2_add_flow_msg(pfvf, flow); + if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) { + eth_hdr = &flow->flow_spec.h_u.ether_spec; + + /* Sync dmac filter table with updated fields */ + if (flow->dmac_filter) + return otx2_dmacflt_update(pfvf, eth_hdr->h_dest, + flow->entry); + + if (bitmap_full(&flow_cfg->dmacflt_bmap, + flow_cfg->dmacflt_max_flows)) { + netdev_warn(pfvf->netdev, + "Can't insert the rule %d as max allowed dmac filters are %d\n", + flow->location + + flow_cfg->dmacflt_max_flows, + flow_cfg->dmacflt_max_flows); + err = -EINVAL; + if (new) + kfree(flow); + return err; + } + + /* Install PF mac address to DMAC filter list */ + if (!test_bit(0, &flow_cfg->dmacflt_bmap)) + otx2_add_flow_with_pfmac(pfvf, flow); + + flow->dmac_filter = true; + flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap, + flow_cfg->dmacflt_max_flows); + fsp->location = flow_cfg->ntuple_max_flows + flow->entry; + flow->flow_spec.location = fsp->location; + flow->location = fsp->location; + + set_bit(flow->entry, &flow_cfg->dmacflt_bmap); + otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry); + + } else { + if (flow->location >= pfvf->flow_cfg->ntuple_max_flows) { + netdev_warn(pfvf->netdev, + "Can't insert non dmac ntuple rule at %d, allowed range %d-0\n", + flow->location, + flow_cfg->ntuple_max_flows - 1); + err = -EINVAL; + } else { + flow->entry = flow_cfg->flow_ent[flow->location]; + err = otx2_add_flow_msg(pfvf, flow); + } + } + if (err) { if (new) kfree(flow); @@ -890,20 +1032,70 @@ static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all) return err; } +static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req) +{ + struct otx2_flow *iter; + struct ethhdr *eth_hdr; + bool found = false; + + list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) { + if (iter->dmac_filter && iter->entry == 0) { + eth_hdr = &iter->flow_spec.h_u.ether_spec; + if (req == DMAC_ADDR_DEL) { + otx2_dmacflt_remove(pfvf, eth_hdr->h_dest, + 0); + clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap); + found = true; + } else { + ether_addr_copy(eth_hdr->h_dest, + pfvf->netdev->dev_addr); + otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0); + } + break; + } + } + + if (found) { + list_del(&iter->list); + kfree(iter); + pfvf->flow_cfg->nr_flows--; + } +} + int otx2_remove_flow(struct otx2_nic *pfvf, u32 location) { struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; struct otx2_flow *flow; int err; - if (location >= flow_cfg->ntuple_max_flows) + if (location >= otx2_get_maxflows(flow_cfg)) return -EINVAL; flow = otx2_find_flow(pfvf, location); if (!flow) return -ENOENT; - err = otx2_remove_flow_msg(pfvf, flow->entry, false); + if (flow->dmac_filter) { + struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec; + + /* user not allowed to remove dmac filter with interface mac */ + if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest)) + return -EPERM; + + err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest, + flow->entry); + clear_bit(flow->entry, &flow_cfg->dmacflt_bmap); + /* If all dmac filters are removed delete macfilter with + * interface mac address and configure CGX/RPM block in + * promiscuous mode + */ + if (bitmap_weight(&flow_cfg->dmacflt_bmap, + flow_cfg->dmacflt_max_flows) == 1) + otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL); + } else { + err = otx2_remove_flow_msg(pfvf, flow->entry, false); + } + if (err) return err; @@ -1100,3 +1292,22 @@ 
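After this change rule locations form one namespace: [0, ntuple_max) stays with NPC ntuple entries, and a DMAC CAM rule surfaces at ntuple_max plus its CAM bit, which is what otx2_get_maxflows() advertises. The mapping in both directions (sizes invented):

#include <stdio.h>

int main(void)
{
    int ntuple_max = 64, dmac_max = 8;

    int cam_entry = 3;                       /* first free CAM bit */
    int location = ntuple_max + cam_entry;   /* what ethtool reports */

    printf("max locations: %d\n", ntuple_max + dmac_max);
    printf("cam entry %d -> location %d\n", cam_entry, location);
    printf("location %d -> cam entry %d\n", location, location - ntuple_max);
    return 0;
}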
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable) mutex_unlock(&pf->mbox.lock); return rsp_hdr->rc; } + +void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf) +{ + struct otx2_flow *iter; + struct ethhdr *eth_hdr; + + list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) { + if (iter->dmac_filter) { + eth_hdr = &iter->flow_spec.h_u.ether_spec; + otx2_dmacflt_add(pf, eth_hdr->h_dest, + iter->entry); + } + } +} + +void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf) +{ + otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 59912f73417b..f300b807a85b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -1110,6 +1110,11 @@ static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable) struct msg_req *msg; int err; + if (enable && bitmap_weight(&pf->flow_cfg->dmacflt_bmap, + pf->flow_cfg->dmacflt_max_flows)) + netdev_warn(pf->netdev, + "CGX/RPM internal loopback might not work as DMAC filters are active\n"); + mutex_lock(&pf->mbox.lock); if (enable) msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox); @@ -1533,10 +1538,10 @@ int otx2_open(struct net_device *netdev) if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) { /* Reserve LMT lines for NPA AURA batch free */ - pf->hw.npa_lmt_base = (__force u64 *)pf->hw.lmt_base; + pf->hw.npa_lmt_base = pf->hw.lmt_base; /* Reserve LMT lines for NIX TX */ - pf->hw.nix_lmt_base = (__force u64 *)((u64)pf->hw.npa_lmt_base + - (NIX_LMTID_BASE * LMT_LINE_SIZE)); + pf->hw.nix_lmt_base = (u64 *)((u64)pf->hw.npa_lmt_base + + (pf->npa_lmt_lines * LMT_LINE_SIZE)); } err = otx2_init_hw_resources(pf); @@ -1644,6 +1649,10 @@ int otx2_open(struct net_device *netdev) /* Restore pause frame settings */ otx2_config_pause_frm(pf); + /* Install DMAC Filters */ + if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT) + otx2_dmacflt_reinstall_flows(pf); + err = otx2_rxtx_enable(pf, true); if (err) goto err_tx_stop_queues; @@ -2526,7 +2535,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto err_detach_rsrc; - err = cn10k_pf_lmtst_init(pf); + err = cn10k_lmtst_init(pf); if (err) goto err_detach_rsrc; @@ -2630,8 +2639,8 @@ err_del_mcam_entries: err_ptp_destroy: otx2_ptp_destroy(pf); err_detach_rsrc: - if (hw->lmt_base) - iounmap(hw->lmt_base); + if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) + qmem_free(pf->dev, pf->dync_lmt); otx2_detach_resources(&pf->mbox); err_disable_mbox_intr: otx2_disable_mbox_intr(pf); @@ -2772,9 +2781,8 @@ static void otx2_remove(struct pci_dev *pdev) otx2_mcam_flow_del(pf); otx2_shutdown_tc(pf); otx2_detach_resources(&pf->mbox); - if (pf->hw.lmt_base) - iounmap(pf->hw.lmt_base); - + if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) + qmem_free(pf->dev, pf->dync_lmt); otx2_disable_mbox_intr(pf); otx2_pfaf_mbox_destroy(pf); pci_free_irq_vectors(pf->pdev); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h index 52486c1f0973..2f144e2cf436 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h @@ -83,6 +83,7 @@ struct otx2_snd_queue { u16 num_sqbs; u16 sqe_thresh; u8 sqe_per_sqb; + u32 lmt_id; u64 io_addr; u64 *aura_fc_addr; u64 *lmt_addr; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index 
13a908f75ba0..a8bee5aefec1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -609,7 +609,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto err_detach_rsrc; - err = cn10k_vf_lmtst_init(vf); + err = cn10k_lmtst_init(vf); if (err) goto err_detach_rsrc; @@ -667,8 +667,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) err_unreg_netdev: unregister_netdev(netdev); err_detach_rsrc: - if (hw->lmt_base) - iounmap(hw->lmt_base); + if (test_bit(CN10K_LMTST, &vf->hw.cap_flag)) + qmem_free(vf->dev, vf->dync_lmt); otx2_detach_resources(&vf->mbox); err_disable_mbox_intr: otx2vf_disable_mbox_intr(vf); @@ -700,10 +700,8 @@ static void otx2vf_remove(struct pci_dev *pdev) destroy_workqueue(vf->otx2_wq); otx2vf_disable_mbox_intr(vf); otx2_detach_resources(&vf->mbox); - - if (vf->hw.lmt_base) - iounmap(vf->hw.lmt_base); - + if (test_bit(CN10K_LMTST, &vf->hw.cap_flag)) + qmem_free(vf->dev, vf->dync_lmt); otx2vf_vfaf_mbox_destroy(vf); pci_free_irq_vectors(vf->pdev); pci_set_drvdata(pdev, NULL); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 8d9d6ecf8c63..7b8404a21544 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -7171,6 +7171,7 @@ int stmmac_suspend(struct device *dev) priv->plat->rx_queues_to_use, false); stmmac_fpe_handshake(priv, false); + stmmac_fpe_stop_wq(priv); } priv->speed = SPEED_UNKNOWN; diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index aec97b021a73..2c115216420a 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -701,6 +701,7 @@ static int ax88772_init_phy(struct usbnet *dev) return ret; } + phy_suspend(priv->phydev); priv->phydev->mac_managed_pm = 1; phy_attached_info(priv->phydev); diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c index 46f76e8aae92..0a472ce77370 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c +++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c @@ -24,15 +24,7 @@ int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id) return -EIO; } - /* check for the interafce id - * if if_id 1 to 8 then create IP MUX channel sessions. - * To start MUX session from 0 as network interface id would start - * from 1 so map it to if_id = if_id - 1 - */ - if (if_id >= IP_MUX_SESSION_START && if_id <= IP_MUX_SESSION_END) - return ipc_mux_open_session(ipc_imem->mux, if_id - 1); - - return -EINVAL; + return ipc_mux_open_session(ipc_imem->mux, if_id); } /* Release a net link to CP. */ @@ -41,7 +33,7 @@ void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id, { if (ipc_imem->mux && if_id >= IP_MUX_SESSION_START && if_id <= IP_MUX_SESSION_END) - ipc_mux_close_session(ipc_imem->mux, if_id - 1); + ipc_mux_close_session(ipc_imem->mux, if_id); } /* Tasklet call to do uplink transfer. 
*/ @@ -83,13 +75,8 @@ int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem, goto out; } - if (if_id >= IP_MUX_SESSION_START && if_id <= IP_MUX_SESSION_END) - /* Route the UL packet through IP MUX Layer */ - ret = ipc_mux_ul_trigger_encode(ipc_imem->mux, - if_id - 1, skb); - else - dev_err(ipc_imem->dev, - "invalid if_id %d: ", if_id); + /* Route the UL packet through IP MUX Layer */ + ret = ipc_mux_ul_trigger_encode(ipc_imem->mux, if_id, skb); out: return ret; } diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h index fd356dafbdd6..2007fe23e9a5 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h +++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h @@ -27,11 +27,11 @@ #define BOOT_CHECK_DEFAULT_TIMEOUT 400 /* IP MUX channel range */ -#define IP_MUX_SESSION_START 1 -#define IP_MUX_SESSION_END 8 +#define IP_MUX_SESSION_START 0 +#define IP_MUX_SESSION_END 7 /* Default IP MUX channel */ -#define IP_MUX_SESSION_DEFAULT 1 +#define IP_MUX_SESSION_DEFAULT 0 /** * ipc_imem_sys_port_open - Open a port link to CP. diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c index e634ffc6ec08..562de275797a 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c +++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c @@ -288,7 +288,7 @@ static int ipc_mux_net_receive(struct iosm_mux *ipc_mux, int if_id, /* Pass the packet to the netif layer. */ dest_skb->priority = service_class; - return ipc_wwan_receive(wwan, dest_skb, false, if_id + 1); + return ipc_wwan_receive(wwan, dest_skb, false, if_id); } /* Decode Flow Credit Table in the block */ diff --git a/drivers/net/wwan/iosm/iosm_ipc_uevent.c b/drivers/net/wwan/iosm/iosm_ipc_uevent.c index 2229d752926c..d12188ffed7e 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_uevent.c +++ b/drivers/net/wwan/iosm/iosm_ipc_uevent.c @@ -37,7 +37,7 @@ void ipc_uevent_send(struct device *dev, char *uevent) /* Store the device and event information */ info->dev = dev; - snprintf(info->uevent, MAX_UEVENT_LEN, "%s: %s", dev_name(dev), uevent); + snprintf(info->uevent, MAX_UEVENT_LEN, "IOSM_EVENT=%s", uevent); /* Schedule uevent in process context using work queue */ schedule_work(&info->work); diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.c b/drivers/net/wwan/iosm/iosm_ipc_wwan.c index c999c64001f4..b2357ad5d517 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_wwan.c +++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.c @@ -107,6 +107,7 @@ static int ipc_wwan_link_transmit(struct sk_buff *skb, { struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev); struct iosm_wwan *ipc_wwan = priv->ipc_wwan; + unsigned int len = skb->len; int if_id = priv->if_id; int ret; @@ -123,6 +124,8 @@ static int ipc_wwan_link_transmit(struct sk_buff *skb, /* Return code of zero is success */ if (ret == 0) { + netdev->stats.tx_packets++; + netdev->stats.tx_bytes += len; ret = NETDEV_TX_OK; } else if (ret == -EBUSY) { ret = NETDEV_TX_BUSY; @@ -140,7 +143,8 @@ exit: ret); dev_kfree_skb_any(skb); - return ret; + netdev->stats.tx_dropped++; + return NETDEV_TX_OK; } /* Ops structure for wwan net link */ @@ -158,6 +162,7 @@ static void ipc_wwan_setup(struct net_device *iosm_dev) iosm_dev->priv_flags |= IFF_NO_QUEUE; iosm_dev->type = ARPHRD_NONE; + iosm_dev->mtu = ETH_DATA_LEN; iosm_dev->min_mtu = ETH_MIN_MTU; iosm_dev->max_mtu = ETH_MAX_MTU; @@ -252,8 +257,8 @@ int ipc_wwan_receive(struct iosm_wwan *ipc_wwan, struct sk_buff *skb_arg, skb->pkt_type = PACKET_HOST; - if (if_id < (IP_MUX_SESSION_START - 1) || - if_id > 
(IP_MUX_SESSION_END - 1)) { + if (if_id < IP_MUX_SESSION_START || + if_id > IP_MUX_SESSION_END) { ret = -EINVAL; goto free; } diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile index 8673d1743faa..28a6fe342d3e 100644 --- a/drivers/ptp/Makefile +++ b/drivers/ptp/Makefile @@ -3,7 +3,7 @@ # Makefile for PTP 1588 clock support. # -ptp-y := ptp_clock.o ptp_chardev.o ptp_sysfs.o +ptp-y := ptp_clock.o ptp_chardev.o ptp_sysfs.o ptp_vclock.o ptp_kvm-$(CONFIG_X86) := ptp_kvm_x86.o ptp_kvm_common.o ptp_kvm-$(CONFIG_HAVE_ARM_SMCCC) := ptp_kvm_arm.o ptp_kvm_common.o obj-$(CONFIG_PTP_1588_CLOCK) += ptp.o diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index a23a37a4d5dc..f012fa581cf4 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -24,10 +24,11 @@ #define PTP_PPS_EVENT PPS_CAPTUREASSERT #define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC) +struct class *ptp_class; + /* private globals */ static dev_t ptp_devt; -static struct class *ptp_class; static DEFINE_IDA(ptp_clocks_map); @@ -76,6 +77,11 @@ static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp { struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); + if (ptp_vclock_in_use(ptp)) { + pr_err("ptp: virtual clock in use\n"); + return -EBUSY; + } + return ptp->info->settime64(ptp->info, tp); } @@ -97,6 +103,11 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx) struct ptp_clock_info *ops; int err = -EOPNOTSUPP; + if (ptp_vclock_in_use(ptp)) { + pr_err("ptp: virtual clock in use\n"); + return -EBUSY; + } + ops = ptp->info; if (tx->modes & ADJ_SETOFFSET) { @@ -161,6 +172,7 @@ static void ptp_clock_release(struct device *dev) ptp_cleanup_pin_groups(ptp); mutex_destroy(&ptp->tsevq_mux); mutex_destroy(&ptp->pincfg_mux); + mutex_destroy(&ptp->n_vclocks_mux); ida_simple_remove(&ptp_clocks_map, ptp->index); kfree(ptp); } @@ -185,6 +197,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, { struct ptp_clock *ptp; int err = 0, index, major = MAJOR(ptp_devt); + size_t size; if (info->n_alarm > PTP_MAX_ALARMS) return ERR_PTR(-EINVAL); @@ -208,6 +221,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, spin_lock_init(&ptp->tsevq.lock); mutex_init(&ptp->tsevq_mux); mutex_init(&ptp->pincfg_mux); + mutex_init(&ptp->n_vclocks_mux); init_waitqueue_head(&ptp->tsev_wq); if (ptp->info->do_aux_work) { @@ -221,6 +235,22 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, ptp->pps_source->lookup_cookie = ptp; } + /* PTP virtual clock is being registered under physical clock */ + if (parent->class && parent->class->name && + strcmp(parent->class->name, "ptp") == 0) + ptp->is_virtual_clock = true; + + if (!ptp->is_virtual_clock) { + ptp->max_vclocks = PTP_DEFAULT_MAX_VCLOCKS; + + size = sizeof(int) * ptp->max_vclocks; + ptp->vclock_index = kzalloc(size, GFP_KERNEL); + if (!ptp->vclock_index) { + err = -ENOMEM; + goto no_mem_for_vclocks; + } + } + err = ptp_populate_pin_groups(ptp); if (err) goto no_pin_groups; @@ -265,11 +295,14 @@ no_clock: no_pps: ptp_cleanup_pin_groups(ptp); no_pin_groups: + kfree(ptp->vclock_index); +no_mem_for_vclocks: if (ptp->kworker) kthread_destroy_worker(ptp->kworker); kworker_err: mutex_destroy(&ptp->tsevq_mux); mutex_destroy(&ptp->pincfg_mux); + mutex_destroy(&ptp->n_vclocks_mux); ida_simple_remove(&ptp_clocks_map, index); no_slot: kfree(ptp); @@ -280,9 +313,16 @@ EXPORT_SYMBOL(ptp_clock_register); int ptp_clock_unregister(struct ptp_clock *ptp) { + if 
(ptp_vclock_in_use(ptp)) { + pr_err("ptp: virtual clock in use\n"); + return -EBUSY; + } + ptp->defunct = 1; wake_up_interruptible(&ptp->tsev_wq); + kfree(ptp->vclock_index); + if (ptp->kworker) { kthread_cancel_delayed_work_sync(&ptp->aux_work); kthread_destroy_worker(ptp->kworker); diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h index 6b97155148f1..dba6be477067 100644 --- a/drivers/ptp/ptp_private.h +++ b/drivers/ptp/ptp_private.h @@ -18,6 +18,7 @@ #define PTP_MAX_TIMESTAMPS 128 #define PTP_BUF_TIMESTAMPS 30 +#define PTP_DEFAULT_MAX_VCLOCKS 20 struct timestamp_event_queue { struct ptp_extts_event buf[PTP_MAX_TIMESTAMPS]; @@ -46,6 +47,24 @@ struct ptp_clock { const struct attribute_group *pin_attr_groups[2]; struct kthread_worker *kworker; struct kthread_delayed_work aux_work; + unsigned int max_vclocks; + unsigned int n_vclocks; + int *vclock_index; + struct mutex n_vclocks_mux; /* protect concurrent n_vclocks access */ + bool is_virtual_clock; +}; + +#define info_to_vclock(d) container_of((d), struct ptp_vclock, info) +#define cc_to_vclock(d) container_of((d), struct ptp_vclock, cc) +#define dw_to_vclock(d) container_of((d), struct ptp_vclock, refresh_work) + +struct ptp_vclock { + struct ptp_clock *pclock; + struct ptp_clock_info info; + struct ptp_clock *clock; + struct cyclecounter cc; + struct timecounter tc; + spinlock_t lock; /* protects tc/cc */ }; /* @@ -61,6 +80,24 @@ static inline int queue_cnt(struct timestamp_event_queue *q) return cnt < 0 ? PTP_MAX_TIMESTAMPS + cnt : cnt; } +/* Check if ptp virtual clock is in use */ +static inline bool ptp_vclock_in_use(struct ptp_clock *ptp) +{ + bool in_use = false; + + if (mutex_lock_interruptible(&ptp->n_vclocks_mux)) + return true; + + if (!ptp->is_virtual_clock && ptp->n_vclocks) + in_use = true; + + mutex_unlock(&ptp->n_vclocks_mux); + + return in_use; +} + +extern struct class *ptp_class; + /* * see ptp_chardev.c */ @@ -89,4 +126,6 @@ extern const struct attribute_group *ptp_groups[]; int ptp_populate_pin_groups(struct ptp_clock *ptp); void ptp_cleanup_pin_groups(struct ptp_clock *ptp); +struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock); +void ptp_vclock_unregister(struct ptp_vclock *vclock); #endif diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c index be076a91e20e..6a36590ca77a 100644 --- a/drivers/ptp/ptp_sysfs.c +++ b/drivers/ptp/ptp_sysfs.c @@ -3,6 +3,7 @@ * PTP 1588 clock support - sysfs interface. * * Copyright (C) 2010 OMICRON electronics GmbH + * Copyright 2021 NXP */ #include <linux/capability.h> #include <linux/slab.h> @@ -148,6 +149,159 @@ out: } static DEVICE_ATTR(pps_enable, 0220, NULL, pps_enable_store); +static int unregister_vclock(struct device *dev, void *data) +{ + struct ptp_clock *ptp = dev_get_drvdata(dev); + struct ptp_clock_info *info = ptp->info; + struct ptp_vclock *vclock; + u8 *num = data; + + vclock = info_to_vclock(info); + dev_info(dev->parent, "delete virtual clock ptp%d\n", + vclock->clock->index); + + ptp_vclock_unregister(vclock); + (*num)--; + + /* For break. Not error. 
*/ + if (*num == 0) + return -EINVAL; + + return 0; +} + +static ssize_t n_vclocks_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct ptp_clock *ptp = dev_get_drvdata(dev); + ssize_t size; + + if (mutex_lock_interruptible(&ptp->n_vclocks_mux)) + return -ERESTARTSYS; + + size = snprintf(page, PAGE_SIZE - 1, "%d\n", ptp->n_vclocks); + + mutex_unlock(&ptp->n_vclocks_mux); + + return size; +} + +static ssize_t n_vclocks_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ptp_clock *ptp = dev_get_drvdata(dev); + struct ptp_vclock *vclock; + int err = -EINVAL; + u32 num, i; + + if (kstrtou32(buf, 0, &num)) + return err; + + if (mutex_lock_interruptible(&ptp->n_vclocks_mux)) + return -ERESTARTSYS; + + if (num > ptp->max_vclocks) { + dev_err(dev, "max value is %d\n", ptp->max_vclocks); + goto out; + } + + /* Need to create more vclocks */ + if (num > ptp->n_vclocks) { + for (i = 0; i < num - ptp->n_vclocks; i++) { + vclock = ptp_vclock_register(ptp); + if (!vclock) + goto out; + + *(ptp->vclock_index + ptp->n_vclocks + i) = + vclock->clock->index; + + dev_info(dev, "new virtual clock ptp%d\n", + vclock->clock->index); + } + } + + /* Need to delete vclocks */ + if (num < ptp->n_vclocks) { + i = ptp->n_vclocks - num; + device_for_each_child_reverse(dev, &i, + unregister_vclock); + + for (i = 1; i <= ptp->n_vclocks - num; i++) + *(ptp->vclock_index + ptp->n_vclocks - i) = -1; + } + + if (num == 0) + dev_info(dev, "only physical clock in use now\n"); + else + dev_info(dev, "guarantee physical clock free running\n"); + + ptp->n_vclocks = num; + mutex_unlock(&ptp->n_vclocks_mux); + + return count; +out: + mutex_unlock(&ptp->n_vclocks_mux); + return err; +} +static DEVICE_ATTR_RW(n_vclocks); + +static ssize_t max_vclocks_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct ptp_clock *ptp = dev_get_drvdata(dev); + ssize_t size; + + size = snprintf(page, PAGE_SIZE - 1, "%d\n", ptp->max_vclocks); + + return size; +} + +static ssize_t max_vclocks_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ptp_clock *ptp = dev_get_drvdata(dev); + unsigned int *vclock_index; + int err = -EINVAL; + size_t size; + u32 max; + + if (kstrtou32(buf, 0, &max) || max == 0) + return -EINVAL; + + if (max == ptp->max_vclocks) + return count; + + if (mutex_lock_interruptible(&ptp->n_vclocks_mux)) + return -ERESTARTSYS; + + if (max < ptp->n_vclocks) + goto out; + + size = sizeof(int) * max; + vclock_index = kzalloc(size, GFP_KERNEL); + if (!vclock_index) { + err = -ENOMEM; + goto out; + } + + size = sizeof(int) * ptp->n_vclocks; + memcpy(vclock_index, ptp->vclock_index, size); + + kfree(ptp->vclock_index); + ptp->vclock_index = vclock_index; + ptp->max_vclocks = max; + + mutex_unlock(&ptp->n_vclocks_mux); + + return count; +out: + mutex_unlock(&ptp->n_vclocks_mux); + return err; +} +static DEVICE_ATTR_RW(max_vclocks); + static struct attribute *ptp_attrs[] = { &dev_attr_clock_name.attr, @@ -162,6 +316,8 @@ static struct attribute *ptp_attrs[] = { &dev_attr_fifo.attr, &dev_attr_period.attr, &dev_attr_pps_enable.attr, + &dev_attr_n_vclocks.attr, + &dev_attr_max_vclocks.attr, NULL }; @@ -183,6 +339,10 @@ static umode_t ptp_is_attribute_visible(struct kobject *kobj, } else if (attr == &dev_attr_pps_enable.attr) { if (!info->pps) mode = 0; + } else if (attr == &dev_attr_n_vclocks.attr || + attr == &dev_attr_max_vclocks.attr) { + if (ptp->is_virtual_clock) + mode = 0; } 
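Taken together, n_vclocks_store() and max_vclocks_store() above give userspace the whole life cycle: raise the ceiling if needed, write a count to spawn vclocks (which forces the physical clock free running), write 0 to tear them down again. A minimal sketch of driving the knobs, assuming the physical clock is ptp0; the /dev/ptpN indices of the created vclocks are reported in the kernel log:

#include <stdio.h>
#include <stdlib.h>

static void write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f || fputs(val, f) == EOF || fclose(f) == EOF) {
		perror(path);
		exit(1);
	}
}

int main(void)
{
	/* ceiling first: n_vclocks may never exceed max_vclocks */
	write_attr("/sys/class/ptp/ptp0/max_vclocks", "8");
	/* create two vclocks; the physical clock goes free running */
	write_attr("/sys/class/ptp/ptp0/n_vclocks", "2");
	/* ... use the new /dev/ptpN vclock devices here ... */
	/* back to 0: vclocks deleted, physical clock adjustable again */
	write_attr("/sys/class/ptp/ptp0/n_vclocks", "0");
	return 0;
}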
return mode; diff --git a/drivers/ptp/ptp_vclock.c b/drivers/ptp/ptp_vclock.c new file mode 100644 index 000000000000..e0f87c57749a --- /dev/null +++ b/drivers/ptp/ptp_vclock.c @@ -0,0 +1,219 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * PTP virtual clock driver + * + * Copyright 2021 NXP + */ +#include <linux/slab.h> +#include "ptp_private.h" + +#define PTP_VCLOCK_CC_SHIFT 31 +#define PTP_VCLOCK_CC_MULT (1 << PTP_VCLOCK_CC_SHIFT) +#define PTP_VCLOCK_FADJ_SHIFT 9 +#define PTP_VCLOCK_FADJ_DENOMINATOR 15625ULL +#define PTP_VCLOCK_REFRESH_INTERVAL (HZ * 2) + +static int ptp_vclock_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct ptp_vclock *vclock = info_to_vclock(ptp); + unsigned long flags; + s64 adj; + + adj = (s64)scaled_ppm << PTP_VCLOCK_FADJ_SHIFT; + adj = div_s64(adj, PTP_VCLOCK_FADJ_DENOMINATOR); + + spin_lock_irqsave(&vclock->lock, flags); + timecounter_read(&vclock->tc); + vclock->cc.mult = PTP_VCLOCK_CC_MULT + adj; + spin_unlock_irqrestore(&vclock->lock, flags); + + return 0; +} + +static int ptp_vclock_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct ptp_vclock *vclock = info_to_vclock(ptp); + unsigned long flags; + + spin_lock_irqsave(&vclock->lock, flags); + timecounter_adjtime(&vclock->tc, delta); + spin_unlock_irqrestore(&vclock->lock, flags); + + return 0; +} + +static int ptp_vclock_gettime(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + struct ptp_vclock *vclock = info_to_vclock(ptp); + unsigned long flags; + u64 ns; + + spin_lock_irqsave(&vclock->lock, flags); + ns = timecounter_read(&vclock->tc); + spin_unlock_irqrestore(&vclock->lock, flags); + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int ptp_vclock_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct ptp_vclock *vclock = info_to_vclock(ptp); + u64 ns = timespec64_to_ns(ts); + unsigned long flags; + + spin_lock_irqsave(&vclock->lock, flags); + timecounter_init(&vclock->tc, &vclock->cc, ns); + spin_unlock_irqrestore(&vclock->lock, flags); + + return 0; +} + +static long ptp_vclock_refresh(struct ptp_clock_info *ptp) +{ + struct ptp_vclock *vclock = info_to_vclock(ptp); + struct timespec64 ts; + + ptp_vclock_gettime(&vclock->info, &ts); + + return PTP_VCLOCK_REFRESH_INTERVAL; +} + +static const struct ptp_clock_info ptp_vclock_info = { + .owner = THIS_MODULE, + .name = "ptp virtual clock", + /* The maximum ppb value that long scaled_ppm can support */ + .max_adj = 32767999, + .adjfine = ptp_vclock_adjfine, + .adjtime = ptp_vclock_adjtime, + .gettime64 = ptp_vclock_gettime, + .settime64 = ptp_vclock_settime, + .do_aux_work = ptp_vclock_refresh, +}; + +static u64 ptp_vclock_read(const struct cyclecounter *cc) +{ + struct ptp_vclock *vclock = cc_to_vclock(cc); + struct ptp_clock *ptp = vclock->pclock; + struct timespec64 ts = {}; + + if (ptp->info->gettimex64) + ptp->info->gettimex64(ptp->info, &ts, NULL); + else + ptp->info->gettime64(ptp->info, &ts); + + return timespec64_to_ns(&ts); +} + +static const struct cyclecounter ptp_vclock_cc = { + .read = ptp_vclock_read, + .mask = CYCLECOUNTER_MASK(32), + .mult = PTP_VCLOCK_CC_MULT, + .shift = PTP_VCLOCK_CC_SHIFT, +}; + +struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock) +{ + struct ptp_vclock *vclock; + + vclock = kzalloc(sizeof(*vclock), GFP_KERNEL); + if (!vclock) + return NULL; + + vclock->pclock = pclock; + vclock->info = ptp_vclock_info; + vclock->cc = ptp_vclock_cc; + + snprintf(vclock->info.name, PTP_CLOCK_NAME_LEN, "ptp%d_virt", + pclock->index); + + 
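The adjfine conversion above is the standard scaled-ppm identity: scaled_ppm carries parts per million with a 16-bit fraction, and ((s64)scaled_ppm << 9) / 15625 is exactly the cc.mult delta for the 2^31 base multiplier, because 15625 << 22 equals 65536 * 10^6. A quick self-contained check of the constants:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	int64_t scaled_ppm = 65536;	/* exactly +1 ppm */
	int64_t adj = (scaled_ppm << 9) / 15625;

	/* +1 ppm of a 2^31 multiplier is 2^31 / 10^6 ~= 2147 */
	printf("mult delta for +1 ppm: %" PRId64 "\n", adj);
	printf("denominator identity: %s\n",
	       (15625LL << 22) == 65536LL * 1000000 ? "ok" : "broken");
	return 0;
}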
spin_lock_init(&vclock->lock); + + vclock->clock = ptp_clock_register(&vclock->info, &pclock->dev); + if (IS_ERR_OR_NULL(vclock->clock)) { + kfree(vclock); + return NULL; + } + + timecounter_init(&vclock->tc, &vclock->cc, 0); + ptp_schedule_worker(vclock->clock, PTP_VCLOCK_REFRESH_INTERVAL); + + return vclock; +} + +void ptp_vclock_unregister(struct ptp_vclock *vclock) +{ + ptp_clock_unregister(vclock->clock); + kfree(vclock); +} + +int ptp_get_vclocks_index(int pclock_index, int **vclock_index) +{ + char name[PTP_CLOCK_NAME_LEN] = ""; + struct ptp_clock *ptp; + struct device *dev; + int num = 0; + + if (pclock_index < 0) + return num; + + snprintf(name, PTP_CLOCK_NAME_LEN, "ptp%d", pclock_index); + dev = class_find_device_by_name(ptp_class, name); + if (!dev) + return num; + + ptp = dev_get_drvdata(dev); + + if (mutex_lock_interruptible(&ptp->n_vclocks_mux)) { + put_device(dev); + return num; + } + + *vclock_index = kzalloc(sizeof(int) * ptp->n_vclocks, GFP_KERNEL); + if (!(*vclock_index)) + goto out; + + memcpy(*vclock_index, ptp->vclock_index, sizeof(int) * ptp->n_vclocks); + num = ptp->n_vclocks; +out: + mutex_unlock(&ptp->n_vclocks_mux); + put_device(dev); + return num; +} +EXPORT_SYMBOL(ptp_get_vclocks_index); + +void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps, + int vclock_index) +{ + char name[PTP_CLOCK_NAME_LEN] = ""; + struct ptp_vclock *vclock; + struct ptp_clock *ptp; + unsigned long flags; + struct device *dev; + u64 ns; + + snprintf(name, PTP_CLOCK_NAME_LEN, "ptp%d", vclock_index); + dev = class_find_device_by_name(ptp_class, name); + if (!dev) + return; + + ptp = dev_get_drvdata(dev); + if (!ptp->is_virtual_clock) { + put_device(dev); + return; + } + + vclock = info_to_vclock(ptp->info); + + ns = ktime_to_ns(hwtstamps->hwtstamp); + + spin_lock_irqsave(&vclock->lock, flags); + ns = timecounter_cyc2time(&vclock->tc, ns); + spin_unlock_irqrestore(&vclock->lock, flags); + + put_device(dev); + hwtstamps->hwtstamp = ns_to_ktime(ns); +} +EXPORT_SYMBOL(ptp_convert_timestamp); diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 29dbb603bc91..232daaec56e4 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -758,6 +758,16 @@ ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings, enum ethtool_link_mode_bit_indices link_mode); /** + * ethtool_get_phc_vclocks - Derive phc vclocks information, and caller + * is responsible to free memory of vclock_index + * @dev: pointer to net_device structure + * @vclock_index: pointer to pointer of vclock index + * + * Return number of phc vclocks + */ +int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index); + +/** * ethtool_sprintf - Write formatted string to ethtool string data * @data: Pointer to start of string to update * @fmt: Format of string to write diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index aba237c0b3a2..71fac9237725 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -11,7 +11,10 @@ #include <linux/device.h> #include <linux/pps_kernel.h> #include <linux/ptp_clock.h> +#include <linux/timecounter.h> +#include <linux/skbuff.h> +#define PTP_CLOCK_NAME_LEN 32 /** * struct ptp_clock_request - request PTP clock event * @@ -134,7 +137,7 @@ struct ptp_system_timestamp { struct ptp_clock_info { struct module *owner; - char name[16]; + char name[PTP_CLOCK_NAME_LEN]; s32 max_adj; int n_alarm; int n_ext_ts; @@ -304,6 +307,27 @@ int ptp_schedule_worker(struct ptp_clock *ptp, unsigned 
long delay); */ void ptp_cancel_worker_sync(struct ptp_clock *ptp); +/** + * ptp_get_vclocks_index() - get all vclocks index on pclock, and + * caller is responsible to free memory + * of vclock_index + * + * @pclock_index: phc index of ptp pclock. + * @vclock_index: pointer to pointer of vclock index. + * + * return number of vclocks. + */ +int ptp_get_vclocks_index(int pclock_index, int **vclock_index); + +/** + * ptp_convert_timestamp() - convert timestamp to a ptp vclock time + * + * @hwtstamps: skb_shared_hwtstamps structure pointer + * @vclock_index: phc index of ptp vclock. + */ +void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps, + int vclock_index); + #else static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, struct device *parent) @@ -323,6 +347,11 @@ static inline int ptp_schedule_worker(struct ptp_clock *ptp, { return -EOPNOTSUPP; } static inline void ptp_cancel_worker_sync(struct ptp_clock *ptp) { } +static inline int ptp_get_vclocks_index(int pclock_index, int **vclock_index) +{ return 0; } +static inline void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps, + int vclock_index) +{ } #endif diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index 73af4a64a599..40296ed976a9 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -38,7 +38,7 @@ static inline bool net_busy_loop_on(void) static inline bool sk_can_busy_loop(const struct sock *sk) { - return sk->sk_ll_usec && !signal_pending(current); + return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current); } bool sk_busy_loop_end(void *p, unsigned long start_time); diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h deleted file mode 100644 index 552cf68d28d2..000000000000 --- a/include/net/caif/caif_hsi.h +++ /dev/null @@ -1,200 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) ST-Ericsson AB 2010 - * Author: Daniel Martensson / [email protected] - * Dmitry.Tarnyagin / [email protected] - */ - -#ifndef CAIF_HSI_H_ -#define CAIF_HSI_H_ - -#include <net/caif/caif_layer.h> -#include <net/caif/caif_device.h> -#include <linux/atomic.h> - -/* - * Maximum number of CAIF frames that can reside in the same HSI frame. - */ -#define CFHSI_MAX_PKTS 15 - -/* - * Maximum number of bytes used for the frame that can be embedded in the - * HSI descriptor. - */ -#define CFHSI_MAX_EMB_FRM_SZ 96 - -/* - * Decides if HSI buffers should be prefilled with 0xFF pattern for easier - * debugging. Both TX and RX buffers will be filled before the transfer. - */ -#define CFHSI_DBG_PREFILL 0 - -/* Structure describing a HSI packet descriptor. */ -#pragma pack(1) /* Byte alignment. */ -struct cfhsi_desc { - u8 header; - u8 offset; - u16 cffrm_len[CFHSI_MAX_PKTS]; - u8 emb_frm[CFHSI_MAX_EMB_FRM_SZ]; -}; -#pragma pack() /* Default alignment. */ - -/* Size of the complete HSI packet descriptor. */ -#define CFHSI_DESC_SZ (sizeof(struct cfhsi_desc)) - -/* - * Size of the complete HSI packet descriptor excluding the optional embedded - * CAIF frame. - */ -#define CFHSI_DESC_SHORT_SZ (CFHSI_DESC_SZ - CFHSI_MAX_EMB_FRM_SZ) - -/* - * Maximum bytes transferred in one transfer. - */ -#define CFHSI_MAX_CAIF_FRAME_SZ 4096 - -#define CFHSI_MAX_PAYLOAD_SZ (CFHSI_MAX_PKTS * CFHSI_MAX_CAIF_FRAME_SZ) - -/* Size of the complete HSI TX buffer. */ -#define CFHSI_BUF_SZ_TX (CFHSI_DESC_SZ + CFHSI_MAX_PAYLOAD_SZ) - -/* Size of the complete HSI RX buffer. 
*/ -#define CFHSI_BUF_SZ_RX ((2 * CFHSI_DESC_SZ) + CFHSI_MAX_PAYLOAD_SZ) - -/* Bitmasks for the HSI descriptor. */ -#define CFHSI_PIGGY_DESC (0x01 << 7) - -#define CFHSI_TX_STATE_IDLE 0 -#define CFHSI_TX_STATE_XFER 1 - -#define CFHSI_RX_STATE_DESC 0 -#define CFHSI_RX_STATE_PAYLOAD 1 - -/* Bitmasks for power management. */ -#define CFHSI_WAKE_UP 0 -#define CFHSI_WAKE_UP_ACK 1 -#define CFHSI_WAKE_DOWN_ACK 2 -#define CFHSI_AWAKE 3 -#define CFHSI_WAKELOCK_HELD 4 -#define CFHSI_SHUTDOWN 5 -#define CFHSI_FLUSH_FIFO 6 - -#ifndef CFHSI_INACTIVITY_TOUT -#define CFHSI_INACTIVITY_TOUT (1 * HZ) -#endif /* CFHSI_INACTIVITY_TOUT */ - -#ifndef CFHSI_WAKE_TOUT -#define CFHSI_WAKE_TOUT (3 * HZ) -#endif /* CFHSI_WAKE_TOUT */ - -#ifndef CFHSI_MAX_RX_RETRIES -#define CFHSI_MAX_RX_RETRIES (10 * HZ) -#endif - -/* Structure implemented by the CAIF HSI driver. */ -struct cfhsi_cb_ops { - void (*tx_done_cb) (struct cfhsi_cb_ops *drv); - void (*rx_done_cb) (struct cfhsi_cb_ops *drv); - void (*wake_up_cb) (struct cfhsi_cb_ops *drv); - void (*wake_down_cb) (struct cfhsi_cb_ops *drv); -}; - -/* Structure implemented by HSI device. */ -struct cfhsi_ops { - int (*cfhsi_up) (struct cfhsi_ops *dev); - int (*cfhsi_down) (struct cfhsi_ops *dev); - int (*cfhsi_tx) (u8 *ptr, int len, struct cfhsi_ops *dev); - int (*cfhsi_rx) (u8 *ptr, int len, struct cfhsi_ops *dev); - int (*cfhsi_wake_up) (struct cfhsi_ops *dev); - int (*cfhsi_wake_down) (struct cfhsi_ops *dev); - int (*cfhsi_get_peer_wake) (struct cfhsi_ops *dev, bool *status); - int (*cfhsi_fifo_occupancy) (struct cfhsi_ops *dev, size_t *occupancy); - int (*cfhsi_rx_cancel)(struct cfhsi_ops *dev); - struct cfhsi_cb_ops *cb_ops; -}; - -/* Structure holds status of received CAIF frames processing */ -struct cfhsi_rx_state { - int state; - int nfrms; - int pld_len; - int retries; - bool piggy_desc; -}; - -/* Priority mapping */ -enum { - CFHSI_PRIO_CTL = 0, - CFHSI_PRIO_VI, - CFHSI_PRIO_VO, - CFHSI_PRIO_BEBK, - CFHSI_PRIO_LAST, -}; - -struct cfhsi_config { - u32 inactivity_timeout; - u32 aggregation_timeout; - u32 head_align; - u32 tail_align; - u32 q_high_mark; - u32 q_low_mark; -}; - -/* Structure implemented by CAIF HSI drivers. */ -struct cfhsi { - struct caif_dev_common cfdev; - struct net_device *ndev; - struct platform_device *pdev; - struct sk_buff_head qhead[CFHSI_PRIO_LAST]; - struct cfhsi_cb_ops cb_ops; - struct cfhsi_ops *ops; - int tx_state; - struct cfhsi_rx_state rx_state; - struct cfhsi_config cfg; - int rx_len; - u8 *rx_ptr; - u8 *tx_buf; - u8 *rx_buf; - u8 *rx_flip_buf; - spinlock_t lock; - int flow_off_sent; - struct list_head list; - struct work_struct wake_up_work; - struct work_struct wake_down_work; - struct work_struct out_of_sync_work; - struct workqueue_struct *wq; - wait_queue_head_t wake_up_wait; - wait_queue_head_t wake_down_wait; - wait_queue_head_t flush_fifo_wait; - struct timer_list inactivity_timer; - struct timer_list rx_slowpath_timer; - - /* TX aggregation */ - int aggregation_len; - struct timer_list aggregation_timer; - - unsigned long bits; -}; -extern struct platform_driver cfhsi_driver; - -/** - * enum ifla_caif_hsi - CAIF HSI NetlinkRT parameters. - * @IFLA_CAIF_HSI_INACTIVITY_TOUT: Inactivity timeout before - * taking the HSI wakeline down, in milliseconds. - * When using RT Netlink to create, destroy or configure a CAIF HSI interface, - * enum ifla_caif_hsi is used to specify the configuration attributes. 
- */ -enum ifla_caif_hsi { - __IFLA_CAIF_HSI_UNSPEC, - __IFLA_CAIF_HSI_INACTIVITY_TOUT, - __IFLA_CAIF_HSI_AGGREGATION_TOUT, - __IFLA_CAIF_HSI_HEAD_ALIGN, - __IFLA_CAIF_HSI_TAIL_ALIGN, - __IFLA_CAIF_HSI_QHIGH_WATERMARK, - __IFLA_CAIF_HSI_QLOW_WATERMARK, - __IFLA_CAIF_HSI_MAX -}; - -struct cfhsi_ops *cfhsi_get_ops(void); - -#endif /* CAIF_HSI_H_ */ diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index 265fffa33dad..5859e0a16a58 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -360,8 +360,7 @@ enum { #define SCTP_SCOPE_POLICY_MAX SCTP_SCOPE_POLICY_LINK /* Based on IPv4 scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>, - * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 198.18.0.0/24, - * 192.88.99.0/24. + * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 192.88.99.0/24. * Also, RFC 8.4, non-unicast addresses are not considered valid SCTP * addresses. */ @@ -369,7 +368,6 @@ enum { ((htonl(INADDR_BROADCAST) == a) || \ ipv4_is_multicast(a) || \ ipv4_is_zeronet(a) || \ - ipv4_is_test_198(a) || \ ipv4_is_anycast_6to4(a)) /* Flags used for the bind address copy functions. */ diff --git a/include/net/sock.h b/include/net/sock.h index 8bdd80027ffb..f23cb259b0e2 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -316,7 +316,9 @@ struct bpf_local_storage; * @sk_timer: sock cleanup timer * @sk_stamp: time stamp of last packet received * @sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only - * @sk_tsflags: SO_TIMESTAMPING socket options + * @sk_tsflags: SO_TIMESTAMPING flags + * @sk_bind_phc: SO_TIMESTAMPING bind PHC index of PTP virtual clock + * for timestamping * @sk_tskey: counter to disambiguate concurrent tstamp requests * @sk_zckey: counter to order MSG_ZEROCOPY notifications * @sk_socket: Identd and reporting IO signals @@ -493,6 +495,7 @@ struct sock { seqlock_t sk_stamp_seq; #endif u16 sk_tsflags; + int sk_bind_phc; u8 sk_shutdown; u32 sk_tskey; atomic_t sk_zckey; @@ -2755,7 +2758,8 @@ void sock_def_readable(struct sock *sk); int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk); void sock_set_timestamp(struct sock *sk, int optname, bool valbool); -int sock_set_timestamping(struct sock *sk, int optname, int val); +int sock_set_timestamping(struct sock *sk, int optname, + struct so_timestamping timestamping); void sock_enable_timestamps(struct sock *sk); void sock_no_linger(struct sock *sk); diff --git a/include/net/tcp.h b/include/net/tcp.h index e668f1bf780d..17df9b047ee4 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -686,6 +686,10 @@ static inline u32 __tcp_set_rto(const struct tcp_sock *tp) static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd) { + /* mptcp hooks are only on the slow path */ + if (sk_is_mptcp((struct sock *)tp)) + return; + tp->pred_flags = htonl((tp->tcp_header_len << 26) | ntohl(TCP_FLAG_ACK) | snd_wnd); diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h index c7135c9c37a5..b3b93710eff7 100644 --- a/include/uapi/linux/ethtool_netlink.h +++ b/include/uapi/linux/ethtool_netlink.h @@ -46,6 +46,7 @@ enum { ETHTOOL_MSG_FEC_SET, ETHTOOL_MSG_MODULE_EEPROM_GET, ETHTOOL_MSG_STATS_GET, + ETHTOOL_MSG_PHC_VCLOCKS_GET, /* add new constants above here */ __ETHTOOL_MSG_USER_CNT, @@ -88,6 +89,7 @@ enum { ETHTOOL_MSG_FEC_NTF, ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY, ETHTOOL_MSG_STATS_GET_REPLY, + ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY, /* add new constants above here */ __ETHTOOL_MSG_KERNEL_CNT, @@ -440,6 +442,19 @@ enum { 
ETHTOOL_A_TSINFO_MAX = (__ETHTOOL_A_TSINFO_CNT - 1) }; +/* PHC VCLOCKS */ + +enum { + ETHTOOL_A_PHC_VCLOCKS_UNSPEC, + ETHTOOL_A_PHC_VCLOCKS_HEADER, /* nest - _A_HEADER_* */ + ETHTOOL_A_PHC_VCLOCKS_NUM, /* u32 */ + ETHTOOL_A_PHC_VCLOCKS_INDEX, /* array, s32 */ + + /* add new constants above here */ + __ETHTOOL_A_PHC_VCLOCKS_CNT, + ETHTOOL_A_PHC_VCLOCKS_MAX = (__ETHTOOL_A_PHC_VCLOCKS_CNT - 1) +}; + /* CABLE TEST */ enum { diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h index 7ed0b3d1c00a..fcc61c73a666 100644 --- a/include/uapi/linux/net_tstamp.h +++ b/include/uapi/linux/net_tstamp.h @@ -13,7 +13,7 @@ #include <linux/types.h> #include <linux/socket.h> /* for SO_TIMESTAMPING */ -/* SO_TIMESTAMPING gets an integer bit field comprised of these values */ +/* SO_TIMESTAMPING flags */ enum { SOF_TIMESTAMPING_TX_HARDWARE = (1<<0), SOF_TIMESTAMPING_TX_SOFTWARE = (1<<1), @@ -30,8 +30,9 @@ enum { SOF_TIMESTAMPING_OPT_STATS = (1<<12), SOF_TIMESTAMPING_OPT_PKTINFO = (1<<13), SOF_TIMESTAMPING_OPT_TX_SWHW = (1<<14), + SOF_TIMESTAMPING_BIND_PHC = (1 << 15), - SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_TX_SWHW, + SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_BIND_PHC, SOF_TIMESTAMPING_MASK = (SOF_TIMESTAMPING_LAST - 1) | SOF_TIMESTAMPING_LAST }; @@ -47,6 +48,18 @@ enum { SOF_TIMESTAMPING_TX_ACK) /** + * struct so_timestamping - SO_TIMESTAMPING parameter + * + * @flags: SO_TIMESTAMPING flags + * @bind_phc: Index of PTP virtual clock bound to sock. This is available + * if flag SOF_TIMESTAMPING_BIND_PHC is set. + */ +struct so_timestamping { + int flags; + int bind_phc; +}; + +/** * struct hwtstamp_config - %SIOCGHWTSTAMP and %SIOCSHWTSTAMP parameter * * @flags: no flags defined right now, must be zero for %SIOCSHWTSTAMP diff --git a/net/802/garp.c b/net/802/garp.c index 400bd857e5f5..f6012f8e59f0 100644 --- a/net/802/garp.c +++ b/net/802/garp.c @@ -203,6 +203,19 @@ static void garp_attr_destroy(struct garp_applicant *app, struct garp_attr *attr kfree(attr); } +static void garp_attr_destroy_all(struct garp_applicant *app) +{ + struct rb_node *node, *next; + struct garp_attr *attr; + + for (node = rb_first(&app->gid); + next = node ? rb_next(node) : NULL, node != NULL; + node = next) { + attr = rb_entry(node, struct garp_attr, node); + garp_attr_destroy(app, attr); + } +} + static int garp_pdu_init(struct garp_applicant *app) { struct sk_buff *skb; @@ -609,6 +622,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl spin_lock_bh(&app->lock); garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU); + garp_attr_destroy_all(app); garp_pdu_queue(app); spin_unlock_bh(&app->lock); diff --git a/net/802/mrp.c b/net/802/mrp.c index bea6e43d45a0..35e04cc5390c 100644 --- a/net/802/mrp.c +++ b/net/802/mrp.c @@ -292,6 +292,19 @@ static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr) kfree(attr); } +static void mrp_attr_destroy_all(struct mrp_applicant *app) +{ + struct rb_node *node, *next; + struct mrp_attr *attr; + + for (node = rb_first(&app->mad); + next = node ? 
rb_next(node) : NULL, node != NULL; + node = next) { + attr = rb_entry(node, struct mrp_attr, node); + mrp_attr_destroy(app, attr); + } +} + static int mrp_pdu_init(struct mrp_applicant *app) { struct sk_buff *skb; @@ -895,6 +908,7 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl) spin_lock_bh(&app->lock); mrp_mad_event(app, MRP_EVENT_TX); + mrp_attr_destroy_all(app); mrp_pdu_queue(app); spin_unlock_bh(&app->lock); diff --git a/net/core/sock.c b/net/core/sock.c index ba1c0f75cd45..cad107112204 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -139,6 +139,8 @@ #include <net/tcp.h> #include <net/busy_poll.h> +#include <linux/ethtool.h> + static DEFINE_MUTEX(proto_list_mutex); static LIST_HEAD(proto_list); @@ -810,8 +812,47 @@ void sock_set_timestamp(struct sock *sk, int optname, bool valbool) } } -int sock_set_timestamping(struct sock *sk, int optname, int val) +static int sock_timestamping_bind_phc(struct sock *sk, int phc_index) { + struct net *net = sock_net(sk); + struct net_device *dev = NULL; + bool match = false; + int *vclock_index; + int i, num; + + if (sk->sk_bound_dev_if) + dev = dev_get_by_index(net, sk->sk_bound_dev_if); + + if (!dev) { + pr_err("%s: sock not bind to device\n", __func__); + return -EOPNOTSUPP; + } + + num = ethtool_get_phc_vclocks(dev, &vclock_index); + for (i = 0; i < num; i++) { + if (*(vclock_index + i) == phc_index) { + match = true; + break; + } + } + + if (num > 0) + kfree(vclock_index); + + if (!match) + return -EINVAL; + + sk->sk_bind_phc = phc_index; + + return 0; +} + +int sock_set_timestamping(struct sock *sk, int optname, + struct so_timestamping timestamping) +{ + int val = timestamping.flags; + int ret; + if (val & ~SOF_TIMESTAMPING_MASK) return -EINVAL; @@ -832,6 +873,12 @@ int sock_set_timestamping(struct sock *sk, int optname, int val) !(val & SOF_TIMESTAMPING_OPT_TSONLY)) return -EINVAL; + if (val & SOF_TIMESTAMPING_BIND_PHC) { + ret = sock_timestamping_bind_phc(sk, timestamping.bind_phc); + if (ret) + return ret; + } + sk->sk_tsflags = val; sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW); @@ -907,6 +954,7 @@ EXPORT_SYMBOL(sock_set_mark); int sock_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { + struct so_timestamping timestamping; struct sock_txtime sk_txtime; struct sock *sk = sock->sk; int val; @@ -1073,7 +1121,15 @@ set_sndbuf: case SO_TIMESTAMPING_NEW: case SO_TIMESTAMPING_OLD: - ret = sock_set_timestamping(sk, optname, val); + if (optlen == sizeof(timestamping)) { + if (copy_from_sockptr(&timestamping, optval, + sizeof(timestamping))) + return -EFAULT; + } else { + memset(&timestamping, 0, sizeof(timestamping)); + timestamping.flags = val; + } + ret = sock_set_timestamping(sk, optname, timestamping); break; case SO_RCVLOWAT: @@ -1201,7 +1257,7 @@ set_sndbuf: if (val < 0) ret = -EINVAL; else - sk->sk_ll_usec = val; + WRITE_ONCE(sk->sk_ll_usec, val); } break; case SO_PREFER_BUSY_POLL: @@ -1348,6 +1404,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, struct __kernel_old_timeval tm; struct __kernel_sock_timeval stm; struct sock_txtime txtime; + struct so_timestamping timestamping; } v; int lv = sizeof(int); @@ -1451,7 +1508,9 @@ int sock_getsockopt(struct socket *sock, int level, int optname, break; case SO_TIMESTAMPING_OLD: - v.val = sk->sk_tsflags; + lv = sizeof(v.timestamping); + v.timestamping.flags = sk->sk_tsflags; + v.timestamping.bind_phc = sk->sk_bind_phc; break; case SO_RCVTIMEO_OLD: diff --git a/net/dsa/switch.c
b/net/dsa/switch.c index af71b8638098..248455145982 100644 --- a/net/dsa/switch.c +++ b/net/dsa/switch.c @@ -427,7 +427,7 @@ static int dsa_switch_lag_join(struct dsa_switch *ds, info->port, info->lag, info->info); - return 0; + return -EOPNOTSUPP; } static int dsa_switch_lag_leave(struct dsa_switch *ds, @@ -440,7 +440,7 @@ static int dsa_switch_lag_leave(struct dsa_switch *ds, return ds->ops->crosschip_lag_leave(ds, info->sw_index, info->port, info->lag); - return 0; + return -EOPNOTSUPP; } static int dsa_switch_mdb_add(struct dsa_switch *ds, diff --git a/net/ethtool/Makefile b/net/ethtool/Makefile index 723c9a8a8cdf..0a19470efbfb 100644 --- a/net/ethtool/Makefile +++ b/net/ethtool/Makefile @@ -7,4 +7,4 @@ obj-$(CONFIG_ETHTOOL_NETLINK) += ethtool_nl.o ethtool_nl-y := netlink.o bitset.o strset.o linkinfo.o linkmodes.o \ linkstate.o debug.o wol.o features.o privflags.o rings.o \ channels.o coalesce.o pause.o eee.o tsinfo.o cabletest.o \ - tunnels.o fec.o eeprom.o stats.o + tunnels.o fec.o eeprom.o stats.o phc_vclocks.o diff --git a/net/ethtool/common.c b/net/ethtool/common.c index f9dcbad84788..c63e0739dc6a 100644 --- a/net/ethtool/common.c +++ b/net/ethtool/common.c @@ -4,6 +4,7 @@ #include <linux/net_tstamp.h> #include <linux/phy.h> #include <linux/rtnetlink.h> +#include <linux/ptp_clock_kernel.h> #include "common.h" @@ -397,6 +398,7 @@ const char sof_timestamping_names[][ETH_GSTRING_LEN] = { [const_ilog2(SOF_TIMESTAMPING_OPT_STATS)] = "option-stats", [const_ilog2(SOF_TIMESTAMPING_OPT_PKTINFO)] = "option-pktinfo", [const_ilog2(SOF_TIMESTAMPING_OPT_TX_SWHW)] = "option-tx-swhw", + [const_ilog2(SOF_TIMESTAMPING_BIND_PHC)] = "bind-phc", }; static_assert(ARRAY_SIZE(sof_timestamping_names) == __SOF_TIMESTAMPING_CNT); @@ -554,6 +556,18 @@ int __ethtool_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) return 0; } +int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index) +{ + struct ethtool_ts_info info = { }; + int num = 0; + + if (!__ethtool_get_ts_info(dev, &info)) + num = ptp_get_vclocks_index(info.phc_index, vclock_index); + + return num; +} +EXPORT_SYMBOL(ethtool_get_phc_vclocks); + const struct ethtool_phy_ops *ethtool_phy_ops; void ethtool_set_ethtool_phy_ops(const struct ethtool_phy_ops *ops) diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c index a7346346114f..73e0f5b626bf 100644 --- a/net/ethtool/netlink.c +++ b/net/ethtool/netlink.c @@ -248,6 +248,7 @@ ethnl_default_requests[__ETHTOOL_MSG_USER_CNT] = { [ETHTOOL_MSG_TSINFO_GET] = &ethnl_tsinfo_request_ops, [ETHTOOL_MSG_MODULE_EEPROM_GET] = &ethnl_module_eeprom_request_ops, [ETHTOOL_MSG_STATS_GET] = &ethnl_stats_request_ops, + [ETHTOOL_MSG_PHC_VCLOCKS_GET] = &ethnl_phc_vclocks_request_ops, }; static struct ethnl_dump_ctx *ethnl_dump_context(struct netlink_callback *cb) @@ -958,6 +959,15 @@ static const struct genl_ops ethtool_genl_ops[] = { .policy = ethnl_stats_get_policy, .maxattr = ARRAY_SIZE(ethnl_stats_get_policy) - 1, }, + { + .cmd = ETHTOOL_MSG_PHC_VCLOCKS_GET, + .doit = ethnl_default_doit, + .start = ethnl_default_start, + .dumpit = ethnl_default_dumpit, + .done = ethnl_default_done, + .policy = ethnl_phc_vclocks_get_policy, + .maxattr = ARRAY_SIZE(ethnl_phc_vclocks_get_policy) - 1, + }, }; static const struct genl_multicast_group ethtool_nl_mcgrps[] = { diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h index 3e25a47fd482..3fc395c86702 100644 --- a/net/ethtool/netlink.h +++ b/net/ethtool/netlink.h @@ -347,6 +347,7 @@ extern const struct ethnl_request_ops ethnl_tsinfo_request_ops; extern const
struct ethnl_request_ops ethnl_fec_request_ops; extern const struct ethnl_request_ops ethnl_module_eeprom_request_ops; extern const struct ethnl_request_ops ethnl_stats_request_ops; +extern const struct ethnl_request_ops ethnl_phc_vclocks_request_ops; extern const struct nla_policy ethnl_header_policy[ETHTOOL_A_HEADER_FLAGS + 1]; extern const struct nla_policy ethnl_header_policy_stats[ETHTOOL_A_HEADER_FLAGS + 1]; @@ -382,6 +383,7 @@ extern const struct nla_policy ethnl_fec_get_policy[ETHTOOL_A_FEC_HEADER + 1]; extern const struct nla_policy ethnl_fec_set_policy[ETHTOOL_A_FEC_AUTO + 1]; extern const struct nla_policy ethnl_module_eeprom_get_policy[ETHTOOL_A_MODULE_EEPROM_I2C_ADDRESS + 1]; extern const struct nla_policy ethnl_stats_get_policy[ETHTOOL_A_STATS_GROUPS + 1]; +extern const struct nla_policy ethnl_phc_vclocks_get_policy[ETHTOOL_A_PHC_VCLOCKS_HEADER + 1]; int ethnl_set_linkinfo(struct sk_buff *skb, struct genl_info *info); int ethnl_set_linkmodes(struct sk_buff *skb, struct genl_info *info); diff --git a/net/ethtool/phc_vclocks.c b/net/ethtool/phc_vclocks.c new file mode 100644 index 000000000000..637b2f5297d5 --- /dev/null +++ b/net/ethtool/phc_vclocks.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2021 NXP + */ +#include "netlink.h" +#include "common.h" + +struct phc_vclocks_req_info { + struct ethnl_req_info base; +}; + +struct phc_vclocks_reply_data { + struct ethnl_reply_data base; + int num; + int *index; +}; + +#define PHC_VCLOCKS_REPDATA(__reply_base) \ + container_of(__reply_base, struct phc_vclocks_reply_data, base) + +const struct nla_policy ethnl_phc_vclocks_get_policy[] = { + [ETHTOOL_A_PHC_VCLOCKS_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy), +}; + +static int phc_vclocks_prepare_data(const struct ethnl_req_info *req_base, + struct ethnl_reply_data *reply_base, + struct genl_info *info) +{ + struct phc_vclocks_reply_data *data = PHC_VCLOCKS_REPDATA(reply_base); + struct net_device *dev = reply_base->dev; + int ret; + + ret = ethnl_ops_begin(dev); + if (ret < 0) + return ret; + data->num = ethtool_get_phc_vclocks(dev, &data->index); + ethnl_ops_complete(dev); + + return ret; +} + +static int phc_vclocks_reply_size(const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct phc_vclocks_reply_data *data = + PHC_VCLOCKS_REPDATA(reply_base); + int len = 0; + + if (data->num > 0) { + len += nla_total_size(sizeof(u32)); + len += nla_total_size(sizeof(s32) * data->num); + } + + return len; +} + +static int phc_vclocks_fill_reply(struct sk_buff *skb, + const struct ethnl_req_info *req_base, + const struct ethnl_reply_data *reply_base) +{ + const struct phc_vclocks_reply_data *data = + PHC_VCLOCKS_REPDATA(reply_base); + + if (data->num <= 0) + return 0; + + if (nla_put_u32(skb, ETHTOOL_A_PHC_VCLOCKS_NUM, data->num) || + nla_put(skb, ETHTOOL_A_PHC_VCLOCKS_INDEX, + sizeof(s32) * data->num, data->index)) + return -EMSGSIZE; + + return 0; +} + +static void phc_vclocks_cleanup_data(struct ethnl_reply_data *reply_base) +{ + const struct phc_vclocks_reply_data *data = + PHC_VCLOCKS_REPDATA(reply_base); + + kfree(data->index); +} + +const struct ethnl_request_ops ethnl_phc_vclocks_request_ops = { + .request_cmd = ETHTOOL_MSG_PHC_VCLOCKS_GET, + .reply_cmd = ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY, + .hdr_attr = ETHTOOL_A_PHC_VCLOCKS_HEADER, + .req_info_size = sizeof(struct phc_vclocks_req_info), + .reply_data_size = sizeof(struct phc_vclocks_reply_data), + + .prepare_data = phc_vclocks_prepare_data, + .reply_size = 
phc_vclocks_reply_size, + .fill_reply = phc_vclocks_fill_reply, + .cleanup_data = phc_vclocks_cleanup_data, +}; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 62682807b4b2..62cd4cd52e84 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1102,7 +1102,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) } ipcm_init_sk(&ipc, inet); - ipc.gso_size = up->gso_size; + ipc.gso_size = READ_ONCE(up->gso_size); if (msg->msg_controllen) { err = udp_cmsg_send(sk, msg, &ipc.gso_size); @@ -2695,7 +2695,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, case UDP_SEGMENT: if (val < 0 || val > USHRT_MAX) return -EINVAL; - up->gso_size = val; + WRITE_ONCE(up->gso_size, val); break; case UDP_GRO: @@ -2790,7 +2790,7 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname, break; case UDP_SEGMENT: - val = up->gso_size; + val = READ_ONCE(up->gso_size); break; case UDP_GRO: diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 368972dbd919..0cc7ba531b34 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1296,7 +1296,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); ipcm6_init(&ipc6); - ipc6.gso_size = up->gso_size; + ipc6.gso_size = READ_ONCE(up->gso_size); ipc6.sockc.tsflags = sk->sk_tsflags; ipc6.sockc.mark = sk->sk_mark; diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c index 092d1f635d27..8c03afac5ca0 100644 --- a/net/mptcp/sockopt.c +++ b/net/mptcp/sockopt.c @@ -157,19 +157,7 @@ static int mptcp_setsockopt_sol_socket_tstamp(struct mptcp_sock *msk, int optnam struct sock *ssk = mptcp_subflow_tcp_sock(subflow); bool slow = lock_sock_fast(ssk); - switch (optname) { - case SO_TIMESTAMP_OLD: - case SO_TIMESTAMP_NEW: - case SO_TIMESTAMPNS_OLD: - case SO_TIMESTAMPNS_NEW: - sock_set_timestamp(sk, optname, !!val); - break; - case SO_TIMESTAMPING_NEW: - case SO_TIMESTAMPING_OLD: - sock_set_timestamping(sk, optname, val); - break; - } - + sock_set_timestamp(sk, optname, !!val); unlock_sock_fast(ssk, slow); } @@ -178,7 +166,8 @@ static int mptcp_setsockopt_sol_socket_tstamp(struct mptcp_sock *msk, int optnam } static int mptcp_setsockopt_sol_socket_int(struct mptcp_sock *msk, int optname, - sockptr_t optval, unsigned int optlen) + sockptr_t optval, + unsigned int optlen) { int val, ret; @@ -205,14 +194,56 @@ static int mptcp_setsockopt_sol_socket_int(struct mptcp_sock *msk, int optname, case SO_TIMESTAMP_NEW: case SO_TIMESTAMPNS_OLD: case SO_TIMESTAMPNS_NEW: - case SO_TIMESTAMPING_OLD: - case SO_TIMESTAMPING_NEW: return mptcp_setsockopt_sol_socket_tstamp(msk, optname, val); } return -ENOPROTOOPT; } +static int mptcp_setsockopt_sol_socket_timestamping(struct mptcp_sock *msk, + int optname, + sockptr_t optval, + unsigned int optlen) +{ + struct mptcp_subflow_context *subflow; + struct sock *sk = (struct sock *)msk; + struct so_timestamping timestamping; + int ret; + + if (optlen == sizeof(timestamping)) { + if (copy_from_sockptr(&timestamping, optval, + sizeof(timestamping))) + return -EFAULT; + } else if (optlen == sizeof(int)) { + memset(&timestamping, 0, sizeof(timestamping)); + + if (copy_from_sockptr(&timestamping.flags, optval, sizeof(int))) + return -EFAULT; + } else { + return -EINVAL; + } + + ret = sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname, + KERNEL_SOCKPTR(&timestamping), + sizeof(timestamping)); + if (ret) + return ret; + + lock_sock(sk); + + mptcp_for_each_subflow(msk, subflow) { + struct sock *ssk = mptcp_subflow_tcp_sock(subflow); + bool slow = lock_sock_fast(ssk); + +
sock_set_timestamping(sk, optname, timestamping); + unlock_sock_fast(ssk, slow); + } + + release_sock(sk); + + return 0; +} + static int mptcp_setsockopt_sol_socket_linger(struct mptcp_sock *msk, sockptr_t optval, unsigned int optlen) { @@ -299,9 +330,12 @@ static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname, case SO_TIMESTAMP_NEW: case SO_TIMESTAMPNS_OLD: case SO_TIMESTAMPNS_NEW: + return mptcp_setsockopt_sol_socket_int(msk, optname, optval, + optlen); case SO_TIMESTAMPING_OLD: case SO_TIMESTAMPING_NEW: - return mptcp_setsockopt_sol_socket_int(msk, optname, optval, optlen); + return mptcp_setsockopt_sol_socket_timestamping(msk, optname, + optval, optlen); case SO_LINGER: return mptcp_setsockopt_sol_socket_linger(msk, optval, optlen); case SO_RCVLOWAT: diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c index c89c8da99f1a..d4a2db0b2299 100644 --- a/net/openvswitch/flow_table.c +++ b/net/openvswitch/flow_table.c @@ -670,13 +670,13 @@ static bool cmp_key(const struct sw_flow_key *key1, { const long *cp1 = (const long *)((const u8 *)key1 + key_start); const long *cp2 = (const long *)((const u8 *)key2 + key_start); - long diffs = 0; int i; for (i = key_start; i < key_end; i += sizeof(long)) - diffs |= *cp1++ ^ *cp2++; + if (*cp1++ ^ *cp2++) + return false; - return diffs == 0; + return true; } static bool flow_cmp_masked_key(const struct sw_flow *flow, diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 3c1fbf38f4f7..ec0f52567c16 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -398,7 +398,8 @@ static enum sctp_scope sctp_v4_scope(union sctp_addr *addr) retval = SCTP_SCOPE_LINK; } else if (ipv4_is_private_10(addr->v4.sin_addr.s_addr) || ipv4_is_private_172(addr->v4.sin_addr.s_addr) || - ipv4_is_private_192(addr->v4.sin_addr.s_addr)) { + ipv4_is_private_192(addr->v4.sin_addr.s_addr) || + ipv4_is_test_198(addr->v4.sin_addr.s_addr)) { retval = SCTP_SCOPE_PRIVATE; } else { retval = SCTP_SCOPE_GLOBAL; diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 6c08e5048d38..b8fa8f1a7277 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -1163,7 +1163,7 @@ struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc, const struct sctp_transport *transport, __u32 probe_size) { - struct sctp_sender_hb_info hbinfo; + struct sctp_sender_hb_info hbinfo = {}; struct sctp_chunk *retval; retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT, 0, diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 5f23804f21c7..397a6244dd97 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -335,10 +335,13 @@ void sctp_transport_pl_recv(struct sctp_transport *t) t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); sctp_assoc_sync_pmtu(t->asoc); } - } else if (t->pl.state == SCTP_PL_COMPLETE && ++t->pl.raise_count == 30) { - /* Raise probe_size again after 30 * interval in Search Complete */ - t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */ - t->pl.probe_size += SCTP_PL_MIN_STEP; + } else if (t->pl.state == SCTP_PL_COMPLETE) { + t->pl.raise_count++; + if (t->pl.raise_count == 30) { + /* Raise probe_size again after 30 * interval in Search Complete */ + t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */ + t->pl.probe_size += SCTP_PL_MIN_STEP; + } } } diff --git a/net/socket.c b/net/socket.c index bd9233da2497..0b2dad3bdf7f 100644 --- a/net/socket.c +++ b/net/socket.c @@ -104,6 +104,7 @@ #include <linux/sockios.h> #include <net/busy_poll.h> #include <linux/errqueue.h> 
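With sock_setsockopt() and the MPTCP path above both accepting the extended layout, binding a socket to a vclock is a two-step sequence from userspace: the device must be pinned first, because sock_timestamping_bind_phc() validates the index against the vclocks of sk_bound_dev_if. A hedged sketch; the interface name and vclock index are placeholders, and the uapi headers must already carry SOF_TIMESTAMPING_BIND_PHC and struct so_timestamping:

#include <linux/net_tstamp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct so_timestamping ts = {
		.flags = SOF_TIMESTAMPING_RX_HARDWARE |
			 SOF_TIMESTAMPING_RAW_HARDWARE |
			 SOF_TIMESTAMPING_BIND_PHC,
		.bind_phc = 1,			/* assumed vclock index */
	};
	const char ifname[] = "eth0";		/* assumed interface */
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	/* bind_phc is validated against the bound device's vclocks,
	 * so the device binding has to come first */
	if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
		       ifname, strlen(ifname)) < 0)
		perror("SO_BINDTODEVICE");
	if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
		       &ts, sizeof(ts)) < 0)
		perror("SO_TIMESTAMPING");
	close(fd);
	return 0;
}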
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index c89c8da99f1a..d4a2db0b2299 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -670,13 +670,13 @@ static bool cmp_key(const struct sw_flow_key *key1,
 {
 	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
 	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
-	long diffs = 0;
 	int i;
 
 	for (i = key_start; i < key_end; i += sizeof(long))
-		diffs |= *cp1++ ^ *cp2++;
+		if (*cp1++ ^ *cp2++)
+			return false;
 
-	return diffs == 0;
+	return true;
 }
 
 static bool flow_cmp_masked_key(const struct sw_flow *flow,
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 3c1fbf38f4f7..ec0f52567c16 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -398,7 +398,8 @@ static enum sctp_scope sctp_v4_scope(union sctp_addr *addr)
 		retval = SCTP_SCOPE_LINK;
 	} else if (ipv4_is_private_10(addr->v4.sin_addr.s_addr) ||
 		   ipv4_is_private_172(addr->v4.sin_addr.s_addr) ||
-		   ipv4_is_private_192(addr->v4.sin_addr.s_addr)) {
+		   ipv4_is_private_192(addr->v4.sin_addr.s_addr) ||
+		   ipv4_is_test_198(addr->v4.sin_addr.s_addr)) {
 		retval = SCTP_SCOPE_PRIVATE;
 	} else {
 		retval = SCTP_SCOPE_GLOBAL;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 6c08e5048d38..b8fa8f1a7277 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1163,7 +1163,7 @@ struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
 					const struct sctp_transport *transport,
 					__u32 probe_size)
 {
-	struct sctp_sender_hb_info hbinfo;
+	struct sctp_sender_hb_info hbinfo = {};
 	struct sctp_chunk *retval;
 
 	retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT, 0,
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 5f23804f21c7..397a6244dd97 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -335,10 +335,13 @@ void sctp_transport_pl_recv(struct sctp_transport *t)
 			t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
 			sctp_assoc_sync_pmtu(t->asoc);
 		}
-	} else if (t->pl.state == SCTP_PL_COMPLETE && ++t->pl.raise_count == 30) {
-		/* Raise probe_size again after 30 * interval in Search Complete */
-		t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */
-		t->pl.probe_size += SCTP_PL_MIN_STEP;
+	} else if (t->pl.state == SCTP_PL_COMPLETE) {
+		t->pl.raise_count++;
+		if (t->pl.raise_count == 30) {
+			/* Raise probe_size again after 30 * interval in Search Complete */
+			t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */
+			t->pl.probe_size += SCTP_PL_MIN_STEP;
+		}
 	}
 }
 
diff --git a/net/socket.c b/net/socket.c
index bd9233da2497..0b2dad3bdf7f 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -104,6 +104,7 @@
 #include <linux/sockios.h>
 #include <net/busy_poll.h>
 #include <linux/errqueue.h>
+#include <linux/ptp_clock_kernel.h>
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
 unsigned int sysctl_net_busy_read __read_mostly;
@@ -873,12 +874,18 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
 		empty = 0;
 	if (shhwtstamps &&
 	    (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
-	    !skb_is_swtx_tstamp(skb, false_tstamp) &&
-	    ktime_to_timespec64_cond(shhwtstamps->hwtstamp, tss.ts + 2)) {
-		empty = 0;
-		if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_PKTINFO) &&
-		    !skb_is_err_queue(skb))
-			put_ts_pktinfo(msg, skb);
+	    !skb_is_swtx_tstamp(skb, false_tstamp)) {
+		if (sk->sk_tsflags & SOF_TIMESTAMPING_BIND_PHC)
+			ptp_convert_timestamp(shhwtstamps, sk->sk_bind_phc);
+
+		if (ktime_to_timespec64_cond(shhwtstamps->hwtstamp,
+					     tss.ts + 2)) {
+			empty = 0;
+
+			if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_PKTINFO) &&
+			    !skb_is_err_queue(skb))
+				put_ts_pktinfo(msg, skb);
+		}
 	}
 	if (!empty) {
 		if (sock_flag(sk, SOCK_TSTAMP_NEW))
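
On the receive side, the net/socket.c hunk above makes __sock_recv_timestamp() run the raw hardware timestamp through ptp_convert_timestamp() into the bound vclock's time domain before filling slot ts[2] of the SCM_TIMESTAMPING control message. A sketch, not part of the diff, of reading that slot from userspace; the helper name is illustrative and error handling is trimmed:

/* Hedged sketch: extract the hardware timestamp (raw, or converted to
 * the bound PHC vclock when SOF_TIMESTAMPING_BIND_PHC is set) from the
 * control messages returned by recvmsg().
 */
#include <linux/errqueue.h>	/* struct scm_timestamping */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

#ifndef SCM_TIMESTAMPING
#define SCM_TIMESTAMPING SO_TIMESTAMPING
#endif

static void print_hw_tstamp(struct msghdr *msg)
{
	struct cmsghdr *cm;

	for (cm = CMSG_FIRSTHDR(msg); cm; cm = CMSG_NXTHDR(msg, cm)) {
		struct scm_timestamping tss;

		if (cm->cmsg_level != SOL_SOCKET ||
		    cm->cmsg_type != SCM_TIMESTAMPING)
			continue;

		memcpy(&tss, CMSG_DATA(cm), sizeof(tss));
		/* ts[0] is the software stamp; ts[2] is the hardware stamp
		 * that __sock_recv_timestamp() fills in the hunk above
		 */
		printf("hw ts: %lld.%09ld\n",
		       (long long)tss.ts[2].tv_sec, tss.ts[2].tv_nsec);
	}
}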
"SOF_TIMESTAMPING_RX_SOFTWARE")) - so_timestamping_flags |= SOF_TIMESTAMPING_RX_SOFTWARE; + so_timestamping.flags |= SOF_TIMESTAMPING_RX_SOFTWARE; else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_SOFTWARE")) - so_timestamping_flags |= SOF_TIMESTAMPING_SOFTWARE; + so_timestamping.flags |= SOF_TIMESTAMPING_SOFTWARE; else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_RAW_HARDWARE")) - so_timestamping_flags |= SOF_TIMESTAMPING_RAW_HARDWARE; + so_timestamping.flags |= SOF_TIMESTAMPING_RAW_HARDWARE; + else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_BIND_PHC")) + so_timestamping.flags |= SOF_TIMESTAMPING_BIND_PHC; else usage(argv[i]); } @@ -385,10 +394,10 @@ int main(int argc, char **argv) hwtstamp.ifr_data = (void *)&hwconfig; memset(&hwconfig, 0, sizeof(hwconfig)); hwconfig.tx_type = - (so_timestamping_flags & SOF_TIMESTAMPING_TX_HARDWARE) ? + (so_timestamping.flags & SOF_TIMESTAMPING_TX_HARDWARE) ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; hwconfig.rx_filter = - (so_timestamping_flags & SOF_TIMESTAMPING_RX_HARDWARE) ? + (so_timestamping.flags & SOF_TIMESTAMPING_RX_HARDWARE) ? ptpv2 ? HWTSTAMP_FILTER_PTP_V2_L4_SYNC : HWTSTAMP_FILTER_PTP_V1_L4_SYNC : HWTSTAMP_FILTER_NONE; hwconfig_requested = hwconfig; @@ -413,6 +422,9 @@ int main(int argc, char **argv) sizeof(struct sockaddr_in)) < 0) bail("bind"); + if (setsockopt(sock, SOL_SOCKET, SO_BINDTODEVICE, interface, if_len)) + bail("bind device"); + /* set multicast group for outgoing packets */ inet_aton("224.0.1.130", &iaddr); /* alternate PTP domain 1 */ addr.sin_addr = iaddr; @@ -444,10 +456,9 @@ int main(int argc, char **argv) &enabled, sizeof(enabled)) < 0) bail("setsockopt SO_TIMESTAMPNS"); - if (so_timestamping_flags && - setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING, - &so_timestamping_flags, - sizeof(so_timestamping_flags)) < 0) + if (so_timestamping.flags && + setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING, &so_timestamping, + sizeof(so_timestamping)) < 0) bail("setsockopt SO_TIMESTAMPING"); /* request IP_PKTINFO for debugging purposes */ @@ -468,14 +479,18 @@ int main(int argc, char **argv) else printf("SO_TIMESTAMPNS %d\n", val); - if (getsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING, &val, &len) < 0) { + len = sizeof(so_timestamping_get); + if (getsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING, &so_timestamping_get, + &len) < 0) { printf("%s: %s\n", "getsockopt SO_TIMESTAMPING", strerror(errno)); } else { - printf("SO_TIMESTAMPING %d\n", val); - if (val != so_timestamping_flags) - printf(" not the expected value %d\n", - so_timestamping_flags); + printf("SO_TIMESTAMPING flags %d, bind phc %d\n", + so_timestamping_get.flags, so_timestamping_get.bind_phc); + if (so_timestamping_get.flags != so_timestamping.flags || + so_timestamping_get.bind_phc != so_timestamping.bind_phc) + printf(" not expected, flags %d, bind phc %d\n", + so_timestamping.flags, so_timestamping.bind_phc); } /* send packets forever every five seconds */ |