Diffstat (limited to 'drivers/net')
974 files changed, 48339 insertions, 23350 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 8ca0bc223b30..9920b3a68ed1 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -290,6 +290,19 @@ config GTP To compile this drivers as a module, choose M here: the module will be called gtp. +config PFCP + tristate "Packet Forwarding Control Protocol (PFCP)" + depends on INET + select NET_UDP_TUNNEL + help + This allows one to create PFCP virtual interfaces that allows to + set up software and hardware offload of PFCP packets. + Note that this module does not support PFCP protocol in the kernel space. + There is no support for parsing any PFCP messages. + + To compile this drivers as a module, choose M here: the module + will be called pfcp. + config AMT tristate "Automatic Multicast Tunneling (AMT)" depends on INET && IP_MULTICAST @@ -507,7 +520,7 @@ source "drivers/net/ipa/Kconfig" config NET_SB1000 tristate "General Instruments Surfboard 1000" - depends on PNP + depends on ISA && PNP help This is a driver for the General Instrument (also known as NextLevel) SURFboard 1000 internal @@ -627,6 +640,7 @@ config NETDEVSIM depends on PSAMPLE || PSAMPLE=n depends on PTP_1588_CLOCK_MOCK || PTP_1588_CLOCK_MOCK=n select NET_DEVLINK + select PAGE_POOL help This driver is a developer testing tool and software model that can be used to test various control path networking APIs, especially diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 7cab36f94782..9c053673d6b2 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -38,6 +38,7 @@ obj-$(CONFIG_GENEVE) += geneve.o obj-$(CONFIG_BAREUDP) += bareudp.o obj-$(CONFIG_GTP) += gtp.o obj-$(CONFIG_NLMON) += nlmon.o +obj-$(CONFIG_PFCP) += pfcp.o obj-$(CONFIG_NET_VRF) += vrf.o obj-$(CONFIG_VSOCKMON) += vsockmon.o obj-$(CONFIG_MHI_NET) += mhi_net.o diff --git a/drivers/net/arcnet/Kconfig b/drivers/net/arcnet/Kconfig index a51b9dab6d3a..d1d07a1d4fbc 100644 --- a/drivers/net/arcnet/Kconfig +++ b/drivers/net/arcnet/Kconfig @@ -4,7 +4,7 @@ # menuconfig ARCNET - depends on NETDEVICES && (ISA || PCI || PCMCIA) + depends on NETDEVICES && (ISA || PCI || PCMCIA) && HAS_IOPORT tristate "ARCnet support" help If you have a network card of this type, say Y and check out the diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h index b54275389f8a..bee60b377d7c 100644 --- a/drivers/net/arcnet/arcdevice.h +++ b/drivers/net/arcnet/arcdevice.h @@ -16,6 +16,7 @@ #ifdef __KERNEL__ #include <linux/interrupt.h> +#include <linux/workqueue.h> /* * RECON_THRESHOLD is the maximum number of RECON messages to receive @@ -268,7 +269,7 @@ struct arcnet_local { struct net_device *dev; int reply_status; - struct tasklet_struct reply_tasklet; + struct work_struct reply_work; /* * Buffer management: an ARCnet card has 4 x 512-byte buffers, each of diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index 166bfc3c8e6c..530c15d6a5eb 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c @@ -54,6 +54,7 @@ #include <linux/errqueue.h> #include <linux/leds.h> +#include <linux/workqueue.h> #include "arcdevice.h" #include "com9026.h" @@ -424,9 +425,9 @@ out: rtnl_unlock(); } -static void arcnet_reply_tasklet(struct tasklet_struct *t) +static void arcnet_reply_work(struct work_struct *t) { - struct arcnet_local *lp = from_tasklet(lp, t, reply_tasklet); + struct arcnet_local *lp = from_work(lp, t, reply_work); struct sk_buff *ackskb, *skb; struct sock_exterr_skb *serr; @@ -527,7 +528,7 @@ int arcnet_open(struct net_device *dev) arc_cont(D_PROTO, "\n"); 
} - tasklet_setup(&lp->reply_tasklet, arcnet_reply_tasklet); + INIT_WORK(&lp->reply_work, arcnet_reply_work); arc_printk(D_INIT, dev, "arcnet_open: resetting card.\n"); @@ -620,7 +621,7 @@ int arcnet_close(struct net_device *dev) netif_stop_queue(dev); netif_carrier_off(dev); - tasklet_kill(&lp->reply_tasklet); + cancel_work_sync(&lp->reply_work); /* flush TX and disable RX */ lp->hw.intmask(dev, 0); @@ -984,7 +985,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) ->ack_tx(dev, ackstatus); } lp->reply_status = ackstatus; - tasklet_hi_schedule(&lp->reply_tasklet); + queue_work(system_bh_highpri_wq, &lp->reply_work); } if (lp->cur_tx != -1) release_arcbuf(dev, lp->cur_tx); diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index 339db6e4a1d5..d5c56ca91b77 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -61,6 +61,7 @@ struct bareudp_dev { static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) { struct metadata_dst *tun_dst = NULL; + IP_TUNNEL_DECLARE_FLAGS(key) = { }; struct bareudp_dev *bareudp; unsigned short family; unsigned int len; @@ -137,7 +138,10 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) bareudp->dev->stats.rx_dropped++; goto drop; } - tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0); + + __set_bit(IP_TUNNEL_KEY_BIT, key); + + tun_dst = udp_tun_rx_dst(skb, family, key, 0, 0); if (!tun_dst) { bareudp->dev->stats.rx_dropped++; goto drop; @@ -285,10 +289,10 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct bareudp_dev *bareudp, const struct ip_tunnel_info *info) { + bool udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags); bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev)); bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct socket *sock = rcu_dereference(bareudp->sock); - bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM); const struct ip_tunnel_key *key = &info->key; struct rtable *rt; __be16 sport, df; @@ -316,7 +320,8 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev, tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; - df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; + df = test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, key->tun_flags) ? 
+ htons(IP_DF) : 0; skb_scrub_packet(skb, xnet); err = -ENOSPC; @@ -338,7 +343,8 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev, udp_tunnel_xmit_skb(rt, sock->sk, skb, saddr, info->key.u.ipv4.dst, tos, ttl, df, sport, bareudp->port, !net_eq(bareudp->net, dev_net(bareudp->dev)), - !(info->key.tun_flags & TUNNEL_CSUM)); + !test_bit(IP_TUNNEL_CSUM_BIT, + info->key.tun_flags)); return 0; free_dst: @@ -350,10 +356,10 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct bareudp_dev *bareudp, const struct ip_tunnel_info *info) { + bool udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags); bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev)); bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct socket *sock = rcu_dereference(bareudp->sock); - bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM); const struct ip_tunnel_key *key = &info->key; struct dst_entry *dst = NULL; struct in6_addr saddr, daddr; @@ -402,7 +408,8 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev, udp_tunnel6_xmit_skb(dst, sock->sk, skb, dev, &saddr, &daddr, prio, ttl, info->key.label, sport, bareudp->port, - !(info->key.tun_flags & TUNNEL_CSUM)); + !test_bit(IP_TUNNEL_CSUM_BIT, + info->key.tun_flags)); return 0; free_dst: diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 2c5ed0a7cb18..3b0c7758ea4f 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3014,8 +3014,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) tags = NULL; /* Find out through which dev should the packet go */ - rt = ip_route_output(dev_net(bond->dev), targets[i], 0, - RTO_ONLINK, 0); + rt = ip_route_output(dev_net(bond->dev), targets[i], 0, 0, 0, + RT_SCOPE_LINK); if (IS_ERR(rt)) { /* there's no route to target - try to send arp * probe to generate any traffic (arp_validate=0) @@ -4710,7 +4710,7 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu) } } - bond_dev->mtu = new_mtu; + WRITE_ONCE(bond_dev->mtu, new_mtu); return 0; @@ -5245,7 +5245,7 @@ static inline int bond_slave_override(struct bonding *bond, /* Find out if any slaves have the same mapping as this skb. 
*/ bond_for_each_slave_rcu(bond, slave, iter) { - if (slave->queue_id == skb_get_queue_mapping(skb)) { + if (READ_ONCE(slave->queue_id) == skb_get_queue_mapping(skb)) { if (bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) { bond_dev_queue_xmit(bond, skb, slave->dev); @@ -5933,7 +5933,7 @@ static void bond_uninit(struct net_device *bond_dev) bond_set_slave_arr(bond, NULL, NULL); - list_del(&bond->bond_list); + list_del_rcu(&bond->bond_list); bond_debug_unregister(bond); } @@ -6347,7 +6347,7 @@ static int bond_init(struct net_device *bond_dev) spin_lock_init(&bond->stats_lock); netdev_lockdep_set_classes(bond_dev); - list_add_tail(&bond->bond_list, &bn->dev_list); + list_add_tail_rcu(&bond->bond_list, &bn->dev_list); bond_prepare_sysfs_group(bond); diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index 29b4c3d1b9b6..2a6a424806aa 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -51,7 +51,8 @@ static int bond_fill_slave_info(struct sk_buff *skb, slave_dev->addr_len, slave->perm_hwaddr)) goto nla_put_failure; - if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID, slave->queue_id)) + if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID, + READ_ONCE(slave->queue_id))) goto nla_put_failure; if (nla_put_s32(skb, IFLA_BOND_SLAVE_PRIO, slave->prio)) diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 4cdbc7e084f4..0cacd7027e35 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -1589,7 +1589,7 @@ static int bond_option_queue_id_set(struct bonding *bond, goto err_no_cmd; /* Actually set the qids for the slave */ - update_slave->queue_id = qid; + WRITE_ONCE(update_slave->queue_id, qid); out: return ret; diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c index 43be458422b3..7edf72ec816a 100644 --- a/drivers/net/bonding/bond_procfs.c +++ b/drivers/net/bonding/bond_procfs.c @@ -209,7 +209,7 @@ static void bond_info_show_slave(struct seq_file *seq, seq_printf(seq, "Permanent HW addr: %*phC\n", slave->dev->addr_len, slave->perm_hwaddr); - seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id); + seq_printf(seq, "Slave queue ID: %d\n", READ_ONCE(slave->queue_id)); if (BOND_MODE(bond) == BOND_MODE_8023AD) { const struct port *port = &SLAVE_AD_INFO(slave)->port; diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 2805135a7205..1e13bb170515 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -37,12 +37,12 @@ static ssize_t bonding_show_bonds(const struct class *cls, { const struct bond_net *bn = container_of_const(attr, struct bond_net, class_attr_bonding_masters); - int res = 0; struct bonding *bond; + int res = 0; - rtnl_lock(); + rcu_read_lock(); - list_for_each_entry(bond, &bn->dev_list, bond_list) { + list_for_each_entry_rcu(bond, &bn->dev_list, bond_list) { if (res > (PAGE_SIZE - IFNAMSIZ)) { /* not enough space for another interface name */ if ((PAGE_SIZE - res) > 10) @@ -55,7 +55,7 @@ static ssize_t bonding_show_bonds(const struct class *cls, if (res) buf[res-1] = '\n'; /* eat the leftover space */ - rtnl_unlock(); + rcu_read_unlock(); return res; } @@ -170,10 +170,9 @@ static ssize_t bonding_show_slaves(struct device *d, struct slave *slave; int res = 0; - if (!rtnl_trylock()) - return restart_syscall(); + rcu_read_lock(); - bond_for_each_slave(bond, slave, iter) { + bond_for_each_slave_rcu(bond, slave, iter) { if (res > (PAGE_SIZE - IFNAMSIZ)) { /* 
not enough space for another interface name */ if ((PAGE_SIZE - res) > 10) @@ -184,7 +183,7 @@ static ssize_t bonding_show_slaves(struct device *d, res += sysfs_emit_at(buf, res, "%s ", slave->dev->name); } - rtnl_unlock(); + rcu_read_unlock(); if (res) buf[res-1] = '\n'; /* eat the leftover space */ @@ -626,10 +625,9 @@ static ssize_t bonding_show_queue_id(struct device *d, struct slave *slave; int res = 0; - if (!rtnl_trylock()) - return restart_syscall(); + rcu_read_lock(); - bond_for_each_slave(bond, slave, iter) { + bond_for_each_slave_rcu(bond, slave, iter) { if (res > (PAGE_SIZE - IFNAMSIZ - 6)) { /* not enough space for another interface_name:queue_id pair */ if ((PAGE_SIZE - res) > 10) @@ -638,12 +636,13 @@ static ssize_t bonding_show_queue_id(struct device *d, break; } res += sysfs_emit_at(buf, res, "%s:%d ", - slave->dev->name, slave->queue_id); + slave->dev->name, + READ_ONCE(slave->queue_id)); } if (res) buf[res-1] = '\n'; /* eat the leftover space */ - rtnl_unlock(); + rcu_read_unlock(); return res; } diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c index 313866f2c0e4..36d0e8440b5b 100644 --- a/drivers/net/bonding/bond_sysfs_slave.c +++ b/drivers/net/bonding/bond_sysfs_slave.c @@ -53,7 +53,7 @@ static SLAVE_ATTR_RO(perm_hwaddr); static ssize_t queue_id_show(struct slave *slave, char *buf) { - return sysfs_emit(buf, "%d\n", slave->queue_id); + return sysfs_emit(buf, "%d\n", READ_ONCE(slave->queue_id)); } static SLAVE_ATTR_RO(queue_id); diff --git a/drivers/net/can/cc770/Kconfig b/drivers/net/can/cc770/Kconfig index 9ef1359319f0..467ef19de1c1 100644 --- a/drivers/net/can/cc770/Kconfig +++ b/drivers/net/can/cc770/Kconfig @@ -7,6 +7,7 @@ if CAN_CC770 config CAN_CC770_ISA tristate "ISA Bus based legacy CC770 driver" + depends on ISA help This driver adds legacy support for CC770 and AN82527 chips connected to the ISA bus using I/O port, memory mapped or diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c index 3a3be5cdfc1f..83e724e0ab87 100644 --- a/drivers/net/can/dev/dev.c +++ b/drivers/net/can/dev/dev.c @@ -338,7 +338,7 @@ int can_change_mtu(struct net_device *dev, int new_mtu) return -EINVAL; } - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return 0; } EXPORT_SYMBOL_GPL(can_change_mtu); diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig index 4b2f9cb17fc3..01168db4c106 100644 --- a/drivers/net/can/sja1000/Kconfig +++ b/drivers/net/can/sja1000/Kconfig @@ -87,6 +87,7 @@ config CAN_PLX_PCI config CAN_SJA1000_ISA tristate "ISA Bus based legacy SJA1000 driver" + depends on ISA help This driver adds legacy support for SJA1000 chips connected to the ISA bus using I/O port, memory mapped or indirect access. 
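
The arcnet hunks above convert a tasklet into work queued on the new BH workqueues (from_work(), system_bh_highpri_wq). A minimal sketch of the same pattern for a hypothetical driver follows; the foo_* names and the reply_status field are placeholders, not part of the patch:

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct foo_priv {
	struct net_device *dev;
	int reply_status;
	struct work_struct reply_work;		/* was: struct tasklet_struct */
};

static void foo_reply_work(struct work_struct *t)
{
	struct foo_priv *lp = from_work(lp, t, reply_work);

	/* runs in softirq context when queued on a BH workqueue */
	netdev_dbg(lp->dev, "tx ack status %d\n", lp->reply_status);
}

static int foo_open(struct net_device *dev)
{
	struct foo_priv *lp = netdev_priv(dev);

	INIT_WORK(&lp->reply_work, foo_reply_work);	/* was: tasklet_setup() */
	return 0;
}

static int foo_close(struct net_device *dev)
{
	struct foo_priv *lp = netdev_priv(dev);

	cancel_work_sync(&lp->reply_work);		/* was: tasklet_kill() */
	return 0;
}

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct foo_priv *lp = netdev_priv((struct net_device *)dev_id);

	lp->reply_status = 1;
	/* was: tasklet_hi_schedule(&lp->reply_tasklet) */
	queue_work(system_bh_highpri_wq, &lp->reply_work);
	return IRQ_HANDLED;
}
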
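The bareudp hunks above replace the old __be16 TUNNEL_* flag masks with the bitmap-based IP_TUNNEL_*_BIT interface. An illustrative sketch, assuming hypothetical foo_* helpers, of how the bitmap is declared, populated and queried:

#include <net/ip_tunnels.h>

static bool foo_wants_udp_csum(const struct ip_tunnel_info *info)
{
	/* was: info->key.tun_flags & TUNNEL_CSUM */
	return test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
}

static void foo_rx_flags_example(void)
{
	IP_TUNNEL_DECLARE_FLAGS(key) = { };	/* all flag bits cleared */

	/* was: the TUNNEL_KEY literal; now a named bit in the bitmap */
	__set_bit(IP_TUNNEL_KEY_BIT, key);

	/* 'key' can then be handed to helpers such as udp_tun_rx_dst() */
}
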
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c index 285635c23443..f67e85807100 100644 --- a/drivers/net/can/vcan.c +++ b/drivers/net/can/vcan.c @@ -140,7 +140,7 @@ static int vcan_change_mtu(struct net_device *dev, int new_mtu) !can_is_canxl_dev_mtu(new_mtu)) return -EINVAL; - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return 0; } diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index f7fabba707ea..9e1b7d41005f 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c @@ -135,7 +135,7 @@ static int vxcan_change_mtu(struct net_device *dev, int new_mtu) !can_is_canxl_dev_mtu(new_mtu)) return -EINVAL; - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return 0; } diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index b2eeff04f4c8..8f50abe739b7 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1266,95 +1266,70 @@ static void b53_adjust_63xx_rgmii(struct dsa_switch *ds, int port, phy_modes(interface)); } -static void b53_adjust_link(struct dsa_switch *ds, int port, - struct phy_device *phydev) +static void b53_adjust_531x5_rgmii(struct dsa_switch *ds, int port, + phy_interface_t interface) { struct b53_device *dev = ds->priv; - struct ethtool_keee *p = &dev->ports[port].eee; - u8 rgmii_ctrl = 0, reg = 0, off; - bool tx_pause = false; - bool rx_pause = false; - - if (!phy_is_pseudo_fixed_link(phydev)) - return; + u8 rgmii_ctrl = 0, off; - /* Enable flow control on BCM5301x's CPU port */ - if (is5301x(dev) && dsa_is_cpu_port(ds, port)) - tx_pause = rx_pause = true; + if (port == dev->imp_port) + off = B53_RGMII_CTRL_IMP; + else + off = B53_RGMII_CTRL_P(port); - if (phydev->pause) { - if (phydev->asym_pause) - tx_pause = true; - rx_pause = true; - } + /* Configure the port RGMII clock delay by DLL disabled and + * tx_clk aligned timing (restoring to reset defaults) + */ + b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl); + rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC | + RGMII_CTRL_TIMING_SEL); - b53_force_port_config(dev, port, phydev->speed, phydev->duplex, - tx_pause, rx_pause); - b53_force_link(dev, port, phydev->link); + /* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make + * sure that we enable the port TX clock internal delay to + * account for this internal delay that is inserted, otherwise + * the switch won't be able to receive correctly. 
+ * + * PHY_INTERFACE_MODE_RGMII means that we are not introducing + * any delay neither on transmission nor reception, so the + * BCM53125 must also be configured accordingly to account for + * the lack of delay and introduce + * + * The BCM53125 switch has its RX clock and TX clock control + * swapped, hence the reason why we modify the TX clock path in + * the "RGMII" case + */ + if (interface == PHY_INTERFACE_MODE_RGMII_TXID) + rgmii_ctrl |= RGMII_CTRL_DLL_TXC; + if (interface == PHY_INTERFACE_MODE_RGMII) + rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC; + rgmii_ctrl |= RGMII_CTRL_TIMING_SEL; + b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl); - if (is63xx(dev) && port >= B53_63XX_RGMII0) - b53_adjust_63xx_rgmii(ds, port, phydev->interface); + dev_info(ds->dev, "Configured port %d for %s\n", port, + phy_modes(interface)); +} - if (is531x5(dev) && phy_interface_is_rgmii(phydev)) { - if (port == dev->imp_port) - off = B53_RGMII_CTRL_IMP; - else - off = B53_RGMII_CTRL_P(port); +static void b53_adjust_5325_mii(struct dsa_switch *ds, int port) +{ + struct b53_device *dev = ds->priv; + u8 reg = 0; - /* Configure the port RGMII clock delay by DLL disabled and - * tx_clk aligned timing (restoring to reset defaults) - */ - b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl); - rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC | - RGMII_CTRL_TIMING_SEL); - - /* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make - * sure that we enable the port TX clock internal delay to - * account for this internal delay that is inserted, otherwise - * the switch won't be able to receive correctly. - * - * PHY_INTERFACE_MODE_RGMII means that we are not introducing - * any delay neither on transmission nor reception, so the - * BCM53125 must also be configured accordingly to account for - * the lack of delay and introduce - * - * The BCM53125 switch has its RX clock and TX clock control - * swapped, hence the reason why we modify the TX clock path in - * the "RGMII" case - */ - if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) - rgmii_ctrl |= RGMII_CTRL_DLL_TXC; - if (phydev->interface == PHY_INTERFACE_MODE_RGMII) - rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC; - rgmii_ctrl |= RGMII_CTRL_TIMING_SEL; - b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl); - - dev_info(ds->dev, "Configured port %d for %s\n", port, - phy_modes(phydev->interface)); - } + b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, + ®); - /* configure MII port if necessary */ - if (is5325(dev)) { + /* reverse mii needs to be enabled */ + if (!(reg & PORT_OVERRIDE_RV_MII_25)) { + b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, + reg | PORT_OVERRIDE_RV_MII_25); b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, ®); - /* reverse mii needs to be enabled */ if (!(reg & PORT_OVERRIDE_RV_MII_25)) { - b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, - reg | PORT_OVERRIDE_RV_MII_25); - b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, - ®); - - if (!(reg & PORT_OVERRIDE_RV_MII_25)) { - dev_err(ds->dev, - "Failed to enable reverse MII mode\n"); - return; - } + dev_err(ds->dev, + "Failed to enable reverse MII mode\n"); + return; } } - - /* Re-negotiate EEE if it was enabled already */ - p->eee_enabled = b53_eee_init(ds, port, phydev); } void b53_port_event(struct dsa_switch *ds, int port) @@ -1408,30 +1383,48 @@ static void b53_phylink_get_caps(struct dsa_switch *ds, int port, dev->ops->phylink_get_caps(dev, port, config); } -static struct phylink_pcs *b53_phylink_mac_select_pcs(struct dsa_switch *ds, - 
int port, +static struct phylink_pcs *b53_phylink_mac_select_pcs(struct phylink_config *config, phy_interface_t interface) { - struct b53_device *dev = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct b53_device *dev = dp->ds->priv; if (!dev->ops->phylink_mac_select_pcs) return NULL; - return dev->ops->phylink_mac_select_pcs(dev, port, interface); + return dev->ops->phylink_mac_select_pcs(dev, dp->index, interface); } -void b53_phylink_mac_config(struct dsa_switch *ds, int port, - unsigned int mode, - const struct phylink_link_state *state) +static void b53_phylink_mac_config(struct phylink_config *config, + unsigned int mode, + const struct phylink_link_state *state) { + struct dsa_port *dp = dsa_phylink_to_port(config); + phy_interface_t interface = state->interface; + struct dsa_switch *ds = dp->ds; + struct b53_device *dev = ds->priv; + int port = dp->index; + + if (is63xx(dev) && port >= B53_63XX_RGMII0) + b53_adjust_63xx_rgmii(ds, port, interface); + + if (mode == MLO_AN_FIXED) { + if (is531x5(dev) && phy_interface_mode_is_rgmii(interface)) + b53_adjust_531x5_rgmii(ds, port, interface); + + /* configure MII port if necessary */ + if (is5325(dev)) + b53_adjust_5325_mii(ds, port); + } } -EXPORT_SYMBOL(b53_phylink_mac_config); -void b53_phylink_mac_link_down(struct dsa_switch *ds, int port, - unsigned int mode, - phy_interface_t interface) +static void b53_phylink_mac_link_down(struct phylink_config *config, + unsigned int mode, + phy_interface_t interface) { - struct b53_device *dev = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct b53_device *dev = dp->ds->priv; + int port = dp->index; if (mode == MLO_AN_PHY) return; @@ -1445,24 +1438,31 @@ void b53_phylink_mac_link_down(struct dsa_switch *ds, int port, dev->ops->serdes_link_set) dev->ops->serdes_link_set(dev, port, mode, interface, false); } -EXPORT_SYMBOL(b53_phylink_mac_link_down); -void b53_phylink_mac_link_up(struct dsa_switch *ds, int port, - unsigned int mode, - phy_interface_t interface, - struct phy_device *phydev, - int speed, int duplex, - bool tx_pause, bool rx_pause) +static void b53_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phydev, + unsigned int mode, + phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause) { + struct dsa_port *dp = dsa_phylink_to_port(config); + struct dsa_switch *ds = dp->ds; struct b53_device *dev = ds->priv; + struct ethtool_keee *p = &dev->ports[dp->index].eee; + int port = dp->index; - if (is63xx(dev) && port >= B53_63XX_RGMII0) - b53_adjust_63xx_rgmii(ds, port, interface); - - if (mode == MLO_AN_PHY) + if (mode == MLO_AN_PHY) { + /* Re-negotiate EEE if it was enabled already */ + p->eee_enabled = b53_eee_init(ds, port, phydev); return; + } if (mode == MLO_AN_FIXED) { + /* Force flow control on BCM5301x's CPU port */ + if (is5301x(dev) && dsa_is_cpu_port(ds, port)) + tx_pause = rx_pause = true; + b53_force_port_config(dev, port, speed, duplex, tx_pause, rx_pause); b53_force_link(dev, port, true); @@ -1473,7 +1473,6 @@ void b53_phylink_mac_link_up(struct dsa_switch *ds, int port, dev->ops->serdes_link_set) dev->ops->serdes_link_set(dev, port, mode, interface, true); } -EXPORT_SYMBOL(b53_phylink_mac_link_up); int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, struct netlink_ext_ack *extack) @@ -2268,6 +2267,13 @@ static int b53_get_max_mtu(struct dsa_switch *ds, int port) return JMS_MAX_SIZE; } +static const struct phylink_mac_ops b53_phylink_mac_ops = { + 
.mac_select_pcs = b53_phylink_mac_select_pcs, + .mac_config = b53_phylink_mac_config, + .mac_link_down = b53_phylink_mac_link_down, + .mac_link_up = b53_phylink_mac_link_up, +}; + static const struct dsa_switch_ops b53_switch_ops = { .get_tag_protocol = b53_get_tag_protocol, .setup = b53_setup, @@ -2278,12 +2284,7 @@ static const struct dsa_switch_ops b53_switch_ops = { .get_ethtool_phy_stats = b53_get_ethtool_phy_stats, .phy_read = b53_phy_read16, .phy_write = b53_phy_write16, - .adjust_link = b53_adjust_link, .phylink_get_caps = b53_phylink_get_caps, - .phylink_mac_select_pcs = b53_phylink_mac_select_pcs, - .phylink_mac_config = b53_phylink_mac_config, - .phylink_mac_link_down = b53_phylink_mac_link_down, - .phylink_mac_link_up = b53_phylink_mac_link_up, .port_enable = b53_enable_port, .port_disable = b53_disable_port, .get_mac_eee = b53_get_mac_eee, @@ -2726,6 +2727,7 @@ struct b53_device *b53_switch_alloc(struct device *base, dev->priv = priv; dev->ops = ops; ds->ops = &b53_switch_ops; + ds->phylink_mac_ops = &b53_phylink_mac_ops; dev->vlan_enabled = true; /* Let DSA handle the case were multiple bridges span the same switch * device and different VLAN awareness settings are requested, which diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index c13a907947f1..05141176daf5 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -352,18 +352,6 @@ int b53_br_flags(struct dsa_switch *ds, int port, struct netlink_ext_ack *extack); int b53_setup_devlink_resources(struct dsa_switch *ds); void b53_port_event(struct dsa_switch *ds, int port); -void b53_phylink_mac_config(struct dsa_switch *ds, int port, - unsigned int mode, - const struct phylink_link_state *state); -void b53_phylink_mac_link_down(struct dsa_switch *ds, int port, - unsigned int mode, - phy_interface_t interface); -void b53_phylink_mac_link_up(struct dsa_switch *ds, int port, - unsigned int mode, - phy_interface_t interface, - struct phy_device *phydev, - int speed, int duplex, - bool tx_pause, bool rx_pause); int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, struct netlink_ext_ack *extack); int b53_vlan_add(struct dsa_switch *ds, int port, diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index bc77ee9e6d0a..ed1e6560df25 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -740,16 +740,19 @@ static void bcm_sf2_sw_get_caps(struct dsa_switch *ds, int port, MAC_10 | MAC_100 | MAC_1000; } -static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port, +static void bcm_sf2_sw_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) { - struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + struct dsa_port *dp = dsa_phylink_to_port(config); u32 id_mode_dis = 0, port_mode; + struct bcm_sf2_priv *priv; u32 reg_rgmii_ctrl; u32 reg; - if (port == core_readl(priv, CORE_IMP0_PRT_ID)) + priv = bcm_sf2_to_priv(dp->ds); + + if (dp->index == core_readl(priv, CORE_IMP0_PRT_ID)) return; switch (state->interface) { @@ -770,7 +773,7 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port, return; } - reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port); + reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, dp->index); /* Clear id_mode_dis bit, and the existing port mode, let * RGMII_MODE_EN bet set by mac_link_{up,down} @@ -809,13 +812,16 @@ static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port, reg_writel(priv, reg, reg_rgmii_ctrl); } -static void 
bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port, +static void bcm_sf2_sw_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { - struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + struct dsa_port *dp = dsa_phylink_to_port(config); + struct bcm_sf2_priv *priv; + int port = dp->index; u32 reg, offset; + priv = bcm_sf2_to_priv(dp->ds); if (priv->wol_ports_mask & BIT(port)) return; @@ -824,23 +830,26 @@ static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port, reg &= ~LINK_STS; core_writel(priv, reg, offset); - bcm_sf2_sw_mac_link_set(ds, port, interface, false); + bcm_sf2_sw_mac_link_set(dp->ds, port, interface, false); } -static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port, +static void bcm_sf2_sw_mac_link_up(struct phylink_config *config, + struct phy_device *phydev, unsigned int mode, phy_interface_t interface, - struct phy_device *phydev, int speed, int duplex, bool tx_pause, bool rx_pause) { - struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - struct ethtool_keee *p = &priv->dev->ports[port].eee; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct bcm_sf2_priv *priv; u32 reg_rgmii_ctrl = 0; + struct ethtool_keee *p; + int port = dp->index; u32 reg, offset; - bcm_sf2_sw_mac_link_set(ds, port, interface, true); + bcm_sf2_sw_mac_link_set(dp->ds, port, interface, true); + priv = bcm_sf2_to_priv(dp->ds); offset = bcm_sf2_port_override_offset(priv, port); if (phy_interface_mode_is_rgmii(interface) || @@ -886,8 +895,10 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port, core_writel(priv, reg, offset); - if (mode == MLO_AN_PHY && phydev) - p->eee_enabled = b53_eee_init(ds, port, phydev); + if (mode == MLO_AN_PHY && phydev) { + p = &priv->dev->ports[port].eee; + p->eee_enabled = b53_eee_init(dp->ds, port, phydev); + } } static void bcm_sf2_sw_fixed_state(struct dsa_switch *ds, int port, @@ -1196,6 +1207,12 @@ static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds, int port, return cnt; } +static const struct phylink_mac_ops bcm_sf2_phylink_mac_ops = { + .mac_config = bcm_sf2_sw_mac_config, + .mac_link_down = bcm_sf2_sw_mac_link_down, + .mac_link_up = bcm_sf2_sw_mac_link_up, +}; + static const struct dsa_switch_ops bcm_sf2_ops = { .get_tag_protocol = b53_get_tag_protocol, .setup = bcm_sf2_sw_setup, @@ -1206,9 +1223,6 @@ static const struct dsa_switch_ops bcm_sf2_ops = { .get_ethtool_phy_stats = b53_get_ethtool_phy_stats, .get_phy_flags = bcm_sf2_sw_get_phy_flags, .phylink_get_caps = bcm_sf2_sw_get_caps, - .phylink_mac_config = bcm_sf2_sw_mac_config, - .phylink_mac_link_down = bcm_sf2_sw_mac_link_down, - .phylink_mac_link_up = bcm_sf2_sw_mac_link_up, .phylink_fixed_state = bcm_sf2_sw_fixed_state, .suspend = bcm_sf2_sw_suspend, .resume = bcm_sf2_sw_resume, @@ -1399,6 +1413,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) priv->dev = dev; ds = dev->ds; ds->ops = &bcm_sf2_ops; + ds->phylink_mac_ops = &bcm_sf2_phylink_mac_ops; /* Advertise the 8 egress queues */ ds->num_tx_queues = SF2_NUM_EGRESS_QUEUES; diff --git a/drivers/net/dsa/hirschmann/hellcreek_ptp.c b/drivers/net/dsa/hirschmann/hellcreek_ptp.c index 5249a1c2a80b..bfe21f9f7dcd 100644 --- a/drivers/net/dsa/hirschmann/hellcreek_ptp.c +++ b/drivers/net/dsa/hirschmann/hellcreek_ptp.c @@ -27,7 +27,8 @@ void hellcreek_ptp_write(struct hellcreek *hellcreek, u16 data, } /* Get nanoseconds from PTP clock */ -static u64 hellcreek_ptp_clock_read(struct hellcreek *hellcreek) +static u64 hellcreek_ptp_clock_read(struct hellcreek 
*hellcreek, + struct ptp_system_timestamp *sts) { u16 nsl, nsh; @@ -45,16 +46,19 @@ static u64 hellcreek_ptp_clock_read(struct hellcreek *hellcreek) nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C); nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C); nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C); + ptp_read_system_prets(sts); nsl = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C); + ptp_read_system_postts(sts); return (u64)nsl | ((u64)nsh << 16); } -static u64 __hellcreek_ptp_gettime(struct hellcreek *hellcreek) +static u64 __hellcreek_ptp_gettime(struct hellcreek *hellcreek, + struct ptp_system_timestamp *sts) { u64 ns; - ns = hellcreek_ptp_clock_read(hellcreek); + ns = hellcreek_ptp_clock_read(hellcreek, sts); if (ns < hellcreek->last_ts) hellcreek->seconds++; hellcreek->last_ts = ns; @@ -72,7 +76,7 @@ u64 hellcreek_ptp_gettime_seconds(struct hellcreek *hellcreek, u64 ns) { u64 s; - __hellcreek_ptp_gettime(hellcreek); + __hellcreek_ptp_gettime(hellcreek, NULL); if (hellcreek->last_ts > ns) s = hellcreek->seconds * NSEC_PER_SEC; else @@ -81,14 +85,15 @@ u64 hellcreek_ptp_gettime_seconds(struct hellcreek *hellcreek, u64 ns) return s; } -static int hellcreek_ptp_gettime(struct ptp_clock_info *ptp, - struct timespec64 *ts) +static int hellcreek_ptp_gettimex(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) { struct hellcreek *hellcreek = ptp_to_hellcreek(ptp); u64 ns; mutex_lock(&hellcreek->ptp_lock); - ns = __hellcreek_ptp_gettime(hellcreek); + ns = __hellcreek_ptp_gettime(hellcreek, sts); mutex_unlock(&hellcreek->ptp_lock); *ts = ns_to_timespec64(ns); @@ -184,7 +189,7 @@ static int hellcreek_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) if (abs(delta) > MAX_SLOW_OFFSET_ADJ) { struct timespec64 now, then = ns_to_timespec64(delta); - hellcreek_ptp_gettime(ptp, &now); + hellcreek_ptp_gettimex(ptp, &now, NULL); now = timespec64_add(now, then); hellcreek_ptp_settime(ptp, &now); @@ -233,7 +238,7 @@ static void hellcreek_ptp_overflow_check(struct work_struct *work) hellcreek = dw_overflow_to_hellcreek(dw); mutex_lock(&hellcreek->ptp_lock); - __hellcreek_ptp_gettime(hellcreek); + __hellcreek_ptp_gettime(hellcreek, NULL); mutex_unlock(&hellcreek->ptp_lock); schedule_delayed_work(&hellcreek->overflow_work, @@ -409,7 +414,7 @@ int hellcreek_ptp_setup(struct hellcreek *hellcreek) hellcreek->ptp_clock_info.pps = 0; hellcreek->ptp_clock_info.adjfine = hellcreek_ptp_adjfine; hellcreek->ptp_clock_info.adjtime = hellcreek_ptp_adjtime; - hellcreek->ptp_clock_info.gettime64 = hellcreek_ptp_gettime; + hellcreek->ptp_clock_info.gettimex64 = hellcreek_ptp_gettimex; hellcreek->ptp_clock_info.settime64 = hellcreek_ptp_settime; hellcreek->ptp_clock_info.enable = hellcreek_ptp_enable; hellcreek->ptp_clock_info.do_aux_work = hellcreek_hwtstamp_work; diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index fcb20eac332a..02f07b870f10 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -1007,15 +1007,14 @@ static const struct lan9303_mib_desc lan9303_mib[] = { static void lan9303_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data) { + u8 *buf = data; unsigned int u; if (stringset != ETH_SS_STATS) return; - for (u = 0; u < ARRAY_SIZE(lan9303_mib); u++) { - strncpy(data + u * ETH_GSTRING_LEN, lan9303_mib[u].name, - ETH_GSTRING_LEN); - } + for (u = 0; u < ARRAY_SIZE(lan9303_mib); u++) + ethtool_puts(&buf, lan9303_mib[u].name); } static void lan9303_get_ethtool_stats(struct 
dsa_switch *ds, int port, @@ -1293,14 +1292,29 @@ static void lan9303_phylink_get_caps(struct dsa_switch *ds, int port, } } -static void lan9303_phylink_mac_link_up(struct dsa_switch *ds, int port, +static void lan9303_phylink_mac_config(struct phylink_config *config, + unsigned int mode, + const struct phylink_link_state *state) +{ +} + +static void lan9303_phylink_mac_link_down(struct phylink_config *config, + unsigned int mode, + phy_interface_t interface) +{ +} + +static void lan9303_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phydev, unsigned int mode, phy_interface_t interface, - struct phy_device *phydev, int speed, - int duplex, bool tx_pause, + int speed, int duplex, bool tx_pause, bool rx_pause) { - struct lan9303 *chip = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct lan9303 *chip = dp->ds->priv; + struct dsa_switch *ds = dp->ds; + int port = dp->index; u32 ctl; u32 reg; @@ -1330,6 +1344,12 @@ static void lan9303_phylink_mac_link_up(struct dsa_switch *ds, int port, regmap_write(chip->regmap, flow_ctl_reg[port], reg); } +static const struct phylink_mac_ops lan9303_phylink_mac_ops = { + .mac_config = lan9303_phylink_mac_config, + .mac_link_down = lan9303_phylink_mac_link_down, + .mac_link_up = lan9303_phylink_mac_link_up, +}; + static const struct dsa_switch_ops lan9303_switch_ops = { .get_tag_protocol = lan9303_get_tag_protocol, .setup = lan9303_setup, @@ -1337,7 +1357,6 @@ static const struct dsa_switch_ops lan9303_switch_ops = { .phy_read = lan9303_phy_read, .phy_write = lan9303_phy_write, .phylink_get_caps = lan9303_phylink_get_caps, - .phylink_mac_link_up = lan9303_phylink_mac_link_up, .get_ethtool_stats = lan9303_get_ethtool_stats, .get_sset_count = lan9303_get_sset_count, .port_enable = lan9303_port_enable, @@ -1365,6 +1384,7 @@ static int lan9303_register_switch(struct lan9303 *chip) chip->ds->num_ports = LAN9303_NUM_PORTS; chip->ds->priv = chip; chip->ds->ops = &lan9303_switch_ops; + chip->ds->phylink_mac_ops = &lan9303_phylink_mac_ops; base = chip->phy_addr_base; chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1 + base, base); diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index de48b194048f..a557049e34f5 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -1670,11 +1670,13 @@ static void gswip_port_set_pause(struct gswip_priv *priv, int port, mdio_phy, GSWIP_MDIO_PHYp(port)); } -static void gswip_phylink_mac_config(struct dsa_switch *ds, int port, +static void gswip_phylink_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) { - struct gswip_priv *priv = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct gswip_priv *priv = dp->ds->priv; + int port = dp->index; u32 miicfg = 0; miicfg |= GSWIP_MII_CFG_LDCLKDIS; @@ -1700,7 +1702,7 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port, miicfg |= GSWIP_MII_CFG_MODE_GMII; break; default: - dev_err(ds->dev, + dev_err(dp->ds->dev, "Unsupported interface: %d\n", state->interface); return; } @@ -1726,28 +1728,32 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port, } } -static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port, +static void gswip_phylink_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { - struct gswip_priv *priv = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct gswip_priv *priv = dp->ds->priv; + int port = 
dp->index; gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port); - if (!dsa_is_cpu_port(ds, port)) + if (!dsa_port_is_cpu(dp)) gswip_port_set_link(priv, port, false); } -static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port, +static void gswip_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phydev, unsigned int mode, phy_interface_t interface, - struct phy_device *phydev, int speed, int duplex, bool tx_pause, bool rx_pause) { - struct gswip_priv *priv = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct gswip_priv *priv = dp->ds->priv; + int port = dp->index; - if (!dsa_is_cpu_port(ds, port)) { + if (!dsa_port_is_cpu(dp)) { gswip_port_set_link(priv, port, true); gswip_port_set_speed(priv, port, speed, interface); gswip_port_set_duplex(priv, port, duplex); @@ -1824,6 +1830,12 @@ static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset) return ARRAY_SIZE(gswip_rmon_cnt); } +static const struct phylink_mac_ops gswip_phylink_mac_ops = { + .mac_config = gswip_phylink_mac_config, + .mac_link_down = gswip_phylink_mac_link_down, + .mac_link_up = gswip_phylink_mac_link_up, +}; + static const struct dsa_switch_ops gswip_xrx200_switch_ops = { .get_tag_protocol = gswip_get_tag_protocol, .setup = gswip_setup, @@ -1842,9 +1854,6 @@ static const struct dsa_switch_ops gswip_xrx200_switch_ops = { .port_change_mtu = gswip_port_change_mtu, .port_max_mtu = gswip_port_max_mtu, .phylink_get_caps = gswip_xrx200_phylink_get_caps, - .phylink_mac_config = gswip_phylink_mac_config, - .phylink_mac_link_down = gswip_phylink_mac_link_down, - .phylink_mac_link_up = gswip_phylink_mac_link_up, .get_strings = gswip_get_strings, .get_ethtool_stats = gswip_get_ethtool_stats, .get_sset_count = gswip_get_sset_count, @@ -1868,9 +1877,6 @@ static const struct dsa_switch_ops gswip_xrx300_switch_ops = { .port_change_mtu = gswip_port_change_mtu, .port_max_mtu = gswip_port_max_mtu, .phylink_get_caps = gswip_xrx300_phylink_get_caps, - .phylink_mac_config = gswip_phylink_mac_config, - .phylink_mac_link_down = gswip_phylink_mac_link_down, - .phylink_mac_link_up = gswip_phylink_mac_link_up, .get_strings = gswip_get_strings, .get_ethtool_stats = gswip_get_ethtool_stats, .get_sset_count = gswip_get_sset_count, @@ -2136,6 +2142,7 @@ static int gswip_probe(struct platform_device *pdev) priv->ds->num_ports = priv->hw_info->max_ports; priv->ds->priv = priv; priv->ds->ops = priv->hw_info->ops; + priv->ds->phylink_mac_ops = &gswip_phylink_mac_ops; priv->dev = dev; mutex_init(&priv->pce_table_lock); version = gswip_switch_r(priv, GSWIP_VERSION); diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig index 394ca8678d2b..c1b906c05a02 100644 --- a/drivers/net/dsa/microchip/Kconfig +++ b/drivers/net/dsa/microchip/Kconfig @@ -4,6 +4,8 @@ menuconfig NET_DSA_MICROCHIP_KSZ_COMMON depends on NET_DSA select NET_DSA_TAG_KSZ select NET_DSA_TAG_NONE + select NET_IEEE8021Q_HELPERS + select DCB help This driver adds support for Microchip KSZ9477 series switch and KSZ8795/KSZ88x3 switch chips. 
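
The hellcreek PTP hunks earlier in this diff convert the clock from .gettime64 to .gettimex64 so that system timestamps can be taken right around the hardware register read via ptp_read_system_prets()/ptp_read_system_postts(). A hedged sketch of the same callback shape for a hypothetical foo clock driver, with the actual hardware read stubbed out:

#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>

struct foo_clock {
	struct ptp_clock_info ptp_info;
	/* device handle, locking, ... */
};

static u64 foo_read_ns(struct foo_clock *clk, struct ptp_system_timestamp *sts)
{
	u64 ns;

	ptp_read_system_prets(sts);	/* system time just before the read */
	ns = 0;				/* read the hardware counter here */
	ptp_read_system_postts(sts);	/* system time just after the read */

	return ns;
}

static int foo_gettimex64(struct ptp_clock_info *ptp, struct timespec64 *ts,
			  struct ptp_system_timestamp *sts)
{
	struct foo_clock *clk = container_of(ptp, struct foo_clock, ptp_info);

	*ts = ns_to_timespec64(foo_read_ns(clk, sts));
	return 0;
}

static void foo_ptp_setup(struct foo_clock *clk)
{
	clk->ptp_info.owner = THIS_MODULE;
	strscpy(clk->ptp_info.name, "foo", sizeof(clk->ptp_info.name));
	clk->ptp_info.gettimex64 = foo_gettimex64;	/* was: .gettime64 */
	/* ptp_clock_register(&clk->ptp_info, dev) would follow */
}
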
diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile index 49459a50dbc8..1cfba1ec9355 100644 --- a/drivers/net/dsa/microchip/Makefile +++ b/drivers/net/dsa/microchip/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON) += ksz_switch.o -ksz_switch-objs := ksz_common.o +ksz_switch-objs := ksz_common.o ksz_dcb.o ksz_switch-objs += ksz9477.o ksz9477_acl.o ksz9477_tc_flower.o ksz_switch-objs += ksz8795.o ksz_switch-objs += lan937x_main.o diff --git a/drivers/net/dsa/microchip/ksz8.h b/drivers/net/dsa/microchip/ksz8.h index 1a5225264e6a..ae43077e76c3 100644 --- a/drivers/net/dsa/microchip/ksz8.h +++ b/drivers/net/dsa/microchip/ksz8.h @@ -19,8 +19,6 @@ void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port); void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port); int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val); int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val); -int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr, - u8 *fid, u8 *src_port, u8 *timestamp, u16 *entries); void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt); void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr, u64 *dropped, u64 *cnt); @@ -56,9 +54,10 @@ int ksz8_reset_switch(struct ksz_device *dev); int ksz8_switch_init(struct ksz_device *dev); void ksz8_switch_exit(struct ksz_device *dev); int ksz8_change_mtu(struct ksz_device *dev, int port, int mtu); -void ksz8_phylink_mac_link_up(struct ksz_device *dev, int port, - unsigned int mode, phy_interface_t interface, - struct phy_device *phydev, int speed, int duplex, +void ksz8_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phydev, unsigned int mode, + phy_interface_t interface, int speed, int duplex, bool tx_pause, bool rx_pause); +int ksz8_all_queues_split(struct ksz_device *dev, int queues); #endif diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index 14923535ca7e..d27b9c36d73f 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -127,37 +127,71 @@ int ksz8_change_mtu(struct ksz_device *dev, int port, int mtu) return -EOPNOTSUPP; } -static void ksz8795_set_prio_queue(struct ksz_device *dev, int port, int queue) +static int ksz8_port_queue_split(struct ksz_device *dev, int port, int queues) { - u8 hi, lo; + u8 mask_4q, mask_2q; + u8 reg_4q, reg_2q; + u8 data_4q = 0; + u8 data_2q = 0; + int ret; - /* Number of queues can only be 1, 2, or 4. */ - switch (queue) { - case 4: - case 3: - queue = PORT_QUEUE_SPLIT_4; - break; - case 2: - queue = PORT_QUEUE_SPLIT_2; - break; - default: - queue = PORT_QUEUE_SPLIT_1; + if (ksz_is_ksz88x3(dev)) { + mask_4q = KSZ8873_PORT_4QUEUE_SPLIT_EN; + mask_2q = KSZ8873_PORT_2QUEUE_SPLIT_EN; + reg_4q = REG_PORT_CTRL_0; + reg_2q = REG_PORT_CTRL_2; + + /* KSZ8795 family switches have Weighted Fair Queueing (WFQ) + * enabled by default. Enable it for KSZ8873 family switches + * too. Default value for KSZ8873 family is strict priority, + * which should be enabled by using TC_SETUP_QDISC_ETS, not + * by default. 
+ */ + ret = ksz_rmw8(dev, REG_SW_CTRL_3, WEIGHTED_FAIR_QUEUE_ENABLE, + WEIGHTED_FAIR_QUEUE_ENABLE); + if (ret) + return ret; + } else { + mask_4q = KSZ8795_PORT_4QUEUE_SPLIT_EN; + mask_2q = KSZ8795_PORT_2QUEUE_SPLIT_EN; + reg_4q = REG_PORT_CTRL_13; + reg_2q = REG_PORT_CTRL_0; + + /* TODO: this is legacy from initial KSZ8795 driver, should be + * moved to appropriate place in the future. + */ + ret = ksz_rmw8(dev, REG_SW_CTRL_19, + SW_OUT_RATE_LIMIT_QUEUE_BASED, + SW_OUT_RATE_LIMIT_QUEUE_BASED); + if (ret) + return ret; } - ksz_pread8(dev, port, REG_PORT_CTRL_0, &lo); - ksz_pread8(dev, port, P_DROP_TAG_CTRL, &hi); - lo &= ~PORT_QUEUE_SPLIT_L; - if (queue & PORT_QUEUE_SPLIT_2) - lo |= PORT_QUEUE_SPLIT_L; - hi &= ~PORT_QUEUE_SPLIT_H; - if (queue & PORT_QUEUE_SPLIT_4) - hi |= PORT_QUEUE_SPLIT_H; - ksz_pwrite8(dev, port, REG_PORT_CTRL_0, lo); - ksz_pwrite8(dev, port, P_DROP_TAG_CTRL, hi); - - /* Default is port based for egress rate limit. */ - if (queue != PORT_QUEUE_SPLIT_1) - ksz_cfg(dev, REG_SW_CTRL_19, SW_OUT_RATE_LIMIT_QUEUE_BASED, - true); + + if (queues == 4) + data_4q = mask_4q; + else if (queues == 2) + data_2q = mask_2q; + + ret = ksz_prmw8(dev, port, reg_4q, mask_4q, data_4q); + if (ret) + return ret; + + return ksz_prmw8(dev, port, reg_2q, mask_2q, data_2q); +} + +int ksz8_all_queues_split(struct ksz_device *dev, int queues) +{ + struct dsa_switch *ds = dev->ds; + const struct dsa_port *dp; + + dsa_switch_for_each_port(dp, ds) { + int ret = ksz8_port_queue_split(dev, dp->index, queues); + + if (ret) + return ret; + } + + return 0; } void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt) @@ -385,39 +419,39 @@ static int ksz8_valid_dyn_entry(struct ksz_device *dev, u8 *data) int timeout = 100; const u32 *masks; const u16 *regs; + int ret; masks = dev->info->masks; regs = dev->info->regs; do { - ksz_read8(dev, regs[REG_IND_DATA_CHECK], data); + ret = ksz_read8(dev, regs[REG_IND_DATA_CHECK], data); + if (ret) + return ret; + timeout--; } while ((*data & masks[DYNAMIC_MAC_TABLE_NOT_READY]) && timeout); /* Entry is not ready for accessing. */ - if (*data & masks[DYNAMIC_MAC_TABLE_NOT_READY]) { - return -EAGAIN; - /* Entry is ready for accessing. */ - } else { - ksz_read8(dev, regs[REG_IND_DATA_8], data); + if (*data & masks[DYNAMIC_MAC_TABLE_NOT_READY]) + return -ETIMEDOUT; - /* There is no valid entry in the table. */ - if (*data & masks[DYNAMIC_MAC_TABLE_MAC_EMPTY]) - return -ENXIO; - } - return 0; + /* Entry is ready for accessing. 
*/ + return ksz_read8(dev, regs[REG_IND_DATA_8], data); } -int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr, - u8 *fid, u8 *src_port, u8 *timestamp, u16 *entries) +static int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr, + u8 *fid, u8 *src_port, u16 *entries) { u32 data_hi, data_lo; const u8 *shifts; const u32 *masks; const u16 *regs; u16 ctrl_addr; + u64 buf = 0; u8 data; - int rc; + int cnt; + int ret; shifts = dev->info->shifts; masks = dev->info->masks; @@ -426,49 +460,50 @@ int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr, ctrl_addr = IND_ACC_TABLE(TABLE_DYNAMIC_MAC | TABLE_READ) | addr; mutex_lock(&dev->alu_mutex); - ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr); + ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr); + if (ret) + goto unlock_alu; + + ret = ksz8_valid_dyn_entry(dev, &data); + if (ret) + goto unlock_alu; - rc = ksz8_valid_dyn_entry(dev, &data); - if (rc == -EAGAIN) { - if (addr == 0) - *entries = 0; - } else if (rc == -ENXIO) { + if (data & masks[DYNAMIC_MAC_TABLE_MAC_EMPTY]) { *entries = 0; - /* At least one valid entry in the table. */ - } else { - u64 buf = 0; - int cnt; - - ksz_read64(dev, regs[REG_IND_DATA_HI], &buf); - data_hi = (u32)(buf >> 32); - data_lo = (u32)buf; - - /* Check out how many valid entry in the table. */ - cnt = data & masks[DYNAMIC_MAC_TABLE_ENTRIES_H]; - cnt <<= shifts[DYNAMIC_MAC_ENTRIES_H]; - cnt |= (data_hi & masks[DYNAMIC_MAC_TABLE_ENTRIES]) >> - shifts[DYNAMIC_MAC_ENTRIES]; - *entries = cnt + 1; - - *fid = (data_hi & masks[DYNAMIC_MAC_TABLE_FID]) >> - shifts[DYNAMIC_MAC_FID]; - *src_port = (data_hi & masks[DYNAMIC_MAC_TABLE_SRC_PORT]) >> - shifts[DYNAMIC_MAC_SRC_PORT]; - *timestamp = (data_hi & masks[DYNAMIC_MAC_TABLE_TIMESTAMP]) >> - shifts[DYNAMIC_MAC_TIMESTAMP]; - - mac_addr[5] = (u8)data_lo; - mac_addr[4] = (u8)(data_lo >> 8); - mac_addr[3] = (u8)(data_lo >> 16); - mac_addr[2] = (u8)(data_lo >> 24); - - mac_addr[1] = (u8)data_hi; - mac_addr[0] = (u8)(data_hi >> 8); - rc = 0; + goto unlock_alu; } + + ret = ksz_read64(dev, regs[REG_IND_DATA_HI], &buf); + if (ret) + goto unlock_alu; + + data_hi = (u32)(buf >> 32); + data_lo = (u32)buf; + + /* Check out how many valid entry in the table. 
*/ + cnt = data & masks[DYNAMIC_MAC_TABLE_ENTRIES_H]; + cnt <<= shifts[DYNAMIC_MAC_ENTRIES_H]; + cnt |= (data_hi & masks[DYNAMIC_MAC_TABLE_ENTRIES]) >> + shifts[DYNAMIC_MAC_ENTRIES]; + *entries = cnt + 1; + + *fid = (data_hi & masks[DYNAMIC_MAC_TABLE_FID]) >> + shifts[DYNAMIC_MAC_FID]; + *src_port = (data_hi & masks[DYNAMIC_MAC_TABLE_SRC_PORT]) >> + shifts[DYNAMIC_MAC_SRC_PORT]; + + mac_addr[5] = (u8)data_lo; + mac_addr[4] = (u8)(data_lo >> 8); + mac_addr[3] = (u8)(data_lo >> 16); + mac_addr[2] = (u8)(data_lo >> 24); + + mac_addr[1] = (u8)data_hi; + mac_addr[0] = (u8)(data_hi >> 8); + +unlock_alu: mutex_unlock(&dev->alu_mutex); - return rc; + return ret; } static int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr, @@ -1193,28 +1228,28 @@ void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port) int ksz8_fdb_dump(struct ksz_device *dev, int port, dsa_fdb_dump_cb_t *cb, void *data) { - int ret = 0; - u16 i = 0; - u16 entries = 0; - u8 timestamp = 0; - u8 fid; - u8 src_port; u8 mac[ETH_ALEN]; + u8 src_port, fid; + u16 entries = 0; + int ret, i; - do { + for (i = 0; i < KSZ8_DYN_MAC_ENTRIES; i++) { ret = ksz8_r_dyn_mac_table(dev, i, mac, &fid, &src_port, - ×tamp, &entries); - if (!ret && port == src_port) { + &entries); + if (ret) + return ret; + + if (i >= entries) + return 0; + + if (port == src_port) { ret = cb(mac, fid, false, data); if (ret) - break; + return ret; } - i++; - } while (i < entries); - if (i >= entries) - ret = 0; + } - return ret; + return 0; } static int ksz8_add_sta_mac(struct ksz_device *dev, int port, @@ -1512,6 +1547,7 @@ void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port) { struct dsa_switch *ds = dev->ds; const u32 *masks; + int queues; u8 member; masks = dev->info->masks; @@ -1519,19 +1555,20 @@ void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port) /* enable broadcast storm limit */ ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true); - if (!ksz_is_ksz88x3(dev)) - ksz8795_set_prio_queue(dev, port, 4); + /* For KSZ88x3 enable only one queue by default, otherwise we won't + * be able to get rid of PCP prios on Port 2. + */ + if (ksz_is_ksz88x3(dev)) + queues = 1; + else + queues = dev->info->num_tx_queues; - /* disable DiffServ priority */ - ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_ENABLE, false); + ksz8_port_queue_split(dev, port, queues); /* replace priority */ ksz_port_cfg(dev, port, P_802_1P_CTRL, masks[PORT_802_1P_REMAPPING], false); - /* enable 802.1p priority */ - ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_ENABLE, true); - if (cpu_port) member = dsa_user_ports(ds); else @@ -1701,11 +1738,15 @@ static void ksz8_cpu_port_link_up(struct ksz_device *dev, int speed, int duplex, SW_10_MBIT, ctrl); } -void ksz8_phylink_mac_link_up(struct ksz_device *dev, int port, - unsigned int mode, phy_interface_t interface, - struct phy_device *phydev, int speed, int duplex, +void ksz8_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phydev, unsigned int mode, + phy_interface_t interface, int speed, int duplex, bool tx_pause, bool rx_pause) { + struct dsa_port *dp = dsa_phylink_to_port(config); + struct ksz_device *dev = dp->ds->priv; + int port = dp->index; + /* If the port is the CPU port, apply special handling. Only the CPU * port is configured via global registers. 
*/ diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h index 7c9341ef73b0..69566a5d9cda 100644 --- a/drivers/net/dsa/microchip/ksz8795_reg.h +++ b/drivers/net/dsa/microchip/ksz8795_reg.h @@ -124,7 +124,8 @@ #define PORT_BASED_PRIO_3 3 #define PORT_INSERT_TAG BIT(2) #define PORT_REMOVE_TAG BIT(1) -#define PORT_QUEUE_SPLIT_L BIT(0) +#define KSZ8795_PORT_2QUEUE_SPLIT_EN BIT(0) +#define KSZ8873_PORT_4QUEUE_SPLIT_EN BIT(0) #define REG_PORT_1_CTRL_1 0x11 #define REG_PORT_2_CTRL_1 0x21 @@ -143,6 +144,7 @@ #define REG_PORT_4_CTRL_2 0x42 #define REG_PORT_5_CTRL_2 0x52 +#define KSZ8873_PORT_2QUEUE_SPLIT_EN BIT(7) #define PORT_INGRESS_FILTER BIT(6) #define PORT_DISCARD_NON_VID BIT(5) #define PORT_FORCE_FLOW_CTRL BIT(4) @@ -463,10 +465,7 @@ #define REG_PORT_4_CTRL_13 0xE1 #define REG_PORT_5_CTRL_13 0xF1 -#define PORT_QUEUE_SPLIT_H BIT(1) -#define PORT_QUEUE_SPLIT_1 0 -#define PORT_QUEUE_SPLIT_2 1 -#define PORT_QUEUE_SPLIT_4 2 +#define KSZ8795_PORT_4QUEUE_SPLIT_EN BIT(1) #define PORT_DROP_TAG BIT(0) #define REG_PORT_1_CTRL_14 0xB2 @@ -794,5 +793,6 @@ #define TAIL_TAG_LOOKUP BIT(7) #define FID_ENTRIES 128 +#define KSZ8_DYN_MAC_ENTRIES 1024 #endif diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index 7f745628c84d..f8ad7833f5d9 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -1158,18 +1158,12 @@ void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port) /* enable broadcast storm limit */ ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true); - /* disable DiffServ priority */ - ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_PRIO_ENABLE, false); - /* replace priority */ ksz_port_cfg(dev, port, REG_PORT_MRI_MAC_CTRL, PORT_USER_PRIO_CEILING, false); ksz9477_port_cfg32(dev, port, REG_PORT_MTI_QUEUE_CTRL_0__4, MTI_PVID_REPLACE, false); - /* enable 802.1p priority */ - ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true); - /* force flow control for non-PHY ports only */ ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL, diff --git a/drivers/net/dsa/microchip/ksz9477_tc_flower.c b/drivers/net/dsa/microchip/ksz9477_tc_flower.c index 8b2f5be667e0..ca7830ab168a 100644 --- a/drivers/net/dsa/microchip/ksz9477_tc_flower.c +++ b/drivers/net/dsa/microchip/ksz9477_tc_flower.c @@ -124,6 +124,9 @@ static int ksz9477_flower_parse_key(struct ksz_device *dev, int port, return -EOPNOTSUPP; } + if (flow_rule_match_has_control_flags(rule, extack)) + return -EOPNOTSUPP; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) || flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { ret = ksz9477_flower_parse_key_l2(dev, port, extack, rule, diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 2b510f150dd8..1e0085cd9a9a 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -24,10 +24,12 @@ #include <linux/of_net.h> #include <linux/micrel_phy.h> #include <net/dsa.h> +#include <net/ieee8021q.h> #include <net/pkt_cls.h> #include <net/switchdev.h> #include "ksz_common.h" +#include "ksz_dcb.h" #include "ksz_ptp.h" #include "ksz8.h" #include "ksz9477.h" @@ -253,6 +255,28 @@ static const struct ksz_drive_strength ksz8830_drive_strengths[] = { { KSZ8873_DRIVE_STRENGTH_16MA, 16000 }, }; +static void ksz8830_phylink_mac_config(struct phylink_config *config, + unsigned int mode, + const struct phylink_link_state *state); +static 
void ksz_phylink_mac_config(struct phylink_config *config, + unsigned int mode, + const struct phylink_link_state *state); +static void ksz_phylink_mac_link_down(struct phylink_config *config, + unsigned int mode, + phy_interface_t interface); + +static const struct phylink_mac_ops ksz8830_phylink_mac_ops = { + .mac_config = ksz8830_phylink_mac_config, + .mac_link_down = ksz_phylink_mac_link_down, + .mac_link_up = ksz8_phylink_mac_link_up, +}; + +static const struct phylink_mac_ops ksz8_phylink_mac_ops = { + .mac_config = ksz_phylink_mac_config, + .mac_link_down = ksz_phylink_mac_link_down, + .mac_link_up = ksz8_phylink_mac_link_up, +}; + static const struct ksz_dev_ops ksz8_dev_ops = { .setup = ksz8_setup, .get_port_addr = ksz8_get_port_addr, @@ -277,7 +301,6 @@ static const struct ksz_dev_ops ksz8_dev_ops = { .mirror_add = ksz8_port_mirror_add, .mirror_del = ksz8_port_mirror_del, .get_caps = ksz8_get_caps, - .phylink_mac_link_up = ksz8_phylink_mac_link_up, .config_cpu_port = ksz8_config_cpu_port, .enable_stp_addr = ksz8_enable_stp_addr, .reset = ksz8_reset_switch, @@ -286,13 +309,19 @@ static const struct ksz_dev_ops ksz8_dev_ops = { .change_mtu = ksz8_change_mtu, }; -static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port, +static void ksz9477_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phydev, unsigned int mode, phy_interface_t interface, - struct phy_device *phydev, int speed, - int duplex, bool tx_pause, + int speed, int duplex, bool tx_pause, bool rx_pause); +static const struct phylink_mac_ops ksz9477_phylink_mac_ops = { + .mac_config = ksz_phylink_mac_config, + .mac_link_down = ksz_phylink_mac_link_down, + .mac_link_up = ksz9477_phylink_mac_link_up, +}; + static const struct ksz_dev_ops ksz9477_dev_ops = { .setup = ksz9477_setup, .get_port_addr = ksz9477_get_port_addr, @@ -319,7 +348,6 @@ static const struct ksz_dev_ops ksz9477_dev_ops = { .mdb_add = ksz9477_mdb_add, .mdb_del = ksz9477_mdb_del, .change_mtu = ksz9477_change_mtu, - .phylink_mac_link_up = ksz9477_phylink_mac_link_up, .get_wol = ksz9477_get_wol, .set_wol = ksz9477_set_wol, .wol_pre_shutdown = ksz9477_wol_pre_shutdown, @@ -331,6 +359,12 @@ static const struct ksz_dev_ops ksz9477_dev_ops = { .exit = ksz9477_switch_exit, }; +static const struct phylink_mac_ops lan937x_phylink_mac_ops = { + .mac_config = ksz_phylink_mac_config, + .mac_link_down = ksz_phylink_mac_link_down, + .mac_link_up = ksz9477_phylink_mac_link_up, +}; + static const struct ksz_dev_ops lan937x_dev_ops = { .setup = lan937x_setup, .teardown = lan937x_teardown, @@ -359,7 +393,6 @@ static const struct ksz_dev_ops lan937x_dev_ops = { .mdb_add = ksz9477_mdb_add, .mdb_del = ksz9477_mdb_del, .change_mtu = lan937x_change_mtu, - .phylink_mac_link_up = ksz9477_phylink_mac_link_up, .config_cpu_port = lan937x_config_cpu_port, .tc_cbs_set_cinc = lan937x_tc_cbs_set_cinc, .enable_stp_addr = ksz9477_enable_stp_addr, @@ -1194,9 +1227,10 @@ const struct ksz_chip_data ksz_switch_chips[] = { .port_cnt = 3, /* total port count */ .port_nirqs = 3, .num_tx_queues = 4, + .num_ipms = 8, .tc_cbs_supported = true, - .tc_ets_supported = true, .ops = &ksz9477_dev_ops, + .phylink_mac_ops = &ksz9477_phylink_mac_ops, .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), .reg_mib_cnt = MIB_COUNTER_NUM, @@ -1223,7 +1257,9 @@ const struct ksz_chip_data ksz_switch_chips[] = { .cpu_ports = 0x10, /* can be configured as cpu port */ .port_cnt = 5, /* total cpu and user ports */ .num_tx_queues = 4, + .num_ipms = 4, .ops = 
&ksz8_dev_ops, + .phylink_mac_ops = &ksz8_phylink_mac_ops, .ksz87xx_eee_link_erratum = true, .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), @@ -1262,7 +1298,9 @@ const struct ksz_chip_data ksz_switch_chips[] = { .cpu_ports = 0x10, /* can be configured as cpu port */ .port_cnt = 5, /* total cpu and user ports */ .num_tx_queues = 4, + .num_ipms = 4, .ops = &ksz8_dev_ops, + .phylink_mac_ops = &ksz8_phylink_mac_ops, .ksz87xx_eee_link_erratum = true, .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), @@ -1287,7 +1325,9 @@ const struct ksz_chip_data ksz_switch_chips[] = { .cpu_ports = 0x10, /* can be configured as cpu port */ .port_cnt = 5, /* total cpu and user ports */ .num_tx_queues = 4, + .num_ipms = 4, .ops = &ksz8_dev_ops, + .phylink_mac_ops = &ksz8_phylink_mac_ops, .ksz87xx_eee_link_erratum = true, .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), @@ -1312,7 +1352,9 @@ const struct ksz_chip_data ksz_switch_chips[] = { .cpu_ports = 0x4, /* can be configured as cpu port */ .port_cnt = 3, .num_tx_queues = 4, + .num_ipms = 4, .ops = &ksz8_dev_ops, + .phylink_mac_ops = &ksz8830_phylink_mac_ops, .mib_names = ksz88xx_mib_names, .mib_cnt = ARRAY_SIZE(ksz88xx_mib_names), .reg_mib_cnt = MIB_COUNTER_NUM, @@ -1336,9 +1378,10 @@ const struct ksz_chip_data ksz_switch_chips[] = { .port_cnt = 7, /* total physical port count */ .port_nirqs = 4, .num_tx_queues = 4, + .num_ipms = 8, .tc_cbs_supported = true, - .tc_ets_supported = true, .ops = &ksz9477_dev_ops, + .phylink_mac_ops = &ksz9477_phylink_mac_ops, .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), .reg_mib_cnt = MIB_COUNTER_NUM, @@ -1370,7 +1413,9 @@ const struct ksz_chip_data ksz_switch_chips[] = { .port_cnt = 6, /* total physical port count */ .port_nirqs = 2, .num_tx_queues = 4, + .num_ipms = 8, .ops = &ksz9477_dev_ops, + .phylink_mac_ops = &ksz9477_phylink_mac_ops, .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), .reg_mib_cnt = MIB_COUNTER_NUM, @@ -1402,7 +1447,9 @@ const struct ksz_chip_data ksz_switch_chips[] = { .port_cnt = 7, /* total physical port count */ .port_nirqs = 2, .num_tx_queues = 4, + .num_ipms = 8, .ops = &ksz9477_dev_ops, + .phylink_mac_ops = &ksz9477_phylink_mac_ops, .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), .reg_mib_cnt = MIB_COUNTER_NUM, @@ -1432,7 +1479,9 @@ const struct ksz_chip_data ksz_switch_chips[] = { .port_cnt = 3, /* total port count */ .port_nirqs = 2, .num_tx_queues = 4, + .num_ipms = 8, .ops = &ksz9477_dev_ops, + .phylink_mac_ops = &ksz9477_phylink_mac_ops, .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), .reg_mib_cnt = MIB_COUNTER_NUM, @@ -1458,9 +1507,10 @@ const struct ksz_chip_data ksz_switch_chips[] = { .port_cnt = 3, /* total port count */ .port_nirqs = 3, .num_tx_queues = 4, + .num_ipms = 8, .tc_cbs_supported = true, - .tc_ets_supported = true, .ops = &ksz9477_dev_ops, + .phylink_mac_ops = &ksz9477_phylink_mac_ops, .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), .reg_mib_cnt = MIB_COUNTER_NUM, @@ -1486,9 +1536,10 @@ const struct ksz_chip_data ksz_switch_chips[] = { .port_cnt = 7, /* total port count */ .port_nirqs = 3, .num_tx_queues = 4, + .num_ipms = 8, .tc_cbs_supported = true, - .tc_ets_supported = true, .ops = &ksz9477_dev_ops, + .phylink_mac_ops = &ksz9477_phylink_mac_ops, .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), .reg_mib_cnt = MIB_COUNTER_NUM, @@ -1519,8 +1570,8 @@ 
const struct ksz_chip_data ksz_switch_chips[] = { .port_cnt = 7, /* total physical port count */ .port_nirqs = 3, .num_tx_queues = 4, + .num_ipms = 8, .tc_cbs_supported = true, - .tc_ets_supported = true, .ops = &ksz9477_dev_ops, .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), @@ -1551,9 +1602,10 @@ const struct ksz_chip_data ksz_switch_chips[] = { .port_cnt = 5, /* total physical port count */ .port_nirqs = 6, .num_tx_queues = 8, + .num_ipms = 8, .tc_cbs_supported = true, - .tc_ets_supported = true, .ops = &lan937x_dev_ops, + .phylink_mac_ops = &lan937x_phylink_mac_ops, .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), .reg_mib_cnt = MIB_COUNTER_NUM, @@ -1578,9 +1630,10 @@ const struct ksz_chip_data ksz_switch_chips[] = { .port_cnt = 6, /* total physical port count */ .port_nirqs = 6, .num_tx_queues = 8, + .num_ipms = 8, .tc_cbs_supported = true, - .tc_ets_supported = true, .ops = &lan937x_dev_ops, + .phylink_mac_ops = &lan937x_phylink_mac_ops, .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), .reg_mib_cnt = MIB_COUNTER_NUM, @@ -1605,9 +1658,10 @@ const struct ksz_chip_data ksz_switch_chips[] = { .port_cnt = 8, /* total physical port count */ .port_nirqs = 6, .num_tx_queues = 8, + .num_ipms = 8, .tc_cbs_supported = true, - .tc_ets_supported = true, .ops = &lan937x_dev_ops, + .phylink_mac_ops = &lan937x_phylink_mac_ops, .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), .reg_mib_cnt = MIB_COUNTER_NUM, @@ -1636,9 +1690,10 @@ const struct ksz_chip_data ksz_switch_chips[] = { .port_cnt = 5, /* total physical port count */ .port_nirqs = 6, .num_tx_queues = 8, + .num_ipms = 8, .tc_cbs_supported = true, - .tc_ets_supported = true, .ops = &lan937x_dev_ops, + .phylink_mac_ops = &lan937x_phylink_mac_ops, .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), .reg_mib_cnt = MIB_COUNTER_NUM, @@ -1667,9 +1722,10 @@ const struct ksz_chip_data ksz_switch_chips[] = { .port_cnt = 8, /* total physical port count */ .port_nirqs = 6, .num_tx_queues = 8, + .num_ipms = 8, .tc_cbs_supported = true, - .tc_ets_supported = true, .ops = &lan937x_dev_ops, + .phylink_mac_ops = &lan937x_phylink_mac_ops, .mib_names = ksz9477_mib_names, .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), .reg_mib_cnt = MIB_COUNTER_NUM, @@ -2304,6 +2360,7 @@ static int ksz_setup(struct dsa_switch *ds) ksz_init_mib_timer(dev); ds->configure_vlan_while_not_filtering = false; + ds->dscp_prio_mapping_is_global = true; if (dev->dev_ops->setup) { ret = dev->dev_ops->setup(ds); @@ -2347,6 +2404,10 @@ static int ksz_setup(struct dsa_switch *ds) goto out_ptp_clock_unregister; } + ret = ksz_dcb_init(dev); + if (ret) + goto out_ptp_clock_unregister; + /* start switch */ regmap_update_bits(ksz_regmap_8(dev), regs[S_START_CTRL], SW_START, SW_START); @@ -2523,14 +2584,15 @@ static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port) return 0; } -static void ksz_mac_link_down(struct dsa_switch *ds, int port, - unsigned int mode, phy_interface_t interface) +static void ksz_phylink_mac_link_down(struct phylink_config *config, + unsigned int mode, + phy_interface_t interface) { - struct ksz_device *dev = ds->priv; - struct ksz_port *p = &dev->ports[port]; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct ksz_device *dev = dp->ds->priv; /* Read all MIB counters when the link is going down. 
*/ - p->read = true; + dev->ports[dp->index].read = true; /* timer started */ if (dev->mib_read_interval) schedule_delayed_work(&dev->mib_read, 0); @@ -2660,9 +2722,33 @@ static int ksz_port_mdb_del(struct dsa_switch *ds, int port, return dev->dev_ops->mdb_del(dev, port, mdb, db); } +static int ksz9477_set_default_prio_queue_mapping(struct ksz_device *dev, + int port) +{ + u32 queue_map = 0; + int ipm; + + for (ipm = 0; ipm < dev->info->num_ipms; ipm++) { + int queue; + + /* Traffic Type (TT) is corresponding to the Internal Priority + * Map (IPM) in the switch. Traffic Class (TC) is + * corresponding to the queue in the switch. + */ + queue = ieee8021q_tt_to_tc(ipm, dev->info->num_tx_queues); + if (queue < 0) + return queue; + + queue_map |= queue << (ipm * KSZ9477_PORT_TC_MAP_S); + } + + return ksz_pwrite32(dev, port, KSZ9477_PORT_MRI_TC_MAP__4, queue_map); +} + static int ksz_port_setup(struct dsa_switch *ds, int port) { struct ksz_device *dev = ds->priv; + int ret; if (!dsa_is_user_port(ds, port)) return 0; @@ -2670,11 +2756,17 @@ static int ksz_port_setup(struct dsa_switch *ds, int port) /* setup user port */ dev->dev_ops->port_setup(dev, port, false); + if (!is_ksz8(dev)) { + ret = ksz9477_set_default_prio_queue_mapping(dev, port); + if (ret) + return ret; + } + /* port_stp_state_set() will be called after to enable the port so * there is no need to do anything. */ - return 0; + return ksz_dcb_init_port(dev, port); } void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) @@ -3065,16 +3157,23 @@ phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit) return interface; } -static void ksz_phylink_mac_config(struct dsa_switch *ds, int port, +static void ksz8830_phylink_mac_config(struct phylink_config *config, + unsigned int mode, + const struct phylink_link_state *state) +{ + struct dsa_port *dp = dsa_phylink_to_port(config); + struct ksz_device *dev = dp->ds->priv; + + dev->ports[dp->index].manual_flow = !(state->pause & MLO_PAUSE_AN); +} + +static void ksz_phylink_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) { - struct ksz_device *dev = ds->priv; - - if (ksz_is_ksz88x3(dev)) { - dev->ports[port].manual_flow = !(state->pause & MLO_PAUSE_AN); - return; - } + struct dsa_port *dp = dsa_phylink_to_port(config); + struct ksz_device *dev = dp->ds->priv; + int port = dp->index; /* Internal PHYs */ if (dev->info->internal_phy[port]) @@ -3087,9 +3186,6 @@ static void ksz_phylink_mac_config(struct dsa_switch *ds, int port, ksz_set_xmii(dev, port, state->interface); - if (dev->dev_ops->phylink_mac_config) - dev->dev_ops->phylink_mac_config(dev, port, mode, state); - if (dev->dev_ops->setup_rgmii_delay) dev->dev_ops->setup_rgmii_delay(dev, port); } @@ -3187,13 +3283,16 @@ static void ksz_duplex_flowctrl(struct ksz_device *dev, int port, int duplex, ksz_prmw8(dev, port, regs[P_XMII_CTRL_0], mask, val); } -static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port, +static void ksz9477_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phydev, unsigned int mode, phy_interface_t interface, - struct phy_device *phydev, int speed, - int duplex, bool tx_pause, + int speed, int duplex, bool tx_pause, bool rx_pause) { + struct dsa_port *dp = dsa_phylink_to_port(config); + struct ksz_device *dev = dp->ds->priv; + int port = dp->index; struct ksz_port *p; p = &dev->ports[port]; @@ -3209,18 +3308,6 @@ static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port, 
ksz_duplex_flowctrl(dev, port, duplex, tx_pause, rx_pause); } -static void ksz_phylink_mac_link_up(struct dsa_switch *ds, int port, - unsigned int mode, - phy_interface_t interface, - struct phy_device *phydev, int speed, - int duplex, bool tx_pause, bool rx_pause) -{ - struct ksz_device *dev = ds->priv; - - dev->dev_ops->phylink_mac_link_up(dev, port, mode, interface, phydev, - speed, duplex, tx_pause, rx_pause); -} - static int ksz_switch_detect(struct ksz_device *dev) { u8 id1, id2, id4; @@ -3522,7 +3609,7 @@ static int ksz_tc_ets_add(struct ksz_device *dev, int port, for (tc_prio = 0; tc_prio < ARRAY_SIZE(p->priomap); tc_prio++) { int queue; - if (tc_prio > KSZ9477_MAX_TC_PRIO) + if (tc_prio >= dev->info->num_ipms) break; queue = ksz_ets_band_to_queue(p, p->priomap[tc_prio]); @@ -3534,8 +3621,7 @@ static int ksz_tc_ets_add(struct ksz_device *dev, int port, static int ksz_tc_ets_del(struct ksz_device *dev, int port) { - int ret, queue, tc_prio, s; - u32 queue_map = 0; + int ret, queue; /* To restore the default chip configuration, set all queues to use the * WRR scheduler with a weight of 1. @@ -3547,31 +3633,10 @@ static int ksz_tc_ets_del(struct ksz_device *dev, int port) return ret; } - switch (dev->info->num_tx_queues) { - case 2: - s = 2; - break; - case 4: - s = 1; - break; - case 8: - s = 0; - break; - default: - return -EINVAL; - } - /* Revert the queue mapping for TC-priority to its default setting on * the chip. */ - for (tc_prio = 0; tc_prio <= KSZ9477_MAX_TC_PRIO; tc_prio++) { - int queue; - - queue = tc_prio >> s; - queue_map |= queue << (tc_prio * KSZ9477_PORT_TC_MAP_S); - } - - return ksz_pwrite32(dev, port, KSZ9477_PORT_MRI_TC_MAP__4, queue_map); + return ksz9477_set_default_prio_queue_mapping(dev, port); } static int ksz_tc_ets_validate(struct ksz_device *dev, int port, @@ -3616,7 +3681,7 @@ static int ksz_tc_setup_qdisc_ets(struct dsa_switch *ds, int port, struct ksz_device *dev = ds->priv; int ret; - if (!dev->info->tc_ets_supported) + if (is_ksz8(dev)) return -EOPNOTSUPP; if (qopt->parent != TC_H_ROOT) { @@ -3881,9 +3946,6 @@ static const struct dsa_switch_ops ksz_switch_ops = { .phy_read = ksz_phy_read16, .phy_write = ksz_phy_write16, .phylink_get_caps = ksz_phylink_get_caps, - .phylink_mac_config = ksz_phylink_mac_config, - .phylink_mac_link_up = ksz_phylink_mac_link_up, - .phylink_mac_link_down = ksz_mac_link_down, .port_setup = ksz_port_setup, .set_ageing_time = ksz_set_ageing_time, .get_strings = ksz_get_strings, @@ -3925,6 +3987,13 @@ static const struct dsa_switch_ops ksz_switch_ops = { .port_setup_tc = ksz_setup_tc, .get_mac_eee = ksz_get_mac_eee, .set_mac_eee = ksz_set_mac_eee, + .port_get_default_prio = ksz_port_get_default_prio, + .port_set_default_prio = ksz_port_set_default_prio, + .port_get_dscp_prio = ksz_port_get_dscp_prio, + .port_add_dscp_prio = ksz_port_add_dscp_prio, + .port_del_dscp_prio = ksz_port_del_dscp_prio, + .port_get_apptrust = ksz_port_get_apptrust, + .port_set_apptrust = ksz_port_set_apptrust, }; struct ksz_device *ksz_switch_alloc(struct device *base, void *priv) @@ -4328,6 +4397,9 @@ int ksz_switch_register(struct ksz_device *dev) /* set the real number of ports */ dev->ds->num_ports = dev->info->port_cnt; + /* set the phylink ops */ + dev->ds->phylink_mac_ops = dev->info->phylink_mac_ops; + /* Host port interface will be self detected, or specifically set in * device tree. 
*/ diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index 40c11b0d6b62..c784fd23a993 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -19,9 +19,13 @@ #include "ksz_ptp.h" #define KSZ_MAX_NUM_PORTS 8 +/* all KSZ switches count ports from 1 */ +#define KSZ_PORT_1 0 +#define KSZ_PORT_2 1 struct ksz_device; struct ksz_port; +struct phylink_mac_ops; enum ksz_regmap_width { KSZ_REGMAP_8, @@ -58,9 +62,10 @@ struct ksz_chip_data { int port_cnt; u8 port_nirqs; u8 num_tx_queues; + u8 num_ipms; /* number of Internal Priority Maps */ bool tc_cbs_supported; - bool tc_ets_supported; const struct ksz_dev_ops *ops; + const struct phylink_mac_ops *phylink_mac_ops; bool ksz87xx_eee_link_erratum; const struct ksz_mib_names *mib_names; int mib_cnt; @@ -349,9 +354,6 @@ struct ksz_dev_ops { int (*change_mtu)(struct ksz_device *dev, int port, int mtu); void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze); void (*port_init_cnt)(struct ksz_device *dev, int port); - void (*phylink_mac_config)(struct ksz_device *dev, int port, - unsigned int mode, - const struct phylink_link_state *state); void (*phylink_mac_link_up)(struct ksz_device *dev, int port, unsigned int mode, phy_interface_t interface, @@ -620,6 +622,11 @@ static inline bool ksz_is_ksz88x3(struct ksz_device *dev) return dev->chip_id == KSZ8830_CHIP_ID; } +static inline bool is_ksz8(struct ksz_device *dev) +{ + return ksz_is_ksz87xx(dev) || ksz_is_ksz88x3(dev); +} + static inline int is_lan937x(struct ksz_device *dev) { return dev->chip_id == LAN9370_CHIP_ID || @@ -722,7 +729,6 @@ static inline int is_lan937x(struct ksz_device *dev) #define KSZ9477_PORT_MRI_TC_MAP__4 0x0808 #define KSZ9477_PORT_TC_MAP_S 4 -#define KSZ9477_MAX_TC_PRIO 7 /* CBS related registers */ #define REG_PORT_MTI_QUEUE_INDEX__4 0x0900 diff --git a/drivers/net/dsa/microchip/ksz_dcb.c b/drivers/net/dsa/microchip/ksz_dcb.c new file mode 100644 index 000000000000..a97106327562 --- /dev/null +++ b/drivers/net/dsa/microchip/ksz_dcb.c @@ -0,0 +1,809 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2024 Pengutronix, Oleksij Rempel <[email protected]> + +#include <linux/dsa/ksz_common.h> +#include <net/dsa.h> +#include <net/dscp.h> +#include <net/ieee8021q.h> + +#include "ksz_common.h" +#include "ksz_dcb.h" +#include "ksz8.h" + +#define KSZ8_REG_PORT_1_CTRL_0 0x10 +#define KSZ8_PORT_DIFFSERV_ENABLE BIT(6) +#define KSZ8_PORT_802_1P_ENABLE BIT(5) +#define KSZ8_PORT_BASED_PRIO_M GENMASK(4, 3) + +#define KSZ88X3_REG_TOS_DSCP_CTRL 0x60 +#define KSZ8765_REG_TOS_DSCP_CTRL 0x90 + +#define KSZ9477_REG_SW_MAC_TOS_CTRL 0x033e +#define KSZ9477_SW_TOS_DSCP_REMAP BIT(0) +#define KSZ9477_SW_TOS_DSCP_DEFAULT_PRIO_M GENMASK(5, 3) + +#define KSZ9477_REG_DIFFSERV_PRIO_MAP 0x0340 + +#define KSZ9477_REG_PORT_MRI_PRIO_CTRL 0x0801 +#define KSZ9477_PORT_HIGHEST_PRIO BIT(7) +#define KSZ9477_PORT_OR_PRIO BIT(6) +#define KSZ9477_PORT_MAC_PRIO_ENABLE BIT(4) +#define KSZ9477_PORT_VLAN_PRIO_ENABLE BIT(3) +#define KSZ9477_PORT_802_1P_PRIO_ENABLE BIT(2) +#define KSZ9477_PORT_DIFFSERV_PRIO_ENABLE BIT(1) +#define KSZ9477_PORT_ACL_PRIO_ENABLE BIT(0) + +#define KSZ9477_REG_PORT_MRI_MAC_CTRL 0x0802 +#define KSZ9477_PORT_BASED_PRIO_M GENMASK(2, 0) + +struct ksz_apptrust_map { + u8 apptrust; + u8 bit; +}; + +static const struct ksz_apptrust_map ksz8_apptrust_map_to_bit[] = { + { DCB_APP_SEL_PCP, KSZ8_PORT_802_1P_ENABLE }, + { IEEE_8021QAZ_APP_SEL_DSCP, KSZ8_PORT_DIFFSERV_ENABLE }, +}; + +static const struct 
ksz_apptrust_map ksz9477_apptrust_map_to_bit[] = { + { DCB_APP_SEL_PCP, KSZ9477_PORT_802_1P_PRIO_ENABLE }, + { IEEE_8021QAZ_APP_SEL_DSCP, KSZ9477_PORT_DIFFSERV_PRIO_ENABLE }, +}; + +/* ksz_supported_apptrust[] - Supported apptrust selectors and Priority Order + * of Internal Priority Map (IPM) sources. + * + * This array defines the apptrust selectors supported by the hardware, where + * the index within the array indicates the priority of the selector - lower + * indices correspond to higher priority. This fixed priority scheme is due to + * the hardware's design, which does not support configurable priority among + * different priority sources. + * + * The priority sources, including Tail Tag, ACL, VLAN PCP and DSCP are ordered + * by the hardware's fixed logic, as detailed below. The order reflects a + * non-configurable precedence where certain types of priority information + * override others: + * + * 1. Tail Tag - Highest priority, overrides ACL, VLAN PCP, and DSCP priorities. + * 2. ACL - Overrides VLAN PCP and DSCP priorities. + * 3. VLAN PCP - Overrides DSCP priority. + * 4. DSCP - Lowest priority, does not override any other priority source. + * + * In this context, the array's lower index (higher priority) for + * 'DCB_APP_SEL_PCP' suggests its relative priority over + * 'IEEE_8021QAZ_APP_SEL_DSCP' within the system's fixed priority scheme. + * + * DCB_APP_SEL_PCP - Priority Code Point selector + * IEEE_8021QAZ_APP_SEL_DSCP - Differentiated Services Code Point selector + */ +static const u8 ksz_supported_apptrust[] = { + DCB_APP_SEL_PCP, + IEEE_8021QAZ_APP_SEL_DSCP, +}; + +static const char * const ksz_supported_apptrust_variants[] = { + "empty", "dscp", "pcp", "dscp pcp" +}; + +static void ksz_get_default_port_prio_reg(struct ksz_device *dev, int *reg, + u8 *mask, int *shift) +{ + if (is_ksz8(dev)) { + *reg = KSZ8_REG_PORT_1_CTRL_0; + *mask = KSZ8_PORT_BASED_PRIO_M; + *shift = __bf_shf(KSZ8_PORT_BASED_PRIO_M); + } else { + *reg = KSZ9477_REG_PORT_MRI_MAC_CTRL; + *mask = KSZ9477_PORT_BASED_PRIO_M; + *shift = __bf_shf(KSZ9477_PORT_BASED_PRIO_M); + } +} + +/** + * ksz_get_dscp_prio_reg - Retrieves the DSCP-to-priority-mapping register + * @dev: Pointer to the KSZ switch device structure + * @reg: Pointer to the register address to be set + * @per_reg: Pointer to the number of DSCP values per register + * @mask: Pointer to the mask to be set + * + * This function retrieves the DSCP to priority mapping register, the number of + * DSCP values per register, and the mask to be set. + */ +static void ksz_get_dscp_prio_reg(struct ksz_device *dev, int *reg, + int *per_reg, u8 *mask) +{ + if (ksz_is_ksz87xx(dev)) { + *reg = KSZ8765_REG_TOS_DSCP_CTRL; + *per_reg = 4; + *mask = GENMASK(1, 0); + } else if (ksz_is_ksz88x3(dev)) { + *reg = KSZ88X3_REG_TOS_DSCP_CTRL; + *per_reg = 4; + *mask = GENMASK(1, 0); + } else { + *reg = KSZ9477_REG_DIFFSERV_PRIO_MAP; + *per_reg = 2; + *mask = GENMASK(2, 0); + } +} + +/** + * ksz_get_apptrust_map_and_reg - Retrieves the apptrust map and register + * @dev: Pointer to the KSZ switch device structure + * @map: Pointer to the apptrust map to be set + * @reg: Pointer to the register address to be set + * @mask: Pointer to the mask to be set + * + * This function retrieves the apptrust map and register address for the + * apptrust configuration. 
+ */ +static void ksz_get_apptrust_map_and_reg(struct ksz_device *dev, + const struct ksz_apptrust_map **map, + int *reg, u8 *mask) +{ + if (is_ksz8(dev)) { + *map = ksz8_apptrust_map_to_bit; + *reg = KSZ8_REG_PORT_1_CTRL_0; + *mask = KSZ8_PORT_DIFFSERV_ENABLE | KSZ8_PORT_802_1P_ENABLE; + } else { + *map = ksz9477_apptrust_map_to_bit; + *reg = KSZ9477_REG_PORT_MRI_PRIO_CTRL; + *mask = KSZ9477_PORT_802_1P_PRIO_ENABLE | + KSZ9477_PORT_DIFFSERV_PRIO_ENABLE; + } +} + +/** + * ksz_port_get_default_prio - Retrieves the default priority for a port on a + * KSZ switch + * @ds: Pointer to the DSA switch structure + * @port: Port number from which to get the default priority + * + * This function fetches the default priority for the specified port on a KSZ + * switch. + * + * Return: The default priority of the port on success, or a negative error + * code on failure. + */ +int ksz_port_get_default_prio(struct dsa_switch *ds, int port) +{ + struct ksz_device *dev = ds->priv; + int ret, reg, shift; + u8 data, mask; + + ksz_get_default_port_prio_reg(dev, &reg, &mask, &shift); + + ret = ksz_pread8(dev, port, reg, &data); + if (ret) + return ret; + + return (data & mask) >> shift; +} + +/** + * ksz88x3_port_set_default_prio_quirks - Quirks for default priority + * @dev: Pointer to the KSZ switch device structure + * @port: Port number for which to set the default priority + * @prio: Priority value to set + * + * This function implements quirks for setting the default priority on KSZ88x3 + * devices. On Port 2, no other priority providers are working + * except PCP. So, configuring default priority on Port 2 is not possible. + * On Port 1, it is not possible to configure port priority if PCP + * apptrust on Port 2 is disabled. Since we disable multiple queues on the + * switch to disable PCP on Port 2, we need to ensure that the default priority + * configuration on Port 1 is in agreement with the configuration on Port 2. + * + * Return: 0 on success, or a negative error code on failure + */ +static int ksz88x3_port_set_default_prio_quirks(struct ksz_device *dev, int port, + u8 prio) +{ + if (!prio) + return 0; + + if (port == KSZ_PORT_2) { + dev_err(dev->dev, "Port priority configuration is not working on Port 2\n"); + return -EINVAL; + } else if (port == KSZ_PORT_1) { + u8 port2_data; + int ret; + + ret = ksz_pread8(dev, KSZ_PORT_2, KSZ8_REG_PORT_1_CTRL_0, + &port2_data); + if (ret) + return ret; + + if (!(port2_data & KSZ8_PORT_802_1P_ENABLE)) { + dev_err(dev->dev, "Not possible to configure port priority on Port 1 if PCP apptrust on Port 2 is disabled\n"); + return -EINVAL; + } + } + + return 0; +} + +/** + * ksz_port_set_default_prio - Sets the default priority for a port on a KSZ + * switch + * @ds: Pointer to the DSA switch structure + * @port: Port number for which to set the default priority + * @prio: Priority value to set + * + * This function sets the default priority for the specified port on a KSZ + * switch. + * + * Return: 0 on success, or a negative error code on failure. 
+ */ +int ksz_port_set_default_prio(struct dsa_switch *ds, int port, u8 prio) +{ + struct ksz_device *dev = ds->priv; + int reg, shift, ret; + u8 mask; + + if (prio >= dev->info->num_ipms) + return -EINVAL; + + if (ksz_is_ksz88x3(dev)) { + ret = ksz88x3_port_set_default_prio_quirks(dev, port, prio); + if (ret) + return ret; + } + + ksz_get_default_port_prio_reg(dev, &reg, &mask, &shift); + + return ksz_prmw8(dev, port, reg, mask, (prio << shift) & mask); +} + +/** + * ksz_port_get_dscp_prio - Retrieves the priority for a DSCP value on a KSZ + * switch + * @ds: Pointer to the DSA switch structure + * @port: Port number for which to get the priority + * @dscp: DSCP value for which to get the priority + * + * This function fetches the priority value from the switch global DSCP-to-priority + * mapping table for the specified DSCP value. + * + * Return: The priority value for the DSCP on success, or a negative error + * code on failure. + */ +int ksz_port_get_dscp_prio(struct dsa_switch *ds, int port, u8 dscp) +{ + struct ksz_device *dev = ds->priv; + int reg, per_reg, ret, shift; + u8 data, mask; + + ksz_get_dscp_prio_reg(dev, &reg, &per_reg, &mask); + + /* If DSCP remapping is disabled, DSCP bits 3-5 are used as Internal + * Priority Map (IPM) + */ + if (!is_ksz8(dev)) { + ret = ksz_read8(dev, KSZ9477_REG_SW_MAC_TOS_CTRL, &data); + if (ret) + return ret; + + /* If DSCP remapping is disabled, DSCP bits 3-5 are used as + * Internal Priority Map (IPM) + */ + if (!(data & KSZ9477_SW_TOS_DSCP_REMAP)) + return FIELD_GET(KSZ9477_SW_TOS_DSCP_DEFAULT_PRIO_M, + dscp); + } + + /* In case DSCP remapping is enabled, we need to read the DSCP to + * priority mapping table. + */ + reg += dscp / per_reg; + ret = ksz_read8(dev, reg, &data); + if (ret) + return ret; + + shift = (dscp % per_reg) * (8 / per_reg); + + return (data >> shift) & mask; +} + +/** + * ksz_set_global_dscp_entry - Sets the global DSCP-to-priority mapping entry + * @dev: Pointer to the KSZ switch device structure + * @dscp: DSCP value for which to set the priority + * @ipm: Priority value to set + * + * This function sets the global DSCP-to-priority mapping entry for the + * specified DSCP value. + * + * Return: 0 on success, or a negative error code on failure. + */ +static int ksz_set_global_dscp_entry(struct ksz_device *dev, u8 dscp, u8 ipm) +{ + int reg, per_reg, shift; + u8 mask; + + ksz_get_dscp_prio_reg(dev, &reg, &per_reg, &mask); + + shift = (dscp % per_reg) * (8 / per_reg); + + return ksz_rmw8(dev, reg + (dscp / per_reg), mask << shift, + ipm << shift); +} + +/** + * ksz_init_global_dscp_map - Initializes the global DSCP-to-priority mapping + * @dev: Pointer to the KSZ switch device structure + * + * This function initializes the global DSCP-to-priority mapping table for the + * switch. + * + * Return: 0 on success, or a negative error code on failure + */ +static int ksz_init_global_dscp_map(struct ksz_device *dev) +{ + int ret, dscp; + + /* On KSZ9xxx variants, DSCP remapping is disabled by default. + * Enable it to have predictable and reproducible behavior across + * different devices. + */ + if (!is_ksz8(dev)) { + ret = ksz_rmw8(dev, KSZ9477_REG_SW_MAC_TOS_CTRL, + KSZ9477_SW_TOS_DSCP_REMAP, + KSZ9477_SW_TOS_DSCP_REMAP); + if (ret) + return ret; + } + + for (dscp = 0; dscp < DSCP_MAX; dscp++) { + int ipm, tt; + + /* Map DSCP to Traffic Type, which is corresponding to the + * Internal Priority Map (IPM) in the switch. 
+ */ + if (!is_ksz8(dev)) { + ipm = ietf_dscp_to_ieee8021q_tt(dscp); + } else { + /* On KSZ8xxx variants we do not have IPM to queue + * remapping table. We need to convert DSCP to Traffic + * Type and then to queue. + */ + tt = ietf_dscp_to_ieee8021q_tt(dscp); + if (tt < 0) + return tt; + + ipm = ieee8021q_tt_to_tc(tt, dev->info->num_tx_queues); + } + + if (ipm < 0) + return ipm; + + ret = ksz_set_global_dscp_entry(dev, dscp, ipm); + } + + return 0; +} + +/** + * ksz_port_add_dscp_prio - Adds a DSCP-to-priority mapping entry for a port on + * a KSZ switch. + * @ds: Pointer to the DSA switch structure + * @port: Port number for which to add the DSCP-to-priority mapping entry + * @dscp: DSCP value for which to add the priority + * @prio: Priority value to set + * + * Return: 0 on success, or a negative error code on failure + */ +int ksz_port_add_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, u8 prio) +{ + struct ksz_device *dev = ds->priv; + + if (prio >= dev->info->num_ipms) + return -ERANGE; + + return ksz_set_global_dscp_entry(dev, dscp, prio); +} + +/** + * ksz_port_del_dscp_prio - Deletes a DSCP-to-priority mapping entry for a port + * on a KSZ switch. + * @ds: Pointer to the DSA switch structure + * @port: Port number for which to delete the DSCP-to-priority mapping entry + * @dscp: DSCP value for which to delete the priority + * @prio: Priority value to delete + * + * Return: 0 on success, or a negative error code on failure + */ +int ksz_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, u8 prio) +{ + struct ksz_device *dev = ds->priv; + int ipm; + + if (ksz_port_get_dscp_prio(ds, port, dscp) != prio) + return 0; + + if (is_ksz8(dev)) { + ipm = ieee8021q_tt_to_tc(IEEE8021Q_TT_BE, + dev->info->num_tx_queues); + if (ipm < 0) + return ipm; + } else { + ipm = IEEE8021Q_TT_BE; + } + + return ksz_set_global_dscp_entry(dev, dscp, ipm); +} + +/** + * ksz_apptrust_error - Prints an error message for an invalid apptrust selector + * @dev: Pointer to the KSZ switch device structure + * + * This function prints an error message when an invalid apptrust selector is + * provided. + */ +static void ksz_apptrust_error(struct ksz_device *dev) +{ + char supported_apptrust_variants[64]; + int i; + + supported_apptrust_variants[0] = '\0'; + for (i = 0; i < ARRAY_SIZE(ksz_supported_apptrust_variants); i++) { + if (i > 0) + strlcat(supported_apptrust_variants, ", ", + sizeof(supported_apptrust_variants)); + strlcat(supported_apptrust_variants, + ksz_supported_apptrust_variants[i], + sizeof(supported_apptrust_variants)); + } + + dev_err(dev->dev, "Invalid apptrust selector or priority order. Supported: %s\n", + supported_apptrust_variants); +} + +/** + * ksz_port_set_apptrust_validate - Validates the apptrust selectors + * @dev: Pointer to the KSZ switch device structure + * @port: Port number for which to set the apptrust selectors + * @sel: Array of apptrust selectors to validate + * @nsel: Number of apptrust selectors in the array + * + * This function validates the apptrust selectors provided and ensures that + * they are in the correct order. + * + * This family of switches supports two apptrust selectors: DCB_APP_SEL_PCP and + * IEEE_8021QAZ_APP_SEL_DSCP. The priority order of the selectors is fixed and + * cannot be changed. The order is as follows: + * 1. DCB_APP_SEL_PCP - Priority Code Point selector (highest priority) + * 2. 
IEEE_8021QAZ_APP_SEL_DSCP - Differentiated Services Code Point selector + * (lowest priority) + * + * Return: 0 on success, or a negative error code on failure + */ +static int ksz_port_set_apptrust_validate(struct ksz_device *dev, int port, + const u8 *sel, int nsel) +{ + int i, j, found; + int j_prev = 0; + + /* Iterate through the requested selectors */ + for (i = 0; i < nsel; i++) { + found = 0; + + /* Check if the current selector is supported by the hardware */ + for (j = 0; j < sizeof(ksz_supported_apptrust); j++) { + if (sel[i] != ksz_supported_apptrust[j]) + continue; + + found = 1; + + /* Ensure that no higher priority selector (lower index) + * precedes a lower priority one + */ + if (i > 0 && j <= j_prev) + goto err_sel_not_vaild; + + j_prev = j; + break; + } + + if (!found) + goto err_sel_not_vaild; + } + + return 0; + +err_sel_not_vaild: + ksz_apptrust_error(dev); + + return -EINVAL; +} + +/** + * ksz88x3_port1_apptrust_quirk - Quirk for apptrust configuration on Port 1 + * of KSZ88x3 devices + * @dev: Pointer to the KSZ switch device structure + * @port: Port number for which to set the apptrust selectors + * @reg: Register address for the apptrust configuration + * @port1_data: Data to set for the apptrust configuration + * + * This function implements a quirk for apptrust configuration on Port 1 of + * KSZ88x3 devices. It ensures that apptrust configuration on Port 1 is not + * possible if PCP apptrust on Port 2 is disabled. This is because the Port 2 + * seems to be permanently hardwired to PCP classification, so we need to + * do Port 1 configuration always in agreement with Port 2 configuration. + * + * Return: 0 on success, or a negative error code on failure + */ +static int ksz88x3_port1_apptrust_quirk(struct ksz_device *dev, int port, + int reg, u8 port1_data) +{ + u8 port2_data; + int ret; + + /* If no apptrust is requested for Port 1, no need to care about Port 2 + * configuration. + */ + if (!(port1_data & (KSZ8_PORT_802_1P_ENABLE | KSZ8_PORT_DIFFSERV_ENABLE))) + return 0; + + /* We got request to enable any apptrust on Port 1. To make it possible, + * we need to enable multiple queues on the switch. If we enable + * multiqueue support, PCP classification on Port 2 will be + * automatically activated by HW. + */ + ret = ksz_pread8(dev, KSZ_PORT_2, reg, &port2_data); + if (ret) + return ret; + + /* If KSZ8_PORT_802_1P_ENABLE bit is set on Port 2, the driver showed + * the interest in PCP classification on Port 2. In this case, + * multiqueue support is enabled and we can enable any apptrust on + * Port 1. + * If KSZ8_PORT_802_1P_ENABLE bit is not set on Port 2, the PCP + * classification on Port 2 is still active, but the driver disabled + * multiqueue support and made frame prioritization inactive for + * all ports. In this case, we can't enable any apptrust on Port 1. + */ + if (!(port2_data & KSZ8_PORT_802_1P_ENABLE)) { + dev_err(dev->dev, "Not possible to enable any apptrust on Port 1 if PCP apptrust on Port 2 is disabled\n"); + return -EINVAL; + } + + return 0; +} + +/** + * ksz88x3_port2_apptrust_quirk - Quirk for apptrust configuration on Port 2 + * of KSZ88x3 devices + * @dev: Pointer to the KSZ switch device structure + * @port: Port number for which to set the apptrust selectors + * @reg: Register address for the apptrust configuration + * @port2_data: Data to set for the apptrust configuration + * + * This function implements a quirk for apptrust configuration on Port 2 of + * KSZ88x3 devices. 
It ensures that DSCP apptrust is not working on Port 2 and + * that it is not possible to disable PCP on Port 2. The only way to disable PCP + * on Port 2 is to disable multiple queues on the switch. + * + * Return: 0 on success, or a negative error code on failure + */ +static int ksz88x3_port2_apptrust_quirk(struct ksz_device *dev, int port, + int reg, u8 port2_data) +{ + struct dsa_switch *ds = dev->ds; + u8 port1_data; + int ret; + + /* First validate Port 2 configuration. DiffServ/DSCP is not working + * on this port. + */ + if (port2_data & KSZ8_PORT_DIFFSERV_ENABLE) { + dev_err(dev->dev, "DSCP apptrust is not working on Port 2\n"); + return -EINVAL; + } + + /* If PCP support is requested, we need to enable all queues on the + * switch to make PCP priority working on Port 2. + */ + if (port2_data & KSZ8_PORT_802_1P_ENABLE) + return ksz8_all_queues_split(dev, dev->info->num_tx_queues); + + /* We got request to disable PCP priority on Port 2. + * Now, we need to compare Port 2 configuration with Port 1 + * configuration. + */ + ret = ksz_pread8(dev, KSZ_PORT_1, reg, &port1_data); + if (ret) + return ret; + + /* If Port 1 has any apptrust enabled, we can't disable multiple queues + * on the switch, so we can't disable PCP on Port 2. + */ + if (port1_data & (KSZ8_PORT_802_1P_ENABLE | KSZ8_PORT_DIFFSERV_ENABLE)) { + dev_err(dev->dev, "Not possible to disable PCP on Port 2 if any apptrust is enabled on Port 1\n"); + return -EINVAL; + } + + /* Now we need to ensure that default priority on Port 1 is set to 0 + * otherwise we can't disable multiqueue support on the switch. + */ + ret = ksz_port_get_default_prio(ds, KSZ_PORT_1); + if (ret < 0) { + return ret; + } else if (ret) { + dev_err(dev->dev, "Not possible to disable PCP on Port 2 if non zero default priority is set on Port 1\n"); + return -EINVAL; + } + + /* Port 1 has no apptrust or default priority set and we got request to + * disable PCP on Port 2. We can disable multiqueue support to disable + * PCP on Port 2. + */ + return ksz8_all_queues_split(dev, 1); +} + +/** + * ksz88x3_port_apptrust_quirk - Quirk for apptrust configuration on KSZ88x3 + * devices + * @dev: Pointer to the KSZ switch device structure + * @port: Port number for which to set the apptrust selectors + * @reg: Register address for the apptrust configuration + * @data: Data to set for the apptrust configuration + * + * This function implements a quirk for apptrust configuration on KSZ88x3 + * devices. It ensures that apptrust configuration on Port 1 and + * Port 2 is done in agreement with each other. + * + * Return: 0 on success, or a negative error code on failure + */ +static int ksz88x3_port_apptrust_quirk(struct ksz_device *dev, int port, + int reg, u8 data) +{ + if (port == KSZ_PORT_1) + return ksz88x3_port1_apptrust_quirk(dev, port, reg, data); + else if (port == KSZ_PORT_2) + return ksz88x3_port2_apptrust_quirk(dev, port, reg, data); + + return 0; +} + +/** + * ksz_port_set_apptrust - Sets the apptrust selectors for a port on a KSZ + * switch + * @ds: Pointer to the DSA switch structure + * @port: Port number for which to set the apptrust selectors + * @sel: Array of apptrust selectors to set + * @nsel: Number of apptrust selectors in the array + * + * This function sets the apptrust selectors for the specified port on a KSZ + * switch. 
+ * + * Return: 0 on success, or a negative error code on failure + */ +int ksz_port_set_apptrust(struct dsa_switch *ds, int port, + const u8 *sel, int nsel) +{ + const struct ksz_apptrust_map *map; + struct ksz_device *dev = ds->priv; + int reg, i, ret; + u8 data = 0; + u8 mask; + + ret = ksz_port_set_apptrust_validate(dev, port, sel, nsel); + if (ret) + return ret; + + ksz_get_apptrust_map_and_reg(dev, &map, &reg, &mask); + + for (i = 0; i < nsel; i++) { + int j; + + for (j = 0; j < ARRAY_SIZE(ksz_supported_apptrust); j++) { + if (sel[i] != ksz_supported_apptrust[j]) + continue; + + data |= map[j].bit; + break; + } + } + + if (ksz_is_ksz88x3(dev)) { + ret = ksz88x3_port_apptrust_quirk(dev, port, reg, data); + if (ret) + return ret; + } + + return ksz_prmw8(dev, port, reg, mask, data); +} + +/** + * ksz_port_get_apptrust - Retrieves the apptrust selectors for a port on a KSZ + * switch + * @ds: Pointer to the DSA switch structure + * @port: Port number for which to get the apptrust selectors + * @sel: Array to store the apptrust selectors + * @nsel: Number of apptrust selectors in the array + * + * This function fetches the apptrust selectors for the specified port on a KSZ + * switch. + * + * Return: 0 on success, or a negative error code on failure + */ +int ksz_port_get_apptrust(struct dsa_switch *ds, int port, u8 *sel, int *nsel) +{ + const struct ksz_apptrust_map *map; + struct ksz_device *dev = ds->priv; + int reg, i, ret; + u8 data; + u8 mask; + + ksz_get_apptrust_map_and_reg(dev, &map, &reg, &mask); + + ret = ksz_pread8(dev, port, reg, &data); + if (ret) + return ret; + + *nsel = 0; + for (i = 0; i < ARRAY_SIZE(ksz_supported_apptrust); i++) { + if (data & map[i].bit) + sel[(*nsel)++] = ksz_supported_apptrust[i]; + } + + return 0; +} + +/** + * ksz_dcb_init_port - Initializes the DCB configuration for a port on a KSZ + * switch + * @dev: Pointer to the KSZ switch device structure + * @port: Port number for which to initialize the DCB configuration + * + * This function initializes the DCB configuration for the specified port on a + * KSZ switch. Particular DCB configuration is set for the port, including the + * default priority and apptrust selectors. + * The default priority is set to Best Effort, and the apptrust selectors are + * set to all supported selectors. + * + * Return: 0 on success, or a negative error code on failure + */ +int ksz_dcb_init_port(struct ksz_device *dev, int port) +{ + const u8 ksz_default_apptrust[] = { DCB_APP_SEL_PCP }; + int ret, ipm; + + if (is_ksz8(dev)) { + ipm = ieee8021q_tt_to_tc(IEEE8021Q_TT_BE, + dev->info->num_tx_queues); + if (ipm < 0) + return ipm; + } else { + ipm = IEEE8021Q_TT_BE; + } + + /* Set the default priority for the port to Best Effort */ + ret = ksz_port_set_default_prio(dev->ds, port, ipm); + if (ret) + return ret; + + return ksz_port_set_apptrust(dev->ds, port, ksz_default_apptrust, + ARRAY_SIZE(ksz_default_apptrust)); +} + +/** + * ksz_dcb_init - Initializes the DCB configuration for a KSZ switch + * @dev: Pointer to the KSZ switch device structure + * + * This function initializes the DCB configuration for a KSZ switch. The global + * DSCP-to-priority mapping table is initialized. 
+ * + * Return: 0 on success, or a negative error code on failure + */ +int ksz_dcb_init(struct ksz_device *dev) +{ + int ret; + + ret = ksz_init_global_dscp_map(dev); + if (ret) + return ret; + + return 0; +} diff --git a/drivers/net/dsa/microchip/ksz_dcb.h b/drivers/net/dsa/microchip/ksz_dcb.h new file mode 100644 index 000000000000..e2065223ba90 --- /dev/null +++ b/drivers/net/dsa/microchip/ksz_dcb.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2024 Pengutronix, Oleksij Rempel <[email protected]> */ + +#ifndef __KSZ_DCB_H +#define __KSZ_DCB_H + +#include <net/dsa.h> + +#include "ksz_common.h" + +int ksz_port_get_default_prio(struct dsa_switch *ds, int port); +int ksz_port_set_default_prio(struct dsa_switch *ds, int port, u8 prio); +int ksz_port_get_dscp_prio(struct dsa_switch *ds, int port, u8 dscp); +int ksz_port_add_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, u8 prio); +int ksz_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, u8 prio); +int ksz_port_set_apptrust(struct dsa_switch *ds, int port, + const unsigned char *sel, + int nsel); +int ksz_port_get_apptrust(struct dsa_switch *ds, int port, u8 *sel, int *nsel); +int ksz_dcb_init_port(struct ksz_device *dev, int port); +int ksz_dcb_init(struct ksz_device *dev); + +#endif /* __KSZ_DCB_H */ diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c index c8166fb440ab..8e8d83213b04 100644 --- a/drivers/net/dsa/microchip/ksz_spi.c +++ b/drivers/net/dsa/microchip/ksz_spi.c @@ -222,7 +222,6 @@ MODULE_DEVICE_TABLE(spi, ksz_spi_ids); static struct spi_driver ksz_spi_driver = { .driver = { .name = "ksz-switch", - .owner = THIS_MODULE, .of_match_table = ksz_dt_ids, }, .id_table = ksz_spi_ids, @@ -233,13 +232,6 @@ static struct spi_driver ksz_spi_driver = { module_spi_driver(ksz_spi_driver); -MODULE_ALIAS("spi:ksz9477"); -MODULE_ALIAS("spi:ksz9896"); -MODULE_ALIAS("spi:ksz9897"); -MODULE_ALIAS("spi:ksz9893"); -MODULE_ALIAS("spi:ksz9563"); -MODULE_ALIAS("spi:ksz8563"); -MODULE_ALIAS("spi:ksz9567"); MODULE_ALIAS("spi:lan937x"); MODULE_AUTHOR("Tristram Ha <[email protected]>"); MODULE_DESCRIPTION("Microchip ksz Series Switch SPI Driver"); diff --git a/drivers/net/dsa/mt7530-mdio.c b/drivers/net/dsa/mt7530-mdio.c index fa3ee85a99c1..51df42ccdbe6 100644 --- a/drivers/net/dsa/mt7530-mdio.c +++ b/drivers/net/dsa/mt7530-mdio.c @@ -18,7 +18,8 @@ static int mt7530_regmap_write(void *context, unsigned int reg, unsigned int val) { - struct mii_bus *bus = context; + struct mt7530_priv *priv = context; + struct mii_bus *bus = priv->bus; u16 page, r, lo, hi; int ret; @@ -27,36 +28,35 @@ mt7530_regmap_write(void *context, unsigned int reg, unsigned int val) lo = val & 0xffff; hi = val >> 16; - /* MT7530 uses 31 as the pseudo port */ - ret = bus->write(bus, 0x1f, 0x1f, page); + ret = bus->write(bus, priv->mdiodev->addr, 0x1f, page); if (ret < 0) return ret; - ret = bus->write(bus, 0x1f, r, lo); + ret = bus->write(bus, priv->mdiodev->addr, r, lo); if (ret < 0) return ret; - ret = bus->write(bus, 0x1f, 0x10, hi); + ret = bus->write(bus, priv->mdiodev->addr, 0x10, hi); return ret; } static int mt7530_regmap_read(void *context, unsigned int reg, unsigned int *val) { - struct mii_bus *bus = context; + struct mt7530_priv *priv = context; + struct mii_bus *bus = priv->bus; u16 page, r, lo, hi; int ret; page = (reg >> 6) & 0x3ff; r = (reg >> 2) & 0xf; - /* MT7530 uses 31 as the pseudo port */ - ret = bus->write(bus, 0x1f, 0x1f, page); + ret = bus->write(bus, priv->mdiodev->addr, 0x1f, page); 
if (ret < 0) return ret; - lo = bus->read(bus, 0x1f, r); - hi = bus->read(bus, 0x1f, 0x10); + lo = bus->read(bus, priv->mdiodev->addr, r); + hi = bus->read(bus, priv->mdiodev->addr, 0x10); *val = (hi << 16) | (lo & 0xffff); @@ -107,8 +107,7 @@ mt7531_create_sgmii(struct mt7530_priv *priv) mt7531_pcs_config[i]->unlock = mt7530_mdio_regmap_unlock; mt7531_pcs_config[i]->lock_arg = &priv->bus->mdio_lock; - regmap = devm_regmap_init(priv->dev, - &mt7530_regmap_bus, priv->bus, + regmap = devm_regmap_init(priv->dev, &mt7530_regmap_bus, priv, mt7531_pcs_config[i]); if (IS_ERR(regmap)) { ret = PTR_ERR(regmap); @@ -153,6 +152,7 @@ mt7530_probe(struct mdio_device *mdiodev) priv->bus = mdiodev->bus; priv->dev = &mdiodev->dev; + priv->mdiodev = mdiodev; ret = mt7530_probe_common(priv); if (ret) @@ -203,8 +203,8 @@ mt7530_probe(struct mdio_device *mdiodev) regmap_config->reg_stride = 4; regmap_config->max_register = MT7530_CREV; regmap_config->disable_locking = true; - priv->regmap = devm_regmap_init(priv->dev, &mt7530_regmap_bus, - priv->bus, regmap_config); + priv->regmap = devm_regmap_init(priv->dev, &mt7530_regmap_bus, priv, + regmap_config); if (IS_ERR(priv->regmap)) return PTR_ERR(priv->regmap); diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 8090390edaf9..598434d8d6e4 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -74,108 +74,94 @@ static const struct mt7530_mib_desc mt7530_mib[] = { MIB_DESC(1, 0xb8, "RxArlDrop"), }; -/* Since phy_device has not yet been created and - * phy_{read,write}_mmd_indirect is not available, we provide our own - * core_{read,write}_mmd_indirect with core_{clear,write,set} wrappers - * to complete this function. - */ -static int -core_read_mmd_indirect(struct mt7530_priv *priv, int prtad, int devad) +static void +mt7530_mutex_lock(struct mt7530_priv *priv) +{ + if (priv->bus) + mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED); +} + +static void +mt7530_mutex_unlock(struct mt7530_priv *priv) +{ + if (priv->bus) + mutex_unlock(&priv->bus->mdio_lock); +} + +static void +core_write(struct mt7530_priv *priv, u32 reg, u32 val) { struct mii_bus *bus = priv->bus; - int value, ret; + int ret; + + mt7530_mutex_lock(priv); /* Write the desired MMD Devad */ - ret = bus->write(bus, 0, MII_MMD_CTRL, devad); + ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr), + MII_MMD_CTRL, MDIO_MMD_VEND2); if (ret < 0) goto err; /* Write the desired MMD register address */ - ret = bus->write(bus, 0, MII_MMD_DATA, prtad); + ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr), + MII_MMD_DATA, reg); if (ret < 0) goto err; /* Select the Function : DATA with no post increment */ - ret = bus->write(bus, 0, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR)); + ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr), + MII_MMD_CTRL, MDIO_MMD_VEND2 | MII_MMD_CTRL_NOINCR); if (ret < 0) goto err; - /* Read the content of the MMD's selected register */ - value = bus->read(bus, 0, MII_MMD_DATA); - - return value; + /* Write the data into MMD's selected register */ + ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr), + MII_MMD_DATA, val); err: - dev_err(&bus->dev, "failed to read mmd register\n"); + if (ret < 0) + dev_err(&bus->dev, "failed to write mmd register\n"); - return ret; + mt7530_mutex_unlock(priv); } -static int -core_write_mmd_indirect(struct mt7530_priv *priv, int prtad, - int devad, u32 data) +static void +core_rmw(struct mt7530_priv *priv, u32 reg, u32 mask, u32 set) { struct mii_bus *bus = 
priv->bus; + u32 val; int ret; + mt7530_mutex_lock(priv); + /* Write the desired MMD Devad */ - ret = bus->write(bus, 0, MII_MMD_CTRL, devad); + ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr), + MII_MMD_CTRL, MDIO_MMD_VEND2); if (ret < 0) goto err; /* Write the desired MMD register address */ - ret = bus->write(bus, 0, MII_MMD_DATA, prtad); + ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr), + MII_MMD_DATA, reg); if (ret < 0) goto err; /* Select the Function : DATA with no post increment */ - ret = bus->write(bus, 0, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR)); + ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr), + MII_MMD_CTRL, MDIO_MMD_VEND2 | MII_MMD_CTRL_NOINCR); if (ret < 0) goto err; + /* Read the content of the MMD's selected register */ + val = bus->read(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr), + MII_MMD_DATA); + val &= ~mask; + val |= set; /* Write the data into MMD's selected register */ - ret = bus->write(bus, 0, MII_MMD_DATA, data); + ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr), + MII_MMD_DATA, val); err: if (ret < 0) - dev_err(&bus->dev, - "failed to write mmd register\n"); - return ret; -} - -static void -mt7530_mutex_lock(struct mt7530_priv *priv) -{ - if (priv->bus) - mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED); -} - -static void -mt7530_mutex_unlock(struct mt7530_priv *priv) -{ - if (priv->bus) - mutex_unlock(&priv->bus->mdio_lock); -} - -static void -core_write(struct mt7530_priv *priv, u32 reg, u32 val) -{ - mt7530_mutex_lock(priv); - - core_write_mmd_indirect(priv, reg, MDIO_MMD_VEND2, val); - - mt7530_mutex_unlock(priv); -} - -static void -core_rmw(struct mt7530_priv *priv, u32 reg, u32 mask, u32 set) -{ - u32 val; - - mt7530_mutex_lock(priv); - - val = core_read_mmd_indirect(priv, reg, MDIO_MMD_VEND2); - val &= ~mask; - val |= set; - core_write_mmd_indirect(priv, reg, MDIO_MMD_VEND2, val); + dev_err(&bus->dev, "failed to write mmd register\n"); mt7530_mutex_unlock(priv); } @@ -431,23 +417,23 @@ mt7530_setup_port6(struct dsa_switch *ds, phy_interface_t interface) mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK, P6_INTF_MODE(1)); - xtal = mt7530_read(priv, MT7530_MHWTRAP) & HWTRAP_XTAL_MASK; + xtal = mt7530_read(priv, MT753X_MTRAP) & MT7530_XTAL_MASK; - if (xtal == HWTRAP_XTAL_25MHZ) + if (xtal == MT7530_XTAL_25MHZ) ssc_delta = 0x57; else ssc_delta = 0x87; if (priv->id == ID_MT7621) { /* PLL frequency: 125MHz: 1.0GBit */ - if (xtal == HWTRAP_XTAL_40MHZ) + if (xtal == MT7530_XTAL_40MHZ) ncpo1 = 0x0640; - if (xtal == HWTRAP_XTAL_25MHZ) + if (xtal == MT7530_XTAL_25MHZ) ncpo1 = 0x0a00; } else { /* PLL frequency: 250MHz: 2.0Gbit */ - if (xtal == HWTRAP_XTAL_40MHZ) + if (xtal == MT7530_XTAL_40MHZ) ncpo1 = 0x0c80; - if (xtal == HWTRAP_XTAL_25MHZ) + if (xtal == MT7530_XTAL_25MHZ) ncpo1 = 0x1400; } @@ -470,19 +456,20 @@ mt7530_setup_port6(struct dsa_switch *ds, phy_interface_t interface) static void mt7531_pll_setup(struct mt7530_priv *priv) { + enum mt7531_xtal_fsel xtal; u32 top_sig; u32 hwstrap; - u32 xtal; u32 val; val = mt7530_read(priv, MT7531_CREV); top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR); - hwstrap = mt7530_read(priv, MT7531_HWTRAP); + hwstrap = mt7530_read(priv, MT753X_TRAP); if ((val & CHIP_REV_M) > 0) - xtal = (top_sig & PAD_MCM_SMI_EN) ? HWTRAP_XTAL_FSEL_40MHZ : - HWTRAP_XTAL_FSEL_25MHZ; + xtal = (top_sig & PAD_MCM_SMI_EN) ? MT7531_XTAL_FSEL_40MHZ : + MT7531_XTAL_FSEL_25MHZ; else - xtal = hwstrap & HWTRAP_XTAL_FSEL_MASK; + xtal = (hwstrap & MT7531_XTAL25) ? 
MT7531_XTAL_FSEL_25MHZ : + MT7531_XTAL_FSEL_40MHZ; /* Step 1 : Disable MT7531 COREPLL */ val = mt7530_read(priv, MT7531_PLLGP_EN); @@ -511,13 +498,13 @@ mt7531_pll_setup(struct mt7530_priv *priv) usleep_range(25, 35); switch (xtal) { - case HWTRAP_XTAL_FSEL_25MHZ: + case MT7531_XTAL_FSEL_25MHZ: val = mt7530_read(priv, MT7531_PLLGP_CR0); val &= ~RG_COREPLL_SDM_PCW_M; val |= 0x140000 << RG_COREPLL_SDM_PCW_S; mt7530_write(priv, MT7531_PLLGP_CR0, val); break; - case HWTRAP_XTAL_FSEL_40MHZ: + case MT7531_XTAL_FSEL_40MHZ: val = mt7530_read(priv, MT7531_PLLGP_CR0); val &= ~RG_COREPLL_SDM_PCW_M; val |= 0x190000 << RG_COREPLL_SDM_PCW_S; @@ -871,19 +858,15 @@ mt7530_set_ageing_time(struct dsa_switch *ds, unsigned int msecs) return 0; } -static const char *p5_intf_modes(unsigned int p5_interface) +static const char *mt7530_p5_mode_str(unsigned int mode) { - switch (p5_interface) { - case P5_DISABLED: - return "DISABLED"; - case P5_INTF_SEL_PHY_P0: - return "PHY P0"; - case P5_INTF_SEL_PHY_P4: - return "PHY P4"; - case P5_INTF_SEL_GMAC5: - return "GMAC5"; + switch (mode) { + case MUX_PHY_P0: + return "MUX PHY P0"; + case MUX_PHY_P4: + return "MUX PHY P4"; default: - return "unknown"; + return "GMAC5"; } } @@ -895,34 +878,31 @@ static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface) mutex_lock(&priv->reg_mutex); - val = mt7530_read(priv, MT7530_MHWTRAP); + val = mt7530_read(priv, MT753X_MTRAP); - val |= MHWTRAP_MANUAL | MHWTRAP_P5_MAC_SEL | MHWTRAP_P5_DIS; - val &= ~MHWTRAP_P5_RGMII_MODE & ~MHWTRAP_PHY0_SEL; + val &= ~MT7530_P5_PHY0_SEL & ~MT7530_P5_MAC_SEL & ~MT7530_P5_RGMII_MODE; - switch (priv->p5_intf_sel) { - case P5_INTF_SEL_PHY_P0: - /* MT7530_P5_MODE_GPHY_P0: 2nd GMAC -> P5 -> P0 */ - val |= MHWTRAP_PHY0_SEL; + switch (priv->p5_mode) { + /* MUX_PHY_P0: P0 -> P5 -> SoC MAC */ + case MUX_PHY_P0: + val |= MT7530_P5_PHY0_SEL; fallthrough; - case P5_INTF_SEL_PHY_P4: - /* MT7530_P5_MODE_GPHY_P4: 2nd GMAC -> P5 -> P4 */ - val &= ~MHWTRAP_P5_MAC_SEL & ~MHWTRAP_P5_DIS; + /* MUX_PHY_P4: P4 -> P5 -> SoC MAC */ + case MUX_PHY_P4: /* Setup the MAC by default for the cpu port */ - mt7530_write(priv, MT7530_PMCR_P(5), 0x56300); - break; - case P5_INTF_SEL_GMAC5: - /* MT7530_P5_MODE_GMAC: P5 -> External phy or 2nd GMAC */ - val &= ~MHWTRAP_P5_DIS; + mt7530_write(priv, MT753X_PMCR_P(5), 0x56300); break; + + /* GMAC5: P5 -> SoC MAC or external PHY */ default: + val |= MT7530_P5_MAC_SEL; break; } /* Setup RGMII settings */ if (phy_interface_mode_is_rgmii(interface)) { - val |= MHWTRAP_P5_RGMII_MODE; + val |= MT7530_P5_RGMII_MODE; /* P5 RGMII RX Clock Control: delay setting for 1000M */ mt7530_write(priv, MT7530_P5RGMIIRXCR, CSR_RGMII_EDGE_ALIGN); @@ -942,10 +922,10 @@ static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface) P5_IO_CLK_DRV(1) | P5_IO_DATA_DRV(1)); } - mt7530_write(priv, MT7530_MHWTRAP, val); + mt7530_write(priv, MT753X_MTRAP, val); - dev_dbg(ds->dev, "Setup P5, HWTRAP=0x%x, intf_sel=%s, phy-mode=%s\n", - val, p5_intf_modes(priv->p5_intf_sel), phy_modes(interface)); + dev_dbg(ds->dev, "Setup P5, HWTRAP=0x%x, mode=%s, phy-mode=%s\n", val, + mt7530_p5_mode_str(priv->p5_mode), phy_modes(interface)); mutex_unlock(&priv->reg_mutex); } @@ -1125,42 +1105,34 @@ mt753x_trap_frames(struct mt7530_priv *priv) * VLAN-untagged. 
*/ mt7530_rmw(priv, MT753X_BPC, - MT753X_PAE_BPDU_FR | MT753X_PAE_EG_TAG_MASK | - MT753X_PAE_PORT_FW_MASK | MT753X_BPDU_EG_TAG_MASK | - MT753X_BPDU_PORT_FW_MASK, - MT753X_PAE_BPDU_FR | - MT753X_PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | - MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY) | - MT753X_BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | - MT753X_BPDU_CPU_ONLY); + PAE_BPDU_FR | PAE_EG_TAG_MASK | PAE_PORT_FW_MASK | + BPDU_EG_TAG_MASK | BPDU_PORT_FW_MASK, + PAE_BPDU_FR | PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | + PAE_PORT_FW(TO_CPU_FW_CPU_ONLY) | + BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | + TO_CPU_FW_CPU_ONLY); /* Trap frames with :01 and :02 MAC DAs to the CPU port(s) and egress * them VLAN-untagged. */ mt7530_rmw(priv, MT753X_RGAC1, - MT753X_R02_BPDU_FR | MT753X_R02_EG_TAG_MASK | - MT753X_R02_PORT_FW_MASK | MT753X_R01_BPDU_FR | - MT753X_R01_EG_TAG_MASK | MT753X_R01_PORT_FW_MASK, - MT753X_R02_BPDU_FR | - MT753X_R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | - MT753X_R02_PORT_FW(MT753X_BPDU_CPU_ONLY) | - MT753X_R01_BPDU_FR | - MT753X_R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | - MT753X_BPDU_CPU_ONLY); + R02_BPDU_FR | R02_EG_TAG_MASK | R02_PORT_FW_MASK | + R01_BPDU_FR | R01_EG_TAG_MASK | R01_PORT_FW_MASK, + R02_BPDU_FR | R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | + R02_PORT_FW(TO_CPU_FW_CPU_ONLY) | R01_BPDU_FR | + R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | + TO_CPU_FW_CPU_ONLY); /* Trap frames with :03 and :0E MAC DAs to the CPU port(s) and egress * them VLAN-untagged. */ mt7530_rmw(priv, MT753X_RGAC2, - MT753X_R0E_BPDU_FR | MT753X_R0E_EG_TAG_MASK | - MT753X_R0E_PORT_FW_MASK | MT753X_R03_BPDU_FR | - MT753X_R03_EG_TAG_MASK | MT753X_R03_PORT_FW_MASK, - MT753X_R0E_BPDU_FR | - MT753X_R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | - MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY) | - MT753X_R03_BPDU_FR | - MT753X_R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | - MT753X_BPDU_CPU_ONLY); + R0E_BPDU_FR | R0E_EG_TAG_MASK | R0E_PORT_FW_MASK | + R03_BPDU_FR | R03_EG_TAG_MASK | R03_PORT_FW_MASK, + R0E_BPDU_FR | R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | + R0E_PORT_FW(TO_CPU_FW_CPU_ONLY) | R03_BPDU_FR | + R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | + TO_CPU_FW_CPU_ONLY); } static void @@ -1173,7 +1145,7 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port) PORT_SPEC_TAG); /* Enable flooding on the CPU port */ - mt7530_set(priv, MT7530_MFC, BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) | + mt7530_set(priv, MT753X_MFC, BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) | UNU_FFP(BIT(port))); /* Add the CPU port to the CPU port bitmap for MT7531 and the switch on @@ -1218,6 +1190,14 @@ mt7530_port_enable(struct dsa_switch *ds, int port, mutex_unlock(&priv->reg_mutex); + if (priv->id != ID_MT7530 && priv->id != ID_MT7621) + return 0; + + if (port == 5) + mt7530_clear(priv, MT753X_MTRAP, MT7530_P5_DIS); + else if (port == 6) + mt7530_clear(priv, MT753X_MTRAP, MT7530_P6_DIS); + return 0; } @@ -1236,6 +1216,15 @@ mt7530_port_disable(struct dsa_switch *ds, int port) PCR_MATRIX_CLR); mutex_unlock(&priv->reg_mutex); + + if (priv->id != ID_MT7530 && priv->id != ID_MT7621) + return; + + /* Do not set MT7530_P5_DIS when port 5 is being used for PHY muxing. */ + if (port == 5 && priv->p5_mode == GMAC5) + mt7530_set(priv, MT753X_MTRAP, MT7530_P5_DIS); + else if (port == 6) + mt7530_set(priv, MT753X_MTRAP, MT7530_P6_DIS); } static int @@ -1337,15 +1326,15 @@ mt7530_port_bridge_flags(struct dsa_switch *ds, int port, flags.val & BR_LEARNING ? 0 : SA_DIS); if (flags.mask & BR_FLOOD) - mt7530_rmw(priv, MT7530_MFC, UNU_FFP(BIT(port)), + mt7530_rmw(priv, MT753X_MFC, UNU_FFP(BIT(port)), flags.val & BR_FLOOD ? 
UNU_FFP(BIT(port)) : 0); if (flags.mask & BR_MCAST_FLOOD) - mt7530_rmw(priv, MT7530_MFC, UNM_FFP(BIT(port)), + mt7530_rmw(priv, MT753X_MFC, UNM_FFP(BIT(port)), flags.val & BR_MCAST_FLOOD ? UNM_FFP(BIT(port)) : 0); if (flags.mask & BR_BCAST_FLOOD) - mt7530_rmw(priv, MT7530_MFC, BC_FFP(BIT(port)), + mt7530_rmw(priv, MT753X_MFC, BC_FFP(BIT(port)), flags.val & BR_BCAST_FLOOD ? BC_FFP(BIT(port)) : 0); return 0; @@ -1423,7 +1412,7 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port) mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, G0_PORT_VID_DEF); - for (i = 0; i < MT7530_NUM_PORTS; i++) { + for (i = 0; i < priv->ds->num_ports; i++) { if (dsa_is_user_port(ds, i) && dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) { all_user_ports_removed = false; @@ -1881,20 +1870,6 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port, return 0; } -static int mt753x_mirror_port_get(unsigned int id, u32 val) -{ - return (id == ID_MT7531 || id == ID_MT7988) ? - MT7531_MIRROR_PORT_GET(val) : - MIRROR_PORT(val); -} - -static int mt753x_mirror_port_set(unsigned int id, u32 val) -{ - return (id == ID_MT7531 || id == ID_MT7988) ? - MT7531_MIRROR_PORT_SET(val) : - MIRROR_PORT(val); -} - static int mt753x_port_mirror_add(struct dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror, bool ingress, struct netlink_ext_ack *extack) @@ -1910,14 +1885,14 @@ static int mt753x_port_mirror_add(struct dsa_switch *ds, int port, val = mt7530_read(priv, MT753X_MIRROR_REG(priv->id)); /* MT7530 only supports one monitor port */ - monitor_port = mt753x_mirror_port_get(priv->id, val); + monitor_port = MT753X_MIRROR_PORT_GET(priv->id, val); if (val & MT753X_MIRROR_EN(priv->id) && monitor_port != mirror->to_local_port) return -EEXIST; val |= MT753X_MIRROR_EN(priv->id); - val &= ~MT753X_MIRROR_MASK(priv->id); - val |= mt753x_mirror_port_set(priv->id, mirror->to_local_port); + val &= ~MT753X_MIRROR_PORT_MASK(priv->id); + val |= MT753X_MIRROR_PORT_SET(priv->id, mirror->to_local_port); mt7530_write(priv, MT753X_MIRROR_REG(priv->id), val); val = mt7530_read(priv, MT7530_PCR_P(port)); @@ -2405,7 +2380,7 @@ mt7530_setup(struct dsa_switch *ds) } /* Waiting for MT7530 got to stable */ - INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_HWTRAP); + INIT_MT7530_DUMMY_POLL(&p, priv, MT753X_TRAP); ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0, 20, 1000000); if (ret < 0) { @@ -2420,7 +2395,7 @@ mt7530_setup(struct dsa_switch *ds) return -ENODEV; } - if ((val & HWTRAP_XTAL_MASK) == HWTRAP_XTAL_20MHZ) { + if ((val & MT7530_XTAL_MASK) == MT7530_XTAL_20MHZ) { dev_err(priv->dev, "MT7530 with a 20MHz XTAL is not supported!\n"); return -EINVAL; @@ -2440,13 +2415,13 @@ mt7530_setup(struct dsa_switch *ds) mt7530_rmw(priv, MT7530_TRGMII_RD(i), RD_TAP_MASK, RD_TAP(16)); - /* Enable port 6 */ - val = mt7530_read(priv, MT7530_MHWTRAP); - val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS; - val |= MHWTRAP_MANUAL; - mt7530_write(priv, MT7530_MHWTRAP, val); + /* Allow modifying the trap and directly access PHY registers via the + * MDIO bus the switch is on. 
+ */ + mt7530_rmw(priv, MT753X_MTRAP, MT7530_CHG_TRAP | + MT7530_PHY_INDIRECT_ACCESS, MT7530_CHG_TRAP); - if ((val & HWTRAP_XTAL_MASK) == HWTRAP_XTAL_40MHZ) + if ((val & MT7530_XTAL_MASK) == MT7530_XTAL_40MHZ) mt7530_pll_setup(priv); mt753x_trap_frames(priv); @@ -2454,12 +2429,12 @@ mt7530_setup(struct dsa_switch *ds) /* Enable and reset MIB counters */ mt7530_mib_reset(ds); - for (i = 0; i < MT7530_NUM_PORTS; i++) { + for (i = 0; i < priv->ds->num_ports; i++) { /* Clear link settings and enable force mode to force link down * on all ports until they're enabled later. */ - mt7530_rmw(priv, MT7530_PMCR_P(i), PMCR_LINK_SETTINGS_MASK | - PMCR_FORCE_MODE, PMCR_FORCE_MODE); + mt7530_rmw(priv, MT753X_PMCR_P(i), PMCR_LINK_SETTINGS_MASK | + MT7530_FORCE_MODE, MT7530_FORCE_MODE); /* Disable forwarding by default on all ports */ mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK, @@ -2490,13 +2465,11 @@ mt7530_setup(struct dsa_switch *ds) if (ret) return ret; - /* Setup port 5 */ - if (!dsa_is_unused_port(ds, 5)) { - priv->p5_intf_sel = P5_INTF_SEL_GMAC5; - } else { + /* Check for PHY muxing on port 5 */ + if (dsa_is_unused_port(ds, 5)) { /* Scan the ethernet nodes. Look for GMAC1, lookup the used PHY. - * Set priv->p5_intf_sel to the appropriate value if PHY muxing - * is detected. + * Set priv->p5_mode to the appropriate value if PHY muxing is + * detected. */ for_each_child_of_node(dn, mac_np) { if (!of_device_is_compatible(mac_np, @@ -2511,7 +2484,8 @@ mt7530_setup(struct dsa_switch *ds) if (!phy_node) continue; - if (phy_node->parent == priv->dev->of_node->parent) { + if (phy_node->parent == priv->dev->of_node->parent || + phy_node->parent->parent == priv->dev->of_node) { ret = of_get_phy_mode(mac_np, &interface); if (ret && ret != -ENODEV) { of_node_put(mac_np); @@ -2520,18 +2494,20 @@ mt7530_setup(struct dsa_switch *ds) } id = of_mdio_parse_addr(ds->dev, phy_node); if (id == 0) - priv->p5_intf_sel = P5_INTF_SEL_PHY_P0; + priv->p5_mode = MUX_PHY_P0; if (id == 4) - priv->p5_intf_sel = P5_INTF_SEL_PHY_P4; + priv->p5_mode = MUX_PHY_P4; } of_node_put(mac_np); of_node_put(phy_node); break; } - if (priv->p5_intf_sel == P5_INTF_SEL_PHY_P0 || - priv->p5_intf_sel == P5_INTF_SEL_PHY_P4) + if (priv->p5_mode == MUX_PHY_P0 || + priv->p5_mode == MUX_PHY_P4) { + mt7530_clear(priv, MT753X_MTRAP, MT7530_P5_DIS); mt7530_setup_port5(ds, interface); + } } #ifdef CONFIG_GPIOLIB @@ -2562,15 +2538,15 @@ mt7531_setup_common(struct dsa_switch *ds) mt7530_mib_reset(ds); /* Disable flooding on all ports */ - mt7530_clear(priv, MT7530_MFC, BC_FFP_MASK | UNM_FFP_MASK | + mt7530_clear(priv, MT753X_MFC, BC_FFP_MASK | UNM_FFP_MASK | UNU_FFP_MASK); - for (i = 0; i < MT7530_NUM_PORTS; i++) { + for (i = 0; i < priv->ds->num_ports; i++) { /* Clear link settings and enable force mode to force link down * on all ports until they're enabled later. 
*/ - mt7530_rmw(priv, MT7530_PMCR_P(i), PMCR_LINK_SETTINGS_MASK | - MT7531_FORCE_MODE, MT7531_FORCE_MODE); + mt7530_rmw(priv, MT753X_PMCR_P(i), PMCR_LINK_SETTINGS_MASK | + MT7531_FORCE_MODE_MASK, MT7531_FORCE_MODE_MASK); /* Disable forwarding by default on all ports */ mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK, @@ -2629,7 +2605,7 @@ mt7531_setup(struct dsa_switch *ds) } /* Waiting for MT7530 got to stable */ - INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_HWTRAP); + INIT_MT7530_DUMMY_POLL(&p, priv, MT753X_TRAP); ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0, 20, 1000000); if (ret < 0) { @@ -2652,8 +2628,8 @@ mt7531_setup(struct dsa_switch *ds) priv->p5_sgmii = !!(val & PAD_DUAL_SGMII_EN); /* Force link down on all ports before internal reset */ - for (i = 0; i < MT7530_NUM_PORTS; i++) - mt7530_write(priv, MT7530_PMCR_P(i), MT7531_FORCE_LNK); + for (i = 0; i < priv->ds->num_ports; i++) + mt7530_write(priv, MT753X_PMCR_P(i), MT7531_FORCE_MODE_LNK); /* Reset the switch through internal reset */ mt7530_write(priv, MT7530_SYS_CTRL, SYS_CTRL_SW_RST | SYS_CTRL_REG_RST); @@ -2661,16 +2637,16 @@ mt7531_setup(struct dsa_switch *ds) if (!priv->p5_sgmii) { mt7531_pll_setup(priv); } else { - /* Let ds->user_mii_bus be able to access external phy. */ + /* Unlike MT7531BE, the GPIO 6-12 pins are not used for RGMII on + * MT7531AE. Set the GPIO 11-12 pins to function as MDC and MDIO + * to expose the MDIO bus of the switch. + */ mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO11_RG_RXD2_MASK, MT7531_EXT_P_MDC_11); mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO12_RG_RXD3_MASK, MT7531_EXT_P_MDIO_12); } - if (!dsa_is_unused_port(ds, 5)) - priv->p5_intf_sel = P5_INTF_SEL_GMAC5; - mt7530_rmw(priv, MT7531_GPIO_MODE0, MT7531_GPIO0_MASK, MT7531_GPIO0_INTERRUPT); @@ -2679,21 +2655,26 @@ mt7531_setup(struct dsa_switch *ds) * phy_[read,write]_mmd_indirect is called, we provide our own * mt7531_ind_mmd_phy_[read,write] to complete this function. */ - val = mt7531_ind_c45_phy_read(priv, MT753X_CTRL_PHY_ADDR, + val = mt7531_ind_c45_phy_read(priv, + MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr), MDIO_MMD_VEND2, CORE_PLL_GROUP4); val |= MT7531_RG_SYSPLL_DMY2 | MT7531_PHY_PLL_BYPASS_MODE; val &= ~MT7531_PHY_PLL_OFF; - mt7531_ind_c45_phy_write(priv, MT753X_CTRL_PHY_ADDR, MDIO_MMD_VEND2, - CORE_PLL_GROUP4, val); + mt7531_ind_c45_phy_write(priv, + MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr), + MDIO_MMD_VEND2, CORE_PLL_GROUP4, val); /* Disable EEE advertisement on the switch PHYs. */ - for (i = MT753X_CTRL_PHY_ADDR; - i < MT753X_CTRL_PHY_ADDR + MT7530_NUM_PHYS; i++) { + for (i = MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr); + i < MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr) + MT7530_NUM_PHYS; + i++) { mt7531_ind_c45_phy_write(priv, i, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0); } - mt7531_setup_common(ds); + ret = mt7531_setup_common(ds); + if (ret) + return ret; /* Setup VLAN ID 0 for VLAN-unaware bridges */ ret = mt7530_setup_vlan0(priv); @@ -2709,6 +2690,8 @@ mt7531_setup(struct dsa_switch *ds) static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port, struct phylink_config *config) { + config->mac_capabilities |= MAC_10 | MAC_100 | MAC_1000FD; + switch (port) { /* Ports which are connected to switch PHYs. There is no MII pinout. */ case 0 ... 4: @@ -2740,6 +2723,8 @@ static void mt7531_mac_port_get_caps(struct dsa_switch *ds, int port, { struct mt7530_priv *priv = ds->priv; + config->mac_capabilities |= MAC_10 | MAC_100 | MAC_1000FD; + switch (port) { /* Ports which are connected to switch PHYs. There is no MII pinout. 
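An illustrative aside, not part of the patch: both mt7530_setup() and mt7531_setup() above wait for the switch to come out of reset by polling the renamed MT753X_TRAP register until it reads non-zero, through readx_poll_timeout() from linux/iopoll.h. The general shape of that helper is sketched here against a hypothetical MMIO readout function rather than the driver's INIT_MT7530_DUMMY_POLL cookie:

#include <linux/io.h>
#include <linux/iopoll.h>

static u32 example_read_trap(void __iomem *base)
{
	return readl(base + 0x7800);	/* hypothetical MMIO view of the trap */
}

static int example_wait_ready(void __iomem *base)
{
	u32 val;

	/* Poll every 20 us, give up after one second, as the driver does. */
	return readx_poll_timeout(example_read_trap, base, val,
				  val != 0, 20, 1000000);
}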
*/ case 0 ... 4: @@ -2779,14 +2764,17 @@ static void mt7988_mac_port_get_caps(struct dsa_switch *ds, int port, case 0 ... 3: __set_bit(PHY_INTERFACE_MODE_INTERNAL, config->supported_interfaces); + + config->mac_capabilities |= MAC_10 | MAC_100 | MAC_1000FD; break; /* Port 6 is connected to SoC's XGMII MAC. There is no MII pinout. */ case 6: __set_bit(PHY_INTERFACE_MODE_INTERNAL, config->supported_interfaces); - config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | - MAC_10000FD; + + config->mac_capabilities |= MAC_10000FD; + break; } } @@ -2802,7 +2790,7 @@ mt7530_mac_config(struct dsa_switch *ds, int port, unsigned int mode, mt7530_setup_port6(priv->ds, interface); } -static void mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port, +static void mt7531_rgmii_setup(struct mt7530_priv *priv, phy_interface_t interface, struct phy_device *phydev) { @@ -2853,62 +2841,70 @@ mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode, if (phy_interface_mode_is_rgmii(interface)) { dp = dsa_to_port(ds, port); phydev = dp->user->phydev; - mt7531_rgmii_setup(priv, port, interface, phydev); + mt7531_rgmii_setup(priv, interface, phydev); } } static struct phylink_pcs * -mt753x_phylink_mac_select_pcs(struct dsa_switch *ds, int port, +mt753x_phylink_mac_select_pcs(struct phylink_config *config, phy_interface_t interface) { - struct mt7530_priv *priv = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct mt7530_priv *priv = dp->ds->priv; switch (interface) { case PHY_INTERFACE_MODE_TRGMII: - return &priv->pcs[port].pcs; + return &priv->pcs[dp->index].pcs; case PHY_INTERFACE_MODE_SGMII: case PHY_INTERFACE_MODE_1000BASEX: case PHY_INTERFACE_MODE_2500BASEX: - return priv->ports[port].sgmii_pcs; + return priv->ports[dp->index].sgmii_pcs; default: return NULL; } } static void -mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, +mt753x_phylink_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) { - struct mt7530_priv *priv = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct dsa_switch *ds = dp->ds; + struct mt7530_priv *priv; + int port = dp->index; + + priv = ds->priv; if ((port == 5 || port == 6) && priv->info->mac_port_config) priv->info->mac_port_config(ds, port, mode, state->interface); /* Are we connected to external phy */ if (port == 5 && dsa_is_user_port(ds, 5)) - mt7530_set(priv, MT7530_PMCR_P(port), PMCR_EXT_PHY); + mt7530_set(priv, MT753X_PMCR_P(port), PMCR_EXT_PHY); } -static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port, +static void mt753x_phylink_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { - struct mt7530_priv *priv = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct mt7530_priv *priv = dp->ds->priv; - mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK); + mt7530_clear(priv, MT753X_PMCR_P(dp->index), PMCR_LINK_SETTINGS_MASK); } -static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port, +static void mt753x_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phydev, unsigned int mode, phy_interface_t interface, - struct phy_device *phydev, int speed, int duplex, bool tx_pause, bool rx_pause) { - struct mt7530_priv *priv = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct mt7530_priv *priv = dp->ds->priv; u32 mcr; - mcr = PMCR_RX_EN | PMCR_TX_EN | PMCR_FORCE_LNK; + mcr = PMCR_MAC_RX_EN | PMCR_MAC_TX_EN | 
PMCR_FORCE_LNK; switch (speed) { case SPEED_1000: @@ -2923,9 +2919,9 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port, if (duplex == DUPLEX_FULL) { mcr |= PMCR_FORCE_FDX; if (tx_pause) - mcr |= PMCR_TX_FC_EN; + mcr |= PMCR_FORCE_TX_FC_EN; if (rx_pause) - mcr |= PMCR_RX_FC_EN; + mcr |= PMCR_FORCE_RX_FC_EN; } if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, false) >= 0) { @@ -2940,7 +2936,7 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port, } } - mt7530_set(priv, MT7530_PMCR_P(port), mcr); + mt7530_set(priv, MT753X_PMCR_P(dp->index), mcr); } static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port, @@ -2948,9 +2944,7 @@ static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port, { struct mt7530_priv *priv = ds->priv; - /* This switch only supports full-duplex at 1Gbps */ - config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | - MAC_10 | MAC_100 | MAC_1000FD; + config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE; priv->info->mac_port_get_caps(ds, port, config); } @@ -3038,6 +3032,8 @@ mt753x_setup(struct dsa_switch *ds) ret = mt7530_setup_mdio(priv); if (ret && priv->irq) mt7530_free_irq_common(priv); + if (ret) + return ret; /* Initialise the PCS devices */ for (i = 0; i < priv->ds->num_ports; i++) { @@ -3060,10 +3056,10 @@ static int mt753x_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e) { struct mt7530_priv *priv = ds->priv; - u32 eeecr = mt7530_read(priv, MT7530_PMEEECR_P(port)); + u32 eeecr = mt7530_read(priv, MT753X_PMEEECR_P(port)); e->tx_lpi_enabled = !(eeecr & LPI_MODE_EN); - e->tx_lpi_timer = GET_LPI_THRESH(eeecr); + e->tx_lpi_timer = LPI_THRESH_GET(eeecr); return 0; } @@ -3077,11 +3073,11 @@ static int mt753x_set_mac_eee(struct dsa_switch *ds, int port, if (e->tx_lpi_timer > 0xFFF) return -EINVAL; - set = SET_LPI_THRESH(e->tx_lpi_timer); + set = LPI_THRESH_SET(e->tx_lpi_timer); if (!e->tx_lpi_enabled) /* Force LPI Mode without a delay */ set |= LPI_MODE_EN; - mt7530_rmw(priv, MT7530_PMEEECR_P(port), mask, set); + mt7530_rmw(priv, MT753X_PMEEECR_P(port), mask, set); return 0; } @@ -3110,10 +3106,12 @@ mt753x_conduit_state_change(struct dsa_switch *ds, else priv->active_cpu_ports &= ~mask; - if (priv->active_cpu_ports) - val = CPU_EN | CPU_PORT(__ffs(priv->active_cpu_ports)); + if (priv->active_cpu_ports) { + val = MT7530_CPU_EN | + MT7530_CPU_PORT(__ffs(priv->active_cpu_ports)); + } - mt7530_rmw(priv, MT7530_MFC, CPU_EN | CPU_PORT_MASK, val); + mt7530_rmw(priv, MT753X_MFC, MT7530_CPU_EN | MT7530_CPU_PORT_MASK, val); } static int mt7988_setup(struct dsa_switch *ds) @@ -3160,16 +3158,19 @@ const struct dsa_switch_ops mt7530_switch_ops = { .port_mirror_add = mt753x_port_mirror_add, .port_mirror_del = mt753x_port_mirror_del, .phylink_get_caps = mt753x_phylink_get_caps, - .phylink_mac_select_pcs = mt753x_phylink_mac_select_pcs, - .phylink_mac_config = mt753x_phylink_mac_config, - .phylink_mac_link_down = mt753x_phylink_mac_link_down, - .phylink_mac_link_up = mt753x_phylink_mac_link_up, .get_mac_eee = mt753x_get_mac_eee, .set_mac_eee = mt753x_set_mac_eee, .conduit_state_change = mt753x_conduit_state_change, }; EXPORT_SYMBOL_GPL(mt7530_switch_ops); +static const struct phylink_mac_ops mt753x_phylink_mac_ops = { + .mac_select_pcs = mt753x_phylink_mac_select_pcs, + .mac_config = mt753x_phylink_mac_config, + .mac_link_down = mt753x_phylink_mac_link_down, + .mac_link_up = mt753x_phylink_mac_link_up, +}; + const struct mt753x_info mt753x_table[] = { [ID_MT7621] = { .id = ID_MT7621, @@ 
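An illustrative aside, not part of the patch: mt753x_phylink_mac_ops above is the first instance of a conversion repeated for every DSA driver in this section. The phylink MAC callbacks move out of dsa_switch_ops into a struct phylink_mac_ops, take a struct phylink_config instead of (ds, port), recover the port through dsa_phylink_to_port(), and are published via ds->phylink_mac_ops at probe time. A skeleton of the pattern with hypothetical driver names:

static void example_mac_link_down(struct phylink_config *config,
				  unsigned int mode,
				  phy_interface_t interface)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);
	struct example_priv *priv = dp->ds->priv;

	example_force_link_down(priv, dp->index);	/* hypothetical helper */
}

static const struct phylink_mac_ops example_phylink_mac_ops = {
	.mac_link_down	= example_mac_link_down,
	/* .mac_config, .mac_link_up, ... are converted the same way */
};

static int example_probe(struct example_priv *priv)
{
	priv->ds->ops = &example_switch_ops;	/* hypothetical dsa_switch_ops */
	priv->ds->phylink_mac_ops = &example_phylink_mac_ops;

	return dsa_register_switch(priv->ds);
}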
-3236,17 +3237,11 @@ mt7530_probe_common(struct mt7530_priv *priv) if (!priv->info) return -EINVAL; - /* Sanity check if these required device operations are filled - * properly. - */ - if (!priv->info->sw_setup || !priv->info->phy_read_c22 || - !priv->info->phy_write_c22 || !priv->info->mac_port_get_caps) - return -EINVAL; - priv->id = priv->info->id; priv->dev = dev; priv->ds->priv = priv; priv->ds->ops = &mt7530_switch_ops; + priv->ds->phylink_mac_ops = &mt753x_phylink_mac_ops; mutex_init(&priv->reg_mutex); dev_set_drvdata(dev, priv); diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index a08053390b28..2ea4e24628c6 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -36,78 +36,97 @@ enum mt753x_id { #define MT753X_AGC 0xc #define LOCAL_EN BIT(7) -/* Registers to mac forward control for unknown frames */ -#define MT7530_MFC 0x10 -#define BC_FFP(x) (((x) & 0xff) << 24) -#define BC_FFP_MASK BC_FFP(~0) -#define UNM_FFP(x) (((x) & 0xff) << 16) -#define UNM_FFP_MASK UNM_FFP(~0) -#define UNU_FFP(x) (((x) & 0xff) << 8) -#define UNU_FFP_MASK UNU_FFP(~0) -#define CPU_EN BIT(7) -#define CPU_PORT_MASK GENMASK(6, 4) -#define CPU_PORT(x) FIELD_PREP(CPU_PORT_MASK, x) -#define MIRROR_EN BIT(3) -#define MIRROR_PORT(x) ((x) & 0x7) -#define MIRROR_MASK 0x7 - -/* Registers for CPU forward control */ +/* Register for MAC forward control */ +#define MT753X_MFC 0x10 +#define BC_FFP_MASK GENMASK(31, 24) +#define BC_FFP(x) FIELD_PREP(BC_FFP_MASK, x) +#define UNM_FFP_MASK GENMASK(23, 16) +#define UNM_FFP(x) FIELD_PREP(UNM_FFP_MASK, x) +#define UNU_FFP_MASK GENMASK(15, 8) +#define UNU_FFP(x) FIELD_PREP(UNU_FFP_MASK, x) +#define MT7530_CPU_EN BIT(7) +#define MT7530_CPU_PORT_MASK GENMASK(6, 4) +#define MT7530_CPU_PORT(x) FIELD_PREP(MT7530_CPU_PORT_MASK, x) +#define MT7530_MIRROR_EN BIT(3) +#define MT7530_MIRROR_PORT_MASK GENMASK(2, 0) +#define MT7530_MIRROR_PORT_GET(x) FIELD_GET(MT7530_MIRROR_PORT_MASK, x) +#define MT7530_MIRROR_PORT_SET(x) FIELD_PREP(MT7530_MIRROR_PORT_MASK, x) +#define MT7531_QRY_FFP_MASK GENMASK(7, 0) +#define MT7531_QRY_FFP(x) FIELD_PREP(MT7531_QRY_FFP_MASK, x) + +/* Register for CPU forward control */ #define MT7531_CFC 0x4 #define MT7531_MIRROR_EN BIT(19) -#define MT7531_MIRROR_MASK (MIRROR_MASK << 16) -#define MT7531_MIRROR_PORT_GET(x) (((x) >> 16) & MIRROR_MASK) -#define MT7531_MIRROR_PORT_SET(x) (((x) & MIRROR_MASK) << 16) +#define MT7531_MIRROR_PORT_MASK GENMASK(18, 16) +#define MT7531_MIRROR_PORT_GET(x) FIELD_GET(MT7531_MIRROR_PORT_MASK, x) +#define MT7531_MIRROR_PORT_SET(x) FIELD_PREP(MT7531_MIRROR_PORT_MASK, x) #define MT7531_CPU_PMAP_MASK GENMASK(7, 0) #define MT7531_CPU_PMAP(x) FIELD_PREP(MT7531_CPU_PMAP_MASK, x) -#define MT753X_MIRROR_REG(id) ((((id) == ID_MT7531) || ((id) == ID_MT7988)) ? \ - MT7531_CFC : MT7530_MFC) -#define MT753X_MIRROR_EN(id) ((((id) == ID_MT7531) || ((id) == ID_MT7988)) ? \ - MT7531_MIRROR_EN : MIRROR_EN) -#define MT753X_MIRROR_MASK(id) ((((id) == ID_MT7531) || ((id) == ID_MT7988)) ? \ - MT7531_MIRROR_MASK : MIRROR_MASK) +#define MT753X_MIRROR_REG(id) ((id == ID_MT7531 || \ + id == ID_MT7988) ? \ + MT7531_CFC : MT753X_MFC) -/* Registers for BPDU and PAE frame control*/ +#define MT753X_MIRROR_EN(id) ((id == ID_MT7531 || \ + id == ID_MT7988) ? \ + MT7531_MIRROR_EN : MT7530_MIRROR_EN) + +#define MT753X_MIRROR_PORT_MASK(id) ((id == ID_MT7531 || \ + id == ID_MT7988) ? \ + MT7531_MIRROR_PORT_MASK : \ + MT7530_MIRROR_PORT_MASK) + +#define MT753X_MIRROR_PORT_GET(id, val) ((id == ID_MT7531 || \ + id == ID_MT7988) ? 
\ + MT7531_MIRROR_PORT_GET(val) : \ + MT7530_MIRROR_PORT_GET(val)) + +#define MT753X_MIRROR_PORT_SET(id, val) ((id == ID_MT7531 || \ + id == ID_MT7988) ? \ + MT7531_MIRROR_PORT_SET(val) : \ + MT7530_MIRROR_PORT_SET(val)) + +/* Register for BPDU and PAE frame control */ #define MT753X_BPC 0x24 -#define MT753X_PAE_BPDU_FR BIT(25) -#define MT753X_PAE_EG_TAG_MASK GENMASK(24, 22) -#define MT753X_PAE_EG_TAG(x) FIELD_PREP(MT753X_PAE_EG_TAG_MASK, x) -#define MT753X_PAE_PORT_FW_MASK GENMASK(18, 16) -#define MT753X_PAE_PORT_FW(x) FIELD_PREP(MT753X_PAE_PORT_FW_MASK, x) -#define MT753X_BPDU_EG_TAG_MASK GENMASK(8, 6) -#define MT753X_BPDU_EG_TAG(x) FIELD_PREP(MT753X_BPDU_EG_TAG_MASK, x) -#define MT753X_BPDU_PORT_FW_MASK GENMASK(2, 0) - -/* Register for :01 and :02 MAC DA frame control */ +#define PAE_BPDU_FR BIT(25) +#define PAE_EG_TAG_MASK GENMASK(24, 22) +#define PAE_EG_TAG(x) FIELD_PREP(PAE_EG_TAG_MASK, x) +#define PAE_PORT_FW_MASK GENMASK(18, 16) +#define PAE_PORT_FW(x) FIELD_PREP(PAE_PORT_FW_MASK, x) +#define BPDU_EG_TAG_MASK GENMASK(8, 6) +#define BPDU_EG_TAG(x) FIELD_PREP(BPDU_EG_TAG_MASK, x) +#define BPDU_PORT_FW_MASK GENMASK(2, 0) + +/* Register for 01-80-C2-00-00-[01,02] MAC DA frame control */ #define MT753X_RGAC1 0x28 -#define MT753X_R02_BPDU_FR BIT(25) -#define MT753X_R02_EG_TAG_MASK GENMASK(24, 22) -#define MT753X_R02_EG_TAG(x) FIELD_PREP(MT753X_R02_EG_TAG_MASK, x) -#define MT753X_R02_PORT_FW_MASK GENMASK(18, 16) -#define MT753X_R02_PORT_FW(x) FIELD_PREP(MT753X_R02_PORT_FW_MASK, x) -#define MT753X_R01_BPDU_FR BIT(9) -#define MT753X_R01_EG_TAG_MASK GENMASK(8, 6) -#define MT753X_R01_EG_TAG(x) FIELD_PREP(MT753X_R01_EG_TAG_MASK, x) -#define MT753X_R01_PORT_FW_MASK GENMASK(2, 0) - -/* Register for :03 and :0E MAC DA frame control */ +#define R02_BPDU_FR BIT(25) +#define R02_EG_TAG_MASK GENMASK(24, 22) +#define R02_EG_TAG(x) FIELD_PREP(R02_EG_TAG_MASK, x) +#define R02_PORT_FW_MASK GENMASK(18, 16) +#define R02_PORT_FW(x) FIELD_PREP(R02_PORT_FW_MASK, x) +#define R01_BPDU_FR BIT(9) +#define R01_EG_TAG_MASK GENMASK(8, 6) +#define R01_EG_TAG(x) FIELD_PREP(R01_EG_TAG_MASK, x) +#define R01_PORT_FW_MASK GENMASK(2, 0) + +/* Register for 01-80-C2-00-00-[03,0E] MAC DA frame control */ #define MT753X_RGAC2 0x2c -#define MT753X_R0E_BPDU_FR BIT(25) -#define MT753X_R0E_EG_TAG_MASK GENMASK(24, 22) -#define MT753X_R0E_EG_TAG(x) FIELD_PREP(MT753X_R0E_EG_TAG_MASK, x) -#define MT753X_R0E_PORT_FW_MASK GENMASK(18, 16) -#define MT753X_R0E_PORT_FW(x) FIELD_PREP(MT753X_R0E_PORT_FW_MASK, x) -#define MT753X_R03_BPDU_FR BIT(9) -#define MT753X_R03_EG_TAG_MASK GENMASK(8, 6) -#define MT753X_R03_EG_TAG(x) FIELD_PREP(MT753X_R03_EG_TAG_MASK, x) -#define MT753X_R03_PORT_FW_MASK GENMASK(2, 0) - -enum mt753x_bpdu_port_fw { - MT753X_BPDU_FOLLOW_MFC, - MT753X_BPDU_CPU_EXCLUDE = 4, - MT753X_BPDU_CPU_INCLUDE = 5, - MT753X_BPDU_CPU_ONLY = 6, - MT753X_BPDU_DROP = 7, +#define R0E_BPDU_FR BIT(25) +#define R0E_EG_TAG_MASK GENMASK(24, 22) +#define R0E_EG_TAG(x) FIELD_PREP(R0E_EG_TAG_MASK, x) +#define R0E_PORT_FW_MASK GENMASK(18, 16) +#define R0E_PORT_FW(x) FIELD_PREP(R0E_PORT_FW_MASK, x) +#define R03_BPDU_FR BIT(9) +#define R03_EG_TAG_MASK GENMASK(8, 6) +#define R03_EG_TAG(x) FIELD_PREP(R03_EG_TAG_MASK, x) +#define R03_PORT_FW_MASK GENMASK(2, 0) + +enum mt753x_to_cpu_fw { + TO_CPU_FW_SYSTEM_DEFAULT, + TO_CPU_FW_CPU_EXCLUDE = 4, + TO_CPU_FW_CPU_INCLUDE = 5, + TO_CPU_FW_CPU_ONLY = 6, + TO_CPU_FW_DROP = 7, }; /* Registers for address table access */ @@ -304,48 +323,55 @@ enum mt7530_vlan_port_acc_frm { #define G0_PORT_VID_DEF G0_PORT_VID(0) /* 
Register for port MAC control register */ -#define MT7530_PMCR_P(x) (0x3000 + ((x) * 0x100)) -#define PMCR_IFG_XMIT(x) (((x) & 0x3) << 18) +#define MT753X_PMCR_P(x) (0x3000 + ((x) * 0x100)) +#define PMCR_IFG_XMIT_MASK GENMASK(19, 18) +#define PMCR_IFG_XMIT(x) FIELD_PREP(PMCR_IFG_XMIT_MASK, x) #define PMCR_EXT_PHY BIT(17) #define PMCR_MAC_MODE BIT(16) -#define PMCR_FORCE_MODE BIT(15) -#define PMCR_TX_EN BIT(14) -#define PMCR_RX_EN BIT(13) +#define MT7530_FORCE_MODE BIT(15) +#define PMCR_MAC_TX_EN BIT(14) +#define PMCR_MAC_RX_EN BIT(13) #define PMCR_BACKOFF_EN BIT(9) #define PMCR_BACKPR_EN BIT(8) #define PMCR_FORCE_EEE1G BIT(7) #define PMCR_FORCE_EEE100 BIT(6) -#define PMCR_TX_FC_EN BIT(5) -#define PMCR_RX_FC_EN BIT(4) +#define PMCR_FORCE_RX_FC_EN BIT(5) +#define PMCR_FORCE_TX_FC_EN BIT(4) #define PMCR_FORCE_SPEED_1000 BIT(3) #define PMCR_FORCE_SPEED_100 BIT(2) #define PMCR_FORCE_FDX BIT(1) #define PMCR_FORCE_LNK BIT(0) -#define PMCR_SPEED_MASK (PMCR_FORCE_SPEED_100 | \ - PMCR_FORCE_SPEED_1000) -#define MT7531_FORCE_LNK BIT(31) -#define MT7531_FORCE_SPD BIT(30) -#define MT7531_FORCE_DPX BIT(29) -#define MT7531_FORCE_RX_FC BIT(28) -#define MT7531_FORCE_TX_FC BIT(27) -#define MT7531_FORCE_MODE (MT7531_FORCE_LNK | \ - MT7531_FORCE_SPD | \ - MT7531_FORCE_DPX | \ - MT7531_FORCE_RX_FC | \ - MT7531_FORCE_TX_FC) -#define PMCR_LINK_SETTINGS_MASK (PMCR_TX_EN | PMCR_FORCE_SPEED_1000 | \ - PMCR_RX_EN | PMCR_FORCE_SPEED_100 | \ - PMCR_TX_FC_EN | PMCR_RX_FC_EN | \ - PMCR_FORCE_FDX | PMCR_FORCE_LNK | \ - PMCR_FORCE_EEE1G | PMCR_FORCE_EEE100) - -#define MT7530_PMEEECR_P(x) (0x3004 + (x) * 0x100) -#define WAKEUP_TIME_1000(x) (((x) & 0xFF) << 24) -#define WAKEUP_TIME_100(x) (((x) & 0xFF) << 16) +#define MT7531_FORCE_MODE_LNK BIT(31) +#define MT7531_FORCE_MODE_SPD BIT(30) +#define MT7531_FORCE_MODE_DPX BIT(29) +#define MT7531_FORCE_MODE_RX_FC BIT(28) +#define MT7531_FORCE_MODE_TX_FC BIT(27) +#define MT7531_FORCE_MODE_EEE100 BIT(26) +#define MT7531_FORCE_MODE_EEE1G BIT(25) +#define MT7531_FORCE_MODE_MASK (MT7531_FORCE_MODE_LNK | \ + MT7531_FORCE_MODE_SPD | \ + MT7531_FORCE_MODE_DPX | \ + MT7531_FORCE_MODE_RX_FC | \ + MT7531_FORCE_MODE_TX_FC | \ + MT7531_FORCE_MODE_EEE100 | \ + MT7531_FORCE_MODE_EEE1G) +#define PMCR_LINK_SETTINGS_MASK (PMCR_MAC_TX_EN | PMCR_MAC_RX_EN | \ + PMCR_FORCE_EEE1G | \ + PMCR_FORCE_EEE100 | \ + PMCR_FORCE_RX_FC_EN | \ + PMCR_FORCE_TX_FC_EN | \ + PMCR_FORCE_SPEED_1000 | \ + PMCR_FORCE_SPEED_100 | \ + PMCR_FORCE_FDX | PMCR_FORCE_LNK) + +#define MT753X_PMEEECR_P(x) (0x3004 + (x) * 0x100) +#define WAKEUP_TIME_1000_MASK GENMASK(31, 24) +#define WAKEUP_TIME_1000(x) FIELD_PREP(WAKEUP_TIME_1000_MASK, x) +#define WAKEUP_TIME_100_MASK GENMASK(23, 16) +#define WAKEUP_TIME_100(x) FIELD_PREP(WAKEUP_TIME_100_MASK, x) #define LPI_THRESH_MASK GENMASK(15, 4) -#define LPI_THRESH_SHT 4 -#define SET_LPI_THRESH(x) (((x) << LPI_THRESH_SHT) & LPI_THRESH_MASK) -#define GET_LPI_THRESH(x) (((x) & LPI_THRESH_MASK) >> LPI_THRESH_SHT) +#define LPI_THRESH_GET(x) FIELD_GET(LPI_THRESH_MASK, x) +#define LPI_THRESH_SET(x) FIELD_PREP(LPI_THRESH_MASK, x) #define LPI_MODE_EN BIT(0) #define MT7530_PMSR_P(x) (0x3008 + (x) * 0x100) @@ -470,32 +496,30 @@ enum mt7531_clk_skew { MT7531_CLK_SKEW_REVERSE = 3, }; -/* Register for hw trap status */ -#define MT7530_HWTRAP 0x7800 -#define HWTRAP_XTAL_MASK (BIT(10) | BIT(9)) -#define HWTRAP_XTAL_25MHZ (BIT(10) | BIT(9)) -#define HWTRAP_XTAL_40MHZ (BIT(10)) -#define HWTRAP_XTAL_20MHZ (BIT(9)) - -#define MT7531_HWTRAP 0x7800 -#define HWTRAP_XTAL_FSEL_MASK BIT(7) -#define 
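An illustrative aside, not part of the patch: the header rework above replaces open-coded shift/mask helpers such as SET_LPI_THRESH()/GET_LPI_THRESH() with GENMASK() plus FIELD_PREP()/FIELD_GET() from linux/bitfield.h, which derive the shift from the mask and, for constant values, check the field width at compile time. A small self-contained demonstration of the pattern:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define DEMO_LPI_THRESH_MASK	GENMASK(15, 4)		/* bits 15..4 */

static u32 demo_set_lpi_thresh(u32 reg, u16 thresh)
{
	reg &= ~DEMO_LPI_THRESH_MASK;
	reg |= FIELD_PREP(DEMO_LPI_THRESH_MASK, thresh); /* (thresh << 4) & mask */
	return reg;
}

static u16 demo_get_lpi_thresh(u32 reg)
{
	return FIELD_GET(DEMO_LPI_THRESH_MASK, reg);	 /* (reg & mask) >> 4 */
}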
HWTRAP_XTAL_FSEL_25MHZ BIT(7) -#define HWTRAP_XTAL_FSEL_40MHZ 0 -/* Unique fields of (M)HWSTRAP for MT7531 */ -#define XTAL_FSEL_S 7 -#define XTAL_FSEL_M BIT(7) -#define PHY_EN BIT(6) -#define CHG_STRAP BIT(8) - -/* Register for hw trap modification */ -#define MT7530_MHWTRAP 0x7804 -#define MHWTRAP_PHY0_SEL BIT(20) -#define MHWTRAP_MANUAL BIT(16) -#define MHWTRAP_P5_MAC_SEL BIT(13) -#define MHWTRAP_P6_DIS BIT(8) -#define MHWTRAP_P5_RGMII_MODE BIT(7) -#define MHWTRAP_P5_DIS BIT(6) -#define MHWTRAP_PHY_ACCESS BIT(5) +/* Register for trap status */ +#define MT753X_TRAP 0x7800 +#define MT7530_XTAL_MASK (BIT(10) | BIT(9)) +#define MT7530_XTAL_25MHZ (BIT(10) | BIT(9)) +#define MT7530_XTAL_40MHZ BIT(10) +#define MT7530_XTAL_20MHZ BIT(9) +#define MT7531_XTAL25 BIT(7) + +/* Register for trap modification */ +#define MT753X_MTRAP 0x7804 +#define MT7530_P5_PHY0_SEL BIT(20) +#define MT7530_CHG_TRAP BIT(16) +#define MT7530_P5_MAC_SEL BIT(13) +#define MT7530_P6_DIS BIT(8) +#define MT7530_P5_RGMII_MODE BIT(7) +#define MT7530_P5_DIS BIT(6) +#define MT7530_PHY_INDIRECT_ACCESS BIT(5) +#define MT7531_CHG_STRAP BIT(8) +#define MT7531_PHY_EN BIT(6) + +enum mt7531_xtal_fsel { + MT7531_XTAL_FSEL_25MHZ, + MT7531_XTAL_FSEL_40MHZ, +}; /* Register for TOP signal control */ #define MT7530_TOP_SIG_CTRL 0x7808 @@ -629,7 +653,7 @@ enum mt7531_clk_skew { #define MT7531_PHY_PLL_OFF BIT(5) #define MT7531_PHY_PLL_BYPASS_MODE BIT(4) -#define MT753X_CTRL_PHY_ADDR 0 +#define MT753X_CTRL_PHY_ADDR(addr) ((addr + 1) & 0x1f) #define CORE_PLL_GROUP5 0x404 #define RG_LCDDS_PCW_NCPO1(x) ((x) & 0xffff) @@ -702,12 +726,11 @@ struct mt7530_port { struct phylink_pcs *sgmii_pcs; }; -/* Port 5 interface select definitions */ -enum p5_interface_select { - P5_DISABLED, - P5_INTF_SEL_PHY_P0, - P5_INTF_SEL_PHY_P4, - P5_INTF_SEL_GMAC5, +/* Port 5 mode definitions of the MT7530 switch */ +enum mt7530_p5_mode { + GMAC5, + MUX_PHY_P0, + MUX_PHY_P4, }; struct mt7530_priv; @@ -720,15 +743,14 @@ struct mt753x_pcs { /* struct mt753x_info - This is the main data structure for holding the specific * part for each supported device + * @id: Holding the identifier to a switch model + * @pcs_ops: Holding the pointer to the MAC PCS operations structure * @sw_setup: Holding the handler to a device initialization * @phy_read_c22: Holding the way reading PHY port using C22 * @phy_write_c22: Holding the way writing PHY port using C22 * @phy_read_c45: Holding the way reading PHY port using C45 * @phy_write_c45: Holding the way writing PHY port using C45 - * @phy_mode_supported: Check if the PHY type is being supported on a certain - * port - * @mac_port_validate: Holding the way to set addition validate type for a - * certan MAC port + * @mac_port_get_caps: Holding the handler that provides MAC capabilities * @mac_port_config: Holding the way setting up the PHY attribute to a * certain MAC port */ @@ -747,9 +769,6 @@ struct mt753x_info { int regnum, u16 val); void (*mac_port_get_caps)(struct dsa_switch *ds, int port, struct phylink_config *config); - void (*mac_port_validate)(struct dsa_switch *ds, int port, - phy_interface_t interface, - unsigned long *supported); void (*mac_port_config)(struct dsa_switch *ds, int port, unsigned int mode, phy_interface_t interface); @@ -770,7 +789,7 @@ struct mt753x_info { * @ports: Holding the state among ports * @reg_mutex: The lock for protecting among process accessing * registers - * @p5_intf_sel: Holding the current port 5 interface select + * @p5_mode: Holding the current mode of port 5 of the MT7530 switch * @p5_sgmii: 
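An illustrative aside, not part of the patch: MT753X_CTRL_PHY_ADDR(addr), defined above as ((addr + 1) & 0x1f), derives the base address of the switch's control PHYs from the switch's own MDIO address instead of hard-coding 0, i.e. the PHYs sit one address above the switch, wrapping inside the 5-bit MDIO address space. Worked examples:

	MT753X_CTRL_PHY_ADDR(0)  == 1
	MT753X_CTRL_PHY_ADDR(7)  == 8
	MT753X_CTRL_PHY_ADDR(31) == 0	/* (31 + 1) & 0x1f wraps to 0 */

This is why mt7531_setup() earlier indexes the PHY PLL and EEE writes with MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr) rather than a fixed base.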
Flag for distinguishing if port 5 of the MT7531 switch * has got SGMII * @irq: IRQ number of the switch @@ -778,6 +797,7 @@ struct mt753x_info { * @irq_enable: IRQ enable bits, synced to SYS_INT_EN * @create_sgmii: Pointer to function creating SGMII PCS instance(s) * @active_cpu_ports: Holding the active CPU ports + * @mdiodev: The pointer to the MDIO device structure */ struct mt7530_priv { struct device *dev; @@ -791,7 +811,7 @@ struct mt7530_priv { const struct mt753x_info *info; unsigned int id; bool mcm; - enum p5_interface_select p5_intf_sel; + enum mt7530_p5_mode p5_mode; bool p5_sgmii; u8 mirror_rx; u8 mirror_tx; @@ -804,6 +824,7 @@ struct mt7530_priv { u32 irq_enable; int (*create_sgmii)(struct mt7530_priv *priv); u8 active_cpu_ports; + struct mdio_device *mdiodev; }; struct mt7530_hw_vlan_entry { diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 59b5dd0e2f41..07c897b13de1 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -131,8 +131,8 @@ struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip) { struct mv88e6xxx_mdio_bus *mdio_bus; - mdio_bus = list_first_entry(&chip->mdios, struct mv88e6xxx_mdio_bus, - list); + mdio_bus = list_first_entry_or_null(&chip->mdios, + struct mv88e6xxx_mdio_bus, list); if (!mdio_bus) return NULL; @@ -637,12 +637,12 @@ static void mv88e6351_phylink_get_caps(struct mv88e6xxx_chip *chip, int port, MAC_1000FD; } -static int mv88e6352_get_port4_serdes_cmode(struct mv88e6xxx_chip *chip) +static int mv88e63xx_get_port_serdes_cmode(struct mv88e6xxx_chip *chip, int port) { u16 reg, val; int err; - err = mv88e6xxx_port_read(chip, 4, MV88E6XXX_PORT_STS, ®); + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, ®); if (err) return err; @@ -651,16 +651,16 @@ static int mv88e6352_get_port4_serdes_cmode(struct mv88e6xxx_chip *chip) return 0xf; val = reg & ~MV88E6XXX_PORT_STS_PHY_DETECT; - err = mv88e6xxx_port_write(chip, 4, MV88E6XXX_PORT_STS, val); + err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_STS, val); if (err) return err; - err = mv88e6xxx_port_read(chip, 4, MV88E6XXX_PORT_STS, &val); + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &val); if (err) return err; /* Restore PHY_DETECT value */ - err = mv88e6xxx_port_write(chip, 4, MV88E6XXX_PORT_STS, reg); + err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_STS, reg); if (err) return err; @@ -688,7 +688,30 @@ static void mv88e6352_phylink_get_caps(struct mv88e6xxx_chip *chip, int port, if (err <= 0) return; - cmode = mv88e6352_get_port4_serdes_cmode(chip); + cmode = mv88e63xx_get_port_serdes_cmode(chip, port); + if (cmode < 0) + dev_err(chip->dev, "p%d: failed to read serdes cmode\n", + port); + else + mv88e6xxx_translate_cmode(cmode, supported); + } +} + +static void mv88e632x_phylink_get_caps(struct mv88e6xxx_chip *chip, int port, + struct phylink_config *config) +{ + unsigned long *supported = config->supported_interfaces; + int cmode; + + /* Translate the default cmode */ + mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported); + + config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | + MAC_1000FD; + + /* Port 0/1 are serdes only ports */ + if (port == 0 || port == 1) { + cmode = mv88e63xx_get_port_serdes_cmode(chip, port); if (cmode < 0) dev_err(chip->dev, "p%d: failed to read serdes cmode\n", port); @@ -838,24 +861,27 @@ static void mv88e6xxx_get_caps(struct dsa_switch *ds, int port, } } -static struct phylink_pcs *mv88e6xxx_mac_select_pcs(struct dsa_switch *ds, - int port, - 
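An illustrative aside, not part of the patch: the mv88e6xxx_default_mdio_bus() change at the top of this hunk is an empty-list fix. list_first_entry() on an empty list yields a bogus pointer computed from the list head itself, so the existing NULL check could never fire; list_first_entry_or_null() genuinely returns NULL. A minimal illustration with a hypothetical item type:

#include <linux/list.h>

struct demo_item {
	struct list_head list;
	int value;
};

static int demo_first_value(struct list_head *head)
{
	struct demo_item *item;

	item = list_first_entry_or_null(head, struct demo_item, list);
	if (!item)
		return -ENOENT;		/* an empty list is now detectable */

	return item->value;
}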
phy_interface_t interface) +static struct phylink_pcs * +mv88e6xxx_mac_select_pcs(struct phylink_config *config, + phy_interface_t interface) { - struct mv88e6xxx_chip *chip = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct mv88e6xxx_chip *chip = dp->ds->priv; struct phylink_pcs *pcs = ERR_PTR(-EOPNOTSUPP); if (chip->info->ops->pcs_ops) - pcs = chip->info->ops->pcs_ops->pcs_select(chip, port, + pcs = chip->info->ops->pcs_ops->pcs_select(chip, dp->index, interface); return pcs; } -static int mv88e6xxx_mac_prepare(struct dsa_switch *ds, int port, +static int mv88e6xxx_mac_prepare(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { - struct mv88e6xxx_chip *chip = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct mv88e6xxx_chip *chip = dp->ds->priv; + int port = dp->index; int err = 0; /* In inband mode, the link may come up at any time while the link @@ -874,11 +900,13 @@ static int mv88e6xxx_mac_prepare(struct dsa_switch *ds, int port, return err; } -static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port, +static void mv88e6xxx_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) { - struct mv88e6xxx_chip *chip = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct mv88e6xxx_chip *chip = dp->ds->priv; + int port = dp->index; int err = 0; mv88e6xxx_reg_lock(chip); @@ -894,13 +922,15 @@ err_unlock: mv88e6xxx_reg_unlock(chip); if (err && err != -EOPNOTSUPP) - dev_err(ds->dev, "p%d: failed to configure MAC/PCS\n", port); + dev_err(chip->dev, "p%d: failed to configure MAC/PCS\n", port); } -static int mv88e6xxx_mac_finish(struct dsa_switch *ds, int port, +static int mv88e6xxx_mac_finish(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { - struct mv88e6xxx_chip *chip = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct mv88e6xxx_chip *chip = dp->ds->priv; + int port = dp->index; int err = 0; /* Undo the forced down state above after completing configuration @@ -924,12 +954,14 @@ static int mv88e6xxx_mac_finish(struct dsa_switch *ds, int port, return err; } -static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port, +static void mv88e6xxx_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { - struct mv88e6xxx_chip *chip = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct mv88e6xxx_chip *chip = dp->ds->priv; const struct mv88e6xxx_ops *ops; + int port = dp->index; int err = 0; ops = chip->info->ops; @@ -952,14 +984,16 @@ static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port, "p%d: failed to force MAC link down\n", port); } -static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port, - unsigned int mode, phy_interface_t interface, +static void mv88e6xxx_mac_link_up(struct phylink_config *config, struct phy_device *phydev, + unsigned int mode, phy_interface_t interface, int speed, int duplex, bool tx_pause, bool rx_pause) { - struct mv88e6xxx_chip *chip = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct mv88e6xxx_chip *chip = dp->ds->priv; const struct mv88e6xxx_ops *ops; + int port = dp->index; int err = 0; ops = chip->info->ops; @@ -985,7 +1019,7 @@ error: mv88e6xxx_reg_unlock(chip); if (err && err != -EOPNOTSUPP) - dev_err(ds->dev, + dev_err(chip->dev, "p%d: failed to configure MAC link up\n", port); } @@ -3123,6 +3157,7 @@ static int mv88e6xxx_software_reset(struct mv88e6xxx_chip *chip) 
static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip) { struct gpio_desc *gpiod = chip->reset; + int err; /* If there is a GPIO connected to the reset pin, toggle it */ if (gpiod) { @@ -3131,17 +3166,26 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip) * mid-byte, causing the first EEPROM read after the reset * from the wrong location resulting in the switch booting * to wrong mode and inoperable. + * For this reason, switch families with EEPROM support + * generally wait for EEPROM loads to complete as their pre- + * and post-reset handlers. */ - if (chip->info->ops->get_eeprom) - mv88e6xxx_g2_eeprom_wait(chip); + if (chip->info->ops->hardware_reset_pre) { + err = chip->info->ops->hardware_reset_pre(chip); + if (err) + dev_err(chip->dev, "pre-reset error: %d\n", err); + } gpiod_set_value_cansleep(gpiod, 1); usleep_range(10000, 20000); gpiod_set_value_cansleep(gpiod, 0); usleep_range(10000, 20000); - if (chip->info->ops->get_eeprom) - mv88e6xxx_g2_eeprom_wait(chip); + if (chip->info->ops->hardware_reset_post) { + err = chip->info->ops->hardware_reset_post(chip); + if (err) + dev_err(chip->dev, "post-reset error: %d\n", err); + } } } @@ -4371,6 +4415,8 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6390_g1_rmu_disable, .atu_get_hash = mv88e6165_g1_atu_get_hash, @@ -4561,6 +4607,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = { .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6352_g1_rmu_disable, .atu_get_hash = mv88e6165_g1_atu_get_hash, @@ -4661,6 +4709,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = { .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6352_g1_rmu_disable, .atu_get_hash = mv88e6165_g1_atu_get_hash, @@ -4755,6 +4805,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6390_g1_rmu_disable, .atu_get_hash = mv88e6165_g1_atu_get_hash, @@ -4813,6 +4865,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6390_g1_rmu_disable, .atu_get_hash = mv88e6165_g1_atu_get_hash, @@ -4869,6 +4923,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = 
mv88e6352_g1_reset, .rmu_disable = mv88e6390_g1_rmu_disable, .atu_get_hash = mv88e6165_g1_atu_get_hash, @@ -4928,6 +4984,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6352_g1_rmu_disable, .atu_get_hash = mv88e6165_g1_atu_get_hash, @@ -4981,6 +5039,8 @@ static const struct mv88e6xxx_ops mv88e6250_ops = { .watchdog_ops = &mv88e6250_watchdog_ops, .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, + .hardware_reset_pre = mv88e6250_g1_wait_eeprom_done_prereset, + .hardware_reset_post = mv88e6xxx_g1_wait_eeprom_done, .reset = mv88e6250_g1_reset, .vtu_getnext = mv88e6185_g1_vtu_getnext, .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, @@ -5028,6 +5088,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6390_g1_rmu_disable, .atu_get_hash = mv88e6165_g1_atu_get_hash, @@ -5087,13 +5149,15 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6185_g1_vtu_getnext, .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, - .phylink_get_caps = mv88e6185_phylink_get_caps, + .phylink_get_caps = mv88e632x_phylink_get_caps, }; static const struct mv88e6xxx_ops mv88e6321_ops = { @@ -5133,13 +5197,15 @@ static const struct mv88e6xxx_ops mv88e6321_ops = { .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6185_g1_vtu_getnext, .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, - .phylink_get_caps = mv88e6185_phylink_get_caps, + .phylink_get_caps = mv88e632x_phylink_get_caps, }; static const struct mv88e6xxx_ops mv88e6341_ops = { @@ -5183,6 +5249,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6390_g1_rmu_disable, .atu_get_hash = mv88e6165_g1_atu_get_hash, @@ -5338,6 +5406,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6352_g1_rmu_disable, .atu_get_hash = mv88e6165_g1_atu_get_hash, @@ -5400,6 +5470,8 @@ static 
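An illustrative aside, not part of the patch: the long run of ops-table updates above and below all hook the new pre/post reset callbacks to an EEPROM-wait helper, so mv88e6xxx_hardware_reset() (earlier in this hunk) never toggles the reset line while an EEPROM load is mid-byte. A family with different needs simply supplies its own handlers, as the mv88e6250 entry does; a hypothetical opt-in looks like:

static const struct mv88e6xxx_ops example_family_ops = {
	/* ...the usual port, stats and VTU callbacks... */
	.hardware_reset_pre	= mv88e6xxx_g2_eeprom_wait,
	.hardware_reset_post	= mv88e6xxx_g2_eeprom_wait,
	.reset			= mv88e6352_g1_reset,
};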
const struct mv88e6xxx_ops mv88e6390_ops = { .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6390_g1_rmu_disable, .atu_get_hash = mv88e6165_g1_atu_get_hash, @@ -5462,6 +5534,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6390_g1_rmu_disable, .atu_get_hash = mv88e6165_g1_atu_get_hash, @@ -5527,6 +5601,8 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = { .watchdog_ops = &mv88e6393x_watchdog_ops, .mgmt_rsvd2cpu = mv88e6393x_port_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, + .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, + .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6390_g1_rmu_disable, .atu_get_hash = mv88e6165_g1_atu_get_hash, @@ -5705,7 +5781,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6141, .family = MV88E6XXX_FAMILY_6341, .name = "Marvell 88E6141", - .num_databases = 4096, + .num_databases = 256, .num_macs = 2048, .num_ports = 6, .num_internal_phys = 5, @@ -6164,7 +6240,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6341, .family = MV88E6XXX_FAMILY_6341, .name = "Marvell 88E6341", - .num_databases = 4096, + .num_databases = 256, .num_macs = 2048, .num_internal_phys = 5, .num_ports = 6, @@ -6970,6 +7046,15 @@ static int mv88e6xxx_crosschip_lag_leave(struct dsa_switch *ds, int sw_index, return err_sync ? 
: err_pvt; } +static const struct phylink_mac_ops mv88e6xxx_phylink_mac_ops = { + .mac_select_pcs = mv88e6xxx_mac_select_pcs, + .mac_prepare = mv88e6xxx_mac_prepare, + .mac_config = mv88e6xxx_mac_config, + .mac_finish = mv88e6xxx_mac_finish, + .mac_link_down = mv88e6xxx_mac_link_down, + .mac_link_up = mv88e6xxx_mac_link_up, +}; + static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .get_tag_protocol = mv88e6xxx_get_tag_protocol, .change_tag_protocol = mv88e6xxx_change_tag_protocol, @@ -6978,12 +7063,6 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .port_setup = mv88e6xxx_port_setup, .port_teardown = mv88e6xxx_port_teardown, .phylink_get_caps = mv88e6xxx_get_caps, - .phylink_mac_select_pcs = mv88e6xxx_mac_select_pcs, - .phylink_mac_prepare = mv88e6xxx_mac_prepare, - .phylink_mac_config = mv88e6xxx_mac_config, - .phylink_mac_finish = mv88e6xxx_mac_finish, - .phylink_mac_link_down = mv88e6xxx_mac_link_down, - .phylink_mac_link_up = mv88e6xxx_mac_link_up, .get_strings = mv88e6xxx_get_strings, .get_ethtool_stats = mv88e6xxx_get_ethtool_stats, .get_eth_mac_stats = mv88e6xxx_get_eth_mac_stats, @@ -7052,6 +7131,7 @@ static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip) ds->priv = chip; ds->dev = dev; ds->ops = &mv88e6xxx_switch_ops; + ds->phylink_mac_ops = &mv88e6xxx_phylink_mac_ops; ds->ageing_time_min = chip->info->age_time_coeff; ds->ageing_time_max = chip->info->age_time_coeff * U8_MAX; diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 85eb293381a7..c34caf9815c5 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -487,6 +487,12 @@ struct mv88e6xxx_ops { int (*ppu_enable)(struct mv88e6xxx_chip *chip); int (*ppu_disable)(struct mv88e6xxx_chip *chip); + /* Additional handlers to run before and after hard reset, to make sure + * that the switch and EEPROM are in a good state. + */ + int (*hardware_reset_pre)(struct mv88e6xxx_chip *chip); + int (*hardware_reset_post)(struct mv88e6xxx_chip *chip); + /* Switch Software Reset */ int (*reset)(struct mv88e6xxx_chip *chip); diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c index 49444a72ff09..9820cd596757 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.c +++ b/drivers/net/dsa/mv88e6xxx/global1.c @@ -75,6 +75,95 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip) return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1); } +static int mv88e6250_g1_eeprom_reload(struct mv88e6xxx_chip *chip) +{ + /* MV88E6185_G1_CTL1_RELOAD_EEPROM is also valid for 88E6250 */ + int bit = __bf_shf(MV88E6185_G1_CTL1_RELOAD_EEPROM); + u16 val; + int err; + + err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val); + if (err) + return err; + + val |= MV88E6185_G1_CTL1_RELOAD_EEPROM; + + err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, val); + if (err) + return err; + + return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_CTL1, bit, 0); +} + +/* Returns 0 when done, -EBUSY when waiting, other negative codes on error */ +static int mv88e6xxx_g1_is_eeprom_done(struct mv88e6xxx_chip *chip) +{ + u16 val; + int err; + + err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val); + if (err < 0) { + dev_err(chip->dev, "Error reading status"); + return err; + } + + /* If the switch is still resetting, it may not + * respond on the bus, and so MDIO read returns + * 0xffff. Differentiate between that, and waiting for + * the EEPROM to be done by bit 0 being set. 
+ */ + if (val == 0xffff || !(val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE))) + return -EBUSY; + + return 0; +} + +/* As the EEInt (EEPROM done) flag clears on read if the status register, this + * function must be called directly after a hard reset or EEPROM ReLoad request, + * or the done condition may have been missed + */ +int mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip) +{ + const unsigned long timeout = jiffies + 1 * HZ; + int ret; + + /* Wait up to 1 second for the switch to finish reading the + * EEPROM. + */ + while (time_before(jiffies, timeout)) { + ret = mv88e6xxx_g1_is_eeprom_done(chip); + if (ret != -EBUSY) + return ret; + } + + dev_err(chip->dev, "Timeout waiting for EEPROM done"); + return -ETIMEDOUT; +} + +int mv88e6250_g1_wait_eeprom_done_prereset(struct mv88e6xxx_chip *chip) +{ + int ret; + + ret = mv88e6xxx_g1_is_eeprom_done(chip); + if (ret != -EBUSY) + return ret; + + /* Pre-reset, we don't know the state of the switch - when + * mv88e6xxx_g1_is_eeprom_done() returns -EBUSY, that may be because + * the switch is actually busy reading the EEPROM, or because + * MV88E6XXX_G1_STS_IRQ_EEPROM_DONE has been cleared by an unrelated + * status register read already. + * + * To account for the latter case, trigger another EEPROM reload for + * another chance at seeing the done flag. + */ + ret = mv88e6250_g1_eeprom_reload(chip); + if (ret) + return ret; + + return mv88e6xxx_g1_wait_eeprom_done(chip); +} + /* Offset 0x01: Switch MAC Address Register Bytes 0 & 1 * Offset 0x02: Switch MAC Address Register Bytes 2 & 3 * Offset 0x03: Switch MAC Address Register Bytes 4 & 5 diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h index 1095261f5b49..3dbb7a1b8fe1 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.h +++ b/drivers/net/dsa/mv88e6xxx/global1.h @@ -282,6 +282,8 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr); int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip); int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip); int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip); +int mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip); +int mv88e6250_g1_wait_eeprom_done_prereset(struct mv88e6xxx_chip *chip); int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip); int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip); diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 3c5509e75a54..85952d841f28 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -1755,6 +1755,9 @@ static int vsc9959_stream_identify(struct flow_cls_offload *f, BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS))) return -EOPNOTSUPP; + if (flow_rule_match_has_control_flags(rule, f->common.extack)) + return -EOPNOTSUPP; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { struct flow_match_eth_addrs match; diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c index 8d9d271ac3af..968cb81088bf 100644 --- a/drivers/net/dsa/qca/ar9331.c +++ b/drivers/net/dsa/qca/ar9331.c @@ -523,28 +523,30 @@ static void ar9331_sw_phylink_get_caps(struct dsa_switch *ds, int port, } } -static void ar9331_sw_phylink_mac_config(struct dsa_switch *ds, int port, +static void ar9331_sw_phylink_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) { - struct ar9331_sw_priv *priv = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct ar9331_sw_priv *priv = dp->ds->priv; struct regmap *regmap = 
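An illustrative aside, not part of the patch: mv88e6xxx_g1_is_eeprom_done() above deliberately returns a three-way result, 0 once the EEPROM load has finished, -EBUSY while polling should continue, and any other negative errno for a hard failure, and mv88e6xxx_g1_wait_eeprom_done() wraps it in a jiffies-bounded loop. The same convention generalises to any poll helper; a sketch follows (the in-tree loop above busy-polls, the sleep here is only for illustration):

#include <linux/delay.h>
#include <linux/jiffies.h>

static int demo_poll_until_done(struct mv88e6xxx_chip *chip,
				int (*poll)(struct mv88e6xxx_chip *chip),
				unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
	int err;

	do {
		err = poll(chip);
		if (err != -EBUSY)
			return err;	/* 0 means done, otherwise hard error */
		usleep_range(1000, 2000);
	} while (time_before(jiffies, timeout));

	return -ETIMEDOUT;
}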
priv->regmap; int ret; - ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(port), + ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(dp->index), AR9331_SW_PORT_STATUS_LINK_EN | AR9331_SW_PORT_STATUS_FLOW_LINK_EN, 0); if (ret) dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret); } -static void ar9331_sw_phylink_mac_link_down(struct dsa_switch *ds, int port, +static void ar9331_sw_phylink_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { - struct ar9331_sw_priv *priv = ds->priv; - struct ar9331_sw_port *p = &priv->port[port]; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct ar9331_sw_priv *priv = dp->ds->priv; struct regmap *regmap = priv->regmap; + int port = dp->index; int ret; ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(port), @@ -552,23 +554,24 @@ static void ar9331_sw_phylink_mac_link_down(struct dsa_switch *ds, int port, if (ret) dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret); - cancel_delayed_work_sync(&p->mib_read); + cancel_delayed_work_sync(&priv->port[port].mib_read); } -static void ar9331_sw_phylink_mac_link_up(struct dsa_switch *ds, int port, +static void ar9331_sw_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phydev, unsigned int mode, phy_interface_t interface, - struct phy_device *phydev, int speed, int duplex, bool tx_pause, bool rx_pause) { - struct ar9331_sw_priv *priv = ds->priv; - struct ar9331_sw_port *p = &priv->port[port]; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct ar9331_sw_priv *priv = dp->ds->priv; struct regmap *regmap = priv->regmap; + int port = dp->index; u32 val; int ret; - schedule_delayed_work(&p->mib_read, 0); + schedule_delayed_work(&priv->port[port].mib_read, 0); val = AR9331_SW_PORT_STATUS_MAC_MASK; switch (speed) { @@ -684,14 +687,17 @@ static void ar9331_get_pause_stats(struct dsa_switch *ds, int port, spin_unlock(&p->stats_lock); } +static const struct phylink_mac_ops ar9331_phylink_mac_ops = { + .mac_config = ar9331_sw_phylink_mac_config, + .mac_link_down = ar9331_sw_phylink_mac_link_down, + .mac_link_up = ar9331_sw_phylink_mac_link_up, +}; + static const struct dsa_switch_ops ar9331_sw_ops = { .get_tag_protocol = ar9331_sw_get_tag_protocol, .setup = ar9331_sw_setup, .port_disable = ar9331_sw_port_disable, .phylink_get_caps = ar9331_sw_phylink_get_caps, - .phylink_mac_config = ar9331_sw_phylink_mac_config, - .phylink_mac_link_down = ar9331_sw_phylink_mac_link_down, - .phylink_mac_link_up = ar9331_sw_phylink_mac_link_up, .get_stats64 = ar9331_get_stats64, .get_pause_stats = ar9331_get_pause_stats, }; @@ -1059,6 +1065,7 @@ static int ar9331_sw_probe(struct mdio_device *mdiodev) ds->priv = priv; priv->ops = ar9331_sw_ops; ds->ops = &priv->ops; + ds->phylink_mac_ops = &ar9331_phylink_mac_ops; dev_set_drvdata(&mdiodev->dev, priv); for (i = 0; i < ARRAY_SIZE(priv->port); i++) { diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c index dab66c0c6f64..b3c27cf538e8 100644 --- a/drivers/net/dsa/qca/qca8k-8xxx.c +++ b/drivers/net/dsa/qca/qca8k-8xxx.c @@ -1283,11 +1283,13 @@ qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_inde } static struct phylink_pcs * -qca8k_phylink_mac_select_pcs(struct dsa_switch *ds, int port, +qca8k_phylink_mac_select_pcs(struct phylink_config *config, phy_interface_t interface) { - struct qca8k_priv *priv = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct qca8k_priv *priv = dp->ds->priv; struct phylink_pcs 
*pcs = NULL; + int port = dp->index; switch (interface) { case PHY_INTERFACE_MODE_SGMII: @@ -1311,13 +1313,18 @@ qca8k_phylink_mac_select_pcs(struct dsa_switch *ds, int port, } static void -qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, +qca8k_phylink_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) { - struct qca8k_priv *priv = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct dsa_switch *ds = dp->ds; + struct qca8k_priv *priv; + int port = dp->index; int cpu_port_index; u32 reg; + priv = ds->priv; + switch (port) { case 0: /* 1st CPU port */ if (state->interface != PHY_INTERFACE_MODE_RGMII && @@ -1426,20 +1433,24 @@ static void qca8k_phylink_get_caps(struct dsa_switch *ds, int port, } static void -qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode, +qca8k_phylink_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { - struct qca8k_priv *priv = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct qca8k_priv *priv = dp->ds->priv; - qca8k_port_set_status(priv, port, 0); + qca8k_port_set_status(priv, dp->index, 0); } static void -qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode, - phy_interface_t interface, struct phy_device *phydev, - int speed, int duplex, bool tx_pause, bool rx_pause) +qca8k_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phydev, unsigned int mode, + phy_interface_t interface, int speed, int duplex, + bool tx_pause, bool rx_pause) { - struct qca8k_priv *priv = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct qca8k_priv *priv = dp->ds->priv; + int port = dp->index; u32 reg; if (phylink_autoneg_inband(mode)) { @@ -1463,10 +1474,10 @@ qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode, if (duplex == DUPLEX_FULL) reg |= QCA8K_PORT_STATUS_DUPLEX; - if (rx_pause || dsa_is_cpu_port(ds, port)) + if (rx_pause || dsa_port_is_cpu(dp)) reg |= QCA8K_PORT_STATUS_RXFLOW; - if (tx_pause || dsa_is_cpu_port(ds, port)) + if (tx_pause || dsa_port_is_cpu(dp)) reg |= QCA8K_PORT_STATUS_TXFLOW; } @@ -1991,6 +2002,13 @@ qca8k_setup(struct dsa_switch *ds) return 0; } +static const struct phylink_mac_ops qca8k_phylink_mac_ops = { + .mac_select_pcs = qca8k_phylink_mac_select_pcs, + .mac_config = qca8k_phylink_mac_config, + .mac_link_down = qca8k_phylink_mac_link_down, + .mac_link_up = qca8k_phylink_mac_link_up, +}; + static const struct dsa_switch_ops qca8k_switch_ops = { .get_tag_protocol = qca8k_get_tag_protocol, .setup = qca8k_setup, @@ -2021,10 +2039,6 @@ static const struct dsa_switch_ops qca8k_switch_ops = { .port_vlan_add = qca8k_port_vlan_add, .port_vlan_del = qca8k_port_vlan_del, .phylink_get_caps = qca8k_phylink_get_caps, - .phylink_mac_select_pcs = qca8k_phylink_mac_select_pcs, - .phylink_mac_config = qca8k_phylink_mac_config, - .phylink_mac_link_down = qca8k_phylink_mac_link_down, - .phylink_mac_link_up = qca8k_phylink_mac_link_up, .get_phy_flags = qca8k_get_phy_flags, .port_lag_join = qca8k_port_lag_join, .port_lag_leave = qca8k_port_lag_leave, @@ -2091,6 +2105,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev) priv->ds->num_ports = QCA8K_NUM_PORTS; priv->ds->priv = priv; priv->ds->ops = &qca8k_switch_ops; + priv->ds->phylink_mac_ops = &qca8k_phylink_mac_ops; mutex_init(&priv->reg_mutex); dev_set_drvdata(&mdiodev->dev, priv); diff --git a/drivers/net/dsa/realtek/realtek.h 
b/drivers/net/dsa/realtek/realtek.h index e0b1aa01337b..a1b2e0b529d5 100644 --- a/drivers/net/dsa/realtek/realtek.h +++ b/drivers/net/dsa/realtek/realtek.h @@ -17,6 +17,7 @@ #define REALTEK_HW_STOP_DELAY 25 /* msecs */ #define REALTEK_HW_START_DELAY 100 /* msecs */ +struct phylink_mac_ops; struct realtek_ops; struct dentry; struct inode; @@ -117,6 +118,7 @@ struct realtek_ops { struct realtek_variant { const struct dsa_switch_ops *ds_ops; const struct realtek_ops *ops; + const struct phylink_mac_ops *phylink_mac_ops; unsigned int clk_delay; u8 cmd_read; u8 cmd_write; diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c index 12665a8a3412..b9674f68b756 100644 --- a/drivers/net/dsa/realtek/rtl8365mb.c +++ b/drivers/net/dsa/realtek/rtl8365mb.c @@ -1048,11 +1048,13 @@ static void rtl8365mb_phylink_get_caps(struct dsa_switch *ds, int port, phy_interface_set_rgmii(config->supported_interfaces); } -static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port, +static void rtl8365mb_phylink_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) { - struct realtek_priv *priv = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct realtek_priv *priv = dp->ds->priv; + u8 port = dp->index; int ret; if (mode != MLO_AN_PHY && mode != MLO_AN_FIXED) { @@ -1076,13 +1078,15 @@ static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port, */ } -static void rtl8365mb_phylink_mac_link_down(struct dsa_switch *ds, int port, +static void rtl8365mb_phylink_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { - struct realtek_priv *priv = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct realtek_priv *priv = dp->ds->priv; struct rtl8365mb_port *p; struct rtl8365mb *mb; + u8 port = dp->index; int ret; mb = priv->chip_data; @@ -1101,16 +1105,18 @@ static void rtl8365mb_phylink_mac_link_down(struct dsa_switch *ds, int port, } } -static void rtl8365mb_phylink_mac_link_up(struct dsa_switch *ds, int port, +static void rtl8365mb_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phydev, unsigned int mode, phy_interface_t interface, - struct phy_device *phydev, int speed, - int duplex, bool tx_pause, + int speed, int duplex, bool tx_pause, bool rx_pause) { - struct realtek_priv *priv = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct realtek_priv *priv = dp->ds->priv; struct rtl8365mb_port *p; struct rtl8365mb *mb; + u8 port = dp->index; int ret; mb = priv->chip_data; @@ -2106,15 +2112,18 @@ static int rtl8365mb_detect(struct realtek_priv *priv) return 0; } +static const struct phylink_mac_ops rtl8365mb_phylink_mac_ops = { + .mac_config = rtl8365mb_phylink_mac_config, + .mac_link_down = rtl8365mb_phylink_mac_link_down, + .mac_link_up = rtl8365mb_phylink_mac_link_up, +}; + static const struct dsa_switch_ops rtl8365mb_switch_ops = { .get_tag_protocol = rtl8365mb_get_tag_protocol, .change_tag_protocol = rtl8365mb_change_tag_protocol, .setup = rtl8365mb_setup, .teardown = rtl8365mb_teardown, .phylink_get_caps = rtl8365mb_phylink_get_caps, - .phylink_mac_config = rtl8365mb_phylink_mac_config, - .phylink_mac_link_down = rtl8365mb_phylink_mac_link_down, - .phylink_mac_link_up = rtl8365mb_phylink_mac_link_up, .port_stp_state_set = rtl8365mb_port_stp_state_set, .get_strings = rtl8365mb_get_strings, .get_ethtool_stats = rtl8365mb_get_ethtool_stats, @@ -2136,6 +2145,7 @@ static const struct realtek_ops 
rtl8365mb_ops = { const struct realtek_variant rtl8365mb_variant = { .ds_ops = &rtl8365mb_switch_ops, .ops = &rtl8365mb_ops, + .phylink_mac_ops = &rtl8365mb_phylink_mac_ops, .clk_delay = 10, .cmd_read = 0xb9, .cmd_write = 0xb8, diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c index e10ae94cf771..9e821b42e5f3 100644 --- a/drivers/net/dsa/realtek/rtl8366rb.c +++ b/drivers/net/dsa/realtek/rtl8366rb.c @@ -176,6 +176,7 @@ #define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f /* LED control registers */ +/* The LED blink rate is global; it is used by all triggers in all groups. */ #define RTL8366RB_LED_BLINKRATE_REG 0x0430 #define RTL8366RB_LED_BLINKRATE_MASK 0x0007 #define RTL8366RB_LED_BLINKRATE_28MS 0x0000 @@ -185,27 +186,27 @@ #define RTL8366RB_LED_BLINKRATE_222MS 0x0004 #define RTL8366RB_LED_BLINKRATE_446MS 0x0005 +/* LED trigger event for each group */ #define RTL8366RB_LED_CTRL_REG 0x0431 -#define RTL8366RB_LED_OFF 0x0 -#define RTL8366RB_LED_DUP_COL 0x1 -#define RTL8366RB_LED_LINK_ACT 0x2 -#define RTL8366RB_LED_SPD1000 0x3 -#define RTL8366RB_LED_SPD100 0x4 -#define RTL8366RB_LED_SPD10 0x5 -#define RTL8366RB_LED_SPD1000_ACT 0x6 -#define RTL8366RB_LED_SPD100_ACT 0x7 -#define RTL8366RB_LED_SPD10_ACT 0x8 -#define RTL8366RB_LED_SPD100_10_ACT 0x9 -#define RTL8366RB_LED_FIBER 0xa -#define RTL8366RB_LED_AN_FAULT 0xb -#define RTL8366RB_LED_LINK_RX 0xc -#define RTL8366RB_LED_LINK_TX 0xd -#define RTL8366RB_LED_MASTER 0xe -#define RTL8366RB_LED_FORCE 0xf +#define RTL8366RB_LED_CTRL_OFFSET(led_group) \ + (4 * (led_group)) +#define RTL8366RB_LED_CTRL_MASK(led_group) \ + (0xf << RTL8366RB_LED_CTRL_OFFSET(led_group)) + +/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only + * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is + * RTL8366RB_LEDGROUP_FORCE. Otherwise, it is ignored. + */ #define RTL8366RB_LED_0_1_CTRL_REG 0x0432 -#define RTL8366RB_LED_1_OFFSET 6 #define RTL8366RB_LED_2_3_CTRL_REG 0x0433 -#define RTL8366RB_LED_3_OFFSET 6 +#define RTL8366RB_LED_X_X_CTRL_REG(led_group) \ + ((led_group) <= 1 ? 
\ + RTL8366RB_LED_0_1_CTRL_REG : \ + RTL8366RB_LED_2_3_CTRL_REG) +#define RTL8366RB_LED_0_X_CTRL_MASK GENMASK(5, 0) +#define RTL8366RB_LED_X_1_CTRL_MASK GENMASK(11, 6) +#define RTL8366RB_LED_2_X_CTRL_MASK GENMASK(5, 0) +#define RTL8366RB_LED_X_3_CTRL_MASK GENMASK(11, 6) #define RTL8366RB_MIB_COUNT 33 #define RTL8366RB_GLOBAL_MIB_COUNT 1 @@ -349,14 +350,44 @@ #define RTL8366RB_GREEN_FEATURE_TX BIT(0) #define RTL8366RB_GREEN_FEATURE_RX BIT(2) +enum rtl8366_ledgroup_mode { + RTL8366RB_LEDGROUP_OFF = 0x0, + RTL8366RB_LEDGROUP_DUP_COL = 0x1, + RTL8366RB_LEDGROUP_LINK_ACT = 0x2, + RTL8366RB_LEDGROUP_SPD1000 = 0x3, + RTL8366RB_LEDGROUP_SPD100 = 0x4, + RTL8366RB_LEDGROUP_SPD10 = 0x5, + RTL8366RB_LEDGROUP_SPD1000_ACT = 0x6, + RTL8366RB_LEDGROUP_SPD100_ACT = 0x7, + RTL8366RB_LEDGROUP_SPD10_ACT = 0x8, + RTL8366RB_LEDGROUP_SPD100_10_ACT = 0x9, + RTL8366RB_LEDGROUP_FIBER = 0xa, + RTL8366RB_LEDGROUP_AN_FAULT = 0xb, + RTL8366RB_LEDGROUP_LINK_RX = 0xc, + RTL8366RB_LEDGROUP_LINK_TX = 0xd, + RTL8366RB_LEDGROUP_MASTER = 0xe, + RTL8366RB_LEDGROUP_FORCE = 0xf, + + __RTL8366RB_LEDGROUP_MODE_MAX +}; + +struct rtl8366rb_led { + u8 port_num; + u8 led_group; + struct realtek_priv *priv; + struct led_classdev cdev; +}; + /** * struct rtl8366rb - RTL8366RB-specific data * @max_mtu: per-port max MTU setting * @pvid_enabled: if PVID is set for respective port + * @leds: per-port and per-ledgroup led info */ struct rtl8366rb { unsigned int max_mtu[RTL8366RB_NUM_PORTS]; bool pvid_enabled[RTL8366RB_NUM_PORTS]; + struct rtl8366rb_led leds[RTL8366RB_NUM_PORTS][RTL8366RB_NUM_LEDGROUPS]; }; static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = { @@ -799,6 +830,217 @@ static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table, return 0; } +static int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv, + u8 led_group, + enum rtl8366_ledgroup_mode mode) +{ + int ret; + u32 val; + + val = mode << RTL8366RB_LED_CTRL_OFFSET(led_group); + + ret = regmap_update_bits(priv->map, + RTL8366RB_LED_CTRL_REG, + RTL8366RB_LED_CTRL_MASK(led_group), + val); + if (ret) + return ret; + + return 0; +} + +static inline u32 rtl8366rb_led_group_port_mask(u8 led_group, u8 port) +{ + switch (led_group) { + case 0: + return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); + case 1: + return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); + case 2: + return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); + case 3: + return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); + default: + return 0; + } +} + +static int rb8366rb_get_port_led(struct rtl8366rb_led *led) +{ + struct realtek_priv *priv = led->priv; + u8 led_group = led->led_group; + u8 port_num = led->port_num; + int ret; + u32 val; + + ret = regmap_read(priv->map, RTL8366RB_LED_X_X_CTRL_REG(led_group), + &val); + if (ret) { + dev_err(priv->dev, "error reading LED on port %d group %d\n", + led_group, port_num); + return ret; + } + + return !!(val & rtl8366rb_led_group_port_mask(led_group, port_num)); +} + +static int rb8366rb_set_port_led(struct rtl8366rb_led *led, bool enable) +{ + struct realtek_priv *priv = led->priv; + u8 led_group = led->led_group; + u8 port_num = led->port_num; + int ret; + + ret = regmap_update_bits(priv->map, + RTL8366RB_LED_X_X_CTRL_REG(led_group), + rtl8366rb_led_group_port_mask(led_group, + port_num), + enable ? 
0xffff : 0); + if (ret) { + dev_err(priv->dev, "error updating LED on port %d group %d\n", + led_group, port_num); + return ret; + } + + /* Change the LED group to manual controlled LEDs if required */ + ret = rb8366rb_set_ledgroup_mode(priv, led_group, + RTL8366RB_LEDGROUP_FORCE); + + if (ret) { + dev_err(priv->dev, "error updating LED GROUP group %d\n", + led_group); + return ret; + } + + return 0; +} + +static int +rtl8366rb_cled_brightness_set_blocking(struct led_classdev *ldev, + enum led_brightness brightness) +{ + struct rtl8366rb_led *led = container_of(ldev, struct rtl8366rb_led, + cdev); + + return rb8366rb_set_port_led(led, brightness == LED_ON); +} + +static int rtl8366rb_setup_led(struct realtek_priv *priv, struct dsa_port *dp, + struct fwnode_handle *led_fwnode) +{ + struct rtl8366rb *rb = priv->chip_data; + struct led_init_data init_data = { }; + enum led_default_state state; + struct rtl8366rb_led *led; + u32 led_group; + int ret; + + ret = fwnode_property_read_u32(led_fwnode, "reg", &led_group); + if (ret) + return ret; + + if (led_group >= RTL8366RB_NUM_LEDGROUPS) { + dev_warn(priv->dev, "Invalid LED reg %d defined for port %d", + led_group, dp->index); + return -EINVAL; + } + + led = &rb->leds[dp->index][led_group]; + led->port_num = dp->index; + led->led_group = led_group; + led->priv = priv; + + state = led_init_default_state_get(led_fwnode); + switch (state) { + case LEDS_DEFSTATE_ON: + led->cdev.brightness = 1; + rb8366rb_set_port_led(led, 1); + break; + case LEDS_DEFSTATE_KEEP: + led->cdev.brightness = + rb8366rb_get_port_led(led); + break; + case LEDS_DEFSTATE_OFF: + default: + led->cdev.brightness = 0; + rb8366rb_set_port_led(led, 0); + } + + led->cdev.max_brightness = 1; + led->cdev.brightness_set_blocking = + rtl8366rb_cled_brightness_set_blocking; + init_data.fwnode = led_fwnode; + init_data.devname_mandatory = true; + + init_data.devicename = kasprintf(GFP_KERNEL, "Realtek-%d:0%d:%d", + dp->ds->index, dp->index, led_group); + if (!init_data.devicename) + return -ENOMEM; + + ret = devm_led_classdev_register_ext(priv->dev, &led->cdev, &init_data); + if (ret) { + dev_warn(priv->dev, "Failed to init LED %d for port %d", + led_group, dp->index); + return ret; + } + + return 0; +} + +static int rtl8366rb_setup_all_leds_off(struct realtek_priv *priv) +{ + int ret = 0; + int i; + + regmap_update_bits(priv->map, + RTL8366RB_INTERRUPT_CONTROL_REG, + RTL8366RB_P4_RGMII_LED, + 0); + + for (i = 0; i < RTL8366RB_NUM_LEDGROUPS; i++) { + ret = rb8366rb_set_ledgroup_mode(priv, i, + RTL8366RB_LEDGROUP_OFF); + if (ret) + return ret; + } + + return ret; +} + +static int rtl8366rb_setup_leds(struct realtek_priv *priv) +{ + struct device_node *leds_np, *led_np; + struct dsa_switch *ds = &priv->ds; + struct dsa_port *dp; + int ret = 0; + + dsa_switch_for_each_port(dp, ds) { + if (!dp->dn) + continue; + + leds_np = of_get_child_by_name(dp->dn, "leds"); + if (!leds_np) { + dev_dbg(priv->dev, "No leds defined for port %d", + dp->index); + continue; + } + + for_each_child_of_node(leds_np, led_np) { + ret = rtl8366rb_setup_led(priv, dp, + of_fwnode_handle(led_np)); + if (ret) { + of_node_put(led_np); + break; + } + } + + of_node_put(leds_np); + if (ret) + return ret; + } + return 0; +} + static int rtl8366rb_setup(struct dsa_switch *ds) { struct realtek_priv *priv = ds->priv; @@ -807,7 +1049,6 @@ static int rtl8366rb_setup(struct dsa_switch *ds) u32 chip_ver = 0; u32 chip_id = 0; int jam_size; - u32 val; int ret; int i; @@ -987,7 +1228,9 @@ static int rtl8366rb_setup(struct dsa_switch 
*ds) if (ret) return ret; - /* Set blinking, TODO: make this configurable */ + /* Set blinking, used by all LED groups using HW triggers. + * TODO: make this configurable + */ ret = regmap_update_bits(priv->map, RTL8366RB_LED_BLINKRATE_REG, RTL8366RB_LED_BLINKRATE_MASK, RTL8366RB_LED_BLINKRATE_56MS); @@ -995,32 +1238,17 @@ static int rtl8366rb_setup(struct dsa_switch *ds) return ret; /* Set up LED activity: - * Each port has 4 LEDs, we configure all ports to the same - * behaviour (no individual config) but we can set up each - * LED separately. + * Each port has 4 LEDs on fixed groups. Each group shares the same + * hardware trigger across all ports. LEDs can only be indiviually + * controlled setting the LED group to fixed mode and using the driver + * to toggle them LEDs on/off. */ if (priv->leds_disabled) { - /* Turn everything off */ - regmap_update_bits(priv->map, - RTL8366RB_LED_0_1_CTRL_REG, - 0x0FFF, 0); - regmap_update_bits(priv->map, - RTL8366RB_LED_2_3_CTRL_REG, - 0x0FFF, 0); - regmap_update_bits(priv->map, - RTL8366RB_INTERRUPT_CONTROL_REG, - RTL8366RB_P4_RGMII_LED, - 0); - val = RTL8366RB_LED_OFF; + ret = rtl8366rb_setup_all_leds_off(priv); + if (ret) + return ret; } else { - /* TODO: make this configurable per LED */ - val = RTL8366RB_LED_FORCE; - } - for (i = 0; i < 4; i++) { - ret = regmap_update_bits(priv->map, - RTL8366RB_LED_CTRL_REG, - 0xf << (i * 4), - val << (i * 4)); + ret = rtl8366rb_setup_leds(priv); if (ret) return ret; } @@ -1077,11 +1305,19 @@ static void rtl8366rb_phylink_get_caps(struct dsa_switch *ds, int port, } static void -rtl8366rb_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode, - phy_interface_t interface, struct phy_device *phydev, +rtl8366rb_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) +{ +} + +static void +rtl8366rb_mac_link_up(struct phylink_config *config, struct phy_device *phydev, + unsigned int mode, phy_interface_t interface, int speed, int duplex, bool tx_pause, bool rx_pause) { - struct realtek_priv *priv = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct realtek_priv *priv = dp->ds->priv; + int port = dp->index; unsigned int val; int ret; @@ -1147,10 +1383,12 @@ rtl8366rb_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode, } static void -rtl8366rb_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode, +rtl8366rb_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { - struct realtek_priv *priv = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct realtek_priv *priv = dp->ds->priv; + int port = dp->index; int ret; if (port != priv->cpu_port) @@ -1167,52 +1405,6 @@ rtl8366rb_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode, } } -static void rb8366rb_set_port_led(struct realtek_priv *priv, - int port, bool enable) -{ - u16 val = enable ? 
0x3f : 0; - int ret; - - if (priv->leds_disabled) - return; - - switch (port) { - case 0: - ret = regmap_update_bits(priv->map, - RTL8366RB_LED_0_1_CTRL_REG, - 0x3F, val); - break; - case 1: - ret = regmap_update_bits(priv->map, - RTL8366RB_LED_0_1_CTRL_REG, - 0x3F << RTL8366RB_LED_1_OFFSET, - val << RTL8366RB_LED_1_OFFSET); - break; - case 2: - ret = regmap_update_bits(priv->map, - RTL8366RB_LED_2_3_CTRL_REG, - 0x3F, val); - break; - case 3: - ret = regmap_update_bits(priv->map, - RTL8366RB_LED_2_3_CTRL_REG, - 0x3F << RTL8366RB_LED_3_OFFSET, - val << RTL8366RB_LED_3_OFFSET); - break; - case 4: - ret = regmap_update_bits(priv->map, - RTL8366RB_INTERRUPT_CONTROL_REG, - RTL8366RB_P4_RGMII_LED, - enable ? RTL8366RB_P4_RGMII_LED : 0); - break; - default: - dev_err(priv->dev, "no LED for port %d\n", port); - return; - } - if (ret) - dev_err(priv->dev, "error updating LED on port %d\n", port); -} - static int rtl8366rb_port_enable(struct dsa_switch *ds, int port, struct phy_device *phy) @@ -1226,7 +1418,6 @@ rtl8366rb_port_enable(struct dsa_switch *ds, int port, if (ret) return ret; - rb8366rb_set_port_led(priv, port, true); return 0; } @@ -1241,8 +1432,6 @@ rtl8366rb_port_disable(struct dsa_switch *ds, int port) BIT(port)); if (ret) return; - - rb8366rb_set_port_led(priv, port, false); } static int @@ -1849,12 +2038,16 @@ static int rtl8366rb_detect(struct realtek_priv *priv) return 0; } +static const struct phylink_mac_ops rtl8366rb_phylink_mac_ops = { + .mac_config = rtl8366rb_mac_config, + .mac_link_down = rtl8366rb_mac_link_down, + .mac_link_up = rtl8366rb_mac_link_up, +}; + static const struct dsa_switch_ops rtl8366rb_switch_ops = { .get_tag_protocol = rtl8366_get_tag_protocol, .setup = rtl8366rb_setup, .phylink_get_caps = rtl8366rb_phylink_get_caps, - .phylink_mac_link_up = rtl8366rb_mac_link_up, - .phylink_mac_link_down = rtl8366rb_mac_link_down, .get_strings = rtl8366_get_strings, .get_ethtool_stats = rtl8366_get_ethtool_stats, .get_sset_count = rtl8366_get_sset_count, @@ -1892,6 +2085,7 @@ static const struct realtek_ops rtl8366rb_ops = { const struct realtek_variant rtl8366rb_variant = { .ds_ops = &rtl8366rb_switch_ops, .ops = &rtl8366rb_ops, + .phylink_mac_ops = &rtl8366rb_phylink_mac_ops, .clk_delay = 10, .cmd_read = 0xa9, .cmd_write = 0xa8, diff --git a/drivers/net/dsa/realtek/rtl83xx.c b/drivers/net/dsa/realtek/rtl83xx.c index d2e876805393..35709a1756ae 100644 --- a/drivers/net/dsa/realtek/rtl83xx.c +++ b/drivers/net/dsa/realtek/rtl83xx.c @@ -236,6 +236,7 @@ int rtl83xx_register_switch(struct realtek_priv *priv) ds->priv = priv; ds->dev = priv->dev; ds->ops = priv->variant->ds_ops; + ds->phylink_mac_ops = priv->variant->phylink_mac_ops; ds->num_ports = priv->num_ports; ret = dsa_register_switch(ds); @@ -290,16 +291,13 @@ EXPORT_SYMBOL_NS_GPL(rtl83xx_shutdown, REALTEK_DSA); * rtl83xx_remove() - Cleanup a realtek switch driver * @priv: realtek_priv pointer * - * If a method is provided, this function asserts the hard reset of the switch - * in order to avoid leaking traffic when the driver is gone. + * Placehold for common cleanup procedures. * - * Context: Might sleep if priv->gdev->chip->can_sleep. 
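/* A minimal sketch of the conversion pattern applied to the DSA drivers in
 * this series: the phylink handlers move out of dsa_switch_ops into a
 * dedicated phylink_mac_ops table, take a struct phylink_config instead of
 * (ds, port), and recover the port via dsa_phylink_to_port(). The foo_*
 * names below are hypothetical placeholders, not part of any driver touched
 * here.
 */
static void foo_mac_config(struct phylink_config *config, unsigned int mode,
			   const struct phylink_link_state *state)
{
	/* Several drivers in this series need nothing until the link comes up. */
}

static void foo_mac_link_down(struct phylink_config *config,
			      unsigned int mode, phy_interface_t interface)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);
	struct foo_priv *priv = dp->ds->priv;

	foo_port_set_status(priv, dp->index, false);	/* hypothetical helper */
}

static void foo_mac_link_up(struct phylink_config *config,
			    struct phy_device *phydev, unsigned int mode,
			    phy_interface_t interface, int speed, int duplex,
			    bool tx_pause, bool rx_pause)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);
	struct foo_priv *priv = dp->ds->priv;

	foo_port_set_status(priv, dp->index, true);	/* hypothetical helper */
}

static const struct phylink_mac_ops foo_phylink_mac_ops = {
	.mac_config	= foo_mac_config,
	.mac_link_down	= foo_mac_link_down,
	.mac_link_up	= foo_mac_link_up,
};

/* At probe time the table is attached alongside the existing switch ops,
 * replacing the old dsa_switch_ops::phylink_mac_* callbacks:
 *
 *	ds->ops			= &foo_switch_ops;
 *	ds->phylink_mac_ops	= &foo_phylink_mac_ops;
 */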
+ * Context: Any * Return: nothing */ void rtl83xx_remove(struct realtek_priv *priv) { - /* leave the device reset asserted */ - rtl83xx_reset_assert(priv); } EXPORT_SYMBOL_NS_GPL(rtl83xx_remove, REALTEK_DSA); diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c index 10092ea85e46..92e032972b34 100644 --- a/drivers/net/dsa/rzn1_a5psw.c +++ b/drivers/net/dsa/rzn1_a5psw.c @@ -239,23 +239,31 @@ static void a5psw_phylink_get_caps(struct dsa_switch *ds, int port, } static struct phylink_pcs * -a5psw_phylink_mac_select_pcs(struct dsa_switch *ds, int port, +a5psw_phylink_mac_select_pcs(struct phylink_config *config, phy_interface_t interface) { - struct dsa_port *dp = dsa_to_port(ds, port); - struct a5psw *a5psw = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct a5psw *a5psw = dp->ds->priv; - if (!dsa_port_is_cpu(dp) && a5psw->pcs[port]) - return a5psw->pcs[port]; + if (dsa_port_is_cpu(dp)) + return NULL; - return NULL; + return a5psw->pcs[dp->index]; +} + +static void a5psw_phylink_mac_config(struct phylink_config *config, + unsigned int mode, + const struct phylink_link_state *state) +{ } -static void a5psw_phylink_mac_link_down(struct dsa_switch *ds, int port, +static void a5psw_phylink_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { - struct a5psw *a5psw = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct a5psw *a5psw = dp->ds->priv; + int port = dp->index; u32 cmd_cfg; cmd_cfg = a5psw_reg_readl(a5psw, A5PSW_CMD_CFG(port)); @@ -263,15 +271,17 @@ static void a5psw_phylink_mac_link_down(struct dsa_switch *ds, int port, a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg); } -static void a5psw_phylink_mac_link_up(struct dsa_switch *ds, int port, +static void a5psw_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phydev, unsigned int mode, phy_interface_t interface, - struct phy_device *phydev, int speed, - int duplex, bool tx_pause, bool rx_pause) + int speed, int duplex, bool tx_pause, + bool rx_pause) { u32 cmd_cfg = A5PSW_CMD_CFG_RX_ENA | A5PSW_CMD_CFG_TX_ENA | A5PSW_CMD_CFG_TX_CRC_APPEND; - struct a5psw *a5psw = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct a5psw *a5psw = dp->ds->priv; if (speed == SPEED_1000) cmd_cfg |= A5PSW_CMD_CFG_ETH_SPEED; @@ -284,7 +294,7 @@ static void a5psw_phylink_mac_link_up(struct dsa_switch *ds, int port, if (!rx_pause) cmd_cfg &= ~A5PSW_CMD_CFG_PAUSE_IGNORE; - a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg); + a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(dp->index), cmd_cfg); } static int a5psw_set_ageing_time(struct dsa_switch *ds, unsigned int msecs) @@ -992,15 +1002,19 @@ static int a5psw_setup(struct dsa_switch *ds) return 0; } +static const struct phylink_mac_ops a5psw_phylink_mac_ops = { + .mac_select_pcs = a5psw_phylink_mac_select_pcs, + .mac_config = a5psw_phylink_mac_config, + .mac_link_down = a5psw_phylink_mac_link_down, + .mac_link_up = a5psw_phylink_mac_link_up, +}; + static const struct dsa_switch_ops a5psw_switch_ops = { .get_tag_protocol = a5psw_get_tag_protocol, .setup = a5psw_setup, .port_disable = a5psw_port_disable, .port_enable = a5psw_port_enable, .phylink_get_caps = a5psw_phylink_get_caps, - .phylink_mac_select_pcs = a5psw_phylink_mac_select_pcs, - .phylink_mac_link_down = a5psw_phylink_mac_link_down, - .phylink_mac_link_up = a5psw_phylink_mac_link_up, .port_change_mtu = a5psw_port_change_mtu, .port_max_mtu = a5psw_port_max_mtu, .get_sset_count = a5psw_get_sset_count, @@ -1252,6 
+1266,7 @@ static int a5psw_probe(struct platform_device *pdev) ds->dev = dev; ds->num_ports = A5PSW_PORTS_NUM; ds->ops = &a5psw_switch_ops; + ds->phylink_mac_ops = &a5psw_phylink_mac_ops; ds->priv = a5psw; ret = dsa_register_switch(ds); diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c index 9e8ca182c722..05d8ed3121e7 100644 --- a/drivers/net/dsa/sja1105/sja1105_flower.c +++ b/drivers/net/dsa/sja1105/sja1105_flower.c @@ -214,6 +214,9 @@ static int sja1105_flower_parse_key(struct sja1105_private *priv, return -EOPNOTSUPP; } + if (flow_rule_match_has_control_flags(rule, extack)) + return -EOPNOTSUPP; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { struct flow_match_basic match; diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index 6646f7fb0f90..ee0fb1c343f1 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -1358,10 +1358,11 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port, } static struct phylink_pcs * -sja1105_mac_select_pcs(struct dsa_switch *ds, int port, phy_interface_t iface) +sja1105_mac_select_pcs(struct phylink_config *config, phy_interface_t iface) { - struct sja1105_private *priv = ds->priv; - struct dw_xpcs *xpcs = priv->xpcs[port]; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct sja1105_private *priv = dp->ds->priv; + struct dw_xpcs *xpcs = priv->xpcs[dp->index]; if (xpcs) return &xpcs->pcs; @@ -1369,21 +1370,31 @@ sja1105_mac_select_pcs(struct dsa_switch *ds, int port, phy_interface_t iface) return NULL; } -static void sja1105_mac_link_down(struct dsa_switch *ds, int port, +static void sja1105_mac_config(struct phylink_config *config, + unsigned int mode, + const struct phylink_link_state *state) +{ +} + +static void sja1105_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { - sja1105_inhibit_tx(ds->priv, BIT(port), true); + struct dsa_port *dp = dsa_phylink_to_port(config); + + sja1105_inhibit_tx(dp->ds->priv, BIT(dp->index), true); } -static void sja1105_mac_link_up(struct dsa_switch *ds, int port, +static void sja1105_mac_link_up(struct phylink_config *config, + struct phy_device *phydev, unsigned int mode, phy_interface_t interface, - struct phy_device *phydev, int speed, int duplex, bool tx_pause, bool rx_pause) { - struct sja1105_private *priv = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct sja1105_private *priv = dp->ds->priv; + int port = dp->index; sja1105_adjust_port_config(priv, port, speed); @@ -3198,6 +3209,13 @@ static void sja1105_teardown(struct dsa_switch *ds) sja1105_static_config_free(&priv->static_config); } +static const struct phylink_mac_ops sja1105_phylink_mac_ops = { + .mac_select_pcs = sja1105_mac_select_pcs, + .mac_config = sja1105_mac_config, + .mac_link_up = sja1105_mac_link_up, + .mac_link_down = sja1105_mac_link_down, +}; + static const struct dsa_switch_ops sja1105_switch_ops = { .get_tag_protocol = sja1105_get_tag_protocol, .connect_tag_protocol = sja1105_connect_tag_protocol, @@ -3207,9 +3225,6 @@ static const struct dsa_switch_ops sja1105_switch_ops = { .port_change_mtu = sja1105_change_mtu, .port_max_mtu = sja1105_get_max_mtu, .phylink_get_caps = sja1105_phylink_get_caps, - .phylink_mac_select_pcs = sja1105_mac_select_pcs, - .phylink_mac_link_up = sja1105_mac_link_up, - .phylink_mac_link_down = sja1105_mac_link_down, .get_strings = sja1105_get_strings, .get_ethtool_stats = 
sja1105_get_ethtool_stats, .get_sset_count = sja1105_get_sset_count, @@ -3375,6 +3390,7 @@ static int sja1105_probe(struct spi_device *spi) ds->dev = dev; ds->num_ports = priv->info->num_ports; ds->ops = &sja1105_switch_ops; + ds->phylink_mac_ops = &sja1105_phylink_mac_ops; ds->priv = priv; priv->ds = ds; @@ -3456,7 +3472,6 @@ MODULE_DEVICE_TABLE(spi, sja1105_spi_ids); static struct spi_driver sja1105_driver = { .driver = { .name = "sja1105", - .owner = THIS_MODULE, .of_match_table = of_match_ptr(sja1105_dt_ids), }, .id_table = sja1105_spi_ids, diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c index ae70eac3be28..4b031fefcec6 100644 --- a/drivers/net/dsa/vitesse-vsc73xx-core.c +++ b/drivers/net/dsa/vitesse-vsc73xx-core.c @@ -17,6 +17,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> +#include <linux/iopoll.h> #include <linux/of.h> #include <linux/of_mdio.h> #include <linux/bitops.h> @@ -268,6 +269,9 @@ #define IS_7398(a) ((a)->chipid == VSC73XX_CHIPID_ID_7398) #define IS_739X(a) (IS_7395(a) || IS_7398(a)) +#define VSC73XX_POLL_SLEEP_US 1000 +#define VSC73XX_POLL_TIMEOUT_US 10000 + struct vsc73xx_counter { u8 counter; const char *name; @@ -713,51 +717,44 @@ static void vsc73xx_init_port(struct vsc73xx *vsc, int port) port, VSC73XX_C_RX0, 0); } -static void vsc73xx_adjust_enable_port(struct vsc73xx *vsc, - int port, struct phy_device *phydev, - u32 initval) +static void vsc73xx_reset_port(struct vsc73xx *vsc, int port, u32 initval) { - u32 val = initval; - u8 seed; - - /* Reset this port FIXME: break out subroutine */ - val |= VSC73XX_MAC_CFG_RESET; - vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val); - - /* Seed the port randomness with randomness */ - get_random_bytes(&seed, 1); - val |= seed << VSC73XX_MAC_CFG_SEED_OFFSET; - val |= VSC73XX_MAC_CFG_SEED_LOAD; - val |= VSC73XX_MAC_CFG_WEXC_DIS; - vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val); + int ret, err; + u32 val; - /* Flow control for the PHY facing ports: - * Use a zero delay pause frame when pause condition is left - * Obey pause control frames - * When generating pause frames, use 0xff as pause value - */ - vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_FCCONF, - VSC73XX_FCCONF_ZERO_PAUSE_EN | - VSC73XX_FCCONF_FLOW_CTRL_OBEY | - 0xff); + /* Disable RX on this port */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port, + VSC73XX_MAC_CFG, + VSC73XX_MAC_CFG_RX_EN, 0); - /* Disallow backward dropping of frames from this port */ + /* Discard packets */ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0, - VSC73XX_SBACKWDROP, BIT(port), 0); + VSC73XX_ARBDISC, BIT(port), BIT(port)); + + /* Wait until queue is empty */ + ret = read_poll_timeout(vsc73xx_read, err, + err < 0 || (val & BIT(port)), + VSC73XX_POLL_SLEEP_US, + VSC73XX_POLL_TIMEOUT_US, false, + vsc, VSC73XX_BLOCK_ARBITER, 0, + VSC73XX_ARBEMPTY, &val); + if (ret) + dev_err(vsc->dev, + "timeout waiting for block arbiter\n"); + else if (err < 0) + dev_err(vsc->dev, "error reading arbiter\n"); - /* Enable TX, RX, deassert reset, stop loading seed */ - vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port, - VSC73XX_MAC_CFG, - VSC73XX_MAC_CFG_RESET | VSC73XX_MAC_CFG_SEED_LOAD | - VSC73XX_MAC_CFG_TX_EN | VSC73XX_MAC_CFG_RX_EN, - VSC73XX_MAC_CFG_TX_EN | VSC73XX_MAC_CFG_RX_EN); + /* Put this port into reset */ + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, + VSC73XX_MAC_CFG_RESET | initval); } -static void vsc73xx_adjust_link(struct dsa_switch *ds, int port, - 
struct phy_device *phydev) +static void vsc73xx_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) { - struct vsc73xx *vsc = ds->priv; - u32 val; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct vsc73xx *vsc = dp->ds->priv; + int port = dp->index; /* Special handling of the CPU-facing port */ if (port == CPU_PORT) { @@ -774,104 +771,93 @@ static void vsc73xx_adjust_link(struct dsa_switch *ds, int port, VSC73XX_ADVPORTM_ENA_GTX | VSC73XX_ADVPORTM_DDR_MODE); } +} + +static void vsc73xx_mac_link_down(struct phylink_config *config, + unsigned int mode, phy_interface_t interface) +{ + struct dsa_port *dp = dsa_phylink_to_port(config); + struct vsc73xx *vsc = dp->ds->priv; + int port = dp->index; - /* This is the MAC confiuration that always need to happen - * after a PHY or the CPU port comes up or down. + /* This routine is described in the datasheet (below ARBDISC register + * description) */ - if (!phydev->link) { - int maxloop = 10; - - dev_dbg(vsc->dev, "port %d: went down\n", - port); - - /* Disable RX on this port */ - vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port, - VSC73XX_MAC_CFG, - VSC73XX_MAC_CFG_RX_EN, 0); - - /* Discard packets */ - vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0, - VSC73XX_ARBDISC, BIT(port), BIT(port)); - - /* Wait until queue is empty */ - vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0, - VSC73XX_ARBEMPTY, &val); - while (!(val & BIT(port))) { - msleep(1); - vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0, - VSC73XX_ARBEMPTY, &val); - if (--maxloop == 0) { - dev_err(vsc->dev, - "timeout waiting for block arbiter\n"); - /* Continue anyway */ - break; - } - } + vsc73xx_reset_port(vsc, port, 0); + + /* Allow backward dropping of frames from this port */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0, + VSC73XX_SBACKWDROP, BIT(port), BIT(port)); + + /* Receive mask (disable forwarding) */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0, + VSC73XX_RECVMASK, BIT(port), 0); +} + +static void vsc73xx_mac_link_up(struct phylink_config *config, + struct phy_device *phy, unsigned int mode, + phy_interface_t interface, int speed, + int duplex, bool tx_pause, bool rx_pause) +{ + struct dsa_port *dp = dsa_phylink_to_port(config); + struct vsc73xx *vsc = dp->ds->priv; + int port = dp->index; + u32 val; + u8 seed; - /* Put this port into reset */ - vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, - VSC73XX_MAC_CFG_RESET); + if (speed == SPEED_1000) + val = VSC73XX_MAC_CFG_GIGA_MODE | VSC73XX_MAC_CFG_TX_IPG_1000M; + else + val = VSC73XX_MAC_CFG_TX_IPG_100_10M; - /* Accept packets again */ - vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0, - VSC73XX_ARBDISC, BIT(port), 0); + if (phy_interface_mode_is_rgmii(interface)) + val |= VSC73XX_MAC_CFG_CLK_SEL_1000M; + else + val |= VSC73XX_MAC_CFG_CLK_SEL_EXT; - /* Allow backward dropping of frames from this port */ - vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0, - VSC73XX_SBACKWDROP, BIT(port), BIT(port)); + if (duplex == DUPLEX_FULL) + val |= VSC73XX_MAC_CFG_FDX; - /* Receive mask (disable forwarding) */ - vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0, - VSC73XX_RECVMASK, BIT(port), 0); + /* This routine is described in the datasheet (below ARBDISC register + * description) + */ + vsc73xx_reset_port(vsc, port, val); - return; - } + /* Seed the port randomness with randomness */ + get_random_bytes(&seed, 1); + val |= seed << VSC73XX_MAC_CFG_SEED_OFFSET; + val |= VSC73XX_MAC_CFG_SEED_LOAD; + val |= VSC73XX_MAC_CFG_WEXC_DIS; + vsc73xx_write(vsc, 
VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val); - /* Figure out what speed was negotiated */ - if (phydev->speed == SPEED_1000) { - dev_dbg(vsc->dev, "port %d: 1000 Mbit mode full duplex\n", - port); - - /* Set up default for internal port or external RGMII */ - if (phydev->interface == PHY_INTERFACE_MODE_RGMII) - val = VSC73XX_MAC_CFG_1000M_F_RGMII; - else - val = VSC73XX_MAC_CFG_1000M_F_PHY; - vsc73xx_adjust_enable_port(vsc, port, phydev, val); - } else if (phydev->speed == SPEED_100) { - if (phydev->duplex == DUPLEX_FULL) { - val = VSC73XX_MAC_CFG_100_10M_F_PHY; - dev_dbg(vsc->dev, - "port %d: 100 Mbit full duplex mode\n", - port); - } else { - val = VSC73XX_MAC_CFG_100_10M_H_PHY; - dev_dbg(vsc->dev, - "port %d: 100 Mbit half duplex mode\n", - port); - } - vsc73xx_adjust_enable_port(vsc, port, phydev, val); - } else if (phydev->speed == SPEED_10) { - if (phydev->duplex == DUPLEX_FULL) { - val = VSC73XX_MAC_CFG_100_10M_F_PHY; - dev_dbg(vsc->dev, - "port %d: 10 Mbit full duplex mode\n", - port); - } else { - val = VSC73XX_MAC_CFG_100_10M_H_PHY; - dev_dbg(vsc->dev, - "port %d: 10 Mbit half duplex mode\n", - port); - } - vsc73xx_adjust_enable_port(vsc, port, phydev, val); - } else { - dev_err(vsc->dev, - "could not adjust link: unknown speed\n"); - } + /* Flow control for the PHY facing ports: + * Use a zero delay pause frame when pause condition is left + * Obey pause control frames + * When generating pause frames, use 0xff as pause value + */ + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_FCCONF, + VSC73XX_FCCONF_ZERO_PAUSE_EN | + VSC73XX_FCCONF_FLOW_CTRL_OBEY | + 0xff); - /* Enable port (forwarding) in the receieve mask */ + /* Accept packets again */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0, + VSC73XX_ARBDISC, BIT(port), 0); + + /* Enable port (forwarding) in the receive mask */ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_RECVMASK, BIT(port), BIT(port)); + + /* Disallow backward dropping of frames from this port */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0, + VSC73XX_SBACKWDROP, BIT(port), 0); + + /* Enable TX, RX, deassert reset, stop loading seed */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port, + VSC73XX_MAC_CFG, + VSC73XX_MAC_CFG_RESET | VSC73XX_MAC_CFG_SEED_LOAD | + VSC73XX_MAC_CFG_TX_EN | VSC73XX_MAC_CFG_RX_EN, + VSC73XX_MAC_CFG_TX_EN | VSC73XX_MAC_CFG_RX_EN); } static int vsc73xx_port_enable(struct dsa_switch *ds, int port, @@ -1053,12 +1039,17 @@ static void vsc73xx_phylink_get_caps(struct dsa_switch *dsa, int port, config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000; } +static const struct phylink_mac_ops vsc73xx_phylink_mac_ops = { + .mac_config = vsc73xx_mac_config, + .mac_link_down = vsc73xx_mac_link_down, + .mac_link_up = vsc73xx_mac_link_up, +}; + static const struct dsa_switch_ops vsc73xx_ds_ops = { .get_tag_protocol = vsc73xx_get_tag_protocol, .setup = vsc73xx_setup, .phy_read = vsc73xx_phy_read, .phy_write = vsc73xx_phy_write, - .adjust_link = vsc73xx_adjust_link, .get_strings = vsc73xx_get_strings, .get_ethtool_stats = vsc73xx_get_ethtool_stats, .get_sset_count = vsc73xx_get_sset_count, @@ -1195,26 +1186,16 @@ int vsc73xx_probe(struct vsc73xx *vsc) vsc->addr[0], vsc->addr[1], vsc->addr[2], vsc->addr[3], vsc->addr[4], vsc->addr[5]); - /* The VSC7395 switch chips have 5+1 ports which means 5 - * ordinary ports and a sixth CPU port facing the processor - * with an RGMII interface. These ports are numbered 0..4 - * and 6, so they leave a "hole" in the port map for port 5, - * which is invalid. 
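/* The vsc73xx hunks above replace an open-coded msleep() loop with
 * read_poll_timeout() from <linux/iopoll.h>. As a rough guide to how the
 * macro arguments map (a sketch, not authoritative): the macro repeatedly
 * evaluates err = vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0,
 * VSC73XX_ARBEMPTY, &val), stops as soon as the condition
 * "err < 0 || (val & BIT(port))" becomes true, sleeps VSC73XX_POLL_SLEEP_US
 * between attempts, and gives up with -ETIMEDOUT after
 * VSC73XX_POLL_TIMEOUT_US. The "false" argument means the first read happens
 * immediately, without an initial sleep. A zero return only says the
 * condition was met; the caller still has to check err separately, which is
 * why vsc73xx_reset_port() reports the timeout and the read error as two
 * distinct cases.
 */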
- * - * The VSC7398 has 8 ports, port 7 is again the CPU port. - * - * We allocate 8 ports and avoid access to the nonexistant - * ports. - */ vsc->ds = devm_kzalloc(dev, sizeof(*vsc->ds), GFP_KERNEL); if (!vsc->ds) return -ENOMEM; vsc->ds->dev = dev; - vsc->ds->num_ports = 8; + vsc->ds->num_ports = VSC73XX_MAX_NUM_PORTS; vsc->ds->priv = vsc; vsc->ds->ops = &vsc73xx_ds_ops; + vsc->ds->phylink_mac_ops = &vsc73xx_phylink_mac_ops; ret = dsa_register_switch(vsc->ds); if (ret) { dev_err(dev, "unable to register switch (%d)\n", ret); diff --git a/drivers/net/dsa/vitesse-vsc73xx.h b/drivers/net/dsa/vitesse-vsc73xx.h index 30b1f0a36566..2997f7e108b1 100644 --- a/drivers/net/dsa/vitesse-vsc73xx.h +++ b/drivers/net/dsa/vitesse-vsc73xx.h @@ -3,8 +3,28 @@ #include <linux/etherdevice.h> #include <linux/gpio/driver.h> +/* The VSC7395 switch chips have 5+1 ports which means 5 ordinary ports and + * a sixth CPU port facing the processor with an RGMII interface. These ports + * are numbered 0..4 and 6, so they leave a "hole" in the port map for port 5, + * which is invalid. + * + * The VSC7398 has 8 ports, port 7 is again the CPU port. + * + * We allocate 8 ports and avoid access to the nonexistent ports. + */ +#define VSC73XX_MAX_NUM_PORTS 8 + /** - * struct vsc73xx - VSC73xx state container + * struct vsc73xx - VSC73xx state container: main data structure + * @dev: The device pointer + * @reset: The descriptor for the GPIO line tied to the reset pin + * @ds: Pointer to the DSA core structure + * @gc: Main structure of the GPIO controller + * @chipid: Storage for the Chip ID value read from the CHIPID register of the + * switch + * @addr: MAC address used in flow control frames + * @ops: Structure with hardware-dependent operations + * @priv: Pointer to the configuration interface structure */ struct vsc73xx { struct device *dev; @@ -17,6 +37,11 @@ struct vsc73xx { void *priv; }; +/** + * struct vsc73xx_ops - VSC73xx methods container + * @read: Method for register reading over the hardware-dependent interface + * @write: Method for register writing over the hardware-dependent interface + */ struct vsc73xx_ops { int (*read)(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg, u32 *val); diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c index 96db032b478f..de3b768f2ff9 100644 --- a/drivers/net/dsa/xrs700x/xrs700x.c +++ b/drivers/net/dsa/xrs700x/xrs700x.c @@ -466,13 +466,25 @@ static void xrs700x_phylink_get_caps(struct dsa_switch *ds, int port, } } -static void xrs700x_mac_link_up(struct dsa_switch *ds, int port, - unsigned int mode, phy_interface_t interface, +static void xrs700x_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) +{ +} + +static void xrs700x_mac_link_down(struct phylink_config *config, + unsigned int mode, phy_interface_t interface) +{ +} + +static void xrs700x_mac_link_up(struct phylink_config *config, struct phy_device *phydev, + unsigned int mode, phy_interface_t interface, int speed, int duplex, bool tx_pause, bool rx_pause) { - struct xrs700x *priv = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct xrs700x *priv = dp->ds->priv; + int port = dp->index; unsigned int val; switch (speed) { @@ -699,13 +711,18 @@ static int xrs700x_hsr_leave(struct dsa_switch *ds, int port, return 0; } +static const struct phylink_mac_ops xrs700x_phylink_mac_ops = { + .mac_config = xrs700x_mac_config, + .mac_link_down = xrs700x_mac_link_down, + .mac_link_up = xrs700x_mac_link_up, +}; + static const 
struct dsa_switch_ops xrs700x_ops = { .get_tag_protocol = xrs700x_get_tag_protocol, .setup = xrs700x_setup, .teardown = xrs700x_teardown, .port_stp_state_set = xrs700x_port_stp_state_set, .phylink_get_caps = xrs700x_phylink_get_caps, - .phylink_mac_link_up = xrs700x_mac_link_up, .get_strings = xrs700x_get_strings, .get_sset_count = xrs700x_get_sset_count, .get_ethtool_stats = xrs700x_get_ethtool_stats, @@ -763,6 +780,7 @@ struct xrs700x *xrs700x_switch_alloc(struct device *base, void *devpriv) INIT_DELAYED_WORK(&priv->mib_work, xrs700x_mib_work); ds->ops = &xrs700x_ops; + ds->phylink_mac_ops = &xrs700x_phylink_mac_ops; ds->priv = priv; priv->dev = base; diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c index ba3e7aa1a28f..4725a8cfd695 100644 --- a/drivers/net/ethernet/3com/3c515.c +++ b/drivers/net/ethernet/3com/3c515.c @@ -31,9 +31,6 @@ Setting to > 1512 effectively disables this feature. */ static int rx_copybreak = 200; -/* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */ -static const int mtu = 1500; - /* Maximum events (Rx packets, etc.) to handle at each interrupt. */ static int max_interrupt_work = 20; diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c index 5267e9dcd87e..be58dac0502a 100644 --- a/drivers/net/ethernet/3com/3c589_cs.c +++ b/drivers/net/ethernet/3com/3c589_cs.c @@ -502,7 +502,7 @@ static int el3_config(struct net_device *dev, struct ifmap *map) { if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) { if (map->port <= 3) { - dev->if_port = map->port; + WRITE_ONCE(dev->if_port, map->port); netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]); tc589_set_xcvr(dev, dev->if_port); } else { diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig index 706bd59bf645..1fbab79e2be4 100644 --- a/drivers/net/ethernet/3com/Kconfig +++ b/drivers/net/ethernet/3com/Kconfig @@ -44,7 +44,7 @@ config 3C515 config PCMCIA_3C574 tristate "3Com 3c574 PCMCIA support" - depends on PCMCIA + depends on PCMCIA && HAS_IOPORT help Say Y here if you intend to attach a 3Com 3c574 or compatible PCMCIA (PC-card) Fast Ethernet card to your computer. @@ -54,7 +54,7 @@ config PCMCIA_3C574 config PCMCIA_3C589 tristate "3Com 3c589 PCMCIA support" - depends on PCMCIA + depends on PCMCIA && HAS_IOPORT help Say Y here if you intend to attach a 3Com 3c589 or compatible PCMCIA (PC-card) Ethernet card to your computer. diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig index a4130e643342..345f250781c6 100644 --- a/drivers/net/ethernet/8390/Kconfig +++ b/drivers/net/ethernet/8390/Kconfig @@ -19,7 +19,7 @@ if NET_VENDOR_8390 config PCMCIA_AXNET tristate "Asix AX88190 PCMCIA support" - depends on PCMCIA + depends on PCMCIA && HAS_IOPORT help Say Y here if you intend to attach an Asix AX88190-based PCMCIA (PC-card) Fast Ethernet card to your computer. These cards are @@ -117,7 +117,7 @@ config NE2000 config NE2K_PCI tristate "PCI NE2000 and clones support (see help)" - depends on PCI + depends on PCI && HAS_IOPORT select CRC32 help This driver is for NE2000 compatible PCI cards. 
It will not work @@ -146,7 +146,7 @@ config APNE config PCMCIA_PCNET tristate "NE2000 compatible PCMCIA support" - depends on PCMCIA + depends on PCMCIA && HAS_IOPORT select CRC32 help Say Y here if you intend to attach an NE2000 compatible PCMCIA diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c index 05d39ecb97ff..e876fe52399a 100644 --- a/drivers/net/ethernet/8390/etherh.c +++ b/drivers/net/ethernet/8390/etherh.c @@ -258,7 +258,7 @@ static int etherh_set_config(struct net_device *dev, struct ifmap *map) * media type, turn off automedia detection. */ dev->flags &= ~IFF_AUTOMEDIA; - dev->if_port = map->port; + WRITE_ONCE(dev->if_port, map->port); break; default: diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c index 9bd5e991f1e5..780fb4afb6af 100644 --- a/drivers/net/ethernet/8390/pcnet_cs.c +++ b/drivers/net/ethernet/8390/pcnet_cs.c @@ -994,7 +994,7 @@ static int set_config(struct net_device *dev, struct ifmap *map) return -EOPNOTSUPP; else if ((map->port < 1) || (map->port > 2)) return -EINVAL; - dev->if_port = map->port; + WRITE_ONCE(dev->if_port, map->port); netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]); NS8390_init(dev, 1); } diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c index 8b4ef5121308..0713f1e2c7f3 100644 --- a/drivers/net/ethernet/adi/adin1110.c +++ b/drivers/net/ethernet/adi/adin1110.c @@ -11,10 +11,10 @@ #include <linux/crc8.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> +#include <linux/gpio/consumer.h> #include <linux/if_bridge.h> #include <linux/interrupt.h> #include <linux/iopoll.h> -#include <linux/gpio.h> #include <linux/kernel.h> #include <linux/mii.h> #include <linux/module.h> diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c index 3d9220f9c9fe..b325e0cef120 100644 --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c @@ -3852,7 +3852,7 @@ static int et131x_change_mtu(struct net_device *netdev, int new_mtu) et131x_disable_txrx(netdev); - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); et131x_adapter_memory_free(adapter); diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c index eafef84fe3be..3d8ac63132fb 100644 --- a/drivers/net/ethernet/alteon/acenic.c +++ b/drivers/net/ethernet/alteon/acenic.c @@ -2539,7 +2539,7 @@ static int ace_change_mtu(struct net_device *dev, int new_mtu) struct ace_regs __iomem *regs = ap->regs; writel(new_mtu + ETH_HLEN + 4, ®s->IfMtu); - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); if (new_mtu > ACE_STD_MTU) { if (!(ap->jumbo)) { diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 1c8763be0e4b..3c112c18ae6a 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -788,7 +788,7 @@ static int tse_change_mtu(struct net_device *dev, int new_mtu) return -EBUSY; } - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); netdev_update_features(dev); return 0; diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h index fea57eb8e58b..924f03f5a6c7 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_com.h @@ -47,7 +47,7 @@ /* ENA adaptive interrupt moderation settings */ #define ENA_INTR_INITIAL_TX_INTERVAL_USECS 64 -#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 0 +#define 
ENA_INTR_INITIAL_RX_INTERVAL_USECS 20 #define ENA_DEFAULT_INTR_DELAY_RESOLUTION 1 #define ENA_HASH_KEY_SIZE 40 @@ -305,6 +305,8 @@ struct ena_com_dev { u16 stats_func; /* Selected function for extended statistic dump */ u16 stats_queue; /* Selected queue for extended statistic dump */ + u32 ena_min_poll_delay_us; + struct ena_com_mmio_read mmio_read; struct ena_rss rss; @@ -325,8 +327,6 @@ struct ena_com_dev { struct ena_intr_moder_entry *intr_moder_tbl; struct ena_com_llq_info llq_info; - - u32 ena_min_poll_delay_us; }; struct ena_com_dev_get_features_ctx { diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c index 933e619b3a31..4c6e07aa4bbb 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c @@ -229,30 +229,43 @@ static struct ena_eth_io_rx_cdesc_base * idx * io_cq->cdesc_entry_size_in_bytes); } -static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq, - u16 *first_cdesc_idx) +static int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq, + u16 *first_cdesc_idx, + u16 *num_descs) { + u16 count = io_cq->cur_rx_pkt_cdesc_count, head_masked; struct ena_eth_io_rx_cdesc_base *cdesc; - u16 count = 0, head_masked; u32 last = 0; do { + u32 status; + cdesc = ena_com_get_next_rx_cdesc(io_cq); if (!cdesc) break; + status = READ_ONCE(cdesc->status); ena_com_cq_inc_head(io_cq); + if (unlikely((status & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT && count != 0)) { + struct ena_com_dev *dev = ena_com_io_cq_to_ena_dev(io_cq); + + netdev_err(dev->net_device, + "First bit is on in descriptor #%d on q_id: %d, req_id: %u\n", + count, io_cq->qid, cdesc->req_id); + return -EFAULT; + } count++; - last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >> - ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT; + last = (status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >> + ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT; } while (!last); if (last) { *first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx; - count += io_cq->cur_rx_pkt_cdesc_count; head_masked = io_cq->head & (io_cq->q_depth - 1); + *num_descs = count; io_cq->cur_rx_pkt_cdesc_count = 0; io_cq->cur_rx_pkt_cdesc_start_idx = head_masked; @@ -260,11 +273,11 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq, "ENA q_id: %d packets were completed. 
first desc idx %u descs# %d\n", io_cq->qid, *first_cdesc_idx, count); } else { - io_cq->cur_rx_pkt_cdesc_count += count; - count = 0; + io_cq->cur_rx_pkt_cdesc_count = count; + *num_descs = 0; } - return count; + return 0; } static int ena_com_create_meta(struct ena_com_io_sq *io_sq, @@ -539,10 +552,14 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq, u16 cdesc_idx = 0; u16 nb_hw_desc; u16 i = 0; + int rc; WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type"); - nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx); + rc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx, &nb_hw_desc); + if (unlikely(rc != 0)) + return -EFAULT; + if (nb_hw_desc == 0) { ena_rx_ctx->descs = nb_hw_desc; return 0; diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h index 72b019758caa..449bc4960ccc 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h @@ -47,7 +47,7 @@ struct ena_com_rx_ctx { bool frag; u32 hash; u16 descs; - int max_bufs; + u16 max_bufs; u8 pkt_offset; }; diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index 0cb6cc1cef56..b24cc3f05248 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -49,6 +49,7 @@ static const struct ena_stats ena_stats_global_strings[] = { ENA_STAT_GLOBAL_ENTRY(interface_up), ENA_STAT_GLOBAL_ENTRY(interface_down), ENA_STAT_GLOBAL_ENTRY(admin_q_pause), + ENA_STAT_GLOBAL_ENTRY(reset_fail), }; static const struct ena_stats ena_stats_eni_strings[] = { @@ -459,10 +460,18 @@ static void ena_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct ena_adapter *adapter = netdev_priv(dev); - - strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); - strscpy(info->bus_info, pci_name(adapter->pdev), - sizeof(info->bus_info)); + ssize_t ret = 0; + + ret = strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + if (ret < 0) + netif_dbg(adapter, drv, dev, + "module name will be truncated, status = %zd\n", ret); + + ret = strscpy(info->bus_info, pci_name(adapter->pdev), + sizeof(info->bus_info)); + if (ret < 0) + netif_dbg(adapter, drv, dev, + "bus info will be truncated, status = %zd\n", ret); } static void ena_get_ringparam(struct net_device *netdev, diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index be5acfa41ee0..184b6e6cbed4 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -42,7 +42,7 @@ MODULE_DEVICE_TABLE(pci, ena_pci_tbl); static int ena_rss_init_default(struct ena_adapter *adapter); static void check_for_admin_com_state(struct ena_adapter *adapter); -static void ena_destroy_device(struct ena_adapter *adapter, bool graceful); +static int ena_destroy_device(struct ena_adapter *adapter, bool graceful); static int ena_restore_device(struct ena_adapter *adapter); static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue) @@ -104,7 +104,7 @@ static int ena_change_mtu(struct net_device *dev, int new_mtu) if (!ret) { netif_dbg(adapter, drv, dev, "Set MTU to %d\n", new_mtu); update_rx_ring_mtu(adapter, new_mtu); - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); } else { netif_err(adapter, drv, dev, "Failed to set MTU to %d\n", new_mtu); @@ -1347,6 +1347,8 @@ error: if (rc == -ENOSPC) { ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, &rx_ring->syncp); 
ena_reset_device(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS); + } else if (rc == -EFAULT) { + ena_reset_device(adapter, ENA_REGS_RESET_RX_DESCRIPTOR_MALFORMED); } else { ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp); @@ -2701,6 +2703,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd { struct device *dev = &pdev->dev; struct ena_admin_host_info *host_info; + ssize_t ret; int rc; /* Allocate only the host info */ @@ -2715,11 +2718,19 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd host_info->bdf = pci_dev_id(pdev); host_info->os_type = ENA_ADMIN_OS_LINUX; host_info->kernel_ver = LINUX_VERSION_CODE; - strscpy(host_info->kernel_ver_str, utsname()->version, - sizeof(host_info->kernel_ver_str) - 1); + ret = strscpy(host_info->kernel_ver_str, utsname()->version, + sizeof(host_info->kernel_ver_str)); + if (ret < 0) + dev_dbg(dev, + "kernel version string will be truncated, status = %zd\n", ret); + host_info->os_dist = 0; - strscpy(host_info->os_dist_str, utsname()->release, - sizeof(host_info->os_dist_str)); + ret = strscpy(host_info->os_dist_str, utsname()->release, + sizeof(host_info->os_dist_str)); + if (ret < 0) + dev_dbg(dev, + "OS distribution string will be truncated, status = %zd\n", ret); + host_info->driver_version = (DRV_MODULE_GEN_MAJOR) | (DRV_MODULE_GEN_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | @@ -3235,14 +3246,15 @@ err_disable_msix: return rc; } -static void ena_destroy_device(struct ena_adapter *adapter, bool graceful) +static int ena_destroy_device(struct ena_adapter *adapter, bool graceful) { struct net_device *netdev = adapter->netdev; struct ena_com_dev *ena_dev = adapter->ena_dev; bool dev_up; + int rc = 0; if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) - return; + return 0; netif_carrier_off(netdev); @@ -3260,7 +3272,7 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful) * and device is up, ena_down() already reset the device. 
*/ if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up)) - ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); + rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); ena_free_mgmnt_irq(adapter); @@ -3279,6 +3291,8 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful) clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); + + return rc; } static int ena_restore_device(struct ena_adapter *adapter) @@ -3355,14 +3369,17 @@ err: static void ena_fw_reset_device(struct work_struct *work) { + int rc = 0; + struct ena_adapter *adapter = container_of(work, struct ena_adapter, reset_task); rtnl_lock(); if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { - ena_destroy_device(adapter, false); - ena_restore_device(adapter); + rc |= ena_destroy_device(adapter, false); + rc |= ena_restore_device(adapter); + adapter->dev_stats.reset_fail += !!rc; dev_err(&adapter->pdev->dev, "Device reset completed successfully\n"); } diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index 6d2cc20210cc..d59509747d1a 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -290,6 +290,7 @@ struct ena_stats_dev { u64 admin_q_pause; u64 rx_drops; u64 tx_drops; + u64 reset_fail; }; enum ena_flags_t { diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h index 2c3d6a77ea79..a2efebafd686 100644 --- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h @@ -22,6 +22,7 @@ enum ena_regs_reset_reason_types { ENA_REGS_RESET_GENERIC = 13, ENA_REGS_RESET_MISS_INTERRUPT = 14, ENA_REGS_RESET_SUSPECTED_POLL_STARVATION = 15, + ENA_REGS_RESET_RX_DESCRIPTOR_MALFORMED = 16, }; /* ena_registers offsets */ diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig index f8cc8925161c..b39c6f3e1eda 100644 --- a/drivers/net/ethernet/amd/Kconfig +++ b/drivers/net/ethernet/amd/Kconfig @@ -56,7 +56,7 @@ config LANCE config PCNET32 tristate "AMD PCnet32 PCI support" - depends on PCI + depends on PCI && HAS_IOPORT select CRC32 select MII help @@ -122,7 +122,7 @@ config MVME147_NET config PCMCIA_NMCLAN tristate "New Media PCMCIA support" - depends on PCMCIA + depends on PCMCIA && HAS_IOPORT help Say Y here if you intend to attach a New Media Ethernet or LiveWire PCMCIA (PC-card) Ethernet card to your computer. 
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index ea6cfc2095e1..f64f96fa17cf 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -1520,9 +1520,9 @@ static int amd8111e_change_mtu(struct net_device *dev, int new_mtu) if (!netif_running(dev)) { /* new_mtu will be used - * when device starts netxt time + * when device starts next time */ - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return 0; } @@ -1531,7 +1531,7 @@ static int amd8111e_change_mtu(struct net_device *dev, int new_mtu) /* stop the chip */ writel(RUN, lp->mmio + CMD0); - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); err = amd8111e_restart(dev); spin_unlock_irq(&lp->lock); @@ -1796,7 +1796,6 @@ static int amd8111e_probe_one(struct pci_dev *pdev, lp = netdev_priv(dev); lp->pci_dev = pdev; lp->amd8111e_net_dev = dev; - lp->pm_cap = pdev->pm_cap; spin_lock_init(&lp->lock); diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h index 9d570adb295b..305232f5476d 100644 --- a/drivers/net/ethernet/amd/amd8111e.h +++ b/drivers/net/ethernet/amd/amd8111e.h @@ -764,7 +764,6 @@ struct amd8111e_priv{ u32 ext_phy_id; struct amd8111e_link_config link_config; - int pm_cap; struct net_device *next; int mii; diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c index 0dd391c84c13..37054a670407 100644 --- a/drivers/net/ethernet/amd/nmclan_cs.c +++ b/drivers/net/ethernet/amd/nmclan_cs.c @@ -760,7 +760,7 @@ static int mace_config(struct net_device *dev, struct ifmap *map) { if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) { if (map->port <= 2) { - dev->if_port = map->port; + WRITE_ONCE(dev->if_port, map->port); netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]); } else return -EINVAL; diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h index a3e17a0c187a..14522d6d5f86 100644 --- a/drivers/net/ethernet/amd/pds_core/core.h +++ b/drivers/net/ethernet/amd/pds_core/core.h @@ -256,7 +256,8 @@ int pdsc_dl_flash_update(struct devlink *dl, int pdsc_dl_enable_get(struct devlink *dl, u32 id, struct devlink_param_gset_ctx *ctx); int pdsc_dl_enable_set(struct devlink *dl, u32 id, - struct devlink_param_gset_ctx *ctx); + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack); int pdsc_dl_enable_validate(struct devlink *dl, u32 id, union devlink_param_value val, struct netlink_ext_ack *extack); diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c index 54864f27c87a..2681889162a2 100644 --- a/drivers/net/ethernet/amd/pds_core/devlink.c +++ b/drivers/net/ethernet/amd/pds_core/devlink.c @@ -37,7 +37,8 @@ int pdsc_dl_enable_get(struct devlink *dl, u32 id, } int pdsc_dl_enable_set(struct devlink *dl, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct pdsc *pdsc = devlink_priv(dl); struct pdsc_viftype *vt_entry; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 6b73648b3779..c4a4e316683f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -2070,7 +2070,7 @@ static int xgbe_change_mtu(struct net_device *netdev, int mtu) return ret; pdata->rx_buf_size = ret; - netdev->mtu = mtu; + WRITE_ONCE(netdev->mtu, mtu); xgbe_restart_dev(pdata); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c 
b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c index 9131020d06af..7912b3b45148 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c @@ -538,7 +538,6 @@ static const struct xgbe_version_data xgbe_v1 = { .tx_tstamp_workaround = 1, }; -#ifdef CONFIG_ACPI static const struct acpi_device_id xgbe_acpi_match[] = { { .id = "AMDI8001", .driver_data = (kernel_ulong_t)&xgbe_v1 }, @@ -546,9 +545,7 @@ static const struct acpi_device_id xgbe_acpi_match[] = { }; MODULE_DEVICE_TABLE(acpi, xgbe_acpi_match); -#endif -#ifdef CONFIG_OF static const struct of_device_id xgbe_of_match[] = { { .compatible = "amd,xgbe-seattle-v1a", .data = &xgbe_v1 }, @@ -556,7 +553,6 @@ static const struct of_device_id xgbe_of_match[] = { }; MODULE_DEVICE_TABLE(of, xgbe_of_match); -#endif static SIMPLE_DEV_PM_OPS(xgbe_platform_pm_ops, xgbe_platform_suspend, xgbe_platform_resume); @@ -564,12 +560,8 @@ static SIMPLE_DEV_PM_OPS(xgbe_platform_pm_ops, static struct platform_driver xgbe_driver = { .driver = { .name = XGBE_DRV_NAME, -#ifdef CONFIG_ACPI .acpi_match_table = xgbe_acpi_match, -#endif -#ifdef CONFIG_OF .of_match_table = xgbe_of_match, -#endif .pm = &xgbe_platform_pm_ops, }, .probe = xgbe_platform_probe, diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 44900026d11b..4af9d89d5f88 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -1530,7 +1530,7 @@ static int xgene_change_mtu(struct net_device *ndev, int new_mtu) frame_size = (new_mtu > ETH_DATA_LEN) ? (new_mtu + 18) : 0x600; xgene_enet_close(ndev); - ndev->mtu = new_mtu; + WRITE_ONCE(ndev->mtu, new_mtu); pdata->mac_ops->set_framesize(pdata, frame_size); xgene_enet_open(ndev); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index 0b2a52199914..c1d1673c5749 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -146,7 +146,7 @@ static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu) if (err < 0) goto err_exit; - ndev->mtu = new_mtu; + WRITE_ONCE(ndev->mtu, new_mtu); err_exit: return err; diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c index 0f2f400b5bc4..a38be924cdaa 100644 --- a/drivers/net/ethernet/atheros/ag71xx.c +++ b/drivers/net/ethernet/atheros/ag71xx.c @@ -1788,7 +1788,7 @@ static int ag71xx_change_mtu(struct net_device *ndev, int new_mtu) { struct ag71xx *ag = netdev_priv(ndev); - ndev->mtu = new_mtu; + WRITE_ONCE(ndev->mtu, new_mtu); ag71xx_wr(ag, AG71XX_REG_MAC_MFL, ag71xx_max_frame_len(ndev->mtu)); diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 49bb9a8f00e6..3d28654e5df7 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1176,7 +1176,7 @@ static int alx_change_mtu(struct net_device *netdev, int mtu) struct alx_priv *alx = netdev_priv(netdev); int max_frame = ALX_MAX_FRAME_LEN(mtu); - netdev->mtu = mtu; + WRITE_ONCE(netdev->mtu, mtu); alx->hw.mtu = mtu; alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE); netdev_update_features(netdev); diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 46cdc32b4e31..c571614b1d50 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ 
b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -561,7 +561,7 @@ static int atl1c_change_mtu(struct net_device *netdev, int new_mtu) if (netif_running(netdev)) { while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) msleep(1); - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); adapter->hw.max_frame_size = new_mtu; atl1c_set_rxbufsize(adapter, netdev); atl1c_down(adapter); diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 5f2a6fcba967..9b778b34b67e 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -428,7 +428,7 @@ static int atl1e_change_mtu(struct net_device *netdev, int new_mtu) if (netif_running(netdev)) { while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) msleep(1); - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); adapter->hw.max_frame_size = new_mtu; adapter->hw.rx_jumbo_th = (max_frame + 7) >> 3; atl1e_down(adapter); diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index a9014d7932db..3afd3627ce48 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -2687,7 +2687,7 @@ static int atl1_change_mtu(struct net_device *netdev, int new_mtu) adapter->rx_buffer_len = (max_frame + 7) & ~7; adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8; - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); if (netif_running(netdev)) { atl1_down(adapter); atl1_up(adapter); diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index bcfc9488125b..fa9a4919f25d 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -905,7 +905,7 @@ static int atl2_change_mtu(struct net_device *netdev, int new_mtu) struct atl2_hw *hw = &adapter->hw; /* set MTU */ - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); hw->max_frame_size = new_mtu; ATL2_WRITE_REG(hw, REG_MTU, new_mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 1be6d14030bc..e5809ad5eb82 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -1042,13 +1042,13 @@ static int b44_change_mtu(struct net_device *dev, int new_mtu) /* We'll just catch it later when the * device is up'd. 
*/ - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return 0; } spin_lock_irq(&bp->lock); b44_halt(bp); - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); b44_init_rings(bp); b44_init_hw(bp, B44_FULL_RESET); spin_unlock_irq(&bp->lock); diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 3196c4dea076..3c0e3b9828be 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1652,7 +1652,7 @@ static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu) priv->rx_frag_size = SKB_DATA_ALIGN(priv->rx_buf_offset + priv->rx_buf_size) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return 0; } diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index b65b8592ad75..6ec773e61182 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -7912,7 +7912,7 @@ bnx2_change_mtu(struct net_device *dev, int new_mtu) { struct bnx2 *bp = netdev_priv(dev); - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size, false); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index c9b6acd8c892..a8e07e51418f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -4902,7 +4902,7 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu) * because the actual alloc size is * only updated as part of load */ - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); if (!bnx2x_mtu_allows_gro(new_mtu)) dev->features &= ~NETIF_F_GRO_HW; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 2c2ee79c4d77..c437ca1c0fd3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -76,7 +76,7 @@ NETIF_MSG_TX_ERR) MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Broadcom BCM573xx network driver"); +MODULE_DESCRIPTION("Broadcom NetXtreme network driver"); #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN) #define BNXT_RX_DMA_OFFSET NET_SKB_PAD @@ -137,6 +137,7 @@ static const struct { [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" }, [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" }, [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" }, + [NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" }, }; static const struct pci_device_id bnxt_pci_tbl[] = { @@ -211,6 +212,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = { { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF }, { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV }, { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV }, + { PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF }, { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF }, #endif { 0 } @@ -294,7 +296,7 @@ static bool bnxt_vf_pciid(enum board_idx idx) return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF || idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV || idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF || - idx == NETXTREME_E_P5_VF_HV); + idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF); } #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID) @@ -1296,9 +1298,9 @@ static int 
bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, return RX_AGG_CMP_VALID(agg, *raw_cons); } -static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data, - unsigned int len, - dma_addr_t mapping) +static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data, + unsigned int len, + dma_addr_t mapping) { struct bnxt *bp = bnapi->bp; struct pci_dev *pdev = bp->pdev; @@ -1318,6 +1320,39 @@ static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data, bp->rx_dir); skb_put(skb, len); + + return skb; +} + +static struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data, + unsigned int len, + dma_addr_t mapping) +{ + return bnxt_copy_data(bnapi, data, len, mapping); +} + +static struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi, + struct xdp_buff *xdp, + unsigned int len, + dma_addr_t mapping) +{ + unsigned int metasize = 0; + u8 *data = xdp->data; + struct sk_buff *skb; + + len = xdp->data_end - xdp->data_meta; + metasize = xdp->data - xdp->data_meta; + data = xdp->data_meta; + + skb = bnxt_copy_data(bnapi, data, len, mapping); + if (!skb) + return skb; + + if (metasize) { + skb_metadata_set(skb, metasize); + __skb_pull(skb, metasize); + } + return skb; } @@ -1778,7 +1813,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping); if (!skb) { bnxt_abort_tpa(cpr, idx, agg_bufs); - cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1; + cpr->sw_stats->rx.rx_oom_discards += 1; return NULL; } } else { @@ -1788,7 +1823,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC); if (!new_data) { bnxt_abort_tpa(cpr, idx, agg_bufs); - cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1; + cpr->sw_stats->rx.rx_oom_discards += 1; return NULL; } @@ -1804,7 +1839,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, if (!skb) { skb_free_frag(data); bnxt_abort_tpa(cpr, idx, agg_bufs); - cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1; + cpr->sw_stats->rx.rx_oom_discards += 1; return NULL; } skb_reserve(skb, bp->rx_offset); @@ -1815,7 +1850,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true); if (!skb) { /* Page reuse already handled by bnxt_rx_pages(). 
*/ - cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1; + cpr->sw_stats->rx.rx_oom_discards += 1; return NULL; } } @@ -2073,7 +2108,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, rc = -EIO; if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { - bnapi->cp_ring.sw_stats.rx.rx_buf_errors++; + bnapi->cp_ring.sw_stats->rx.rx_buf_errors++; if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) { netdev_warn_once(bp->dev, "RX buffer error %x\n", @@ -2101,14 +2136,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, } if (xdp_active) { - if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &data_ptr, &len, event)) { + if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) { rc = 1; goto next_rx; } } if (len <= bp->rx_copy_thresh) { - skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); + if (!xdp_active) + skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); + else + skb = bnxt_copy_xdp(bnapi, &xdp, len, dma_addr); bnxt_reuse_rx_data(rxr, cons, data); if (!skb) { if (agg_bufs) { @@ -2186,7 +2224,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, } else { if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) { if (dev->features & NETIF_F_RXCSUM) - bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++; + bnapi->cp_ring.sw_stats->rx.rx_l4_csum_errors++; } } @@ -2223,7 +2261,7 @@ next_rx_no_prod_no_len: return rc; oom_next_rx: - cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1; + cpr->sw_stats->rx.rx_oom_discards += 1; rc = -ENOMEM; goto next_rx; } @@ -2272,7 +2310,7 @@ static int bnxt_force_rx_discard(struct bnxt *bp, } rc = bnxt_rx_pkt(bp, cpr, raw_cons, event); if (rc && rc != -EBUSY) - cpr->bnapi->cp_ring.sw_stats.rx.rx_netpoll_discards += 1; + cpr->sw_stats->rx.rx_netpoll_discards += 1; return rc; } @@ -2481,6 +2519,9 @@ static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2) } return false; } + case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED: + netdev_warn(bp->dev, "Speed change not supported with dual rate transceivers on this board\n"); + break; default: netdev_err(bp->dev, "FW reported unknown error type %u\n", err_type); @@ -3551,14 +3592,15 @@ static void bnxt_free_rx_rings(struct bnxt *bp) } static int bnxt_alloc_rx_page_pool(struct bnxt *bp, - struct bnxt_rx_ring_info *rxr) + struct bnxt_rx_ring_info *rxr, + int numa_node) { struct page_pool_params pp = { 0 }; pp.pool_size = bp->rx_agg_ring_size; if (BNXT_RX_PAGE_MODE(bp)) pp.pool_size += bp->rx_ring_size; - pp.nid = dev_to_node(&bp->pdev->dev); + pp.nid = numa_node; pp.napi = &rxr->bnapi->napi; pp.netdev = bp->dev; pp.dev = &bp->pdev->dev; @@ -3578,7 +3620,8 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp, static int bnxt_alloc_rx_rings(struct bnxt *bp) { - int i, rc = 0, agg_rings = 0; + int numa_node = dev_to_node(&bp->pdev->dev); + int i, rc = 0, agg_rings = 0, cpu; if (!bp->rx_ring) return -ENOMEM; @@ -3589,10 +3632,15 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp) for (i = 0; i < bp->rx_nr_rings; i++) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring; + int cpu_node; ring = &rxr->rx_ring_struct; - rc = bnxt_alloc_rx_page_pool(bp, rxr); + cpu = cpumask_local_spread(i, numa_node); + cpu_node = cpu_to_node(cpu); + netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n", + i, cpu_node); + rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node); if (rc) return rc; @@ -3851,13 +3899,12 @@ static int 
bnxt_alloc_cp_sub_ring(struct bnxt *bp, static int bnxt_alloc_cp_rings(struct bnxt *bp) { bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS); - int i, j, rc, ulp_base_vec, ulp_msix; + int i, j, rc, ulp_msix; int tcs = bp->num_tc; if (!tcs) tcs = 1; ulp_msix = bnxt_get_ulp_msix_num(bp); - ulp_base_vec = bnxt_get_ulp_msix_base(bp); for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr, *cpr2; @@ -3876,10 +3923,7 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp) if (rc) return rc; - if (ulp_msix && i >= ulp_base_vec) - ring->map_idx = i + ulp_msix; - else - ring->map_idx = i; + ring->map_idx = ulp_msix + i; if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) continue; @@ -3909,6 +3953,7 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp) if (rc) return rc; cpr2->bnapi = bnapi; + cpr2->sw_stats = cpr->sw_stats; cpr2->cp_idx = k; if (!k && rx) { bp->rx_ring[i].rx_cpr = cpr2; @@ -4233,6 +4278,7 @@ static void bnxt_init_vnics(struct bnxt *bp) int j; vnic->fw_vnic_id = INVALID_HW_RING_ID; + vnic->vnic_id = i; for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID; @@ -4749,6 +4795,9 @@ static void bnxt_free_ring_stats(struct bnxt *bp) struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; bnxt_free_stats_mem(bp, &cpr->stats); + + kfree(cpr->sw_stats); + cpr->sw_stats = NULL; } } @@ -4763,6 +4812,10 @@ static int bnxt_alloc_stats(struct bnxt *bp) struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + cpr->sw_stats = kzalloc(sizeof(*cpr->sw_stats), GFP_KERNEL); + if (!cpr->sw_stats) + return -ENOMEM; + cpr->stats.len = size; rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i); if (rc) @@ -5780,8 +5833,22 @@ void bnxt_fill_ipv6_mask(__be32 mask[4]) static void bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp, struct hwrm_cfa_ntuple_filter_alloc_input *req, - u16 rxq) + struct bnxt_ntuple_filter *fltr) { + struct bnxt_rss_ctx *rss_ctx, *tmp; + u16 rxq = fltr->base.rxq; + + if (fltr->base.flags & BNXT_ACT_RSS_CTX) { + list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) { + if (rss_ctx->index == fltr->base.fw_vnic_id) { + struct bnxt_vnic_info *vnic = &rss_ctx->vnic; + + req->dst_id = cpu_to_le16(vnic->fw_vnic_id); + break; + } + } + return; + } if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) { struct bnxt_vnic_info *vnic; u32 enables; @@ -5822,7 +5889,7 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, req->flags = cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP); } else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { - bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr->base.rxq); + bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr); } else { vnic = &bp->vnic_info[fltr->base.rxq + 1]; req->dst_id = cpu_to_le16(vnic->fw_vnic_id); @@ -5930,9 +5997,9 @@ static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp, req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap); } -static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) +int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic, + u32 tpa_flags) { - struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX; struct hwrm_vnic_tpa_cfg_input *req; int rc; @@ -6017,9 +6084,10 @@ static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr) return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct); } -static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp) +int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx) { int entries; + u16 *tbl; if 
(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5; @@ -6027,16 +6095,22 @@ static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp) entries = HW_HASH_INDEX_SIZE; bp->rss_indir_tbl_entries = entries; - bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), - GFP_KERNEL); - if (!bp->rss_indir_tbl) + tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL); + if (!tbl) return -ENOMEM; + + if (rss_ctx) + rss_ctx->rss_indir_tbl = tbl; + else + bp->rss_indir_tbl = tbl; + return 0; } -static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp) +void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx) { u16 max_rings, max_entries, pad, i; + u16 *rss_indir_tbl; if (!bp->rx_nr_rings) return; @@ -6047,13 +6121,17 @@ static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp) max_rings = bp->rx_nr_rings; max_entries = bnxt_get_rxfh_indir_size(bp->dev); + if (rss_ctx) + rss_indir_tbl = &rss_ctx->rss_indir_tbl[0]; + else + rss_indir_tbl = &bp->rss_indir_tbl[0]; for (i = 0; i < max_entries; i++) - bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings); + rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings); pad = bp->rss_indir_tbl_entries - max_entries; if (pad) - memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); + memset(&rss_indir_tbl[i], 0, pad * sizeof(u16)); } static u16 bnxt_get_max_rss_ring(struct bnxt *bp) @@ -6109,6 +6187,8 @@ static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp, if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG) j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings); + else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG) + j = vnic->rss_ctx->rss_indir_tbl[i]; else j = bp->rss_indir_tbl[i]; rxr = &bp->rx_ring[j]; @@ -6146,9 +6226,9 @@ __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req, req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); } -static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) +static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic, + bool set_rss) { - struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; struct hwrm_vnic_rss_cfg_input *req; int rc; @@ -6166,9 +6246,9 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) return hwrm_req_send(bp, req); } -static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss) +static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, + struct bnxt_vnic_info *vnic, bool set_rss) { - struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; struct hwrm_vnic_rss_cfg_input *req; dma_addr_t ring_tbl_map; u32 i, nr_ctxs; @@ -6221,9 +6301,8 @@ static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp) hwrm_req_drop(bp, req); } -static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) +static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic) { - struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; struct hwrm_vnic_plcmodes_cfg_input *req; int rc; @@ -6248,7 +6327,8 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) return hwrm_req_send(bp, req); } -static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id, +static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, + struct bnxt_vnic_info *vnic, u16 ctx_idx) { struct hwrm_vnic_rss_cos_lb_ctx_free_input *req; @@ -6257,10 +6337,10 @@ static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id, return; req->rss_cos_lb_ctx_id = - cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]); + 
cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]); hwrm_req_send(bp, req); - bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; + vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; } static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) @@ -6272,13 +6352,14 @@ static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) { if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID) - bnxt_hwrm_vnic_ctx_free_one(bp, i, j); + bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j); } } bp->rsscos_nr_ctxs = 0; } -static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) +static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, + struct bnxt_vnic_info *vnic, u16 ctx_idx) { struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp; struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req; @@ -6291,7 +6372,7 @@ static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) resp = hwrm_req_hold(bp, req); rc = hwrm_req_send(bp, req); if (!rc) - bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = + vnic->fw_rss_cos_lb_ctx[ctx_idx] = le16_to_cpu(resp->rss_cos_lb_ctx_id); hwrm_req_drop(bp, req); @@ -6305,10 +6386,9 @@ static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp) return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE; } -int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) +int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic) { struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT]; - struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; struct hwrm_vnic_cfg_input *req; unsigned int ring = 0, grp_idx; u16 def_vlan = 0; @@ -6356,8 +6436,8 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) if (vnic->flags & BNXT_VNIC_RSS_FLAG) ring = 0; else if (vnic->flags & BNXT_VNIC_RFS_FLAG) - ring = vnic_id - 1; - else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp)) + ring = vnic->vnic_id - 1; + else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp)) ring = bp->rx_nr_rings - 1; grp_idx = bp->rx_ring[ring].bnapi->index; @@ -6373,25 +6453,25 @@ vnic_mru: #endif if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); - if (!vnic_id && bnxt_ulp_registered(bp->edev)) + if (vnic->vnic_id == BNXT_VNIC_DEFAULT && bnxt_ulp_registered(bp->edev)) req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); return hwrm_req_send(bp, req); } -static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) +static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, + struct bnxt_vnic_info *vnic) { - if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { + if (vnic->fw_vnic_id != INVALID_HW_RING_ID) { struct hwrm_vnic_free_input *req; if (hwrm_req_init(bp, req, HWRM_VNIC_FREE)) return; - req->vnic_id = - cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id); + req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); hwrm_req_send(bp, req); - bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; + vnic->fw_vnic_id = INVALID_HW_RING_ID; } } @@ -6400,15 +6480,14 @@ static void bnxt_hwrm_vnic_free(struct bnxt *bp) u16 i; for (i = 0; i < bp->nr_vnics; i++) - bnxt_hwrm_vnic_free_one(bp, i); + bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]); } -static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, - unsigned int start_rx_ring_idx, - unsigned int nr_rings) +int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic, + unsigned int start_rx_ring_idx, + unsigned int nr_rings) { unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; - struct bnxt_vnic_info *vnic = 
&bp->vnic_info[vnic_id]; struct hwrm_vnic_alloc_output *resp; struct hwrm_vnic_alloc_input *req; int rc; @@ -6434,7 +6513,7 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, vnic_no_ring_grps: for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID; - if (vnic_id == BNXT_VNIC_DEFAULT) + if (vnic->vnic_id == BNXT_VNIC_DEFAULT) req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); resp = hwrm_req_hold(bp, req); @@ -7266,17 +7345,7 @@ static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) int bnxt_nq_rings_in_use(struct bnxt *bp) { - int cp = bp->cp_nr_rings; - int ulp_msix, ulp_base; - - ulp_msix = bnxt_get_ulp_msix_num(bp); - if (ulp_msix) { - ulp_base = bnxt_get_ulp_msix_base(bp); - cp += ulp_msix; - if ((ulp_base + ulp_msix) > cp) - cp = ulp_base + ulp_msix; - } - return cp; + return bp->cp_nr_rings + bnxt_get_ulp_msix_num(bp); } static int bnxt_cp_rings_in_use(struct bnxt *bp) @@ -7292,16 +7361,7 @@ static int bnxt_cp_rings_in_use(struct bnxt *bp) static int bnxt_get_func_stat_ctxs(struct bnxt *bp) { - int ulp_stat = bnxt_get_ulp_stat_ctxs(bp); - int cp = bp->cp_nr_rings; - - if (!ulp_stat) - return cp; - - if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp)) - return bnxt_get_ulp_msix_base(bp) + ulp_stat; - - return cp + ulp_stat; + return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp); } static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr) @@ -7333,7 +7393,7 @@ static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp) if (hw_resc->resv_rx_rings != bp->rx_nr_rings) { hw_resc->resv_rx_rings = bp->rx_nr_rings; if (!netif_is_rxfh_configured(bp->dev)) - bnxt_set_dflt_rss_indir_tbl(bp); + bnxt_set_dflt_rss_indir_tbl(bp, NULL); } } @@ -7341,7 +7401,7 @@ static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings) { if (bp->flags & BNXT_FLAG_RFS) { if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) - return 2; + return 2 + bp->num_rss_ctx; if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) return rx_rings + 1; } @@ -7409,17 +7469,32 @@ static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr) hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)); } +static int bnxt_get_avail_msix(struct bnxt *bp, int num); + static int __bnxt_reserve_rings(struct bnxt *bp) { struct bnxt_hw_rings hwr = {0}; + int cp = bp->cp_nr_rings; int rx_rings, rc; + int ulp_msix = 0; bool sh = false; int tx_cp; if (!bnxt_need_reserve_rings(bp)) return 0; - hwr.cp = bnxt_nq_rings_in_use(bp); + if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) { + ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want); + if (!ulp_msix) + bnxt_set_ulp_stat_ctxs(bp, 0); + + if (ulp_msix > bp->ulp_num_msix_want) + ulp_msix = bp->ulp_num_msix_want; + hwr.cp = cp + ulp_msix; + } else { + hwr.cp = bnxt_nq_rings_in_use(bp); + } + hwr.tx = bp->tx_nr_rings; hwr.rx = bp->rx_nr_rings; if (bp->flags & BNXT_FLAG_SHARED_RINGS) @@ -7489,7 +7564,20 @@ static int __bnxt_reserve_rings(struct bnxt *bp) return -ENOMEM; if (!netif_is_rxfh_configured(bp->dev)) - bnxt_set_dflt_rss_indir_tbl(bp); + bnxt_set_dflt_rss_indir_tbl(bp, NULL); + + if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) { + int resv_msix, resv_ctx, ulp_ctxs; + struct bnxt_hw_resc *hw_resc; + + hw_resc = &bp->hw_resc; + resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings; + ulp_msix = min_t(int, resv_msix, ulp_msix); + bnxt_set_ulp_msix_num(bp, ulp_msix); + resv_ctx = hw_resc->resv_stat_ctxs - bp->cp_nr_rings; + ulp_ctxs = min(resv_ctx, bnxt_get_ulp_stat_ctxs(bp)); + 
bnxt_set_ulp_stat_ctxs(bp, ulp_ctxs); + } return rc; } @@ -9668,7 +9756,7 @@ static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) else if (BNXT_NO_FW_ACCESS(bp)) return 0; for (i = 0; i < bp->nr_vnics; i++) { - rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags); + rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags); if (rc) { netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", i, rc); @@ -9683,7 +9771,7 @@ static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp) int i; for (i = 0; i < bp->nr_vnics; i++) - bnxt_hwrm_vnic_set_rss(bp, i, false); + bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false); } static void bnxt_clear_vnic(struct bnxt *bp) @@ -9761,28 +9849,27 @@ static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) return hwrm_req_send(bp, req); } -static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) +static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic) { - struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; int rc; if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) goto skip_rss_ctx; /* allocate context for vnic */ - rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0); + rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0); if (rc) { netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", - vnic_id, rc); + vnic->vnic_id, rc); goto vnic_setup_err; } bp->rsscos_nr_ctxs++; if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { - rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1); + rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1); if (rc) { netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n", - vnic_id, rc); + vnic->vnic_id, rc); goto vnic_setup_err; } bp->rsscos_nr_ctxs++; @@ -9790,26 +9877,26 @@ static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) skip_rss_ctx: /* configure default vnic, ring grp */ - rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); + rc = bnxt_hwrm_vnic_cfg(bp, vnic); if (rc) { netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", - vnic_id, rc); + vnic->vnic_id, rc); goto vnic_setup_err; } /* Enable RSS hashing on vnic */ - rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true); + rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true); if (rc) { netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n", - vnic_id, rc); + vnic->vnic_id, rc); goto vnic_setup_err; } if (bp->flags & BNXT_FLAG_AGG_RINGS) { - rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); + rc = bnxt_hwrm_vnic_set_hds(bp, vnic); if (rc) { netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", - vnic_id, rc); + vnic->vnic_id, rc); } } @@ -9817,16 +9904,33 @@ vnic_setup_err: return rc; } -static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id) +int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + int rc; + + rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", + vnic->vnic_id, rc); + return rc; + } + rc = bnxt_hwrm_vnic_cfg(bp, vnic); + if (rc) + netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", + vnic->vnic_id, rc); + return rc; +} + +int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic) { int rc, i, nr_ctxs; nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); for (i = 0; i < nr_ctxs; i++) { - rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i); + rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i); if (rc) { netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n", - vnic_id, i, rc); + vnic->vnic_id, i, rc); break; } bp->rsscos_nr_ctxs++; @@ -9834,63 +9938,57 @@ static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id) if (i < nr_ctxs) return -ENOMEM; - rc = 
bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true); - if (rc) { - netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", - vnic_id, rc); - return rc; - } - rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); - if (rc) { - netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", - vnic_id, rc); + rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic); + if (rc) return rc; - } + if (bp->flags & BNXT_FLAG_AGG_RINGS) { - rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); + rc = bnxt_hwrm_vnic_set_hds(bp, vnic); if (rc) { netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", - vnic_id, rc); + vnic->vnic_id, rc); } } return rc; } -static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) +static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic) { if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) - return __bnxt_setup_vnic_p5(bp, vnic_id); + return __bnxt_setup_vnic_p5(bp, vnic); else - return __bnxt_setup_vnic(bp, vnic_id); + return __bnxt_setup_vnic(bp, vnic); } -static int bnxt_alloc_and_setup_vnic(struct bnxt *bp, u16 vnic_id, +static int bnxt_alloc_and_setup_vnic(struct bnxt *bp, + struct bnxt_vnic_info *vnic, u16 start_rx_ring_idx, int rx_rings) { int rc; - rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, start_rx_ring_idx, rx_rings); + rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings); if (rc) { netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", - vnic_id, rc); + vnic->vnic_id, rc); return rc; } - return bnxt_setup_vnic(bp, vnic_id); + return bnxt_setup_vnic(bp, vnic); } static int bnxt_alloc_rfs_vnics(struct bnxt *bp) { + struct bnxt_vnic_info *vnic; int i, rc = 0; - if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) - return bnxt_alloc_and_setup_vnic(bp, BNXT_VNIC_NTUPLE, 0, - bp->rx_nr_rings); + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) { + vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE]; + return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings); + } if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) return 0; for (i = 0; i < bp->rx_nr_rings; i++) { - struct bnxt_vnic_info *vnic; u16 vnic_id = i + 1; u16 ring_id = i; @@ -9901,12 +9999,104 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp) vnic->flags |= BNXT_VNIC_RFS_FLAG; if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG; - if (bnxt_alloc_and_setup_vnic(bp, vnic_id, ring_id, 1)) + if (bnxt_alloc_and_setup_vnic(bp, &bp->vnic_info[vnic_id], ring_id, 1)) break; } return rc; } +void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, + bool all) +{ + struct bnxt_vnic_info *vnic = &rss_ctx->vnic; + struct bnxt_filter_base *usr_fltr, *tmp; + struct bnxt_ntuple_filter *ntp_fltr; + int i; + + bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic); + for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) { + if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID) + bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i); + } + if (!all) + return; + + list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) { + if ((usr_fltr->flags & BNXT_ACT_RSS_CTX) && + usr_fltr->fw_vnic_id == rss_ctx->index) { + ntp_fltr = container_of(usr_fltr, + struct bnxt_ntuple_filter, + base); + bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr); + bnxt_del_ntp_filter(bp, ntp_fltr); + bnxt_del_one_usr_fltr(bp, usr_fltr); + } + } + + if (vnic->rss_table) + dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size, + vnic->rss_table, + vnic->rss_table_dma_addr); + kfree(rss_ctx->rss_indir_tbl); + list_del(&rss_ctx->list); + bp->num_rss_ctx--; + clear_bit(rss_ctx->index, bp->rss_ctx_bmap); + kfree(rss_ctx); +} + +static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp) +{ + bool set_tpa = 
!!(bp->flags & BNXT_FLAG_TPA); + struct bnxt_rss_ctx *rss_ctx, *tmp; + + list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) { + struct bnxt_vnic_info *vnic = &rss_ctx->vnic; + + if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) || + bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) || + __bnxt_setup_vnic_p5(bp, vnic)) { + netdev_err(bp->dev, "Failed to restore RSS ctx %d\n", + rss_ctx->index); + bnxt_del_one_rss_ctx(bp, rss_ctx, true); + } + } +} + +struct bnxt_rss_ctx *bnxt_alloc_rss_ctx(struct bnxt *bp) +{ + struct bnxt_rss_ctx *rss_ctx = NULL; + + rss_ctx = kzalloc(sizeof(*rss_ctx), GFP_KERNEL); + if (rss_ctx) { + rss_ctx->vnic.rss_ctx = rss_ctx; + list_add_tail(&rss_ctx->list, &bp->rss_ctx_list); + bp->num_rss_ctx++; + } + return rss_ctx; +} + +void bnxt_clear_rss_ctxs(struct bnxt *bp, bool all) +{ + struct bnxt_rss_ctx *rss_ctx, *tmp; + + list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) + bnxt_del_one_rss_ctx(bp, rss_ctx, all); + + if (all) + bitmap_free(bp->rss_ctx_bmap); +} + +static void bnxt_init_multi_rss_ctx(struct bnxt *bp) +{ + bp->rss_ctx_bmap = bitmap_zalloc(BNXT_RSS_CTX_BMAP_LEN, GFP_KERNEL); + if (bp->rss_ctx_bmap) { + /* burn index 0 since we cannot have context 0 */ + __set_bit(0, bp->rss_ctx_bmap); + INIT_LIST_HEAD(&bp->rss_ctx_list); + bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX; + } +} + /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */ static bool bnxt_promisc_ok(struct bnxt *bp) { @@ -9919,16 +10109,17 @@ static bool bnxt_promisc_ok(struct bnxt *bp) static int bnxt_setup_nitroa0_vnic(struct bnxt *bp) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[1]; unsigned int rc = 0; - rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1); + rc = bnxt_hwrm_vnic_alloc(bp, vnic, bp->rx_nr_rings - 1, 1); if (rc) { netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", rc); return rc; } - rc = bnxt_hwrm_vnic_cfg(bp, 1); + rc = bnxt_hwrm_vnic_cfg(bp, vnic); if (rc) { netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", rc); @@ -9971,7 +10162,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) rx_nr_rings--; /* default vnic 0 */ - rc = bnxt_hwrm_vnic_alloc(bp, BNXT_VNIC_DEFAULT, 0, rx_nr_rings); + rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings); if (rc) { netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); goto err_out; @@ -9980,7 +10171,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) if (BNXT_VF(bp)) bnxt_hwrm_func_qcfg(bp); - rc = bnxt_setup_vnic(bp, BNXT_VNIC_DEFAULT); + rc = bnxt_setup_vnic(bp, vnic); if (rc) goto err_out; if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA) @@ -10295,19 +10486,10 @@ unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp) return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp); } -int bnxt_get_avail_msix(struct bnxt *bp, int num) +static int bnxt_get_avail_msix(struct bnxt *bp, int num) { - int max_cp = bnxt_get_max_func_cp_rings(bp); int max_irq = bnxt_get_max_func_irqs(bp); int total_req = bp->cp_nr_rings + num; - int max_idx, avail_msix; - - max_idx = bp->total_irqs; - if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) - max_idx = min_t(int, bp->total_irqs, max_cp); - avail_msix = max_idx - bp->cp_nr_rings; - if (!BNXT_NEW_RM(bp) || avail_msix >= num) - return avail_msix; if (max_irq < total_req) { num = max_irq - bp->cp_nr_rings; @@ -10434,13 +10616,23 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) { bool irq_cleared = false; int tcs = bp->num_tc; + int irqs_required; int rc; if 
(!bnxt_need_reserve_rings(bp)) return 0; - if (irq_re_init && BNXT_NEW_RM(bp) && - bnxt_get_num_msix(bp) != bp->total_irqs) { + if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) { + int ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want); + + if (ulp_msix > bp->ulp_num_msix_want) + ulp_msix = bp->ulp_num_msix_want; + irqs_required = ulp_msix + bp->cp_nr_rings; + } else { + irqs_required = bnxt_get_num_msix(bp); + } + + if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) { bnxt_ulp_irq_stop(bp); bnxt_clear_int_mode(bp); irq_cleared = true; @@ -10622,9 +10814,9 @@ static void bnxt_disable_napi(struct bnxt *bp) cpr = &bnapi->cp_ring; if (bnapi->tx_fault) - cpr->sw_stats.tx.tx_resets++; + cpr->sw_stats->tx.tx_resets++; if (bnapi->in_reset) - cpr->sw_stats.rx.rx_resets++; + cpr->sw_stats->rx.rx_resets++; napi_disable(&bnapi->napi); if (bnapi->rx_ring) cancel_work_sync(&cpr->dim.work); @@ -11359,7 +11551,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) if (fw_reset) { set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) - bnxt_ulp_stop(bp); + bnxt_ulp_irq_stop(bp); bnxt_free_ctx_mem(bp); bnxt_dcb_free(bp); rc = bnxt_fw_init_one(bp); @@ -11666,6 +11858,46 @@ static void bnxt_cfg_usr_fltrs(struct bnxt *bp) bnxt_cfg_one_usr_fltr(bp, usr_fltr); } +static int bnxt_set_xps_mapping(struct bnxt *bp) +{ + int numa_node = dev_to_node(&bp->pdev->dev); + unsigned int q_idx, map_idx, cpu, i; + const struct cpumask *cpu_mask_ptr; + int nr_cpus = num_online_cpus(); + cpumask_t *q_map; + int rc = 0; + + q_map = kcalloc(bp->tx_nr_rings_per_tc, sizeof(*q_map), GFP_KERNEL); + if (!q_map) + return -ENOMEM; + + /* Create CPU mask for all TX queues across MQPRIO traffic classes. + * Each TC has the same number of TX queues. The nth TX queue for each + * TC will have the same CPU mask. 
+ */ + for (i = 0; i < nr_cpus; i++) { + map_idx = i % bp->tx_nr_rings_per_tc; + cpu = cpumask_local_spread(i, numa_node); + cpu_mask_ptr = get_cpu_mask(cpu); + cpumask_or(&q_map[map_idx], &q_map[map_idx], cpu_mask_ptr); + } + + /* Register CPU mask for each TX queue except the ones marked for XDP */ + for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) { + map_idx = q_idx % bp->tx_nr_rings_per_tc; + rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx); + if (rc) { + netdev_warn(bp->dev, "Error setting XPS for q:%d\n", + q_idx); + break; + } + } + + kfree(q_map); + + return rc; +} + static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) { int rc = 0; @@ -11728,8 +11960,12 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) } } - if (irq_re_init) + if (irq_re_init) { udp_tunnel_nic_reset_ntf(bp->dev); + rc = bnxt_set_xps_mapping(bp); + if (rc) + netdev_warn(bp->dev, "failed to set xps mapping\n"); + } if (bp->tx_nr_rings_xdp < num_possible_cpus()) { if (!static_key_enabled(&bnxt_xdp_locking_key)) @@ -11754,6 +11990,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) atomic_set(&bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS); bnxt_ptp_init_rtc(bp, true); bnxt_ptp_cfg_tstamp_filters(bp); + if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) + bnxt_hwrm_realloc_rss_ctx_vnic(bp); bnxt_cfg_usr_fltrs(bp); return 0; @@ -11868,10 +12106,9 @@ static int bnxt_open(struct net_device *dev) bnxt_hwrm_if_change(bp, false); } else { if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) { - if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { - bnxt_ulp_start(bp, 0); - bnxt_reenable_sriov(bp); - } + if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + bnxt_queue_sp_work(bp, + BNXT_RESTART_ULP_SP_EVENT); } } @@ -11902,6 +12139,8 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, while (bnxt_drv_busy(bp)) msleep(20); + if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) + bnxt_clear_rss_ctxs(bp, false); /* Flush rings and disable interrupts */ bnxt_shutdown_nic(bp, irq_re_init); @@ -12101,8 +12340,8 @@ static void bnxt_get_ring_stats(struct bnxt *bp, stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts); stats->rx_dropped += - cpr->sw_stats.rx.rx_netpoll_discards + - cpr->sw_stats.rx.rx_oom_discards; + cpr->sw_stats->rx.rx_netpoll_discards + + cpr->sw_stats->rx.rx_oom_discards; } } @@ -12169,7 +12408,7 @@ static void bnxt_get_one_ring_err_stats(struct bnxt *bp, struct bnxt_total_ring_err_stats *stats, struct bnxt_cp_ring_info *cpr) { - struct bnxt_sw_stats *sw_stats = &cpr->sw_stats; + struct bnxt_sw_stats *sw_stats = cpr->sw_stats; u64 *hw_stats = cpr->stats.sw_stats; stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors; @@ -12399,33 +12638,26 @@ static bool bnxt_rfs_supported(struct bnxt *bp) } /* If runtime conditions support RFS */ -static bool bnxt_rfs_capable(struct bnxt *bp) +bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx) { struct bnxt_hw_rings hwr = {0}; int max_vnics, max_rss_ctxs; - hwr.rss_ctx = 1; - if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) { - /* 2 VNICS: default + Ntuple */ - hwr.vnic = 2; - hwr.rss_ctx = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) * - hwr.vnic; - goto check_reserve_vnic; - } - if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && + !BNXT_SUPPORTS_NTUPLE_VNIC(bp)) return bnxt_rfs_supported(bp); + if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings) return false; - hwr.vnic = 1 + bp->rx_nr_rings; 
-check_reserve_vnic: + hwr.grp = bp->rx_nr_rings; + hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings); + if (new_rss_ctx) + hwr.vnic++; + hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); max_vnics = bnxt_get_max_func_vnics(bp); max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp); - if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && - !(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)) - hwr.rss_ctx = hwr.vnic; - if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) { if (bp->rx_nr_rings > 1) netdev_warn(bp->dev, @@ -12459,7 +12691,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev, struct bnxt *bp = netdev_priv(dev); netdev_features_t vlan_features; - if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp)) + if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false)) features &= ~NETIF_F_NTUPLE; if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog) @@ -12851,17 +13083,8 @@ static void bnxt_reset_task(struct bnxt *bp, bool silent) if (!silent) bnxt_dbg_dump_states(bp); if (netif_running(bp->dev)) { - int rc; - - if (silent) { - bnxt_close_nic(bp, false, false); - bnxt_open_nic(bp, false, false); - } else { - bnxt_ulp_stop(bp); - bnxt_close_nic(bp, true, false); - rc = bnxt_open_nic(bp, true, false); - bnxt_ulp_start(bp, rc); - } + bnxt_close_nic(bp, !silent, false); + bnxt_open_nic(bp, !silent, false); } } @@ -13019,7 +13242,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp) rxr->bnapi->in_reset = false; bnxt_alloc_one_rx_ring(bp, i); cpr = &rxr->bnapi->cp_ring; - cpr->sw_stats.rx.rx_resets++; + cpr->sw_stats->rx.rx_resets++; if (bp->flags & BNXT_FLAG_AGG_RINGS) bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); @@ -13041,7 +13264,6 @@ static void bnxt_fw_fatal_close(struct bnxt *bp) static void bnxt_fw_reset_close(struct bnxt *bp) { - bnxt_ulp_stop(bp); /* When firmware is in fatal state, quiesce device and disable * bus master to prevent any potential bad DMAs before freeing * kernel memory. 
@@ -13122,6 +13344,7 @@ void bnxt_fw_exception(struct bnxt *bp) { netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n"); set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); + bnxt_ulp_stop(bp); bnxt_rtnl_lock_sp(bp); bnxt_force_fw_reset(bp); bnxt_rtnl_unlock_sp(bp); @@ -13153,6 +13376,7 @@ static int bnxt_get_registered_vfs(struct bnxt *bp) void bnxt_fw_reset(struct bnxt *bp) { + bnxt_ulp_stop(bp); bnxt_rtnl_lock_sp(bp); if (test_bit(BNXT_STATE_OPEN, &bp->state) && !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { @@ -13231,7 +13455,7 @@ static void bnxt_chk_missed_irq(struct bnxt *bp) bnxt_dbg_hwrm_ring_info_get(bp, DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL, fw_ring_id, &val[0], &val[1]); - cpr->sw_stats.cmn.missed_irqs++; + cpr->sw_stats->cmn.missed_irqs++; } } } @@ -13277,6 +13501,12 @@ static void bnxt_fw_echo_reply(struct bnxt *bp) hwrm_req_send(bp, req); } +static void bnxt_ulp_restart(struct bnxt *bp) +{ + bnxt_ulp_stop(bp); + bnxt_ulp_start(bp, 0); +} + static void bnxt_sp_task(struct work_struct *work) { struct bnxt *bp = container_of(work, struct bnxt, sp_task); @@ -13288,6 +13518,11 @@ static void bnxt_sp_task(struct work_struct *work) return; } + if (test_and_clear_bit(BNXT_RESTART_ULP_SP_EVENT, &bp->sp_event)) { + bnxt_ulp_restart(bp); + bnxt_reenable_sriov(bp); + } + if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) bnxt_cfg_rx_mode(bp); @@ -13416,8 +13651,8 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, return -ENOMEM; hwr.stat = hwr.cp; if (BNXT_NEW_RM(bp)) { - hwr.cp += bnxt_get_ulp_msix_num(bp); - hwr.stat += bnxt_get_ulp_stat_ctxs(bp); + hwr.cp += bnxt_get_ulp_msix_num_in_use(bp); + hwr.stat += bnxt_get_ulp_stat_ctxs_in_use(bp); hwr.grp = rx; hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); } @@ -13600,7 +13835,7 @@ static void bnxt_set_dflt_rfs(struct bnxt *bp) bp->flags &= ~BNXT_FLAG_RFS; if (bnxt_rfs_supported(bp)) { dev->hw_features |= NETIF_F_NTUPLE; - if (bnxt_rfs_capable(bp)) { + if (bnxt_rfs_capable(bp, false)) { bp->flags |= BNXT_FLAG_RFS; dev->features |= NETIF_F_NTUPLE; } @@ -13744,10 +13979,8 @@ static bool bnxt_fw_reset_timeout(struct bnxt *bp) static void bnxt_fw_reset_abort(struct bnxt *bp, int rc) { clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); - if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) { - bnxt_ulp_start(bp, rc); + if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) bnxt_dl_health_fw_status_update(bp, false); - } bp->fw_reset_state = 0; dev_close(bp->dev); } @@ -13778,7 +14011,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) bp->fw_reset_state = 0; netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n", n); - return; + goto ulp_start; } bnxt_queue_fw_reset_work(bp, HZ / 10); return; @@ -13788,7 +14021,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { bnxt_fw_reset_abort(bp, rc); rtnl_unlock(); - return; + goto ulp_start; } bnxt_fw_reset_close(bp); if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { @@ -13881,7 +14114,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) netdev_err(bp->dev, "bnxt_open() failed during FW reset\n"); bnxt_fw_reset_abort(bp, rc); rtnl_unlock(); - return; + goto ulp_start; } if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && @@ -13893,10 +14126,6 @@ static void bnxt_fw_reset_task(struct work_struct *work) /* Make sure fw_reset_state is 0 before clearing the flag */ smp_mb__before_atomic(); clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); - 
bnxt_ulp_start(bp, 0); - bnxt_reenable_sriov(bp); - bnxt_vf_reps_alloc(bp); - bnxt_vf_reps_open(bp); bnxt_ptp_reapply_pps(bp); clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state); if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) { @@ -13904,6 +14133,12 @@ static void bnxt_fw_reset_task(struct work_struct *work) bnxt_dl_health_fw_status_update(bp, true); } rtnl_unlock(); + bnxt_ulp_start(bp, 0); + bnxt_reenable_sriov(bp); + rtnl_lock(); + bnxt_vf_reps_alloc(bp); + bnxt_vf_reps_open(bp); + rtnl_unlock(); break; } return; @@ -13919,6 +14154,8 @@ fw_reset_abort: rtnl_lock(); bnxt_fw_reset_abort(bp, rc); rtnl_unlock(); +ulp_start: + bnxt_ulp_start(bp, rc); } static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) @@ -14043,7 +14280,7 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu) if (netif_running(dev)) bnxt_close_nic(bp, true, false); - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); bnxt_set_ring_params(bp); if (netif_running(dev)) @@ -14453,12 +14690,9 @@ static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, if (!br_spec) return -EINVAL; - nla_for_each_nested(attr, br_spec, rem) { + nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) { u16 mode; - if (nla_type(attr) != IFLA_BRIDGE_MODE) - continue; - mode = nla_get_u16(attr); if (mode == bp->br_mode) break; @@ -14542,7 +14776,7 @@ static void bnxt_get_queue_stats_rx(struct net_device *dev, int i, stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes); stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes); - stats->alloc_fail = cpr->sw_stats.rx.rx_oom_discards; + stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards; } static void bnxt_get_queue_stats_tx(struct net_device *dev, int i, @@ -14594,12 +14828,17 @@ static void bnxt_remove_one(struct pci_dev *pdev) if (BNXT_PF(bp)) bnxt_sriov_disable(bp); - bnxt_rdma_aux_device_uninit(bp); + bnxt_rdma_aux_device_del(bp); bnxt_ptp_clear(bp); unregister_netdev(dev); + + bnxt_rdma_aux_device_uninit(bp); + bnxt_free_l2_filters(bp, true); bnxt_free_ntp_fltrs(bp, true); + if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) + bnxt_clear_rss_ctxs(bp, true); clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); /* Flush any pending tasks */ cancel_work_sync(&bp->sp_task); @@ -14688,8 +14927,9 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, *max_rx = hw_resc->max_rx_rings; *max_cp = bnxt_get_max_func_cp_rings_for_en(bp); max_irq = min_t(int, bnxt_get_max_func_irqs(bp) - - bnxt_get_ulp_msix_num(bp), - hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp)); + bnxt_get_ulp_msix_num_in_use(bp), + hw_resc->max_stat_ctxs - + bnxt_get_ulp_stat_ctxs_in_use(bp)); if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) *max_cp = min_t(int, *max_cp, max_irq); max_ring_grps = hw_resc->max_hw_ring_grps; @@ -14785,6 +15025,7 @@ static void bnxt_trim_dflt_sh_rings(struct bnxt *bp) static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) { int dflt_rings, max_rx_rings, max_tx_rings, rc; + int avail_msix; if (!bnxt_can_reserve_rings(bp)) return 0; @@ -14812,6 +15053,14 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings; bp->tx_nr_rings = bp->tx_nr_rings_per_tc; + avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings; + if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) { + int ulp_num_msix = min(avail_msix, bp->ulp_num_msix_want); + + bnxt_set_ulp_msix_num(bp, ulp_num_msix); + bnxt_set_dflt_ulp_stat_ctxs(bp); + } + rc = __bnxt_reserve_rings(bp); if (rc && rc != -ENODEV) 
netdev_warn(bp->dev, "Unable to reserve tx rings\n"); @@ -15058,7 +15307,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bp->flags |= BNXT_FLAG_CHIP_P7; } - rc = bnxt_alloc_rss_indir_tbl(bp); + rc = bnxt_alloc_rss_indir_tbl(bp, NULL); if (rc) goto init_err_pci_clean; @@ -15161,6 +15410,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bnxt_set_rx_skb_mode(bp, false); bnxt_set_tpa_flags(bp); bnxt_set_ring_params(bp); + bnxt_rdma_aux_device_init(bp); rc = bnxt_set_dflt_rings(bp, true); if (rc) { if (BNXT_VF(bp) && rc == -ENODEV) { @@ -15211,13 +15461,17 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_LIST_HEAD(&bp->usr_fltr_list); + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) + bnxt_init_multi_rss_ctx(bp); + + rc = register_netdev(dev); if (rc) goto init_err_cleanup; bnxt_dl_fw_reporters_create(bp); - bnxt_rdma_aux_device_init(bp); + bnxt_rdma_aux_device_add(bp); bnxt_print_device_info(bp); @@ -15225,12 +15479,15 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; init_err_cleanup: + bnxt_rdma_aux_device_uninit(bp); bnxt_dl_unregister(bp); init_err_dl: bnxt_shutdown_tc(bp); bnxt_clear_int_mode(bp); init_err_pci_clean: + if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) + bnxt_clear_rss_ctxs(bp, true); bnxt_hwrm_func_drv_unrgtr(bp); bnxt_free_hwrm_resources(bp); bnxt_hwmon_uninit(bp); @@ -15285,8 +15542,9 @@ static int bnxt_suspend(struct device *device) struct bnxt *bp = netdev_priv(dev); int rc = 0; - rtnl_lock(); bnxt_ulp_stop(bp); + + rtnl_lock(); if (netif_running(dev)) { netif_device_detach(dev); rc = bnxt_close(dev); @@ -15341,10 +15599,10 @@ static int bnxt_resume(struct device *device) } resume_exit: + rtnl_unlock(); bnxt_ulp_start(bp, rc); if (!rc) bnxt_reenable_sriov(bp); - rtnl_unlock(); return rc; } @@ -15374,11 +15632,11 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev, netdev_info(netdev, "PCI I/O error detected\n"); + bnxt_ulp_stop(bp); + rtnl_lock(); netif_device_detach(netdev); - bnxt_ulp_stop(bp); - if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { netdev_err(bp->dev, "Firmware reset already in progress\n"); abort = true; @@ -15430,6 +15688,10 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) netdev_info(bp->dev, "PCI Slot Reset\n"); + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && + test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state)) + msleep(900); + rtnl_lock(); if (pci_enable_device(pdev)) { @@ -15510,13 +15772,13 @@ static void bnxt_io_resume(struct pci_dev *pdev) if (!err && netif_running(netdev)) err = bnxt_open(netdev); - bnxt_ulp_start(bp, err); - if (!err) { - bnxt_reenable_sriov(bp); + if (!err) netif_device_attach(netdev); - } rtnl_unlock(); + bnxt_ulp_start(bp, err); + if (!err) + bnxt_reenable_sriov(bp); } static const struct pci_error_handlers bnxt_err_handler = { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index dd849e715c9b..656ab81c0272 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1152,7 +1152,7 @@ struct bnxt_cp_ring_info { struct bnxt_stats_mem stats; u32 hw_stats_ctx_id; - struct bnxt_sw_stats sw_stats; + struct bnxt_sw_stats *sw_stats; struct bnxt_ring_struct cp_ring_struct; @@ -1256,8 +1256,22 @@ struct bnxt_vnic_info { #define BNXT_VNIC_UCAST_FLAG 8 #define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10 #define BNXT_VNIC_NTUPLE_FLAG 0x20 +#define BNXT_VNIC_RSSCTX_FLAG 
0x40 + struct bnxt_rss_ctx *rss_ctx; + u32 vnic_id; }; +struct bnxt_rss_ctx { + struct list_head list; + struct bnxt_vnic_info vnic; + u16 *rss_indir_tbl; + u8 index; +}; + +#define BNXT_MAX_ETH_RSS_CTX 32 +#define BNXT_RSS_CTX_BMAP_LEN (BNXT_MAX_ETH_RSS_CTX + 1) +#define BNXT_VNIC_ID_INVALID 0xffffffff + struct bnxt_hw_rings { int tx; int rx; @@ -1360,6 +1374,7 @@ struct bnxt_filter_base { #define BNXT_ACT_RING_DST 2 #define BNXT_ACT_FUNC_DST 4 #define BNXT_ACT_NO_AGING 8 +#define BNXT_ACT_RSS_CTX 0x10 u16 sw_id; u16 rxq; u16 fw_vnic_id; @@ -1998,6 +2013,7 @@ enum board_idx { NETXTREME_E_VF_HV, NETXTREME_E_P5_VF, NETXTREME_E_P5_VF_HV, + NETXTREME_E_P7_VF, }; struct bnxt { @@ -2227,6 +2243,9 @@ struct bnxt { /* grp_info indexed by completion ring index */ struct bnxt_ring_grp_info *grp_info; struct bnxt_vnic_info *vnic_info; + struct list_head rss_ctx_list; + unsigned long *rss_ctx_bmap; + u32 num_rss_ctx; int nr_vnics; u16 *rss_indir_tbl; u16 rss_indir_tbl_entries; @@ -2241,6 +2260,7 @@ struct bnxt { #define BNXT_RSS_CAP_AH_V6_RSS_CAP BIT(5) #define BNXT_RSS_CAP_ESP_V4_RSS_CAP BIT(6) #define BNXT_RSS_CAP_ESP_V6_RSS_CAP BIT(7) +#define BNXT_RSS_CAP_MULTI_RSS_CTX BIT(8) u8 rss_hash_key[HW_HASH_KEY_SIZE]; u8 rss_hash_key_valid:1; @@ -2284,6 +2304,7 @@ struct bnxt { struct bnxt_irq *irq_tbl; int total_irqs; + int ulp_num_msix_want; u8 mac_addr[ETH_ALEN]; #ifdef CONFIG_BNXT_DCB @@ -2340,6 +2361,10 @@ struct bnxt { #define BNXT_SUPPORTS_NTUPLE_VNIC(bp) \ (BNXT_PF(bp) && ((bp)->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3)) +#define BNXT_SUPPORTS_MULTI_RSS_CTX(bp) \ + (BNXT_PF(bp) && BNXT_SUPPORTS_NTUPLE_VNIC(bp) && \ + ((bp)->rss_cap & BNXT_RSS_CAP_MULTI_RSS_CTX)) + u32 hwrm_spec_code; u16 hwrm_cmd_seq; u16 hwrm_cmd_kong_seq; @@ -2416,6 +2441,7 @@ struct bnxt { #define BNXT_LINK_CFG_CHANGE_SP_EVENT 21 #define BNXT_THERMAL_THRESHOLD_SP_EVENT 22 #define BNXT_FW_ECHO_REQUEST_SP_EVENT 23 +#define BNXT_RESTART_ULP_SP_EVENT 24 struct delayed_work fw_reset_task; int fw_reset_state; @@ -2693,9 +2719,16 @@ int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, struct bnxt_ntuple_filter *fltr); int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, struct bnxt_ntuple_filter *fltr); +int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic, + u32 tpa_flags); void bnxt_fill_ipv6_mask(__be32 mask[4]); +int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx); +void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx); int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings); -int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id); +int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic); +int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic, + unsigned int start_rx_ring_idx, + unsigned int nr_rings); int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings); int bnxt_nq_rings_in_use(struct bnxt *bp); int bnxt_hwrm_set_coal(struct bnxt *); @@ -2705,7 +2738,6 @@ unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp); unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp); unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp); unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp); -int bnxt_get_avail_msix(struct bnxt *bp, int num); int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init); void bnxt_tx_disable(struct bnxt *bp); void bnxt_tx_enable(struct bnxt *bp); @@ -2721,6 +2753,12 @@ int bnxt_hwrm_free_wol_fltr(struct bnxt *bp); int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all); int 
bnxt_hwrm_func_qcaps(struct bnxt *bp); int bnxt_hwrm_fw_set_time(struct bnxt *); +int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic); +int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic); +void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, + bool all); +struct bnxt_rss_ctx *bnxt_alloc_rss_ctx(struct bnxt *bp); +void bnxt_clear_rss_ctxs(struct bnxt *bp, bool all); int bnxt_open_nic(struct bnxt *, bool, bool); int bnxt_half_open_nic(struct bnxt *bp); void bnxt_half_close_nic(struct bnxt *bp); @@ -2728,6 +2766,7 @@ void bnxt_reenable_sriov(struct bnxt *bp); void bnxt_close_nic(struct bnxt *, bool, bool); void bnxt_get_ring_err_stats(struct bnxt *bp, struct bnxt_total_ring_err_stats *stats); +bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx); int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, u32 *reg_buf); void bnxt_fw_exception(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index ae4529c043f0..4cb0fabf977e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -437,18 +437,20 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change, switch (action) { case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: { + bnxt_ulp_stop(bp); rtnl_lock(); if (bnxt_sriov_cfg(bp)) { NL_SET_ERR_MSG_MOD(extack, "reload is unsupported while VFs are allocated or being configured"); rtnl_unlock(); + bnxt_ulp_start(bp, 0); return -EOPNOTSUPP; } if (bp->dev->reg_state == NETREG_UNREGISTERED) { rtnl_unlock(); + bnxt_ulp_start(bp, 0); return -ENODEV; } - bnxt_ulp_stop(bp); if (netif_running(bp->dev)) bnxt_close_nic(bp, true, true); bnxt_vf_reps_free(bp); @@ -516,7 +518,6 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti bnxt_vf_reps_alloc(bp); if (netif_running(bp->dev)) rc = bnxt_open_nic(bp, true, true); - bnxt_ulp_start(bp, rc); if (!rc) { bnxt_reenable_sriov(bp); bnxt_ptp_reapply_pps(bp); @@ -570,6 +571,8 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti dev_close(bp->dev); } rtnl_unlock(); + if (action == DEVLINK_RELOAD_ACTION_DRIVER_REINIT) + bnxt_ulp_start(bp, rc); return rc; } @@ -1096,7 +1099,8 @@ static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id, } static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct bnxt *bp = bnxt_get_bp_from_dl(dl); struct hwrm_nvm_set_variable_input *req; @@ -1145,7 +1149,8 @@ static int bnxt_remote_dev_reset_get(struct devlink *dl, u32 id, } static int bnxt_remote_dev_reset_set(struct devlink *dl, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct bnxt *bp = bnxt_get_bp_from_dl(dl); int rc; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 1d240a27455a..8763f8a01457 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -631,13 +631,13 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, buf[j] = sw_stats[k]; skip_tpa_ring_stats: - sw = (u64 *)&cpr->sw_stats.rx; + sw = (u64 *)&cpr->sw_stats->rx; if (is_rx_ring(bp, i)) { for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++) buf[j] = sw[k]; } - sw = (u64 *)&cpr->sw_stats.cmn; + sw = 
(u64 *)&cpr->sw_stats->cmn; for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++) buf[j] = sw[k]; } @@ -969,6 +969,8 @@ static int bnxt_set_channels(struct net_device *dev, } bnxt_clear_usr_fltrs(bp, true); + if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) + bnxt_clear_rss_ctxs(bp, false); if (netif_running(dev)) { if (BNXT_PF(bp)) { /* TODO CHIMP_FW: Send message to all VF's @@ -1205,6 +1207,36 @@ fltr_err: return rc; } +static struct bnxt_rss_ctx *bnxt_get_rss_ctx_from_index(struct bnxt *bp, + u32 index) +{ + struct bnxt_rss_ctx *rss_ctx, *tmp; + + list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) + if (rss_ctx->index == index) + return rss_ctx; + return NULL; +} + +static int bnxt_alloc_rss_ctx_rss_table(struct bnxt *bp, + struct bnxt_rss_ctx *rss_ctx) +{ + int size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5); + struct bnxt_vnic_info *vnic = &rss_ctx->vnic; + + vnic->rss_table_size = size + HW_HASH_KEY_SIZE; + vnic->rss_table = dma_alloc_coherent(&bp->pdev->dev, + vnic->rss_table_size, + &vnic->rss_table_dma_addr, + GFP_KERNEL); + if (!vnic->rss_table) + return -ENOMEM; + + vnic->rss_hash_key = ((void *)vnic->rss_table) + size; + vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; + return 0; +} + static int bnxt_add_l2_cls_rule(struct bnxt *bp, struct ethtool_rx_flow_spec *fs) { @@ -1280,22 +1312,24 @@ static bool bnxt_verify_ntuple_ip6_flow(struct ethtool_usrip6_spec *ip_spec, } static int bnxt_add_ntuple_cls_rule(struct bnxt *bp, - struct ethtool_rx_flow_spec *fs) + struct ethtool_rxnfc *cmd) { - u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); - u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie); + struct ethtool_rx_flow_spec *fs = &cmd->fs; struct bnxt_ntuple_filter *new_fltr, *fltr; + u32 flow_type = fs->flow_type & 0xff; struct bnxt_l2_filter *l2_fltr; struct bnxt_flow_masks *fmasks; - u32 flow_type = fs->flow_type; struct flow_keys *fkeys; - u32 idx; + u32 idx, ring; int rc; + u8 vf; if (!bp->vnic_info) return -EAGAIN; - if ((flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf) + vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); + ring = ethtool_get_flow_spec_ring(fs->ring_cookie); + if ((fs->flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf) return -EOPNOTSUPP; if (flow_type == IP_USER_FLOW) { @@ -1403,6 +1437,19 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp, rcu_read_unlock(); new_fltr->base.flags = BNXT_ACT_NO_AGING; + if (fs->flow_type & FLOW_RSS) { + struct bnxt_rss_ctx *rss_ctx; + + new_fltr->base.fw_vnic_id = 0; + new_fltr->base.flags |= BNXT_ACT_RSS_CTX; + rss_ctx = bnxt_get_rss_ctx_from_index(bp, cmd->rss_context); + if (rss_ctx) { + new_fltr->base.fw_vnic_id = rss_ctx->index; + } else { + rc = -EINVAL; + goto ntuple_err; + } + } if (fs->ring_cookie == RX_CLS_FLOW_DISC) new_fltr->base.flags |= BNXT_ACT_DROP; else @@ -1444,12 +1491,12 @@ static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd) flow_type == IPV6_USER_FLOW) && !(bp->fw_cap & BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO)) return -EOPNOTSUPP; - if (flow_type & (FLOW_MAC_EXT | FLOW_RSS)) + if (flow_type & FLOW_MAC_EXT) return -EINVAL; flow_type &= ~FLOW_EXT; if (fs->ring_cookie == RX_CLS_FLOW_DISC && flow_type != ETHER_FLOW) - return bnxt_add_ntuple_cls_rule(bp, fs); + return bnxt_add_ntuple_cls_rule(bp, cmd); ring = ethtool_get_flow_spec_ring(fs->ring_cookie); vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); @@ -1463,7 +1510,7 @@ static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd) if (flow_type == ETHER_FLOW) rc = bnxt_add_l2_cls_rule(bp, fs); else - rc = 
bnxt_add_ntuple_cls_rule(bp, fs); + rc = bnxt_add_ntuple_cls_rule(bp, cmd); return rc; } @@ -1754,7 +1801,10 @@ static u32 bnxt_get_rxfh_key_size(struct net_device *dev) static int bnxt_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh) { + u32 rss_context = rxfh->rss_context; + struct bnxt_rss_ctx *rss_ctx = NULL; struct bnxt *bp = netdev_priv(dev); + u16 *indir_tbl = bp->rss_indir_tbl; struct bnxt_vnic_info *vnic; u32 i, tbl_size; @@ -1764,10 +1814,18 @@ static int bnxt_get_rxfh(struct net_device *dev, return 0; vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; - if (rxfh->indir && bp->rss_indir_tbl) { + if (rxfh->rss_context) { + rss_ctx = bnxt_get_rss_ctx_from_index(bp, rss_context); + if (!rss_ctx) + return -EINVAL; + indir_tbl = rss_ctx->rss_indir_tbl; + vnic = &rss_ctx->vnic; + } + + if (rxfh->indir && indir_tbl) { tbl_size = bnxt_get_rxfh_indir_size(dev); for (i = 0; i < tbl_size; i++) - rxfh->indir[i] = bp->rss_indir_tbl[i]; + rxfh->indir[i] = indir_tbl[i]; } if (rxfh->key && vnic->rss_hash_key) @@ -1776,6 +1834,136 @@ static int bnxt_get_rxfh(struct net_device *dev, return 0; } +static void bnxt_modify_rss(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, + struct ethtool_rxfh_param *rxfh) +{ + if (rxfh->key) { + if (rss_ctx) { + memcpy(rss_ctx->vnic.rss_hash_key, rxfh->key, + HW_HASH_KEY_SIZE); + } else { + memcpy(bp->rss_hash_key, rxfh->key, HW_HASH_KEY_SIZE); + bp->rss_hash_key_updated = true; + } + } + if (rxfh->indir) { + u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(bp->dev); + u16 *indir_tbl = bp->rss_indir_tbl; + + if (rss_ctx) + indir_tbl = rss_ctx->rss_indir_tbl; + for (i = 0; i < tbl_size; i++) + indir_tbl[i] = rxfh->indir[i]; + pad = bp->rss_indir_tbl_entries - tbl_size; + if (pad) + memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); + } +} + +static int bnxt_set_rxfh_context(struct bnxt *bp, + struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) +{ + u32 *rss_context = &rxfh->rss_context; + struct bnxt_rss_ctx *rss_ctx; + struct bnxt_vnic_info *vnic; + bool modify = false; + int bit_id; + int rc; + + if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) { + NL_SET_ERR_MSG_MOD(extack, "RSS contexts not supported"); + return -EOPNOTSUPP; + } + + if (!netif_running(bp->dev)) { + NL_SET_ERR_MSG_MOD(extack, "Unable to set RSS contexts when interface is down"); + return -EAGAIN; + } + + if (*rss_context != ETH_RXFH_CONTEXT_ALLOC) { + rss_ctx = bnxt_get_rss_ctx_from_index(bp, *rss_context); + if (!rss_ctx) { + NL_SET_ERR_MSG_FMT_MOD(extack, "RSS context %u not found", + *rss_context); + return -EINVAL; + } + if (*rss_context && rxfh->rss_delete) { + bnxt_del_one_rss_ctx(bp, rss_ctx, true); + return 0; + } + modify = true; + vnic = &rss_ctx->vnic; + goto modify_context; + } + + if (bp->num_rss_ctx >= BNXT_MAX_ETH_RSS_CTX) { + NL_SET_ERR_MSG_FMT_MOD(extack, "Out of RSS contexts, maximum %u", + BNXT_MAX_ETH_RSS_CTX); + return -EINVAL; + } + + if (!bnxt_rfs_capable(bp, true)) { + NL_SET_ERR_MSG_MOD(extack, "Out hardware resources"); + return -ENOMEM; + } + + rss_ctx = bnxt_alloc_rss_ctx(bp); + if (!rss_ctx) + return -ENOMEM; + + vnic = &rss_ctx->vnic; + vnic->flags |= BNXT_VNIC_RSSCTX_FLAG; + vnic->vnic_id = BNXT_VNIC_ID_INVALID; + rc = bnxt_alloc_rss_ctx_rss_table(bp, rss_ctx); + if (rc) + goto out; + + rc = bnxt_alloc_rss_indir_tbl(bp, rss_ctx); + if (rc) + goto out; + + bnxt_set_dflt_rss_indir_tbl(bp, rss_ctx); + memcpy(vnic->rss_hash_key, bp->rss_hash_key, HW_HASH_KEY_SIZE); + + rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings); + if (rc) { + 
NL_SET_ERR_MSG_MOD(extack, "Unable to allocate VNIC"); + goto out; + } + + rc = bnxt_hwrm_vnic_set_tpa(bp, vnic, bp->flags & BNXT_FLAG_TPA); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA"); + goto out; + } +modify_context: + bnxt_modify_rss(bp, rss_ctx, rxfh); + + if (modify) + return bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic); + + rc = __bnxt_setup_vnic_p5(bp, vnic); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA"); + goto out; + } + + bit_id = bitmap_find_free_region(bp->rss_ctx_bmap, + BNXT_RSS_CTX_BMAP_LEN, 0); + if (bit_id < 0) { + rc = -ENOMEM; + goto out; + } + rss_ctx->index = (u16)bit_id; + *rss_context = rss_ctx->index; + + return 0; +out: + bnxt_del_one_rss_ctx(bp, rss_ctx, true); + return rc; +} + static int bnxt_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh, struct netlink_ext_ack *extack) @@ -1786,20 +1974,11 @@ static int bnxt_set_rxfh(struct net_device *dev, if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; - if (rxfh->key) { - memcpy(bp->rss_hash_key, rxfh->key, HW_HASH_KEY_SIZE); - bp->rss_hash_key_updated = true; - } + if (rxfh->rss_context) + return bnxt_set_rxfh_context(bp, rxfh, extack); - if (rxfh->indir) { - u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev); + bnxt_modify_rss(bp, NULL, rxfh); - for (i = 0; i < tbl_size; i++) - bp->rss_indir_tbl[i] = rxfh->indir[i]; - pad = bp->rss_indir_tbl_entries - tbl_size; - if (pad) - memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); - } bnxt_clear_usr_fltrs(bp, false); if (netif_running(bp->dev)) { bnxt_close_nic(bp, false, false); @@ -4641,6 +4820,14 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, if (!bp->num_tests || !BNXT_PF(bp)) return; + + if (etest->flags & ETH_TEST_FL_OFFLINE && + bnxt_ulp_registered(bp->edev)) { + etest->flags |= ETH_TEST_FL_FAILED; + netdev_warn(dev, "Offline tests cannot be run with RoCE driver loaded\n"); + return; + } + memset(buf, 0, sizeof(u64) * bp->num_tests); if (!netif_running(dev)) { etest->flags |= ETH_TEST_FL_FAILED; @@ -4671,7 +4858,6 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, if (!offline) { bnxt_run_fw_tests(bp, test_mask, &test_results); } else { - bnxt_ulp_stop(bp); bnxt_close_nic(bp, true, false); bnxt_run_fw_tests(bp, test_mask, &test_results); @@ -4682,7 +4868,6 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, if (rc) { bnxt_hwrm_mac_loopback(bp, false); etest->flags |= ETH_TEST_FL_FAILED; - bnxt_ulp_start(bp, rc); return; } if (bnxt_run_loopback(bp)) @@ -4709,7 +4894,6 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, bnxt_hwrm_phy_loopback(bp, false, false); bnxt_half_close_nic(bp); rc = bnxt_open_nic(bp, true, true); - bnxt_ulp_start(bp, rc); } if (rc || bnxt_test_irq(bp)) { buf[BNXT_IRQ_TEST_IDX] = 1; @@ -5071,6 +5255,7 @@ void bnxt_ethtool_free(struct bnxt *bp) const struct ethtool_ops bnxt_ethtool_ops = { .cap_link_lanes_supported = 1, + .cap_rss_ctx_supported = 1, .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES | ETHTOOL_COALESCE_USECS_IRQ | diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index e957abd704db..06ea86c80be1 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -468,6 +468,10 @@ struct cmd_nums { #define HWRM_TF_GLOBAL_CFG_GET 0x2fdUL #define HWRM_TF_IF_TBL_SET 0x2feUL #define HWRM_TF_IF_TBL_GET 0x2ffUL + 
#define HWRM_TF_RESC_USAGE_SET 0x300UL + #define HWRM_TF_RESC_USAGE_QUERY 0x301UL + #define HWRM_TF_TBL_TYPE_ALLOC 0x302UL + #define HWRM_TF_TBL_TYPE_FREE 0x303UL #define HWRM_TFC_TBL_SCOPE_QCAPS 0x380UL #define HWRM_TFC_TBL_SCOPE_ID_ALLOC 0x381UL #define HWRM_TFC_TBL_SCOPE_CONFIG 0x382UL @@ -495,6 +499,7 @@ struct cmd_nums { #define HWRM_TFC_IF_TBL_SET 0x398UL #define HWRM_TFC_IF_TBL_GET 0x399UL #define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL + #define HWRM_TFC_RESC_USAGE_QUERY 0x39bUL #define HWRM_SV 0x400UL #define HWRM_DBG_READ_DIRECT 0xff10UL #define HWRM_DBG_READ_INDIRECT 0xff11UL @@ -604,8 +609,8 @@ struct hwrm_err_output { #define HWRM_VERSION_MAJOR 1 #define HWRM_VERSION_MINOR 10 #define HWRM_VERSION_UPDATE 3 -#define HWRM_VERSION_RSVD 15 -#define HWRM_VERSION_STR "1.10.3.15" +#define HWRM_VERSION_RSVD 39 +#define HWRM_VERSION_STR "1.10.3.39" /* hwrm_ver_get_input (size:192b/24B) */ struct hwrm_ver_get_input { @@ -1328,8 +1333,9 @@ struct hwrm_async_event_cmpl_error_report_base { #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL - #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL - #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED }; /* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */ @@ -1478,6 +1484,30 @@ struct hwrm_async_event_cmpl_error_report_thermal { #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING }; +/* hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported (size:128b/16B) */ +struct hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported { + __le16 type; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT 0x45UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_V 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL + #define 
ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED +}; + /* hwrm_func_reset_input (size:192b/24B) */ struct hwrm_func_reset_input { __le16 req_type; @@ -1781,6 +1811,9 @@ struct hwrm_func_qcaps_output { #define FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED 0x100000UL #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDCC_SUPPORTED 0x200000UL #define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_SO_TXTIME_SUPPORTED 0x400000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED 0x800000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_INGRESS_NIC_FLOW_SUPPORTED 0x1000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_LPBK_STATS_SUPPORTED 0x2000000UL __le16 tunnel_disable_flag; #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL @@ -1791,10 +1824,8 @@ struct hwrm_func_qcaps_output { #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_MPLS 0x40UL #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_PPPOE 0x80UL __le16 xid_partition_cap; - #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_KTLS_TKC 0x1UL - #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_KTLS_RKC 0x2UL - #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_QUIC_TKC 0x4UL - #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_QUIC_RKC 0x8UL + #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_TX_CK 0x1UL + #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_RX_CK 0x2UL u8 device_serial_number[8]; __le16 ctxs_per_partition; u8 unused_2[2]; @@ -1844,6 +1875,7 @@ struct hwrm_func_qcfg_output { #define FUNC_QCFG_RESP_FLAGS_FAST_RESET_ALLOWED 0x1000UL #define FUNC_QCFG_RESP_FLAGS_MULTI_ROOT 0x2000UL #define FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV 0x4000UL + #define FUNC_QCFG_RESP_FLAGS_ROCE_VNIC_ID_VALID 0x8000UL u8 mac_address[6]; __le16 pci_id; __le16 alloc_rsscos_ctx; @@ -1955,7 +1987,7 @@ struct hwrm_func_qcfg_output { #define FUNC_QCFG_RESP_DB_PAGE_SIZE_2MB 0x9UL #define FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB 0xaUL #define FUNC_QCFG_RESP_DB_PAGE_SIZE_LAST FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB - u8 unused_2[2]; + __le16 roce_vnic_id; __le32 partition_min_bw; #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_SFT 0 @@ -2003,6 +2035,8 @@ struct hwrm_func_qcfg_output { __le32 roce_max_srq_per_vf; __le32 roce_max_gid_per_vf; __le16 xid_partition_cfg; + #define FUNC_QCFG_RESP_XID_PARTITION_CFG_TX_CK 0x1UL + #define FUNC_QCFG_RESP_XID_PARTITION_CFG_RX_CK 0x2UL u8 unused_7; u8 valid; }; @@ -2229,10 +2263,8 @@ struct hwrm_func_cfg_input { __le32 roce_max_srq_per_vf; __le32 roce_max_gid_per_vf; __le16 xid_partition_cfg; - #define FUNC_CFG_REQ_XID_PARTITION_CFG_KTLS_TKC 0x1UL - #define FUNC_CFG_REQ_XID_PARTITION_CFG_KTLS_RKC 0x2UL - #define FUNC_CFG_REQ_XID_PARTITION_CFG_QUIC_TKC 0x4UL - #define FUNC_CFG_REQ_XID_PARTITION_CFG_QUIC_RKC 0x8UL + #define FUNC_CFG_REQ_XID_PARTITION_CFG_TX_CK 0x1UL + #define FUNC_CFG_REQ_XID_PARTITION_CFG_RX_CK 0x2UL __le16 unused_2; }; @@ -2416,6 +2448,7 @@ struct hwrm_func_drv_rgtr_input { #define FUNC_DRV_RGTR_REQ_FLAGS_RSS_STRICT_HASH_TYPE_SUPPORT 0x100UL #define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT 0x200UL #define FUNC_DRV_RGTR_REQ_FLAGS_ASYM_QUEUE_CFG_SUPPORT 0x400UL + #define 
FUNC_DRV_RGTR_REQ_FLAGS_TF_INGRESS_NIC_FLOW_MODE 0x800UL __le32 enables; #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL @@ -3636,19 +3669,22 @@ struct hwrm_func_backing_store_cfg_v2_input { #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TKC 0x13UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RKC 0x14UL #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL - #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT_TRACE 0x20UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID __le16 instance; __le32 flags; #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_PREBOOT_MODE 0x1UL @@ -3707,17 +3743,22 @@ struct hwrm_func_backing_store_qcfg_v2_input { #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MRAV 0xeUL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TIM 0xfUL - #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TKC 0x13UL - #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RKC 0x14UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TX_CK 0x13UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RX_CK 0x14UL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL - #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL - #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_XID_PARTITION_TABLE 0x1dUL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CRT_TRACE 0x20UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST 
FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID __le16 instance; @@ -3740,15 +3781,18 @@ struct hwrm_func_backing_store_qcfg_v2_output { #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TKC 0x13UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RKC 0x14UL #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_TKC 0x1aUL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_RKC 0x1bUL #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL - #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT_TRACE 0x1eUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT2_TRACE 0x1fUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT_TRACE 0x20UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT2_TRACE 0x21UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP0_TRACE 0x22UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID __le16 instance; __le32 flags; __le64 page_dir; @@ -3841,19 +3885,22 @@ struct hwrm_func_backing_store_qcaps_v2_input { #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_KTLS_TKC 0x13UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_KTLS_RKC 0x14UL #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_TKC 0x1aUL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_RKC 0x1bUL #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL - #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE 0x1eUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE 0x1fUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE 0x20UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE 0x21UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE 0x22UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID u8 rsvd[6]; }; @@ -3873,19 +3920,22 @@ struct hwrm_func_backing_store_qcaps_v2_output { #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL #define 
FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_KTLS_TKC 0x13UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_KTLS_RKC 0x14UL #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QUIC_TKC 0x1aUL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QUIC_RKC 0x1bUL #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL - #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT_TRACE 0x1eUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT2_TRACE 0x1fUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT_TRACE 0x20UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT2_TRACE 0x21UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP0_TRACE 0x22UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID __le16 entry_size; __le32 flags; #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL @@ -3990,6 +4040,7 @@ struct hwrm_func_drv_if_change_output { __le32 flags; #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE 0x2UL + #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE 0x4UL u8 unused_0[3]; u8 valid; }; @@ -4472,7 +4523,11 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24) #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24) #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24) - #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPDD (0x18UL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP112 (0x1eUL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFPDD (0x1fUL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_CSFP (0x20UL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_CSFP __le16 fec_cfg; #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL @@ -7380,7 +7435,7 @@ struct hwrm_cfa_l2_filter_free_output { u8 valid; }; -/* hwrm_cfa_l2_filter_cfg_input (size:320b/40B) */ +/* hwrm_cfa_l2_filter_cfg_input (size:384b/48B) */ struct hwrm_cfa_l2_filter_cfg_input { __le16 req_type; __le16 cmpl_ring; @@ -7399,12 +7454,22 @@ struct hwrm_cfa_l2_filter_cfg_input { #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2) #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2) #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE + #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_MASK 0x30UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_SFT 4 + #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_NO_UPDATE (0x0UL << 4) + 
#define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_BYPASS_LKUP (0x1UL << 4) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP (0x2UL << 4) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP __le32 enables; #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL + #define CFA_L2_FILTER_CFG_REQ_ENABLES_PROF_FUNC 0x4UL + #define CFA_L2_FILTER_CFG_REQ_ENABLES_L2_CONTEXT_ID 0x8UL __le64 l2_filter_id; __le32 dst_id; __le32 new_mirror_vnic_id; + __le32 prof_func; + __le32 l2_context_id; }; /* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */ @@ -8466,7 +8531,15 @@ struct hwrm_tunnel_dst_port_query_input { #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE 0x11UL - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 u8 tunnel_next_proto; u8 unused_0[6]; }; @@ -8514,7 +8587,15 @@ struct hwrm_tunnel_dst_port_alloc_input { #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 u8 tunnel_next_proto; __be16 tunnel_dst_port_val; u8 unused_0[4]; @@ -8565,7 +8646,15 @@ struct hwrm_tunnel_dst_port_free_input { #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL + #define 
TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 u8 tunnel_next_proto; __le16 tunnel_dst_port_id; u8 unused_0[4]; @@ -8860,7 +8949,7 @@ struct hwrm_stat_generic_qstats_output { u8 valid; }; -/* generic_sw_hw_stats (size:1408b/176B) */ +/* generic_sw_hw_stats (size:1472b/184B) */ struct generic_sw_hw_stats { __le64 pcie_statistics_tx_tlp; __le64 pcie_statistics_rx_tlp; @@ -8884,6 +8973,7 @@ struct generic_sw_hw_stats { __le64 hw_db_recov_dbs_dropped; __le64 hw_db_recov_drops_serviced; __le64 hw_db_recov_dbs_recovered; + __le64 hw_db_recov_oo_drop_count; }; /* hwrm_fw_reset_input (size:192b/24B) */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index cc07660330f5..e661ab154d6b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -109,7 +109,8 @@ static void bnxt_ptp_get_current_time(struct bnxt *bp) spin_unlock_bh(&ptp->ptp_lock); } -static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts) +static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts, + u32 txts_tmo) { struct hwrm_port_ts_query_output *resp; struct hwrm_port_ts_query_input *req; @@ -122,10 +123,15 @@ static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts) req->flags = cpu_to_le32(flags); if ((flags & PORT_TS_QUERY_REQ_FLAGS_PATH) == PORT_TS_QUERY_REQ_FLAGS_PATH_TX) { + u32 tmo_us = txts_tmo * 1000; + req->enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES); req->ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid); req->ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off); - req->ts_req_timeout = cpu_to_le16(BNXT_PTP_QTS_TIMEOUT); + if (!tmo_us) + tmo_us = BNXT_PTP_QTS_TIMEOUT; + tmo_us = min(tmo_us, BNXT_PTP_QTS_MAX_TMO_US); + req->ts_req_timeout = cpu_to_le16(tmo_us); } resp = hwrm_req_hold(bp, req); @@ -672,10 +678,17 @@ static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; struct skb_shared_hwtstamps timestamp; + unsigned long now = jiffies; u64 ts = 0, ns = 0; + u32 tmo = 0; int rc; - rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_PATH_TX, &ts); + if (!ptp->txts_pending) + ptp->abs_txts_tmo = now + msecs_to_jiffies(ptp->txts_tmo); + if (!time_after_eq(now, ptp->abs_txts_tmo)) + tmo = jiffies_to_msecs(ptp->abs_txts_tmo - now); + rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_PATH_TX, &ts, + tmo); if (!rc) { memset(×tamp, 0, sizeof(timestamp)); spin_lock_bh(&ptp->ptp_lock); @@ -684,6 +697,10 @@ static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb) timestamp.hwtstamp = ns_to_ktime(ns); skb_tstamp_tx(ptp->tx_skb, ×tamp); } else { + if (!time_after_eq(jiffies, ptp->abs_txts_tmo)) { + ptp->txts_pending = true; + return; + } netdev_warn_once(bp->dev, "TS query for TX timer failed rc = %x\n", rc); } @@ -691,6 +708,7 @@ static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb) dev_kfree_skb_any(ptp->tx_skb); ptp->tx_skb = NULL; atomic_inc(&ptp->tx_avail); + ptp->txts_pending = false; } static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info) @@ -714,6 +732,8 @@ static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info) spin_unlock_bh(&ptp->ptp_lock); ptp->next_overflow_check = now + 
BNXT_PHC_OVERFLOW_PERIOD; } + if (ptp->txts_pending) + return 0; return HZ; } @@ -891,7 +911,8 @@ int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg) if (rc) return rc; } else { - rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME, &ns); + rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME, + &ns, 0); if (rc) return rc; } @@ -965,6 +986,7 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg) spin_unlock_bh(&ptp->ptp_lock); ptp_schedule_worker(ptp->ptp_clock, 0); } + ptp->txts_tmo = BNXT_PTP_DFLT_TX_TMO; return 0; out: diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h index fce8dc39a7d0..2c3415c8fc03 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h @@ -22,7 +22,9 @@ #define BNXT_LO_TIMER_MASK 0x0000ffffffffUL #define BNXT_HI_TIMER_MASK 0xffff00000000UL +#define BNXT_PTP_DFLT_TX_TMO 1000 /* ms */ #define BNXT_PTP_QTS_TIMEOUT 1000 +#define BNXT_PTP_QTS_MAX_TMO_US 65535U #define BNXT_PTP_QTS_TX_ENABLES (PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID | \ PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT | \ PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET) @@ -115,11 +117,14 @@ struct bnxt_ptp_cfg { BNXT_PTP_MSG_PDELAY_REQ | \ BNXT_PTP_MSG_PDELAY_RESP) u8 tx_tstamp_en:1; + u8 txts_pending:1; int rx_filter; u32 tstamp_filters; u32 refclk_regs[2]; u32 refclk_mapped_regs[2]; + u32 txts_tmo; + unsigned long abs_txts_tmo; }; #if BITS_PER_LONG == 32 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 273c9ba48f09..d2ca90407cce 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -370,6 +370,7 @@ static int bnxt_tc_parse_flow(struct bnxt *bp, struct bnxt_tc_flow *flow) { struct flow_rule *rule = flow_cls_offload_flow_rule(tc_flow_cmd); + struct netlink_ext_ack *extack = tc_flow_cmd->common.extack; struct flow_dissector *dissector = rule->match.dissector; /* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */ @@ -380,6 +381,9 @@ static int bnxt_tc_parse_flow(struct bnxt *bp, return -EOPNOTSUPP; } + if (flow_rule_match_has_control_flags(rule, extack)) + return -EOPNOTSUPP; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { struct flow_match_basic match; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index 195c02dc0683..ba3fa1c2e5d9 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -31,21 +31,74 @@ static DEFINE_IDA(bnxt_aux_dev_ids); static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent) { struct bnxt_en_dev *edev = bp->edev; - int num_msix, idx, i; + int num_msix, i; if (!edev->ulp_tbl->msix_requested) { netdev_warn(bp->dev, "Requested MSI-X vectors insufficient\n"); return; } num_msix = edev->ulp_tbl->msix_requested; - idx = edev->ulp_tbl->msix_base; for (i = 0; i < num_msix; i++) { - ent[i].vector = bp->irq_tbl[idx + i].vector; - ent[i].ring_idx = idx + i; + ent[i].vector = bp->irq_tbl[i].vector; + ent[i].ring_idx = i; if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ent[i].db_offset = bp->db_offset; else - ent[i].db_offset = (idx + i) * 0x80; + ent[i].db_offset = i * 0x80; + } +} + +int bnxt_get_ulp_msix_num(struct bnxt *bp) +{ + if (bp->edev) + return bp->edev->ulp_num_msix_vec; + return 0; +} + +void bnxt_set_ulp_msix_num(struct bnxt *bp, int num) +{ + if (bp->edev) + 
bp->edev->ulp_num_msix_vec = num; +} + +int bnxt_get_ulp_msix_num_in_use(struct bnxt *bp) +{ + if (bnxt_ulp_registered(bp->edev)) + return bp->edev->ulp_num_msix_vec; + return 0; +} + +int bnxt_get_ulp_stat_ctxs(struct bnxt *bp) +{ + if (bp->edev) + return bp->edev->ulp_num_ctxs; + return 0; +} + +void bnxt_set_ulp_stat_ctxs(struct bnxt *bp, int num_ulp_ctx) +{ + if (bp->edev) + bp->edev->ulp_num_ctxs = num_ulp_ctx; +} + +int bnxt_get_ulp_stat_ctxs_in_use(struct bnxt *bp) +{ + if (bnxt_ulp_registered(bp->edev)) + return bp->edev->ulp_num_ctxs; + return 0; +} + +void bnxt_set_dflt_ulp_stat_ctxs(struct bnxt *bp) +{ + if (bp->edev) { + bp->edev->ulp_num_ctxs = BNXT_MIN_ROCE_STAT_CTXS; + /* Reserve one additional stat_ctx for PF0 (except + * on 1-port NICs) as it also creates one stat_ctx + * for PF1 in case of RoCE bonding. + */ + if (BNXT_PF(bp) && !bp->pf.port_id && + bp->port_count > 1) + bp->edev->ulp_num_ctxs++; } } @@ -57,25 +110,36 @@ int bnxt_register_dev(struct bnxt_en_dev *edev, struct bnxt *bp = netdev_priv(dev); unsigned int max_stat_ctxs; struct bnxt_ulp *ulp; + int rc = 0; + rtnl_lock(); + mutex_lock(&edev->en_dev_lock); + if (!bp->irq_tbl) { + rc = -ENODEV; + goto exit; + } max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp); if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS || - bp->cp_nr_rings == max_stat_ctxs) - return -ENOMEM; + bp->cp_nr_rings == max_stat_ctxs) { + rc = -ENOMEM; + goto exit; + } ulp = edev->ulp_tbl; - if (!ulp) - return -ENOMEM; - ulp->handle = handle; rcu_assign_pointer(ulp->ulp_ops, ulp_ops); if (test_bit(BNXT_STATE_OPEN, &bp->state)) - bnxt_hwrm_vnic_cfg(bp, 0); + bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[BNXT_VNIC_DEFAULT]); + + edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp); bnxt_fill_msix_vecs(bp, bp->edev->msix_entries); edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED; - return 0; +exit: + mutex_unlock(&edev->en_dev_lock); + rtnl_unlock(); + return rc; } EXPORT_SYMBOL(bnxt_register_dev); @@ -87,8 +151,11 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev) int i = 0; ulp = edev->ulp_tbl; + rtnl_lock(); + mutex_lock(&edev->en_dev_lock); if (ulp->msix_requested) edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED; + edev->ulp_tbl->msix_requested = 0; if (ulp->max_async_event_id) bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true); @@ -101,11 +168,13 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev) msleep(100); i++; } + mutex_unlock(&edev->en_dev_lock); + rtnl_unlock(); return; } EXPORT_SYMBOL(bnxt_unregister_dev); -int bnxt_get_ulp_msix_num(struct bnxt *bp) +static int bnxt_set_dflt_ulp_msix(struct bnxt *bp) { u32 roce_msix = BNXT_VF(bp) ? 
BNXT_MAX_VF_ROCE_MSIX : BNXT_MAX_ROCE_MSIX; @@ -114,29 +183,6 @@ int bnxt_get_ulp_msix_num(struct bnxt *bp) min_t(u32, roce_msix, num_online_cpus()) : 0); } -int bnxt_get_ulp_msix_base(struct bnxt *bp) -{ - if (bnxt_ulp_registered(bp->edev)) { - struct bnxt_en_dev *edev = bp->edev; - - if (edev->ulp_tbl->msix_requested) - return edev->ulp_tbl->msix_base; - } - return 0; -} - -int bnxt_get_ulp_stat_ctxs(struct bnxt *bp) -{ - if (bnxt_ulp_registered(bp->edev)) { - struct bnxt_en_dev *edev = bp->edev; - - if (edev->ulp_tbl->msix_requested) - return BNXT_MIN_ROCE_STAT_CTXS; - } - - return 0; -} - int bnxt_send_msg(struct bnxt_en_dev *edev, struct bnxt_fw_msg *fw_msg) { @@ -181,6 +227,12 @@ void bnxt_ulp_stop(struct bnxt *bp) if (!edev) return; + mutex_lock(&edev->en_dev_lock); + if (!bnxt_ulp_registered(edev)) { + mutex_unlock(&edev->en_dev_lock); + return; + } + edev->flags |= BNXT_EN_FLAG_ULP_STOPPED; if (aux_priv) { struct auxiliary_device *adev; @@ -195,6 +247,7 @@ void bnxt_ulp_stop(struct bnxt *bp) adrv->suspend(adev, pm); } } + mutex_unlock(&edev->en_dev_lock); } void bnxt_ulp_start(struct bnxt *bp, int err) @@ -210,6 +263,12 @@ void bnxt_ulp_start(struct bnxt *bp, int err) if (err) return; + mutex_lock(&edev->en_dev_lock); + if (!bnxt_ulp_registered(edev)) { + mutex_unlock(&edev->en_dev_lock); + return; + } + if (edev->ulp_tbl->msix_requested) bnxt_fill_msix_vecs(bp, edev->msix_entries); @@ -225,7 +284,7 @@ void bnxt_ulp_start(struct bnxt *bp, int err) adrv->resume(adev); } } - + mutex_unlock(&edev->en_dev_lock); } void bnxt_ulp_irq_stop(struct bnxt *bp) @@ -309,7 +368,6 @@ void bnxt_rdma_aux_device_uninit(struct bnxt *bp) aux_priv = bp->aux_priv; adev = &aux_priv->aux_dev; - auxiliary_device_delete(adev); auxiliary_device_uninit(adev); } @@ -327,6 +385,14 @@ static void bnxt_aux_dev_release(struct device *dev) bp->aux_priv = NULL; } +void bnxt_rdma_aux_device_del(struct bnxt *bp) +{ + if (!bp->edev) + return; + + auxiliary_device_delete(&bp->aux_priv->aux_dev); +} + static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp) { edev->net = bp->dev; @@ -334,6 +400,7 @@ static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp) edev->l2_db_size = bp->db_size; edev->l2_db_size_nc = bp->db_size; edev->l2_db_offset = bp->db_offset; + mutex_init(&edev->en_dev_lock); if (bp->flags & BNXT_FLAG_ROCEV1_CAP) edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP; @@ -347,7 +414,23 @@ static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp) edev->pf_port_id = bp->pf.port_id; edev->en_state = bp->state; edev->bar0 = bp->bar0; - edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp); +} + +void bnxt_rdma_aux_device_add(struct bnxt *bp) +{ + struct auxiliary_device *aux_dev; + int rc; + + if (!bp->edev) + return; + + aux_dev = &bp->aux_priv->aux_dev; + rc = auxiliary_device_add(aux_dev); + if (rc) { + netdev_warn(bp->dev, "Failed to add auxiliary device for ROCE\n"); + auxiliary_device_uninit(aux_dev); + bp->flags &= ~BNXT_FLAG_ROCE_CAP; + } } void bnxt_rdma_aux_device_init(struct bnxt *bp) @@ -404,13 +487,7 @@ void bnxt_rdma_aux_device_init(struct bnxt *bp) edev->ulp_tbl = ulp; bp->edev = edev; bnxt_set_edev_info(edev, bp); - - rc = auxiliary_device_add(aux_dev); - if (rc) { - netdev_warn(bp->dev, - "Failed to add auxiliary device for ROCE\n"); - goto aux_dev_uninit; - } + bp->ulp_num_msix_want = bnxt_set_dflt_ulp_msix(bp); return; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h index 
b9e73de14b57..4eafe6ec0abf 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h @@ -46,7 +46,6 @@ struct bnxt_ulp { unsigned long *async_events_bmap; u16 max_async_event_id; u16 msix_requested; - u16 msix_base; atomic_t ref_count; }; @@ -86,18 +85,28 @@ struct bnxt_en_dev { * updated in resume. */ void __iomem *bar0; + + u16 ulp_num_msix_vec; + u16 ulp_num_ctxs; + + /* serialize ulp operations */ + struct mutex en_dev_lock; }; static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev) { - if (edev && edev->ulp_tbl) + if (edev && rcu_access_pointer(edev->ulp_tbl->ulp_ops)) return true; return false; } int bnxt_get_ulp_msix_num(struct bnxt *bp); -int bnxt_get_ulp_msix_base(struct bnxt *bp); +int bnxt_get_ulp_msix_num_in_use(struct bnxt *bp); +void bnxt_set_ulp_msix_num(struct bnxt *bp, int num); int bnxt_get_ulp_stat_ctxs(struct bnxt *bp); +void bnxt_set_ulp_stat_ctxs(struct bnxt *bp, int num_ctxs); +int bnxt_get_ulp_stat_ctxs_in_use(struct bnxt *bp); +void bnxt_set_dflt_ulp_stat_ctxs(struct bnxt *bp); void bnxt_ulp_stop(struct bnxt *bp); void bnxt_ulp_start(struct bnxt *bp, int err); void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs); @@ -105,6 +114,8 @@ void bnxt_ulp_irq_stop(struct bnxt *bp); void bnxt_ulp_irq_restart(struct bnxt *bp, int err); void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl); void bnxt_rdma_aux_device_uninit(struct bnxt *bp); +void bnxt_rdma_aux_device_del(struct bnxt *bp); +void bnxt_rdma_aux_device_add(struct bnxt *bp); void bnxt_rdma_aux_device_init(struct bnxt *bp); int bnxt_register_dev(struct bnxt_en_dev *edev, struct bnxt_ulp_ops *ulp_ops, void *handle); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index 4079538bc310..345681d5007e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -197,7 +197,7 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, dma_sync_single_for_cpu(&pdev->dev, mapping + offset, len, bp->rx_dir); xdp_init_buff(xdp, buflen, &rxr->xdp_rxq); - xdp_prepare_buff(xdp, data_ptr - offset, offset, len, false); + xdp_prepare_buff(xdp, data_ptr - offset, offset, len, true); } void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr, @@ -222,7 +222,7 @@ void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr, * false - packet should be passed to the stack. 
*/ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, - struct xdp_buff xdp, struct page *page, u8 **data_ptr, + struct xdp_buff *xdp, struct page *page, u8 **data_ptr, unsigned int *len, u8 *event) { struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog); @@ -244,9 +244,9 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, txr = rxr->bnapi->tx_ring[0]; /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */ - orig_data = xdp.data; + orig_data = xdp->data; - act = bpf_prog_run_xdp(xdp_prog, &xdp); + act = bpf_prog_run_xdp(xdp_prog, xdp); tx_avail = bnxt_tx_avail(bp, txr); /* If the tx ring is not full, we must not update the rx producer yet @@ -255,10 +255,10 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, if (tx_avail != bp->tx_ring_size) *event &= ~BNXT_RX_EVENT; - *len = xdp.data_end - xdp.data; - if (orig_data != xdp.data) { - offset = xdp.data - xdp.data_hard_start; - *data_ptr = xdp.data_hard_start + offset; + *len = xdp->data_end - xdp->data; + if (orig_data != xdp->data) { + offset = xdp->data - xdp->data_hard_start; + *data_ptr = xdp->data_hard_start + offset; } switch (act) { @@ -270,8 +270,8 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, mapping = rx_buf->mapping - bp->rx_dma_offset; *event &= BNXT_TX_CMP_EVENT; - if (unlikely(xdp_buff_has_frags(&xdp))) { - struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(&xdp); + if (unlikely(xdp_buff_has_frags(xdp))) { + struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); tx_needed += sinfo->nr_frags; *event = BNXT_AGG_EVENT; @@ -279,7 +279,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, if (tx_avail < tx_needed) { trace_xdp_exception(bp->dev, xdp_prog, act); - bnxt_xdp_buff_frags_free(rxr, &xdp); + bnxt_xdp_buff_frags_free(rxr, xdp); bnxt_reuse_rx_data(rxr, cons, page); return true; } @@ -289,7 +289,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, *event |= BNXT_TX_EVENT; __bnxt_xmit_xdp(bp, txr, mapping + offset, *len, - NEXT_RX(rxr->rx_prod), &xdp); + NEXT_RX(rxr->rx_prod), xdp); bnxt_reuse_rx_data(rxr, cons, page); return true; case XDP_REDIRECT: @@ -306,12 +306,12 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, /* if we are unable to allocate a new buffer, abort and reuse */ if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) { trace_xdp_exception(bp->dev, xdp_prog, act); - bnxt_xdp_buff_frags_free(rxr, &xdp); + bnxt_xdp_buff_frags_free(rxr, xdp); bnxt_reuse_rx_data(rxr, cons, page); return true; } - if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) { + if (xdp_do_redirect(bp->dev, xdp, xdp_prog)) { trace_xdp_exception(bp->dev, xdp_prog, act); page_pool_recycle_direct(rxr->page_pool, page); return true; @@ -326,7 +326,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, trace_xdp_exception(bp->dev, xdp_prog, act); fallthrough; case XDP_DROP: - bnxt_xdp_buff_frags_free(rxr, &xdp); + bnxt_xdp_buff_frags_free(rxr, xdp); bnxt_reuse_rx_data(rxr, cons, page); break; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h index 5e412c5655ba..0122782400b8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h @@ -18,7 +18,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp, struct xdp_buff *xdp); void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget); bool 
bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, - struct xdp_buff xdp, struct page *page, u8 **data_ptr, + struct xdp_buff *xdp, struct page *page, u8 **data_ptr, unsigned int *len, u8 *event); int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp); int bnxt_xdp_xmit(struct net_device *dev, int num_frames, diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 3d63177e7e52..c2b4188a1ef1 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -3682,7 +3682,8 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr, #if defined(CONFIG_INET) struct rtable *rt; - rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0); + rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0, + RT_SCOPE_UNIVERSE); if (!IS_ERR(rt)) { *dst = &rt->dst; return 0; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index b1f84b37032a..c7e7dac057a3 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2,7 +2,7 @@ /* * Broadcom GENET (Gigabit Ethernet) controller driver * - * Copyright (c) 2014-2020 Broadcom + * Copyright (c) 2014-2024 Broadcom */ #define pr_fmt(fmt) "bcmgenet: " fmt @@ -2467,14 +2467,18 @@ static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable) { u32 reg; + spin_lock_bh(&priv->reg_lock); reg = bcmgenet_umac_readl(priv, UMAC_CMD); - if (reg & CMD_SW_RESET) + if (reg & CMD_SW_RESET) { + spin_unlock_bh(&priv->reg_lock); return; + } if (enable) reg |= mask; else reg &= ~mask; bcmgenet_umac_writel(priv, reg, UMAC_CMD); + spin_unlock_bh(&priv->reg_lock); /* UniMAC stops on a packet boundary, wait for a full-size packet * to be processed @@ -2490,8 +2494,10 @@ static void reset_umac(struct bcmgenet_priv *priv) udelay(10); /* issue soft reset and disable MAC while updating its registers */ + spin_lock_bh(&priv->reg_lock); bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD); udelay(2); + spin_unlock_bh(&priv->reg_lock); } static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) @@ -3334,7 +3340,9 @@ static void bcmgenet_netif_start(struct net_device *dev) struct bcmgenet_priv *priv = netdev_priv(dev); /* Start the network engine */ + netif_addr_lock_bh(dev); bcmgenet_set_rx_mode(dev); + netif_addr_unlock_bh(dev); bcmgenet_enable_rx_napi(priv); umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true); @@ -3595,16 +3603,19 @@ static void bcmgenet_set_rx_mode(struct net_device *dev) * 3. The number of filters needed exceeds the number filters * supported by the hardware. 
*/ + spin_lock(&priv->reg_lock); reg = bcmgenet_umac_readl(priv, UMAC_CMD); if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) || (nfilter > MAX_MDF_FILTER)) { reg |= CMD_PROMISC; bcmgenet_umac_writel(priv, reg, UMAC_CMD); + spin_unlock(&priv->reg_lock); bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL); return; } else { reg &= ~CMD_PROMISC; bcmgenet_umac_writel(priv, reg, UMAC_CMD); + spin_unlock(&priv->reg_lock); } /* update MDF filter */ @@ -4003,6 +4014,7 @@ static int bcmgenet_probe(struct platform_device *pdev) goto err; } + spin_lock_init(&priv->reg_lock); spin_lock_init(&priv->lock); /* Set default pause parameters */ diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 7523b60b3c1c..43b923c48b14 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2014-2020 Broadcom + * Copyright (c) 2014-2024 Broadcom */ #ifndef __BCMGENET_H__ @@ -573,6 +573,8 @@ struct bcmgenet_rxnfc_rule { /* device context */ struct bcmgenet_priv { void __iomem *base; + /* reg_lock: lock to serialize access to shared registers */ + spinlock_t reg_lock; enum bcmgenet_version version; struct net_device *dev; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index 7a41cad5788f..1248792d7fd4 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c @@ -2,7 +2,7 @@ /* * Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support * - * Copyright (c) 2014-2020 Broadcom + * Copyright (c) 2014-2024 Broadcom */ #define pr_fmt(fmt) "bcmgenet_wol: " fmt @@ -151,6 +151,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, } /* Can't suspend with WoL if MAC is still in reset */ + spin_lock_bh(&priv->reg_lock); reg = bcmgenet_umac_readl(priv, UMAC_CMD); if (reg & CMD_SW_RESET) reg &= ~CMD_SW_RESET; @@ -158,6 +159,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, /* disable RX */ reg &= ~CMD_RX_EN; bcmgenet_umac_writel(priv, reg, UMAC_CMD); + spin_unlock_bh(&priv->reg_lock); mdelay(10); if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) { @@ -203,6 +205,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, } /* Enable CRC forward */ + spin_lock_bh(&priv->reg_lock); reg = bcmgenet_umac_readl(priv, UMAC_CMD); priv->crc_fwd_en = 1; reg |= CMD_CRC_FWD; @@ -210,6 +213,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, /* Receiver must be enabled for WOL MP detection */ reg |= CMD_RX_EN; bcmgenet_umac_writel(priv, reg, UMAC_CMD); + spin_unlock_bh(&priv->reg_lock); reg = UMAC_IRQ_MPD_R; if (hfb_enable) @@ -256,7 +260,9 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, } /* Disable CRC Forward */ + spin_lock_bh(&priv->reg_lock); reg = bcmgenet_umac_readl(priv, UMAC_CMD); reg &= ~CMD_CRC_FWD; bcmgenet_umac_writel(priv, reg, UMAC_CMD); + spin_unlock_bh(&priv->reg_lock); } diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 9ada89355747..c4a3698cef66 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -2,7 +2,7 @@ /* * Broadcom GENET MDIO routines * - * Copyright (c) 2014-2017 Broadcom + * Copyright (c) 2014-2024 Broadcom */ #include <linux/acpi.h> @@ -76,6 +76,7 @@ static void bcmgenet_mac_config(struct net_device *dev) reg |= RGMII_LINK; 
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); + spin_lock_bh(&priv->reg_lock); reg = bcmgenet_umac_readl(priv, UMAC_CMD); reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) | CMD_HD_EN | @@ -88,6 +89,7 @@ static void bcmgenet_mac_config(struct net_device *dev) reg |= CMD_TX_EN | CMD_RX_EN; } bcmgenet_umac_writel(priv, reg, UMAC_CMD); + spin_unlock_bh(&priv->reg_lock); active = phy_init_eee(phydev, 0) >= 0; bcmgenet_eee_enable_set(dev, @@ -275,6 +277,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) * block for the interface to work, unconditionally clear the * Out-of-band disable since we do not need it. */ + mutex_lock(&phydev->lock); reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); reg &= ~OOB_DISABLE; if (priv->ext_phy) { @@ -286,6 +289,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) reg |= RGMII_MODE_EN; } bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); + mutex_unlock(&phydev->lock); if (init) dev_info(kdev, "configuring instance for %s\n", phy_name); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 62ff4381ac83..1589a49b876c 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -4019,7 +4019,7 @@ static int tg3_power_up(struct tg3 *tp) static int tg3_setup_phy(struct tg3 *, bool); -static int tg3_power_down_prepare(struct tg3 *tp) +static void tg3_power_down_prepare(struct tg3 *tp) { u32 misc_host_ctrl; bool device_should_wake, do_low_power; @@ -4263,7 +4263,7 @@ static int tg3_power_down_prepare(struct tg3 *tp) tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN); - return 0; + return; } static void tg3_power_down(struct tg3 *tp) @@ -14295,7 +14295,7 @@ static void tg3_set_rx_mode(struct net_device *dev) static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, int new_mtu) { - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); if (new_mtu > ETH_DATA_LEN) { if (tg3_flag(tp, 5780_CLASS)) { @@ -18084,7 +18084,6 @@ static int tg3_suspend(struct device *device) { struct net_device *dev = dev_get_drvdata(device); struct tg3 *tp = netdev_priv(dev); - int err = 0; rtnl_lock(); @@ -18108,32 +18107,11 @@ static int tg3_suspend(struct device *device) tg3_flag_clear(tp, INIT_COMPLETE); tg3_full_unlock(tp); - err = tg3_power_down_prepare(tp); - if (err) { - int err2; - - tg3_full_lock(tp, 0); - - tg3_flag_set(tp, INIT_COMPLETE); - err2 = tg3_restart_hw(tp, true); - if (err2) - goto out; - - tg3_timer_start(tp); - - netif_device_attach(dev); - tg3_netif_start(tp); - -out: - tg3_full_unlock(tp); - - if (!err2) - tg3_phy_start(tp); - } + tg3_power_down_prepare(tp); unlock: rtnl_unlock(); - return err; + return 0; } static int tg3_resume(struct device *device) diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index c32174484a96..fe121d36112d 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -3276,7 +3276,7 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu) mutex_lock(&bnad->conf_mutex); mtu = netdev->mtu; - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); frame = BNAD_FRAME_SIZE(mtu); new_frame = BNAD_FRAME_SIZE(new_mtu); diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c index 7246e13dd559..97291bfbeea5 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c +++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c @@ -312,7 +312,7 @@ bnad_debugfs_write_regrd(struct file *file, const char 
__user *buf, void *kern_buf; /* Copy the user space buf */ - kern_buf = memdup_user(buf, nbytes); + kern_buf = memdup_user_nul(buf, nbytes); if (IS_ERR(kern_buf)) return PTR_ERR(kern_buf); @@ -372,7 +372,7 @@ bnad_debugfs_write_regwr(struct file *file, const char __user *buf, void *kern_buf; /* Copy the user space buf */ - kern_buf = memdup_user(buf, nbytes); + kern_buf = memdup_user_nul(buf, nbytes); if (IS_ERR(kern_buf)) return PTR_ERR(kern_buf); diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 898debfd4db3..241ce9a2fa99 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -3022,7 +3022,7 @@ static int macb_change_mtu(struct net_device *dev, int new_mtu) if (netif_running(dev)) return -EBUSY; - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return 0; } diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index 5e97f1e4e38e..a71b320fd030 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -1358,7 +1358,7 @@ static int xgmac_change_mtu(struct net_device *dev, int new_mtu) /* Bring interface down, change mtu and bring interface back up */ xgmac_stop(dev); - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return xgmac_open(dev); } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index f38d31bfab1b..674c54831875 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -1262,7 +1262,7 @@ int liquidio_change_mtu(struct net_device *netdev, int new_mtu) return -EINVAL; } - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); lio->mtu = new_mtu; WRITE_ONCE(sc->caller_is_done, true); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c index aa6c0dfb6f1c..96c6ea12279f 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c @@ -218,7 +218,7 @@ lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu) return -EIO; } - ndev->mtu = new_mtu; + WRITE_ONCE(ndev->mtu, new_mtu); return 0; } diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index 007d4b06819e..744f2434f7fa 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -649,7 +649,7 @@ static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu) struct octeon_mgmt *p = netdev_priv(netdev); int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN; - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); /* HW lifts the limit if the frame is VLAN tagged * (+4 bytes per each tag, up to two tags) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index eff350e0bc2a..aebb9fef3f6e 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -1589,7 +1589,7 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu) return -EINVAL; } - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); if (!netif_running(netdev)) return 0; diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c index d2286adf09fe..7d7d3e0098df 100644 --- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c +++ 
b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c @@ -844,7 +844,7 @@ static int t1_change_mtu(struct net_device *dev, int new_mtu) return -EOPNOTSUPP; if ((ret = mac->ops->set_mtu(mac, new_mtu))) return ret; - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 2236f1d35f2b..f92a3550e480 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -2559,7 +2559,7 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu) if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu))) return ret; - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); init_port_mtus(adapter); if (adapter->params.rev == 0 && offload_running(adapter)) t3_load_mtus(adapter, adapter->params.mtus, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 2eb33a727bba..2418645c8823 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3180,7 +3180,7 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu) ret = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid, pi->viid_mirror, new_mtu, -1, -1, -1, -1, true); if (!ret) - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return ret; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 72ac4a34424b..69d045d769c4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -305,7 +305,7 @@ static void cxgb4_process_flow_match(struct net_device *dev, fs->mask.iport = ~0; } -static int cxgb4_validate_flow_match(struct net_device *dev, +static int cxgb4_validate_flow_match(struct netlink_ext_ack *extack, struct flow_rule *rule) { struct flow_dissector *dissector = rule->match.dissector; @@ -321,11 +321,15 @@ static int cxgb4_validate_flow_match(struct net_device *dev, BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) | BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | BIT_ULL(FLOW_DISSECTOR_KEY_IP))) { - netdev_warn(dev, "Unsupported key used: 0x%llx\n", - dissector->used_keys); + NL_SET_ERR_MSG_FMT_MOD(extack, + "Unsupported key used: 0x%llx", + dissector->used_keys); return -EOPNOTSUPP; } + if (flow_rule_match_has_control_flags(rule, extack)) + return -EOPNOTSUPP; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { struct flow_match_basic match; @@ -339,13 +343,15 @@ static int cxgb4_validate_flow_match(struct net_device *dev, struct flow_match_ip match; if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) { - netdev_err(dev, "IP Key supported only with IPv4/v6"); + NL_SET_ERR_MSG_MOD(extack, + "IP Key supported only with IPv4/v6"); return -EINVAL; } flow_rule_match_ip(rule, &match); if (match.mask->ttl) { - netdev_warn(dev, "ttl match unsupported for offload"); + NL_SET_ERR_MSG_MOD(extack, + "ttl match unsupported for offload"); return -EOPNOTSUPP; } } @@ -576,7 +582,7 @@ static bool valid_l4_mask(u32 mask) return hi && lo ? 
false : true; } -static bool valid_pedit_action(struct net_device *dev, +static bool valid_pedit_action(struct netlink_ext_ack *extack, const struct flow_action_entry *act, u8 *natmode_flags) { @@ -595,8 +601,7 @@ static bool valid_pedit_action(struct net_device *dev, case PEDIT_ETH_SMAC_47_16: break; default: - netdev_err(dev, "%s: Unsupported pedit field\n", - __func__); + NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit field"); return false; } break; @@ -609,8 +614,7 @@ static bool valid_pedit_action(struct net_device *dev, *natmode_flags |= CXGB4_ACTION_NATMODE_DIP; break; default: - netdev_err(dev, "%s: Unsupported pedit field\n", - __func__); + NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit field"); return false; } break; @@ -629,8 +633,7 @@ static bool valid_pedit_action(struct net_device *dev, *natmode_flags |= CXGB4_ACTION_NATMODE_DIP; break; default: - netdev_err(dev, "%s: Unsupported pedit field\n", - __func__); + NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit field"); return false; } break; @@ -638,8 +641,8 @@ static bool valid_pedit_action(struct net_device *dev, switch (offset) { case PEDIT_TCP_SPORT_DPORT: if (!valid_l4_mask(~mask)) { - netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n", - __func__); + NL_SET_ERR_MSG_MOD(extack, + "Unsupported mask for TCP L4 ports"); return false; } if (~mask & PEDIT_TCP_UDP_SPORT_MASK) @@ -648,8 +651,7 @@ static bool valid_pedit_action(struct net_device *dev, *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT; break; default: - netdev_err(dev, "%s: Unsupported pedit field\n", - __func__); + NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit field"); return false; } break; @@ -657,8 +659,8 @@ static bool valid_pedit_action(struct net_device *dev, switch (offset) { case PEDIT_UDP_SPORT_DPORT: if (!valid_l4_mask(~mask)) { - netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n", - __func__); + NL_SET_ERR_MSG_MOD(extack, + "Unsupported mask for UDP L4 ports"); return false; } if (~mask & PEDIT_TCP_UDP_SPORT_MASK) @@ -667,13 +669,12 @@ static bool valid_pedit_action(struct net_device *dev, *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT; break; default: - netdev_err(dev, "%s: Unsupported pedit field\n", - __func__); + NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit field"); return false; } break; default: - netdev_err(dev, "%s: Unsupported pedit type\n", __func__); + NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit type"); return false; } return true; @@ -727,8 +728,7 @@ int cxgb4_validate_flow_actions(struct net_device *dev, * the provided output port is not valid */ if (!found) { - netdev_err(dev, "%s: Out port invalid\n", - __func__); + NL_SET_ERR_MSG_MOD(extack, "Out port invalid"); return -EINVAL; } act_redir = true; @@ -745,21 +745,21 @@ int cxgb4_validate_flow_actions(struct net_device *dev, case FLOW_ACTION_VLAN_PUSH: case FLOW_ACTION_VLAN_MANGLE: if (proto != ETH_P_8021Q) { - netdev_err(dev, "%s: Unsupported vlan proto\n", - __func__); + NL_SET_ERR_MSG_MOD(extack, + "Unsupported vlan proto"); return -EOPNOTSUPP; } break; default: - netdev_err(dev, "%s: Unsupported vlan action\n", - __func__); + NL_SET_ERR_MSG_MOD(extack, + "Unsupported vlan action"); return -EOPNOTSUPP; } act_vlan = true; } break; case FLOW_ACTION_MANGLE: { - bool pedit_valid = valid_pedit_action(dev, act, + bool pedit_valid = valid_pedit_action(extack, act, &natmode_flags); if (!pedit_valid) @@ -771,14 +771,14 @@ int cxgb4_validate_flow_actions(struct net_device *dev, /* Do nothing. 
cxgb4_set_filter will validate */ break; default: - netdev_err(dev, "%s: Unsupported action\n", __func__); + NL_SET_ERR_MSG_MOD(extack, "Unsupported action"); return -EOPNOTSUPP; } } if ((act_pedit || act_vlan) && !act_redir) { - netdev_err(dev, "%s: pedit/vlan rewrite invalid without egress redirect\n", - __func__); + NL_SET_ERR_MSG_MOD(extack, + "pedit/vlan rewrite invalid without egress redirect"); return -EINVAL; } @@ -864,7 +864,7 @@ int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule, if (cxgb4_validate_flow_actions(dev, &rule->action, extack, 0)) return -EOPNOTSUPP; - if (cxgb4_validate_flow_match(dev, rule)) + if (cxgb4_validate_flow_match(extack, rule)) return -EOPNOTSUPP; cxgb4_process_flow_match(dev, rule, fs); @@ -901,8 +901,7 @@ int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule, init_completion(&ctx.completion); ret = __cxgb4_set_filter(dev, fidx, fs, &ctx); if (ret) { - netdev_err(dev, "%s: filter creation err %d\n", - __func__, ret); + NL_SET_ERR_MSG_FMT_MOD(extack, "filter creation err %d", ret); return ret; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index 17faac715882..5c13bcb4550d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c @@ -406,7 +406,7 @@ free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info) for (i = 0; i < nq; i++) { struct sge_uld_txq *txq = &txq_info->uldtxq[i]; - if (txq && txq->q.desc) { + if (txq->q.desc) { tasklet_kill(&txq->qresume_tsk); t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0, txq->q.cntxt_id); diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 49d5808b7d11..de52bcb884c4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2670,12 +2670,12 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev) lb->loopback = 1; q = &adap->sge.ethtxq[pi->first_qset]; - __netif_tx_lock(q->txq, smp_processor_id()); + __netif_tx_lock_bh(q->txq); reclaim_completed_tx(adap, &q->q, -1, true); credits = txq_avail(&q->q) - ndesc; if (unlikely(credits < 0)) { - __netif_tx_unlock(q->txq); + __netif_tx_unlock_bh(q->txq); return -ENOMEM; } @@ -2710,7 +2710,7 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev) init_completion(&lb->completion); txq_advance(&q->q, ndesc); cxgb4_ring_tx_db(adap, &q->q, ndesc); - __netif_tx_unlock(q->txq); + __netif_tx_unlock_bh(q->txq); /* wait for the pkt to return */ ret = wait_for_completion_timeout(&lb->completion, 10 * HZ); diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 9ba0864592e8..2fbe0f059a0b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -1169,7 +1169,7 @@ static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu) ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu, -1, -1, -1, -1, true); if (!ret) - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return ret; } diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c index 6482728794dd..e8e460a92e0e 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c @@ -10,6 +10,7 @@ #include <net/ipv6.h> #include <linux/netdevice.h> #include <crypto/aes.h> 
+#include <linux/skbuff_ref.h> #include "chcr_ktls.h" static LIST_HEAD(uld_ctx_list); diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index d266a87297a5..f604119efc80 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -2027,7 +2027,7 @@ static int _enic_change_mtu(struct net_device *netdev, int new_mtu) return err; } - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); if (running) { err = enic_open(netdev); diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c index 12a83fa1302d..9f6089e81608 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_dev.c +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c @@ -146,23 +146,19 @@ EXPORT_SYMBOL(vnic_dev_get_res); static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring, unsigned int desc_count, unsigned int desc_size) { - /* The base address of the desc rings must be 512 byte aligned. - * Descriptor count is aligned to groups of 32 descriptors. A - * count of 0 means the maximum 4096 descriptors. Descriptor - * size is aligned to 16 bytes. - */ - - unsigned int count_align = 32; - unsigned int desc_align = 16; - ring->base_align = 512; + /* Descriptor ring base address alignment in bytes*/ + ring->base_align = VNIC_DESC_BASE_ALIGN; + /* A count of 0 means the maximum descriptors */ if (desc_count == 0) - desc_count = 4096; + desc_count = VNIC_DESC_MAX_COUNT; - ring->desc_count = ALIGN(desc_count, count_align); + /* Descriptor count aligned in groups of VNIC_DESC_COUNT_ALIGN descriptors */ + ring->desc_count = ALIGN(desc_count, VNIC_DESC_COUNT_ALIGN); - ring->desc_size = ALIGN(desc_size, desc_align); + /* Descriptor size alignment in bytes */ + ring->desc_size = ALIGN(desc_size, VNIC_DESC_SIZE_ALIGN); ring->size = ring->desc_count * ring->desc_size; ring->size_unaligned = ring->size + ring->base_align; diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h index 6273794b923b..7fdd8c661c99 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_dev.h +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h @@ -31,6 +31,11 @@ static inline void writeq(u64 val, void __iomem *reg) #undef pr_fmt #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#define VNIC_DESC_SIZE_ALIGN 16 +#define VNIC_DESC_COUNT_ALIGN 32 +#define VNIC_DESC_BASE_ALIGN 512 +#define VNIC_DESC_MAX_COUNT 4096 + enum vnic_dev_intr_mode { VNIC_DEV_INTR_MODE_UNKNOWN, VNIC_DEV_INTR_MODE_INTX, diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 705c3eb19cd3..5f0c9e1771db 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -1107,10 +1107,13 @@ static void gmac_tx_irq_enable(struct net_device *netdev, { struct gemini_ethernet_port *port = netdev_priv(netdev); struct gemini_ethernet *geth = port->geth; + unsigned long flags; u32 val, mask; netdev_dbg(netdev, "%s device %d\n", __func__, netdev->dev_id); + spin_lock_irqsave(&geth->irq_lock, flags); + mask = GMAC0_IRQ0_TXQ0_INTS << (6 * netdev->dev_id + txq); if (en) @@ -1119,6 +1122,8 @@ static void gmac_tx_irq_enable(struct net_device *netdev, val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG); val = en ? 
val | mask : val & ~mask; writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG); + + spin_unlock_irqrestore(&geth->irq_lock, flags); } static void gmac_tx_irq(struct net_device *netdev, unsigned int txq_num) @@ -1415,15 +1420,19 @@ static unsigned int gmac_rx(struct net_device *netdev, unsigned int budget) union gmac_rxdesc_3 word3; struct page *page = NULL; unsigned int page_offs; + unsigned long flags; unsigned short r, w; union dma_rwptr rw; dma_addr_t mapping; int frag_nr = 0; + spin_lock_irqsave(&geth->irq_lock, flags); rw.bits32 = readl(ptr_reg); /* Reset interrupt as all packages until here are taken into account */ writel(DEFAULT_Q0_INT_BIT << netdev->dev_id, geth->base + GLOBAL_INTERRUPT_STATUS_1_REG); + spin_unlock_irqrestore(&geth->irq_lock, flags); + r = rw.bits.rptr; w = rw.bits.wptr; @@ -1726,10 +1735,9 @@ static irqreturn_t gmac_irq(int irq, void *data) gmac_update_hw_stats(netdev); if (val & (GMAC0_RX_OVERRUN_INT_BIT << (netdev->dev_id * 8))) { + spin_lock(&geth->irq_lock); writel(GMAC0_RXDERR_INT_BIT << (netdev->dev_id * 8), geth->base + GLOBAL_INTERRUPT_STATUS_4_REG); - - spin_lock(&geth->irq_lock); u64_stats_update_begin(&port->ir_stats_syncp); ++port->stats.rx_fifo_errors; u64_stats_update_end(&port->ir_stats_syncp); @@ -1978,7 +1986,7 @@ static int gmac_change_mtu(struct net_device *netdev, int new_mtu) gmac_disable_tx_rx(netdev); - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); gmac_update_config0_reg(netdev, max_len << CONFIG0_MAXLEN_SHIFT, CONFIG0_MAXLEN_MASK); diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c index aaf0eda96292..8af5ecec7d61 100644 --- a/drivers/net/ethernet/dlink/sundance.c +++ b/drivers/net/ethernet/dlink/sundance.c @@ -708,7 +708,7 @@ static int change_mtu(struct net_device *dev, int new_mtu) { if (netif_running(dev)) return -EBUSY; - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return 0; } diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index ad862ed7888a..a8596ebcdfd6 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4982,10 +4982,7 @@ static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, if (!br_spec) return -EINVAL; - nla_for_each_nested(attr, br_spec, rem) { - if (nla_type(attr) != IFLA_BRIDGE_MODE) - continue; - + nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) { mode = nla_get_u16(attr); if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c index 003bc9a45c65..1047c805054e 100644 --- a/drivers/net/ethernet/faraday/ftmac100.c +++ b/drivers/net/ethernet/faraday/ftmac100.c @@ -1092,7 +1092,7 @@ static int ftmac100_change_mtu(struct net_device *netdev, int mtu) } iowrite32(maccr, priv->base + FTMAC100_OFFSET_MACCR); - netdev->mtu = mtu; + WRITE_ONCE(netdev->mtu, mtu); return 0; } diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index dcbc598b11c6..baa0b3c2ce6f 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -2995,7 +2995,7 @@ static int dpaa_change_mtu(struct net_device *net_dev, int new_mtu) if (priv->xdp_prog && !xdp_validate_mtu(priv, new_mtu)) return -EINVAL; - net_dev->mtu = new_mtu; + WRITE_ONCE(net_dev->mtu, new_mtu); return 0; } diff --git 
a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 888509cf1f21..6866807973da 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -2698,7 +2698,7 @@ static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu) return err; out: - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return 0; } @@ -2896,11 +2896,14 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n, static int update_xps(struct dpaa2_eth_priv *priv) { struct net_device *net_dev = priv->net_dev; - struct cpumask xps_mask; - struct dpaa2_eth_fq *fq; int i, num_queues, netdev_queues; + struct dpaa2_eth_fq *fq; + cpumask_var_t xps_mask; int err = 0; + if (!alloc_cpumask_var(&xps_mask, GFP_KERNEL)) + return -ENOMEM; + num_queues = dpaa2_eth_queue_count(priv); netdev_queues = (net_dev->num_tc ? : 1) * num_queues; @@ -2910,16 +2913,17 @@ static int update_xps(struct dpaa2_eth_priv *priv) for (i = 0; i < netdev_queues; i++) { fq = &priv->fq[i % num_queues]; - cpumask_clear(&xps_mask); - cpumask_set_cpu(fq->target_cpu, &xps_mask); + cpumask_clear(xps_mask); + cpumask_set_cpu(fq->target_cpu, xps_mask); - err = netif_set_xps_queue(net_dev, &xps_mask, i); + err = netif_set_xps_queue(net_dev, xps_mask, i); if (err) { netdev_warn_once(net_dev, "Error setting XPS queue\n"); break; } } + free_cpumask_var(xps_mask); return err; } diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c index b6a534a3e0b1..701a87370737 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c @@ -33,6 +33,9 @@ static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls, acl_h = &acl_key->match; acl_m = &acl_key->mask; + if (flow_rule_match_has_control_flags(rule, extack)) + return -EOPNOTSUPP; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { struct flow_match_basic match; @@ -548,6 +551,9 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls, return -EOPNOTSUPP; } + if (flow_rule_match_has_control_flags(rule, extack)) + return -EOPNOTSUPP; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { struct flow_match_vlan match; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c index f3543a2df68d..a71f848adc05 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c @@ -590,7 +590,7 @@ static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu) return err; } - netdev->mtu = mtu; + WRITE_ONCE(netdev->mtu, mtu); return 0; } diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 9f07f4947b63..5c45f42232d3 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -2769,7 +2769,7 @@ static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog, if (priv->min_num_stack_tx_queues + num_xdp_tx_queues > priv->num_tx_rings) { NL_SET_ERR_MSG_FMT_MOD(extack, - "Reserving %d XDP TXQs does not leave a minimum of %d for stack (total %d)", + "Reserving %d XDP TXQs leaves under %d for stack (total %d)", num_xdp_tx_queues, priv->min_num_stack_tx_queues, priv->num_tx_rings); diff --git a/drivers/net/ethernet/freescale/fec_main.c 
b/drivers/net/ethernet/freescale/fec_main.c index 8bd213da8fb6..a72d8a2eb0b3 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3674,29 +3674,6 @@ fec_set_mac_address(struct net_device *ndev, void *p) return 0; } -#ifdef CONFIG_NET_POLL_CONTROLLER -/** - * fec_poll_controller - FEC Poll controller function - * @dev: The FEC network adapter - * - * Polled functionality used by netconsole and others in non interrupt mode - * - */ -static void fec_poll_controller(struct net_device *dev) -{ - int i; - struct fec_enet_private *fep = netdev_priv(dev); - - for (i = 0; i < FEC_IRQ_NUM; i++) { - if (fep->irq[i] > 0) { - disable_irq(fep->irq[i]); - fec_enet_interrupt(fep->irq[i], dev); - enable_irq(fep->irq[i]); - } - } -} -#endif - static inline void fec_enet_set_netdev_features(struct net_device *netdev, netdev_features_t features) { @@ -4003,9 +3980,6 @@ static const struct net_device_ops fec_netdev_ops = { .ndo_tx_timeout = fec_timeout, .ndo_set_mac_address = fec_set_mac_address, .ndo_eth_ioctl = phy_do_ioctl_running, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = fec_poll_controller, -#endif .ndo_set_features = fec_set_features, .ndo_bpf = fec_enet_bpf, .ndo_xdp_xmit = fec_enet_xdp_xmit, diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index 758535adc9ff..92b8f4ab26f1 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -267,7 +267,6 @@ struct memac_cfg { bool reset_on_init; bool pause_ignore; bool promiscuous_mode_enable; - struct fixed_phy_status *fixed_link; u16 max_frame_length; u16 pause_quanta; u32 tx_ipg_length; diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c index f557d68e5b76..1ed245a2ee01 100644 --- a/drivers/net/ethernet/freescale/fman/fman_muram.c +++ b/drivers/net/ethernet/freescale/fman/fman_muram.c @@ -12,7 +12,6 @@ struct muram_info { struct gen_pool *pool; void __iomem *vbase; - size_t size; phys_addr_t pbase; }; diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index a811238c018d..2baef59f741d 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -2026,7 +2026,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu) if (dev->flags & IFF_UP) stop_gfar(dev); - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); if (dev->flags & IFF_UP) startup_gfar(dev); diff --git a/drivers/net/ethernet/fujitsu/Kconfig b/drivers/net/ethernet/fujitsu/Kconfig index 0a1400cb410a..06a28bce5d27 100644 --- a/drivers/net/ethernet/fujitsu/Kconfig +++ b/drivers/net/ethernet/fujitsu/Kconfig @@ -18,7 +18,7 @@ if NET_VENDOR_FUJITSU config PCMCIA_FMVJ18X tristate "Fujitsu FMV-J18x PCMCIA support" - depends on PCMCIA + depends on PCMCIA && HAS_IOPORT select CRC32 help Say Y here if you intend to attach a Fujitsu FMV-J18x or compatible diff --git a/drivers/net/ethernet/fungible/funeth/funeth_main.c b/drivers/net/ethernet/fungible/funeth/funeth_main.c index df86770731ad..ac86179a0a81 100644 --- a/drivers/net/ethernet/fungible/funeth/funeth_main.c +++ b/drivers/net/ethernet/fungible/funeth/funeth_main.c @@ -927,7 +927,7 @@ static int fun_change_mtu(struct net_device *netdev, int new_mtu) rc = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MTU, new_mtu); if (!rc) - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); return rc; } diff --git 
a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h index 4814c96d5fe7..ae1e21c9b0a5 100644 --- a/drivers/net/ethernet/google/gve/gve.h +++ b/drivers/net/ethernet/google/gve/gve.h @@ -50,6 +50,10 @@ /* PTYPEs are always 10 bits. */ #define GVE_NUM_PTYPES 1024 +/* Default minimum ring size */ +#define GVE_DEFAULT_MIN_TX_RING_SIZE 256 +#define GVE_DEFAULT_MIN_RX_RING_SIZE 512 + #define GVE_DEFAULT_RX_BUFFER_SIZE 2048 #define GVE_MAX_RX_BUFFER_SIZE 4096 @@ -63,7 +67,6 @@ #define GVE_DEFAULT_HEADER_BUFFER_SIZE 128 #define DQO_QPL_DEFAULT_TX_PAGES 512 -#define DQO_QPL_DEFAULT_RX_PAGES 2048 /* Maximum TSO size supported on DQO */ #define GVE_DQO_TX_MAX 0x3FFFF @@ -607,6 +610,7 @@ struct gve_notify_block { struct gve_priv *priv; struct gve_tx_ring *tx; /* tx rings on this block */ struct gve_rx_ring *rx; /* rx rings on this block */ + u32 irq; }; /* Tracks allowed and current queue settings */ @@ -621,11 +625,6 @@ struct gve_qpl_config { unsigned long *qpl_id_map; /* bitmap of used qpl ids */ }; -struct gve_options_dqo_rda { - u16 tx_comp_ring_entries; /* number of tx_comp descriptors */ - u16 rx_buff_ring_entries; /* number of rx_buff descriptors */ -}; - struct gve_irq_db { __be32 index; } ____cacheline_aligned; @@ -639,28 +638,10 @@ struct gve_ptype_lut { struct gve_ptype ptypes[GVE_NUM_PTYPES]; }; -/* Parameters for allocating queue page lists */ -struct gve_qpls_alloc_cfg { - struct gve_qpl_config *qpl_cfg; - struct gve_queue_config *tx_cfg; - struct gve_queue_config *rx_cfg; - - u16 num_xdp_queues; - bool raw_addressing; - bool is_gqi; - - /* Allocated resources are returned here */ - struct gve_queue_page_list *qpls; -}; - /* Parameters for allocating resources for tx queues */ struct gve_tx_alloc_rings_cfg { struct gve_queue_config *qcfg; - /* qpls and qpl_cfg must already be allocated */ - struct gve_queue_page_list *qpls; - struct gve_qpl_config *qpl_cfg; - u16 ring_size; u16 start_idx; u16 num_rings; @@ -676,10 +657,6 @@ struct gve_rx_alloc_rings_cfg { struct gve_queue_config *qcfg; struct gve_queue_config *qcfg_tx; - /* qpls and qpl_cfg must already be allocated */ - struct gve_queue_page_list *qpls; - struct gve_qpl_config *qpl_cfg; - u16 ring_size; u16 packet_buffer_size; bool raw_addressing; @@ -705,7 +682,6 @@ struct gve_priv { struct net_device *dev; struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */ struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */ - struct gve_queue_page_list *qpls; /* array of num qpls */ struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */ struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */ dma_addr_t irq_db_indices_bus; @@ -718,9 +694,13 @@ struct gve_priv { u16 num_event_counters; u16 tx_desc_cnt; /* num desc per ring */ u16 rx_desc_cnt; /* num desc per ring */ + u16 max_tx_desc_cnt; + u16 max_rx_desc_cnt; + u16 min_tx_desc_cnt; + u16 min_rx_desc_cnt; + bool modify_ring_size_enabled; + bool default_min_ring_size; u16 tx_pages_per_qpl; /* Suggested number of pages per qpl for TX queues by NIC */ - u16 rx_pages_per_qpl; /* Suggested number of pages per qpl for RX queues by NIC */ - u16 rx_data_slot_cnt; /* rx buffer length */ u64 max_registered_pages; u64 num_registered_pages; /* num pages registered with NIC */ struct bpf_prog *xdp_prog; /* XDP BPF program */ @@ -730,7 +710,6 @@ struct gve_priv { u16 num_xdp_queues; struct gve_queue_config tx_cfg; struct gve_queue_config rx_cfg; - struct gve_qpl_config qpl_cfg; /* map used QPL ids */ u32 num_ntfy_blks; /* spilt between TX and RX so must be 
even */ struct gve_registers __iomem *reg_bar0; /* see gve_register.h */ @@ -792,7 +771,6 @@ struct gve_priv { u64 link_speed; bool up_before_suspend; /* True if dev was up before suspend */ - struct gve_options_dqo_rda options_dqo_rda; struct gve_ptype_lut *ptype_lut_dqo; /* Must be a power of two. */ @@ -1027,7 +1005,6 @@ static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid) return priv->tx_cfg.max_queues + rx_qid; } -/* Returns the index into priv->qpls where a certain rx queue's QPL resides */ static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid) { return tx_cfg->max_queues + rx_qid; @@ -1038,41 +1015,17 @@ static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv) return gve_tx_qpl_id(priv, 0); } -/* Returns the index into priv->qpls where the first rx queue's QPL resides */ static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg) { return gve_get_rx_qpl_id(tx_cfg, 0); } -/* Returns a pointer to the next available tx qpl in the list of qpls */ -static inline -struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_tx_alloc_rings_cfg *cfg, - int tx_qid) -{ - /* QPL already in use */ - if (test_bit(tx_qid, cfg->qpl_cfg->qpl_id_map)) - return NULL; - set_bit(tx_qid, cfg->qpl_cfg->qpl_id_map); - return &cfg->qpls[tx_qid]; -} - -/* Returns a pointer to the next available rx qpl in the list of qpls */ -static inline -struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_rx_alloc_rings_cfg *cfg, - int rx_qid) +static inline u32 gve_get_rx_pages_per_qpl_dqo(u32 rx_desc_cnt) { - int id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx_qid); - /* QPL already in use */ - if (test_bit(id, cfg->qpl_cfg->qpl_id_map)) - return NULL; - set_bit(id, cfg->qpl_cfg->qpl_id_map); - return &cfg->qpls[id]; -} - -/* Unassigns the qpl with the given id */ -static inline void gve_unassign_qpl(struct gve_qpl_config *qpl_cfg, int id) -{ - clear_bit(id, qpl_cfg->qpl_id_map); + /* For DQO, page count should be more than ring size for + * out-of-order completions. Set it to two times of ring size. 
+ */ + return 2 * rx_desc_cnt; } /* Returns the correct dma direction for tx and rx qpls */ @@ -1115,6 +1068,12 @@ int gve_alloc_page(struct gve_priv *priv, struct device *dev, enum dma_data_direction, gfp_t gfp_flags); void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma, enum dma_data_direction); +/* qpls */ +struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv, + u32 id, int pages); +void gve_free_queue_page_list(struct gve_priv *priv, + struct gve_queue_page_list *qpl, + u32 id); /* tx handling */ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev); int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, @@ -1137,6 +1096,12 @@ bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx); void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx); int gve_rx_poll(struct gve_notify_block *block, int budget); bool gve_rx_work_pending(struct gve_rx_ring *rx); +int gve_rx_alloc_ring_gqi(struct gve_priv *priv, + struct gve_rx_alloc_rings_cfg *cfg, + struct gve_rx_ring *rx, + int idx); +void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx, + struct gve_rx_alloc_rings_cfg *cfg); int gve_rx_alloc_rings(struct gve_priv *priv); int gve_rx_alloc_rings_gqi(struct gve_priv *priv, struct gve_rx_alloc_rings_cfg *cfg); @@ -1150,6 +1115,12 @@ int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split); /* Reset */ void gve_schedule_reset(struct gve_priv *priv); int gve_reset(struct gve_priv *priv, bool attempt_teardown); +void gve_get_curr_alloc_cfgs(struct gve_priv *priv, + struct gve_tx_alloc_rings_cfg *tx_alloc_cfg, + struct gve_rx_alloc_rings_cfg *rx_alloc_cfg); +int gve_adjust_config(struct gve_priv *priv, + struct gve_tx_alloc_rings_cfg *tx_alloc_cfg, + struct gve_rx_alloc_rings_cfg *rx_alloc_cfg); int gve_adjust_queues(struct gve_priv *priv, struct gve_queue_config new_rx_config, struct gve_queue_config new_tx_config); diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c index ae12ac38e18b..8ca0def176ef 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.c +++ b/drivers/net/ethernet/google/gve/gve_adminq.c @@ -32,6 +32,8 @@ struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *desc return option_end > descriptor_end ? 
NULL : (struct gve_device_option *)option_end; } +#define GVE_DEVICE_OPTION_NO_MIN_RING_SIZE 8 + static void gve_parse_device_option(struct gve_priv *priv, struct gve_device_descriptor *device_descriptor, @@ -41,7 +43,8 @@ void gve_parse_device_option(struct gve_priv *priv, struct gve_device_option_dqo_rda **dev_op_dqo_rda, struct gve_device_option_jumbo_frames **dev_op_jumbo_frames, struct gve_device_option_dqo_qpl **dev_op_dqo_qpl, - struct gve_device_option_buffer_sizes **dev_op_buffer_sizes) + struct gve_device_option_buffer_sizes **dev_op_buffer_sizes, + struct gve_device_option_modify_ring **dev_op_modify_ring) { u32 req_feat_mask = be32_to_cpu(option->required_features_mask); u16 option_length = be16_to_cpu(option->option_length); @@ -165,6 +168,27 @@ void gve_parse_device_option(struct gve_priv *priv, "Buffer Sizes"); *dev_op_buffer_sizes = (void *)(option + 1); break; + case GVE_DEV_OPT_ID_MODIFY_RING: + if (option_length < GVE_DEVICE_OPTION_NO_MIN_RING_SIZE || + req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING) { + dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT, + "Modify Ring", (int)sizeof(**dev_op_modify_ring), + GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING, + option_length, req_feat_mask); + break; + } + + if (option_length > sizeof(**dev_op_modify_ring)) { + dev_warn(&priv->pdev->dev, + GVE_DEVICE_OPTION_TOO_BIG_FMT, "Modify Ring"); + } + + *dev_op_modify_ring = (void *)(option + 1); + + /* device has not provided min ring size */ + if (option_length == GVE_DEVICE_OPTION_NO_MIN_RING_SIZE) + priv->default_min_ring_size = true; + break; default: /* If we don't recognize the option just continue * without doing anything. @@ -183,7 +207,8 @@ gve_process_device_options(struct gve_priv *priv, struct gve_device_option_dqo_rda **dev_op_dqo_rda, struct gve_device_option_jumbo_frames **dev_op_jumbo_frames, struct gve_device_option_dqo_qpl **dev_op_dqo_qpl, - struct gve_device_option_buffer_sizes **dev_op_buffer_sizes) + struct gve_device_option_buffer_sizes **dev_op_buffer_sizes, + struct gve_device_option_modify_ring **dev_op_modify_ring) { const int num_options = be16_to_cpu(descriptor->num_device_options); struct gve_device_option *dev_opt; @@ -204,7 +229,8 @@ gve_process_device_options(struct gve_priv *priv, gve_parse_device_option(priv, descriptor, dev_opt, dev_op_gqi_rda, dev_op_gqi_qpl, dev_op_dqo_rda, dev_op_jumbo_frames, - dev_op_dqo_qpl, dev_op_buffer_sizes); + dev_op_dqo_qpl, dev_op_buffer_sizes, + dev_op_modify_ring); dev_opt = next_opt; } @@ -565,6 +591,7 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index) cpu_to_be64(tx->q_resources_bus), .tx_ring_addr = cpu_to_be64(tx->bus), .ntfy_id = cpu_to_be32(tx->ntfy_id), + .tx_ring_size = cpu_to_be16(priv->tx_desc_cnt), }; if (gve_is_gqi(priv)) { @@ -573,24 +600,17 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index) cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id); } else { - u16 comp_ring_size; u32 qpl_id = 0; - if (priv->queue_format == GVE_DQO_RDA_FORMAT) { + if (priv->queue_format == GVE_DQO_RDA_FORMAT) qpl_id = GVE_RAW_ADDRESSING_QPL_ID; - comp_ring_size = - priv->options_dqo_rda.tx_comp_ring_entries; - } else { + else qpl_id = tx->dqo.qpl->id; - comp_ring_size = priv->tx_desc_cnt; - } cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id); - cmd.create_tx_queue.tx_ring_size = - cpu_to_be16(priv->tx_desc_cnt); cmd.create_tx_queue.tx_comp_ring_addr = cpu_to_be64(tx->complq_bus_dqo); cmd.create_tx_queue.tx_comp_ring_size = - 
cpu_to_be16(comp_ring_size); + cpu_to_be16(priv->tx_desc_cnt); } return gve_adminq_issue_cmd(priv, &cmd); @@ -610,63 +630,73 @@ int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_que return gve_adminq_kick_and_wait(priv); } -static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index) +static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv, + union gve_adminq_command *cmd, + u32 queue_index) { struct gve_rx_ring *rx = &priv->rx[queue_index]; - union gve_adminq_command cmd; - memset(&cmd, 0, sizeof(cmd)); - cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE); - cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) { + memset(cmd, 0, sizeof(*cmd)); + cmd->opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE); + cmd->create_rx_queue = (struct gve_adminq_create_rx_queue) { .queue_id = cpu_to_be32(queue_index), .ntfy_id = cpu_to_be32(rx->ntfy_id), .queue_resources_addr = cpu_to_be64(rx->q_resources_bus), + .rx_ring_size = cpu_to_be16(priv->rx_desc_cnt), }; if (gve_is_gqi(priv)) { u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ? GVE_RAW_ADDRESSING_QPL_ID : rx->data.qpl->id; - cmd.create_rx_queue.rx_desc_ring_addr = - cpu_to_be64(rx->desc.bus), - cmd.create_rx_queue.rx_data_ring_addr = - cpu_to_be64(rx->data.data_bus), - cmd.create_rx_queue.index = cpu_to_be32(queue_index); - cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id); - cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size); + cmd->create_rx_queue.rx_desc_ring_addr = + cpu_to_be64(rx->desc.bus); + cmd->create_rx_queue.rx_data_ring_addr = + cpu_to_be64(rx->data.data_bus); + cmd->create_rx_queue.index = cpu_to_be32(queue_index); + cmd->create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id); + cmd->create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size); } else { - u16 rx_buff_ring_entries; u32 qpl_id = 0; - if (priv->queue_format == GVE_DQO_RDA_FORMAT) { + if (priv->queue_format == GVE_DQO_RDA_FORMAT) qpl_id = GVE_RAW_ADDRESSING_QPL_ID; - rx_buff_ring_entries = - priv->options_dqo_rda.rx_buff_ring_entries; - } else { + else qpl_id = rx->dqo.qpl->id; - rx_buff_ring_entries = priv->rx_desc_cnt; - } - cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id); - cmd.create_rx_queue.rx_ring_size = - cpu_to_be16(priv->rx_desc_cnt); - cmd.create_rx_queue.rx_desc_ring_addr = + cmd->create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id); + cmd->create_rx_queue.rx_desc_ring_addr = cpu_to_be64(rx->dqo.complq.bus); - cmd.create_rx_queue.rx_data_ring_addr = + cmd->create_rx_queue.rx_data_ring_addr = cpu_to_be64(rx->dqo.bufq.bus); - cmd.create_rx_queue.packet_buffer_size = + cmd->create_rx_queue.packet_buffer_size = cpu_to_be16(priv->data_buffer_size_dqo); - cmd.create_rx_queue.rx_buff_ring_size = - cpu_to_be16(rx_buff_ring_entries); - cmd.create_rx_queue.enable_rsc = + cmd->create_rx_queue.rx_buff_ring_size = + cpu_to_be16(priv->rx_desc_cnt); + cmd->create_rx_queue.enable_rsc = !!(priv->dev->features & NETIF_F_LRO); if (priv->header_split_enabled) - cmd.create_rx_queue.header_buffer_size = + cmd->create_rx_queue.header_buffer_size = cpu_to_be16(priv->header_buf_size); } +} + +static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index) +{ + union gve_adminq_command cmd; + gve_adminq_get_create_rx_queue_cmd(priv, &cmd, queue_index); return gve_adminq_issue_cmd(priv, &cmd); } +/* Unlike gve_adminq_create_rx_queue, this actually rings the doorbell */ +int gve_adminq_create_single_rx_queue(struct gve_priv *priv, u32 
queue_index) +{ + union gve_adminq_command cmd; + + gve_adminq_get_create_rx_queue_cmd(priv, &cmd, queue_index); + return gve_adminq_execute_cmd(priv, &cmd); +} + int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues) { int err; @@ -713,22 +743,31 @@ int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_qu return gve_adminq_kick_and_wait(priv); } +static void gve_adminq_make_destroy_rx_queue_cmd(union gve_adminq_command *cmd, + u32 queue_index) +{ + memset(cmd, 0, sizeof(*cmd)); + cmd->opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE); + cmd->destroy_rx_queue = (struct gve_adminq_destroy_rx_queue) { + .queue_id = cpu_to_be32(queue_index), + }; +} + static int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index) { union gve_adminq_command cmd; - int err; - memset(&cmd, 0, sizeof(cmd)); - cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE); - cmd.destroy_rx_queue = (struct gve_adminq_destroy_rx_queue) { - .queue_id = cpu_to_be32(queue_index), - }; + gve_adminq_make_destroy_rx_queue_cmd(&cmd, queue_index); + return gve_adminq_issue_cmd(priv, &cmd); +} - err = gve_adminq_issue_cmd(priv, &cmd); - if (err) - return err; +/* Unlike gve_adminq_destroy_rx_queue, this actually rings the doorbell */ +int gve_adminq_destroy_single_rx_queue(struct gve_priv *priv, u32 queue_index) +{ + union gve_adminq_command cmd; - return 0; + gve_adminq_make_destroy_rx_queue_cmd(&cmd, queue_index); + return gve_adminq_execute_cmd(priv, &cmd); } int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues) @@ -745,31 +784,17 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues) return gve_adminq_kick_and_wait(priv); } -static int gve_set_desc_cnt(struct gve_priv *priv, - struct gve_device_descriptor *descriptor) -{ - priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries); - priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries); - return 0; -} - -static int -gve_set_desc_cnt_dqo(struct gve_priv *priv, - const struct gve_device_descriptor *descriptor, - const struct gve_device_option_dqo_rda *dev_op_dqo_rda) +static void gve_set_default_desc_cnt(struct gve_priv *priv, + const struct gve_device_descriptor *descriptor) { priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries); priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries); - if (priv->queue_format == GVE_DQO_QPL_FORMAT) - return 0; - - priv->options_dqo_rda.tx_comp_ring_entries = - be16_to_cpu(dev_op_dqo_rda->tx_comp_ring_entries); - priv->options_dqo_rda.rx_buff_ring_entries = - be16_to_cpu(dev_op_dqo_rda->rx_buff_ring_entries); - - return 0; + /* set default ranges */ + priv->max_tx_desc_cnt = priv->tx_desc_cnt; + priv->max_rx_desc_cnt = priv->rx_desc_cnt; + priv->min_tx_desc_cnt = priv->tx_desc_cnt; + priv->min_rx_desc_cnt = priv->rx_desc_cnt; } static void gve_enable_supported_features(struct gve_priv *priv, @@ -779,7 +804,9 @@ static void gve_enable_supported_features(struct gve_priv *priv, const struct gve_device_option_dqo_qpl *dev_op_dqo_qpl, const struct gve_device_option_buffer_sizes - *dev_op_buffer_sizes) + *dev_op_buffer_sizes, + const struct gve_device_option_modify_ring + *dev_op_modify_ring) { /* Before control reaches this point, the page-size-capped max MTU from * the gve_device_descriptor field has already been stored in @@ -796,12 +823,8 @@ static void gve_enable_supported_features(struct gve_priv *priv, if (dev_op_dqo_qpl) { priv->tx_pages_per_qpl = be16_to_cpu(dev_op_dqo_qpl->tx_pages_per_qpl); - priv->rx_pages_per_qpl = - 
be16_to_cpu(dev_op_dqo_qpl->rx_pages_per_qpl); if (priv->tx_pages_per_qpl == 0) priv->tx_pages_per_qpl = DQO_QPL_DEFAULT_TX_PAGES; - if (priv->rx_pages_per_qpl == 0) - priv->rx_pages_per_qpl = DQO_QPL_DEFAULT_RX_PAGES; } if (dev_op_buffer_sizes && @@ -814,12 +837,33 @@ static void gve_enable_supported_features(struct gve_priv *priv, "BUFFER SIZES device option enabled with max_rx_buffer_size of %u, header_buf_size of %u.\n", priv->max_rx_buffer_size, priv->header_buf_size); } + + /* Read and store ring size ranges given by device */ + if (dev_op_modify_ring && + (supported_features_mask & GVE_SUP_MODIFY_RING_MASK)) { + priv->modify_ring_size_enabled = true; + + /* max ring size for DQO QPL should not be overwritten because of device limit */ + if (priv->queue_format != GVE_DQO_QPL_FORMAT) { + priv->max_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_rx_ring_size); + priv->max_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_tx_ring_size); + } + if (priv->default_min_ring_size) { + /* If device hasn't provided minimums, use default minimums */ + priv->min_tx_desc_cnt = GVE_DEFAULT_MIN_TX_RING_SIZE; + priv->min_rx_desc_cnt = GVE_DEFAULT_MIN_RX_RING_SIZE; + } else { + priv->min_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_rx_ring_size); + priv->min_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_tx_ring_size); + } + } } int gve_adminq_describe_device(struct gve_priv *priv) { struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL; struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL; + struct gve_device_option_modify_ring *dev_op_modify_ring = NULL; struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL; struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL; struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL; @@ -851,9 +895,9 @@ int gve_adminq_describe_device(struct gve_priv *priv) err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda, &dev_op_gqi_qpl, &dev_op_dqo_rda, - &dev_op_jumbo_frames, - &dev_op_dqo_qpl, - &dev_op_buffer_sizes); + &dev_op_jumbo_frames, &dev_op_dqo_qpl, + &dev_op_buffer_sizes, + &dev_op_modify_ring); if (err) goto free_device_descriptor; @@ -888,15 +932,13 @@ int gve_adminq_describe_device(struct gve_priv *priv) dev_info(&priv->pdev->dev, "Driver is running with GQI QPL queue format.\n"); } - if (gve_is_gqi(priv)) { - err = gve_set_desc_cnt(priv, descriptor); - } else { - /* DQO supports LRO. */ + + /* set default descriptor counts */ + gve_set_default_desc_cnt(priv, descriptor); + + /* DQO supports LRO. 
*/ + if (!gve_is_gqi(priv)) priv->dev->hw_features |= NETIF_F_LRO; - err = gve_set_desc_cnt_dqo(priv, descriptor, dev_op_dqo_rda); - } - if (err) - goto free_device_descriptor; priv->max_registered_pages = be64_to_cpu(descriptor->max_registered_pages); @@ -912,18 +954,11 @@ int gve_adminq_describe_device(struct gve_priv *priv) mac = descriptor->mac; dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac); priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl); - priv->rx_data_slot_cnt = be16_to_cpu(descriptor->rx_pages_per_qpl); - - if (gve_is_gqi(priv) && priv->rx_data_slot_cnt < priv->rx_desc_cnt) { - dev_err(&priv->pdev->dev, "rx_data_slot_cnt cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n", - priv->rx_data_slot_cnt); - priv->rx_desc_cnt = priv->rx_data_slot_cnt; - } priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues); gve_enable_supported_features(priv, supported_features_mask, dev_op_jumbo_frames, dev_op_dqo_qpl, - dev_op_buffer_sizes); + dev_op_buffer_sizes, dev_op_modify_ring); free_device_descriptor: dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus); diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h index 5ac972e45ff8..e64f0dbe744d 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.h +++ b/drivers/net/ethernet/google/gve/gve_adminq.h @@ -103,8 +103,7 @@ static_assert(sizeof(struct gve_device_option_gqi_qpl) == 4); struct gve_device_option_dqo_rda { __be32 supported_features_mask; - __be16 tx_comp_ring_entries; - __be16 rx_buff_ring_entries; + __be32 reserved; }; static_assert(sizeof(struct gve_device_option_dqo_rda) == 8); @@ -134,6 +133,16 @@ struct gve_device_option_buffer_sizes { static_assert(sizeof(struct gve_device_option_buffer_sizes) == 8); +struct gve_device_option_modify_ring { + __be32 supported_featured_mask; + __be16 max_rx_ring_size; + __be16 max_tx_ring_size; + __be16 min_rx_ring_size; + __be16 min_tx_ring_size; +}; + +static_assert(sizeof(struct gve_device_option_modify_ring) == 12); + /* Terminology: * * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA @@ -143,28 +152,31 @@ static_assert(sizeof(struct gve_device_option_buffer_sizes) == 8); * the device for read/write and data is copied from/to SKBs. 
*/ enum gve_dev_opt_id { - GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING = 0x1, - GVE_DEV_OPT_ID_GQI_RDA = 0x2, - GVE_DEV_OPT_ID_GQI_QPL = 0x3, - GVE_DEV_OPT_ID_DQO_RDA = 0x4, - GVE_DEV_OPT_ID_DQO_QPL = 0x7, - GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8, - GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa, + GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING = 0x1, + GVE_DEV_OPT_ID_GQI_RDA = 0x2, + GVE_DEV_OPT_ID_GQI_QPL = 0x3, + GVE_DEV_OPT_ID_DQO_RDA = 0x4, + GVE_DEV_OPT_ID_MODIFY_RING = 0x6, + GVE_DEV_OPT_ID_DQO_QPL = 0x7, + GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8, + GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa, }; enum gve_dev_opt_req_feat_mask { - GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING = 0x0, - GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0, - GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0, - GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0, - GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0, - GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0, - GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0, + GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING = 0x0, + GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0, + GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0, + GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0, + GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0, + GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0, + GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0, + GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING = 0x0, }; enum gve_sup_feature_mask { - GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2, - GVE_SUP_BUFFER_SIZES_MASK = 1 << 4, + GVE_SUP_MODIFY_RING_MASK = 1 << 0, + GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2, + GVE_SUP_BUFFER_SIZES_MASK = 1 << 4, }; #define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0 @@ -439,7 +451,9 @@ int gve_adminq_configure_device_resources(struct gve_priv *priv, int gve_adminq_deconfigure_device_resources(struct gve_priv *priv); int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues); int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues); +int gve_adminq_create_single_rx_queue(struct gve_priv *priv, u32 queue_index); int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues); +int gve_adminq_destroy_single_rx_queue(struct gve_priv *priv, u32 queue_index); int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 queue_id); int gve_adminq_register_page_list(struct gve_priv *priv, struct gve_queue_page_list *qpl); diff --git a/drivers/net/ethernet/google/gve/gve_dqo.h b/drivers/net/ethernet/google/gve/gve_dqo.h index b81584829c40..e83773fb891f 100644 --- a/drivers/net/ethernet/google/gve/gve_dqo.h +++ b/drivers/net/ethernet/google/gve/gve_dqo.h @@ -44,6 +44,12 @@ void gve_tx_free_rings_dqo(struct gve_priv *priv, struct gve_tx_alloc_rings_cfg *cfg); void gve_tx_start_ring_dqo(struct gve_priv *priv, int idx); void gve_tx_stop_ring_dqo(struct gve_priv *priv, int idx); +int gve_rx_alloc_ring_dqo(struct gve_priv *priv, + struct gve_rx_alloc_rings_cfg *cfg, + struct gve_rx_ring *rx, + int idx); +void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx, + struct gve_rx_alloc_rings_cfg *cfg); int gve_rx_alloc_rings_dqo(struct gve_priv *priv, struct gve_rx_alloc_rings_cfg *cfg); void gve_rx_free_rings_dqo(struct gve_priv *priv, diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c index 9aebfb843d9d..fe1741d482b4 100644 --- a/drivers/net/ethernet/google/gve/gve_ethtool.c +++ b/drivers/net/ethernet/google/gve/gve_ethtool.c @@ -8,6 +8,7 @@ #include "gve.h" #include "gve_adminq.h" #include "gve_dqo.h" +#include "gve_utils.h" static void gve_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) @@ -73,7 
+74,7 @@ static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = { "adminq_create_tx_queue_cnt", "adminq_create_rx_queue_cnt", "adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt", "adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt", - "adminq_report_stats_cnt", "adminq_report_link_speed_cnt" + "adminq_report_stats_cnt", "adminq_report_link_speed_cnt", "adminq_get_ptype_map_cnt" }; static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = { @@ -89,42 +90,34 @@ static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = { static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { struct gve_priv *priv = netdev_priv(netdev); - char *s = (char *)data; + u8 *s = (char *)data; int num_tx_queues; int i, j; num_tx_queues = gve_num_tx_queues(priv); switch (stringset) { case ETH_SS_STATS: - memcpy(s, *gve_gstrings_main_stats, - sizeof(gve_gstrings_main_stats)); - s += sizeof(gve_gstrings_main_stats); - - for (i = 0; i < priv->rx_cfg.num_queues; i++) { - for (j = 0; j < NUM_GVE_RX_CNTS; j++) { - snprintf(s, ETH_GSTRING_LEN, - gve_gstrings_rx_stats[j], i); - s += ETH_GSTRING_LEN; - } - } + for (i = 0; i < ARRAY_SIZE(gve_gstrings_main_stats); i++) + ethtool_puts(&s, gve_gstrings_main_stats[i]); - for (i = 0; i < num_tx_queues; i++) { - for (j = 0; j < NUM_GVE_TX_CNTS; j++) { - snprintf(s, ETH_GSTRING_LEN, - gve_gstrings_tx_stats[j], i); - s += ETH_GSTRING_LEN; - } - } + for (i = 0; i < priv->rx_cfg.num_queues; i++) + for (j = 0; j < NUM_GVE_RX_CNTS; j++) + ethtool_sprintf(&s, gve_gstrings_rx_stats[j], + i); + + for (i = 0; i < num_tx_queues; i++) + for (j = 0; j < NUM_GVE_TX_CNTS; j++) + ethtool_sprintf(&s, gve_gstrings_tx_stats[j], + i); + + for (i = 0; i < ARRAY_SIZE(gve_gstrings_adminq_stats); i++) + ethtool_puts(&s, gve_gstrings_adminq_stats[i]); - memcpy(s, *gve_gstrings_adminq_stats, - sizeof(gve_gstrings_adminq_stats)); - s += sizeof(gve_gstrings_adminq_stats); break; case ETH_SS_PRIV_FLAGS: - memcpy(s, *gve_gstrings_priv_flags, - sizeof(gve_gstrings_priv_flags)); - s += sizeof(gve_gstrings_priv_flags); + for (i = 0; i < ARRAY_SIZE(gve_gstrings_priv_flags); i++) + ethtool_puts(&s, gve_gstrings_priv_flags[i]); break; default: @@ -165,6 +158,8 @@ gve_get_ethtool_stats(struct net_device *netdev, struct stats *report_stats; int *rx_qid_to_stats_idx; int *tx_qid_to_stats_idx; + int num_stopped_rxqs = 0; + int num_stopped_txqs = 0; struct gve_priv *priv; bool skip_nic_stats; unsigned int start; @@ -181,12 +176,23 @@ gve_get_ethtool_stats(struct net_device *netdev, sizeof(int), GFP_KERNEL); if (!rx_qid_to_stats_idx) return; + for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) { + rx_qid_to_stats_idx[ring] = -1; + if (!gve_rx_was_added_to_block(priv, ring)) + num_stopped_rxqs++; + } tx_qid_to_stats_idx = kmalloc_array(num_tx_queues, sizeof(int), GFP_KERNEL); if (!tx_qid_to_stats_idx) { kfree(rx_qid_to_stats_idx); return; } + for (ring = 0; ring < num_tx_queues; ring++) { + tx_qid_to_stats_idx[ring] = -1; + if (!gve_tx_was_added_to_block(priv, ring)) + num_stopped_txqs++; + } + for (rx_pkts = 0, rx_bytes = 0, rx_hsplit_pkt = 0, rx_skb_alloc_fail = 0, rx_buf_alloc_fail = 0, rx_desc_err_dropped_pkt = 0, rx_hsplit_unsplit_pkt = 0, @@ -260,7 +266,13 @@ gve_get_ethtool_stats(struct net_device *netdev, /* For rx cross-reporting stats, start from nic rx stats in report */ base_stats_idx = GVE_TX_STATS_REPORT_NUM * num_tx_queues + GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues; - max_stats_idx = NIC_RX_STATS_REPORT_NUM * 
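/*
 * A small userspace mimic of the cursor-advancing pattern that
 * ethtool_puts()/ethtool_sprintf() provide in the rewritten
 * gve_get_strings() above: every stat name occupies one fixed
 * ETH_GSTRING_LEN slot and the helper advances the caller's pointer to
 * the next slot, so no offsets are tracked by hand. The helper and the
 * sample names below are stand-ins, not kernel API.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_GSTRING_LEN 32

static void puts_slot(uint8_t **data, const char *str)
{
	/* Copy at most one slot, NUL-terminated, then advance one slot. */
	strncpy((char *)*data, str, ETH_GSTRING_LEN - 1);
	(*data)[ETH_GSTRING_LEN - 1] = '\0';
	*data += ETH_GSTRING_LEN;
}

int main(void)
{
	uint8_t buf[3 * ETH_GSTRING_LEN] = {0};
	uint8_t *s = buf;
	int i;

	puts_slot(&s, "rx_packets");
	puts_slot(&s, "rx_bytes");
	puts_slot(&s, "rx_dropped");

	for (i = 0; i < 3; i++)
		printf("slot %d: %s\n", i, (char *)(buf + i * ETH_GSTRING_LEN));
	return 0;
}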
priv->rx_cfg.num_queues + + /* The boundary between driver stats and NIC stats shifts if there are + * stopped queues. + */ + base_stats_idx += NIC_RX_STATS_REPORT_NUM * num_stopped_rxqs + + NIC_TX_STATS_REPORT_NUM * num_stopped_txqs; + max_stats_idx = NIC_RX_STATS_REPORT_NUM * + (priv->rx_cfg.num_queues - num_stopped_rxqs) + base_stats_idx; /* Preprocess the stats report for rx, map queue id to start index */ skip_nic_stats = false; @@ -274,6 +286,10 @@ gve_get_ethtool_stats(struct net_device *netdev, skip_nic_stats = true; break; } + if (queue_id < 0 || queue_id >= priv->rx_cfg.num_queues) { + net_err_ratelimited("Invalid rxq id in NIC stats\n"); + continue; + } rx_qid_to_stats_idx[queue_id] = stats_idx; } /* walk RX rings */ @@ -308,11 +324,11 @@ gve_get_ethtool_stats(struct net_device *netdev, data[i++] = rx->rx_copybreak_pkt; data[i++] = rx->rx_copied_pkt; /* stats from NIC */ - if (skip_nic_stats) { + stats_idx = rx_qid_to_stats_idx[ring]; + if (skip_nic_stats || stats_idx < 0) { /* skip NIC rx stats */ i += NIC_RX_STATS_REPORT_NUM; } else { - stats_idx = rx_qid_to_stats_idx[ring]; for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) { u64 value = be64_to_cpu(report_stats[stats_idx + j].value); @@ -338,7 +354,8 @@ gve_get_ethtool_stats(struct net_device *netdev, /* For tx cross-reporting stats, start from nic tx stats in report */ base_stats_idx = max_stats_idx; - max_stats_idx = NIC_TX_STATS_REPORT_NUM * num_tx_queues + + max_stats_idx = NIC_TX_STATS_REPORT_NUM * + (num_tx_queues - num_stopped_txqs) + max_stats_idx; /* Preprocess the stats report for tx, map queue id to start index */ skip_nic_stats = false; @@ -352,6 +369,10 @@ gve_get_ethtool_stats(struct net_device *netdev, skip_nic_stats = true; break; } + if (queue_id < 0 || queue_id >= num_tx_queues) { + net_err_ratelimited("Invalid txq id in NIC stats\n"); + continue; + } tx_qid_to_stats_idx[queue_id] = stats_idx; } /* walk TX rings */ @@ -383,11 +404,11 @@ gve_get_ethtool_stats(struct net_device *netdev, data[i++] = gve_tx_load_event_counter(priv, tx); data[i++] = tx->dma_mapping_error; /* stats from NIC */ - if (skip_nic_stats) { + stats_idx = tx_qid_to_stats_idx[ring]; + if (skip_nic_stats || stats_idx < 0) { /* skip NIC tx stats */ i += NIC_TX_STATS_REPORT_NUM; } else { - stats_idx = tx_qid_to_stats_idx[ring]; for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) { u64 value = be64_to_cpu(report_stats[stats_idx + j].value); @@ -428,6 +449,7 @@ gve_get_ethtool_stats(struct net_device *netdev, data[i++] = priv->adminq_set_driver_parameter_cnt; data[i++] = priv->adminq_report_stats_cnt; data[i++] = priv->adminq_report_link_speed_cnt; + data[i++] = priv->adminq_get_ptype_map_cnt; } static void gve_get_channels(struct net_device *netdev, @@ -489,8 +511,8 @@ static void gve_get_ringparam(struct net_device *netdev, { struct gve_priv *priv = netdev_priv(netdev); - cmd->rx_max_pending = priv->rx_desc_cnt; - cmd->tx_max_pending = priv->tx_desc_cnt; + cmd->rx_max_pending = priv->max_rx_desc_cnt; + cmd->tx_max_pending = priv->max_tx_desc_cnt; cmd->rx_pending = priv->rx_desc_cnt; cmd->tx_pending = priv->tx_desc_cnt; @@ -502,20 +524,81 @@ static void gve_get_ringparam(struct net_device *netdev, kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED; } +static int gve_adjust_ring_sizes(struct gve_priv *priv, + u16 new_tx_desc_cnt, + u16 new_rx_desc_cnt) +{ + struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0}; + struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0}; + int err; + + /* get current queue configuration */ + gve_get_curr_alloc_cfgs(priv, 
&tx_alloc_cfg, &rx_alloc_cfg); + + /* copy over the new ring_size from ethtool */ + tx_alloc_cfg.ring_size = new_tx_desc_cnt; + rx_alloc_cfg.ring_size = new_rx_desc_cnt; + + if (netif_running(priv->dev)) { + err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg); + if (err) + return err; + } + + /* Set new ring_size for the next up */ + priv->tx_desc_cnt = new_tx_desc_cnt; + priv->rx_desc_cnt = new_rx_desc_cnt; + + return 0; +} + +static int gve_validate_req_ring_size(struct gve_priv *priv, u16 new_tx_desc_cnt, + u16 new_rx_desc_cnt) +{ + /* check for valid range */ + if (new_tx_desc_cnt < priv->min_tx_desc_cnt || + new_tx_desc_cnt > priv->max_tx_desc_cnt || + new_rx_desc_cnt < priv->min_rx_desc_cnt || + new_rx_desc_cnt > priv->max_rx_desc_cnt) { + dev_err(&priv->pdev->dev, "Requested descriptor count out of range\n"); + return -EINVAL; + } + + if (!is_power_of_2(new_tx_desc_cnt) || !is_power_of_2(new_rx_desc_cnt)) { + dev_err(&priv->pdev->dev, "Requested descriptor count has to be a power of 2\n"); + return -EINVAL; + } + return 0; +} + static int gve_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *cmd, struct kernel_ethtool_ringparam *kernel_cmd, struct netlink_ext_ack *extack) { struct gve_priv *priv = netdev_priv(netdev); + u16 new_tx_cnt, new_rx_cnt; + int err; - if (priv->tx_desc_cnt != cmd->tx_pending || - priv->rx_desc_cnt != cmd->rx_pending) { - dev_info(&priv->pdev->dev, "Modify ring size is not supported.\n"); + err = gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split); + if (err) + return err; + + if (cmd->tx_pending == priv->tx_desc_cnt && cmd->rx_pending == priv->rx_desc_cnt) + return 0; + + if (!priv->modify_ring_size_enabled) { + dev_err(&priv->pdev->dev, "Modify ring size is not supported.\n"); return -EOPNOTSUPP; } - return gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split); + new_tx_cnt = cmd->tx_pending; + new_rx_cnt = cmd->rx_pending; + + if (gve_validate_req_ring_size(priv, new_tx_cnt, new_rx_cnt)) + return -EINVAL; + + return gve_adjust_ring_sizes(priv, new_tx_cnt, new_rx_cnt); } static int gve_user_reset(struct net_device *netdev, u32 *flags) @@ -710,5 +793,6 @@ const struct ethtool_ops gve_ethtool_ops = { .set_tunable = gve_set_tunable, .get_priv_flags = gve_get_priv_flags, .set_priv_flags = gve_set_priv_flags, - .get_link_ksettings = gve_get_link_ksettings + .get_link_ksettings = gve_get_link_ksettings, + .get_ts_info = ethtool_op_get_ts_info, }; diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index 166bd827a6d7..cabf7d4bcecb 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -9,6 +9,7 @@ #include <linux/etherdevice.h> #include <linux/filter.h> #include <linux/interrupt.h> +#include <linux/irq.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/sched.h> @@ -16,6 +17,7 @@ #include <linux/workqueue.h> #include <linux/utsname.h> #include <linux/version.h> +#include <net/netdev_queues.h> #include <net/sch_generic.h> #include <net/xdp_sock_drv.h> #include "gve.h" @@ -253,6 +255,18 @@ static irqreturn_t gve_intr_dqo(int irq, void *arg) return IRQ_HANDLED; } +static int gve_is_napi_on_home_cpu(struct gve_priv *priv, u32 irq) +{ + int cpu_curr = smp_processor_id(); + const struct cpumask *aff_mask; + + aff_mask = irq_get_effective_affinity_mask(irq); + if (unlikely(!aff_mask)) + return 1; + + return cpumask_test_cpu(cpu_curr, aff_mask); +} + int gve_napi_poll(struct napi_struct *napi, int budget) { struct 
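/*
 * A standalone sketch of the acceptance test gve_validate_req_ring_size()
 * performs above: a requested descriptor count is accepted only if it
 * falls inside the device-advertised [min, max] window and is a power of
 * two. The bounds used below are made-up example values. From userspace
 * this path is reached with, e.g.:
 *
 *	ethtool -G eth0 rx 2048 tx 2048
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_pow2(uint16_t v)
{
	return v != 0 && (v & (v - 1)) == 0;
}

static bool ring_size_ok(uint16_t req, uint16_t min, uint16_t max)
{
	return req >= min && req <= max && is_pow2(req);
}

int main(void)
{
	/* Example window: 128..4096 descriptors. */
	printf("2048 -> %d\n", ring_size_ok(2048, 128, 4096));
	printf("3000 -> %d\n", ring_size_ok(3000, 128, 4096)); /* not a power of two */
	printf("  64 -> %d\n", ring_size_ok(64, 128, 4096));   /* below the minimum */
	return 0;
}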
gve_notify_block *block; @@ -322,8 +336,21 @@ int gve_napi_poll_dqo(struct napi_struct *napi, int budget) reschedule |= work_done == budget; } - if (reschedule) - return budget; + if (reschedule) { + /* Reschedule by returning budget only if already on the correct + * cpu. + */ + if (likely(gve_is_napi_on_home_cpu(priv, block->irq))) + return budget; + + /* If not on the cpu with which this queue's irq has affinity + * with, we avoid rescheduling napi and arm the irq instead so + * that napi gets rescheduled back eventually onto the right + * cpu. + */ + if (work_done == budget) + work_done--; + } if (likely(napi_complete_done(napi, work_done))) { /* Enable interrupts again. @@ -428,6 +455,7 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv) "Failed to receive msix vector %d\n", i); goto abort_with_some_ntfy_blocks; } + block->irq = priv->msix_vectors[msix_idx].vector; irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector, get_cpu_mask(i % active_cpus)); block->irq_db_index = &priv->irq_db_indices[i].index; @@ -441,6 +469,7 @@ abort_with_some_ntfy_blocks: irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector, NULL); free_irq(priv->msix_vectors[msix_idx].vector, block); + block->irq = 0; } kvfree(priv->ntfy_blocks); priv->ntfy_blocks = NULL; @@ -474,6 +503,7 @@ static void gve_free_notify_blocks(struct gve_priv *priv) irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector, NULL); free_irq(priv->msix_vectors[msix_idx].vector, block); + block->irq = 0; } free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv); kvfree(priv->ntfy_blocks); @@ -582,37 +612,36 @@ static void gve_teardown_device_resources(struct gve_priv *priv) gve_clear_device_resources_ok(priv); } -static int gve_unregister_qpl(struct gve_priv *priv, u32 i) +static int gve_unregister_qpl(struct gve_priv *priv, + struct gve_queue_page_list *qpl) { int err; - err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id); + if (!qpl) + return 0; + + err = gve_adminq_unregister_page_list(priv, qpl->id); if (err) { netif_err(priv, drv, priv->dev, "Failed to unregister queue page list %d\n", - priv->qpls[i].id); + qpl->id); return err; } - priv->num_registered_pages -= priv->qpls[i].num_entries; + priv->num_registered_pages -= qpl->num_entries; return 0; } -static int gve_register_qpl(struct gve_priv *priv, u32 i) +static int gve_register_qpl(struct gve_priv *priv, + struct gve_queue_page_list *qpl) { - int num_rx_qpls; int pages; int err; - /* Rx QPLs succeed Tx QPLs in the priv->qpls array. 
*/ - num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv)); - if (i >= gve_rx_start_qpl_id(&priv->tx_cfg) + num_rx_qpls) { - netif_err(priv, drv, priv->dev, - "Cannot register nonexisting QPL at index %d\n", i); - return -EINVAL; - } + if (!qpl) + return 0; - pages = priv->qpls[i].num_entries; + pages = qpl->num_entries; if (pages + priv->num_registered_pages > priv->max_registered_pages) { netif_err(priv, drv, priv->dev, @@ -622,14 +651,11 @@ static int gve_register_qpl(struct gve_priv *priv, u32 i) return -EINVAL; } - err = gve_adminq_register_page_list(priv, &priv->qpls[i]); + err = gve_adminq_register_page_list(priv, qpl); if (err) { netif_err(priv, drv, priv->dev, "failed to register queue page list %d\n", - priv->qpls[i].id); - /* This failure will trigger a reset - no need to clean - * up - */ + qpl->id); return err; } @@ -637,6 +663,26 @@ static int gve_register_qpl(struct gve_priv *priv, u32 i) return 0; } +static struct gve_queue_page_list *gve_tx_get_qpl(struct gve_priv *priv, int idx) +{ + struct gve_tx_ring *tx = &priv->tx[idx]; + + if (gve_is_gqi(priv)) + return tx->tx_fifo.qpl; + else + return tx->dqo.qpl; +} + +static struct gve_queue_page_list *gve_rx_get_qpl(struct gve_priv *priv, int idx) +{ + struct gve_rx_ring *rx = &priv->rx[idx]; + + if (gve_is_gqi(priv)) + return rx->data.qpl; + else + return rx->dqo.qpl; +} + static int gve_register_xdp_qpls(struct gve_priv *priv) { int start_id; @@ -645,7 +691,7 @@ static int gve_register_xdp_qpls(struct gve_priv *priv) start_id = gve_xdp_tx_start_queue_id(priv); for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) { - err = gve_register_qpl(priv, i); + err = gve_register_qpl(priv, gve_tx_get_qpl(priv, i)); /* This failure will trigger a reset - no need to clean up */ if (err) return err; @@ -656,7 +702,6 @@ static int gve_register_xdp_qpls(struct gve_priv *priv) static int gve_register_qpls(struct gve_priv *priv) { int num_tx_qpls, num_rx_qpls; - int start_id; int err; int i; @@ -665,15 +710,13 @@ static int gve_register_qpls(struct gve_priv *priv) num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv)); for (i = 0; i < num_tx_qpls; i++) { - err = gve_register_qpl(priv, i); + err = gve_register_qpl(priv, gve_tx_get_qpl(priv, i)); if (err) return err; } - /* there might be a gap between the tx and rx qpl ids */ - start_id = gve_rx_start_qpl_id(&priv->tx_cfg); for (i = 0; i < num_rx_qpls; i++) { - err = gve_register_qpl(priv, start_id + i); + err = gve_register_qpl(priv, gve_rx_get_qpl(priv, i)); if (err) return err; } @@ -689,7 +732,7 @@ static int gve_unregister_xdp_qpls(struct gve_priv *priv) start_id = gve_xdp_tx_start_queue_id(priv); for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) { - err = gve_unregister_qpl(priv, i); + err = gve_unregister_qpl(priv, gve_tx_get_qpl(priv, i)); /* This failure will trigger a reset - no need to clean */ if (err) return err; @@ -700,7 +743,6 @@ static int gve_unregister_xdp_qpls(struct gve_priv *priv) static int gve_unregister_qpls(struct gve_priv *priv) { int num_tx_qpls, num_rx_qpls; - int start_id; int err; int i; @@ -709,15 +751,14 @@ static int gve_unregister_qpls(struct gve_priv *priv) num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv)); for (i = 0; i < num_tx_qpls; i++) { - err = gve_unregister_qpl(priv, i); + err = gve_unregister_qpl(priv, gve_tx_get_qpl(priv, i)); /* This failure will trigger a reset - no need to clean */ if (err) return err; } - start_id = gve_rx_start_qpl_id(&priv->tx_cfg); for (i = 0; i < num_rx_qpls; i++) { - err = 
gve_unregister_qpl(priv, start_id + i); + err = gve_unregister_qpl(priv, gve_rx_get_qpl(priv, i)); /* This failure will trigger a reset - no need to clean */ if (err) return err; @@ -828,8 +869,6 @@ static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv, { cfg->qcfg = &priv->tx_cfg; cfg->raw_addressing = !gve_is_qpl(priv); - cfg->qpls = priv->qpls; - cfg->qpl_cfg = &priv->qpl_cfg; cfg->ring_size = priv->tx_desc_cnt; cfg->start_idx = 0; cfg->num_rings = gve_num_tx_queues(priv); @@ -886,9 +925,9 @@ static int gve_alloc_xdp_rings(struct gve_priv *priv) return 0; } -static int gve_alloc_rings(struct gve_priv *priv, - struct gve_tx_alloc_rings_cfg *tx_alloc_cfg, - struct gve_rx_alloc_rings_cfg *rx_alloc_cfg) +static int gve_queues_mem_alloc(struct gve_priv *priv, + struct gve_tx_alloc_rings_cfg *tx_alloc_cfg, + struct gve_rx_alloc_rings_cfg *rx_alloc_cfg) { int err; @@ -974,9 +1013,9 @@ static void gve_free_xdp_rings(struct gve_priv *priv) } } -static void gve_free_rings(struct gve_priv *priv, - struct gve_tx_alloc_rings_cfg *tx_cfg, - struct gve_rx_alloc_rings_cfg *rx_cfg) +static void gve_queues_mem_free(struct gve_priv *priv, + struct gve_tx_alloc_rings_cfg *tx_cfg, + struct gve_rx_alloc_rings_cfg *rx_cfg) { if (gve_is_gqi(priv)) { gve_tx_free_rings_gqi(priv, tx_cfg); @@ -1005,35 +1044,41 @@ int gve_alloc_page(struct gve_priv *priv, struct device *dev, return 0; } -static int gve_alloc_queue_page_list(struct gve_priv *priv, - struct gve_queue_page_list *qpl, - u32 id, int pages) +struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv, + u32 id, int pages) { + struct gve_queue_page_list *qpl; int err; int i; + qpl = kvzalloc(sizeof(*qpl), GFP_KERNEL); + if (!qpl) + return NULL; + qpl->id = id; qpl->num_entries = 0; qpl->pages = kvcalloc(pages, sizeof(*qpl->pages), GFP_KERNEL); - /* caller handles clean up */ if (!qpl->pages) - return -ENOMEM; + goto abort; + qpl->page_buses = kvcalloc(pages, sizeof(*qpl->page_buses), GFP_KERNEL); - /* caller handles clean up */ if (!qpl->page_buses) - return -ENOMEM; + goto abort; for (i = 0; i < pages; i++) { err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i], &qpl->page_buses[i], gve_qpl_dma_dir(priv, id), GFP_KERNEL); - /* caller handles clean up */ if (err) - return -ENOMEM; + goto abort; qpl->num_entries++; } - return 0; + return qpl; + +abort: + gve_free_queue_page_list(priv, qpl, id); + return NULL; } void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma, @@ -1045,14 +1090,16 @@ void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma, put_page(page); } -static void gve_free_queue_page_list(struct gve_priv *priv, - struct gve_queue_page_list *qpl, - int id) +void gve_free_queue_page_list(struct gve_priv *priv, + struct gve_queue_page_list *qpl, + u32 id) { int i; - if (!qpl->pages) + if (!qpl) return; + if (!qpl->pages) + goto free_qpl; if (!qpl->page_buses) goto free_pages; @@ -1065,120 +1112,8 @@ static void gve_free_queue_page_list(struct gve_priv *priv, free_pages: kvfree(qpl->pages); qpl->pages = NULL; -} - -static void gve_free_n_qpls(struct gve_priv *priv, - struct gve_queue_page_list *qpls, - int start_id, - int num_qpls) -{ - int i; - - for (i = start_id; i < start_id + num_qpls; i++) - gve_free_queue_page_list(priv, &qpls[i], i); -} - -static int gve_alloc_n_qpls(struct gve_priv *priv, - struct gve_queue_page_list *qpls, - int page_count, - int start_id, - int num_qpls) -{ - int err; - int i; - - for (i = start_id; i < start_id + num_qpls; i++) { - err = 
gve_alloc_queue_page_list(priv, &qpls[i], i, page_count); - if (err) - goto free_qpls; - } - - return 0; - -free_qpls: - /* Must include the failing QPL too for gve_alloc_queue_page_list fails - * without cleaning up. - */ - gve_free_n_qpls(priv, qpls, start_id, i - start_id + 1); - return err; -} - -static int gve_alloc_qpls(struct gve_priv *priv, - struct gve_qpls_alloc_cfg *cfg) -{ - int max_queues = cfg->tx_cfg->max_queues + cfg->rx_cfg->max_queues; - int rx_start_id, tx_num_qpls, rx_num_qpls; - struct gve_queue_page_list *qpls; - int page_count; - int err; - - if (cfg->raw_addressing) - return 0; - - qpls = kvcalloc(max_queues, sizeof(*qpls), GFP_KERNEL); - if (!qpls) - return -ENOMEM; - - cfg->qpl_cfg->qpl_map_size = BITS_TO_LONGS(max_queues) * - sizeof(unsigned long) * BITS_PER_BYTE; - cfg->qpl_cfg->qpl_id_map = kvcalloc(BITS_TO_LONGS(max_queues), - sizeof(unsigned long), GFP_KERNEL); - if (!cfg->qpl_cfg->qpl_id_map) { - err = -ENOMEM; - goto free_qpl_array; - } - - /* Allocate TX QPLs */ - page_count = priv->tx_pages_per_qpl; - tx_num_qpls = gve_num_tx_qpls(cfg->tx_cfg, cfg->num_xdp_queues, - gve_is_qpl(priv)); - err = gve_alloc_n_qpls(priv, qpls, page_count, 0, tx_num_qpls); - if (err) - goto free_qpl_map; - - /* Allocate RX QPLs */ - rx_start_id = gve_rx_start_qpl_id(cfg->tx_cfg); - /* For GQI_QPL number of pages allocated have 1:1 relationship with - * number of descriptors. For DQO, number of pages required are - * more than descriptors (because of out of order completions). - */ - page_count = cfg->is_gqi ? priv->rx_data_slot_cnt : priv->rx_pages_per_qpl; - rx_num_qpls = gve_num_rx_qpls(cfg->rx_cfg, gve_is_qpl(priv)); - err = gve_alloc_n_qpls(priv, qpls, page_count, rx_start_id, rx_num_qpls); - if (err) - goto free_tx_qpls; - - cfg->qpls = qpls; - return 0; - -free_tx_qpls: - gve_free_n_qpls(priv, qpls, 0, tx_num_qpls); -free_qpl_map: - kvfree(cfg->qpl_cfg->qpl_id_map); - cfg->qpl_cfg->qpl_id_map = NULL; -free_qpl_array: - kvfree(qpls); - return err; -} - -static void gve_free_qpls(struct gve_priv *priv, - struct gve_qpls_alloc_cfg *cfg) -{ - int max_queues = cfg->tx_cfg->max_queues + cfg->rx_cfg->max_queues; - struct gve_queue_page_list *qpls = cfg->qpls; - int i; - - if (!qpls) - return; - - kvfree(cfg->qpl_cfg->qpl_id_map); - cfg->qpl_cfg->qpl_id_map = NULL; - - for (i = 0; i < max_queues; i++) - gve_free_queue_page_list(priv, &qpls[i], i); - - kvfree(qpls); - cfg->qpls = NULL; +free_qpl: + kvfree(qpl); } /* Use this to schedule a reset when the device is capable of continuing @@ -1282,18 +1217,6 @@ static void gve_drain_page_cache(struct gve_priv *priv) page_frag_cache_drain(&priv->rx[i].page_cache); } -static void gve_qpls_get_curr_alloc_cfg(struct gve_priv *priv, - struct gve_qpls_alloc_cfg *cfg) -{ - cfg->raw_addressing = !gve_is_qpl(priv); - cfg->is_gqi = gve_is_gqi(priv); - cfg->num_xdp_queues = priv->num_xdp_queues; - cfg->qpl_cfg = &priv->qpl_cfg; - cfg->tx_cfg = &priv->tx_cfg; - cfg->rx_cfg = &priv->rx_cfg; - cfg->qpls = priv->qpls; -} - static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv, struct gve_rx_alloc_rings_cfg *cfg) { @@ -1301,8 +1224,6 @@ static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv, cfg->qcfg_tx = &priv->tx_cfg; cfg->raw_addressing = !gve_is_qpl(priv); cfg->enable_header_split = priv->header_split_enabled; - cfg->qpls = priv->qpls; - cfg->qpl_cfg = &priv->qpl_cfg; cfg->ring_size = priv->rx_desc_cnt; cfg->packet_buffer_size = gve_is_gqi(priv) ? 
GVE_DEFAULT_RX_BUFFER_SIZE : @@ -1310,90 +1231,56 @@ static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv, cfg->rx = priv->rx; } -static void gve_get_curr_alloc_cfgs(struct gve_priv *priv, - struct gve_qpls_alloc_cfg *qpls_alloc_cfg, - struct gve_tx_alloc_rings_cfg *tx_alloc_cfg, - struct gve_rx_alloc_rings_cfg *rx_alloc_cfg) +void gve_get_curr_alloc_cfgs(struct gve_priv *priv, + struct gve_tx_alloc_rings_cfg *tx_alloc_cfg, + struct gve_rx_alloc_rings_cfg *rx_alloc_cfg) { - gve_qpls_get_curr_alloc_cfg(priv, qpls_alloc_cfg); gve_tx_get_curr_alloc_cfg(priv, tx_alloc_cfg); gve_rx_get_curr_alloc_cfg(priv, rx_alloc_cfg); } -static void gve_rx_start_rings(struct gve_priv *priv, int num_rings) +static void gve_rx_start_ring(struct gve_priv *priv, int i) { - int i; - - for (i = 0; i < num_rings; i++) { - if (gve_is_gqi(priv)) - gve_rx_start_ring_gqi(priv, i); - else - gve_rx_start_ring_dqo(priv, i); - } + if (gve_is_gqi(priv)) + gve_rx_start_ring_gqi(priv, i); + else + gve_rx_start_ring_dqo(priv, i); } -static void gve_rx_stop_rings(struct gve_priv *priv, int num_rings) +static void gve_rx_start_rings(struct gve_priv *priv, int num_rings) { int i; - if (!priv->rx) - return; - - for (i = 0; i < num_rings; i++) { - if (gve_is_gqi(priv)) - gve_rx_stop_ring_gqi(priv, i); - else - gve_rx_stop_ring_dqo(priv, i); - } + for (i = 0; i < num_rings; i++) + gve_rx_start_ring(priv, i); } -static void gve_queues_mem_free(struct gve_priv *priv, - struct gve_qpls_alloc_cfg *qpls_alloc_cfg, - struct gve_tx_alloc_rings_cfg *tx_alloc_cfg, - struct gve_rx_alloc_rings_cfg *rx_alloc_cfg) +static void gve_rx_stop_ring(struct gve_priv *priv, int i) { - gve_free_rings(priv, tx_alloc_cfg, rx_alloc_cfg); - gve_free_qpls(priv, qpls_alloc_cfg); + if (gve_is_gqi(priv)) + gve_rx_stop_ring_gqi(priv, i); + else + gve_rx_stop_ring_dqo(priv, i); } -static int gve_queues_mem_alloc(struct gve_priv *priv, - struct gve_qpls_alloc_cfg *qpls_alloc_cfg, - struct gve_tx_alloc_rings_cfg *tx_alloc_cfg, - struct gve_rx_alloc_rings_cfg *rx_alloc_cfg) +static void gve_rx_stop_rings(struct gve_priv *priv, int num_rings) { - int err; - - err = gve_alloc_qpls(priv, qpls_alloc_cfg); - if (err) { - netif_err(priv, drv, priv->dev, "Failed to alloc QPLs\n"); - return err; - } - tx_alloc_cfg->qpls = qpls_alloc_cfg->qpls; - rx_alloc_cfg->qpls = qpls_alloc_cfg->qpls; - err = gve_alloc_rings(priv, tx_alloc_cfg, rx_alloc_cfg); - if (err) { - netif_err(priv, drv, priv->dev, "Failed to alloc rings\n"); - goto free_qpls; - } + int i; - return 0; + if (!priv->rx) + return; -free_qpls: - gve_free_qpls(priv, qpls_alloc_cfg); - return err; + for (i = 0; i < num_rings; i++) + gve_rx_stop_ring(priv, i); } static void gve_queues_mem_remove(struct gve_priv *priv) { struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0}; struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0}; - struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0}; - gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg, - &tx_alloc_cfg, &rx_alloc_cfg); - gve_queues_mem_free(priv, &qpls_alloc_cfg, - &tx_alloc_cfg, &rx_alloc_cfg); - priv->qpls = NULL; + gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg); + gve_queues_mem_free(priv, &tx_alloc_cfg, &rx_alloc_cfg); priv->tx = NULL; priv->rx = NULL; } @@ -1402,7 +1289,6 @@ static void gve_queues_mem_remove(struct gve_priv *priv) * No memory is allocated. Passed-in memory is freed on errors. 
*/ static int gve_queues_start(struct gve_priv *priv, - struct gve_qpls_alloc_cfg *qpls_alloc_cfg, struct gve_tx_alloc_rings_cfg *tx_alloc_cfg, struct gve_rx_alloc_rings_cfg *rx_alloc_cfg) { @@ -1410,12 +1296,10 @@ static int gve_queues_start(struct gve_priv *priv, int err; /* Record new resources into priv */ - priv->qpls = qpls_alloc_cfg->qpls; priv->tx = tx_alloc_cfg->tx; priv->rx = rx_alloc_cfg->rx; /* Record new configs into priv */ - priv->qpl_cfg = *qpls_alloc_cfg->qpl_cfg; priv->tx_cfg = *tx_alloc_cfg->qcfg; priv->rx_cfg = *rx_alloc_cfg->qcfg; priv->tx_desc_cnt = tx_alloc_cfg->ring_size; @@ -1483,23 +1367,19 @@ static int gve_open(struct net_device *dev) { struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0}; struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0}; - struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0}; struct gve_priv *priv = netdev_priv(dev); int err; - gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg, - &tx_alloc_cfg, &rx_alloc_cfg); + gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg); - err = gve_queues_mem_alloc(priv, &qpls_alloc_cfg, - &tx_alloc_cfg, &rx_alloc_cfg); + err = gve_queues_mem_alloc(priv, &tx_alloc_cfg, &rx_alloc_cfg); if (err) return err; /* No need to free on error: ownership of resources is lost after * calling gve_queues_start. */ - err = gve_queues_start(priv, &qpls_alloc_cfg, - &tx_alloc_cfg, &rx_alloc_cfg); + err = gve_queues_start(priv, &tx_alloc_cfg, &rx_alloc_cfg); if (err) return err; @@ -1558,11 +1438,8 @@ static int gve_close(struct net_device *dev) static int gve_remove_xdp_queues(struct gve_priv *priv) { - int qpl_start_id; int err; - qpl_start_id = gve_xdp_tx_start_queue_id(priv); - err = gve_destroy_xdp_rings(priv); if (err) return err; @@ -1574,27 +1451,19 @@ static int gve_remove_xdp_queues(struct gve_priv *priv) gve_unreg_xdp_info(priv); gve_free_xdp_rings(priv); - gve_free_n_qpls(priv, priv->qpls, qpl_start_id, gve_num_xdp_qpls(priv)); priv->num_xdp_queues = 0; return 0; } static int gve_add_xdp_queues(struct gve_priv *priv) { - int start_id; int err; priv->num_xdp_queues = priv->rx_cfg.num_queues; - start_id = gve_xdp_tx_start_queue_id(priv); - err = gve_alloc_n_qpls(priv, priv->qpls, priv->tx_pages_per_qpl, - start_id, gve_num_xdp_qpls(priv)); - if (err) - goto err; - err = gve_alloc_xdp_rings(priv); if (err) - goto free_xdp_qpls; + goto err; err = gve_reg_xdp_info(priv, priv->dev); if (err) @@ -1612,8 +1481,6 @@ static int gve_add_xdp_queues(struct gve_priv *priv) free_xdp_rings: gve_free_xdp_rings(priv); -free_xdp_qpls: - gve_free_n_qpls(priv, priv->qpls, start_id, gve_num_xdp_qpls(priv)); err: priv->num_xdp_queues = 0; return err; @@ -1863,16 +1730,14 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp) } } -static int gve_adjust_config(struct gve_priv *priv, - struct gve_qpls_alloc_cfg *qpls_alloc_cfg, - struct gve_tx_alloc_rings_cfg *tx_alloc_cfg, - struct gve_rx_alloc_rings_cfg *rx_alloc_cfg) +int gve_adjust_config(struct gve_priv *priv, + struct gve_tx_alloc_rings_cfg *tx_alloc_cfg, + struct gve_rx_alloc_rings_cfg *rx_alloc_cfg) { int err; /* Allocate resources for the new confiugration */ - err = gve_queues_mem_alloc(priv, qpls_alloc_cfg, - tx_alloc_cfg, rx_alloc_cfg); + err = gve_queues_mem_alloc(priv, tx_alloc_cfg, rx_alloc_cfg); if (err) { netif_err(priv, drv, priv->dev, "Adjust config failed to alloc new queues"); @@ -1884,14 +1749,12 @@ static int gve_adjust_config(struct gve_priv *priv, if (err) { netif_err(priv, drv, priv->dev, "Adjust config failed to close old queues"); - gve_queues_mem_free(priv, 
qpls_alloc_cfg, - tx_alloc_cfg, rx_alloc_cfg); + gve_queues_mem_free(priv, tx_alloc_cfg, rx_alloc_cfg); return err; } /* Bring the device back up again with the new resources. */ - err = gve_queues_start(priv, qpls_alloc_cfg, - tx_alloc_cfg, rx_alloc_cfg); + err = gve_queues_start(priv, tx_alloc_cfg, rx_alloc_cfg); if (err) { netif_err(priv, drv, priv->dev, "Adjust config failed to start new queues, !!! DISABLING ALL QUEUES !!!\n"); @@ -1911,32 +1774,18 @@ int gve_adjust_queues(struct gve_priv *priv, { struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0}; struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0}; - struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0}; - struct gve_qpl_config new_qpl_cfg; int err; - gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg, - &tx_alloc_cfg, &rx_alloc_cfg); - - /* qpl_cfg is not read-only, it contains a map that gets updated as - * rings are allocated, which is why we cannot use the yet unreleased - * one in priv. - */ - qpls_alloc_cfg.qpl_cfg = &new_qpl_cfg; - tx_alloc_cfg.qpl_cfg = &new_qpl_cfg; - rx_alloc_cfg.qpl_cfg = &new_qpl_cfg; + gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg); /* Relay the new config from ethtool */ - qpls_alloc_cfg.tx_cfg = &new_tx_config; tx_alloc_cfg.qcfg = &new_tx_config; rx_alloc_cfg.qcfg_tx = &new_tx_config; - qpls_alloc_cfg.rx_cfg = &new_rx_config; rx_alloc_cfg.qcfg = &new_rx_config; tx_alloc_cfg.num_rings = new_tx_config.num_queues; if (netif_carrier_ok(priv->dev)) { - err = gve_adjust_config(priv, &qpls_alloc_cfg, - &tx_alloc_cfg, &rx_alloc_cfg); + err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg); return err; } /* Set the config for the next up. */ @@ -1961,12 +1810,16 @@ static void gve_turndown(struct gve_priv *priv) int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx); struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; + if (!gve_tx_was_added_to_block(priv, idx)) + continue; napi_disable(&block->napi); } for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) { int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx); struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; + if (!gve_rx_was_added_to_block(priv, idx)) + continue; napi_disable(&block->napi); } @@ -1989,6 +1842,9 @@ static void gve_turnup(struct gve_priv *priv) int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx); struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; + if (!gve_tx_was_added_to_block(priv, idx)) + continue; + napi_enable(&block->napi); if (gve_is_gqi(priv)) { iowrite32be(0, gve_irq_doorbell(priv, block)); @@ -1996,11 +1852,21 @@ static void gve_turnup(struct gve_priv *priv) gve_set_itr_coalesce_usecs_dqo(priv, block, priv->tx_coalesce_usecs); } + + /* Any descs written by the NIC before this barrier will be + * handled by the one-off napi schedule below. Whereas any + * descs after the barrier will generate interrupts. + */ + mb(); + napi_schedule(&block->napi); } for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) { int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx); struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; + if (!gve_rx_was_added_to_block(priv, idx)) + continue; + napi_enable(&block->napi); if (gve_is_gqi(priv)) { iowrite32be(0, gve_irq_doorbell(priv, block)); @@ -2008,11 +1874,27 @@ static void gve_turnup(struct gve_priv *priv) gve_set_itr_coalesce_usecs_dqo(priv, block, priv->rx_coalesce_usecs); } + + /* Any descs written by the NIC before this barrier will be + * handled by the one-off napi schedule below. Whereas any + * descs after the barrier will generate interrupts. 
+ */ + mb(); + napi_schedule(&block->napi); } gve_set_napi_enabled(priv); } +static void gve_turnup_and_check_status(struct gve_priv *priv) +{ + u32 status; + + gve_turnup(priv); + status = ioread32be(&priv->reg_bar0->device_status); + gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status); +} + static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct gve_notify_block *block; @@ -2077,7 +1959,6 @@ int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split) { struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0}; struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0}; - struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0}; bool enable_hdr_split; int err = 0; @@ -2097,15 +1978,13 @@ int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split) if (enable_hdr_split == priv->header_split_enabled) return 0; - gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg, - &tx_alloc_cfg, &rx_alloc_cfg); + gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg); rx_alloc_cfg.enable_header_split = enable_hdr_split; rx_alloc_cfg.packet_buffer_size = gve_get_pkt_buf_size(priv, enable_hdr_split); if (netif_running(priv->dev)) - err = gve_adjust_config(priv, &qpls_alloc_cfg, - &tx_alloc_cfg, &rx_alloc_cfg); + err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg); return err; } @@ -2115,26 +1994,15 @@ static int gve_set_features(struct net_device *netdev, const netdev_features_t orig_features = netdev->features; struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0}; struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0}; - struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0}; struct gve_priv *priv = netdev_priv(netdev); - struct gve_qpl_config new_qpl_cfg; int err; - gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg, - &tx_alloc_cfg, &rx_alloc_cfg); - /* qpl_cfg is not read-only, it contains a map that gets updated as - * rings are allocated, which is why we cannot use the yet unreleased - * one in priv. - */ - qpls_alloc_cfg.qpl_cfg = &new_qpl_cfg; - tx_alloc_cfg.qpl_cfg = &new_qpl_cfg; - rx_alloc_cfg.qpl_cfg = &new_qpl_cfg; + gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg); if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) { netdev->features ^= NETIF_F_LRO; if (netif_carrier_ok(netdev)) { - err = gve_adjust_config(priv, &qpls_alloc_cfg, - &tx_alloc_cfg, &rx_alloc_cfg); + err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg); if (err) { /* Revert the change on error. 
*/ netdev->features = orig_features; @@ -2473,6 +2341,140 @@ static void gve_write_version(u8 __iomem *driver_version_register) writeb('\n', driver_version_register); } +static int gve_rx_queue_stop(struct net_device *dev, void *per_q_mem, int idx) +{ + struct gve_priv *priv = netdev_priv(dev); + struct gve_rx_ring *gve_per_q_mem; + int err; + + if (!priv->rx) + return -EAGAIN; + + /* Destroying queue 0 while other queues exist is not supported in DQO */ + if (!gve_is_gqi(priv) && idx == 0) + return -ERANGE; + + /* Single-queue destruction requires quiescence on all queues */ + gve_turndown(priv); + + /* This failure will trigger a reset - no need to clean up */ + err = gve_adminq_destroy_single_rx_queue(priv, idx); + if (err) + return err; + + if (gve_is_qpl(priv)) { + /* This failure will trigger a reset - no need to clean up */ + err = gve_unregister_qpl(priv, gve_rx_get_qpl(priv, idx)); + if (err) + return err; + } + + gve_rx_stop_ring(priv, idx); + + /* Turn the unstopped queues back up */ + gve_turnup_and_check_status(priv); + + gve_per_q_mem = (struct gve_rx_ring *)per_q_mem; + *gve_per_q_mem = priv->rx[idx]; + memset(&priv->rx[idx], 0, sizeof(priv->rx[idx])); + return 0; +} + +static void gve_rx_queue_mem_free(struct net_device *dev, void *per_q_mem) +{ + struct gve_priv *priv = netdev_priv(dev); + struct gve_rx_alloc_rings_cfg cfg = {0}; + struct gve_rx_ring *gve_per_q_mem; + + gve_per_q_mem = (struct gve_rx_ring *)per_q_mem; + gve_rx_get_curr_alloc_cfg(priv, &cfg); + + if (gve_is_gqi(priv)) + gve_rx_free_ring_gqi(priv, gve_per_q_mem, &cfg); + else + gve_rx_free_ring_dqo(priv, gve_per_q_mem, &cfg); +} + +static int gve_rx_queue_mem_alloc(struct net_device *dev, void *per_q_mem, + int idx) +{ + struct gve_priv *priv = netdev_priv(dev); + struct gve_rx_alloc_rings_cfg cfg = {0}; + struct gve_rx_ring *gve_per_q_mem; + int err; + + if (!priv->rx) + return -EAGAIN; + + gve_per_q_mem = (struct gve_rx_ring *)per_q_mem; + gve_rx_get_curr_alloc_cfg(priv, &cfg); + + if (gve_is_gqi(priv)) + err = gve_rx_alloc_ring_gqi(priv, &cfg, gve_per_q_mem, idx); + else + err = gve_rx_alloc_ring_dqo(priv, &cfg, gve_per_q_mem, idx); + + return err; +} + +static int gve_rx_queue_start(struct net_device *dev, void *per_q_mem, int idx) +{ + struct gve_priv *priv = netdev_priv(dev); + struct gve_rx_ring *gve_per_q_mem; + int err; + + if (!priv->rx) + return -EAGAIN; + + gve_per_q_mem = (struct gve_rx_ring *)per_q_mem; + priv->rx[idx] = *gve_per_q_mem; + + /* Single-queue creation requires quiescence on all queues */ + gve_turndown(priv); + + gve_rx_start_ring(priv, idx); + + if (gve_is_qpl(priv)) { + /* This failure will trigger a reset - no need to clean up */ + err = gve_register_qpl(priv, gve_rx_get_qpl(priv, idx)); + if (err) + goto abort; + } + + /* This failure will trigger a reset - no need to clean up */ + err = gve_adminq_create_single_rx_queue(priv, idx); + if (err) + goto abort; + + if (gve_is_gqi(priv)) + gve_rx_write_doorbell(priv, &priv->rx[idx]); + else + gve_rx_post_buffers_dqo(&priv->rx[idx]); + + /* Turn the unstopped queues back up */ + gve_turnup_and_check_status(priv); + return 0; + +abort: + gve_rx_stop_ring(priv, idx); + + /* All failures in this func result in a reset, by clearing the struct + * at idx, we prevent a double free when that reset runs. The reset, + * which needs the rtnl lock, will not run till this func returns and + * its caller gives up the lock. 
+ */ + memset(&priv->rx[idx], 0, sizeof(priv->rx[idx])); + return err; +} + +static const struct netdev_queue_mgmt_ops gve_queue_mgmt_ops = { + .ndo_queue_mem_size = sizeof(struct gve_rx_ring), + .ndo_queue_mem_alloc = gve_rx_queue_mem_alloc, + .ndo_queue_mem_free = gve_rx_queue_mem_free, + .ndo_queue_start = gve_rx_queue_start, + .ndo_queue_stop = gve_rx_queue_stop, +}; + static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { int max_tx_queues, max_rx_queues; @@ -2527,6 +2529,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, dev); dev->ethtool_ops = &gve_ethtool_ops; dev->netdev_ops = &gve_netdev_ops; + dev->queue_mgmt_ops = &gve_queue_mgmt_ops; /* Set default and supported features. * diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c index 20f5a9e7fae9..acb73d4d0de6 100644 --- a/drivers/net/ethernet/google/gve/gve_rx.c +++ b/drivers/net/ethernet/google/gve/gve_rx.c @@ -30,6 +30,9 @@ static void gve_rx_unfill_pages(struct gve_priv *priv, u32 slots = rx->mask + 1; int i; + if (!rx->data.page_info) + return; + if (rx->data.raw_addressing) { for (i = 0; i < slots; i++) gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i], @@ -38,8 +41,6 @@ static void gve_rx_unfill_pages(struct gve_priv *priv, for (i = 0; i < slots; i++) page_ref_sub(rx->data.page_info[i].page, rx->data.page_info[i].pagecnt_bias - 1); - gve_unassign_qpl(cfg->qpl_cfg, rx->data.qpl->id); - rx->data.qpl = NULL; for (i = 0; i < rx->qpl_copy_pool_mask + 1; i++) { page_ref_sub(rx->qpl_copy_pool[i].page, @@ -51,6 +52,41 @@ static void gve_rx_unfill_pages(struct gve_priv *priv, rx->data.page_info = NULL; } +static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx) +{ + ctx->skb_head = NULL; + ctx->skb_tail = NULL; + ctx->total_size = 0; + ctx->frag_cnt = 0; + ctx->drop_pkt = false; +} + +static void gve_rx_init_ring_state_gqi(struct gve_rx_ring *rx) +{ + rx->desc.seqno = 1; + rx->cnt = 0; + gve_rx_ctx_clear(&rx->ctx); +} + +static void gve_rx_reset_ring_gqi(struct gve_priv *priv, int idx) +{ + struct gve_rx_ring *rx = &priv->rx[idx]; + const u32 slots = priv->rx_desc_cnt; + size_t size; + + /* Reset desc ring */ + if (rx->desc.desc_ring) { + size = slots * sizeof(rx->desc.desc_ring[0]); + memset(rx->desc.desc_ring, 0, size); + } + + /* Reset q_resources */ + if (rx->q_resources) + memset(rx->q_resources, 0, sizeof(*rx->q_resources)); + + gve_rx_init_ring_state_gqi(rx); +} + void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx) { int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx); @@ -60,34 +96,48 @@ void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx) gve_remove_napi(priv, ntfy_idx); gve_rx_remove_from_block(priv, idx); + gve_rx_reset_ring_gqi(priv, idx); } -static void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx, - struct gve_rx_alloc_rings_cfg *cfg) +void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx, + struct gve_rx_alloc_rings_cfg *cfg) { struct device *dev = &priv->pdev->dev; u32 slots = rx->mask + 1; int idx = rx->q_num; size_t bytes; + u32 qpl_id; - bytes = sizeof(struct gve_rx_desc) * cfg->ring_size; - dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus); - rx->desc.desc_ring = NULL; + if (rx->desc.desc_ring) { + bytes = sizeof(struct gve_rx_desc) * cfg->ring_size; + dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus); + rx->desc.desc_ring = NULL; + } - dma_free_coherent(dev, sizeof(*rx->q_resources), - rx->q_resources, 
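/*
 * A compilable outline of how a caller can drive a single-RX-queue
 * restart through the four callbacks registered in gve_queue_mgmt_ops
 * above: allocate memory for the replacement queue, stop the live queue
 * into caller-owned storage, start the replacement, then free the retired
 * queue's memory. All types, names and stub bodies here are stand-ins;
 * the in-tree consumer of netdev_queue_mgmt_ops is not part of this hunk,
 * and restoring the old queue when start fails is elided.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_dev { int dummy; };

struct queue_ops {
	size_t mem_size;
	int  (*mem_alloc)(struct fake_dev *dev, void *per_q_mem, int idx);
	void (*mem_free)(struct fake_dev *dev, void *per_q_mem);
	int  (*start)(struct fake_dev *dev, void *per_q_mem, int idx);
	int  (*stop)(struct fake_dev *dev, void *per_q_mem, int idx);
};

static int restart_rx_queue(struct fake_dev *dev, const struct queue_ops *ops,
			    int idx)
{
	void *new_mem = calloc(1, ops->mem_size);
	void *old_mem = calloc(1, ops->mem_size);
	int err = -1;

	if (!new_mem || !old_mem)
		goto out;

	err = ops->mem_alloc(dev, new_mem, idx);	/* build replacement resources */
	if (err)
		goto out;
	err = ops->stop(dev, old_mem, idx);		/* live queue state lands in old_mem */
	if (err)
		goto out_free_new;
	err = ops->start(dev, new_mem, idx);		/* install and start the replacement */
	if (err)
		goto out_free_new;
	ops->mem_free(dev, old_mem);			/* release the retired queue */
	goto out;

out_free_new:
	ops->mem_free(dev, new_mem);
out:
	free(new_mem);
	free(old_mem);
	return err;
}

static int stub_alloc(struct fake_dev *d, void *m, int i) { (void)d; (void)m; (void)i; return 0; }
static void stub_free(struct fake_dev *d, void *m) { (void)d; (void)m; }
static int stub_start(struct fake_dev *d, void *m, int i) { (void)d; (void)m; (void)i; return 0; }
static int stub_stop(struct fake_dev *d, void *m, int i) { (void)d; (void)m; (void)i; return 0; }

int main(void)
{
	struct fake_dev dev = { 0 };
	const struct queue_ops ops = {
		.mem_size  = 64,
		.mem_alloc = stub_alloc,
		.mem_free  = stub_free,
		.start     = stub_start,
		.stop      = stub_stop,
	};

	printf("restart rx queue 0: %d\n", restart_rx_queue(&dev, &ops, 0));
	return 0;
}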
rx->q_resources_bus); - rx->q_resources = NULL; + if (rx->q_resources) { + dma_free_coherent(dev, sizeof(*rx->q_resources), + rx->q_resources, rx->q_resources_bus); + rx->q_resources = NULL; + } gve_rx_unfill_pages(priv, rx, cfg); - bytes = sizeof(*rx->data.data_ring) * slots; - dma_free_coherent(dev, bytes, rx->data.data_ring, - rx->data.data_bus); - rx->data.data_ring = NULL; + if (rx->data.data_ring) { + bytes = sizeof(*rx->data.data_ring) * slots; + dma_free_coherent(dev, bytes, rx->data.data_ring, + rx->data.data_bus); + rx->data.data_ring = NULL; + } kvfree(rx->qpl_copy_pool); rx->qpl_copy_pool = NULL; + if (rx->data.qpl) { + qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, idx); + gve_free_queue_page_list(priv, rx->data.qpl, qpl_id); + rx->data.qpl = NULL; + } + netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx); } @@ -144,14 +194,6 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx, if (!rx->data.page_info) return -ENOMEM; - if (!rx->data.raw_addressing) { - rx->data.qpl = gve_assign_rx_qpl(cfg, rx->q_num); - if (!rx->data.qpl) { - kvfree(rx->data.page_info); - rx->data.page_info = NULL; - return -ENOMEM; - } - } for (i = 0; i < slots; i++) { if (!rx->data.raw_addressing) { struct page *page = rx->data.qpl->pages[i]; @@ -204,9 +246,6 @@ alloc_err_qpl: page_ref_sub(rx->data.page_info[i].page, rx->data.page_info[i].pagecnt_bias - 1); - gve_unassign_qpl(cfg->qpl_cfg, rx->data.qpl->id); - rx->data.qpl = NULL; - return err; alloc_err_rda: @@ -217,15 +256,6 @@ alloc_err_rda: return err; } -static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx) -{ - ctx->skb_head = NULL; - ctx->skb_tail = NULL; - ctx->total_size = 0; - ctx->frag_cnt = 0; - ctx->drop_pkt = false; -} - void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx) { int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx); @@ -234,14 +264,16 @@ void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx) gve_add_napi(priv, ntfy_idx, gve_napi_poll); } -static int gve_rx_alloc_ring_gqi(struct gve_priv *priv, - struct gve_rx_alloc_rings_cfg *cfg, - struct gve_rx_ring *rx, - int idx) +int gve_rx_alloc_ring_gqi(struct gve_priv *priv, + struct gve_rx_alloc_rings_cfg *cfg, + struct gve_rx_ring *rx, + int idx) { struct device *hdev = &priv->pdev->dev; - u32 slots = priv->rx_data_slot_cnt; + u32 slots = cfg->ring_size; int filled_pages; + int qpl_page_cnt; + u32 qpl_id = 0; size_t bytes; int err; @@ -274,10 +306,22 @@ static int gve_rx_alloc_ring_gqi(struct gve_priv *priv, goto abort_with_slots; } + if (!rx->data.raw_addressing) { + qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num); + qpl_page_cnt = cfg->ring_size; + + rx->data.qpl = gve_alloc_queue_page_list(priv, qpl_id, + qpl_page_cnt); + if (!rx->data.qpl) { + err = -ENOMEM; + goto abort_with_copy_pool; + } + } + filled_pages = gve_rx_prefill_pages(rx, cfg); if (filled_pages < 0) { err = -ENOMEM; - goto abort_with_copy_pool; + goto abort_with_qpl; } rx->fill_cnt = filled_pages; /* Ensure data ring slots (packet buffers) are visible. 
*/ @@ -304,9 +348,8 @@ static int gve_rx_alloc_ring_gqi(struct gve_priv *priv, err = -ENOMEM; goto abort_with_q_resources; } - rx->cnt = 0; rx->db_threshold = slots / 2; - rx->desc.seqno = 1; + gve_rx_init_ring_state_gqi(rx); rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE; gve_rx_ctx_clear(&rx->ctx); @@ -319,6 +362,11 @@ abort_with_q_resources: rx->q_resources = NULL; abort_filled: gve_rx_unfill_pages(priv, rx, cfg); +abort_with_qpl: + if (!rx->data.raw_addressing) { + gve_free_queue_page_list(priv, rx->data.qpl, qpl_id); + rx->data.qpl = NULL; + } abort_with_copy_pool: kvfree(rx->qpl_copy_pool); rx->qpl_copy_pool = NULL; @@ -337,12 +385,6 @@ int gve_rx_alloc_rings_gqi(struct gve_priv *priv, int err = 0; int i, j; - if (!cfg->raw_addressing && !cfg->qpls) { - netif_err(priv, drv, priv->dev, - "Cannot alloc QPL ring before allocing QPLs\n"); - return -EINVAL; - } - rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring), GFP_KERNEL); if (!rx) diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c index 8e8071308aeb..c1c912de59c7 100644 --- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c @@ -178,7 +178,7 @@ static int gve_alloc_page_dqo(struct gve_rx_ring *rx, return err; } else { idx = rx->dqo.next_qpl_page_idx; - if (idx >= priv->rx_pages_per_qpl) { + if (idx >= gve_get_rx_pages_per_qpl_dqo(priv->rx_desc_cnt)) { net_err_ratelimited("%s: Out of QPL pages\n", priv->dev->name); return -ENOMEM; @@ -211,6 +211,82 @@ static void gve_rx_free_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx) } } +static void gve_rx_init_ring_state_dqo(struct gve_rx_ring *rx, + const u32 buffer_queue_slots, + const u32 completion_queue_slots) +{ + int i; + + /* Set buffer queue state */ + rx->dqo.bufq.mask = buffer_queue_slots - 1; + rx->dqo.bufq.head = 0; + rx->dqo.bufq.tail = 0; + + /* Set completion queue state */ + rx->dqo.complq.num_free_slots = completion_queue_slots; + rx->dqo.complq.mask = completion_queue_slots - 1; + rx->dqo.complq.cur_gen_bit = 0; + rx->dqo.complq.head = 0; + + /* Set RX SKB context */ + rx->ctx.skb_head = NULL; + rx->ctx.skb_tail = NULL; + + /* Set up linked list of buffer IDs */ + if (rx->dqo.buf_states) { + for (i = 0; i < rx->dqo.num_buf_states - 1; i++) + rx->dqo.buf_states[i].next = i + 1; + rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1; + } + + rx->dqo.free_buf_states = 0; + rx->dqo.recycled_buf_states.head = -1; + rx->dqo.recycled_buf_states.tail = -1; + rx->dqo.used_buf_states.head = -1; + rx->dqo.used_buf_states.tail = -1; +} + +static void gve_rx_reset_ring_dqo(struct gve_priv *priv, int idx) +{ + struct gve_rx_ring *rx = &priv->rx[idx]; + size_t size; + int i; + + const u32 buffer_queue_slots = priv->rx_desc_cnt; + const u32 completion_queue_slots = priv->rx_desc_cnt; + + /* Reset buffer queue */ + if (rx->dqo.bufq.desc_ring) { + size = sizeof(rx->dqo.bufq.desc_ring[0]) * + buffer_queue_slots; + memset(rx->dqo.bufq.desc_ring, 0, size); + } + + /* Reset completion queue */ + if (rx->dqo.complq.desc_ring) { + size = sizeof(rx->dqo.complq.desc_ring[0]) * + completion_queue_slots; + memset(rx->dqo.complq.desc_ring, 0, size); + } + + /* Reset q_resources */ + if (rx->q_resources) + memset(rx->q_resources, 0, sizeof(*rx->q_resources)); + + /* Reset buf states */ + if (rx->dqo.buf_states) { + for (i = 0; i < rx->dqo.num_buf_states; i++) { + struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i]; + + if (bs->page_info.page) + gve_free_page_dqo(priv, bs, 
!rx->dqo.qpl); + } + } + + gve_rx_init_ring_state_dqo(rx, buffer_queue_slots, + completion_queue_slots); +} + void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx) { int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx); @@ -220,16 +296,18 @@ void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx) gve_remove_napi(priv, ntfy_idx); gve_rx_remove_from_block(priv, idx); + gve_rx_reset_ring_dqo(priv, idx); } -static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx, - struct gve_rx_alloc_rings_cfg *cfg) +void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx, + struct gve_rx_alloc_rings_cfg *cfg) { struct device *hdev = &priv->pdev->dev; size_t completion_queue_slots; size_t buffer_queue_slots; int idx = rx->q_num; size_t size; + u32 qpl_id; int i; completion_queue_slots = rx->dqo.complq.mask + 1; @@ -247,8 +325,10 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx, if (bs->page_info.page) gve_free_page_dqo(priv, bs, !rx->dqo.qpl); } + if (rx->dqo.qpl) { - gve_unassign_qpl(cfg->qpl_cfg, rx->dqo.qpl->id); + qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num); + gve_free_queue_page_list(priv, rx->dqo.qpl, qpl_id); rx->dqo.qpl = NULL; } @@ -275,10 +355,10 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx, netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx); } -static int gve_rx_alloc_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx) +static int gve_rx_alloc_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx, + const u32 buf_count) { struct device *hdev = &priv->pdev->dev; - int buf_count = rx->dqo.bufq.mask + 1; rx->dqo.hdr_bufs.data = dma_alloc_coherent(hdev, priv->header_buf_size * buf_count, &rx->dqo.hdr_bufs.addr, GFP_KERNEL); @@ -296,17 +376,17 @@ void gve_rx_start_ring_dqo(struct gve_priv *priv, int idx) gve_add_napi(priv, ntfy_idx, gve_napi_poll_dqo); } -static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, - struct gve_rx_alloc_rings_cfg *cfg, - struct gve_rx_ring *rx, - int idx) +int gve_rx_alloc_ring_dqo(struct gve_priv *priv, + struct gve_rx_alloc_rings_cfg *cfg, + struct gve_rx_ring *rx, + int idx) { struct device *hdev = &priv->pdev->dev; + int qpl_page_cnt; size_t size; - int i; + u32 qpl_id; - const u32 buffer_queue_slots = cfg->raw_addressing ? - priv->options_dqo_rda.rx_buff_ring_entries : cfg->ring_size; + const u32 buffer_queue_slots = cfg->ring_size; const u32 completion_queue_slots = cfg->ring_size; netif_dbg(priv, drv, priv->dev, "allocating rx ring DQO\n"); @@ -314,15 +394,10 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, memset(rx, 0, sizeof(*rx)); rx->gve = priv; rx->q_num = idx; - rx->dqo.bufq.mask = buffer_queue_slots - 1; - rx->dqo.complq.num_free_slots = completion_queue_slots; - rx->dqo.complq.mask = completion_queue_slots - 1; - rx->ctx.skb_head = NULL; - rx->ctx.skb_tail = NULL; rx->dqo.num_buf_states = cfg->raw_addressing ? 
min_t(s16, S16_MAX, buffer_queue_slots * 4) : - priv->rx_pages_per_qpl; + gve_get_rx_pages_per_qpl_dqo(cfg->ring_size); rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states, sizeof(rx->dqo.buf_states[0]), GFP_KERNEL); @@ -331,19 +406,9 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, /* Allocate header buffers for header-split */ if (cfg->enable_header_split) - if (gve_rx_alloc_hdr_bufs(priv, rx)) + if (gve_rx_alloc_hdr_bufs(priv, rx, buffer_queue_slots)) goto err; - /* Set up linked list of buffer IDs */ - for (i = 0; i < rx->dqo.num_buf_states - 1; i++) - rx->dqo.buf_states[i].next = i + 1; - - rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1; - rx->dqo.recycled_buf_states.head = -1; - rx->dqo.recycled_buf_states.tail = -1; - rx->dqo.used_buf_states.head = -1; - rx->dqo.used_buf_states.tail = -1; - /* Allocate RX completion queue */ size = sizeof(rx->dqo.complq.desc_ring[0]) * completion_queue_slots; @@ -360,7 +425,11 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, goto err; if (!cfg->raw_addressing) { - rx->dqo.qpl = gve_assign_rx_qpl(cfg, rx->q_num); + qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num); + qpl_page_cnt = gve_get_rx_pages_per_qpl_dqo(cfg->ring_size); + + rx->dqo.qpl = gve_alloc_queue_page_list(priv, qpl_id, + qpl_page_cnt); if (!rx->dqo.qpl) goto err; rx->dqo.next_qpl_page_idx = 0; @@ -371,6 +440,9 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, if (!rx->q_resources) goto err; + gve_rx_init_ring_state_dqo(rx, buffer_queue_slots, + completion_queue_slots); + return 0; err: @@ -393,12 +465,6 @@ int gve_rx_alloc_rings_dqo(struct gve_priv *priv, int err; int i; - if (!cfg->raw_addressing && !cfg->qpls) { - netif_err(priv, drv, priv->dev, - "Cannot alloc QPL ring before allocing QPLs\n"); - return -EINVAL; - } - rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring), GFP_KERNEL); if (!rx) diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c index 4b9853adc113..24a64ec1073e 100644 --- a/drivers/net/ethernet/google/gve/gve_tx.c +++ b/drivers/net/ethernet/google/gve/gve_tx.c @@ -216,6 +216,7 @@ static void gve_tx_free_ring_gqi(struct gve_priv *priv, struct gve_tx_ring *tx, struct device *hdev = &priv->pdev->dev; int idx = tx->q_num; size_t bytes; + u32 qpl_id; u32 slots; slots = tx->mask + 1; @@ -223,9 +224,12 @@ static void gve_tx_free_ring_gqi(struct gve_priv *priv, struct gve_tx_ring *tx, tx->q_resources, tx->q_resources_bus); tx->q_resources = NULL; - if (!tx->raw_addressing) { - gve_tx_fifo_release(priv, &tx->tx_fifo); - gve_unassign_qpl(cfg->qpl_cfg, tx->tx_fifo.qpl->id); + if (tx->tx_fifo.qpl) { + if (tx->tx_fifo.base) + gve_tx_fifo_release(priv, &tx->tx_fifo); + + qpl_id = gve_tx_qpl_id(priv, tx->q_num); + gve_free_queue_page_list(priv, tx->tx_fifo.qpl, qpl_id); tx->tx_fifo.qpl = NULL; } @@ -256,6 +260,8 @@ static int gve_tx_alloc_ring_gqi(struct gve_priv *priv, int idx) { struct device *hdev = &priv->pdev->dev; + int qpl_page_cnt; + u32 qpl_id = 0; size_t bytes; /* Make sure everything is zeroed to start */ @@ -280,9 +286,14 @@ static int gve_tx_alloc_ring_gqi(struct gve_priv *priv, tx->raw_addressing = cfg->raw_addressing; tx->dev = hdev; if (!tx->raw_addressing) { - tx->tx_fifo.qpl = gve_assign_tx_qpl(cfg, idx); + qpl_id = gve_tx_qpl_id(priv, tx->q_num); + qpl_page_cnt = priv->tx_pages_per_qpl; + + tx->tx_fifo.qpl = gve_alloc_queue_page_list(priv, qpl_id, + qpl_page_cnt); if (!tx->tx_fifo.qpl) goto abort_with_desc; + /* map Tx FIFO */ if (gve_tx_fifo_init(priv, &tx->tx_fifo)) 
goto abort_with_qpl; @@ -302,8 +313,10 @@ abort_with_fifo: if (!tx->raw_addressing) gve_tx_fifo_release(priv, &tx->tx_fifo); abort_with_qpl: - if (!tx->raw_addressing) - gve_unassign_qpl(cfg->qpl_cfg, tx->tx_fifo.qpl->id); + if (!tx->raw_addressing) { + gve_free_queue_page_list(priv, tx->tx_fifo.qpl, qpl_id); + tx->tx_fifo.qpl = NULL; + } abort_with_desc: dma_free_coherent(hdev, bytes, tx->desc, tx->bus); tx->desc = NULL; @@ -320,12 +333,6 @@ int gve_tx_alloc_rings_gqi(struct gve_priv *priv, int err = 0; int i, j; - if (!cfg->raw_addressing && !cfg->qpls) { - netif_err(priv, drv, priv->dev, - "Cannot alloc QPL ring before allocing QPLs\n"); - return -EINVAL; - } - if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) { netif_err(priv, drv, priv->dev, "Cannot alloc more than the max num of Tx rings\n"); diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c index bc34b6cd3a3e..fe1b26a4d736 100644 --- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c @@ -209,6 +209,7 @@ static void gve_tx_free_ring_dqo(struct gve_priv *priv, struct gve_tx_ring *tx, struct device *hdev = &priv->pdev->dev; int idx = tx->q_num; size_t bytes; + u32 qpl_id; if (tx->q_resources) { dma_free_coherent(hdev, sizeof(*tx->q_resources), @@ -237,7 +238,8 @@ static void gve_tx_free_ring_dqo(struct gve_priv *priv, struct gve_tx_ring *tx, tx->dqo.tx_qpl_buf_next = NULL; if (tx->dqo.qpl) { - gve_unassign_qpl(cfg->qpl_cfg, tx->dqo.qpl->id); + qpl_id = gve_tx_qpl_id(priv, tx->q_num); + gve_free_queue_page_list(priv, tx->dqo.qpl, qpl_id); tx->dqo.qpl = NULL; } @@ -285,7 +287,9 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, { struct device *hdev = &priv->pdev->dev; int num_pending_packets; + int qpl_page_cnt; size_t bytes; + u32 qpl_id; int i; memset(tx, 0, sizeof(*tx)); @@ -295,9 +299,7 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, /* Queue sizes must be a power of 2 */ tx->mask = cfg->ring_size - 1; - tx->dqo.complq_mask = priv->queue_format == GVE_DQO_RDA_FORMAT ? - priv->options_dqo_rda.tx_comp_ring_entries - 1 : - tx->mask; + tx->dqo.complq_mask = tx->mask; /* The max number of pending packets determines the maximum number of * descriptors which maybe written to the completion queue. 
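The gve hunks in this part of the diff convert descriptor rings, queue page lists and buffer state to be allocated, reset and freed strictly per queue; together with the gve_queue_mgmt_ops registered earlier in the diff (ndo_queue_mem_alloc/free, ndo_queue_start/stop), that is what allows one RX queue to be restarted without tearing down the whole device. The sketch below models the alloc-new, stop-old, start-new, free-old ordering those callbacks are meant to serve. It is an illustration only: the types and the queue_restart() helper are simplified stand-ins, not the actual net core or gve code.

    /*
     * Minimal sketch of a per-queue restart driven through queue
     * management callbacks. All types here are simplified stand-ins.
     */
    #include <stdlib.h>

    struct net_device;          /* opaque stand-in for the kernel structure */

    struct queue_mgmt_ops {
            size_t qmem_size;   /* size of one per-queue memory blob */
            int  (*mem_alloc)(struct net_device *dev, void *qmem, int idx);
            void (*mem_free)(struct net_device *dev, void *qmem);
            int  (*start)(struct net_device *dev, void *qmem, int idx);
            int  (*stop)(struct net_device *dev, void *qmem, int idx);
    };

    /*
     * Restart queue @idx. New resources are allocated before the running
     * queue is stopped, so an allocation failure leaves the old queue
     * untouched and still serving traffic.
     */
    static int queue_restart(struct net_device *dev,
                             const struct queue_mgmt_ops *ops, int idx)
    {
            void *new_mem = calloc(1, ops->qmem_size);
            void *old_mem = calloc(1, ops->qmem_size);
            int err = -1;

            if (!new_mem || !old_mem)
                    goto out;

            err = ops->mem_alloc(dev, new_mem, idx);  /* build fresh rings/QPL */
            if (err)
                    goto out;

            err = ops->stop(dev, old_mem, idx);       /* park the live queue, save its state */
            if (err) {
                    ops->mem_free(dev, new_mem);
                    goto out;
            }

            err = ops->start(dev, new_mem, idx);      /* install the new resources */
            if (err == 0)
                    ops->mem_free(dev, old_mem);      /* old rings no longer needed */
            /* a real implementation would try to bring the old queue back on failure */
    out:
            free(new_mem);
            free(old_mem);
            return err;
    }

The alloc-before-stop ordering is the point: a failed reallocation never leaves the queue dead, which is why the gve reset and free helpers above tolerate partially allocated state (NULL checks on desc rings, page_info and QPLs) instead of assuming a fully built ring.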
@@ -354,7 +356,11 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, goto err; if (!cfg->raw_addressing) { - tx->dqo.qpl = gve_assign_tx_qpl(cfg, idx); + qpl_id = gve_tx_qpl_id(priv, tx->q_num); + qpl_page_cnt = priv->tx_pages_per_qpl; + + tx->dqo.qpl = gve_alloc_queue_page_list(priv, qpl_id, + qpl_page_cnt); if (!tx->dqo.qpl) goto err; @@ -376,12 +382,6 @@ int gve_tx_alloc_rings_dqo(struct gve_priv *priv, int err = 0; int i, j; - if (!cfg->raw_addressing && !cfg->qpls) { - netif_err(priv, drv, priv->dev, - "Cannot alloc QPL ring before allocing QPLs\n"); - return -EINVAL; - } - if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) { netif_err(priv, drv, priv->dev, "Cannot alloc more than the max num of Tx rings\n"); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 8a713eed4465..fd32e15cadcb 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -1777,7 +1777,7 @@ static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu) } /* finally, set new mtu to netdevice */ - ndev->mtu = new_mtu; + WRITE_ONCE(ndev->mtu, new_mtu); out: if (if_running) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index f19f1e1d1f9f..7cebb08bd320 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -104,6 +104,7 @@ enum HNAE3_DEV_CAP_BITS { HNAE3_DEV_SUPPORT_WOL_B, HNAE3_DEV_SUPPORT_TM_FLUSH_B, HNAE3_DEV_SUPPORT_VF_FAULT_B, + HNAE3_DEV_SUPPORT_ERR_MOD_GEN_REG_B, }; #define hnae3_ae_dev_fd_supported(ae_dev) \ @@ -181,6 +182,9 @@ enum HNAE3_DEV_CAP_BITS { #define hnae3_ae_dev_vf_fault_supported(ae_dev) \ test_bit(HNAE3_DEV_SUPPORT_VF_FAULT_B, (ae_dev)->caps) +#define hnae3_ae_dev_gen_reg_dfx_supported(hdev) \ + test_bit(HNAE3_DEV_SUPPORT_ERR_MOD_GEN_REG_B, (hdev)->ae_dev->caps) + enum HNAE3_PF_CAP_BITS { HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0, }; @@ -362,6 +366,15 @@ struct hnae3_vector_info { #define HNAE3_FW_VERSION_BYTE0_SHIFT 0 #define HNAE3_FW_VERSION_BYTE0_MASK GENMASK(7, 0) +#define HNAE3_SCC_VERSION_BYTE3_SHIFT 24 +#define HNAE3_SCC_VERSION_BYTE3_MASK GENMASK(31, 24) +#define HNAE3_SCC_VERSION_BYTE2_SHIFT 16 +#define HNAE3_SCC_VERSION_BYTE2_MASK GENMASK(23, 16) +#define HNAE3_SCC_VERSION_BYTE1_SHIFT 8 +#define HNAE3_SCC_VERSION_BYTE1_MASK GENMASK(15, 8) +#define HNAE3_SCC_VERSION_BYTE0_SHIFT 0 +#define HNAE3_SCC_VERSION_BYTE0_MASK GENMASK(7, 0) + struct hnae3_ring_chain_node { struct hnae3_ring_chain_node *next; u32 tqp_index; @@ -897,7 +910,7 @@ struct hnae3_handle { struct hnae3_roce_private_info rinfo; }; - u32 numa_node_mask; /* for multi-chip support */ + nodemask_t numa_node_mask; /* for multi-chip support */ enum hnae3_port_base_vlan_state port_base_vlan_state; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c index 652d71326231..ea40b594dbac 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c @@ -158,6 +158,7 @@ static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = { {HCLGE_COMM_CAP_WOL_B, HNAE3_DEV_SUPPORT_WOL_B}, {HCLGE_COMM_CAP_TM_FLUSH_B, HNAE3_DEV_SUPPORT_TM_FLUSH_B}, {HCLGE_COMM_CAP_VF_FAULT_B, HNAE3_DEV_SUPPORT_VF_FAULT_B}, + {HCLGE_COMM_CAP_ERR_MOD_GEN_REG_B, HNAE3_DEV_SUPPORT_ERR_MOD_GEN_REG_B}, }; static const struct 
hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = { @@ -470,10 +471,14 @@ static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw, int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc, int num) { + bool is_special = hclge_comm_is_special_opcode(le16_to_cpu(desc->opcode)); struct hclge_comm_cmq_ring *csq = &hw->cmq.csq; int ret; int ntc; + if (hw->cmq.ops.trace_cmd_send) + hw->cmq.ops.trace_cmd_send(hw, desc, num, is_special); + spin_lock_bh(&hw->cmq.csq.lock); if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state)) { @@ -507,6 +512,9 @@ int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc, spin_unlock_bh(&hw->cmq.csq.lock); + if (hw->cmq.ops.trace_cmd_get) + hw->cmq.ops.trace_cmd_get(hw, desc, num, is_special); + return ret; } @@ -584,6 +592,17 @@ err_csq: return ret; } +void hclge_comm_cmd_init_ops(struct hclge_comm_hw *hw, + const struct hclge_comm_cmq_ops *ops) +{ + struct hclge_comm_cmq *cmdq = &hw->cmq; + + if (ops) { + cmdq->ops.trace_cmd_send = ops->trace_cmd_send; + cmdq->ops.trace_cmd_get = ops->trace_cmd_get; + } +} + int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw, u32 *fw_version, bool is_pf, unsigned long reset_pending) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h index 552396518e08..2c2a2f1e0d7a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h @@ -91,6 +91,7 @@ enum hclge_opcode_type { HCLGE_OPC_DFX_RCB_REG = 0x004D, HCLGE_OPC_DFX_TQP_REG = 0x004E, HCLGE_OPC_DFX_SSU_REG_2 = 0x004F, + HCLGE_OPC_DFX_GEN_REG = 0x7038, HCLGE_OPC_QUERY_DEV_SPECS = 0x0050, HCLGE_OPC_GET_QUEUE_ERR_VF = 0x0067, @@ -246,6 +247,9 @@ enum hclge_opcode_type { HCLGE_OPC_QCN_AJUST_INIT = 0x1A07, HCLGE_OPC_QCN_DFX_CNT_STATUS = 0x1A08, + /* SCC commands */ + HCLGE_OPC_QUERY_SCC_VER = 0x1A84, + /* Mailbox command */ HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000, HCLGEVF_OPC_MBX_VF_TO_PF = 0x2001, @@ -353,6 +357,7 @@ enum HCLGE_COMM_CAP_BITS { HCLGE_COMM_CAP_LANE_NUM_B = 27, HCLGE_COMM_CAP_WOL_B = 28, HCLGE_COMM_CAP_TM_FLUSH_B = 31, + HCLGE_COMM_CAP_ERR_MOD_GEN_REG_B = 32, }; enum HCLGE_COMM_API_CAP_BITS { @@ -392,6 +397,11 @@ struct hclge_comm_query_version_cmd { __le32 caps[HCLGE_COMM_QUERY_CAP_LENGTH]; /* capabilities of device */ }; +struct hclge_comm_query_scc_cmd { + __le32 scc_version; + u8 rsv[20]; +}; + #define HCLGE_DESC_DATA_LEN 6 struct hclge_desc { __le16 opcode; @@ -423,11 +433,22 @@ enum hclge_comm_cmd_status { HCLGE_COMM_ERR_CSQ_ERROR = -3, }; +struct hclge_comm_hw; +struct hclge_comm_cmq_ops { + void (*trace_cmd_send)(struct hclge_comm_hw *hw, + struct hclge_desc *desc, + int num, bool is_special); + void (*trace_cmd_get)(struct hclge_comm_hw *hw, + struct hclge_desc *desc, + int num, bool is_special); +}; + struct hclge_comm_cmq { struct hclge_comm_cmq_ring csq; struct hclge_comm_cmq_ring crq; u16 tx_timeout; enum hclge_comm_cmd_status last_status; + struct hclge_comm_cmq_ops ops; }; struct hclge_comm_hw { @@ -474,5 +495,6 @@ int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw); int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw, u32 *fw_version, bool is_pf, unsigned long reset_pending); - +void hclge_comm_cmd_init_ops(struct hclge_comm_hw *hw, + const struct hclge_comm_cmq_ops *ops); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 
b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 19668a8d22f7..dfdc0e032c07 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2761,7 +2761,7 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) netdev_err(netdev, "failed to change MTU in hardware %d\n", ret); else - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index 9ec471ced3d6..debf143e9940 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -18,6 +18,646 @@ static const char * const hclge_mac_state_str[] = { static const char * const tc_map_mode_str[] = { "PRIO", "DSCP" }; +static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = { + {false, "Reserved"}, + {true, "BP_CPU_STATE"}, + {true, "DFX_MSIX_INFO_NIC_0"}, + {true, "DFX_MSIX_INFO_NIC_1"}, + {true, "DFX_MSIX_INFO_NIC_2"}, + {true, "DFX_MSIX_INFO_NIC_3"}, + + {true, "DFX_MSIX_INFO_ROC_0"}, + {true, "DFX_MSIX_INFO_ROC_1"}, + {true, "DFX_MSIX_INFO_ROC_2"}, + {true, "DFX_MSIX_INFO_ROC_3"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_0[] = { + {false, "Reserved"}, + {true, "SSU_ETS_PORT_STATUS"}, + {true, "SSU_ETS_TCG_STATUS"}, + {false, "Reserved"}, + {false, "Reserved"}, + {true, "SSU_BP_STATUS_0"}, + + {true, "SSU_BP_STATUS_1"}, + {true, "SSU_BP_STATUS_2"}, + {true, "SSU_BP_STATUS_3"}, + {true, "SSU_BP_STATUS_4"}, + {true, "SSU_BP_STATUS_5"}, + {true, "SSU_MAC_TX_PFC_IND"}, + + {true, "MAC_SSU_RX_PFC_IND"}, + {true, "BTMP_AGEING_ST_B0"}, + {true, "BTMP_AGEING_ST_B1"}, + {true, "BTMP_AGEING_ST_B2"}, + {false, "Reserved"}, + {false, "Reserved"}, + + {true, "FULL_DROP_NUM"}, + {true, "PART_DROP_NUM"}, + {true, "PPP_KEY_DROP_NUM"}, + {true, "PPP_RLT_DROP_NUM"}, + {true, "LO_PRI_UNICAST_RLT_DROP_NUM"}, + {true, "HI_PRI_MULTICAST_RLT_DROP_NUM"}, + + {true, "LO_PRI_MULTICAST_RLT_DROP_NUM"}, + {true, "NCSI_PACKET_CURR_BUFFER_CNT"}, + {true, "BTMP_AGEING_RLS_CNT_BANK0"}, + {true, "BTMP_AGEING_RLS_CNT_BANK1"}, + {true, "BTMP_AGEING_RLS_CNT_BANK2"}, + {true, "SSU_MB_RD_RLT_DROP_CNT"}, + + {true, "SSU_PPP_MAC_KEY_NUM_L"}, + {true, "SSU_PPP_MAC_KEY_NUM_H"}, + {true, "SSU_PPP_HOST_KEY_NUM_L"}, + {true, "SSU_PPP_HOST_KEY_NUM_H"}, + {true, "PPP_SSU_MAC_RLT_NUM_L"}, + {true, "PPP_SSU_MAC_RLT_NUM_H"}, + + {true, "PPP_SSU_HOST_RLT_NUM_L"}, + {true, "PPP_SSU_HOST_RLT_NUM_H"}, + {true, "NCSI_RX_PACKET_IN_CNT_L"}, + {true, "NCSI_RX_PACKET_IN_CNT_H"}, + {true, "NCSI_TX_PACKET_OUT_CNT_L"}, + {true, "NCSI_TX_PACKET_OUT_CNT_H"}, + + {true, "SSU_KEY_DROP_NUM"}, + {true, "MB_UNCOPY_NUM"}, + {true, "RX_OQ_DROP_PKT_CNT"}, + {true, "TX_OQ_DROP_PKT_CNT"}, + {true, "BANK_UNBALANCE_DROP_CNT"}, + {true, "BANK_UNBALANCE_RX_DROP_CNT"}, + + {true, "NIC_L2_ERR_DROP_PKT_CNT"}, + {true, "ROC_L2_ERR_DROP_PKT_CNT"}, + {true, "NIC_L2_ERR_DROP_PKT_CNT_RX"}, + {true, "ROC_L2_ERR_DROP_PKT_CNT_RX"}, + {true, "RX_OQ_GLB_DROP_PKT_CNT"}, + {false, "Reserved"}, + + {true, "LO_PRI_UNICAST_CUR_CNT"}, + {true, "HI_PRI_MULTICAST_CUR_CNT"}, + {true, "LO_PRI_MULTICAST_CUR_CNT"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_1[] = { + {true, "prt_id"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_0"}, + {true, 
"PACKET_TC_CURR_BUFFER_CNT_1"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_2"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_3"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_4"}, + + {true, "PACKET_TC_CURR_BUFFER_CNT_5"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_6"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_7"}, + {true, "PACKET_CURR_BUFFER_CNT"}, + {false, "Reserved"}, + {false, "Reserved"}, + + {true, "RX_PACKET_IN_CNT_L"}, + {true, "RX_PACKET_IN_CNT_H"}, + {true, "RX_PACKET_OUT_CNT_L"}, + {true, "RX_PACKET_OUT_CNT_H"}, + {true, "TX_PACKET_IN_CNT_L"}, + {true, "TX_PACKET_IN_CNT_H"}, + + {true, "TX_PACKET_OUT_CNT_L"}, + {true, "TX_PACKET_OUT_CNT_H"}, + {true, "ROC_RX_PACKET_IN_CNT_L"}, + {true, "ROC_RX_PACKET_IN_CNT_H"}, + {true, "ROC_TX_PACKET_OUT_CNT_L"}, + {true, "ROC_TX_PACKET_OUT_CNT_H"}, + + {true, "RX_PACKET_TC_IN_CNT_0_L"}, + {true, "RX_PACKET_TC_IN_CNT_0_H"}, + {true, "RX_PACKET_TC_IN_CNT_1_L"}, + {true, "RX_PACKET_TC_IN_CNT_1_H"}, + {true, "RX_PACKET_TC_IN_CNT_2_L"}, + {true, "RX_PACKET_TC_IN_CNT_2_H"}, + + {true, "RX_PACKET_TC_IN_CNT_3_L"}, + {true, "RX_PACKET_TC_IN_CNT_3_H"}, + {true, "RX_PACKET_TC_IN_CNT_4_L"}, + {true, "RX_PACKET_TC_IN_CNT_4_H"}, + {true, "RX_PACKET_TC_IN_CNT_5_L"}, + {true, "RX_PACKET_TC_IN_CNT_5_H"}, + + {true, "RX_PACKET_TC_IN_CNT_6_L"}, + {true, "RX_PACKET_TC_IN_CNT_6_H"}, + {true, "RX_PACKET_TC_IN_CNT_7_L"}, + {true, "RX_PACKET_TC_IN_CNT_7_H"}, + {true, "RX_PACKET_TC_OUT_CNT_0_L"}, + {true, "RX_PACKET_TC_OUT_CNT_0_H"}, + + {true, "RX_PACKET_TC_OUT_CNT_1_L"}, + {true, "RX_PACKET_TC_OUT_CNT_1_H"}, + {true, "RX_PACKET_TC_OUT_CNT_2_L"}, + {true, "RX_PACKET_TC_OUT_CNT_2_H"}, + {true, "RX_PACKET_TC_OUT_CNT_3_L"}, + {true, "RX_PACKET_TC_OUT_CNT_3_H"}, + + {true, "RX_PACKET_TC_OUT_CNT_4_L"}, + {true, "RX_PACKET_TC_OUT_CNT_4_H"}, + {true, "RX_PACKET_TC_OUT_CNT_5_L"}, + {true, "RX_PACKET_TC_OUT_CNT_5_H"}, + {true, "RX_PACKET_TC_OUT_CNT_6_L"}, + {true, "RX_PACKET_TC_OUT_CNT_6_H"}, + + {true, "RX_PACKET_TC_OUT_CNT_7_L"}, + {true, "RX_PACKET_TC_OUT_CNT_7_H"}, + {true, "TX_PACKET_TC_IN_CNT_0_L"}, + {true, "TX_PACKET_TC_IN_CNT_0_H"}, + {true, "TX_PACKET_TC_IN_CNT_1_L"}, + {true, "TX_PACKET_TC_IN_CNT_1_H"}, + + {true, "TX_PACKET_TC_IN_CNT_2_L"}, + {true, "TX_PACKET_TC_IN_CNT_2_H"}, + {true, "TX_PACKET_TC_IN_CNT_3_L"}, + {true, "TX_PACKET_TC_IN_CNT_3_H"}, + {true, "TX_PACKET_TC_IN_CNT_4_L"}, + {true, "TX_PACKET_TC_IN_CNT_4_H"}, + + {true, "TX_PACKET_TC_IN_CNT_5_L"}, + {true, "TX_PACKET_TC_IN_CNT_5_H"}, + {true, "TX_PACKET_TC_IN_CNT_6_L"}, + {true, "TX_PACKET_TC_IN_CNT_6_H"}, + {true, "TX_PACKET_TC_IN_CNT_7_L"}, + {true, "TX_PACKET_TC_IN_CNT_7_H"}, + + {true, "TX_PACKET_TC_OUT_CNT_0_L"}, + {true, "TX_PACKET_TC_OUT_CNT_0_H"}, + {true, "TX_PACKET_TC_OUT_CNT_1_L"}, + {true, "TX_PACKET_TC_OUT_CNT_1_H"}, + {true, "TX_PACKET_TC_OUT_CNT_2_L"}, + {true, "TX_PACKET_TC_OUT_CNT_2_H"}, + + {true, "TX_PACKET_TC_OUT_CNT_3_L"}, + {true, "TX_PACKET_TC_OUT_CNT_3_H"}, + {true, "TX_PACKET_TC_OUT_CNT_4_L"}, + {true, "TX_PACKET_TC_OUT_CNT_4_H"}, + {true, "TX_PACKET_TC_OUT_CNT_5_L"}, + {true, "TX_PACKET_TC_OUT_CNT_5_H"}, + + {true, "TX_PACKET_TC_OUT_CNT_6_L"}, + {true, "TX_PACKET_TC_OUT_CNT_6_H"}, + {true, "TX_PACKET_TC_OUT_CNT_7_L"}, + {true, "TX_PACKET_TC_OUT_CNT_7_H"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_2[] = { + {true, "OQ_INDEX"}, + {true, "QUEUE_CNT"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = { + {true, "prt_id"}, + 
{true, "IGU_RX_ERR_PKT"}, + {true, "IGU_RX_NO_SOF_PKT"}, + {true, "EGU_TX_1588_SHORT_PKT"}, + {true, "EGU_TX_1588_PKT"}, + {true, "EGU_TX_ERR_PKT"}, + + {true, "IGU_RX_OUT_L2_PKT"}, + {true, "IGU_RX_OUT_L3_PKT"}, + {true, "IGU_RX_OUT_L4_PKT"}, + {true, "IGU_RX_IN_L2_PKT"}, + {true, "IGU_RX_IN_L3_PKT"}, + {true, "IGU_RX_IN_L4_PKT"}, + + {true, "IGU_RX_EL3E_PKT"}, + {true, "IGU_RX_EL4E_PKT"}, + {true, "IGU_RX_L3E_PKT"}, + {true, "IGU_RX_L4E_PKT"}, + {true, "IGU_RX_ROCEE_PKT"}, + {true, "IGU_RX_OUT_UDP0_PKT"}, + + {true, "IGU_RX_IN_UDP0_PKT"}, + {true, "IGU_MC_CAR_DROP_PKT_L"}, + {true, "IGU_MC_CAR_DROP_PKT_H"}, + {true, "IGU_BC_CAR_DROP_PKT_L"}, + {true, "IGU_BC_CAR_DROP_PKT_H"}, + {false, "Reserved"}, + + {true, "IGU_RX_OVERSIZE_PKT_L"}, + {true, "IGU_RX_OVERSIZE_PKT_H"}, + {true, "IGU_RX_UNDERSIZE_PKT_L"}, + {true, "IGU_RX_UNDERSIZE_PKT_H"}, + {true, "IGU_RX_OUT_ALL_PKT_L"}, + {true, "IGU_RX_OUT_ALL_PKT_H"}, + + {true, "IGU_TX_OUT_ALL_PKT_L"}, + {true, "IGU_TX_OUT_ALL_PKT_H"}, + {true, "IGU_RX_UNI_PKT_L"}, + {true, "IGU_RX_UNI_PKT_H"}, + {true, "IGU_RX_MULTI_PKT_L"}, + {true, "IGU_RX_MULTI_PKT_H"}, + + {true, "IGU_RX_BROAD_PKT_L"}, + {true, "IGU_RX_BROAD_PKT_H"}, + {true, "EGU_TX_OUT_ALL_PKT_L"}, + {true, "EGU_TX_OUT_ALL_PKT_H"}, + {true, "EGU_TX_UNI_PKT_L"}, + {true, "EGU_TX_UNI_PKT_H"}, + + {true, "EGU_TX_MULTI_PKT_L"}, + {true, "EGU_TX_MULTI_PKT_H"}, + {true, "EGU_TX_BROAD_PKT_L"}, + {true, "EGU_TX_BROAD_PKT_H"}, + {true, "IGU_TX_KEY_NUM_L"}, + {true, "IGU_TX_KEY_NUM_H"}, + + {true, "IGU_RX_NON_TUN_PKT_L"}, + {true, "IGU_RX_NON_TUN_PKT_H"}, + {true, "IGU_RX_TUN_PKT_L"}, + {true, "IGU_RX_TUN_PKT_H"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_0[] = { + {true, "tc_queue_num"}, + {true, "FSM_DFX_ST0"}, + {true, "FSM_DFX_ST1"}, + {true, "RPU_RX_PKT_DROP_CNT"}, + {true, "BUF_WAIT_TIMEOUT"}, + {true, "BUF_WAIT_TIMEOUT_QID"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_1[] = { + {false, "Reserved"}, + {true, "FIFO_DFX_ST0"}, + {true, "FIFO_DFX_ST1"}, + {true, "FIFO_DFX_ST2"}, + {true, "FIFO_DFX_ST3"}, + {true, "FIFO_DFX_ST4"}, + + {true, "FIFO_DFX_ST5"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_ncsi_reg[] = { + {false, "Reserved"}, + {true, "NCSI_EGU_TX_FIFO_STS"}, + {true, "NCSI_PAUSE_STATUS"}, + {true, "NCSI_RX_CTRL_DMAC_ERR_CNT"}, + {true, "NCSI_RX_CTRL_SMAC_ERR_CNT"}, + {true, "NCSI_RX_CTRL_CKS_ERR_CNT"}, + + {true, "NCSI_RX_CTRL_PKT_CNT"}, + {true, "NCSI_RX_PT_DMAC_ERR_CNT"}, + {true, "NCSI_RX_PT_SMAC_ERR_CNT"}, + {true, "NCSI_RX_PT_PKT_CNT"}, + {true, "NCSI_RX_FCS_ERR_CNT"}, + {true, "NCSI_TX_CTRL_DMAC_ERR_CNT"}, + + {true, "NCSI_TX_CTRL_SMAC_ERR_CNT"}, + {true, "NCSI_TX_CTRL_PKT_CNT"}, + {true, "NCSI_TX_PT_DMAC_ERR_CNT"}, + {true, "NCSI_TX_PT_SMAC_ERR_CNT"}, + {true, "NCSI_TX_PT_PKT_CNT"}, + {true, "NCSI_TX_PT_PKT_TRUNC_CNT"}, + + {true, "NCSI_TX_PT_PKT_ERR_CNT"}, + {true, "NCSI_TX_CTRL_PKT_ERR_CNT"}, + {true, "NCSI_RX_CTRL_PKT_TRUNC_CNT"}, + {true, "NCSI_RX_CTRL_PKT_CFLIT_CNT"}, + {false, "Reserved"}, + {false, "Reserved"}, + + {true, "NCSI_MAC_RX_OCTETS_OK"}, + {true, "NCSI_MAC_RX_OCTETS_BAD"}, + {true, "NCSI_MAC_RX_UC_PKTS"}, + {true, "NCSI_MAC_RX_MC_PKTS"}, + {true, "NCSI_MAC_RX_BC_PKTS"}, + {true, "NCSI_MAC_RX_PKTS_64OCTETS"}, + + {true, "NCSI_MAC_RX_PKTS_65TO127OCTETS"}, + {true, "NCSI_MAC_RX_PKTS_128TO255OCTETS"}, + {true, "NCSI_MAC_RX_PKTS_255TO511OCTETS"}, 
+ {true, "NCSI_MAC_RX_PKTS_512TO1023OCTETS"}, + {true, "NCSI_MAC_RX_PKTS_1024TO1518OCTETS"}, + {true, "NCSI_MAC_RX_PKTS_1519TOMAXOCTETS"}, + + {true, "NCSI_MAC_RX_FCS_ERRORS"}, + {true, "NCSI_MAC_RX_LONG_ERRORS"}, + {true, "NCSI_MAC_RX_JABBER_ERRORS"}, + {true, "NCSI_MAC_RX_RUNT_ERR_CNT"}, + {true, "NCSI_MAC_RX_SHORT_ERR_CNT"}, + {true, "NCSI_MAC_RX_FILT_PKT_CNT"}, + + {true, "NCSI_MAC_RX_OCTETS_TOTAL_FILT"}, + {true, "NCSI_MAC_TX_OCTETS_OK"}, + {true, "NCSI_MAC_TX_OCTETS_BAD"}, + {true, "NCSI_MAC_TX_UC_PKTS"}, + {true, "NCSI_MAC_TX_MC_PKTS"}, + {true, "NCSI_MAC_TX_BC_PKTS"}, + + {true, "NCSI_MAC_TX_PKTS_64OCTETS"}, + {true, "NCSI_MAC_TX_PKTS_65TO127OCTETS"}, + {true, "NCSI_MAC_TX_PKTS_128TO255OCTETS"}, + {true, "NCSI_MAC_TX_PKTS_256TO511OCTETS"}, + {true, "NCSI_MAC_TX_PKTS_512TO1023OCTETS"}, + {true, "NCSI_MAC_TX_PKTS_1024TO1518OCTETS"}, + + {true, "NCSI_MAC_TX_PKTS_1519TOMAXOCTETS"}, + {true, "NCSI_MAC_TX_UNDERRUN"}, + {true, "NCSI_MAC_TX_CRC_ERROR"}, + {true, "NCSI_MAC_TX_PAUSE_FRAMES"}, + {true, "NCSI_MAC_RX_PAD_PKTS"}, + {true, "NCSI_MAC_RX_PAUSE_FRAMES"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = { + {false, "Reserved"}, + {true, "LGE_IGU_AFIFO_DFX_0"}, + {true, "LGE_IGU_AFIFO_DFX_1"}, + {true, "LGE_IGU_AFIFO_DFX_2"}, + {true, "LGE_IGU_AFIFO_DFX_3"}, + {true, "LGE_IGU_AFIFO_DFX_4"}, + + {true, "LGE_IGU_AFIFO_DFX_5"}, + {true, "LGE_IGU_AFIFO_DFX_6"}, + {true, "LGE_IGU_AFIFO_DFX_7"}, + {true, "LGE_EGU_AFIFO_DFX_0"}, + {true, "LGE_EGU_AFIFO_DFX_1"}, + {true, "LGE_EGU_AFIFO_DFX_2"}, + + {true, "LGE_EGU_AFIFO_DFX_3"}, + {true, "LGE_EGU_AFIFO_DFX_4"}, + {true, "LGE_EGU_AFIFO_DFX_5"}, + {true, "LGE_EGU_AFIFO_DFX_6"}, + {true, "LGE_EGU_AFIFO_DFX_7"}, + {true, "CGE_IGU_AFIFO_DFX_0"}, + + {true, "CGE_IGU_AFIFO_DFX_1"}, + {true, "CGE_EGU_AFIFO_DFX_0"}, + {true, "CGE_EGU_AFIFO_DFX_1"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_ppp_reg[] = { + {false, "Reserved"}, + {true, "DROP_FROM_PRT_PKT_CNT"}, + {true, "DROP_FROM_HOST_PKT_CNT"}, + {true, "DROP_TX_VLAN_PROC_CNT"}, + {true, "DROP_MNG_CNT"}, + {true, "DROP_FD_CNT"}, + + {true, "DROP_NO_DST_CNT"}, + {true, "DROP_MC_MBID_FULL_CNT"}, + {true, "DROP_SC_FILTERED"}, + {true, "PPP_MC_DROP_PKT_CNT"}, + {true, "DROP_PT_CNT"}, + {true, "DROP_MAC_ANTI_SPOOF_CNT"}, + + {true, "DROP_IG_VFV_CNT"}, + {true, "DROP_IG_PRTV_CNT"}, + {true, "DROP_CNM_PFC_PAUSE_CNT"}, + {true, "DROP_TORUS_TC_CNT"}, + {true, "DROP_TORUS_LPBK_CNT"}, + {true, "PPP_HFS_STS"}, + + {true, "PPP_MC_RSLT_STS"}, + {true, "PPP_P3U_STS"}, + {true, "PPP_RSLT_DESCR_STS"}, + {true, "PPP_UMV_STS_0"}, + {true, "PPP_UMV_STS_1"}, + {true, "PPP_VFV_STS"}, + + {true, "PPP_GRO_KEY_CNT"}, + {true, "PPP_GRO_INFO_CNT"}, + {true, "PPP_GRO_DROP_CNT"}, + {true, "PPP_GRO_OUT_CNT"}, + {true, "PPP_GRO_KEY_MATCH_DATA_CNT"}, + {true, "PPP_GRO_KEY_MATCH_TCAM_CNT"}, + + {true, "PPP_GRO_INFO_MATCH_CNT"}, + {true, "PPP_GRO_FREE_ENTRY_CNT"}, + {true, "PPP_GRO_INNER_DFX_SIGNAL"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + + {true, "GET_RX_PKT_CNT_L"}, + {true, "GET_RX_PKT_CNT_H"}, + {true, "GET_TX_PKT_CNT_L"}, + {true, "GET_TX_PKT_CNT_H"}, + {true, "SEND_UC_PRT2HOST_PKT_CNT_L"}, + {true, "SEND_UC_PRT2HOST_PKT_CNT_H"}, + + {true, "SEND_UC_PRT2PRT_PKT_CNT_L"}, + {true, "SEND_UC_PRT2PRT_PKT_CNT_H"}, + {true, "SEND_UC_HOST2HOST_PKT_CNT_L"}, + {true, "SEND_UC_HOST2HOST_PKT_CNT_H"}, + {true, "SEND_UC_HOST2PRT_PKT_CNT_L"}, + {true, "SEND_UC_HOST2PRT_PKT_CNT_H"}, + + {true, 
"SEND_MC_FROM_PRT_CNT_L"}, + {true, "SEND_MC_FROM_PRT_CNT_H"}, + {true, "SEND_MC_FROM_HOST_CNT_L"}, + {true, "SEND_MC_FROM_HOST_CNT_H"}, + {true, "SSU_MC_RD_CNT_L"}, + {true, "SSU_MC_RD_CNT_H"}, + + {true, "SSU_MC_DROP_CNT_L"}, + {true, "SSU_MC_DROP_CNT_H"}, + {true, "SSU_MC_RD_PKT_CNT_L"}, + {true, "SSU_MC_RD_PKT_CNT_H"}, + {true, "PPP_MC_2HOST_PKT_CNT_L"}, + {true, "PPP_MC_2HOST_PKT_CNT_H"}, + + {true, "PPP_MC_2PRT_PKT_CNT_L"}, + {true, "PPP_MC_2PRT_PKT_CNT_H"}, + {true, "NTSNOS_PKT_CNT_L"}, + {true, "NTSNOS_PKT_CNT_H"}, + {true, "NTUP_PKT_CNT_L"}, + {true, "NTUP_PKT_CNT_H"}, + + {true, "NTLCL_PKT_CNT_L"}, + {true, "NTLCL_PKT_CNT_H"}, + {true, "NTTGT_PKT_CNT_L"}, + {true, "NTTGT_PKT_CNT_H"}, + {true, "RTNS_PKT_CNT_L"}, + {true, "RTNS_PKT_CNT_H"}, + + {true, "RTLPBK_PKT_CNT_L"}, + {true, "RTLPBK_PKT_CNT_H"}, + {true, "NR_PKT_CNT_L"}, + {true, "NR_PKT_CNT_H"}, + {true, "RR_PKT_CNT_L"}, + {true, "RR_PKT_CNT_H"}, + + {true, "MNG_TBL_HIT_CNT_L"}, + {true, "MNG_TBL_HIT_CNT_H"}, + {true, "FD_TBL_HIT_CNT_L"}, + {true, "FD_TBL_HIT_CNT_H"}, + {true, "FD_LKUP_CNT_L"}, + {true, "FD_LKUP_CNT_H"}, + + {true, "BC_HIT_CNT_L"}, + {true, "BC_HIT_CNT_H"}, + {true, "UM_TBL_UC_HIT_CNT_L"}, + {true, "UM_TBL_UC_HIT_CNT_H"}, + {true, "UM_TBL_MC_HIT_CNT_L"}, + {true, "UM_TBL_MC_HIT_CNT_H"}, + + {true, "UM_TBL_VMDQ1_HIT_CNT_L"}, + {true, "UM_TBL_VMDQ1_HIT_CNT_H"}, + {true, "MTA_TBL_HIT_CNT_L"}, + {true, "MTA_TBL_HIT_CNT_H"}, + {true, "FWD_BONDING_HIT_CNT_L"}, + {true, "FWD_BONDING_HIT_CNT_H"}, + + {true, "PROMIS_TBL_HIT_CNT_L"}, + {true, "PROMIS_TBL_HIT_CNT_H"}, + {true, "GET_TUNL_PKT_CNT_L"}, + {true, "GET_TUNL_PKT_CNT_H"}, + {true, "GET_BMC_PKT_CNT_L"}, + {true, "GET_BMC_PKT_CNT_H"}, + + {true, "SEND_UC_PRT2BMC_PKT_CNT_L"}, + {true, "SEND_UC_PRT2BMC_PKT_CNT_H"}, + {true, "SEND_UC_HOST2BMC_PKT_CNT_L"}, + {true, "SEND_UC_HOST2BMC_PKT_CNT_H"}, + {true, "SEND_UC_BMC2HOST_PKT_CNT_L"}, + {true, "SEND_UC_BMC2HOST_PKT_CNT_H"}, + + {true, "SEND_UC_BMC2PRT_PKT_CNT_L"}, + {true, "SEND_UC_BMC2PRT_PKT_CNT_H"}, + {true, "PPP_MC_2BMC_PKT_CNT_L"}, + {true, "PPP_MC_2BMC_PKT_CNT_H"}, + {true, "VLAN_MIRR_CNT_L"}, + {true, "VLAN_MIRR_CNT_H"}, + + {true, "IG_MIRR_CNT_L"}, + {true, "IG_MIRR_CNT_H"}, + {true, "EG_MIRR_CNT_L"}, + {true, "EG_MIRR_CNT_H"}, + {true, "RX_DEFAULT_HOST_HIT_CNT_L"}, + {true, "RX_DEFAULT_HOST_HIT_CNT_H"}, + + {true, "LAN_PAIR_CNT_L"}, + {true, "LAN_PAIR_CNT_H"}, + {true, "UM_TBL_MC_HIT_PKT_CNT_L"}, + {true, "UM_TBL_MC_HIT_PKT_CNT_H"}, + {true, "MTA_TBL_HIT_PKT_CNT_L"}, + {true, "MTA_TBL_HIT_PKT_CNT_H"}, + + {true, "PROMIS_TBL_HIT_PKT_CNT_L"}, + {true, "PROMIS_TBL_HIT_PKT_CNT_H"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_rcb_reg[] = { + {false, "Reserved"}, + {true, "FSM_DFX_ST0"}, + {true, "FSM_DFX_ST1"}, + {true, "FSM_DFX_ST2"}, + {true, "FIFO_DFX_ST0"}, + {true, "FIFO_DFX_ST1"}, + + {true, "FIFO_DFX_ST2"}, + {true, "FIFO_DFX_ST3"}, + {true, "FIFO_DFX_ST4"}, + {true, "FIFO_DFX_ST5"}, + {true, "FIFO_DFX_ST6"}, + {true, "FIFO_DFX_ST7"}, + + {true, "FIFO_DFX_ST8"}, + {true, "FIFO_DFX_ST9"}, + {true, "FIFO_DFX_ST10"}, + {true, "FIFO_DFX_ST11"}, + {true, "Q_CREDIT_VLD_0"}, + {true, "Q_CREDIT_VLD_1"}, + + {true, "Q_CREDIT_VLD_2"}, + {true, "Q_CREDIT_VLD_3"}, + {true, "Q_CREDIT_VLD_4"}, + {true, "Q_CREDIT_VLD_5"}, + {true, "Q_CREDIT_VLD_6"}, + {true, "Q_CREDIT_VLD_7"}, + + {true, "Q_CREDIT_VLD_8"}, + {true, "Q_CREDIT_VLD_9"}, + {true, "Q_CREDIT_VLD_10"}, + {true, "Q_CREDIT_VLD_11"}, + {true, 
"Q_CREDIT_VLD_12"}, + {true, "Q_CREDIT_VLD_13"}, + + {true, "Q_CREDIT_VLD_14"}, + {true, "Q_CREDIT_VLD_15"}, + {true, "Q_CREDIT_VLD_16"}, + {true, "Q_CREDIT_VLD_17"}, + {true, "Q_CREDIT_VLD_18"}, + {true, "Q_CREDIT_VLD_19"}, + + {true, "Q_CREDIT_VLD_20"}, + {true, "Q_CREDIT_VLD_21"}, + {true, "Q_CREDIT_VLD_22"}, + {true, "Q_CREDIT_VLD_23"}, + {true, "Q_CREDIT_VLD_24"}, + {true, "Q_CREDIT_VLD_25"}, + + {true, "Q_CREDIT_VLD_26"}, + {true, "Q_CREDIT_VLD_27"}, + {true, "Q_CREDIT_VLD_28"}, + {true, "Q_CREDIT_VLD_29"}, + {true, "Q_CREDIT_VLD_30"}, + {true, "Q_CREDIT_VLD_31"}, + + {true, "GRO_BD_SERR_CNT"}, + {true, "GRO_CONTEXT_SERR_CNT"}, + {true, "RX_STASH_CFG_SERR_CNT"}, + {true, "AXI_RD_FBD_SERR_CNT"}, + {true, "GRO_BD_MERR_CNT"}, + {true, "GRO_CONTEXT_MERR_CNT"}, + + {true, "RX_STASH_CFG_MERR_CNT"}, + {true, "AXI_RD_FBD_MERR_CNT"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static const struct hclge_dbg_dfx_message hclge_dbg_tqp_reg[] = { + {true, "q_num"}, + {true, "RCB_CFG_RX_RING_TAIL"}, + {true, "RCB_CFG_RX_RING_HEAD"}, + {true, "RCB_CFG_RX_RING_FBDNUM"}, + {true, "RCB_CFG_RX_RING_OFFSET"}, + {true, "RCB_CFG_RX_RING_FBDOFFSET"}, + + {true, "RCB_CFG_RX_RING_PKTNUM_RECORD"}, + {true, "RCB_CFG_TX_RING_TAIL"}, + {true, "RCB_CFG_TX_RING_HEAD"}, + {true, "RCB_CFG_TX_RING_FBDNUM"}, + {true, "RCB_CFG_TX_RING_OFFSET"}, + {true, "RCB_CFG_TX_RING_EBDNUM"}, +}; + static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = { { .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON, .dfx_msg = &hclge_dbg_bios_common_reg[0], @@ -161,10 +801,8 @@ static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset, return 0; } -static int hclge_dbg_cmd_send(struct hclge_dev *hdev, - struct hclge_desc *desc_src, - int index, int bd_num, - enum hclge_opcode_type cmd) +int hclge_dbg_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc_src, + int index, int bd_num, enum hclge_opcode_type cmd) { struct hclge_desc *desc = desc_src; int ret, i; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h index 724052928b88..2b998cbed826 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h @@ -99,646 +99,6 @@ struct hclge_dbg_status_dfx_info { char message[HCLGE_DBG_MAX_DFX_MSG_LEN]; }; -static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = { - {false, "Reserved"}, - {true, "BP_CPU_STATE"}, - {true, "DFX_MSIX_INFO_NIC_0"}, - {true, "DFX_MSIX_INFO_NIC_1"}, - {true, "DFX_MSIX_INFO_NIC_2"}, - {true, "DFX_MSIX_INFO_NIC_3"}, - - {true, "DFX_MSIX_INFO_ROC_0"}, - {true, "DFX_MSIX_INFO_ROC_1"}, - {true, "DFX_MSIX_INFO_ROC_2"}, - {true, "DFX_MSIX_INFO_ROC_3"}, - {false, "Reserved"}, - {false, "Reserved"}, -}; - -static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_0[] = { - {false, "Reserved"}, - {true, "SSU_ETS_PORT_STATUS"}, - {true, "SSU_ETS_TCG_STATUS"}, - {false, "Reserved"}, - {false, "Reserved"}, - {true, "SSU_BP_STATUS_0"}, - - {true, "SSU_BP_STATUS_1"}, - {true, "SSU_BP_STATUS_2"}, - {true, "SSU_BP_STATUS_3"}, - {true, "SSU_BP_STATUS_4"}, - {true, "SSU_BP_STATUS_5"}, - {true, "SSU_MAC_TX_PFC_IND"}, - - {true, "MAC_SSU_RX_PFC_IND"}, - {true, "BTMP_AGEING_ST_B0"}, - {true, "BTMP_AGEING_ST_B1"}, - {true, "BTMP_AGEING_ST_B2"}, - {false, "Reserved"}, - {false, "Reserved"}, - - {true, "FULL_DROP_NUM"}, - {true, "PART_DROP_NUM"}, - {true, "PPP_KEY_DROP_NUM"}, - {true, "PPP_RLT_DROP_NUM"}, 
- {true, "LO_PRI_UNICAST_RLT_DROP_NUM"}, - {true, "HI_PRI_MULTICAST_RLT_DROP_NUM"}, - - {true, "LO_PRI_MULTICAST_RLT_DROP_NUM"}, - {true, "NCSI_PACKET_CURR_BUFFER_CNT"}, - {true, "BTMP_AGEING_RLS_CNT_BANK0"}, - {true, "BTMP_AGEING_RLS_CNT_BANK1"}, - {true, "BTMP_AGEING_RLS_CNT_BANK2"}, - {true, "SSU_MB_RD_RLT_DROP_CNT"}, - - {true, "SSU_PPP_MAC_KEY_NUM_L"}, - {true, "SSU_PPP_MAC_KEY_NUM_H"}, - {true, "SSU_PPP_HOST_KEY_NUM_L"}, - {true, "SSU_PPP_HOST_KEY_NUM_H"}, - {true, "PPP_SSU_MAC_RLT_NUM_L"}, - {true, "PPP_SSU_MAC_RLT_NUM_H"}, - - {true, "PPP_SSU_HOST_RLT_NUM_L"}, - {true, "PPP_SSU_HOST_RLT_NUM_H"}, - {true, "NCSI_RX_PACKET_IN_CNT_L"}, - {true, "NCSI_RX_PACKET_IN_CNT_H"}, - {true, "NCSI_TX_PACKET_OUT_CNT_L"}, - {true, "NCSI_TX_PACKET_OUT_CNT_H"}, - - {true, "SSU_KEY_DROP_NUM"}, - {true, "MB_UNCOPY_NUM"}, - {true, "RX_OQ_DROP_PKT_CNT"}, - {true, "TX_OQ_DROP_PKT_CNT"}, - {true, "BANK_UNBALANCE_DROP_CNT"}, - {true, "BANK_UNBALANCE_RX_DROP_CNT"}, - - {true, "NIC_L2_ERR_DROP_PKT_CNT"}, - {true, "ROC_L2_ERR_DROP_PKT_CNT"}, - {true, "NIC_L2_ERR_DROP_PKT_CNT_RX"}, - {true, "ROC_L2_ERR_DROP_PKT_CNT_RX"}, - {true, "RX_OQ_GLB_DROP_PKT_CNT"}, - {false, "Reserved"}, - - {true, "LO_PRI_UNICAST_CUR_CNT"}, - {true, "HI_PRI_MULTICAST_CUR_CNT"}, - {true, "LO_PRI_MULTICAST_CUR_CNT"}, - {false, "Reserved"}, - {false, "Reserved"}, - {false, "Reserved"}, -}; - -static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_1[] = { - {true, "prt_id"}, - {true, "PACKET_TC_CURR_BUFFER_CNT_0"}, - {true, "PACKET_TC_CURR_BUFFER_CNT_1"}, - {true, "PACKET_TC_CURR_BUFFER_CNT_2"}, - {true, "PACKET_TC_CURR_BUFFER_CNT_3"}, - {true, "PACKET_TC_CURR_BUFFER_CNT_4"}, - - {true, "PACKET_TC_CURR_BUFFER_CNT_5"}, - {true, "PACKET_TC_CURR_BUFFER_CNT_6"}, - {true, "PACKET_TC_CURR_BUFFER_CNT_7"}, - {true, "PACKET_CURR_BUFFER_CNT"}, - {false, "Reserved"}, - {false, "Reserved"}, - - {true, "RX_PACKET_IN_CNT_L"}, - {true, "RX_PACKET_IN_CNT_H"}, - {true, "RX_PACKET_OUT_CNT_L"}, - {true, "RX_PACKET_OUT_CNT_H"}, - {true, "TX_PACKET_IN_CNT_L"}, - {true, "TX_PACKET_IN_CNT_H"}, - - {true, "TX_PACKET_OUT_CNT_L"}, - {true, "TX_PACKET_OUT_CNT_H"}, - {true, "ROC_RX_PACKET_IN_CNT_L"}, - {true, "ROC_RX_PACKET_IN_CNT_H"}, - {true, "ROC_TX_PACKET_OUT_CNT_L"}, - {true, "ROC_TX_PACKET_OUT_CNT_H"}, - - {true, "RX_PACKET_TC_IN_CNT_0_L"}, - {true, "RX_PACKET_TC_IN_CNT_0_H"}, - {true, "RX_PACKET_TC_IN_CNT_1_L"}, - {true, "RX_PACKET_TC_IN_CNT_1_H"}, - {true, "RX_PACKET_TC_IN_CNT_2_L"}, - {true, "RX_PACKET_TC_IN_CNT_2_H"}, - - {true, "RX_PACKET_TC_IN_CNT_3_L"}, - {true, "RX_PACKET_TC_IN_CNT_3_H"}, - {true, "RX_PACKET_TC_IN_CNT_4_L"}, - {true, "RX_PACKET_TC_IN_CNT_4_H"}, - {true, "RX_PACKET_TC_IN_CNT_5_L"}, - {true, "RX_PACKET_TC_IN_CNT_5_H"}, - - {true, "RX_PACKET_TC_IN_CNT_6_L"}, - {true, "RX_PACKET_TC_IN_CNT_6_H"}, - {true, "RX_PACKET_TC_IN_CNT_7_L"}, - {true, "RX_PACKET_TC_IN_CNT_7_H"}, - {true, "RX_PACKET_TC_OUT_CNT_0_L"}, - {true, "RX_PACKET_TC_OUT_CNT_0_H"}, - - {true, "RX_PACKET_TC_OUT_CNT_1_L"}, - {true, "RX_PACKET_TC_OUT_CNT_1_H"}, - {true, "RX_PACKET_TC_OUT_CNT_2_L"}, - {true, "RX_PACKET_TC_OUT_CNT_2_H"}, - {true, "RX_PACKET_TC_OUT_CNT_3_L"}, - {true, "RX_PACKET_TC_OUT_CNT_3_H"}, - - {true, "RX_PACKET_TC_OUT_CNT_4_L"}, - {true, "RX_PACKET_TC_OUT_CNT_4_H"}, - {true, "RX_PACKET_TC_OUT_CNT_5_L"}, - {true, "RX_PACKET_TC_OUT_CNT_5_H"}, - {true, "RX_PACKET_TC_OUT_CNT_6_L"}, - {true, "RX_PACKET_TC_OUT_CNT_6_H"}, - - {true, "RX_PACKET_TC_OUT_CNT_7_L"}, - {true, "RX_PACKET_TC_OUT_CNT_7_H"}, - {true, "TX_PACKET_TC_IN_CNT_0_L"}, - {true, 
"TX_PACKET_TC_IN_CNT_0_H"}, - {true, "TX_PACKET_TC_IN_CNT_1_L"}, - {true, "TX_PACKET_TC_IN_CNT_1_H"}, - - {true, "TX_PACKET_TC_IN_CNT_2_L"}, - {true, "TX_PACKET_TC_IN_CNT_2_H"}, - {true, "TX_PACKET_TC_IN_CNT_3_L"}, - {true, "TX_PACKET_TC_IN_CNT_3_H"}, - {true, "TX_PACKET_TC_IN_CNT_4_L"}, - {true, "TX_PACKET_TC_IN_CNT_4_H"}, - - {true, "TX_PACKET_TC_IN_CNT_5_L"}, - {true, "TX_PACKET_TC_IN_CNT_5_H"}, - {true, "TX_PACKET_TC_IN_CNT_6_L"}, - {true, "TX_PACKET_TC_IN_CNT_6_H"}, - {true, "TX_PACKET_TC_IN_CNT_7_L"}, - {true, "TX_PACKET_TC_IN_CNT_7_H"}, - - {true, "TX_PACKET_TC_OUT_CNT_0_L"}, - {true, "TX_PACKET_TC_OUT_CNT_0_H"}, - {true, "TX_PACKET_TC_OUT_CNT_1_L"}, - {true, "TX_PACKET_TC_OUT_CNT_1_H"}, - {true, "TX_PACKET_TC_OUT_CNT_2_L"}, - {true, "TX_PACKET_TC_OUT_CNT_2_H"}, - - {true, "TX_PACKET_TC_OUT_CNT_3_L"}, - {true, "TX_PACKET_TC_OUT_CNT_3_H"}, - {true, "TX_PACKET_TC_OUT_CNT_4_L"}, - {true, "TX_PACKET_TC_OUT_CNT_4_H"}, - {true, "TX_PACKET_TC_OUT_CNT_5_L"}, - {true, "TX_PACKET_TC_OUT_CNT_5_H"}, - - {true, "TX_PACKET_TC_OUT_CNT_6_L"}, - {true, "TX_PACKET_TC_OUT_CNT_6_H"}, - {true, "TX_PACKET_TC_OUT_CNT_7_L"}, - {true, "TX_PACKET_TC_OUT_CNT_7_H"}, - {false, "Reserved"}, - {false, "Reserved"}, -}; - -static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_2[] = { - {true, "OQ_INDEX"}, - {true, "QUEUE_CNT"}, - {false, "Reserved"}, - {false, "Reserved"}, - {false, "Reserved"}, - {false, "Reserved"}, -}; - -static const struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = { - {true, "prt_id"}, - {true, "IGU_RX_ERR_PKT"}, - {true, "IGU_RX_NO_SOF_PKT"}, - {true, "EGU_TX_1588_SHORT_PKT"}, - {true, "EGU_TX_1588_PKT"}, - {true, "EGU_TX_ERR_PKT"}, - - {true, "IGU_RX_OUT_L2_PKT"}, - {true, "IGU_RX_OUT_L3_PKT"}, - {true, "IGU_RX_OUT_L4_PKT"}, - {true, "IGU_RX_IN_L2_PKT"}, - {true, "IGU_RX_IN_L3_PKT"}, - {true, "IGU_RX_IN_L4_PKT"}, - - {true, "IGU_RX_EL3E_PKT"}, - {true, "IGU_RX_EL4E_PKT"}, - {true, "IGU_RX_L3E_PKT"}, - {true, "IGU_RX_L4E_PKT"}, - {true, "IGU_RX_ROCEE_PKT"}, - {true, "IGU_RX_OUT_UDP0_PKT"}, - - {true, "IGU_RX_IN_UDP0_PKT"}, - {true, "IGU_MC_CAR_DROP_PKT_L"}, - {true, "IGU_MC_CAR_DROP_PKT_H"}, - {true, "IGU_BC_CAR_DROP_PKT_L"}, - {true, "IGU_BC_CAR_DROP_PKT_H"}, - {false, "Reserved"}, - - {true, "IGU_RX_OVERSIZE_PKT_L"}, - {true, "IGU_RX_OVERSIZE_PKT_H"}, - {true, "IGU_RX_UNDERSIZE_PKT_L"}, - {true, "IGU_RX_UNDERSIZE_PKT_H"}, - {true, "IGU_RX_OUT_ALL_PKT_L"}, - {true, "IGU_RX_OUT_ALL_PKT_H"}, - - {true, "IGU_TX_OUT_ALL_PKT_L"}, - {true, "IGU_TX_OUT_ALL_PKT_H"}, - {true, "IGU_RX_UNI_PKT_L"}, - {true, "IGU_RX_UNI_PKT_H"}, - {true, "IGU_RX_MULTI_PKT_L"}, - {true, "IGU_RX_MULTI_PKT_H"}, - - {true, "IGU_RX_BROAD_PKT_L"}, - {true, "IGU_RX_BROAD_PKT_H"}, - {true, "EGU_TX_OUT_ALL_PKT_L"}, - {true, "EGU_TX_OUT_ALL_PKT_H"}, - {true, "EGU_TX_UNI_PKT_L"}, - {true, "EGU_TX_UNI_PKT_H"}, - - {true, "EGU_TX_MULTI_PKT_L"}, - {true, "EGU_TX_MULTI_PKT_H"}, - {true, "EGU_TX_BROAD_PKT_L"}, - {true, "EGU_TX_BROAD_PKT_H"}, - {true, "IGU_TX_KEY_NUM_L"}, - {true, "IGU_TX_KEY_NUM_H"}, - - {true, "IGU_RX_NON_TUN_PKT_L"}, - {true, "IGU_RX_NON_TUN_PKT_H"}, - {true, "IGU_RX_TUN_PKT_L"}, - {true, "IGU_RX_TUN_PKT_H"}, - {false, "Reserved"}, - {false, "Reserved"}, -}; - -static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_0[] = { - {true, "tc_queue_num"}, - {true, "FSM_DFX_ST0"}, - {true, "FSM_DFX_ST1"}, - {true, "RPU_RX_PKT_DROP_CNT"}, - {true, "BUF_WAIT_TIMEOUT"}, - {true, "BUF_WAIT_TIMEOUT_QID"}, -}; - -static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_1[] = { - {false, "Reserved"}, - {true, 
"FIFO_DFX_ST0"}, - {true, "FIFO_DFX_ST1"}, - {true, "FIFO_DFX_ST2"}, - {true, "FIFO_DFX_ST3"}, - {true, "FIFO_DFX_ST4"}, - - {true, "FIFO_DFX_ST5"}, - {false, "Reserved"}, - {false, "Reserved"}, - {false, "Reserved"}, - {false, "Reserved"}, - {false, "Reserved"}, -}; - -static const struct hclge_dbg_dfx_message hclge_dbg_ncsi_reg[] = { - {false, "Reserved"}, - {true, "NCSI_EGU_TX_FIFO_STS"}, - {true, "NCSI_PAUSE_STATUS"}, - {true, "NCSI_RX_CTRL_DMAC_ERR_CNT"}, - {true, "NCSI_RX_CTRL_SMAC_ERR_CNT"}, - {true, "NCSI_RX_CTRL_CKS_ERR_CNT"}, - - {true, "NCSI_RX_CTRL_PKT_CNT"}, - {true, "NCSI_RX_PT_DMAC_ERR_CNT"}, - {true, "NCSI_RX_PT_SMAC_ERR_CNT"}, - {true, "NCSI_RX_PT_PKT_CNT"}, - {true, "NCSI_RX_FCS_ERR_CNT"}, - {true, "NCSI_TX_CTRL_DMAC_ERR_CNT"}, - - {true, "NCSI_TX_CTRL_SMAC_ERR_CNT"}, - {true, "NCSI_TX_CTRL_PKT_CNT"}, - {true, "NCSI_TX_PT_DMAC_ERR_CNT"}, - {true, "NCSI_TX_PT_SMAC_ERR_CNT"}, - {true, "NCSI_TX_PT_PKT_CNT"}, - {true, "NCSI_TX_PT_PKT_TRUNC_CNT"}, - - {true, "NCSI_TX_PT_PKT_ERR_CNT"}, - {true, "NCSI_TX_CTRL_PKT_ERR_CNT"}, - {true, "NCSI_RX_CTRL_PKT_TRUNC_CNT"}, - {true, "NCSI_RX_CTRL_PKT_CFLIT_CNT"}, - {false, "Reserved"}, - {false, "Reserved"}, - - {true, "NCSI_MAC_RX_OCTETS_OK"}, - {true, "NCSI_MAC_RX_OCTETS_BAD"}, - {true, "NCSI_MAC_RX_UC_PKTS"}, - {true, "NCSI_MAC_RX_MC_PKTS"}, - {true, "NCSI_MAC_RX_BC_PKTS"}, - {true, "NCSI_MAC_RX_PKTS_64OCTETS"}, - - {true, "NCSI_MAC_RX_PKTS_65TO127OCTETS"}, - {true, "NCSI_MAC_RX_PKTS_128TO255OCTETS"}, - {true, "NCSI_MAC_RX_PKTS_255TO511OCTETS"}, - {true, "NCSI_MAC_RX_PKTS_512TO1023OCTETS"}, - {true, "NCSI_MAC_RX_PKTS_1024TO1518OCTETS"}, - {true, "NCSI_MAC_RX_PKTS_1519TOMAXOCTETS"}, - - {true, "NCSI_MAC_RX_FCS_ERRORS"}, - {true, "NCSI_MAC_RX_LONG_ERRORS"}, - {true, "NCSI_MAC_RX_JABBER_ERRORS"}, - {true, "NCSI_MAC_RX_RUNT_ERR_CNT"}, - {true, "NCSI_MAC_RX_SHORT_ERR_CNT"}, - {true, "NCSI_MAC_RX_FILT_PKT_CNT"}, - - {true, "NCSI_MAC_RX_OCTETS_TOTAL_FILT"}, - {true, "NCSI_MAC_TX_OCTETS_OK"}, - {true, "NCSI_MAC_TX_OCTETS_BAD"}, - {true, "NCSI_MAC_TX_UC_PKTS"}, - {true, "NCSI_MAC_TX_MC_PKTS"}, - {true, "NCSI_MAC_TX_BC_PKTS"}, - - {true, "NCSI_MAC_TX_PKTS_64OCTETS"}, - {true, "NCSI_MAC_TX_PKTS_65TO127OCTETS"}, - {true, "NCSI_MAC_TX_PKTS_128TO255OCTETS"}, - {true, "NCSI_MAC_TX_PKTS_256TO511OCTETS"}, - {true, "NCSI_MAC_TX_PKTS_512TO1023OCTETS"}, - {true, "NCSI_MAC_TX_PKTS_1024TO1518OCTETS"}, - - {true, "NCSI_MAC_TX_PKTS_1519TOMAXOCTETS"}, - {true, "NCSI_MAC_TX_UNDERRUN"}, - {true, "NCSI_MAC_TX_CRC_ERROR"}, - {true, "NCSI_MAC_TX_PAUSE_FRAMES"}, - {true, "NCSI_MAC_RX_PAD_PKTS"}, - {true, "NCSI_MAC_RX_PAUSE_FRAMES"}, -}; - -static const struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = { - {false, "Reserved"}, - {true, "LGE_IGU_AFIFO_DFX_0"}, - {true, "LGE_IGU_AFIFO_DFX_1"}, - {true, "LGE_IGU_AFIFO_DFX_2"}, - {true, "LGE_IGU_AFIFO_DFX_3"}, - {true, "LGE_IGU_AFIFO_DFX_4"}, - - {true, "LGE_IGU_AFIFO_DFX_5"}, - {true, "LGE_IGU_AFIFO_DFX_6"}, - {true, "LGE_IGU_AFIFO_DFX_7"}, - {true, "LGE_EGU_AFIFO_DFX_0"}, - {true, "LGE_EGU_AFIFO_DFX_1"}, - {true, "LGE_EGU_AFIFO_DFX_2"}, - - {true, "LGE_EGU_AFIFO_DFX_3"}, - {true, "LGE_EGU_AFIFO_DFX_4"}, - {true, "LGE_EGU_AFIFO_DFX_5"}, - {true, "LGE_EGU_AFIFO_DFX_6"}, - {true, "LGE_EGU_AFIFO_DFX_7"}, - {true, "CGE_IGU_AFIFO_DFX_0"}, - - {true, "CGE_IGU_AFIFO_DFX_1"}, - {true, "CGE_EGU_AFIFO_DFX_0"}, - {true, "CGE_EGU_AFIFO_DFX_1"}, - {false, "Reserved"}, - {false, "Reserved"}, - {false, "Reserved"}, -}; - -static const struct hclge_dbg_dfx_message hclge_dbg_ppp_reg[] = { - {false, "Reserved"}, - {true, 
"DROP_FROM_PRT_PKT_CNT"}, - {true, "DROP_FROM_HOST_PKT_CNT"}, - {true, "DROP_TX_VLAN_PROC_CNT"}, - {true, "DROP_MNG_CNT"}, - {true, "DROP_FD_CNT"}, - - {true, "DROP_NO_DST_CNT"}, - {true, "DROP_MC_MBID_FULL_CNT"}, - {true, "DROP_SC_FILTERED"}, - {true, "PPP_MC_DROP_PKT_CNT"}, - {true, "DROP_PT_CNT"}, - {true, "DROP_MAC_ANTI_SPOOF_CNT"}, - - {true, "DROP_IG_VFV_CNT"}, - {true, "DROP_IG_PRTV_CNT"}, - {true, "DROP_CNM_PFC_PAUSE_CNT"}, - {true, "DROP_TORUS_TC_CNT"}, - {true, "DROP_TORUS_LPBK_CNT"}, - {true, "PPP_HFS_STS"}, - - {true, "PPP_MC_RSLT_STS"}, - {true, "PPP_P3U_STS"}, - {true, "PPP_RSLT_DESCR_STS"}, - {true, "PPP_UMV_STS_0"}, - {true, "PPP_UMV_STS_1"}, - {true, "PPP_VFV_STS"}, - - {true, "PPP_GRO_KEY_CNT"}, - {true, "PPP_GRO_INFO_CNT"}, - {true, "PPP_GRO_DROP_CNT"}, - {true, "PPP_GRO_OUT_CNT"}, - {true, "PPP_GRO_KEY_MATCH_DATA_CNT"}, - {true, "PPP_GRO_KEY_MATCH_TCAM_CNT"}, - - {true, "PPP_GRO_INFO_MATCH_CNT"}, - {true, "PPP_GRO_FREE_ENTRY_CNT"}, - {true, "PPP_GRO_INNER_DFX_SIGNAL"}, - {false, "Reserved"}, - {false, "Reserved"}, - {false, "Reserved"}, - - {true, "GET_RX_PKT_CNT_L"}, - {true, "GET_RX_PKT_CNT_H"}, - {true, "GET_TX_PKT_CNT_L"}, - {true, "GET_TX_PKT_CNT_H"}, - {true, "SEND_UC_PRT2HOST_PKT_CNT_L"}, - {true, "SEND_UC_PRT2HOST_PKT_CNT_H"}, - - {true, "SEND_UC_PRT2PRT_PKT_CNT_L"}, - {true, "SEND_UC_PRT2PRT_PKT_CNT_H"}, - {true, "SEND_UC_HOST2HOST_PKT_CNT_L"}, - {true, "SEND_UC_HOST2HOST_PKT_CNT_H"}, - {true, "SEND_UC_HOST2PRT_PKT_CNT_L"}, - {true, "SEND_UC_HOST2PRT_PKT_CNT_H"}, - - {true, "SEND_MC_FROM_PRT_CNT_L"}, - {true, "SEND_MC_FROM_PRT_CNT_H"}, - {true, "SEND_MC_FROM_HOST_CNT_L"}, - {true, "SEND_MC_FROM_HOST_CNT_H"}, - {true, "SSU_MC_RD_CNT_L"}, - {true, "SSU_MC_RD_CNT_H"}, - - {true, "SSU_MC_DROP_CNT_L"}, - {true, "SSU_MC_DROP_CNT_H"}, - {true, "SSU_MC_RD_PKT_CNT_L"}, - {true, "SSU_MC_RD_PKT_CNT_H"}, - {true, "PPP_MC_2HOST_PKT_CNT_L"}, - {true, "PPP_MC_2HOST_PKT_CNT_H"}, - - {true, "PPP_MC_2PRT_PKT_CNT_L"}, - {true, "PPP_MC_2PRT_PKT_CNT_H"}, - {true, "NTSNOS_PKT_CNT_L"}, - {true, "NTSNOS_PKT_CNT_H"}, - {true, "NTUP_PKT_CNT_L"}, - {true, "NTUP_PKT_CNT_H"}, - - {true, "NTLCL_PKT_CNT_L"}, - {true, "NTLCL_PKT_CNT_H"}, - {true, "NTTGT_PKT_CNT_L"}, - {true, "NTTGT_PKT_CNT_H"}, - {true, "RTNS_PKT_CNT_L"}, - {true, "RTNS_PKT_CNT_H"}, - - {true, "RTLPBK_PKT_CNT_L"}, - {true, "RTLPBK_PKT_CNT_H"}, - {true, "NR_PKT_CNT_L"}, - {true, "NR_PKT_CNT_H"}, - {true, "RR_PKT_CNT_L"}, - {true, "RR_PKT_CNT_H"}, - - {true, "MNG_TBL_HIT_CNT_L"}, - {true, "MNG_TBL_HIT_CNT_H"}, - {true, "FD_TBL_HIT_CNT_L"}, - {true, "FD_TBL_HIT_CNT_H"}, - {true, "FD_LKUP_CNT_L"}, - {true, "FD_LKUP_CNT_H"}, - - {true, "BC_HIT_CNT_L"}, - {true, "BC_HIT_CNT_H"}, - {true, "UM_TBL_UC_HIT_CNT_L"}, - {true, "UM_TBL_UC_HIT_CNT_H"}, - {true, "UM_TBL_MC_HIT_CNT_L"}, - {true, "UM_TBL_MC_HIT_CNT_H"}, - - {true, "UM_TBL_VMDQ1_HIT_CNT_L"}, - {true, "UM_TBL_VMDQ1_HIT_CNT_H"}, - {true, "MTA_TBL_HIT_CNT_L"}, - {true, "MTA_TBL_HIT_CNT_H"}, - {true, "FWD_BONDING_HIT_CNT_L"}, - {true, "FWD_BONDING_HIT_CNT_H"}, - - {true, "PROMIS_TBL_HIT_CNT_L"}, - {true, "PROMIS_TBL_HIT_CNT_H"}, - {true, "GET_TUNL_PKT_CNT_L"}, - {true, "GET_TUNL_PKT_CNT_H"}, - {true, "GET_BMC_PKT_CNT_L"}, - {true, "GET_BMC_PKT_CNT_H"}, - - {true, "SEND_UC_PRT2BMC_PKT_CNT_L"}, - {true, "SEND_UC_PRT2BMC_PKT_CNT_H"}, - {true, "SEND_UC_HOST2BMC_PKT_CNT_L"}, - {true, "SEND_UC_HOST2BMC_PKT_CNT_H"}, - {true, "SEND_UC_BMC2HOST_PKT_CNT_L"}, - {true, "SEND_UC_BMC2HOST_PKT_CNT_H"}, - - {true, "SEND_UC_BMC2PRT_PKT_CNT_L"}, - {true, "SEND_UC_BMC2PRT_PKT_CNT_H"}, - {true, 
"PPP_MC_2BMC_PKT_CNT_L"}, - {true, "PPP_MC_2BMC_PKT_CNT_H"}, - {true, "VLAN_MIRR_CNT_L"}, - {true, "VLAN_MIRR_CNT_H"}, - - {true, "IG_MIRR_CNT_L"}, - {true, "IG_MIRR_CNT_H"}, - {true, "EG_MIRR_CNT_L"}, - {true, "EG_MIRR_CNT_H"}, - {true, "RX_DEFAULT_HOST_HIT_CNT_L"}, - {true, "RX_DEFAULT_HOST_HIT_CNT_H"}, - - {true, "LAN_PAIR_CNT_L"}, - {true, "LAN_PAIR_CNT_H"}, - {true, "UM_TBL_MC_HIT_PKT_CNT_L"}, - {true, "UM_TBL_MC_HIT_PKT_CNT_H"}, - {true, "MTA_TBL_HIT_PKT_CNT_L"}, - {true, "MTA_TBL_HIT_PKT_CNT_H"}, - - {true, "PROMIS_TBL_HIT_PKT_CNT_L"}, - {true, "PROMIS_TBL_HIT_PKT_CNT_H"}, - {false, "Reserved"}, - {false, "Reserved"}, - {false, "Reserved"}, - {false, "Reserved"}, -}; - -static const struct hclge_dbg_dfx_message hclge_dbg_rcb_reg[] = { - {false, "Reserved"}, - {true, "FSM_DFX_ST0"}, - {true, "FSM_DFX_ST1"}, - {true, "FSM_DFX_ST2"}, - {true, "FIFO_DFX_ST0"}, - {true, "FIFO_DFX_ST1"}, - - {true, "FIFO_DFX_ST2"}, - {true, "FIFO_DFX_ST3"}, - {true, "FIFO_DFX_ST4"}, - {true, "FIFO_DFX_ST5"}, - {true, "FIFO_DFX_ST6"}, - {true, "FIFO_DFX_ST7"}, - - {true, "FIFO_DFX_ST8"}, - {true, "FIFO_DFX_ST9"}, - {true, "FIFO_DFX_ST10"}, - {true, "FIFO_DFX_ST11"}, - {true, "Q_CREDIT_VLD_0"}, - {true, "Q_CREDIT_VLD_1"}, - - {true, "Q_CREDIT_VLD_2"}, - {true, "Q_CREDIT_VLD_3"}, - {true, "Q_CREDIT_VLD_4"}, - {true, "Q_CREDIT_VLD_5"}, - {true, "Q_CREDIT_VLD_6"}, - {true, "Q_CREDIT_VLD_7"}, - - {true, "Q_CREDIT_VLD_8"}, - {true, "Q_CREDIT_VLD_9"}, - {true, "Q_CREDIT_VLD_10"}, - {true, "Q_CREDIT_VLD_11"}, - {true, "Q_CREDIT_VLD_12"}, - {true, "Q_CREDIT_VLD_13"}, - - {true, "Q_CREDIT_VLD_14"}, - {true, "Q_CREDIT_VLD_15"}, - {true, "Q_CREDIT_VLD_16"}, - {true, "Q_CREDIT_VLD_17"}, - {true, "Q_CREDIT_VLD_18"}, - {true, "Q_CREDIT_VLD_19"}, - - {true, "Q_CREDIT_VLD_20"}, - {true, "Q_CREDIT_VLD_21"}, - {true, "Q_CREDIT_VLD_22"}, - {true, "Q_CREDIT_VLD_23"}, - {true, "Q_CREDIT_VLD_24"}, - {true, "Q_CREDIT_VLD_25"}, - - {true, "Q_CREDIT_VLD_26"}, - {true, "Q_CREDIT_VLD_27"}, - {true, "Q_CREDIT_VLD_28"}, - {true, "Q_CREDIT_VLD_29"}, - {true, "Q_CREDIT_VLD_30"}, - {true, "Q_CREDIT_VLD_31"}, - - {true, "GRO_BD_SERR_CNT"}, - {true, "GRO_CONTEXT_SERR_CNT"}, - {true, "RX_STASH_CFG_SERR_CNT"}, - {true, "AXI_RD_FBD_SERR_CNT"}, - {true, "GRO_BD_MERR_CNT"}, - {true, "GRO_CONTEXT_MERR_CNT"}, - - {true, "RX_STASH_CFG_MERR_CNT"}, - {true, "AXI_RD_FBD_MERR_CNT"}, - {false, "Reserved"}, - {false, "Reserved"}, - {false, "Reserved"}, - {false, "Reserved"}, -}; - -static const struct hclge_dbg_dfx_message hclge_dbg_tqp_reg[] = { - {true, "q_num"}, - {true, "RCB_CFG_RX_RING_TAIL"}, - {true, "RCB_CFG_RX_RING_HEAD"}, - {true, "RCB_CFG_RX_RING_FBDNUM"}, - {true, "RCB_CFG_RX_RING_OFFSET"}, - {true, "RCB_CFG_RX_RING_FBDOFFSET"}, - - {true, "RCB_CFG_RX_RING_PKTNUM_RECORD"}, - {true, "RCB_CFG_TX_RING_TAIL"}, - {true, "RCB_CFG_TX_RING_HEAD"}, - {true, "RCB_CFG_TX_RING_FBDNUM"}, - {true, "RCB_CFG_TX_RING_OFFSET"}, - {true, "RCB_CFG_TX_RING_EBDNUM"}, -}; - #define HCLGE_DBG_INFO_LEN 256 #define HCLGE_DBG_VLAN_FLTR_INFO_LEN 256 #define HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN 512 @@ -771,4 +131,7 @@ struct hclge_dbg_vlan_cfg { u8 pri_only2; }; +int hclge_dbg_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc_src, + int index, int bd_num, enum hclge_opcode_type cmd); + #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c index 9a939c0b217f..a1571c108678 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c +++ 
b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c @@ -5,6 +5,34 @@ #include "hclge_devlink.h" +static int hclge_devlink_scc_info_get(struct devlink *devlink, + struct devlink_info_req *req) +{ + struct hclge_devlink_priv *priv = devlink_priv(devlink); + char scc_version[HCLGE_DEVLINK_FW_SCC_LEN]; + struct hclge_dev *hdev = priv->hdev; + u32 scc_version_tmp; + int ret; + + ret = hclge_query_scc_version(hdev, &scc_version_tmp); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get scc version, ret = %d\n", ret); + return ret; + } + + snprintf(scc_version, sizeof(scc_version), "%lu.%lu.%lu.%lu", + hnae3_get_field(scc_version_tmp, HNAE3_SCC_VERSION_BYTE3_MASK, + HNAE3_FW_VERSION_BYTE3_SHIFT), + hnae3_get_field(scc_version_tmp, HNAE3_SCC_VERSION_BYTE2_MASK, + HNAE3_FW_VERSION_BYTE2_SHIFT), + hnae3_get_field(scc_version_tmp, HNAE3_SCC_VERSION_BYTE1_MASK, + HNAE3_FW_VERSION_BYTE1_SHIFT), + hnae3_get_field(scc_version_tmp, HNAE3_SCC_VERSION_BYTE0_MASK, + HNAE3_FW_VERSION_BYTE0_SHIFT)); + return devlink_info_version_running_put(req, "fw.scc", scc_version); +} + static int hclge_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req, struct netlink_ext_ack *extack) @@ -13,6 +41,7 @@ static int hclge_devlink_info_get(struct devlink *devlink, struct hclge_devlink_priv *priv = devlink_priv(devlink); char version_str[HCLGE_DEVLINK_FW_STRING_LEN]; struct hclge_dev *hdev = priv->hdev; + int ret; snprintf(version_str, sizeof(version_str), "%lu.%lu.%lu.%lu", hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK, @@ -24,9 +53,18 @@ static int hclge_devlink_info_get(struct devlink *devlink, hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK, HNAE3_FW_VERSION_BYTE0_SHIFT)); - return devlink_info_version_running_put(req, - DEVLINK_INFO_VERSION_GENERIC_FW, - version_str); + ret = devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW, + version_str); + if (ret) { + dev_err(&hdev->pdev->dev, "failed to set running version of fw\n"); + return ret; + } + + if (hdev->pdev->revision > HNAE3_DEVICE_VERSION_V2) + ret = hclge_devlink_scc_info_get(devlink, req); + + return ret; } static int hclge_devlink_reload_down(struct devlink *devlink, bool netns_change, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h index 918be04507a5..148effa5ea89 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h @@ -6,6 +6,8 @@ #include "hclge_main.h" +#define HCLGE_DEVLINK_FW_SCC_LEN 32 + struct hclge_devlink_priv { struct hclge_dev *hdev; }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index d63e114f93d0..e132c2f09560 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -1198,6 +1198,425 @@ static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = { } }; +static const struct hclge_mod_reg_info hclge_ssu_reg_0_info[] = { + { + .reg_name = "SSU_BP_STATUS_0~5", + .reg_offset_group = { 5, 6, 7, 8, 9, 10}, + .group_size = 6 + }, { + .reg_name = "LO_PRI_UNICAST_CUR_CNT", + .reg_offset_group = {54}, + .group_size = 1 + }, { + .reg_name = "HI/LO_PRI_MULTICAST_CUR_CNT", + .reg_offset_group = {55, 56}, + .group_size = 2 + }, { + .reg_name = "SSU_MB_RD_RLT_DROP_CNT", + .reg_offset_group = {29}, + .group_size = 1 + }, { + .reg_name = "SSU_PPP_MAC_KEY_NUM", 
+ .reg_offset_group = {31, 30}, + .group_size = 2 + }, { + .reg_name = "SSU_PPP_HOST_KEY_NUM", + .reg_offset_group = {33, 32}, + .group_size = 2 + }, { + .reg_name = "PPP_SSU_MAC/HOST_RLT_NUM", + .reg_offset_group = {35, 34, 37, 36}, + .group_size = 4 + }, { + .reg_name = "FULL/PART_DROP_NUM", + .reg_offset_group = {18, 19}, + .group_size = 2 + }, { + .reg_name = "PPP_KEY/RLT_DROP_NUM", + .reg_offset_group = {20, 21}, + .group_size = 2 + }, { + .reg_name = "NIC/ROC_L2_ERR_DROP_PKT_CNT", + .reg_offset_group = {48, 49}, + .group_size = 2 + }, { + .reg_name = "NIC/ROC_L2_ERR_DROP_PKT_CNT_RX", + .reg_offset_group = {50, 51}, + .group_size = 2 + }, +}; + +static const struct hclge_mod_reg_info hclge_ssu_reg_1_info[] = { + { + .reg_name = "RX_PACKET_IN/OUT_CNT", + .reg_offset_group = {13, 12, 15, 14}, + .group_size = 4 + }, { + .reg_name = "TX_PACKET_IN/OUT_CNT", + .reg_offset_group = {17, 16, 19, 18}, + .group_size = 4 + }, { + .reg_name = "RX_PACKET_TC0_IN/OUT_CNT", + .reg_offset_group = {25, 24, 41, 40}, + .group_size = 4 + }, { + .reg_name = "RX_PACKET_TC1_IN/OUT_CNT", + .reg_offset_group = {27, 26, 43, 42}, + .group_size = 4 + }, { + .reg_name = "RX_PACKET_TC2_IN/OUT_CNT", + .reg_offset_group = {29, 28, 45, 44}, + .group_size = 4 + }, { + .reg_name = "RX_PACKET_TC3_IN/OUT_CNT", + .reg_offset_group = {31, 30, 47, 46}, + .group_size = 4 + }, { + .reg_name = "RX_PACKET_TC4_IN/OUT_CNT", + .reg_offset_group = {33, 32, 49, 48}, + .group_size = 4 + }, { + .reg_name = "RX_PACKET_TC5_IN/OUT_CNT", + .reg_offset_group = {35, 34, 51, 50}, + .group_size = 4 + }, { + .reg_name = "RX_PACKET_TC6_IN/OUT_CNT", + .reg_offset_group = {37, 36, 53, 52}, + .group_size = 4 + }, { + .reg_name = "RX_PACKET_TC7_IN/OUT_CNT", + .reg_offset_group = {39, 38, 55, 54}, + .group_size = 4 + }, { + .reg_name = "TX_PACKET_TC0_IN/OUT_CNT", + .reg_offset_group = {57, 56, 73, 72}, + .group_size = 4 + }, { + .reg_name = "TX_PACKET_TC1_IN/OUT_CNT", + .reg_offset_group = {59, 58, 75, 74}, + .group_size = 4 + }, { + .reg_name = "TX_PACKET_TC2_IN/OUT_CNT", + .reg_offset_group = {61, 60, 77, 76}, + .group_size = 4 + }, { + .reg_name = "TX_PACKET_TC3_IN/OUT_CNT", + .reg_offset_group = {63, 62, 79, 78}, + .group_size = 4 + }, { + .reg_name = "TX_PACKET_TC4_IN/OUT_CNT", + .reg_offset_group = {65, 64, 81, 80}, + .group_size = 4 + }, { + .reg_name = "TX_PACKET_TC5_IN/OUT_CNT", + .reg_offset_group = {67, 66, 83, 82}, + .group_size = 4 + }, { + .reg_name = "TX_PACKET_TC6_IN/OUT_CNT", + .reg_offset_group = {69, 68, 85, 84}, + .group_size = 4 + }, { + .reg_name = "TX_PACKET_TC7_IN/OUT_CNT", + .reg_offset_group = {71, 70, 87, 86}, + .group_size = 4 + }, { + .reg_name = "PACKET_TC0~3_CURR_BUFFER_CNT", + .reg_offset_group = {1, 2, 3, 4}, + .group_size = 4 + }, { + .reg_name = "PACKET_TC4~7_CURR_BUFFER_CNT", + .reg_offset_group = {5, 6, 7, 8}, + .group_size = 4 + }, { + .reg_name = "ROC_RX_PACKET_IN_CNT", + .reg_offset_group = {21, 20}, + .group_size = 2 + }, { + .reg_name = "ROC_TX_PACKET_OUT_CNT", + .reg_offset_group = {23, 22}, + .group_size = 2 + } +}; + +static const struct hclge_mod_reg_info hclge_rpu_reg_0_info[] = { + { + .reg_name = "RPU_FSM_DFX_ST0/ST1_TNL", + .has_suffix = true, + .reg_offset_group = {1, 2}, + .group_size = 2 + }, { + .reg_name = "RPU_RX_PKT_DROP_CNT_TNL", + .has_suffix = true, + .reg_offset_group = {3}, + .group_size = 1 + } +}; + +static const struct hclge_mod_reg_info hclge_rpu_reg_1_info[] = { + { + .reg_name = "FIFO_DFX_ST0_1_2_4", + .reg_offset_group = {1, 2, 3, 5}, + .group_size = 4 + } +}; + +static const struct 
hclge_mod_reg_info hclge_igu_egu_reg_info[] = { + { + .reg_name = "IGU_RX_ERR_PKT", + .reg_offset_group = {1}, + .group_size = 1 + }, { + .reg_name = "IGU_RX_OUT_ALL_PKT", + .reg_offset_group = {29, 28}, + .group_size = 2 + }, { + .reg_name = "EGU_TX_OUT_ALL_PKT", + .reg_offset_group = {39, 38}, + .group_size = 2 + }, { + .reg_name = "EGU_TX_ERR_PKT", + .reg_offset_group = {5}, + .group_size = 1 + } +}; + +static const struct hclge_mod_reg_info hclge_gen_reg_info_tnl[] = { + { + .reg_name = "SSU2RPU_TNL_WR_PKT_CNT_TNL", + .has_suffix = true, + .reg_offset_group = {1}, + .group_size = 1 + }, { + .reg_name = "RPU2HST_TNL_WR_PKT_CNT_TNL", + .has_suffix = true, + .reg_offset_group = {12}, + .group_size = 1 + } +}; + +static const struct hclge_mod_reg_info hclge_gen_reg_info[] = { + { + .reg_name = "SSU_OVERSIZE_DROP_CNT", + .reg_offset_group = {12}, + .group_size = 1 + }, { + .reg_name = "ROCE_RX_BYPASS_5NS_DROP_NUM", + .reg_offset_group = {13}, + .group_size = 1 + }, { + .reg_name = "RX_PKT_IN/OUT_ERR_CNT", + .reg_offset_group = {15, 14, 19, 18}, + .group_size = 4 + }, { + .reg_name = "TX_PKT_IN/OUT_ERR_CNT", + .reg_offset_group = {17, 16, 21, 20}, + .group_size = 4 + }, { + .reg_name = "ETS_TC_READY", + .reg_offset_group = {22}, + .group_size = 1 + }, { + .reg_name = "MIB_TX/RX_BAD_PKTS", + .reg_offset_group = {19, 18, 29, 28}, + .group_size = 4 + }, { + .reg_name = "MIB_TX/RX_GOOD_PKTS", + .reg_offset_group = {21, 20, 31, 30}, + .group_size = 4 + }, { + .reg_name = "MIB_TX/RX_TOTAL_PKTS", + .reg_offset_group = {23, 22, 33, 32}, + .group_size = 4 + }, { + .reg_name = "MIB_TX/RX_PAUSE_PKTS", + .reg_offset_group = {25, 24, 35, 34}, + .group_size = 4 + }, { + .reg_name = "MIB_TX_ERR_ALL_PKTS", + .reg_offset_group = {27, 26}, + .group_size = 2 + }, { + .reg_name = "MIB_RX_FCS_ERR_PKTS", + .reg_offset_group = {37, 36}, + .group_size = 2 + }, { + .reg_name = "IGU_EGU_AUTO_GATE_EN", + .reg_offset_group = {42}, + .group_size = 1 + }, { + .reg_name = "IGU_EGU_INT_SRC", + .reg_offset_group = {43}, + .group_size = 1 + }, { + .reg_name = "EGU_READY_NUM_CFG", + .reg_offset_group = {44}, + .group_size = 1 + }, { + .reg_name = "IGU_EGU_TNL_DFX", + .reg_offset_group = {45}, + .group_size = 1 + }, { + .reg_name = "TX_TNL_NOTE_PKT", + .reg_offset_group = {46}, + .group_size = 1 + } +}; + +static const struct hclge_mod_reg_common_msg hclge_ssu_reg_common_msg[] = { + { + .cmd = HCLGE_OPC_DFX_SSU_REG_0, + .result_regs = hclge_ssu_reg_0_info, + .bd_num = HCLGE_BD_NUM_SSU_REG_0, + .result_regs_size = ARRAY_SIZE(hclge_ssu_reg_0_info) + }, { + .cmd = HCLGE_OPC_DFX_SSU_REG_1, + .result_regs = hclge_ssu_reg_1_info, + .bd_num = HCLGE_BD_NUM_SSU_REG_1, + .result_regs_size = ARRAY_SIZE(hclge_ssu_reg_1_info) + }, { + .cmd = HCLGE_OPC_DFX_RPU_REG_0, + .result_regs = hclge_rpu_reg_0_info, + .bd_num = HCLGE_BD_NUM_RPU_REG_0, + .result_regs_size = ARRAY_SIZE(hclge_rpu_reg_0_info), + .need_para = true + }, { + .cmd = HCLGE_OPC_DFX_RPU_REG_1, + .result_regs = hclge_rpu_reg_1_info, + .bd_num = HCLGE_BD_NUM_RPU_REG_1, + .result_regs_size = ARRAY_SIZE(hclge_rpu_reg_1_info) + }, { + .cmd = HCLGE_OPC_DFX_IGU_EGU_REG, + .result_regs = hclge_igu_egu_reg_info, + .bd_num = HCLGE_BD_NUM_IGU_EGU_REG, + .result_regs_size = ARRAY_SIZE(hclge_igu_egu_reg_info) + }, { + .cmd = HCLGE_OPC_DFX_GEN_REG, + .result_regs = hclge_gen_reg_info_tnl, + .bd_num = HCLGE_BD_NUM_GEN_REG, + .result_regs_size = ARRAY_SIZE(hclge_gen_reg_info_tnl), + .need_para = true + }, { + .cmd = HCLGE_OPC_DFX_GEN_REG, + .result_regs = hclge_gen_reg_info, + .bd_num = 
HCLGE_BD_NUM_GEN_REG, + .result_regs_size = ARRAY_SIZE(hclge_gen_reg_info) + } +}; + +static int +hclge_print_mod_reg_info(struct device *dev, struct hclge_desc *desc, + const struct hclge_mod_reg_info *reg_info, int size) +{ + int i, j, pos, actual_len; + u8 offset, bd_idx, index; + char *buf; + + buf = kzalloc(HCLGE_MOD_REG_INFO_LEN_MAX, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + for (i = 0; i < size; i++) { + actual_len = strlen(reg_info[i].reg_name) + + HCLGE_MOD_REG_EXTRA_LEN + + HCLGE_MOD_REG_VALUE_LEN * reg_info[i].group_size; + if (actual_len > HCLGE_MOD_REG_INFO_LEN_MAX) { + dev_info(dev, "length of reg(%s) is invalid, len=%d\n", + reg_info[i].reg_name, actual_len); + continue; + } + + pos = scnprintf(buf, HCLGE_MOD_REG_INFO_LEN_MAX, "%s", + reg_info[i].reg_name); + if (reg_info[i].has_suffix) + pos += scnprintf(buf + pos, + HCLGE_MOD_REG_INFO_LEN_MAX - pos, "%u", + le32_to_cpu(desc->data[0])); + pos += scnprintf(buf + pos, + HCLGE_MOD_REG_INFO_LEN_MAX - pos, + ":"); + for (j = 0; j < reg_info[i].group_size; j++) { + offset = reg_info[i].reg_offset_group[j]; + index = offset % HCLGE_DESC_DATA_LEN; + bd_idx = offset / HCLGE_DESC_DATA_LEN; + pos += scnprintf(buf + pos, + HCLGE_MOD_REG_INFO_LEN_MAX - pos, + " %08x", + le32_to_cpu(desc[bd_idx].data[index])); + } + dev_info(dev, "%s\n", buf); + } + + kfree(buf); + return 0; +} + +static bool hclge_err_mod_check_support_cmd(enum hclge_opcode_type opcode, + struct hclge_dev *hdev) +{ + if (opcode == HCLGE_OPC_DFX_GEN_REG && + !hnae3_ae_dev_gen_reg_dfx_supported(hdev)) + return false; + return true; +} + +/* For each common msg, send cmdq to IMP and print result reg info. + * If there is a parameter, loop it and request. + */ +static void +hclge_query_reg_info(struct hclge_dev *hdev, + struct hclge_mod_reg_common_msg *msg, u32 loop_time, + u32 *loop_para) +{ + int desc_len, i, ret; + + desc_len = msg->bd_num * sizeof(struct hclge_desc); + msg->desc = kzalloc(desc_len, GFP_KERNEL); + if (!msg->desc) { + dev_err(&hdev->pdev->dev, "failed to query reg info, ret=%d", + -ENOMEM); + return; + } + + for (i = 0; i < loop_time; i++) { + ret = hclge_dbg_cmd_send(hdev, msg->desc, *loop_para, + msg->bd_num, msg->cmd); + loop_para++; + if (ret) + continue; + ret = hclge_print_mod_reg_info(&hdev->pdev->dev, msg->desc, + msg->result_regs, + msg->result_regs_size); + if (ret) + dev_err(&hdev->pdev->dev, "failed to print mod reg info, ret=%d\n", + ret); + } + + kfree(msg->desc); +} + +static void hclge_query_reg_info_of_ssu(struct hclge_dev *hdev) +{ + u32 loop_para[HCLGE_MOD_MSG_PARA_ARRAY_MAX_SIZE] = {0}; + struct hclge_mod_reg_common_msg msg; + u8 i, j, num; + u32 loop_time; + + num = ARRAY_SIZE(hclge_ssu_reg_common_msg); + for (i = 0; i < num; i++) { + msg = hclge_ssu_reg_common_msg[i]; + if (!hclge_err_mod_check_support_cmd(msg.cmd, hdev)) + continue; + loop_time = 1; + loop_para[0] = 0; + if (msg.need_para) { + loop_time = hdev->ae_dev->dev_specs.tnl_num; + for (j = 0; j < loop_time; j++) + loop_para[j] = j + 1; + } + hclge_query_reg_info(hdev, &msg, loop_time, loop_para); + } +} + static const struct hclge_hw_module_id hclge_hw_module_id_st[] = { { .module_id = MODULE_NONE, @@ -1210,7 +1629,8 @@ static const struct hclge_hw_module_id hclge_hw_module_id_st[] = { .msg = "MODULE_GE" }, { .module_id = MODULE_IGU_EGU, - .msg = "MODULE_IGU_EGU" + .msg = "MODULE_IGU_EGU", + .query_reg_info = hclge_query_reg_info_of_ssu }, { .module_id = MODULE_LGE, .msg = "MODULE_LGE" @@ -1231,7 +1651,8 @@ static const struct hclge_hw_module_id hclge_hw_module_id_st[] 
= { .msg = "MODULE_RTC" }, { .module_id = MODULE_SSU, - .msg = "MODULE_SSU" + .msg = "MODULE_SSU", + .query_reg_info = hclge_query_reg_info_of_ssu }, { .module_id = MODULE_TM, .msg = "MODULE_TM" @@ -2762,7 +3183,7 @@ void hclge_handle_occurred_error(struct hclge_dev *hdev) } static bool -hclge_handle_error_type_reg_log(struct device *dev, +hclge_handle_error_type_reg_log(struct hclge_dev *hdev, struct hclge_mod_err_info *mod_info, struct hclge_type_reg_err_info *type_reg_info) { @@ -2770,6 +3191,7 @@ hclge_handle_error_type_reg_log(struct device *dev, #define HCLGE_ERR_TYPE_IS_RAS_OFFSET 7 u8 mod_id, total_module, type_id, total_type, i, is_ras; + struct device *dev = &hdev->pdev->dev; u8 index_module = MODULE_NONE; u8 index_type = NONE_ERROR; bool cause_by_vf = false; @@ -2810,6 +3232,9 @@ hclge_handle_error_type_reg_log(struct device *dev, for (i = 0; i < type_reg_info->reg_num; i++) dev_err(dev, "0x%08x\n", type_reg_info->hclge_reg[i]); + if (hclge_hw_module_id_st[index_module].query_reg_info) + hclge_hw_module_id_st[index_module].query_reg_info(hdev); + return cause_by_vf; } @@ -2850,7 +3275,7 @@ static void hclge_handle_error_module_log(struct hnae3_ae_dev *ae_dev, type_reg_info = (struct hclge_type_reg_err_info *) &buf[offset++]; - if (hclge_handle_error_type_reg_log(dev, mod_info, + if (hclge_handle_error_type_reg_log(hdev, mod_info, type_reg_info)) cause_by_vf = true; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h index 68b738affa66..45a783a50643 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h @@ -5,6 +5,7 @@ #define __HCLGE_ERR_H #include "hclge_main.h" +#include "hclge_debugfs.h" #include "hnae3.h" #define HCLGE_MPF_RAS_INT_MIN_BD_NUM 10 @@ -115,6 +116,18 @@ #define HCLGE_REG_NUM_MAX 256 #define HCLGE_DESC_NO_DATA_LEN 8 +#define HCLGE_BD_NUM_SSU_REG_0 10 +#define HCLGE_BD_NUM_SSU_REG_1 15 +#define HCLGE_BD_NUM_RPU_REG_0 1 +#define HCLGE_BD_NUM_RPU_REG_1 2 +#define HCLGE_BD_NUM_IGU_EGU_REG 9 +#define HCLGE_BD_NUM_GEN_REG 8 +#define HCLGE_MOD_REG_INFO_LEN_MAX 256 +#define HCLGE_MOD_REG_EXTRA_LEN 11 +#define HCLGE_MOD_REG_VALUE_LEN 9 +#define HCLGE_MOD_REG_GROUP_MAX_SIZE 6 +#define HCLGE_MOD_MSG_PARA_ARRAY_MAX_SIZE 8 + enum hclge_err_int_type { HCLGE_ERR_INT_MSIX = 0, HCLGE_ERR_INT_RAS_CE = 1, @@ -191,6 +204,7 @@ struct hclge_hw_error { struct hclge_hw_module_id { enum hclge_mod_name_list module_id; const char *msg; + void (*query_reg_info)(struct hclge_dev *hdev); }; struct hclge_hw_type_id { @@ -218,6 +232,28 @@ struct hclge_type_reg_err_info { u32 hclge_reg[HCLGE_REG_NUM_MAX]; }; +struct hclge_mod_reg_info { + const char *reg_name; + bool has_suffix; /* add suffix for register name */ + /* the positions of reg values in hclge_desc.data */ + u8 reg_offset_group[HCLGE_MOD_REG_GROUP_MAX_SIZE]; + u8 group_size; +}; + +/* This structure defines cmdq used to query the hardware module debug + * regisgers. 
+ */ +struct hclge_mod_reg_common_msg { + enum hclge_opcode_type cmd; + struct hclge_desc *desc; + u8 bd_num; /* the bd number of hclge_desc used */ + bool need_para; /* whether this cmdq needs to add para */ + + /* the regs need to print */ + const struct hclge_mod_reg_info *result_regs; + u16 result_regs_size; +}; + int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en); int hclge_config_nic_hw_error(struct hclge_dev *hdev, bool state); int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index ff6a2ed23ddb..43cc6ee4d87d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -27,6 +27,8 @@ #include "hclge_devlink.h" #include "hclge_comm_cmd.h" +#include "hclge_trace.h" + #define HCLGE_NAME "hclge" #define HCLGE_BUF_SIZE_UNIT 256U @@ -391,6 +393,48 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) return hclge_comm_cmd_send(&hw->hw, desc, num); } +static void hclge_trace_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc, + int num, bool is_special) +{ + int i; + + trace_hclge_pf_cmd_send(hw, desc, 0, num); + + if (!is_special) { + for (i = 1; i < num; i++) + trace_hclge_pf_cmd_send(hw, &desc[i], i, num); + } else { + for (i = 1; i < num; i++) + trace_hclge_pf_special_cmd_send(hw, (__le32 *)&desc[i], + i, num); + } +} + +static void hclge_trace_cmd_get(struct hclge_comm_hw *hw, struct hclge_desc *desc, + int num, bool is_special) +{ + int i; + + if (!HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag))) + return; + + trace_hclge_pf_cmd_get(hw, desc, 0, num); + + if (!is_special) { + for (i = 1; i < num; i++) + trace_hclge_pf_cmd_get(hw, &desc[i], i, num); + } else { + for (i = 1; i < num; i++) + trace_hclge_pf_special_cmd_get(hw, (__le32 *)&desc[i], + i, num); + } +} + +static const struct hclge_comm_cmq_ops hclge_cmq_ops = { + .trace_cmd_send = hclge_trace_cmd_send, + .trace_cmd_get = hclge_trace_cmd_get, +}; + static int hclge_mac_update_stats_defective(struct hclge_dev *hdev) { #define HCLGE_MAC_CMD_NUM 21 @@ -1537,6 +1581,9 @@ static int hclge_configure(struct hclge_dev *hdev) cfg.default_speed, ret); return ret; } + hdev->hw.mac.req_speed = hdev->hw.mac.speed; + hdev->hw.mac.req_autoneg = AUTONEG_ENABLE; + hdev->hw.mac.req_duplex = DUPLEX_FULL; hclge_parse_link_mode(hdev, cfg.speed_ability); @@ -1766,7 +1813,8 @@ static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps) nic->pdev = hdev->pdev; nic->ae_algo = &ae_algo; - nic->numa_node_mask = hdev->numa_node_mask; + bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits, + MAX_NUMNODES); nic->kinfo.io_base = hdev->hw.hw.io_base; ret = hclge_knic_setup(vport, num_tqps, @@ -2458,7 +2506,8 @@ static int hclge_init_roce_base_info(struct hclge_vport *vport) roce->pdev = nic->pdev; roce->ae_algo = nic->ae_algo; - roce->numa_node_mask = nic->numa_node_mask; + bitmap_copy(roce->numa_node_mask.bits, nic->numa_node_mask.bits, + MAX_NUMNODES); return 0; } @@ -3342,9 +3391,9 @@ hclge_set_phy_link_ksettings(struct hnae3_handle *handle, return ret; } - hdev->hw.mac.autoneg = cmd->base.autoneg; - hdev->hw.mac.speed = cmd->base.speed; - hdev->hw.mac.duplex = cmd->base.duplex; + hdev->hw.mac.req_autoneg = cmd->base.autoneg; + hdev->hw.mac.req_speed = cmd->base.speed; + hdev->hw.mac.req_duplex = cmd->base.duplex; linkmode_copy(hdev->hw.mac.advertising, 
cmd->link_modes.advertising); return 0; @@ -3377,9 +3426,9 @@ static int hclge_tp_port_init(struct hclge_dev *hdev) if (!hnae3_dev_phy_imp_supported(hdev)) return 0; - cmd.base.autoneg = hdev->hw.mac.autoneg; - cmd.base.speed = hdev->hw.mac.speed; - cmd.base.duplex = hdev->hw.mac.duplex; + cmd.base.autoneg = hdev->hw.mac.req_autoneg; + cmd.base.speed = hdev->hw.mac.req_speed; + cmd.base.duplex = hdev->hw.mac.req_duplex; linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising); return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd); @@ -7178,8 +7227,9 @@ static void hclge_get_cls_key_vlan(const struct flow_rule *flow, } } -static void hclge_get_cls_key_ip(const struct flow_rule *flow, - struct hclge_fd_rule *rule) +static int hclge_get_cls_key_ip(const struct flow_rule *flow, + struct hclge_fd_rule *rule, + struct netlink_ext_ack *extack) { u16 addr_type = 0; @@ -7188,6 +7238,9 @@ static void hclge_get_cls_key_ip(const struct flow_rule *flow, flow_rule_match_control(flow, &match); addr_type = match.key->addr_type; + + if (flow_rule_has_control_flags(match.mask->flags, extack)) + return -EOPNOTSUPP; } if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { @@ -7216,6 +7269,8 @@ static void hclge_get_cls_key_ip(const struct flow_rule *flow, rule->unused_tuple |= BIT(INNER_SRC_IP); rule->unused_tuple |= BIT(INNER_DST_IP); } + + return 0; } static void hclge_get_cls_key_port(const struct flow_rule *flow, @@ -7241,7 +7296,9 @@ static int hclge_parse_cls_flower(struct hclge_dev *hdev, struct hclge_fd_rule *rule) { struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower); + struct netlink_ext_ack *extack = cls_flower->common.extack; struct flow_dissector *dissector = flow->match.dissector; + int ret; if (dissector->used_keys & ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | @@ -7259,7 +7316,11 @@ static int hclge_parse_cls_flower(struct hclge_dev *hdev, hclge_get_cls_key_basic(flow, rule); hclge_get_cls_key_mac(flow, rule); hclge_get_cls_key_vlan(flow, rule); - hclge_get_cls_key_ip(flow, rule); + + ret = hclge_get_cls_key_ip(flow, rule, extack); + if (ret) + return ret; + hclge_get_cls_key_port(flow, rule); return 0; @@ -7952,8 +8013,7 @@ static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable) /* Set the DOWN flag here to disable link updating */ set_bit(HCLGE_STATE_DOWN, &hdev->state); - /* flush memory to make sure DOWN is seen by service task */ - smp_mb__before_atomic(); + smp_mb__after_atomic(); /* flush memory to make sure DOWN is seen by service task */ hclge_flush_link_update(hdev); } } @@ -9906,6 +9966,7 @@ static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) static int hclge_init_vlan_filter(struct hclge_dev *hdev) { struct hclge_vport *vport; + bool enable = true; int ret; int i; @@ -9925,8 +9986,12 @@ static int hclge_init_vlan_filter(struct hclge_dev *hdev) vport->cur_vlan_fltr_en = true; } + if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps) && + !test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps)) + enable = false; + return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, - HCLGE_FILTER_FE_INGRESS, true, 0); + HCLGE_FILTER_FE_INGRESS, enable, 0); } static int hclge_init_vlan_type(struct hclge_dev *hdev) @@ -10839,6 +10904,24 @@ static u32 hclge_get_fw_version(struct hnae3_handle *handle) return hdev->fw_version; } +int hclge_query_scc_version(struct hclge_dev *hdev, u32 *scc_version) +{ + struct hclge_comm_query_scc_cmd *resp; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, 
HCLGE_OPC_QUERY_SCC_VER, 1); + resp = (struct hclge_comm_query_scc_cmd *)desc.data; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + return ret; + + *scc_version = le32_to_cpu(resp->scc_version); + + return 0; +} + static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) { struct phy_device *phydev = hdev->hw.mac.phydev; @@ -11622,18 +11705,13 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) if (ret) goto out; - ret = hclge_devlink_init(hdev); - if (ret) - goto err_pci_uninit; - - devl_lock(hdev->devlink); - /* Firmware command queue initialize */ ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw); if (ret) - goto err_devlink_uninit; + goto err_pci_uninit; /* Firmware command initialize */ + hclge_comm_cmd_init_ops(&hdev->hw.hw, &hclge_cmq_ops); ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version, true, hdev->reset_pending); if (ret) @@ -11759,7 +11837,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) ret = hclge_update_port_info(hdev); if (ret) - goto err_mdiobus_unreg; + goto err_ptp_uninit; INIT_KFIFO(hdev->mac_tnl_log); @@ -11799,6 +11877,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) dev_warn(&pdev->dev, "failed to wake on lan init, ret = %d\n", ret); + ret = hclge_devlink_init(hdev); + if (ret) + goto err_ptp_uninit; + hclge_state_init(hdev); hdev->last_reset_time = jiffies; @@ -11806,10 +11888,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) HCLGE_DRIVER_NAME); hclge_task_schedule(hdev, round_jiffies_relative(HZ)); - - devl_unlock(hdev->devlink); return 0; +err_ptp_uninit: + hclge_ptp_uninit(hdev); err_mdiobus_unreg: if (hdev->hw.mac.phydev) mdiobus_unregister(hdev->hw.mac.mdio_bus); @@ -11819,9 +11901,6 @@ err_msi_uninit: pci_free_irq_vectors(pdev); err_cmd_uninit: hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); -err_devlink_uninit: - devl_unlock(hdev->devlink); - hclge_devlink_uninit(hdev); err_pci_uninit: pcim_iounmap(pdev, hdev->hw.hw.io_base); pci_release_regions(pdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index e821dd2f1528..b5178b0f88b3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -279,11 +279,14 @@ struct hclge_mac { u8 media_type; /* port media type, e.g. fibre/copper/backplane */ u8 mac_addr[ETH_ALEN]; u8 autoneg; + u8 req_autoneg; u8 duplex; + u8 req_duplex; u8 support_autoneg; u8 speed_type; /* 0: sfp speed, 1: active speed */ u8 lane_num; u32 speed; + u32 req_speed; u32 max_speed; u32 speed_ability; /* speed ability supported by current media */ u32 module_type; /* sub media type, e.g. 
kr/cr/sr/lr */ @@ -891,7 +894,7 @@ struct hclge_dev { u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */ u16 num_alloc_vport; /* Num vports this driver supports */ - u32 numa_node_mask; + nodemask_t numa_node_mask; u16 rx_buf_len; u16 num_tx_desc; /* desc num of per tx queue */ u16 num_rx_desc; /* desc num of per rx queue */ @@ -1169,4 +1172,5 @@ int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en); int hclge_mac_update_stats(struct hclge_dev *hdev); struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf); int hclge_inform_vf_reset(struct hclge_vport *vport, u16 reset_type); +int hclge_query_scc_version(struct hclge_dev *hdev, u32 *scc_version); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index d4a0e0be7a72..59c863306657 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -1077,12 +1077,13 @@ static void hclge_mbx_request_handling(struct hclge_mbx_ops_param *param) hdev = param->vport->back; cmd_func = hclge_mbx_ops_list[param->req->msg.code]; - if (cmd_func) - ret = cmd_func(param); - else + if (!cmd_func) { dev_err(&hdev->pdev->dev, "un-supported mailbox message, code = %u\n", param->req->msg.code); + return; + } + ret = cmd_func(param); /* PF driver should not reply IMP */ if (hnae3_get_bit(param->req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) && diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h index f3cd5a376eca..7e47f0c21d88 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h @@ -10,6 +10,7 @@ #include <linux/tracepoint.h> +#define PF_DESC_LEN (sizeof(struct hclge_desc) / sizeof(u32)) #define PF_GET_MBX_LEN (sizeof(struct hclge_mbx_vf_to_pf_cmd) / sizeof(u32)) #define PF_SEND_MBX_LEN (sizeof(struct hclge_mbx_pf_to_vf_cmd) / sizeof(u32)) @@ -77,6 +78,99 @@ TRACE_EVENT(hclge_pf_mbx_send, ) ); +DECLARE_EVENT_CLASS(hclge_pf_cmd_template, + TP_PROTO(struct hclge_comm_hw *hw, + struct hclge_desc *desc, + int index, + int num), + TP_ARGS(hw, desc, index, num), + + TP_STRUCT__entry(__field(u16, opcode) + __field(u16, flag) + __field(u16, retval) + __field(u16, rsv) + __field(int, index) + __field(int, num) + __string(pciname, pci_name(hw->cmq.csq.pdev)) + __array(u32, data, HCLGE_DESC_DATA_LEN)), + + TP_fast_assign(int i; + __entry->opcode = le16_to_cpu(desc->opcode); + __entry->flag = le16_to_cpu(desc->flag); + __entry->retval = le16_to_cpu(desc->retval); + __entry->rsv = le16_to_cpu(desc->rsv); + __entry->index = index; + __entry->num = num; + __assign_str(pciname, pci_name(hw->cmq.csq.pdev)); + for (i = 0; i < HCLGE_DESC_DATA_LEN; i++) + __entry->data[i] = le32_to_cpu(desc->data[i]);), + + TP_printk("%s opcode:0x%04x %d-%d flag:0x%04x retval:0x%04x rsv:0x%04x data:%s", + __get_str(pciname), __entry->opcode, + __entry->index, __entry->num, + __entry->flag, __entry->retval, __entry->rsv, + __print_array(__entry->data, + HCLGE_DESC_DATA_LEN, sizeof(u32))) +); + +DEFINE_EVENT(hclge_pf_cmd_template, hclge_pf_cmd_send, + TP_PROTO(struct hclge_comm_hw *hw, + struct hclge_desc *desc, + int index, + int num), + TP_ARGS(hw, desc, index, num) +); + +DEFINE_EVENT(hclge_pf_cmd_template, hclge_pf_cmd_get, + TP_PROTO(struct hclge_comm_hw *hw, + struct hclge_desc *desc, + int index, + int num), + TP_ARGS(hw, desc, index, num) 
+); + +DECLARE_EVENT_CLASS(hclge_pf_special_cmd_template, + TP_PROTO(struct hclge_comm_hw *hw, + __le32 *data, + int index, + int num), + TP_ARGS(hw, data, index, num), + + TP_STRUCT__entry(__field(int, index) + __field(int, num) + __string(pciname, pci_name(hw->cmq.csq.pdev)) + __array(u32, data, PF_DESC_LEN)), + + TP_fast_assign(int i; + __entry->index = index; + __entry->num = num; + __assign_str(pciname, pci_name(hw->cmq.csq.pdev)); + for (i = 0; i < PF_DESC_LEN; i++) + __entry->data[i] = le32_to_cpu(data[i]); + ), + + TP_printk("%s %d-%d data:%s", + __get_str(pciname), + __entry->index, __entry->num, + __print_array(__entry->data, + PF_DESC_LEN, sizeof(u32))) +); + +DEFINE_EVENT(hclge_pf_special_cmd_template, hclge_pf_special_cmd_send, + TP_PROTO(struct hclge_comm_hw *hw, + __le32 *desc, + int index, + int num), + TP_ARGS(hw, desc, index, num)); + +DEFINE_EVENT(hclge_pf_special_cmd_template, hclge_pf_special_cmd_get, + TP_PROTO(struct hclge_comm_hw *hw, + __le32 *desc, + int index, + int num), + TP_ARGS(hw, desc, index, num) +); + #endif /* _HCLGE_TRACE_H_ */ /* This must be outside ifdef _HCLGE_TRACE_H */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 0aa9beefd1c7..3735d2fed11f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -11,6 +11,7 @@ #include "hnae3.h" #include "hclgevf_devlink.h" #include "hclge_comm_rss.h" +#include "hclgevf_trace.h" #define HCLGEVF_NAME "hclgevf" @@ -47,6 +48,42 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num) return hclge_comm_cmd_send(&hw->hw, desc, num); } +static void hclgevf_trace_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc, + int num, bool is_special) +{ + int i; + + trace_hclge_vf_cmd_send(hw, desc, 0, num); + + if (is_special) + return; + + for (i = 1; i < num; i++) + trace_hclge_vf_cmd_send(hw, &desc[i], i, num); +} + +static void hclgevf_trace_cmd_get(struct hclge_comm_hw *hw, struct hclge_desc *desc, + int num, bool is_special) +{ + int i; + + if (!HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag))) + return; + + trace_hclge_vf_cmd_get(hw, desc, 0, num); + + if (is_special) + return; + + for (i = 1; i < num; i++) + trace_hclge_vf_cmd_get(hw, &desc[i], i, num); +} + +static const struct hclge_comm_cmq_ops hclgevf_cmq_ops = { + .trace_cmd_send = hclgevf_trace_cmd_send, + .trace_cmd_get = hclgevf_trace_cmd_get, +}; + void hclgevf_arq_init(struct hclgevf_dev *hdev) { struct hclge_comm_cmq *cmdq = &hdev->hw.hw.cmq; @@ -412,7 +449,8 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) nic->ae_algo = &ae_algovf; nic->pdev = hdev->pdev; - nic->numa_node_mask = hdev->numa_node_mask; + bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits, + MAX_NUMNODES); nic->flags |= HNAE3_SUPPORT_VF; nic->kinfo.io_base = hdev->hw.hw.io_base; @@ -2082,8 +2120,8 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) roce->pdev = nic->pdev; roce->ae_algo = nic->ae_algo; - roce->numa_node_mask = nic->numa_node_mask; - + bitmap_copy(roce->numa_node_mask.bits, nic->numa_node_mask.bits, + MAX_NUMNODES); return 0; } @@ -2180,8 +2218,7 @@ static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable) } else { set_bit(HCLGEVF_STATE_DOWN, &hdev->state); - /* flush memory to make sure DOWN is seen by service task */ - smp_mb__before_atomic(); + smp_mb__after_atomic(); /* flush memory to make sure DOWN is seen by 
service task */ hclgevf_flush_link_update(hdev); } } @@ -2796,6 +2833,7 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) } hclgevf_arq_init(hdev); + ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version, false, hdev->reset_pending); @@ -2845,15 +2883,13 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) if (ret) return ret; - ret = hclgevf_devlink_init(hdev); - if (ret) - goto err_devlink_init; - ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw); if (ret) goto err_cmd_queue_init; hclgevf_arq_init(hdev); + + hclge_comm_cmd_init_ops(&hdev->hw.hw, &hclgevf_cmq_ops); ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version, false, hdev->reset_pending); @@ -2941,6 +2977,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) hclgevf_init_rxd_adv_layout(hdev); + ret = hclgevf_devlink_init(hdev); + if (ret) + goto err_config; + set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state); hdev->last_reset_time = jiffies; @@ -2960,8 +3000,6 @@ err_misc_irq_init: err_cmd_init: hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); err_cmd_queue_init: - hclgevf_devlink_uninit(hdev); -err_devlink_init: hclgevf_pci_uninit(hdev); clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); return ret; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index a73f2bf3a56a..cccef3228461 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -236,7 +236,7 @@ struct hclgevf_dev { u16 rss_size_max; /* HW defined max RSS task queue */ u16 num_alloc_vport; /* num vports this driver supports */ - u32 numa_node_mask; + nodemask_t numa_node_mask; u16 rx_buf_len; u16 num_tx_desc; /* desc num of per tx queue */ u16 num_rx_desc; /* desc num of per rx queue */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h index b259e95dd53c..e2e3a2602b6a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h @@ -77,6 +77,56 @@ TRACE_EVENT(hclge_vf_mbx_send, ) ); +DECLARE_EVENT_CLASS(hclge_vf_cmd_template, + TP_PROTO(struct hclge_comm_hw *hw, + struct hclge_desc *desc, + int index, + int num), + + TP_ARGS(hw, desc, index, num), + + TP_STRUCT__entry(__field(u16, opcode) + __field(u16, flag) + __field(u16, retval) + __field(u16, rsv) + __field(int, index) + __field(int, num) + __string(pciname, pci_name(hw->cmq.csq.pdev)) + __array(u32, data, HCLGE_DESC_DATA_LEN)), + + TP_fast_assign(int i; + __entry->opcode = le16_to_cpu(desc->opcode); + __entry->flag = le16_to_cpu(desc->flag); + __entry->retval = le16_to_cpu(desc->retval); + __entry->rsv = le16_to_cpu(desc->rsv); + __entry->index = index; + __entry->num = num; + __assign_str(pciname, pci_name(hw->cmq.csq.pdev)); + for (i = 0; i < HCLGE_DESC_DATA_LEN; i++) + __entry->data[i] = le32_to_cpu(desc->data[i]);), + + TP_printk("%s opcode:0x%04x %d-%d flag:0x%04x retval:0x%04x rsv:0x%04x data:%s", + __get_str(pciname), __entry->opcode, + __entry->index, __entry->num, + __entry->flag, __entry->retval, __entry->rsv, + __print_array(__entry->data, + HCLGE_DESC_DATA_LEN, sizeof(u32))) +); + +DEFINE_EVENT(hclge_vf_cmd_template, hclge_vf_cmd_send, + TP_PROTO(struct hclge_comm_hw *hw, + struct hclge_desc *desc, + int index, + int num), + TP_ARGS(hw, desc, index, num)); + +DEFINE_EVENT(hclge_vf_cmd_template, hclge_vf_cmd_get, + 
TP_PROTO(struct hclge_comm_hw *hw, + struct hclge_desc *desc, + int index, + int num), + TP_ARGS(hw, desc, index, num)); + #endif /* _HCLGEVF_TRACE_H_ */ /* This must be outside ifdef _HCLGEVF_TRACE_H */ diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index 499c657d37a9..890f213da8d1 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -581,7 +581,7 @@ static int hinic_change_mtu(struct net_device *netdev, int new_mtu) if (err) netif_err(nic_dev, drv, netdev, "Failed to set port mtu\n"); else - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); return err; } diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index e6e47b1842ea..a19d098f2e2b 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -1098,7 +1098,7 @@ static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu) /* This is to prevent starting RX channel in emac_rx_enable() */ set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags); - dev->ndev->mtu = new_mtu; + WRITE_ONCE(dev->ndev->mtu, new_mtu); emac_full_tx_reset(dev); } @@ -1130,7 +1130,7 @@ static int emac_change_mtu(struct net_device *ndev, int new_mtu) } if (!ret) { - ndev->mtu = new_mtu; + WRITE_ONCE(ndev->mtu, new_mtu); dev->rx_skb_size = emac_rx_skb_size(new_mtu); dev->rx_sync_size = emac_rx_sync_size(new_mtu); } diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c index 2439f7e96e05..d92dd9c83031 100644 --- a/drivers/net/ethernet/ibm/emac/mal.c +++ b/drivers/net/ethernet/ibm/emac/mal.c @@ -605,9 +605,13 @@ static int mal_probe(struct platform_device *ofdev) INIT_LIST_HEAD(&mal->list); spin_lock_init(&mal->lock); - init_dummy_netdev(&mal->dummy_dev); + mal->dummy_dev = alloc_netdev_dummy(0); + if (!mal->dummy_dev) { + err = -ENOMEM; + goto fail_unmap; + } - netif_napi_add_weight(&mal->dummy_dev, &mal->napi, mal_poll, + netif_napi_add_weight(mal->dummy_dev, &mal->napi, mal_poll, CONFIG_IBM_EMAC_POLL_WEIGHT); /* Load power-on reset defaults */ @@ -637,7 +641,7 @@ static int mal_probe(struct platform_device *ofdev) GFP_KERNEL); if (mal->bd_virt == NULL) { err = -ENOMEM; - goto fail_unmap; + goto fail_dummy; } for (i = 0; i < mal->num_tx_chans; ++i) @@ -703,6 +707,8 @@ static int mal_probe(struct platform_device *ofdev) free_irq(mal->serr_irq, mal); fail2: dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma); + fail_dummy: + free_netdev(mal->dummy_dev); fail_unmap: dcr_unmap(mal->dcr_host, 0x100); fail: @@ -734,6 +740,8 @@ static void mal_remove(struct platform_device *ofdev) mal_reset(mal); + free_netdev(mal->dummy_dev); + dma_free_coherent(&ofdev->dev, sizeof(struct mal_descriptor) * (NUM_TX_BUFF * mal->num_tx_chans + diff --git a/drivers/net/ethernet/ibm/emac/mal.h b/drivers/net/ethernet/ibm/emac/mal.h index d212373a72e7..e0ddc41186a2 100644 --- a/drivers/net/ethernet/ibm/emac/mal.h +++ b/drivers/net/ethernet/ibm/emac/mal.h @@ -205,7 +205,7 @@ struct mal_instance { int index; spinlock_t lock; - struct net_device dummy_dev; + struct net_device *dummy_dev; unsigned int features; }; diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index b5aef0b29efe..4c9d9badd698 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1537,7 +1537,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) adapter->rx_buff_pool[i].active = 1; if 
(new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) { - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); vio_cmo_set_dev_desired(viodev, ibmveth_get_desired_dma (viodev)); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 30c47b8470ad..5e9a93bdb518 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -2371,7 +2371,7 @@ static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter, ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq); else ind_bufp->index = 0; - return 0; + return rc; } static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) @@ -2424,7 +2424,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_dropped++; tx_send_failed++; ret = NETDEV_TX_OK; - ibmvnic_tx_scrq_flush(adapter, tx_scrq); + lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq); + if (lpar_rc != H_SUCCESS) + goto tx_err; goto out; } @@ -2439,8 +2441,10 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) dev_kfree_skb_any(skb); tx_send_failed++; tx_dropped++; - ibmvnic_tx_scrq_flush(adapter, tx_scrq); ret = NETDEV_TX_OK; + lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq); + if (lpar_rc != H_SUCCESS) + goto tx_err; goto out; } diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 639fbb12bd35..e0287fbd501d 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -16,6 +16,9 @@ config NET_VENDOR_INTEL if NET_VENDOR_INTEL +source "drivers/net/ethernet/intel/libeth/Kconfig" +source "drivers/net/ethernet/intel/libie/Kconfig" + config E100 tristate "Intel(R) PRO/100+ support" depends on PCI @@ -41,7 +44,7 @@ config E100 config E1000 tristate "Intel(R) PRO/1000 Gigabit Ethernet support" - depends on PCI + depends on PCI && HAS_IOPORT help This driver supports Intel(R) PRO/1000 gigabit ethernet family of adapters. For more information on how to identify your adapter, go @@ -225,6 +228,7 @@ config I40E depends on PTP_1588_CLOCK_OPTIONAL depends on PCI select AUXILIARY_BUS + select LIBIE select NET_DEVLINK help This driver supports Intel(R) Ethernet Controller XL710 Family of @@ -253,6 +257,8 @@ config I40E_DCB # so that CONFIG_IAVF symbol will always mirror the state of CONFIG_I40EVF config IAVF tristate + select LIBIE + config I40EVF tristate "Intel(R) Ethernet Adaptive Virtual Function support" select IAVF @@ -283,6 +289,7 @@ config ICE depends on GNSS || GNSS = n select AUXILIARY_BUS select DIMLIB + select LIBIE select NET_DEVLINK select PLDMFW select DPLL diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile index dacb481ee5b1..04c844ef4964 100644 --- a/drivers/net/ethernet/intel/Makefile +++ b/drivers/net/ethernet/intel/Makefile @@ -3,6 +3,9 @@ # Makefile for the Intel network device drivers. 
# +obj-$(CONFIG_LIBETH) += libeth/ +obj-$(CONFIG_LIBIE) += libie/ + obj-$(CONFIG_E100) += e100.o obj-$(CONFIG_E1000) += e1000/ obj-$(CONFIG_E1000E) += e1000e/ diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 3fcb8daaa243..9b068d40778d 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -3037,7 +3037,7 @@ static int __e100_power_off(struct pci_dev *pdev, bool wake) return 0; } -static int __maybe_unused e100_suspend(struct device *dev_d) +static int e100_suspend(struct device *dev_d) { bool wake; @@ -3046,7 +3046,7 @@ static int __maybe_unused e100_suspend(struct device *dev_d) return 0; } -static int __maybe_unused e100_resume(struct device *dev_d) +static int e100_resume(struct device *dev_d) { struct net_device *netdev = dev_get_drvdata(dev_d); struct nic *nic = netdev_priv(netdev); @@ -3163,7 +3163,7 @@ static const struct pci_error_handlers e100_err_handler = { .resume = e100_io_resume, }; -static SIMPLE_DEV_PM_OPS(e100_pm_ops, e100_suspend, e100_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(e100_pm_ops, e100_suspend, e100_resume); static struct pci_driver e100_driver = { .name = DRV_NAME, @@ -3172,7 +3172,7 @@ static struct pci_driver e100_driver = { .remove = e100_remove, /* Power Management hooks */ - .driver.pm = &e100_pm_ops, + .driver.pm = pm_sleep_ptr(&e100_pm_ops), .shutdown = e100_shutdown, .err_handler = &e100_err_handler, diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 1d1e93686af2..60fff9a6c53e 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -149,8 +149,8 @@ static int e1000_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid); static void e1000_restore_vlan(struct e1000_adapter *adapter); -static int __maybe_unused e1000_suspend(struct device *dev); -static int __maybe_unused e1000_resume(struct device *dev); +static int e1000_suspend(struct device *dev); +static int e1000_resume(struct device *dev); static void e1000_shutdown(struct pci_dev *pdev); #ifdef CONFIG_NET_POLL_CONTROLLER @@ -175,16 +175,14 @@ static const struct pci_error_handlers e1000_err_handler = { .resume = e1000_io_resume, }; -static SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume); static struct pci_driver e1000_driver = { .name = e1000_driver_name, .id_table = e1000_pci_tbl, .probe = e1000_probe, .remove = e1000_remove, - .driver = { - .pm = &e1000_pm_ops, - }, + .driver.pm = pm_sleep_ptr(&e1000_pm_ops), .shutdown = e1000_shutdown, .err_handler = &e1000_err_handler }; @@ -3571,7 +3569,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); if (netif_running(netdev)) e1000_up(adapter); @@ -5135,7 +5133,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) return 0; } -static int __maybe_unused e1000_suspend(struct device *dev) +static int e1000_suspend(struct device *dev) { int retval; struct pci_dev *pdev = to_pci_dev(dev); @@ -5147,7 +5145,7 @@ static int __maybe_unused e1000_suspend(struct device *dev) return retval; } -static int __maybe_unused e1000_resume(struct device *dev) +static int e1000_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); diff --git 
a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index 23a58cada43a..5e2cfa73f889 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -679,8 +679,6 @@ /* PCI/PCI-X/PCI-EX Config space */ #define PCI_HEADER_TYPE_REGISTER 0x0E -#define PCI_HEADER_TYPE_MULTIFUNC 0x80 - #define PHY_REVISION_MASK 0xFFFFFFF0 #define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ #define MAX_PHY_MULTI_PAGE_REG 0xF diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index dc553c51d79a..85da20778e0f 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -156,7 +156,7 @@ static int e1000_get_link_ksettings(struct net_device *netdev, speed = adapter->link_speed; cmd->base.duplex = adapter->link_duplex - 1; } - } else if (!pm_runtime_suspended(netdev->dev.parent)) { + } else { u32 status = er32(STATUS); if (status & E1000_STATUS_LU) { @@ -274,16 +274,13 @@ static int e1000_set_link_ksettings(struct net_device *netdev, ethtool_convert_link_mode_to_legacy_u32(&advertising, cmd->link_modes.advertising); - pm_runtime_get_sync(netdev->dev.parent); - /* When SoL/IDER sessions are active, autoneg/speed/duplex * cannot be changed */ if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) { e_err("Cannot change link characteristics when SoL/IDER is active.\n"); - ret_val = -EINVAL; - goto out; + return -EINVAL; } /* MDI setting is only allowed when autoneg enabled because @@ -291,16 +288,13 @@ static int e1000_set_link_ksettings(struct net_device *netdev, * duplex is forced. */ if (cmd->base.eth_tp_mdix_ctrl) { - if (hw->phy.media_type != e1000_media_type_copper) { - ret_val = -EOPNOTSUPP; - goto out; - } + if (hw->phy.media_type != e1000_media_type_copper) + return -EOPNOTSUPP; if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && (cmd->base.autoneg != AUTONEG_ENABLE)) { e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); - ret_val = -EINVAL; - goto out; + return -EINVAL; } } @@ -347,7 +341,6 @@ static int e1000_set_link_ksettings(struct net_device *netdev, } out: - pm_runtime_put_sync(netdev->dev.parent); clear_bit(__E1000_RESETTING, &adapter->state); return ret_val; } @@ -383,8 +376,6 @@ static int e1000_set_pauseparam(struct net_device *netdev, while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) usleep_range(1000, 2000); - pm_runtime_get_sync(netdev->dev.parent); - if (adapter->fc_autoneg == AUTONEG_ENABLE) { hw->fc.requested_mode = e1000_fc_default; if (netif_running(adapter->netdev)) { @@ -417,7 +408,6 @@ static int e1000_set_pauseparam(struct net_device *netdev, } out: - pm_runtime_put_sync(netdev->dev.parent); clear_bit(__E1000_RESETTING, &adapter->state); return retval; } @@ -448,8 +438,6 @@ static void e1000_get_regs(struct net_device *netdev, u32 *regs_buff = p; u16 phy_data; - pm_runtime_get_sync(netdev->dev.parent); - memset(p, 0, E1000_REGS_LEN * sizeof(u32)); regs->version = (1u << 24) | @@ -495,8 +483,6 @@ static void e1000_get_regs(struct net_device *netdev, e1e_rphy(hw, MII_STAT1000, &phy_data); regs_buff[24] = (u32)phy_data; /* phy local receiver status */ regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ - - pm_runtime_put_sync(netdev->dev.parent); } static int e1000_get_eeprom_len(struct net_device *netdev) @@ -529,8 +515,6 @@ static int e1000_get_eeprom(struct net_device *netdev, if (!eeprom_buff) return 
-ENOMEM; - pm_runtime_get_sync(netdev->dev.parent); - if (hw->nvm.type == e1000_nvm_eeprom_spi) { ret_val = e1000_read_nvm(hw, first_word, last_word - first_word + 1, @@ -544,8 +528,6 @@ static int e1000_get_eeprom(struct net_device *netdev, } } - pm_runtime_put_sync(netdev->dev.parent); - if (ret_val) { /* a read error occurred, throw away the result */ memset(eeprom_buff, 0xff, sizeof(u16) * @@ -595,8 +577,6 @@ static int e1000_set_eeprom(struct net_device *netdev, ptr = (void *)eeprom_buff; - pm_runtime_get_sync(netdev->dev.parent); - if (eeprom->offset & 1) { /* need read/modify/write of first changed EEPROM word */ /* only the second byte of the word is being modified */ @@ -637,7 +617,6 @@ static int e1000_set_eeprom(struct net_device *netdev, ret_val = e1000e_update_nvm_checksum(hw); out: - pm_runtime_put_sync(netdev->dev.parent); kfree(eeprom_buff); return ret_val; } @@ -733,8 +712,6 @@ static int e1000_set_ringparam(struct net_device *netdev, } } - pm_runtime_get_sync(netdev->dev.parent); - e1000e_down(adapter, true); /* We can't just free everything and then setup again, because the @@ -773,7 +750,6 @@ err_setup_rx: e1000e_free_tx_resources(temp_tx); err_setup: e1000e_up(adapter); - pm_runtime_put_sync(netdev->dev.parent); free_temp: vfree(temp_tx); vfree(temp_rx); @@ -1816,8 +1792,6 @@ static void e1000_diag_test(struct net_device *netdev, u8 autoneg; bool if_running = netif_running(netdev); - pm_runtime_get_sync(netdev->dev.parent); - set_bit(__E1000_TESTING, &adapter->state); if (!if_running) { @@ -1903,8 +1877,6 @@ static void e1000_diag_test(struct net_device *netdev, } msleep_interruptible(4 * 1000); - - pm_runtime_put_sync(netdev->dev.parent); } static void e1000_get_wol(struct net_device *netdev, @@ -2046,15 +2018,11 @@ static int e1000_set_coalesce(struct net_device *netdev, adapter->itr_setting = adapter->itr & ~3; } - pm_runtime_get_sync(netdev->dev.parent); - if (adapter->itr_setting != 0) e1000e_write_itr(adapter, adapter->itr); else e1000e_write_itr(adapter, 0); - pm_runtime_put_sync(netdev->dev.parent); - return 0; } @@ -2068,9 +2036,7 @@ static int e1000_nway_reset(struct net_device *netdev) if (!adapter->hw.mac.autoneg) return -EINVAL; - pm_runtime_get_sync(netdev->dev.parent); e1000e_reinit_locked(adapter); - pm_runtime_put_sync(netdev->dev.parent); return 0; } @@ -2084,12 +2050,8 @@ static void e1000_get_ethtool_stats(struct net_device *netdev, int i; char *p = NULL; - pm_runtime_get_sync(netdev->dev.parent); - dev_get_stats(netdev, &net_stats); - pm_runtime_put_sync(netdev->dev.parent); - for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { switch (e1000_gstrings_stats[i].type) { case NETDEV_STATS: @@ -2146,9 +2108,7 @@ static int e1000_get_rxnfc(struct net_device *netdev, struct e1000_hw *hw = &adapter->hw; u32 mrqc; - pm_runtime_get_sync(netdev->dev.parent); mrqc = er32(MRQC); - pm_runtime_put_sync(netdev->dev.parent); if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK)) return 0; @@ -2211,13 +2171,9 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_keee *edata) return -EOPNOTSUPP; } - pm_runtime_get_sync(netdev->dev.parent); - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) { - pm_runtime_put_sync(netdev->dev.parent); + if (ret_val) return -EBUSY; - } /* EEE Capability */ ret_val = e1000_read_emi_reg_locked(hw, cap_addr, &phy_data); @@ -2257,8 +2213,6 @@ release: if (ret_val) ret_val = -ENODATA; - pm_runtime_put_sync(netdev->dev.parent); - return ret_val; } @@ -2299,16 +2253,12 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_keee 
*edata) hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled; - pm_runtime_get_sync(netdev->dev.parent); - /* reset the link */ if (netif_running(netdev)) e1000e_reinit_locked(adapter); else e1000e_reset(adapter); - pm_runtime_put_sync(netdev->dev.parent); - return 0; } diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 3692fce20195..220d62fca55d 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -6038,7 +6038,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) adapter->max_frame_size = max_frame; netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); pm_runtime_get_sync(netdev->dev.parent); @@ -6968,13 +6968,13 @@ static int __e1000_resume(struct pci_dev *pdev) return 0; } -static __maybe_unused int e1000e_pm_prepare(struct device *dev) +static int e1000e_pm_prepare(struct device *dev) { return pm_runtime_suspended(dev) && pm_suspend_via_firmware(); } -static __maybe_unused int e1000e_pm_suspend(struct device *dev) +static int e1000e_pm_suspend(struct device *dev) { struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); struct e1000_adapter *adapter = netdev_priv(netdev); @@ -6997,7 +6997,7 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev) return rc; } -static __maybe_unused int e1000e_pm_resume(struct device *dev) +static int e1000e_pm_resume(struct device *dev) { struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); struct e1000_adapter *adapter = netdev_priv(netdev); @@ -7031,7 +7031,7 @@ static __maybe_unused int e1000e_pm_runtime_idle(struct device *dev) return -EBUSY; } -static __maybe_unused int e1000e_pm_runtime_resume(struct device *dev) +static int e1000e_pm_runtime_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); @@ -7050,7 +7050,7 @@ static __maybe_unused int e1000e_pm_runtime_resume(struct device *dev) return rc; } -static __maybe_unused int e1000e_pm_runtime_suspend(struct device *dev) +static int e1000e_pm_runtime_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); @@ -7937,8 +7937,7 @@ static const struct pci_device_id e1000_pci_tbl[] = { }; MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); -static const struct dev_pm_ops e1000_pm_ops = { -#ifdef CONFIG_PM_SLEEP +static const struct dev_pm_ops e1000e_pm_ops = { .prepare = e1000e_pm_prepare, .suspend = e1000e_pm_suspend, .resume = e1000e_pm_resume, @@ -7946,9 +7945,8 @@ static const struct dev_pm_ops e1000_pm_ops = { .thaw = e1000e_pm_thaw, .poweroff = e1000e_pm_suspend, .restore = e1000e_pm_resume, -#endif - SET_RUNTIME_PM_OPS(e1000e_pm_runtime_suspend, e1000e_pm_runtime_resume, - e1000e_pm_runtime_idle) + RUNTIME_PM_OPS(e1000e_pm_runtime_suspend, e1000e_pm_runtime_resume, + e1000e_pm_runtime_idle) }; /* PCI Device API Driver */ @@ -7957,9 +7955,7 @@ static struct pci_driver e1000_driver = { .id_table = e1000_pci_tbl, .probe = e1000_probe, .remove = e1000_remove, - .driver = { - .pm = &e1000_pm_ops, - }, + .driver.pm = pm_ptr(&e1000e_pm_ops), .shutdown = e1000_shutdown, .err_handler = &e1000_err_handler }; diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index 93544f1cc2a5..f7ae0e0aa4a4 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -157,7 +157,7 @@ 
s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) * the lower time out */ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { - usleep_range(50, 60); + udelay(50); mdic = er32(MDIC); if (mdic & E1000_MDIC_READY) break; @@ -181,7 +181,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) * reading duplicate data in the next MDIC transaction. */ if (hw->mac.type == e1000_pch2lan) - usleep_range(100, 150); + udelay(100); if (success) { *data = (u16)mdic; @@ -237,7 +237,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) * the lower time out */ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { - usleep_range(50, 60); + udelay(50); mdic = er32(MDIC); if (mdic & E1000_MDIC_READY) break; @@ -261,7 +261,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) * reading duplicate data in the next MDIC transaction. */ if (hw->mac.type == e1000_pch2lan) - usleep_range(100, 150); + udelay(100); if (success) return 0; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index d748b98274e7..92de609b7218 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -2342,7 +2342,7 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface) * suspend or hibernation. This function does not need to handle lower PCIe * device state as the stack takes care of that for us. **/ -static int __maybe_unused fm10k_resume(struct device *dev) +static int fm10k_resume(struct device *dev) { struct fm10k_intfc *interface = dev_get_drvdata(dev); struct net_device *netdev = interface->netdev; @@ -2369,7 +2369,7 @@ static int __maybe_unused fm10k_resume(struct device *dev) * system suspend or hibernation. This function does not need to handle lower * PCIe device state as the stack takes care of that for us. 
**/ -static int __maybe_unused fm10k_suspend(struct device *dev) +static int fm10k_suspend(struct device *dev) { struct fm10k_intfc *interface = dev_get_drvdata(dev); struct net_device *netdev = interface->netdev; @@ -2502,16 +2502,14 @@ static const struct pci_error_handlers fm10k_err_handler = { .reset_done = fm10k_io_reset_done, }; -static SIMPLE_DEV_PM_OPS(fm10k_pm_ops, fm10k_suspend, fm10k_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(fm10k_pm_ops, fm10k_suspend, fm10k_resume); static struct pci_driver fm10k_driver = { .name = fm10k_driver_name, .id_table = fm10k_pci_tbl, .probe = fm10k_probe, .remove = fm10k_remove, - .driver = { - .pm = &fm10k_pm_ops, - }, + .driver.pm = pm_sleep_ptr(&fm10k_pm_ops), .sriov_configure = fm10k_iov_configure, .err_handler = &fm10k_err_handler }; diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 2fbabcdb5bb5..bca2084cc54b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -788,7 +788,6 @@ struct i40e_veb { u16 stats_idx; /* index of VEB parent */ u8 enabled_tc; u16 bridge_mode; /* Bridge Mode (VEB/VEPA) */ - u16 flags; u16 bw_limit; u8 bw_max_quanta; bool is_abs_credits; @@ -1213,7 +1212,7 @@ void i40e_vsi_stop_rings(struct i40e_vsi *vsi); void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi); int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi); int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count); -struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid, +struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 uplink_seid, u16 downlink_seid, u8 enabled_tc); void i40e_veb_release(struct i40e_veb *veb); @@ -1237,8 +1236,8 @@ static inline void i40e_dbg_exit(void) {} int i40e_lan_add_device(struct i40e_pf *pf); int i40e_lan_del_device(struct i40e_pf *pf); void i40e_client_subtask(struct i40e_pf *pf); -void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi); -void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset); +void i40e_notify_client_of_l2_param_changes(struct i40e_pf *pf); +void i40e_notify_client_of_netdev_close(struct i40e_pf *pf, bool reset); void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs); void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id); void i40e_client_update_msix_info(struct i40e_pf *pf); @@ -1374,6 +1373,17 @@ i40e_pf_get_vsi_by_seid(struct i40e_pf *pf, u16 seid) } /** + * i40e_pf_get_main_vsi - get pointer to main VSI + * @pf: pointer to a PF + * + * Return: pointer to main VSI or NULL if it does not exist + **/ +static inline struct i40e_vsi *i40e_pf_get_main_vsi(struct i40e_pf *pf) +{ + return (pf->lan_vsi != I40E_NO_VSI) ? pf->vsi[pf->lan_vsi] : NULL; +} + +/** * i40e_pf_get_veb_by_seid - find VEB by SEID * @pf: pointer to a PF * @seid: SEID of the VSI @@ -1391,4 +1401,15 @@ i40e_pf_get_veb_by_seid(struct i40e_pf *pf, u16 seid) return NULL; } +/** + * i40e_pf_get_main_veb - get pointer to main VEB + * @pf: pointer to a PF + * + * Return: pointer to main VEB or NULL if it does not exist + **/ +static inline struct i40e_veb *i40e_pf_get_main_veb(struct i40e_pf *pf) +{ + return (pf->lan_veb != I40E_NO_VEB) ? 
pf->veb[pf->lan_veb] : NULL; +} + #endif /* _I40E_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c index b32071ee84af..59263551c383 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -101,25 +101,26 @@ i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len) /** * i40e_notify_client_of_l2_param_changes - call the client notify callback - * @vsi: the VSI with l2 param changes + * @pf: PF device pointer * - * If there is a client to this VSI, call the client + * If there is a client, call its callback **/ -void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi) +void i40e_notify_client_of_l2_param_changes(struct i40e_pf *pf) { - struct i40e_pf *pf = vsi->back; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); struct i40e_client_instance *cdev = pf->cinst; struct i40e_params params; if (!cdev || !cdev->client) return; if (!cdev->client->ops || !cdev->client->ops->l2_param_change) { - dev_dbg(&vsi->back->pdev->dev, + dev_dbg(&pf->pdev->dev, "Cannot locate client instance l2_param_change routine\n"); return; } if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) { - dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n"); + dev_dbg(&pf->pdev->dev, + "Client is not open, abort l2 param change\n"); return; } memset(¶ms, 0, sizeof(params)); @@ -157,20 +158,19 @@ static void i40e_client_release_qvlist(struct i40e_info *ldev) /** * i40e_notify_client_of_netdev_close - call the client close callback - * @vsi: the VSI with netdev closed + * @pf: PF device pointer * @reset: true when close called due to a reset pending * * If there is a client to this netdev, call the client with close **/ -void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset) +void i40e_notify_client_of_netdev_close(struct i40e_pf *pf, bool reset) { - struct i40e_pf *pf = vsi->back; struct i40e_client_instance *cdev = pf->cinst; if (!cdev || !cdev->client) return; if (!cdev->client->ops || !cdev->client->ops->close) { - dev_dbg(&vsi->back->pdev->dev, + dev_dbg(&pf->pdev->dev, "Cannot locate client instance close routine\n"); return; } @@ -333,9 +333,9 @@ static int i40e_register_auxiliary_dev(struct i40e_info *ldev, const char *name) **/ static void i40e_client_add_instance(struct i40e_pf *pf) { + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); struct i40e_client_instance *cdev = NULL; struct netdev_hw_addr *mac = NULL; - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); if (!cdev) @@ -399,9 +399,9 @@ void i40e_client_del_instance(struct i40e_pf *pf) **/ void i40e_client_subtask(struct i40e_pf *pf) { - struct i40e_client *client; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); struct i40e_client_instance *cdev; - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_client *client; int ret = 0; if (!test_and_clear_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state)) @@ -665,8 +665,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev, bool is_vf, u32 vf_id, u32 flag, u32 valid_flag) { + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(ldev->pf); struct i40e_pf *pf = ldev->pf; - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_vsi_context ctxt; bool update = true; int err; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index de6ca6295742..e8031f1a9b4f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ 
b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -381,259 +381,6 @@ int i40e_aq_set_rss_key(struct i40e_hw *hw, return i40e_aq_get_set_rss_key(hw, vsi_id, key, true); } -/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the - * hardware to a bit-field that can be used by SW to more easily determine the - * packet type. - * - * Macros are used to shorten the table lines and make this table human - * readable. - * - * We store the PTYPE in the top byte of the bit field - this is just so that - * we can check that the table doesn't have a row missing, as the index into - * the table should be the PTYPE. - * - * Typical work flow: - * - * IF NOT i40e_ptype_lookup[ptype].known - * THEN - * Packet is unknown - * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP - * Use the rest of the fields to look at the tunnels, inner protocols, etc - * ELSE - * Use the enum i40e_rx_l2_ptype to decode the packet type - * ENDIF - */ - -/* macro to make the table lines short, use explicit indexing with [PTYPE] */ -#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ - [PTYPE] = { \ - 1, \ - I40E_RX_PTYPE_OUTER_##OUTER_IP, \ - I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \ - I40E_RX_PTYPE_##OUTER_FRAG, \ - I40E_RX_PTYPE_TUNNEL_##T, \ - I40E_RX_PTYPE_TUNNEL_END_##TE, \ - I40E_RX_PTYPE_##TEF, \ - I40E_RX_PTYPE_INNER_PROT_##I, \ - I40E_RX_PTYPE_PAYLOAD_LAYER_##PL } - -#define I40E_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } - -/* shorter macros makes the table fit but are terse */ -#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG -#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG -#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC - -/* Lookup table mapping in the 8-bit HW PTYPE to the bit field for decoding */ -struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = { - /* L2 Packet types */ - I40E_PTT_UNUSED_ENTRY(0), - I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2), - I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - I40E_PTT_UNUSED_ENTRY(4), - I40E_PTT_UNUSED_ENTRY(5), - I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - I40E_PTT_UNUSED_ENTRY(8), - I40E_PTT_UNUSED_ENTRY(9), - I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), - I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - - /* Non Tunneled IPv4 */ - I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(25), - I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), - I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), - I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), - - /* IPv4 --> IPv4 */ - I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), - 
I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), - I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(32), - I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), - I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), - I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> IPv6 */ - I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), - I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), - I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(39), - I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), - I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), - I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT */ - I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), - - /* IPv4 --> GRE/NAT --> IPv4 */ - I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), - I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), - I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(47), - I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), - I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), - I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> IPv6 */ - I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), - I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), - I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(54), - I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), - I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), - I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> MAC */ - I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), - - /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ - I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), - I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), - I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(62), - I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), - I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), - I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ - I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), - I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), - I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(69), - I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), - I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), - I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> MAC/VLAN */ - I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), - - /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ - I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), - I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), - I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(77), - I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), - I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), - I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), - - /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ - I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, 
FRG, NONE, PAY3), - I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), - I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(84), - I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), - I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), - I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), - - /* Non Tunneled IPv6 */ - I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(91), - I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), - I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), - I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), - - /* IPv6 --> IPv4 */ - I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), - I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), - I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(98), - I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), - I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), - I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> IPv6 */ - I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), - I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), - I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(105), - I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), - I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), - I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT */ - I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> IPv4 */ - I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), - I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), - I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(113), - I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), - I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), - I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> IPv6 */ - I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), - I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), - I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(120), - I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), - I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), - I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC */ - I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ - I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), - I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), - I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(128), - I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), - I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), - I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ - I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), - I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), - I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), - 
I40E_PTT_UNUSED_ENTRY(135), - I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), - I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), - I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC/VLAN */ - I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ - I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), - I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), - I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(143), - I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), - I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), - I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ - I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), - I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), - I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(150), - I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), - I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), - I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), - - /* unused entries */ - [154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } -}; - /** * i40e_init_shared_code - Initialize the shared code * @hw: pointer to hardware structure diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c index 2f53f0f53bc3..daa9f2c42f70 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ddp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c @@ -407,8 +407,9 @@ static int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size, **/ static int i40e_ddp_restore(struct i40e_pf *pf) { + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); + struct net_device *netdev = vsi->netdev; struct i40e_ddp_old_profile_list *entry; - struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev; int status = 0; if (!list_empty(&pf->ddp_old_prof)) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index f9ba45f596c9..abf624d770e6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -53,6 +53,7 @@ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { struct i40e_pf *pf = filp->private_data; + struct i40e_vsi *main_vsi; int bytes_not_copied; int buf_size = 256; char *buf; @@ -68,8 +69,8 @@ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer, if (!buf) return -ENOSPC; - len = snprintf(buf, buf_size, "%s: %s\n", - pf->vsi[pf->lan_vsi]->netdev->name, + main_vsi = i40e_pf_get_main_vsi(pf); + len = snprintf(buf, buf_size, "%s: %s\n", main_vsi->netdev->name, i40e_dbg_command_buf); bytes_not_copied = copy_to_user(buffer, buf, len); @@ -128,7 +129,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) dev_info(&pf->pdev->dev, " state[%d] = %08lx\n", i, vsi->state[i]); - if (vsi == pf->vsi[pf->lan_vsi]) + if (vsi->type == I40E_VSI_MAIN) dev_info(&pf->pdev->dev, " MAC address: %pM Port MAC: %pM\n", pf->hw.mac.addr, pf->hw.mac.port_addr); @@ -786,7 +787,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp, cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid); if 
(cnt == 0) { /* default to PF VSI */ - vsi_seid = pf->vsi[pf->lan_vsi]->seid; + vsi = i40e_pf_get_main_vsi(pf); + vsi_seid = vsi->seid; } else if (vsi_seid < 0) { dev_info(&pf->pdev->dev, "add VSI %d: bad vsi seid\n", vsi_seid); @@ -867,7 +869,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, goto command_write_done; } - veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid, enabled_tc); + veb = i40e_veb_setup(pf, uplink_seid, vsi_seid, enabled_tc); if (veb) dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid); else @@ -1030,7 +1032,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, goto command_write_done; } - vsi = pf->vsi[pf->lan_vsi]; + vsi = i40e_pf_get_main_vsi(pf); switch_id = le16_to_cpu(vsi->info.switch_id) & I40E_AQ_VSI_SW_ID_MASK; @@ -1380,6 +1382,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp, dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n", i40e_get_current_fd_count(pf)); } else if (strncmp(cmd_buf, "lldp", 4) == 0) { + /* Get main VSI */ + struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf); + if (strncmp(&cmd_buf[5], "stop", 4) == 0) { int ret; @@ -1391,10 +1396,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp, goto command_write_done; } ret = i40e_aq_add_rem_control_packet_filter(&pf->hw, - pf->hw.mac.addr, - ETH_P_LLDP, 0, - pf->vsi[pf->lan_vsi]->seid, - 0, true, NULL, NULL); + pf->hw.mac.addr, ETH_P_LLDP, 0, + main_vsi->seid, 0, true, NULL, + NULL); if (ret) { dev_info(&pf->pdev->dev, "%s: Add Control Packet Filter AQ command failed =0x%x\n", @@ -1409,10 +1413,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp, int ret; ret = i40e_aq_add_rem_control_packet_filter(&pf->hw, - pf->hw.mac.addr, - ETH_P_LLDP, 0, - pf->vsi[pf->lan_vsi]->seid, - 0, false, NULL, NULL); + pf->hw.mac.addr, ETH_P_LLDP, 0, + main_vsi->seid, 0, false, NULL, + NULL); if (ret) { dev_info(&pf->pdev->dev, "%s: Remove Control Packet Filter AQ command failed =0x%x\n", @@ -1639,6 +1642,7 @@ static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { struct i40e_pf *pf = filp->private_data; + struct i40e_vsi *main_vsi; int bytes_not_copied; int buf_size = 256; char *buf; @@ -1654,8 +1658,8 @@ static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer, if (!buf) return -ENOSPC; - len = snprintf(buf, buf_size, "%s: %s\n", - pf->vsi[pf->lan_vsi]->netdev->name, + main_vsi = i40e_pf_get_main_vsi(pf); + len = snprintf(buf, buf_size, "%s: %s\n", main_vsi->netdev->name, i40e_dbg_netdev_ops_buf); bytes_not_copied = copy_to_user(buffer, buf, len); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 42e7e6cdaa6d..4e28785c9fb2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1241,7 +1241,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev, i40e_partition_setting_complaint(pf); return -EOPNOTSUPP; } - if (vsi != pf->vsi[pf->lan_vsi]) + if (vsi->type != I40E_VSI_MAIN) return -EOPNOTSUPP; if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET && hw->phy.media_type != I40E_MEDIA_TYPE_FIBER && @@ -1710,7 +1710,7 @@ static int i40e_set_pauseparam(struct net_device *netdev, return -EOPNOTSUPP; } - if (vsi != pf->vsi[pf->lan_vsi]) + if (vsi->type != I40E_VSI_MAIN) return -EOPNOTSUPP; is_an = hw_link_info->an_info & I40E_AQ_AN_COMPLETED; @@ -2029,7 +2029,7 @@ static void i40e_get_ringparam(struct net_device *netdev, { struct 
i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); ring->rx_max_pending = i40e_get_max_num_descriptors(pf); ring->tx_max_pending = i40e_get_max_num_descriptors(pf); @@ -2292,7 +2292,7 @@ static int i40e_get_stats_count(struct net_device *netdev) struct i40e_pf *pf = vsi->back; int stats_len; - if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) + if (vsi->type == I40E_VSI_MAIN && pf->hw.partition_id == 1) stats_len = I40E_PF_STATS_LEN; else stats_len = I40E_VSI_STATS_LEN; @@ -2422,17 +2422,14 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, } rcu_read_unlock(); - if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) + if (vsi->type != I40E_VSI_MAIN || pf->hw.partition_id != 1) goto check_data_pointer; - veb_stats = ((pf->lan_veb != I40E_NO_VEB) && - (pf->lan_veb < I40E_MAX_VEB) && - test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags)); + veb = i40e_pf_get_main_veb(pf); + veb_stats = veb && test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags); - if (veb_stats) { - veb = pf->veb[pf->lan_veb]; + if (veb_stats) i40e_update_veb_stats(veb); - } /* If veb stats aren't enabled, pass NULL instead of the veb so that * we initialize stats to zero and update the data pointer @@ -2495,7 +2492,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) "rx", i); } - if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) + if (vsi->type != I40E_VSI_MAIN || pf->hw.partition_id != 1) goto check_data_pointer; i40e_add_stat_strings(&data, i40e_gstrings_veb_stats); @@ -2792,7 +2789,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) return -EOPNOTSUPP; } - if (vsi != pf->vsi[pf->lan_vsi]) + if (vsi->type != I40E_VSI_MAIN) return -EOPNOTSUPP; /* NVM bit on means WoL disabled for the port */ @@ -3370,6 +3367,7 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf, struct i40e_rx_flow_userdef userdef = {0}; struct i40e_fdir_filter *rule = NULL; struct hlist_node *node2; + struct i40e_vsi *vsi; u64 input_set; u16 index; @@ -3493,9 +3491,8 @@ no_input_set: fsp->flow_type |= FLOW_EXT; } - if (rule->dest_vsi != pf->vsi[pf->lan_vsi]->id) { - struct i40e_vsi *vsi; - + vsi = i40e_pf_get_main_vsi(pf); + if (rule->dest_vsi != vsi->id) { vsi = i40e_find_vsi_from_id(pf, rule->dest_vsi); if (vsi && vsi->type == I40E_VSI_SRIOV) { /* VFs are zero-indexed by the driver, but ethtool diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index ffb9f9f15c52..1f188c052828 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -100,6 +100,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX MODULE_AUTHOR("Intel Corporation, <[email protected]>"); MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver"); +MODULE_IMPORT_NS(LIBIE); MODULE_LICENSE("GPL v2"); static struct workqueue_struct *i40e_wq; @@ -989,7 +990,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) ns->tx_dropped = es->tx_discards; /* pull in a couple PF stats if this is the main vsi */ - if (vsi == pf->vsi[pf->lan_vsi]) { + if (vsi->type == I40E_VSI_MAIN) { ns->rx_crc_errors = pf->stats.crc_errors; ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes; ns->rx_length_errors = pf->stats.rx_length_errors; @@ -1234,7 +1235,7 @@ void i40e_update_stats(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; 
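The i40e hunks around here replace open-coded pf->vsi[pf->lan_vsi] lookups and pointer comparisons with the new i40e_pf_get_main_vsi()/i40e_pf_get_main_veb() accessors introduced in i40e.h above. A minimal, self-contained sketch of that accessor pattern, using hypothetical stand-in types and names rather than the driver's own structures, might look like this:

	#include <stddef.h>

	#define NO_VSI 0xFFFF			/* hypothetical sentinel, in the spirit of I40E_NO_VSI */

	struct vsi {
		int type;			/* e.g. "main" vs. other VSI kinds */
	};

	struct pf {
		struct vsi *vsi[16];		/* VSI table */
		unsigned short lan_vsi;		/* index of the main (LAN) VSI, or NO_VSI */
	};

	/* Accessor: return the main VSI, or NULL if it has not been set up yet. */
	static inline struct vsi *pf_get_main_vsi(struct pf *pf)
	{
		return (pf->lan_vsi != NO_VSI) ? pf->vsi[pf->lan_vsi] : NULL;
	}

	/* Callers test the returned pointer instead of comparing raw indices. */
	static void update_stats(struct pf *pf)
	{
		struct vsi *main_vsi = pf_get_main_vsi(pf);

		if (!main_vsi)
			return;
		/* ... update statistics for main_vsi ... */
	}

Returning NULL while the index still holds the sentinel lets callers bail out cleanly instead of indexing the table with an out-of-range value, which appears to be the point of the conversions in these hunks.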
- if (vsi == pf->vsi[pf->lan_vsi]) + if (vsi->type == I40E_VSI_MAIN) i40e_update_pf_stats(pf); i40e_update_vsi_stats(vsi); @@ -2475,12 +2476,12 @@ i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name, **/ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc) { - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); struct i40e_hw *hw = &pf->hw; int aq_ret; if (vsi->type == I40E_VSI_MAIN && - pf->lan_veb != I40E_NO_VEB && + i40e_pf_get_main_veb(pf) && !test_bit(I40E_FLAG_MFP_ENA, pf->flags)) { /* set defport ON for Main VSI instead of true promisc * this way we will get all unicast/multicast and VLAN @@ -2960,7 +2961,7 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu) netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); if (netif_running(netdev)) i40e_vsi_reinit_locked(vsi); set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); @@ -4322,7 +4323,7 @@ static irqreturn_t i40e_intr(int irq, void *data) /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */ if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) { - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); struct i40e_q_vector *q_vector = vsi->q_vectors[0]; /* We do not have a way to disarm Queue causes while leaving @@ -5472,7 +5473,7 @@ static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg) **/ static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf) { - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); u8 num_tc = vsi->mqprio_qopt.qopt.num_tc; u8 enabled_tc = 1, i; @@ -5489,13 +5490,14 @@ static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf) **/ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) { - struct i40e_hw *hw = &pf->hw; u8 i, enabled_tc = 1; u8 num_tc = 0; - struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; - if (i40e_is_tc_mqprio_enabled(pf)) - return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc; + if (i40e_is_tc_mqprio_enabled(pf)) { + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); + + return vsi->mqprio_qopt.qopt.num_tc; + } /* If neither MQPRIO nor DCB is enabled, then always use single TC */ if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags)) @@ -5503,7 +5505,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) /* SFP mode will be enabled for all TCs on port */ if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags)) - return i40e_dcb_get_num_tc(dcbcfg); + return i40e_dcb_get_num_tc(&pf->hw.local_dcbx_config); /* MFP mode return count of enabled TCs for this PF */ if (pf->hw.func_caps.iscsi) @@ -5916,6 +5918,28 @@ out: } /** + * i40e_vsi_reconfig_tc - Reconfigure VSI Tx Scheduler for stored TC map + * @vsi: VSI to be reconfigured + * + * This reconfigures a particular VSI for TCs that are mapped to the + * TC bitmap stored previously for the VSI. + * + * Context: It is expected that the VSI queues have been quisced before + * calling this function. 
+ * + * Return: 0 on success, negative value on failure + **/ +static int i40e_vsi_reconfig_tc(struct i40e_vsi *vsi) +{ + u8 enabled_tc; + + enabled_tc = vsi->tc_config.enabled_tc; + vsi->tc_config.enabled_tc = 0; + + return i40e_vsi_config_tc(vsi, enabled_tc); +} + +/** * i40e_get_link_speed - Returns link speed for the interface * @vsi: VSI to be configured * @@ -6477,6 +6501,7 @@ static inline int i40e_setup_hw_channel(struct i40e_pf *pf, static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi, struct i40e_channel *ch) { + struct i40e_vsi *main_vsi; u8 vsi_type; u16 seid; int ret; @@ -6490,7 +6515,8 @@ static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi, } /* underlying switching element */ - seid = pf->vsi[pf->lan_vsi]->uplink_seid; + main_vsi = i40e_pf_get_main_vsi(pf); + seid = main_vsi->uplink_seid; /* create channel (VSI), configure TX rings */ ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type); @@ -6808,7 +6834,7 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf) /* - Enable all TCs for the LAN VSI * - For all others keep them at TC0 for now */ - if (v == pf->lan_vsi) + if (vsi->type == I40E_VSI_MAIN) tc_map = i40e_pf_get_tc_map(pf); else tc_map = I40E_DEFAULT_TRAFFIC_CLASS; @@ -7047,7 +7073,9 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg) /* Configure Rx Packet Buffers in HW */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - mfs_tc[i] = pf->vsi[pf->lan_vsi]->netdev->mtu; + struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf); + + mfs_tc[i] = main_vsi->netdev->mtu; mfs_tc[i] += I40E_PACKET_HDR_PAD; } @@ -8643,6 +8671,10 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, flow_rule_match_control(rule, &match); addr_type = match.key->addr_type; + + if (flow_rule_has_control_flags(match.mask->flags, + f->common.extack)) + return -EOPNOTSUPP; } if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { @@ -9113,7 +9145,7 @@ err_setup_rx: i40e_vsi_free_rx_resources(vsi); err_setup_tx: i40e_vsi_free_tx_resources(vsi); - if (vsi == pf->vsi[pf->lan_vsi]) + if (vsi->type == I40E_VSI_MAIN) i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); return err; @@ -9804,7 +9836,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n"); } else { /* replay sideband filters */ - i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]); + i40e_fdir_filter_restore(i40e_pf_get_main_vsi(pf)); if (!disable_atr && !pf->fd_tcp4_filter_cnt) clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); @@ -9902,7 +9934,8 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up) **/ static void i40e_link_event(struct i40e_pf *pf) { - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); + struct i40e_veb *veb = i40e_pf_get_main_veb(pf); u8 new_link_speed, old_link_speed; bool new_link, old_link; int status; @@ -9942,8 +9975,8 @@ static void i40e_link_event(struct i40e_pf *pf) /* Notify the base of the switch tree connected to * the link. Floating VEBs are not notified. 
*/ - if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb]) - i40e_veb_link_event(pf->veb[pf->lan_veb], new_link); + if (veb) + i40e_veb_link_event(veb, new_link); else i40e_vsi_link_event(vsi, new_link); @@ -10273,7 +10306,7 @@ static void i40e_verify_eeprom(struct i40e_pf *pf) **/ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) { - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); struct i40e_vsi_context ctxt; int ret; @@ -10309,7 +10342,7 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) **/ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) { - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); struct i40e_vsi_context ctxt; int ret; @@ -10385,7 +10418,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb) if (veb->uplink_seid == pf->mac_seid) { /* Check that the LAN VSI has VEB owning flag set */ - ctl_vsi = pf->vsi[pf->lan_vsi]; + ctl_vsi = i40e_pf_get_main_vsi(pf); if (WARN_ON(ctl_vsi->veb_idx != veb->idx || !(ctl_vsi->flags & I40E_VSI_FLAG_VEB_OWNER))) { @@ -10528,7 +10561,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi); **/ static void i40e_fdir_sb_setup(struct i40e_pf *pf) { - struct i40e_vsi *vsi; + struct i40e_vsi *main_vsi, *vsi; /* quick workaround for an NVM issue that leaves a critical register * uninitialized @@ -10553,8 +10586,8 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf) /* create a new VSI if none exists */ if (!vsi) { - vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, - pf->vsi[pf->lan_vsi]->seid, 0); + main_vsi = i40e_pf_get_main_vsi(pf); + vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, main_vsi->seid, 0); if (!vsi) { dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); @@ -10833,7 +10866,7 @@ static int i40e_reset(struct i40e_pf *pf) static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) { const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf); - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); struct i40e_hw *hw = &pf->hw; struct i40e_veb *veb; int ret; @@ -10842,7 +10875,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) && is_recovery_mode_reported) - i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev); + i40e_set_ethtool_ops(vsi->netdev); if (test_bit(__I40E_DOWN, pf->state) && !test_bit(__I40E_RECOVERY_MODE, pf->state)) @@ -11266,7 +11299,7 @@ static void i40e_service_task(struct work_struct *work) return; if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) { - i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]); + i40e_detect_recover_hung(pf); i40e_sync_filters_subtask(pf); i40e_reset_subtask(pf); i40e_handle_mdd_event(pf); @@ -11275,14 +11308,12 @@ static void i40e_service_task(struct work_struct *work) i40e_fdir_reinit_subtask(pf); if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) { /* Client subtask will reopen next time through. 
*/ - i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], - true); + i40e_notify_client_of_netdev_close(pf, true); } else { i40e_client_subtask(pf); if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE, pf->state)) - i40e_notify_client_of_l2_param_changes( - pf->vsi[pf->lan_vsi]); + i40e_notify_client_of_l2_param_changes(pf); } i40e_sync_filters_subtask(pf); } else { @@ -11990,7 +12021,7 @@ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi) /* if not MSIX, give the one vector only to the LAN VSI */ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) num_q_vectors = vsi->num_q_vectors; - else if (vsi == pf->vsi[pf->lan_vsi]) + else if (vsi->type == I40E_VSI_MAIN) num_q_vectors = 1; else return -EINVAL; @@ -12396,7 +12427,7 @@ void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut, **/ static int i40e_pf_config_rss(struct i40e_pf *pf) { - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); u8 seed[I40E_HKEY_ARRAY_SIZE]; u8 *lut; struct i40e_hw *hw = &pf->hw; @@ -12468,7 +12499,7 @@ static int i40e_pf_config_rss(struct i40e_pf *pf) **/ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) { - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); int new_rss_size; if (!test_bit(I40E_FLAG_RSS_ENA, pf->flags)) @@ -13107,7 +13138,7 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, int rem; /* Only for PF VSI for now */ - if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) + if (vsi->type != I40E_VSI_MAIN) return -EOPNOTSUPP; /* Find the HW bridge for PF VSI */ @@ -13117,20 +13148,16 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, if (!br_spec) return -EINVAL; - nla_for_each_nested(attr, br_spec, rem) { - __u16 mode; - - if (nla_type(attr) != IFLA_BRIDGE_MODE) - continue; + nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) { + __u16 mode = nla_get_u16(attr); - mode = nla_get_u16(attr); if ((mode != BRIDGE_MODE_VEPA) && (mode != BRIDGE_MODE_VEB)) return -EINVAL; /* Insert a new HW bridge */ if (!veb) { - veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, + veb = i40e_veb_setup(pf, vsi->uplink_seid, vsi->seid, vsi->tc_config.enabled_tc); if (veb) { veb->bridge_mode = mode; @@ -13179,7 +13206,7 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct i40e_veb *veb; /* Only for PF VSI for now */ - if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) + if (vsi->type != I40E_VSI_MAIN) return -EOPNOTSUPP; /* Find the HW bridge for the PF VSI */ @@ -13761,9 +13788,10 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) * the end, which is 4 bytes long, so force truncation of the * original name by IFNAMSIZ - 4 */ - snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d", - IFNAMSIZ - 4, - pf->vsi[pf->lan_vsi]->netdev->name); + struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf); + + snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d", IFNAMSIZ - 4, + main_vsi->netdev->name); eth_random_addr(mac_addr); spin_lock_bh(&vsi->mac_filter_hash_lock); @@ -14130,8 +14158,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi) vsi->seid, vsi->uplink_seid); return -ENODEV; } - if (vsi == pf->vsi[pf->lan_vsi] && - !test_bit(__I40E_DOWN, pf->state)) { + if (vsi->type == I40E_VSI_MAIN && !test_bit(__I40E_DOWN, pf->state)) { dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); return -ENODEV; } @@ -14275,9 +14302,9 @@ vector_setup_out: **/ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) { + struct i40e_vsi *main_vsi; u16 alloc_queue_pairs; struct i40e_pf *pf; - u8 enabled_tc; int 
ret; if (!vsi) @@ -14309,10 +14336,10 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) /* Update the FW view of the VSI. Force a reset of TC and queue * layout configurations. */ - enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; - pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; - pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; - i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); + main_vsi = i40e_pf_get_main_vsi(pf); + main_vsi->seid = pf->main_vsi_seid; + i40e_vsi_reconfig_tc(main_vsi); + if (vsi->type == I40E_VSI_MAIN) i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr); @@ -14386,13 +14413,13 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, } if (vsi->uplink_seid == pf->mac_seid) - veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid, + veb = i40e_veb_setup(pf, pf->mac_seid, vsi->seid, vsi->tc_config.enabled_tc); else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) - veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, + veb = i40e_veb_setup(pf, vsi->uplink_seid, vsi->seid, vsi->tc_config.enabled_tc); if (veb) { - if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) { + if (vsi->type != I40E_VSI_MAIN) { dev_info(&vsi->back->pdev->dev, "New VSI creation error, uplink seid of LAN VSI expected.\n"); return NULL; @@ -14783,7 +14810,6 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) /** * i40e_veb_setup - Set up a VEB * @pf: board private structure - * @flags: VEB setup flags * @uplink_seid: the switch element to link to * @vsi_seid: the initial VSI seid * @enabled_tc: Enabled TC bit-map @@ -14796,9 +14822,8 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) * Returns pointer to the successfully allocated VEB sw struct on * success, otherwise returns NULL on failure. **/ -struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, - u16 uplink_seid, u16 vsi_seid, - u8 enabled_tc) +struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 uplink_seid, + u16 vsi_seid, u8 enabled_tc) { struct i40e_vsi *vsi = NULL; struct i40e_veb *veb; @@ -14829,7 +14854,6 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, if (veb_idx < 0) goto err_alloc; veb = pf->veb[veb_idx]; - veb->flags = flags; veb->uplink_seid = uplink_seid; veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1); @@ -14881,7 +14905,8 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf, /* Main VEB? 
*/ if (uplink_seid != pf->mac_seid) break; - if (pf->lan_veb >= I40E_MAX_VEB) { + veb = i40e_pf_get_main_veb(pf); + if (!veb) { int v; /* find existing or else empty VEB */ @@ -14895,12 +14920,15 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf, pf->lan_veb = v; } } - if (pf->lan_veb >= I40E_MAX_VEB) + + /* Try to get again main VEB as pf->lan_veb may have changed */ + veb = i40e_pf_get_main_veb(pf); + if (!veb) break; - pf->veb[pf->lan_veb]->seid = seid; - pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid; - pf->veb[pf->lan_veb]->pf = pf; + veb->seid = seid; + veb->uplink_seid = pf->mac_seid; + veb->pf = pf; break; case I40E_SWITCH_ELEMENT_TYPE_VSI: if (num_reported != 1) @@ -14998,6 +15026,7 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig) **/ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired) { + struct i40e_vsi *main_vsi; u16 flags = 0; int ret; @@ -15042,22 +15071,25 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui } /* first time setup */ - if (pf->lan_vsi == I40E_NO_VSI || reinit) { - struct i40e_vsi *vsi = NULL; + main_vsi = i40e_pf_get_main_vsi(pf); + if (!main_vsi || reinit) { + struct i40e_veb *veb; u16 uplink_seid; /* Set up the PF VSI associated with the PF's main VSI * that is already in the HW switch */ - if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb]) - uplink_seid = pf->veb[pf->lan_veb]->seid; + veb = i40e_pf_get_main_veb(pf); + if (veb) + uplink_seid = veb->seid; else uplink_seid = pf->mac_seid; - if (pf->lan_vsi == I40E_NO_VSI) - vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0); + if (!main_vsi) + main_vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, + uplink_seid, 0); else if (reinit) - vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]); - if (!vsi) { + main_vsi = i40e_vsi_reinit_setup(main_vsi); + if (!main_vsi) { dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n"); i40e_cloud_filter_exit(pf); i40e_fdir_teardown(pf); @@ -15065,13 +15097,10 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui } } else { /* force a reset of TC and queue layout configurations */ - u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; - - pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; - pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; - i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); + main_vsi->seid = pf->main_vsi_seid; + i40e_vsi_reconfig_tc(main_vsi); } - i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]); + i40e_vlan_stripping_disable(main_vsi); i40e_fdir_sb_setup(pf); @@ -15098,7 +15127,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui rtnl_lock(); /* repopulate tunnel port filters */ - udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev); + udp_tunnel_nic_reset_ntf(main_vsi->netdev); if (!lock_acquired) rtnl_unlock(); @@ -15242,6 +15271,7 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf) #define REMAIN(__x) (INFO_STRING_LEN - (__x)) static void i40e_print_features(struct i40e_pf *pf) { + struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf); struct i40e_hw *hw = &pf->hw; char *buf; int i; @@ -15255,8 +15285,7 @@ static void i40e_print_features(struct i40e_pf *pf) i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs); #endif i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d", - pf->hw.func_caps.num_vsis, - pf->vsi[pf->lan_vsi]->num_queue_pairs); + pf->hw.func_caps.num_vsis, main_vsi->num_queue_pairs); if (test_bit(I40E_FLAG_RSS_ENA, pf->flags)) i += 
scnprintf(&buf[i], REMAIN(i), " RSS"); if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags)) @@ -15920,7 +15949,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); goto err_vsis; } - INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list); + + vsi = i40e_pf_get_main_vsi(pf); + INIT_LIST_HEAD(&vsi->ch_list); /* if FDIR VSI was set up, start it now */ vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR); @@ -16223,7 +16254,7 @@ static void i40e_remove(struct pci_dev *pdev) /* Client close must be called explicitly here because the timer * has been stopped. */ - i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); + i40e_notify_client_of_netdev_close(pf, false); i40e_fdir_teardown(pf); @@ -16422,15 +16453,15 @@ static void i40e_pci_error_resume(struct pci_dev *pdev) **/ static void i40e_enable_mc_magic_wake(struct i40e_pf *pf) { + struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf); struct i40e_hw *hw = &pf->hw; u8 mac_addr[6]; u16 flags = 0; int ret; /* Get current MAC address in case it's an LAA */ - if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) { - ether_addr_copy(mac_addr, - pf->vsi[pf->lan_vsi]->netdev->dev_addr); + if (main_vsi && main_vsi->netdev) { + ether_addr_copy(mac_addr, main_vsi->netdev->dev_addr); } else { dev_err(&pf->pdev->dev, "Failed to retrieve MAC address; using default\n"); @@ -16482,7 +16513,7 @@ static void i40e_shutdown(struct pci_dev *pdev) /* Client close must be called explicitly here because the timer * has been stopped. */ - i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); + i40e_notify_client_of_netdev_close(pf, false); if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) && pf->wol_en) @@ -16518,7 +16549,7 @@ static void i40e_shutdown(struct pci_dev *pdev) * i40e_suspend - PM callback for moving to D3 * @dev: generic device information structure **/ -static int __maybe_unused i40e_suspend(struct device *dev) +static int i40e_suspend(struct device *dev) { struct i40e_pf *pf = dev_get_drvdata(dev); struct i40e_hw *hw = &pf->hw; @@ -16536,7 +16567,7 @@ static int __maybe_unused i40e_suspend(struct device *dev) /* Client close must be called explicitly here because the timer * has been stopped. 
*/ - i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); + i40e_notify_client_of_netdev_close(pf, false); if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) && pf->wol_en) @@ -16569,7 +16600,7 @@ static int __maybe_unused i40e_suspend(struct device *dev) * i40e_resume - PM callback for waking up from D3 * @dev: generic device information structure **/ -static int __maybe_unused i40e_resume(struct device *dev) +static int i40e_resume(struct device *dev) { struct i40e_pf *pf = dev_get_drvdata(dev); int err; @@ -16615,16 +16646,14 @@ static const struct pci_error_handlers i40e_err_handler = { .resume = i40e_pci_error_resume, }; -static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume); static struct pci_driver i40e_driver = { .name = i40e_driver_name, .id_table = i40e_pci_tbl, .probe = i40e_probe, .remove = i40e_remove, - .driver = { - .pm = &i40e_pm_ops, - }, + .driver.pm = pm_sleep_ptr(&i40e_pm_ops), .shutdown = i40e_shutdown, .err_handler = &i40e_err_handler, .sriov_configure = i40e_pci_sriov_configure, diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 605fd82f5d20..7f0936f4e05e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -734,37 +734,7 @@ int i40e_validate_nvm_checksum(struct i40e_hw *hw, return ret_code; } -static int i40e_nvmupd_state_init(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno); -static int i40e_nvmupd_state_reading(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno); -static int i40e_nvmupd_state_writing(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *errno); -static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - int *perrno); -static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - int *perrno); -static int i40e_nvmupd_nvm_write(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno); -static int i40e_nvmupd_nvm_read(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno); -static int i40e_nvmupd_exec_aq(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno); -static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno); -static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno); -static inline u8 i40e_nvmupd_get_module(u32 val) +static u8 i40e_nvmupd_get_module(u32 val) { return (u8)(val & I40E_NVM_MOD_PNT_MASK); } @@ -799,121 +769,408 @@ static const char * const i40e_nvm_update_state_str[] = { }; /** - * i40e_nvmupd_command - Process an NVM update command + * i40e_nvmupd_validate_command - Validate given command * @hw: pointer to hardware structure - * @cmd: pointer to nvm update command - * @bytes: pointer to the data buffer + * @cmd: pointer to nvm update command buffer * @perrno: pointer to return error code * - * Dispatches command depending on what update state is current + * Return one of the valid command types or I40E_NVMUPD_INVALID **/ -int i40e_nvmupd_command(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno) +static enum i40e_nvmupd_cmd +i40e_nvmupd_validate_command(struct i40e_hw *hw, struct i40e_nvm_access *cmd, + int *perrno) { enum i40e_nvmupd_cmd upd_cmd; - int status; - - /* assume 
success */ - *perrno = 0; + u8 module, transaction; - /* early check for status command and debug msgs */ - upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); + /* anything that doesn't match a recognized case is an error */ + upd_cmd = I40E_NVMUPD_INVALID; - i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n", - i40e_nvm_update_state_str[upd_cmd], - hw->nvmupd_state, - hw->nvm_release_on_done, hw->nvm_wait_opcode, - cmd->command, cmd->config, cmd->offset, cmd->data_size); + transaction = i40e_nvmupd_get_transaction(cmd->config); + module = i40e_nvmupd_get_module(cmd->config); - if (upd_cmd == I40E_NVMUPD_INVALID) { - *perrno = -EFAULT; + /* limits on data size */ + if (cmd->data_size < 1 || cmd->data_size > I40E_NVMUPD_MAX_DATA) { i40e_debug(hw, I40E_DEBUG_NVM, - "i40e_nvmupd_validate_command returns %d errno %d\n", - upd_cmd, *perrno); + "%s data_size %d\n", __func__, cmd->data_size); + *perrno = -EFAULT; + return I40E_NVMUPD_INVALID; } - /* a status request returns immediately rather than - * going into the state machine - */ - if (upd_cmd == I40E_NVMUPD_STATUS) { - if (!cmd->data_size) { - *perrno = -EFAULT; - return -EINVAL; + switch (cmd->command) { + case I40E_NVM_READ: + switch (transaction) { + case I40E_NVM_CON: + upd_cmd = I40E_NVMUPD_READ_CON; + break; + case I40E_NVM_SNT: + upd_cmd = I40E_NVMUPD_READ_SNT; + break; + case I40E_NVM_LCB: + upd_cmd = I40E_NVMUPD_READ_LCB; + break; + case I40E_NVM_SA: + upd_cmd = I40E_NVMUPD_READ_SA; + break; + case I40E_NVM_EXEC: + if (module == 0xf) + upd_cmd = I40E_NVMUPD_STATUS; + else if (module == 0) + upd_cmd = I40E_NVMUPD_GET_AQ_RESULT; + break; + case I40E_NVM_AQE: + upd_cmd = I40E_NVMUPD_GET_AQ_EVENT; + break; } + break; - bytes[0] = hw->nvmupd_state; - - if (cmd->data_size >= 4) { - bytes[1] = 0; - *((u16 *)&bytes[2]) = hw->nvm_wait_opcode; + case I40E_NVM_WRITE: + switch (transaction) { + case I40E_NVM_CON: + upd_cmd = I40E_NVMUPD_WRITE_CON; + break; + case I40E_NVM_SNT: + upd_cmd = I40E_NVMUPD_WRITE_SNT; + break; + case I40E_NVM_LCB: + upd_cmd = I40E_NVMUPD_WRITE_LCB; + break; + case I40E_NVM_SA: + upd_cmd = I40E_NVMUPD_WRITE_SA; + break; + case I40E_NVM_ERA: + upd_cmd = I40E_NVMUPD_WRITE_ERA; + break; + case I40E_NVM_CSUM: + upd_cmd = I40E_NVMUPD_CSUM_CON; + break; + case (I40E_NVM_CSUM | I40E_NVM_SA): + upd_cmd = I40E_NVMUPD_CSUM_SA; + break; + case (I40E_NVM_CSUM | I40E_NVM_LCB): + upd_cmd = I40E_NVMUPD_CSUM_LCB; + break; + case I40E_NVM_EXEC: + if (module == 0) + upd_cmd = I40E_NVMUPD_EXEC_AQ; + break; } + break; + } - /* Clear error status on read */ - if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) - hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + return upd_cmd; +} - return 0; +/** + * i40e_nvmupd_nvm_erase - Erase an NVM module + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @perrno: pointer to return error code + * + * module, offset, data_size and data are in cmd structure + **/ +static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + int *perrno) +{ + struct i40e_asq_cmd_details cmd_details; + u8 module, transaction; + int status = 0; + bool last; + + transaction = i40e_nvmupd_get_transaction(cmd->config); + module = i40e_nvmupd_get_module(cmd->config); + last = (transaction & I40E_NVM_LCB); + + memset(&cmd_details, 0, sizeof(cmd_details)); + cmd_details.wb_desc = &hw->nvm_wb_desc; + + status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size, + last, 
&cmd_details); + if (status) { + i40e_debug(hw, I40E_DEBUG_NVM, + "%s mod 0x%x off 0x%x len 0x%x\n", + __func__, module, cmd->offset, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, + "%s status %d aq %d\n", + __func__, status, hw->aq.asq_last_status); + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } - /* Clear status even it is not read and log */ - if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) { + return status; +} + +/** + * i40e_nvmupd_nvm_write - Write NVM + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * module, offset, data_size and data are in cmd structure + **/ +static int i40e_nvmupd_nvm_write(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + struct i40e_asq_cmd_details cmd_details; + u8 module, transaction; + u8 preservation_flags; + int status = 0; + bool last; + + transaction = i40e_nvmupd_get_transaction(cmd->config); + module = i40e_nvmupd_get_module(cmd->config); + last = (transaction & I40E_NVM_LCB); + preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config); + + memset(&cmd_details, 0, sizeof(cmd_details)); + cmd_details.wb_desc = &hw->nvm_wb_desc; + + status = i40e_aq_update_nvm(hw, module, cmd->offset, + (u16)cmd->data_size, bytes, last, + preservation_flags, &cmd_details); + if (status) { i40e_debug(hw, I40E_DEBUG_NVM, - "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n"); - hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + "%s mod 0x%x off 0x%x len 0x%x\n", + __func__, module, cmd->offset, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, + "%s status %d aq %d\n", + __func__, status, hw->aq.asq_last_status); + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } - /* Acquire lock to prevent race condition where adminq_task - * can execute after i40e_nvmupd_nvm_read/write but before state - * variables (nvm_wait_opcode, nvm_release_on_done) are updated. - * - * During NVMUpdate, it is observed that lock could be held for - * ~5ms for most commands. However lock is held for ~60ms for - * NVMUPD_CSUM_LCB command. 
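/*
 * For reference: the i40e_suspend()/i40e_resume() hunk earlier in this diff
 * replaces SIMPLE_DEV_PM_OPS with DEFINE_SIMPLE_DEV_PM_OPS plus pm_sleep_ptr(),
 * which lets the callbacks drop __maybe_unused. A minimal sketch of that
 * pattern for a hypothetical PCI driver (the foo_* names are illustrative and
 * not taken from this patch):
 */
static int foo_suspend(struct device *dev)
{
	/* quiesce the device, disable interrupts, arm wake-up if needed */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* re-initialise the device */
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct pci_driver foo_driver = {
	/* .name, .id_table, .probe, .remove as usual */
	.driver.pm = pm_sleep_ptr(&foo_pm_ops),	/* NULL when CONFIG_PM_SLEEP is off */
};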
- */ - mutex_lock(&hw->aq.arq_mutex); - switch (hw->nvmupd_state) { - case I40E_NVMUPD_STATE_INIT: - status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno); - break; + return status; +} - case I40E_NVMUPD_STATE_READING: - status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno); - break; +/** + * i40e_nvmupd_nvm_read - Read NVM + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * cmd structure contains identifiers and data buffer + **/ +static int i40e_nvmupd_nvm_read(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + struct i40e_asq_cmd_details cmd_details; + u8 module, transaction; + int status; + bool last; - case I40E_NVMUPD_STATE_WRITING: - status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno); - break; + transaction = i40e_nvmupd_get_transaction(cmd->config); + module = i40e_nvmupd_get_module(cmd->config); + last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA); - case I40E_NVMUPD_STATE_INIT_WAIT: - case I40E_NVMUPD_STATE_WRITE_WAIT: - /* if we need to stop waiting for an event, clear - * the wait info and return before doing anything else - */ - if (cmd->offset == 0xffff) { - i40e_nvmupd_clear_wait_state(hw); - status = 0; - break; + memset(&cmd_details, 0, sizeof(cmd_details)); + cmd_details.wb_desc = &hw->nvm_wb_desc; + + status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size, + bytes, last, &cmd_details); + if (status) { + i40e_debug(hw, I40E_DEBUG_NVM, + "%s mod 0x%x off 0x%x len 0x%x\n", + __func__, module, cmd->offset, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, + "%s status %d aq %d\n", + __func__, status, hw->aq.asq_last_status); + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + } + + return status; +} + +/** + * i40e_nvmupd_exec_aq - Run an AQ command + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * cmd structure contains identifiers and data buffer + **/ +static int i40e_nvmupd_exec_aq(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + struct i40e_asq_cmd_details cmd_details; + struct i40e_aq_desc *aq_desc; + u32 buff_size = 0; + u8 *buff = NULL; + u32 aq_desc_len; + u32 aq_data_len; + int status; + + i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); + if (cmd->offset == 0xffff) + return 0; + + memset(&cmd_details, 0, sizeof(cmd_details)); + cmd_details.wb_desc = &hw->nvm_wb_desc; + + aq_desc_len = sizeof(struct i40e_aq_desc); + memset(&hw->nvm_wb_desc, 0, aq_desc_len); + + /* get the aq descriptor */ + if (cmd->data_size < aq_desc_len) { + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n", + cmd->data_size, aq_desc_len); + *perrno = -EINVAL; + return -EINVAL; + } + aq_desc = (struct i40e_aq_desc *)bytes; + + /* if data buffer needed, make sure it's ready */ + aq_data_len = cmd->data_size - aq_desc_len; + buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen)); + if (buff_size) { + if (!hw->nvm_buff.va) { + status = i40e_allocate_virt_mem(hw, &hw->nvm_buff, + hw->aq.asq_buf_size); + if (status) + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n", + status); } - status = -EBUSY; - *perrno = -EBUSY; - break; + if (hw->nvm_buff.va) { + buff = hw->nvm_buff.va; + memcpy(buff, &bytes[aq_desc_len], aq_data_len); 
+ } + } - default: - /* invalid state, should never happen */ + if (cmd->offset) + memset(&hw->nvm_aq_event_desc, 0, aq_desc_len); + + /* and away we go! */ + status = i40e_asq_send_command(hw, aq_desc, buff, + buff_size, &cmd_details); + if (status) { i40e_debug(hw, I40E_DEBUG_NVM, - "NVMUPD: no such state %d\n", hw->nvmupd_state); - status = -EOPNOTSUPP; - *perrno = -ESRCH; - break; + "%s err %pe aq_err %s\n", + __func__, ERR_PTR(status), + i40e_aq_str(hw, hw->aq.asq_last_status)); + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + return status; + } + + /* should we wait for a followup event? */ + if (cmd->offset) { + hw->nvm_wait_opcode = cmd->offset; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } - mutex_unlock(&hw->aq.arq_mutex); return status; } /** + * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * cmd structure contains identifiers and data buffer + **/ +static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + u32 aq_total_len; + u32 aq_desc_len; + int remainder; + u8 *buff; + + i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); + + aq_desc_len = sizeof(struct i40e_aq_desc); + aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen); + + /* check offset range */ + if (cmd->offset > aq_total_len) { + i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n", + __func__, cmd->offset, aq_total_len); + *perrno = -EINVAL; + return -EINVAL; + } + + /* check copylength range */ + if (cmd->data_size > (aq_total_len - cmd->offset)) { + int new_len = aq_total_len - cmd->offset; + + i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n", + __func__, cmd->data_size, new_len); + cmd->data_size = new_len; + } + + remainder = cmd->data_size; + if (cmd->offset < aq_desc_len) { + u32 len = aq_desc_len - cmd->offset; + + len = min(len, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n", + __func__, cmd->offset, cmd->offset + len); + + buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset; + memcpy(bytes, buff, len); + + bytes += len; + remainder -= len; + buff = hw->nvm_buff.va; + } else { + buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len); + } + + if (remainder > 0) { + int start_byte = buff - (u8 *)hw->nvm_buff.va; + + i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n", + __func__, start_byte, start_byte + remainder); + memcpy(bytes, buff, remainder); + } + + return 0; +} + +/** + * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * cmd structure contains identifiers and data buffer + **/ +static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + u32 aq_total_len; + u32 aq_desc_len; + + i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); + + aq_desc_len = sizeof(struct i40e_aq_desc); + aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen); + + /* check copylength range */ + if (cmd->data_size > aq_total_len) { + i40e_debug(hw, I40E_DEBUG_NVM, + "%s: copy length %d too big, trimming to %d\n", + __func__, cmd->data_size, aq_total_len); + cmd->data_size = 
aq_total_len; + } + + memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size); + + return 0; +} + +/** * i40e_nvmupd_state_init - Handle NVM update state Init * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer @@ -937,7 +1194,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw, status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (status) { *perrno = i40e_aq_rc_to_posix(status, - hw->aq.asq_last_status); + hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno); i40e_release_nvm(hw); @@ -948,7 +1205,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw, status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (status) { *perrno = i40e_aq_rc_to_posix(status, - hw->aq.asq_last_status); + hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno); if (status) @@ -962,7 +1219,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw, status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { *perrno = i40e_aq_rc_to_posix(status, - hw->aq.asq_last_status); + hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_erase(hw, cmd, perrno); if (status) { @@ -979,7 +1236,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw, status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { *perrno = i40e_aq_rc_to_posix(status, - hw->aq.asq_last_status); + hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); if (status) { @@ -996,7 +1253,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw, status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { *perrno = i40e_aq_rc_to_posix(status, - hw->aq.asq_last_status); + hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); if (status) { @@ -1012,7 +1269,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw, status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { *perrno = i40e_aq_rc_to_posix(status, - hw->aq.asq_last_status); + hw->aq.asq_last_status); } else { status = i40e_update_nvm_checksum(hw); if (status) { @@ -1185,7 +1442,7 @@ retry: * so here we try to reacquire the semaphore then retry the write. * We only do one retry, then give up. 
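/*
 * The retry logic above re-takes the NVM semaphore before resending a write
 * that failed with I40E_AQ_RC_EBUSY. It restores the same acquire/operate/
 * release pattern used by the WRITE handling in i40e_nvmupd_state_init();
 * a condensed fragment of that pattern (exact bookkeeping varies per command,
 * error paths trimmed):
 */
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
	*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
} else {
	status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
	if (status)
		i40e_release_nvm(hw);		/* give the semaphore back on failure */
	else
		hw->nvm_release_on_done = true;	/* released once the AQ completion arrives */
}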
*/ - if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) && + if (status && hw->aq.asq_last_status == I40E_AQ_RC_EBUSY && !retry_attempt) { u32 old_asq_status = hw->aq.asq_last_status; int old_status = status; @@ -1215,457 +1472,168 @@ retry: } /** - * i40e_nvmupd_clear_wait_state - clear wait state on hw - * @hw: pointer to the hardware structure - **/ -void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw) -{ - i40e_debug(hw, I40E_DEBUG_NVM, - "NVMUPD: clearing wait on opcode 0x%04x\n", - hw->nvm_wait_opcode); - - if (hw->nvm_release_on_done) { - i40e_release_nvm(hw); - hw->nvm_release_on_done = false; - } - hw->nvm_wait_opcode = 0; - - if (hw->aq.arq_last_status) { - hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR; - return; - } - - switch (hw->nvmupd_state) { - case I40E_NVMUPD_STATE_INIT_WAIT: - hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; - break; - - case I40E_NVMUPD_STATE_WRITE_WAIT: - hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; - break; - - default: - break; - } -} - -/** - * i40e_nvmupd_check_wait_event - handle NVM update operation events - * @hw: pointer to the hardware structure - * @opcode: the event that just happened - * @desc: AdminQ descriptor - **/ -void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode, - struct i40e_aq_desc *desc) -{ - u32 aq_desc_len = sizeof(struct i40e_aq_desc); - - if (opcode == hw->nvm_wait_opcode) { - memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len); - i40e_nvmupd_clear_wait_state(hw); - } -} - -/** - * i40e_nvmupd_validate_command - Validate given command - * @hw: pointer to hardware structure - * @cmd: pointer to nvm update command buffer - * @perrno: pointer to return error code - * - * Return one of the valid command types or I40E_NVMUPD_INVALID - **/ -static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - int *perrno) -{ - enum i40e_nvmupd_cmd upd_cmd; - u8 module, transaction; - - /* anything that doesn't match a recognized case is an error */ - upd_cmd = I40E_NVMUPD_INVALID; - - transaction = i40e_nvmupd_get_transaction(cmd->config); - module = i40e_nvmupd_get_module(cmd->config); - - /* limits on data size */ - if ((cmd->data_size < 1) || - (cmd->data_size > I40E_NVMUPD_MAX_DATA)) { - i40e_debug(hw, I40E_DEBUG_NVM, - "i40e_nvmupd_validate_command data_size %d\n", - cmd->data_size); - *perrno = -EFAULT; - return I40E_NVMUPD_INVALID; - } - - switch (cmd->command) { - case I40E_NVM_READ: - switch (transaction) { - case I40E_NVM_CON: - upd_cmd = I40E_NVMUPD_READ_CON; - break; - case I40E_NVM_SNT: - upd_cmd = I40E_NVMUPD_READ_SNT; - break; - case I40E_NVM_LCB: - upd_cmd = I40E_NVMUPD_READ_LCB; - break; - case I40E_NVM_SA: - upd_cmd = I40E_NVMUPD_READ_SA; - break; - case I40E_NVM_EXEC: - if (module == 0xf) - upd_cmd = I40E_NVMUPD_STATUS; - else if (module == 0) - upd_cmd = I40E_NVMUPD_GET_AQ_RESULT; - break; - case I40E_NVM_AQE: - upd_cmd = I40E_NVMUPD_GET_AQ_EVENT; - break; - } - break; - - case I40E_NVM_WRITE: - switch (transaction) { - case I40E_NVM_CON: - upd_cmd = I40E_NVMUPD_WRITE_CON; - break; - case I40E_NVM_SNT: - upd_cmd = I40E_NVMUPD_WRITE_SNT; - break; - case I40E_NVM_LCB: - upd_cmd = I40E_NVMUPD_WRITE_LCB; - break; - case I40E_NVM_SA: - upd_cmd = I40E_NVMUPD_WRITE_SA; - break; - case I40E_NVM_ERA: - upd_cmd = I40E_NVMUPD_WRITE_ERA; - break; - case I40E_NVM_CSUM: - upd_cmd = I40E_NVMUPD_CSUM_CON; - break; - case (I40E_NVM_CSUM|I40E_NVM_SA): - upd_cmd = I40E_NVMUPD_CSUM_SA; - break; - case (I40E_NVM_CSUM|I40E_NVM_LCB): - upd_cmd = I40E_NVMUPD_CSUM_LCB; - break; - case 
I40E_NVM_EXEC: - if (module == 0) - upd_cmd = I40E_NVMUPD_EXEC_AQ; - break; - } - break; - } - - return upd_cmd; -} - -/** - * i40e_nvmupd_exec_aq - Run an AQ command + * i40e_nvmupd_command - Process an NVM update command * @hw: pointer to hardware structure - * @cmd: pointer to nvm update command buffer + * @cmd: pointer to nvm update command * @bytes: pointer to the data buffer * @perrno: pointer to return error code * - * cmd structure contains identifiers and data buffer + * Dispatches command depending on what update state is current **/ -static int i40e_nvmupd_exec_aq(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno) +int i40e_nvmupd_command(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) { - struct i40e_asq_cmd_details cmd_details; - struct i40e_aq_desc *aq_desc; - u32 buff_size = 0; - u8 *buff = NULL; - u32 aq_desc_len; - u32 aq_data_len; + enum i40e_nvmupd_cmd upd_cmd; int status; - i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); - if (cmd->offset == 0xffff) - return 0; + /* assume success */ + *perrno = 0; - memset(&cmd_details, 0, sizeof(cmd_details)); - cmd_details.wb_desc = &hw->nvm_wb_desc; + /* early check for status command and debug msgs */ + upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); - aq_desc_len = sizeof(struct i40e_aq_desc); - memset(&hw->nvm_wb_desc, 0, aq_desc_len); + i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n", + i40e_nvm_update_state_str[upd_cmd], + hw->nvmupd_state, + hw->nvm_release_on_done, hw->nvm_wait_opcode, + cmd->command, cmd->config, cmd->offset, cmd->data_size); - /* get the aq descriptor */ - if (cmd->data_size < aq_desc_len) { + if (upd_cmd == I40E_NVMUPD_INVALID) { + *perrno = -EFAULT; i40e_debug(hw, I40E_DEBUG_NVM, - "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n", - cmd->data_size, aq_desc_len); - *perrno = -EINVAL; - return -EINVAL; + "i40e_nvmupd_validate_command returns %d errno %d\n", + upd_cmd, *perrno); } - aq_desc = (struct i40e_aq_desc *)bytes; - /* if data buffer needed, make sure it's ready */ - aq_data_len = cmd->data_size - aq_desc_len; - buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen)); - if (buff_size) { - if (!hw->nvm_buff.va) { - status = i40e_allocate_virt_mem(hw, &hw->nvm_buff, - hw->aq.asq_buf_size); - if (status) - i40e_debug(hw, I40E_DEBUG_NVM, - "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n", - status); - } - - if (hw->nvm_buff.va) { - buff = hw->nvm_buff.va; - memcpy(buff, &bytes[aq_desc_len], aq_data_len); + /* a status request returns immediately rather than + * going into the state machine + */ + if (upd_cmd == I40E_NVMUPD_STATUS) { + if (!cmd->data_size) { + *perrno = -EFAULT; + return -EINVAL; } - } - - if (cmd->offset) - memset(&hw->nvm_aq_event_desc, 0, aq_desc_len); - - /* and away we go! */ - status = i40e_asq_send_command(hw, aq_desc, buff, - buff_size, &cmd_details); - if (status) { - i40e_debug(hw, I40E_DEBUG_NVM, - "%s err %pe aq_err %s\n", - __func__, ERR_PTR(status), - i40e_aq_str(hw, hw->aq.asq_last_status)); - *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); - return status; - } - - /* should we wait for a followup event? 
*/ - if (cmd->offset) { - hw->nvm_wait_opcode = cmd->offset; - hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; - } - - return status; -} -/** - * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq - * @hw: pointer to hardware structure - * @cmd: pointer to nvm update command buffer - * @bytes: pointer to the data buffer - * @perrno: pointer to return error code - * - * cmd structure contains identifiers and data buffer - **/ -static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno) -{ - u32 aq_total_len; - u32 aq_desc_len; - int remainder; - u8 *buff; - - i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); - - aq_desc_len = sizeof(struct i40e_aq_desc); - aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen); + bytes[0] = hw->nvmupd_state; - /* check offset range */ - if (cmd->offset > aq_total_len) { - i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n", - __func__, cmd->offset, aq_total_len); - *perrno = -EINVAL; - return -EINVAL; - } + if (cmd->data_size >= 4) { + bytes[1] = 0; + *((u16 *)&bytes[2]) = hw->nvm_wait_opcode; + } - /* check copylength range */ - if (cmd->data_size > (aq_total_len - cmd->offset)) { - int new_len = aq_total_len - cmd->offset; + /* Clear error status on read */ + if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; - i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n", - __func__, cmd->data_size, new_len); - cmd->data_size = new_len; + return 0; } - remainder = cmd->data_size; - if (cmd->offset < aq_desc_len) { - u32 len = aq_desc_len - cmd->offset; - - len = min(len, cmd->data_size); - i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n", - __func__, cmd->offset, cmd->offset + len); - - buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset; - memcpy(bytes, buff, len); - - bytes += len; - remainder -= len; - buff = hw->nvm_buff.va; - } else { - buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len); + /* Clear status even it is not read and log */ + if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) { + i40e_debug(hw, I40E_DEBUG_NVM, + "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n"); + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; } - if (remainder > 0) { - int start_byte = buff - (u8 *)hw->nvm_buff.va; - - i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n", - __func__, start_byte, start_byte + remainder); - memcpy(bytes, buff, remainder); - } + /* Acquire lock to prevent race condition where adminq_task + * can execute after i40e_nvmupd_nvm_read/write but before state + * variables (nvm_wait_opcode, nvm_release_on_done) are updated. + * + * During NVMUpdate, it is observed that lock could be held for + * ~5ms for most commands. However lock is held for ~60ms for + * NVMUPD_CSUM_LCB command. 
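/*
 * The I40E_NVMUPD_STATUS branch above answers without entering the state
 * machine: byte 0 of the caller's buffer is the current nvmupd_state and,
 * when at least four bytes were supplied, bytes 2..3 carry the AQ opcode the
 * driver is still waiting on. Caller-side view of that layout (sketch only;
 * how the buffer is transported to the update tool is outside this hunk, and
 * the u16 is stored in host byte order, exactly as written above):
 */
u8 buf[4] = {};
/* ... buf filled in by the driver ... */
u8  state       = buf[0];		/* I40E_NVMUPD_STATE_* value */
u16 wait_opcode = *(u16 *)&buf[2];	/* zero when no AQ completion is pending */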
+ */ + mutex_lock(&hw->aq.arq_mutex); + switch (hw->nvmupd_state) { + case I40E_NVMUPD_STATE_INIT: + status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno); + break; - return 0; -} + case I40E_NVMUPD_STATE_READING: + status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno); + break; -/** - * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq - * @hw: pointer to hardware structure - * @cmd: pointer to nvm update command buffer - * @bytes: pointer to the data buffer - * @perrno: pointer to return error code - * - * cmd structure contains identifiers and data buffer - **/ -static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno) -{ - u32 aq_total_len; - u32 aq_desc_len; + case I40E_NVMUPD_STATE_WRITING: + status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno); + break; - i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); + case I40E_NVMUPD_STATE_INIT_WAIT: + case I40E_NVMUPD_STATE_WRITE_WAIT: + /* if we need to stop waiting for an event, clear + * the wait info and return before doing anything else + */ + if (cmd->offset == 0xffff) { + i40e_nvmupd_clear_wait_state(hw); + status = 0; + break; + } - aq_desc_len = sizeof(struct i40e_aq_desc); - aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen); + status = -EBUSY; + *perrno = -EBUSY; + break; - /* check copylength range */ - if (cmd->data_size > aq_total_len) { + default: + /* invalid state, should never happen */ i40e_debug(hw, I40E_DEBUG_NVM, - "%s: copy length %d too big, trimming to %d\n", - __func__, cmd->data_size, aq_total_len); - cmd->data_size = aq_total_len; + "NVMUPD: no such state %d\n", hw->nvmupd_state); + status = -EOPNOTSUPP; + *perrno = -ESRCH; + break; } - memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size); - - return 0; + mutex_unlock(&hw->aq.arq_mutex); + return status; } /** - * i40e_nvmupd_nvm_read - Read NVM - * @hw: pointer to hardware structure - * @cmd: pointer to nvm update command buffer - * @bytes: pointer to the data buffer - * @perrno: pointer to return error code - * - * cmd structure contains identifiers and data buffer + * i40e_nvmupd_clear_wait_state - clear wait state on hw + * @hw: pointer to the hardware structure **/ -static int i40e_nvmupd_nvm_read(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno) +void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw) { - struct i40e_asq_cmd_details cmd_details; - u8 module, transaction; - int status; - bool last; - - transaction = i40e_nvmupd_get_transaction(cmd->config); - module = i40e_nvmupd_get_module(cmd->config); - last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA); - - memset(&cmd_details, 0, sizeof(cmd_details)); - cmd_details.wb_desc = &hw->nvm_wb_desc; + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: clearing wait on opcode 0x%04x\n", + hw->nvm_wait_opcode); - status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size, - bytes, last, &cmd_details); - if (status) { - i40e_debug(hw, I40E_DEBUG_NVM, - "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n", - module, cmd->offset, cmd->data_size); - i40e_debug(hw, I40E_DEBUG_NVM, - "i40e_nvmupd_nvm_read status %d aq %d\n", - status, hw->aq.asq_last_status); - *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + if (hw->nvm_release_on_done) { + i40e_release_nvm(hw); + hw->nvm_release_on_done = false; } + hw->nvm_wait_opcode = 0; - return status; -} - -/** - * i40e_nvmupd_nvm_erase - Erase an NVM module - * @hw: pointer to hardware structure - 
* @cmd: pointer to nvm update command buffer - * @perrno: pointer to return error code - * - * module, offset, data_size and data are in cmd structure - **/ -static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - int *perrno) -{ - struct i40e_asq_cmd_details cmd_details; - u8 module, transaction; - int status = 0; - bool last; + if (hw->aq.arq_last_status) { + hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR; + return; + } - transaction = i40e_nvmupd_get_transaction(cmd->config); - module = i40e_nvmupd_get_module(cmd->config); - last = (transaction & I40E_NVM_LCB); + switch (hw->nvmupd_state) { + case I40E_NVMUPD_STATE_INIT_WAIT: + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + break; - memset(&cmd_details, 0, sizeof(cmd_details)); - cmd_details.wb_desc = &hw->nvm_wb_desc; + case I40E_NVMUPD_STATE_WRITE_WAIT: + hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; + break; - status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size, - last, &cmd_details); - if (status) { - i40e_debug(hw, I40E_DEBUG_NVM, - "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n", - module, cmd->offset, cmd->data_size); - i40e_debug(hw, I40E_DEBUG_NVM, - "i40e_nvmupd_nvm_erase status %d aq %d\n", - status, hw->aq.asq_last_status); - *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + default: + break; } - - return status; } /** - * i40e_nvmupd_nvm_write - Write NVM - * @hw: pointer to hardware structure - * @cmd: pointer to nvm update command buffer - * @bytes: pointer to the data buffer - * @perrno: pointer to return error code - * - * module, offset, data_size and data are in cmd structure + * i40e_nvmupd_check_wait_event - handle NVM update operation events + * @hw: pointer to the hardware structure + * @opcode: the event that just happened + * @desc: AdminQ descriptor **/ -static int i40e_nvmupd_nvm_write(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno) +void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode, + struct i40e_aq_desc *desc) { - struct i40e_asq_cmd_details cmd_details; - u8 module, transaction; - u8 preservation_flags; - int status = 0; - bool last; - - transaction = i40e_nvmupd_get_transaction(cmd->config); - module = i40e_nvmupd_get_module(cmd->config); - last = (transaction & I40E_NVM_LCB); - preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config); - - memset(&cmd_details, 0, sizeof(cmd_details)); - cmd_details.wb_desc = &hw->nvm_wb_desc; + u32 aq_desc_len = sizeof(struct i40e_aq_desc); - status = i40e_aq_update_nvm(hw, module, cmd->offset, - (u16)cmd->data_size, bytes, last, - preservation_flags, &cmd_details); - if (status) { - i40e_debug(hw, I40E_DEBUG_NVM, - "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n", - module, cmd->offset, cmd->data_size); - i40e_debug(hw, I40E_DEBUG_NVM, - "i40e_nvmupd_nvm_write status %d aq %d\n", - status, hw->aq.asq_last_status); - *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + if (opcode == hw->nvm_wait_opcode) { + memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len); + i40e_nvmupd_clear_wait_state(hw); } - - return status; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index ce1f11b8ad65..5a0699ca7ce5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -371,13 +371,6 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status); int i40e_set_mac_type(struct i40e_hw *hw); -extern struct i40e_rx_ptype_decoded 
i40e_ptype_lookup[]; - -static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) -{ - return i40e_ptype_lookup[ptype]; -} - /** * i40e_virtchnl_link_speed - Convert AdminQ link_speed to virtchnl definition * @link_speed: the speed to convert diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index e7ebcb09f23c..b72a4b5d76b9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -1472,7 +1472,8 @@ void i40e_ptp_restore_hw_time(struct i40e_pf *pf) **/ void i40e_ptp_init(struct i40e_pf *pf) { - struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); + struct net_device *netdev = vsi->netdev; struct i40e_hw *hw = &pf->hw; u32 pf_id; long err; @@ -1536,6 +1537,7 @@ void i40e_ptp_init(struct i40e_pf *pf) **/ void i40e_ptp_stop(struct i40e_pf *pf) { + struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf); struct i40e_hw *hw = &pf->hw; u32 regval; @@ -1555,7 +1557,7 @@ void i40e_ptp_stop(struct i40e_pf *pf) ptp_clock_unregister(pf->ptp_clock); pf->ptp_clock = NULL; dev_info(&pf->pdev->dev, "%s: removed PHC on %s\n", __func__, - pf->vsi[pf->lan_vsi]->netdev->name); + main_vsi->netdev->name); } if (i40e_is_ptp_pin_dev(&pf->hw)) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 1a12b732818e..c006f716a3bd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2,6 +2,7 @@ /* Copyright(c) 2013 - 2018 Intel Corporation. */ #include <linux/bpf_trace.h> +#include <linux/net/intel/libie/rx.h> #include <linux/prefetch.h> #include <linux/sctp.h> #include <net/mpls.h> @@ -23,7 +24,7 @@ static void i40e_fdir(struct i40e_ring *tx_ring, { struct i40e_filter_program_desc *fdir_desc; struct i40e_pf *pf = tx_ring->vsi->back; - u32 flex_ptype, dtype_cmd; + u32 flex_ptype, dtype_cmd, vsi_id; u16 i; /* grab the next descriptor */ @@ -41,8 +42,8 @@ static void i40e_fdir(struct i40e_ring *tx_ring, flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_PCTYPE_MASK, fdata->pctype); /* Use LAN VSI Id if not programmed by user */ - flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_DEST_VSI_MASK, - fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id); + vsi_id = fdata->dest_vsi ? : i40e_pf_get_main_vsi(pf)->id; + flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_DEST_VSI_MASK, vsi_id); dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG; @@ -860,13 +861,15 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw) /** * i40e_detect_recover_hung - Function to detect and recover hung_queues - * @vsi: pointer to vsi struct with tx queues + * @pf: pointer to PF struct * - * VSI has netdev and netdev has TX queues. This function is to check each of - * those TX queues if they are hung, trigger recovery by issuing SW interrupt. + * LAN VSI has netdev and netdev has TX queues. This function is to check + * each of those TX queues if they are hung, trigger recovery by issuing + * SW interrupt. 
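/*
 * Several i40e hunks in this diff replace open-coded pf->vsi[pf->lan_vsi]
 * lookups with i40e_pf_get_main_vsi(pf). The helper itself is introduced
 * elsewhere in the series; presumably it is just the old expression behind a
 * name, along the lines of this sketch (not the actual definition):
 */
static inline struct i40e_vsi *i40e_pf_get_main_vsi(struct i40e_pf *pf)
{
	return pf->vsi[pf->lan_vsi];
}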
**/ -void i40e_detect_recover_hung(struct i40e_vsi *vsi) +void i40e_detect_recover_hung(struct i40e_pf *pf) { + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); struct i40e_ring *tx_ring = NULL; struct net_device *netdev; unsigned int i; @@ -1741,38 +1744,30 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, struct sk_buff *skb, union i40e_rx_desc *rx_desc) { - struct i40e_rx_ptype_decoded decoded; + struct libeth_rx_pt decoded; u32 rx_error, rx_status; bool ipv4, ipv6; u8 ptype; u64 qword; - qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - ptype = FIELD_GET(I40E_RXD_QW1_PTYPE_MASK, qword); - rx_error = FIELD_GET(I40E_RXD_QW1_ERROR_MASK, qword); - rx_status = FIELD_GET(I40E_RXD_QW1_STATUS_MASK, qword); - decoded = decode_rx_desc_ptype(ptype); - skb->ip_summed = CHECKSUM_NONE; - skb_checksum_none_assert(skb); + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + ptype = FIELD_GET(I40E_RXD_QW1_PTYPE_MASK, qword); - /* Rx csum enabled and ip headers found? */ - if (!(vsi->netdev->features & NETIF_F_RXCSUM)) + decoded = libie_rx_pt_parse(ptype); + if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded)) return; + rx_error = FIELD_GET(I40E_RXD_QW1_ERROR_MASK, qword); + rx_status = FIELD_GET(I40E_RXD_QW1_STATUS_MASK, qword); + /* did the hardware decode the packet and checksum? */ if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT))) return; - /* both known and outer_ip must be set for the below code to work */ - if (!(decoded.known && decoded.outer_ip)) - return; - - ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && - (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4); - ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && - (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6); + ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4; + ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6; if (ipv4 && (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) | @@ -1800,20 +1795,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, * we need to bump the checksum level by 1 to reflect the fact that * we are indicating we validated the inner checksum. 
*/ - if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT) + if (decoded.tunnel_type >= LIBETH_RX_PT_TUNNEL_IP_GRENAT) skb->csum_level = 1; - /* Only report checksum unnecessary for TCP, UDP, or SCTP */ - switch (decoded.inner_prot) { - case I40E_RX_PTYPE_INNER_PROT_TCP: - case I40E_RX_PTYPE_INNER_PROT_UDP: - case I40E_RX_PTYPE_INNER_PROT_SCTP: - skb->ip_summed = CHECKSUM_UNNECESSARY; - fallthrough; - default: - break; - } - + skb->ip_summed = CHECKSUM_UNNECESSARY; return; checksum_fail: @@ -1821,29 +1806,6 @@ checksum_fail: } /** - * i40e_ptype_to_htype - get a hash type - * @ptype: the ptype value from the descriptor - * - * Returns a hash type to be used by skb_set_hash - **/ -static inline int i40e_ptype_to_htype(u8 ptype) -{ - struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); - - if (!decoded.known) - return PKT_HASH_TYPE_NONE; - - if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && - decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4) - return PKT_HASH_TYPE_L4; - else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && - decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3) - return PKT_HASH_TYPE_L3; - else - return PKT_HASH_TYPE_L2; -} - -/** * i40e_rx_hash - set the hash value in the skb * @ring: descriptor ring * @rx_desc: specific descriptor @@ -1855,17 +1817,19 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, struct sk_buff *skb, u8 rx_ptype) { + struct libeth_rx_pt decoded; u32 hash; const __le64 rss_mask = cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); - if (!(ring->netdev->features & NETIF_F_RXHASH)) + decoded = libie_rx_pt_parse(rx_ptype); + if (!libeth_rx_pt_has_hash(ring->netdev, decoded)) return; if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) { hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); - skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype)); + libeth_rx_pt_set_hash(skb, hash, decoded); } } @@ -2144,9 +2108,7 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, */ /* allocate a skb to store the frags */ - skb = __napi_alloc_skb(&rx_ring->q_vector->napi, - I40E_RX_HDR_SIZE, - GFP_ATOMIC | __GFP_NOWARN); + skb = napi_alloc_skb(&rx_ring->q_vector->napi, I40E_RX_HDR_SIZE); if (unlikely(!skb)) return NULL; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 2cdc7de6301c..7c26c9a2bf65 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -470,7 +470,7 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring); int i40e_napi_poll(struct napi_struct *napi, int budget); void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector); u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw); -void i40e_detect_recover_hung(struct i40e_vsi *vsi); +void i40e_detect_recover_hung(struct i40e_pf *pf); int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size); bool __i40e_chk_linearize(struct sk_buff *skb); int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index d9031499697e..28568e126850 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -745,94 +745,6 @@ enum i40e_rx_desc_error_l3l4e_fcoe_masks { #define I40E_RXD_QW1_PTYPE_SHIFT 30 #define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT) -/* Packet type non-ip values */ -enum 
i40e_rx_l2_ptype { - I40E_RX_PTYPE_L2_RESERVED = 0, - I40E_RX_PTYPE_L2_MAC_PAY2 = 1, - I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2, - I40E_RX_PTYPE_L2_FIP_PAY2 = 3, - I40E_RX_PTYPE_L2_OUI_PAY2 = 4, - I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, - I40E_RX_PTYPE_L2_LLDP_PAY2 = 6, - I40E_RX_PTYPE_L2_ECP_PAY2 = 7, - I40E_RX_PTYPE_L2_EVB_PAY2 = 8, - I40E_RX_PTYPE_L2_QCN_PAY2 = 9, - I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10, - I40E_RX_PTYPE_L2_ARP = 11, - I40E_RX_PTYPE_L2_FCOE_PAY3 = 12, - I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13, - I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14, - I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15, - I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16, - I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17, - I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18, - I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19, - I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20, - I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21, - I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58, - I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87, - I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124, - I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153 -}; - -struct i40e_rx_ptype_decoded { - u32 known:1; - u32 outer_ip:1; - u32 outer_ip_ver:1; - u32 outer_frag:1; - u32 tunnel_type:3; - u32 tunnel_end_prot:2; - u32 tunnel_end_frag:1; - u32 inner_prot:4; - u32 payload_layer:3; -}; - -enum i40e_rx_ptype_outer_ip { - I40E_RX_PTYPE_OUTER_L2 = 0, - I40E_RX_PTYPE_OUTER_IP = 1 -}; - -enum i40e_rx_ptype_outer_ip_ver { - I40E_RX_PTYPE_OUTER_NONE = 0, - I40E_RX_PTYPE_OUTER_IPV4 = 0, - I40E_RX_PTYPE_OUTER_IPV6 = 1 -}; - -enum i40e_rx_ptype_outer_fragmented { - I40E_RX_PTYPE_NOT_FRAG = 0, - I40E_RX_PTYPE_FRAG = 1 -}; - -enum i40e_rx_ptype_tunnel_type { - I40E_RX_PTYPE_TUNNEL_NONE = 0, - I40E_RX_PTYPE_TUNNEL_IP_IP = 1, - I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2, - I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, - I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, -}; - -enum i40e_rx_ptype_tunnel_end_prot { - I40E_RX_PTYPE_TUNNEL_END_NONE = 0, - I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1, - I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2, -}; - -enum i40e_rx_ptype_inner_prot { - I40E_RX_PTYPE_INNER_PROT_NONE = 0, - I40E_RX_PTYPE_INNER_PROT_UDP = 1, - I40E_RX_PTYPE_INNER_PROT_TCP = 2, - I40E_RX_PTYPE_INNER_PROT_SCTP = 3, - I40E_RX_PTYPE_INNER_PROT_ICMP = 4, - I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5 -}; - -enum i40e_rx_ptype_payload_layer { - I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, - I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, - I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, - I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, -}; - #define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38 #define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \ I40E_RXD_QW1_LENGTH_PBUF_SHIFT) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 232b65b9c8ea..662622f01e31 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -795,13 +795,13 @@ error_param: static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx) { struct i40e_mac_filter *f = NULL; + struct i40e_vsi *main_vsi, *vsi; struct i40e_pf *pf = vf->pf; - struct i40e_vsi *vsi; u64 max_tx_rate = 0; int ret = 0; - vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid, - vf->vf_id); + main_vsi = i40e_pf_get_main_vsi(pf); + vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, main_vsi->seid, vf->vf_id); if (!vsi) { dev_err(&pf->pdev->dev, @@ -3322,8 +3322,9 @@ error_param: static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) { struct i40e_pf *pf = vf->pf; - int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id; + struct i40e_vsi *main_vsi; int aq_ret = 0; + 
int abs_vf_id; if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) { @@ -3331,8 +3332,9 @@ static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) goto error_param; } - i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id, - msg, msglen); + main_vsi = i40e_pf_get_main_vsi(pf); + abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id; + i40e_notify_client_of_vf_msg(main_vsi, abs_vf_id, msg, msglen); error_param: /* send the response to the VF */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 11500003af0d..a85b425794df 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -301,8 +301,7 @@ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring, net_prefetch(xdp->data_meta); /* allocate a skb to store the frags */ - skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize, - GFP_ATOMIC | __GFP_NOWARN); + skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize); if (unlikely(!skb)) goto out; diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index db8188c7ac4b..23a6557fc3db 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -287,7 +287,7 @@ struct iavf_adapter { #define IAVF_FLAG_RESET_PENDING BIT(4) #define IAVF_FLAG_RESET_NEEDED BIT(5) #define IAVF_FLAG_WB_ON_ITR_CAPABLE BIT(6) -#define IAVF_FLAG_LEGACY_RX BIT(15) +/* BIT(15) is free, was IAVF_FLAG_LEGACY_RX */ #define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16) #define IAVF_FLAG_QUEUES_DISABLED BIT(17) #define IAVF_FLAG_SETUP_NETDEV_FEATURES BIT(18) diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c index 5a25233a89d5..aa751ce3425b 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_common.c +++ b/drivers/net/ethernet/intel/iavf/iavf_common.c @@ -432,259 +432,6 @@ enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id, return iavf_aq_get_set_rss_key(hw, vsi_id, key, true); } -/* The iavf_ptype_lookup table is used to convert from the 8-bit ptype in the - * hardware to a bit-field that can be used by SW to more easily determine the - * packet type. - * - * Macros are used to shorten the table lines and make this table human - * readable. - * - * We store the PTYPE in the top byte of the bit field - this is just so that - * we can check that the table doesn't have a row missing, as the index into - * the table should be the PTYPE. 
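/*
 * The per-driver ptype lookup tables being deleted here (iavf) and earlier in
 * this diff (i40e) are superseded by the shared libie/libeth decoder used in
 * the reworked Rx paths above. Instead of indexing a 256-entry array, the hot
 * path now does roughly the following (helper names as used in the
 * i40e_txrx.c hunks; foo_rx_hints and its parameters are illustrative):
 */
static void foo_rx_hints(struct net_device *netdev, struct sk_buff *skb,
			 u8 ptype, u32 rss_hash)
{
	struct libeth_rx_pt pt = libie_rx_pt_parse(ptype);	/* 8-bit HW ptype -> generic fields */

	if (libeth_rx_pt_has_hash(netdev, pt))
		libeth_rx_pt_set_hash(skb, rss_hash, pt);
	if (libeth_rx_pt_has_checksum(netdev, pt))
		skb->ip_summed = CHECKSUM_UNNECESSARY;	/* only after the HW error bits check out */
}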
- * - * Typical work flow: - * - * IF NOT iavf_ptype_lookup[ptype].known - * THEN - * Packet is unknown - * ELSE IF iavf_ptype_lookup[ptype].outer_ip == IAVF_RX_PTYPE_OUTER_IP - * Use the rest of the fields to look at the tunnels, inner protocols, etc - * ELSE - * Use the enum iavf_rx_l2_ptype to decode the packet type - * ENDIF - */ - -/* macro to make the table lines short, use explicit indexing with [PTYPE] */ -#define IAVF_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ - [PTYPE] = { \ - 1, \ - IAVF_RX_PTYPE_OUTER_##OUTER_IP, \ - IAVF_RX_PTYPE_OUTER_##OUTER_IP_VER, \ - IAVF_RX_PTYPE_##OUTER_FRAG, \ - IAVF_RX_PTYPE_TUNNEL_##T, \ - IAVF_RX_PTYPE_TUNNEL_END_##TE, \ - IAVF_RX_PTYPE_##TEF, \ - IAVF_RX_PTYPE_INNER_PROT_##I, \ - IAVF_RX_PTYPE_PAYLOAD_LAYER_##PL } - -#define IAVF_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } - -/* shorter macros makes the table fit but are terse */ -#define IAVF_RX_PTYPE_NOF IAVF_RX_PTYPE_NOT_FRAG -#define IAVF_RX_PTYPE_FRG IAVF_RX_PTYPE_FRAG -#define IAVF_RX_PTYPE_INNER_PROT_TS IAVF_RX_PTYPE_INNER_PROT_TIMESYNC - -/* Lookup table mapping the 8-bit HW PTYPE to the bit field for decoding */ -struct iavf_rx_ptype_decoded iavf_ptype_lookup[BIT(8)] = { - /* L2 Packet types */ - IAVF_PTT_UNUSED_ENTRY(0), - IAVF_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - IAVF_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2), - IAVF_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - IAVF_PTT_UNUSED_ENTRY(4), - IAVF_PTT_UNUSED_ENTRY(5), - IAVF_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - IAVF_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - IAVF_PTT_UNUSED_ENTRY(8), - IAVF_PTT_UNUSED_ENTRY(9), - IAVF_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - IAVF_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), - IAVF_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - IAVF_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - IAVF_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - IAVF_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - IAVF_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - IAVF_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - IAVF_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - IAVF_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - IAVF_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - IAVF_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - - /* Non Tunneled IPv4 */ - IAVF_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), - IAVF_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), - IAVF_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(25), - IAVF_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), - IAVF_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), - IAVF_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), - - /* IPv4 --> IPv4 */ - IAVF_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), - IAVF_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), - IAVF_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(32), - IAVF_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), - IAVF_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), - IAVF_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> IPv6 */ - IAVF_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), - IAVF_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), - IAVF_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(39), - IAVF_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), - IAVF_PTT(41, 
IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), - IAVF_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT */ - IAVF_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), - - /* IPv4 --> GRE/NAT --> IPv4 */ - IAVF_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), - IAVF_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), - IAVF_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(47), - IAVF_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), - IAVF_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), - IAVF_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> IPv6 */ - IAVF_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), - IAVF_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), - IAVF_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(54), - IAVF_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), - IAVF_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), - IAVF_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> MAC */ - IAVF_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), - - /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ - IAVF_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), - IAVF_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), - IAVF_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(62), - IAVF_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), - IAVF_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), - IAVF_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ - IAVF_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), - IAVF_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), - IAVF_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(69), - IAVF_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), - IAVF_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), - IAVF_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> MAC/VLAN */ - IAVF_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), - - /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ - IAVF_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), - IAVF_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), - IAVF_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(77), - IAVF_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), - IAVF_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), - IAVF_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), - - /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ - IAVF_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), - IAVF_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), - IAVF_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(84), - IAVF_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), - IAVF_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), - IAVF_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), - - /* Non Tunneled IPv6 */ - IAVF_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), - IAVF_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), - IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), - 
IAVF_PTT_UNUSED_ENTRY(91), - IAVF_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), - IAVF_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), - IAVF_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), - - /* IPv6 --> IPv4 */ - IAVF_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), - IAVF_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), - IAVF_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(98), - IAVF_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), - IAVF_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), - IAVF_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> IPv6 */ - IAVF_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), - IAVF_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), - IAVF_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(105), - IAVF_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), - IAVF_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), - IAVF_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT */ - IAVF_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> IPv4 */ - IAVF_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), - IAVF_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), - IAVF_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(113), - IAVF_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), - IAVF_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), - IAVF_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> IPv6 */ - IAVF_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), - IAVF_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), - IAVF_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(120), - IAVF_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), - IAVF_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), - IAVF_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC */ - IAVF_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ - IAVF_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), - IAVF_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), - IAVF_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(128), - IAVF_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), - IAVF_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), - IAVF_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ - IAVF_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), - IAVF_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), - IAVF_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(135), - IAVF_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), - IAVF_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), - IAVF_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC/VLAN */ - IAVF_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ - IAVF_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), - IAVF_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), - IAVF_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), - 
IAVF_PTT_UNUSED_ENTRY(143), - IAVF_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), - IAVF_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), - IAVF_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ - IAVF_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), - IAVF_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), - IAVF_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(150), - IAVF_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), - IAVF_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), - IAVF_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), - - /* unused entries */ - [154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } -}; - /** * iavf_aq_send_msg_to_pf * @hw: pointer to the hardware structure diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index 378c3e9ddf9d..52273f7eab2c 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -240,29 +240,6 @@ static const struct iavf_stats iavf_gstrings_stats[] = { #define IAVF_QUEUE_STATS_LEN ARRAY_SIZE(iavf_gstrings_queue_stats) -/* For now we have one and only one private flag and it is only defined - * when we have support for the SKIP_CPU_SYNC DMA attribute. Instead - * of leaving all this code sitting around empty we will strip it unless - * our one private flag is actually available. - */ -struct iavf_priv_flags { - char flag_string[ETH_GSTRING_LEN]; - u32 flag; - bool read_only; -}; - -#define IAVF_PRIV_FLAG(_name, _flag, _read_only) { \ - .flag_string = _name, \ - .flag = _flag, \ - .read_only = _read_only, \ -} - -static const struct iavf_priv_flags iavf_gstrings_priv_flags[] = { - IAVF_PRIV_FLAG("legacy-rx", IAVF_FLAG_LEGACY_RX, 0), -}; - -#define IAVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(iavf_gstrings_priv_flags) - /** * iavf_get_link_ksettings - Get Link Speed and Duplex settings * @netdev: network interface device structure @@ -342,8 +319,6 @@ static int iavf_get_sset_count(struct net_device *netdev, int sset) return IAVF_STATS_LEN + (IAVF_QUEUE_STATS_LEN * 2 * netdev->real_num_tx_queues); - else if (sset == ETH_SS_PRIV_FLAGS) - return IAVF_PRIV_FLAGS_STR_LEN; else return -EINVAL; } @@ -386,21 +361,6 @@ static void iavf_get_ethtool_stats(struct net_device *netdev, } /** - * iavf_get_priv_flag_strings - Get private flag strings - * @netdev: network interface device structure - * @data: buffer for string data - * - * Builds the private flags string table - **/ -static void iavf_get_priv_flag_strings(struct net_device *netdev, u8 *data) -{ - unsigned int i; - - for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) - ethtool_puts(&data, iavf_gstrings_priv_flags[i].flag_string); -} - -/** * iavf_get_stat_strings - Get stat strings * @netdev: network interface device structure * @data: buffer for string data @@ -438,109 +398,12 @@ static void iavf_get_strings(struct net_device *netdev, u32 sset, u8 *data) case ETH_SS_STATS: iavf_get_stat_strings(netdev, data); break; - case ETH_SS_PRIV_FLAGS: - iavf_get_priv_flag_strings(netdev, data); - break; default: break; } } /** - * iavf_get_priv_flags - report device private flags - * @netdev: network interface device structure - * - * The get string set count and the string set should be matched for each - * flag returned. 
Add new strings for each flag to the iavf_gstrings_priv_flags - * array. - * - * Returns a u32 bitmap of flags. - **/ -static u32 iavf_get_priv_flags(struct net_device *netdev) -{ - struct iavf_adapter *adapter = netdev_priv(netdev); - u32 i, ret_flags = 0; - - for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) { - const struct iavf_priv_flags *priv_flags; - - priv_flags = &iavf_gstrings_priv_flags[i]; - - if (priv_flags->flag & adapter->flags) - ret_flags |= BIT(i); - } - - return ret_flags; -} - -/** - * iavf_set_priv_flags - set private flags - * @netdev: network interface device structure - * @flags: bit flags to be set - **/ -static int iavf_set_priv_flags(struct net_device *netdev, u32 flags) -{ - struct iavf_adapter *adapter = netdev_priv(netdev); - u32 orig_flags, new_flags, changed_flags; - int ret = 0; - u32 i; - - orig_flags = READ_ONCE(adapter->flags); - new_flags = orig_flags; - - for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) { - const struct iavf_priv_flags *priv_flags; - - priv_flags = &iavf_gstrings_priv_flags[i]; - - if (flags & BIT(i)) - new_flags |= priv_flags->flag; - else - new_flags &= ~(priv_flags->flag); - - if (priv_flags->read_only && - ((orig_flags ^ new_flags) & ~BIT(i))) - return -EOPNOTSUPP; - } - - /* Before we finalize any flag changes, any checks which we need to - * perform to determine if the new flags will be supported should go - * here... - */ - - /* Compare and exchange the new flags into place. If we failed, that - * is if cmpxchg returns anything but the old value, this means - * something else must have modified the flags variable since we - * copied it. We'll just punt with an error and log something in the - * message buffer. - */ - if (cmpxchg(&adapter->flags, orig_flags, new_flags) != orig_flags) { - dev_warn(&adapter->pdev->dev, - "Unable to update adapter->flags as it was modified by another thread...\n"); - return -EAGAIN; - } - - changed_flags = orig_flags ^ new_flags; - - /* Process any additional changes needed as a result of flag changes. - * The changed_flags value reflects the list of bits that were changed - * in the code above. 
- */ - - /* issue a reset to force legacy-rx change to take effect */ - if (changed_flags & IAVF_FLAG_LEGACY_RX) { - if (netif_running(netdev)) { - iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED); - ret = iavf_wait_for_reset(adapter); - if (ret) - netdev_warn(netdev, "Changing private flags timeout or interrupted waiting for reset"); - } - } - - return ret; -} - -/** * iavf_get_msglevel - Get debug message level * @netdev: network interface device structure * @@ -585,7 +448,6 @@ static void iavf_get_drvinfo(struct net_device *netdev, strscpy(drvinfo->driver, iavf_driver_name, 32); strscpy(drvinfo->fw_version, "N/A", 4); strscpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); - drvinfo->n_priv_flags = IAVF_PRIV_FLAGS_STR_LEN; } /** @@ -1995,8 +1857,6 @@ static const struct ethtool_ops iavf_ethtool_ops = { .get_strings = iavf_get_strings, .get_ethtool_stats = iavf_get_ethtool_stats, .get_sset_count = iavf_get_sset_count, - .get_priv_flags = iavf_get_priv_flags, - .set_priv_flags = iavf_set_priv_flags, .get_msglevel = iavf_get_msglevel, .set_msglevel = iavf_set_msglevel, .get_coalesce = iavf_get_coalesce, diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 166832a4213a..c6dff0963053 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ +#include <linux/net/intel/libie/rx.h> + #include "iavf.h" #include "iavf_prototype.h" /* All iavf tracepoints are defined by the include below, which must @@ -45,6 +47,8 @@ MODULE_DEVICE_TABLE(pci, iavf_pci_tbl); MODULE_ALIAS("i40evf"); MODULE_AUTHOR("Intel Corporation, <[email protected]>"); MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver"); +MODULE_IMPORT_NS(LIBETH); +MODULE_IMPORT_NS(LIBIE); MODULE_LICENSE("GPL v2"); static const struct net_device_ops iavf_netdev_ops; @@ -714,40 +718,10 @@ static void iavf_configure_tx(struct iavf_adapter *adapter) **/ static void iavf_configure_rx(struct iavf_adapter *adapter) { - unsigned int rx_buf_len = IAVF_RXBUFFER_2048; struct iavf_hw *hw = &adapter->hw; - int i; - - /* Legacy Rx will always default to a 2048 buffer size. */ -#if (PAGE_SIZE < 8192) - if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) { - struct net_device *netdev = adapter->netdev; - - /* For jumbo frames on systems with 4K pages we have to use - * an order 1 page, so we might as well increase the size - * of our Rx buffer to make better use of the available space - */ - rx_buf_len = IAVF_RXBUFFER_3072; - - /* We use a 1536 buffer size for configurations with - * standard Ethernet mtu. On x86 this gives us enough room - * for shared info and 192 bytes of padding. 
- */ - if (!IAVF_2K_TOO_SMALL_WITH_PADDING && - (netdev->mtu <= ETH_DATA_LEN)) - rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN; - } -#endif - for (i = 0; i < adapter->num_active_queues; i++) { + for (u32 i = 0; i < adapter->num_active_queues; i++) adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i); - adapter->rx_rings[i].rx_buf_len = rx_buf_len; - - if (adapter->flags & IAVF_FLAG_LEGACY_RX) - clear_ring_build_skb_enabled(&adapter->rx_rings[i]); - else - set_ring_build_skb_enabled(&adapter->rx_rings[i]); - } } /** @@ -1615,7 +1589,6 @@ static int iavf_alloc_queues(struct iavf_adapter *adapter) rx_ring = &adapter->rx_rings[i]; rx_ring->queue_index = i; rx_ring->netdev = adapter->netdev; - rx_ring->dev = &adapter->pdev->dev; rx_ring->count = adapter->rx_desc_count; rx_ring->itr_setting = IAVF_ITR_RX_DEF; } @@ -2642,9 +2615,8 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter) iavf_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 * HZ; - /* MTU range: 68 - 9710 */ netdev->min_mtu = ETH_MIN_MTU; - netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD; + netdev->max_mtu = LIBIE_MAX_MTU; if (!is_valid_ether_addr(adapter->hw.mac.addr)) { dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", @@ -3785,6 +3757,10 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, flow_rule_match_control(rule, &match); addr_type = match.key->addr_type; + + if (flow_rule_has_control_flags(match.mask->flags, + f->common.extack)) + return -EOPNOTSUPP; } if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { @@ -4324,7 +4300,7 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu) netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); if (netif_running(netdev)) { iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED); @@ -5051,7 +5027,7 @@ err_dma: * * Called when the system (VM) is entering sleep/suspend. **/ -static int __maybe_unused iavf_suspend(struct device *dev_d) +static int iavf_suspend(struct device *dev_d) { struct net_device *netdev = dev_get_drvdata(dev_d); struct iavf_adapter *adapter = netdev_priv(netdev); @@ -5079,7 +5055,7 @@ static int __maybe_unused iavf_suspend(struct device *dev_d) * * Called when the system (VM) is resumed from sleep/suspend. 
**/ -static int __maybe_unused iavf_resume(struct device *dev_d) +static int iavf_resume(struct device *dev_d) { struct pci_dev *pdev = to_pci_dev(dev_d); struct iavf_adapter *adapter; @@ -5266,14 +5242,14 @@ static void iavf_shutdown(struct pci_dev *pdev) pci_set_power_state(pdev, PCI_D3hot); } -static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume); static struct pci_driver iavf_driver = { .name = iavf_driver_name, .id_table = iavf_pci_tbl, .probe = iavf_probe, .remove = iavf_remove, - .driver.pm = &iavf_pm_ops, + .driver.pm = pm_sleep_ptr(&iavf_pm_ops), .shutdown = iavf_shutdown, }; diff --git a/drivers/net/ethernet/intel/iavf/iavf_prototype.h b/drivers/net/ethernet/intel/iavf/iavf_prototype.h index 4a48e6171405..48c3901381b4 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_prototype.h +++ b/drivers/net/ethernet/intel/iavf/iavf_prototype.h @@ -45,13 +45,6 @@ enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 seid, enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 seid, struct iavf_aqc_get_set_rss_key_data *key); -extern struct iavf_rx_ptype_decoded iavf_ptype_lookup[]; - -static inline struct iavf_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) -{ - return iavf_ptype_lookup[ptype]; -} - void iavf_vf_parse_hw_config(struct iavf_hw *hw, struct virtchnl_vf_resource *msg); enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw, diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index b71484c87a84..26b424fd6718 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -2,6 +2,7 @@ /* Copyright(c) 2013 - 2018 Intel Corporation. */ #include <linux/bitfield.h> +#include <linux/net/intel/libie/rx.h> #include <linux/prefetch.h> #include "iavf.h" @@ -184,7 +185,7 @@ void iavf_detect_recover_hung(struct iavf_vsi *vsi) * pending work. */ packets = tx_ring->stats.packets & INT_MAX; - if (tx_ring->tx_stats.prev_pkt_ctr == packets) { + if (tx_ring->prev_pkt_ctr == packets) { iavf_force_wb(vsi, tx_ring->q_vector); continue; } @@ -193,7 +194,7 @@ void iavf_detect_recover_hung(struct iavf_vsi *vsi) * to iavf_get_tx_pending() */ smp_rmb(); - tx_ring->tx_stats.prev_pkt_ctr = + tx_ring->prev_pkt_ctr = iavf_get_tx_pending(tx_ring, true) ? 
packets : -1; } } @@ -319,7 +320,7 @@ static bool iavf_clean_tx_irq(struct iavf_vsi *vsi, ((j / WB_STRIDE) == 0) && (j > 0) && !test_bit(__IAVF_VSI_DOWN, vsi->state) && (IAVF_DESC_UNUSED(tx_ring) != tx_ring->count)) - tx_ring->arm_wb = true; + tx_ring->flags |= IAVF_TXR_FLAGS_ARM_WB; } /* notify netdev of completed buffers */ @@ -674,7 +675,7 @@ int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring) tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; - tx_ring->tx_stats.prev_pkt_ctr = -1; + tx_ring->prev_pkt_ctr = -1; return 0; err: @@ -689,11 +690,8 @@ err: **/ static void iavf_clean_rx_ring(struct iavf_ring *rx_ring) { - unsigned long bi_size; - u16 i; - /* ring already cleared, nothing to do */ - if (!rx_ring->rx_bi) + if (!rx_ring->rx_fqes) return; if (rx_ring->skb) { @@ -701,41 +699,16 @@ static void iavf_clean_rx_ring(struct iavf_ring *rx_ring) rx_ring->skb = NULL; } - /* Free all the Rx ring sk_buffs */ - for (i = 0; i < rx_ring->count; i++) { - struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; + /* Free all the Rx ring buffers */ + for (u32 i = rx_ring->next_to_clean; i != rx_ring->next_to_use; ) { + const struct libeth_fqe *rx_fqes = &rx_ring->rx_fqes[i]; - if (!rx_bi->page) - continue; + page_pool_put_full_page(rx_ring->pp, rx_fqes->page, false); - /* Invalidate cache lines that may have been written to by - * device so that we avoid corrupting memory. - */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_bi->dma, - rx_bi->page_offset, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - - /* free resources associated with mapping */ - dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, - iavf_rx_pg_size(rx_ring), - DMA_FROM_DEVICE, - IAVF_RX_DMA_ATTR); - - __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias); - - rx_bi->page = NULL; - rx_bi->page_offset = 0; + if (unlikely(++i == rx_ring->count)) + i = 0; } - bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count; - memset(rx_ring->rx_bi, 0, bi_size); - - /* Zero out the descriptor ring */ - memset(rx_ring->desc, 0, rx_ring->size); - - rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; } @@ -748,15 +721,22 @@ static void iavf_clean_rx_ring(struct iavf_ring *rx_ring) **/ void iavf_free_rx_resources(struct iavf_ring *rx_ring) { + struct libeth_fq fq = { + .fqes = rx_ring->rx_fqes, + .pp = rx_ring->pp, + }; + iavf_clean_rx_ring(rx_ring); - kfree(rx_ring->rx_bi); - rx_ring->rx_bi = NULL; if (rx_ring->desc) { - dma_free_coherent(rx_ring->dev, rx_ring->size, + dma_free_coherent(rx_ring->pp->p.dev, rx_ring->size, rx_ring->desc, rx_ring->dma); rx_ring->desc = NULL; } + + libeth_rx_fq_destroy(&fq); + rx_ring->rx_fqes = NULL; + rx_ring->pp = NULL; } /** @@ -767,38 +747,46 @@ void iavf_free_rx_resources(struct iavf_ring *rx_ring) **/ int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring) { - struct device *dev = rx_ring->dev; - int bi_size; - - /* warn if we are about to overwrite the pointer */ - WARN_ON(rx_ring->rx_bi); - bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count; - rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL); - if (!rx_ring->rx_bi) - goto err; + struct libeth_fq fq = { + .count = rx_ring->count, + .buf_len = LIBIE_MAX_RX_BUF_LEN, + .nid = NUMA_NO_NODE, + }; + int ret; + + ret = libeth_rx_fq_create(&fq, &rx_ring->q_vector->napi); + if (ret) + return ret; + + rx_ring->pp = fq.pp; + rx_ring->rx_fqes = fq.fqes; + rx_ring->truesize = fq.truesize; + rx_ring->rx_buf_len = fq.buf_len; u64_stats_init(&rx_ring->syncp); /* Round up to nearest 4K */ rx_ring->size = rx_ring->count * sizeof(union 
iavf_32byte_rx_desc); rx_ring->size = ALIGN(rx_ring->size, 4096); - rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + rx_ring->desc = dma_alloc_coherent(fq.pp->p.dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); if (!rx_ring->desc) { - dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", + dev_info(fq.pp->p.dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", rx_ring->size); goto err; } - rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; return 0; + err: - kfree(rx_ring->rx_bi); - rx_ring->rx_bi = NULL; + libeth_rx_fq_destroy(&fq); + rx_ring->rx_fqes = NULL; + rx_ring->pp = NULL; + return -ENOMEM; } @@ -811,9 +799,6 @@ static void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val) { rx_ring->next_to_use = val; - /* update next to alloc since we have filled the ring */ - rx_ring->next_to_alloc = val; - /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, @@ -824,69 +809,6 @@ static void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val) } /** - * iavf_rx_offset - Return expected offset into page to access data - * @rx_ring: Ring we are requesting offset of - * - * Returns the offset value for ring into the data buffer. - */ -static unsigned int iavf_rx_offset(struct iavf_ring *rx_ring) -{ - return ring_uses_build_skb(rx_ring) ? IAVF_SKB_PAD : 0; -} - -/** - * iavf_alloc_mapped_page - recycle or make a new page - * @rx_ring: ring to use - * @bi: rx_buffer struct to modify - * - * Returns true if the page was successfully allocated or - * reused. - **/ -static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring, - struct iavf_rx_buffer *bi) -{ - struct page *page = bi->page; - dma_addr_t dma; - - /* since we are recycling buffers we should seldom need to alloc */ - if (likely(page)) { - rx_ring->rx_stats.page_reuse_count++; - return true; - } - - /* alloc new page for storage */ - page = dev_alloc_pages(iavf_rx_pg_order(rx_ring)); - if (unlikely(!page)) { - rx_ring->rx_stats.alloc_page_failed++; - return false; - } - - /* map page for use */ - dma = dma_map_page_attrs(rx_ring->dev, page, 0, - iavf_rx_pg_size(rx_ring), - DMA_FROM_DEVICE, - IAVF_RX_DMA_ATTR); - - /* if mapping failed free memory back to system since - * there isn't much point in holding memory we can't use - */ - if (dma_mapping_error(rx_ring->dev, dma)) { - __free_pages(page, iavf_rx_pg_order(rx_ring)); - rx_ring->rx_stats.alloc_page_failed++; - return false; - } - - bi->dma = dma; - bi->page = page; - bi->page_offset = iavf_rx_offset(rx_ring); - - /* initialize pagecnt_bias to 1 representing we fully own page */ - bi->pagecnt_bias = 1; - - return true; -} - -/** * iavf_receive_skb - Send a completed packet up the stack * @rx_ring: rx ring in play * @skb: packet to send up @@ -916,38 +838,37 @@ static void iavf_receive_skb(struct iavf_ring *rx_ring, **/ bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count) { + const struct libeth_fq_fp fq = { + .pp = rx_ring->pp, + .fqes = rx_ring->rx_fqes, + .truesize = rx_ring->truesize, + .count = rx_ring->count, + }; u16 ntu = rx_ring->next_to_use; union iavf_rx_desc *rx_desc; - struct iavf_rx_buffer *bi; /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) return false; rx_desc = IAVF_RX_DESC(rx_ring, ntu); - bi = &rx_ring->rx_bi[ntu]; do { - if (!iavf_alloc_mapped_page(rx_ring, bi)) - goto no_buffers; + dma_addr_t addr; - /* sync the buffer for 
use by the device */ - dma_sync_single_range_for_device(rx_ring->dev, bi->dma, - bi->page_offset, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); + addr = libeth_rx_alloc(&fq, ntu); + if (addr == DMA_MAPPING_ERROR) + goto no_buffers; /* Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. */ - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + rx_desc->read.pkt_addr = cpu_to_le64(addr); rx_desc++; - bi++; ntu++; if (unlikely(ntu == rx_ring->count)) { rx_desc = IAVF_RX_DESC(rx_ring, 0); - bi = rx_ring->rx_bi; ntu = 0; } @@ -966,6 +887,8 @@ no_buffers: if (rx_ring->next_to_use != ntu) iavf_release_rx_desc(rx_ring, ntu); + rx_ring->rx_stats.alloc_page_failed++; + /* make sure to come back via polling to try again after * allocation failure */ @@ -982,38 +905,30 @@ static void iavf_rx_checksum(struct iavf_vsi *vsi, struct sk_buff *skb, union iavf_rx_desc *rx_desc) { - struct iavf_rx_ptype_decoded decoded; + struct libeth_rx_pt decoded; u32 rx_error, rx_status; bool ipv4, ipv6; u8 ptype; u64 qword; - qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword); - rx_error = FIELD_GET(IAVF_RXD_QW1_ERROR_MASK, qword); - rx_status = FIELD_GET(IAVF_RXD_QW1_STATUS_MASK, qword); - decoded = decode_rx_desc_ptype(ptype); - skb->ip_summed = CHECKSUM_NONE; - skb_checksum_none_assert(skb); + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword); - /* Rx csum enabled and ip headers found? */ - if (!(vsi->netdev->features & NETIF_F_RXCSUM)) + decoded = libie_rx_pt_parse(ptype); + if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded)) return; + rx_error = FIELD_GET(IAVF_RXD_QW1_ERROR_MASK, qword); + rx_status = FIELD_GET(IAVF_RXD_QW1_STATUS_MASK, qword); + /* did the hardware decode the packet and checksum? 
*/ if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT))) return; - /* both known and outer_ip must be set for the below code to work */ - if (!(decoded.known && decoded.outer_ip)) - return; - - ipv4 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) && - (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4); - ipv6 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) && - (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6); + ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4; + ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6; if (ipv4 && (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) | @@ -1037,17 +952,7 @@ static void iavf_rx_checksum(struct iavf_vsi *vsi, if (rx_error & BIT(IAVF_RX_DESC_ERROR_PPRS_SHIFT)) return; - /* Only report checksum unnecessary for TCP, UDP, or SCTP */ - switch (decoded.inner_prot) { - case IAVF_RX_PTYPE_INNER_PROT_TCP: - case IAVF_RX_PTYPE_INNER_PROT_UDP: - case IAVF_RX_PTYPE_INNER_PROT_SCTP: - skb->ip_summed = CHECKSUM_UNNECESSARY; - fallthrough; - default: - break; - } - + skb->ip_summed = CHECKSUM_UNNECESSARY; return; checksum_fail: @@ -1055,29 +960,6 @@ checksum_fail: } /** - * iavf_ptype_to_htype - get a hash type - * @ptype: the ptype value from the descriptor - * - * Returns a hash type to be used by skb_set_hash - **/ -static int iavf_ptype_to_htype(u8 ptype) -{ - struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); - - if (!decoded.known) - return PKT_HASH_TYPE_NONE; - - if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP && - decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4) - return PKT_HASH_TYPE_L4; - else if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP && - decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3) - return PKT_HASH_TYPE_L3; - else - return PKT_HASH_TYPE_L2; -} - -/** * iavf_rx_hash - set the hash value in the skb * @ring: descriptor ring * @rx_desc: specific descriptor @@ -1089,17 +971,19 @@ static void iavf_rx_hash(struct iavf_ring *ring, struct sk_buff *skb, u8 rx_ptype) { + struct libeth_rx_pt decoded; u32 hash; const __le64 rss_mask = cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH << IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT); - if (!(ring->netdev->features & NETIF_F_RXHASH)) + decoded = libie_rx_pt_parse(rx_ptype); + if (!libeth_rx_pt_has_hash(ring->netdev, decoded)) return; if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) { hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); - skb_set_hash(skb, hash, iavf_ptype_to_htype(rx_ptype)); + libeth_rx_pt_set_hash(skb, hash, decoded); } } @@ -1152,95 +1036,9 @@ static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb) } /** - * iavf_reuse_rx_page - page flip buffer and store it back on the ring - * @rx_ring: rx descriptor ring to store buffers on - * @old_buff: donor buffer to have page reused - * - * Synchronizes page for reuse by the adapter - **/ -static void iavf_reuse_rx_page(struct iavf_ring *rx_ring, - struct iavf_rx_buffer *old_buff) -{ - struct iavf_rx_buffer *new_buff; - u16 nta = rx_ring->next_to_alloc; - - new_buff = &rx_ring->rx_bi[nta]; - - /* update, and store next to alloc */ - nta++; - rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0; - - /* transfer page from old buffer to new buffer */ - new_buff->dma = old_buff->dma; - new_buff->page = old_buff->page; - new_buff->page_offset = old_buff->page_offset; - new_buff->pagecnt_bias = old_buff->pagecnt_bias; -} - -/** - * iavf_can_reuse_rx_page - Determine if this page can be reused by - * the adapter for another receive - * - * @rx_buffer: buffer containing the page - * - * If page is reusable, rx_buffer->page_offset is adjusted to point to - * an unused region in the page. - * - * For small pages, @truesize will be a constant value, half the size - * of the memory at page. We'll attempt to alternate between high and - * low halves of the page, with one half ready for use by the hardware - * and the other half being consumed by the stack. We use the page - * ref count to determine whether the stack has finished consuming the - * portion of this page that was passed up with a previous packet. If - * the page ref count is >1, we'll assume the "other" half page is - * still busy, and this page cannot be reused. - * - * For larger pages, @truesize will be the actual space used by the - * received packet (adjusted upward to an even multiple of the cache - * line size). This will advance through the page by the amount - * actually consumed by the received packets while there is still - * space for a buffer. Each region of larger pages will be used at - * most once, after which the page will not be reused. - * - * In either case, if the page is reusable its refcount is increased. - **/ -static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer) -{ - unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; - struct page *page = rx_buffer->page; - - /* Is any reuse possible? */ - if (!dev_page_is_reusable(page)) - return false; - -#if (PAGE_SIZE < 8192) - /* if we are only owner of page we can reuse it */ - if (unlikely((page_count(page) - pagecnt_bias) > 1)) - return false; -#else -#define IAVF_LAST_OFFSET \ - (SKB_WITH_OVERHEAD(PAGE_SIZE) - IAVF_RXBUFFER_2048) - if (rx_buffer->page_offset > IAVF_LAST_OFFSET) - return false; -#endif - - /* If we have drained the page fragment pool we need to update - * the pagecnt_bias and page count so that we fully restock the - * number of references the driver holds. - */ - if (unlikely(!pagecnt_bias)) { - page_ref_add(page, USHRT_MAX); - rx_buffer->pagecnt_bias = USHRT_MAX; - } - - return true; -} - -/** * iavf_add_rx_frag - Add contents of Rx buffer to sk_buff - * @rx_ring: rx descriptor ring to transact packets on - * @rx_buffer: buffer containing page to add * @skb: sk_buff to place the data into + * @rx_buffer: buffer containing page to add * @size: packet length from rx_desc * * This function will add the data contained in rx_buffer->page to the skb. @@ -1248,206 +1046,50 @@ static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer) * * The function will then update the page offset. 
**/ -static void iavf_add_rx_frag(struct iavf_ring *rx_ring, - struct iavf_rx_buffer *rx_buffer, - struct sk_buff *skb, +static void iavf_add_rx_frag(struct sk_buff *skb, + const struct libeth_fqe *rx_buffer, unsigned int size) { -#if (PAGE_SIZE < 8192) - unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; -#else - unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring)); -#endif - - if (!size) - return; + u32 hr = rx_buffer->page->pp->p.offset; skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, - rx_buffer->page_offset, size, truesize); - - /* page is being used so we must update the page offset */ -#if (PAGE_SIZE < 8192) - rx_buffer->page_offset ^= truesize; -#else - rx_buffer->page_offset += truesize; -#endif -} - -/** - * iavf_get_rx_buffer - Fetch Rx buffer and synchronize data for use - * @rx_ring: rx descriptor ring to transact packets on - * @size: size of buffer to add to skb - * - * This function will pull an Rx buffer from the ring and synchronize it - * for use by the CPU. - */ -static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring, - const unsigned int size) -{ - struct iavf_rx_buffer *rx_buffer; - - rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; - prefetchw(rx_buffer->page); - if (!size) - return rx_buffer; - - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_buffer->dma, - rx_buffer->page_offset, - size, - DMA_FROM_DEVICE); - - /* We have pulled a buffer for use, so decrement pagecnt_bias */ - rx_buffer->pagecnt_bias--; - - return rx_buffer; -} - -/** - * iavf_construct_skb - Allocate skb and populate it - * @rx_ring: rx descriptor ring to transact packets on - * @rx_buffer: rx buffer to pull data from - * @size: size of buffer to add to skb - * - * This function allocates an skb. It then populates it with the page - * data from the current receive descriptor, taking care to set up the - * skb correctly. 
- */ -static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring, - struct iavf_rx_buffer *rx_buffer, - unsigned int size) -{ - void *va; -#if (PAGE_SIZE < 8192) - unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; -#else - unsigned int truesize = SKB_DATA_ALIGN(size); -#endif - unsigned int headlen; - struct sk_buff *skb; - - if (!rx_buffer) - return NULL; - /* prefetch first cache line of first page */ - va = page_address(rx_buffer->page) + rx_buffer->page_offset; - net_prefetch(va); - - /* allocate a skb to store the frags */ - skb = __napi_alloc_skb(&rx_ring->q_vector->napi, - IAVF_RX_HDR_SIZE, - GFP_ATOMIC | __GFP_NOWARN); - if (unlikely(!skb)) - return NULL; - - /* Determine available headroom for copy */ - headlen = size; - if (headlen > IAVF_RX_HDR_SIZE) - headlen = eth_get_headlen(skb->dev, va, IAVF_RX_HDR_SIZE); - - /* align pull length to size of long to optimize memcpy performance */ - memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); - - /* update all of the pointers */ - size -= headlen; - if (size) { - skb_add_rx_frag(skb, 0, rx_buffer->page, - rx_buffer->page_offset + headlen, - size, truesize); - - /* buffer is used by skb, update page_offset */ -#if (PAGE_SIZE < 8192) - rx_buffer->page_offset ^= truesize; -#else - rx_buffer->page_offset += truesize; -#endif - } else { - /* buffer is unused, reset bias back to rx_buffer */ - rx_buffer->pagecnt_bias++; - } - - return skb; + rx_buffer->offset + hr, size, rx_buffer->truesize); } /** * iavf_build_skb - Build skb around an existing buffer - * @rx_ring: Rx descriptor ring to transact packets on * @rx_buffer: Rx buffer to pull data from * @size: size of buffer to add to skb * * This function builds an skb around an existing Rx buffer, taking care * to set up the skb correctly and avoid any memcpy overhead. */ -static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring, - struct iavf_rx_buffer *rx_buffer, +static struct sk_buff *iavf_build_skb(const struct libeth_fqe *rx_buffer, unsigned int size) { - void *va; -#if (PAGE_SIZE < 8192) - unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; -#else - unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + - SKB_DATA_ALIGN(IAVF_SKB_PAD + size); -#endif + u32 hr = rx_buffer->page->pp->p.offset; struct sk_buff *skb; + void *va; - if (!rx_buffer || !size) - return NULL; /* prefetch first cache line of first page */ - va = page_address(rx_buffer->page) + rx_buffer->page_offset; - net_prefetch(va); + va = page_address(rx_buffer->page) + rx_buffer->offset; + net_prefetch(va + hr); /* build an skb around the page buffer */ - skb = napi_build_skb(va - IAVF_SKB_PAD, truesize); + skb = napi_build_skb(va, rx_buffer->truesize); if (unlikely(!skb)) return NULL; + skb_mark_for_recycle(skb); + /* update pointers within the skb to store the data */ - skb_reserve(skb, IAVF_SKB_PAD); + skb_reserve(skb, hr); __skb_put(skb, size); - /* buffer is used by skb, update page_offset */ -#if (PAGE_SIZE < 8192) - rx_buffer->page_offset ^= truesize; -#else - rx_buffer->page_offset += truesize; -#endif - return skb; } /** - * iavf_put_rx_buffer - Clean up used buffer and either recycle or free - * @rx_ring: rx descriptor ring to transact packets on - * @rx_buffer: rx buffer to pull data from - * - * This function will clean up the contents of the rx_buffer. It will - * either recycle the buffer or unmap it and free the associated resources. 
- */ -static void iavf_put_rx_buffer(struct iavf_ring *rx_ring, - struct iavf_rx_buffer *rx_buffer) -{ - if (!rx_buffer) - return; - - if (iavf_can_reuse_rx_page(rx_buffer)) { - /* hand second half of page back to the ring */ - iavf_reuse_rx_page(rx_ring, rx_buffer); - rx_ring->rx_stats.page_reuse_count++; - } else { - /* we are not reusing the buffer so unmap it */ - dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, - iavf_rx_pg_size(rx_ring), - DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR); - __page_frag_cache_drain(rx_buffer->page, - rx_buffer->pagecnt_bias); - } - - /* clear contents of buffer_info */ - rx_buffer->page = NULL; -} - -/** * iavf_is_non_eop - process handling of non-EOP buffers * @rx_ring: Rx ring being processed * @rx_desc: Rx descriptor for current buffer @@ -1500,7 +1142,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) bool failure = false; while (likely(total_rx_packets < (unsigned int)budget)) { - struct iavf_rx_buffer *rx_buffer; + struct libeth_fqe *rx_buffer; union iavf_rx_desc *rx_desc; unsigned int size; u16 vlan_tag = 0; @@ -1535,28 +1177,27 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) size = FIELD_GET(IAVF_RXD_QW1_LENGTH_PBUF_MASK, qword); iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb); - rx_buffer = iavf_get_rx_buffer(rx_ring, size); + + rx_buffer = &rx_ring->rx_fqes[rx_ring->next_to_clean]; + if (!libeth_rx_sync_for_cpu(rx_buffer, size)) + goto skip_data; /* retrieve a buffer from the ring */ if (skb) - iavf_add_rx_frag(rx_ring, rx_buffer, skb, size); - else if (ring_uses_build_skb(rx_ring)) - skb = iavf_build_skb(rx_ring, rx_buffer, size); + iavf_add_rx_frag(skb, rx_buffer, size); else - skb = iavf_construct_skb(rx_ring, rx_buffer, size); + skb = iavf_build_skb(rx_buffer, size); /* exit if we failed to retrieve a buffer */ if (!skb) { rx_ring->rx_stats.alloc_buff_failed++; - if (rx_buffer && size) - rx_buffer->pagecnt_bias++; break; } - iavf_put_rx_buffer(rx_ring, rx_buffer); +skip_data: cleaned_count++; - if (iavf_is_non_eop(rx_ring, rx_desc, skb)) + if (iavf_is_non_eop(rx_ring, rx_desc, skb) || unlikely(!skb)) continue; /* ERR_MASK will only have valid bits if EOP set, and @@ -1743,8 +1384,8 @@ int iavf_napi_poll(struct napi_struct *napi, int budget) clean_complete = false; continue; } - arm_wb |= ring->arm_wb; - ring->arm_wb = false; + arm_wb |= !!(ring->flags & IAVF_TXR_FLAGS_ARM_WB); + ring->flags &= ~IAVF_TXR_FLAGS_ARM_WB; } /* Handle case where we are called by netpoll with a budget of 0 */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h index 10ba36602c0c..d7b5587aeb8e 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h @@ -80,79 +80,8 @@ enum iavf_dyn_idx_t { BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) -/* Supported Rx Buffer Sizes (a multiple of 128) */ -#define IAVF_RXBUFFER_256 256 -#define IAVF_RXBUFFER_1536 1536 /* 128B aligned standard Ethernet frame */ -#define IAVF_RXBUFFER_2048 2048 -#define IAVF_RXBUFFER_3072 3072 /* Used for large frames w/ padding */ -#define IAVF_MAX_RXBUFFER 9728 /* largest size for single descriptor */ - -/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we - * reserve 2 more, and skb_shared_info adds an additional 384 bytes more, - * this adds up to 512 bytes of extra data meaning the smallest allocation - * we could have is 1K. - * i.e. 
RXBUFFER_256 --> 960 byte skb (size-1024 slab) - * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab) - */ -#define IAVF_RX_HDR_SIZE IAVF_RXBUFFER_256 -#define IAVF_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)) #define iavf_rx_desc iavf_32byte_rx_desc -#define IAVF_RX_DMA_ATTR \ - (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) - -/* Attempt to maximize the headroom available for incoming frames. We - * use a 2K buffer for receives and need 1536/1534 to store the data for - * the frame. This leaves us with 512 bytes of room. From that we need - * to deduct the space needed for the shared info and the padding needed - * to IP align the frame. - * - * Note: For cache line sizes 256 or larger this value is going to end - * up negative. In these cases we should fall back to the legacy - * receive path. - */ -#if (PAGE_SIZE < 8192) -#define IAVF_2K_TOO_SMALL_WITH_PADDING \ -((NET_SKB_PAD + IAVF_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IAVF_RXBUFFER_2048)) - -static inline int iavf_compute_pad(int rx_buf_len) -{ - int page_size, pad_size; - - page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2); - pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len; - - return pad_size; -} - -static inline int iavf_skb_pad(void) -{ - int rx_buf_len; - - /* If a 2K buffer cannot handle a standard Ethernet frame then - * optimize padding for a 3K buffer instead of a 1.5K buffer. - * - * For a 3K buffer we need to add enough padding to allow for - * tailroom due to NET_IP_ALIGN possibly shifting us out of - * cache-line alignment. - */ - if (IAVF_2K_TOO_SMALL_WITH_PADDING) - rx_buf_len = IAVF_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN); - else - rx_buf_len = IAVF_RXBUFFER_1536; - - /* if needed make room for NET_IP_ALIGN */ - rx_buf_len -= NET_IP_ALIGN; - - return iavf_compute_pad(rx_buf_len); -} - -#define IAVF_SKB_PAD iavf_skb_pad() -#else -#define IAVF_2K_TOO_SMALL_WITH_PADDING false -#define IAVF_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) -#endif - /** * iavf_test_staterr - tests bits in Rx descriptor status and error fields * @rx_desc: pointer to receive descriptor (in le64 format) @@ -271,17 +200,6 @@ struct iavf_tx_buffer { u32 tx_flags; }; -struct iavf_rx_buffer { - dma_addr_t dma; - struct page *page; -#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) - __u32 page_offset; -#else - __u16 page_offset; -#endif - __u16 pagecnt_bias; -}; - struct iavf_queue_stats { u64 packets; u64 bytes; @@ -293,7 +211,6 @@ struct iavf_tx_queue_stats { u64 tx_done_old; u64 tx_linearize; u64 tx_force_wb; - int prev_pkt_ctr; u64 tx_lost_interrupt; }; @@ -301,14 +218,6 @@ struct iavf_rx_queue_stats { u64 non_eop_descs; u64 alloc_page_failed; u64 alloc_buff_failed; - u64 page_reuse_count; - u64 realloc_count; -}; - -enum iavf_ring_state_t { - __IAVF_TX_FDIR_INIT_DONE, - __IAVF_TX_XPS_INIT_DONE, - __IAVF_RING_STATE_NBITS /* must be last */ }; /* some useful defines for virtchannel interface, which @@ -326,16 +235,19 @@ enum iavf_ring_state_t { struct iavf_ring { struct iavf_ring *next; /* pointer to next ring in q_vector */ void *desc; /* Descriptor ring memory */ - struct device *dev; /* Used for DMA mapping */ + union { + struct page_pool *pp; /* Used on Rx for buffer management */ + struct device *dev; /* Used on Tx for DMA mapping */ + }; struct net_device *netdev; /* netdev ring maps to */ union { + struct libeth_fqe *rx_fqes; struct iavf_tx_buffer *tx_bi; - struct iavf_rx_buffer *rx_bi; }; - DECLARE_BITMAP(state, __IAVF_RING_STATE_NBITS); - u16 queue_index; /* Queue number of ring */ - u8 dcb_tc; /* Traffic class of ring */ u8 
__iomem *tail; + u32 truesize; + + u16 queue_index; /* Queue number of ring */ /* high bit set means dynamic, use accessors routines to read/write. * hardware only supports 2us resolution for the ITR registers. @@ -345,23 +257,15 @@ struct iavf_ring { u16 itr_setting; u16 count; /* Number of descriptors */ - u16 reg_idx; /* HW register index of the ring */ - u16 rx_buf_len; /* used in interrupt processing */ u16 next_to_use; u16 next_to_clean; - u8 atr_sample_rate; - u8 atr_count; - - bool ring_active; /* is ring online or not */ - bool arm_wb; /* do something to arm write back */ - u8 packet_stride; - u16 flags; #define IAVF_TXR_FLAGS_WB_ON_ITR BIT(0) -#define IAVF_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1) +#define IAVF_TXR_FLAGS_ARM_WB BIT(1) +/* BIT(2) is free */ #define IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(3) #define IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2 BIT(4) #define IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2 BIT(5) @@ -374,6 +278,7 @@ struct iavf_ring { struct iavf_rx_queue_stats rx_stats; }; + int prev_pkt_ctr; /* For Tx stall detection */ unsigned int size; /* length of descriptor ring in bytes */ dma_addr_t dma; /* physical address of ring */ @@ -381,7 +286,6 @@ struct iavf_ring { struct iavf_q_vector *q_vector; /* Backreference to associated vector */ struct rcu_head rcu; /* to avoid race on free */ - u16 next_to_alloc; struct sk_buff *skb; /* When iavf_clean_rx_ring_irq() must * return before it sees the EOP for * the current packet, we save that skb @@ -390,22 +294,9 @@ struct iavf_ring { * iavf_clean_rx_ring_irq() is called * for this ring. */ -} ____cacheline_internodealigned_in_smp; - -static inline bool ring_uses_build_skb(struct iavf_ring *ring) -{ - return !!(ring->flags & IAVF_RXR_FLAGS_BUILD_SKB_ENABLED); -} -static inline void set_ring_build_skb_enabled(struct iavf_ring *ring) -{ - ring->flags |= IAVF_RXR_FLAGS_BUILD_SKB_ENABLED; -} - -static inline void clear_ring_build_skb_enabled(struct iavf_ring *ring) -{ - ring->flags &= ~IAVF_RXR_FLAGS_BUILD_SKB_ENABLED; -} + u32 rx_buf_len; +} ____cacheline_internodealigned_in_smp; #define IAVF_ITR_ADAPTIVE_MIN_INC 0x0002 #define IAVF_ITR_ADAPTIVE_MIN_USECS 0x0002 @@ -428,17 +319,6 @@ struct iavf_ring_container { #define iavf_for_each_ring(pos, head) \ for (pos = (head).ring; pos != NULL; pos = pos->next) -static inline unsigned int iavf_rx_pg_order(struct iavf_ring *ring) -{ -#if (PAGE_SIZE < 8192) - if (ring->rx_buf_len > (PAGE_SIZE / 2)) - return 1; -#endif - return 0; -} - -#define iavf_rx_pg_size(_ring) (PAGE_SIZE << iavf_rx_pg_order(_ring)) - bool iavf_alloc_rx_buffers(struct iavf_ring *rxr, u16 cleaned_count); netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev); int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring); diff --git a/drivers/net/ethernet/intel/iavf/iavf_type.h b/drivers/net/ethernet/intel/iavf/iavf_type.h index 2b6a207fa441..f6b09e57abce 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_type.h +++ b/drivers/net/ethernet/intel/iavf/iavf_type.h @@ -10,8 +10,6 @@ #include "iavf_adminq.h" #include "iavf_devids.h" -#define IAVF_RXQ_CTX_DBUFF_SHIFT 7 - /* IAVF_MASK is a macro used on 32 bit registers */ #define IAVF_MASK(mask, shift) ((u32)(mask) << (shift)) @@ -327,94 +325,6 @@ enum iavf_rx_desc_error_l3l4e_fcoe_masks { #define IAVF_RXD_QW1_PTYPE_SHIFT 30 #define IAVF_RXD_QW1_PTYPE_MASK (0xFFULL << IAVF_RXD_QW1_PTYPE_SHIFT) -/* Packet type non-ip values */ -enum iavf_rx_l2_ptype { - IAVF_RX_PTYPE_L2_RESERVED = 0, - IAVF_RX_PTYPE_L2_MAC_PAY2 = 1, - IAVF_RX_PTYPE_L2_TIMESYNC_PAY2 = 2, - 
IAVF_RX_PTYPE_L2_FIP_PAY2 = 3, - IAVF_RX_PTYPE_L2_OUI_PAY2 = 4, - IAVF_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, - IAVF_RX_PTYPE_L2_LLDP_PAY2 = 6, - IAVF_RX_PTYPE_L2_ECP_PAY2 = 7, - IAVF_RX_PTYPE_L2_EVB_PAY2 = 8, - IAVF_RX_PTYPE_L2_QCN_PAY2 = 9, - IAVF_RX_PTYPE_L2_EAPOL_PAY2 = 10, - IAVF_RX_PTYPE_L2_ARP = 11, - IAVF_RX_PTYPE_L2_FCOE_PAY3 = 12, - IAVF_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13, - IAVF_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14, - IAVF_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15, - IAVF_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16, - IAVF_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17, - IAVF_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18, - IAVF_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19, - IAVF_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20, - IAVF_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21, - IAVF_RX_PTYPE_GRENAT4_MAC_PAY3 = 58, - IAVF_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87, - IAVF_RX_PTYPE_GRENAT6_MAC_PAY3 = 124, - IAVF_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153 -}; - -struct iavf_rx_ptype_decoded { - u32 known:1; - u32 outer_ip:1; - u32 outer_ip_ver:1; - u32 outer_frag:1; - u32 tunnel_type:3; - u32 tunnel_end_prot:2; - u32 tunnel_end_frag:1; - u32 inner_prot:4; - u32 payload_layer:3; -}; - -enum iavf_rx_ptype_outer_ip { - IAVF_RX_PTYPE_OUTER_L2 = 0, - IAVF_RX_PTYPE_OUTER_IP = 1 -}; - -enum iavf_rx_ptype_outer_ip_ver { - IAVF_RX_PTYPE_OUTER_NONE = 0, - IAVF_RX_PTYPE_OUTER_IPV4 = 0, - IAVF_RX_PTYPE_OUTER_IPV6 = 1 -}; - -enum iavf_rx_ptype_outer_fragmented { - IAVF_RX_PTYPE_NOT_FRAG = 0, - IAVF_RX_PTYPE_FRAG = 1 -}; - -enum iavf_rx_ptype_tunnel_type { - IAVF_RX_PTYPE_TUNNEL_NONE = 0, - IAVF_RX_PTYPE_TUNNEL_IP_IP = 1, - IAVF_RX_PTYPE_TUNNEL_IP_GRENAT = 2, - IAVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, - IAVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, -}; - -enum iavf_rx_ptype_tunnel_end_prot { - IAVF_RX_PTYPE_TUNNEL_END_NONE = 0, - IAVF_RX_PTYPE_TUNNEL_END_IPV4 = 1, - IAVF_RX_PTYPE_TUNNEL_END_IPV6 = 2, -}; - -enum iavf_rx_ptype_inner_prot { - IAVF_RX_PTYPE_INNER_PROT_NONE = 0, - IAVF_RX_PTYPE_INNER_PROT_UDP = 1, - IAVF_RX_PTYPE_INNER_PROT_TCP = 2, - IAVF_RX_PTYPE_INNER_PROT_SCTP = 3, - IAVF_RX_PTYPE_INNER_PROT_ICMP = 4, - IAVF_RX_PTYPE_INNER_PROT_TIMESYNC = 5 -}; - -enum iavf_rx_ptype_payload_layer { - IAVF_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, - IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, - IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, - IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, -}; - #define IAVF_RXD_QW1_LENGTH_PBUF_SHIFT 38 #define IAVF_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \ IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 22f2df7c460b..1e543f6a7c30 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ +#include <linux/net/intel/libie/rx.h> + #include "iavf.h" #include "iavf_prototype.h" @@ -268,13 +270,13 @@ int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter) void iavf_configure_queues(struct iavf_adapter *adapter) { struct virtchnl_vsi_queue_config_info *vqci; - int i, max_frame = adapter->vf_res->max_mtu; int pairs = adapter->num_active_queues; struct virtchnl_queue_pair_info *vqpi; + u32 i, max_frame; size_t len; - if (max_frame > IAVF_MAX_RXBUFFER || !max_frame) - max_frame = IAVF_MAX_RXBUFFER; + max_frame = LIBIE_MAX_RX_FRM_LEN(adapter->rx_rings->pp->p.offset); + max_frame = min_not_zero(adapter->vf_res->max_mtu, max_frame); if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -288,11 +290,6 @@ void iavf_configure_queues(struct iavf_adapter *adapter) if (!vqci) return; - /* Limit maximum frame size when jumbo frames is not enabled */ - if (!(adapter->flags & IAVF_FLAG_LEGACY_RX) && - (adapter->netdev->mtu <= ETH_DATA_LEN)) - max_frame = IAVF_RXBUFFER_1536 - NET_IP_ALIGN; - vqci->vsi_id = adapter->vsi_res->vsi_id; vqci->num_queue_pairs = pairs; vqpi = vqci->qpair; @@ -309,9 +306,7 @@ void iavf_configure_queues(struct iavf_adapter *adapter) vqpi->rxq.ring_len = adapter->rx_rings[i].count; vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma; vqpi->rxq.max_pkt_size = max_frame; - vqpi->rxq.databuffer_size = - ALIGN(adapter->rx_rings[i].rx_buf_len, - BIT_ULL(IAVF_RXQ_CTX_DBUFF_SHIFT)); + vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len; if (CRC_OFFLOAD_ALLOWED(adapter)) vqpi->rxq.crc_disable = !!(adapter->netdev->features & NETIF_F_RXFCS); diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index cddd82d4ca0f..03500e28ac99 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -5,6 +5,7 @@ # Makefile for the Intel(R) Ethernet Connection E800 Series Linux Driver # +subdir-ccflags-y += -I$(src) obj-$(CONFIG_ICE) += ice.o ice-y := ice_main.o \ @@ -28,7 +29,8 @@ ice-y := ice_main.o \ ice_flex_pipe.o \ ice_flow.o \ ice_idc.o \ - ice_devlink.o \ + devlink/devlink.o \ + devlink/devlink_port.o \ ice_ddp.o \ ice_fw_update.o \ ice_lag.o \ @@ -36,7 +38,8 @@ ice-y := ice_main.o \ ice_repr.o \ ice_tc_lib.o \ ice_fwlog.o \ - ice_debugfs.o + ice_debugfs.o \ + ice_adapter.o ice-$(CONFIG_PCI_IOV) += \ ice_sriov.o \ ice_virtchnl.o \ diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c index b516e42b41f0..c4b69655cdf5 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c @@ -5,13 +5,11 @@ #include "ice.h" #include "ice_lib.h" -#include "ice_devlink.h" +#include "devlink.h" #include "ice_eswitch.h" #include "ice_fw_update.h" #include "ice_dcb_lib.h" -static int ice_active_port_option = -1; - /* context for devlink info version reporting */ struct ice_info_ctx { char buf[128]; @@ -478,17 +476,17 @@ ice_devlink_reload_down(struct devlink *devlink, bool netns_change, case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: if (ice_is_eswitch_mode_switchdev(pf)) { NL_SET_ERR_MSG_MOD(extack, - "Go to legacy mode before doing reinit\n"); + "Go to legacy mode before doing reinit"); return -EOPNOTSUPP; } if (ice_is_adq_active(pf)) { NL_SET_ERR_MSG_MOD(extack, - "Turn off ADQ before doing reinit\n"); + "Turn off ADQ before doing reinit"); return -EOPNOTSUPP; } if (ice_has_vfs(pf)) { NL_SET_ERR_MSG_MOD(extack, - "Remove all VFs before doing reinit\n"); + 
"Remove all VFs before doing reinit"); return -EOPNOTSUPP; } ice_devlink_reinit_down(pf); @@ -526,248 +524,153 @@ ice_devlink_reload_empr_finish(struct ice_pf *pf, } /** - * ice_devlink_port_opt_speed_str - convert speed to a string - * @speed: speed value - */ -static const char *ice_devlink_port_opt_speed_str(u8 speed) -{ - switch (speed & ICE_AQC_PORT_OPT_MAX_LANE_M) { - case ICE_AQC_PORT_OPT_MAX_LANE_100M: - return "0.1"; - case ICE_AQC_PORT_OPT_MAX_LANE_1G: - return "1"; - case ICE_AQC_PORT_OPT_MAX_LANE_2500M: - return "2.5"; - case ICE_AQC_PORT_OPT_MAX_LANE_5G: - return "5"; - case ICE_AQC_PORT_OPT_MAX_LANE_10G: - return "10"; - case ICE_AQC_PORT_OPT_MAX_LANE_25G: - return "25"; - case ICE_AQC_PORT_OPT_MAX_LANE_50G: - return "50"; - case ICE_AQC_PORT_OPT_MAX_LANE_100G: - return "100"; - } - - return "-"; -} - -#define ICE_PORT_OPT_DESC_LEN 50 -/** - * ice_devlink_port_options_print - Print available port split options - * @pf: the PF to print split port options + * ice_get_tx_topo_user_sel - Read user's choice from flash + * @pf: pointer to pf structure + * @layers: value read from flash will be saved here * - * Prints a table with available port split options and max port speeds + * Reads user's preference for Tx Scheduler Topology Tree from PFA TLV. + * + * Return: zero when read was successful, negative values otherwise. */ -static void ice_devlink_port_options_print(struct ice_pf *pf) +static int ice_get_tx_topo_user_sel(struct ice_pf *pf, uint8_t *layers) { - u8 i, j, options_count, cnt, speed, pending_idx, active_idx; - struct ice_aqc_get_port_options_elem *options, *opt; - struct device *dev = ice_pf_to_dev(pf); - bool active_valid, pending_valid; - char desc[ICE_PORT_OPT_DESC_LEN]; - const char *str; - int status; + struct ice_aqc_nvm_tx_topo_user_sel usr_sel = {}; + struct ice_hw *hw = &pf->hw; + int err; - options = kcalloc(ICE_AQC_PORT_OPT_MAX * ICE_MAX_PORT_PER_PCI_DEV, - sizeof(*options), GFP_KERNEL); - if (!options) - return; + err = ice_acquire_nvm(hw, ICE_RES_READ); + if (err) + return err; - for (i = 0; i < ICE_MAX_PORT_PER_PCI_DEV; i++) { - opt = options + i * ICE_AQC_PORT_OPT_MAX; - options_count = ICE_AQC_PORT_OPT_MAX; - active_valid = 0; + err = ice_aq_read_nvm(hw, ICE_AQC_NVM_TX_TOPO_MOD_ID, 0, + sizeof(usr_sel), &usr_sel, true, true, NULL); + if (err) + goto exit_release_res; - status = ice_aq_get_port_options(&pf->hw, opt, &options_count, - i, true, &active_idx, - &active_valid, &pending_idx, - &pending_valid); - if (status) { - dev_dbg(dev, "Couldn't read port option for port %d, err %d\n", - i, status); - goto err; - } - } + if (usr_sel.data & ICE_AQC_NVM_TX_TOPO_USER_SEL) + *layers = ICE_SCHED_5_LAYERS; + else + *layers = ICE_SCHED_9_LAYERS; - dev_dbg(dev, "Available port split options and max port speeds (Gbps):\n"); - dev_dbg(dev, "Status Split Quad 0 Quad 1\n"); - dev_dbg(dev, " count L0 L1 L2 L3 L4 L5 L6 L7\n"); +exit_release_res: + ice_release_nvm(hw); - for (i = 0; i < options_count; i++) { - cnt = 0; + return err; +} - if (i == ice_active_port_option) - str = "Active"; - else if ((i == pending_idx) && pending_valid) - str = "Pending"; - else - str = ""; +/** + * ice_update_tx_topo_user_sel - Save user's preference in flash + * @pf: pointer to pf structure + * @layers: value to be saved in flash + * + * Variable "layers" defines user's preference about number of layers in Tx + * Scheduler Topology Tree. This choice should be stored in PFA TLV field + * and be picked up by driver, next time during init. 
+ * + * Return: zero when save was successful, negative values otherwise. + */ +static int ice_update_tx_topo_user_sel(struct ice_pf *pf, int layers) +{ + struct ice_aqc_nvm_tx_topo_user_sel usr_sel = {}; + struct ice_hw *hw = &pf->hw; + int err; - cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt, - "%-8s", str); + err = ice_acquire_nvm(hw, ICE_RES_WRITE); + if (err) + return err; - cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt, - "%-6u", options[i].pmd); + err = ice_aq_read_nvm(hw, ICE_AQC_NVM_TX_TOPO_MOD_ID, 0, + sizeof(usr_sel), &usr_sel, true, true, NULL); + if (err) + goto exit_release_res; - for (j = 0; j < ICE_MAX_PORT_PER_PCI_DEV; ++j) { - speed = options[i + j * ICE_AQC_PORT_OPT_MAX].max_lane_speed; - str = ice_devlink_port_opt_speed_str(speed); - cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt, - "%3s ", str); - } + if (layers == ICE_SCHED_5_LAYERS) + usr_sel.data |= ICE_AQC_NVM_TX_TOPO_USER_SEL; + else + usr_sel.data &= ~ICE_AQC_NVM_TX_TOPO_USER_SEL; - dev_dbg(dev, "%s\n", desc); - } + err = ice_write_one_nvm_block(pf, ICE_AQC_NVM_TX_TOPO_MOD_ID, 2, + sizeof(usr_sel.data), &usr_sel.data, + true, NULL, NULL); +exit_release_res: + ice_release_nvm(hw); -err: - kfree(options); + return err; } /** - * ice_devlink_aq_set_port_option - Send set port option admin queue command - * @pf: the PF to print split port options - * @option_idx: selected port option - * @extack: extended netdev ack structure + * ice_devlink_tx_sched_layers_get - Get tx_scheduling_layers parameter + * @devlink: pointer to the devlink instance + * @id: the parameter ID to set + * @ctx: context to store the parameter value * - * Sends set port option admin queue command with selected port option and - * calls NVM write activate. + * Return: zero on success and negative value on failure. */ -static int -ice_devlink_aq_set_port_option(struct ice_pf *pf, u8 option_idx, - struct netlink_ext_ack *extack) +static int ice_devlink_tx_sched_layers_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) { - struct device *dev = ice_pf_to_dev(pf); - int status; - - status = ice_aq_set_port_option(&pf->hw, 0, true, option_idx); - if (status) { - dev_dbg(dev, "ice_aq_set_port_option, err %d aq_err %d\n", - status, pf->hw.adminq.sq_last_status); - NL_SET_ERR_MSG_MOD(extack, "Port split request failed"); - return -EIO; - } - - status = ice_acquire_nvm(&pf->hw, ICE_RES_WRITE); - if (status) { - dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n", - status, pf->hw.adminq.sq_last_status); - NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore"); - return -EIO; - } - - status = ice_nvm_write_activate(&pf->hw, ICE_AQC_NVM_ACTIV_REQ_EMPR, NULL); - if (status) { - dev_dbg(dev, "ice_nvm_write_activate failed, err %d aq_err %d\n", - status, pf->hw.adminq.sq_last_status); - NL_SET_ERR_MSG_MOD(extack, "Port split request failed to save data"); - ice_release_nvm(&pf->hw); - return -EIO; - } + struct ice_pf *pf = devlink_priv(devlink); + int err; - ice_release_nvm(&pf->hw); + err = ice_get_tx_topo_user_sel(pf, &ctx->val.vu8); + if (err) + return err; - NL_SET_ERR_MSG_MOD(extack, "Reboot required to finish port split"); return 0; } /** - * ice_devlink_port_split - .port_split devlink handler - * @devlink: devlink instance structure - * @port: devlink port structure - * @count: number of ports to split to - * @extack: extended netdev ack structure - * - * Callback for the devlink .port_split operation. 
- * - * Unfortunately, the devlink expression of available options is limited - * to just a number, so search for an FW port option which supports - * the specified number. As there could be multiple FW port options with - * the same port split count, allow switching between them. When the same - * port split count request is issued again, switch to the next FW port - * option with the same port split count. + * ice_devlink_tx_sched_layers_set - Set tx_scheduling_layers parameter + * @devlink: pointer to the devlink instance + * @id: the parameter ID to set + * @ctx: context to get the parameter value + * @extack: netlink extended ACK structure * - * Return: zero on success or an error code on failure. + * Return: zero on success and negative value on failure. */ -static int -ice_devlink_port_split(struct devlink *devlink, struct devlink_port *port, - unsigned int count, struct netlink_ext_ack *extack) +static int ice_devlink_tx_sched_layers_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { - struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX]; - u8 i, j, active_idx, pending_idx, new_option; struct ice_pf *pf = devlink_priv(devlink); - u8 option_count = ICE_AQC_PORT_OPT_MAX; - struct device *dev = ice_pf_to_dev(pf); - bool active_valid, pending_valid; - int status; - - status = ice_aq_get_port_options(&pf->hw, options, &option_count, - 0, true, &active_idx, &active_valid, - &pending_idx, &pending_valid); - if (status) { - dev_dbg(dev, "Couldn't read port split options, err = %d\n", - status); - NL_SET_ERR_MSG_MOD(extack, "Failed to get available port split options"); - return -EIO; - } - - new_option = ICE_AQC_PORT_OPT_MAX; - active_idx = pending_valid ? pending_idx : active_idx; - for (i = 1; i <= option_count; i++) { - /* In order to allow switching between FW port options with - * the same port split count, search for a new option starting - * from the active/pending option (with array wrap around). - */ - j = (active_idx + i) % option_count; - - if (count == options[j].pmd) { - new_option = j; - break; - } - } - - if (new_option == active_idx) { - dev_dbg(dev, "request to split: count: %u is already set and there are no other options\n", - count); - NL_SET_ERR_MSG_MOD(extack, "Requested split count is already set"); - ice_devlink_port_options_print(pf); - return -EINVAL; - } - - if (new_option == ICE_AQC_PORT_OPT_MAX) { - dev_dbg(dev, "request to split: count: %u not found\n", count); - NL_SET_ERR_MSG_MOD(extack, "Port split requested unsupported port config"); - ice_devlink_port_options_print(pf); - return -EINVAL; - } + int err; - status = ice_devlink_aq_set_port_option(pf, new_option, extack); - if (status) - return status; + err = ice_update_tx_topo_user_sel(pf, ctx->val.vu8); + if (err) + return err; - ice_devlink_port_options_print(pf); + NL_SET_ERR_MSG_MOD(extack, + "Tx scheduling layers have been changed on this device. You must do the PCI slot powercycle for the change to take effect."); return 0; } /** - * ice_devlink_port_unsplit - .port_unsplit devlink handler - * @devlink: devlink instance structure - * @port: devlink port structure - * @extack: extended netdev ack structure + * ice_devlink_tx_sched_layers_validate - Validate passed tx_scheduling_layers + * parameter value + * @devlink: unused pointer to devlink instance + * @id: the parameter ID to validate + * @val: value to validate + * @extack: netlink extended ACK structure * - * Callback for the devlink .port_unsplit operation. 
- * Calls ice_devlink_port_split with split count set to 1. - * There could be no FW option available with split count 1. + * Supported values are: + * - 5 - five layers Tx Scheduler Topology Tree + * - 9 - nine layers Tx Scheduler Topology Tree * - * Return: zero on success or an error code on failure. + * Return: zero when passed parameter value is supported. Negative value on + * error. */ -static int -ice_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port, - struct netlink_ext_ack *extack) +static int ice_devlink_tx_sched_layers_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) { - return ice_devlink_port_split(devlink, port, 1, extack); + if (val.vu8 != ICE_SCHED_5_LAYERS && val.vu8 != ICE_SCHED_9_LAYERS) { + NL_SET_ERR_MSG_MOD(extack, + "Wrong number of tx scheduler layers provided."); + return -EINVAL; + } + + return 0; } /** @@ -1290,18 +1193,16 @@ static int ice_devlink_set_parent(struct devlink_rate *devlink_rate, static int ice_devlink_reinit_up(struct ice_pf *pf) { struct ice_vsi *vsi = ice_get_main_vsi(pf); - struct ice_vsi_cfg_params params; int err; err = ice_init_dev(pf); if (err) return err; - params = ice_vsi_to_params(vsi); - params.flags = ICE_VSI_FLAG_INIT; + vsi->flags = ICE_VSI_FLAG_INIT; rtnl_lock(); - err = ice_vsi_cfg(vsi, ¶ms); + err = ice_vsi_cfg(vsi); rtnl_unlock(); if (err) goto err_vsi_cfg; @@ -1391,9 +1292,9 @@ ice_devlink_enable_roce_get(struct devlink *devlink, u32 id, return 0; } -static int -ice_devlink_enable_roce_set(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) +static int ice_devlink_enable_roce_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct ice_pf *pf = devlink_priv(devlink); bool roce_ena = ctx->val.vbool; @@ -1442,9 +1343,9 @@ ice_devlink_enable_iw_get(struct devlink *devlink, u32 id, return 0; } -static int -ice_devlink_enable_iw_set(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) +static int ice_devlink_enable_iw_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct ice_pf *pf = devlink_priv(devlink); bool iw_ena = ctx->val.vbool; @@ -1482,6 +1383,11 @@ ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id, return 0; } +enum ice_param_id { + ICE_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS, +}; + static const struct devlink_param ice_devlink_params[] = { DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_RUNTIME), ice_devlink_enable_roce_get, @@ -1491,7 +1397,13 @@ static const struct devlink_param ice_devlink_params[] = { ice_devlink_enable_iw_get, ice_devlink_enable_iw_set, ice_devlink_enable_iw_validate), - + DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS, + "tx_scheduling_layers", + DEVLINK_PARAM_TYPE_U8, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + ice_devlink_tx_sched_layers_get, + ice_devlink_tx_sched_layers_set, + ice_devlink_tx_sched_layers_validate), }; static void ice_devlink_free(void *devlink_ptr) @@ -1534,7 +1446,7 @@ void ice_devlink_register(struct ice_pf *pf) { struct devlink *devlink = priv_to_devlink(pf); - devlink_register(devlink); + devl_register(devlink); } /** @@ -1545,197 +1457,28 @@ void ice_devlink_register(struct ice_pf *pf) */ void ice_devlink_unregister(struct ice_pf *pf) { - devlink_unregister(priv_to_devlink(pf)); -} - -/** - * ice_devlink_set_switch_id - Set unique switch id based on 
pci dsn - * @pf: the PF to create a devlink port for - * @ppid: struct with switch id information - */ -static void -ice_devlink_set_switch_id(struct ice_pf *pf, struct netdev_phys_item_id *ppid) -{ - struct pci_dev *pdev = pf->pdev; - u64 id; - - id = pci_get_dsn(pdev); - - ppid->id_len = sizeof(id); - put_unaligned_be64(id, &ppid->id); + devl_unregister(priv_to_devlink(pf)); } int ice_devlink_register_params(struct ice_pf *pf) { struct devlink *devlink = priv_to_devlink(pf); + struct ice_hw *hw = &pf->hw; + size_t params_size; - return devlink_params_register(devlink, ice_devlink_params, - ARRAY_SIZE(ice_devlink_params)); -} - -void ice_devlink_unregister_params(struct ice_pf *pf) -{ - devlink_params_unregister(priv_to_devlink(pf), ice_devlink_params, - ARRAY_SIZE(ice_devlink_params)); -} - -/** - * ice_devlink_set_port_split_options - Set port split options - * @pf: the PF to set port split options - * @attrs: devlink attributes - * - * Sets devlink port split options based on available FW port options - */ -static void -ice_devlink_set_port_split_options(struct ice_pf *pf, - struct devlink_port_attrs *attrs) -{ - struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX]; - u8 i, active_idx, pending_idx, option_count = ICE_AQC_PORT_OPT_MAX; - bool active_valid, pending_valid; - int status; - - status = ice_aq_get_port_options(&pf->hw, options, &option_count, - 0, true, &active_idx, &active_valid, - &pending_idx, &pending_valid); - if (status) { - dev_dbg(ice_pf_to_dev(pf), "Couldn't read port split options, err = %d\n", - status); - return; - } - - /* find the biggest available port split count */ - for (i = 0; i < option_count; i++) - attrs->lanes = max_t(int, attrs->lanes, options[i].pmd); - - attrs->splittable = attrs->lanes ? 1 : 0; - ice_active_port_option = active_idx; -} - -static const struct devlink_port_ops ice_devlink_port_ops = { - .port_split = ice_devlink_port_split, - .port_unsplit = ice_devlink_port_unsplit, -}; - -/** - * ice_devlink_create_pf_port - Create a devlink port for this PF - * @pf: the PF to create a devlink port for - * - * Create and register a devlink_port for this PF. - * This function has to be called under devl_lock. - * - * Return: zero on success or an error code on failure. - */ -int ice_devlink_create_pf_port(struct ice_pf *pf) -{ - struct devlink_port_attrs attrs = {}; - struct devlink_port *devlink_port; - struct devlink *devlink; - struct ice_vsi *vsi; - struct device *dev; - int err; - - devlink = priv_to_devlink(pf); - - dev = ice_pf_to_dev(pf); - - devlink_port = &pf->devlink_port; - - vsi = ice_get_main_vsi(pf); - if (!vsi) - return -EIO; - - attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; - attrs.phys.port_number = pf->hw.bus.func; - - /* As FW supports only port split options for whole device, - * set port split options only for first PF. - */ - if (pf->hw.pf_id == 0) - ice_devlink_set_port_split_options(pf, &attrs); - - ice_devlink_set_switch_id(pf, &attrs.switch_id); - - devlink_port_attrs_set(devlink_port, &attrs); - - err = devl_port_register_with_ops(devlink, devlink_port, vsi->idx, - &ice_devlink_port_ops); - if (err) { - dev_err(dev, "Failed to create devlink port for PF %d, error %d\n", - pf->hw.pf_id, err); - return err; - } - - return 0; -} - -/** - * ice_devlink_destroy_pf_port - Destroy the devlink_port for this PF - * @pf: the PF to cleanup - * - * Unregisters the devlink_port structure associated with this PF. - * This function has to be called under devl_lock. 
- */ -void ice_devlink_destroy_pf_port(struct ice_pf *pf) -{ - devl_port_unregister(&pf->devlink_port); -} - -/** - * ice_devlink_create_vf_port - Create a devlink port for this VF - * @vf: the VF to create a port for - * - * Create and register a devlink_port for this VF. - * - * Return: zero on success or an error code on failure. - */ -int ice_devlink_create_vf_port(struct ice_vf *vf) -{ - struct devlink_port_attrs attrs = {}; - struct devlink_port *devlink_port; - struct devlink *devlink; - struct ice_vsi *vsi; - struct device *dev; - struct ice_pf *pf; - int err; - - pf = vf->pf; - dev = ice_pf_to_dev(pf); - devlink_port = &vf->devlink_port; - - vsi = ice_get_vf_vsi(vf); - if (!vsi) - return -EINVAL; - - attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF; - attrs.pci_vf.pf = pf->hw.bus.func; - attrs.pci_vf.vf = vf->vf_id; - - ice_devlink_set_switch_id(pf, &attrs.switch_id); - - devlink_port_attrs_set(devlink_port, &attrs); - devlink = priv_to_devlink(pf); + params_size = ARRAY_SIZE(ice_devlink_params); - err = devlink_port_register(devlink, devlink_port, vsi->idx); - if (err) { - dev_err(dev, "Failed to create devlink port for VF %d, error %d\n", - vf->vf_id, err); - return err; - } + if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) + params_size--; - return 0; + return devl_params_register(devlink, ice_devlink_params, + params_size); } -/** - * ice_devlink_destroy_vf_port - Destroy the devlink_port for this VF - * @vf: the VF to cleanup - * - * Unregisters the devlink_port structure associated with this VF. - */ -void ice_devlink_destroy_vf_port(struct ice_vf *vf) +void ice_devlink_unregister_params(struct ice_pf *pf) { - devl_rate_leaf_destroy(&vf->devlink_port); - devlink_port_unregister(&vf->devlink_port); + devl_params_unregister(priv_to_devlink(pf), ice_devlink_params, + ARRAY_SIZE(ice_devlink_params)); } #define ICE_DEVLINK_READ_BLK_SIZE (1024 * 1024) @@ -1976,8 +1719,8 @@ void ice_devlink_init_regions(struct ice_pf *pf) u64 nvm_size, sram_size; nvm_size = pf->hw.flash.flash_size; - pf->nvm_region = devlink_region_create(devlink, &ice_nvm_region_ops, 1, - nvm_size); + pf->nvm_region = devl_region_create(devlink, &ice_nvm_region_ops, 1, + nvm_size); if (IS_ERR(pf->nvm_region)) { dev_err(dev, "failed to create NVM devlink region, err %ld\n", PTR_ERR(pf->nvm_region)); @@ -1985,17 +1728,17 @@ void ice_devlink_init_regions(struct ice_pf *pf) } sram_size = pf->hw.flash.sr_words * 2u; - pf->sram_region = devlink_region_create(devlink, &ice_sram_region_ops, - 1, sram_size); + pf->sram_region = devl_region_create(devlink, &ice_sram_region_ops, + 1, sram_size); if (IS_ERR(pf->sram_region)) { dev_err(dev, "failed to create shadow-ram devlink region, err %ld\n", PTR_ERR(pf->sram_region)); pf->sram_region = NULL; } - pf->devcaps_region = devlink_region_create(devlink, - &ice_devcaps_region_ops, 10, - ICE_AQ_MAX_BUF_LEN); + pf->devcaps_region = devl_region_create(devlink, + &ice_devcaps_region_ops, 10, + ICE_AQ_MAX_BUF_LEN); if (IS_ERR(pf->devcaps_region)) { dev_err(dev, "failed to create device-caps devlink region, err %ld\n", PTR_ERR(pf->devcaps_region)); @@ -2012,11 +1755,11 @@ void ice_devlink_init_regions(struct ice_pf *pf) void ice_devlink_destroy_regions(struct ice_pf *pf) { if (pf->nvm_region) - devlink_region_destroy(pf->nvm_region); + devl_region_destroy(pf->nvm_region); if (pf->sram_region) - devlink_region_destroy(pf->sram_region); + devl_region_destroy(pf->sram_region); if (pf->devcaps_region) - devlink_region_destroy(pf->devcaps_region); + 
devl_region_destroy(pf->devcaps_region); } diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/devlink/devlink.h index d291c0e2e17b..d291c0e2e17b 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.h +++ b/drivers/net/ethernet/intel/ice/devlink/devlink.h diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c new file mode 100644 index 000000000000..13e6790d3cae --- /dev/null +++ b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c @@ -0,0 +1,430 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024, Intel Corporation. */ + +#include <linux/vmalloc.h> + +#include "ice.h" +#include "devlink.h" + +static int ice_active_port_option = -1; + +/** + * ice_devlink_port_opt_speed_str - convert speed to a string + * @speed: speed value + */ +static const char *ice_devlink_port_opt_speed_str(u8 speed) +{ + switch (speed & ICE_AQC_PORT_OPT_MAX_LANE_M) { + case ICE_AQC_PORT_OPT_MAX_LANE_100M: + return "0.1"; + case ICE_AQC_PORT_OPT_MAX_LANE_1G: + return "1"; + case ICE_AQC_PORT_OPT_MAX_LANE_2500M: + return "2.5"; + case ICE_AQC_PORT_OPT_MAX_LANE_5G: + return "5"; + case ICE_AQC_PORT_OPT_MAX_LANE_10G: + return "10"; + case ICE_AQC_PORT_OPT_MAX_LANE_25G: + return "25"; + case ICE_AQC_PORT_OPT_MAX_LANE_50G: + return "50"; + case ICE_AQC_PORT_OPT_MAX_LANE_100G: + return "100"; + } + + return "-"; +} + +#define ICE_PORT_OPT_DESC_LEN 50 +/** + * ice_devlink_port_options_print - Print available port split options + * @pf: the PF to print split port options + * + * Prints a table with available port split options and max port speeds + */ +static void ice_devlink_port_options_print(struct ice_pf *pf) +{ + u8 i, j, options_count, cnt, speed, pending_idx, active_idx; + struct ice_aqc_get_port_options_elem *options, *opt; + struct device *dev = ice_pf_to_dev(pf); + bool active_valid, pending_valid; + char desc[ICE_PORT_OPT_DESC_LEN]; + const char *str; + int status; + + options = kcalloc(ICE_AQC_PORT_OPT_MAX * ICE_MAX_PORT_PER_PCI_DEV, + sizeof(*options), GFP_KERNEL); + if (!options) + return; + + for (i = 0; i < ICE_MAX_PORT_PER_PCI_DEV; i++) { + opt = options + i * ICE_AQC_PORT_OPT_MAX; + options_count = ICE_AQC_PORT_OPT_MAX; + active_valid = 0; + + status = ice_aq_get_port_options(&pf->hw, opt, &options_count, + i, true, &active_idx, + &active_valid, &pending_idx, + &pending_valid); + if (status) { + dev_dbg(dev, "Couldn't read port option for port %d, err %d\n", + i, status); + goto err; + } + } + + dev_dbg(dev, "Available port split options and max port speeds (Gbps):\n"); + dev_dbg(dev, "Status Split Quad 0 Quad 1\n"); + dev_dbg(dev, " count L0 L1 L2 L3 L4 L5 L6 L7\n"); + + for (i = 0; i < options_count; i++) { + cnt = 0; + + if (i == ice_active_port_option) + str = "Active"; + else if ((i == pending_idx) && pending_valid) + str = "Pending"; + else + str = ""; + + cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt, + "%-8s", str); + + cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt, + "%-6u", options[i].pmd); + + for (j = 0; j < ICE_MAX_PORT_PER_PCI_DEV; ++j) { + speed = options[i + j * ICE_AQC_PORT_OPT_MAX].max_lane_speed; + str = ice_devlink_port_opt_speed_str(speed); + cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt, + "%3s ", str); + } + + dev_dbg(dev, "%s\n", desc); + } + +err: + kfree(options); +} + +/** + * ice_devlink_aq_set_port_option - Send set port option admin queue command + * @pf: the PF to print split port options + * @option_idx: selected port option 
+ * @extack: extended netdev ack structure + * + * Sends set port option admin queue command with selected port option and + * calls NVM write activate. + */ +static int +ice_devlink_aq_set_port_option(struct ice_pf *pf, u8 option_idx, + struct netlink_ext_ack *extack) +{ + struct device *dev = ice_pf_to_dev(pf); + int status; + + status = ice_aq_set_port_option(&pf->hw, 0, true, option_idx); + if (status) { + dev_dbg(dev, "ice_aq_set_port_option, err %d aq_err %d\n", + status, pf->hw.adminq.sq_last_status); + NL_SET_ERR_MSG_MOD(extack, "Port split request failed"); + return -EIO; + } + + status = ice_acquire_nvm(&pf->hw, ICE_RES_WRITE); + if (status) { + dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n", + status, pf->hw.adminq.sq_last_status); + NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore"); + return -EIO; + } + + status = ice_nvm_write_activate(&pf->hw, ICE_AQC_NVM_ACTIV_REQ_EMPR, NULL); + if (status) { + dev_dbg(dev, "ice_nvm_write_activate failed, err %d aq_err %d\n", + status, pf->hw.adminq.sq_last_status); + NL_SET_ERR_MSG_MOD(extack, "Port split request failed to save data"); + ice_release_nvm(&pf->hw); + return -EIO; + } + + ice_release_nvm(&pf->hw); + + NL_SET_ERR_MSG_MOD(extack, "Reboot required to finish port split"); + return 0; +} + +/** + * ice_devlink_port_split - .port_split devlink handler + * @devlink: devlink instance structure + * @port: devlink port structure + * @count: number of ports to split to + * @extack: extended netdev ack structure + * + * Callback for the devlink .port_split operation. + * + * Unfortunately, the devlink expression of available options is limited + * to just a number, so search for an FW port option which supports + * the specified number. As there could be multiple FW port options with + * the same port split count, allow switching between them. When the same + * port split count request is issued again, switch to the next FW port + * option with the same port split count. + * + * Return: zero on success or an error code on failure. + */ +static int +ice_devlink_port_split(struct devlink *devlink, struct devlink_port *port, + unsigned int count, struct netlink_ext_ack *extack) +{ + struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX]; + u8 i, j, active_idx, pending_idx, new_option; + struct ice_pf *pf = devlink_priv(devlink); + u8 option_count = ICE_AQC_PORT_OPT_MAX; + struct device *dev = ice_pf_to_dev(pf); + bool active_valid, pending_valid; + int status; + + status = ice_aq_get_port_options(&pf->hw, options, &option_count, + 0, true, &active_idx, &active_valid, + &pending_idx, &pending_valid); + if (status) { + dev_dbg(dev, "Couldn't read port split options, err = %d\n", + status); + NL_SET_ERR_MSG_MOD(extack, "Failed to get available port split options"); + return -EIO; + } + + new_option = ICE_AQC_PORT_OPT_MAX; + active_idx = pending_valid ? pending_idx : active_idx; + for (i = 1; i <= option_count; i++) { + /* In order to allow switching between FW port options with + * the same port split count, search for a new option starting + * from the active/pending option (with array wrap around). 
+ */ + j = (active_idx + i) % option_count; + + if (count == options[j].pmd) { + new_option = j; + break; + } + } + + if (new_option == active_idx) { + dev_dbg(dev, "request to split: count: %u is already set and there are no other options\n", + count); + NL_SET_ERR_MSG_MOD(extack, "Requested split count is already set"); + ice_devlink_port_options_print(pf); + return -EINVAL; + } + + if (new_option == ICE_AQC_PORT_OPT_MAX) { + dev_dbg(dev, "request to split: count: %u not found\n", count); + NL_SET_ERR_MSG_MOD(extack, "Port split requested unsupported port config"); + ice_devlink_port_options_print(pf); + return -EINVAL; + } + + status = ice_devlink_aq_set_port_option(pf, new_option, extack); + if (status) + return status; + + ice_devlink_port_options_print(pf); + + return 0; +} + +/** + * ice_devlink_port_unsplit - .port_unsplit devlink handler + * @devlink: devlink instance structure + * @port: devlink port structure + * @extack: extended netdev ack structure + * + * Callback for the devlink .port_unsplit operation. + * Calls ice_devlink_port_split with split count set to 1. + * There could be no FW option available with split count 1. + * + * Return: zero on success or an error code on failure. + */ +static int +ice_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port, + struct netlink_ext_ack *extack) +{ + return ice_devlink_port_split(devlink, port, 1, extack); +} + +/** + * ice_devlink_set_port_split_options - Set port split options + * @pf: the PF to set port split options + * @attrs: devlink attributes + * + * Sets devlink port split options based on available FW port options + */ +static void +ice_devlink_set_port_split_options(struct ice_pf *pf, + struct devlink_port_attrs *attrs) +{ + struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX]; + u8 i, active_idx, pending_idx, option_count = ICE_AQC_PORT_OPT_MAX; + bool active_valid, pending_valid; + int status; + + status = ice_aq_get_port_options(&pf->hw, options, &option_count, + 0, true, &active_idx, &active_valid, + &pending_idx, &pending_valid); + if (status) { + dev_dbg(ice_pf_to_dev(pf), "Couldn't read port split options, err = %d\n", + status); + return; + } + + /* find the biggest available port split count */ + for (i = 0; i < option_count; i++) + attrs->lanes = max_t(int, attrs->lanes, options[i].pmd); + + attrs->splittable = attrs->lanes ? 1 : 0; + ice_active_port_option = active_idx; +} + +static const struct devlink_port_ops ice_devlink_port_ops = { + .port_split = ice_devlink_port_split, + .port_unsplit = ice_devlink_port_unsplit, +}; + +/** + * ice_devlink_set_switch_id - Set unique switch id based on pci dsn + * @pf: the PF to create a devlink port for + * @ppid: struct with switch id information + */ +static void +ice_devlink_set_switch_id(struct ice_pf *pf, struct netdev_phys_item_id *ppid) +{ + struct pci_dev *pdev = pf->pdev; + u64 id; + + id = pci_get_dsn(pdev); + + ppid->id_len = sizeof(id); + put_unaligned_be64(id, &ppid->id); +} + +/** + * ice_devlink_create_pf_port - Create a devlink port for this PF + * @pf: the PF to create a devlink port for + * + * Create and register a devlink_port for this PF. + * This function has to be called under devl_lock. + * + * Return: zero on success or an error code on failure. 
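ice_devlink_create_pf_port() documents that it must run under the devlink instance lock. A minimal, hypothetical caller sketch (the real call sites live elsewhere in the driver and are not shown in this diff), assuming only the standard devl_lock()/devl_unlock() helpers:

	/* Sketch only; example_add_pf_port() is a hypothetical wrapper. */
	static int example_add_pf_port(struct ice_pf *pf)
	{
		struct devlink *devlink = priv_to_devlink(pf);
		int err;

		devl_lock(devlink);
		err = ice_devlink_create_pf_port(pf);
		devl_unlock(devlink);

		return err;
	}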
+ */ +int ice_devlink_create_pf_port(struct ice_pf *pf) +{ + struct devlink_port_attrs attrs = {}; + struct devlink_port *devlink_port; + struct devlink *devlink; + struct ice_vsi *vsi; + struct device *dev; + int err; + + devlink = priv_to_devlink(pf); + + dev = ice_pf_to_dev(pf); + + devlink_port = &pf->devlink_port; + + vsi = ice_get_main_vsi(pf); + if (!vsi) + return -EIO; + + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + attrs.phys.port_number = pf->hw.bus.func; + + /* As FW supports only port split options for whole device, + * set port split options only for first PF. + */ + if (pf->hw.pf_id == 0) + ice_devlink_set_port_split_options(pf, &attrs); + + ice_devlink_set_switch_id(pf, &attrs.switch_id); + + devlink_port_attrs_set(devlink_port, &attrs); + + err = devl_port_register_with_ops(devlink, devlink_port, vsi->idx, + &ice_devlink_port_ops); + if (err) { + dev_err(dev, "Failed to create devlink port for PF %d, error %d\n", + pf->hw.pf_id, err); + return err; + } + + return 0; +} + +/** + * ice_devlink_destroy_pf_port - Destroy the devlink_port for this PF + * @pf: the PF to cleanup + * + * Unregisters the devlink_port structure associated with this PF. + * This function has to be called under devl_lock. + */ +void ice_devlink_destroy_pf_port(struct ice_pf *pf) +{ + devl_port_unregister(&pf->devlink_port); +} + +/** + * ice_devlink_create_vf_port - Create a devlink port for this VF + * @vf: the VF to create a port for + * + * Create and register a devlink_port for this VF. + * + * Return: zero on success or an error code on failure. + */ +int ice_devlink_create_vf_port(struct ice_vf *vf) +{ + struct devlink_port_attrs attrs = {}; + struct devlink_port *devlink_port; + struct devlink *devlink; + struct ice_vsi *vsi; + struct device *dev; + struct ice_pf *pf; + int err; + + pf = vf->pf; + dev = ice_pf_to_dev(pf); + devlink_port = &vf->devlink_port; + + vsi = ice_get_vf_vsi(vf); + if (!vsi) + return -EINVAL; + + attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF; + attrs.pci_vf.pf = pf->hw.bus.func; + attrs.pci_vf.vf = vf->vf_id; + + ice_devlink_set_switch_id(pf, &attrs.switch_id); + + devlink_port_attrs_set(devlink_port, &attrs); + devlink = priv_to_devlink(pf); + + err = devlink_port_register(devlink, devlink_port, vsi->idx); + if (err) { + dev_err(dev, "Failed to create devlink port for VF %d, error %d\n", + vf->vf_id, err); + return err; + } + + return 0; +} + +/** + * ice_devlink_destroy_vf_port - Destroy the devlink_port for this VF + * @vf: the VF to cleanup + * + * Unregisters the devlink_port structure associated with this VF. + */ +void ice_devlink_destroy_vf_port(struct ice_vf *vf) +{ + devl_rate_leaf_destroy(&vf->devlink_port); + devlink_port_unregister(&vf->devlink_port); +} diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h b/drivers/net/ethernet/intel/ice/devlink/devlink_port.h new file mode 100644 index 000000000000..9223bcdb6444 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/devlink/devlink_port.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2024, Intel Corporation. 
*/ + +#ifndef _DEVLINK_PORT_H_ +#define _DEVLINK_PORT_H_ + +int ice_devlink_create_pf_port(struct ice_pf *pf); +void ice_devlink_destroy_pf_port(struct ice_pf *pf); +int ice_devlink_create_vf_port(struct ice_vf *vf); +void ice_devlink_destroy_vf_port(struct ice_vf *vf); + +#endif /* _DEVLINK_PORT_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 365c03d1c462..6ad8002b22e1 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -77,6 +77,7 @@ #include "ice_gnss.h" #include "ice_irq.h" #include "ice_dpll.h" +#include "ice_adapter.h" #define ICE_BAR0 0 #define ICE_REQ_DESC_MULTIPLE 32 @@ -330,7 +331,6 @@ struct ice_vsi { struct net_device *netdev; struct ice_sw *vsw; /* switch this VSI is on */ struct ice_pf *back; /* back pointer to PF */ - struct ice_port_info *port_info; /* back pointer to port_info */ struct ice_rx_ring **rx_rings; /* Rx ring array */ struct ice_tx_ring **tx_rings; /* Tx ring array */ struct ice_q_vector **q_vectors; /* q_vector array */ @@ -348,12 +348,9 @@ struct ice_vsi { /* tell if only dynamic irq allocation is allowed */ bool irq_dyn_alloc; - enum ice_vsi_type type; u16 vsi_num; /* HW (absolute) index of this VSI */ u16 idx; /* software index in pf->vsi[] */ - struct ice_vf *vf; /* VF associated with this VSI */ - u16 num_gfltr; u16 num_bfltr; @@ -445,12 +442,18 @@ struct ice_vsi { u8 old_numtc; u16 old_ena_tc; - struct ice_channel *ch; - /* setup back reference, to which aggregator node this VSI * corresponds to */ struct ice_agg_node *agg_node; + + struct_group_tagged(ice_vsi_cfg_params, params, + struct ice_port_info *port_info; /* back pointer to port_info */ + struct ice_channel *ch; /* VSI's channel structure, may be NULL */ + struct ice_vf *vf; /* VF associated with this VSI, may be NULL */ + u32 flags; /* VSI flags used for rebuild and configuration */ + enum ice_vsi_type type; /* the type of the VSI */ + ); } ____cacheline_internodealigned_in_smp; /* struct that defines an interrupt vector */ @@ -458,7 +461,7 @@ struct ice_q_vector { struct ice_vsi *vsi; u16 v_idx; /* index in the vsi->q_vector array. 
*/ - u16 reg_idx; + u16 reg_idx; /* PF relative register index */ u8 num_ring_rx; /* total number of Rx rings in vector */ u8 num_ring_tx; /* total number of Tx rings in vector */ u8 wb_on_itr:1; /* if true, WB on ITR is enabled */ @@ -480,6 +483,7 @@ struct ice_q_vector { char name[ICE_INT_NAME_STR_LEN]; u16 total_events; /* net_dim(): number of interrupts processed */ + u16 vf_reg_idx; /* VF relative register index */ struct msi_map irq; } ____cacheline_internodealigned_in_smp; @@ -522,17 +526,10 @@ enum ice_misc_thread_tasks { }; struct ice_eswitch { - struct ice_vsi *control_vsi; struct ice_vsi *uplink_vsi; struct ice_esw_br_offloads *br_offloads; struct xarray reprs; bool is_running; - /* struct to allow cp queues management optimization */ - struct { - int to_reach; - int value; - bool is_reaching; - } qs; }; struct ice_agg_node { @@ -544,6 +541,7 @@ struct ice_agg_node { struct ice_pf { struct pci_dev *pdev; + struct ice_adapter *adapter; struct devlink_region *nvm_region; struct devlink_region *sram_region; diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c new file mode 100644 index 000000000000..52d15ef7f4b1 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_adapter.c @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: GPL-2.0-only +// SPDX-FileCopyrightText: Copyright Red Hat + +#include <linux/bitfield.h> +#include <linux/cleanup.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/xarray.h> +#include "ice_adapter.h" + +static DEFINE_XARRAY(ice_adapters); + +/* PCI bus number is 8 bits. Slot is 5 bits. Domain can have the rest. */ +#define INDEX_FIELD_DOMAIN GENMASK(BITS_PER_LONG - 1, 13) +#define INDEX_FIELD_BUS GENMASK(12, 5) +#define INDEX_FIELD_SLOT GENMASK(4, 0) + +static unsigned long ice_adapter_index(const struct pci_dev *pdev) +{ + unsigned int domain = pci_domain_nr(pdev->bus); + + WARN_ON(domain > FIELD_MAX(INDEX_FIELD_DOMAIN)); + + return FIELD_PREP(INDEX_FIELD_DOMAIN, domain) | + FIELD_PREP(INDEX_FIELD_BUS, pdev->bus->number) | + FIELD_PREP(INDEX_FIELD_SLOT, PCI_SLOT(pdev->devfn)); +} + +static struct ice_adapter *ice_adapter_new(void) +{ + struct ice_adapter *adapter; + + adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); + if (!adapter) + return NULL; + + spin_lock_init(&adapter->ptp_gltsyn_time_lock); + refcount_set(&adapter->refcount, 1); + + return adapter; +} + +static void ice_adapter_free(struct ice_adapter *adapter) +{ + kfree(adapter); +} + +DEFINE_FREE(ice_adapter_free, struct ice_adapter*, if (_T) ice_adapter_free(_T)) + +/** + * ice_adapter_get - Get a shared ice_adapter structure. + * @pdev: Pointer to the pci_dev whose driver is getting the ice_adapter. + * + * Gets a pointer to a shared ice_adapter structure. Physical functions (PFs) + * of the same multi-function PCI device share one ice_adapter structure. + * The ice_adapter is reference-counted. The PF driver must use ice_adapter_put + * to release its reference. + * + * Context: Process, may sleep. + * Return: Pointer to ice_adapter on success. + * ERR_PTR() on error. -ENOMEM is the only possible error. 
+ */ +struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev) +{ + struct ice_adapter *ret, __free(ice_adapter_free) *adapter = NULL; + unsigned long index = ice_adapter_index(pdev); + + adapter = ice_adapter_new(); + if (!adapter) + return ERR_PTR(-ENOMEM); + + xa_lock(&ice_adapters); + ret = __xa_cmpxchg(&ice_adapters, index, NULL, adapter, GFP_KERNEL); + if (xa_is_err(ret)) { + ret = ERR_PTR(xa_err(ret)); + goto unlock; + } + if (ret) { + refcount_inc(&ret->refcount); + goto unlock; + } + ret = no_free_ptr(adapter); +unlock: + xa_unlock(&ice_adapters); + return ret; +} + +/** + * ice_adapter_put - Release a reference to the shared ice_adapter structure. + * @pdev: Pointer to the pci_dev whose driver is releasing the ice_adapter. + * + * Releases the reference to ice_adapter previously obtained with + * ice_adapter_get. + * + * Context: Any. + */ +void ice_adapter_put(const struct pci_dev *pdev) +{ + unsigned long index = ice_adapter_index(pdev); + struct ice_adapter *adapter; + + xa_lock(&ice_adapters); + adapter = xa_load(&ice_adapters, index); + if (WARN_ON(!adapter)) + goto unlock; + + if (!refcount_dec_and_test(&adapter->refcount)) + goto unlock; + + WARN_ON(__xa_erase(&ice_adapters, index) != adapter); + ice_adapter_free(adapter); +unlock: + xa_unlock(&ice_adapters); +} diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.h b/drivers/net/ethernet/intel/ice/ice_adapter.h new file mode 100644 index 000000000000..9d11014ec02f --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_adapter.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-FileCopyrightText: Copyright Red Hat */ + +#ifndef _ICE_ADAPTER_H_ +#define _ICE_ADAPTER_H_ + +#include <linux/spinlock_types.h> +#include <linux/refcount_types.h> + +struct pci_dev; + +/** + * struct ice_adapter - PCI adapter resources shared across PFs + * @ptp_gltsyn_time_lock: Spinlock protecting access to the GLTSYN_TIME + * register of the PTP clock. + * @refcount: Reference count. struct ice_pf objects hold the references. 
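A minimal usage sketch for the two helpers above, with hypothetical probe/remove wrappers (the real wiring is elsewhere in this series): each PF takes one reference to the shared adapter while probing and drops it on remove; the last put frees the structure.

	/* Sketch only; assumes "ice.h" and "ice_adapter.h" are included. */
	static int example_pf_adapter_attach(struct ice_pf *pf)
	{
		struct ice_adapter *adapter;

		adapter = ice_adapter_get(pf->pdev);
		if (IS_ERR(adapter))
			return PTR_ERR(adapter);

		pf->adapter = adapter;	/* field added to struct ice_pf above */
		return 0;
	}

	static void example_pf_adapter_detach(struct ice_pf *pf)
	{
		pf->adapter = NULL;
		ice_adapter_put(pf->pdev);
	}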
+ */ +struct ice_adapter { + /* For access to the GLTSYN_TIME register */ + spinlock_t ptp_gltsyn_time_lock; + + refcount_t refcount; +}; + +struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev); +void ice_adapter_put(const struct pci_dev *pdev); + +#endif /* _ICE_ADAPTER_H */ diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 1f3e7a6903e5..e76c388b9905 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -121,6 +121,7 @@ struct ice_aqc_list_caps_elem { #define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076 #define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077 #define ICE_AQC_CAPS_NVM_MGMT 0x0080 +#define ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE 0x0085 #define ICE_AQC_CAPS_FW_LAG_SUPPORT 0x0092 #define ICE_AQC_BIT_ROCEV2_LAG 0x01 #define ICE_AQC_BIT_SRIOV_LAG 0x02 @@ -264,6 +265,8 @@ struct ice_aqc_set_port_params { #define ICE_AQC_RES_TYPE_FLAG_SHARED BIT(7) #define ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM BIT(12) #define ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX BIT(13) +#define ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_SHARED BIT(14) +#define ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_CTL BIT(15) #define ICE_AQC_RES_TYPE_FLAG_DEDICATED 0x00 @@ -808,6 +811,23 @@ struct ice_aqc_get_topo { __le32 addr_low; }; +/* Get/Set Tx Topology (indirect 0x0418/0x0417) */ +struct ice_aqc_get_set_tx_topo { + u8 set_flags; +#define ICE_AQC_TX_TOPO_FLAGS_CORRER BIT(0) +#define ICE_AQC_TX_TOPO_FLAGS_SRC_RAM BIT(1) +#define ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW BIT(4) +#define ICE_AQC_TX_TOPO_FLAGS_ISSUED BIT(5) + + u8 get_flags; +#define ICE_AQC_TX_TOPO_GET_RAM 2 + + __le16 reserved1; + __le32 reserved2; + __le32 addr_high; + __le32 addr_low; +}; + /* Update TSE (indirect 0x0403) * Get TSE (indirect 0x0404) * Add TSE (indirect 0x0401) @@ -1664,6 +1684,15 @@ struct ice_aqc_nvm { #define ICE_AQC_NVM_START_POINT 0 +#define ICE_AQC_NVM_TX_TOPO_MOD_ID 0x14B + +struct ice_aqc_nvm_tx_topo_user_sel { + __le16 length; + u8 data; +#define ICE_AQC_NVM_TX_TOPO_USER_SEL BIT(4) + u8 reserved; +}; + /* NVM Checksum Command (direct, 0x0706) */ struct ice_aqc_nvm_checksum { u8 flags; @@ -2536,6 +2565,7 @@ struct ice_aq_desc { struct ice_aqc_get_link_topo get_link_topo; struct ice_aqc_i2c read_write_i2c; struct ice_aqc_read_i2c_resp read_i2c_resp; + struct ice_aqc_get_set_tx_topo get_set_tx_topo; } params; }; @@ -2642,6 +2672,10 @@ enum ice_adminq_opc { ice_aqc_opc_query_sched_res = 0x0412, ice_aqc_opc_remove_rl_profiles = 0x0415, + /* tx topology commands */ + ice_aqc_opc_set_tx_topo = 0x0417, + ice_aqc_opc_get_tx_topo = 0x0418, + /* PHY commands */ ice_aqc_opc_get_phy_caps = 0x0600, ice_aqc_opc_set_phy_cfg = 0x0601, diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index a545a7917e4f..687f6cb2b917 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -121,7 +121,7 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx) q_vector->irq.index = -ENOENT; if (vsi->type == ICE_VSI_VF) { - q_vector->reg_idx = ice_calc_vf_reg_idx(vsi->vf, q_vector); + ice_calc_vf_reg_idx(vsi->vf, q_vector); goto out; } else if (vsi->type == ICE_VSI_CTRL && vsi->vf) { struct ice_vsi *ctrl_vsi = ice_get_vf_ctrl_vsi(pf, vsi); @@ -145,6 +145,7 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx) skip_alloc: q_vector->reg_idx = q_vector->irq.index; + q_vector->vf_reg_idx = q_vector->irq.index; /* only set affinity_mask if the CPU is 
online */ if (cpu_online(v_idx)) @@ -264,30 +265,6 @@ static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 } /** - * ice_eswitch_calc_txq_handle - * @ring: pointer to ring which unique index is needed - * - * To correctly work with many netdevs ring->q_index of Tx rings on switchdev - * VSI can repeat. Hardware ring setup requires unique q_index. Calculate it - * here by finding index in vsi->tx_rings of this ring. - * - * Return ICE_INVAL_Q_INDEX when index wasn't found. Should never happen, - * because VSI is get from ring->vsi, so it has to be present in this VSI. - */ -static u16 ice_eswitch_calc_txq_handle(struct ice_tx_ring *ring) -{ - const struct ice_vsi *vsi = ring->vsi; - int i; - - ice_for_each_txq(vsi, i) { - if (vsi->tx_rings[i] == ring) - return i; - } - - return ICE_INVAL_Q_INDEX; -} - -/** * ice_cfg_xps_tx_ring - Configure XPS for a Tx ring * @ring: The Tx ring to configure * @@ -353,9 +330,6 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id; tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF; break; - case ICE_VSI_SWITCHDEV_CTRL: - tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ; - break; default: return; } @@ -479,6 +453,14 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring) /* Rx queue threshold in units of 64 */ rlan_ctx.lrxqthresh = 1; + /* PF acts as uplink for switchdev; set flex descriptor with src_vsi + * metadata and flags to allow redirecting to PR netdev + */ + if (ice_is_eswitch_mode_switchdev(vsi->back)) { + ring->flags |= ICE_RX_FLAGS_MULTIDEV; + rxdid = ICE_RXDID_FLEX_NIC_2; + } + /* Enable Flexible Descriptors in the queue context which * allows this driver to select a specific receive descriptor format * increasing context priority to pick up profile ID; default is 0x01; @@ -919,14 +901,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring, /* Add unique software queue handle of the Tx queue per * TC into the VSI Tx ring */ - if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) { - ring->q_handle = ice_eswitch_calc_txq_handle(ring); - - if (ring->q_handle == ICE_INVAL_Q_INDEX) - return -ENODEV; - } else { - ring->q_handle = ice_calc_txq_handle(vsi, ring, tc); - } + ring->q_handle = ice_calc_txq_handle(vsi, ring, tc); if (ch) status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0, diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index d9f6cc71d900..5649b257e631 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -160,10 +160,16 @@ static int ice_set_mac_type(struct ice_hw *hw) case ICE_DEV_ID_E825C_SGMII: hw->mac_type = ICE_MAC_GENERIC_3K_E825; break; - case ICE_DEV_ID_E830_BACKPLANE: - case ICE_DEV_ID_E830_QSFP56: - case ICE_DEV_ID_E830_SFP: - case ICE_DEV_ID_E830_SFP_DD: + case ICE_DEV_ID_E830CC_BACKPLANE: + case ICE_DEV_ID_E830CC_QSFP56: + case ICE_DEV_ID_E830CC_SFP: + case ICE_DEV_ID_E830CC_SFP_DD: + case ICE_DEV_ID_E830C_BACKPLANE: + case ICE_DEV_ID_E830_XXV_BACKPLANE: + case ICE_DEV_ID_E830C_QSFP: + case ICE_DEV_ID_E830_XXV_QSFP: + case ICE_DEV_ID_E830C_SFP: + case ICE_DEV_ID_E830_XXV_SFP: hw->mac_type = ICE_MAC_E830; break; default: @@ -1142,6 +1148,8 @@ int ice_init_hw(struct ice_hw *hw) if (status) goto err_unroll_fltr_mgmt_struct; mutex_init(&hw->tnl_lock); + ice_init_chk_recipe_reuse_support(hw); + return 0; err_unroll_fltr_mgmt_struct: @@ -1615,6 +1623,8 @@ ice_aq_send_cmd(struct ice_hw *hw, struct 
ice_aq_desc *desc, void *buf, case ice_aqc_opc_set_port_params: case ice_aqc_opc_get_vlan_mode_parameters: case ice_aqc_opc_set_vlan_mode_parameters: + case ice_aqc_opc_set_tx_topo: + case ice_aqc_opc_get_tx_topo: case ice_aqc_opc_add_recipe: case ice_aqc_opc_recipe_to_profile: case ice_aqc_opc_get_recipe: @@ -2171,6 +2181,9 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n", prefix, caps->sriov_lag); break; + case ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE: + caps->tx_sched_topo_comp_mode_en = (number == 1); + break; default: /* Not one of the recognized common capabilities */ found = false; diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index 6e20ee610022..a94e7072b570 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -3,7 +3,7 @@ #include "ice_dcb_lib.h" #include "ice_dcb_nl.h" -#include "ice_devlink.h" +#include "devlink/devlink.h" /** * ice_dcb_get_ena_tc - return bitmap of enabled TCs @@ -291,7 +291,6 @@ static void ice_dcb_ena_dis_vsi(struct ice_pf *pf, bool ena, bool locked) switch (vsi->type) { case ICE_VSI_CHNL: - case ICE_VSI_SWITCHDEV_CTRL: case ICE_VSI_PF: if (ena) ice_ena_vsi(vsi, locked); @@ -776,8 +775,7 @@ void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked) /* no need to proceed with remaining cfg if it is CHNL * or switchdev VSI */ - if (vsi->type == ICE_VSI_CHNL || - vsi->type == ICE_VSI_SWITCHDEV_CTRL) + if (vsi->type == ICE_VSI_CHNL) continue; ice_vsi_map_rings_to_vectors(vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c index fc91c4d41186..ce5034ed2b24 100644 --- a/drivers/net/ethernet/intel/ice/ice_ddp.c +++ b/drivers/net/ethernet/intel/ice/ice_ddp.c @@ -4,6 +4,7 @@ #include "ice_common.h" #include "ice.h" #include "ice_ddp.h" +#include "ice_sched.h" /* For supporting double VLAN mode, it is necessary to enable or disable certain * boost tcam entries. 
The metadata labels names that match the following @@ -721,6 +722,12 @@ static bool ice_is_gtp_c_profile(u16 prof_idx) } } +static bool ice_is_pfcp_profile(u16 prof_idx) +{ + return prof_idx >= ICE_PROFID_IPV4_PFCP_NODE && + prof_idx <= ICE_PROFID_IPV6_PFCP_SESSION; +} + /** * ice_get_sw_prof_type - determine switch profile type * @hw: pointer to the HW structure @@ -738,6 +745,9 @@ static enum ice_prof_type ice_get_sw_prof_type(struct ice_hw *hw, if (ice_is_gtp_u_profile(prof_idx)) return ICE_PROF_TUN_GTPU; + if (ice_is_pfcp_profile(prof_idx)) + return ICE_PROF_TUN_PFCP; + for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) { /* UDP tunnel will have UDP_OF protocol ID and VNI offset */ if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF && @@ -1424,14 +1434,14 @@ ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr, goto exit; } - conf_idx = le32_to_cpu(seg->signed_seg_idx); - start = le32_to_cpu(seg->signed_buf_start); count = le32_to_cpu(seg->signed_buf_count); - state = ice_download_pkg_sig_seg(hw, seg); - if (state) + if (state || !count) goto exit; + conf_idx = le32_to_cpu(seg->signed_seg_idx); + start = le32_to_cpu(seg->signed_buf_start); + state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start, count); @@ -2263,3 +2273,211 @@ enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, return state; } + +/** + * ice_get_set_tx_topo - get or set Tx topology + * @hw: pointer to the HW struct + * @buf: pointer to Tx topology buffer + * @buf_size: buffer size + * @cd: pointer to command details structure or NULL + * @flags: pointer to descriptor flags + * @set: 0-get, 1-set topology + * + * The function will get or set Tx topology + * + * Return: zero when set was successful, negative values otherwise. + */ +static int +ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size, + struct ice_sq_cd *cd, u8 *flags, bool set) +{ + struct ice_aqc_get_set_tx_topo *cmd; + struct ice_aq_desc desc; + int status; + + cmd = &desc.params.get_set_tx_topo; + if (set) { + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_tx_topo); + cmd->set_flags = ICE_AQC_TX_TOPO_FLAGS_ISSUED; + /* requested to update a new topology, not a default topology */ + if (buf) + cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM | + ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW; + + if (ice_is_e825c(hw)) + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + } else { + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo); + cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM; + } + + if (!ice_is_e825c(hw)) + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); + if (status) + return status; + /* read the return flag values (first byte) for get operation */ + if (!set && flags) + *flags = desc.params.get_set_tx_topo.set_flags; + + return 0; +} + +/** + * ice_cfg_tx_topo - Initialize new Tx topology if available + * @hw: pointer to the HW struct + * @buf: pointer to Tx topology buffer + * @len: buffer size + * + * The function will apply the new Tx topology from the package buffer + * if available. + * + * Return: zero when update was successful, negative values otherwise. + */ +int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len) +{ + u8 *current_topo, *new_topo = NULL; + struct ice_run_time_cfg_seg *seg; + struct ice_buf_hdr *section; + struct ice_pkg_hdr *pkg_hdr; + enum ice_ddp_state state; + u16 offset, size = 0; + u32 reg = 0; + int status; + u8 flags; + + if (!buf || !len) + return -EINVAL; + + /* Does FW support new Tx topology mode ? 
*/ + if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) { + ice_debug(hw, ICE_DBG_INIT, "FW doesn't support compatibility mode\n"); + return -EOPNOTSUPP; + } + + current_topo = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); + if (!current_topo) + return -ENOMEM; + + /* Get the current Tx topology */ + status = ice_get_set_tx_topo(hw, current_topo, ICE_AQ_MAX_BUF_LEN, NULL, + &flags, false); + + kfree(current_topo); + + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Get current topology is failed\n"); + return status; + } + + /* Is default topology already applied ? */ + if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) && + hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS) { + ice_debug(hw, ICE_DBG_INIT, "Default topology already applied\n"); + return -EEXIST; + } + + /* Is new topology already applied ? */ + if ((flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) && + hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) { + ice_debug(hw, ICE_DBG_INIT, "New topology already applied\n"); + return -EEXIST; + } + + /* Setting topology already issued? */ + if (flags & ICE_AQC_TX_TOPO_FLAGS_ISSUED) { + ice_debug(hw, ICE_DBG_INIT, "Update Tx topology was done by another PF\n"); + /* Add a small delay before exiting */ + msleep(2000); + return -EEXIST; + } + + /* Change the topology from new to default (5 to 9) */ + if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) && + hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) { + ice_debug(hw, ICE_DBG_INIT, "Change topology from 5 to 9 layers\n"); + goto update_topo; + } + + pkg_hdr = (struct ice_pkg_hdr *)buf; + state = ice_verify_pkg(pkg_hdr, len); + if (state) { + ice_debug(hw, ICE_DBG_INIT, "Failed to verify pkg (err: %d)\n", + state); + return -EIO; + } + + /* Find runtime configuration segment */ + seg = (struct ice_run_time_cfg_seg *) + ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE_RUN_TIME_CFG, pkg_hdr); + if (!seg) { + ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment is missing\n"); + return -EIO; + } + + if (le32_to_cpu(seg->buf_table.buf_count) < ICE_MIN_S_COUNT) { + ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment count(%d) is wrong\n", + seg->buf_table.buf_count); + return -EIO; + } + + section = ice_pkg_val_buf(seg->buf_table.buf_array); + if (!section || le32_to_cpu(section->section_entry[0].type) != + ICE_SID_TX_5_LAYER_TOPO) { + ice_debug(hw, ICE_DBG_INIT, "5 layer topology section type is wrong\n"); + return -EIO; + } + + size = le16_to_cpu(section->section_entry[0].size); + offset = le16_to_cpu(section->section_entry[0].offset); + if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) { + ice_debug(hw, ICE_DBG_INIT, "5 layer topology section size is wrong\n"); + return -EIO; + } + + /* Make sure the section fits in the buffer */ + if (offset + size > ICE_PKG_BUF_SIZE) { + ice_debug(hw, ICE_DBG_INIT, "5 layer topology buffer > 4K\n"); + return -EIO; + } + + /* Get the new topology buffer */ + new_topo = ((u8 *)section) + offset; + +update_topo: + /* Acquire global lock to make sure that set topology issued + * by one PF. + */ + status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE, + ICE_GLOBAL_CFG_LOCK_TIMEOUT); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global lock\n"); + return status; + } + + /* Check if reset was triggered already. */ + reg = rd32(hw, GLGEN_RSTAT); + if (reg & GLGEN_RSTAT_DEVSTATE_M) { + /* Reset is in progress, re-init the HW again */ + ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. 
Layer topology might be applied already\n"); + ice_check_reset(hw); + return 0; + } + + /* Set new topology */ + status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Failed setting Tx topology\n"); + return status; + } + + /* New topology is updated, delay 1 second before issuing the CORER */ + msleep(1000); + ice_reset(hw, ICE_RESET_CORER); + /* CORER will clear the global lock, so no explicit call + * required for release. + */ + + return 0; +} diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.h b/drivers/net/ethernet/intel/ice/ice_ddp.h index ff66c2ffb1a2..622543f08b43 100644 --- a/drivers/net/ethernet/intel/ice/ice_ddp.h +++ b/drivers/net/ethernet/intel/ice/ice_ddp.h @@ -454,4 +454,6 @@ u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld); void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, u32 sect_type); +int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len); + #endif diff --git a/drivers/net/ethernet/intel/ice/ice_debugfs.c b/drivers/net/ethernet/intel/ice/ice_debugfs.c index d252d98218d0..9fc0fd95a13d 100644 --- a/drivers/net/ethernet/intel/ice/ice_debugfs.c +++ b/drivers/net/ethernet/intel/ice/ice_debugfs.c @@ -171,7 +171,7 @@ ice_debugfs_module_write(struct file *filp, const char __user *buf, if (*ppos != 0 || count > 8) return -EINVAL; - cmd_buf = memdup_user(buf, count); + cmd_buf = memdup_user_nul(buf, count); if (IS_ERR(cmd_buf)) return PTR_ERR(cmd_buf); @@ -257,7 +257,7 @@ ice_debugfs_nr_messages_write(struct file *filp, const char __user *buf, if (*ppos != 0 || count > 4) return -EINVAL; - cmd_buf = memdup_user(buf, count); + cmd_buf = memdup_user_nul(buf, count); if (IS_ERR(cmd_buf)) return PTR_ERR(cmd_buf); @@ -332,7 +332,7 @@ ice_debugfs_enable_write(struct file *filp, const char __user *buf, if (*ppos != 0 || count > 2) return -EINVAL; - cmd_buf = memdup_user(buf, count); + cmd_buf = memdup_user_nul(buf, count); if (IS_ERR(cmd_buf)) return PTR_ERR(cmd_buf); @@ -428,7 +428,7 @@ ice_debugfs_log_size_write(struct file *filp, const char __user *buf, if (*ppos != 0 || count > 5) return -EINVAL; - cmd_buf = memdup_user(buf, count); + cmd_buf = memdup_user_nul(buf, count); if (IS_ERR(cmd_buf)) return PTR_ERR(cmd_buf); diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h index 9dfae9bce758..34fd604132f5 100644 --- a/drivers/net/ethernet/intel/ice/ice_devids.h +++ b/drivers/net/ethernet/intel/ice/ice_devids.h @@ -16,14 +16,26 @@ #define ICE_DEV_ID_E823L_1GBE 0x124F /* Intel(R) Ethernet Connection E823-L for QSFP */ #define ICE_DEV_ID_E823L_QSFP 0x151D +/* Intel(R) Ethernet Controller E830-CC for backplane */ +#define ICE_DEV_ID_E830CC_BACKPLANE 0x12D1 +/* Intel(R) Ethernet Controller E830-CC for QSFP */ +#define ICE_DEV_ID_E830CC_QSFP56 0x12D2 +/* Intel(R) Ethernet Controller E830-CC for SFP */ +#define ICE_DEV_ID_E830CC_SFP 0x12D3 +/* Intel(R) Ethernet Controller E830-CC for SFP-DD */ +#define ICE_DEV_ID_E830CC_SFP_DD 0x12D4 /* Intel(R) Ethernet Controller E830-C for backplane */ -#define ICE_DEV_ID_E830_BACKPLANE 0x12D1 +#define ICE_DEV_ID_E830C_BACKPLANE 0x12D5 /* Intel(R) Ethernet Controller E830-C for QSFP */ -#define ICE_DEV_ID_E830_QSFP56 0x12D2 +#define ICE_DEV_ID_E830C_QSFP 0x12D8 /* Intel(R) Ethernet Controller E830-C for SFP */ -#define ICE_DEV_ID_E830_SFP 0x12D3 -/* Intel(R) Ethernet Controller E830-C for SFP-DD */ -#define ICE_DEV_ID_E830_SFP_DD 0x12D4 +#define ICE_DEV_ID_E830C_SFP 0x12DA +/* Intel(R) Ethernet 
Controller E830-XXV for backplane */ +#define ICE_DEV_ID_E830_XXV_BACKPLANE 0x12DC +/* Intel(R) Ethernet Controller E830-XXV for QSFP */ +#define ICE_DEV_ID_E830_XXV_QSFP 0x12DD +/* Intel(R) Ethernet Controller E830-XXV for SFP */ +#define ICE_DEV_ID_E830_XXV_SFP 0x12DE /* Intel(R) Ethernet Controller E810-C for backplane */ #define ICE_DEV_ID_E810C_BACKPLANE 0x1591 /* Intel(R) Ethernet Controller E810-C for QSFP */ diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c index 9069725c71b4..b102db8b829a 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -7,89 +7,10 @@ #include "ice_eswitch_br.h" #include "ice_fltr.h" #include "ice_repr.h" -#include "ice_devlink.h" +#include "devlink/devlink.h" #include "ice_tc_lib.h" /** - * ice_eswitch_del_sp_rules - delete adv rules added on PRs - * @pf: pointer to the PF struct - * - * Delete all advanced rules that were used to forward packets with the - * device's VSI index to the corresponding eswitch ctrl VSI queue. - */ -static void ice_eswitch_del_sp_rules(struct ice_pf *pf) -{ - struct ice_repr *repr; - unsigned long id; - - xa_for_each(&pf->eswitch.reprs, id, repr) { - if (repr->sp_rule.rid) - ice_rem_adv_rule_by_id(&pf->hw, &repr->sp_rule); - } -} - -/** - * ice_eswitch_add_sp_rule - add adv rule with device's VSI index - * @pf: pointer to PF struct - * @repr: pointer to the repr struct - * - * This function adds advanced rule that forwards packets with - * device's VSI index to the corresponding eswitch ctrl VSI queue. - */ -static int ice_eswitch_add_sp_rule(struct ice_pf *pf, struct ice_repr *repr) -{ - struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi; - struct ice_adv_rule_info rule_info = { 0 }; - struct ice_adv_lkup_elem *list; - struct ice_hw *hw = &pf->hw; - const u16 lkups_cnt = 1; - int err; - - list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC); - if (!list) - return -ENOMEM; - - ice_rule_add_src_vsi_metadata(list); - - rule_info.sw_act.flag = ICE_FLTR_TX; - rule_info.sw_act.vsi_handle = ctrl_vsi->idx; - rule_info.sw_act.fltr_act = ICE_FWD_TO_Q; - rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id + - ctrl_vsi->rxq_map[repr->q_id]; - rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE; - rule_info.flags_info.act_valid = true; - rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN; - rule_info.src_vsi = repr->src_vsi->idx; - - err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, - &repr->sp_rule); - if (err) - dev_err(ice_pf_to_dev(pf), "Unable to add slow-path rule for eswitch for PR %d", - repr->id); - - kfree(list); - return err; -} - -static int -ice_eswitch_add_sp_rules(struct ice_pf *pf) -{ - struct ice_repr *repr; - unsigned long id; - int err; - - xa_for_each(&pf->eswitch.reprs, id, repr) { - err = ice_eswitch_add_sp_rule(pf, repr); - if (err) { - ice_eswitch_del_sp_rules(pf); - return err; - } - } - - return 0; -} - -/** * ice_eswitch_setup_env - configure eswitch HW filters * @pf: pointer to PF struct * @@ -99,10 +20,13 @@ ice_eswitch_add_sp_rules(struct ice_pf *pf) static int ice_eswitch_setup_env(struct ice_pf *pf) { struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi; - struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi; struct net_device *netdev = uplink_vsi->netdev; + bool if_running = netif_running(netdev); struct ice_vsi_vlan_ops *vlan_ops; - bool rule_added = false; + + if (if_running && !test_and_set_bit(ICE_VSI_DOWN, uplink_vsi->state)) + if (ice_down(uplink_vsi)) + return -ENODEV; 
ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx); @@ -112,98 +36,53 @@ static int ice_eswitch_setup_env(struct ice_pf *pf) netif_addr_unlock_bh(netdev); if (ice_vsi_add_vlan_zero(uplink_vsi)) + goto err_vlan_zero; + + if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true, + ICE_FLTR_RX)) goto err_def_rx; - if (!ice_is_dflt_vsi_in_use(uplink_vsi->port_info)) { - if (ice_set_dflt_vsi(uplink_vsi)) - goto err_def_rx; - rule_added = true; - } + if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true, + ICE_FLTR_TX)) + goto err_def_tx; vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi); if (vlan_ops->dis_rx_filtering(uplink_vsi)) - goto err_dis_rx; + goto err_vlan_filtering; if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override)) goto err_override_uplink; - if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override)) - goto err_override_control; - if (ice_vsi_update_local_lb(uplink_vsi, true)) goto err_override_local_lb; + if (if_running && ice_up(uplink_vsi)) + goto err_up; + return 0; +err_up: + ice_vsi_update_local_lb(uplink_vsi, false); err_override_local_lb: - ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override); -err_override_control: ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); err_override_uplink: vlan_ops->ena_rx_filtering(uplink_vsi); -err_dis_rx: - if (rule_added) - ice_clear_dflt_vsi(uplink_vsi); +err_vlan_filtering: + ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false, + ICE_FLTR_TX); +err_def_tx: + ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false, + ICE_FLTR_RX); err_def_rx: + ice_vsi_del_vlan_zero(uplink_vsi); +err_vlan_zero: ice_fltr_add_mac_and_broadcast(uplink_vsi, uplink_vsi->port_info->mac.perm_addr, ICE_FWD_TO_VSI); - return -ENODEV; -} - -/** - * ice_eswitch_remap_rings_to_vectors - reconfigure rings of eswitch ctrl VSI - * @eswitch: pointer to eswitch struct - * - * In eswitch number of allocated Tx/Rx rings is equal. - * - * This function fills q_vectors structures associated with representor and - * move each ring pairs to port representor netdevs. Each port representor - * will have dedicated 1 Tx/Rx ring pair, so number of rings pair is equal to - * number of VFs. - */ -static void ice_eswitch_remap_rings_to_vectors(struct ice_eswitch *eswitch) -{ - struct ice_vsi *vsi = eswitch->control_vsi; - unsigned long repr_id = 0; - int q_id; - - ice_for_each_txq(vsi, q_id) { - struct ice_q_vector *q_vector; - struct ice_tx_ring *tx_ring; - struct ice_rx_ring *rx_ring; - struct ice_repr *repr; - - repr = xa_find(&eswitch->reprs, &repr_id, U32_MAX, - XA_PRESENT); - if (!repr) - break; - - repr_id += 1; - repr->q_id = q_id; - q_vector = repr->q_vector; - tx_ring = vsi->tx_rings[q_id]; - rx_ring = vsi->rx_rings[q_id]; - - q_vector->vsi = vsi; - q_vector->reg_idx = vsi->q_vectors[0]->reg_idx; - - q_vector->num_ring_tx = 1; - q_vector->tx.tx_ring = tx_ring; - tx_ring->q_vector = q_vector; - tx_ring->next = NULL; - tx_ring->netdev = repr->netdev; - /* In switchdev mode, from OS stack perspective, there is only - * one queue for given netdev, so it needs to be indexed as 0. 
- */ - tx_ring->q_index = 0; + if (if_running) + ice_up(uplink_vsi); - q_vector->num_ring_rx = 1; - q_vector->rx.rx_ring = rx_ring; - rx_ring->q_vector = q_vector; - rx_ring->next = NULL; - rx_ring->netdev = repr->netdev; - } + return -ENODEV; } /** @@ -225,8 +104,6 @@ ice_eswitch_release_repr(struct ice_pf *pf, struct ice_repr *repr) repr->dst = NULL; ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac, ICE_FWD_TO_VSI); - - netif_napi_del(&repr->q_vector->napi); } /** @@ -236,7 +113,7 @@ ice_eswitch_release_repr(struct ice_pf *pf, struct ice_repr *repr) */ static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr) { - struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi; + struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi; struct ice_vsi *vsi = repr->src_vsi; struct metadata_dst *dst; @@ -252,15 +129,11 @@ static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr) if (ice_vsi_add_vlan_zero(vsi)) goto err_update_security; - netif_napi_add(repr->netdev, &repr->q_vector->napi, - ice_napi_poll); - - netif_keep_dst(repr->netdev); + netif_keep_dst(uplink_vsi->netdev); dst = repr->dst; dst->u.port_info.port_id = vsi->vsi_num; - dst->u.port_info.lower_dev = repr->netdev; - ice_repr_set_traffic_vsi(repr, ctrl_vsi); + dst->u.port_info.lower_dev = uplink_vsi->netdev; return 0; @@ -318,27 +191,19 @@ void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi) netdev_tx_t ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev) { - struct ice_netdev_priv *np; - struct ice_repr *repr; - struct ice_vsi *vsi; - - np = netdev_priv(netdev); - vsi = np->vsi; - - if (!vsi || !ice_is_switchdev_running(vsi->back)) - return NETDEV_TX_BUSY; - - if (ice_is_reset_in_progress(vsi->back->state) || - test_bit(ICE_VF_DIS, vsi->back->state)) - return NETDEV_TX_BUSY; + struct ice_repr *repr = ice_netdev_to_repr(netdev); + unsigned int len = skb->len; + int ret; - repr = ice_netdev_to_repr(netdev); skb_dst_drop(skb); dst_hold((struct dst_entry *)repr->dst); skb_dst_set(skb, (struct dst_entry *)repr->dst); - skb->queue_mapping = repr->q_id; + skb->dev = repr->dst->u.port_info.lower_dev; + + ret = dev_queue_xmit(skb); + ice_repr_inc_tx_stats(repr, len, ret); - return ice_start_xmit(skb, netdev); + return ret; } /** @@ -374,71 +239,29 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb, static void ice_eswitch_release_env(struct ice_pf *pf) { struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi; - struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi; struct ice_vsi_vlan_ops *vlan_ops; vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi); ice_vsi_update_local_lb(uplink_vsi, false); - ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override); ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); vlan_ops->ena_rx_filtering(uplink_vsi); - ice_clear_dflt_vsi(uplink_vsi); + ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false, + ICE_FLTR_TX); + ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false, + ICE_FLTR_RX); ice_fltr_add_mac_and_broadcast(uplink_vsi, uplink_vsi->port_info->mac.perm_addr, ICE_FWD_TO_VSI); } /** - * ice_eswitch_vsi_setup - configure eswitch control VSI - * @pf: pointer to PF structure - * @pi: pointer to port_info structure - */ -static struct ice_vsi * -ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) -{ - struct ice_vsi_cfg_params params = {}; - - params.type = ICE_VSI_SWITCHDEV_CTRL; - params.pi = pi; - params.flags = ICE_VSI_FLAG_INIT; - - return ice_vsi_setup(pf, ¶ms); -} - -/** - * 
ice_eswitch_napi_enable - enable NAPI for all port representors - * @reprs: xarray of reprs - */ -static void ice_eswitch_napi_enable(struct xarray *reprs) -{ - struct ice_repr *repr; - unsigned long id; - - xa_for_each(reprs, id, repr) - napi_enable(&repr->q_vector->napi); -} - -/** - * ice_eswitch_napi_disable - disable NAPI for all port representors - * @reprs: xarray of reprs - */ -static void ice_eswitch_napi_disable(struct xarray *reprs) -{ - struct ice_repr *repr; - unsigned long id; - - xa_for_each(reprs, id, repr) - napi_disable(&repr->q_vector->napi); -} - -/** * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode * @pf: pointer to PF structure */ static int ice_eswitch_enable_switchdev(struct ice_pf *pf) { - struct ice_vsi *ctrl_vsi, *uplink_vsi; + struct ice_vsi *uplink_vsi; uplink_vsi = ice_get_main_vsi(pf); if (!uplink_vsi) @@ -450,17 +273,10 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf) return -EINVAL; } - pf->eswitch.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info); - if (!pf->eswitch.control_vsi) - return -ENODEV; - - ctrl_vsi = pf->eswitch.control_vsi; - /* cp VSI is createad with 1 queue as default */ - pf->eswitch.qs.value = 1; pf->eswitch.uplink_vsi = uplink_vsi; if (ice_eswitch_setup_env(pf)) - goto err_vsi; + return -ENODEV; if (ice_eswitch_br_offloads_init(pf)) goto err_br_offloads; @@ -471,8 +287,6 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf) err_br_offloads: ice_eswitch_release_env(pf); -err_vsi: - ice_vsi_release(ctrl_vsi); return -ENODEV; } @@ -482,14 +296,10 @@ err_vsi: */ static void ice_eswitch_disable_switchdev(struct ice_pf *pf) { - struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi; - ice_eswitch_br_offloads_deinit(pf); ice_eswitch_release_env(pf); - ice_vsi_release(ctrl_vsi); pf->eswitch.is_running = false; - pf->eswitch.qs.is_reaching = false; } /** @@ -530,7 +340,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode, dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev", pf->hw.pf_id); - xa_init_flags(&pf->eswitch.reprs, XA_FLAGS_ALLOC); + xa_init(&pf->eswitch.reprs); NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev"); break; } @@ -602,56 +412,18 @@ void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) static void ice_eswitch_stop_reprs(struct ice_pf *pf) { - ice_eswitch_del_sp_rules(pf); ice_eswitch_stop_all_tx_queues(pf); - ice_eswitch_napi_disable(&pf->eswitch.reprs); } static void ice_eswitch_start_reprs(struct ice_pf *pf) { - ice_eswitch_napi_enable(&pf->eswitch.reprs); ice_eswitch_start_all_tx_queues(pf); - ice_eswitch_add_sp_rules(pf); -} - -static void -ice_eswitch_cp_change_queues(struct ice_eswitch *eswitch, int change) -{ - struct ice_vsi *cp = eswitch->control_vsi; - int queues = 0; - - if (eswitch->qs.is_reaching) { - if (eswitch->qs.to_reach >= eswitch->qs.value + change) { - queues = eswitch->qs.to_reach; - eswitch->qs.is_reaching = false; - } else { - queues = 0; - } - } else if ((change > 0 && cp->alloc_txq <= eswitch->qs.value) || - change < 0) { - queues = cp->alloc_txq + change; - } - - if (queues) { - cp->req_txq = queues; - cp->req_rxq = queues; - ice_vsi_close(cp); - ice_vsi_rebuild(cp, ICE_VSI_FLAG_NO_INIT); - ice_vsi_open(cp); - } else if (!change) { - /* change == 0 means that VSI wasn't open, open it here */ - ice_vsi_open(cp); - } - - eswitch->qs.value += change; - ice_eswitch_remap_rings_to_vectors(eswitch); } int ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf) { struct ice_repr *repr; - int change = 1; int err; if 
(pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY) @@ -661,9 +433,6 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf) err = ice_eswitch_enable_switchdev(pf); if (err) return err; - /* Control plane VSI is created with 1 queue as default */ - pf->eswitch.qs.to_reach -= 1; - change = 0; } ice_eswitch_stop_reprs(pf); @@ -678,14 +447,12 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf) if (err) goto err_setup_repr; - err = xa_alloc(&pf->eswitch.reprs, &repr->id, repr, - XA_LIMIT(1, INT_MAX), GFP_KERNEL); + err = xa_insert(&pf->eswitch.reprs, repr->id, repr, GFP_KERNEL); if (err) goto err_xa_alloc; vf->repr_id = repr->id; - ice_eswitch_cp_change_queues(&pf->eswitch, change); ice_eswitch_start_reprs(pf); return 0; @@ -715,8 +482,6 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) if (xa_empty(&pf->eswitch.reprs)) ice_eswitch_disable_switchdev(pf); - else - ice_eswitch_cp_change_queues(&pf->eswitch, -1); ice_eswitch_release_repr(pf, repr); ice_repr_rem_vf(repr); @@ -738,37 +503,37 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) * ice_eswitch_rebuild - rebuild eswitch * @pf: pointer to PF structure */ -int ice_eswitch_rebuild(struct ice_pf *pf) +void ice_eswitch_rebuild(struct ice_pf *pf) { struct ice_repr *repr; unsigned long id; - int err; if (!ice_is_switchdev_running(pf)) - return 0; - - err = ice_vsi_rebuild(pf->eswitch.control_vsi, ICE_VSI_FLAG_INIT); - if (err) - return err; + return; xa_for_each(&pf->eswitch.reprs, id, repr) ice_eswitch_detach(pf, repr->vf); - - return 0; } /** - * ice_eswitch_reserve_cp_queues - reserve control plane VSI queues - * @pf: pointer to PF structure - * @change: how many more (or less) queues is needed + * ice_eswitch_get_target - get netdev based on src_vsi from descriptor + * @rx_ring: ring used to receive the packet + * @rx_desc: descriptor used to get src_vsi value * - * Remember to call ice_eswitch_attach/detach() the "change" times. + * Get src_vsi value from descriptor and load correct representor. If it isn't + * found return rx_ring->netdev. 
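[Note] The new ice_eswitch_get_target() documented above replaces the dedicated control-plane queues: the source VSI recorded by hardware in the Rx flex descriptor selects the representor netdev, with the ring's own netdev as the fallback. Below is a minimal user-space sketch of that lookup, using stand-in types rather than the driver's xarray.

#include <stdint.h>
#include <stdio.h>

struct netdev { const char *name; };

#define MAX_VSI 64
static struct netdev *repr_map[MAX_VSI];	/* indexed by source VSI number */

static struct netdev *get_target(uint16_t src_vsi, struct netdev *uplink)
{
	if (src_vsi < MAX_VSI && repr_map[src_vsi])
		return repr_map[src_vsi];
	return uplink;		/* no representor registered -> deliver to uplink */
}

int main(void)
{
	struct netdev uplink = { "eth0" }, vf_pr = { "eth0_pr_3" };

	repr_map[3] = &vf_pr;
	printf("%s\n", get_target(3, &uplink)->name);	/* eth0_pr_3 */
	printf("%s\n", get_target(9, &uplink)->name);	/* eth0 */
	return 0;
}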
*/ -void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change) +struct net_device *ice_eswitch_get_target(struct ice_rx_ring *rx_ring, + union ice_32b_rx_flex_desc *rx_desc) { - if (pf->eswitch.qs.value + change < 0) - return; + struct ice_eswitch *eswitch = &rx_ring->vsi->back->eswitch; + struct ice_32b_rx_flex_desc_nic_2 *desc; + struct ice_repr *repr; + + desc = (struct ice_32b_rx_flex_desc_nic_2 *)rx_desc; + repr = xa_load(&eswitch->reprs, le16_to_cpu(desc->src_vsi)); + if (!repr) + return rx_ring->netdev; - pf->eswitch.qs.to_reach = pf->eswitch.qs.value + change; - pf->eswitch.qs.is_reaching = true; + return repr->netdev; } diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h index 1a288a03a79a..e2e5c0c75e7d 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.h +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h @@ -10,7 +10,7 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf); int ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf); -int ice_eswitch_rebuild(struct ice_pf *pf); +void ice_eswitch_rebuild(struct ice_pf *pf); int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode); int @@ -26,7 +26,8 @@ void ice_eswitch_set_target_vsi(struct sk_buff *skb, struct ice_tx_offload_params *off); netdev_tx_t ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev); -void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change); +struct net_device *ice_eswitch_get_target(struct ice_rx_ring *rx_ring, + union ice_32b_rx_flex_desc *rx_desc); #else /* CONFIG_ICE_SWITCHDEV */ static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { } @@ -78,7 +79,11 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_BUSY; } -static inline void -ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change) { } +static inline struct net_device * +ice_eswitch_get_target(struct ice_rx_ring *rx_ring, + union ice_32b_rx_flex_desc *rx_desc) +{ + return rx_ring->netdev; +} #endif /* CONFIG_ICE_SWITCHDEV */ #endif /* _ICE_ESWITCH_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c index 9a1a04f5f146..e3cab8e98f52 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c @@ -41,6 +41,8 @@ static struct in6_addr zero_ipv6_addr_mask = { static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow) { switch (flow) { + case ICE_FLTR_PTYPE_NONF_ETH: + return ETHER_FLOW; case ICE_FLTR_PTYPE_NONF_IPV4_TCP: return TCP_V4_FLOW; case ICE_FLTR_PTYPE_NONF_IPV4_UDP: @@ -72,6 +74,8 @@ static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow) static enum ice_fltr_ptype ice_ethtool_flow_to_fltr(int eth) { switch (eth) { + case ETHER_FLOW: + return ICE_FLTR_PTYPE_NONF_ETH; case TCP_V4_FLOW: return ICE_FLTR_PTYPE_NONF_IPV4_TCP; case UDP_V4_FLOW: @@ -137,6 +141,10 @@ int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd) memset(&fsp->m_ext, 0, sizeof(fsp->m_ext)); switch (fsp->flow_type) { + case ETHER_FLOW: + fsp->h_u.ether_spec = rule->eth; + fsp->m_u.ether_spec = rule->eth_mask; + break; case IPV4_USER_FLOW: fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; fsp->h_u.usr_ip4_spec.proto = 0; @@ -1194,6 +1202,122 @@ ice_set_fdir_ip6_usr_seg(struct ice_flow_seg_info *seg, } /** + * ice_fdir_vlan_valid - validate VLAN data for Flow Director rule + * @dev: network interface device structure + * @fsp: pointer to ethtool Rx flow 
specification + * + * Return: true if vlan data is valid, false otherwise + */ +static bool ice_fdir_vlan_valid(struct device *dev, + struct ethtool_rx_flow_spec *fsp) +{ + if (fsp->m_ext.vlan_etype && !eth_type_vlan(fsp->h_ext.vlan_etype)) + return false; + + if (fsp->m_ext.vlan_tci && ntohs(fsp->h_ext.vlan_tci) >= VLAN_N_VID) + return false; + + /* proto and vlan must have vlan-etype defined */ + if (fsp->m_u.ether_spec.h_proto && fsp->m_ext.vlan_tci && + !fsp->m_ext.vlan_etype) { + dev_warn(dev, "Filter with proto and vlan require also vlan-etype"); + return false; + } + + return true; +} + +/** + * ice_set_ether_flow_seg - set address and protocol segments for ether flow + * @dev: network interface device structure + * @seg: flow segment for programming + * @eth_spec: mask data from ethtool + * + * Return: 0 on success and errno in case of error. + */ +static int ice_set_ether_flow_seg(struct device *dev, + struct ice_flow_seg_info *seg, + struct ethhdr *eth_spec) +{ + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH); + + /* empty rules are not valid */ + if (is_zero_ether_addr(eth_spec->h_source) && + is_zero_ether_addr(eth_spec->h_dest) && + !eth_spec->h_proto) + return -EINVAL; + + /* Ethertype */ + if (eth_spec->h_proto == htons(0xFFFF)) { + ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_TYPE, + ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, false); + } else if (eth_spec->h_proto) { + dev_warn(dev, "Only 0x0000 or 0xffff proto mask is allowed for flow-type ether"); + return -EOPNOTSUPP; + } + + /* Source MAC address */ + if (is_broadcast_ether_addr(eth_spec->h_source)) + ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_SA, + ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, false); + else if (!is_zero_ether_addr(eth_spec->h_source)) + goto err_mask; + + /* Destination MAC address */ + if (is_broadcast_ether_addr(eth_spec->h_dest)) + ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_DA, + ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, false); + else if (!is_zero_ether_addr(eth_spec->h_dest)) + goto err_mask; + + return 0; + +err_mask: + dev_warn(dev, "Only 00:00:00:00:00:00 or ff:ff:ff:ff:ff:ff MAC address mask is allowed for flow-type ether"); + return -EOPNOTSUPP; +} + +/** + * ice_set_fdir_vlan_seg - set vlan segments for ether flow + * @seg: flow segment for programming + * @ext_masks: masks for additional RX flow fields + * + * Return: 0 on success and errno in case of error. 
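[Note] ice_fdir_vlan_valid() and ice_set_ether_flow_seg() above only accept all-or-nothing masks: the Ethertype mask must be 0x0000 or 0xffff, a MAC mask must be zero or broadcast, and a matched VLAN TCI must stay below VLAN_N_VID. A small stand-alone sketch of those checks follows; the constants mirror the uapi values and the helper names are illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define VLAN_N_VID	4096

static bool ethertype_mask_ok(uint16_t mask)
{
	return mask == 0x0000 || mask == 0xffff;
}

static bool mac_mask_ok(const uint8_t mac[6])
{
	static const uint8_t zero[6];
	static const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	return !memcmp(mac, zero, 6) || !memcmp(mac, bcast, 6);
}

static bool vlan_tci_ok(uint16_t tci)
{
	/* the driver rejects a match value whose whole TCI is >= 4096 */
	return tci < VLAN_N_VID;
}

int main(void)
{
	const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	printf("%d %d %d\n", ethertype_mask_ok(0xffff), mac_mask_ok(bcast),
	       vlan_tci_ok(100));
	return 0;
}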
+ */ +static int +ice_set_fdir_vlan_seg(struct ice_flow_seg_info *seg, + struct ethtool_flow_ext *ext_masks) +{ + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_VLAN); + + if (ext_masks->vlan_etype) { + if (ext_masks->vlan_etype != htons(0xFFFF)) + return -EOPNOTSUPP; + + ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_S_VLAN, + ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, false); + } + + if (ext_masks->vlan_tci) { + if (ext_masks->vlan_tci != htons(0xFFFF)) + return -EOPNOTSUPP; + + ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_C_VLAN, + ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, false); + } + + return 0; +} + +/** * ice_cfg_fdir_xtrct_seq - Configure extraction sequence for the given filter * @pf: PF structure * @fsp: pointer to ethtool Rx flow specification @@ -1209,7 +1333,7 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp, struct device *dev = ice_pf_to_dev(pf); enum ice_fltr_ptype fltr_idx; struct ice_hw *hw = &pf->hw; - bool perfect_filter; + bool perfect_filter = false; int ret; seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL); @@ -1262,6 +1386,16 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp, ret = ice_set_fdir_ip6_usr_seg(seg, &fsp->m_u.usr_ip6_spec, &perfect_filter); break; + case ETHER_FLOW: + ret = ice_set_ether_flow_seg(dev, seg, &fsp->m_u.ether_spec); + if (!ret && (fsp->m_ext.vlan_etype || fsp->m_ext.vlan_tci)) { + if (!ice_fdir_vlan_valid(dev, fsp)) { + ret = -EINVAL; + break; + } + ret = ice_set_fdir_vlan_seg(seg, &fsp->m_ext); + } + break; default: ret = -EINVAL; } @@ -1823,6 +1957,10 @@ ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp, input->mask.v6.tc = fsp->m_u.usr_ip6_spec.tclass; input->mask.v6.proto = fsp->m_u.usr_ip6_spec.l4_proto; break; + case ETHER_FLOW: + input->eth = fsp->h_u.ether_spec; + input->eth_mask = fsp->m_u.ether_spec; + break; default: /* not doing un-parsed flow types */ return -EINVAL; diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c index 5840c3e04a5b..26b357c0ae15 100644 --- a/drivers/net/ethernet/intel/ice/ice_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_fdir.c @@ -4,6 +4,8 @@ #include "ice_common.h" /* These are training packet headers used to program flow director filters. */ +static const u8 ice_fdir_eth_pkt[22]; + static const u8 ice_fdir_tcpv4_pkt[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, @@ -417,6 +419,11 @@ static const u8 ice_fdir_ip6_tun_pkt[] = { /* Flow Director no-op training packet table */ static const struct ice_fdir_base_pkt ice_fdir_pkt[] = { { + ICE_FLTR_PTYPE_NONF_ETH, + sizeof(ice_fdir_eth_pkt), ice_fdir_eth_pkt, + sizeof(ice_fdir_eth_pkt), ice_fdir_eth_pkt, + }, + { ICE_FLTR_PTYPE_NONF_IPV4_TCP, sizeof(ice_fdir_tcpv4_pkt), ice_fdir_tcpv4_pkt, sizeof(ice_fdir_tcp4_tun_pkt), ice_fdir_tcp4_tun_pkt, @@ -914,6 +921,21 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, * perspective. The input from user is from Rx filter perspective. 
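[Note] The ETHER_FLOW training packet programmed further down in ice_fdir_get_gen_prgm_pkt() is a bare 22-byte L2 frame: destination MAC at offset 0, source MAC at 6, and either the Ethertype at offset 12 or, when a VLAN tag is matched, the VLAN Ethertype at 12, the TCI at 14 and the inner protocol at 16 (the ICE_ETH_*_OFFSET macros added in ice_fdir.h). Below is a stand-alone sketch of that layout; the helpers are illustrative, and values are converted to network byte order here, whereas the driver receives them already big-endian from ethtool.

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>		/* htons() */

#define ETH_ALEN		6
#define ETH_TYPE_OFFSET		12	/* ICE_ETH_TYPE_F_OFFSET */
#define ETH_VLAN_TCI_OFFSET	14	/* ICE_ETH_VLAN_TCI_OFFSET */
#define ETH_TYPE_VLAN_OFFSET	16	/* ICE_ETH_TYPE_VLAN_OFFSET */

static void put_be16(uint8_t *pkt, int off, uint16_t host_val)
{
	uint16_t be = htons(host_val);

	memcpy(pkt + off, &be, sizeof(be));
}

static void build_eth_pkt(uint8_t pkt[22], const uint8_t *dst,
			  const uint8_t *src, uint16_t proto,
			  uint16_t vlan_tpid, uint16_t vlan_tci)
{
	memcpy(pkt, dst, ETH_ALEN);
	memcpy(pkt + ETH_ALEN, src, ETH_ALEN);

	if (vlan_tpid || vlan_tci) {
		put_be16(pkt, ETH_TYPE_OFFSET, vlan_tpid);	/* e.g. 0x8100 */
		put_be16(pkt, ETH_VLAN_TCI_OFFSET, vlan_tci);
		put_be16(pkt, ETH_TYPE_VLAN_OFFSET, proto);
	} else {
		put_be16(pkt, ETH_TYPE_OFFSET, proto);
	}
}

int main(void)
{
	uint8_t pkt[22] = { 0 };
	const uint8_t dst[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 1 };
	const uint8_t src[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 2 };

	build_eth_pkt(pkt, dst, src, 0x88f7, 0x8100, 100);
	return pkt[ETH_TYPE_OFFSET] == 0x81 ? 0 : 1;
}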
*/ switch (flow) { + case ICE_FLTR_PTYPE_NONF_ETH: + ice_pkt_insert_mac_addr(loc, input->eth.h_dest); + ice_pkt_insert_mac_addr(loc + ETH_ALEN, input->eth.h_source); + if (input->ext_data.vlan_tag || input->ext_data.vlan_type) { + ice_pkt_insert_u16(loc, ICE_ETH_TYPE_F_OFFSET, + input->ext_data.vlan_type); + ice_pkt_insert_u16(loc, ICE_ETH_VLAN_TCI_OFFSET, + input->ext_data.vlan_tag); + ice_pkt_insert_u16(loc, ICE_ETH_TYPE_VLAN_OFFSET, + input->eth.h_proto); + } else { + ice_pkt_insert_u16(loc, ICE_ETH_TYPE_F_OFFSET, + input->eth.h_proto); + } + break; case ICE_FLTR_PTYPE_NONF_IPV4_TCP: ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET, input->ip.v4.src_ip); @@ -1189,52 +1211,58 @@ static int ice_cmp_ipv6_addr(__be32 *a, __be32 *b) * ice_fdir_comp_rules - compare 2 filters * @a: a Flow Director filter data structure * @b: a Flow Director filter data structure - * @v6: bool true if v6 filter * * Returns true if the filters match */ static bool -ice_fdir_comp_rules(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b, bool v6) +ice_fdir_comp_rules(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b) { enum ice_fltr_ptype flow_type = a->flow_type; /* The calling function already checks that the two filters have the * same flow_type. */ - if (!v6) { - if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP || - flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP || - flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP) { - if (a->ip.v4.dst_ip == b->ip.v4.dst_ip && - a->ip.v4.src_ip == b->ip.v4.src_ip && - a->ip.v4.dst_port == b->ip.v4.dst_port && - a->ip.v4.src_port == b->ip.v4.src_port) - return true; - } else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) { - if (a->ip.v4.dst_ip == b->ip.v4.dst_ip && - a->ip.v4.src_ip == b->ip.v4.src_ip && - a->ip.v4.l4_header == b->ip.v4.l4_header && - a->ip.v4.proto == b->ip.v4.proto && - a->ip.v4.ip_ver == b->ip.v4.ip_ver && - a->ip.v4.tos == b->ip.v4.tos) - return true; - } - } else { - if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP || - flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP || - flow_type == ICE_FLTR_PTYPE_NONF_IPV6_SCTP) { - if (a->ip.v6.dst_port == b->ip.v6.dst_port && - a->ip.v6.src_port == b->ip.v6.src_port && - !ice_cmp_ipv6_addr(a->ip.v6.dst_ip, - b->ip.v6.dst_ip) && - !ice_cmp_ipv6_addr(a->ip.v6.src_ip, - b->ip.v6.src_ip)) - return true; - } else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_OTHER) { - if (a->ip.v6.dst_port == b->ip.v6.dst_port && - a->ip.v6.src_port == b->ip.v6.src_port) - return true; - } + switch (flow_type) { + case ICE_FLTR_PTYPE_NONF_ETH: + if (!memcmp(&a->eth, &b->eth, sizeof(a->eth))) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_SCTP: + if (a->ip.v4.dst_ip == b->ip.v4.dst_ip && + a->ip.v4.src_ip == b->ip.v4.src_ip && + a->ip.v4.dst_port == b->ip.v4.dst_port && + a->ip.v4.src_port == b->ip.v4.src_port) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_OTHER: + if (a->ip.v4.dst_ip == b->ip.v4.dst_ip && + a->ip.v4.src_ip == b->ip.v4.src_ip && + a->ip.v4.l4_header == b->ip.v4.l4_header && + a->ip.v4.proto == b->ip.v4.proto && + a->ip.v4.ip_ver == b->ip.v4.ip_ver && + a->ip.v4.tos == b->ip.v4.tos) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_SCTP: + if (a->ip.v6.dst_port == b->ip.v6.dst_port && + a->ip.v6.src_port == b->ip.v6.src_port && + !ice_cmp_ipv6_addr(a->ip.v6.dst_ip, + b->ip.v6.dst_ip) && + !ice_cmp_ipv6_addr(a->ip.v6.src_ip, + b->ip.v6.src_ip)) + return true; + break; + case 
ICE_FLTR_PTYPE_NONF_IPV6_OTHER: + if (a->ip.v6.dst_port == b->ip.v6.dst_port && + a->ip.v6.src_port == b->ip.v6.src_port) + return true; + break; + default: + break; } return false; @@ -1253,19 +1281,10 @@ bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input) bool ret = false; list_for_each_entry(rule, &hw->fdir_list_head, fltr_node) { - enum ice_fltr_ptype flow_type; - if (rule->flow_type != input->flow_type) continue; - flow_type = input->flow_type; - if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP || - flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP || - flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP || - flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) - ret = ice_fdir_comp_rules(rule, input, false); - else - ret = ice_fdir_comp_rules(rule, input, true); + ret = ice_fdir_comp_rules(rule, input); if (ret) { if (rule->fltr_id == input->fltr_id && rule->q_index != input->q_index) diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h index 1b9b84490689..021ecbac7848 100644 --- a/drivers/net/ethernet/intel/ice/ice_fdir.h +++ b/drivers/net/ethernet/intel/ice/ice_fdir.h @@ -8,6 +8,9 @@ #define ICE_FDIR_MAX_RAW_PKT_SIZE (512 + ICE_FDIR_TUN_PKT_OFF) /* macros for offsets into packets for flow director programming */ +#define ICE_ETH_TYPE_F_OFFSET 12 +#define ICE_ETH_VLAN_TCI_OFFSET 14 +#define ICE_ETH_TYPE_VLAN_OFFSET 16 #define ICE_IPV4_SRC_ADDR_OFFSET 26 #define ICE_IPV4_DST_ADDR_OFFSET 30 #define ICE_IPV4_TCP_SRC_PORT_OFFSET 34 @@ -159,6 +162,8 @@ struct ice_fdir_fltr { struct list_head fltr_node; enum ice_fltr_ptype flow_type; + struct ethhdr eth, eth_mask; + union { struct ice_fdir_v4 v4; struct ice_fdir_v6 v6; diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h index d427a79d001a..817beca591e0 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_type.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h @@ -93,6 +93,7 @@ enum ice_tunnel_type { TNL_GRETAP, TNL_GTPC, TNL_GTPU, + TNL_PFCP, __TNL_TYPE_CNT, TNL_LAST = 0xFF, TNL_ALL = 0xFF, @@ -358,7 +359,8 @@ enum ice_prof_type { ICE_PROF_TUN_GRE = 0x4, ICE_PROF_TUN_GTPU = 0x8, ICE_PROF_TUN_GTPC = 0x10, - ICE_PROF_TUN_ALL = 0x1E, + ICE_PROF_TUN_PFCP = 0x20, + ICE_PROF_TUN_ALL = 0x3E, ICE_PROF_ALL = 0xFF, }; diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c index 319a2d6fe26c..f81db6c107c8 100644 --- a/drivers/net/ethernet/intel/ice/ice_fw_update.c +++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c @@ -286,10 +286,9 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon * * Returns: zero on success, or a negative error code on failure. 
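[Note] ice_fdir_comp_rules() above drops the v6 flag in favour of a single switch on the flow type, with the new ETHER_FLOW rules compared via memcmp(), and ice_fdir_is_dup_fltr() now simply calls it for every listed rule. A reduced stand-alone sketch of that shape, with illustrative stand-in types:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

enum flow_type { FLOW_ETH, FLOW_IPV4_TCP };

struct fdir_rule {
	enum flow_type type;
	uint8_t eth[14];			/* dst/src MAC + ethertype */
	uint32_t src_ip, dst_ip;
	uint16_t src_port, dst_port;
};

static bool rules_match(const struct fdir_rule *a, const struct fdir_rule *b)
{
	if (a->type != b->type)
		return false;

	switch (a->type) {
	case FLOW_ETH:
		/* whole-struct compare, as the ETHER_FLOW case does */
		return !memcmp(a->eth, b->eth, sizeof(a->eth));
	case FLOW_IPV4_TCP:
		return a->src_ip == b->src_ip && a->dst_ip == b->dst_ip &&
		       a->src_port == b->src_port && a->dst_port == b->dst_port;
	}
	return false;
}

int main(void)
{
	struct fdir_rule a = { .type = FLOW_ETH }, b = { .type = FLOW_ETH };

	memcpy(a.eth, "\x02\x00\x00\x00\x00\x01", 6);
	memcpy(b.eth, "\x02\x00\x00\x00\x00\x01", 6);
	return rules_match(&a, &b) ? 0 : 1;
}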
*/ -static int -ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, - u16 block_size, u8 *block, bool last_cmd, - u8 *reset_level, struct netlink_ext_ack *extack) +int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, + u16 block_size, u8 *block, bool last_cmd, + u8 *reset_level, struct netlink_ext_ack *extack) { u16 completion_module, completion_retval; struct device *dev = ice_pf_to_dev(pf); diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.h b/drivers/net/ethernet/intel/ice/ice_fw_update.h index 750574885716..04b200462757 100644 --- a/drivers/net/ethernet/intel/ice/ice_fw_update.h +++ b/drivers/net/ethernet/intel/ice/ice_fw_update.h @@ -9,5 +9,8 @@ int ice_devlink_flash_update(struct devlink *devlink, struct netlink_ext_ack *extack); int ice_get_pending_updates(struct ice_pf *pf, u8 *pending, struct netlink_ext_ack *extack); +int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, + u16 block_size, u8 *block, bool last_cmd, + u8 *reset_level, struct netlink_ext_ack *extack); #endif diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c index f0e76f0a6d60..1ccb572ce285 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.c +++ b/drivers/net/ethernet/intel/ice/ice_lag.c @@ -202,11 +202,12 @@ static struct ice_lag *ice_lag_find_primary(struct ice_lag *lag) * @act: rule action * @recipe_id: recipe id for the new rule * @rule_idx: pointer to rule index + * @direction: ICE_FLTR_RX or ICE_FLTR_TX * @add: boolean on whether we are adding filters */ static int ice_lag_cfg_fltr(struct ice_lag *lag, u32 act, u16 recipe_id, u16 *rule_idx, - bool add) + u8 direction, bool add) { struct ice_sw_rule_lkup_rx_tx *s_rule; u16 s_rule_sz, vsi_num; @@ -231,9 +232,16 @@ ice_lag_cfg_fltr(struct ice_lag *lag, u32 act, u16 recipe_id, u16 *rule_idx, act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, vsi_num); - s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX); s_rule->recipe_id = cpu_to_le16(recipe_id); - s_rule->src = cpu_to_le16(hw->port_info->lport); + if (direction == ICE_FLTR_RX) { + s_rule->hdr.type = + cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX); + s_rule->src = cpu_to_le16(hw->port_info->lport); + } else { + s_rule->hdr.type = + cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX); + s_rule->src = cpu_to_le16(vsi_num); + } s_rule->act = cpu_to_le32(act); s_rule->hdr_len = cpu_to_le16(DUMMY_ETH_HDR_LEN); opc = ice_aqc_opc_add_sw_rules; @@ -266,9 +274,27 @@ ice_lag_cfg_dflt_fltr(struct ice_lag *lag, bool add) { u32 act = ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT | ICE_SINGLE_ACT_LAN_ENABLE; + int err; + + err = ice_lag_cfg_fltr(lag, act, lag->pf_recipe, &lag->pf_rx_rule_id, + ICE_FLTR_RX, add); + if (err) + goto err_rx; - return ice_lag_cfg_fltr(lag, act, lag->pf_recipe, - &lag->pf_rule_id, add); + act = ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT | + ICE_SINGLE_ACT_LB_ENABLE; + err = ice_lag_cfg_fltr(lag, act, lag->pf_recipe, &lag->pf_tx_rule_id, + ICE_FLTR_TX, add); + if (err) + goto err_tx; + + return 0; + +err_tx: + ice_lag_cfg_fltr(lag, act, lag->pf_recipe, &lag->pf_rx_rule_id, + ICE_FLTR_RX, !add); +err_rx: + return err; } /** @@ -284,7 +310,7 @@ ice_lag_cfg_drop_fltr(struct ice_lag *lag, bool add) ICE_SINGLE_ACT_DROP; return ice_lag_cfg_fltr(lag, act, lag->lport_recipe, - &lag->lport_rule_idx, add); + &lag->lport_rule_idx, ICE_FLTR_RX, add); } /** @@ -310,7 +336,7 @@ ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr) dev = ice_pf_to_dev(lag->pf); /* interface not active - remove old default 
VSI rule */ - if (bonding_info->slave.state && lag->pf_rule_id) { + if (bonding_info->slave.state && lag->pf_rx_rule_id) { if (ice_lag_cfg_dflt_fltr(lag, false)) dev_err(dev, "Error removing old default VSI filter\n"); if (ice_lag_cfg_drop_fltr(lag, true)) @@ -319,7 +345,7 @@ ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr) } /* interface becoming active - add new default VSI rule */ - if (!bonding_info->slave.state && !lag->pf_rule_id) { + if (!bonding_info->slave.state && !lag->pf_rx_rule_id) { if (ice_lag_cfg_dflt_fltr(lag, true)) dev_err(dev, "Error adding new default VSI filter\n"); if (lag->lport_rule_idx && ice_lag_cfg_drop_fltr(lag, false)) @@ -714,8 +740,7 @@ static void ice_lag_move_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport) pf = lag->pf; ice_for_each_vsi(pf, i) - if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF || - pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL)) + if (pf->vsi[i] && pf->vsi[i]->type == ICE_VSI_VF) ice_lag_move_single_vf_nodes(lag, oldport, newport, i); } @@ -953,8 +978,7 @@ ice_lag_reclaim_vf_nodes(struct ice_lag *lag, struct ice_hw *src_hw) pf = lag->pf; ice_for_each_vsi(pf, i) - if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF || - pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL)) + if (pf->vsi[i] && pf->vsi[i]->type == ICE_VSI_VF) ice_for_each_traffic_class(tc) ice_lag_reclaim_vf_tc(lag, src_hw, i, tc); } @@ -1976,8 +2000,7 @@ ice_lag_move_vf_nodes_sync(struct ice_lag *lag, struct ice_hw *dest_hw) pf = lag->pf; ice_for_each_vsi(pf, i) - if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF || - pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL)) + if (pf->vsi[i] && pf->vsi[i]->type == ICE_VSI_VF) ice_for_each_traffic_class(tc) ice_lag_move_vf_nodes_tc_sync(lag, dest_hw, i, tc); @@ -2149,7 +2172,7 @@ void ice_lag_rebuild(struct ice_pf *pf) ice_lag_cfg_cp_fltr(lag, true); - if (lag->pf_rule_id) + if (lag->pf_rx_rule_id) if (ice_lag_cfg_dflt_fltr(lag, true)) dev_err(ice_pf_to_dev(pf), "Error adding default VSI rule in rebuild\n"); diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h index 183b38792ef2..bab2c83142a1 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.h +++ b/drivers/net/ethernet/intel/ice/ice_lag.h @@ -43,7 +43,8 @@ struct ice_lag { u8 primary:1; /* this is primary */ u16 pf_recipe; u16 lport_recipe; - u16 pf_rule_id; + u16 pf_rx_rule_id; + u16 pf_tx_rule_id; u16 cp_rule_idx; u16 lport_rule_idx; u8 role; diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index d384ddfcb83e..611577ebc29d 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -160,64 +160,6 @@ struct ice_fltr_desc { (0x1ULL << ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S) #define ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES 0x1ULL -struct ice_rx_ptype_decoded { - u32 known:1; - u32 outer_ip:1; - u32 outer_ip_ver:2; - u32 outer_frag:1; - u32 tunnel_type:3; - u32 tunnel_end_prot:2; - u32 tunnel_end_frag:1; - u32 inner_prot:4; - u32 payload_layer:3; -}; - -enum ice_rx_ptype_outer_ip { - ICE_RX_PTYPE_OUTER_L2 = 0, - ICE_RX_PTYPE_OUTER_IP = 1, -}; - -enum ice_rx_ptype_outer_ip_ver { - ICE_RX_PTYPE_OUTER_NONE = 0, - ICE_RX_PTYPE_OUTER_IPV4 = 1, - ICE_RX_PTYPE_OUTER_IPV6 = 2, -}; - -enum ice_rx_ptype_outer_fragmented { - ICE_RX_PTYPE_NOT_FRAG = 0, - ICE_RX_PTYPE_FRAG = 1, -}; - -enum ice_rx_ptype_tunnel_type { - ICE_RX_PTYPE_TUNNEL_NONE = 0, - ICE_RX_PTYPE_TUNNEL_IP_IP = 1, - ICE_RX_PTYPE_TUNNEL_IP_GRENAT = 2, - ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, - 
ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, -}; - -enum ice_rx_ptype_tunnel_end_prot { - ICE_RX_PTYPE_TUNNEL_END_NONE = 0, - ICE_RX_PTYPE_TUNNEL_END_IPV4 = 1, - ICE_RX_PTYPE_TUNNEL_END_IPV6 = 2, -}; - -enum ice_rx_ptype_inner_prot { - ICE_RX_PTYPE_INNER_PROT_NONE = 0, - ICE_RX_PTYPE_INNER_PROT_UDP = 1, - ICE_RX_PTYPE_INNER_PROT_TCP = 2, - ICE_RX_PTYPE_INNER_PROT_SCTP = 3, - ICE_RX_PTYPE_INNER_PROT_ICMP = 4, - ICE_RX_PTYPE_INNER_PROT_TIMESYNC = 5, -}; - -enum ice_rx_ptype_payload_layer { - ICE_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, - ICE_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, - ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, - ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, -}; - /* Rx Flex Descriptor * This descriptor is used instead of the legacy version descriptor when * ice_rlan_ctx.adv_desc is set @@ -651,266 +593,4 @@ struct ice_tlan_ctx { u8 int_q_state; /* width not needed - internal - DO NOT WRITE!!! */ }; -/* The ice_ptype_lkup table is used to convert from the 10-bit ptype in the - * hardware to a bit-field that can be used by SW to more easily determine the - * packet type. - * - * Macros are used to shorten the table lines and make this table human - * readable. - * - * We store the PTYPE in the top byte of the bit field - this is just so that - * we can check that the table doesn't have a row missing, as the index into - * the table should be the PTYPE. - * - * Typical work flow: - * - * IF NOT ice_ptype_lkup[ptype].known - * THEN - * Packet is unknown - * ELSE IF ice_ptype_lkup[ptype].outer_ip == ICE_RX_PTYPE_OUTER_IP - * Use the rest of the fields to look at the tunnels, inner protocols, etc - * ELSE - * Use the enum ice_rx_l2_ptype to decode the packet type - * ENDIF - */ -#define ICE_PTYPES \ - /* L2 Packet types */ \ - ICE_PTT_UNUSED_ENTRY(0), \ - ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), \ - ICE_PTT_UNUSED_ENTRY(2), \ - ICE_PTT_UNUSED_ENTRY(3), \ - ICE_PTT_UNUSED_ENTRY(4), \ - ICE_PTT_UNUSED_ENTRY(5), \ - ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \ - ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \ - ICE_PTT_UNUSED_ENTRY(8), \ - ICE_PTT_UNUSED_ENTRY(9), \ - ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \ - ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \ - ICE_PTT_UNUSED_ENTRY(12), \ - ICE_PTT_UNUSED_ENTRY(13), \ - ICE_PTT_UNUSED_ENTRY(14), \ - ICE_PTT_UNUSED_ENTRY(15), \ - ICE_PTT_UNUSED_ENTRY(16), \ - ICE_PTT_UNUSED_ENTRY(17), \ - ICE_PTT_UNUSED_ENTRY(18), \ - ICE_PTT_UNUSED_ENTRY(19), \ - ICE_PTT_UNUSED_ENTRY(20), \ - ICE_PTT_UNUSED_ENTRY(21), \ - \ - /* Non Tunneled IPv4 */ \ - ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), \ - ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), \ - ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(25), \ - ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), \ - ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), \ - ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), \ - \ - /* IPv4 --> IPv4 */ \ - ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), \ - ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(32), \ - ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv4 --> IPv6 */ \ - ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), \ - 
ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(39), \ - ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), \ - ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), \ - \ - /* IPv4 --> GRE/NAT */ \ - ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), \ - \ - /* IPv4 --> GRE/NAT --> IPv4 */ \ - ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), \ - ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(47), \ - ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv4 --> GRE/NAT --> IPv6 */ \ - ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), \ - ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(54), \ - ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), \ - ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), \ - \ - /* IPv4 --> GRE/NAT --> MAC */ \ - ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), \ - \ - /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ \ - ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), \ - ICE_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(62), \ - ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ \ - ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), \ - ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(69), \ - ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), \ - ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), \ - \ - /* IPv4 --> GRE/NAT --> MAC/VLAN */ \ - ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), \ - \ - /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ \ - ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), \ - ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(77), \ - ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ \ - ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), \ - ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(84), \ - ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), \ - ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, 
IPV6, NOF, ICMP, PAY4), \ - \ - /* Non Tunneled IPv6 */ \ - ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), \ - ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), \ - ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(91), \ - ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), \ - ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), \ - ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> IPv4 */ \ - ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), \ - ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(98), \ - ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> IPv6 */ \ - ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), \ - ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(105), \ - ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), \ - ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> GRE/NAT */ \ - ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), \ - \ - /* IPv6 --> GRE/NAT -> IPv4 */ \ - ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), \ - ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(113), \ - ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> GRE/NAT -> IPv6 */ \ - ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), \ - ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(120), \ - ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), \ - ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> GRE/NAT -> MAC */ \ - ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), \ - \ - /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ \ - ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), \ - ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(128), \ - ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ \ - ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), \ - ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(135), \ - ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), \ - ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> GRE/NAT -> MAC/VLAN */ \ - ICE_PTT(139, IP, IPV6, NOF, 
IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), \ - \ - /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ \ - ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), \ - ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(143), \ - ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ \ - ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), \ - ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(150), \ - ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), \ - ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), - -#define ICE_NUM_DEFINED_PTYPES 154 - -/* macro to make the table lines short, use explicit indexing with [PTYPE] */ -#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ - [PTYPE] = { \ - 1, \ - ICE_RX_PTYPE_OUTER_##OUTER_IP, \ - ICE_RX_PTYPE_OUTER_##OUTER_IP_VER, \ - ICE_RX_PTYPE_##OUTER_FRAG, \ - ICE_RX_PTYPE_TUNNEL_##T, \ - ICE_RX_PTYPE_TUNNEL_END_##TE, \ - ICE_RX_PTYPE_##TEF, \ - ICE_RX_PTYPE_INNER_PROT_##I, \ - ICE_RX_PTYPE_PAYLOAD_LAYER_##PL } - -#define ICE_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } - -/* shorter macros makes the table fit but are terse */ -#define ICE_RX_PTYPE_NOF ICE_RX_PTYPE_NOT_FRAG -#define ICE_RX_PTYPE_FRG ICE_RX_PTYPE_FRAG - -/* Lookup table mapping in the 10-bit HW PTYPE to the bit field for decoding */ -static const struct ice_rx_ptype_decoded ice_ptype_lkup[BIT(10)] = { - ICE_PTYPES - - /* unused entries */ - [ICE_NUM_DEFINED_PTYPES ... 1023] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } -}; - -static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype) -{ - return ice_ptype_lkup[ptype]; -} - - #endif /* _ICE_LAN_TX_RX_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 558422120312..5371e91f6bbb 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -7,7 +7,6 @@ #include "ice_lib.h" #include "ice_fltr.h" #include "ice_dcb_lib.h" -#include "ice_devlink.h" #include "ice_vsi_vlan_ops.h" /** @@ -27,8 +26,6 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type) return "ICE_VSI_CHNL"; case ICE_VSI_LB: return "ICE_VSI_LB"; - case ICE_VSI_SWITCHDEV_CTRL: - return "ICE_VSI_SWITCHDEV_CTRL"; default: return "unknown"; } @@ -144,7 +141,6 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi) { switch (vsi->type) { case ICE_VSI_PF: - case ICE_VSI_SWITCHDEV_CTRL: case ICE_VSI_CTRL: case ICE_VSI_LB: /* a user could change the values of num_[tr]x_desc using @@ -211,21 +207,6 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi) max_t(int, vsi->alloc_rxq, vsi->alloc_txq)); break; - case ICE_VSI_SWITCHDEV_CTRL: - /* The number of queues for ctrl VSI is equal to number of PRs - * Each ring is associated to the corresponding VF_PR netdev. 
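[Note] With ICE_VSI_SWITCHDEV_CTRL removed, the per-type switches in ice_lib.c (queue counts, RSS parameters, IRQ handler selection) lose their control-VSI cases. Below is a stand-alone sketch of the same idea expressed as a per-type defaults table; the numeric values are placeholders, not the driver's real settings.

#include <stdio.h>

enum vsi_type { VSI_PF, VSI_VF, VSI_CTRL, VSI_LB, VSI_CHNL, VSI_TYPE_CNT };

struct vsi_defaults {
	const char *name;
	int num_q_vectors;
	int rss_table_size;
};

static const struct vsi_defaults defaults[VSI_TYPE_CNT] = {
	[VSI_PF]   = { "ICE_VSI_PF",   8, 2048 },
	[VSI_VF]   = { "ICE_VSI_VF",   4,   64 },
	[VSI_CTRL] = { "ICE_VSI_CTRL", 1,    0 },
	[VSI_LB]   = { "ICE_VSI_LB",   0,    0 },
	[VSI_CHNL] = { "ICE_VSI_CHNL", 0,    0 },
};

int main(void)
{
	for (int t = 0; t < VSI_TYPE_CNT; t++)
		printf("%-14s q_vectors=%d rss=%d\n", defaults[t].name,
		       defaults[t].num_q_vectors, defaults[t].rss_table_size);
	return 0;
}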
- * Tx and Rx rings are always equal - */ - if (vsi->req_txq && vsi->req_rxq) { - vsi->alloc_txq = vsi->req_txq; - vsi->alloc_rxq = vsi->req_rxq; - } else { - vsi->alloc_txq = 1; - vsi->alloc_rxq = 1; - } - - vsi->num_q_vectors = 1; - break; case ICE_VSI_VF: if (vf->num_req_qs) vf->num_vf_qs = vf->num_req_qs; @@ -522,22 +503,6 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data) return IRQ_HANDLED; } -static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *data) -{ - struct ice_q_vector *q_vector = (struct ice_q_vector *)data; - struct ice_pf *pf = q_vector->vsi->back; - struct ice_repr *repr; - unsigned long id; - - if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring) - return IRQ_HANDLED; - - xa_for_each(&pf->eswitch.reprs, id, repr) - napi_schedule(&repr->q_vector->napi); - - return IRQ_HANDLED; -} - /** * ice_vsi_alloc_stat_arrays - Allocate statistics arrays * @vsi: VSI pointer @@ -600,10 +565,6 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch) } switch (vsi->type) { - case ICE_VSI_SWITCHDEV_CTRL: - /* Setup eswitch MSIX irq handler for VSI */ - vsi->irq_handler = ice_eswitch_msix_clean_rings; - break; case ICE_VSI_PF: /* Setup default MSIX irq handler for VSI */ vsi->irq_handler = ice_msix_clean_rings; @@ -933,11 +894,6 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi) max_rss_size); vsi->rss_lut_type = ICE_LUT_PF; break; - case ICE_VSI_SWITCHDEV_CTRL: - vsi->rss_table_size = ICE_LUT_VSI_SIZE; - vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size); - vsi->rss_lut_type = ICE_LUT_VSI; - break; case ICE_VSI_VF: /* VF VSI will get a small RSS table. * For VSI_LUT, LUT size should be set to 64 bytes. @@ -1263,7 +1219,6 @@ static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags) case ICE_VSI_PF: ctxt->flags = ICE_AQ_VSI_TYPE_PF; break; - case ICE_VSI_SWITCHDEV_CTRL: case ICE_VSI_CHNL: ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2; break; @@ -2145,7 +2100,6 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi) case ICE_VSI_CHNL: case ICE_VSI_LB: case ICE_VSI_PF: - case ICE_VSI_SWITCHDEV_CTRL: max_agg_nodes = ICE_MAX_PF_AGG_NODES; agg_node_id_start = ICE_PF_AGG_NODE_ID_START; agg_node_iter = &pf->pf_agg_node[0]; @@ -2273,10 +2227,8 @@ static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi) /** * ice_vsi_cfg_def - configure default VSI based on the type * @vsi: pointer to VSI - * @params: the parameters to configure this VSI with */ -static int -ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) +static int ice_vsi_cfg_def(struct ice_vsi *vsi) { struct device *dev = ice_pf_to_dev(vsi->back); struct ice_pf *pf = vsi->back; @@ -2284,7 +2236,7 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) vsi->vsw = pf->first_sw; - ret = ice_vsi_alloc_def(vsi, params->ch); + ret = ice_vsi_alloc_def(vsi, vsi->ch); if (ret) return ret; @@ -2309,7 +2261,7 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) ice_vsi_set_tc_cfg(vsi); /* create the VSI */ - ret = ice_vsi_init(vsi, params->flags); + ret = ice_vsi_init(vsi, vsi->flags); if (ret) goto unroll_get_qs; @@ -2317,7 +2269,6 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) switch (vsi->type) { case ICE_VSI_CTRL: - case ICE_VSI_SWITCHDEV_CTRL: case ICE_VSI_PF: ret = ice_vsi_alloc_q_vectors(vsi); if (ret) @@ -2430,23 +2381,16 @@ unroll_vsi_alloc: /** * ice_vsi_cfg - configure a previously allocated VSI * @vsi: pointer to VSI - * @params: parameters used to configure this VSI */ -int 
ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) +int ice_vsi_cfg(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; int ret; - if (WARN_ON(params->type == ICE_VSI_VF && !params->vf)) + if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf)) return -EINVAL; - vsi->type = params->type; - vsi->port_info = params->pi; - - /* For VSIs which don't have a connected VF, this will be NULL */ - vsi->vf = params->vf; - - ret = ice_vsi_cfg_def(vsi, params); + ret = ice_vsi_cfg_def(vsi); if (ret) return ret; @@ -2532,7 +2476,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params) * a port_info structure for it. */ if (WARN_ON(!(params->flags & ICE_VSI_FLAG_INIT)) || - WARN_ON(!params->pi)) + WARN_ON(!params->port_info)) return NULL; vsi = ice_vsi_alloc(pf); @@ -2541,7 +2485,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params) return NULL; } - ret = ice_vsi_cfg(vsi, params); + vsi->params = *params; + ret = ice_vsi_cfg(vsi); if (ret) goto err_vsi_cfg; @@ -2750,8 +2695,7 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked) } else { ice_vsi_close(vsi); } - } else if (vsi->type == ICE_VSI_CTRL || - vsi->type == ICE_VSI_SWITCHDEV_CTRL) { + } else if (vsi->type == ICE_VSI_CTRL) { ice_vsi_close(vsi); } } @@ -3089,7 +3033,6 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi) */ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags) { - struct ice_vsi_cfg_params params = {}; struct ice_coalesce_stored *coalesce; int prev_num_q_vectors; struct ice_pf *pf; @@ -3098,9 +3041,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags) if (!vsi) return -EINVAL; - params = ice_vsi_to_params(vsi); - params.flags = vsi_flags; - + vsi->flags = vsi_flags; pf = vsi->back; if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf)) return -EINVAL; @@ -3110,7 +3051,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags) goto err_vsi_cfg; ice_vsi_decfg(vsi); - ret = ice_vsi_cfg_def(vsi, ¶ms); + ret = ice_vsi_cfg_def(vsi); if (ret) goto err_vsi_cfg; diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 9cd23afe5f15..94ce8964dda6 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -11,43 +11,6 @@ #define ICE_VSI_FLAG_INIT BIT(0) #define ICE_VSI_FLAG_NO_INIT 0 -/** - * struct ice_vsi_cfg_params - VSI configuration parameters - * @pi: pointer to the port_info instance for the VSI - * @ch: pointer to the channel structure for the VSI, may be NULL - * @vf: pointer to the VF associated with this VSI, may be NULL - * @type: the type of VSI to configure - * @flags: VSI flags used for rebuild and configuration - * - * Parameter structure used when configuring a new VSI. - */ -struct ice_vsi_cfg_params { - struct ice_port_info *pi; - struct ice_channel *ch; - struct ice_vf *vf; - enum ice_vsi_type type; - u32 flags; -}; - -/** - * ice_vsi_to_params - Get parameters for an existing VSI - * @vsi: the VSI to get parameters for - * - * Fill a parameter structure for reconfiguring a VSI with its current - * parameters, such as during a rebuild operation. 
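[Note] ice_vsi_cfg() above no longer takes a parameter block: ice_vsi_setup() captures the caller's ice_vsi_cfg_params on the VSI once, and rebuild paths reuse the stored copy instead of reconstructing it with ice_vsi_to_params(). A minimal stand-alone sketch of that capture-once refactor, with illustrative types:

#include <stdio.h>

struct cfg_params { int type; int flags; };

struct vsi {
	struct cfg_params params;	/* captured at setup time */
	int configured;
};

static int vsi_cfg(struct vsi *v)
{
	/* uses v->params directly; no separate argument needed */
	v->configured = 1;
	return 0;
}

static int vsi_setup(struct vsi *v, const struct cfg_params *p)
{
	v->params = *p;			/* capture once */
	return vsi_cfg(v);
}

static int vsi_rebuild(struct vsi *v, int flags)
{
	v->params.flags = flags;	/* only the flags change on rebuild */
	return vsi_cfg(v);		/* stored params are reused */
}

int main(void)
{
	struct vsi v = { 0 };
	struct cfg_params p = { .type = 1, .flags = 0 };

	vsi_setup(&v, &p);
	vsi_rebuild(&v, 2);
	printf("type=%d flags=%d\n", v.params.type, v.params.flags);
	return 0;
}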
- */ -static inline struct ice_vsi_cfg_params ice_vsi_to_params(struct ice_vsi *vsi) -{ - struct ice_vsi_cfg_params params = {}; - - params.pi = vsi->port_info; - params.ch = vsi->ch; - params.vf = vsi->vf; - params.type = vsi->type; - - return params; -} - const char *ice_vsi_type_str(enum ice_vsi_type vsi_type); bool ice_pf_state_is_nominal(struct ice_pf *pf); @@ -101,7 +64,7 @@ void ice_vsi_decfg(struct ice_vsi *vsi); void ice_dis_vsi(struct ice_vsi *vsi, bool locked); int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags); -int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params); +int ice_vsi_cfg(struct ice_vsi *vsi); bool ice_is_reset_in_progress(unsigned long *state); int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 33a164fa325a..f60c022f7960 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -13,7 +13,8 @@ #include "ice_fltr.h" #include "ice_dcb_lib.h" #include "ice_dcb_nl.h" -#include "ice_devlink.h" +#include "devlink/devlink.h" +#include "devlink/devlink_port.h" #include "ice_hwmon.h" /* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the * ice tracepoint functions. This must be done exactly once across the @@ -36,6 +37,7 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation."; MODULE_AUTHOR("Intel Corporation, <[email protected]>"); MODULE_DESCRIPTION(DRV_SUMMARY); +MODULE_IMPORT_NS(LIBIE); MODULE_LICENSE("GPL v2"); MODULE_FIRMWARE(ICE_DDP_PKG_FILE); @@ -1745,6 +1747,39 @@ static void ice_service_timer(struct timer_list *t) } /** + * ice_mdd_maybe_reset_vf - reset VF after MDD event + * @pf: pointer to the PF structure + * @vf: pointer to the VF structure + * @reset_vf_tx: whether Tx MDD has occurred + * @reset_vf_rx: whether Rx MDD has occurred + * + * Since the queue can get stuck on VF MDD events, the PF can be configured to + * automatically reset the VF by enabling the private ethtool flag + * mdd-auto-reset-vf. + */ +static void ice_mdd_maybe_reset_vf(struct ice_pf *pf, struct ice_vf *vf, + bool reset_vf_tx, bool reset_vf_rx) +{ + struct device *dev = ice_pf_to_dev(pf); + + if (!test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) + return; + + /* VF MDD event counters will be cleared by reset, so print the event + * prior to reset. 
+ */ + if (reset_vf_tx) + ice_print_vf_tx_mdd_event(vf); + + if (reset_vf_rx) + ice_print_vf_rx_mdd_event(vf); + + dev_info(dev, "PF-to-VF reset on PF %d VF %d due to MDD event\n", + pf->hw.pf_id, vf->vf_id); + ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK); +} + +/** * ice_handle_mdd_event - handle malicious driver detect event * @pf: pointer to the PF structure * @@ -1837,6 +1872,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf) */ mutex_lock(&pf->vfs.table_lock); ice_for_each_vf(pf, bkt, vf) { + bool reset_vf_tx = false, reset_vf_rx = false; + reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id)); if (reg & VP_MDET_TX_PQM_VALID_M) { wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF); @@ -1845,6 +1882,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n", vf->vf_id); + + reset_vf_tx = true; } reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id)); @@ -1855,6 +1894,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n", vf->vf_id); + + reset_vf_tx = true; } reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id)); @@ -1865,6 +1906,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n", vf->vf_id); + + reset_vf_tx = true; } reg = rd32(hw, VP_MDET_RX(vf->vf_id)); @@ -1876,18 +1919,12 @@ static void ice_handle_mdd_event(struct ice_pf *pf) dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n", vf->vf_id); - /* Since the queue is disabled on VF Rx MDD events, the - * PF can be configured to reset the VF through ethtool - * private flag mdd-auto-reset-vf. - */ - if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) { - /* VF MDD event counters will be cleared by - * reset, so print the event prior to reset. - */ - ice_print_vf_rx_mdd_event(vf); - ice_reset_vf(vf, ICE_VF_RESET_LOCK); - } + reset_vf_rx = true; } + + if (reset_vf_tx || reset_vf_rx) + ice_mdd_maybe_reset_vf(pf, vf, reset_vf_tx, + reset_vf_rx); } mutex_unlock(&pf->vfs.table_lock); @@ -3648,7 +3685,7 @@ ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) struct ice_vsi_cfg_params params = {}; params.type = ICE_VSI_PF; - params.pi = pi; + params.port_info = pi; params.flags = ICE_VSI_FLAG_INIT; return ice_vsi_setup(pf, ¶ms); @@ -3661,7 +3698,7 @@ ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, struct ice_vsi_cfg_params params = {}; params.type = ICE_VSI_CHNL; - params.pi = pi; + params.port_info = pi; params.ch = ch; params.flags = ICE_VSI_FLAG_INIT; @@ -3682,7 +3719,7 @@ ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) struct ice_vsi_cfg_params params = {}; params.type = ICE_VSI_CTRL; - params.pi = pi; + params.port_info = pi; params.flags = ICE_VSI_FLAG_INIT; return ice_vsi_setup(pf, ¶ms); @@ -3702,7 +3739,7 @@ ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) struct ice_vsi_cfg_params params = {}; params.type = ICE_VSI_LB; - params.pi = pi; + params.port_info = pi; params.flags = ICE_VSI_FLAG_INIT; return ice_vsi_setup(pf, ¶ms); @@ -4417,11 +4454,13 @@ static char *ice_get_opt_fw_name(struct ice_pf *pf) /** * ice_request_fw - Device initialization routine * @pf: pointer to the PF instance + * @firmware: double pointer to firmware struct + * + * Return: zero when successful, negative values otherwise. 
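[Note] The MDD handling above is restructured so each per-VF detection site only latches a Tx or Rx flag, and a single ice_mdd_maybe_reset_vf() decides, based on the mdd-auto-reset-vf private flag, whether to log the events and reset the VF (logging first, since the reset clears the counters). A stand-alone sketch of that latch-and-reset shape with stand-in helpers:

#include <stdbool.h>
#include <stdio.h>

static bool auto_reset_enabled = true;	/* stand-in for the private flag */

static void print_tx_event(int vf) { printf("VF %d: Tx MDD\n", vf); }
static void print_rx_event(int vf) { printf("VF %d: Rx MDD\n", vf); }
static void reset_vf(int vf)       { printf("VF %d: reset\n", vf); }

static void maybe_reset_vf(int vf, bool tx_event, bool rx_event)
{
	if (!auto_reset_enabled)
		return;

	/* counters are cleared by the reset, so log the events first */
	if (tx_event)
		print_tx_event(vf);
	if (rx_event)
		print_rx_event(vf);

	reset_vf(vf);
}

int main(void)
{
	bool tx = true, rx = false;	/* as latched by the detection loop */

	if (tx || rx)
		maybe_reset_vf(7, tx, rx);
	return 0;
}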
*/ -static void ice_request_fw(struct ice_pf *pf) +static int ice_request_fw(struct ice_pf *pf, const struct firmware **firmware) { char *opt_fw_filename = ice_get_opt_fw_name(pf); - const struct firmware *firmware = NULL; struct device *dev = ice_pf_to_dev(pf); int err = 0; @@ -4430,29 +4469,95 @@ static void ice_request_fw(struct ice_pf *pf) * and warning messages for other errors. */ if (opt_fw_filename) { - err = firmware_request_nowarn(&firmware, opt_fw_filename, dev); - if (err) { - kfree(opt_fw_filename); - goto dflt_pkg_load; - } - - /* request for firmware was successful. Download to device */ - ice_load_pkg(firmware, pf); + err = firmware_request_nowarn(firmware, opt_fw_filename, dev); kfree(opt_fw_filename); - release_firmware(firmware); - return; + if (!err) + return err; + } + err = request_firmware(firmware, ICE_DDP_PKG_FILE, dev); + if (err) + dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n"); + + return err; +} + +/** + * ice_init_tx_topology - performs Tx topology initialization + * @hw: pointer to the hardware structure + * @firmware: pointer to firmware structure + * + * Return: zero when init was successful, negative values otherwise. + */ +static int +ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware) +{ + u8 num_tx_sched_layers = hw->num_tx_sched_layers; + struct ice_pf *pf = hw->back; + struct device *dev; + u8 *buf_copy; + int err; + + dev = ice_pf_to_dev(pf); + /* ice_cfg_tx_topo buf argument is not a constant, + * so we have to make a copy + */ + buf_copy = kmemdup(firmware->data, firmware->size, GFP_KERNEL); + + err = ice_cfg_tx_topo(hw, buf_copy, firmware->size); + if (!err) { + if (hw->num_tx_sched_layers > num_tx_sched_layers) + dev_info(dev, "Tx scheduling layers switching feature disabled\n"); + else + dev_info(dev, "Tx scheduling layers switching feature enabled\n"); + /* if there was a change in topology ice_cfg_tx_topo triggered + * a CORER and we need to re-init hw + */ + ice_deinit_hw(hw); + err = ice_init_hw(hw); + + return err; + } else if (err == -EIO) { + dev_info(dev, "DDP package does not support Tx scheduling layers switching feature - please update to the latest DDP package and try again\n"); } -dflt_pkg_load: - err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev); + return 0; +} + +/** + * ice_init_ddp_config - DDP related configuration + * @hw: pointer to the hardware structure + * @pf: pointer to pf structure + * + * This function loads DDP file from the disk, then initializes Tx + * topology. At the end DDP package is loaded on the card. + * + * Return: zero when init was successful, negative values otherwise. + */ +static int ice_init_ddp_config(struct ice_hw *hw, struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + const struct firmware *firmware = NULL; + int err; + + err = ice_request_fw(pf, &firmware); if (err) { - dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n"); - return; + dev_err(dev, "Fail during requesting FW: %d\n", err); + return err; } - /* request for firmware was successful. 
Download to device */ + err = ice_init_tx_topology(hw, firmware); + if (err) { + dev_err(dev, "Fail during initialization of Tx topology: %d\n", + err); + release_firmware(firmware); + return err; + } + + /* Download firmware to device */ ice_load_pkg(firmware, pf); release_firmware(firmware); + + return 0; } /** @@ -4625,9 +4730,11 @@ int ice_init_dev(struct ice_pf *pf) ice_init_feature_support(pf); - ice_request_fw(pf); + err = ice_init_ddp_config(hw, pf); + if (err) + return err; - /* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be + /* if ice_init_ddp_config fails, ICE_FLAG_ADV_FEATURES bit won't be * set in pf->state, which will cause ice_is_safe_mode to return * true */ @@ -5093,6 +5200,7 @@ static int ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) { struct device *dev = &pdev->dev; + struct ice_adapter *adapter; struct ice_pf *pf; struct ice_hw *hw; int err; @@ -5145,7 +5253,12 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) pci_set_master(pdev); + adapter = ice_adapter_get(pdev); + if (IS_ERR(adapter)) + return PTR_ERR(adapter); + pf->pdev = pdev; + pf->adapter = adapter; pci_set_drvdata(pdev, pf); set_bit(ICE_DOWN, pf->state); /* Disable service task until DOWN bit is cleared */ @@ -5179,23 +5292,23 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) devl_lock(priv_to_devlink(pf)); err = ice_load(pf); - devl_unlock(priv_to_devlink(pf)); if (err) goto err_load; err = ice_init_devlink(pf); if (err) goto err_init_devlink; + devl_unlock(priv_to_devlink(pf)); return 0; err_init_devlink: - devl_lock(priv_to_devlink(pf)); ice_unload(pf); - devl_unlock(priv_to_devlink(pf)); err_load: + devl_unlock(priv_to_devlink(pf)); ice_deinit(pf); err_init: + ice_adapter_put(pdev); pci_disable_device(pdev); return err; } @@ -5290,9 +5403,9 @@ static void ice_remove(struct pci_dev *pdev) if (!ice_is_safe_mode(pf)) ice_remove_arfs(pf); + devl_lock(priv_to_devlink(pf)); ice_deinit_devlink(pf); - devl_lock(priv_to_devlink(pf)); ice_unload(pf); devl_unlock(priv_to_devlink(pf)); @@ -5302,6 +5415,7 @@ static void ice_remove(struct pci_dev *pdev) ice_setup_mc_magic_wake(pf); ice_set_wake(pf); + ice_adapter_put(pdev); pci_disable_device(pdev); } @@ -5321,7 +5435,6 @@ static void ice_shutdown(struct pci_dev *pdev) } } -#ifdef CONFIG_PM /** * ice_prepare_for_shutdown - prep for PCI shutdown * @pf: board private structure @@ -5410,7 +5523,7 @@ err_reinit: * Power Management callback to quiesce the device and prepare * for D3 transition. 
*/ -static int __maybe_unused ice_suspend(struct device *dev) +static int ice_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct ice_pf *pf; @@ -5477,7 +5590,7 @@ static int __maybe_unused ice_suspend(struct device *dev) * ice_resume - PM callback for waking up from D3 * @dev: generic device information structure */ -static int __maybe_unused ice_resume(struct device *dev) +static int ice_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); enum ice_reset_req reset_type; @@ -5528,7 +5641,6 @@ static int __maybe_unused ice_resume(struct device *dev) return 0; } -#endif /* CONFIG_PM */ /** * ice_pci_err_detected - warning that PCI error has been detected @@ -5693,16 +5805,22 @@ static const struct pci_device_id ice_pci_tbl[] = { { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_QSFP), }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SFP), }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SGMII), }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_BACKPLANE) }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_QSFP56) }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP) }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP_DD) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_BACKPLANE) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_QSFP56) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP_DD) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_BACKPLANE), }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_BACKPLANE), }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_QSFP), }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_QSFP), }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_SFP), }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_SFP), }, /* required last entry */ {} }; MODULE_DEVICE_TABLE(pci, ice_pci_tbl); -static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume); static const struct pci_error_handlers ice_pci_err_handler = { .error_detected = ice_pci_err_detected, @@ -5717,9 +5835,7 @@ static struct pci_driver ice_driver = { .id_table = ice_pci_tbl, .probe = ice_probe, .remove = ice_remove, -#ifdef CONFIG_PM - .driver.pm = &ice_pm_ops, -#endif /* CONFIG_PM */ + .driver.pm = pm_sleep_ptr(&ice_pm_ops), .shutdown = ice_shutdown, .sriov_configure = ice_sriov_configure, .sriov_get_vf_total_msix = ice_sriov_get_vf_total_msix, @@ -7055,13 +7171,11 @@ int ice_down(struct ice_vsi *vsi) WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state)); - if (vsi->netdev && vsi->type == ICE_VSI_PF) { + if (vsi->netdev) { vlan_err = ice_vsi_del_vlan_zero(vsi); ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false); netif_carrier_off(vsi->netdev); netif_tx_disable(vsi->netdev); - } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) { - ice_eswitch_stop_all_tx_queues(vsi->back); } ice_vsi_dis_irq(vsi); @@ -7544,11 +7658,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) goto err_vsi_rebuild; } - err = ice_eswitch_rebuild(pf); - if (err) { - dev_err(dev, "Switchdev rebuild failed: %d\n", err); - goto err_vsi_rebuild; - } + ice_eswitch_rebuild(pf); if (reset_type == ICE_RESET_PFR) { err = ice_rebuild_channels(pf); @@ -7666,7 +7776,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) return -EBUSY; } - netdev->mtu = (unsigned int)new_mtu; + WRITE_ONCE(netdev->mtu, (unsigned int)new_mtu); err = ice_down_up(vsi); if (err) return err; @@ -7999,12 +8109,9 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, if (!br_spec) return -EINVAL; - nla_for_each_nested(attr, br_spec, rem) { - __u16 mode; + 
nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) { + __u16 mode = nla_get_u16(attr); - if (nla_type(attr) != IFLA_BRIDGE_MODE) - continue; - mode = nla_get_u16(attr); if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) return -EINVAL; /* Continue if bridge mode is not being flipped */ diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index d4e05d2cb30c..84eab92dc03c 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -18,10 +18,9 @@ * * Read the NVM using the admin queue commands (0x0701) */ -static int -ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, - void *data, bool last_command, bool read_shadow_ram, - struct ice_sq_cd *cd) +int ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, + u16 length, void *data, bool last_command, + bool read_shadow_ram, struct ice_sq_cd *cd) { struct ice_aq_desc desc; struct ice_aqc_nvm *cmd; diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h index 774c2317967d..63cdc6bdac58 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.h +++ b/drivers/net/ethernet/intel/ice/ice_nvm.h @@ -14,6 +14,9 @@ struct ice_orom_civd_info { int ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access); void ice_release_nvm(struct ice_hw *hw); +int ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, + u16 length, void *data, bool last_command, + bool read_shadow_ram, struct ice_sq_cd *cd); int ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, bool read_shadow_ram); diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h index f6f27361c3cf..755a9c55267c 100644 --- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h +++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h @@ -43,6 +43,7 @@ enum ice_protocol_type { ICE_NVGRE, ICE_GTP, ICE_GTP_NO_PAY, + ICE_PFCP, ICE_PPPOE, ICE_L2TPV3, ICE_VLAN_EX, @@ -61,6 +62,7 @@ enum ice_sw_tunnel_type { ICE_SW_TUN_NVGRE, ICE_SW_TUN_GTPU, ICE_SW_TUN_GTPC, + ICE_SW_TUN_PFCP, ICE_ALL_TUNNELS /* All tunnel types including NVGRE */ }; @@ -202,6 +204,15 @@ struct ice_udp_gtp_hdr { u8 rsvrd; }; +struct ice_pfcp_hdr { + u8 flags; + u8 msg_type; + __be16 length; + __be64 seid; + __be32 seq; + u8 spare; +} __packed __aligned(__alignof__(u16)); + struct ice_pppoe_hdr { u8 rsrvd_ver_type; u8 rsrvd_code; @@ -418,6 +429,7 @@ union ice_prot_hdr { struct ice_udp_tnl_hdr tnl_hdr; struct ice_nvgre_hdr nvgre_hdr; struct ice_udp_gtp_hdr gtp_hdr; + struct ice_pfcp_hdr pfcp_hdr; struct ice_pppoe_hdr pppoe_hdr; struct ice_l2tpv3_sess_hdr l2tpv3_sess_hdr; struct ice_hw_metadata metadata; diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index c11eba07283c..0f17fc1181d2 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -374,6 +374,7 @@ ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts) u8 tmr_idx; tmr_idx = ice_get_ptp_src_clock_index(hw); + guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock); /* Read the system timestamp pre PHC read */ ptp_read_system_prets(sts); @@ -1166,26 +1167,6 @@ static void ice_ptp_reset_cached_phctime(struct ice_pf *pf) } /** - * ice_ptp_read_time - Read the time from the device - * @pf: Board private structure - * @ts: timespec structure to hold the current time value - * @sts: Optional parameter for 
holding a pair of system timestamps from - * the system clock. Will be ignored if NULL is given. - * - * This function reads the source clock registers and stores them in a timespec. - * However, since the registers are 64 bits of nanoseconds, we must convert the - * result to a timespec before we can return. - */ -static void -ice_ptp_read_time(struct ice_pf *pf, struct timespec64 *ts, - struct ptp_system_timestamp *sts) -{ - u64 time_ns = ice_ptp_read_src_clk_reg(pf, sts); - - *ts = ns_to_timespec64(time_ns); -} - -/** * ice_ptp_write_init - Set PHC time to provided value * @pf: Board private structure * @ts: timespec structure that holds the new time value @@ -1925,16 +1906,10 @@ ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts, struct ptp_system_timestamp *sts) { struct ice_pf *pf = ptp_info_to_pf(info); - struct ice_hw *hw = &pf->hw; - - if (!ice_ptp_lock(hw)) { - dev_err(ice_pf_to_dev(pf), "PTP failed to get time\n"); - return -EBUSY; - } - - ice_ptp_read_time(pf, ts, sts); - ice_ptp_unlock(hw); + u64 time_ns; + time_ns = ice_ptp_read_src_clk_reg(pf, sts); + *ts = ns_to_timespec64(time_ns); return 0; } diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c index 187ce9b54e1a..2b9423a173bb 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c @@ -274,6 +274,9 @@ void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) */ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw) { + struct ice_pf *pf = container_of(hw, struct ice_pf, hw); + + guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock); wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD); ice_flush(hw); } diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c index 5f30fb131f74..d367f4c66dcd 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.c +++ b/drivers/net/ethernet/intel/ice/ice_repr.c @@ -3,42 +3,51 @@ #include "ice.h" #include "ice_eswitch.h" -#include "ice_devlink.h" +#include "devlink/devlink.h" +#include "devlink/devlink_port.h" #include "ice_sriov.h" #include "ice_tc_lib.h" #include "ice_dcb_lib.h" /** - * ice_repr_get_sw_port_id - get port ID associated with representor - * @repr: pointer to port representor + * ice_repr_inc_tx_stats - increment Tx statistic by one packet + * @repr: repr to increment stats on + * @len: length of the packet + * @xmit_status: value returned by xmit function */ -static int ice_repr_get_sw_port_id(struct ice_repr *repr) +void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len, + int xmit_status) { - return repr->src_vsi->back->hw.port_info->lport; + struct ice_repr_pcpu_stats *stats; + + if (unlikely(xmit_status != NET_XMIT_SUCCESS && + xmit_status != NET_XMIT_CN)) { + this_cpu_inc(repr->stats->tx_drops); + return; + } + + stats = this_cpu_ptr(repr->stats); + u64_stats_update_begin(&stats->syncp); + stats->tx_packets++; + stats->tx_bytes += len; + u64_stats_update_end(&stats->syncp); } /** - * ice_repr_get_phys_port_name - get phys port name - * @netdev: pointer to port representor netdev - * @buf: write here port name - * @len: max length of buf + * ice_repr_inc_rx_stats - increment Rx statistic by one packet + * @netdev: repr netdev to increment stats on + * @len: length of the packet */ -static int -ice_repr_get_phys_port_name(struct net_device *netdev, char *buf, size_t len) +void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct 
ice_repr *repr = np->repr; - int res; - - /* Devlink port is registered and devlink core is taking care of name formatting. */ - if (repr->vf->devlink_port.devlink) - return -EOPNOTSUPP; + struct ice_repr *repr = ice_netdev_to_repr(netdev); + struct ice_repr_pcpu_stats *stats; - res = snprintf(buf, len, "pf%dvfr%d", ice_repr_get_sw_port_id(repr), - repr->id); - if (res <= 0) - return -EOPNOTSUPP; - return 0; + stats = this_cpu_ptr(repr->stats); + u64_stats_update_begin(&stats->syncp); + stats->rx_packets++; + stats->rx_bytes += len; + u64_stats_update_end(&stats->syncp); } /** @@ -76,7 +85,7 @@ ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) * ice_netdev_to_repr - Get port representor for given netdevice * @netdev: pointer to port representor netdev */ -struct ice_repr *ice_netdev_to_repr(struct net_device *netdev) +struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev) { struct ice_netdev_priv *np = netdev_priv(netdev); @@ -139,38 +148,35 @@ static int ice_repr_stop(struct net_device *netdev) * ice_repr_sp_stats64 - get slow path stats for port representor * @dev: network interface device structure * @stats: netlink stats structure - * - * RX/TX stats are being swapped here to be consistent with VF stats. In slow - * path, port representor receives data when the corresponding VF is sending it - * (and vice versa), TX and RX bytes/packets are effectively swapped on port - * representor. */ static int ice_repr_sp_stats64(const struct net_device *dev, struct rtnl_link_stats64 *stats) { - struct ice_netdev_priv *np = netdev_priv(dev); - int vf_id = np->repr->vf->vf_id; - struct ice_tx_ring *tx_ring; - struct ice_rx_ring *rx_ring; - u64 pkts, bytes; - - tx_ring = np->vsi->tx_rings[vf_id]; - ice_fetch_u64_stats_per_ring(&tx_ring->ring_stats->syncp, - tx_ring->ring_stats->stats, - &pkts, &bytes); - stats->rx_packets = pkts; - stats->rx_bytes = bytes; - - rx_ring = np->vsi->rx_rings[vf_id]; - ice_fetch_u64_stats_per_ring(&rx_ring->ring_stats->syncp, - rx_ring->ring_stats->stats, - &pkts, &bytes); - stats->tx_packets = pkts; - stats->tx_bytes = bytes; - stats->tx_dropped = rx_ring->ring_stats->rx_stats.alloc_page_failed + - rx_ring->ring_stats->rx_stats.alloc_buf_failed; - + struct ice_repr *repr = ice_netdev_to_repr(dev); + int i; + + for_each_possible_cpu(i) { + u64 tbytes, tpkts, tdrops, rbytes, rpkts; + struct ice_repr_pcpu_stats *repr_stats; + unsigned int start; + + repr_stats = per_cpu_ptr(repr->stats, i); + do { + start = u64_stats_fetch_begin(&repr_stats->syncp); + tbytes = repr_stats->tx_bytes; + tpkts = repr_stats->tx_packets; + tdrops = repr_stats->tx_drops; + rbytes = repr_stats->rx_bytes; + rpkts = repr_stats->rx_packets; + } while (u64_stats_fetch_retry(&repr_stats->syncp, start)); + + stats->tx_bytes += tbytes; + stats->tx_packets += tpkts; + stats->tx_dropped += tdrops; + stats->rx_bytes += rbytes; + stats->rx_packets += rpkts; + } return 0; } @@ -240,7 +246,6 @@ ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type, } static const struct net_device_ops ice_repr_netdev_ops = { - .ndo_get_phys_port_name = ice_repr_get_phys_port_name, .ndo_get_stats64 = ice_repr_get_stats64, .ndo_open = ice_repr_open, .ndo_stop = ice_repr_stop, @@ -291,7 +296,7 @@ static void ice_repr_remove_node(struct devlink_port *devlink_port) */ static void ice_repr_rem(struct ice_repr *repr) { - kfree(repr->q_vector); + free_percpu(repr->stats); free_netdev(repr->netdev); kfree(repr); } @@ -331,7 +336,6 @@ static void ice_repr_set_tx_topology(struct 
ice_pf *pf) static struct ice_repr * ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac) { - struct ice_q_vector *q_vector; struct ice_netdev_priv *np; struct ice_repr *repr; int err; @@ -346,23 +350,22 @@ ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac) goto err_alloc; } + repr->stats = netdev_alloc_pcpu_stats(struct ice_repr_pcpu_stats); + if (!repr->stats) { + err = -ENOMEM; + goto err_stats; + } + repr->src_vsi = src_vsi; + repr->id = src_vsi->vsi_num; np = netdev_priv(repr->netdev); np->repr = repr; - q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL); - if (!q_vector) { - err = -ENOMEM; - goto err_alloc_q_vector; - } - repr->q_vector = q_vector; - repr->q_id = repr->id; - ether_addr_copy(repr->parent_mac, parent_mac); return repr; -err_alloc_q_vector: +err_stats: free_netdev(repr->netdev); err_alloc: kfree(repr); @@ -439,15 +442,3 @@ void ice_repr_stop_tx_queues(struct ice_repr *repr) netif_carrier_off(repr->netdev); netif_tx_stop_all_queues(repr->netdev); } - -/** - * ice_repr_set_traffic_vsi - set traffic VSI for port representor - * @repr: repr on with VSI will be set - * @vsi: pointer to VSI that will be used by port representor to pass traffic - */ -void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi) -{ - struct ice_netdev_priv *np = netdev_priv(repr->netdev); - - np->vsi = vsi; -} diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h index f9aede315716..cff730b15ca0 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.h +++ b/drivers/net/ethernet/intel/ice/ice_repr.h @@ -6,20 +6,24 @@ #include <net/dst_metadata.h> +struct ice_repr_pcpu_stats { + struct u64_stats_sync syncp; + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + u64 tx_drops; +}; + struct ice_repr { struct ice_vsi *src_vsi; struct ice_vf *vf; - struct ice_q_vector *q_vector; struct net_device *netdev; struct metadata_dst *dst; struct ice_esw_br_port *br_port; - int q_id; + struct ice_repr_pcpu_stats __percpu *stats; u32 id; u8 parent_mac[ETH_ALEN]; -#ifdef CONFIG_ICE_SWITCHDEV - /* info about slow path rule */ - struct ice_rule_query_data sp_rule; -#endif }; struct ice_repr *ice_repr_add_vf(struct ice_vf *vf); @@ -28,10 +32,12 @@ void ice_repr_rem_vf(struct ice_repr *repr); void ice_repr_start_tx_queues(struct ice_repr *repr); void ice_repr_stop_tx_queues(struct ice_repr *repr); -void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi); - -struct ice_repr *ice_netdev_to_repr(struct net_device *netdev); +struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev); bool ice_is_port_repr_netdev(const struct net_device *netdev); struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi); + +void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len, + int xmit_status); +void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len); #endif diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index a1525992d14b..ecf8f5d60292 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -1128,12 +1128,11 @@ u8 ice_sched_get_vsi_layer(struct ice_hw *hw) * 5 or less sw_entry_point_layer */ /* calculate the VSI layer based on number of layers. 
*/ - if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) { - u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET; - - if (layer > hw->sw_entry_point_layer) - return layer; - } + if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS) + return hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET; + else if (hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) + /* qgroup and VSI layers are same */ + return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET; return hw->sw_entry_point_layer; } @@ -1150,13 +1149,10 @@ u8 ice_sched_get_agg_layer(struct ice_hw *hw) * 7 or less sw_entry_point_layer */ /* calculate the aggregator layer based on number of layers. */ - if (hw->num_tx_sched_layers > ICE_AGG_LAYER_OFFSET + 1) { - u8 layer = hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET; - - if (layer > hw->sw_entry_point_layer) - return layer; - } - return hw->sw_entry_point_layer; + if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS) + return hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET; + else + return hw->sw_entry_point_layer; } /** @@ -1510,10 +1506,11 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, { struct ice_sched_node *vsi_node, *qgrp_node; struct ice_vsi_ctx *vsi_ctx; + u8 qgrp_layer, vsi_layer; u16 max_children; - u8 qgrp_layer; qgrp_layer = ice_sched_get_qgrp_layer(pi->hw); + vsi_layer = ice_sched_get_vsi_layer(pi->hw); max_children = pi->hw->max_children[qgrp_layer]; vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); @@ -1524,6 +1521,12 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, if (!vsi_node) return NULL; + /* If the queue group and VSI layer are same then queues + * are all attached directly to VSI + */ + if (qgrp_layer == vsi_layer) + return vsi_node; + /* get the first queue group node from VSI sub-tree */ qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer); while (qgrp_node) { @@ -3199,7 +3202,7 @@ ice_sched_add_rl_profile(struct ice_port_info *pi, u8 profile_type; int status; - if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM) + if (!pi || layer_num >= pi->hw->num_tx_sched_layers) return NULL; switch (rl_type) { case ICE_MIN_BW: @@ -3215,8 +3218,6 @@ ice_sched_add_rl_profile(struct ice_port_info *pi, return NULL; } - if (!pi) - return NULL; hw = pi->hw; list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num], list_entry) @@ -3446,7 +3447,7 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type, struct ice_aqc_rl_profile_info *rl_prof_elem; int status = 0; - if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM) + if (layer_num >= pi->hw->num_tx_sched_layers) return -EINVAL; /* Check the existing list for RL profile */ list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num], diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h index 1aef05ea5a57..7b668083be07 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.h +++ b/drivers/net/ethernet/intel/ice/ice_sched.h @@ -6,6 +6,17 @@ #include "ice_common.h" +/** + * DOC: ice_sched.h + * + * This header file stores everything that is needed for broadly understood + * scheduler. It consists of defines related to layers, structures related to + * aggregator, functions declarations and others. 
+ */ + +#define ICE_SCHED_5_LAYERS 5 +#define ICE_SCHED_9_LAYERS 9 + #define SCHED_NODE_NAME_MAX_LEN 32 #define ICE_QGRP_LAYER_OFFSET 2 diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index a958fcf3e6be..067712f4923f 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -170,8 +170,6 @@ void ice_free_vfs(struct ice_pf *pf) else dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n"); - ice_eswitch_reserve_cp_queues(pf, -ice_get_num_vfs(pf)); - mutex_lock(&vfs->table_lock); ice_for_each_vf(pf, bkt, vf) { @@ -227,7 +225,7 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf) struct ice_vsi *vsi; params.type = ICE_VSI_VF; - params.pi = ice_vf_get_port_info(vf); + params.port_info = ice_vf_get_port_info(vf); params.vf = vf; params.flags = ICE_VSI_FLAG_INIT; @@ -362,13 +360,14 @@ static void ice_ena_vf_mappings(struct ice_vf *vf) * @vf: VF to calculate the register index for * @q_vector: a q_vector associated to the VF */ -int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector) +void ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector) { if (!vf || !q_vector) - return -EINVAL; + return; /* always add one to account for the OICR being the first MSIX */ - return vf->first_vector_idx + q_vector->v_idx + 1; + q_vector->vf_reg_idx = q_vector->v_idx + ICE_NONQ_VECS_VF; + q_vector->reg_idx = vf->first_vector_idx + q_vector->vf_reg_idx; } /** @@ -833,11 +832,6 @@ static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs) pci_dev_get(vfdev); - /* set default number of MSI-X */ - vf->num_msix = pf->vfs.num_msix_per; - vf->num_vf_qs = pf->vfs.num_qps_per; - ice_vc_set_default_allowlist(vf); - hash_add_rcu(vfs->table, &vf->entry, vf_id); } @@ -897,7 +891,6 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) goto err_unroll_sriov; } - ice_eswitch_reserve_cp_queues(pf, num_vfs); ret = ice_start_vfs(pf); if (ret) { dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret); @@ -1869,6 +1862,24 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf) } /** + * ice_print_vf_tx_mdd_event - print VF Tx malicious driver detect event + * @vf: pointer to the VF structure + */ +void ice_print_vf_tx_mdd_event(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + struct device *dev; + + dev = ice_pf_to_dev(pf); + + dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n", + vf->mdd_tx_events.count, pf->hw.pf_id, vf->vf_id, + vf->dev_lan_addr, + test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags) + ? 
"on" : "off"); +} + +/** * ice_print_vfs_mdd_events - print VFs malicious driver detect event * @pf: pointer to the PF structure * @@ -1876,8 +1887,6 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf) */ void ice_print_vfs_mdd_events(struct ice_pf *pf) { - struct device *dev = ice_pf_to_dev(pf); - struct ice_hw *hw = &pf->hw; struct ice_vf *vf; unsigned int bkt; @@ -1904,10 +1913,7 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf) if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) { vf->mdd_tx_events.last_printed = vf->mdd_tx_events.count; - - dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n", - vf->mdd_tx_events.count, hw->pf_id, vf->vf_id, - vf->dev_lan_addr); + ice_print_vf_tx_mdd_event(vf); } } mutex_unlock(&pf->vfs.table_lock); diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h index 8488df38b586..8f22313474d6 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.h +++ b/drivers/net/ethernet/intel/ice/ice_sriov.h @@ -49,7 +49,7 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state); int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena); -int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector); +void ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector); int ice_get_vf_stats(struct net_device *netdev, int vf_id, @@ -58,6 +58,7 @@ void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event); void ice_print_vfs_mdd_events(struct ice_pf *pf); void ice_print_vf_rx_mdd_event(struct ice_vf *vf); +void ice_print_vf_tx_mdd_event(struct ice_vf *vf); bool ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto); u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev); @@ -69,6 +70,7 @@ static inline void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { } static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { } static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { } +static inline void ice_print_vf_tx_mdd_event(struct ice_vf *vf) { } static inline void ice_restore_all_vfs_msi_state(struct ice_pf *pf) { } static inline int @@ -130,11 +132,10 @@ ice_set_vf_bw(struct net_device __always_unused *netdev, return -EOPNOTSUPP; } -static inline int +static inline void ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf, struct ice_q_vector __always_unused *q_vector) { - return 0; } static inline int diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index b4ea935e8300..94d6670d0901 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -42,6 +42,7 @@ enum { ICE_PKT_KMALLOC = BIT(9), ICE_PKT_PPPOE = BIT(10), ICE_PKT_L2TPV3 = BIT(11), + ICE_PKT_PFCP = BIT(12), }; struct ice_dummy_pkt_offsets { @@ -1110,6 +1111,77 @@ ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = { 0x00, 0x00, }; +ICE_DECLARE_PKT_OFFSETS(pfcp_session_ipv4) = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_UDP_ILOS, 34 }, + { ICE_PFCP, 42 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +ICE_DECLARE_PKT_TEMPLATE(pfcp_session_ipv4) = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x45, 0x00, 0x00, 0x2c, /* ICE_IPV4_OFOS 14 */ + 0x00, 0x01, 0x00, 0x00, + 0x00, 0x11, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x22, 0x65, 
/* ICE_UDP_ILOS 34 */ + 0x00, 0x18, 0x00, 0x00, + + 0x21, 0x01, 0x00, 0x0c, /* ICE_PFCP 42 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + +ICE_DECLARE_PKT_OFFSETS(pfcp_session_ipv6) = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV6_OFOS, 14 }, + { ICE_UDP_ILOS, 54 }, + { ICE_PFCP, 62 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +ICE_DECLARE_PKT_TEMPLATE(pfcp_session_ipv6) = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x86, 0xdd, /* ICE_ETYPE_OL 12 */ + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */ + 0x00, 0x10, 0x11, 0x00, /* Next header UDP */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x22, 0x65, /* ICE_UDP_ILOS 54 */ + 0x00, 0x18, 0x00, 0x00, + + 0x21, 0x01, 0x00, 0x0c, /* ICE_PFCP 62 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, @@ -1343,6 +1415,8 @@ static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = { ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU), ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6), ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC), + ICE_PKT_PROFILE(pfcp_session_ipv6, ICE_PKT_PFCP | ICE_PKT_OUTER_IPV6), + ICE_PKT_PROFILE(pfcp_session_ipv4, ICE_PKT_PFCP), ICE_PKT_PROFILE(pppoe_ipv6_udp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP), ICE_PKT_PROFILE(pppoe_ipv6_tcp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6), @@ -2075,6 +2149,18 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc, } /** + * ice_init_chk_recipe_reuse_support - check if recipe reuse is supported + * @hw: pointer to the hardware structure + */ +void ice_init_chk_recipe_reuse_support(struct ice_hw *hw) +{ + struct ice_nvm_info *nvm = &hw->flash.nvm; + + hw->recp_reuse = (nvm->major == 0x4 && nvm->minor >= 0x30) || + nvm->major > 0x4; +} + +/** * ice_alloc_recipe - add recipe resource * @hw: pointer to the hardware structure * @rid: recipe ID returned as response to AQ call @@ -2083,12 +2169,16 @@ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid) { DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1); u16 buf_len = __struct_size(sw_buf); + u16 res_type; int status; sw_buf->num_elems = cpu_to_le16(1); - sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE << - ICE_AQC_RES_TYPE_S) | - ICE_AQC_RES_TYPE_FLAG_SHARED); + res_type = FIELD_PREP(ICE_AQC_RES_TYPE_M, ICE_AQC_RES_TYPE_RECIPE); + if (hw->recp_reuse) + res_type |= ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_SHARED; + else + res_type |= ICE_AQC_RES_TYPE_FLAG_SHARED; + sw_buf->res_type = cpu_to_le16(res_type); status = ice_aq_alloc_free_res(hw, sw_buf, buf_len, ice_aqc_opc_alloc_res); if (!status) @@ -2098,6 +2188,70 @@ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid) } /** + * ice_free_recipe_res - free recipe resource + * @hw: pointer to the hardware structure + * @rid: recipe ID to free + * + * Return: 0 on success, and others on error + */ +static int ice_free_recipe_res(struct ice_hw *hw, u16 rid) +{ + return ice_free_hw_res(hw, ICE_AQC_RES_TYPE_RECIPE, 1, &rid); +} + +/** + * ice_release_recipe_res - disassociate and free recipe resource + * @hw: pointer to the 
hardware structure + * @recp: the recipe struct resource to unassociate and free + * + * Return: 0 on success, and others on error + */ +static int ice_release_recipe_res(struct ice_hw *hw, + struct ice_sw_recipe *recp) +{ + DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES); + struct ice_switch_info *sw = hw->switch_info; + u64 recp_assoc; + u32 rid, prof; + int status; + + for_each_set_bit(rid, recp->r_bitmap, ICE_MAX_NUM_RECIPES) { + for_each_set_bit(prof, recipe_to_profile[rid], + ICE_MAX_NUM_PROFILES) { + status = ice_aq_get_recipe_to_profile(hw, prof, + &recp_assoc, + NULL); + if (status) + return status; + + bitmap_from_arr64(r_bitmap, &recp_assoc, + ICE_MAX_NUM_RECIPES); + bitmap_andnot(r_bitmap, r_bitmap, recp->r_bitmap, + ICE_MAX_NUM_RECIPES); + bitmap_to_arr64(&recp_assoc, r_bitmap, + ICE_MAX_NUM_RECIPES); + ice_aq_map_recipe_to_profile(hw, prof, + recp_assoc, NULL); + + clear_bit(rid, profile_to_recipe[prof]); + clear_bit(prof, recipe_to_profile[rid]); + } + + status = ice_free_recipe_res(hw, rid); + if (status) + return status; + + sw->recp_list[rid].recp_created = false; + sw->recp_list[rid].adv_rule = false; + memset(&sw->recp_list[rid].lkup_exts, 0, + sizeof(sw->recp_list[rid].lkup_exts)); + clear_bit(rid, recp->r_bitmap); + } + + return 0; +} + +/** * ice_get_recp_to_prof_map - updates recipe to profile mapping * @hw: pointer to hardware structure * @@ -2146,6 +2300,7 @@ ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf, * @recps: struct that we need to populate * @rid: recipe ID that we are populating * @refresh_required: true if we should get recipe to profile mapping from FW + * @is_add: flag of adding recipe * * This function is used to populate all the necessary entries into our * bookkeeping so that we have a current list of all the recipes that are @@ -2153,7 +2308,7 @@ ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf, */ static int ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, - bool *refresh_required) + bool *refresh_required, bool is_add) { DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS); struct ice_aqc_recipe_data_elem *tmp; @@ -2270,8 +2425,12 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, recps[idx].chain_idx = ICE_INVAL_CHAIN_IND; } - if (!is_root) + if (!is_root) { + if (hw->recp_reuse && is_add) + recps[idx].recp_created = true; + continue; + } /* Only do the following for root recipes entries */ memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap, @@ -2295,7 +2454,8 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, /* Copy result indexes */ bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS); - recps[rid].recp_created = true; + if (is_add) + recps[rid].recp_created = true; err_unroll: kfree(tmp); @@ -2446,6 +2606,9 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi) fi->lan_en = true; } } + + if (fi->flag & ICE_FLTR_TX_ONLY) + fi->lan_en = false; } /** @@ -3821,6 +3984,7 @@ ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set, } else if (f_info.flag & ICE_FLTR_TX) { f_info.src_id = ICE_SRC_ID_VSI; f_info.src = hw_vsi_id; + f_info.flag |= ICE_FLTR_TX_ONLY; } f_list_entry.fltr_info = f_info; @@ -4528,6 +4692,7 @@ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = { ICE_PROTOCOL_ENTRY(ICE_NVGRE, 0, 2, 4, 6), ICE_PROTOCOL_ENTRY(ICE_GTP, 8, 10, 12, 14, 16, 18, 20, 22), ICE_PROTOCOL_ENTRY(ICE_GTP_NO_PAY, 8, 10, 12, 14), + ICE_PROTOCOL_ENTRY(ICE_PFCP, 8, 10, 12, 14, 16, 18, 20, 22), 
ICE_PROTOCOL_ENTRY(ICE_PPPOE, 0, 2, 4, 6), ICE_PROTOCOL_ENTRY(ICE_L2TPV3, 0, 2, 4, 6, 8, 10), ICE_PROTOCOL_ENTRY(ICE_VLAN_EX, 2, 0), @@ -4561,6 +4726,7 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = { { ICE_NVGRE, ICE_GRE_OF_HW }, { ICE_GTP, ICE_UDP_OF_HW }, { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW }, + { ICE_PFCP, ICE_UDP_ILOS_HW }, { ICE_PPPOE, ICE_PPPOE_HW }, { ICE_L2TPV3, ICE_L2TPV3_HW }, { ICE_VLAN_EX, ICE_VLAN_OF_HW }, @@ -4573,12 +4739,13 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = { * @hw: pointer to the hardware structure * @lkup_exts: extension sequence to match * @rinfo: information regarding the rule e.g. priority and action info + * @is_add: flag of adding recipe * * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found. */ static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts, - const struct ice_adv_rule_info *rinfo) + const struct ice_adv_rule_info *rinfo, bool is_add) { bool refresh_required = true; struct ice_sw_recipe *recp; @@ -4592,11 +4759,12 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts, * entry update it in our SW bookkeeping and continue with the * matching. */ - if (!recp[i].recp_created) + if (hw->recp_reuse) { if (ice_get_recp_frm_fw(hw, hw->switch_info->recp_list, i, - &refresh_required)) + &refresh_required, is_add)) continue; + } /* Skip inverse action recipes */ if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl & @@ -5268,6 +5436,9 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo, case ICE_SW_TUN_GTPC: prof_type = ICE_PROF_TUN_GTPC; break; + case ICE_SW_TUN_PFCP: + prof_type = ICE_PROF_TUN_PFCP; + break; case ICE_SW_TUN_AND_NON_TUN: default: prof_type = ICE_PROF_ALL; @@ -5278,6 +5449,49 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo, } /** + * ice_subscribe_recipe - subscribe to an existing recipe + * @hw: pointer to the hardware structure + * @rid: recipe ID to subscribe to + * + * Return: 0 on success, and others on error + */ +static int ice_subscribe_recipe(struct ice_hw *hw, u16 rid) +{ + DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1); + u16 buf_len = __struct_size(sw_buf); + u16 res_type; + int status; + + /* Prepare buffer to allocate resource */ + sw_buf->num_elems = cpu_to_le16(1); + res_type = FIELD_PREP(ICE_AQC_RES_TYPE_M, ICE_AQC_RES_TYPE_RECIPE) | + ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_SHARED | + ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_CTL; + sw_buf->res_type = cpu_to_le16(res_type); + + sw_buf->elem[0].e.sw_resp = cpu_to_le16(rid); + + status = ice_aq_alloc_free_res(hw, sw_buf, buf_len, + ice_aqc_opc_alloc_res); + + return status; +} + +/** + * ice_subscribable_recp_shared - share an existing subscribable recipe + * @hw: pointer to the hardware structure + * @rid: recipe ID to subscribe to + */ +static void ice_subscribable_recp_shared(struct ice_hw *hw, u16 rid) +{ + struct ice_sw_recipe *recps = hw->switch_info->recp_list; + u16 sub_rid; + + for_each_set_bit(sub_rid, recps[rid].r_bitmap, ICE_MAX_NUM_RECIPES) + ice_subscribe_recipe(hw, sub_rid); +} + +/** * ice_add_adv_recipe - Add an advanced recipe that is not part of the default * @hw: pointer to hardware structure * @lkups: lookup elements or match criteria for the advanced recipe, one @@ -5299,6 +5513,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, struct ice_sw_fv_list_entry *tmp; struct ice_sw_recipe *rm; int status = 0; + u16 rid_tmp; u8 i; if (!lkups_cnt) @@ 
-5376,10 +5591,14 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, } /* Look for a recipe which matches our requested fv / mask list */ - *rid = ice_find_recp(hw, lkup_exts, rinfo); - if (*rid < ICE_MAX_NUM_RECIPES) + *rid = ice_find_recp(hw, lkup_exts, rinfo, true); + if (*rid < ICE_MAX_NUM_RECIPES) { /* Success if found a recipe that match the existing criteria */ + if (hw->recp_reuse) + ice_subscribable_recp_shared(hw, *rid); + goto err_unroll; + } rm->tun_type = rinfo->tun_type; /* Recipe we need does not exist, add a recipe */ @@ -5398,14 +5617,14 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id, &recp_assoc, NULL); if (status) - goto err_unroll; + goto err_free_recipe; bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES); bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap, ICE_MAX_NUM_RECIPES); status = ice_acquire_change_lock(hw, ICE_RES_WRITE); if (status) - goto err_unroll; + goto err_free_recipe; bitmap_to_arr64(&recp_assoc, r_bitmap, ICE_MAX_NUM_RECIPES); status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id, @@ -5413,7 +5632,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, ice_release_change_lock(hw); if (status) - goto err_unroll; + goto err_free_recipe; /* Update profile to recipe bitmap array */ bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap, @@ -5427,6 +5646,16 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, *rid = rm->root_rid; memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts, sizeof(*lkup_exts)); + goto err_unroll; + +err_free_recipe: + if (hw->recp_reuse) { + for_each_set_bit(rid_tmp, rm->r_bitmap, ICE_MAX_NUM_RECIPES) { + if (!ice_free_recipe_res(hw, rid_tmp)) + clear_bit(rid_tmp, rm->r_bitmap); + } + } + err_unroll: list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) { list_del(&r_entry->l_entry); @@ -5552,6 +5781,9 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, case ICE_SW_TUN_VXLAN: match |= ICE_PKT_TUN_UDP; break; + case ICE_SW_TUN_PFCP: + match |= ICE_PKT_PFCP; + break; default: break; } @@ -5692,6 +5924,9 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, case ICE_GTP: len = sizeof(struct ice_udp_gtp_hdr); break; + case ICE_PFCP: + len = sizeof(struct ice_pfcp_hdr); + break; case ICE_PPPOE: len = sizeof(struct ice_pppoe_hdr); break; @@ -6440,7 +6675,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, return -EIO; } - rid = ice_find_recp(hw, &lkup_exts, rinfo); + rid = ice_find_recp(hw, &lkup_exts, rinfo, false); /* If did not find a recipe that match the existing criteria */ if (rid == ICE_MAX_NUM_RECIPES) return -EINVAL; @@ -6484,14 +6719,21 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, ice_aqc_opc_remove_sw_rules, NULL); if (!status || status == -ENOENT) { struct ice_switch_info *sw = hw->switch_info; + struct ice_sw_recipe *r_list = sw->recp_list; mutex_lock(rule_lock); list_del(&list_elem->list_entry); devm_kfree(ice_hw_to_dev(hw), list_elem->lkups); devm_kfree(ice_hw_to_dev(hw), list_elem); mutex_unlock(rule_lock); - if (list_empty(&sw->recp_list[rid].filt_rules)) - sw->recp_list[rid].adv_rule = false; + if (list_empty(&r_list[rid].filt_rules)) { + r_list[rid].adv_rule = false; + + /* All rules for this recipe are now removed */ + if (hw->recp_reuse) + ice_release_recipe_res(hw, + &r_list[rid]); + } } kfree(s_rule); } diff --git 
a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index 89ffa1b51b5a..ad98e98c812d 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -8,8 +8,9 @@ #define ICE_SW_CFG_MAX_BUF_LEN 2048 #define ICE_DFLT_VSI_INVAL 0xff -#define ICE_FLTR_RX BIT(0) -#define ICE_FLTR_TX BIT(1) +#define ICE_FLTR_RX BIT(0) +#define ICE_FLTR_TX BIT(1) +#define ICE_FLTR_TX_ONLY BIT(2) #define ICE_VSI_INVAL_ID 0xffff #define ICE_INVAL_Q_HANDLE 0xFFFF @@ -21,6 +22,8 @@ #define ICE_PROFID_IPV6_GTPC_NO_TEID 45 #define ICE_PROFID_IPV6_GTPU_TEID 46 #define ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER 70 +#define ICE_PROFID_IPV4_PFCP_NODE 79 +#define ICE_PROFID_IPV6_PFCP_SESSION 82 #define ICE_SW_RULE_VSI_LIST_SIZE(s, n) struct_size((s), vsi, (n)) #define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l) struct_size((s), hdr_data, (l)) @@ -429,5 +432,6 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc, int ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 r_assoc, struct ice_sq_cd *cd); +void ice_init_chk_recipe_reuse_support(struct ice_hw *hw); #endif /* _ICE_SWITCH_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index 688ccb0615ab..8bd24b33f3a6 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -37,7 +37,10 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers, if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC) lkups_cnt++; - if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS) + if (flags & ICE_TC_FLWR_FIELD_GTP_OPTS) + lkups_cnt++; + + if (flags & ICE_TC_FLWR_FIELD_PFCP_OPTS) lkups_cnt++; if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 | @@ -140,6 +143,8 @@ ice_proto_type_from_tunnel(enum ice_tunnel_type type) return ICE_GTP; case TNL_GTPC: return ICE_GTP_NO_PAY; + case TNL_PFCP: + return ICE_PFCP; default: return 0; } @@ -159,6 +164,8 @@ ice_sw_type_from_tunnel(enum ice_tunnel_type type) return ICE_SW_TUN_GTPU; case TNL_GTPC: return ICE_SW_TUN_GTPC; + case TNL_PFCP: + return ICE_SW_TUN_PFCP; default: return ICE_NON_TUN; } @@ -221,8 +228,7 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr, i++; } - if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS && - (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) { + if (flags & ICE_TC_FLWR_FIELD_GTP_OPTS) { list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type); if (fltr->gtp_pdu_info_masks.pdu_type) { @@ -239,6 +245,22 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr, i++; } + if (flags & ICE_TC_FLWR_FIELD_PFCP_OPTS) { + struct ice_pfcp_hdr *hdr_h, *hdr_m; + + hdr_h = &list[i].h_u.pfcp_hdr; + hdr_m = &list[i].m_u.pfcp_hdr; + list[i].type = ICE_PFCP; + + hdr_h->flags = fltr->pfcp_meta_keys.type; + hdr_m->flags = fltr->pfcp_meta_masks.type & 0x01; + + hdr_h->seid = fltr->pfcp_meta_keys.seid; + hdr_m->seid = fltr->pfcp_meta_masks.seid; + + i++; + } + if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 | ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) { list[i].type = ice_proto_type_from_ipv4(false); @@ -374,8 +396,11 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags, if (tc_fltr->tunnel_type != TNL_LAST) { i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list, i); - headers = &tc_fltr->inner_headers; - inner = true; + /* PFCP is considered non-tunneled - don't swap headers. 
*/ + if (tc_fltr->tunnel_type != TNL_PFCP) { + headers = &tc_fltr->inner_headers; + inner = true; + } } if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) { @@ -629,6 +654,8 @@ static int ice_tc_tun_get_type(struct net_device *tunnel_dev) */ if (netif_is_gtp(tunnel_dev)) return TNL_GTPU; + if (netif_is_pfcp(tunnel_dev)) + return TNL_PFCP; return TNL_LAST; } @@ -642,13 +669,19 @@ static bool ice_tc_is_dev_uplink(struct net_device *dev) return netif_is_ice(dev) || ice_is_tunnel_supported(dev); } -static int ice_tc_setup_redirect_action(struct net_device *filter_dev, - struct ice_tc_flower_fltr *fltr, - struct net_device *target_dev) +static int ice_tc_setup_action(struct net_device *filter_dev, + struct ice_tc_flower_fltr *fltr, + struct net_device *target_dev, + enum ice_sw_fwd_act_type action) { struct ice_repr *repr; - fltr->action.fltr_act = ICE_FWD_TO_VSI; + if (action != ICE_FWD_TO_VSI && action != ICE_MIRROR_PACKET) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action to setup provided"); + return -EINVAL; + } + + fltr->action.fltr_act = action; if (ice_is_port_repr_netdev(filter_dev) && ice_is_port_repr_netdev(target_dev)) { @@ -696,41 +729,6 @@ ice_tc_setup_drop_action(struct net_device *filter_dev, return 0; } -static int ice_tc_setup_mirror_action(struct net_device *filter_dev, - struct ice_tc_flower_fltr *fltr, - struct net_device *target_dev) -{ - struct ice_repr *repr; - - fltr->action.fltr_act = ICE_MIRROR_PACKET; - - if (ice_is_port_repr_netdev(filter_dev) && - ice_is_port_repr_netdev(target_dev)) { - repr = ice_netdev_to_repr(target_dev); - - fltr->dest_vsi = repr->src_vsi; - fltr->direction = ICE_ESWITCH_FLTR_EGRESS; - } else if (ice_is_port_repr_netdev(filter_dev) && - ice_tc_is_dev_uplink(target_dev)) { - repr = ice_netdev_to_repr(filter_dev); - - fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi; - fltr->direction = ICE_ESWITCH_FLTR_EGRESS; - } else if (ice_tc_is_dev_uplink(filter_dev) && - ice_is_port_repr_netdev(target_dev)) { - repr = ice_netdev_to_repr(target_dev); - - fltr->dest_vsi = repr->src_vsi; - fltr->direction = ICE_ESWITCH_FLTR_INGRESS; - } else { - NL_SET_ERR_MSG_MOD(fltr->extack, - "Unsupported netdevice in switchdev mode"); - return -EINVAL; - } - - return 0; -} - static int ice_eswitch_tc_parse_action(struct net_device *filter_dev, struct ice_tc_flower_fltr *fltr, struct flow_action_entry *act) @@ -746,16 +744,19 @@ static int ice_eswitch_tc_parse_action(struct net_device *filter_dev, break; case FLOW_ACTION_REDIRECT: - err = ice_tc_setup_redirect_action(filter_dev, fltr, act->dev); + err = ice_tc_setup_action(filter_dev, fltr, + act->dev, ICE_FWD_TO_VSI); if (err) return err; break; case FLOW_ACTION_MIRRED: - err = ice_tc_setup_mirror_action(filter_dev, fltr, act->dev); + err = ice_tc_setup_action(filter_dev, fltr, + act->dev, ICE_MIRROR_PACKET); if (err) return err; + break; default: @@ -1409,7 +1410,8 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule, } } - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) { + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS) && + (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) { struct flow_match_enc_opts match; flow_rule_match_enc_opts(rule, &match); @@ -1420,7 +1422,21 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule, memcpy(&fltr->gtp_pdu_info_masks, &match.mask->data[0], sizeof(struct gtp_pdu_session_info)); - fltr->flags |= ICE_TC_FLWR_FIELD_ENC_OPTS; + fltr->flags |= ICE_TC_FLWR_FIELD_GTP_OPTS; + } + + if 
(flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS) && + fltr->tunnel_type == TNL_PFCP) { + struct flow_match_enc_opts match; + + flow_rule_match_enc_opts(rule, &match); + + memcpy(&fltr->pfcp_meta_keys, match.key->data, + sizeof(struct pfcp_metadata)); + memcpy(&fltr->pfcp_meta_masks, match.mask->data, + sizeof(struct pfcp_metadata)); + + fltr->flags |= ICE_TC_FLWR_FIELD_PFCP_OPTS; } return 0; @@ -1481,10 +1497,14 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi, return err; } - /* header pointers should point to the inner headers, outer - * header were already set by ice_parse_tunnel_attr - */ - headers = &fltr->inner_headers; + /* PFCP is considered non-tunneled - don't swap headers. */ + if (fltr->tunnel_type != TNL_PFCP) { + /* Header pointers should point to the inner headers, + * outer header were already set by + * ice_parse_tunnel_attr(). + */ + headers = &fltr->inner_headers; + } } else if (dissector->used_keys & (BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | @@ -1638,6 +1658,10 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi, flow_rule_match_control(rule, &match); addr_type = match.key->addr_type; + + if (flow_rule_has_control_flags(match.mask->flags, + fltr->extack)) + return -EOPNOTSUPP; } if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h index 65d387163a46..d84f153517ec 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h @@ -4,6 +4,9 @@ #ifndef _ICE_TC_LIB_H_ #define _ICE_TC_LIB_H_ +#include <linux/bits.h> +#include <net/pfcp.h> + #define ICE_TC_FLWR_FIELD_DST_MAC BIT(0) #define ICE_TC_FLWR_FIELD_SRC_MAC BIT(1) #define ICE_TC_FLWR_FIELD_VLAN BIT(2) @@ -22,7 +25,7 @@ #define ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT BIT(15) #define ICE_TC_FLWR_FIELD_ENC_DST_MAC BIT(16) #define ICE_TC_FLWR_FIELD_ETH_TYPE_ID BIT(17) -#define ICE_TC_FLWR_FIELD_ENC_OPTS BIT(18) +#define ICE_TC_FLWR_FIELD_GTP_OPTS BIT(18) #define ICE_TC_FLWR_FIELD_CVLAN BIT(19) #define ICE_TC_FLWR_FIELD_PPPOE_SESSID BIT(20) #define ICE_TC_FLWR_FIELD_PPP_PROTO BIT(21) @@ -34,6 +37,7 @@ #define ICE_TC_FLWR_FIELD_VLAN_PRIO BIT(27) #define ICE_TC_FLWR_FIELD_CVLAN_PRIO BIT(28) #define ICE_TC_FLWR_FIELD_VLAN_TPID BIT(29) +#define ICE_TC_FLWR_FIELD_PFCP_OPTS BIT(30) #define ICE_TC_FLOWER_MASK_32 0xFFFFFFFF @@ -161,6 +165,8 @@ struct ice_tc_flower_fltr { __be32 tenant_id; struct gtp_pdu_session_info gtp_pdu_info_keys; struct gtp_pdu_session_info gtp_pdu_info_masks; + struct pfcp_metadata pfcp_meta_keys; + struct pfcp_metadata pfcp_meta_masks; u32 flags; u8 tunnel_type; struct ice_tc_flower_action action; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 97d41d6ebf1f..8bb743f78fcb 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -1051,8 +1051,7 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) } /* allocate a skb to store the frags */ - skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE, - GFP_ATOMIC | __GFP_NOWARN); + skb = napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE); if (unlikely(!skb)) return NULL; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index af955b0e5dc5..feba314a3fe4 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ 
b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -365,6 +365,7 @@ struct ice_rx_ring { u8 ptp_rx; #define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1) #define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2) +#define ICE_RX_FLAGS_MULTIDEV BIT(3) u8 flags; /* CL5 - 5th cacheline starts here */ struct xdp_rxq_info xdp_rxq; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c index f8f1d2bdc1be..2719f0e20933 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c @@ -2,6 +2,7 @@ /* Copyright (c) 2019, Intel Corporation. */ #include <linux/filter.h> +#include <linux/net/intel/libie/rx.h> #include "ice_txrx_lib.h" #include "ice_eswitch.h" @@ -39,30 +40,6 @@ void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val) } /** - * ice_ptype_to_htype - get a hash type - * @ptype: the ptype value from the descriptor - * - * Returns appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be used by - * skb_set_hash based on PTYPE as parsed by HW Rx pipeline and is part of - * Rx desc. - */ -static enum pkt_hash_types ice_ptype_to_htype(u16 ptype) -{ - struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype); - - if (!decoded.known) - return PKT_HASH_TYPE_NONE; - if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4) - return PKT_HASH_TYPE_L4; - if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3) - return PKT_HASH_TYPE_L3; - if (decoded.outer_ip == ICE_RX_PTYPE_OUTER_L2) - return PKT_HASH_TYPE_L2; - - return PKT_HASH_TYPE_NONE; -} - -/** * ice_get_rx_hash - get RX hash value from descriptor * @rx_desc: specific descriptor * @@ -91,14 +68,16 @@ ice_rx_hash_to_skb(const struct ice_rx_ring *rx_ring, const union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb, u16 rx_ptype) { + struct libeth_rx_pt decoded; u32 hash; - if (!(rx_ring->netdev->features & NETIF_F_RXHASH)) + decoded = libie_rx_pt_parse(rx_ptype); + if (!libeth_rx_pt_has_hash(rx_ring->netdev, decoded)) return; hash = ice_get_rx_hash(rx_desc); if (likely(hash)) - skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype)); + libeth_rx_pt_set_hash(skb, hash, decoded); } /** @@ -114,34 +93,26 @@ static void ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb, union ice_32b_rx_flex_desc *rx_desc, u16 ptype) { - struct ice_rx_ptype_decoded decoded; + struct libeth_rx_pt decoded; u16 rx_status0, rx_status1; bool ipv4, ipv6; - rx_status0 = le16_to_cpu(rx_desc->wb.status_error0); - rx_status1 = le16_to_cpu(rx_desc->wb.status_error1); - - decoded = ice_decode_rx_desc_ptype(ptype); - /* Start with CHECKSUM_NONE and by default csum_level = 0 */ skb->ip_summed = CHECKSUM_NONE; - skb_checksum_none_assert(skb); - /* check if Rx checksum is enabled */ - if (!(ring->netdev->features & NETIF_F_RXCSUM)) + decoded = libie_rx_pt_parse(ptype); + if (!libeth_rx_pt_has_checksum(ring->netdev, decoded)) return; + rx_status0 = le16_to_cpu(rx_desc->wb.status_error0); + rx_status1 = le16_to_cpu(rx_desc->wb.status_error1); + /* check if HW has decoded the packet and checksum */ if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))) return; - if (!(decoded.known && decoded.outer_ip)) - return; - - ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) && - (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4); - ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) && - (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6); + ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4; + ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6; if 
(ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))) { ring->vsi->back->hw_rx_eipe_error++; @@ -169,19 +140,10 @@ ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb, * we need to bump the checksum level by 1 to reflect the fact that * we are indicating we validated the inner checksum. */ - if (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT) + if (decoded.tunnel_type >= LIBETH_RX_PT_TUNNEL_IP_GRENAT) skb->csum_level = 1; - /* Only report checksum unnecessary for TCP, UDP, or SCTP */ - switch (decoded.inner_prot) { - case ICE_RX_PTYPE_INNER_PROT_TCP: - case ICE_RX_PTYPE_INNER_PROT_UDP: - case ICE_RX_PTYPE_INNER_PROT_SCTP: - skb->ip_summed = CHECKSUM_UNNECESSARY; - break; - default: - break; - } + skb->ip_summed = CHECKSUM_UNNECESSARY; return; checksum_fail: @@ -236,7 +198,16 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring, ice_rx_hash_to_skb(rx_ring, rx_desc, skb, ptype); /* modifies the skb - consumes the enet header */ - skb->protocol = eth_type_trans(skb, rx_ring->netdev); + if (unlikely(rx_ring->flags & ICE_RX_FLAGS_MULTIDEV)) { + struct net_device *netdev = ice_eswitch_get_target(rx_ring, + rx_desc); + + if (ice_is_port_repr_netdev(netdev)) + ice_repr_inc_rx_stats(netdev, skb->len); + skb->protocol = eth_type_trans(skb, netdev); + } else { + skb->protocol = eth_type_trans(skb, rx_ring->netdev); + } ice_rx_csum(rx_ring, skb, rx_desc, ptype); @@ -527,42 +498,6 @@ static int ice_xdp_rx_hw_ts(const struct xdp_md *ctx, u64 *ts_ns) return 0; } -/* Define a ptype index -> XDP hash type lookup table. - * It uses the same ptype definitions as ice_decode_rx_desc_ptype[], - * avoiding possible copy-paste errors. - */ -#undef ICE_PTT -#undef ICE_PTT_UNUSED_ENTRY - -#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ - [PTYPE] = XDP_RSS_L3_##OUTER_IP_VER | XDP_RSS_L4_##I | XDP_RSS_TYPE_##PL - -#define ICE_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = 0 - -/* A few supplementary definitions for when XDP hash types do not coincide - * with what can be generated from ptype definitions - * by means of preprocessor concatenation. 
- */ -#define XDP_RSS_L3_NONE XDP_RSS_TYPE_NONE -#define XDP_RSS_L4_NONE XDP_RSS_TYPE_NONE -#define XDP_RSS_TYPE_PAY2 XDP_RSS_TYPE_L2 -#define XDP_RSS_TYPE_PAY3 XDP_RSS_TYPE_NONE -#define XDP_RSS_TYPE_PAY4 XDP_RSS_L4 - -static const enum xdp_rss_hash_type -ice_ptype_to_xdp_hash[ICE_NUM_DEFINED_PTYPES] = { - ICE_PTYPES -}; - -#undef XDP_RSS_L3_NONE -#undef XDP_RSS_L4_NONE -#undef XDP_RSS_TYPE_PAY2 -#undef XDP_RSS_TYPE_PAY3 -#undef XDP_RSS_TYPE_PAY4 - -#undef ICE_PTT -#undef ICE_PTT_UNUSED_ENTRY - /** * ice_xdp_rx_hash_type - Get XDP-specific hash type from the RX descriptor * @eop_desc: End of Packet descriptor @@ -570,12 +505,7 @@ ice_ptype_to_xdp_hash[ICE_NUM_DEFINED_PTYPES] = { static enum xdp_rss_hash_type ice_xdp_rx_hash_type(const union ice_32b_rx_flex_desc *eop_desc) { - u16 ptype = ice_get_ptype(eop_desc); - - if (unlikely(ptype >= ICE_NUM_DEFINED_PTYPES)) - return 0; - - return ice_ptype_to_xdp_hash[ptype]; + return libie_rx_pt_parse(ice_get_ptype(eop_desc)).hash_type; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 9ff92dba5823..f0796a93f428 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -150,7 +150,6 @@ enum ice_vsi_type { ICE_VSI_CTRL = 3, /* equates to ICE_VSI_PF with 1 queue pair */ ICE_VSI_CHNL = 4, ICE_VSI_LB = 6, - ICE_VSI_SWITCHDEV_CTRL = 7, }; struct ice_link_status { @@ -204,6 +203,7 @@ struct ice_phy_info { enum ice_fltr_ptype { /* NONE - used for undef/error */ ICE_FLTR_PTYPE_NONF_NONE = 0, + ICE_FLTR_PTYPE_NONF_ETH, ICE_FLTR_PTYPE_NONF_IPV4_UDP, ICE_FLTR_PTYPE_NONF_IPV4_TCP, ICE_FLTR_PTYPE_NONF_IPV4_SCTP, @@ -296,6 +296,7 @@ struct ice_hw_common_caps { bool pcie_reset_avoidance; /* Post update reset restriction */ bool reset_restrict_support; + bool tx_sched_topo_comp_mode_en; }; /* IEEE 1588 TIME_SYNC specific info */ @@ -849,6 +850,8 @@ struct ice_hw { u16 max_burst_size; /* driver sets this value */ + u8 recp_reuse:1; /* indicates whether FW supports recipe reuse */ + /* Tx Scheduler values */ u8 num_tx_sched_layers; u8 num_tx_sched_phys_layers; diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index d10a4be965b5..48a8d462d76a 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -259,20 +259,18 @@ static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf) int ice_vf_reconfig_vsi(struct ice_vf *vf) { struct ice_vsi *vsi = ice_get_vf_vsi(vf); - struct ice_vsi_cfg_params params = {}; struct ice_pf *pf = vf->pf; int err; if (WARN_ON(!vsi)) return -EINVAL; - params = ice_vsi_to_params(vsi); - params.flags = ICE_VSI_FLAG_NO_INIT; + vsi->flags = ICE_VSI_FLAG_NO_INIT; ice_vsi_decfg(vsi); ice_fltr_remove_all(vsi); - err = ice_vsi_cfg(vsi, ¶ms); + err = ice_vsi_cfg(vsi); if (err) { dev_err(ice_pf_to_dev(pf), "Failed to reconfigure the VF%u's VSI, error %d\n", @@ -992,10 +990,13 @@ void ice_initialize_vf_entry(struct ice_vf *vf) /* assign default capabilities */ vf->spoofchk = true; - vf->num_vf_qs = vfs->num_qps_per; ice_vc_set_default_allowlist(vf); ice_virtchnl_set_dflt_ops(vf); + /* set default number of MSI-X */ + vf->num_msix = vfs->num_msix_per; + vf->num_vf_qs = vfs->num_qps_per; + /* ctrl_vsi_idx will be set to a valid value only when iAVF * creates its first fdir rule. 
*/ @@ -1240,7 +1241,7 @@ struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf) struct ice_vsi *vsi; params.type = ICE_VSI_CTRL; - params.pi = ice_vf_get_port_info(vf); + params.port_info = ice_vf_get_port_info(vf); params.vf = vf; params.flags = ICE_VSI_FLAG_INIT; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index 1ff9818b4c84..1c6ce0c4ed4e 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -1505,13 +1505,12 @@ error_param: * ice_cfg_interrupt * @vf: pointer to the VF info * @vsi: the VSI being configured - * @vector_id: vector ID * @map: vector map for mapping vectors to queues * @q_vector: structure for interrupt vector * configure the IRQ to queue map */ -static int -ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id, +static enum virtchnl_status_code +ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, struct virtchnl_vector_map *map, struct ice_q_vector *q_vector) { @@ -1531,7 +1530,8 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id, q_vector->num_ring_rx++; q_vector->rx.itr_idx = map->rxitr_idx; vsi->rx_rings[vsi_q_id]->q_vector = q_vector; - ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id, + ice_cfg_rxq_interrupt(vsi, vsi_q_id, + q_vector->vf_reg_idx, q_vector->rx.itr_idx); } @@ -1545,7 +1545,8 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id, q_vector->num_ring_tx++; q_vector->tx.itr_idx = map->txitr_idx; vsi->tx_rings[vsi_q_id]->q_vector = q_vector; - ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id, + ice_cfg_txq_interrupt(vsi, vsi_q_id, + q_vector->vf_reg_idx, q_vector->tx.itr_idx); } @@ -1619,8 +1620,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) } /* lookout for the invalid queue index */ - v_ret = (enum virtchnl_status_code) - ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector); + v_ret = ice_cfg_interrupt(vf, vsi, map, q_vector); if (v_ret) goto error_param; } diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c index 4a6c850d83ac..7aae7fdcfcdb 100644 --- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c +++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c @@ -72,7 +72,6 @@ void ice_vsi_init_vlan_ops(struct ice_vsi *vsi) switch (vsi->type) { case ICE_VSI_PF: - case ICE_VSI_SWITCHDEV_CTRL: ice_pf_vsi_init_vlan_ops(vsi); break; case ICE_VSI_VF: diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 1857220d27fe..aa81d1162b81 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -555,8 +555,7 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) } net_prefetch(xdp->data_meta); - skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize, - GFP_ATOMIC | __GFP_NOWARN); + skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize); if (unlikely(!skb)) return NULL; diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index 5d3532c27d57..52ceda6306a3 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -2234,7 +2234,7 @@ static int idpf_change_mtu(struct net_device *netdev, int new_mtu) idpf_vport_ctrl_lock(netdev); vport = idpf_netdev_to_vport(netdev); - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); err = idpf_initiate_soft_reset(vport, IDPF_SR_MTU_CHANGE); diff 
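The idpf hunk just above, like the igb/igbvf/igc/ixgbe/jme/lantiq/marvell hunks later in this diff, converts the MTU update in .ndo_change_mtu to WRITE_ONCE(). The value is now read locklessly elsewhere, for example libeth_rx_hw_len() further down uses READ_ONCE(pp->netdev->mtu), so the ONCE pair marks the data race for KCSAN and stops the compiler from tearing or caching the access. A hedged sketch of the writer/reader pairing with placeholder demo_* names:

#include <linux/if_ether.h>
#include <linux/netdevice.h>

/* Writer: .ndo_change_mtu runs under RTNL, but readers may not hold it. */
static int demo_change_mtu(struct net_device *netdev, int new_mtu)
{
	/* Single marked store so lockless readers never observe a torn or
	 * compiler-reordered value.
	 */
	WRITE_ONCE(netdev->mtu, new_mtu);
	return 0;
}

/* Reader: e.g. sizing an Rx buffer outside the RTNL-protected paths. */
static u32 demo_rx_buf_len(const struct net_device *netdev)
{
	u32 mtu = READ_ONCE(netdev->mtu);

	return mtu + ETH_HLEN + ETH_FCS_LEN;
}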
--git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index f5bc4a278074..285da2177ee4 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -3005,8 +3005,7 @@ struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq, /* prefetch first cache line of first page */ net_prefetch(va); /* allocate a skb to store the frags */ - skb = __napi_alloc_skb(&rxq->q_vector->napi, IDPF_RX_HDR_SIZE, - GFP_ATOMIC); + skb = napi_alloc_skb(&rxq->q_vector->napi, IDPF_RX_HDR_SIZE); if (unlikely(!skb)) { idpf_rx_put_page(rx_buf); @@ -3060,7 +3059,7 @@ static struct sk_buff *idpf_rx_hdr_construct_skb(struct idpf_queue *rxq, struct sk_buff *skb; /* allocate a skb to store the frags */ - skb = __napi_alloc_skb(&rxq->q_vector->napi, size, GFP_ATOMIC); + skb = napi_alloc_skb(&rxq->q_vector->napi, size); if (unlikely(!skb)) return NULL; diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h index df76493faa75..3d046b81e507 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h @@ -8,6 +8,8 @@ #include <net/tcp.h> #include <net/netdev_queues.h> +#include "virtchnl2_lan_desc.h" + #define IDPF_LARGE_MAX_Q 256 #define IDPF_MAX_Q 16 #define IDPF_MIN_Q 2 diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2.h b/drivers/net/ethernet/intel/idpf/virtchnl2.h index 4a3c4454d25a..63deb120359c 100644 --- a/drivers/net/ethernet/intel/idpf/virtchnl2.h +++ b/drivers/net/ethernet/intel/idpf/virtchnl2.h @@ -4,6 +4,8 @@ #ifndef _VIRTCHNL2_H_ #define _VIRTCHNL2_H_ +#include <linux/if_ether.h> + /* All opcodes associated with virtchnl2 are prefixed with virtchnl2 or * VIRTCHNL2. Any future opcodes, offloads/capabilities, structures, * and defines must be prefixed with virtchnl2 or VIRTCHNL2 to avoid confusion. @@ -17,8 +19,6 @@ * must remain unchanged over time, so we specify explicit values for all enums. */ -#include "virtchnl2_lan_desc.h" - /* This macro is used to generate compilation errors if a structure * is not exactly the correct length. 
*/ @@ -555,7 +555,7 @@ VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_queue_reg_chunk); struct virtchnl2_queue_reg_chunks { __le16 num_chunks; u8 pad[6]; - struct virtchnl2_queue_reg_chunk chunks[]; + struct virtchnl2_queue_reg_chunk chunks[] __counted_by_le(num_chunks); }; VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks); @@ -703,7 +703,7 @@ struct virtchnl2_config_tx_queues { __le32 vport_id; __le16 num_qinfo; u8 pad[10]; - struct virtchnl2_txq_info qinfo[]; + struct virtchnl2_txq_info qinfo[] __counted_by_le(num_qinfo); }; VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_config_tx_queues); @@ -782,7 +782,7 @@ struct virtchnl2_config_rx_queues { __le32 vport_id; __le16 num_qinfo; u8 pad[18]; - struct virtchnl2_rxq_info qinfo[]; + struct virtchnl2_rxq_info qinfo[] __counted_by_le(num_qinfo); }; VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_config_rx_queues); @@ -868,7 +868,7 @@ VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_vector_chunk); struct virtchnl2_vector_chunks { __le16 num_vchunks; u8 pad[14]; - struct virtchnl2_vector_chunk vchunks[]; + struct virtchnl2_vector_chunk vchunks[] __counted_by_le(num_vchunks); }; VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_vector_chunks); @@ -912,7 +912,7 @@ struct virtchnl2_rss_lut { __le16 lut_entries_start; __le16 lut_entries; u8 pad[4]; - __le32 lut[]; + __le32 lut[] __counted_by_le(lut_entries); }; VIRTCHNL2_CHECK_STRUCT_LEN(12, virtchnl2_rss_lut); @@ -977,7 +977,7 @@ struct virtchnl2_ptype { u8 ptype_id_8; u8 proto_id_count; __le16 pad; - __le16 proto_id[]; + __le16 proto_id[] __counted_by(proto_id_count); } __packed __aligned(2); VIRTCHNL2_CHECK_STRUCT_LEN(6, virtchnl2_ptype); @@ -1104,7 +1104,7 @@ struct virtchnl2_rss_key { __le32 vport_id; __le16 key_len; u8 pad; - u8 key_flex[]; + u8 key_flex[] __counted_by_le(key_len); } __packed; VIRTCHNL2_CHECK_STRUCT_LEN(7, virtchnl2_rss_key); @@ -1131,7 +1131,7 @@ VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_queue_chunk); struct virtchnl2_queue_chunks { __le16 num_chunks; u8 pad[6]; - struct virtchnl2_queue_chunk chunks[]; + struct virtchnl2_queue_chunk chunks[] __counted_by_le(num_chunks); }; VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_chunks); @@ -1195,7 +1195,7 @@ struct virtchnl2_queue_vector_maps { __le32 vport_id; __le16 num_qv_maps; u8 pad[10]; - struct virtchnl2_queue_vector qv_maps[]; + struct virtchnl2_queue_vector qv_maps[] __counted_by_le(num_qv_maps); }; VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_queue_vector_maps); @@ -1247,7 +1247,7 @@ struct virtchnl2_mac_addr_list { __le32 vport_id; __le16 num_mac_addr; u8 pad[2]; - struct virtchnl2_mac_addr mac_addr_list[]; + struct virtchnl2_mac_addr mac_addr_list[] __counted_by_le(num_mac_addr); }; VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_mac_addr_list); diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 99977a22b843..61d72250c0ed 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -3272,19 +3272,6 @@ static int igb_get_module_eeprom(struct net_device *netdev, return 0; } -static int igb_ethtool_begin(struct net_device *netdev) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - pm_runtime_get_sync(&adapter->pdev->dev); - return 0; -} - -static void igb_ethtool_complete(struct net_device *netdev) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - pm_runtime_put(&adapter->pdev->dev); -} - static u32 igb_get_rxfh_indir_size(struct net_device *netdev) { return IGB_RETA_SIZE; @@ -3508,8 +3495,6 @@ static const struct ethtool_ops igb_ethtool_ops = 
{ .set_channels = igb_set_channels, .get_priv_flags = igb_get_priv_flags, .set_priv_flags = igb_set_priv_flags, - .begin = igb_ethtool_begin, - .complete = igb_ethtool_complete, .get_link_ksettings = igb_get_link_ksettings, .set_link_ksettings = igb_set_link_ksettings, }; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index a3f100769e39..fce2930ae6af 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -106,8 +106,6 @@ static int igb_setup_all_rx_resources(struct igb_adapter *); static void igb_free_all_tx_resources(struct igb_adapter *); static void igb_free_all_rx_resources(struct igb_adapter *); static void igb_setup_mrqc(struct igb_adapter *); -static int igb_probe(struct pci_dev *, const struct pci_device_id *); -static void igb_remove(struct pci_dev *pdev); static void igb_init_queue_configuration(struct igb_adapter *adapter); static int igb_sw_init(struct igb_adapter *); int igb_open(struct net_device *); @@ -178,20 +176,6 @@ static int igb_vf_configure(struct igb_adapter *adapter, int vf); static int igb_disable_sriov(struct pci_dev *dev, bool reinit); #endif -static int igb_suspend(struct device *); -static int igb_resume(struct device *); -static int igb_runtime_suspend(struct device *dev); -static int igb_runtime_resume(struct device *dev); -static int igb_runtime_idle(struct device *dev); -#ifdef CONFIG_PM -static const struct dev_pm_ops igb_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume) - SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume, - igb_runtime_idle) -}; -#endif -static void igb_shutdown(struct pci_dev *); -static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs); #ifdef CONFIG_IGB_DCA static int igb_notify_dca(struct notifier_block *, unsigned long, void *); static struct notifier_block dca_notifier = { @@ -219,19 +203,6 @@ static const struct pci_error_handlers igb_err_handler = { static void igb_init_dmac(struct igb_adapter *adapter, u32 pba); -static struct pci_driver igb_driver = { - .name = igb_driver_name, - .id_table = igb_pci_tbl, - .probe = igb_probe, - .remove = igb_remove, -#ifdef CONFIG_PM - .driver.pm = &igb_pm_ops, -#endif - .shutdown = igb_shutdown, - .sriov_configure = igb_pci_sriov_configure, - .err_handler = &igb_err_handler -}; - MODULE_AUTHOR("Intel Corporation, <[email protected]>"); MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); MODULE_LICENSE("GPL v2"); @@ -647,6 +618,8 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw) return adapter->netdev; } +static struct pci_driver igb_driver; + /** * igb_init_module - Driver Registration Routine * @@ -2624,6 +2597,9 @@ static int igb_parse_cls_flower(struct igb_adapter *adapter, return -EOPNOTSUPP; } + if (flow_rule_match_has_control_flags(rule, extack)) + return -EOPNOTSUPP; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { struct flow_match_eth_addrs match; @@ -6668,7 +6644,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu) netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); if (netif_running(netdev)) igb_up(adapter); @@ -9453,12 +9429,12 @@ static void igb_deliver_wake_packet(struct net_device *netdev) netif_rx(skb); } -static int __maybe_unused igb_suspend(struct device *dev) +static int igb_suspend(struct device *dev) { return __igb_shutdown(to_pci_dev(dev), NULL, 0); } -static int __maybe_unused __igb_resume(struct device 
*dev, bool rpm) +static int __igb_resume(struct device *dev, bool rpm) { struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); @@ -9514,12 +9490,12 @@ static int __maybe_unused __igb_resume(struct device *dev, bool rpm) return err; } -static int __maybe_unused igb_resume(struct device *dev) +static int igb_resume(struct device *dev) { return __igb_resume(dev, false); } -static int __maybe_unused igb_runtime_idle(struct device *dev) +static int igb_runtime_idle(struct device *dev) { struct net_device *netdev = dev_get_drvdata(dev); struct igb_adapter *adapter = netdev_priv(netdev); @@ -9530,12 +9506,12 @@ static int __maybe_unused igb_runtime_idle(struct device *dev) return -EBUSY; } -static int __maybe_unused igb_runtime_suspend(struct device *dev) +static int igb_runtime_suspend(struct device *dev) { return __igb_shutdown(to_pci_dev(dev), NULL, 1); } -static int __maybe_unused igb_runtime_resume(struct device *dev) +static int igb_runtime_resume(struct device *dev) { return __igb_resume(dev, true); } @@ -10157,4 +10133,20 @@ static void igb_nfc_filter_restore(struct igb_adapter *adapter) spin_unlock(&adapter->nfc_lock); } + +static _DEFINE_DEV_PM_OPS(igb_pm_ops, igb_suspend, igb_resume, + igb_runtime_suspend, igb_runtime_resume, + igb_runtime_idle); + +static struct pci_driver igb_driver = { + .name = igb_driver_name, + .id_table = igb_pci_tbl, + .probe = igb_probe, + .remove = igb_remove, + .driver.pm = pm_ptr(&igb_pm_ops), + .shutdown = igb_shutdown, + .sriov_configure = igb_pci_sriov_configure, + .err_handler = &igb_err_handler +}; + /* igb_main.c */ diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index b0cf310e6f7b..7661edd7d0f2 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -2434,7 +2434,7 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu) netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); if (netif_running(netdev)) igbvf_up(adapter); @@ -2470,7 +2470,7 @@ static int igbvf_suspend(struct device *dev_d) return 0; } -static int __maybe_unused igbvf_resume(struct device *dev_d) +static int igbvf_resume(struct device *dev_d) { struct pci_dev *pdev = to_pci_dev(dev_d); struct net_device *netdev = pci_get_drvdata(pdev); @@ -2957,7 +2957,7 @@ static const struct pci_device_id igbvf_pci_tbl[] = { }; MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl); -static SIMPLE_DEV_PM_OPS(igbvf_pm_ops, igbvf_suspend, igbvf_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(igbvf_pm_ops, igbvf_suspend, igbvf_resume); /* PCI Device API Driver */ static struct pci_driver igbvf_driver = { @@ -2965,7 +2965,7 @@ static struct pci_driver igbvf_driver = { .id_table = igbvf_pci_tbl, .probe = igbvf_probe, .remove = igbvf_remove, - .driver.pm = &igbvf_pm_ops, + .driver.pm = pm_sleep_ptr(&igbvf_pm_ops), .shutdown = igbvf_shutdown, .err_handler = &igbvf_err_handler }; diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 6bc56c7c181e..8b14c029eda1 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -72,13 +72,46 @@ struct igc_rx_packet_stats { u64 other_packets; }; +enum igc_tx_buffer_type { + IGC_TX_BUFFER_TYPE_SKB, + IGC_TX_BUFFER_TYPE_XDP, + IGC_TX_BUFFER_TYPE_XSK, +}; + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct igc_tx_buffer { + 
union igc_adv_tx_desc *next_to_watch; + unsigned long time_stamp; + enum igc_tx_buffer_type type; + union { + struct sk_buff *skb; + struct xdp_frame *xdpf; + }; + unsigned int bytecount; + u16 gso_segs; + __be16 protocol; + + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; + bool xsk_pending_ts; +}; + struct igc_tx_timestamp_request { - struct sk_buff *skb; /* reference to the packet being timestamped */ + union { /* reference to the packet being timestamped */ + struct sk_buff *skb; + struct igc_tx_buffer *xsk_tx_buffer; + }; + enum igc_tx_buffer_type buffer_type; unsigned long start; /* when the tstamp request started (jiffies) */ u32 mask; /* _TSYNCTXCTL_TXTT_{X} bit for this request */ u32 regl; /* which TXSTMPL_{X} register should be used */ u32 regh; /* which TXSTMPH_{X} register should be used */ u32 flags; /* flags that should be added to the tx_buffer */ + u8 xsk_queue_index; /* Tx queue which requesting timestamp */ + struct xsk_tx_metadata_compl xsk_meta; /* ref to xsk Tx metadata */ }; struct igc_inline_rx_tstamps { @@ -323,6 +356,9 @@ void igc_disable_tx_ring(struct igc_ring *ring); void igc_enable_tx_ring(struct igc_ring *ring); int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags); +/* AF_XDP TX metadata operations */ +extern const struct xsk_tx_metadata_ops igc_xsk_tx_metadata_ops; + /* igc_dump declarations */ void igc_rings_dump(struct igc_adapter *adapter); void igc_regs_dump(struct igc_adapter *adapter); @@ -508,32 +544,6 @@ enum igc_boards { #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGC_MAX_DATA_PER_TXD) #define DESC_NEEDED (MAX_SKB_FRAGS + 4) -enum igc_tx_buffer_type { - IGC_TX_BUFFER_TYPE_SKB, - IGC_TX_BUFFER_TYPE_XDP, - IGC_TX_BUFFER_TYPE_XSK, -}; - -/* wrapper around a pointer to a socket buffer, - * so a DMA handle can be stored along with the buffer - */ -struct igc_tx_buffer { - union igc_adv_tx_desc *next_to_watch; - unsigned long time_stamp; - enum igc_tx_buffer_type type; - union { - struct sk_buff *skb; - struct xdp_frame *xdpf; - }; - unsigned int bytecount; - u16 gso_segs; - __be16 protocol; - - DEFINE_DMA_UNMAP_ADDR(dma); - DEFINE_DMA_UNMAP_LEN(len); - u32 tx_flags; -}; - struct igc_rx_buffer { union { struct { @@ -557,6 +567,13 @@ struct igc_xdp_buff { struct igc_inline_rx_tstamps *rx_ts; /* data indication bit IGC_RXDADV_STAT_TSIP */ }; +struct igc_metadata_request { + struct igc_tx_buffer *tx_buffer; + struct xsk_tx_metadata *meta; + struct igc_ring *tx_ring; + u32 cmd_type; +}; + struct igc_q_vector { struct igc_adapter *adapter; /* backlink */ void __iomem *itr_register; diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index 1a64f1ca6ca8..f2c4f1966bb0 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -1711,21 +1711,6 @@ static int igc_ethtool_set_eee(struct net_device *netdev, return 0; } -static int igc_ethtool_begin(struct net_device *netdev) -{ - struct igc_adapter *adapter = netdev_priv(netdev); - - pm_runtime_get_sync(&adapter->pdev->dev); - return 0; -} - -static void igc_ethtool_complete(struct net_device *netdev) -{ - struct igc_adapter *adapter = netdev_priv(netdev); - - pm_runtime_put(&adapter->pdev->dev); -} - static int igc_ethtool_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { @@ -2025,8 +2010,6 @@ static const struct ethtool_ops igc_ethtool_ops = { .set_priv_flags = igc_ethtool_set_priv_flags, .get_eee = igc_ethtool_get_eee, .set_eee = 
igc_ethtool_set_eee, - .begin = igc_ethtool_begin, - .complete = igc_ethtool_complete, .get_link_ksettings = igc_ethtool_get_link_ksettings, .set_link_ksettings = igc_ethtool_set_link_ksettings, .self_test = igc_ethtool_diag_test, diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 4d975d620a8e..b5bcabab7a1d 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -2712,8 +2712,7 @@ static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring, net_prefetch(xdp->data_meta); - skb = __napi_alloc_skb(&ring->q_vector->napi, totalsize, - GFP_ATOMIC | __GFP_NOWARN); + skb = napi_alloc_skb(&ring->q_vector->napi, totalsize); if (unlikely(!skb)) return NULL; @@ -2874,6 +2873,89 @@ static void igc_update_tx_stats(struct igc_q_vector *q_vector, q_vector->tx.total_packets += packets; } +static void igc_xsk_request_timestamp(void *_priv) +{ + struct igc_metadata_request *meta_req = _priv; + struct igc_ring *tx_ring = meta_req->tx_ring; + struct igc_tx_timestamp_request *tstamp; + u32 tx_flags = IGC_TX_FLAGS_TSTAMP; + struct igc_adapter *adapter; + unsigned long lock_flags; + bool found = false; + int i; + + if (test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags)) { + adapter = netdev_priv(tx_ring->netdev); + + spin_lock_irqsave(&adapter->ptp_tx_lock, lock_flags); + + /* Search for available tstamp regs */ + for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) { + tstamp = &adapter->tx_tstamp[i]; + + /* tstamp->skb and tstamp->xsk_tx_buffer are in union. + * When tstamp->skb is equal to NULL, + * tstamp->xsk_tx_buffer is equal to NULL as well. + * This condition means that the particular tstamp reg + * is not occupied by other packet. + */ + if (!tstamp->skb) { + found = true; + break; + } + } + + /* Return if no available tstamp regs */ + if (!found) { + adapter->tx_hwtstamp_skipped++; + spin_unlock_irqrestore(&adapter->ptp_tx_lock, + lock_flags); + return; + } + + tstamp->start = jiffies; + tstamp->xsk_queue_index = tx_ring->queue_index; + tstamp->xsk_tx_buffer = meta_req->tx_buffer; + tstamp->buffer_type = IGC_TX_BUFFER_TYPE_XSK; + + /* Hold the transmit completion until timestamp is ready */ + meta_req->tx_buffer->xsk_pending_ts = true; + + /* Keep the pointer to tx_timestamp, which is located in XDP + * metadata area. It is the location to store the value of + * tx hardware timestamp. + */ + xsk_tx_metadata_to_compl(meta_req->meta, &tstamp->xsk_meta); + + /* Set timestamp bit based on the _TSTAMP(_X) bit. 
*/ + tx_flags |= tstamp->flags; + meta_req->cmd_type |= IGC_SET_FLAG(tx_flags, + IGC_TX_FLAGS_TSTAMP, + (IGC_ADVTXD_MAC_TSTAMP)); + meta_req->cmd_type |= IGC_SET_FLAG(tx_flags, + IGC_TX_FLAGS_TSTAMP_1, + (IGC_ADVTXD_TSTAMP_REG_1)); + meta_req->cmd_type |= IGC_SET_FLAG(tx_flags, + IGC_TX_FLAGS_TSTAMP_2, + (IGC_ADVTXD_TSTAMP_REG_2)); + meta_req->cmd_type |= IGC_SET_FLAG(tx_flags, + IGC_TX_FLAGS_TSTAMP_3, + (IGC_ADVTXD_TSTAMP_REG_3)); + + spin_unlock_irqrestore(&adapter->ptp_tx_lock, lock_flags); + } +} + +static u64 igc_xsk_fill_timestamp(void *_priv) +{ + return *(u64 *)_priv; +} + +const struct xsk_tx_metadata_ops igc_xsk_tx_metadata_ops = { + .tmo_request_timestamp = igc_xsk_request_timestamp, + .tmo_fill_timestamp = igc_xsk_fill_timestamp, +}; + static void igc_xdp_xmit_zc(struct igc_ring *ring) { struct xsk_buff_pool *pool = ring->xsk_pool; @@ -2895,24 +2977,34 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring) budget = igc_desc_unused(ring); while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) { - u32 cmd_type, olinfo_status; + struct igc_metadata_request meta_req; + struct xsk_tx_metadata *meta = NULL; struct igc_tx_buffer *bi; + u32 olinfo_status; dma_addr_t dma; - cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | - IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | - xdp_desc.len; + meta_req.cmd_type = IGC_ADVTXD_DTYP_DATA | + IGC_ADVTXD_DCMD_DEXT | + IGC_ADVTXD_DCMD_IFCS | + IGC_TXD_DCMD | xdp_desc.len; olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT; dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr); + meta = xsk_buff_get_metadata(pool, xdp_desc.addr); xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len); + bi = &ring->tx_buffer_info[ntu]; + + meta_req.tx_ring = ring; + meta_req.tx_buffer = bi; + meta_req.meta = meta; + xsk_tx_metadata_request(meta, &igc_xsk_tx_metadata_ops, + &meta_req); tx_desc = IGC_TX_DESC(ring, ntu); - tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.cmd_type_len = cpu_to_le32(meta_req.cmd_type); tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); tx_desc->read.buffer_addr = cpu_to_le64(dma); - bi = &ring->tx_buffer_info[ntu]; bi->type = IGC_TX_BUFFER_TYPE_XSK; bi->protocol = 0; bi->bytecount = xdp_desc.len; @@ -2975,6 +3067,13 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD))) break; + /* Hold the completions while there's a pending tx hardware + * timestamp request from XDP Tx metadata. + */ + if (tx_buffer->type == IGC_TX_BUFFER_TYPE_XSK && + tx_buffer->xsk_pending_ts) + break; + /* clear next_to_watch to prevent false hangs */ tx_buffer->next_to_watch = NULL; @@ -5176,7 +5275,7 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu) igc_down(adapter); netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); if (netif_running(netdev)) igc_up(adapter); @@ -5930,15 +6029,6 @@ static int __igc_open(struct net_device *netdev, bool resuming) if (err) goto err_req_irq; - /* Notify the stack of the actual queue counts. 
*/ - err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); - if (err) - goto err_set_queues; - - err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); - if (err) - goto err_set_queues; - clear_bit(__IGC_DOWN, &adapter->state); for (i = 0; i < adapter->num_q_vectors; i++) @@ -5959,8 +6049,6 @@ static int __igc_open(struct net_device *netdev, bool resuming) return IGC_SUCCESS; -err_set_queues: - igc_free_irq(adapter); err_req_irq: igc_release_hw_control(adapter); igc_power_down_phy_copper_base(&adapter->hw); @@ -5977,6 +6065,17 @@ err_setup_tx: int igc_open(struct net_device *netdev) { + struct igc_adapter *adapter = netdev_priv(netdev); + int err; + + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_queues(netdev, adapter->num_tx_queues, + adapter->num_rx_queues); + if (err) { + netdev_err(netdev, "error setting real queue count\n"); + return err; + } + return __igc_open(netdev, false); } @@ -6803,6 +6902,7 @@ static int igc_probe(struct pci_dev *pdev, netdev->netdev_ops = &igc_netdev_ops; netdev->xdp_metadata_ops = &igc_xdp_metadata_ops; + netdev->xsk_tx_metadata_ops = &igc_xsk_tx_metadata_ops; igc_ethtool_set_ops(netdev); netdev->watchdog_timeo = 5 * HZ; @@ -6928,8 +7028,6 @@ static int igc_probe(struct pci_dev *pdev, device_set_wakeup_enable(&adapter->pdev->dev, adapter->flags & IGC_FLAG_WOL_SUPPORTED); - igc_ptp_init(adapter); - igc_tsn_clear_schedule(adapter); /* reset the hardware with the new settings */ @@ -6951,6 +7049,9 @@ static int igc_probe(struct pci_dev *pdev, /* Check if Media Autosense is enabled */ adapter->ei = *ei; + /* do hw tstamp init after resetting */ + igc_ptp_init(adapter); + /* print pcie link status and MAC address */ pcie_print_link_status(pdev); netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr); @@ -7108,8 +7209,7 @@ static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake, return 0; } -#ifdef CONFIG_PM -static int __maybe_unused igc_runtime_suspend(struct device *dev) +static int igc_runtime_suspend(struct device *dev) { return __igc_shutdown(to_pci_dev(dev), NULL, 1); } @@ -7144,7 +7244,7 @@ static void igc_deliver_wake_packet(struct net_device *netdev) netif_rx(skb); } -static int __maybe_unused igc_resume(struct device *dev) +static int igc_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); @@ -7186,23 +7286,21 @@ static int __maybe_unused igc_resume(struct device *dev) wr32(IGC_WUS, ~0); - rtnl_lock(); - if (!err && netif_running(netdev)) + if (netif_running(netdev)) { err = __igc_open(netdev, true); - - if (!err) - netif_device_attach(netdev); - rtnl_unlock(); + if (!err) + netif_device_attach(netdev); + } return err; } -static int __maybe_unused igc_runtime_resume(struct device *dev) +static int igc_runtime_resume(struct device *dev) { return igc_resume(dev); } -static int __maybe_unused igc_suspend(struct device *dev) +static int igc_suspend(struct device *dev) { return __igc_shutdown(to_pci_dev(dev), NULL, 0); } @@ -7217,7 +7315,6 @@ static int __maybe_unused igc_runtime_idle(struct device *dev) return -EBUSY; } -#endif /* CONFIG_PM */ static void igc_shutdown(struct pci_dev *pdev) { @@ -7332,22 +7429,16 @@ static const struct pci_error_handlers igc_err_handler = { .resume = igc_io_resume, }; -#ifdef CONFIG_PM -static const struct dev_pm_ops igc_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume) - SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume, - igc_runtime_idle) -}; -#endif +static 
_DEFINE_DEV_PM_OPS(igc_pm_ops, igc_suspend, igc_resume, + igc_runtime_suspend, igc_runtime_resume, + igc_runtime_idle); static struct pci_driver igc_driver = { .name = igc_driver_name, .id_table = igc_pci_tbl, .probe = igc_probe, .remove = igc_remove, -#ifdef CONFIG_PM - .driver.pm = &igc_pm_ops, -#endif + .driver.pm = pm_ptr(&igc_pm_ops), .shutdown = igc_shutdown, .err_handler = &igc_err_handler, }; diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index 885faaa7b9de..1bb026232efc 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -11,6 +11,7 @@ #include <linux/ktime.h> #include <linux/delay.h> #include <linux/iopoll.h> +#include <net/xdp_sock_drv.h> #define INCVALUE_MASK 0x7fffffff #define ISGN 0x80000000 @@ -545,6 +546,30 @@ static void igc_ptp_enable_rx_timestamp(struct igc_adapter *adapter) wr32(IGC_TSYNCRXCTL, val); } +static void igc_ptp_free_tx_buffer(struct igc_adapter *adapter, + struct igc_tx_timestamp_request *tstamp) +{ + if (tstamp->buffer_type == IGC_TX_BUFFER_TYPE_XSK) { + /* Release the transmit completion */ + tstamp->xsk_tx_buffer->xsk_pending_ts = false; + + /* Note: tstamp->skb and tstamp->xsk_tx_buffer are in union. + * By setting tstamp->xsk_tx_buffer to NULL, tstamp->skb will + * become NULL as well. + */ + tstamp->xsk_tx_buffer = NULL; + tstamp->buffer_type = 0; + + /* Trigger txrx interrupt for transmit completion */ + igc_xsk_wakeup(adapter->netdev, tstamp->xsk_queue_index, 0); + + return; + } + + dev_kfree_skb_any(tstamp->skb); + tstamp->skb = NULL; +} + static void igc_ptp_clear_tx_tstamp(struct igc_adapter *adapter) { unsigned long flags; @@ -555,8 +580,8 @@ static void igc_ptp_clear_tx_tstamp(struct igc_adapter *adapter) for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) { struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i]; - dev_kfree_skb_any(tstamp->skb); - tstamp->skb = NULL; + if (tstamp->skb) + igc_ptp_free_tx_buffer(adapter, tstamp); } spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); @@ -657,8 +682,9 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter, static void igc_ptp_tx_timeout(struct igc_adapter *adapter, struct igc_tx_timestamp_request *tstamp) { - dev_kfree_skb_any(tstamp->skb); - tstamp->skb = NULL; + if (tstamp->skb) + igc_ptp_free_tx_buffer(adapter, tstamp); + adapter->tx_hwtstamp_timeouts++; netdev_warn(adapter->netdev, "Tx timestamp timeout\n"); @@ -729,10 +755,21 @@ static void igc_ptp_tx_reg_to_stamp(struct igc_adapter *adapter, shhwtstamps.hwtstamp = ktime_add_ns(shhwtstamps.hwtstamp, adjust); - tstamp->skb = NULL; + /* Copy the tx hardware timestamp into xdp metadata or skb */ + if (tstamp->buffer_type == IGC_TX_BUFFER_TYPE_XSK) { + struct xsk_buff_pool *xsk_pool; + + xsk_pool = adapter->tx_ring[tstamp->xsk_queue_index]->xsk_pool; + if (xsk_pool && xp_tx_metadata_enabled(xsk_pool)) { + xsk_tx_metadata_complete(&tstamp->xsk_meta, + &igc_xsk_tx_metadata_ops, + &shhwtstamps.hwtstamp); + } + } else { + skb_tstamp_tx(skb, &shhwtstamps); + } - skb_tstamp_tx(skb, &shhwtstamps); - dev_kfree_skb_any(skb); + igc_ptp_free_tx_buffer(adapter, tstamp); } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index f985252c8c8d..094653e81b97 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6847,7 +6847,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) netdev->mtu, new_mtu); /* must 
set new MTU before calling down or up */ - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); if (netif_running(netdev)) ixgbe_reinit_locked(adapter); @@ -6974,7 +6974,7 @@ int ixgbe_close(struct net_device *netdev) return 0; } -static int __maybe_unused ixgbe_resume(struct device *dev_d) +static int ixgbe_resume(struct device *dev_d) { struct pci_dev *pdev = to_pci_dev(dev_d); struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); @@ -7082,7 +7082,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) return 0; } -static int __maybe_unused ixgbe_suspend(struct device *dev_d) +static int ixgbe_suspend(struct device *dev_d) { struct pci_dev *pdev = to_pci_dev(dev_d); int retval; @@ -10061,15 +10061,10 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev, if (!br_spec) return -EINVAL; - nla_for_each_nested(attr, br_spec, rem) { - int status; - __u16 mode; + nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) { + __u16 mode = nla_get_u16(attr); + int status = ixgbe_configure_bridge_mode(adapter, mode); - if (nla_type(attr) != IFLA_BRIDGE_MODE) - continue; - - mode = nla_get_u16(attr); - status = ixgbe_configure_bridge_mode(adapter, mode); if (status) return status; @@ -11588,14 +11583,14 @@ static const struct pci_error_handlers ixgbe_err_handler = { .resume = ixgbe_io_resume, }; -static SIMPLE_DEV_PM_OPS(ixgbe_pm_ops, ixgbe_suspend, ixgbe_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(ixgbe_pm_ops, ixgbe_suspend, ixgbe_resume); static struct pci_driver ixgbe_driver = { .name = ixgbe_driver_name, .id_table = ixgbe_pci_tbl, .probe = ixgbe_probe, .remove = ixgbe_remove, - .driver.pm = &ixgbe_pm_ops, + .driver.pm = pm_sleep_ptr(&ixgbe_pm_ops), .shutdown = ixgbe_shutdown, .sriov_configure = ixgbe_pci_sriov_configure, .err_handler = &ixgbe_err_handler diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index ed440dd0c4f9..897fe357b65b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -2179,7 +2179,6 @@ enum { #define IXGBE_PCI_LINK_SPEED_5000 0x2 #define IXGBE_PCI_LINK_SPEED_8000 0x3 #define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E -#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 #define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005 #define IXGBE_PCIDEVCTRL2_TIMEO_MASK 0xf diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index d34d715c59eb..397cb773fabb 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -220,8 +220,7 @@ static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring, net_prefetch(xdp->data_meta); /* allocate a skb to store the frags */ - skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize, - GFP_ATOMIC | __GFP_NOWARN); + skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize); if (unlikely(!skb)) return NULL; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 9c960017a6de..b938dc06045d 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -4292,7 +4292,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) netdev->mtu, new_mtu); /* must set new MTU before calling down or up */ - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); if (netif_running(netdev)) ixgbevf_reinit_locked(adapter); @@ -4300,7 +4300,7 @@ static int 
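The ixgbe/ixgbevf power-management hunks above (and the igb/igc ones earlier) drop the __maybe_unused annotations and the #ifdef CONFIG_PM blocks because the DEFINE_SIMPLE_DEV_PM_OPS()/_DEFINE_DEV_PM_OPS() macros together with pm_sleep_ptr()/pm_ptr() always reference the callbacks; when the config option is off the pointers resolve to NULL and the now-dead functions are discarded by the compiler instead of triggering unused warnings. A minimal sketch of the sleep-only variant for a PCI driver; the demo_* names are placeholders:

#include <linux/pci.h>
#include <linux/pm.h>

static int demo_suspend(struct device *dev)
{
	/* Quiesce the device; no __maybe_unused annotation is needed. */
	return 0;
}

static int demo_resume(struct device *dev)
{
	/* Re-initialize the device after system resume. */
	return 0;
}

/* Fills the system-sleep slots (suspend/resume, freeze/thaw,
 * poweroff/restore) of a dev_pm_ops with the two callbacks above.
 */
static DEFINE_SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

static struct pci_driver demo_driver = {
	.name		= "demo",
	/* NULL when CONFIG_PM_SLEEP=n; the unreferenced ops and callbacks
	 * are then eliminated as dead code.
	 */
	.driver.pm	= pm_sleep_ptr(&demo_pm_ops),
	/* probe/remove/id_table omitted from this sketch */
};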
ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) return 0; } -static int __maybe_unused ixgbevf_suspend(struct device *dev_d) +static int ixgbevf_suspend(struct device *dev_d) { struct net_device *netdev = dev_get_drvdata(dev_d); struct ixgbevf_adapter *adapter = netdev_priv(netdev); @@ -4317,7 +4317,7 @@ static int __maybe_unused ixgbevf_suspend(struct device *dev_d) return 0; } -static int __maybe_unused ixgbevf_resume(struct device *dev_d) +static int ixgbevf_resume(struct device *dev_d) { struct pci_dev *pdev = to_pci_dev(dev_d); struct net_device *netdev = pci_get_drvdata(pdev); @@ -4854,7 +4854,7 @@ static const struct pci_error_handlers ixgbevf_err_handler = { .resume = ixgbevf_io_resume, }; -static SIMPLE_DEV_PM_OPS(ixgbevf_pm_ops, ixgbevf_suspend, ixgbevf_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(ixgbevf_pm_ops, ixgbevf_suspend, ixgbevf_resume); static struct pci_driver ixgbevf_driver = { .name = ixgbevf_driver_name, @@ -4863,7 +4863,7 @@ static struct pci_driver ixgbevf_driver = { .remove = ixgbevf_remove, /* Power Management Hooks */ - .driver.pm = &ixgbevf_pm_ops, + .driver.pm = pm_sleep_ptr(&ixgbevf_pm_ops), .shutdown = ixgbevf_shutdown, .err_handler = &ixgbevf_err_handler diff --git a/drivers/net/ethernet/intel/libeth/Kconfig b/drivers/net/ethernet/intel/libeth/Kconfig new file mode 100644 index 000000000000..480293b71dbc --- /dev/null +++ b/drivers/net/ethernet/intel/libeth/Kconfig @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (C) 2024 Intel Corporation + +config LIBETH + tristate + select PAGE_POOL + help + libeth is a common library containing routines shared between several + drivers, but not yet promoted to the generic kernel API. diff --git a/drivers/net/ethernet/intel/libeth/Makefile b/drivers/net/ethernet/intel/libeth/Makefile new file mode 100644 index 000000000000..cb99203d1dd2 --- /dev/null +++ b/drivers/net/ethernet/intel/libeth/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (C) 2024 Intel Corporation + +obj-$(CONFIG_LIBETH) += libeth.o + +libeth-objs += rx.o diff --git a/drivers/net/ethernet/intel/libeth/rx.c b/drivers/net/ethernet/intel/libeth/rx.c new file mode 100644 index 000000000000..6221b88c34ac --- /dev/null +++ b/drivers/net/ethernet/intel/libeth/rx.c @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2024 Intel Corporation */ + +#include <net/libeth/rx.h> + +/* Rx buffer management */ + +/** + * libeth_rx_hw_len - get the actual buffer size to be passed to HW + * @pp: &page_pool_params of the netdev to calculate the size for + * @max_len: maximum buffer size for a single descriptor + * + * Return: HW-writeable length per one buffer to pass it to the HW accounting: + * MTU the @dev has, HW required alignment, minimum and maximum allowed values, + * and system's page size. + */ +static u32 libeth_rx_hw_len(const struct page_pool_params *pp, u32 max_len) +{ + u32 len; + + len = READ_ONCE(pp->netdev->mtu) + LIBETH_RX_LL_LEN; + len = ALIGN(len, LIBETH_RX_BUF_STRIDE); + len = min3(len, ALIGN_DOWN(max_len ? : U32_MAX, LIBETH_RX_BUF_STRIDE), + pp->max_len); + + return len; +} + +/** + * libeth_rx_fq_create - create a PP with the default libeth settings + * @fq: buffer queue struct to fill + * @napi: &napi_struct covering this PP (no usage outside its poll loops) + * + * Return: %0 on success, -%errno on failure. 
+ */ +int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi) +{ + struct page_pool_params pp = { + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, + .order = LIBETH_RX_PAGE_ORDER, + .pool_size = fq->count, + .nid = fq->nid, + .dev = napi->dev->dev.parent, + .netdev = napi->dev, + .napi = napi, + .dma_dir = DMA_FROM_DEVICE, + .offset = LIBETH_SKB_HEADROOM, + }; + struct libeth_fqe *fqes; + struct page_pool *pool; + + /* HW-writeable / syncable length per one page */ + pp.max_len = LIBETH_RX_PAGE_LEN(pp.offset); + + /* HW-writeable length per buffer */ + fq->buf_len = libeth_rx_hw_len(&pp, fq->buf_len); + /* Buffer size to allocate */ + fq->truesize = roundup_pow_of_two(SKB_HEAD_ALIGN(pp.offset + + fq->buf_len)); + + pool = page_pool_create(&pp); + if (IS_ERR(pool)) + return PTR_ERR(pool); + + fqes = kvcalloc_node(fq->count, sizeof(*fqes), GFP_KERNEL, fq->nid); + if (!fqes) + goto err_buf; + + fq->fqes = fqes; + fq->pp = pool; + + return 0; + +err_buf: + page_pool_destroy(pool); + + return -ENOMEM; +} +EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_create, LIBETH); + +/** + * libeth_rx_fq_destroy - destroy a &page_pool created by libeth + * @fq: buffer queue to process + */ +void libeth_rx_fq_destroy(struct libeth_fq *fq) +{ + kvfree(fq->fqes); + page_pool_destroy(fq->pp); +} +EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_destroy, LIBETH); + +/** + * libeth_rx_recycle_slow - recycle a libeth page from the NAPI context + * @page: page to recycle + * + * To be used on exceptions or rare cases not requiring fast inline recycling. + */ +void libeth_rx_recycle_slow(struct page *page) +{ + page_pool_recycle_direct(page->pp, page); +} +EXPORT_SYMBOL_NS_GPL(libeth_rx_recycle_slow, LIBETH); + +/* Converting abstract packet type numbers into a software structure with + * the packet parameters to do O(1) lookup on Rx. + */ + +static const u16 libeth_rx_pt_xdp_oip[] = { + [LIBETH_RX_PT_OUTER_L2] = XDP_RSS_TYPE_NONE, + [LIBETH_RX_PT_OUTER_IPV4] = XDP_RSS_L3_IPV4, + [LIBETH_RX_PT_OUTER_IPV6] = XDP_RSS_L3_IPV6, +}; + +static const u16 libeth_rx_pt_xdp_iprot[] = { + [LIBETH_RX_PT_INNER_NONE] = XDP_RSS_TYPE_NONE, + [LIBETH_RX_PT_INNER_UDP] = XDP_RSS_L4_UDP, + [LIBETH_RX_PT_INNER_TCP] = XDP_RSS_L4_TCP, + [LIBETH_RX_PT_INNER_SCTP] = XDP_RSS_L4_SCTP, + [LIBETH_RX_PT_INNER_ICMP] = XDP_RSS_L4_ICMP, + [LIBETH_RX_PT_INNER_TIMESYNC] = XDP_RSS_TYPE_NONE, +}; + +static const u16 libeth_rx_pt_xdp_pl[] = { + [LIBETH_RX_PT_PAYLOAD_NONE] = XDP_RSS_TYPE_NONE, + [LIBETH_RX_PT_PAYLOAD_L2] = XDP_RSS_TYPE_NONE, + [LIBETH_RX_PT_PAYLOAD_L3] = XDP_RSS_TYPE_NONE, + [LIBETH_RX_PT_PAYLOAD_L4] = XDP_RSS_L4, +}; + +/** + * libeth_rx_pt_gen_hash_type - generate an XDP RSS hash type for a PT + * @pt: PT structure to evaluate + * + * Generates ```hash_type``` field with XDP RSS type values from the parsed + * packet parameters if they're obtained dynamically at runtime. 
+ */ +void libeth_rx_pt_gen_hash_type(struct libeth_rx_pt *pt) +{ + pt->hash_type = 0; + pt->hash_type |= libeth_rx_pt_xdp_oip[pt->outer_ip]; + pt->hash_type |= libeth_rx_pt_xdp_iprot[pt->inner_prot]; + pt->hash_type |= libeth_rx_pt_xdp_pl[pt->payload_layer]; +} +EXPORT_SYMBOL_NS_GPL(libeth_rx_pt_gen_hash_type, LIBETH); + +/* Module */ + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Common Ethernet library"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/intel/libie/Kconfig b/drivers/net/ethernet/intel/libie/Kconfig new file mode 100644 index 000000000000..33aff6bc8f81 --- /dev/null +++ b/drivers/net/ethernet/intel/libie/Kconfig @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (C) 2024 Intel Corporation + +config LIBIE + tristate + select LIBETH + help + libie (Intel Ethernet library) is a common library built on top of + libeth and containing vendor-specific routines shared between several + Intel Ethernet drivers. diff --git a/drivers/net/ethernet/intel/libie/Makefile b/drivers/net/ethernet/intel/libie/Makefile new file mode 100644 index 000000000000..bf42c5aeeedd --- /dev/null +++ b/drivers/net/ethernet/intel/libie/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (C) 2024 Intel Corporation + +obj-$(CONFIG_LIBIE) += libie.o + +libie-objs += rx.o diff --git a/drivers/net/ethernet/intel/libie/rx.c b/drivers/net/ethernet/intel/libie/rx.c new file mode 100644 index 000000000000..38201ee1e891 --- /dev/null +++ b/drivers/net/ethernet/intel/libie/rx.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2024 Intel Corporation */ + +#include <linux/net/intel/libie/rx.h> + +/* O(1) converting i40e/ice/iavf's 8/10-bit hardware packet type to a parsed + * bitfield struct. + */ + +/* A few supplementary definitions for when XDP hash types do not coincide + * with what can be generated from ptype definitions by means of preprocessor + * concatenation. 
+ */ +#define XDP_RSS_L3_L2 XDP_RSS_TYPE_NONE +#define XDP_RSS_L4_NONE XDP_RSS_TYPE_NONE +#define XDP_RSS_L4_TIMESYNC XDP_RSS_TYPE_NONE +#define XDP_RSS_TYPE_L3 XDP_RSS_TYPE_NONE +#define XDP_RSS_TYPE_L4 XDP_RSS_L4 + +#define LIBIE_RX_PT(oip, ofrag, tun, tp, tefr, iprot, pl) { \ + .outer_ip = LIBETH_RX_PT_OUTER_##oip, \ + .outer_frag = LIBETH_RX_PT_##ofrag, \ + .tunnel_type = LIBETH_RX_PT_TUNNEL_IP_##tun, \ + .tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_##tp, \ + .tunnel_end_frag = LIBETH_RX_PT_##tefr, \ + .inner_prot = LIBETH_RX_PT_INNER_##iprot, \ + .payload_layer = LIBETH_RX_PT_PAYLOAD_##pl, \ + .hash_type = XDP_RSS_L3_##oip | \ + XDP_RSS_L4_##iprot | \ + XDP_RSS_TYPE_##pl, \ + } + +#define LIBIE_RX_PT_UNUSED { } + +#define __LIBIE_RX_PT_L2(iprot, pl) \ + LIBIE_RX_PT(L2, NOT_FRAG, NONE, NONE, NOT_FRAG, iprot, pl) +#define LIBIE_RX_PT_L2 __LIBIE_RX_PT_L2(NONE, L2) +#define LIBIE_RX_PT_TS __LIBIE_RX_PT_L2(TIMESYNC, L2) +#define LIBIE_RX_PT_L3 __LIBIE_RX_PT_L2(NONE, L3) + +#define LIBIE_RX_PT_IP_FRAG(oip) \ + LIBIE_RX_PT(IPV##oip, FRAG, NONE, NONE, NOT_FRAG, NONE, L3) +#define LIBIE_RX_PT_IP_L3(oip, tun, teprot, tefr) \ + LIBIE_RX_PT(IPV##oip, NOT_FRAG, tun, teprot, tefr, NONE, L3) +#define LIBIE_RX_PT_IP_L4(oip, tun, teprot, iprot) \ + LIBIE_RX_PT(IPV##oip, NOT_FRAG, tun, teprot, NOT_FRAG, iprot, L4) + +#define LIBIE_RX_PT_IP_NOF(oip, tun, ver) \ + LIBIE_RX_PT_IP_L3(oip, tun, ver, NOT_FRAG), \ + LIBIE_RX_PT_IP_L4(oip, tun, ver, UDP), \ + LIBIE_RX_PT_UNUSED, \ + LIBIE_RX_PT_IP_L4(oip, tun, ver, TCP), \ + LIBIE_RX_PT_IP_L4(oip, tun, ver, SCTP), \ + LIBIE_RX_PT_IP_L4(oip, tun, ver, ICMP) + +/* IPv oip --> tun --> IPv ver */ +#define LIBIE_RX_PT_IP_TUN_VER(oip, tun, ver) \ + LIBIE_RX_PT_IP_L3(oip, tun, ver, FRAG), \ + LIBIE_RX_PT_IP_NOF(oip, tun, ver) + +/* Non Tunneled IPv oip */ +#define LIBIE_RX_PT_IP_RAW(oip) \ + LIBIE_RX_PT_IP_FRAG(oip), \ + LIBIE_RX_PT_IP_NOF(oip, NONE, NONE) + +/* IPv oip --> tun --> { IPv4, IPv6 } */ +#define LIBIE_RX_PT_IP_TUN(oip, tun) \ + LIBIE_RX_PT_IP_TUN_VER(oip, tun, IPV4), \ + LIBIE_RX_PT_IP_TUN_VER(oip, tun, IPV6) + +/* IPv oip --> GRE/NAT tun --> { x, IPv4, IPv6 } */ +#define LIBIE_RX_PT_IP_GRE(oip, tun) \ + LIBIE_RX_PT_IP_L3(oip, tun, NONE, NOT_FRAG), \ + LIBIE_RX_PT_IP_TUN(oip, tun) + +/* Non Tunneled IPv oip + * IPv oip --> { IPv4, IPv6 } + * IPv oip --> GRE/NAT --> { x, IPv4, IPv6 } + * IPv oip --> GRE/NAT --> MAC --> { x, IPv4, IPv6 } + * IPv oip --> GRE/NAT --> MAC/VLAN --> { x, IPv4, IPv6 } + */ +#define LIBIE_RX_PT_IP(oip) \ + LIBIE_RX_PT_IP_RAW(oip), \ + LIBIE_RX_PT_IP_TUN(oip, IP), \ + LIBIE_RX_PT_IP_GRE(oip, GRENAT), \ + LIBIE_RX_PT_IP_GRE(oip, GRENAT_MAC), \ + LIBIE_RX_PT_IP_GRE(oip, GRENAT_MAC_VLAN) + +/* Lookup table mapping for O(1) parsing */ +const struct libeth_rx_pt libie_rx_pt_lut[LIBIE_RX_PT_NUM] = { + /* L2 packet types */ + LIBIE_RX_PT_UNUSED, + LIBIE_RX_PT_L2, + LIBIE_RX_PT_TS, + LIBIE_RX_PT_L2, + LIBIE_RX_PT_UNUSED, + LIBIE_RX_PT_UNUSED, + LIBIE_RX_PT_L2, + LIBIE_RX_PT_L2, + LIBIE_RX_PT_UNUSED, + LIBIE_RX_PT_UNUSED, + LIBIE_RX_PT_L2, + LIBIE_RX_PT_UNUSED, + + LIBIE_RX_PT_L3, + LIBIE_RX_PT_L3, + LIBIE_RX_PT_L3, + LIBIE_RX_PT_L3, + LIBIE_RX_PT_L3, + LIBIE_RX_PT_L3, + LIBIE_RX_PT_L3, + LIBIE_RX_PT_L3, + LIBIE_RX_PT_L3, + LIBIE_RX_PT_L3, + + LIBIE_RX_PT_IP(4), + LIBIE_RX_PT_IP(6), +}; +EXPORT_SYMBOL_NS_GPL(libie_rx_pt_lut, LIBIE); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Intel(R) Ethernet common library"); +MODULE_IMPORT_NS(LIBETH); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/jme.c 
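The libie_rx_pt_lut table above is what the ice_txrx_lib.c hunks earlier in this diff consume: libie_rx_pt_parse() maps the raw hardware ptype to a struct libeth_rx_pt once, and libeth_rx_pt_has_hash()/libeth_rx_pt_set_hash() replace the per-driver ptype-to-PKT_HASH_TYPE switch statements. Below is a sketch of that consumer pattern, restricted to helpers that actually appear in this diff; the demo_* wrapper is illustrative only.

#include <linux/net/intel/libie/rx.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical Rx completion handling; only RSS hash delivery is shown. */
static void demo_rx_hash(struct net_device *netdev, struct sk_buff *skb,
			 u32 rss_hash, u16 hw_ptype)
{
	/* O(1) lookup: hardware ptype -> parsed packet-type descriptor. */
	struct libeth_rx_pt decoded = libie_rx_pt_parse(hw_ptype);

	/* Replaces the old NETIF_F_RXHASH feature test plus the per-driver
	 * ptype sanity checks.
	 */
	if (!libeth_rx_pt_has_hash(netdev, decoded))
		return;

	/* Sets the skb hash with the hash level derived from the parsed
	 * ptype (L2/L3/L4).
	 */
	libeth_rx_pt_set_hash(skb, rss_hash, decoded);
}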
b/drivers/net/ethernet/jme.c index 1732ec3c3dbd..b06e24562973 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -2301,7 +2301,7 @@ jme_change_mtu(struct net_device *netdev, int new_mtu) { struct jme_adapter *jme = netdev_priv(netdev); - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); netdev_update_features(netdev); jme_restart_rx_engine(jme); diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 1d5b7bb6380f..5352fee62d2b 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -519,7 +519,7 @@ ltq_etop_change_mtu(struct net_device *dev, int new_mtu) struct ltq_etop_priv *priv = netdev_priv(dev); unsigned long flags; - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); spin_lock_irqsave(&priv->lock, flags); ltq_etop_w32((ETOP_PLEN_UNDER << 16) | new_mtu, LTQ_ETOP_IGPLEN); diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c index 8bd4def3622e..07904a528f21 100644 --- a/drivers/net/ethernet/lantiq_xrx200.c +++ b/drivers/net/ethernet/lantiq_xrx200.c @@ -419,7 +419,7 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu) int curr_desc; int ret = 0; - net_dev->mtu = new_mtu; + WRITE_ONCE(net_dev->mtu, new_mtu); priv->rx_buf_size = xrx200_buffer_size(new_mtu); priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size); @@ -440,7 +440,7 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu) buff = ch_rx->rx_buff[ch_rx->dma.desc]; ret = xrx200_alloc_buf(ch_rx, netdev_alloc_frag); if (ret) { - net_dev->mtu = old_mtu; + WRITE_ONCE(net_dev->mtu, old_mtu); priv->rx_buf_size = xrx200_buffer_size(old_mtu); priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size); break; diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index f0bdc06d253d..f35ae2c88091 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2562,7 +2562,7 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) { struct mv643xx_eth_private *mp = netdev_priv(dev); - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); mv643xx_eth_recalc_skb_size(mp); tx_set_rate(mp, 1000000000, 16777216); diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 40a5f1431e4e..41894834fb53 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -3259,7 +3259,8 @@ static void mvneta_link_change(struct mvneta_port *pp) { u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); - phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP)); + phylink_pcs_change(&pp->phylink_pcs, + !!(gmac_stat & MVNETA_GMAC_LINK_UP)); } /* NAPI handler @@ -3860,7 +3861,7 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu) return -EINVAL; } - dev->mtu = mtu; + WRITE_ONCE(dev->mtu, mtu); if (!netif_running(dev)) { if (pp->bm_priv) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 23adf53c2aa1..e91486c48de3 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -1375,7 +1375,7 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu) } out_set: - dev->mtu = mtu; + WRITE_ONCE(dev->mtu, mtu); dev->wanted_features = dev->features; netdev_update_features(dev); @@ -3434,12 +3434,13 @@ static void mvpp2_isr_handle_ptp(struct mvpp2_port *port) mvpp2_isr_handle_ptp_queue(port, 1); } 
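Many of the hunks in this series convert plain `dev->mtu = new_mtu` stores inside .ndo_change_mtu handlers to WRITE_ONCE(). A minimal sketch of the idea is below, using a hypothetical "foo" driver (none of these names come from the patches themselves): the writer publishes the new MTU with WRITE_ONCE() so that lockless readers, which are expected to use READ_ONCE(dev->mtu), do not see a torn or compiler-mangled value.

#include <linux/netdevice.h>

/* Illustrative .ndo_change_mtu callback for a hypothetical driver. */
static int foo_change_mtu(struct net_device *dev, int new_mtu)
{
	/* The core has already validated new_mtu against dev->min_mtu/max_mtu. */
	WRITE_ONCE(dev->mtu, new_mtu);	/* pairs with READ_ONCE() on lockless readers */
	netdev_update_features(dev);
	return 0;
}

/* A lockless fast-path reader would then fetch the MTU like this. */
static unsigned int foo_rx_buf_len(const struct net_device *dev)
{
	return READ_ONCE(dev->mtu) + ETH_HLEN + ETH_FCS_LEN;
}

The conversion does not change behaviour on any existing architecture; it only documents and annotates the lockless access pattern so tools such as KCSAN do not flag the plain store/load pair as a data race.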
-static void mvpp2_isr_handle_link(struct mvpp2_port *port, bool link) +static void mvpp2_isr_handle_link(struct mvpp2_port *port, + struct phylink_pcs *pcs, bool link) { struct net_device *dev = port->dev; if (port->phylink) { - phylink_mac_change(port->phylink, link); + phylink_pcs_change(pcs, link); return; } @@ -3472,7 +3473,7 @@ static void mvpp2_isr_handle_xlg(struct mvpp2_port *port) if (val & MVPP22_XLG_INT_STAT_LINK) { val = readl(port->base + MVPP22_XLG_STATUS); link = (val & MVPP22_XLG_STATUS_LINK_UP); - mvpp2_isr_handle_link(port, link); + mvpp2_isr_handle_link(port, &port->pcs_xlg, link); } } @@ -3488,7 +3489,7 @@ static void mvpp2_isr_handle_gmac_internal(struct mvpp2_port *port) if (val & MVPP22_GMAC_INT_STAT_LINK) { val = readl(port->base + MVPP2_GMAC_STATUS0); link = (val & MVPP2_GMAC_STATUS0_LINK_UP); - mvpp2_isr_handle_link(port, link); + mvpp2_isr_handle_link(port, &port->pcs_gmac, link); } } } diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index 7c9faa714a10..549436efc204 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -1096,7 +1096,7 @@ static int octep_change_mtu(struct net_device *netdev, int new_mtu) true); if (!err) { oct->link_info.mtu = new_mtu; - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); } return err; diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c index dd49d0b8b494..7e6771c9cdbb 100644 --- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c @@ -881,7 +881,7 @@ static int octep_vf_change_mtu(struct net_device *netdev, int new_mtu) err = octep_vf_mbox_set_mtu(oct, new_mtu); if (!err) { oct->link_info.mtu = new_mtu; - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); } return err; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index b86f3224f0b7..27935c54b91b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -24,6 +24,8 @@ #define DRV_NAME "Marvell-CGX/RPM" #define DRV_STRING "Marvell CGX/RPM Driver" +#define CGX_RX_STAT_GLOBAL_INDEX 9 + static LIST_HEAD(cgx_list); /* Convert firmware speed encoding to user format(Mbps) */ @@ -701,6 +703,30 @@ u64 cgx_features_get(void *cgxd) return ((struct cgx *)cgxd)->hw_features; } +int cgx_stats_reset(void *cgxd, int lmac_id) +{ + struct cgx *cgx = cgxd; + int stat_id; + + if (!is_lmac_valid(cgx, lmac_id)) + return -ENODEV; + + for (stat_id = 0 ; stat_id < CGX_RX_STATS_COUNT; stat_id++) { + if (stat_id >= CGX_RX_STAT_GLOBAL_INDEX) + /* pass lmac as 0 for CGX_CMR_RX_STAT9-12 */ + cgx_write(cgx, 0, + (CGXX_CMRX_RX_STAT0 + (stat_id * 8)), 0); + else + cgx_write(cgx, lmac_id, + (CGXX_CMRX_RX_STAT0 + (stat_id * 8)), 0); + } + + for (stat_id = 0 ; stat_id < CGX_TX_STATS_COUNT; stat_id++) + cgx_write(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (stat_id * 8), 0); + + return 0; +} + static int cgx_set_fec_stats_count(struct cgx_link_user_info *linfo) { if (!linfo->fec) @@ -1788,6 +1814,7 @@ static struct mac_ops cgx_mac_ops = { .pfc_config = cgx_lmac_pfc_config, .mac_get_pfc_frm_cfg = cgx_lmac_get_pfc_frm_cfg, .mac_reset = cgx_lmac_reset, + .mac_stats_reset = cgx_stats_reset, }; static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id) diff --git 
a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h index 6f7d1dee5830..dc9ace30554a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h @@ -141,6 +141,7 @@ int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id); int cgx_lmac_evh_unregister(void *cgxd, int lmac_id); int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat); int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat); +int cgx_stats_reset(void *cgxd, int lmac_id); int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable); int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable); int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h index 0b4cba03f2e8..9ffc6790c513 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h @@ -132,6 +132,7 @@ struct mac_ops { /* FEC stats */ int (*get_fec_stats)(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp); + int (*mac_stats_reset)(void *cgxd, int lmac_id); }; struct cgx { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index eb2a20b5a0d0..4a77f6fe2622 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -174,6 +174,7 @@ M(CGX_FEC_STATS, 0x217, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \ M(CGX_SET_LINK_MODE, 0x218, cgx_set_link_mode, cgx_set_link_mode_req,\ cgx_set_link_mode_rsp) \ M(CGX_GET_PHY_FEC_STATS, 0x219, cgx_get_phy_fec_stats, msg_req, msg_rsp) \ +M(CGX_STATS_RST, 0x21A, cgx_stats_rst, msg_req, msg_rsp) \ M(CGX_FEATURES_GET, 0x21B, cgx_features_get, msg_req, \ cgx_features_info_msg) \ M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp) \ @@ -1213,10 +1214,8 @@ struct nix_bp_cfg_req { /* bpid_per_chan = 1 assigns separate bp id for each channel */ }; -/* PF can be mapped to either CGX or LBK interface, - * so maximum 64 channels are possible. 
- */ -#define NIX_MAX_BPID_CHAN 64 +/* Maximum channels any single NIX interface can have */ +#define NIX_MAX_BPID_CHAN 256 struct nix_bp_cfg_rsp { struct mbox_msghdr hdr; u16 chan_bpid[NIX_MAX_BPID_CHAN]; /* Channel and bpid mapping */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c index 76218f1cb459..1b34cf9c9703 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c @@ -38,6 +38,7 @@ static struct mac_ops rpm_mac_ops = { .pfc_config = rpm_lmac_pfc_config, .mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg, .mac_reset = rpm_lmac_reset, + .mac_stats_reset = rpm_stats_reset, }; static struct mac_ops rpm2_mac_ops = { @@ -70,6 +71,7 @@ static struct mac_ops rpm2_mac_ops = { .pfc_config = rpm_lmac_pfc_config, .mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg, .mac_reset = rpm_lmac_reset, + .mac_stats_reset = rpm_stats_reset, }; bool is_dev_rpm2(void *rpmd) @@ -443,6 +445,21 @@ int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat) return 0; } +int rpm_stats_reset(void *rpmd, int lmac_id) +{ + rpm_t *rpm = rpmd; + u64 cfg; + + if (!is_lmac_valid(rpm, lmac_id)) + return -ENODEV; + + cfg = rpm_read(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL); + cfg |= RPMX_CMD_CLEAR_TX | RPMX_CMD_CLEAR_RX | BIT_ULL(lmac_id); + rpm_write(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL, cfg); + + return 0; +} + u8 rpm_get_lmac_type(void *rpmd, int lmac_id) { rpm_t *rpm = rpmd; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h index b79cfbc6f877..34b11deb0f3c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h @@ -85,6 +85,8 @@ #define RPMX_MTI_STAT_STATN_CONTROL 0x10018 #define RPMX_MTI_STAT_DATA_HI_CDC 0x10038 #define RPMX_RSFEC_RX_CAPTURE BIT_ULL(27) +#define RPMX_CMD_CLEAR_RX BIT_ULL(30) +#define RPMX_CMD_CLEAR_TX BIT_ULL(31) #define RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_2 0x40050 #define RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_3 0x40058 #define RPMX_MTI_FCFECX_VL0_CCW_LO 0x38618 @@ -134,4 +136,5 @@ int rpm2_get_nr_lmacs(void *rpmd); bool is_dev_rpm2(void *rpmd); int rpm_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp); int rpm_lmac_reset(void *rpmd, int lmac_id, u8 pf_req_flr); +int rpm_stats_reset(void *rpmd, int lmac_id); #endif /* RPM_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index e9bf9231b018..266ecbc1b97a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -604,6 +604,35 @@ int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req, return rvu_lmac_get_stats(rvu, req, (void *)rsp); } +int rvu_mbox_handler_cgx_stats_rst(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + struct rvu_pfvf *parent_pf; + struct mac_ops *mac_ops; + u8 cgx_idx, lmac; + void *cgxd; + + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) + return LMAC_AF_ERR_PERM_DENIED; + + parent_pf = &rvu->pf[pf]; + /* To ensure reset cgx stats won't affect VF stats, + * check if it used by only PF interface. 
+ * If not, return + */ + if (parent_pf->cgx_users > 1) { + dev_info(rvu->dev, "CGX busy, could not reset statistics\n"); + return 0; + } + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac); + cgxd = rvu_cgx_pdata(cgx_idx, rvu); + mac_ops = get_mac_ops(cgxd); + + return mac_ops->mac_stats_reset(cgxd, lmac); +} + int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu, struct msg_req *req, struct cgx_fec_stats_rsp *rsp) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index 2500f5ba4f5a..881d704644fb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -999,12 +999,10 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp, u16 pcifunc; int ret, lf; - cmd_buf = memdup_user(buffer, count + 1); + cmd_buf = memdup_user_nul(buffer, count); if (IS_ERR(cmd_buf)) return -ENOMEM; - cmd_buf[count] = '\0'; - cmd_buf_tmp = strchr(cmd_buf, '\n'); if (cmd_buf_tmp) { *cmd_buf_tmp = '\0'; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c index 96c04f7d93f8..7498ab429963 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c @@ -1202,7 +1202,8 @@ static int rvu_af_dl_dwrr_mtu_validate(struct devlink *devlink, u32 id, } static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct rvu_devlink *rvu_dl = devlink_priv(devlink); struct rvu *rvu = rvu_dl->rvu; @@ -1256,7 +1257,8 @@ static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id, } static int rvu_af_npc_exact_feature_disable(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct rvu_devlink *rvu_dl = devlink_priv(devlink); struct rvu *rvu = rvu_dl->rvu; @@ -1310,7 +1312,8 @@ static int rvu_af_dl_npc_mcam_high_zone_percent_get(struct devlink *devlink, u32 } static int rvu_af_dl_npc_mcam_high_zone_percent_set(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct rvu_devlink *rvu_dl = devlink_priv(devlink); struct rvu *rvu = rvu_dl->rvu; @@ -1367,7 +1370,8 @@ static int rvu_af_dl_nix_maxlf_get(struct devlink *devlink, u32 id, } static int rvu_af_dl_nix_maxlf_set(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct rvu_devlink *rvu_dl = devlink_priv(devlink); struct rvu *rvu = rvu_dl->rvu; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c index c181e7aa9eb6..150635de2bd5 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c @@ -1187,6 +1187,8 @@ static int npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, action.pf_func = target; action.op = NIX_RX_ACTIONOP_UCAST; } + if (req->match_id) + action.match_id = req->match_id; } entry->action = *(u64 *)&action; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 7e16a341ec58..24fbbef265a6 100644 --- 
a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -363,6 +363,7 @@ struct otx2_flow_config { struct list_head flow_list; u32 dmacflt_max_flows; u16 max_flows; + refcount_t mark_flows; struct list_head flow_list_tc; bool ntuple; }; @@ -465,6 +466,7 @@ struct otx2_nic { #define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14) #define OTX2_FLAG_PTP_ONESTEP_SYNC BIT_ULL(15) #define OTX2_FLAG_ADPTV_INT_COAL_ENABLED BIT_ULL(16) +#define OTX2_FLAG_TC_MARK_ENABLED BIT_ULL(17) u64 flags; u64 *cq_op_addr; @@ -961,6 +963,7 @@ void otx2_get_mac_from_af(struct net_device *netdev); void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx); int otx2_config_pause_frm(struct otx2_nic *pfvf); void otx2_setup_segmentation(struct otx2_nic *pfvf); +int otx2_reset_mac_stats(struct otx2_nic *pfvf); /* RVU block related APIs */ int otx2_attach_npa_nix(struct otx2_nic *pfvf); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c index 4e1130496573..99ddf31269d9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c @@ -32,7 +32,8 @@ static int otx2_dl_mcam_count_validate(struct devlink *devlink, u32 id, } static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct otx2_devlink *otx2_dl = devlink_priv(devlink); struct otx2_nic *pfvf = otx2_dl->pfvf; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c index 97a71e9b8563..bc5819237ed7 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c @@ -252,6 +252,7 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf) pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT; + refcount_set(&flow_cfg->mark_flows, 1); return 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 3f46d5e0fb2e..f5bce3e326cc 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -67,7 +67,7 @@ static int otx2_change_mtu(struct net_device *netdev, int new_mtu) netdev_info(netdev, "Changing MTU from %d to %d\n", netdev->mtu, new_mtu); - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); if (if_up) err = otx2_open(netdev); @@ -450,7 +450,6 @@ static void otx2_pfvf_mbox_handler(struct work_struct *work) struct mbox_msghdr *msg = NULL; int offset, vf_idx, id, err; struct otx2_mbox_dev *mdev; - struct mbox_hdr *req_hdr; struct otx2_mbox *mbox; struct mbox *vf_mbox; struct otx2_nic *pf; @@ -461,9 +460,8 @@ static void otx2_pfvf_mbox_handler(struct work_struct *work) mbox = &pf->mbox_pfvf[0].mbox; mdev = &mbox->dev[vf_idx]; - req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); - offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN); + offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN); for (id = 0; id < vf_mbox->num_msgs; id++) { msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start + @@ -494,7 +492,6 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work) struct otx2_nic *pf = vf_mbox->pfvf; struct otx2_mbox_dev *mdev; int offset, id, vf_idx = 0; - struct mbox_hdr *rsp_hdr; struct mbox_msghdr *msg; struct otx2_mbox *mbox; @@ -502,8 +499,7 
@@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work) mbox = &pf->mbox_pfvf[0].mbox_up; mdev = &mbox->dev[vf_idx]; - rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); - offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); + offset = mbox->rx_start + ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN); for (id = 0; id < vf_mbox->up_num_msgs; id++) { msg = mdev->mbase + offset; @@ -1150,6 +1146,23 @@ static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable) return err; } +int otx2_reset_mac_stats(struct otx2_nic *pfvf) +{ + struct msg_req *req; + int err; + + mutex_lock(&pfvf->mbox.lock); + req = otx2_mbox_alloc_msg_cgx_stats_rst(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + + err = otx2_sync_mbox_msg(&pfvf->mbox); + mutex_unlock(&pfvf->mbox.lock); + return err; +} + static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable) { struct msg_req *msg; @@ -1873,9 +1886,17 @@ int otx2_open(struct net_device *netdev) vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START; for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) { irq_name = &pf->hw.irq_name[vec * NAME_SIZE]; + int name_len; - snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d", pf->netdev->name, - qidx); + name_len = snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d", + pf->netdev->name, qidx); + if (name_len >= NAME_SIZE) { + dev_err(pf->dev, + "RVUPF%d: IRQ registration failed for CQ%d, irq name is too long\n", + rvu_get_pf(pf->pcifunc), qidx); + err = -EINVAL; + goto err_free_cints; + } err = request_irq(pci_irq_vector(pf->pdev, vec), otx2_cq_intr_handler, 0, irq_name, @@ -3038,6 +3059,9 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) netdev->min_mtu = OTX2_MIN_MTU; netdev->max_mtu = otx2_get_max_mtu(pf); + /* reset CGX/RPM MAC stats */ + otx2_reset_mac_stats(pf); + err = register_netdev(netdev); if (err) { dev_err(dev, "Failed to register netdevice\n"); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c index f4655a8c0705..e63cc1eb6d89 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c @@ -511,7 +511,15 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic, nr_police++; break; case FLOW_ACTION_MARK: + if (act->mark & ~OTX2_RX_MATCH_ID_MASK) { + NL_SET_ERR_MSG_MOD(extack, "Bad flow mark, only 16 bit supported"); + return -EOPNOTSUPP; + } mark = act->mark; + req->match_id = mark & OTX2_RX_MATCH_ID_MASK; + req->op = NIX_RX_ACTION_DEFAULT; + nic->flags |= OTX2_FLAG_TC_MARK_ENABLED; + refcount_inc(&nic->flow_cfg->mark_flows); break; case FLOW_ACTION_RX_QUEUE_MAPPING: @@ -692,10 +700,6 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, u32 val; flow_rule_match_control(rule, &match); - if (match.mask->flags & FLOW_DIS_FIRST_FRAG) { - NL_SET_ERR_MSG_MOD(extack, "HW doesn't support frag first/later"); - return -EOPNOTSUPP; - } if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) { val = match.key->flags & FLOW_DIS_IS_FRAGMENT; @@ -713,6 +717,10 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, return -EOPNOTSUPP; } } + + if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT, + match.mask->flags, extack)) + return -EOPNOTSUPP; } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { @@ -1187,6 +1195,11 @@ static int otx2_tc_del_flow(struct otx2_nic *nic, return -EINVAL; } + /* Disable TC MARK flag if they are no rules with 
skbedit mark action */ + if (flow_node->req.match_id) + if (!refcount_dec_and_test(&flow_cfg->mark_flows)) + nic->flags &= ~OTX2_FLAG_TC_MARK_ENABLED; + if (flow_node->is_act_police) { __clear_bit(flow_node->rq, &nic->rq_bmap); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index f828d32737af..a16e9f244117 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -380,6 +380,9 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, if (pfvf->netdev->features & NETIF_F_RXCSUM) skb->ip_summed = CHECKSUM_UNNECESSARY; + if (pfvf->flags & OTX2_FLAG_TC_MARK_ENABLED) + skb->mark = parse->match_id; + skb_mark_for_recycle(skb); napi_gro_frags(napi); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h index a82ffca8ce1b..3f1d2655ff77 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h @@ -62,6 +62,9 @@ #define CQ_OP_STAT_OP_ERR 63 #define CQ_OP_STAT_CQ_ERR 46 +/* Packet mark mask */ +#define OTX2_RX_MATCH_ID_MASK 0x0000ffff + struct queue_stats { u64 bytes; u64 pkts; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index cf0aa16d7540..99fcc5661674 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -456,7 +456,7 @@ static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu) netdev_info(netdev, "Changing MTU from %d to %d\n", netdev->mtu, new_mtu); - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); if (if_up) err = otx2vf_open(netdev); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c index 1723e9912ae0..070711df612e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c @@ -545,6 +545,20 @@ otx2_qos_sw_create_leaf_node(struct otx2_nic *pfvf, return node; } +static struct otx2_qos_node +*otx2_sw_node_find_by_qid(struct otx2_nic *pfvf, u16 qid) +{ + struct otx2_qos_node *node = NULL; + int bkt; + + hash_for_each(pfvf->qos.qos_hlist, bkt, node, hlist) { + if (node->qid == qid) + break; + } + + return node; +} + static struct otx2_qos_node * otx2_sw_node_find(struct otx2_nic *pfvf, u32 classid) { @@ -917,6 +931,7 @@ static void otx2_qos_enadis_sq(struct otx2_nic *pfvf, otx2_qos_disable_sq(pfvf, qid); pfvf->qos.qid_to_sqmap[qid] = node->schq; + otx2_qos_txschq_config(pfvf, node); otx2_qos_enable_sq(pfvf, qid); } @@ -1475,13 +1490,45 @@ out: return ret; } +static int otx2_qos_cur_leaf_nodes(struct otx2_nic *pfvf) +{ + int last = find_last_bit(pfvf->qos.qos_sq_bmap, pfvf->hw.tc_tx_queues); + + return last == pfvf->hw.tc_tx_queues ? 
0 : last + 1; +} + +static void otx2_reset_qdisc(struct net_device *dev, u16 qid) +{ + struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, qid); + struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); + + if (!qdisc) + return; + + spin_lock_bh(qdisc_lock(qdisc)); + qdisc_reset(qdisc); + spin_unlock_bh(qdisc_lock(qdisc)); +} + +static void otx2_cfg_smq(struct otx2_nic *pfvf, struct otx2_qos_node *node, + int qid) +{ + struct otx2_qos_node *tmp; + + list_for_each_entry(tmp, &node->child_schq_list, list) + if (tmp->level == NIX_TXSCH_LVL_MDQ) { + otx2_qos_txschq_config(pfvf, tmp); + pfvf->qos.qid_to_sqmap[qid] = tmp->schq; + } +} + static int otx2_qos_leaf_del(struct otx2_nic *pfvf, u16 *classid, struct netlink_ext_ack *extack) { struct otx2_qos_node *node, *parent; int dwrr_del_node = false; + u16 qid, moved_qid; u64 prio; - u16 qid; netdev_dbg(pfvf->netdev, "TC_HTB_LEAF_DEL classid %04x\n", *classid); @@ -1517,6 +1564,37 @@ static int otx2_qos_leaf_del(struct otx2_nic *pfvf, u16 *classid, if (!parent->child_static_cnt) parent->max_static_prio = 0; + moved_qid = otx2_qos_cur_leaf_nodes(pfvf); + + /* last node just deleted */ + if (moved_qid == 0 || moved_qid == qid) + return 0; + + moved_qid--; + + node = otx2_sw_node_find_by_qid(pfvf, moved_qid); + if (!node) + return 0; + + /* stop traffic to the old queue and disable + * SQ associated with it + */ + node->qid = OTX2_QOS_QID_INNER; + __clear_bit(moved_qid, pfvf->qos.qos_sq_bmap); + otx2_qos_disable_sq(pfvf, moved_qid); + + otx2_reset_qdisc(pfvf->netdev, pfvf->hw.tx_queues + moved_qid); + + /* enable SQ associated with qid and + * update the node + */ + otx2_cfg_smq(pfvf, node, qid); + + otx2_qos_enable_sq(pfvf, qid); + __set_bit(qid, pfvf->qos.qos_sq_bmap); + node->qid = qid; + + *classid = node->classid; return 0; } diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c index 8b9455d8a4f7..418101a93149 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c @@ -229,6 +229,10 @@ static int prestera_flower_parse(struct prestera_flow_block *block, flow_rule_match_control(f_rule, &match); addr_type = match.key->addr_type; + + if (flow_rule_has_control_flags(match.mask->flags, + f->common.extack)) + return -EOPNOTSUPP; } if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_BASIC)) { diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c index fc6f7d2746e8..197198ba61b1 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c @@ -419,15 +419,6 @@ struct prestera_msg_vtcam_destroy_req { __le32 vtcam_id; }; -struct prestera_msg_vtcam_rule_add_req { - struct prestera_msg_cmd cmd; - __le32 key[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX]; - __le32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX]; - __le32 vtcam_id; - __le32 prio; - __le32 n_act; -}; - struct prestera_msg_vtcam_rule_del_req { struct prestera_msg_cmd cmd; __le32 vtcam_id; @@ -471,6 +462,16 @@ struct prestera_msg_acl_action { }; }; +struct prestera_msg_vtcam_rule_add_req { + struct prestera_msg_cmd cmd; + __le32 key[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX]; + __le32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX]; + __le32 vtcam_id; + __le32 prio; + __le32 n_act; + struct prestera_msg_acl_action actions_msg[] __counted_by_le(n_act); +}; + struct prestera_msg_counter_req { struct prestera_msg_cmd cmd; __le32 client; @@ 
-702,12 +703,6 @@ struct prestera_msg_flood_domain_destroy_req { __le32 flood_domain_idx; }; -struct prestera_msg_flood_domain_ports_set_req { - struct prestera_msg_cmd cmd; - __le32 flood_domain_idx; - __le32 ports_num; -}; - struct prestera_msg_flood_domain_ports_reset_req { struct prestera_msg_cmd cmd; __le32 flood_domain_idx; @@ -725,6 +720,13 @@ struct prestera_msg_flood_domain_port { __le16 port_type; }; +struct prestera_msg_flood_domain_ports_set_req { + struct prestera_msg_cmd cmd; + __le32 flood_domain_idx; + __le32 ports_num; + struct prestera_msg_flood_domain_port ports[] __counted_by_le(ports_num); +}; + struct prestera_msg_mdb_create_req { struct prestera_msg_cmd cmd; __le32 flood_domain_idx; @@ -1371,23 +1373,18 @@ int prestera_hw_vtcam_rule_add(struct prestera_switch *sw, struct prestera_acl_hw_action_info *act, u8 n_act, u32 *rule_id) { - struct prestera_msg_acl_action *actions_msg; struct prestera_msg_vtcam_rule_add_req *req; struct prestera_msg_vtcam_resp resp; - void *buff; - u32 size; + size_t size; int err; u8 i; - size = sizeof(*req) + sizeof(*actions_msg) * n_act; - - buff = kzalloc(size, GFP_KERNEL); - if (!buff) + size = struct_size(req, actions_msg, n_act); + req = kzalloc(size, GFP_KERNEL); + if (!req) return -ENOMEM; - req = buff; req->n_act = __cpu_to_le32(n_act); - actions_msg = buff + sizeof(*req); /* put acl matches into the message */ memcpy(req->key, key, sizeof(req->key)); @@ -1395,7 +1392,7 @@ int prestera_hw_vtcam_rule_add(struct prestera_switch *sw, /* put acl actions into the message */ for (i = 0; i < n_act; i++) { - err = prestera_acl_rule_add_put_action(&actions_msg[i], + err = prestera_acl_rule_add_put_action(&req->actions_msg[i], &act[i]); if (err) goto free_buff; @@ -1411,7 +1408,7 @@ int prestera_hw_vtcam_rule_add(struct prestera_switch *sw, *rule_id = __le32_to_cpu(resp.rule_id); free_buff: - kfree(buff); + kfree(req); return err; } @@ -2461,14 +2458,13 @@ int prestera_hw_flood_domain_ports_set(struct prestera_flood_domain *domain) { struct prestera_flood_domain_port *flood_domain_port; struct prestera_msg_flood_domain_ports_set_req *req; - struct prestera_msg_flood_domain_port *ports; struct prestera_switch *sw = domain->sw; struct prestera_port *port; u32 ports_num = 0; - int buf_size; - void *buff; + size_t buf_size; u16 lag_id; int err; + int i = 0; list_for_each_entry(flood_domain_port, &domain->flood_domain_port_list, flood_domain_port_node) @@ -2477,15 +2473,11 @@ int prestera_hw_flood_domain_ports_set(struct prestera_flood_domain *domain) if (!ports_num) return -EINVAL; - buf_size = sizeof(*req) + sizeof(*ports) * ports_num; - - buff = kmalloc(buf_size, GFP_KERNEL); - if (!buff) + buf_size = struct_size(req, ports, ports_num); + req = kmalloc(buf_size, GFP_KERNEL); + if (!req) return -ENOMEM; - req = buff; - ports = buff + sizeof(*req); - req->flood_domain_idx = __cpu_to_le32(domain->idx); req->ports_num = __cpu_to_le32(ports_num); @@ -2494,31 +2486,30 @@ int prestera_hw_flood_domain_ports_set(struct prestera_flood_domain *domain) if (netif_is_lag_master(flood_domain_port->dev)) { if (prestera_lag_id(sw, flood_domain_port->dev, &lag_id)) { - kfree(buff); + kfree(req); return -EINVAL; } - ports->port_type = + req->ports[i].port_type = __cpu_to_le16(PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_LAG); - ports->lag_id = __cpu_to_le16(lag_id); + req->ports[i].lag_id = __cpu_to_le16(lag_id); } else { port = prestera_port_dev_lower_find(flood_domain_port->dev); - ports->port_type = + req->ports[i].port_type = 
__cpu_to_le16(PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT); - ports->dev_num = __cpu_to_le32(port->dev_id); - ports->port_num = __cpu_to_le32(port->hw_id); + req->ports[i].dev_num = __cpu_to_le32(port->dev_id); + req->ports[i].port_num = __cpu_to_le32(port->hw_id); } - ports->vid = __cpu_to_le16(flood_domain_port->vid); - - ports++; + req->ports[i].vid = __cpu_to_le16(flood_domain_port->vid); + i++; } err = prestera_cmd(sw, PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_SET, &req->cmd, buf_size); - kfree(buff); + kfree(req); return err; } diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c index 4fb886c57cd7..63ae01954dfc 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_main.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c @@ -489,7 +489,7 @@ static int prestera_port_change_mtu(struct net_device *dev, int mtu) if (err) return err; - dev->mtu = mtu; + WRITE_ONCE(dev->mtu, mtu); return 0; } @@ -821,7 +821,7 @@ static void prestera_port_handle_event(struct prestera_switch *sw, if (port->state_mac.oper) { if (port->phy_link) - phylink_mac_change(port->phy_link, true); + phylink_pcs_change(&port->phylink_pcs, true); else netif_carrier_on(port->dev); @@ -829,7 +829,7 @@ static void prestera_port_handle_event(struct prestera_switch *sw, queue_delayed_work(prestera_wq, caching_dw, 0); } else { if (port->phy_link) - phylink_mac_change(port->phy_link, false); + phylink_pcs_change(&port->phylink_pcs, false); else if (netif_running(port->dev) && netif_carrier_ok(port->dev)) netif_carrier_off(port->dev); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c index cc2a9ae794be..39d9bf82c115 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c @@ -96,7 +96,7 @@ struct prestera_sdma { struct dma_pool *desc_pool; struct work_struct tx_work; struct napi_struct rx_napi; - struct net_device napi_dev; + struct net_device *napi_dev; u32 map_addr; u64 dma_mask; /* protect SDMA with concurrent access from multiple CPUs */ @@ -654,13 +654,21 @@ static int prestera_sdma_switch_init(struct prestera_switch *sw) if (err) goto err_evt_register; - init_dummy_netdev(&sdma->napi_dev); + sdma->napi_dev = alloc_netdev_dummy(0); + if (!sdma->napi_dev) { + dev_err(dev, "not able to initialize dummy device\n"); + err = -ENOMEM; + goto err_alloc_dummy; + } - netif_napi_add(&sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll); + netif_napi_add(sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll); napi_enable(&sdma->rx_napi); return 0; +err_alloc_dummy: + prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_RXTX, + prestera_rxtx_handle_event); err_evt_register: err_tx_init: prestera_sdma_tx_fini(sdma); @@ -677,6 +685,7 @@ static void prestera_sdma_switch_fini(struct prestera_switch *sw) napi_disable(&sdma->rx_napi); netif_napi_del(&sdma->rx_napi); + free_netdev(sdma->napi_dev); prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_RXTX, prestera_rxtx_handle_event); prestera_sdma_tx_fini(sdma); diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index dd6ca2e4fd51..1a59c952aa01 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1188,7 +1188,7 @@ static int pxa168_eth_change_mtu(struct net_device *dev, int mtu) { struct pxa168_eth_private *pep = netdev_priv(dev); - dev->mtu = mtu; + 
WRITE_ONCE(dev->mtu, mtu); set_port_config_ext(pep); if (!netif_running(dev)) diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 1b43704baceb..fcfb34561882 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -2905,13 +2905,13 @@ static int skge_change_mtu(struct net_device *dev, int new_mtu) int err; if (!netif_running(dev)) { - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return 0; } skge_down(dev); - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); err = skge_up(dev); if (err) diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 07720841a8d7..a7a16eac1891 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -34,6 +34,7 @@ #include <linux/mii.h> #include <linux/of_net.h> #include <linux/dmi.h> +#include <linux/skbuff_ref.h> #include <asm/irq.h> @@ -2383,7 +2384,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) u32 imask; if (!netif_running(dev)) { - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); netdev_update_features(dev); return 0; } @@ -2406,7 +2407,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) sky2_rx_stop(sky2); sky2_rx_clean(sky2); - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); netdev_update_features(dev); mode = DATA_BLIND_VAL(DATA_BLIND_DEF) | GM_SMOD_VLAN_ENA; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index caa13b9cedff..cae46290a7ae 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -110,16 +110,16 @@ static const struct mtk_reg_map mt7986_reg_map = { .tx_irq_mask = 0x461c, .tx_irq_status = 0x4618, .pdma = { - .rx_ptr = 0x6100, - .rx_cnt_cfg = 0x6104, - .pcrx_ptr = 0x6108, - .glo_cfg = 0x6204, - .rst_idx = 0x6208, - .delay_irq = 0x620c, - .irq_status = 0x6220, - .irq_mask = 0x6228, - .adma_rx_dbg0 = 0x6238, - .int_grp = 0x6250, + .rx_ptr = 0x4100, + .rx_cnt_cfg = 0x4104, + .pcrx_ptr = 0x4108, + .glo_cfg = 0x4204, + .rst_idx = 0x4208, + .delay_irq = 0x420c, + .irq_status = 0x4220, + .irq_mask = 0x4228, + .adma_rx_dbg0 = 0x4238, + .int_grp = 0x4250, }, .qdma = { .qtx_cfg = 0x4400, @@ -1107,7 +1107,7 @@ static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd, rxd->rxd1 = READ_ONCE(dma_rxd->rxd1); rxd->rxd3 = READ_ONCE(dma_rxd->rxd3); rxd->rxd4 = READ_ONCE(dma_rxd->rxd4); - if (mtk_is_netsys_v2_or_greater(eth)) { + if (mtk_is_netsys_v3_or_greater(eth)) { rxd->rxd5 = READ_ONCE(dma_rxd->rxd5); rxd->rxd6 = READ_ONCE(dma_rxd->rxd6); } @@ -1139,7 +1139,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) eth->scratch_ring = eth->sram_base; else eth->scratch_ring = dma_alloc_coherent(eth->dma_dev, - cnt * soc->txrx.txd_size, + cnt * soc->tx.desc_size, ð->phy_scratch_ring, GFP_KERNEL); if (unlikely(!eth->scratch_ring)) @@ -1155,17 +1155,17 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) return -ENOMEM; - phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1); + phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1); for (i = 0; i < cnt; i++) { dma_addr_t addr = dma_addr + i * MTK_QDMA_PAGE_SIZE; struct mtk_tx_dma_v2 *txd; - txd = eth->scratch_ring + i * soc->txrx.txd_size; + txd = eth->scratch_ring + i * soc->tx.desc_size; txd->txd1 = addr; if (i < cnt - 1) txd->txd2 = eth->phy_scratch_ring + - (i + 1) * soc->txrx.txd_size; + (i + 1) * 
soc->tx.desc_size; txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE); if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA)) @@ -1416,7 +1416,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, if (itxd == ring->last_free) return -ENOMEM; - itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size); + itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size); memset(itx_buf, 0, sizeof(*itx_buf)); txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size, @@ -1457,7 +1457,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info)); txd_info.size = min_t(unsigned int, frag_size, - soc->txrx.dma_max_len); + soc->tx.dma_max_len); txd_info.qid = queue; txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 && !(frag_size - txd_info.size); @@ -1470,7 +1470,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, mtk_tx_set_dma_desc(dev, txd, &txd_info); tx_buf = mtk_desc_to_tx_buf(ring, txd, - soc->txrx.txd_size); + soc->tx.desc_size); if (new_desc) memset(tx_buf, 0, sizeof(*tx_buf)); tx_buf->data = (void *)MTK_DMA_DUMMY_DESC; @@ -1513,7 +1513,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, } else { int next_idx; - next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size), + next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size), ring->dma_size); mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0); } @@ -1522,7 +1522,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, err_dma: do { - tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size); + tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size); /* unmap dma */ mtk_tx_unmap(eth, tx_buf, NULL, false); @@ -1547,7 +1547,7 @@ static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb) for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { frag = &skb_shinfo(skb)->frags[i]; nfrags += DIV_ROUND_UP(skb_frag_size(frag), - eth->soc->txrx.dma_max_len); + eth->soc->tx.dma_max_len); } } else { nfrags += skb_shinfo(skb)->nr_frags; @@ -1654,7 +1654,7 @@ static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth) ring = ð->rx_ring[i]; idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size); - rxd = ring->dma + idx * eth->soc->txrx.rxd_size; + rxd = ring->dma + idx * eth->soc->rx.desc_size; if (rxd->rxd2 & RX_DMA_DONE) { ring->calc_idx_update = true; return ring; @@ -1710,7 +1710,7 @@ static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth, if (IS_ERR(pp)) return pp; - err = __xdp_rxq_info_reg(xdp_q, ð->dummy_dev, id, + err = __xdp_rxq_info_reg(xdp_q, eth->dummy_dev, id, eth->rx_napi.napi_id, PAGE_SIZE); if (err < 0) goto err_free_pp; @@ -1822,7 +1822,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf, } htxd = txd; - tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size); + tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size); memset(tx_buf, 0, sizeof(*tx_buf)); htx_buf = tx_buf; @@ -1841,7 +1841,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf, goto unmap; tx_buf = mtk_desc_to_tx_buf(ring, txd, - soc->txrx.txd_size); + soc->tx.desc_size); memset(tx_buf, 0, sizeof(*tx_buf)); n_desc++; } @@ -1879,7 +1879,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf, } else { int idx; - idx = txd_to_idx(ring, txd, soc->txrx.txd_size); + idx = txd_to_idx(ring, txd, soc->tx.desc_size); mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size), MT7628_TX_CTX_IDX0); } @@ -1890,7 +1890,7 @@ static int 
mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf, unmap: while (htxd != txd) { - tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size); + tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size); mtk_tx_unmap(eth, tx_buf, NULL, false); htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; @@ -2021,14 +2021,14 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, goto rx_done; idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size); - rxd = ring->dma + idx * eth->soc->txrx.rxd_size; + rxd = ring->dma + idx * eth->soc->rx.desc_size; data = ring->data[idx]; if (!mtk_rx_get_desc(eth, &trxd, rxd)) break; /* find out which mac the packet come from. values start at 1 */ - if (mtk_is_netsys_v2_or_greater(eth)) { + if (mtk_is_netsys_v3_or_greater(eth)) { u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5); switch (val) { @@ -2140,7 +2140,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, skb->dev = netdev; bytes += skb->len; - if (mtk_is_netsys_v2_or_greater(eth)) { + if (mtk_is_netsys_v3_or_greater(eth)) { reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5); hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY; if (hash != MTK_RXD5_FOE_ENTRY) @@ -2156,7 +2156,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, rxdcsum = &trxd.rxd4; } - if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid) + if (*rxdcsum & eth->soc->rx.dma_l4_valid) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb_checksum_none_assert(skb); @@ -2280,7 +2280,7 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget, break; tx_buf = mtk_desc_to_tx_buf(ring, desc, - eth->soc->txrx.txd_size); + eth->soc->tx.desc_size); if (!tx_buf->data) break; @@ -2331,7 +2331,7 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget, } mtk_tx_unmap(eth, tx_buf, &bq, true); - desc = ring->dma + cpu * eth->soc->txrx.txd_size; + desc = ring->dma + cpu * eth->soc->tx.desc_size; ring->last_free = desc; atomic_inc(&ring->free_count); @@ -2421,7 +2421,7 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget) do { int rx_done; - mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, + mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.irq_status); rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth); rx_done_total += rx_done; @@ -2437,10 +2437,10 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget) return budget; } while (mtk_r32(eth, reg_map->pdma.irq_status) & - eth->soc->txrx.rx_irq_done_mask); + eth->soc->rx.irq_done_mask); if (napi_complete_done(napi, rx_done_total)) - mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask); + mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask); return rx_done_total; } @@ -2449,7 +2449,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth) { const struct mtk_soc_data *soc = eth->soc; struct mtk_tx_ring *ring = ð->tx_ring; - int i, sz = soc->txrx.txd_size; + int i, sz = soc->tx.desc_size; struct mtk_tx_dma_v2 *txd; int ring_size; u32 ofs, val; @@ -2572,14 +2572,14 @@ static void mtk_tx_clean(struct mtk_eth *eth) } if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) { dma_free_coherent(eth->dma_dev, - ring->dma_size * soc->txrx.txd_size, + ring->dma_size * soc->tx.desc_size, ring->dma, ring->phys); ring->dma = NULL; } if (ring->dma_pdma) { dma_free_coherent(eth->dma_dev, - ring->dma_size * soc->txrx.txd_size, + ring->dma_size * soc->tx.desc_size, ring->dma_pdma, ring->phys_pdma); ring->dma_pdma = NULL; } @@ -2634,15 +2634,15 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) || rx_flag != MTK_RX_FLAGS_NORMAL) { 
ring->dma = dma_alloc_coherent(eth->dma_dev, - rx_dma_size * eth->soc->txrx.rxd_size, - &ring->phys, GFP_KERNEL); + rx_dma_size * eth->soc->rx.desc_size, + &ring->phys, GFP_KERNEL); } else { struct mtk_tx_ring *tx_ring = ð->tx_ring; ring->dma = tx_ring->dma + tx_ring_size * - eth->soc->txrx.txd_size * (ring_no + 1); + eth->soc->tx.desc_size * (ring_no + 1); ring->phys = tx_ring->phys + tx_ring_size * - eth->soc->txrx.txd_size * (ring_no + 1); + eth->soc->tx.desc_size * (ring_no + 1); } if (!ring->dma) @@ -2653,7 +2653,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) dma_addr_t dma_addr; void *data; - rxd = ring->dma + i * eth->soc->txrx.rxd_size; + rxd = ring->dma + i * eth->soc->rx.desc_size; if (ring->page_pool) { data = mtk_page_pool_get_buff(ring->page_pool, &dma_addr, GFP_KERNEL); @@ -2690,7 +2690,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) rxd->rxd3 = 0; rxd->rxd4 = 0; - if (mtk_is_netsys_v2_or_greater(eth)) { + if (mtk_is_netsys_v3_or_greater(eth)) { rxd->rxd5 = 0; rxd->rxd6 = 0; rxd->rxd7 = 0; @@ -2744,7 +2744,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_ if (!ring->data[i]) continue; - rxd = ring->dma + i * eth->soc->txrx.rxd_size; + rxd = ring->dma + i * eth->soc->rx.desc_size; if (!rxd->rxd1) continue; @@ -2761,7 +2761,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_ if (!in_sram && ring->dma) { dma_free_coherent(eth->dma_dev, - ring->dma_size * eth->soc->txrx.rxd_size, + ring->dma_size * eth->soc->rx.desc_size, ring->dma, ring->phys); ring->dma = NULL; } @@ -3124,7 +3124,7 @@ static void mtk_dma_free(struct mtk_eth *eth) netdev_reset_queue(eth->netdev[i]); if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) { dma_free_coherent(eth->dma_dev, - MTK_QDMA_RING_SIZE * soc->txrx.txd_size, + MTK_QDMA_RING_SIZE * soc->tx.desc_size, eth->scratch_ring, eth->phy_scratch_ring); eth->scratch_ring = NULL; eth->phy_scratch_ring = 0; @@ -3174,7 +3174,7 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth) eth->rx_events++; if (likely(napi_schedule_prep(ð->rx_napi))) { - mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); + mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask); __napi_schedule(ð->rx_napi); } @@ -3200,9 +3200,9 @@ static irqreturn_t mtk_handle_irq(int irq, void *_eth) const struct mtk_reg_map *reg_map = eth->soc->reg_map; if (mtk_r32(eth, reg_map->pdma.irq_mask) & - eth->soc->txrx.rx_irq_done_mask) { + eth->soc->rx.irq_done_mask) { if (mtk_r32(eth, reg_map->pdma.irq_status) & - eth->soc->txrx.rx_irq_done_mask) + eth->soc->rx.irq_done_mask) mtk_handle_irq_rx(irq, _eth); } if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) { @@ -3220,10 +3220,10 @@ static void mtk_poll_controller(struct net_device *dev) struct mtk_eth *eth = mac->hw; mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); - mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); + mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask); mtk_handle_irq_rx(eth->irq[2], dev); mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); - mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask); + mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask); } #endif @@ -3387,7 +3387,7 @@ static int mtk_open(struct net_device *dev) napi_enable(ð->tx_napi); napi_enable(ð->rx_napi); mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); - mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask); + mtk_rx_irq_enable(eth, soc->rx.irq_done_mask); refcount_set(ð->dma_refcnt, 1); } else @@ -3471,7 +3471,7 @@ static int 
mtk_stop(struct net_device *dev) mtk_gdm_config(eth, MTK_GDMA_DROP_ALL); mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); - mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); + mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask); napi_disable(ð->tx_napi); napi_disable(ð->rx_napi); @@ -3893,7 +3893,7 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset) else mtk_hw_reset(eth); - if (mtk_is_netsys_v2_or_greater(eth)) { + if (mtk_is_netsys_v3_or_greater(eth)) { /* Set FE to PDMAv2 if necessary */ val = mtk_r32(eth, MTK_FE_GLO_MISC); mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC); @@ -3947,9 +3947,9 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset) /* FE int grouping */ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp); - mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4); + mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4); mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp); - mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4); + mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4); mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP); if (mtk_is_netsys_v3_or_greater(eth)) { @@ -4055,7 +4055,7 @@ static int mtk_change_mtu(struct net_device *dev, int new_mtu) } mtk_set_mcr_max_rx(mac, length); - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return 0; } @@ -4188,6 +4188,8 @@ static int mtk_free_dev(struct mtk_eth *eth) metadata_dst_free(eth->dsa_meta[i]); } + free_netdev(eth->dummy_dev); + return 0; } @@ -4983,9 +4985,14 @@ static int mtk_probe(struct platform_device *pdev) /* we run 2 devices on the same DMA ring so we need a dummy device * for NAPI to work */ - init_dummy_netdev(ð->dummy_dev); - netif_napi_add(ð->dummy_dev, ð->tx_napi, mtk_napi_tx); - netif_napi_add(ð->dummy_dev, ð->rx_napi, mtk_napi_rx); + eth->dummy_dev = alloc_netdev_dummy(0); + if (!eth->dummy_dev) { + err = -ENOMEM; + dev_err(eth->dev, "failed to allocated dummy device\n"); + goto err_unreg_netdev; + } + netif_napi_add(eth->dummy_dev, ð->tx_napi, mtk_napi_tx); + netif_napi_add(eth->dummy_dev, ð->rx_napi, mtk_napi_rx); platform_set_drvdata(pdev, eth); schedule_delayed_work(ð->reset.monitor_work, @@ -4993,6 +5000,8 @@ static int mtk_probe(struct platform_device *pdev) return 0; +err_unreg_netdev: + mtk_unreg_dev(eth); err_deinit_ppe: mtk_ppe_deinit(eth); mtk_mdio_cleanup(eth); @@ -5039,11 +5048,15 @@ static const struct mtk_soc_data mt2701_data = { .required_clks = MT7623_CLKS_BITMAP, .required_pctl = true, .version = 1, - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), - .rxd_size = sizeof(struct mtk_rx_dma), - .rx_irq_done_mask = MTK_RX_DONE_INT, - .rx_dma_l4_valid = RX_DMA_L4_VALID, + .tx = { + .desc_size = sizeof(struct mtk_tx_dma), + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, + }, + .rx = { + .desc_size = sizeof(struct mtk_rx_dma), + .irq_done_mask = MTK_RX_DONE_INT, + .dma_l4_valid = RX_DMA_L4_VALID, .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, }, @@ -5059,11 +5072,15 @@ static const struct mtk_soc_data mt7621_data = { .offload_version = 1, .hash_offset = 2, .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE, - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), - .rxd_size = sizeof(struct mtk_rx_dma), - .rx_irq_done_mask = MTK_RX_DONE_INT, - .rx_dma_l4_valid = RX_DMA_L4_VALID, + .tx = { + .desc_size = sizeof(struct mtk_tx_dma), + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, + }, + .rx = { + .desc_size = sizeof(struct mtk_rx_dma), + .irq_done_mask = MTK_RX_DONE_INT, + .dma_l4_valid = 
RX_DMA_L4_VALID, .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, }, @@ -5081,11 +5098,15 @@ static const struct mtk_soc_data mt7622_data = { .hash_offset = 2, .has_accounting = true, .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE, - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), - .rxd_size = sizeof(struct mtk_rx_dma), - .rx_irq_done_mask = MTK_RX_DONE_INT, - .rx_dma_l4_valid = RX_DMA_L4_VALID, + .tx = { + .desc_size = sizeof(struct mtk_tx_dma), + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, + }, + .rx = { + .desc_size = sizeof(struct mtk_rx_dma), + .irq_done_mask = MTK_RX_DONE_INT, + .dma_l4_valid = RX_DMA_L4_VALID, .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, }, @@ -5102,11 +5123,15 @@ static const struct mtk_soc_data mt7623_data = { .hash_offset = 2, .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE, .disable_pll_modes = true, - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), - .rxd_size = sizeof(struct mtk_rx_dma), - .rx_irq_done_mask = MTK_RX_DONE_INT, - .rx_dma_l4_valid = RX_DMA_L4_VALID, + .tx = { + .desc_size = sizeof(struct mtk_tx_dma), + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, + }, + .rx = { + .desc_size = sizeof(struct mtk_rx_dma), + .irq_done_mask = MTK_RX_DONE_INT, + .dma_l4_valid = RX_DMA_L4_VALID, .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, }, @@ -5121,11 +5146,15 @@ static const struct mtk_soc_data mt7629_data = { .required_pctl = false, .has_accounting = true, .version = 1, - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), - .rxd_size = sizeof(struct mtk_rx_dma), - .rx_irq_done_mask = MTK_RX_DONE_INT, - .rx_dma_l4_valid = RX_DMA_L4_VALID, + .tx = { + .desc_size = sizeof(struct mtk_tx_dma), + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, + }, + .rx = { + .desc_size = sizeof(struct mtk_rx_dma), + .irq_done_mask = MTK_RX_DONE_INT, + .dma_l4_valid = RX_DMA_L4_VALID, .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, }, @@ -5143,14 +5172,18 @@ static const struct mtk_soc_data mt7981_data = { .hash_offset = 4, .has_accounting = true, .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE, - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma_v2), - .rxd_size = sizeof(struct mtk_rx_dma_v2), - .rx_irq_done_mask = MTK_RX_DONE_INT_V2, - .rx_dma_l4_valid = RX_DMA_L4_VALID_V2, + .tx = { + .desc_size = sizeof(struct mtk_tx_dma_v2), .dma_max_len = MTK_TX_DMA_BUF_LEN_V2, .dma_len_offset = 8, }, + .rx = { + .desc_size = sizeof(struct mtk_rx_dma), + .irq_done_mask = MTK_RX_DONE_INT, + .dma_l4_valid = RX_DMA_L4_VALID_V2, + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, + }, }; static const struct mtk_soc_data mt7986_data = { @@ -5165,14 +5198,18 @@ static const struct mtk_soc_data mt7986_data = { .hash_offset = 4, .has_accounting = true, .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE, - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma_v2), - .rxd_size = sizeof(struct mtk_rx_dma_v2), - .rx_irq_done_mask = MTK_RX_DONE_INT_V2, - .rx_dma_l4_valid = RX_DMA_L4_VALID_V2, + .tx = { + .desc_size = sizeof(struct mtk_tx_dma_v2), .dma_max_len = MTK_TX_DMA_BUF_LEN_V2, .dma_len_offset = 8, }, + .rx = { + .desc_size = sizeof(struct mtk_rx_dma), + .irq_done_mask = MTK_RX_DONE_INT, + .dma_l4_valid = RX_DMA_L4_VALID_V2, + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, + }, }; static const struct mtk_soc_data mt7988_data = { @@ -5187,11 +5224,15 @@ static const struct mtk_soc_data mt7988_data = { .hash_offset = 4, .has_accounting = true, .foe_entry_size = MTK_FOE_ENTRY_V3_SIZE, - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma_v2), 
- .rxd_size = sizeof(struct mtk_rx_dma_v2), - .rx_irq_done_mask = MTK_RX_DONE_INT_V2, - .rx_dma_l4_valid = RX_DMA_L4_VALID_V2, + .tx = { + .desc_size = sizeof(struct mtk_tx_dma_v2), + .dma_max_len = MTK_TX_DMA_BUF_LEN_V2, + .dma_len_offset = 8, + }, + .rx = { + .desc_size = sizeof(struct mtk_rx_dma_v2), + .irq_done_mask = MTK_RX_DONE_INT_V2, + .dma_l4_valid = RX_DMA_L4_VALID_V2, .dma_max_len = MTK_TX_DMA_BUF_LEN_V2, .dma_len_offset = 8, }, @@ -5204,11 +5245,15 @@ static const struct mtk_soc_data rt5350_data = { .required_clks = MT7628_CLKS_BITMAP, .required_pctl = false, .version = 1, - .txrx = { - .txd_size = sizeof(struct mtk_tx_dma), - .rxd_size = sizeof(struct mtk_rx_dma), - .rx_irq_done_mask = MTK_RX_DONE_INT, - .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA, + .tx = { + .desc_size = sizeof(struct mtk_tx_dma), + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, + }, + .rx = { + .desc_size = sizeof(struct mtk_rx_dma), + .irq_done_mask = MTK_RX_DONE_INT, + .dma_l4_valid = RX_DMA_L4_VALID_PDMA, .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, }, diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 9ae3b8a71d0e..4eab30b44070 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -327,8 +327,8 @@ /* QDMA descriptor txd3 */ #define TX_DMA_OWNER_CPU BIT(31) #define TX_DMA_LS0 BIT(30) -#define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset) -#define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len) +#define TX_DMA_PLEN0(x) (((x) & eth->soc->tx.dma_max_len) << eth->soc->tx.dma_len_offset) +#define TX_DMA_PLEN1(x) ((x) & eth->soc->tx.dma_max_len) #define TX_DMA_SWC BIT(14) #define TX_DMA_PQID GENMASK(3, 0) #define TX_DMA_ADDR64_MASK GENMASK(3, 0) @@ -348,8 +348,8 @@ /* QDMA descriptor rxd2 */ #define RX_DMA_DONE BIT(31) #define RX_DMA_LSO BIT(30) -#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset) -#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len) +#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->rx.dma_max_len) << eth->soc->rx.dma_len_offset) +#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->rx.dma_len_offset) & eth->soc->rx.dma_max_len) #define RX_DMA_VTAG BIT(15) #define RX_DMA_ADDR64_MASK GENMASK(3, 0) #if IS_ENABLED(CONFIG_64BIT) @@ -1153,10 +1153,9 @@ struct mtk_reg_map { * @foe_entry_size Foe table entry size. * @has_accounting Bool indicating support for accounting of * offloaded flows. - * @txd_size Tx DMA descriptor size. - * @rxd_size Rx DMA descriptor size. - * @rx_irq_done_mask Rx irq done register mask. - * @rx_dma_l4_valid Rx DMA valid register mask. + * @desc_size Tx/Rx DMA descriptor size. + * @irq_done_mask Rx irq done register mask. + * @dma_l4_valid Rx DMA valid register mask. * @dma_max_len Max DMA tx/rx buffer length. * @dma_len_offset Tx/Rx DMA length field offset. 
*/ @@ -1174,13 +1173,17 @@ struct mtk_soc_data { bool has_accounting; bool disable_pll_modes; struct { - u32 txd_size; - u32 rxd_size; - u32 rx_irq_done_mask; - u32 rx_dma_l4_valid; + u32 desc_size; u32 dma_max_len; u32 dma_len_offset; - } txrx; + } tx; + struct { + u32 desc_size; + u32 irq_done_mask; + u32 dma_l4_valid; + u32 dma_max_len; + u32 dma_len_offset; + } rx; }; #define MTK_DMA_MONITOR_TIMEOUT msecs_to_jiffies(1000) @@ -1242,7 +1245,7 @@ struct mtk_eth { spinlock_t page_lock; spinlock_t tx_irq_lock; spinlock_t rx_irq_lock; - struct net_device dummy_dev; + struct net_device *dummy_dev; struct net_device *netdev[MTK_MAX_DEVS]; struct mtk_mac *mac[MTK_MAX_DEVS]; int irq[3]; diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c index 6ce0db3a1a92..0acee405a749 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c @@ -580,7 +580,7 @@ mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) idle = cur_idle; entry->data.ib1 &= ~ib1_ts_mask; - entry->data.ib1 |= hwe->ib1 & ib1_ts_mask; + entry->data.ib1 |= ib1 & ib1_ts_mask; } } diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c index fbb5e9d5af13..aa262e6f4b85 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c @@ -273,6 +273,10 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f, flow_rule_match_control(rule, &match); addr_type = match.key->addr_type; + + if (flow_rule_has_control_flags(match.mask->flags, + f->common.extack)) + return -EOPNOTSUPP; } else { return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 5d3fde63b273..4c089cfa027a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1649,7 +1649,7 @@ int mlx4_en_start_port(struct net_device *dev) sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES); /* Calculate Rx buf size */ - dev->mtu = min(dev->mtu, priv->max_mtu); + WRITE_ONCE(dev->mtu, min(dev->mtu, priv->max_mtu)); mlx4_en_calc_rx_buf(dev); en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size); @@ -2394,7 +2394,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) !mlx4_en_check_xdp_mtu(dev, new_mtu)) return -EOPNOTSUPP; - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); if (netif_running(dev)) { mutex_lock(&mdev->state_lock); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index eac49657bd07..8328df8645d5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -42,6 +42,7 @@ #include <linux/if_vlan.h> #include <linux/vmalloc.h> #include <linux/irq.h> +#include <linux/skbuff_ref.h> #include <net/ip.h> #if IS_ENABLED(CONFIG_IPV6) diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 7b02ff61126d..98688e4dbec5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -185,7 +185,8 @@ static int mlx4_devlink_ierr_reset_get(struct devlink *devlink, u32 id, } static int mlx4_devlink_ierr_reset_set(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { mlx4_internal_err_reset = ctx->val.vbool; return 0; @@ -202,7 +203,8 @@ 
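The MediaTek hunks above split the shared txrx block of struct mtk_soc_data into independent tx and rx sub-structs, so each direction carries its own desc_size, dma_max_len and dma_len_offset, and the TX_DMA_PLEN0()/RX_DMA_GET_PLEN0() macros now read the matching sub-struct. A minimal stand-alone sketch of the same pack/unpack idea follows; the names, mask values and helpers are invented for illustration and are not the driver's own.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-direction DMA parameters, mirroring the new tx/rx
 * sub-structs: a length mask plus the bit offset of the length field.
 */
struct dma_dir_cfg {
	uint32_t dma_max_len;    /* length mask, e.g. 0xffff or 0x3fff */
	uint32_t dma_len_offset; /* where the length lives, e.g. 16 or 8 */
};

/* Pack a buffer length into a descriptor word, TX_DMA_PLEN0() style. */
static uint32_t pack_plen(const struct dma_dir_cfg *cfg, uint32_t len)
{
	return (len & cfg->dma_max_len) << cfg->dma_len_offset;
}

/* Recover the length from a descriptor word, RX_DMA_GET_PLEN0() style. */
static uint32_t unpack_plen(const struct dma_dir_cfg *cfg, uint32_t word)
{
	return (word >> cfg->dma_len_offset) & cfg->dma_max_len;
}

int main(void)
{
	struct dma_dir_cfg tx = { .dma_max_len = 0xffff, .dma_len_offset = 16 };
	uint32_t word = pack_plen(&tx, 1514);

	printf("packed=0x%08x len=%u\n", word, unpack_plen(&tx, word));
	return 0;
}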
static int mlx4_devlink_crdump_snapshot_get(struct devlink *devlink, u32 id, } static int mlx4_devlink_crdump_snapshot_set(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct mlx4_priv *priv = devlink_priv(devlink); struct mlx4_dev *dev = &priv->dev; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 4957412ff1f6..20768ef2e9d2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -969,19 +969,32 @@ static void cmd_work_handler(struct work_struct *work) bool poll_cmd = ent->polling; struct mlx5_cmd_layout *lay; struct mlx5_core_dev *dev; - unsigned long cb_timeout; - struct semaphore *sem; + unsigned long timeout; unsigned long flags; int alloc_ret; int cmd_mode; + complete(&ent->handling); + dev = container_of(cmd, struct mlx5_core_dev, cmd); - cb_timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD)); + timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD)); - complete(&ent->handling); - sem = ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem; - down(sem); if (!ent->page_queue) { + if (down_timeout(&cmd->vars.sem, timeout)) { + mlx5_core_warn(dev, "%s(0x%x) timed out while waiting for a slot.\n", + mlx5_command_str(ent->op), ent->op); + if (ent->callback) { + ent->callback(-EBUSY, ent->context); + mlx5_free_cmd_msg(dev, ent->out); + free_msg(dev, ent->in); + cmd_ent_put(ent); + } else { + ent->ret = -EBUSY; + complete(&ent->done); + } + complete(&ent->slotted); + return; + } alloc_ret = cmd_alloc_index(cmd, ent); if (alloc_ret < 0) { mlx5_core_err_rl(dev, "failed to allocate command entry\n"); @@ -994,10 +1007,11 @@ static void cmd_work_handler(struct work_struct *work) ent->ret = -EAGAIN; complete(&ent->done); } - up(sem); + up(&cmd->vars.sem); return; } } else { + down(&cmd->vars.pages_sem); ent->idx = cmd->vars.max_reg_cmds; spin_lock_irqsave(&cmd->alloc_lock, flags); clear_bit(ent->idx, &cmd->vars.bitmask); @@ -1005,6 +1019,8 @@ static void cmd_work_handler(struct work_struct *work) spin_unlock_irqrestore(&cmd->alloc_lock, flags); } + complete(&ent->slotted); + lay = get_inst(cmd, ent->idx); ent->lay = lay; memset(lay, 0, sizeof(*lay)); @@ -1023,7 +1039,7 @@ static void cmd_work_handler(struct work_struct *work) ent->ts1 = ktime_get_ns(); cmd_mode = cmd->mode; - if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, cb_timeout)) + if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, timeout)) cmd_ent_get(ent); set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state); @@ -1143,6 +1159,9 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) ent->ret = -ECANCELED; goto out_err; } + + wait_for_completion(&ent->slotted); + if (cmd->mode == CMD_MODE_POLLING || ent->polling) wait_for_completion(&ent->done); else if (!wait_for_completion_timeout(&ent->done, timeout)) @@ -1157,6 +1176,9 @@ out_err: } else if (err == -ECANCELED) { mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n", mlx5_command_str(ent->op), ent->op); + } else if (err == -EBUSY) { + mlx5_core_warn(dev, "%s(0x%x) timeout while waiting for command semaphore.\n", + mlx5_command_str(ent->op), ent->op); } mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err, deliv_status_to_str(ent->status), ent->status); @@ -1208,6 +1230,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, ent->polling = force_polling; 
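The cmd_work_handler() rework above stops blocking forever on the command-slot semaphore: regular commands now use down_timeout() and fail with -EBUSY when no slot frees up in time, and the new "slotted" completion tells wait_func() whether a slot was ever taken. Below is a rough userspace analogue of the bounded-wait idea, using POSIX semaphores purely as a stand-in; it is illustrative only and not the mlx5 code.

#include <errno.h>
#include <semaphore.h>
#include <stdio.h>
#include <time.h>

/* Wait up to timeout_ms for a command slot; return 0 on success or
 * -EBUSY if no slot became available, instead of sleeping forever.
 */
static int acquire_slot(sem_t *slots, int timeout_ms)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_ms / 1000;
	ts.tv_nsec += (timeout_ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}

	if (sem_timedwait(slots, &ts) == 0)
		return 0;

	return errno == ETIMEDOUT ? -EBUSY : -errno;
}

int main(void)
{
	sem_t slots;

	sem_init(&slots, 0, 0);		/* zero free slots, so we time out */
	printf("acquire_slot: %d\n", acquire_slot(&slots, 100));
	sem_destroy(&slots);
	return 0;
}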
init_completion(&ent->handling); + init_completion(&ent->slotted); if (!callback) init_completion(&ent->done); @@ -1225,7 +1248,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, return 0; /* mlx5_cmd_comp_handler() will put(ent) */ err = wait_func(dev, ent); - if (err == -ETIMEDOUT || err == -ECANCELED) + if (err == -ETIMEDOUT || err == -ECANCELED || err == -EBUSY) goto out_free; ds = ent->ts2 - ent->ts1; @@ -1611,6 +1634,9 @@ static int cmd_comp_notifier(struct notifier_block *nb, dev = container_of(cmd, struct mlx5_core_dev, cmd); eqe = data; + if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) + return NOTIFY_DONE; + mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false); return NOTIFY_OK; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index 09652dc89115..36806e813c33 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -143,8 +143,8 @@ static ssize_t average_read(struct file *filp, char __user *buf, size_t count, return simple_read_from_buffer(buf, count, pos, tbuf, ret); } -static ssize_t average_write(struct file *filp, const char __user *buf, - size_t count, loff_t *pos) +static ssize_t reset_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) { struct mlx5_cmd_stats *stats; @@ -152,6 +152,11 @@ static ssize_t average_write(struct file *filp, const char __user *buf, spin_lock_irq(&stats->lock); stats->sum = 0; stats->n = 0; + stats->failed = 0; + stats->failed_mbox_status = 0; + stats->last_failed_errno = 0; + stats->last_failed_mbox_status = 0; + stats->last_failed_syndrome = 0; spin_unlock_irq(&stats->lock); *pos += count; @@ -159,11 +164,16 @@ static ssize_t average_write(struct file *filp, const char __user *buf, return count; } -static const struct file_operations stats_fops = { +static const struct file_operations reset_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = reset_write, +}; + +static const struct file_operations average_fops = { .owner = THIS_MODULE, .open = simple_open, .read = average_read, - .write = average_write, }; static ssize_t slots_read(struct file *filp, char __user *buf, size_t count, @@ -228,8 +238,10 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev) continue; stats->root = debugfs_create_dir(namep, *cmd); + debugfs_create_file("reset", 0200, stats->root, stats, + &reset_fops); debugfs_create_file("average", 0400, stats->root, stats, - &stats_fops); + &average_fops); debugfs_create_u64("n", 0400, stats->root, &stats->n); debugfs_create_u64("failed", 0400, stats->root, &stats->failed); debugfs_create_u64("failed_mbox_status", 0400, stats->root, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 84db05fb9389..e85fb71bf0b4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -320,6 +320,8 @@ struct mlx5e_params { bool scatter_fcs_en; bool rx_dim_enabled; bool tx_dim_enabled; + bool rx_moder_use_cqe_mode; + bool tx_moder_use_cqe_mode; u32 pflags; struct bpf_prog *xdp_prog; struct mlx5e_xsk *xsk; @@ -430,7 +432,7 @@ struct mlx5e_txqsq { u16 cc; u16 skb_fifo_cc; u32 dma_fifo_cc; - struct dim dim; /* Adaptive Moderation */ + struct dim *dim; /* Adaptive Moderation */ /* dirtied @xmit */ u16 pc ____cacheline_aligned_in_smp; @@ -722,7 +724,7 @@ struct mlx5e_rq { int ix; unsigned int hw_mtu; - struct dim dim; /* 
Dynamic Interrupt Moderation */ + struct dim *dim; /* Dynamic Interrupt Moderation */ /* XDP */ struct bpf_prog __rcu *xdp_prog; @@ -797,6 +799,10 @@ struct mlx5e_channel { int cpu; /* Sync between icosq recovery and XSK enable/disable. */ struct mutex icosq_recovery_lock; + + /* coalescing configuration */ + struct dim_cq_moder rx_cq_moder; + struct dim_cq_moder tx_cq_moder; }; struct mlx5e_ptp; @@ -1040,6 +1046,11 @@ void mlx5e_close_rq(struct mlx5e_rq *rq); int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter); void mlx5e_destroy_rq(struct mlx5e_rq *rq); +bool mlx5e_reset_rx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode, + bool dim_enabled); +bool mlx5e_reset_rx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode, + bool dim_enabled, bool keep_dim_state); + struct mlx5e_sq_param; int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool, @@ -1060,6 +1071,10 @@ int mlx5e_open_cq(struct mlx5_core_dev *mdev, struct dim_cq_moder moder, struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp, struct mlx5e_cq *cq); void mlx5e_close_cq(struct mlx5e_cq *cq); +int mlx5e_modify_cq_period_mode(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + u8 cq_period_mode); +int mlx5e_modify_cq_moderation(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + u16 cq_period, u16 cq_max_count, u8 cq_period_mode); int mlx5e_open_locked(struct net_device *netdev); int mlx5e_close_locked(struct net_device *netdev); @@ -1087,6 +1102,7 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv, void *context, bool reset); int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv); int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context); +int mlx5e_update_tc_and_tx_queues_ctx(struct mlx5e_priv *priv, void *context); void mlx5e_activate_priv_channels(struct mlx5e_priv *priv); void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv); int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx); @@ -1118,6 +1134,11 @@ int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev, void mlx5e_tx_err_cqe_work(struct work_struct *recover_work); void mlx5e_close_txqsq(struct mlx5e_txqsq *sq); +bool mlx5e_reset_tx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode, + bool dim_enabled); +bool mlx5e_reset_tx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode, + bool dim_enabled, bool keep_dim_state); + static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev) { return MLX5_CAP_ETH(mdev, swp) && @@ -1143,7 +1164,6 @@ void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq); int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn); void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn); -int mlx5e_update_nic_rx(struct mlx5e_priv *priv); void mlx5e_update_carrier(struct mlx5e_priv *priv); int mlx5e_close(struct net_device *netdev); int mlx5e_open(struct net_device *netdev); @@ -1160,7 +1180,7 @@ void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv); void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv, struct ethtool_drvinfo *drvinfo); void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, - uint32_t stringset, uint8_t *data); + u32 stringset, u8 *data); int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset); void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv, struct ethtool_stats *stats, u64 *data); @@ -1180,23 +1200,16 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, struct 
ethtool_coalesce *coal, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack); -int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, - struct ethtool_link_ksettings *link_ksettings); -int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, - const struct ethtool_link_ksettings *link_ksettings); -int mlx5e_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh); -int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh, - struct netlink_ext_ack *extack); +int mlx5e_get_per_queue_coalesce(struct net_device *dev, u32 queue, + struct ethtool_coalesce *coal); +int mlx5e_set_per_queue_coalesce(struct net_device *dev, u32 queue, + struct ethtool_coalesce *coal); u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv); u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv); int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, struct ethtool_ts_info *info); int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv, struct ethtool_flash *flash); -void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv, - struct ethtool_pauseparam *pauseparam); -int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv, - struct ethtool_pauseparam *pauseparam); /* mlx5e generic netdev management API */ static inline bool @@ -1222,8 +1235,6 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv, void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv); void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv); void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu); -void mlx5e_rx_dim_work(struct work_struct *work); -void mlx5e_tx_dim_work(struct work_struct *work); void mlx5e_set_xdp_feature(struct net_device *netdev); netdev_features_t mlx5e_features_check(struct sk_buff *skb, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c index 874a1016623c..66e719e88503 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c @@ -3,6 +3,7 @@ #include "channels.h" #include "en.h" +#include "en/dim.h" #include "en/ptp.h" unsigned int mlx5e_channels_get_num(struct mlx5e_channels *chs) @@ -55,3 +56,85 @@ bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn) *rqn = c->rq.rqn; return true; } + +int mlx5e_channels_rx_change_dim(struct mlx5e_channels *chs, bool enable) +{ + int i; + + for (i = 0; i < chs->num; i++) { + int err = mlx5e_dim_rx_change(&chs->c[i]->rq, enable); + + if (err) + return err; + } + + return 0; +} + +int mlx5e_channels_tx_change_dim(struct mlx5e_channels *chs, bool enable) +{ + int i, tc; + + for (i = 0; i < chs->num; i++) { + for (tc = 0; tc < mlx5e_get_dcb_num_tc(&chs->params); tc++) { + int err = mlx5e_dim_tx_change(&chs->c[i]->sq[tc], enable); + + if (err) + return err; + } + } + + return 0; +} + +int mlx5e_channels_rx_toggle_dim(struct mlx5e_channels *chs) +{ + int i; + + for (i = 0; i < chs->num; i++) { + /* If dim is enabled for the channel, reset the dim state so the + * collected statistics will be reset. This is useful for + * supporting legacy interfaces that allow things like changing + * the CQ period mode for all channels without disturbing + * individual channel configurations. 
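mlx5e_channels_rx_change_dim() and mlx5e_channels_tx_change_dim() above simply walk every channel (and every TC on the TX side), flip adaptive moderation on or off per queue, and return the first error; the toggle variants that follow turn DIM off and back on so its collected statistics restart from scratch. A compact sketch of that propagation pattern, with made-up types and names, purely for illustration:

#include <stdbool.h>
#include <stdio.h>

#define NUM_QUEUES 4

/* Stand-in for a per-queue adaptive-moderation state. */
struct queue {
	bool dim_enabled;
};

static int queue_change_dim(struct queue *q, bool enable)
{
	if (enable == q->dim_enabled)
		return 0;		/* nothing to do */
	q->dim_enabled = enable;	/* the real code (de)allocates a struct dim here */
	return 0;
}

/* Apply the setting to all queues, stopping at the first error. */
static int channels_change_dim(struct queue *qs, int n, bool enable)
{
	for (int i = 0; i < n; i++) {
		int err = queue_change_dim(&qs[i], enable);

		if (err)
			return err;
	}
	return 0;
}

/* Disable and re-enable so accumulated DIM statistics start over. */
static int channels_toggle_dim(struct queue *qs, int n)
{
	for (int i = 0; i < n; i++) {
		int err;

		if (!qs[i].dim_enabled)
			continue;
		queue_change_dim(&qs[i], false);
		err = queue_change_dim(&qs[i], true);
		if (err)
			return err;
	}
	return 0;
}

int main(void)
{
	struct queue qs[NUM_QUEUES] = { { true }, { true }, { false }, { true } };

	printf("toggle: %d\n", channels_toggle_dim(qs, NUM_QUEUES));
	printf("disable all: %d\n", channels_change_dim(qs, NUM_QUEUES, false));
	return 0;
}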
+ */ + if (chs->c[i]->rq.dim) { + int err; + + mlx5e_dim_rx_change(&chs->c[i]->rq, false); + err = mlx5e_dim_rx_change(&chs->c[i]->rq, true); + if (err) + return err; + } + } + + return 0; +} + +int mlx5e_channels_tx_toggle_dim(struct mlx5e_channels *chs) +{ + int i, tc; + + for (i = 0; i < chs->num; i++) { + for (tc = 0; tc < mlx5e_get_dcb_num_tc(&chs->params); tc++) { + int err; + + /* If dim is enabled for the channel, reset the dim + * state so the collected statistics will be reset. This + * is useful for supporting legacy interfaces that allow + * things like changing the CQ period mode for all + * channels without disturbing individual channel + * configurations. + */ + if (!chs->c[i]->sq[tc].dim) + continue; + + mlx5e_dim_tx_change(&chs->c[i]->sq[tc], false); + err = mlx5e_dim_tx_change(&chs->c[i]->sq[tc], true); + if (err) + return err; + } + } + + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h index 6715aa9383b9..eda80f8c6c02 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h @@ -15,5 +15,9 @@ void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn, u32 *vhca_id); bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn); +int mlx5e_channels_rx_change_dim(struct mlx5e_channels *chs, bool enabled); +int mlx5e_channels_tx_change_dim(struct mlx5e_channels *chs, bool enabled); +int mlx5e_channels_rx_toggle_dim(struct mlx5e_channels *chs); +int mlx5e_channels_tx_toggle_dim(struct mlx5e_channels *chs); #endif /* __MLX5_EN_CHANNELS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/dim.h b/drivers/net/ethernet/mellanox/mlx5/core/en/dim.h new file mode 100644 index 000000000000..110e2c6b7e51 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/dim.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved */ + +#ifndef __MLX5_EN_DIM_H__ +#define __MLX5_EN_DIM_H__ + +#include <linux/dim.h> +#include <linux/types.h> +#include <linux/mlx5/mlx5_ifc.h> + +/* Forward declarations */ +struct mlx5e_rq; +struct mlx5e_txqsq; +struct work_struct; + +/* convert a boolean value for cqe mode to appropriate dim constant + * true : DIM_CQ_PERIOD_MODE_START_FROM_CQE + * false : DIM_CQ_PERIOD_MODE_START_FROM_EQE + */ +static inline int mlx5e_dim_cq_period_mode(bool start_from_cqe) +{ + return start_from_cqe ? 
DIM_CQ_PERIOD_MODE_START_FROM_CQE : + DIM_CQ_PERIOD_MODE_START_FROM_EQE; +} + +static inline enum mlx5_cq_period_mode +mlx5e_cq_period_mode(enum dim_cq_period_mode cq_period_mode) +{ + switch (cq_period_mode) { + case DIM_CQ_PERIOD_MODE_START_FROM_EQE: + return MLX5_CQ_PERIOD_MODE_START_FROM_EQE; + case DIM_CQ_PERIOD_MODE_START_FROM_CQE: + return MLX5_CQ_PERIOD_MODE_START_FROM_CQE; + default: + WARN_ON_ONCE(true); + return MLX5_CQ_PERIOD_MODE_START_FROM_EQE; + } +} + +void mlx5e_rx_dim_work(struct work_struct *work); +void mlx5e_tx_dim_work(struct work_struct *work); +int mlx5e_dim_rx_change(struct mlx5e_rq *rq, bool enabled); +int mlx5e_dim_tx_change(struct mlx5e_txqsq *sq, bool enabled); + +#endif /* __MLX5_EN_DIM_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index a3f31d9d527e..ec819dfc98be 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -6,6 +6,7 @@ #include "en/port.h" #include "en_accel/en_accel.h" #include "en_accel/ipsec.h" +#include <linux/dim.h> #include <net/page_pool/types.h> #include <net/xdp_sock_drv.h> @@ -513,77 +514,6 @@ int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *param return 0; } -static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode) -{ - struct dim_cq_moder moder = {}; - - moder.cq_period_mode = cq_period_mode; - moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; - moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; - if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) - moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE; - - return moder; -} - -static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode) -{ - struct dim_cq_moder moder = {}; - - moder.cq_period_mode = cq_period_mode; - moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; - moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; - if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) - moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE; - - return moder; -} - -static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode) -{ - return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ? 
- DIM_CQ_PERIOD_MODE_START_FROM_CQE : - DIM_CQ_PERIOD_MODE_START_FROM_EQE; -} - -void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode) -{ - if (params->tx_dim_enabled) { - u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); - - params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode); - } else { - params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode); - } -} - -void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode) -{ - if (params->rx_dim_enabled) { - u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); - - params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode); - } else { - params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode); - } -} - -void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) -{ - mlx5e_reset_tx_moderation(params, cq_period_mode); - MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER, - params->tx_cq_moderation.cq_period_mode == - MLX5_CQ_PERIOD_MODE_START_FROM_CQE); -} - -void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) -{ - mlx5e_reset_rx_moderation(params, cq_period_mode); - MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER, - params->rx_cq_moderation.cq_period_mode == - MLX5_CQ_PERIOD_MODE_START_FROM_CQE); -} - bool slow_pci_heuristic(struct mlx5_core_dev *mdev) { u32 link_speed = 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h index 9a781f18b57f..749b2ec0436e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h @@ -77,11 +77,6 @@ u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift, /* Parameter calculations */ -void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode); -void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode); -void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); -void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); - bool slow_pci_heuristic(struct mlx5_core_dev *mdev); int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params); int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c index dbe2b19a9570..b4efc780e297 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c @@ -292,10 +292,15 @@ enum mlx5e_fec_supported_link_mode { MLX5E_FEC_SUPPORTED_LINK_MODE_100G_2X, MLX5E_FEC_SUPPORTED_LINK_MODE_200G_4X, MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X, + MLX5E_FEC_SUPPORTED_LINK_MODE_100G_1X, + MLX5E_FEC_SUPPORTED_LINK_MODE_200G_2X, + MLX5E_FEC_SUPPORTED_LINK_MODE_400G_4X, + MLX5E_FEC_SUPPORTED_LINK_MODE_800G_8X, MLX5E_MAX_FEC_SUPPORTED_LINK_MODE, }; #define MLX5E_FEC_FIRST_50G_PER_LANE_MODE MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X +#define MLX5E_FEC_FIRST_100G_PER_LANE_MODE MLX5E_FEC_SUPPORTED_LINK_MODE_100G_1X #define MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, policy, write, link) \ do { \ @@ -308,6 +313,17 @@ enum mlx5e_fec_supported_link_mode { *_policy = MLX5_GET(pplm_reg, _buf, fec_override_admin_##link); \ } while (0) +/* Returns true if FEC can be set for a given link mode. 
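The FEC hunk above extends the supported-link-mode enum with 100G-per-lane entries (100G_1X, 200G_2X, 400G_4X, 800G_8X) and introduces, just below, an mlx5e_is_fec_supported_link_mode() helper that gates the per-lane groups on the fec_50G_per_lane_in_pplm / fec_100G_per_lane_in_pplm capabilities instead of a single 50G check. A self-contained sketch of that ordered-enum-plus-capability test; the enum values and names here are illustrative, not the driver's:

#include <stdbool.h>
#include <stdio.h>

/* Ordered as in the driver: legacy modes first, then 50G-per-lane modes,
 * then 100G-per-lane modes.
 */
enum fec_link_mode {
	FEC_MODE_10G_40G,
	FEC_MODE_25G,
	FEC_MODE_50G_1X,	/* first 50G-per-lane mode */
	FEC_MODE_100G_2X,
	FEC_MODE_100G_1X,	/* first 100G-per-lane mode */
	FEC_MODE_200G_2X,
	FEC_MODE_MAX,
};

#define FIRST_50G_PER_LANE	FEC_MODE_50G_1X
#define FIRST_100G_PER_LANE	FEC_MODE_100G_1X

/* FEC override is configurable for a mode when the mode predates the
 * per-lane groups, or when the matching capability bit is set.
 */
static bool fec_mode_supported(enum fec_link_mode mode,
			       bool cap_50g_per_lane, bool cap_100g_per_lane)
{
	return mode < FIRST_50G_PER_LANE ||
	       (mode < FIRST_100G_PER_LANE && cap_50g_per_lane) ||
	       (mode >= FIRST_100G_PER_LANE && cap_100g_per_lane);
}

int main(void)
{
	printf("%d\n", fec_mode_supported(FEC_MODE_200G_2X, true, false)); /* 0 */
	printf("%d\n", fec_mode_supported(FEC_MODE_100G_2X, true, false)); /* 1 */
	return 0;
}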
*/ +static bool mlx5e_is_fec_supported_link_mode(struct mlx5_core_dev *dev, + enum mlx5e_fec_supported_link_mode link_mode) +{ + return link_mode < MLX5E_FEC_FIRST_50G_PER_LANE_MODE || + (link_mode < MLX5E_FEC_FIRST_100G_PER_LANE_MODE && + MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm)) || + (link_mode >= MLX5E_FEC_FIRST_100G_PER_LANE_MODE && + MLX5_CAP_PCAM_FEATURE(dev, fec_100G_per_lane_in_pplm)); +} + /* get/set FEC admin field for a given speed */ static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write, enum mlx5e_fec_supported_link_mode link_mode) @@ -340,6 +356,18 @@ static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write, case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X: MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 400g_8x); break; + case MLX5E_FEC_SUPPORTED_LINK_MODE_100G_1X: + MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 100g_1x); + break; + case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_2X: + MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 200g_2x); + break; + case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_4X: + MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 400g_4x); + break; + case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_8X: + MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 800g_8x); + break; default: return -EINVAL; } @@ -381,6 +409,18 @@ static int mlx5e_get_fec_cap_field(u32 *pplm, u16 *fec_cap, case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X: *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 400g_8x); break; + case MLX5E_FEC_SUPPORTED_LINK_MODE_100G_1X: + *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 100g_1x); + break; + case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_2X: + *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 200g_2x); + break; + case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_4X: + *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 400g_4x); + break; + case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_8X: + *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 800g_8x); + break; default: return -EINVAL; } @@ -389,7 +429,6 @@ static int mlx5e_get_fec_cap_field(u32 *pplm, u16 *fec_cap, bool mlx5e_fec_in_caps(struct mlx5_core_dev *dev, int fec_policy) { - bool fec_50g_per_lane = MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm); u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {}; u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {}; int sz = MLX5_ST_SZ_BYTES(pplm_reg); @@ -407,7 +446,7 @@ bool mlx5e_fec_in_caps(struct mlx5_core_dev *dev, int fec_policy) for (i = 0; i < MLX5E_MAX_FEC_SUPPORTED_LINK_MODE; i++) { u16 fec_caps; - if (i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE && !fec_50g_per_lane) + if (!mlx5e_is_fec_supported_link_mode(dev, i)) break; mlx5e_get_fec_cap_field(out, &fec_caps, i); @@ -420,7 +459,6 @@ bool mlx5e_fec_in_caps(struct mlx5_core_dev *dev, int fec_policy) int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active, u16 *fec_configured_mode) { - bool fec_50g_per_lane = MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm); u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {}; u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {}; int sz = MLX5_ST_SZ_BYTES(pplm_reg); @@ -445,7 +483,7 @@ int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active, *fec_configured_mode = 0; for (i = 0; i < MLX5E_MAX_FEC_SUPPORTED_LINK_MODE; i++) { - if (i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE && !fec_50g_per_lane) + if (!mlx5e_is_fec_supported_link_mode(dev, i)) break; mlx5e_fec_admin_field(out, fec_configured_mode, 0, i); @@ -489,13 +527,13 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u16 fec_policy) u16 conf_fec = fec_policy; u16 fec_caps = 0; - if (i >= 
MLX5E_FEC_FIRST_50G_PER_LANE_MODE && !fec_50g_per_lane) + if (!mlx5e_is_fec_supported_link_mode(dev, i)) break; /* RS fec in ethtool is mapped to MLX5E_FEC_RS_528_514 * to link modes up to 25G per lane and to * MLX5E_FEC_RS_544_514 in the new link modes based on - * 50 G per lane + * 50G or 100G per lane */ if (conf_fec == (1 << MLX5E_FEC_RS_528_514) && i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c index d0af7271da34..afd654583b6b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c @@ -169,6 +169,7 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq, WARN_ON_ONCE(!pos->inuse); pos->inuse = false; list_del(&pos->entry); + ptpsq->cq_stats->lost_cqe++; } spin_unlock_bh(&cqe_list->tracker_list_lock); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h index 92065568bb19..6873c1201803 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h @@ -117,7 +117,7 @@ bool mlx5e_tc_tun_encap_info_equal_generic(struct mlx5e_encap_key *a, bool mlx5e_tc_tun_encap_info_equal_options(struct mlx5e_encap_key *a, struct mlx5e_encap_key *b, - __be16 tun_flags); + u32 tun_type); #endif /* CONFIG_MLX5_ESWITCH */ #endif //__MLX5_EN_TC_TUNNEL_H__ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c index f1d1e1542e81..878cbdbf5ec8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c @@ -587,7 +587,7 @@ bool mlx5e_tc_tun_encap_info_equal_generic(struct mlx5e_encap_key *a, bool mlx5e_tc_tun_encap_info_equal_options(struct mlx5e_encap_key *a, struct mlx5e_encap_key *b, - __be16 tun_flags) + u32 tun_type) { struct ip_tunnel_info *a_info; struct ip_tunnel_info *b_info; @@ -596,8 +596,8 @@ bool mlx5e_tc_tun_encap_info_equal_options(struct mlx5e_encap_key *a, if (!mlx5e_tc_tun_encap_info_equal_generic(a, b)) return false; - a_has_opts = !!(a->ip_tun_key->tun_flags & tun_flags); - b_has_opts = !!(b->ip_tun_key->tun_flags & tun_flags); + a_has_opts = test_bit(tun_type, a->ip_tun_key->tun_flags); + b_has_opts = test_bit(tun_type, b->ip_tun_key->tun_flags); /* keys are equal when both don't have any options attached */ if (!a_has_opts && !b_has_opts) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c index 2bcd10b6d653..bf969212cc77 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c @@ -106,12 +106,13 @@ static int mlx5e_gen_ip_tunnel_header_geneve(char buf[], memset(geneveh, 0, sizeof(*geneveh)); geneveh->ver = MLX5E_GENEVE_VER; geneveh->opt_len = tun_info->options_len / 4; - geneveh->oam = !!(tun_info->key.tun_flags & TUNNEL_OAM); - geneveh->critical = !!(tun_info->key.tun_flags & TUNNEL_CRIT_OPT); + geneveh->oam = test_bit(IP_TUNNEL_OAM_BIT, tun_info->key.tun_flags); + geneveh->critical = test_bit(IP_TUNNEL_CRIT_OPT_BIT, + tun_info->key.tun_flags); mlx5e_tunnel_id_to_vni(tun_info->key.tun_id, geneveh->vni); geneveh->proto_type = htons(ETH_P_TEB); - if (tun_info->key.tun_flags & TUNNEL_GENEVE_OPT) { + if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, tun_info->key.tun_flags)) { if 
(!geneveh->opt_len) return -EOPNOTSUPP; ip_tunnel_info_opts_get(geneveh->options, tun_info); @@ -188,7 +189,7 @@ static int mlx5e_tc_tun_parse_geneve_options(struct mlx5e_priv *priv, /* make sure that we're talking about GENEVE options */ - if (enc_opts.key->dst_opt_type != TUNNEL_GENEVE_OPT) { + if (enc_opts.key->dst_opt_type != IP_TUNNEL_GENEVE_OPT_BIT) { NL_SET_ERR_MSG_MOD(extack, "Matching on GENEVE options: option type is not GENEVE"); netdev_warn(priv->netdev, @@ -337,7 +338,8 @@ static int mlx5e_tc_tun_parse_geneve(struct mlx5e_priv *priv, static bool mlx5e_tc_tun_encap_info_equal_geneve(struct mlx5e_encap_key *a, struct mlx5e_encap_key *b) { - return mlx5e_tc_tun_encap_info_equal_options(a, b, TUNNEL_GENEVE_OPT); + return mlx5e_tc_tun_encap_info_equal_options(a, b, + IP_TUNNEL_GENEVE_OPT_BIT); } struct mlx5e_tc_tunnel geneve_tunnel = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c index ada14f0574dc..579eda89fc76 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c @@ -31,12 +31,16 @@ static int mlx5e_gen_ip_tunnel_header_gretap(char buf[], const struct ip_tunnel_key *tun_key = &e->tun_info->key; struct gre_base_hdr *greh = (struct gre_base_hdr *)(buf); __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id); + IP_TUNNEL_DECLARE_FLAGS(unsupp) = { }; int hdr_len; *ip_proto = IPPROTO_GRE; /* the HW does not calculate GRE csum or sequences */ - if (tun_key->tun_flags & (TUNNEL_CSUM | TUNNEL_SEQ)) + __set_bit(IP_TUNNEL_CSUM_BIT, unsupp); + __set_bit(IP_TUNNEL_SEQ_BIT, unsupp); + + if (ip_tunnel_flags_intersect(tun_key->tun_flags, unsupp)) return -EOPNOTSUPP; greh->protocol = htons(ETH_P_TEB); @@ -44,7 +48,7 @@ static int mlx5e_gen_ip_tunnel_header_gretap(char buf[], /* GRE key */ hdr_len = mlx5e_tc_tun_calc_hlen_gretap(e); greh->flags = gre_tnl_flags_to_gre_flags(tun_key->tun_flags); - if (tun_key->tun_flags & TUNNEL_KEY) { + if (test_bit(IP_TUNNEL_KEY_BIT, tun_key->tun_flags)) { __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4); *ptr = tun_id; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c index a184d739d5f8..e4e487c8431b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c @@ -90,7 +90,7 @@ static int mlx5e_gen_ip_tunnel_header_vxlan(char buf[], const struct vxlan_metadata *md; struct vxlanhdr *vxh; - if ((tun_key->tun_flags & TUNNEL_VXLAN_OPT) && + if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, tun_key->tun_flags) && e->tun_info->options_len != sizeof(*md)) return -EOPNOTSUPP; vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr)); @@ -99,7 +99,7 @@ static int mlx5e_gen_ip_tunnel_header_vxlan(char buf[], udp->dest = tun_key->tp_dst; vxh->vx_flags = VXLAN_HF_VNI; vxh->vx_vni = vxlan_vni_field(tun_id); - if (tun_key->tun_flags & TUNNEL_VXLAN_OPT) { + if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, tun_key->tun_flags)) { md = ip_tunnel_info_opts(e->tun_info); vxlan_build_gbp_hdr(vxh, md); } @@ -125,7 +125,7 @@ static int mlx5e_tc_tun_parse_vxlan_gbp_option(struct mlx5e_priv *priv, return -EOPNOTSUPP; } - if (enc_opts.key->dst_opt_type != TUNNEL_VXLAN_OPT) { + if (enc_opts.key->dst_opt_type != IP_TUNNEL_VXLAN_OPT_BIT) { NL_SET_ERR_MSG_MOD(extack, "Wrong VxLAN option type: not GBP"); return -EOPNOTSUPP; } @@ -208,7 +208,8 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv 
*priv, static bool mlx5e_tc_tun_encap_info_equal_vxlan(struct mlx5e_encap_key *a, struct mlx5e_encap_key *b) { - return mlx5e_tc_tun_encap_info_equal_options(a, b, TUNNEL_VXLAN_OPT); + return mlx5e_tc_tun_encap_info_equal_options(a, b, + IP_TUNNEL_VXLAN_OPT_BIT); } static int mlx5e_tc_tun_get_remote_ifindex(struct net_device *mirred_dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 82b5ca1be4f3..4610621a340e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -565,7 +565,7 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd, linear = !!(dma_len - inline_hdr_sz); ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + linear + !!inline_hdr_sz; - /* check_result must be 0 if sinfo is passed. */ + /* check_result must be 0 if xdptxd->has_frags is true. */ if (!check_result) { int stop_room = 1; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c index 06592b9f0424..9240cfe25d10 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c @@ -28,8 +28,10 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk, struct mlx5_core_dev *mdev) { - /* AF_XDP doesn't support frames larger than PAGE_SIZE. */ - if (xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) { + /* AF_XDP doesn't support frames larger than PAGE_SIZE, + * and xsk->chunk_size is limited to 65535 bytes. + */ + if ((size_t)xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) { mlx5_core_err(mdev, "XSK chunk size %u out of bounds [%u, %lu]\n", xsk->chunk_size, MLX5E_MIN_XSK_CHUNK_SIZE, PAGE_SIZE); return false; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c index c7d191f66ad1..4f83e3172767 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c @@ -73,7 +73,7 @@ void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule) struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs, struct sock *sk, u32 tirn, - uint32_t flow_tag) + u32 flow_tag) { struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(fs); struct mlx5_flow_destination dest = {}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h index a032bff482a6..7e899c716267 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h @@ -11,14 +11,14 @@ int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs); void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs); struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs, struct sock *sk, u32 tirn, - uint32_t flow_tag); + u32 flow_tag); void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule); #else static inline int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs) { return 0; } static inline void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs) {} static inline struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs, struct sock *sk, u32 tirn, - uint32_t flow_tag) + u32 flow_tag) { return ERR_PTR(-EOPNOTSUPP); } static inline void mlx5e_accel_fs_del_sk(struct 
mlx5_flow_handle *rule) {} #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c index dd36b04e30a0..92bf3fa44a3b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c @@ -78,13 +78,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_hw) unsigned int i; if (!priv->ipsec) - return idx; + return; for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - mlx5e_ipsec_hw_stats_desc[i].format); - - return idx; + ethtool_puts(data, mlx5e_ipsec_hw_stats_desc[i].format); } static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_hw) @@ -92,14 +89,14 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_hw) int i; if (!priv->ipsec) - return idx; + return; mlx5e_accel_ipsec_fs_read_stats(priv, &priv->ipsec->hw_stats); for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++) - data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->hw_stats, - mlx5e_ipsec_hw_stats_desc, i); - - return idx; + mlx5e_ethtool_put_stat( + data, + MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->hw_stats, + mlx5e_ipsec_hw_stats_desc, i)); } static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw) @@ -115,9 +112,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_sw) if (priv->ipsec) for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - mlx5e_ipsec_sw_stats_desc[i].format); - return idx; + ethtool_puts(data, mlx5e_ipsec_sw_stats_desc[i].format); } static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw) @@ -126,9 +121,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw) if (priv->ipsec) for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++) - data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->sw_stats, - mlx5e_ipsec_sw_stats_desc, i); - return idx; + mlx5e_ethtool_put_stat( + data, MLX5E_READ_CTR_ATOMIC64( + &priv->ipsec->sw_stats, + mlx5e_ipsec_sw_stats_desc, i)); } MLX5E_DEFINE_STATS_GRP(ipsec_hw, 0); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h index adc6d8ea0960..07a04a142a2e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h @@ -95,8 +95,8 @@ int mlx5e_ktls_init(struct mlx5e_priv *priv); void mlx5e_ktls_cleanup(struct mlx5e_priv *priv); int mlx5e_ktls_get_count(struct mlx5e_priv *priv); -int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data); -int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data); +void mlx5e_ktls_get_strings(struct mlx5e_priv *priv, u8 **data); +void mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 **data); #else static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv) @@ -144,15 +144,9 @@ static inline bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev) static inline int mlx5e_ktls_init(struct mlx5e_priv *priv) { return 0; } static inline void mlx5e_ktls_cleanup(struct mlx5e_priv *priv) { } static inline int mlx5e_ktls_get_count(struct mlx5e_priv *priv) { return 0; } -static inline int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data) -{ - return 0; -} +static inline void mlx5e_ktls_get_strings(struct mlx5e_priv *priv, u8 **data) { } -static inline int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data) -{ - return 0; -} +static inline void mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 **data) { } #endif #endif /* __MLX5E_TLS_H__ */ diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c index 7c1c0eb16787..60be2d72eb9e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c @@ -58,35 +58,31 @@ int mlx5e_ktls_get_count(struct mlx5e_priv *priv) return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc); } -int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data) +void mlx5e_ktls_get_strings(struct mlx5e_priv *priv, u8 **data) { - unsigned int i, n, idx = 0; + unsigned int i, n; if (!priv->tls) - return 0; + return; n = mlx5e_ktls_get_count(priv); for (i = 0; i < n; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - mlx5e_ktls_sw_stats_desc[i].format); - - return n; + ethtool_puts(data, mlx5e_ktls_sw_stats_desc[i].format); } -int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data) +void mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 **data) { - unsigned int i, n, idx = 0; + unsigned int i, n; if (!priv->tls) - return 0; + return; n = mlx5e_ktls_get_count(priv); for (i = 0; i < n; i++) - data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats, - mlx5e_ktls_sw_stats_desc, - i); - - return n; + mlx5e_ethtool_put_stat( + data, + MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats, + mlx5e_ktls_sw_stats_desc, i)); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c index 4559ee16a11a..4bb47d48061d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c @@ -38,16 +38,13 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(macsec_hw) unsigned int i; if (!priv->macsec) - return idx; + return; if (!mlx5e_is_macsec_device(priv->mdev)) - return idx; + return; for (i = 0; i < NUM_MACSEC_HW_COUNTERS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - mlx5e_macsec_hw_stats_desc[i].format); - - return idx; + ethtool_puts(data, mlx5e_macsec_hw_stats_desc[i].format); } static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(macsec_hw) @@ -56,19 +53,18 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(macsec_hw) int i; if (!priv->macsec) - return idx; + return; if (!mlx5e_is_macsec_device(priv->mdev)) - return idx; + return; macsec_fs = priv->mdev->macsec_fs; mlx5_macsec_fs_get_stats_fill(macsec_fs, mlx5_macsec_fs_get_stats(macsec_fs)); for (i = 0; i < NUM_MACSEC_HW_COUNTERS; i++) - data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_macsec_fs_get_stats(macsec_fs), - mlx5e_macsec_hw_stats_desc, - i); - - return idx; + mlx5e_ethtool_put_stat( + data, MLX5E_READ_CTR64_CPU( + mlx5_macsec_fs_get_stats(macsec_fs), + mlx5e_macsec_hw_stats_desc, i)); } MLX5E_DEFINE_STATS_GRP(macsec_hw, 0); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c index ca9cfbf57d8f..298bb74ec5e9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c @@ -30,21 +30,22 @@ * SOFTWARE. 
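The ipsec/ktls/macsec stats hunks above drop the returned-index convention: instead of each group returning how many strings or counters it wrote, the fill callbacks now take a cursor (u8 **data / u64 **data) that helpers such as ethtool_puts() and mlx5e_ethtool_put_stat() advance. A small stand-alone illustration of the cursor idea follows; the helper names and the string width are made up for the example.

#include <stdio.h>
#include <string.h>

#define GSTRING_LEN 32

/* Copy a fixed-width string and advance the caller's cursor, in the
 * spirit of ethtool_puts(); hypothetical name and width.
 */
static void put_string(char **data, const char *s)
{
	strncpy(*data, s, GSTRING_LEN - 1);
	(*data)[GSTRING_LEN - 1] = '\0';
	*data += GSTRING_LEN;
}

/* Store one counter and advance the stats cursor. */
static void put_stat(unsigned long long **data, unsigned long long v)
{
	*(*data)++ = v;
}

int main(void)
{
	char strings[3 * GSTRING_LEN] = { 0 };
	unsigned long long stats[3];
	char *sp = strings;
	unsigned long long *vp = stats;

	put_string(&sp, "rx_packets");
	put_string(&sp, "tx_packets");
	put_stat(&vp, 1000);
	put_stat(&vp, 2000);

	printf("%s / %s, %llu %llu\n", strings, strings + GSTRING_LEN,
	       stats[0], stats[1]);
	return 0;
}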
*/ -#include <linux/dim.h> #include "en.h" +#include "en/dim.h" static void mlx5e_complete_dim_work(struct dim *dim, struct dim_cq_moder moder, struct mlx5_core_dev *mdev, struct mlx5_core_cq *mcq) { - mlx5_core_modify_cq_moderation(mdev, mcq, moder.usec, moder.pkts); + mlx5e_modify_cq_moderation(mdev, mcq, moder.usec, moder.pkts, + mlx5e_cq_period_mode(moder.cq_period_mode)); dim->state = DIM_START_MEASURE; } void mlx5e_rx_dim_work(struct work_struct *work) { struct dim *dim = container_of(work, struct dim, work); - struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim); + struct mlx5e_rq *rq = dim->priv; struct dim_cq_moder cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix); @@ -54,9 +55,95 @@ void mlx5e_rx_dim_work(struct work_struct *work) void mlx5e_tx_dim_work(struct work_struct *work) { struct dim *dim = container_of(work, struct dim, work); - struct mlx5e_txqsq *sq = container_of(dim, struct mlx5e_txqsq, dim); + struct mlx5e_txqsq *sq = dim->priv; struct dim_cq_moder cur_moder = net_dim_get_tx_moderation(dim->mode, dim->profile_ix); mlx5e_complete_dim_work(dim, cur_moder, sq->cq.mdev, &sq->cq.mcq); } + +static struct dim *mlx5e_dim_enable(struct mlx5_core_dev *mdev, + void (*work_fun)(struct work_struct *), int cpu, + u8 cq_period_mode, struct mlx5_core_cq *mcq, + void *queue) +{ + struct dim *dim; + int err; + + dim = kvzalloc_node(sizeof(*dim), GFP_KERNEL, cpu_to_node(cpu)); + if (!dim) + return ERR_PTR(-ENOMEM); + + INIT_WORK(&dim->work, work_fun); + + dim->mode = cq_period_mode; + dim->priv = queue; + + err = mlx5e_modify_cq_period_mode(mdev, mcq, dim->mode); + if (err) { + kvfree(dim); + return ERR_PTR(err); + } + + return dim; +} + +static void mlx5e_dim_disable(struct dim *dim) +{ + cancel_work_sync(&dim->work); + kvfree(dim); +} + +int mlx5e_dim_rx_change(struct mlx5e_rq *rq, bool enable) +{ + if (enable == !!rq->dim) + return 0; + + if (enable) { + struct mlx5e_channel *c = rq->channel; + struct dim *dim; + + dim = mlx5e_dim_enable(rq->mdev, mlx5e_rx_dim_work, c->cpu, + c->rx_cq_moder.cq_period_mode, &rq->cq.mcq, rq); + if (IS_ERR(dim)) + return PTR_ERR(dim); + + rq->dim = dim; + + __set_bit(MLX5E_RQ_STATE_DIM, &rq->state); + } else { + __clear_bit(MLX5E_RQ_STATE_DIM, &rq->state); + + mlx5e_dim_disable(rq->dim); + rq->dim = NULL; + } + + return 0; +} + +int mlx5e_dim_tx_change(struct mlx5e_txqsq *sq, bool enable) +{ + if (enable == !!sq->dim) + return 0; + + if (enable) { + struct mlx5e_channel *c = sq->channel; + struct dim *dim; + + dim = mlx5e_dim_enable(sq->mdev, mlx5e_tx_dim_work, c->cpu, + c->tx_cq_moder.cq_period_mode, &sq->cq.mcq, sq); + if (IS_ERR(dim)) + return PTR_ERR(dim); + + sq->dim = dim; + + __set_bit(MLX5E_SQ_STATE_DIM, &sq->state); + } else { + __clear_bit(MLX5E_SQ_STATE_DIM, &sq->state); + + mlx5e_dim_disable(sq->dim); + sq->dim = NULL; + } + + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 67a29826bb57..3320f12ba2db 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -30,9 +30,12 @@ * SOFTWARE. 
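en_dim.c above switches the RX/TX DIM work handlers from container_of() to dim->priv: struct dim used to be embedded in the RQ/SQ, but it is now allocated on demand by mlx5e_dim_enable() and freed by mlx5e_dim_disable(), so the owning queue has to be stored as an explicit back-pointer. A toy example of why the embedded-member trick stops working once the object lives on its own (all names invented):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dim_ctx {
	int profile;
	void *priv;		/* back-pointer, needed once dim_ctx is separate */
};

struct queue_embedded {
	int id;
	struct dim_ctx dim;	/* embedded: container_of() is valid */
};

struct queue_split {
	int id;
	struct dim_ctx *dim;	/* heap-allocated: must use dim->priv instead */
};

int main(void)
{
	struct queue_embedded qe = { .id = 1 };
	struct queue_split qs = { .id = 2 };
	struct queue_embedded *back1;
	struct queue_split *back2;

	qs.dim = malloc(sizeof(*qs.dim));
	qs.dim->priv = &qs;

	back1 = container_of(&qe.dim, struct queue_embedded, dim);
	back2 = qs.dim->priv;

	printf("%d %d\n", back1->id, back2->id);
	free(qs.dim);
	return 0;
}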
*/ +#include <linux/dim.h> #include <linux/ethtool_netlink.h> #include "en.h" +#include "en/channels.h" +#include "en/dim.h" #include "en/port.h" #include "en/params.h" #include "en/ptp.h" @@ -219,6 +222,13 @@ void mlx5e_build_ptys2ethtool_map(void) ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT, ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT, ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_800GAUI_8_800GBASE_CR8_KR8, ext, + ETHTOOL_LINK_MODE_800000baseCR8_Full_BIT, + ETHTOOL_LINK_MODE_800000baseKR8_Full_BIT, + ETHTOOL_LINK_MODE_800000baseDR8_Full_BIT, + ETHTOOL_LINK_MODE_800000baseDR8_2_Full_BIT, + ETHTOOL_LINK_MODE_800000baseSR8_Full_BIT, + ETHTOOL_LINK_MODE_800000baseVR8_Full_BIT); } static void mlx5e_ethtool_get_speed_arr(struct mlx5_core_dev *mdev, @@ -269,8 +279,7 @@ void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data) switch (stringset) { case ETH_SS_PRIV_FLAGS: for (i = 0; i < MLX5E_NUM_PFLAGS; i++) - strcpy(data + i * ETH_GSTRING_LEN, - mlx5e_priv_flags[i].name); + ethtool_puts(&data, mlx5e_priv_flags[i].name); break; case ETH_SS_TEST: @@ -559,16 +568,13 @@ int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv, coal->rx_coalesce_usecs = rx_moder->usec; coal->rx_max_coalesced_frames = rx_moder->pkts; coal->use_adaptive_rx_coalesce = priv->channels.params.rx_dim_enabled; + kernel_coal->use_cqe_mode_rx = priv->channels.params.rx_moder_use_cqe_mode; tx_moder = &priv->channels.params.tx_cq_moderation; coal->tx_coalesce_usecs = tx_moder->usec; coal->tx_max_coalesced_frames = tx_moder->pkts; coal->use_adaptive_tx_coalesce = priv->channels.params.tx_dim_enabled; - - kernel_coal->use_cqe_mode_rx = - MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_BASED_MODER); - kernel_coal->use_cqe_mode_tx = - MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_CQE_BASED_MODER); + kernel_coal->use_cqe_mode_tx = priv->channels.params.tx_moder_use_cqe_mode; return 0; } @@ -583,11 +589,73 @@ static int mlx5e_get_coalesce(struct net_device *netdev, return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal); } +static int mlx5e_ethtool_get_per_queue_coalesce(struct mlx5e_priv *priv, u32 queue, + struct ethtool_coalesce *coal) +{ + struct dim_cq_moder cur_moder; + struct mlx5e_channels *chs; + struct mlx5e_channel *c; + + if (!MLX5_CAP_GEN(priv->mdev, cq_moderation)) + return -EOPNOTSUPP; + + mutex_lock(&priv->state_lock); + + chs = &priv->channels; + if (chs->num <= queue) { + mutex_unlock(&priv->state_lock); + return -EINVAL; + } + + c = chs->c[queue]; + + coal->use_adaptive_rx_coalesce = !!c->rq.dim; + if (coal->use_adaptive_rx_coalesce) { + cur_moder = net_dim_get_rx_moderation(c->rq.dim->mode, + c->rq.dim->profile_ix); + + coal->rx_coalesce_usecs = cur_moder.usec; + coal->rx_max_coalesced_frames = cur_moder.pkts; + } else { + coal->rx_coalesce_usecs = c->rx_cq_moder.usec; + coal->rx_max_coalesced_frames = c->rx_cq_moder.pkts; + } + + coal->use_adaptive_tx_coalesce = !!c->sq[0].dim; + if (coal->use_adaptive_tx_coalesce) { + /* NOTE: Will only display DIM coalesce profile information of + * first channel. The current interface cannot display this + * information for all tc. 
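The new per-queue ethtool path above reports what a queue is actually using: when adaptive moderation is running it reads the current DIM profile via net_dim_get_rx_moderation()/net_dim_get_tx_moderation(), otherwise it falls back to the channel's stored rx_cq_moder/tx_cq_moder values. A minimal sketch of that selection, with invented structures standing in for the channel state:

#include <stdbool.h>
#include <stdio.h>

struct moder {
	unsigned int usec;
	unsigned int pkts;
};

struct channel {
	bool adaptive;		/* DIM running on this queue? */
	struct moder dim_current;	/* what DIM currently applies */
	struct moder configured;	/* static, user-configured values */
};

/* Report the live DIM profile when adaptive moderation is on, the
 * static configuration otherwise; names are illustrative only.
 */
static struct moder queue_get_coalesce(const struct channel *c)
{
	return c->adaptive ? c->dim_current : c->configured;
}

int main(void)
{
	struct channel c = {
		.adaptive = true,
		.dim_current = { .usec = 8, .pkts = 128 },
		.configured  = { .usec = 32, .pkts = 64 },
	};
	struct moder m = queue_get_coalesce(&c);

	printf("usec=%u pkts=%u\n", m.usec, m.pkts);
	return 0;
}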
+ */ + cur_moder = net_dim_get_tx_moderation(c->sq[0].dim->mode, + c->sq[0].dim->profile_ix); + + coal->tx_coalesce_usecs = cur_moder.usec; + coal->tx_max_coalesced_frames = cur_moder.pkts; + + } else { + coal->tx_coalesce_usecs = c->tx_cq_moder.usec; + coal->tx_max_coalesced_frames = c->tx_cq_moder.pkts; + } + + mutex_unlock(&priv->state_lock); + + return 0; +} + +int mlx5e_get_per_queue_coalesce(struct net_device *dev, u32 queue, + struct ethtool_coalesce *coal) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + return mlx5e_ethtool_get_per_queue_coalesce(priv, queue, coal); +} + #define MLX5E_MAX_COAL_TIME MLX5_MAX_CQ_PERIOD #define MLX5E_MAX_COAL_FRAMES MLX5_MAX_CQ_COUNT static void -mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal) +mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct dim_cq_moder *moder) { int tc; int i; @@ -595,38 +663,35 @@ mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct ethtool_coal for (i = 0; i < priv->channels.num; ++i) { struct mlx5e_channel *c = priv->channels.c[i]; struct mlx5_core_dev *mdev = c->mdev; + enum mlx5_cq_period_mode mode; + + mode = mlx5e_cq_period_mode(moder->cq_period_mode); + c->tx_cq_moder = *moder; for (tc = 0; tc < c->num_tc; tc++) { - mlx5_core_modify_cq_moderation(mdev, - &c->sq[tc].cq.mcq, - coal->tx_coalesce_usecs, - coal->tx_max_coalesced_frames); + mlx5e_modify_cq_moderation(mdev, &c->sq[tc].cq.mcq, + moder->usec, moder->pkts, + mode); } } } static void -mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal) +mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct dim_cq_moder *moder) { int i; for (i = 0; i < priv->channels.num; ++i) { struct mlx5e_channel *c = priv->channels.c[i]; struct mlx5_core_dev *mdev = c->mdev; + enum mlx5_cq_period_mode mode; - mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq, - coal->rx_coalesce_usecs, - coal->rx_max_coalesced_frames); - } -} + mode = mlx5e_cq_period_mode(moder->cq_period_mode); + c->rx_cq_moder = *moder; -/* convert a boolean value of cq_mode to mlx5 period mode - * true : MLX5_CQ_PERIOD_MODE_START_FROM_CQE - * false : MLX5_CQ_PERIOD_MODE_START_FROM_EQE - */ -static int cqe_mode_to_period_mode(bool val) -{ - return val ? 
MLX5_CQ_PERIOD_MODE_START_FROM_CQE : MLX5_CQ_PERIOD_MODE_START_FROM_EQE; + mlx5e_modify_cq_moderation(mdev, &c->rq.cq.mcq, moder->usec, moder->pkts, + mode); + } } int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, @@ -636,13 +701,14 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, { struct dim_cq_moder *rx_moder, *tx_moder; struct mlx5_core_dev *mdev = priv->mdev; + bool rx_dim_enabled, tx_dim_enabled; struct mlx5e_params new_params; bool reset_rx, reset_tx; - bool reset = true; u8 cq_period_mode; int err = 0; - if (!MLX5_CAP_GEN(mdev, cq_moderation)) + if (!MLX5_CAP_GEN(mdev, cq_moderation) || + !MLX5_CAP_GEN(mdev, cq_period_mode_modify)) return -EOPNOTSUPP; if (coal->tx_coalesce_usecs > MLX5E_MAX_COAL_TIME || @@ -665,60 +731,70 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, return -EOPNOTSUPP; } + rx_dim_enabled = !!coal->use_adaptive_rx_coalesce; + tx_dim_enabled = !!coal->use_adaptive_tx_coalesce; + mutex_lock(&priv->state_lock); new_params = priv->channels.params; - rx_moder = &new_params.rx_cq_moderation; - rx_moder->usec = coal->rx_coalesce_usecs; - rx_moder->pkts = coal->rx_max_coalesced_frames; - new_params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce; + cq_period_mode = mlx5e_dim_cq_period_mode(kernel_coal->use_cqe_mode_rx); + reset_rx = mlx5e_reset_rx_channels_moderation(&priv->channels, cq_period_mode, + rx_dim_enabled, false); + MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_BASED_MODER, cq_period_mode); - tx_moder = &new_params.tx_cq_moderation; - tx_moder->usec = coal->tx_coalesce_usecs; - tx_moder->pkts = coal->tx_max_coalesced_frames; - new_params.tx_dim_enabled = !!coal->use_adaptive_tx_coalesce; + cq_period_mode = mlx5e_dim_cq_period_mode(kernel_coal->use_cqe_mode_tx); + reset_tx = mlx5e_reset_tx_channels_moderation(&priv->channels, cq_period_mode, + tx_dim_enabled, false); + MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_TX_CQE_BASED_MODER, cq_period_mode); - reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled; - reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled; + reset_rx |= rx_dim_enabled != new_params.rx_dim_enabled; + reset_tx |= tx_dim_enabled != new_params.tx_dim_enabled; - cq_period_mode = cqe_mode_to_period_mode(kernel_coal->use_cqe_mode_rx); - if (cq_period_mode != rx_moder->cq_period_mode) { - mlx5e_set_rx_cq_mode_params(&new_params, cq_period_mode); - reset_rx = true; - } + /* Solely used for global ethtool get coalesce */ + rx_moder = &new_params.rx_cq_moderation; + new_params.rx_dim_enabled = rx_dim_enabled; + new_params.rx_moder_use_cqe_mode = kernel_coal->use_cqe_mode_rx; - cq_period_mode = cqe_mode_to_period_mode(kernel_coal->use_cqe_mode_tx); - if (cq_period_mode != tx_moder->cq_period_mode) { - mlx5e_set_tx_cq_mode_params(&new_params, cq_period_mode); - reset_tx = true; - } + tx_moder = &new_params.tx_cq_moderation; + new_params.tx_dim_enabled = tx_dim_enabled; + new_params.tx_moder_use_cqe_mode = kernel_coal->use_cqe_mode_tx; if (reset_rx) { - u8 mode = MLX5E_GET_PFLAG(&new_params, - MLX5E_PFLAG_RX_CQE_BASED_MODER); + mlx5e_channels_rx_change_dim(&priv->channels, false); + mlx5e_reset_rx_moderation(rx_moder, new_params.rx_moder_use_cqe_mode, + rx_dim_enabled); + + mlx5e_set_priv_channels_rx_coalesce(priv, rx_moder); + } else if (!rx_dim_enabled) { + rx_moder->usec = coal->rx_coalesce_usecs; + rx_moder->pkts = coal->rx_max_coalesced_frames; - mlx5e_reset_rx_moderation(&new_params, mode); + mlx5e_set_priv_channels_rx_coalesce(priv, rx_moder); } + if (reset_tx) 
{ - u8 mode = MLX5E_GET_PFLAG(&new_params, - MLX5E_PFLAG_TX_CQE_BASED_MODER); + mlx5e_channels_tx_change_dim(&priv->channels, false); + mlx5e_reset_tx_moderation(tx_moder, new_params.tx_moder_use_cqe_mode, + tx_dim_enabled); - mlx5e_reset_tx_moderation(&new_params, mode); - } + mlx5e_set_priv_channels_tx_coalesce(priv, tx_moder); + } else if (!tx_dim_enabled) { + tx_moder->usec = coal->tx_coalesce_usecs; + tx_moder->pkts = coal->tx_max_coalesced_frames; - /* If DIM state hasn't changed, it's possible to modify interrupt - * moderation parameters on the fly, even if the channels are open. - */ - if (!reset_rx && !reset_tx && test_bit(MLX5E_STATE_OPENED, &priv->state)) { - if (!coal->use_adaptive_rx_coalesce) - mlx5e_set_priv_channels_rx_coalesce(priv, coal); - if (!coal->use_adaptive_tx_coalesce) - mlx5e_set_priv_channels_tx_coalesce(priv, coal); - reset = false; + mlx5e_set_priv_channels_tx_coalesce(priv, tx_moder); } - err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset); + /* DIM enable/disable Rx and Tx channels */ + err = mlx5e_channels_rx_change_dim(&priv->channels, rx_dim_enabled); + if (err) + goto state_unlock; + err = mlx5e_channels_tx_change_dim(&priv->channels, tx_dim_enabled); + if (err) + goto state_unlock; + err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, false); +state_unlock: mutex_unlock(&priv->state_lock); return err; } @@ -733,6 +809,88 @@ static int mlx5e_set_coalesce(struct net_device *netdev, return mlx5e_ethtool_set_coalesce(priv, coal, kernel_coal, extack); } +static int mlx5e_ethtool_set_per_queue_coalesce(struct mlx5e_priv *priv, u32 queue, + struct ethtool_coalesce *coal) +{ + struct mlx5_core_dev *mdev = priv->mdev; + bool rx_dim_enabled, tx_dim_enabled; + struct mlx5e_channels *chs; + struct mlx5e_channel *c; + int err = 0; + int tc; + + if (!MLX5_CAP_GEN(mdev, cq_moderation)) + return -EOPNOTSUPP; + + if (coal->tx_coalesce_usecs > MLX5E_MAX_COAL_TIME || + coal->rx_coalesce_usecs > MLX5E_MAX_COAL_TIME) { + netdev_info(priv->netdev, "%s: maximum coalesce time supported is %lu usecs\n", + __func__, MLX5E_MAX_COAL_TIME); + return -ERANGE; + } + + if (coal->tx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES || + coal->rx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES) { + netdev_info(priv->netdev, "%s: maximum coalesced frames supported is %lu\n", + __func__, MLX5E_MAX_COAL_FRAMES); + return -ERANGE; + } + + rx_dim_enabled = !!coal->use_adaptive_rx_coalesce; + tx_dim_enabled = !!coal->use_adaptive_tx_coalesce; + + mutex_lock(&priv->state_lock); + + chs = &priv->channels; + if (chs->num <= queue) { + mutex_unlock(&priv->state_lock); + return -EINVAL; + } + + c = chs->c[queue]; + + err = mlx5e_dim_rx_change(&c->rq, rx_dim_enabled); + if (err) + goto state_unlock; + + for (tc = 0; tc < c->num_tc; tc++) { + err = mlx5e_dim_tx_change(&c->sq[tc], tx_dim_enabled); + if (err) + goto state_unlock; + } + + if (!rx_dim_enabled) { + c->rx_cq_moder.usec = coal->rx_coalesce_usecs; + c->rx_cq_moder.pkts = coal->rx_max_coalesced_frames; + + mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq, + coal->rx_coalesce_usecs, + coal->rx_max_coalesced_frames); + } + + if (!tx_dim_enabled) { + c->tx_cq_moder.usec = coal->tx_coalesce_usecs; + c->tx_cq_moder.pkts = coal->tx_max_coalesced_frames; + + for (tc = 0; tc < c->num_tc; tc++) + mlx5_core_modify_cq_moderation(mdev, &c->sq[tc].cq.mcq, + coal->tx_coalesce_usecs, + coal->tx_max_coalesced_frames); + } + +state_unlock: + mutex_unlock(&priv->state_lock); + return err; +} + +int mlx5e_set_per_queue_coalesce(struct 
net_device *dev, u32 queue, + struct ethtool_coalesce *coal) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + return mlx5e_ethtool_set_per_queue_coalesce(priv, queue, coal); +} + static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev, unsigned long *supported_modes, u32 eth_proto_cap) @@ -1018,8 +1176,8 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp, ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext); } -int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, - struct ethtool_link_ksettings *link_ksettings) +static int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, + struct ethtool_link_ksettings *link_ksettings) { struct mlx5_core_dev *mdev = priv->mdev; u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {}; @@ -1189,8 +1347,8 @@ static bool ext_requested(u8 autoneg, const unsigned long *adver, bool ext_suppo return autoneg == AUTONEG_ENABLE ? ext_link_mode : ext_supported; } -int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, - const struct ethtool_link_ksettings *link_ksettings) +static int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, + const struct ethtool_link_ksettings *link_ksettings) { struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_port_eth_proto eproto; @@ -1290,7 +1448,7 @@ static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev) return mlx5e_ethtool_get_rxfh_indir_size(priv); } -int mlx5e_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh) +static int mlx5e_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh) { struct mlx5e_priv *priv = netdev_priv(netdev); u32 rss_context = rxfh->rss_context; @@ -1303,8 +1461,8 @@ int mlx5e_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh) return err; } -int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh, - struct netlink_ext_ack *extack) +static int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) { struct mlx5e_priv *priv = netdev_priv(dev); u32 *rss_context = &rxfh->rss_context; @@ -1446,8 +1604,8 @@ static void mlx5e_get_pause_stats(struct net_device *netdev, mlx5e_stats_pause_get(priv, pause_stats); } -void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv, - struct ethtool_pauseparam *pauseparam) +static void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv, + struct ethtool_pauseparam *pauseparam) { struct mlx5_core_dev *mdev = priv->mdev; int err; @@ -1468,8 +1626,8 @@ static void mlx5e_get_pauseparam(struct net_device *netdev, mlx5e_ethtool_get_pauseparam(priv, pauseparam); } -int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv, - struct ethtool_pauseparam *pauseparam) +static int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv, + struct ethtool_pauseparam *pauseparam) { struct mlx5_core_dev *mdev = priv->mdev; int err; @@ -1908,7 +2066,7 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable, if (enable && !MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) return -EOPNOTSUPP; - cq_period_mode = cqe_mode_to_period_mode(enable); + cq_period_mode = mlx5e_dim_cq_period_mode(enable); current_cq_period_mode = is_rx_cq ? 
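/*
 * The two wrappers above are wired into mlx5e_ethtool_ops further down as
 * .get_per_queue_coalesce / .set_per_queue_coalesce, so moderation can be
 * tuned per channel rather than only globally. Reaching them from userspace
 * presumably goes through ethtool's per-queue mode (roughly
 * "ethtool --per-queue <dev> queue_mask 0x2 --coalesce rx-usecs 16 rx-frames 32";
 * treat the exact syntax as an assumption, it is not part of this diff).
 * The sketch below, with invented demo_* names, mirrors the per-queue
 * decision: a bad queue index is rejected, adaptive mode hands control to
 * DIM, and static values are only programmed when DIM is off.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_queue { bool rx_dim; unsigned int rx_usecs, rx_frames; };

static int demo_set_queue_coalesce(struct demo_queue *q, unsigned int nq,
				   unsigned int queue, bool adaptive,
				   unsigned int usecs, unsigned int frames)
{
	if (queue >= nq)	/* same bound check the driver performs */
		return -1;

	q[queue].rx_dim = adaptive;
	if (!adaptive) {	/* static values only matter with DIM off */
		q[queue].rx_usecs = usecs;
		q[queue].rx_frames = frames;
	}
	return 0;
}

int main(void)
{
	struct demo_queue q[4] = { { 0 } };

	demo_set_queue_coalesce(q, 4, 1, false, 16, 32);
	printf("queue 1: dim=%d usecs=%u frames=%u\n",
	       q[1].rx_dim, q[1].rx_usecs, q[1].rx_frames);
	return 0;
}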
priv->channels.params.rx_cq_moderation.cq_period_mode : @@ -1918,12 +2076,22 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable, return 0; new_params = priv->channels.params; - if (is_rx_cq) - mlx5e_set_rx_cq_mode_params(&new_params, cq_period_mode); - else - mlx5e_set_tx_cq_mode_params(&new_params, cq_period_mode); + if (is_rx_cq) { + mlx5e_reset_rx_channels_moderation(&priv->channels, cq_period_mode, + false, true); + mlx5e_channels_rx_toggle_dim(&priv->channels); + MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_BASED_MODER, + cq_period_mode); + } else { + mlx5e_reset_tx_channels_moderation(&priv->channels, cq_period_mode, + false, true); + mlx5e_channels_tx_toggle_dim(&priv->channels); + MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_TX_CQE_BASED_MODER, + cq_period_mode); + } - return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true); + /* Update pflags of existing channels without resetting them */ + return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, false); } static int set_pflag_tx_cqe_based_moder(struct net_device *netdev, bool enable) @@ -2124,7 +2292,7 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable) */ err = mlx5e_safe_switch_params(priv, &new_params, - mlx5e_num_channels_changed_ctx, NULL, true); + mlx5e_update_tc_and_tx_queues_ctx, NULL, true); if (!err) priv->tx_ptp_opened = true; @@ -2422,6 +2590,14 @@ static void mlx5e_get_rmon_stats(struct net_device *netdev, mlx5e_stats_rmon_get(priv, rmon_stats, ranges); } +static void mlx5e_get_ts_stats(struct net_device *netdev, + struct ethtool_ts_stats *ts_stats) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + mlx5e_stats_ts_get(priv, ts_stats); +} + const struct ethtool_ops mlx5e_ethtool_ops = { .cap_rss_ctx_supported = true, .supported_coalesce_params = ETHTOOL_COALESCE_USECS | @@ -2440,6 +2616,8 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .set_channels = mlx5e_set_channels, .get_coalesce = mlx5e_get_coalesce, .set_coalesce = mlx5e_set_coalesce, + .get_per_queue_coalesce = mlx5e_get_per_queue_coalesce, + .set_per_queue_coalesce = mlx5e_set_per_queue_coalesce, .get_link_ksettings = mlx5e_get_link_ksettings, .set_link_ksettings = mlx5e_set_link_ksettings, .get_rxfh_key_size = mlx5e_get_rxfh_key_size, @@ -2471,5 +2649,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .get_eth_mac_stats = mlx5e_get_eth_mac_stats, .get_eth_ctrl_stats = mlx5e_get_eth_ctrl_stats, .get_rmon_stats = mlx5e_get_rmon_stats, + .get_ts_stats = mlx5e_get_ts_stats, .get_link_ext_stats = mlx5e_get_link_ext_stats }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 777d311d44ef..8c5b291a171f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -896,8 +896,7 @@ static void mlx5e_set_inner_ttc_params(struct mlx5e_flow_steering *fs, int tt; memset(ttc_params, 0, sizeof(*ttc_params)); - ttc_params->ns = mlx5_get_flow_namespace(fs->mdev, - MLX5_FLOW_NAMESPACE_KERNEL); + ttc_params->ns_type = MLX5_FLOW_NAMESPACE_KERNEL; ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL; ft_attr->prio = MLX5E_NIC_PRIO; @@ -920,8 +919,7 @@ void mlx5e_set_ttc_params(struct mlx5e_flow_steering *fs, int tt; memset(ttc_params, 0, sizeof(*ttc_params)); - ttc_params->ns = mlx5_get_flow_namespace(fs->mdev, - MLX5_FLOW_NAMESPACE_KERNEL); + ttc_params->ns_type = MLX5_FLOW_NAMESPACE_KERNEL; ft_attr->level = MLX5E_TTC_FT_LEVEL; ft_attr->prio = MLX5E_NIC_PRIO; diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 319930c04093..b758bc72ac36 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -30,6 +30,7 @@ * SOFTWARE. */ +#include <linux/dim.h> #include <net/tc_act/tc_gact.h> #include <linux/mlx5/fs.h> #include <net/vxlan.h> @@ -43,6 +44,7 @@ #include <net/xdp_sock_drv.h> #include "eswitch.h" #include "en.h" +#include "en/dim.h" #include "en/txrx.h" #include "en_tc.h" #include "en_rep.h" @@ -960,17 +962,6 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params, } } - INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work); - - switch (params->rx_cq_moderation.cq_period_mode) { - case MLX5_CQ_PERIOD_MODE_START_FROM_CQE: - rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE; - break; - case MLX5_CQ_PERIOD_MODE_START_FROM_EQE: - default: - rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; - } - return 0; err_destroy_page_pool: @@ -1020,6 +1011,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq) mlx5e_free_wqe_alloc_info(rq); } + kvfree(rq->dim); xdp_rxq_info_unreg(&rq->xdp_rxq); page_pool_destroy(rq->page_pool); mlx5_wq_destroy(&rq->wq_ctrl); @@ -1300,8 +1292,21 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param, if (MLX5_CAP_ETH(mdev, cqe_checksum_full)) __set_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state); - if (params->rx_dim_enabled) - __set_bit(MLX5E_RQ_STATE_DIM, &rq->state); + if (rq->channel && !params->rx_dim_enabled) { + rq->channel->rx_cq_moder = params->rx_cq_moderation; + } else if (rq->channel) { + u8 cq_period_mode; + + cq_period_mode = params->rx_moder_use_cqe_mode ? + DIM_CQ_PERIOD_MODE_START_FROM_CQE : + DIM_CQ_PERIOD_MODE_START_FROM_EQE; + mlx5e_reset_rx_moderation(&rq->channel->rx_cq_moder, cq_period_mode, + params->rx_dim_enabled); + + err = mlx5e_dim_rx_change(rq, params->rx_dim_enabled); + if (err) + goto err_destroy_rq; + } /* We disable csum_complete when XDP is enabled since * XDP programs might manipulate packets which will render @@ -1347,7 +1352,8 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq) void mlx5e_close_rq(struct mlx5e_rq *rq) { - cancel_work_sync(&rq->dim.work); + if (rq->dim) + cancel_work_sync(&rq->dim->work); cancel_work_sync(&rq->recover_work); mlx5e_destroy_rq(rq); mlx5e_free_rx_descs(rq); @@ -1623,9 +1629,6 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, if (err) goto err_sq_wq_destroy; - INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work); - sq->dim.mode = params->tx_cq_moderation.cq_period_mode; - return 0; err_sq_wq_destroy: @@ -1636,6 +1639,7 @@ err_sq_wq_destroy: void mlx5e_free_txqsq(struct mlx5e_txqsq *sq) { + kvfree(sq->dim); mlx5e_free_txqsq_db(sq); mlx5_wq_destroy(&sq->wq_ctrl); } @@ -1791,11 +1795,27 @@ int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix, if (tx_rate) mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate); - if (params->tx_dim_enabled) - sq->state |= BIT(MLX5E_SQ_STATE_DIM); + if (sq->channel && !params->tx_dim_enabled) { + sq->channel->tx_cq_moder = params->tx_cq_moderation; + } else if (sq->channel) { + u8 cq_period_mode; + + cq_period_mode = params->tx_moder_use_cqe_mode ? 
+ DIM_CQ_PERIOD_MODE_START_FROM_CQE : + DIM_CQ_PERIOD_MODE_START_FROM_EQE; + mlx5e_reset_tx_moderation(&sq->channel->tx_cq_moder, + cq_period_mode, + params->tx_dim_enabled); + + err = mlx5e_dim_tx_change(sq, params->tx_dim_enabled); + if (err) + goto err_destroy_sq; + } return 0; +err_destroy_sq: + mlx5e_destroy_sq(c->mdev, sq->sqn); err_free_txqsq: mlx5e_free_txqsq(sq); @@ -1847,7 +1867,8 @@ void mlx5e_close_txqsq(struct mlx5e_txqsq *sq) struct mlx5_core_dev *mdev = sq->mdev; struct mlx5_rate_limit rl = {0}; - cancel_work_sync(&sq->dim.work); + if (sq->dim) + cancel_work_sync(&sq->dim->work); cancel_work_sync(&sq->recover_work); mlx5e_destroy_sq(mdev, sq->sqn); if (sq->rate_limit) { @@ -1866,6 +1887,49 @@ void mlx5e_tx_err_cqe_work(struct work_struct *recover_work) mlx5e_reporter_tx_err_cqe(sq); } +static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode) +{ + return (struct dim_cq_moder) { + .cq_period_mode = cq_period_mode, + .pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS, + .usec = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ? + MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE : + MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC, + }; +} + +bool mlx5e_reset_tx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode, + bool dim_enabled) +{ + bool reset_needed = cq_moder->cq_period_mode != cq_period_mode; + + if (dim_enabled) + *cq_moder = net_dim_get_def_tx_moderation(cq_period_mode); + else + *cq_moder = mlx5e_get_def_tx_moderation(cq_period_mode); + + return reset_needed; +} + +bool mlx5e_reset_tx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode, + bool dim_enabled, bool keep_dim_state) +{ + bool reset = false; + int i, tc; + + for (i = 0; i < chs->num; i++) { + for (tc = 0; tc < mlx5e_get_dcb_num_tc(&chs->params); tc++) { + if (keep_dim_state) + dim_enabled = !!chs->c[i]->sq[tc].dim; + + reset |= mlx5e_reset_tx_moderation(&chs->c[i]->tx_cq_moder, + cq_period_mode, dim_enabled); + } + } + + return reset; +} + static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_sq_param *param, struct mlx5e_icosq *sq, work_func_t recover_work_func) @@ -2089,7 +2153,8 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas)); - MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode); + MLX5_SET(cqc, cqc, cq_period_mode, mlx5e_cq_period_mode(param->cq_period_mode)); + MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn); MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - @@ -2127,8 +2192,10 @@ int mlx5e_open_cq(struct mlx5_core_dev *mdev, struct dim_cq_moder moder, if (err) goto err_free_cq; - if (MLX5_CAP_GEN(mdev, cq_moderation)) - mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts); + if (MLX5_CAP_GEN(mdev, cq_moderation) && + MLX5_CAP_GEN(mdev, cq_period_mode_modify)) + mlx5e_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts, + mlx5e_cq_period_mode(moder.cq_period_mode)); return 0; err_free_cq: @@ -2143,6 +2210,40 @@ void mlx5e_close_cq(struct mlx5e_cq *cq) mlx5e_free_cq(cq); } +int mlx5e_modify_cq_period_mode(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + u8 cq_period_mode) +{ + u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {}; + void *cqc; + + MLX5_SET(modify_cq_in, in, cqn, cq->cqn); + cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context); + MLX5_SET(cqc, cqc, cq_period_mode, mlx5e_cq_period_mode(cq_period_mode)); + 
MLX5_SET(modify_cq_in, in, + modify_field_select_resize_field_select.modify_field_select.modify_field_select, + MLX5_CQ_MODIFY_PERIOD_MODE); + + return mlx5_core_modify_cq(dev, cq, in, sizeof(in)); +} + +int mlx5e_modify_cq_moderation(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + u16 cq_period, u16 cq_max_count, u8 cq_period_mode) +{ + u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {}; + void *cqc; + + MLX5_SET(modify_cq_in, in, cqn, cq->cqn); + cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context); + MLX5_SET(cqc, cqc, cq_period, cq_period); + MLX5_SET(cqc, cqc, cq_max_count, cq_max_count); + MLX5_SET(cqc, cqc, cq_period_mode, cq_period_mode); + MLX5_SET(modify_cq_in, in, + modify_field_select_resize_field_select.modify_field_select.modify_field_select, + MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT | MLX5_CQ_MODIFY_PERIOD_MODE); + + return mlx5_core_modify_cq(dev, cq, in, sizeof(in)); +} + static int mlx5e_open_tx_cqs(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_create_cq_param *ccp, @@ -2901,7 +3002,28 @@ int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv) return err; } -static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv) +static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv, + struct mlx5e_params *params) +{ + struct mlx5_core_dev *mdev = priv->mdev; + int num_comp_vectors, ix, irq; + + num_comp_vectors = mlx5_comp_vectors_max(mdev); + + for (ix = 0; ix < params->num_channels; ix++) { + cpumask_clear(priv->scratchpad.cpumask); + + for (irq = ix; irq < num_comp_vectors; irq += params->num_channels) { + int cpu = mlx5_comp_vector_get_cpu(mdev, irq); + + cpumask_set_cpu(cpu, priv->scratchpad.cpumask); + } + + netif_set_xps_queue(priv->netdev, priv->scratchpad.cpumask, ix); + } +} + +static int mlx5e_update_tc_and_tx_queues(struct mlx5e_priv *priv) { struct netdev_tc_txq old_tc_to_txq[TC_MAX_QUEUE], *tc_to_txq; struct net_device *netdev = priv->netdev; @@ -2925,22 +3047,10 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv) err = mlx5e_update_tx_netdev_queues(priv); if (err) goto err_tcs; - err = netif_set_real_num_rx_queues(netdev, nch); - if (err) { - netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err); - goto err_txqs; - } + mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params); return 0; -err_txqs: - /* netif_set_real_num_rx_queues could fail only when nch increased. Only - * one of nch and ntc is changed in this function. That means, the call - * to netif_set_real_num_tx_queues below should not fail, because it - * decreases the number of TX queues. 
- */ - WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs)); - err_tcs: WARN_ON_ONCE(mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc, old_tc_to_txq)); @@ -2948,42 +3058,32 @@ err_out: return err; } -static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_update_netdev_queues); - -static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv, - struct mlx5e_params *params) -{ - int ix; - - for (ix = 0; ix < params->num_channels; ix++) { - int num_comp_vectors, irq, vec_ix; - struct mlx5_core_dev *mdev; - - mdev = mlx5_sd_ch_ix_get_dev(priv->mdev, ix); - num_comp_vectors = mlx5_comp_vectors_max(mdev); - cpumask_clear(priv->scratchpad.cpumask); - vec_ix = mlx5_sd_ch_ix_get_vec_ix(mdev, ix); - - for (irq = vec_ix; irq < num_comp_vectors; irq += params->num_channels) { - int cpu = mlx5_comp_vector_get_cpu(mdev, irq); - - cpumask_set_cpu(cpu, priv->scratchpad.cpumask); - } - - netif_set_xps_queue(priv->netdev, priv->scratchpad.cpumask, ix); - } -} +MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_update_tc_and_tx_queues); static int mlx5e_num_channels_changed(struct mlx5e_priv *priv) { u16 count = priv->channels.params.num_channels; + struct net_device *netdev = priv->netdev; + int old_num_rxqs; int err; - err = mlx5e_update_netdev_queues(priv); - if (err) + old_num_rxqs = netdev->real_num_rx_queues; + err = netif_set_real_num_rx_queues(netdev, count); + if (err) { + netdev_warn(netdev, "%s: netif_set_real_num_rx_queues failed, %d\n", + __func__, err); return err; - - mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params); + } + err = mlx5e_update_tc_and_tx_queues(priv); + if (err) { + /* mlx5e_update_tc_and_tx_queues can fail if channels or TCs number increases. + * Since channel number changed, it increased. That means, the call to + * netif_set_real_num_rx_queues below should not fail, because it + * decreases the number of RX queues. + */ + WARN_ON_ONCE(netif_set_real_num_rx_queues(netdev, old_num_rxqs)); + return err; + } /* This function may be called on attach, before priv->rx_res is created. */ if (priv->rx_res) { @@ -3516,7 +3616,7 @@ static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv, mlx5e_params_mqprio_dcb_set(&new_params, tc ? tc : 1); err = mlx5e_safe_switch_params(priv, &new_params, - mlx5e_num_channels_changed_ctx, NULL, true); + mlx5e_update_tc_and_tx_queues_ctx, NULL, true); if (!err && priv->mqprio_rl) { mlx5e_mqprio_rl_cleanup(priv->mqprio_rl); @@ -3617,10 +3717,8 @@ static struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_create(struct mlx5_core_dev *mdev static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv, struct tc_mqprio_qopt_offload *mqprio) { - mlx5e_fp_preactivate preactivate; struct mlx5e_params new_params; struct mlx5e_mqprio_rl *rl; - bool nch_changed; int err; err = mlx5e_mqprio_channel_validate(priv, mqprio); @@ -3634,10 +3732,8 @@ static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv, new_params = priv->channels.params; mlx5e_params_mqprio_channel_set(&new_params, mqprio, rl); - nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1; - preactivate = nch_changed ? 
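/*
 * A small, buildable illustration of the XPS spreading done by
 * mlx5e_set_default_xps_cpumasks() above: completion vectors are dealt out
 * round-robin, so channel ix collects vectors ix, ix + nch, ix + 2*nch, ...
 * and the CPUs of those vectors become that queue's XPS mask. The vector and
 * channel counts below are arbitrary example numbers.
 */
#include <stdio.h>

int main(void)
{
	const int num_comp_vectors = 8, num_channels = 3;

	for (int ix = 0; ix < num_channels; ix++) {
		printf("channel %d gets vectors:", ix);
		/* same stride the driver uses in its irq loop */
		for (int irq = ix; irq < num_comp_vectors; irq += num_channels)
			printf(" %d", irq);
		printf("\n");
	}
	return 0;
}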
mlx5e_num_channels_changed_ctx : - mlx5e_update_netdev_queues_ctx; - err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true); + err = mlx5e_safe_switch_params(priv, &new_params, + mlx5e_update_tc_and_tx_queues_ctx, NULL, true); if (err) { if (rl) { mlx5e_mqprio_rl_cleanup(rl); @@ -3960,6 +4056,47 @@ static int set_feature_rx_all(struct net_device *netdev, bool enable) return mlx5_set_port_fcs(mdev, !enable); } +static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode) +{ + return (struct dim_cq_moder) { + .cq_period_mode = cq_period_mode, + .pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS, + .usec = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ? + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE : + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC, + }; +} + +bool mlx5e_reset_rx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode, + bool dim_enabled) +{ + bool reset_needed = cq_moder->cq_period_mode != cq_period_mode; + + if (dim_enabled) + *cq_moder = net_dim_get_def_rx_moderation(cq_period_mode); + else + *cq_moder = mlx5e_get_def_rx_moderation(cq_period_mode); + + return reset_needed; +} + +bool mlx5e_reset_rx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode, + bool dim_enabled, bool keep_dim_state) +{ + bool reset = false; + int i; + + for (i = 0; i < chs->num; i++) { + if (keep_dim_state) + dim_enabled = !!chs->c[i]->rq.dim; + + reset |= mlx5e_reset_rx_moderation(&chs->c[i]->rx_cq_moder, + cq_period_mode, dim_enabled); + } + + return reset; +} + static int mlx5e_set_rx_port_ts(struct mlx5_core_dev *mdev, bool enable) { u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {}; @@ -4383,7 +4520,7 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, reset); out: - netdev->mtu = params->sw_mtu; + WRITE_ONCE(netdev->mtu, params->sw_mtu); mutex_unlock(&priv->state_lock); return err; } @@ -4950,10 +5087,7 @@ static int mlx5e_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, if (!br_spec) return -EINVAL; - nla_for_each_nested(attr, br_spec, rem) { - if (nla_type(attr) != IFLA_BRIDGE_MODE) - continue; - + nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) { mode = nla_get_u16(attr); if (mode > BRIDGE_MODE_VEPA) return -EINVAL; @@ -5027,7 +5161,6 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 { struct mlx5e_params *params = &priv->channels.params; struct mlx5_core_dev *mdev = priv->mdev; - u8 rx_cq_period_mode; params->sw_mtu = mtu; params->hard_mtu = MLX5E_ETH_HARD_MTU; @@ -5061,13 +5194,16 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT); /* CQ moderation params */ - rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 
- MLX5_CQ_PERIOD_MODE_START_FROM_CQE : - MLX5_CQ_PERIOD_MODE_START_FROM_EQE; - params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); - params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); - mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode); - mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); + params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation) && + MLX5_CAP_GEN(mdev, cq_period_mode_modify); + params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation) && + MLX5_CAP_GEN(mdev, cq_period_mode_modify); + params->rx_moder_use_cqe_mode = !!MLX5_CAP_GEN(mdev, cq_period_start_from_cqe); + params->tx_moder_use_cqe_mode = false; + mlx5e_reset_rx_moderation(&params->rx_cq_moderation, params->rx_moder_use_cqe_mode, + params->rx_dim_enabled); + mlx5e_reset_tx_moderation(&params->tx_cq_moderation, params->tx_moder_use_cqe_mode, + params->tx_dim_enabled); /* TX inline */ mlx5_query_min_inline(mdev, &params->tx_min_inline_mode); @@ -5565,7 +5701,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv) mlx5e_ipsec_cleanup(priv); } -int mlx5e_update_nic_rx(struct mlx5e_priv *priv) +static int mlx5e_update_nic_rx(struct mlx5e_priv *priv) { return mlx5e_refresh_tirs(priv, false, false); } @@ -6058,7 +6194,7 @@ static int mlx5e_resume(struct auxiliary_device *adev) return 0; } -static int _mlx5e_suspend(struct auxiliary_device *adev) +static int _mlx5e_suspend(struct auxiliary_device *adev, bool pre_netdev_reg) { struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev); struct mlx5e_priv *priv = mlx5e_dev->priv; @@ -6067,7 +6203,7 @@ static int _mlx5e_suspend(struct auxiliary_device *adev) struct mlx5_core_dev *pos; int i; - if (!netif_device_present(netdev)) { + if (!pre_netdev_reg && !netif_device_present(netdev)) { if (test_bit(MLX5E_STATE_DESTROYING, &priv->state)) mlx5_sd_for_each_dev(i, mdev, pos) mlx5e_destroy_mdev_resources(pos); @@ -6090,7 +6226,7 @@ static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state) actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx); if (actual_adev) - err = _mlx5e_suspend(actual_adev); + err = _mlx5e_suspend(actual_adev, false); mlx5_sd_cleanup(mdev); return err; @@ -6157,7 +6293,7 @@ static int _mlx5e_probe(struct auxiliary_device *adev) return 0; err_resume: - _mlx5e_suspend(adev); + _mlx5e_suspend(adev, true); err_profile_cleanup: profile->cleanup(priv); err_destroy_netdev: @@ -6197,7 +6333,7 @@ static void _mlx5e_remove(struct auxiliary_device *adev) mlx5_core_uplink_netdev_set(mdev, NULL); mlx5e_dcbnl_delete_app(priv); unregister_netdev(priv->netdev); - _mlx5e_suspend(adev); + _mlx5e_suspend(adev, false); priv->profile->cleanup(priv); mlx5e_destroy_netdev(priv); mlx5e_devlink_port_unregister(mlx5e_dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 05527418fa64..8790d57dc6db 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -30,6 +30,7 @@ * SOFTWARE.
*/ +#include <linux/dim.h> #include <linux/debugfs.h> #include <linux/mlx5/fs.h> #include <net/switchdev.h> @@ -40,6 +41,7 @@ #include "eswitch.h" #include "en.h" +#include "en/dim.h" #include "en_rep.h" #include "en/params.h" #include "en/txrx.h" @@ -135,9 +137,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw_rep) int i; for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - sw_rep_stats_desc[i].format); - return idx; + ethtool_puts(data, sw_rep_stats_desc[i].format); } static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw_rep) @@ -145,9 +145,9 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw_rep) int i; for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++) - data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, - sw_rep_stats_desc, i); - return idx; + mlx5e_ethtool_put_stat( + data, MLX5E_READ_CTR64_CPU(&priv->stats.sw, + sw_rep_stats_desc, i)); } static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw_rep) @@ -176,11 +176,9 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep) int i; for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_rep_stats_desc[i].format); + ethtool_puts(data, vport_rep_stats_desc[i].format); for (i = 0; i < NUM_VPORT_REP_LOOPBACK_COUNTERS(priv->mdev); i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - vport_rep_loopback_stats_desc[i].format); - return idx; + ethtool_puts(data, vport_rep_loopback_stats_desc[i].format); } static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep) @@ -188,12 +186,14 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep) int i; for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++) - data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats, - vport_rep_stats_desc, i); + mlx5e_ethtool_put_stat( + data, MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats, + vport_rep_stats_desc, i)); for (i = 0; i < NUM_VPORT_REP_LOOPBACK_COUNTERS(priv->mdev); i++) - data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats, - vport_rep_loopback_stats_desc, i); - return idx; + mlx5e_ethtool_put_stat( + data, + MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats, + vport_rep_loopback_stats_desc, i)); } static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep) @@ -275,8 +275,42 @@ out: kvfree(out); } +static int mlx5e_rep_query_aggr_q_counter(struct mlx5_core_dev *dev, int vport, void *out) +{ + u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {}; + + MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER); + MLX5_SET(query_q_counter_in, in, other_vport, 1); + MLX5_SET(query_q_counter_in, in, vport_number, vport); + MLX5_SET(query_q_counter_in, in, aggregate, 1); + + return mlx5_cmd_exec_inout(dev, query_q_counter, in, out); +} + +static void mlx5e_rep_update_vport_q_counter(struct mlx5e_priv *priv) +{ + struct mlx5e_rep_stats *rep_stats = &priv->stats.rep_stats; + u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {}; + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_eswitch_rep *rep = rpriv->rep; + int err; + + if (!MLX5_CAP_GEN(priv->mdev, q_counter_other_vport) || + !MLX5_CAP_GEN(priv->mdev, q_counter_aggregation)) + return; + + err = mlx5e_rep_query_aggr_q_counter(priv->mdev, rep->vport, out); + if (err) { + netdev_warn(priv->netdev, "failed reading stats on vport %d, error %d\n", + rep->vport, err); + return; + } + + rep_stats->rx_vport_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer); +} + static void mlx5e_rep_get_strings(struct net_device *dev, - u32 stringset, uint8_t *data) + u32 stringset, u8 *data) { struct mlx5e_priv *priv = netdev_priv(dev); @@ -394,6 +428,8 @@ 
static const struct ethtool_ops mlx5e_rep_ethtool_ops = { .set_channels = mlx5e_rep_set_channels, .get_coalesce = mlx5e_rep_get_coalesce, .set_coalesce = mlx5e_rep_set_coalesce, + .get_per_queue_coalesce = mlx5e_get_per_queue_coalesce, + .set_per_queue_coalesce = mlx5e_set_per_queue_coalesce, .get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size, .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size, }; @@ -804,10 +840,6 @@ static void mlx5e_build_rep_params(struct net_device *netdev) struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_params *params; - u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? - MLX5_CQ_PERIOD_MODE_START_FROM_CQE : - MLX5_CQ_PERIOD_MODE_START_FROM_EQE; - params = &priv->channels.params; params->num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS; @@ -835,7 +867,7 @@ static void mlx5e_build_rep_params(struct net_device *netdev) /* CQ moderation params */ params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); - mlx5e_set_rx_cq_mode_params(params, cq_period_mode); + params->rx_moder_use_cqe_mode = !!MLX5_CAP_GEN(mdev, cq_period_start_from_cqe); params->mqprio.num_tc = 1; if (rep->vport != MLX5_VPORT_UPLINK) @@ -1231,6 +1263,12 @@ static int mlx5e_update_rep_rx(struct mlx5e_priv *priv) return 0; } +static void mlx5e_rep_stats_update_ndo_stats(struct mlx5e_priv *priv) +{ + mlx5e_stats_update_ndo_stats(priv); + mlx5e_rep_update_vport_q_counter(priv); +} + static int mlx5e_rep_event_mpesw(struct mlx5e_priv *priv) { struct mlx5e_rep_priv *rpriv = priv->ppriv; @@ -1423,7 +1461,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = { .enable = mlx5e_rep_enable, .disable = mlx5e_rep_disable, .update_rx = mlx5e_update_rep_rx, - .update_stats = mlx5e_stats_update_ndo_stats, + .update_stats = mlx5e_rep_stats_update_ndo_stats, .rx_handlers = &mlx5e_rx_handlers_rep, .max_tc = 1, .stats_grps = mlx5e_rep_stats_grps, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index 08a75654f5f1..5bf8318cc48b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c @@ -359,7 +359,7 @@ int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data) if (st.cond_func && st.cond_func(priv)) continue; if (data) - strcpy(data + count * ETH_GSTRING_LEN, st.name); + ethtool_puts(&data, st.name); count++; } return count; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index f3d0898bdbc6..e211c41cec06 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -41,6 +41,11 @@ #include <net/page_pool/helpers.h> #endif +void mlx5e_ethtool_put_stat(u64 **data, u64 val) +{ + *(*data)++ = val; +} + static unsigned int stats_grps_num(struct mlx5e_priv *priv) { return !priv->profile->stats_grps_num ? 
0 : @@ -90,17 +95,17 @@ void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx) int i; for (i = 0; i < num_stats_grps; i++) - idx = stats_grps[i]->fill_stats(priv, data, idx); + stats_grps[i]->fill_stats(priv, &data); } void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data) { mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps; const unsigned int num_stats_grps = stats_grps_num(priv); - int i, idx = 0; + int i; for (i = 0; i < num_stats_grps; i++) - idx = stats_grps[i]->fill_strings(priv, data, idx); + stats_grps[i]->fill_strings(priv, &data); } /* Concrete NIC Stats */ @@ -257,8 +262,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw) int i; for (i = 0; i < NUM_SW_COUNTERS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format); - return idx; + ethtool_puts(data, sw_stats_desc[i].format); } static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw) @@ -266,8 +270,9 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw) int i; for (i = 0; i < NUM_SW_COUNTERS; i++) - data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i); - return idx; + mlx5e_ethtool_put_stat(data, + MLX5E_READ_CTR64_CPU(&priv->stats.sw, + sw_stats_desc, i)); } static void mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats *s, @@ -591,14 +596,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt) int i; for (i = 0; i < NUM_Q_COUNTERS && q_counter_any(priv); i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - q_stats_desc[i].format); + ethtool_puts(data, q_stats_desc[i].format); for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - drop_rq_stats_desc[i].format); - - return idx; + ethtool_puts(data, drop_rq_stats_desc[i].format); } static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt) @@ -606,12 +607,13 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt) int i; for (i = 0; i < NUM_Q_COUNTERS && q_counter_any(priv); i++) - data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt, - q_stats_desc, i); + mlx5e_ethtool_put_stat(data, + MLX5E_READ_CTR32_CPU(&priv->stats.qcnt, + q_stats_desc, i)); for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++) - data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt, - drop_rq_stats_desc, i); - return idx; + mlx5e_ethtool_put_stat( + data, MLX5E_READ_CTR32_CPU(&priv->stats.qcnt, + drop_rq_stats_desc, i)); } static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt) @@ -685,18 +687,13 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env) int i; for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - vnic_env_stats_steer_desc[i].format); + ethtool_puts(data, vnic_env_stats_steer_desc[i].format); for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - vnic_env_stats_dev_oob_desc[i].format); + ethtool_puts(data, vnic_env_stats_dev_oob_desc[i].format); for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - vnic_env_stats_drop_desc[i].format); - - return idx; + ethtool_puts(data, vnic_env_stats_drop_desc[i].format); } static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env) @@ -704,18 +701,22 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env) int i; for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++) - data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out, - vnic_env_stats_steer_desc, i); + mlx5e_ethtool_put_stat( + data, + MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out, + 
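/*
 * The string/stat fill helpers in this file now take a cursor (u8 ** / u64 **)
 * that ethtool_puts(), ethtool_sprintf() and mlx5e_ethtool_put_stat() advance,
 * instead of returning an updated idx. A minimal userspace sketch of that
 * cursor pattern, with made-up demo_* helpers and a shortened string length:
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_GSTRING_LEN 32

/* roughly what ethtool_puts() does: copy the name, advance one slot */
static void demo_puts(char **data, const char *str)
{
	strncpy(*data, str, DEMO_GSTRING_LEN);
	*data += DEMO_GSTRING_LEN;
}

/* same shape as mlx5e_ethtool_put_stat(): store a value, advance the cursor */
static void demo_put_stat(uint64_t **data, uint64_t val)
{
	*(*data)++ = val;
}

int main(void)
{
	char names[2 * DEMO_GSTRING_LEN] = { 0 };
	uint64_t stats[2] = { 0 };
	char *nc = names;
	uint64_t *sc = stats;

	demo_puts(&nc, "rx_packets");
	demo_puts(&nc, "tx_packets");
	demo_put_stat(&sc, 123);
	demo_put_stat(&sc, 456);

	printf("%s=%llu %s=%llu\n", names, (unsigned long long)stats[0],
	       names + DEMO_GSTRING_LEN, (unsigned long long)stats[1]);
	return 0;
}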
vnic_env_stats_steer_desc, i)); for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++) - data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out, - vnic_env_stats_dev_oob_desc, i); + mlx5e_ethtool_put_stat( + data, + MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out, + vnic_env_stats_dev_oob_desc, i)); for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++) - data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out, - vnic_env_stats_drop_desc, i); - - return idx; + mlx5e_ethtool_put_stat( + data, + MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out, + vnic_env_stats_drop_desc, i)); } static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env) @@ -798,13 +799,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport) int i; for (i = 0; i < NUM_VPORT_COUNTERS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format); + ethtool_puts(data, vport_stats_desc[i].format); for (i = 0; i < NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev); i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - vport_loopback_stats_desc[i].format); - - return idx; + ethtool_puts(data, vport_loopback_stats_desc[i].format); } static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport) @@ -812,14 +810,16 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport) int i; for (i = 0; i < NUM_VPORT_COUNTERS; i++) - data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out, - vport_stats_desc, i); + mlx5e_ethtool_put_stat( + data, + MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out, + vport_stats_desc, i)); for (i = 0; i < NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev); i++) - data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out, - vport_loopback_stats_desc, i); - - return idx; + mlx5e_ethtool_put_stat( + data, + MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out, + vport_loopback_stats_desc, i)); } static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport) @@ -868,8 +868,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3) int i; for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format); - return idx; + ethtool_puts(data, pport_802_3_stats_desc[i].format); } static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3) @@ -877,9 +876,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3) int i; for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++) - data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters, - pport_802_3_stats_desc, i); - return idx; + mlx5e_ethtool_put_stat( + data, MLX5E_READ_CTR64_BE( + &priv->stats.pport.IEEE_802_3_counters, + pport_802_3_stats_desc, i)); } #define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \ @@ -1029,8 +1029,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863) int i; for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format); - return idx; + ethtool_puts(data, pport_2863_stats_desc[i].format); } static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863) @@ -1038,9 +1037,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863) int i; for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++) - data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters, - pport_2863_stats_desc, i); - return idx; + mlx5e_ethtool_put_stat( + data, MLX5E_READ_CTR64_BE( + &priv->stats.pport.RFC_2863_counters, + pport_2863_stats_desc, i)); } static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863) @@ -1088,8 +1088,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819) int i; for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++) - strcpy(data + 
(idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format); - return idx; + ethtool_puts(data, pport_2819_stats_desc[i].format); } static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819) @@ -1097,9 +1096,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819) int i; for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++) - data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters, - pport_2819_stats_desc, i); - return idx; + mlx5e_ethtool_put_stat( + data, MLX5E_READ_CTR64_BE( + &priv->stats.pport.RFC_2819_counters, + pport_2819_stats_desc, i)); } static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819) @@ -1172,6 +1172,51 @@ void mlx5e_stats_rmon_get(struct mlx5e_priv *priv, *ranges = mlx5e_rmon_ranges; } +void mlx5e_stats_ts_get(struct mlx5e_priv *priv, + struct ethtool_ts_stats *ts_stats) +{ + int i, j; + + mutex_lock(&priv->state_lock); + + if (priv->tx_ptp_opened) { + struct mlx5e_ptp *ptp = priv->channels.ptp; + + ts_stats->pkts = 0; + ts_stats->err = 0; + ts_stats->lost = 0; + + /* Aggregate stats across all TCs */ + for (i = 0; i < ptp->num_tc; i++) { + struct mlx5e_ptp_cq_stats *stats = + ptp->ptpsq[i].cq_stats; + + ts_stats->pkts += stats->cqe; + ts_stats->err += stats->abort + stats->err_cqe + + stats->late_cqe; + ts_stats->lost += stats->lost_cqe; + } + } else { + /* DMA layer will always successfully timestamp packets. Other + * counters do not make sense for this layer. + */ + ts_stats->pkts = 0; + + /* Aggregate stats across all SQs */ + for (j = 0; j < priv->channels.num; j++) { + struct mlx5e_channel *c = priv->channels.c[j]; + + for (i = 0; i < c->num_tc; i++) { + struct mlx5e_sq_stats *stats = c->sq[i].stats; + + ts_stats->pkts += stats->timestamps; + } + } + } + + mutex_unlock(&priv->state_lock); +} + #define PPORT_PHY_STATISTICAL_OFF(c) \ MLX5_BYTE_OFF(ppcnt_reg, \ counter_set.phys_layer_statistical_cntrs.c##_high) @@ -1215,21 +1260,18 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy) struct mlx5_core_dev *mdev = priv->mdev; int i; - strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy"); + ethtool_puts(data, "link_down_events_phy"); if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) - return idx; + return; for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - pport_phy_statistical_stats_desc[i].format); + ethtool_puts(data, pport_phy_statistical_stats_desc[i].format); if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters)) for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - pport_phy_statistical_err_lanes_stats_desc[i].format); - - return idx; + ethtool_puts(data, + pport_phy_statistical_err_lanes_stats_desc[i].format); } static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy) @@ -1238,24 +1280,29 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy) int i; /* link_down_events_phy has special handling since it is not stored in __be64 format */ - data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters, - counter_set.phys_layer_cntrs.link_down_events); + mlx5e_ethtool_put_stat( + data, MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters, + counter_set.phys_layer_cntrs.link_down_events)); if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) - return idx; + return; for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++) - data[idx++] = - MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters, - pport_phy_statistical_stats_desc, i); + mlx5e_ethtool_put_stat( + data, + MLX5E_READ_CTR64_BE( + 
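/*
 * mlx5e_stats_ts_get() above folds the per-TC PTP CQ counters into the three
 * ethtool timestamping totals (pkts from delivered CQEs, err from
 * abort/err_cqe/late_cqe, lost from lost_cqe). A stand-alone sketch of that
 * aggregation; the demo_* types and sample numbers are invented:
 */
#include <stdint.h>
#include <stdio.h>

struct demo_ptp_cq_stats { uint64_t cqe, abort, err_cqe, late_cqe, lost_cqe; };
struct demo_ts_stats { uint64_t pkts, err, lost; };

static void demo_ts_get(const struct demo_ptp_cq_stats *tc, int num_tc,
			struct demo_ts_stats *out)
{
	out->pkts = out->err = out->lost = 0;
	for (int i = 0; i < num_tc; i++) {
		out->pkts += tc[i].cqe;
		out->err  += tc[i].abort + tc[i].err_cqe + tc[i].late_cqe;
		out->lost += tc[i].lost_cqe;
	}
}

int main(void)
{
	struct demo_ptp_cq_stats tcs[2] = {
		{ .cqe = 100, .abort = 1, .late_cqe = 2, .lost_cqe = 3 },
		{ .cqe = 50,  .err_cqe = 1, .lost_cqe = 1 },
	};
	struct demo_ts_stats ts;

	demo_ts_get(tcs, 2, &ts);
	printf("pkts=%llu err=%llu lost=%llu\n",
	       (unsigned long long)ts.pkts, (unsigned long long)ts.err,
	       (unsigned long long)ts.lost);
	return 0;
}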
&priv->stats.pport.phy_statistical_counters, + pport_phy_statistical_stats_desc, i)); if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters)) for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++) - data[idx++] = - MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters, - pport_phy_statistical_err_lanes_stats_desc, - i); - return idx; + mlx5e_ethtool_put_stat( + data, + MLX5E_READ_CTR64_BE( + &priv->stats.pport + .phy_statistical_counters, + pport_phy_statistical_err_lanes_stats_desc, + i)); } static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy) @@ -1436,9 +1483,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext) if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters)) for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - pport_eth_ext_stats_desc[i].format); - return idx; + ethtool_puts(data, pport_eth_ext_stats_desc[i].format); } static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext) @@ -1447,10 +1492,11 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext) if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters)) for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++) - data[idx++] = - MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters, - pport_eth_ext_stats_desc, i); - return idx; + mlx5e_ethtool_put_stat( + data, + MLX5E_READ_CTR64_BE( + &priv->stats.pport.eth_ext_counters, + pport_eth_ext_stats_desc, i)); } static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext) @@ -1516,19 +1562,16 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie) if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group)) for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - pcie_perf_stats_desc[i].format); + ethtool_puts(data, pcie_perf_stats_desc[i].format); if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt)) for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - pcie_perf_stats_desc64[i].format); + ethtool_puts(data, pcie_perf_stats_desc64[i].format); if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled)) for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - pcie_perf_stall_stats_desc[i].format); - return idx; + ethtool_puts(data, + pcie_perf_stall_stats_desc[i].format); } static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie) @@ -1537,22 +1580,27 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie) if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group)) for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++) - data[idx++] = - MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters, - pcie_perf_stats_desc, i); + mlx5e_ethtool_put_stat( + data, + MLX5E_READ_CTR32_BE( + &priv->stats.pcie.pcie_perf_counters, + pcie_perf_stats_desc, i)); if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt)) for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++) - data[idx++] = - MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters, - pcie_perf_stats_desc64, i); + mlx5e_ethtool_put_stat( + data, + MLX5E_READ_CTR64_BE( + &priv->stats.pcie.pcie_perf_counters, + pcie_perf_stats_desc64, i)); if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled)) for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++) - data[idx++] = - MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters, - pcie_perf_stall_stats_desc, i); - return idx; + mlx5e_ethtool_put_stat( + data, + MLX5E_READ_CTR32_BE( + &priv->stats.pcie.pcie_perf_counters, + pcie_perf_stall_stats_desc, i)); } static 
MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie) @@ -1609,18 +1657,18 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest) int i, prio; if (!MLX5_CAP_GEN(mdev, sbcam_reg)) - return idx; + return; for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++) - sprintf(data + (idx++) * ETH_GSTRING_LEN, - pport_per_tc_prio_stats_desc[i].format, prio); + ethtool_sprintf(data, + pport_per_tc_prio_stats_desc[i].format, + prio); for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++) - sprintf(data + (idx++) * ETH_GSTRING_LEN, - pport_per_tc_congest_prio_stats_desc[i].format, prio); + ethtool_sprintf(data, + pport_per_tc_congest_prio_stats_desc[i].format, + prio); } - - return idx; } static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest) @@ -1630,20 +1678,24 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest) int i, prio; if (!MLX5_CAP_GEN(mdev, sbcam_reg)) - return idx; + return; for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++) - data[idx++] = - MLX5E_READ_CTR64_BE(&pport->per_tc_prio_counters[prio], - pport_per_tc_prio_stats_desc, i); + mlx5e_ethtool_put_stat( + data, + MLX5E_READ_CTR64_BE( + &pport->per_tc_prio_counters[prio], + pport_per_tc_prio_stats_desc, i)); for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS ; i++) - data[idx++] = - MLX5E_READ_CTR64_BE(&pport->per_tc_congest_prio_counters[prio], - pport_per_tc_congest_prio_stats_desc, i); + mlx5e_ethtool_put_stat( + data, + MLX5E_READ_CTR64_BE( + &pport->per_tc_congest_prio_counters + [prio], + pport_per_tc_congest_prio_stats_desc, + i)); } - - return idx; } static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv) @@ -1728,35 +1780,33 @@ static int mlx5e_grp_per_prio_traffic_get_num_stats(void) return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO; } -static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv, - u8 *data, - int idx) +static void mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv, + u8 **data) { int i, prio; for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) - sprintf(data + (idx++) * ETH_GSTRING_LEN, - pport_per_prio_traffic_stats_desc[i].format, prio); + ethtool_sprintf(data, + pport_per_prio_traffic_stats_desc[i].format, + prio); } - - return idx; } -static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv, - u64 *data, - int idx) +static void mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv, + u64 **data) { int i, prio; for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) - data[idx++] = - MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio], - pport_per_prio_traffic_stats_desc, i); + mlx5e_ethtool_put_stat( + data, + MLX5E_READ_CTR64_BE( + &priv->stats.pport + .per_prio_counters[prio], + pport_per_prio_traffic_stats_desc, i)); } - - return idx; } static const struct counter_desc pport_per_prio_pfc_stats_desc[] = { @@ -1816,9 +1866,8 @@ static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv) NUM_PPORT_PFC_STALL_COUNTERS(priv); } -static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv, - u8 *data, - int idx) +static void mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv, + u8 **data) { unsigned long pfc_combined; int i, prio; @@ -1829,28 +1878,26 @@ static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv, char 
pfc_string[ETH_GSTRING_LEN]; snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio); - sprintf(data + (idx++) * ETH_GSTRING_LEN, - pport_per_prio_pfc_stats_desc[i].format, pfc_string); + ethtool_sprintf(data, + pport_per_prio_pfc_stats_desc[i].format, + pfc_string); } } if (mlx5e_query_global_pause_combined(priv)) { for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) { - sprintf(data + (idx++) * ETH_GSTRING_LEN, - pport_per_prio_pfc_stats_desc[i].format, "global"); + ethtool_sprintf(data, + pport_per_prio_pfc_stats_desc[i].format, + "global"); } } for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - pport_pfc_stall_stats_desc[i].format); - - return idx; + ethtool_puts(data, pport_pfc_stall_stats_desc[i].format); } -static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv, - u64 *data, - int idx) +static void mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv, + u64 **data) { unsigned long pfc_combined; int i, prio; @@ -1858,25 +1905,30 @@ static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv, pfc_combined = mlx5e_query_pfc_combined(priv); for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) { for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) { - data[idx++] = - MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio], - pport_per_prio_pfc_stats_desc, i); + mlx5e_ethtool_put_stat( + data, + MLX5E_READ_CTR64_BE( + &priv->stats.pport + .per_prio_counters[prio], + pport_per_prio_pfc_stats_desc, i)); } } if (mlx5e_query_global_pause_combined(priv)) { for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) { - data[idx++] = - MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0], - pport_per_prio_pfc_stats_desc, i); + mlx5e_ethtool_put_stat( + data, + MLX5E_READ_CTR64_BE( + &priv->stats.pport.per_prio_counters[0], + pport_per_prio_pfc_stats_desc, i)); } } for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++) - data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0], - pport_pfc_stall_stats_desc, i); - - return idx; + mlx5e_ethtool_put_stat( + data, MLX5E_READ_CTR64_BE( + &priv->stats.pport.per_prio_counters[0], + pport_pfc_stall_stats_desc, i)); } static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio) @@ -1887,16 +1939,14 @@ static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio) static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio) { - idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx); - idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx); - return idx; + mlx5e_grp_per_prio_traffic_fill_strings(priv, data); + mlx5e_grp_per_prio_pfc_fill_strings(priv, data); } static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio) { - idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx); - idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx); - return idx; + mlx5e_grp_per_prio_traffic_fill_stats(priv, data); + mlx5e_grp_per_prio_pfc_fill_stats(priv, data); } static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio) @@ -1944,12 +1994,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme) int i; for (i = 0; i < NUM_PME_STATUS_STATS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format); + ethtool_puts(data, mlx5e_pme_status_desc[i].format); for (i = 0; i < NUM_PME_ERR_STATS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format); - - return idx; + ethtool_puts(data, mlx5e_pme_error_desc[i].format); } static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme) @@ -1960,14 +2008,14 @@ static 
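/*
 * The per-priority PFC strings above are built in two steps: the priority is
 * first rendered into pfc_string ("prio0", "prio1", ... or "global"), and that
 * string is then substituted into the counter descriptor's format. A tiny demo
 * of the same two-stage formatting; the "rx_%s_pause" format here is an
 * assumption standing in for the real descriptor strings:
 */
#include <stdio.h>

int main(void)
{
	char pfc_string[32];
	char name[32];

	snprintf(pfc_string, sizeof(pfc_string), "prio%d", 3);
	snprintf(name, sizeof(name), "rx_%s_pause", pfc_string);
	printf("%s\n", name);	/* -> rx_prio3_pause */
	return 0;
}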
MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme) mlx5_get_pme_stats(priv->mdev, &pme_stats); for (i = 0; i < NUM_PME_STATUS_STATS; i++) - data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters, - mlx5e_pme_status_desc, i); + mlx5e_ethtool_put_stat( + data, MLX5E_READ_CTR64_CPU(pme_stats.status_counters, + mlx5e_pme_status_desc, i)); for (i = 0; i < NUM_PME_ERR_STATS; i++) - data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters, - mlx5e_pme_error_desc, i); - - return idx; + mlx5e_ethtool_put_stat( + data, MLX5E_READ_CTR64_CPU(pme_stats.error_counters, + mlx5e_pme_error_desc, i)); } static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; } @@ -1979,12 +2027,12 @@ static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls) static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls) { - return idx + mlx5e_ktls_get_strings(priv, data + idx * ETH_GSTRING_LEN); + mlx5e_ktls_get_strings(priv, data); } static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls) { - return idx + mlx5e_ktls_get_stats(priv, data + idx); + mlx5e_ktls_get_stats(priv, data); } static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; } @@ -2063,6 +2111,7 @@ static const struct counter_desc sq_stats_desc[] = { { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, timestamps) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) }, #ifdef CONFIG_MLX5_EN_TLS @@ -2175,6 +2224,7 @@ static const struct counter_desc ptp_cq_stats_desc[] = { { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) }, { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) }, { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, late_cqe) }, + { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, lost_cqe) }, }; static const struct counter_desc ptp_rq_stats_desc[] = { @@ -2214,6 +2264,7 @@ static const struct counter_desc qos_sq_stats_desc[] = { { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) }, { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) }, { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, nop) }, + { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, timestamps) }, { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) }, { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) }, #ifdef CONFIG_MLX5_EN_TLS @@ -2264,10 +2315,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos) for (qid = 0; qid < max_qos_sqs; qid++) for (i = 0; i < NUM_QOS_SQ_STATS; i++) - sprintf(data + (idx++) * ETH_GSTRING_LEN, - qos_sq_stats_desc[i].format, qid); - - return idx; + ethtool_sprintf(data, qos_sq_stats_desc[i].format, qid); } static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos) @@ -2284,10 +2332,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos) struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]); for (i = 0; i < NUM_QOS_SQ_STATS; i++) - data[idx++] = MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i); + mlx5e_ethtool_put_stat( + data, + MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i)); } - - return idx; } static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; } @@ -2312,29 +2360,29 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp) int i, tc; if (!priv->tx_ptp_opened && !priv->rx_ptp_opened) - return idx; + return; for (i = 0; i < NUM_PTP_CH_STATS; i++) - sprintf(data + (idx++) * ETH_GSTRING_LEN, - "%s", ptp_ch_stats_desc[i].format); + 
ethtool_puts(data, ptp_ch_stats_desc[i].format); if (priv->tx_ptp_opened) { for (tc = 0; tc < priv->max_opened_tc; tc++) for (i = 0; i < NUM_PTP_SQ_STATS; i++) - sprintf(data + (idx++) * ETH_GSTRING_LEN, - ptp_sq_stats_desc[i].format, tc); + ethtool_sprintf(data, + ptp_sq_stats_desc[i].format, + tc); for (tc = 0; tc < priv->max_opened_tc; tc++) for (i = 0; i < NUM_PTP_CQ_STATS; i++) - sprintf(data + (idx++) * ETH_GSTRING_LEN, - ptp_cq_stats_desc[i].format, tc); + ethtool_sprintf(data, + ptp_cq_stats_desc[i].format, + tc); } if (priv->rx_ptp_opened) { for (i = 0; i < NUM_PTP_RQ_STATS; i++) - sprintf(data + (idx++) * ETH_GSTRING_LEN, - ptp_rq_stats_desc[i].format, MLX5E_PTP_CHANNEL_IX); + ethtool_sprintf(data, ptp_rq_stats_desc[i].format, + MLX5E_PTP_CHANNEL_IX); } - return idx; } static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp) @@ -2342,33 +2390,35 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp) int i, tc; if (!priv->tx_ptp_opened && !priv->rx_ptp_opened) - return idx; + return; for (i = 0; i < NUM_PTP_CH_STATS; i++) - data[idx++] = - MLX5E_READ_CTR64_CPU(&priv->ptp_stats.ch, - ptp_ch_stats_desc, i); + mlx5e_ethtool_put_stat( + data, MLX5E_READ_CTR64_CPU(&priv->ptp_stats.ch, + ptp_ch_stats_desc, i)); if (priv->tx_ptp_opened) { for (tc = 0; tc < priv->max_opened_tc; tc++) for (i = 0; i < NUM_PTP_SQ_STATS; i++) - data[idx++] = - MLX5E_READ_CTR64_CPU(&priv->ptp_stats.sq[tc], - ptp_sq_stats_desc, i); + mlx5e_ethtool_put_stat( + data, MLX5E_READ_CTR64_CPU( + &priv->ptp_stats.sq[tc], + ptp_sq_stats_desc, i)); for (tc = 0; tc < priv->max_opened_tc; tc++) for (i = 0; i < NUM_PTP_CQ_STATS; i++) - data[idx++] = - MLX5E_READ_CTR64_CPU(&priv->ptp_stats.cq[tc], - ptp_cq_stats_desc, i); + mlx5e_ethtool_put_stat( + data, MLX5E_READ_CTR64_CPU( + &priv->ptp_stats.cq[tc], + ptp_cq_stats_desc, i)); } if (priv->rx_ptp_opened) { for (i = 0; i < NUM_PTP_RQ_STATS; i++) - data[idx++] = + mlx5e_ethtool_put_stat( + data, MLX5E_READ_CTR64_CPU(&priv->ptp_stats.rq, - ptp_rq_stats_desc, i); + ptp_rq_stats_desc, i)); } - return idx; } static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; } @@ -2394,38 +2444,29 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels) for (i = 0; i < max_nch; i++) for (j = 0; j < NUM_CH_STATS; j++) - sprintf(data + (idx++) * ETH_GSTRING_LEN, - ch_stats_desc[j].format, i); + ethtool_sprintf(data, ch_stats_desc[j].format, i); for (i = 0; i < max_nch; i++) { for (j = 0; j < NUM_RQ_STATS; j++) - sprintf(data + (idx++) * ETH_GSTRING_LEN, - rq_stats_desc[j].format, i); + ethtool_sprintf(data, rq_stats_desc[j].format, i); for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++) - sprintf(data + (idx++) * ETH_GSTRING_LEN, - xskrq_stats_desc[j].format, i); + ethtool_sprintf(data, xskrq_stats_desc[j].format, i); for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++) - sprintf(data + (idx++) * ETH_GSTRING_LEN, - rq_xdpsq_stats_desc[j].format, i); + ethtool_sprintf(data, rq_xdpsq_stats_desc[j].format, i); } for (tc = 0; tc < priv->max_opened_tc; tc++) for (i = 0; i < max_nch; i++) for (j = 0; j < NUM_SQ_STATS; j++) - sprintf(data + (idx++) * ETH_GSTRING_LEN, - sq_stats_desc[j].format, - i + tc * max_nch); + ethtool_sprintf(data, sq_stats_desc[j].format, + i + tc * max_nch); for (i = 0; i < max_nch; i++) { for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++) - sprintf(data + (idx++) * ETH_GSTRING_LEN, - xsksq_stats_desc[j].format, i); + ethtool_sprintf(data, xsksq_stats_desc[j].format, i); for (j = 0; j < NUM_XDPSQ_STATS; j++) - sprintf(data + (idx++) * ETH_GSTRING_LEN, - xdpsq_stats_desc[j].format, i); + 
ethtool_sprintf(data, xdpsq_stats_desc[j].format, i); } - - return idx; } static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels) @@ -2436,44 +2477,50 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels) for (i = 0; i < max_nch; i++) for (j = 0; j < NUM_CH_STATS; j++) - data[idx++] = - MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->ch, - ch_stats_desc, j); + mlx5e_ethtool_put_stat( + data, MLX5E_READ_CTR64_CPU( + &priv->channel_stats[i]->ch, + ch_stats_desc, j)); for (i = 0; i < max_nch; i++) { for (j = 0; j < NUM_RQ_STATS; j++) - data[idx++] = - MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq, - rq_stats_desc, j); + mlx5e_ethtool_put_stat( + data, MLX5E_READ_CTR64_CPU( + &priv->channel_stats[i]->rq, + rq_stats_desc, j)); for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++) - data[idx++] = - MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xskrq, - xskrq_stats_desc, j); + mlx5e_ethtool_put_stat( + data, MLX5E_READ_CTR64_CPU( + &priv->channel_stats[i]->xskrq, + xskrq_stats_desc, j)); for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++) - data[idx++] = - MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq_xdpsq, - rq_xdpsq_stats_desc, j); + mlx5e_ethtool_put_stat( + data, MLX5E_READ_CTR64_CPU( + &priv->channel_stats[i]->rq_xdpsq, + rq_xdpsq_stats_desc, j)); } for (tc = 0; tc < priv->max_opened_tc; tc++) for (i = 0; i < max_nch; i++) for (j = 0; j < NUM_SQ_STATS; j++) - data[idx++] = - MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->sq[tc], - sq_stats_desc, j); + mlx5e_ethtool_put_stat( + data, + MLX5E_READ_CTR64_CPU( + &priv->channel_stats[i]->sq[tc], + sq_stats_desc, j)); for (i = 0; i < max_nch; i++) { for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++) - data[idx++] = - MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xsksq, - xsksq_stats_desc, j); + mlx5e_ethtool_put_stat( + data, MLX5E_READ_CTR64_CPU( + &priv->channel_stats[i]->xsksq, + xsksq_stats_desc, j)); for (j = 0; j < NUM_XDPSQ_STATS; j++) - data[idx++] = - MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xdpsq, - xdpsq_stats_desc, j); + mlx5e_ethtool_put_stat( + data, MLX5E_READ_CTR64_CPU( + &priv->channel_stats[i]->xdpsq, + xdpsq_stats_desc, j)); } - - return idx; } static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 12b3607afecd..650732288616 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -71,11 +71,13 @@ struct mlx5e_priv; struct mlx5e_stats_grp { u16 update_stats_mask; int (*get_num_stats)(struct mlx5e_priv *priv); - int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx); - int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx); + void (*fill_strings)(struct mlx5e_priv *priv, u8 **data); + void (*fill_stats)(struct mlx5e_priv *priv, u64 **data); void (*update_stats)(struct mlx5e_priv *priv); }; +void mlx5e_ethtool_put_stat(u64 **data, u64 val); + typedef const struct mlx5e_stats_grp *const mlx5e_stats_grp_t; #define MLX5E_STATS_GRP_OP(grp, name) mlx5e_stats_grp_ ## grp ## _ ## name @@ -87,10 +89,10 @@ typedef const struct mlx5e_stats_grp *const mlx5e_stats_grp_t; void MLX5E_STATS_GRP_OP(grp, update_stats)(struct mlx5e_priv *priv) #define MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(grp) \ - int MLX5E_STATS_GRP_OP(grp, fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx) + void MLX5E_STATS_GRP_OP(grp, fill_strings)(struct mlx5e_priv *priv, u8 **data) #define MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(grp) \ - int MLX5E_STATS_GRP_OP(grp, 
fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx) + void MLX5E_STATS_GRP_OP(grp, fill_stats)(struct mlx5e_priv *priv, u64 **data) #define MLX5E_STATS_GRP(grp) mlx5e_stats_grp_ ## grp @@ -126,6 +128,8 @@ void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv, void mlx5e_stats_rmon_get(struct mlx5e_priv *priv, struct ethtool_rmon_stats *rmon, const struct ethtool_rmon_hist_range **ranges); +void mlx5e_stats_ts_get(struct mlx5e_priv *priv, + struct ethtool_ts_stats *ts_stats); void mlx5e_get_link_ext_stats(struct net_device *dev, struct ethtool_link_ext_stats *stats); @@ -429,6 +433,7 @@ struct mlx5e_sq_stats { u64 stopped; u64 dropped; u64 recover; + u64 timestamps; /* dirtied @completion */ u64 cqes ____cacheline_aligned_in_smp; u64 wake; @@ -461,6 +466,7 @@ struct mlx5e_ptp_cq_stats { u64 abort; u64 abort_abs_diff_ns; u64 late_cqe; + u64 lost_cqe; }; struct mlx5e_rep_stats { @@ -478,6 +484,7 @@ struct mlx5e_rep_stats { u64 tx_vport_rdma_multicast_bytes; u64 vport_loopback_packets; u64 vport_loopback_bytes; + u64 rx_vport_out_of_buffer; }; struct mlx5e_stats { @@ -498,6 +505,7 @@ static inline void mlx5e_stats_copy_rep_stats(struct rtnl_link_stats64 *vf_vport vf_vport->tx_packets = rep_stats->vport_tx_packets; vf_vport->rx_bytes = rep_stats->vport_rx_bytes; vf_vport->tx_bytes = rep_stats->vport_tx_bytes; + vf_vport->rx_missed_errors = rep_stats->rx_vport_out_of_buffer; } extern mlx5e_stats_grp_t mlx5e_nic_stats_grps[]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 31ed26cac9bf..30673292e15f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -835,8 +835,7 @@ static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp, memset(ttc_params, 0, sizeof(*ttc_params)); - ttc_params->ns = mlx5_get_flow_namespace(hp->func_mdev, - MLX5_FLOW_NAMESPACE_KERNEL); + ttc_params->ns_type = MLX5_FLOW_NAMESPACE_KERNEL; for (tt = 0; tt < MLX5_NUM_TT; tt++) { ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR; ttc_params->dests[tt].tir_num = @@ -2802,12 +2801,6 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, flow_rule_match_control(rule, &match); addr_type = match.key->addr_type; - /* the HW doesn't support frag first/later */ - if (match.mask->flags & FLOW_DIS_FIRST_FRAG) { - NL_SET_ERR_MSG_MOD(extack, "Match on frag first/later is not supported"); - return -EOPNOTSUPP; - } - if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) { MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1); MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, @@ -2820,6 +2813,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, else *match_level = MLX5_MATCH_L3; } + + if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT, + match.mask->flags, extack)) + return -EOPNOTSUPP; } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { @@ -5464,6 +5461,7 @@ static bool mlx5e_tc_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct tunnel_match_enc_opts enc_opts = {}; struct mlx5_rep_uplink_priv *uplink_priv; + IP_TUNNEL_DECLARE_FLAGS(flags) = { }; struct mlx5e_rep_priv *uplink_rpriv; struct metadata_dst *tun_dst; struct tunnel_match_key key; @@ -5471,6 +5469,8 @@ static bool mlx5e_tc_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb struct net_device *dev; int err; + __set_bit(IP_TUNNEL_KEY_BIT, flags); + enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK; tun_id = tunnel_id >> ENC_OPTS_BITS; @@ -5503,14 
+5503,14 @@ static bool mlx5e_tc_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb case FLOW_DISSECTOR_KEY_IPV4_ADDRS: tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst, key.enc_ip.tos, key.enc_ip.ttl, - key.enc_tp.dst, TUNNEL_KEY, + key.enc_tp.dst, flags, key32_to_tunnel_id(key.enc_key_id.keyid), enc_opts.key.len); break; case FLOW_DISSECTOR_KEY_IPV6_ADDRS: tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst, key.enc_ip.tos, key.enc_ip.ttl, - key.enc_tp.dst, 0, TUNNEL_KEY, + key.enc_tp.dst, 0, flags, key32_to_tunnel_id(key.enc_key_id.keyid), enc_opts.key.len); break; @@ -5528,11 +5528,16 @@ static bool mlx5e_tc_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb tun_dst->u.tun_info.key.tp_src = key.enc_tp.src; - if (enc_opts.key.len) + if (enc_opts.key.len) { + ip_tunnel_flags_zero(flags); + if (enc_opts.key.dst_opt_type) + __set_bit(enc_opts.key.dst_opt_type, flags); + ip_tunnel_info_opts_set(&tun_dst->u.tun_info, enc_opts.key.data, enc_opts.key.len, - enc_opts.key.dst_opt_type); + flags); + } skb_dst_set(skb, (struct dst_entry *)tun_dst); dev = dev_get_by_index(&init_net, key.filter_ifindex); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index e21a3b4128ce..099bf1078889 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -749,11 +749,13 @@ static void mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb, u64 ts = get_cqe_ts(cqe); hwts.hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, ts); - if (sq->ptpsq) + if (sq->ptpsq) { mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_CQE_HWTSTAMP, hwts.hwtstamp, sq->ptpsq->cq_stats); - else + } else { skb_tstamp_tx(skb, &hwts); + sq->stats->timestamps++; + } } napi_consume_skb(skb, napi_budget); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index a7d9b7cb4297..5873fde65c2e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -55,7 +55,7 @@ static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq) return; dim_update_sample(sq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample); - net_dim(&sq->dim, dim_sample); + net_dim(sq->dim, dim_sample); } static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq) @@ -67,7 +67,7 @@ static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq) return; dim_update_sample(rq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample); - net_dim(&rq->dim, dim_sample); + net_dim(rq->dim, dim_sample); } void mlx5e_trigger_irq(struct mlx5e_icosq *sq) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 40a6cb052a2d..5693986ae656 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -688,6 +688,12 @@ static int create_async_eqs(struct mlx5_core_dev *dev) if (err) goto err2; + /* Skip page eq creation when the device does not request for page requests */ + if (MLX5_CAP_GEN(dev, page_request_disable)) { + mlx5_core_dbg(dev, "Skip page EQ creation\n"); + return 0; + } + param = (struct mlx5_eq_param) { .irq = table->ctrl_irq, .nent = /* TODO: sriov max_vf + */ 1, @@ -716,7 +722,8 @@ static void destroy_async_eqs(struct mlx5_core_dev *dev) { struct mlx5_eq_table *table = dev->priv.eq_table; - cleanup_async_eq(dev, &table->pages_eq, "pages"); + if (!MLX5_CAP_GEN(dev, page_request_disable)) + 
cleanup_async_eq(dev, &table->pages_eq, "pages"); cleanup_async_eq(dev, &table->async_eq, "async"); mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_DESTROY_EQ); mlx5_cmd_use_polling(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c index 1b9bc32efd6f..c5ea1d1d2b03 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c @@ -1874,7 +1874,7 @@ int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_ "Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n", addr, vid, vport_num); NL_SET_ERR_MSG_FMT_MOD(extack, - "Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n", + "Failed to lookup vlan metadata for MDB (MAC=%pM,vid=%u,vport=%u)\n", addr, vid, vport_num); return -EINVAL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c index d8e739cbcbce..f8869c9b6802 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c @@ -98,6 +98,8 @@ static const struct devlink_port_ops mlx5_esw_pf_vf_dl_port_ops = { .port_fn_ipsec_packet_get = mlx5_devlink_port_fn_ipsec_packet_get, .port_fn_ipsec_packet_set = mlx5_devlink_port_fn_ipsec_packet_set, #endif /* CONFIG_XFRM_OFFLOAD */ + .port_fn_max_io_eqs_get = mlx5_devlink_port_fn_max_io_eqs_get, + .port_fn_max_io_eqs_set = mlx5_devlink_port_fn_max_io_eqs_set, }; static void mlx5_esw_offloads_sf_devlink_port_attrs_set(struct mlx5_eswitch *esw, @@ -143,6 +145,8 @@ static const struct devlink_port_ops mlx5_esw_dl_sf_port_ops = { .port_fn_state_get = mlx5_devlink_sf_port_fn_state_get, .port_fn_state_set = mlx5_devlink_sf_port_fn_state_set, #endif + .port_fn_max_io_eqs_get = mlx5_devlink_port_fn_max_io_eqs_get, + .port_fn_max_io_eqs_set = mlx5_devlink_port_fn_max_io_eqs_set, }; int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, struct mlx5_vport *vport) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 1789800faaeb..17f78091ad30 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1805,7 +1805,8 @@ err: } static int mlx5_devlink_esw_multiport_set(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 349e28a6dd8d..88745dc6aed5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -573,6 +573,13 @@ int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_en int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port, bool enable, struct netlink_ext_ack *extack); #endif /* CONFIG_XFRM_OFFLOAD */ +int mlx5_devlink_port_fn_max_io_eqs_get(struct devlink_port *port, + u32 *max_io_eqs, + struct netlink_ext_ack *extack); +int mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port, + u32 max_io_eqs, + struct netlink_ext_ack *extack); + void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type); int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, @@ -833,7 
+840,7 @@ int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw, struct mlx5_eswitch *slave_esw, int max_slaves); void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw, struct mlx5_eswitch *slave_esw); -int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw); +int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw); bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev); void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev); @@ -925,7 +932,7 @@ mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw, static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw) { return 0; } static inline int -mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw) +mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw) { return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 844d3e3a65dd..592143d5e1da 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -67,6 +67,8 @@ #define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1) +#define MLX5_ESW_MAX_CTRL_EQS 4 + static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = { .max_fte = MLX5_ESW_VPORT_TBL_SIZE, .max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS, @@ -2411,7 +2413,8 @@ err: } static int esw_port_metadata_set(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); struct mlx5_eswitch *esw = dev->priv.eswitch; @@ -2502,6 +2505,16 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw) esw_offloads_cleanup_reps(esw); } +static int __esw_offloads_load_rep(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep, u8 rep_type) +{ + if (atomic_cmpxchg(&rep->rep_data[rep_type].state, + REP_REGISTERED, REP_LOADED) == REP_REGISTERED) + return esw->offloads.rep_ops[rep_type]->load(esw->dev, rep); + + return 0; +} + static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep, u8 rep_type) { @@ -2526,13 +2539,11 @@ static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num) int err; rep = mlx5_eswitch_get_rep(esw, vport_num); - for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) - if (atomic_cmpxchg(&rep->rep_data[rep_type].state, - REP_REGISTERED, REP_LOADED) == REP_REGISTERED) { - err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep); - if (err) - goto err_reps; - } + for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) { + err = __esw_offloads_load_rep(esw, rep, rep_type); + if (err) + goto err_reps; + } return 0; @@ -3277,7 +3288,7 @@ static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw) esw_vport_destroy_offloads_acl_tables(esw, vport); } -int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw) +int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw) { struct mlx5_eswitch_rep *rep; unsigned long i; @@ -3290,13 +3301,13 @@ int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw) if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED) return 0; - ret = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK); + ret = __esw_offloads_load_rep(esw, rep, REP_IB); if (ret) return ret; mlx5_esw_for_each_rep(esw, i, rep) { if (atomic_read(&rep->rep_data[REP_ETH].state) == REP_LOADED) - mlx5_esw_offloads_rep_load(esw, rep->vport); + __esw_offloads_load_rep(esw, rep, REP_IB); } return 0; @@ -4568,3 +4579,98 
@@ unlock: return err; } #endif /* CONFIG_XFRM_OFFLOAD */ + +int +mlx5_devlink_port_fn_max_io_eqs_get(struct devlink_port *port, u32 *max_io_eqs, + struct netlink_ext_ack *extack) +{ + struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port); + int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); + u16 vport_num = vport->vport; + struct mlx5_eswitch *esw; + void *query_ctx; + void *hca_caps; + u32 max_eqs; + int err; + + esw = mlx5_devlink_eswitch_nocheck_get(port->devlink); + if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) { + NL_SET_ERR_MSG_MOD(extack, + "Device doesn't support VHCA management"); + return -EOPNOTSUPP; + } + + query_ctx = kzalloc(query_out_sz, GFP_KERNEL); + if (!query_ctx) + return -ENOMEM; + + mutex_lock(&esw->state_lock); + err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx, + MLX5_CAP_GENERAL); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps"); + goto out; + } + + hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability); + max_eqs = MLX5_GET(cmd_hca_cap, hca_caps, max_num_eqs); + if (max_eqs < MLX5_ESW_MAX_CTRL_EQS) + *max_io_eqs = 0; + else + *max_io_eqs = max_eqs - MLX5_ESW_MAX_CTRL_EQS; +out: + mutex_unlock(&esw->state_lock); + kfree(query_ctx); + return err; +} + +int +mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port, u32 max_io_eqs, + struct netlink_ext_ack *extack) +{ + struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port); + int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); + u16 vport_num = vport->vport; + struct mlx5_eswitch *esw; + void *query_ctx; + void *hca_caps; + u16 max_eqs; + int err; + + esw = mlx5_devlink_eswitch_nocheck_get(port->devlink); + if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) { + NL_SET_ERR_MSG_MOD(extack, + "Device doesn't support VHCA management"); + return -EOPNOTSUPP; + } + + if (check_add_overflow(max_io_eqs, MLX5_ESW_MAX_CTRL_EQS, &max_eqs)) { + NL_SET_ERR_MSG_MOD(extack, "Supplied value out of range"); + return -EINVAL; + } + + query_ctx = kzalloc(query_out_sz, GFP_KERNEL); + if (!query_ctx) + return -ENOMEM; + + mutex_lock(&esw->state_lock); + err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx, + MLX5_CAP_GENERAL); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps"); + goto out; + } + + hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability); + MLX5_SET(cmd_hca_cap, hca_caps, max_num_eqs, max_eqs); + + err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num, + MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE); + if (err) + NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA caps"); + +out: + mutex_unlock(&esw->state_lock); + kfree(query_ctx); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index cf085a478e3e..32cdacc34a0d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -3332,7 +3332,8 @@ static int mlx5_fs_mode_validate(struct devlink *devlink, u32 id, } static int mlx5_fs_mode_set(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); enum mlx5_flow_steering_mode mode; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index e7faf7e73ca4..2d95a9b7b44e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c 
@@ -283,7 +283,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) return 0; } -int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id) +int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, u32 *sw_owner_id) { u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {}; int i; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c index 2911aa34a5be..979c49ae6b5c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c @@ -52,7 +52,8 @@ static void mlx5_set_fw_rst_ack(struct mlx5_core_dev *dev) } static int mlx5_fw_reset_enable_remote_dev_reset_set(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); struct mlx5_fw_reset *fw_reset; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index d77be1b4dd9c..8e0404c0d1ca 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -531,7 +531,7 @@ static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu) if (err) goto out; - netdev->mtu = new_params.sw_mtu; + WRITE_ONCE(netdev->mtu, new_params.sw_mtu); out: mutex_unlock(&priv->state_lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c index f87471306f6b..028a76944d82 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c @@ -280,7 +280,7 @@ static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu) struct mlx5e_priv *priv = mlx5i_epriv(netdev); mutex_lock(&priv->state_lock); - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); mutex_unlock(&priv->state_lock); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index 69d482f7c5a2..f7f0476a4a58 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -713,7 +713,6 @@ int mlx5_deactivate_lag(struct mlx5_lag *ldev) return 0; } -#define MLX5_LAG_OFFLOADS_SUPPORTED_PORTS 4 bool mlx5_lag_check_prereq(struct mlx5_lag *ldev) { #ifdef CONFIG_MLX5_ESWITCH @@ -739,8 +738,6 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev) if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode) return false; - if (mode == MLX5_ESWITCH_OFFLOADS && ldev->ports > MLX5_LAG_OFFLOADS_SUPPORTED_PORTS) - return false; #else for (i = 0; i < ldev->ports; i++) if (mlx5_sriov_is_enabled(ldev->pf[i].dev)) @@ -814,7 +811,7 @@ void mlx5_disable_lag(struct mlx5_lag *ldev) if (shared_fdb) for (i = 0; i < ldev->ports; i++) if (!(ldev->pf[i].dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)) - mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch); + mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch); } static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev) @@ -922,7 +919,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) mlx5_rescan_drivers_locked(dev0); for (i = 0; i < ldev->ports; i++) { - err = mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch); + err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch); if (err) break; } @@ -933,7 +930,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) mlx5_deactivate_lag(ldev); 
mlx5_lag_add_devices(ldev); for (i = 0; i < ldev->ports; i++) - mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch); + mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch); mlx5_core_err(dev0, "Failed to enable lag\n"); return; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c index 82889f30506e..571ea26edd0c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c @@ -99,7 +99,7 @@ static int enable_mpesw(struct mlx5_lag *ldev) dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; mlx5_rescan_drivers_locked(dev0); for (i = 0; i < ldev->ports; i++) { - err = mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch); + err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch); if (err) goto err_rescan_drivers; } @@ -113,7 +113,7 @@ err_rescan_drivers: err_add_devices: mlx5_lag_add_devices(ldev); for (i = 0; i < ldev->ports; i++) - mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch); + mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch); mlx5_mpesw_metadata_cleanup(ldev); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c index 101b3bb90863..c16b462ddedf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c @@ -449,13 +449,11 @@ static void set_tt_map(struct mlx5_lag_port_sel *port_sel, static void mlx5_lag_set_inner_ttc_params(struct mlx5_lag *ldev, struct ttc_params *ttc_params) { - struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev; struct mlx5_lag_port_sel *port_sel = &ldev->port_sel; struct mlx5_flow_table_attr *ft_attr; int tt; - ttc_params->ns = mlx5_get_flow_namespace(dev, - MLX5_FLOW_NAMESPACE_PORT_SEL); + ttc_params->ns_type = MLX5_FLOW_NAMESPACE_PORT_SEL; ft_attr = &ttc_params->ft_attr; ft_attr->level = MLX5_LAG_FT_LEVEL_INNER_TTC; @@ -470,13 +468,11 @@ static void mlx5_lag_set_inner_ttc_params(struct mlx5_lag *ldev, static void mlx5_lag_set_outer_ttc_params(struct mlx5_lag *ldev, struct ttc_params *ttc_params) { - struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev; struct mlx5_lag_port_sel *port_sel = &ldev->port_sel; struct mlx5_flow_table_attr *ft_attr; int tt; - ttc_params->ns = mlx5_get_flow_namespace(dev, - MLX5_FLOW_NAMESPACE_PORT_SEL); + ttc_params->ns_type = MLX5_FLOW_NAMESPACE_PORT_SEL; ft_attr = &ttc_params->ft_attr; ft_attr->level = MLX5_LAG_FT_LEVEL_TTC; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c index b78f2ba25c19..9f13cea16446 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c @@ -9,21 +9,24 @@ #include "mlx5_core.h" #include "lib/fs_ttc.h" -#define MLX5_TTC_NUM_GROUPS 3 -#define MLX5_TTC_GROUP1_SIZE (BIT(3) + MLX5_NUM_TUNNEL_TT) -#define MLX5_TTC_GROUP2_SIZE BIT(1) -#define MLX5_TTC_GROUP3_SIZE BIT(0) -#define MLX5_TTC_TABLE_SIZE (MLX5_TTC_GROUP1_SIZE +\ - MLX5_TTC_GROUP2_SIZE +\ - MLX5_TTC_GROUP3_SIZE) - -#define MLX5_INNER_TTC_NUM_GROUPS 3 -#define MLX5_INNER_TTC_GROUP1_SIZE BIT(3) -#define MLX5_INNER_TTC_GROUP2_SIZE BIT(1) -#define MLX5_INNER_TTC_GROUP3_SIZE BIT(0) -#define MLX5_INNER_TTC_TABLE_SIZE (MLX5_INNER_TTC_GROUP1_SIZE +\ - MLX5_INNER_TTC_GROUP2_SIZE +\ - MLX5_INNER_TTC_GROUP3_SIZE) +#define MLX5_TTC_MAX_NUM_GROUPS 4 +#define MLX5_TTC_GROUP_TCPUDP_SIZE (MLX5_TT_IPV6_UDP + 1) + 
+struct mlx5_fs_ttc_groups { + bool use_l4_type; + int num_groups; + int group_size[MLX5_TTC_MAX_NUM_GROUPS]; +}; + +static int mlx5_fs_ttc_table_size(const struct mlx5_fs_ttc_groups *groups) +{ + int i, sz = 0; + + for (i = 0; i < groups->num_groups; i++) + sz += groups->group_size[i]; + + return sz; +} /* L3/L4 traffic type classifier */ struct mlx5_ttc_table { @@ -138,6 +141,53 @@ static struct mlx5_etype_proto ttc_tunnel_rules[] = { }; +enum TTC_GROUP_TYPE { + TTC_GROUPS_DEFAULT = 0, + TTC_GROUPS_USE_L4_TYPE = 1, +}; + +static const struct mlx5_fs_ttc_groups ttc_groups[] = { + [TTC_GROUPS_DEFAULT] = { + .num_groups = 3, + .group_size = { + BIT(3) + MLX5_NUM_TUNNEL_TT, + BIT(1), + BIT(0), + }, + }, + [TTC_GROUPS_USE_L4_TYPE] = { + .use_l4_type = true, + .num_groups = 4, + .group_size = { + MLX5_TTC_GROUP_TCPUDP_SIZE, + BIT(3) + MLX5_NUM_TUNNEL_TT - MLX5_TTC_GROUP_TCPUDP_SIZE, + BIT(1), + BIT(0), + }, + }, +}; + +static const struct mlx5_fs_ttc_groups inner_ttc_groups[] = { + [TTC_GROUPS_DEFAULT] = { + .num_groups = 3, + .group_size = { + BIT(3), + BIT(1), + BIT(0), + }, + }, + [TTC_GROUPS_USE_L4_TYPE] = { + .use_l4_type = true, + .num_groups = 4, + .group_size = { + MLX5_TTC_GROUP_TCPUDP_SIZE, + BIT(3) - MLX5_TTC_GROUP_TCPUDP_SIZE, + BIT(1), + BIT(0), + }, + }, +}; + u8 mlx5_get_proto_by_tunnel_type(enum mlx5_tunnel_types tt) { return ttc_tunnel_rules[tt].proto; @@ -188,9 +238,29 @@ static u8 mlx5_etype_to_ipv(u16 ethertype) return 0; } +static void mlx5_fs_ttc_set_match_proto(void *headers_c, void *headers_v, + u8 proto, bool use_l4_type) +{ + int l4_type; + + if (use_l4_type && (proto == IPPROTO_TCP || proto == IPPROTO_UDP)) { + if (proto == IPPROTO_TCP) + l4_type = MLX5_PACKET_L4_TYPE_TCP; + else + l4_type = MLX5_PACKET_L4_TYPE_UDP; + + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, l4_type); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_type, l4_type); + } else { + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, proto); + } +} + static struct mlx5_flow_handle * mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, - struct mlx5_flow_destination *dest, u16 etype, u8 proto) + struct mlx5_flow_destination *dest, u16 etype, u8 proto, + bool use_l4_type) { int match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(dev, @@ -207,8 +277,13 @@ mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, if (proto) { spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol); - MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto); + mlx5_fs_ttc_set_match_proto(MLX5_ADDR_OF(fte_match_param, + spec->match_criteria, + outer_headers), + MLX5_ADDR_OF(fte_match_param, + spec->match_value, + outer_headers), + proto, use_l4_type); } ipv = mlx5_etype_to_ipv(etype); @@ -234,7 +309,8 @@ mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev, struct ttc_params *params, - struct mlx5_ttc_table *ttc) + struct mlx5_ttc_table *ttc, + bool use_l4_type) { struct mlx5_flow_handle **trules; struct mlx5_ttc_rule *rules; @@ -251,7 +327,8 @@ static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev, continue; rule->rule = mlx5_generate_ttc_rule(dev, ft, &params->dests[tt], ttc_rules[tt].etype, - ttc_rules[tt].proto); + ttc_rules[tt].proto, + use_l4_type); if (IS_ERR(rule->rule)) { err = PTR_ERR(rule->rule); 
rule->rule = NULL; @@ -273,7 +350,8 @@ static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev, trules[tt] = mlx5_generate_ttc_rule(dev, ft, &params->tunnel_dests[tt], ttc_tunnel_rules[tt].etype, - ttc_tunnel_rules[tt].proto); + ttc_tunnel_rules[tt].proto, + use_l4_type); if (IS_ERR(trules[tt])) { err = PTR_ERR(trules[tt]); trules[tt] = NULL; @@ -289,7 +367,8 @@ del_rules: } static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc, - bool use_ipv) + bool use_ipv, + const struct mlx5_fs_ttc_groups *groups) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); int ix = 0; @@ -297,7 +376,7 @@ static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc, int err; u8 *mc; - ttc->g = kcalloc(MLX5_TTC_NUM_GROUPS, sizeof(*ttc->g), GFP_KERNEL); + ttc->g = kcalloc(groups->num_groups, sizeof(*ttc->g), GFP_KERNEL); if (!ttc->g) return -ENOMEM; in = kvzalloc(inlen, GFP_KERNEL); @@ -307,16 +386,31 @@ static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc, return -ENOMEM; } - /* L4 Group */ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); if (use_ipv) MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version); else MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + + /* TCP UDP group */ + if (groups->use_l4_type) { + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.l4_type); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += groups->group_size[ttc->num_groups]; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in); + if (IS_ERR(ttc->g[ttc->num_groups])) + goto err; + ttc->num_groups++; + + MLX5_SET(fte_match_param, mc, outer_headers.l4_type, 0); + } + + /* L4 Group */ + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5_TTC_GROUP1_SIZE; + ix += groups->group_size[ttc->num_groups]; MLX5_SET_CFG(in, end_flow_index, ix - 1); ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in); if (IS_ERR(ttc->g[ttc->num_groups])) @@ -326,7 +420,7 @@ static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc, /* L3 Group */ MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0); MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5_TTC_GROUP2_SIZE; + ix += groups->group_size[ttc->num_groups]; MLX5_SET_CFG(in, end_flow_index, ix - 1); ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in); if (IS_ERR(ttc->g[ttc->num_groups])) @@ -336,7 +430,7 @@ static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc, /* Any Group */ memset(in, 0, inlen); MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5_TTC_GROUP3_SIZE; + ix += groups->group_size[ttc->num_groups]; MLX5_SET_CFG(in, end_flow_index, ix - 1); ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in); if (IS_ERR(ttc->g[ttc->num_groups])) @@ -358,7 +452,7 @@ static struct mlx5_flow_handle * mlx5_generate_inner_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, struct mlx5_flow_destination *dest, - u16 etype, u8 proto) + u16 etype, u8 proto, bool use_l4_type) { MLX5_DECLARE_FLOW_ACT(flow_act); struct mlx5_flow_handle *rule; @@ -379,8 +473,13 @@ mlx5_generate_inner_ttc_rule(struct mlx5_core_dev *dev, if (proto) { spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_protocol); - MLX5_SET(fte_match_param, spec->match_value, 
inner_headers.ip_protocol, proto); + mlx5_fs_ttc_set_match_proto(MLX5_ADDR_OF(fte_match_param, + spec->match_criteria, + inner_headers), + MLX5_ADDR_OF(fte_match_param, + spec->match_value, + inner_headers), + proto, use_l4_type); } rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1); @@ -395,7 +494,8 @@ mlx5_generate_inner_ttc_rule(struct mlx5_core_dev *dev, static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev, struct ttc_params *params, - struct mlx5_ttc_table *ttc) + struct mlx5_ttc_table *ttc, + bool use_l4_type) { struct mlx5_ttc_rule *rules; struct mlx5_flow_table *ft; @@ -413,7 +513,8 @@ static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev, rule->rule = mlx5_generate_inner_ttc_rule(dev, ft, &params->dests[tt], ttc_rules[tt].etype, - ttc_rules[tt].proto); + ttc_rules[tt].proto, + use_l4_type); if (IS_ERR(rule->rule)) { err = PTR_ERR(rule->rule); rule->rule = NULL; @@ -430,7 +531,8 @@ del_rules: return err; } -static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc) +static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc, + const struct mlx5_fs_ttc_groups *groups) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); int ix = 0; @@ -438,8 +540,7 @@ static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc) int err; u8 *mc; - ttc->g = kcalloc(MLX5_INNER_TTC_NUM_GROUPS, sizeof(*ttc->g), - GFP_KERNEL); + ttc->g = kcalloc(groups->num_groups, sizeof(*ttc->g), GFP_KERNEL); if (!ttc->g) return -ENOMEM; in = kvzalloc(inlen, GFP_KERNEL); @@ -449,13 +550,28 @@ static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc) return -ENOMEM; } - /* L4 Group */ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); - MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol); MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version); MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS); + + /* TCP UDP group */ + if (groups->use_l4_type) { + MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.l4_type); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += groups->group_size[ttc->num_groups]; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in); + if (IS_ERR(ttc->g[ttc->num_groups])) + goto err; + ttc->num_groups++; + + MLX5_SET(fte_match_param, mc, inner_headers.l4_type, 0); + } + + /* L4 Group */ + MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol); MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5_INNER_TTC_GROUP1_SIZE; + ix += groups->group_size[ttc->num_groups]; MLX5_SET_CFG(in, end_flow_index, ix - 1); ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in); if (IS_ERR(ttc->g[ttc->num_groups])) @@ -465,7 +581,7 @@ static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc) /* L3 Group */ MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0); MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5_INNER_TTC_GROUP2_SIZE; + ix += groups->group_size[ttc->num_groups]; MLX5_SET_CFG(in, end_flow_index, ix - 1); ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in); if (IS_ERR(ttc->g[ttc->num_groups])) @@ -475,7 +591,7 @@ static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc) /* Any Group */ memset(in, 0, inlen); MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5_INNER_TTC_GROUP3_SIZE; + ix += groups->group_size[ttc->num_groups]; MLX5_SET_CFG(in, end_flow_index, ix - 1); ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in); if 
(IS_ERR(ttc->g[ttc->num_groups])) @@ -496,27 +612,47 @@ err: struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev, struct ttc_params *params) { + const struct mlx5_fs_ttc_groups *groups; + struct mlx5_flow_namespace *ns; struct mlx5_ttc_table *ttc; + bool use_l4_type; int err; ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL); if (!ttc) return ERR_PTR(-ENOMEM); + switch (params->ns_type) { + case MLX5_FLOW_NAMESPACE_PORT_SEL: + use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) && + MLX5_CAP_PORT_SELECTION_FT_FIELD_SUPPORT_2(dev, inner_l4_type); + break; + case MLX5_FLOW_NAMESPACE_KERNEL: + use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) && + MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(dev, inner_l4_type); + break; + default: + return ERR_PTR(-EINVAL); + } + + ns = mlx5_get_flow_namespace(dev, params->ns_type); + groups = use_l4_type ? &inner_ttc_groups[TTC_GROUPS_USE_L4_TYPE] : + &inner_ttc_groups[TTC_GROUPS_DEFAULT]; + WARN_ON_ONCE(params->ft_attr.max_fte); - params->ft_attr.max_fte = MLX5_INNER_TTC_TABLE_SIZE; - ttc->t = mlx5_create_flow_table(params->ns, &params->ft_attr); + params->ft_attr.max_fte = mlx5_fs_ttc_table_size(groups); + ttc->t = mlx5_create_flow_table(ns, &params->ft_attr); if (IS_ERR(ttc->t)) { err = PTR_ERR(ttc->t); kvfree(ttc); return ERR_PTR(err); } - err = mlx5_create_inner_ttc_table_groups(ttc); + err = mlx5_create_inner_ttc_table_groups(ttc, groups); if (err) goto destroy_ft; - err = mlx5_generate_inner_ttc_table_rules(dev, params, ttc); + err = mlx5_generate_inner_ttc_table_rules(dev, params, ttc, use_l4_type); if (err) goto destroy_ft; @@ -549,27 +685,47 @@ struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev, bool match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_field_support.outer_ip_version); + const struct mlx5_fs_ttc_groups *groups; + struct mlx5_flow_namespace *ns; struct mlx5_ttc_table *ttc; + bool use_l4_type; int err; ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL); if (!ttc) return ERR_PTR(-ENOMEM); + switch (params->ns_type) { + case MLX5_FLOW_NAMESPACE_PORT_SEL: + use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) && + MLX5_CAP_PORT_SELECTION_FT_FIELD_SUPPORT_2(dev, outer_l4_type); + break; + case MLX5_FLOW_NAMESPACE_KERNEL: + use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) && + MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(dev, outer_l4_type); + break; + default: + return ERR_PTR(-EINVAL); + } + + ns = mlx5_get_flow_namespace(dev, params->ns_type); + groups = use_l4_type ? 
&ttc_groups[TTC_GROUPS_USE_L4_TYPE] : + &ttc_groups[TTC_GROUPS_DEFAULT]; + WARN_ON_ONCE(params->ft_attr.max_fte); - params->ft_attr.max_fte = MLX5_TTC_TABLE_SIZE; - ttc->t = mlx5_create_flow_table(params->ns, &params->ft_attr); + params->ft_attr.max_fte = mlx5_fs_ttc_table_size(groups); + ttc->t = mlx5_create_flow_table(ns, &params->ft_attr); if (IS_ERR(ttc->t)) { err = PTR_ERR(ttc->t); kvfree(ttc); return ERR_PTR(err); } - err = mlx5_create_ttc_table_groups(ttc, match_ipv_outer); + err = mlx5_create_ttc_table_groups(ttc, match_ipv_outer, groups); if (err) goto destroy_ft; - err = mlx5_generate_ttc_table_rules(dev, params, ttc); + err = mlx5_generate_ttc_table_rules(dev, params, ttc, use_l4_type); if (err) goto destroy_ft; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h index 85fef0cd1c07..92eea6bea310 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h @@ -40,7 +40,7 @@ struct mlx5_ttc_rule { struct mlx5_ttc_table; struct ttc_params { - struct mlx5_flow_namespace *ns; + enum mlx5_flow_namespace_type ns_type; struct mlx5_flow_table_attr ft_attr; struct mlx5_flow_destination dests[MLX5_NUM_TT]; DECLARE_BITMAP(ignore_dests, MLX5_NUM_TT); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 331ce47f51a1..6574c145dc1e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1680,6 +1680,8 @@ int mlx5_init_one_light(struct mlx5_core_dev *dev) struct devlink *devlink = priv_to_devlink(dev); int err; + devl_lock(devlink); + devl_register(devlink); dev->state = MLX5_DEVICE_STATE_UP; err = mlx5_function_enable(dev, true, mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT)); if (err) { @@ -1693,27 +1695,21 @@ int mlx5_init_one_light(struct mlx5_core_dev *dev) goto query_hca_caps_err; } - devl_lock(devlink); - devl_register(devlink); - err = mlx5_devlink_params_register(priv_to_devlink(dev)); if (err) { mlx5_core_warn(dev, "mlx5_devlink_param_reg err = %d\n", err); - goto params_reg_err; + goto query_hca_caps_err; } devl_unlock(devlink); return 0; -params_reg_err: - devl_unregister(devlink); - devl_unlock(devlink); query_hca_caps_err: - devl_unregister(devlink); - devl_unlock(devlink); mlx5_function_disable(dev, true); out: dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; + devl_unregister(devlink); + devl_unlock(devlink); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 58732f44940f..c38342b9f320 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -205,7 +205,7 @@ int mlx5_cmd_enable(struct mlx5_core_dev *dev); void mlx5_cmd_disable(struct mlx5_core_dev *dev); void mlx5_cmd_set_state(struct mlx5_core_dev *dev, enum mlx5_cmdif_state cmdif_state); -int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id); +int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, u32 *sw_owner_id); int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev); int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev); int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index dcf58efac159..d894a88fa9f2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -660,6 +660,9 @@ int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n", npages, boot ? "boot" : "init", func_id); + if (!npages) + return 0; + return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev)); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index 6bac8ad70ba6..fb8787e30d3f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -508,58 +508,6 @@ struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx, } /** - * mlx5_msix_alloc - allocate msix interrupt - * @dev: mlx5 device from which to request - * @handler: interrupt handler - * @affdesc: affinity descriptor - * @name: interrupt name - * - * Returns: struct msi_map with result encoded. - * Note: the caller must make sure to release the irq by calling - * mlx5_msix_free() if shutdown was initiated. - */ -struct msi_map mlx5_msix_alloc(struct mlx5_core_dev *dev, - irqreturn_t (*handler)(int, void *), - const struct irq_affinity_desc *affdesc, - const char *name) -{ - struct msi_map map; - int err; - - if (!dev->pdev) { - map.virq = 0; - map.index = -EINVAL; - return map; - } - - map = pci_msix_alloc_irq_at(dev->pdev, MSI_ANY_INDEX, affdesc); - if (!map.virq) - return map; - - err = request_irq(map.virq, handler, 0, name, NULL); - if (err) { - mlx5_core_warn(dev, "err %d\n", err); - pci_msix_free_irq(dev->pdev, map); - map.virq = 0; - map.index = -ENOMEM; - } - return map; -} -EXPORT_SYMBOL(mlx5_msix_alloc); - -/** - * mlx5_msix_free - free a previously allocated msix interrupt - * @dev: mlx5 device associated with interrupt - * @map: map previously returned by mlx5_msix_alloc() - */ -void mlx5_msix_free(struct mlx5_core_dev *dev, struct msi_map map) -{ - free_irq(map.virq, NULL); - pci_msix_free_irq(dev->pdev, map); -} -EXPORT_SYMBOL(mlx5_msix_free); - -/** * mlx5_irq_release_vector - release one IRQ back to the system. * @irq: the irq to release. */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c index 7ebe71280827..b2986175d9af 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c @@ -60,6 +60,13 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia goto remap_err; } + /* Peer devlink logic expects to work on unregistered devlink instance. 
*/ + err = mlx5_core_peer_devlink_set(sf_dev, devlink); + if (err) { + mlx5_core_warn(mdev, "mlx5_core_peer_devlink_set err=%d\n", err); + goto peer_devlink_set_err; + } + if (MLX5_ESWITCH_MANAGER(sf_dev->parent_mdev)) err = mlx5_init_one_light(mdev); else @@ -69,20 +76,10 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia goto init_one_err; } - err = mlx5_core_peer_devlink_set(sf_dev, devlink); - if (err) { - mlx5_core_warn(mdev, "mlx5_core_peer_devlink_set err=%d\n", err); - goto peer_devlink_set_err; - } - return 0; -peer_devlink_set_err: - if (mlx5_dev_is_lightweight(sf_dev->mdev)) - mlx5_uninit_one_light(sf_dev->mdev); - else - mlx5_uninit_one(sf_dev->mdev); init_one_err: +peer_devlink_set_err: iounmap(mdev->iseg); remap_err: mlx5_mdev_uninit(mdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c index f708b029425a..e9f6c7ed7a7b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c @@ -1883,7 +1883,7 @@ dr_ste_v0_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb, static int dr_ste_v0_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value, struct mlx5dr_ste_build *sb, - uint8_t *tag) + u8 *tag) { struct mlx5dr_match_misc5 *misc5 = &value->misc5; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c index dd856cde188d..1d49704b9542 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c @@ -1897,7 +1897,7 @@ void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb, static int dr_ste_v1_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value, struct mlx5dr_ste_build *sb, - uint8_t *tag) + u8 *tag) { struct mlx5dr_match_misc5 *misc5 = &value->misc5; @@ -2129,7 +2129,7 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb, static int dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(struct mlx5dr_match_param *value, struct mlx5dr_ste_build *sb, - uint8_t *tag) + u8 *tag) { u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0; struct mlx5dr_match_misc *misc = &value->misc; diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c index ba303868686a..b157f0f1c5a8 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c @@ -436,7 +436,7 @@ static int mlxbf_gige_probe(struct platform_device *pdev) priv->rx_irq = platform_get_irq(pdev, MLXBF_GIGE_RECEIVE_PKT_INTR_IDX); priv->llu_plu_irq = platform_get_irq(pdev, MLXBF_GIGE_LLU_PLU_INTR_IDX); - phy_irq = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(&pdev->dev), "phy-gpios", 0); + phy_irq = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(&pdev->dev), "phy", 0); if (phy_irq < 0) { dev_err(&pdev->dev, "Error getting PHY irq. 
Use polling instead"); phy_irq = PHY_POLL; diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index f42a1b1c9368..bf66d996e32e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -8,7 +8,6 @@ #include <linux/device.h> #include <linux/pci.h> #include <linux/interrupt.h> -#include <linux/wait.h> #include <linux/types.h> #include <linux/skbuff.h> #include <linux/if_vlan.h> @@ -36,6 +35,11 @@ enum mlxsw_pci_queue_type { #define MLXSW_PCI_QUEUE_TYPE_COUNT 4 +enum mlxsw_pci_cq_type { + MLXSW_PCI_CQ_SDQ, + MLXSW_PCI_CQ_RDQ, +}; + static const u16 mlxsw_pci_doorbell_type_offset[] = { MLXSW_PCI_DOORBELL_SDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_SDQ */ MLXSW_PCI_DOORBELL_RDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_RDQ */ @@ -78,18 +82,15 @@ struct mlxsw_pci_queue { u8 num; /* queue number */ u8 elem_size; /* size of one element */ enum mlxsw_pci_queue_type type; - struct tasklet_struct tasklet; /* queue processing tasklet */ struct mlxsw_pci *pci; union { struct { - u32 comp_sdq_count; - u32 comp_rdq_count; enum mlxsw_pci_cqe_v v; + struct mlxsw_pci_queue *dq; + struct napi_struct napi; } cq; struct { - u32 ev_cmd_count; - u32 ev_comp_count; - u32 ev_other_count; + struct tasklet_struct tasklet; } eq; } u; }; @@ -120,9 +121,6 @@ struct mlxsw_pci { struct mlxsw_pci_mem_item out_mbox; struct mlxsw_pci_mem_item in_mbox; struct mutex lock; /* Lock access to command registers */ - bool nopoll; - wait_queue_head_t wait; - bool wait_done; struct { u8 status; u64 out_param; @@ -131,13 +129,43 @@ struct mlxsw_pci { struct mlxsw_bus_info bus_info; const struct pci_device_id *id; enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximal supported CQE version */ - u8 num_sdq_cqs; /* Number of CQs used for SDQs */ + u8 num_cqs; /* Number of CQs */ + u8 num_sdqs; /* Number of SDQs */ bool skip_reset; + struct net_device *napi_dev_tx; + struct net_device *napi_dev_rx; }; -static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q) +static int mlxsw_pci_napi_devs_init(struct mlxsw_pci *mlxsw_pci) +{ + int err; + + mlxsw_pci->napi_dev_tx = alloc_netdev_dummy(0); + if (!mlxsw_pci->napi_dev_tx) + return -ENOMEM; + strscpy(mlxsw_pci->napi_dev_tx->name, "mlxsw_tx", + sizeof(mlxsw_pci->napi_dev_tx->name)); + + mlxsw_pci->napi_dev_rx = alloc_netdev_dummy(0); + if (!mlxsw_pci->napi_dev_rx) { + err = -ENOMEM; + goto err_alloc_rx; + } + strscpy(mlxsw_pci->napi_dev_rx->name, "mlxsw_rx", + sizeof(mlxsw_pci->napi_dev_rx->name)); + dev_set_threaded(mlxsw_pci->napi_dev_rx, true); + + return 0; + +err_alloc_rx: + free_netdev(mlxsw_pci->napi_dev_tx); + return err; +} + +static void mlxsw_pci_napi_devs_fini(struct mlxsw_pci *mlxsw_pci) { - tasklet_schedule(&q->tasklet); + free_netdev(mlxsw_pci->napi_dev_rx); + free_netdev(mlxsw_pci->napi_dev_tx); } static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, @@ -187,25 +215,6 @@ mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci, return &mlxsw_pci->queues[q_type]; } -static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci, - enum mlxsw_pci_queue_type q_type) -{ - struct mlxsw_pci_queue_type_group *queue_group; - - queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type); - return queue_group->count; -} - -static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci) -{ - return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ); -} - -static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci) -{ - return __mlxsw_pci_queue_count(mlxsw_pci, 
MLXSW_PCI_QUEUE_TYPE_CQ); -} - static struct mlxsw_pci_queue * __mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci, enum mlxsw_pci_queue_type q_type, u8 q_num) @@ -220,23 +229,16 @@ static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ, q_num); } -static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci, - u8 q_num) -{ - return __mlxsw_pci_queue_get(mlxsw_pci, - MLXSW_PCI_QUEUE_TYPE_RDQ, q_num); -} - static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci, u8 q_num) { return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num); } -static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci, - u8 q_num) +static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci) { - return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num); + /* There is only one EQ at index 0. */ + return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, 0); } static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci, @@ -291,7 +293,9 @@ static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q, static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, struct mlxsw_pci_queue *q) { + struct mlxsw_pci_queue *cq; int tclass; + u8 cq_num; int lp; int i; int err; @@ -304,7 +308,8 @@ static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_WQE; /* Set CQ of same number of this SDQ. */ - mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num); + cq_num = q->num; + mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, cq_num); mlxsw_cmd_mbox_sw2hw_dq_sdq_lp_set(mbox, lp); mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, tclass); mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */ @@ -317,6 +322,9 @@ static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num); if (err) return err; + + cq = mlxsw_pci_cq_get(mlxsw_pci, cq_num); + cq->u.cq.dq = q; mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q); return 0; } @@ -399,7 +407,9 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, struct mlxsw_pci_queue *q) { struct mlxsw_pci_queue_elem_info *elem_info; - u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci); + u8 sdq_count = mlxsw_pci->num_sdqs; + struct mlxsw_pci_queue *cq; + u8 cq_num; int i; int err; @@ -409,7 +419,8 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, /* Set CQ of same number of this RDQ with base * above SDQ count as the lower ones are assigned to SDQs. 
*/ - mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num); + cq_num = sdq_count + q->num; + mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, cq_num); mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i); @@ -421,6 +432,9 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, if (err) return err; + cq = mlxsw_pci_cq_get(mlxsw_pci, cq_num); + cq->u.cq.dq = q; + mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q); for (i = 0; i < q->count; i++) { @@ -441,6 +455,7 @@ rollback: elem_info = mlxsw_pci_queue_elem_info_get(q, i); mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info); } + cq->u.cq.dq = NULL; mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num); return err; @@ -465,54 +480,11 @@ static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci, q->u.cq.v = mlxsw_pci->max_cqe_ver; if (q->u.cq.v == MLXSW_PCI_CQE_V2 && - q->num < mlxsw_pci->num_sdq_cqs && + q->num < mlxsw_pci->num_sdqs && !mlxsw_core_sdq_supports_cqe_v2(mlxsw_pci->core)) q->u.cq.v = MLXSW_PCI_CQE_V1; } -static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, - struct mlxsw_pci_queue *q) -{ - int i; - int err; - - q->consumer_counter = 0; - - for (i = 0; i < q->count; i++) { - char *elem = mlxsw_pci_queue_elem_get(q, i); - - mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1); - } - - if (q->u.cq.v == MLXSW_PCI_CQE_V1) - mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox, - MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1); - else if (q->u.cq.v == MLXSW_PCI_CQE_V2) - mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox, - MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2); - - mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM); - mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0); - mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count)); - for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { - dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i); - - mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr); - } - err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num); - if (err) - return err; - mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); - mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); - return 0; -} - -static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci, - struct mlxsw_pci_queue *q) -{ - mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num); -} - static unsigned int mlxsw_pci_read32_off(struct mlxsw_pci *mlxsw_pci, ptrdiff_t off) { @@ -692,9 +664,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info); out: - /* Everything is set up, ring doorbell to pass elem to HW */ q->producer_counter++; - mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q); return; } @@ -714,58 +684,165 @@ static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q) return elem; } -static void mlxsw_pci_cq_tasklet(struct tasklet_struct *t) +static bool mlxsw_pci_cq_cqe_to_handle(struct mlxsw_pci_queue *q) { - struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet); + struct mlxsw_pci_queue_elem_info *elem_info; + bool owner_bit; + + elem_info = mlxsw_pci_queue_elem_info_consumer_get(q); + owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem_info->elem); + return !mlxsw_pci_elem_hw_owned(q, owner_bit); +} + +static int mlxsw_pci_napi_poll_cq_rx(struct napi_struct *napi, int budget) +{ + struct mlxsw_pci_queue *q = container_of(napi, struct mlxsw_pci_queue, + u.cq.napi); + struct mlxsw_pci_queue *rdq = q->u.cq.dq; struct mlxsw_pci *mlxsw_pci = q->pci; + int work_done = 0; char *cqe; - int items = 0; - int credits = 
q->count >> 1; + + /* If the budget is 0, Rx processing should be skipped. */ + if (unlikely(!budget)) + return 0; while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) { u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe); u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe); u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe); - char ncqe[MLXSW_PCI_CQE_SIZE_MAX]; - memcpy(ncqe, cqe, q->elem_size); - mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); + if (unlikely(sendq)) { + WARN_ON_ONCE(1); + continue; + } - if (sendq) { - struct mlxsw_pci_queue *sdq; + if (unlikely(dqn != rdq->num)) { + WARN_ON_ONCE(1); + continue; + } - sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn); - mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq, - wqe_counter, q->u.cq.v, ncqe); - q->u.cq.comp_sdq_count++; - } else { - struct mlxsw_pci_queue *rdq; + mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq, + wqe_counter, q->u.cq.v, cqe); - rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn); - mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq, - wqe_counter, q->u.cq.v, ncqe); - q->u.cq.comp_rdq_count++; - } - if (++items == credits) + if (++work_done == budget) break; } - if (items) + + mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); + mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, rdq); + + if (work_done < budget) + goto processing_completed; + + /* The driver still has outstanding work to do, budget was exhausted. + * Return exactly budget. In that case, the NAPI instance will be polled + * again. + */ + if (mlxsw_pci_cq_cqe_to_handle(q)) + goto out; + + /* The driver processed all the completions and handled exactly + * 'budget'. Return 'budget - 1' to distinguish from the case that + * driver still has completions to handle. + */ + if (work_done == budget) + work_done--; + +processing_completed: + if (napi_complete_done(napi, work_done)) mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); +out: + return work_done; } -static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q) +static int mlxsw_pci_napi_poll_cq_tx(struct napi_struct *napi, int budget) { - return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT : - MLXSW_PCI_CQE01_COUNT; + struct mlxsw_pci_queue *q = container_of(napi, struct mlxsw_pci_queue, + u.cq.napi); + struct mlxsw_pci_queue *sdq = q->u.cq.dq; + struct mlxsw_pci *mlxsw_pci = q->pci; + int work_done = 0; + char *cqe; + + while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) { + u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe); + u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe); + u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe); + char ncqe[MLXSW_PCI_CQE_SIZE_MAX]; + + if (unlikely(!sendq)) { + WARN_ON_ONCE(1); + continue; + } + + if (unlikely(dqn != sdq->num)) { + WARN_ON_ONCE(1); + continue; + } + + memcpy(ncqe, cqe, q->elem_size); + mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); + + mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq, + wqe_counter, q->u.cq.v, ncqe); + + work_done++; + } + + /* If the budget is 0 napi_complete_done() should never be called. */ + if (unlikely(!budget)) + goto processing_completed; + + work_done = min(work_done, budget - 1); + if (unlikely(!napi_complete_done(napi, work_done))) + goto out; + +processing_completed: + mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); +out: + return work_done; } -static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q) +static enum mlxsw_pci_cq_type +mlxsw_pci_cq_type(const struct mlxsw_pci *mlxsw_pci, + const struct mlxsw_pci_queue *q) { - return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE : - MLXSW_PCI_CQE01_SIZE; + /* Each CQ is mapped to one DQ. 
The first 'num_sdqs' queues are used + * for SDQs and the rest are used for RDQs. + */ + if (q->num < mlxsw_pci->num_sdqs) + return MLXSW_PCI_CQ_SDQ; + + return MLXSW_PCI_CQ_RDQ; } -static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, +static void mlxsw_pci_cq_napi_setup(struct mlxsw_pci_queue *q, + enum mlxsw_pci_cq_type cq_type) +{ + struct mlxsw_pci *mlxsw_pci = q->pci; + + switch (cq_type) { + case MLXSW_PCI_CQ_SDQ: + netif_napi_add(mlxsw_pci->napi_dev_tx, &q->u.cq.napi, + mlxsw_pci_napi_poll_cq_tx); + break; + case MLXSW_PCI_CQ_RDQ: + netif_napi_add(mlxsw_pci->napi_dev_rx, &q->u.cq.napi, + mlxsw_pci_napi_poll_cq_rx); + break; + } + + napi_enable(&q->u.cq.napi); +} + +static void mlxsw_pci_cq_napi_teardown(struct mlxsw_pci_queue *q) +{ + napi_disable(&q->u.cq.napi); + netif_napi_del(&q->u.cq.napi); +} + +static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, struct mlxsw_pci_queue *q) { int i; @@ -776,39 +853,50 @@ static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, for (i = 0; i < q->count; i++) { char *elem = mlxsw_pci_queue_elem_get(q, i); - mlxsw_pci_eqe_owner_set(elem, 1); + mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1); } - mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */ - mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */ - mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count)); + if (q->u.cq.v == MLXSW_PCI_CQE_V1) + mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox, + MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1); + else if (q->u.cq.v == MLXSW_PCI_CQE_V2) + mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox, + MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2); + + mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM); + mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0); + mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count)); for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i); - mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr); + mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr); } - err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num); + err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num); if (err) return err; + mlxsw_pci_cq_napi_setup(q, mlxsw_pci_cq_type(mlxsw_pci, q)); mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); return 0; } -static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci, +static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci, struct mlxsw_pci_queue *q) { - mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num); + mlxsw_pci_cq_napi_teardown(q); + mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num); } -static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe) +static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q) { - mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe); - mlxsw_pci->cmd.comp.out_param = - ((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 | - mlxsw_pci_eqe_cmd_out_param_l_get(eqe); - mlxsw_pci->cmd.wait_done = true; - wake_up(&mlxsw_pci->cmd.wait); + return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT : + MLXSW_PCI_CQE01_COUNT; +} + +static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q) +{ + return q->u.cq.v == MLXSW_PCI_CQE_V2 ? 
MLXSW_PCI_CQE2_SIZE : + MLXSW_PCI_CQE01_SIZE; } static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q) @@ -829,52 +917,79 @@ static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q) static void mlxsw_pci_eq_tasklet(struct tasklet_struct *t) { - struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet); - struct mlxsw_pci *mlxsw_pci = q->pci; - u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci); unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)]; - char *eqe; - u8 cqn; - bool cq_handle = false; - int items = 0; + struct mlxsw_pci_queue *q = from_tasklet(q, t, u.eq.tasklet); + struct mlxsw_pci *mlxsw_pci = q->pci; int credits = q->count >> 1; + u8 cqn, cq_count; + int items = 0; + char *eqe; memset(&active_cqns, 0, sizeof(active_cqns)); while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) { + cqn = mlxsw_pci_eqe_cqn_get(eqe); + set_bit(cqn, active_cqns); - /* Command interface completion events are always received on - * queue MLXSW_PCI_EQ_ASYNC_NUM (EQ0) and completion events - * are mapped to queue MLXSW_PCI_EQ_COMP_NUM (EQ1). - */ - switch (q->num) { - case MLXSW_PCI_EQ_ASYNC_NUM: - mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe); - q->u.eq.ev_cmd_count++; - break; - case MLXSW_PCI_EQ_COMP_NUM: - cqn = mlxsw_pci_eqe_cqn_get(eqe); - set_bit(cqn, active_cqns); - cq_handle = true; - q->u.eq.ev_comp_count++; - break; - default: - q->u.eq.ev_other_count++; - } if (++items == credits) break; } - if (items) { - mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); - mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); - } - if (!cq_handle) + if (!items) return; + + mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); + mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); + + cq_count = mlxsw_pci->num_cqs; for_each_set_bit(cqn, active_cqns, cq_count) { q = mlxsw_pci_cq_get(mlxsw_pci, cqn); - mlxsw_pci_queue_tasklet_schedule(q); + napi_schedule(&q->u.cq.napi); + } +} + +static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, + struct mlxsw_pci_queue *q) +{ + int i; + int err; + + /* We expect to initialize only one EQ, which gets num=0 as it is + * located at index zero. We use the EQ as EQ1, so set the number for + * future use. 
+ */ + WARN_ON_ONCE(q->num); + q->num = MLXSW_PCI_EQ_COMP_NUM; + + q->consumer_counter = 0; + + for (i = 0; i < q->count; i++) { + char *elem = mlxsw_pci_queue_elem_get(q, i); + + mlxsw_pci_eqe_owner_set(elem, 1); + } + + mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */ + mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */ + mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count)); + for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { + dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i); + + mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr); } + err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num); + if (err) + return err; + tasklet_setup(&q->u.eq.tasklet, mlxsw_pci_eq_tasklet); + mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); + mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); + return 0; +} + +static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue *q) +{ + mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num); } struct mlxsw_pci_queue_ops { @@ -886,7 +1001,6 @@ struct mlxsw_pci_queue_ops { struct mlxsw_pci_queue *q); void (*fini)(struct mlxsw_pci *mlxsw_pci, struct mlxsw_pci_queue *q); - void (*tasklet)(struct tasklet_struct *t); u16 (*elem_count_f)(const struct mlxsw_pci_queue *q); u8 (*elem_size_f)(const struct mlxsw_pci_queue *q); u16 elem_count; @@ -914,7 +1028,6 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = { .pre_init = mlxsw_pci_cq_pre_init, .init = mlxsw_pci_cq_init, .fini = mlxsw_pci_cq_fini, - .tasklet = mlxsw_pci_cq_tasklet, .elem_count_f = mlxsw_pci_cq_elem_count, .elem_size_f = mlxsw_pci_cq_elem_size }; @@ -923,7 +1036,6 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = { .type = MLXSW_PCI_QUEUE_TYPE_EQ, .init = mlxsw_pci_eq_init, .fini = mlxsw_pci_eq_fini, - .tasklet = mlxsw_pci_eq_tasklet, .elem_count = MLXSW_PCI_EQE_COUNT, .elem_size = MLXSW_PCI_EQE_SIZE }; @@ -948,9 +1060,6 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox, q->type = q_ops->type; q->pci = mlxsw_pci; - if (q_ops->tasklet) - tasklet_setup(&q->tasklet, q_ops->tasklet); - mem_item->size = MLXSW_PCI_AQ_SIZE; mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev, mem_item->size, &mem_item->mapaddr, @@ -1074,7 +1183,7 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox) if (num_sdqs + num_rdqs > num_cqs || num_sdqs < MLXSW_PCI_SDQS_MIN || - num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) { + num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_MAX) { dev_err(&pdev->dev, "Unsupported number of queues\n"); return -EINVAL; } @@ -1089,10 +1198,11 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox) return -EINVAL; } - mlxsw_pci->num_sdq_cqs = num_sdqs; + mlxsw_pci->num_cqs = num_cqs; + mlxsw_pci->num_sdqs = num_sdqs; err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops, - num_eqs); + MLXSW_PCI_EQS_COUNT); if (err) { dev_err(&pdev->dev, "Failed to initialize event queues\n"); return err; @@ -1119,8 +1229,6 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox) goto err_rdqs_init; } - /* We have to poll in command interface until queues are initialized */ - mlxsw_pci->cmd.nopoll = true; return 0; err_rdqs_init: @@ -1134,7 +1242,6 @@ err_cqs_init: static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci) { - mlxsw_pci->cmd.nopoll = false; mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops); mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops); mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops); @@ 
-1432,12 +1539,9 @@ static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id) { struct mlxsw_pci *mlxsw_pci = dev_id; struct mlxsw_pci_queue *q; - int i; - for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) { - q = mlxsw_pci_eq_get(mlxsw_pci, i); - mlxsw_pci_queue_tasklet_schedule(q); - } + q = mlxsw_pci_eq_get(mlxsw_pci); + tasklet_schedule(&q->u.eq.tasklet); return IRQ_HANDLED; } @@ -1709,6 +1813,10 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, if (err) goto err_requery_resources; + err = mlxsw_pci_napi_devs_init(mlxsw_pci); + if (err) + goto err_napi_devs_init; + err = mlxsw_pci_aqs_init(mlxsw_pci, mbox); if (err) goto err_aqs_init; @@ -1726,6 +1834,8 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, err_request_eq_irq: mlxsw_pci_aqs_fini(mlxsw_pci); err_aqs_init: + mlxsw_pci_napi_devs_fini(mlxsw_pci); +err_napi_devs_init: err_requery_resources: err_config_profile: err_cqe_v_check: @@ -1753,6 +1863,7 @@ static void mlxsw_pci_fini(void *bus_priv) free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci); mlxsw_pci_aqs_fini(mlxsw_pci); + mlxsw_pci_napi_devs_fini(mlxsw_pci); mlxsw_pci_fw_area_fini(mlxsw_pci); mlxsw_pci_free_irq_vectors(mlxsw_pci); } @@ -1761,7 +1872,7 @@ static struct mlxsw_pci_queue * mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci, const struct mlxsw_tx_info *tx_info) { - u8 ctl_sdq_count = mlxsw_pci_sdq_count(mlxsw_pci) - 1; + u8 ctl_sdq_count = mlxsw_pci->num_sdqs - 1; u8 sdqn; if (tx_info->is_emad) { @@ -1860,9 +1971,9 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod, { struct mlxsw_pci *mlxsw_pci = bus_priv; dma_addr_t in_mapaddr = 0, out_mapaddr = 0; - bool evreq = mlxsw_pci->cmd.nopoll; unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS); - bool *p_wait_done = &mlxsw_pci->cmd.wait_done; + unsigned long end; + bool wait_done; int err; *p_status = MLXSW_CMD_STATUS_OK; @@ -1886,36 +1997,28 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod, mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod); mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0); - *p_wait_done = false; + wait_done = false; wmb(); /* all needs to be written before we write control register */ mlxsw_pci_write32(mlxsw_pci, CIR_CTRL, MLXSW_PCI_CIR_CTRL_GO_BIT | - (evreq ? 
MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) | (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) | opcode); - if (!evreq) { - unsigned long end; - - end = jiffies + timeout; - do { - u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL); + end = jiffies + timeout; + do { + u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL); - if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) { - *p_wait_done = true; - *p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT; - break; - } - cond_resched(); - } while (time_before(jiffies, end)); - } else { - wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout); - *p_status = mlxsw_pci->cmd.comp.status; - } + if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) { + wait_done = true; + *p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT; + break; + } + cond_resched(); + } while (time_before(jiffies, end)); err = 0; - if (*p_wait_done) { + if (wait_done) { if (*p_status) err = -EIO; } else { @@ -1929,14 +2032,12 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod, */ __be32 tmp; - if (!evreq) { - tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci, - CIR_OUT_PARAM_HI)); - memcpy(out_mbox, &tmp, sizeof(tmp)); - tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci, - CIR_OUT_PARAM_LO)); - memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp)); - } + tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci, + CIR_OUT_PARAM_HI)); + memcpy(out_mbox, &tmp, sizeof(tmp)); + tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci, + CIR_OUT_PARAM_LO)); + memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp)); } else if (!err && out_mbox) { memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size); } @@ -2015,7 +2116,6 @@ static int mlxsw_pci_cmd_init(struct mlxsw_pci *mlxsw_pci) int err; mutex_init(&mlxsw_pci->cmd.lock); - init_waitqueue_head(&mlxsw_pci->cmd.wait); err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); if (err) diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h index 7cdf0ce24f28..6bed495dcf0f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h @@ -42,8 +42,8 @@ ((offset) + (type_offset) + (num) * 4) #define MLXSW_PCI_CQS_MAX 96 -#define MLXSW_PCI_EQS_COUNT 2 -#define MLXSW_PCI_EQ_ASYNC_NUM 0 +#define MLXSW_PCI_EQS_MAX 2 +#define MLXSW_PCI_EQS_COUNT 1 #define MLXSW_PCI_EQ_COMP_NUM 1 #define MLXSW_PCI_SDQS_MIN 2 /* EMAD and control traffic */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 8892654c685f..8adf86a6f5cc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -4786,8 +4786,11 @@ MLXSW_ITEM32(reg, ptys, an_status, 0x04, 28, 4); #define MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR BIT(8) #define MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4 BIT(9) #define MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2 BIT(10) +#define MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_1_100GBASE_CR_KR BIT(11) #define MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4 BIT(12) +#define MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_2_200GBASE_CR2_KR2 BIT(13) #define MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8 BIT(15) +#define MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_4_400GBASE_CR4_KR4 BIT(16) #define MLXSW_REG_PTYS_EXT_ETH_SPEED_800GAUI_8 BIT(19) /* reg_ptys_ext_eth_proto_cap diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index bb642e9bb6cf..030ed71f945d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ 
b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -825,7 +825,7 @@ static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); if (err) goto err_port_mtu_set; - dev->mtu = mtu; + WRITE_ONCE(dev->mtu, mtu); return 0; err_port_mtu_set: diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index 92a406f02eae..b1d08e958bf9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -1504,7 +1504,8 @@ mlxsw_sp_acl_tcam_region_rehash_intrvl_get(struct devlink *devlink, u32 id, static int mlxsw_sp_acl_tcam_region_rehash_intrvl_set(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct mlxsw_core *mlxsw_core = devlink_priv(devlink); struct mlxsw_sp_acl_tcam_vregion *vregion; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c index 0f29e9c19411..a755b0a901d3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c @@ -1649,6 +1649,18 @@ mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = { ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2) static const enum ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_100gaui_1_100gbase_cr_kr[] = { + ETHTOOL_LINK_MODE_100000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_100000baseDR_Full_BIT, +}; + +#define MLXSW_SP2_MASK_ETHTOOL_100GAUI_1_100GBASE_CR_KR_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_1_100gbase_cr_kr) + +static const enum ethtool_link_mode_bit_indices mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = { ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, @@ -1661,6 +1673,18 @@ mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = { ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4) static const enum ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_200gaui_2_200gbase_cr2_kr2[] = { + ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT, + ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT, + ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT, + ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT, +}; + +#define MLXSW_SP2_MASK_ETHTOOL_200GAUI_2_200GBASE_CR2_KR2_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_2_200gbase_cr2_kr2) + +static const enum ethtool_link_mode_bit_indices mlxsw_sp2_mask_ethtool_400gaui_8[] = { ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT, ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT, @@ -1673,6 +1697,18 @@ mlxsw_sp2_mask_ethtool_400gaui_8[] = { ARRAY_SIZE(mlxsw_sp2_mask_ethtool_400gaui_8) static const enum ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_400gaui_4_400gbase_cr4_kr4[] = { + ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT, + ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT, + ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT, +}; + +#define MLXSW_SP2_MASK_ETHTOOL_400GAUI_4_400GBASE_CR4_KR4_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_400gaui_4_400gbase_cr4_kr4) + +static const enum ethtool_link_mode_bit_indices mlxsw_sp2_mask_ethtool_800gaui_8[] = { ETHTOOL_LINK_MODE_800000baseCR8_Full_BIT, 
ETHTOOL_LINK_MODE_800000baseKR8_Full_BIT, @@ -1817,6 +1853,14 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = { .width = 2, }, { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_1_100GBASE_CR_KR, + .mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_1_100gbase_cr_kr, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_1_100GBASE_CR_KR_LEN, + .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_1X, + .speed = SPEED_100000, + .width = 1, + }, + { .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4, .mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4, .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN, @@ -1826,6 +1870,14 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = { .width = 4, }, { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_2_200GBASE_CR2_KR2, + .mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_2_200gbase_cr2_kr2, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_2_200GBASE_CR2_KR2_LEN, + .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_2X, + .speed = SPEED_200000, + .width = 2, + }, + { .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8, .mask_ethtool = mlxsw_sp2_mask_ethtool_400gaui_8, .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN, @@ -1834,6 +1886,14 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = { .width = 8, }, { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_4_400GBASE_CR4_KR4, + .mask_ethtool = mlxsw_sp2_mask_ethtool_400gaui_4_400gbase_cr4_kr4, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_400GAUI_4_400GBASE_CR4_KR4_LEN, + .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_4X, + .speed = SPEED_400000, + .width = 4, + }, + { .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_800GAUI_8, .mask_ethtool = mlxsw_sp2_mask_ethtool_800gaui_8, .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_800GAUI_8_LEN, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 9fd1ca079258..f07955b5439f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -595,6 +595,10 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, flow_rule_match_control(rule, &match); addr_type = match.key->addr_type; + + if (flow_rule_has_control_flags(match.mask->flags, + f->common.extack)) + return -EOPNOTSUPP; } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c index 3340b4a694c3..d761a1235994 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c @@ -8,7 +8,7 @@ #include "spectrum_ipip.h" #include "reg.h" -struct ip_tunnel_parm +struct ip_tunnel_parm_kern mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev) { struct ip_tunnel *tun = netdev_priv(ol_dev); @@ -24,27 +24,29 @@ mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev) return tun->parms; } -static bool mlxsw_sp_ipip_parms4_has_ikey(const struct ip_tunnel_parm *parms) +static bool +mlxsw_sp_ipip_parms4_has_ikey(const struct ip_tunnel_parm_kern *parms) { - return !!(parms->i_flags & TUNNEL_KEY); + return test_bit(IP_TUNNEL_KEY_BIT, parms->i_flags); } static bool mlxsw_sp_ipip_parms6_has_ikey(const struct __ip6_tnl_parm *parms) { - return !!(parms->i_flags & TUNNEL_KEY); + return test_bit(IP_TUNNEL_KEY_BIT, parms->i_flags); } -static bool mlxsw_sp_ipip_parms4_has_okey(const struct ip_tunnel_parm *parms) +static bool 
+mlxsw_sp_ipip_parms4_has_okey(const struct ip_tunnel_parm_kern *parms) { - return !!(parms->o_flags & TUNNEL_KEY); + return test_bit(IP_TUNNEL_KEY_BIT, parms->o_flags); } static bool mlxsw_sp_ipip_parms6_has_okey(const struct __ip6_tnl_parm *parms) { - return !!(parms->o_flags & TUNNEL_KEY); + return test_bit(IP_TUNNEL_KEY_BIT, parms->o_flags); } -static u32 mlxsw_sp_ipip_parms4_ikey(const struct ip_tunnel_parm *parms) +static u32 mlxsw_sp_ipip_parms4_ikey(const struct ip_tunnel_parm_kern *parms) { return mlxsw_sp_ipip_parms4_has_ikey(parms) ? be32_to_cpu(parms->i_key) : 0; @@ -56,7 +58,7 @@ static u32 mlxsw_sp_ipip_parms6_ikey(const struct __ip6_tnl_parm *parms) be32_to_cpu(parms->i_key) : 0; } -static u32 mlxsw_sp_ipip_parms4_okey(const struct ip_tunnel_parm *parms) +static u32 mlxsw_sp_ipip_parms4_okey(const struct ip_tunnel_parm_kern *parms) { return mlxsw_sp_ipip_parms4_has_okey(parms) ? be32_to_cpu(parms->o_key) : 0; @@ -69,7 +71,7 @@ static u32 mlxsw_sp_ipip_parms6_okey(const struct __ip6_tnl_parm *parms) } static union mlxsw_sp_l3addr -mlxsw_sp_ipip_parms4_saddr(const struct ip_tunnel_parm *parms) +mlxsw_sp_ipip_parms4_saddr(const struct ip_tunnel_parm_kern *parms) { return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.saddr }; } @@ -81,7 +83,7 @@ mlxsw_sp_ipip_parms6_saddr(const struct __ip6_tnl_parm *parms) } static union mlxsw_sp_l3addr -mlxsw_sp_ipip_parms4_daddr(const struct ip_tunnel_parm *parms) +mlxsw_sp_ipip_parms4_daddr(const struct ip_tunnel_parm_kern *parms) { return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.daddr }; } @@ -96,7 +98,7 @@ union mlxsw_sp_l3addr mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto, const struct net_device *ol_dev) { - struct ip_tunnel_parm parms4; + struct ip_tunnel_parm_kern parms4; struct __ip6_tnl_parm parms6; switch (proto) { @@ -115,7 +117,9 @@ mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto, static __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev) { - struct ip_tunnel_parm parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev); + struct ip_tunnel_parm_kern parms4; + + parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev); return mlxsw_sp_ipip_parms4_daddr(&parms4).addr4; } @@ -124,7 +128,7 @@ static union mlxsw_sp_l3addr mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto, const struct net_device *ol_dev) { - struct ip_tunnel_parm parms4; + struct ip_tunnel_parm_kern parms4; struct __ip6_tnl_parm parms6; switch (proto) { @@ -150,7 +154,7 @@ bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr) static struct mlxsw_sp_ipip_parms mlxsw_sp_ipip_netdev_parms_init_gre4(const struct net_device *ol_dev) { - struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev); + struct ip_tunnel_parm_kern parms = mlxsw_sp_ipip_netdev_parms4(ol_dev); return (struct mlxsw_sp_ipip_parms) { .proto = MLXSW_SP_L3_PROTO_IPV4, @@ -187,8 +191,8 @@ mlxsw_sp_ipip_decap_config_gre4(struct mlxsw_sp *mlxsw_sp, { u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb); u16 ul_rif_id = mlxsw_sp_ipip_lb_ul_rif_id(ipip_entry->ol_lb); + struct ip_tunnel_parm_kern parms; char rtdp_pl[MLXSW_REG_RTDP_LEN]; - struct ip_tunnel_parm parms; unsigned int type_check; bool has_ikey; u32 daddr4; @@ -238,12 +242,15 @@ static bool mlxsw_sp_ipip_can_offload_gre4(const struct mlxsw_sp *mlxsw_sp, const struct net_device *ol_dev) { struct ip_tunnel *tunnel = netdev_priv(ol_dev); - __be16 okflags = TUNNEL_KEY; /* We can't offload any other features. 
*/ bool inherit_ttl = tunnel->parms.iph.ttl == 0; bool inherit_tos = tunnel->parms.iph.tos & 0x1; + IP_TUNNEL_DECLARE_FLAGS(okflags) = { }; + + /* We can't offload any other features. */ + __set_bit(IP_TUNNEL_KEY_BIT, okflags); - return (tunnel->parms.i_flags & ~okflags) == 0 && - (tunnel->parms.o_flags & ~okflags) == 0 && + return ip_tunnel_flags_subset(tunnel->parms.i_flags, okflags) && + ip_tunnel_flags_subset(tunnel->parms.o_flags, okflags) && inherit_ttl && inherit_tos && mlxsw_sp_ipip_tunnel_complete(MLXSW_SP_L3_PROTO_IPV4, ol_dev); } @@ -252,7 +259,7 @@ static struct mlxsw_sp_rif_ipip_lb_config mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp, const struct net_device *ol_dev) { - struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev); + struct ip_tunnel_parm_kern parms = mlxsw_sp_ipip_netdev_parms4(ol_dev); enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt; lb_ipipt = mlxsw_sp_ipip_parms4_has_okey(&parms) ? @@ -439,10 +446,13 @@ static bool mlxsw_sp_ipip_can_offload_gre6(const struct mlxsw_sp *mlxsw_sp, struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(ol_dev); bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS; bool inherit_ttl = tparm.hop_limit == 0; - __be16 okflags = TUNNEL_KEY; /* We can't offload any other features. */ + IP_TUNNEL_DECLARE_FLAGS(okflags) = { }; + + /* We can't offload any other features. */ + __set_bit(IP_TUNNEL_KEY_BIT, okflags); - return (tparm.i_flags & ~okflags) == 0 && - (tparm.o_flags & ~okflags) == 0 && + return ip_tunnel_flags_subset(tparm.i_flags, okflags) && + ip_tunnel_flags_subset(tparm.o_flags, okflags) && inherit_ttl && inherit_tos && mlxsw_sp_ipip_tunnel_complete(MLXSW_SP_L3_PROTO_IPV6, ol_dev); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h index a35f009da561..a66173779641 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h @@ -9,7 +9,7 @@ #include <linux/if_tunnel.h> #include <net/ip6_tunnel.h> -struct ip_tunnel_parm +struct ip_tunnel_parm_kern mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev); struct __ip6_tnl_parm mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index af50ff9e5f26..4b5fd71c897d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -413,8 +413,8 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev, __be32 *saddrp, __be32 *daddrp) { struct ip_tunnel *tun = netdev_priv(to_dev); + struct ip_tunnel_parm_kern parms; struct net_device *dev = NULL; - struct ip_tunnel_parm parms; struct rtable *rt = NULL; struct flowi4 fl4; @@ -451,7 +451,7 @@ mlxsw_sp_span_entry_gretap4_parms(struct mlxsw_sp *mlxsw_sp, const struct net_device *to_dev, struct mlxsw_sp_span_parms *sparmsp) { - struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev); + struct ip_tunnel_parm_kern tparm = mlxsw_sp_ipip_netdev_parms4(to_dev); union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr }; union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr }; bool inherit_tos = tparm.iph.tos & 0x1; @@ -461,7 +461,8 @@ mlxsw_sp_span_entry_gretap4_parms(struct mlxsw_sp *mlxsw_sp, if (!(to_dev->flags & IFF_UP) || /* Reject tunnels with GRE keys, checksums, etc. 
*/ - tparm.i_flags || tparm.o_flags || + !ip_tunnel_flags_empty(tparm.i_flags) || + !ip_tunnel_flags_empty(tparm.o_flags) || /* Require a fixed TTL and a TOS copied from the mirrored packet. */ inherit_ttl || !inherit_tos || /* A destination address may not be "any". */ @@ -539,7 +540,7 @@ mlxsw_sp_span_gretap6_route(const struct net_device *to_dev, if (!dst || dst->error) goto out; - rt6 = container_of(dst, struct rt6_info, dst); + rt6 = dst_rt6_info(dst); dev = dst->dev; *saddrp = fl6.saddr; @@ -565,7 +566,8 @@ mlxsw_sp_span_entry_gretap6_parms(struct mlxsw_sp *mlxsw_sp, if (!(to_dev->flags & IFF_UP) || /* Reject tunnels with GRE keys, checksums, etc. */ - tparm.i_flags || tparm.o_flags || + !ip_tunnel_flags_empty(tparm.i_flags) || + !ip_tunnel_flags_empty(tparm.o_flags) || /* Require a fixed TTL and a TOS copied from the mirrored packet. */ inherit_ttl || !inherit_tos || /* A destination address may not be "any". */ diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c index d4cdf3d4f552..502518cdb461 100644 --- a/drivers/net/ethernet/micrel/ks8851_common.c +++ b/drivers/net/ethernet/micrel/ks8851_common.c @@ -234,12 +234,13 @@ static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt) /** * ks8851_rx_pkts - receive packets from the host * @ks: The device information. + * @rxq: Queue of packets received in this function. * * This is called from the IRQ work queue when the system detects that there * are packets in the receive queue. Find out how many packets there are and * read them from the FIFO. */ -static void ks8851_rx_pkts(struct ks8851_net *ks) +static void ks8851_rx_pkts(struct ks8851_net *ks, struct sk_buff_head *rxq) { struct sk_buff *skb; unsigned rxfc; @@ -299,7 +300,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) ks8851_dbg_dumpkkt(ks, rxpkt); skb->protocol = eth_type_trans(skb, ks->netdev); - __netif_rx(skb); + __skb_queue_tail(rxq, skb); ks->netdev->stats.rx_packets++; ks->netdev->stats.rx_bytes += rxlen; @@ -326,11 +327,11 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) static irqreturn_t ks8851_irq(int irq, void *_ks) { struct ks8851_net *ks = _ks; + struct sk_buff_head rxq; unsigned handled = 0; unsigned long flags; unsigned int status; - - local_bh_disable(); + struct sk_buff *skb; ks8851_lock(ks, &flags); @@ -384,7 +385,8 @@ static irqreturn_t ks8851_irq(int irq, void *_ks) * from the device so do not bother masking just the RX * from the device. 
*/ - ks8851_rx_pkts(ks); + __skb_queue_head_init(&rxq); + ks8851_rx_pkts(ks, &rxq); } /* if something stopped the rx process, probably due to wanting @@ -408,7 +410,9 @@ static irqreturn_t ks8851_irq(int irq, void *_ks) if (status & IRQ_LCI) mii_check_link(&ks->mii); - local_bh_enable(); + if (status & IRQ_RXI) + while ((skb = __skb_dequeue(&rxq))) + netif_rx(skb); return IRQ_HANDLED; } diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index c5aeeb964c17..dc1d9f774565 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -5427,7 +5427,7 @@ static int netdev_change_mtu(struct net_device *dev, int new_mtu) } hw_mtu = (hw_mtu + 3) & ~3; hw_priv->mtu = hw_mtu; - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return 0; } diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c index 443128adbcb6..3885d6fbace1 100644 --- a/drivers/net/ethernet/microchip/encx24j600-regmap.c +++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c @@ -75,7 +75,7 @@ static int regmap_encx24j600_sfr_read(void *context, u8 reg, u8 *val, if (unlikely(ret)) return ret; } else { - /* Translate registers that are more effecient using + /* Translate registers that are more efficient using * 3-byte SPI commands */ switch (reg) { @@ -129,7 +129,7 @@ static int regmap_encx24j600_sfr_update(struct encx24j600_context *ctx, if (unlikely(ret)) return ret; } else { - /* Translate registers that are more effecient using + /* Translate registers that are more efficient using * 3-byte SPI commands */ switch (reg) { diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c index d7c8aa77ec75..b011bf5c2305 100644 --- a/drivers/net/ethernet/microchip/encx24j600.c +++ b/drivers/net/ethernet/microchip/encx24j600.c @@ -569,7 +569,7 @@ static void encx24j600_dump_config(struct encx24j600_priv *priv, pr_info(DRV_NAME " MABBIPG: %04X\n", encx24j600_read_reg(priv, MABBIPG)); - /* PHY configuation */ + /* PHY configuration */ pr_info(DRV_NAME " PHCON1: %04X\n", encx24j600_read_phy(priv, PHCON1)); pr_info(DRV_NAME " PHCON2: %04X\n", encx24j600_read_phy(priv, PHCON2)); pr_info(DRV_NAME " PHANA: %04X\n", encx24j600_read_phy(priv, PHANA)); @@ -837,7 +837,9 @@ static void encx24j600_hw_tx(struct encx24j600_priv *priv) dump_packet("TX", priv->tx_skb->len, priv->tx_skb->data); if (encx24j600_read_reg(priv, EIR) & TXABTIF) - /* Last transmition aborted due to error. Reset TX interface */ + /* Last transmission aborted due to error. + * Reset TX interface + */ encx24j600_reset_hw_tx(priv); /* Clear the TXIF flag if were previously set */ @@ -1112,7 +1114,6 @@ MODULE_DEVICE_TABLE(spi, encx24j600_spi_id_table); static struct spi_driver encx24j600_spi_net_driver = { .driver = { .name = DRV_NAME, - .owner = THIS_MODULE, .bus = &spi_bus_type, }, .probe = encx24j600_spi_probe, diff --git a/drivers/net/ethernet/microchip/encx24j600_hw.h b/drivers/net/ethernet/microchip/encx24j600_hw.h index 34c5a289898c..2522f4f48b67 100644 --- a/drivers/net/ethernet/microchip/encx24j600_hw.h +++ b/drivers/net/ethernet/microchip/encx24j600_hw.h @@ -243,7 +243,7 @@ int devm_regmap_init_encx24j600(struct device *dev, /* MAIPG */ /* value of the high byte is given by the reserved bits, - * value of the low byte is recomended setting of the + * value of the low byte is recommended setting of the * IPG parameter. 
*/ #define MAIPGH_VAL 0x0C diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c index 8a6ae171e375..d0f4ff4ee075 100644 --- a/drivers/net/ethernet/microchip/lan743x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c @@ -1076,15 +1076,10 @@ static int lan743x_ethtool_get_eee(struct net_device *netdev, buf = lan743x_csr_read(adapter, MAC_CR); if (buf & MAC_CR_EEE_EN_) { - eee->eee_enabled = true; - eee->tx_lpi_enabled = true; /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */ buf = lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT); eee->tx_lpi_timer = buf; } else { - eee->eee_enabled = false; - eee->eee_active = false; - eee->tx_lpi_enabled = false; eee->tx_lpi_timer = 0; } @@ -1097,7 +1092,6 @@ static int lan743x_ethtool_set_eee(struct net_device *netdev, struct lan743x_adapter *adapter; struct phy_device *phydev; u32 buf = 0; - int ret = 0; if (!netdev) return -EINVAL; @@ -1114,23 +1108,8 @@ static int lan743x_ethtool_set_eee(struct net_device *netdev, } if (eee->eee_enabled) { - ret = phy_init_eee(phydev, false); - if (ret) { - netif_err(adapter, drv, adapter->netdev, - "EEE initialization failed\n"); - return ret; - } - buf = (u32)eee->tx_lpi_timer; lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT, buf); - - buf = lan743x_csr_read(adapter, MAC_CR); - buf |= MAC_CR_EEE_EN_; - lan743x_csr_write(adapter, MAC_CR, buf); - } else { - buf = lan743x_csr_read(adapter, MAC_CR); - buf &= ~MAC_CR_EEE_EN_; - lan743x_csr_write(adapter, MAC_CR, buf); } return phy_ethtool_set_eee(phydev, eee); diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 75a988c0bd79..6be8a43c908a 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -803,7 +803,7 @@ static int lan743x_mdiobus_read_c22(struct mii_bus *bus, int phy_id, int index) u32 val, mii_access; int ret; - /* comfirm MII not busy */ + /* confirm MII not busy */ ret = lan743x_mac_mii_wait_till_not_busy(adapter); if (ret < 0) return ret; @@ -868,7 +868,7 @@ static int lan743x_mdiobus_read_c45(struct mii_bus *bus, int phy_id, u32 mmd_access; int ret; - /* comfirm MII not busy */ + /* confirm MII not busy */ ret = lan743x_mac_mii_wait_till_not_busy(adapter); if (ret < 0) return ret; @@ -1462,6 +1462,13 @@ static void lan743x_phy_link_status_change(struct net_device *netdev) phydev->interface == PHY_INTERFACE_MODE_1000BASEX || phydev->interface == PHY_INTERFACE_MODE_2500BASEX) lan743x_sgmii_config(adapter); + + data = lan743x_csr_read(adapter, MAC_CR); + if (phydev->enable_tx_lpi) + data |= MAC_CR_EEE_EN_; + else + data &= ~MAC_CR_EEE_EN_; + lan743x_csr_write(adapter, MAC_CR, data); } } @@ -3177,7 +3184,7 @@ static int lan743x_netdev_change_mtu(struct net_device *netdev, int new_mtu) ret = lan743x_mac_set_mtu(adapter, new_mtu); if (!ret) - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); return ret; } diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c index 2801f08bf1c9..dcea6652d56d 100644 --- a/drivers/net/ethernet/microchip/lan743x_ptp.c +++ b/drivers/net/ethernet/microchip/lan743x_ptp.c @@ -58,7 +58,7 @@ int lan743x_gpio_init(struct lan743x_adapter *adapter) static void lan743x_ptp_wait_till_cmd_done(struct lan743x_adapter *adapter, u32 bit_mask) { - int timeout = 1000; + int timeout = PTP_CMD_CTL_TIMEOUT_CNT; u32 data = 0; while (timeout && @@ -555,7 +555,7 @@ static int 
lan743x_ptp_perout(struct lan743x_adapter *adapter, int on, if (half == wf_high) { /* It's 50% match. Use the toggle option */ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_TOGGLE_; - /* In this case, devide period value by 2 */ + /* In this case, divide period value by 2 */ ts_period = ns_to_timespec64(div_s64(period64, 2)); period_sec = ts_period.tv_sec; period_nsec = ts_period.tv_nsec; diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.h b/drivers/net/ethernet/microchip/lan743x_ptp.h index e26d4eff7133..0d29914cd460 100644 --- a/drivers/net/ethernet/microchip/lan743x_ptp.h +++ b/drivers/net/ethernet/microchip/lan743x_ptp.h @@ -21,6 +21,7 @@ #define LAN743X_PTP_N_EXTTS 4 #define LAN743X_PTP_N_PPS 0 #define PCI11X1X_PTP_IO_MAX_CHANNELS 8 +#define PTP_CMD_CTL_TIMEOUT_CNT 50 struct lan743x_adapter; diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h b/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h index f3b1e0d31826..e706163ce9cc 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h @@ -78,7 +78,7 @@ /* Classified internal priority for queuing */ #define IFH_POS_QOS_CLASS 100 -/* Bit mask with eight cpu copy classses */ +/* Bit mask with eight cpu copy classes */ #define IFH_POS_CPUQ 92 /* Relearn + learn flags (*) */ diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c index 2635ef8958c8..b12d3b8a64fd 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c @@ -276,7 +276,7 @@ static int lan966x_port_ifh_xmit(struct sk_buff *skb, ++i; } - /* Inidcate EOF and valid bytes in the last word */ + /* Indicate EOF and valid bytes in the last word */ lan_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) | QS_INJ_CTRL_VLD_BYTES_SET(skb->len < LAN966X_BUFFER_MIN_SZ ? 0 : last) | @@ -402,7 +402,7 @@ static int lan966x_port_change_mtu(struct net_device *dev, int new_mtu) lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(LAN966X_HW_MTU(new_mtu)), lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port)); - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); if (!lan966x->fdma) return 0; @@ -520,7 +520,7 @@ bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb) u32 val; /* The IGMP and MLD frames are not forward by the HW if - * multicast snooping is enabled, therefor don't mark as + * multicast snooping is enabled, therefore don't mark as * offload to allow the SW to forward the frames accordingly. 
*/ val = lan_rd(lan966x, ANA_CPU_FWD_CFG(port)); diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h index caa9e0533c96..f8bebbcf77b2 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h @@ -326,7 +326,7 @@ struct lan966x { u8 base_mac[ETH_ALEN]; - spinlock_t tx_lock; /* lock for frame transmition */ + spinlock_t tx_lock; /* lock for frame transmission */ struct net_device *bridge; u16 bridge_mask; diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c index 2e83bbb9477e..fdfa4040d9ee 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c @@ -88,7 +88,7 @@ static void lan966x_port_link_down(struct lan966x_port *port) SYS_FRONT_PORT_MODE_HDX_MODE, lan966x, SYS_FRONT_PORT_MODE(port->chip_port)); - /* 8: Flush the queues accociated with the port */ + /* 8: Flush the queues associated with the port */ lan_rmw(QSYS_SW_PORT_MODE_AGING_MODE_SET(3), QSYS_SW_PORT_MODE_AGING_MODE, lan966x, QSYS_SW_PORT_MODE(port->chip_port)); diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c index d696cf9dbd19..43913d6204e1 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c @@ -45,6 +45,7 @@ static bool lan966x_tc_is_known_etype(struct vcap_tc_flower_parse_usage *st, static int lan966x_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st) { + struct netlink_ext_ack *extack = st->fco->common.extack; struct flow_match_control match; int err = 0; @@ -59,7 +60,7 @@ lan966x_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st) VCAP_KF_L3_FRAGMENT, VCAP_BIT_0); if (err) - goto out; + goto bad_frag_out; } if (match.mask->flags & FLOW_DIS_FIRST_FRAG) { @@ -72,15 +73,20 @@ lan966x_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st) VCAP_KF_L3_FRAG_OFS_GT0, VCAP_BIT_1); if (err) - goto out; + goto bad_frag_out; } + if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT | + FLOW_DIS_FIRST_FRAG, + match.mask->flags, extack)) + return -EOPNOTSUPP; + st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL); return err; -out: - NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error"); +bad_frag_out: + NL_SET_ERR_MSG_MOD(extack, "ip_frag parse error"); return err; } diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c index 3c44660128da..fa34a739c748 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c @@ -157,7 +157,7 @@ void lan966x_vlan_port_apply(struct lan966x_port *port) pvid = lan966x_vlan_port_get_pvid(port); - /* Ingress clasification (ANA_PORT_VLAN_CFG) */ + /* Ingress classification (ANA_PORT_VLAN_CFG) */ /* Default vlan to classify for untagged frames (may be zero) */ val = ANA_VLAN_CFG_VLAN_VID_SET(pvid); if (port->vlan_aware) diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile index 1cb1cc3f1a85..b68fe9c9a656 100644 --- a/drivers/net/ethernet/microchip/sparx5/Makefile +++ b/drivers/net/ethernet/microchip/sparx5/Makefile @@ -10,7 +10,8 @@ sparx5-switch-y := sparx5_main.o sparx5_packet.o \ sparx5_switchdev.o 
sparx5_calendar.o sparx5_ethtool.o sparx5_fdma.o \ sparx5_ptp.o sparx5_pgid.o sparx5_tc.o sparx5_qos.o \ sparx5_vcap_impl.o sparx5_vcap_ag_api.o sparx5_tc_flower.o \ - sparx5_tc_matchall.o sparx5_pool.o sparx5_sdlb.o sparx5_police.o sparx5_psfp.o + sparx5_tc_matchall.o sparx5_pool.o sparx5_sdlb.o sparx5_police.o \ + sparx5_psfp.o sparx5_mirror.o sparx5-switch-$(CONFIG_SPARX5_DCB) += sparx5_dcb.o sparx5-switch-$(CONFIG_DEBUG_FS) += sparx5_vcap_debugfs.o diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c index 141897dfe388..1915998f6079 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c @@ -143,7 +143,7 @@ static void sparx5_fdma_rx_activate(struct sparx5 *sparx5, struct sparx5_rx *rx) static void sparx5_fdma_rx_deactivate(struct sparx5 *sparx5, struct sparx5_rx *rx) { - /* Dectivate the RX channel */ + /* Deactivate the RX channel */ spx5_rmw(0, BIT(rx->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE, sparx5, FDMA_CH_ACTIVATE); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c index 3c066b62e689..b64c814eac11 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c @@ -899,6 +899,9 @@ static int mchp_sparx5_probe(struct platform_device *pdev) dev_err(sparx5->dev, "PTP failed\n"); goto cleanup_ports; } + + INIT_LIST_HEAD(&sparx5->mall_entries); + goto cleanup_config; cleanup_ports: diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h index 316fed5f2735..1982ae03b4fe 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h @@ -18,6 +18,7 @@ #include <linux/ptp_clock_kernel.h> #include <linux/hrtimer.h> #include <linux/debugfs.h> +#include <net/flow_offload.h> #include "sparx5_main_regs.h" @@ -173,6 +174,7 @@ struct sparx5_port { struct phylink_config phylink_config; struct phylink *phylink; struct phylink_pcs phylink_pcs; + struct flow_stats mirror_stats; u16 portno; /* Ingress default VLAN (pvid) */ u16 pvid; @@ -227,6 +229,22 @@ struct sparx5_mdb_entry { u16 pgid_idx; }; +struct sparx5_mall_mirror_entry { + u32 idx; + struct sparx5_port *port; +}; + +struct sparx5_mall_entry { + struct list_head list; + struct sparx5_port *port; + unsigned long cookie; + enum flow_action_id type; + bool ingress; + union { + struct sparx5_mall_mirror_entry mirror; + }; +}; + #define SPARX5_PTP_TIMEOUT msecs_to_jiffies(10) #define SPARX5_SKB_CB(skb) \ ((struct sparx5_skb_cb *)((skb)->cb)) @@ -295,6 +313,7 @@ struct sparx5 { struct vcap_control *vcap_ctrl; /* PGID allocation map */ u8 pgid_map[PGID_TABLE_SIZE]; + struct list_head mall_entries; /* Common root for debugfs */ struct dentry *debugfs_root; }; @@ -541,6 +560,12 @@ void sparx5_psfp_init(struct sparx5 *sparx5); void sparx5_new_base_time(struct sparx5 *sparx5, const u32 cycle_time, const ktime_t org_base_time, ktime_t *new_base_time); +/* sparx5_mirror.c */ +int sparx5_mirror_add(struct sparx5_mall_entry *entry); +void sparx5_mirror_del(struct sparx5_mall_entry *entry); +void sparx5_mirror_stats(struct sparx5_mall_entry *entry, + struct flow_stats *fstats); + /* Clock period in picoseconds */ static inline u32 sparx5_clk_period(enum sparx5_core_clockfreq cclock) { diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h 
b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h index bd03a0a3c1da..22acc1f3380c 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h @@ -83,6 +83,64 @@ enum sparx5_target { #define ANA_AC_OWN_UPSID_OWN_UPSID_GET(x)\ FIELD_GET(ANA_AC_OWN_UPSID_OWN_UPSID, x) +/* ANA_AC:MIRROR_PROBE:PROBE_CFG */ +#define ANA_AC_PROBE_CFG(g) \ + __REG(TARGET_ANA_AC, 0, 1, 893696, g, 3, 32, 0, 0, 1, 4) + +#define ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD GENMASK(31, 27) +#define ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD_SET(x)\ + FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD, x) +#define ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD_GET(x)\ + FIELD_GET(ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD, x) + +#define ANA_AC_PROBE_CFG_PROBE_CPU_SET GENMASK(26, 19) +#define ANA_AC_PROBE_CFG_PROBE_CPU_SET_SET(x)\ + FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_CPU_SET, x) +#define ANA_AC_PROBE_CFG_PROBE_CPU_SET_GET(x)\ + FIELD_GET(ANA_AC_PROBE_CFG_PROBE_CPU_SET, x) + +#define ANA_AC_PROBE_CFG_PROBE_VID GENMASK(18, 6) +#define ANA_AC_PROBE_CFG_PROBE_VID_SET(x)\ + FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_VID, x) +#define ANA_AC_PROBE_CFG_PROBE_VID_GET(x)\ + FIELD_GET(ANA_AC_PROBE_CFG_PROBE_VID, x) + +#define ANA_AC_PROBE_CFG_PROBE_VLAN_MODE GENMASK(5, 4) +#define ANA_AC_PROBE_CFG_PROBE_VLAN_MODE_SET(x)\ + FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_VLAN_MODE, x) +#define ANA_AC_PROBE_CFG_PROBE_VLAN_MODE_GET(x)\ + FIELD_GET(ANA_AC_PROBE_CFG_PROBE_VLAN_MODE, x) + +#define ANA_AC_PROBE_CFG_PROBE_MAC_MODE GENMASK(3, 2) +#define ANA_AC_PROBE_CFG_PROBE_MAC_MODE_SET(x)\ + FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_MAC_MODE, x) +#define ANA_AC_PROBE_CFG_PROBE_MAC_MODE_GET(x)\ + FIELD_GET(ANA_AC_PROBE_CFG_PROBE_MAC_MODE, x) + +#define ANA_AC_PROBE_CFG_PROBE_DIRECTION GENMASK(1, 0) +#define ANA_AC_PROBE_CFG_PROBE_DIRECTION_SET(x)\ + FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_DIRECTION, x) +#define ANA_AC_PROBE_CFG_PROBE_DIRECTION_GET(x)\ + FIELD_GET(ANA_AC_PROBE_CFG_PROBE_DIRECTION, x) + +/* ANA_AC:MIRROR_PROBE:PROBE_PORT_CFG */ +#define ANA_AC_PROBE_PORT_CFG(g) \ + __REG(TARGET_ANA_AC, 0, 1, 893696, g, 3, 32, 8, 0, 1, 4) + +/* ANA_AC:MIRROR_PROBE:PROBE_PORT_CFG1 */ +#define ANA_AC_PROBE_PORT_CFG1(g) \ + __REG(TARGET_ANA_AC, 0, 1, 893696, g, 3, 32, 12, 0, 1, 4) + +/* ANA_AC:MIRROR_PROBE:PROBE_PORT_CFG2 */ +#define ANA_AC_PROBE_PORT_CFG2(g) \ + __REG(TARGET_ANA_AC, 0, 1, 893696, g, 3, 32, 16, 0, 1, 4) + +#define ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2 BIT(0) +#define ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2_SET(x)\ + FIELD_PREP(ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2, x) +#define ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2_GET(x)\ + FIELD_GET(ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2, x) + /* ANA_AC:SRC:SRC_CFG */ #define ANA_AC_SRC_CFG(g) __REG(TARGET_ANA_AC,\ 0, 1, 849920, g, 102, 16, 0, 0, 1, 4) @@ -6203,6 +6261,16 @@ enum sparx5_target { #define QFWD_SWITCH_PORT_MODE_LEARNALL_MORE_GET(x)\ FIELD_GET(QFWD_SWITCH_PORT_MODE_LEARNALL_MORE, x) +/* QFWD:SYSTEM:FRAME_COPY_CFG */ +#define QFWD_FRAME_COPY_CFG(r)\ + __REG(TARGET_QFWD, 0, 1, 0, 0, 1, 340, 284, r, 12, 4) + +#define QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL GENMASK(12, 6) +#define QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL_SET(x)\ + FIELD_PREP(QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL, x) +#define QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL_GET(x)\ + FIELD_GET(QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL, x) + /* QRES:RES_CTRL:RES_CFG */ #define QRES_RES_CFG(g) __REG(TARGET_QRES,\ 0, 1, 0, g, 5120, 16, 0, 0, 1, 4) diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c 
b/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c new file mode 100644 index 000000000000..15db423be4aa --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Microchip Sparx5 Switch driver + * + * Copyright (c) 2024 Microchip Technology Inc. and its subsidiaries. + */ + +#include "sparx5_main.h" +#include "sparx5_main_regs.h" +#include "sparx5_tc.h" + +#define SPX5_MIRROR_PROBE_MAX 3 +#define SPX5_MIRROR_DISABLED 0 +#define SPX5_MIRROR_EGRESS 1 +#define SPX5_MIRROR_INGRESS 2 +#define SPX5_MIRROR_MONITOR_PORT_DEFAULT 65 +#define SPX5_QFWD_MP_OFFSET 9 /* Mirror port offset in the QFWD register */ + +/* Convert from bool ingress/egress to mirror direction */ +static u32 sparx5_mirror_to_dir(bool ingress) +{ + return ingress ? SPX5_MIRROR_INGRESS : SPX5_MIRROR_EGRESS; +} + +/* Get ports belonging to this mirror */ +static u64 sparx5_mirror_port_get(struct sparx5 *sparx5, u32 idx) +{ + return (u64)spx5_rd(sparx5, ANA_AC_PROBE_PORT_CFG1(idx)) << 32 | + spx5_rd(sparx5, ANA_AC_PROBE_PORT_CFG(idx)); +} + +/* Add port to mirror (only front ports) */ +static void sparx5_mirror_port_add(struct sparx5 *sparx5, u32 idx, u32 portno) +{ + u32 val, reg = portno; + + reg = portno / BITS_PER_BYTE; + val = BIT(portno % BITS_PER_BYTE); + + if (reg == 0) + return spx5_rmw(val, val, sparx5, ANA_AC_PROBE_PORT_CFG(idx)); + else + return spx5_rmw(val, val, sparx5, ANA_AC_PROBE_PORT_CFG1(idx)); +} + +/* Delete port from mirror (only front ports) */ +static void sparx5_mirror_port_del(struct sparx5 *sparx5, u32 idx, u32 portno) +{ + u32 val, reg = portno; + + reg = portno / BITS_PER_BYTE; + val = BIT(portno % BITS_PER_BYTE); + + if (reg == 0) + return spx5_rmw(0, val, sparx5, ANA_AC_PROBE_PORT_CFG(idx)); + else + return spx5_rmw(0, val, sparx5, ANA_AC_PROBE_PORT_CFG1(idx)); +} + +/* Check if mirror contains port */ +static bool sparx5_mirror_contains(struct sparx5 *sparx5, u32 idx, u32 portno) +{ + return (sparx5_mirror_port_get(sparx5, idx) & BIT_ULL(portno)) != 0; +} + +/* Check if mirror is empty */ +static bool sparx5_mirror_is_empty(struct sparx5 *sparx5, u32 idx) +{ + return sparx5_mirror_port_get(sparx5, idx) == 0; +} + +/* Get direction of mirror */ +static u32 sparx5_mirror_dir_get(struct sparx5 *sparx5, u32 idx) +{ + u32 val = spx5_rd(sparx5, ANA_AC_PROBE_CFG(idx)); + + return ANA_AC_PROBE_CFG_PROBE_DIRECTION_GET(val); +} + +/* Set direction of mirror */ +static void sparx5_mirror_dir_set(struct sparx5 *sparx5, u32 idx, u32 dir) +{ + spx5_rmw(ANA_AC_PROBE_CFG_PROBE_DIRECTION_SET(dir), + ANA_AC_PROBE_CFG_PROBE_DIRECTION, sparx5, + ANA_AC_PROBE_CFG(idx)); +} + +/* Set the monitor port for this mirror */ +static void sparx5_mirror_monitor_set(struct sparx5 *sparx5, u32 idx, + u32 portno) +{ + spx5_rmw(QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL_SET(portno), + QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL, sparx5, + QFWD_FRAME_COPY_CFG(idx + SPX5_QFWD_MP_OFFSET)); +} + +/* Get the monitor port of this mirror */ +static u32 sparx5_mirror_monitor_get(struct sparx5 *sparx5, u32 idx) +{ + u32 val = spx5_rd(sparx5, + QFWD_FRAME_COPY_CFG(idx + SPX5_QFWD_MP_OFFSET)); + + return QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL_GET(val); +} + +/* Check if port is the monitor port of this mirror */ +static bool sparx5_mirror_has_monitor(struct sparx5 *sparx5, u32 idx, + u32 portno) +{ + return sparx5_mirror_monitor_get(sparx5, idx) == portno; +} + +/* Get a suitable mirror for this port */ +static int sparx5_mirror_get(struct sparx5_port *sport, + struct sparx5_port 
*mport, u32 dir, u32 *idx) +{ + struct sparx5 *sparx5 = sport->sparx5; + u32 i; + + /* Check if this port is already used as a monitor port */ + for (i = 0; i < SPX5_MIRROR_PROBE_MAX; i++) + if (sparx5_mirror_has_monitor(sparx5, i, sport->portno)) + return -EINVAL; + + /* Check if existing mirror can be reused + * (same direction and monitor port). + */ + for (i = 0; i < SPX5_MIRROR_PROBE_MAX; i++) { + if (sparx5_mirror_dir_get(sparx5, i) == dir && + sparx5_mirror_has_monitor(sparx5, i, mport->portno)) { + *idx = i; + return 0; + } + } + + /* Return free mirror */ + for (i = 0; i < SPX5_MIRROR_PROBE_MAX; i++) { + if (sparx5_mirror_is_empty(sparx5, i)) { + *idx = i; + return 0; + } + } + + return -ENOENT; +} + +int sparx5_mirror_add(struct sparx5_mall_entry *entry) +{ + u32 mirror_idx, dir = sparx5_mirror_to_dir(entry->ingress); + struct sparx5_port *sport, *mport; + struct sparx5 *sparx5; + int err; + + /* Source port */ + sport = entry->port; + /* monitor port */ + mport = entry->mirror.port; + sparx5 = sport->sparx5; + + if (sport->portno == mport->portno) + return -EINVAL; + + err = sparx5_mirror_get(sport, mport, dir, &mirror_idx); + if (err) + return err; + + if (sparx5_mirror_contains(sparx5, mirror_idx, sport->portno)) + return -EEXIST; + + /* Add port to mirror */ + sparx5_mirror_port_add(sparx5, mirror_idx, sport->portno); + + /* Set direction of mirror */ + sparx5_mirror_dir_set(sparx5, mirror_idx, dir); + + /* Set monitor port for mirror */ + sparx5_mirror_monitor_set(sparx5, mirror_idx, mport->portno); + + entry->mirror.idx = mirror_idx; + + return 0; +} + +void sparx5_mirror_del(struct sparx5_mall_entry *entry) +{ + struct sparx5_port *port = entry->port; + struct sparx5 *sparx5 = port->sparx5; + u32 mirror_idx = entry->mirror.idx; + + sparx5_mirror_port_del(sparx5, mirror_idx, port->portno); + if (!sparx5_mirror_is_empty(sparx5, mirror_idx)) + return; + + sparx5_mirror_dir_set(sparx5, mirror_idx, SPX5_MIRROR_DISABLED); + + sparx5_mirror_monitor_set(sparx5, + mirror_idx, + SPX5_MIRROR_MONITOR_PORT_DEFAULT); +} + +void sparx5_mirror_stats(struct sparx5_mall_entry *entry, + struct flow_stats *fstats) +{ + struct sparx5_port *port = entry->port; + struct rtnl_link_stats64 new_stats; + struct flow_stats *old_stats; + + old_stats = &entry->port->mirror_stats; + sparx5_get_stats64(port->ndev, &new_stats); + + if (entry->ingress) { + flow_stats_update(fstats, + new_stats.rx_bytes - old_stats->bytes, + new_stats.rx_packets - old_stats->pkts, + new_stats.rx_dropped - old_stats->drops, + old_stats->lastused, + FLOW_ACTION_HW_STATS_IMMEDIATE); + + old_stats->bytes = new_stats.rx_bytes; + old_stats->pkts = new_stats.rx_packets; + old_stats->drops = new_stats.rx_dropped; + old_stats->lastused = jiffies; + } else { + flow_stats_update(fstats, + new_stats.tx_bytes - old_stats->bytes, + new_stats.tx_packets - old_stats->pkts, + new_stats.tx_dropped - old_stats->drops, + old_stats->lastused, + FLOW_ACTION_HW_STATS_IMMEDIATE); + + old_stats->bytes = new_stats.tx_bytes; + old_stats->pkts = new_stats.tx_packets; + old_stats->drops = new_stats.tx_dropped; + old_stats->lastused = jiffies; + } +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c index ac7e1cffbcec..f3f5fb420468 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c @@ -67,7 +67,7 @@ static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap) for (i = 0; i < IFH_LEN; i++) 
ifh[i] = spx5_rd(sparx5, QS_XTR_RD(grp)); - /* Decode IFH (whats needed) */ + /* Decode IFH (what's needed) */ sparx5_ifh_parse(ifh, &fi); /* Map to port netdev */ diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c index 60dd2fd603a8..062e486c002c 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c @@ -370,7 +370,7 @@ static int sparx5_port_disable(struct sparx5 *sparx5, struct sparx5_port *port, /* 6: Wait while the last frame is exiting the queues */ usleep_range(8 * spd_prm, 10 * spd_prm); - /* 7: Flush the queues accociated with the port->portno */ + /* 7: Flush the queues associated with the port->portno */ spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) | HSCH_FLUSH_CTRL_FLUSH_DST_SET(1) | HSCH_FLUSH_CTRL_FLUSH_SRC_SET(1) | diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c index 4af85d108a06..0b4abc3eb53d 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c @@ -190,7 +190,7 @@ static int sparx5_port_bridge_join(struct sparx5_port *port, /* Remove standalone port entry */ sparx5_mact_forget(sparx5, ndev->dev_addr, 0); - /* Port enters in bridge mode therefor don't need to copy to CPU + /* Port enters in bridge mode therefore don't need to copy to CPU * frames for multicast in case the bridge is not requesting them */ __dev_mc_unsync(ndev, sparx5_mc_unsync); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c index 55f255a3c9db..8d67d9f24c76 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c @@ -159,13 +159,14 @@ out: static int sparx5_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st) { + struct netlink_ext_ack *extack = st->fco->common.extack; struct flow_match_control mt; u32 value, mask; int err = 0; flow_rule_match_control(st->frule, &mt); - if (mt.mask->flags) { + if (mt.mask->flags & (FLOW_DIS_IS_FRAGMENT | FLOW_DIS_FIRST_FRAG)) { u8 is_frag_key = !!(mt.key->flags & FLOW_DIS_IS_FRAGMENT); u8 is_frag_mask = !!(mt.mask->flags & FLOW_DIS_IS_FRAGMENT); u8 is_frag_idx = (is_frag_key << 1) | is_frag_mask; @@ -178,7 +179,7 @@ sparx5_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st) u8 vdt = sparx5_vcap_frag_map[is_frag_idx][first_frag_idx]; if (vdt == FRAG_INVAL) { - NL_SET_ERR_MSG_MOD(st->fco->common.extack, + NL_SET_ERR_MSG_MOD(extack, "Match on invalid fragment flag combination"); return -EINVAL; } @@ -190,16 +191,19 @@ sparx5_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st) err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_FRAGMENT_TYPE, value, mask); - if (err) - goto out; + if (err) { + NL_SET_ERR_MSG_MOD(extack, "ip_frag parse error"); + return err; + } } - st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL); + if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT | + FLOW_DIS_FIRST_FRAG, + mt.mask->flags, extack)) + return -EOPNOTSUPP; - return err; + st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL); -out: - NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error"); return err; } @@ -1023,6 +1027,64 @@ static int sparx5_tc_action_vlan_push(struct vcap_admin *admin, return err; } +static void sparx5_tc_flower_set_port_mask(struct vcap_u72_action 
*ports, + struct net_device *ndev) +{ + struct sparx5_port *port = netdev_priv(ndev); + int byidx = port->portno / BITS_PER_BYTE; + int biidx = port->portno % BITS_PER_BYTE; + + ports->value[byidx] |= BIT(biidx); +} + +static int sparx5_tc_action_mirred(struct vcap_admin *admin, + struct vcap_rule *vrule, + struct flow_cls_offload *fco, + struct flow_action_entry *act) +{ + struct vcap_u72_action ports = {0}; + int err; + + if (admin->vtype != VCAP_TYPE_IS0 && admin->vtype != VCAP_TYPE_IS2) { + NL_SET_ERR_MSG_MOD(fco->common.extack, + "Mirror action not supported in this VCAP"); + return -EOPNOTSUPP; + } + + err = vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE, + SPX5_PMM_OR_DSTMASK); + if (err) + return err; + + sparx5_tc_flower_set_port_mask(&ports, act->dev); + + return vcap_rule_add_action_u72(vrule, VCAP_AF_PORT_MASK, &ports); +} + +static int sparx5_tc_action_redirect(struct vcap_admin *admin, + struct vcap_rule *vrule, + struct flow_cls_offload *fco, + struct flow_action_entry *act) +{ + struct vcap_u72_action ports = {0}; + int err; + + if (admin->vtype != VCAP_TYPE_IS0 && admin->vtype != VCAP_TYPE_IS2) { + NL_SET_ERR_MSG_MOD(fco->common.extack, + "Redirect action not supported in this VCAP"); + return -EOPNOTSUPP; + } + + err = vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE, + SPX5_PMM_REPLACE_ALL); + if (err) + return err; + + sparx5_tc_flower_set_port_mask(&ports, act->dev); + + return vcap_rule_add_action_u72(vrule, VCAP_AF_PORT_MASK, &ports); +} + /* Remove rule keys that may prevent templates from matching a keyset */ static void sparx5_tc_flower_simplify_rule(struct vcap_admin *admin, struct vcap_rule *vrule, @@ -1169,6 +1231,16 @@ static int sparx5_tc_flower_replace(struct net_device *ndev, if (err) goto out; break; + case FLOW_ACTION_MIRRED: + err = sparx5_tc_action_mirred(admin, vrule, fco, act); + if (err) + goto out; + break; + case FLOW_ACTION_REDIRECT: + err = sparx5_tc_action_redirect(admin, vrule, fco, act); + if (err) + goto out; + break; case FLOW_ACTION_ACCEPT: err = sparx5_tc_set_actionset(admin, vrule); if (err) diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c index d88a93f22606..6b4d1d7b9730 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c @@ -11,11 +11,44 @@ #include "sparx5_main.h" #include "sparx5_vcap_impl.h" +static struct sparx5_mall_entry * +sparx5_tc_matchall_entry_find(struct list_head *entries, unsigned long cookie) +{ + struct sparx5_mall_entry *entry; + + list_for_each_entry(entry, entries, list) { + if (entry->cookie == cookie) + return entry; + } + + return NULL; +} + +static void sparx5_tc_matchall_parse_action(struct sparx5_port *port, + struct sparx5_mall_entry *entry, + struct flow_action_entry *action, + bool ingress, + unsigned long cookie) +{ + entry->port = port; + entry->type = action->id; + entry->ingress = ingress; + entry->cookie = cookie; +} + +static void +sparx5_tc_matchall_parse_mirror_action(struct sparx5_mall_entry *entry, + struct flow_action_entry *action) +{ + entry->mirror.port = netdev_priv(action->dev); +} + static int sparx5_tc_matchall_replace(struct net_device *ndev, struct tc_cls_matchall_offload *tmo, bool ingress) { struct sparx5_port *port = netdev_priv(ndev); + struct sparx5_mall_entry *mall_entry; struct flow_action_entry *action; struct sparx5 *sparx5; int err; @@ -27,8 +60,45 @@ static int sparx5_tc_matchall_replace(struct net_device 
*ndev, } action = &tmo->rule->action.entries[0]; + mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL); + if (!mall_entry) + return -ENOMEM; + + sparx5_tc_matchall_parse_action(port, + mall_entry, + action, + ingress, + tmo->cookie); + sparx5 = port->sparx5; switch (action->id) { + case FLOW_ACTION_MIRRED: + sparx5_tc_matchall_parse_mirror_action(mall_entry, action); + err = sparx5_mirror_add(mall_entry); + if (err) { + switch (err) { + case -EEXIST: + NL_SET_ERR_MSG_MOD(tmo->common.extack, + "Mirroring already exists"); + break; + case -EINVAL: + NL_SET_ERR_MSG_MOD(tmo->common.extack, + "Cannot mirror a monitor port"); + break; + case -ENOENT: + NL_SET_ERR_MSG_MOD(tmo->common.extack, + "No more mirror probes available"); + break; + default: + NL_SET_ERR_MSG_MOD(tmo->common.extack, + "Unknown error"); + break; + } + return err; + } + /* Get baseline stats for this port */ + sparx5_mirror_stats(mall_entry, &tmo->stats); + break; case FLOW_ACTION_GOTO: err = vcap_enable_lookups(sparx5->vcap_ctrl, ndev, tmo->common.chain_index, @@ -59,6 +129,9 @@ static int sparx5_tc_matchall_replace(struct net_device *ndev, NL_SET_ERR_MSG_MOD(tmo->common.extack, "Unsupported action"); return -EOPNOTSUPP; } + + list_add_tail(&mall_entry->list, &sparx5->mall_entries); + return 0; } @@ -67,19 +140,51 @@ static int sparx5_tc_matchall_destroy(struct net_device *ndev, bool ingress) { struct sparx5_port *port = netdev_priv(ndev); - struct sparx5 *sparx5; - int err; + struct sparx5 *sparx5 = port->sparx5; + struct sparx5_mall_entry *entry; + int err = 0; - sparx5 = port->sparx5; - if (!tmo->rule && tmo->cookie) { + entry = sparx5_tc_matchall_entry_find(&sparx5->mall_entries, + tmo->cookie); + if (!entry) + return -ENOENT; + + if (entry->type == FLOW_ACTION_MIRRED) { + sparx5_mirror_del(entry); + } else if (entry->type == FLOW_ACTION_GOTO) { err = vcap_enable_lookups(sparx5->vcap_ctrl, ndev, 0, 0, tmo->cookie, false); - if (err) - return err; - return 0; + } else { + NL_SET_ERR_MSG_MOD(tmo->common.extack, "Unsupported action"); + err = -EOPNOTSUPP; } - NL_SET_ERR_MSG_MOD(tmo->common.extack, "Unsupported action"); - return -EOPNOTSUPP; + + list_del(&entry->list); + + return err; +} + +static int sparx5_tc_matchall_stats(struct net_device *ndev, + struct tc_cls_matchall_offload *tmo, + bool ingress) +{ + struct sparx5_port *port = netdev_priv(ndev); + struct sparx5 *sparx5 = port->sparx5; + struct sparx5_mall_entry *entry; + + entry = sparx5_tc_matchall_entry_find(&sparx5->mall_entries, + tmo->cookie); + if (!entry) + return -ENOENT; + + if (entry->type == FLOW_ACTION_MIRRED) { + sparx5_mirror_stats(entry, &tmo->stats); + } else { + NL_SET_ERR_MSG_MOD(tmo->common.extack, "Unsupported action"); + return -EOPNOTSUPP; + } + + return 0; } int sparx5_tc_matchall(struct net_device *ndev, @@ -91,6 +196,8 @@ int sparx5_tc_matchall(struct net_device *ndev, return sparx5_tc_matchall_replace(ndev, tmo, ingress); case TC_CLSMATCHALL_DESTROY: return sparx5_tc_matchall_destroy(ndev, tmo, ingress); + case TC_CLSMATCHALL_STATS: + return sparx5_tc_matchall_stats(ndev, tmo, ingress); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h b/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h index c3569a4c7b69..4735fad05708 100644 --- a/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h +++ b/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h @@ -290,7 +290,7 @@ enum vcap_keyfield_set { * Sparx5: TCP flag RST , LAN966x: TCP: TCP flag RST. 
PTP over UDP: messageType * bit 3 * VCAP_KF_L4_SEQUENCE_EQ0_IS: W1, sparx5: is2/es2, lan966x: is2 - * Set if TCP sequence number is 0, LAN966x: Overlayed with PTP over UDP: + * Set if TCP sequence number is 0, LAN966x: Overlaid with PTP over UDP: * messageType bit 0 * VCAP_KF_L4_SPORT: W16, sparx5: is0/is2/es2, lan966x: is1/is2 * TCP/UDP source port diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.c b/drivers/net/ethernet/microchip/vcap/vcap_api.c index ef980e4e5bc2..2687765abe52 100644 --- a/drivers/net/ethernet/microchip/vcap/vcap_api.c +++ b/drivers/net/ethernet/microchip/vcap/vcap_api.c @@ -327,7 +327,7 @@ static int vcap_find_keystream_typegroup_sw(struct vcap_control *vctrl, } /* Verify that the typegroup information, subword count, keyset and type id - * are in sync and correct, return the list of matchin keysets + * are in sync and correct, return the list of matching keysets */ int vcap_find_keystream_keysets(struct vcap_control *vctrl, @@ -2907,6 +2907,18 @@ int vcap_rule_add_action_u32(struct vcap_rule *rule, } EXPORT_SYMBOL_GPL(vcap_rule_add_action_u32); +/* Add a 72 bit action field with value to the rule */ +int vcap_rule_add_action_u72(struct vcap_rule *rule, + enum vcap_action_field action, + struct vcap_u72_action *fieldval) +{ + struct vcap_client_actionfield_data data; + + memcpy(&data.u72, fieldval, sizeof(data.u72)); + return vcap_rule_add_action(rule, action, VCAP_FIELD_U72, &data); +} +EXPORT_SYMBOL_GPL(vcap_rule_add_action_u72); + static int vcap_read_counter(struct vcap_rule_internal *ri, struct vcap_counter *ctr) { @@ -2931,7 +2943,7 @@ void vcap_netbytes_copy(u8 *dst, u8 *src, int count) } EXPORT_SYMBOL_GPL(vcap_netbytes_copy); -/* Convert validation error code into tc extact error message */ +/* Convert validation error code into tc extack error message */ void vcap_set_tc_exterr(struct flow_cls_offload *fco, struct vcap_rule *vrule) { switch (vrule->exterr) { diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_client.h b/drivers/net/ethernet/microchip/vcap/vcap_api_client.h index 88641508f885..cdf79e17ca54 100644 --- a/drivers/net/ethernet/microchip/vcap/vcap_api_client.h +++ b/drivers/net/ethernet/microchip/vcap/vcap_api_client.h @@ -200,6 +200,8 @@ int vcap_rule_add_action_bit(struct vcap_rule *rule, enum vcap_action_field action, enum vcap_bit val); int vcap_rule_add_action_u32(struct vcap_rule *rule, enum vcap_action_field action, u32 value); +int vcap_rule_add_action_u72(struct vcap_rule *rule, enum vcap_action_field action, + struct vcap_u72_action *fieldval); /* Get number of rules in a vcap instance lookup chain id range */ int vcap_admin_rule_count(struct vcap_admin *admin, int cid); @@ -236,7 +238,7 @@ const struct vcap_set *vcap_keyfieldset(struct vcap_control *vctrl, /* Copy to host byte order */ void vcap_netbytes_copy(u8 *dst, u8 *src, int count); -/* Convert validation error code into tc extact error message */ +/* Convert validation error code into tc extack error message */ void vcap_set_tc_exterr(struct flow_cls_offload *fco, struct vcap_rule *vrule); /* Cleanup a VCAP instance */ diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_private.h b/drivers/net/ethernet/microchip/vcap/vcap_api_private.h index df81d9ff502b..844bdf6b5f45 100644 --- a/drivers/net/ethernet/microchip/vcap/vcap_api_private.h +++ b/drivers/net/ethernet/microchip/vcap/vcap_api_private.h @@ -109,7 +109,7 @@ int vcap_addr_keysets(struct vcap_control *vctrl, struct net_device *ndev, struct vcap_keyset_list *kslist); /* Verify that the typegroup 
information, subword count, keyset and type id - * are in sync and correct, return the list of matchin keysets + * are in sync and correct, return the list of matching keysets */ int vcap_find_keystream_keysets(struct vcap_control *vctrl, enum vcap_type vt, u32 *keystream, u32 *mskstream, bool mask, diff --git a/drivers/net/ethernet/microsoft/Kconfig b/drivers/net/ethernet/microsoft/Kconfig index 01eb7445ead9..286f0d5697a1 100644 --- a/drivers/net/ethernet/microsoft/Kconfig +++ b/drivers/net/ethernet/microsoft/Kconfig @@ -17,7 +17,8 @@ if NET_VENDOR_MICROSOFT config MICROSOFT_MANA tristate "Microsoft Azure Network Adapter (MANA) support" - depends on PCI_MSI && X86_64 + depends on PCI_MSI + depends on X86_64 || (ARM64 && !CPU_BIG_ENDIAN && ARM64_4K_PAGES) depends on PCI_HYPERV select AUXILIARY_BUS select PAGE_POOL diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index d8af5e7e15b4..d087cf954f75 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -690,12 +690,12 @@ static int mana_change_mtu(struct net_device *ndev, int new_mtu) goto out; } - ndev->mtu = new_mtu; + WRITE_ONCE(ndev->mtu, new_mtu); err = mana_attach(ndev); if (err) { netdev_err(ndev, "mana_attach failed: %d\n", err); - ndev->mtu = old_mtu; + WRITE_ONCE(ndev->mtu, old_mtu); } out: @@ -1058,11 +1058,10 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc, struct mana_cfg_rx_steer_req_v2 *req; struct mana_cfg_rx_steer_resp resp = {}; struct net_device *ndev = apc->ndev; - mana_handle_t *req_indir_tab; u32 req_buf_size; int err; - req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries; + req_buf_size = struct_size(req, indir_tab, num_entries); req = kzalloc(req_buf_size, GFP_KERNEL); if (!req) return -ENOMEM; @@ -1074,7 +1073,8 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc, req->vport = apc->port_handle; req->num_indir_entries = num_entries; - req->indir_tab_offset = sizeof(*req); + req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2, + indir_tab); req->rx_enable = rx; req->rss_enable = apc->rss_state; req->update_default_rxobj = update_default_rxobj; @@ -1086,11 +1086,9 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc, if (update_key) memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE); - if (update_tab) { - req_indir_tab = (mana_handle_t *)(req + 1); - memcpy(req_indir_tab, apc->rxobj_table, - req->num_indir_entries * sizeof(mana_handle_t)); - } + if (update_tab) + memcpy(req->indir_tab, apc->rxobj_table, + flex_array_size(req, indir_tab, req->num_indir_entries)); err = mana_send_request(apc->ac, req, req_buf_size, &resp, sizeof(resp)); diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index 33b438c6aec5..a057ec3dab97 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -609,11 +609,8 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress, return ret; } - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { - struct flow_match_control match; - - flow_rule_match_control(rule, &match); - } + if (flow_rule_match_has_control_flags(rule, extack)) + return -EOPNOTSUPP; if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { struct flow_match_vlan match; diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 7b7e1c5b00f4..b7d9657a7af3 100644 
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -3036,11 +3036,11 @@ static int myri10ge_change_mtu(struct net_device *dev, int new_mtu) /* if we change the mtu on an active device, we must * reset the device so the firmware sees the change */ myri10ge_close(dev); - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); myri10ge_open(dev); - } else - dev->mtu = new_mtu; - + } else { + WRITE_ONCE(dev->mtu, new_mtu); + } return 0; } diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index 650a5a166070..ad0c14849115 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -2526,7 +2526,7 @@ static void __set_rx_mode(struct net_device *dev) static int natsemi_change_mtu(struct net_device *dev, int new_mtu) { - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); /* synchronized against open : rtnl_lock() held by caller */ if (netif_running(dev)) { diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 55408f16fbbc..f235e76e4ce9 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -6637,7 +6637,7 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu) struct s2io_nic *sp = netdev_priv(dev); int ret = 0; - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); if (netif_running(dev)) { s2io_stop_all_tx_queue(sp); s2io_card_down(sp); diff --git a/drivers/net/ethernet/netronome/nfp/devlink_param.c b/drivers/net/ethernet/netronome/nfp/devlink_param.c index a655f9e69a7b..0e1a3800f371 100644 --- a/drivers/net/ethernet/netronome/nfp/devlink_param.c +++ b/drivers/net/ethernet/netronome/nfp/devlink_param.c @@ -132,7 +132,8 @@ exit_close_nsp: static int nfp_devlink_param_u8_set(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { const struct nfp_devlink_param_u8_arg *arg; struct nfp_pf *pf = devlink_priv(devlink); diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 2c3f62907958..aca2a7417af3 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -396,6 +396,17 @@ nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len, return 0; } +#define NFP_FL_CHECK(flag) ({ \ + IP_TUNNEL_DECLARE_FLAGS(__check) = { }; \ + __be16 __res; \ + \ + __set_bit(IP_TUNNEL_##flag##_BIT, __check); \ + __res = ip_tunnel_flags_to_be16(__check); \ + \ + BUILD_BUG_ON(__builtin_constant_p(__res) && \ + NFP_FL_TUNNEL_##flag != __res); \ +}) + static int nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun, const struct flow_action_entry *act, @@ -410,6 +421,7 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun, u32 tmp_set_ip_tun_type_index = 0; /* Currently support one pre-tunnel so index is always 0. 
*/ int pretun_idx = 0; + __be16 tun_flags; if (!IS_ENABLED(CONFIG_IPV6) && ipv6) return -EOPNOTSUPP; @@ -417,9 +429,10 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun, if (ipv6 && !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN)) return -EOPNOTSUPP; - BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM || - NFP_FL_TUNNEL_KEY != TUNNEL_KEY || - NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT); + NFP_FL_CHECK(CSUM); + NFP_FL_CHECK(KEY); + NFP_FL_CHECK(GENEVE_OPT); + if (ip_tun->options_len && (tun_type != NFP_FL_TUNNEL_GENEVE || !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))) { @@ -427,7 +440,9 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun, return -EOPNOTSUPP; } - if (ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS) { + tun_flags = ip_tunnel_flags_to_be16(ip_tun->key.tun_flags); + if (!ip_tunnel_flags_is_be16_compat(ip_tun->key.tun_flags) || + (tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS)) { NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support tunnel flag offload"); return -EOPNOTSUPP; @@ -442,7 +457,7 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun, FIELD_PREP(NFP_FL_PRE_TUN_INDEX, pretun_idx); set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index); - if (ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) + if (tun_flags & NFP_FL_TUNNEL_KEY) set_tun->tun_id = ip_tun->key.tun_id; if (ip_tun->key.ttl) { @@ -486,7 +501,7 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun, } set_tun->tos = ip_tun->key.tos; - set_tun->tun_flags = ip_tun->key.tun_flags; + set_tun->tun_flags = tun_flags; if (tun_type == NFP_FL_TUNNEL_GENEVE) { set_tun->tun_proto = htons(ETH_P_TEB); diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 0aceef9fe582..8e0a890381b6 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -527,10 +527,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, struct flow_match_control ctl; flow_rule_match_control(rule, &ctl); - if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS) { - NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on unknown control flag"); + + if (!flow_rule_is_supp_control_flags(NFP_FLOWER_SUPPORTED_CTLFLAGS, + ctl.mask->flags, extack)) return -EOPNOTSUPP; - } } ret_key_ls->key_layer = key_layer; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c index 635d33c0d6d3..ea75b9a06313 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c @@ -160,6 +160,7 @@ static const struct nfp_devlink_versions_simple { { DEVLINK_INFO_VERSION_GENERIC_BOARD_REV, "assembly.revision", }, { DEVLINK_INFO_VERSION_GENERIC_BOARD_MANUFACTURE, "assembly.vendor", }, { "board.model", /* code name */ "assembly.model", }, + { DEVLINK_INFO_VERSION_GENERIC_BOARD_PART_NUMBER, "pn", }, }; static int diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index f28e769e6fda..182ba0a8b095 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1526,7 +1526,7 @@ static void nfp_net_dp_swap(struct nfp_net *nn, struct nfp_net_dp *dp) *dp = nn->dp; nn->dp = new_dp; - nn->dp.netdev->mtu = new_dp.mtu; + WRITE_ONCE(nn->dp.netdev->mtu, new_dp.mtu); if 
(!netif_is_rxfh_configured(nn->dp.netdev)) nfp_net_rss_init_itbl(nn); @@ -2289,10 +2289,7 @@ static int nfp_net_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, if (!br_spec) return -EINVAL; - nla_for_each_nested(attr, br_spec, rem) { - if (nla_type(attr) != IFLA_BRIDGE_MODE) - continue; - + nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) { new_ctrl = nn->dp.ctrl; mode = nla_get_u16(attr); if (mode == BRIDGE_MODE_VEPA) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c index a614df095b08..2dd37557185e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c @@ -34,8 +34,11 @@ enum nfp_dumpspec_type { /* generic type plus length */ struct nfp_dump_tl { - __be32 type; - __be32 length; /* chunk length to follow, aligned to 8 bytes */ + /* New members must be added within the struct_group() macro below. */ + struct_group_tagged(nfp_dump_tl_hdr, hdr, + __be32 type; + __be32 length; /* chunk length to follow, aligned to 8 bytes */ + ); char data[]; }; @@ -55,19 +58,19 @@ struct nfp_dump_common_cpp { /* CSR dumpables */ struct nfp_dumpspec_csr { - struct nfp_dump_tl tl; + struct nfp_dump_tl_hdr tl; struct nfp_dump_common_cpp cpp; __be32 register_width; /* in bits */ }; struct nfp_dumpspec_rtsym { - struct nfp_dump_tl tl; + struct nfp_dump_tl_hdr tl; char rtsym[]; }; /* header for register dumpable */ struct nfp_dump_csr { - struct nfp_dump_tl tl; + struct nfp_dump_tl_hdr tl; struct nfp_dump_common_cpp cpp; __be32 register_width; /* in bits */ __be32 error; /* error code encountered while reading */ @@ -75,7 +78,7 @@ struct nfp_dump_csr { }; struct nfp_dump_rtsym { - struct nfp_dump_tl tl; + struct nfp_dump_tl_hdr tl; struct nfp_dump_common_cpp cpp; __be32 error; /* error code encountered while reading */ u8 padded_name_length; /* pad so data starts at 8 byte boundary */ @@ -84,12 +87,12 @@ struct nfp_dump_rtsym { }; struct nfp_dump_prolog { - struct nfp_dump_tl tl; + struct nfp_dump_tl_hdr tl; __be32 dump_level; }; struct nfp_dump_error { - struct nfp_dump_tl tl; + struct nfp_dump_tl_hdr tl; __be32 error; char padding[4]; char spec[]; @@ -449,6 +452,8 @@ static int nfp_dump_csr_range(struct nfp_pf *pf, struct nfp_dumpspec_csr *spec_csr, struct nfp_dump_state *dump) { + struct nfp_dump_tl *spec_csr_tl = + container_of(&spec_csr->tl, struct nfp_dump_tl, hdr); struct nfp_dump_csr *dump_header = dump->p; u32 reg_sz, header_size, total_size; u32 cpp_rd_addr, max_rd_addr; @@ -458,7 +463,7 @@ nfp_dump_csr_range(struct nfp_pf *pf, struct nfp_dumpspec_csr *spec_csr, int err; if (!nfp_csr_spec_valid(spec_csr)) - return nfp_dump_error_tlv(&spec_csr->tl, -EINVAL, dump); + return nfp_dump_error_tlv(spec_csr_tl, -EINVAL, dump); reg_sz = be32_to_cpu(spec_csr->register_width) / BITS_PER_BYTE; header_size = ALIGN8(sizeof(*dump_header)); @@ -466,7 +471,7 @@ nfp_dump_csr_range(struct nfp_pf *pf, struct nfp_dumpspec_csr *spec_csr, ALIGN8(be32_to_cpu(spec_csr->cpp.dump_length)); dest = dump->p + header_size; - err = nfp_add_tlv(be32_to_cpu(spec_csr->tl.type), total_size, dump); + err = nfp_add_tlv(be32_to_cpu(spec_csr_tl->type), total_size, dump); if (err) return err; @@ -552,6 +557,8 @@ nfp_dump_indirect_csr_range(struct nfp_pf *pf, struct nfp_dumpspec_csr *spec_csr, struct nfp_dump_state *dump) { + struct nfp_dump_tl *spec_csr_tl = + container_of(&spec_csr->tl, struct nfp_dump_tl, hdr); struct nfp_dump_csr *dump_header = dump->p; u32 reg_sz, 
header_size, total_size; u32 cpp_rd_addr, max_rd_addr; @@ -560,7 +567,7 @@ nfp_dump_indirect_csr_range(struct nfp_pf *pf, int err; if (!nfp_csr_spec_valid(spec_csr)) - return nfp_dump_error_tlv(&spec_csr->tl, -EINVAL, dump); + return nfp_dump_error_tlv(spec_csr_tl, -EINVAL, dump); reg_sz = be32_to_cpu(spec_csr->register_width) / BITS_PER_BYTE; header_size = ALIGN8(sizeof(*dump_header)); @@ -569,7 +576,7 @@ nfp_dump_indirect_csr_range(struct nfp_pf *pf, total_size = header_size + ALIGN8(reg_data_length); dest = dump->p + header_size; - err = nfp_add_tlv(be32_to_cpu(spec_csr->tl.type), total_size, dump); + err = nfp_add_tlv(be32_to_cpu(spec_csr_tl->type), total_size, dump); if (err) return err; @@ -597,6 +604,8 @@ static int nfp_dump_single_rtsym(struct nfp_pf *pf, struct nfp_dumpspec_rtsym *spec, struct nfp_dump_state *dump) { + struct nfp_dump_tl *spec_tl = + container_of(&spec->tl, struct nfp_dump_tl, hdr); struct nfp_dump_rtsym *dump_header = dump->p; struct nfp_dumpspec_cpp_isl_id cpp_params; struct nfp_rtsym_table *rtbl = pf->rtbl; @@ -607,14 +616,14 @@ nfp_dump_single_rtsym(struct nfp_pf *pf, struct nfp_dumpspec_rtsym *spec, void *dest; int err; - tl_len = be32_to_cpu(spec->tl.length); + tl_len = be32_to_cpu(spec_tl->length); key_len = strnlen(spec->rtsym, tl_len); if (key_len == tl_len) - return nfp_dump_error_tlv(&spec->tl, -EINVAL, dump); + return nfp_dump_error_tlv(spec_tl, -EINVAL, dump); sym = nfp_rtsym_lookup(rtbl, spec->rtsym); if (!sym) - return nfp_dump_error_tlv(&spec->tl, -ENOENT, dump); + return nfp_dump_error_tlv(spec_tl, -ENOENT, dump); sym_size = nfp_rtsym_size(sym); header_size = @@ -622,7 +631,7 @@ nfp_dump_single_rtsym(struct nfp_pf *pf, struct nfp_dumpspec_rtsym *spec, total_size = header_size + ALIGN8(sym_size); dest = dump->p + header_size; - err = nfp_add_tlv(be32_to_cpu(spec->tl.type), total_size, dump); + err = nfp_add_tlv(be32_to_cpu(spec_tl->type), total_size, dump); if (err) return err; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 3af1229a3f08..eee0bfc41074 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -177,7 +177,7 @@ static int nfp_repr_change_mtu(struct net_device *netdev, int new_mtu) if (err) return err; - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); return 0; } diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index fa1f78b03cb2..2aa4ad9cf96e 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c @@ -946,7 +946,7 @@ static int nixge_change_mtu(struct net_device *ndev, int new_mtu) NIXGE_MAX_JUMBO_FRAME_SIZE) return -EINVAL; - ndev->mtu = new_mtu; + WRITE_ONCE(ndev->mtu, new_mtu); return 0; } diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 31f896c4aa26..720f577929db 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -3098,7 +3098,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu) int old_mtu; old_mtu = dev->mtu; - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); /* return early if the buffer sizes will not change */ if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN) diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 28b7cec485ef..4ac29cd59f2b 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ 
b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -2184,7 +2184,7 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu) } } else { pch_gbe_reset(adapter); - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); adapter->hw.mac.max_frame_size = max_frame; } diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index ed7dd0a04235..62ba269da902 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -1639,7 +1639,7 @@ static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu) reg |= PAS_MAC_CFG_MACCFG_MAXF(new_mtu + ETH_HLEN + 4); write_mac_reg(mac, PAS_MAC_CFG_MACCFG, reg); - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); /* MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */ mac->bufsz = new_mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 7f0c6cdc375e..24870da3f484 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -1761,13 +1761,13 @@ static int ionic_change_mtu(struct net_device *netdev, int new_mtu) /* if we're not running, nothing more to do */ if (!netif_running(netdev)) { - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); return 0; } mutex_lock(&lif->queue_lock); ionic_stop_queues_reconfig(lif); - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); err = ionic_start_queues_reconfig(lif); mutex_unlock(&lif->queue_lock); diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c index 6e12cd21ac90..89c8b2349694 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c @@ -960,7 +960,7 @@ int netxen_nic_change_mtu(struct net_device *netdev, int mtu) rc = adapter->set_mtu(adapter, mtu); if (!rc) - netdev->mtu = mtu; + WRITE_ONCE(netdev->mtu, mtu); return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 1d719726f72b..b7def3b54937 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -662,8 +662,6 @@ struct qed_hwfn { }; struct pci_params { - int pm_cap; - unsigned long mem_start; unsigned long mem_end; unsigned int irq; diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c index dad8e617c393..1adc7fbb3f2f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_devlink.c +++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c @@ -132,7 +132,8 @@ static int qed_dl_param_get(struct devlink *dl, u32 id, } static int qed_dl_param_set(struct devlink *dl, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct qed_devlink *qed_dl = devlink_priv(dl); struct qed_dev *cdev; diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index c278f8893042..f915c423fe70 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -323,8 +323,7 @@ static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev) goto err2; } - cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); - if (IS_PF(cdev) && !cdev->pci_params.pm_cap) + if (IS_PF(cdev) && !pdev->pm_cap) DP_NOTICE(cdev, "Cannot find power management capability\n"); rc = 
dma_set_mask_and_coherent(&cdev->pdev->dev, DMA_BIT_MASK(64)); @@ -1206,7 +1205,6 @@ out: static int qed_slowpath_wq_start(struct qed_dev *cdev) { struct qed_hwfn *hwfn; - char name[NAME_SIZE]; int i; if (IS_VF(cdev)) @@ -1215,11 +1213,11 @@ static int qed_slowpath_wq_start(struct qed_dev *cdev) for_each_hwfn(cdev, i) { hwfn = &cdev->hwfns[i]; - snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x", - cdev->pdev->bus->number, - PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); + hwfn->slowpath_wq = alloc_workqueue("slowpath-%02x:%02x.%02x", + 0, 0, cdev->pdev->bus->number, + PCI_SLOT(cdev->pdev->devfn), + hwfn->abs_pf_id); - hwfn->slowpath_wq = alloc_workqueue(name, 0, 0); if (!hwfn->slowpath_wq) { DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n"); return -ENOMEM; @@ -1351,7 +1349,7 @@ static int qed_slowpath_start(struct qed_dev *cdev, (params->drv_rev << 8) | (params->drv_eng); strscpy(drv_version.name, params->name, - MCP_DRV_VER_STR_SIZE - 4); + sizeof(drv_version.name)); rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt, &drv_version); if (rc) { diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index ae3ebf0cf999..f497f6ca1018 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -1026,7 +1026,7 @@ static int qede_get_regs_len(struct net_device *ndev) static void qede_update_mtu(struct qede_dev *edev, struct qede_reload_args *args) { - edev->ndev->mtu = args->u.mtu; + WRITE_ONCE(edev->ndev->mtu, args->u.mtu); } /* Netdevice NDOs */ diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index a5ac21a0ee33..985026dd816f 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -1520,8 +1520,8 @@ static int qede_flow_spec_validate_unused(struct qede_dev *edev, return 0; } -static int qede_set_v4_tuple_to_profile(struct qede_dev *edev, - struct qede_arfs_tuple *t) +static int qede_set_v4_tuple_to_profile(struct qede_arfs_tuple *t, + struct netlink_ext_ack *extack) { /* We must have Only 4-tuples/l4 port/src ip/dst ip * as an input. @@ -1538,7 +1538,7 @@ static int qede_set_v4_tuple_to_profile(struct qede_dev *edev, t->dst_ipv4 && !t->src_ipv4) { t->mode = QED_FILTER_CONFIG_MODE_IP_DEST; } else { - DP_INFO(edev, "Invalid N-tuple\n"); + NL_SET_ERR_MSG_MOD(extack, "Invalid N-tuple"); return -EOPNOTSUPP; } @@ -1549,9 +1549,9 @@ static int qede_set_v4_tuple_to_profile(struct qede_dev *edev, return 0; } -static int qede_set_v6_tuple_to_profile(struct qede_dev *edev, - struct qede_arfs_tuple *t, - struct in6_addr *zaddr) +static int qede_set_v6_tuple_to_profile(struct qede_arfs_tuple *t, + struct in6_addr *zaddr, + struct netlink_ext_ack *extack) { /* We must have Only 4-tuples/l4 port/src ip/dst ip * as an input. 
@@ -1573,7 +1573,7 @@ static int qede_set_v6_tuple_to_profile(struct qede_dev *edev, !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) { t->mode = QED_FILTER_CONFIG_MODE_IP_DEST; } else { - DP_INFO(edev, "Invalid N-tuple\n"); + NL_SET_ERR_MSG_MOD(extack, "Invalid N-tuple"); return -EOPNOTSUPP; } @@ -1671,7 +1671,7 @@ static int qede_parse_actions(struct qede_dev *edev, int i; if (!flow_action_has_entries(flow_action)) { - DP_NOTICE(edev, "No actions received\n"); + NL_SET_ERR_MSG_MOD(extack, "No actions received"); return -EINVAL; } @@ -1687,7 +1687,8 @@ static int qede_parse_actions(struct qede_dev *edev, break; if (act->queue.index >= QEDE_RSS_COUNT(edev)) { - DP_INFO(edev, "Queue out-of-bounds\n"); + NL_SET_ERR_MSG_MOD(extack, + "Queue out-of-bounds"); return -EINVAL; } break; @@ -1700,8 +1701,8 @@ static int qede_parse_actions(struct qede_dev *edev, } static int -qede_flow_parse_ports(struct qede_dev *edev, struct flow_rule *rule, - struct qede_arfs_tuple *t) +qede_flow_parse_ports(struct flow_rule *rule, struct qede_arfs_tuple *t, + struct netlink_ext_ack *extack) { if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { struct flow_match_ports match; @@ -1709,7 +1710,8 @@ qede_flow_parse_ports(struct qede_dev *edev, struct flow_rule *rule, flow_rule_match_ports(rule, &match); if ((match.key->src && match.mask->src != htons(U16_MAX)) || (match.key->dst && match.mask->dst != htons(U16_MAX))) { - DP_NOTICE(edev, "Do not support ports masks\n"); + NL_SET_ERR_MSG_MOD(extack, + "Do not support ports masks"); return -EINVAL; } @@ -1721,10 +1723,12 @@ qede_flow_parse_ports(struct qede_dev *edev, struct flow_rule *rule, } static int -qede_flow_parse_v6_common(struct qede_dev *edev, struct flow_rule *rule, - struct qede_arfs_tuple *t) +qede_flow_parse_v6_common(struct flow_rule *rule, + struct qede_arfs_tuple *t, + struct netlink_ext_ack *extack) { struct in6_addr zero_addr, addr; + int err; memset(&zero_addr, 0, sizeof(addr)); memset(&addr, 0xff, sizeof(addr)); @@ -1737,8 +1741,8 @@ qede_flow_parse_v6_common(struct qede_dev *edev, struct flow_rule *rule, memcmp(&match.mask->src, &addr, sizeof(addr))) || (memcmp(&match.key->dst, &zero_addr, sizeof(addr)) && memcmp(&match.mask->dst, &addr, sizeof(addr)))) { - DP_NOTICE(edev, - "Do not support IPv6 address prefix/mask\n"); + NL_SET_ERR_MSG_MOD(extack, + "Do not support IPv6 address prefix/mask"); return -EINVAL; } @@ -1746,23 +1750,28 @@ qede_flow_parse_v6_common(struct qede_dev *edev, struct flow_rule *rule, memcpy(&t->dst_ipv6, &match.key->dst, sizeof(addr)); } - if (qede_flow_parse_ports(edev, rule, t)) - return -EINVAL; + err = qede_flow_parse_ports(rule, t, extack); + if (err) + return err; - return qede_set_v6_tuple_to_profile(edev, t, &zero_addr); + return qede_set_v6_tuple_to_profile(t, &zero_addr, extack); } static int -qede_flow_parse_v4_common(struct qede_dev *edev, struct flow_rule *rule, - struct qede_arfs_tuple *t) +qede_flow_parse_v4_common(struct flow_rule *rule, + struct qede_arfs_tuple *t, + struct netlink_ext_ack *extack) { + int err; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { struct flow_match_ipv4_addrs match; flow_rule_match_ipv4_addrs(rule, &match); if ((match.key->src && match.mask->src != htonl(U32_MAX)) || (match.key->dst && match.mask->dst != htonl(U32_MAX))) { - DP_NOTICE(edev, "Do not support ipv4 prefix/masks\n"); + NL_SET_ERR_MSG_MOD(extack, + "Do not support ipv4 prefix/masks"); return -EINVAL; } @@ -1770,55 +1779,57 @@ qede_flow_parse_v4_common(struct qede_dev *edev, struct 
flow_rule *rule, t->dst_ipv4 = match.key->dst; } - if (qede_flow_parse_ports(edev, rule, t)) - return -EINVAL; + err = qede_flow_parse_ports(rule, t, extack); + if (err) + return err; - return qede_set_v4_tuple_to_profile(edev, t); + return qede_set_v4_tuple_to_profile(t, extack); } static int -qede_flow_parse_tcp_v6(struct qede_dev *edev, struct flow_rule *rule, - struct qede_arfs_tuple *tuple) +qede_flow_parse_tcp_v6(struct flow_rule *rule, struct qede_arfs_tuple *tuple, + struct netlink_ext_ack *extack) { tuple->ip_proto = IPPROTO_TCP; tuple->eth_proto = htons(ETH_P_IPV6); - return qede_flow_parse_v6_common(edev, rule, tuple); + return qede_flow_parse_v6_common(rule, tuple, extack); } static int -qede_flow_parse_tcp_v4(struct qede_dev *edev, struct flow_rule *rule, - struct qede_arfs_tuple *tuple) +qede_flow_parse_tcp_v4(struct flow_rule *rule, struct qede_arfs_tuple *tuple, + struct netlink_ext_ack *extack) { tuple->ip_proto = IPPROTO_TCP; tuple->eth_proto = htons(ETH_P_IP); - return qede_flow_parse_v4_common(edev, rule, tuple); + return qede_flow_parse_v4_common(rule, tuple, extack); } static int -qede_flow_parse_udp_v6(struct qede_dev *edev, struct flow_rule *rule, - struct qede_arfs_tuple *tuple) +qede_flow_parse_udp_v6(struct flow_rule *rule, struct qede_arfs_tuple *tuple, + struct netlink_ext_ack *extack) { tuple->ip_proto = IPPROTO_UDP; tuple->eth_proto = htons(ETH_P_IPV6); - return qede_flow_parse_v6_common(edev, rule, tuple); + return qede_flow_parse_v6_common(rule, tuple, extack); } static int -qede_flow_parse_udp_v4(struct qede_dev *edev, struct flow_rule *rule, - struct qede_arfs_tuple *tuple) +qede_flow_parse_udp_v4(struct flow_rule *rule, struct qede_arfs_tuple *tuple, + struct netlink_ext_ack *extack) { tuple->ip_proto = IPPROTO_UDP; tuple->eth_proto = htons(ETH_P_IP); - return qede_flow_parse_v4_common(edev, rule, tuple); + return qede_flow_parse_v4_common(rule, tuple, extack); } static int -qede_parse_flow_attr(struct qede_dev *edev, __be16 proto, - struct flow_rule *rule, struct qede_arfs_tuple *tuple) +qede_parse_flow_attr(__be16 proto, struct flow_rule *rule, + struct qede_arfs_tuple *tuple, + struct netlink_ext_ack *extack) { struct flow_dissector *dissector = rule->match.dissector; int rc = -EINVAL; @@ -1832,14 +1843,18 @@ qede_parse_flow_attr(struct qede_dev *edev, __be16 proto, BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | BIT_ULL(FLOW_DISSECTOR_KEY_PORTS))) { - DP_NOTICE(edev, "Unsupported key set:0x%llx\n", - dissector->used_keys); + NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported key used: 0x%llx", + dissector->used_keys); return -EOPNOTSUPP; } + if (flow_rule_match_has_control_flags(rule, extack)) + return -EOPNOTSUPP; + if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6)) { - DP_NOTICE(edev, "Unsupported proto=0x%x\n", proto); + NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported proto=0x%x", + proto); return -EPROTONOSUPPORT; } @@ -1851,15 +1866,15 @@ qede_parse_flow_attr(struct qede_dev *edev, __be16 proto, } if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IP)) - rc = qede_flow_parse_tcp_v4(edev, rule, tuple); + rc = qede_flow_parse_tcp_v4(rule, tuple, extack); else if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IPV6)) - rc = qede_flow_parse_tcp_v6(edev, rule, tuple); + rc = qede_flow_parse_tcp_v6(rule, tuple, extack); else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IP)) - rc = qede_flow_parse_udp_v4(edev, rule, tuple); + rc = qede_flow_parse_udp_v4(rule, tuple, extack); else if (ip_proto == IPPROTO_UDP && proto 
== htons(ETH_P_IPV6)) - rc = qede_flow_parse_udp_v6(edev, rule, tuple); + rc = qede_flow_parse_udp_v6(rule, tuple, extack); else - DP_NOTICE(edev, "Invalid protocol request\n"); + NL_SET_ERR_MSG_MOD(extack, "Invalid protocol request"); return rc; } @@ -1867,9 +1882,10 @@ qede_parse_flow_attr(struct qede_dev *edev, __be16 proto, int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto, struct flow_cls_offload *f) { + struct netlink_ext_ack *extack = f->common.extack; struct qede_arfs_fltr_node *n; - int min_hlen, rc = -EINVAL; struct qede_arfs_tuple t; + int min_hlen, rc; __qede_lock(edev); @@ -1879,7 +1895,8 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto, } /* parse flower attribute and prepare filter */ - if (qede_parse_flow_attr(edev, proto, f->rule, &t)) + rc = qede_parse_flow_attr(proto, f->rule, &t, extack); + if (rc) goto unlock; /* Validate profile mode and number of filters */ @@ -1888,11 +1905,13 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto, DP_NOTICE(edev, "Filter configuration invalidated, filter mode=0x%x, configured mode=0x%x, filter count=0x%x\n", t.mode, edev->arfs->mode, edev->arfs->filter_count); + rc = -EINVAL; goto unlock; } /* parse tc actions and get the vf_id */ - if (qede_parse_actions(edev, &f->rule->action, f->common.extack)) + rc = qede_parse_actions(edev, &f->rule->action, extack); + if (rc) goto unlock; if (qede_flow_find_fltr(edev, &t)) { @@ -1938,8 +1957,11 @@ unlock: static int qede_flow_spec_validate(struct qede_dev *edev, struct flow_action *flow_action, struct qede_arfs_tuple *t, - __u32 location) + __u32 location, + struct netlink_ext_ack *extack) { + int err; + if (location >= QEDE_RFS_MAX_FLTR) { DP_INFO(edev, "Location out-of-bounds\n"); return -EINVAL; @@ -1960,8 +1982,9 @@ static int qede_flow_spec_validate(struct qede_dev *edev, return -EINVAL; } - if (qede_parse_actions(edev, flow_action, NULL)) - return -EINVAL; + err = qede_parse_actions(edev, flow_action, extack); + if (err) + return err; return 0; } @@ -1972,11 +1995,13 @@ static int qede_flow_spec_to_rule(struct qede_dev *edev, { struct ethtool_rx_flow_spec_input input = {}; struct ethtool_rx_flow_rule *flow; + struct netlink_ext_ack extack; __be16 proto; - int err = 0; + int err; - if (qede_flow_spec_validate_unused(edev, fs)) - return -EOPNOTSUPP; + err = qede_flow_spec_validate_unused(edev, fs); + if (err) + return err; switch ((fs->flow_type & ~FLOW_EXT)) { case TCP_V4_FLOW: @@ -1998,15 +2023,16 @@ static int qede_flow_spec_to_rule(struct qede_dev *edev, if (IS_ERR(flow)) return PTR_ERR(flow); - if (qede_parse_flow_attr(edev, proto, flow->rule, t)) { - err = -EINVAL; + err = qede_parse_flow_attr(proto, flow->rule, t, &extack); + if (err) goto err_out; - } /* Make sure location is valid and filter isn't already set */ err = qede_flow_spec_validate(edev, &flow->rule->action, t, - fs->location); + fs->location, &extack); err_out: + if (extack._msg) + DP_NOTICE(edev, "%s\n", extack._msg); ethtool_rx_flow_rule_destroy(flow); return err; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index 4b8bc46f55c2..ae4ee0326ee1 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -1015,7 +1015,7 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu) rc = qlcnic_fw_cmd_set_mtu(adapter, mtu); if (!rc) - netdev->mtu = mtu; + WRITE_ONCE(netdev->mtu, mtu); return rc; } diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c 
b/drivers/net/ethernet/qualcomm/emac/emac.c index 4c06f55878de..99d4647bf245 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -216,7 +216,7 @@ static int emac_change_mtu(struct net_device *netdev, int new_mtu) netif_dbg(adpt, hw, adpt->netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); if (netif_running(netdev)) return emac_reinit_locked(adpt); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index 9d2a9562c96f..f1e40aade127 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -90,7 +90,7 @@ static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu) new_mtu > (priv->real_dev->mtu - headroom)) return -EINVAL; - rmnet_dev->mtu = new_mtu; + WRITE_ONCE(rmnet_dev->mtu, new_mtu); return 0; } diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index f5786d78ed23..5652da8a178c 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -1277,14 +1277,14 @@ static int cp_change_mtu(struct net_device *dev, int new_mtu) /* if network interface not up, no need for complexity */ if (!netif_running(dev)) { - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); cp_set_rxbufsize(cp); /* set new rx buf size */ return 0; } /* network IS up, close it, reset MTU, and come up again. */ cp_close(dev); - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); cp_set_rxbufsize(cp); return cp_open(dev); } diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 0fc5fe564ae5..5abbea91bc07 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -2229,6 +2229,8 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii) * the wild. Let's disable detection. * { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45 }, */ + /* Realtek calls it RTL8168M, but it's handled like RTL8168H */ + { 0x7cf, 0x6c0, RTL_GIGA_MAC_VER_46 }, /* 8168G family. 
*/ { 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44 }, @@ -3922,7 +3924,7 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) { struct rtl8169_private *tp = netdev_priv(dev); - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); netdev_update_features(dev); rtl_jumbo_config(tp); rtl_set_eee_txidle_timer(tp); diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 9b1f639f64a1..4d100283c30f 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -2423,7 +2423,7 @@ static int ravb_change_mtu(struct net_device *ndev, int new_mtu) { struct ravb_private *priv = netdev_priv(ndev); - ndev->mtu = new_mtu; + WRITE_ONCE(ndev->mtu, new_mtu); if (netif_running(ndev)) { synchronize_irq(priv->emac_irq); @@ -2564,6 +2564,7 @@ static int ravb_mdio_init(struct ravb_private *priv) { struct platform_device *pdev = priv->pdev; struct device *dev = &pdev->dev; + struct device_node *mdio_node; struct phy_device *phydev; struct device_node *pn; int error; @@ -2583,7 +2584,13 @@ static int ravb_mdio_init(struct ravb_private *priv) pdev->name, pdev->id); /* Register MDIO bus */ - error = of_mdiobus_register(priv->mii_bus, dev->of_node); + mdio_node = of_get_child_by_name(dev->of_node, "mdio"); + if (!mdio_node) { + /* backwards compatibility for DT lacking mdio subnode */ + mdio_node = of_node_get(dev->of_node); + } + error = of_mdiobus_register(priv->mii_bus, mdio_node); + of_node_put(mdio_node); if (error) goto out_free_bus; diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 0786eb0da391..7a25903e35c3 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2624,7 +2624,7 @@ static int sh_eth_change_mtu(struct net_device *ndev, int new_mtu) if (netif_running(ndev)) return -EBUSY; - ndev->mtu = new_mtu; + WRITE_ONCE(ndev->mtu, new_mtu); netdev_update_features(ndev); return 0; diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 755db89db909..e097ce3e69ea 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -1967,7 +1967,7 @@ static int rocker_port_change_mtu(struct net_device *dev, int new_mtu) rocker_port_stop(dev); netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu); - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu); if (err) diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index ecbe3994f2b1..12c8396b6942 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -1804,7 +1804,7 @@ static int sxgbe_set_features(struct net_device *dev, */ static int sxgbe_change_mtu(struct net_device *dev, int new_mtu) { - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); if (!netif_running(dev)) return 0; diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c index 551f890db90a..4ebd5ae23eca 100644 --- a/drivers/net/ethernet/sfc/efx_common.c +++ b/drivers/net/ethernet/sfc/efx_common.c @@ -302,7 +302,7 @@ int efx_change_mtu(struct net_device *net_dev, int new_mtu) efx_stop_all(efx); mutex_lock(&efx->mac_lock); - net_dev->mtu = new_mtu; + WRITE_ONCE(net_dev->mtu, new_mtu); efx_mac_reconfigure(efx, true); mutex_unlock(&efx->mac_lock); diff --git a/drivers/net/ethernet/sfc/falcon/efx.c 
b/drivers/net/ethernet/sfc/falcon/efx.c index 1cb32aedd89c..8925745f1c17 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -2125,7 +2125,7 @@ static int ef4_change_mtu(struct net_device *net_dev, int new_mtu) ef4_stop_all(efx); mutex_lock(&efx->mac_lock); - net_dev->mtu = new_mtu; + WRITE_ONCE(net_dev->mtu, new_mtu); ef4_mac_reconfigure(efx); mutex_unlock(&efx->mac_lock); diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c index 88e5bc347a44..cf195162e270 100644 --- a/drivers/net/ethernet/sfc/siena/efx_common.c +++ b/drivers/net/ethernet/sfc/siena/efx_common.c @@ -306,7 +306,7 @@ int efx_siena_change_mtu(struct net_device *net_dev, int new_mtu) efx_siena_stop_all(efx); mutex_lock(&efx->mac_lock); - net_dev->mtu = new_mtu; + WRITE_ONCE(net_dev->mtu, new_mtu); efx_siena_mac_reconfigure(efx, true); mutex_unlock(&efx->mac_lock); diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c index 82e8891a619a..9d140203e273 100644 --- a/drivers/net/ethernet/sfc/tc.c +++ b/drivers/net/ethernet/sfc/tc.c @@ -273,11 +273,10 @@ static int efx_tc_flower_parse_match(struct efx_nic *efx, match->value.ip_firstfrag = fm.key->flags & FLOW_DIS_FIRST_FRAG; match->mask.ip_firstfrag = true; } - if (fm.mask->flags & ~(FLOW_DIS_IS_FRAGMENT | FLOW_DIS_FIRST_FRAG)) { - NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported match on control.flags %#x", - fm.mask->flags); + if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT | + FLOW_DIS_FIRST_FRAG, + fm.mask->flags, extack)) return -EOPNOTSUPP; - } } if (dissector->used_keys & ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | diff --git a/drivers/net/ethernet/sis/Kconfig b/drivers/net/ethernet/sis/Kconfig index 775d76d9890e..7e498bdbca73 100644 --- a/drivers/net/ethernet/sis/Kconfig +++ b/drivers/net/ethernet/sis/Kconfig @@ -19,7 +19,7 @@ if NET_VENDOR_SIS config SIS900 tristate "SiS 900/7016 PCI Fast Ethernet Adapter support" - depends on PCI + depends on PCI && HAS_IOPORT select CRC32 select MII help @@ -35,7 +35,7 @@ config SIS900 config SIS190 tristate "SiS190/SiS191 gigabit ethernet support" - depends on PCI + depends on PCI && HAS_IOPORT select CRC32 select MII help diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index cb7fec226cab..85b850372efe 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -2273,7 +2273,7 @@ static int sis900_set_config(struct net_device *dev, struct ifmap *map) * (which seems to be different from the ifport(pcmcia) definition) */ switch(map->port){ case IF_PORT_UNKNOWN: /* use auto here */ - dev->if_port = map->port; + WRITE_ONCE(dev->if_port, map->port); /* we are going to change the media type, so the Link * will be temporary down and we need to reflect that * here. 
When the Link comes up again, it will be @@ -2294,7 +2294,7 @@ static int sis900_set_config(struct net_device *dev, struct ifmap *map) break; case IF_PORT_10BASET: /* 10BaseT */ - dev->if_port = map->port; + WRITE_ONCE(dev->if_port, map->port); /* we are going to change the media type, so the Link * will be temporary down and we need to reflect that @@ -2315,7 +2315,7 @@ static int sis900_set_config(struct net_device *dev, struct ifmap *map) case IF_PORT_100BASET: /* 100BaseT */ case IF_PORT_100BASETX: /* 100BaseTx */ - dev->if_port = map->port; + WRITE_ONCE(dev->if_port, map->port); /* we are going to change the media type, so the Link * will be temporary down and we need to reflect that diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig index 5f22a8a4d27b..13ce9086a9ca 100644 --- a/drivers/net/ethernet/smsc/Kconfig +++ b/drivers/net/ethernet/smsc/Kconfig @@ -54,7 +54,7 @@ config SMC91X config PCMCIA_SMC91C92 tristate "SMC 91Cxx PCMCIA support" - depends on PCMCIA + depends on PCMCIA && HAS_IOPORT select CRC32 select MII help diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c index 29bb19f42de9..86e3ec25df07 100644 --- a/drivers/net/ethernet/smsc/smc91c92_cs.c +++ b/drivers/net/ethernet/smsc/smc91c92_cs.c @@ -1595,7 +1595,7 @@ static int s9k_config(struct net_device *dev, struct ifmap *map) return -EOPNOTSUPP; else if (map->port > 2) return -EINVAL; - dev->if_port = map->port; + WRITE_ONCE(dev->if_port, map->port); netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]); smc_reset(dev); } diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h index 46eee747c699..45ef5ac0788a 100644 --- a/drivers/net/ethernet/smsc/smc91x.h +++ b/drivers/net/ethernet/smsc/smc91x.h @@ -156,8 +156,8 @@ static inline void mcf_outsw(void *a, unsigned char *p, int l) writew(*wp++, a); } -#define SMC_inw(a, r) _swapw(readw((a) + (r))) -#define SMC_outw(lp, v, a, r) writew(_swapw(v), (a) + (r)) +#define SMC_inw(a, r) ioread16be((a) + (r)) +#define SMC_outw(lp, v, a, r) iowrite16be(v, (a) + (r)) #define SMC_insw(a, r, p, l) mcf_insw(a + r, p, l) #define SMC_outsw(a, r, p, l) mcf_outsw(a + r, p, l) diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 4ec61f1ee71a..05cc07b8f48c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -142,6 +142,18 @@ config DWMAC_ROCKCHIP This selects the Rockchip RK3288 SoC glue layer support for the stmmac device driver. +config DWMAC_RZN1 + tristate "Renesas RZ/N1 dwmac support" + default ARCH_RZN1 + depends on OF && (ARCH_RZN1 || COMPILE_TEST) + select PCS_RZN1_MIIC + help + Support for Ethernet controller on Renesas RZ/N1 SoC family. + + This selects the Renesas RZ/N1 SoC glue layer support for + the stmmac device driver. This support can make use of a custom MII + converter PCS device. 
+ config DWMAC_SOCFPGA tristate "SOCFPGA dwmac support" default ARCH_INTEL_SOCFPGA diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index 26cad4344701..c2f0e91f6bf8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -21,6 +21,7 @@ obj-$(CONFIG_DWMAC_MEDIATEK) += dwmac-mediatek.o obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o obj-$(CONFIG_DWMAC_QCOM_ETHQOS) += dwmac-qcom-ethqos.o obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o +obj-$(CONFIG_DWMAC_RZN1) += dwmac-rzn1.o obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o obj-$(CONFIG_DWMAC_STARFIVE) += dwmac-starfive.o obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 3b7d4ac1e7be..9cd62b2110a1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -594,7 +594,7 @@ struct mac_device_info { const struct stmmac_mmc_ops *mmc; const struct stmmac_est_ops *est; struct dw_xpcs *xpcs; - struct phylink_pcs *lynx_pcs; /* Lynx external PCS */ + struct phylink_pcs *phylink_pcs; struct mii_regs mii; /* MII register Addresses */ struct mac_link link; void __iomem *pcsr; /* vpointer to device CSRs */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index 281687d7083b..4ba15873d5b1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -171,6 +171,9 @@ static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed) switch (gmac->phy_mode) { case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: div = get_clk_div_rgmii(gmac, speed); clk_bits = NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) | NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id); @@ -410,6 +413,9 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) val |= NSS_COMMON_GMAC_CTL_CSYS_REQ; switch (gmac->phy_mode) { case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: val |= NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL; break; case PHY_INTERFACE_MODE_SGMII: @@ -425,6 +431,9 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) val &= ~(1 << NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id)); switch (gmac->phy_mode) { case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) << NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id); break; @@ -442,6 +451,9 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) val |= NSS_COMMON_CLK_GATE_PTP_EN(gmac->id); switch (gmac->phy_mode) { case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: val |= NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) | NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id); break; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 382e8de1255d..7ae04d8d291c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -12,10 +12,8 @@ #include <linux/clk.h> #include <linux/phy.h> #include <linux/of_net.h> -#include <linux/gpio.h> 
#include <linux/module.h> #include <linux/of.h> -#include <linux/of_gpio.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> #include <linux/delay.h> diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c new file mode 100644 index 000000000000..848cf3c01f4a --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2024 Schneider-Electric + * + * Clément Léger <[email protected]> + */ + +#include <linux/of.h> +#include <linux/pcs-rzn1-miic.h> +#include <linux/phylink.h> +#include <linux/platform_device.h> + +#include "stmmac_platform.h" +#include "stmmac.h" + +static int rzn1_dwmac_pcs_init(struct stmmac_priv *priv) +{ + struct device_node *np = priv->device->of_node; + struct device_node *pcs_node; + struct phylink_pcs *pcs; + + pcs_node = of_parse_phandle(np, "pcs-handle", 0); + + if (pcs_node) { + pcs = miic_create(priv->device, pcs_node); + of_node_put(pcs_node); + if (IS_ERR(pcs)) + return PTR_ERR(pcs); + + priv->hw->phylink_pcs = pcs; + } + + return 0; +} + +static void rzn1_dwmac_pcs_exit(struct stmmac_priv *priv) +{ + if (priv->hw->phylink_pcs) + miic_destroy(priv->hw->phylink_pcs); +} + +static int rzn1_dwmac_probe(struct platform_device *pdev) +{ + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; + struct device *dev = &pdev->dev; + int ret; + + ret = stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return ret; + + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + + plat_dat->bsp_priv = plat_dat; + plat_dat->pcs_init = rzn1_dwmac_pcs_init; + plat_dat->pcs_exit = rzn1_dwmac_pcs_exit; + + ret = stmmac_dvr_probe(dev, plat_dat, &stmmac_res); + if (ret) + return ret; + + return 0; +} + +static const struct of_device_id rzn1_dwmac_match[] = { + { .compatible = "renesas,rzn1-gmac" }, + { } +}; +MODULE_DEVICE_TABLE(of, rzn1_dwmac_match); + +static struct platform_driver rzn1_dwmac_driver = { + .probe = rzn1_dwmac_probe, + .remove_new = stmmac_pltfr_remove, + .driver = { + .name = "rzn1-dwmac", + .of_match_table = rzn1_dwmac_match, + }, +}; +module_platform_driver(rzn1_dwmac_driver); + +MODULE_AUTHOR("Clément Léger <[email protected]>"); +MODULE_DESCRIPTION("Renesas RZN1 DWMAC specific glue layer"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 68f85e4605cb..b3d45f9dfb55 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -379,6 +379,56 @@ static int socfpga_gen10_set_phy_mode(struct socfpga_dwmac *dwmac) return 0; } +static int socfpga_dwmac_pcs_init(struct stmmac_priv *priv) +{ + struct socfpga_dwmac *dwmac = priv->plat->bsp_priv; + struct regmap_config pcs_regmap_cfg = { + .reg_bits = 16, + .val_bits = 16, + .reg_shift = REGMAP_UPSHIFT(1), + }; + struct mdio_regmap_config mrc; + struct regmap *pcs_regmap; + struct phylink_pcs *pcs; + struct mii_bus *pcs_bus; + + if (!dwmac->tse_pcs_base) + return 0; + + pcs_regmap = devm_regmap_init_mmio(priv->device, dwmac->tse_pcs_base, + &pcs_regmap_cfg); + if (IS_ERR(pcs_regmap)) + return PTR_ERR(pcs_regmap); + + memset(&mrc, 0, sizeof(mrc)); + mrc.regmap = pcs_regmap; + mrc.parent = priv->device; + mrc.valid_addr = 0x0; + mrc.autoscan = false; + + /* Can't use ndev->name here because it will 
not have been initialised, + * and in any case, the user can rename network interfaces at runtime. + */ + snprintf(mrc.name, MII_BUS_ID_SIZE, "%s-pcs-mii", + dev_name(priv->device)); + pcs_bus = devm_mdio_regmap_register(priv->device, &mrc); + if (IS_ERR(pcs_bus)) + return PTR_ERR(pcs_bus); + + pcs = lynx_pcs_create_mdiodev(pcs_bus, 0); + if (IS_ERR(pcs)) + return PTR_ERR(pcs); + + priv->hw->phylink_pcs = pcs; + return 0; +} + +static void socfpga_dwmac_pcs_exit(struct stmmac_priv *priv) +{ + if (priv->hw->phylink_pcs) + lynx_pcs_destroy(priv->hw->phylink_pcs); +} + static int socfpga_dwmac_probe(struct platform_device *pdev) { struct plat_stmmacenet_data *plat_dat; @@ -426,6 +476,8 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) dwmac->ops = ops; plat_dat->bsp_priv = dwmac; plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed; + plat_dat->pcs_init = socfpga_dwmac_pcs_init; + plat_dat->pcs_exit = socfpga_dwmac_pcs_exit; ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) @@ -444,48 +496,6 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) if (ret) goto err_dvr_remove; - /* Create a regmap for the PCS so that it can be used by the PCS driver, - * if we have such a PCS - */ - if (dwmac->tse_pcs_base) { - struct regmap_config pcs_regmap_cfg; - struct mdio_regmap_config mrc; - struct regmap *pcs_regmap; - struct mii_bus *pcs_bus; - - memset(&pcs_regmap_cfg, 0, sizeof(pcs_regmap_cfg)); - memset(&mrc, 0, sizeof(mrc)); - - pcs_regmap_cfg.reg_bits = 16; - pcs_regmap_cfg.val_bits = 16; - pcs_regmap_cfg.reg_shift = REGMAP_UPSHIFT(1); - - pcs_regmap = devm_regmap_init_mmio(&pdev->dev, dwmac->tse_pcs_base, - &pcs_regmap_cfg); - if (IS_ERR(pcs_regmap)) { - ret = PTR_ERR(pcs_regmap); - goto err_dvr_remove; - } - - mrc.regmap = pcs_regmap; - mrc.parent = &pdev->dev; - mrc.valid_addr = 0x0; - mrc.autoscan = false; - - snprintf(mrc.name, MII_BUS_ID_SIZE, "%s-pcs-mii", ndev->name); - pcs_bus = devm_mdio_regmap_register(&pdev->dev, &mrc); - if (IS_ERR(pcs_bus)) { - ret = PTR_ERR(pcs_bus); - goto err_dvr_remove; - } - - stpriv->hw->lynx_pcs = lynx_pcs_create_mdiodev(pcs_bus, 0); - if (IS_ERR(stpriv->hw->lynx_pcs)) { - ret = PTR_ERR(stpriv->hw->lynx_pcs); - goto err_dvr_remove; - } - } - return 0; err_dvr_remove: @@ -494,17 +504,6 @@ err_dvr_remove: return ret; } -static void socfpga_dwmac_remove(struct platform_device *pdev) -{ - struct net_device *ndev = platform_get_drvdata(pdev); - struct stmmac_priv *priv = netdev_priv(ndev); - struct phylink_pcs *pcs = priv->hw->lynx_pcs; - - stmmac_pltfr_remove(pdev); - - lynx_pcs_destroy(pcs); -} - #ifdef CONFIG_PM_SLEEP static int socfpga_dwmac_resume(struct device *dev) { @@ -576,7 +575,7 @@ MODULE_DEVICE_TABLE(of, socfpga_dwmac_match); static struct platform_driver socfpga_dwmac_driver = { .probe = socfpga_dwmac_probe, - .remove_new = socfpga_dwmac_remove, + .remove_new = stmmac_pltfr_remove, .driver = { .name = "socfpga-dwmac", .pm = &socfpga_dwmac_pm_ops, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index a38226d7cc6a..b25774d69195 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -68,7 +68,7 @@ static void dwmac4_core_init(struct mac_device_info *hw, init_waitqueue_head(&priv->tstamp_busy_wait); } -static void dwmac4_phylink_get_caps(struct stmmac_priv *priv) +static void dwmac4_update_caps(struct stmmac_priv *priv) { if (priv->plat->tx_queues_to_use > 1) priv->hw->link.caps &= 
~(MAC_10HD | MAC_100HD | MAC_1000HD); @@ -1190,7 +1190,7 @@ static void dwmac4_set_hw_vlan_mode(struct mac_device_info *hw) const struct stmmac_ops dwmac4_ops = { .core_init = dwmac4_core_init, - .phylink_get_caps = dwmac4_phylink_get_caps, + .update_caps = dwmac4_update_caps, .set_mac = stmmac_set_mac, .rx_ipc = dwmac4_rx_ipc_enable, .rx_queue_enable = dwmac4_rx_queue_enable, @@ -1235,7 +1235,7 @@ const struct stmmac_ops dwmac4_ops = { const struct stmmac_ops dwmac410_ops = { .core_init = dwmac4_core_init, - .phylink_get_caps = dwmac4_phylink_get_caps, + .update_caps = dwmac4_update_caps, .set_mac = stmmac_dwmac4_set_mac, .rx_ipc = dwmac4_rx_ipc_enable, .rx_queue_enable = dwmac4_rx_queue_enable, @@ -1284,7 +1284,7 @@ const struct stmmac_ops dwmac410_ops = { const struct stmmac_ops dwmac510_ops = { .core_init = dwmac4_core_init, - .phylink_get_caps = dwmac4_phylink_get_caps, + .update_caps = dwmac4_update_caps, .set_mac = stmmac_dwmac4_set_mac, .rx_ipc = dwmac4_rx_ipc_enable, .rx_queue_enable = dwmac4_rx_queue_enable, diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 7be04b54738b..90384db228b5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -308,8 +308,8 @@ struct stmmac_est; struct stmmac_ops { /* MAC core initialization */ void (*core_init)(struct mac_device_info *hw, struct net_device *dev); - /* Get phylink capabilities */ - void (*phylink_get_caps)(struct stmmac_priv *priv); + /* Update MAC capabilities */ + void (*update_caps)(struct stmmac_priv *priv); /* Enable the MAC RX/TX */ void (*set_mac)(void __iomem *ioaddr, bool enable); /* Enable and verify that the IPC module is supported */ @@ -430,8 +430,8 @@ struct stmmac_ops { #define stmmac_core_init(__priv, __args...) \ stmmac_do_void_callback(__priv, mac, core_init, __args) -#define stmmac_mac_phylink_get_caps(__priv) \ - stmmac_do_void_callback(__priv, mac, phylink_get_caps, __priv) +#define stmmac_mac_update_caps(__priv) \ + stmmac_do_void_callback(__priv, mac, update_caps, __priv) #define stmmac_mac_set(__priv, __args...) \ stmmac_do_void_callback(__priv, mac, set_mac, __args) #define stmmac_rx_ipc(__priv, __args...) 
\ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index dddcaa9220cc..b23b920eedb1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -221,6 +221,20 @@ struct stmmac_dma_conf { unsigned int dma_tx_size; }; +#define EST_GCL 1024 +struct stmmac_est { + int enable; + u32 btr_reserve[2]; + u32 btr_offset[2]; + u32 btr[2]; + u32 ctr[2]; + u32 ter; + u32 gcl_unaligned[EST_GCL]; + u32 gcl[EST_GCL]; + u32 gcl_size; + u32 max_sdu[MTL_MAX_TX_QUEUES]; +}; + struct stmmac_priv { /* Frequently used values are kept adjacent for cache effect */ u32 tx_coal_frames[MTL_MAX_TX_QUEUES]; @@ -261,6 +275,9 @@ struct stmmac_priv { struct stmmac_extra_stats xstats ____cacheline_aligned_in_smp; struct stmmac_safety_stats sstats; struct plat_stmmacenet_data *plat; + /* Protect est parameters */ + struct mutex est_lock; + struct stmmac_est *est; struct dma_features dma_cap; struct stmmac_counters mmc; int hw_cap_support; @@ -360,7 +377,8 @@ enum stmmac_state { int stmmac_mdio_unregister(struct net_device *ndev); int stmmac_mdio_register(struct net_device *ndev); int stmmac_mdio_reset(struct mii_bus *mii); -int stmmac_xpcs_setup(struct mii_bus *mii); +int stmmac_pcs_setup(struct net_device *ndev); +void stmmac_pcs_clean(struct net_device *ndev); void stmmac_set_ethtool_ops(struct net_device *netdev); int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 7c6fb14b5555..2e9a2da605f6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -936,6 +936,22 @@ static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex) priv->pause, tx_cnt); } +static unsigned long stmmac_mac_get_caps(struct phylink_config *config, + phy_interface_t interface) +{ + struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); + + /* Refresh the MAC-specific capabilities */ + stmmac_mac_update_caps(priv); + + config->mac_capabilities = priv->hw->link.caps; + + if (priv->plat->max_speed) + phylink_limit_mac_speed(config, priv->plat->max_speed); + + return config->mac_capabilities; +} + static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config, phy_interface_t interface) { @@ -944,10 +960,7 @@ static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config, if (priv->hw->xpcs) return &priv->hw->xpcs->pcs; - if (priv->hw->lynx_pcs) - return priv->hw->lynx_pcs; - - return NULL; + return priv->hw->phylink_pcs; } static void stmmac_mac_config(struct phylink_config *config, unsigned int mode, @@ -1105,6 +1118,7 @@ static void stmmac_mac_link_up(struct phylink_config *config, } static const struct phylink_mac_ops stmmac_phylink_mac_ops = { + .mac_get_caps = stmmac_mac_get_caps, .mac_select_pcs = stmmac_mac_select_pcs, .mac_config = stmmac_mac_config, .mac_link_down = stmmac_mac_link_down, @@ -1204,12 +1218,14 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) int mode = priv->plat->phy_interface; struct fwnode_handle *fwnode; struct phylink *phylink; - int max_speed; priv->phylink_config.dev = &priv->dev->dev; priv->phylink_config.type = PHYLINK_NETDEV; priv->phylink_config.mac_managed_pm = true; + /* Stmmac always requires an RX clock for hardware initialization */ + priv->phylink_config.mac_requires_rxc = true; + mdio_bus_data = priv->plat->mdio_bus_data; if 
(mdio_bus_data) priv->phylink_config.ovr_an_inband = @@ -1225,15 +1241,6 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) xpcs_get_interfaces(priv->hw->xpcs, priv->phylink_config.supported_interfaces); - /* Get the MAC specific capabilities */ - stmmac_mac_phylink_get_caps(priv); - - priv->phylink_config.mac_capabilities = priv->hw->link.caps; - - max_speed = priv->plat->max_speed; - if (max_speed) - phylink_limit_mac_speed(&priv->phylink_config, max_speed); - fwnode = priv->plat->port_node; if (!fwnode) fwnode = dev_fwnode(priv->device); @@ -2491,9 +2498,9 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) if (!xsk_tx_peek_desc(pool, &xdp_desc)) break; - if (priv->plat->est && priv->plat->est->enable && - priv->plat->est->max_sdu[queue] && - xdp_desc.len > priv->plat->est->max_sdu[queue]) { + if (priv->est && priv->est->enable && + priv->est->max_sdu[queue] && + xdp_desc.len > priv->est->max_sdu[queue]) { priv->xstats.max_sdu_txq_drop[queue]++; continue; } @@ -3396,6 +3403,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) u32 chan; int ret; + /* Make sure RX clock is enabled */ + if (priv->hw->phylink_pcs) + phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs); + /* DMA initialization and SW reset */ ret = stmmac_init_dma_engine(priv); if (ret < 0) { @@ -3945,8 +3956,7 @@ static int __stmmac_open(struct net_device *dev, if (priv->hw->pcs != STMMAC_PCS_TBI && priv->hw->pcs != STMMAC_PCS_RTBI && (!priv->hw->xpcs || - xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) && - !priv->hw->lynx_pcs) { + xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) { ret = stmmac_init_phy(dev); if (ret) { netdev_err(priv->dev, @@ -4528,9 +4538,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) return stmmac_tso_xmit(skb, dev); } - if (priv->plat->est && priv->plat->est->enable && - priv->plat->est->max_sdu[queue] && - skb->len > priv->plat->est->max_sdu[queue]){ + if (priv->est && priv->est->enable && + priv->est->max_sdu[queue] && + skb->len > priv->est->max_sdu[queue]){ priv->xstats.max_sdu_txq_drop[queue]++; goto max_sdu_err; } @@ -4909,9 +4919,9 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv)) return STMMAC_XDP_CONSUMED; - if (priv->plat->est && priv->plat->est->enable && - priv->plat->est->max_sdu[queue] && - xdpf->len > priv->plat->est->max_sdu[queue]) { + if (priv->est && priv->est->enable && + priv->est->max_sdu[queue] && + xdpf->len > priv->est->max_sdu[queue]) { priv->xstats.max_sdu_txq_drop[queue]++; return STMMAC_XDP_CONSUMED; } @@ -5094,9 +5104,8 @@ static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch, unsigned int datasize = xdp->data_end - xdp->data; struct sk_buff *skb; - skb = __napi_alloc_skb(&ch->rxtx_napi, - xdp->data_end - xdp->data_hard_start, - GFP_ATOMIC | __GFP_NOWARN); + skb = napi_alloc_skb(&ch->rxtx_napi, + xdp->data_end - xdp->data_hard_start); if (unlikely(!skb)) return NULL; @@ -5900,7 +5909,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu) stmmac_set_rx_mode(dev); } - dev->mtu = mtu; + WRITE_ONCE(dev->mtu, mtu); netdev_update_features(dev); return 0; @@ -7327,7 +7336,6 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt) { struct stmmac_priv *priv = netdev_priv(dev); int ret = 0, i; - int max_speed; if (netif_running(dev)) stmmac_release(dev); @@ -7341,14 +7349,6 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 
tx_cnt) priv->rss.table[i] = ethtool_rxfh_indir_default(i, rx_cnt); - stmmac_mac_phylink_get_caps(priv); - - priv->phylink_config.mac_capabilities = priv->hw->link.caps; - - max_speed = priv->plat->max_speed; - if (max_speed) - phylink_limit_mac_speed(&priv->phylink_config, max_speed); - stmmac_napi_add(dev); if (netif_running(dev)) @@ -7754,11 +7754,9 @@ int stmmac_dvr_probe(struct device *device, if (priv->plat->speed_mode_2500) priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv); - if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) { - ret = stmmac_xpcs_setup(priv->mii); - if (ret) - goto error_xpcs_setup; - } + ret = stmmac_pcs_setup(ndev); + if (ret) + goto error_pcs_setup; ret = stmmac_phy_setup(priv); if (ret) { @@ -7789,8 +7787,9 @@ int stmmac_dvr_probe(struct device *device, error_netdev_register: phylink_destroy(priv->phylink); -error_xpcs_setup: error_phy_setup: + stmmac_pcs_clean(ndev); +error_pcs_setup: if (priv->hw->pcs != STMMAC_PCS_TBI && priv->hw->pcs != STMMAC_PCS_RTBI) stmmac_mdio_unregister(ndev); @@ -7832,6 +7831,9 @@ void stmmac_dvr_remove(struct device *dev) if (priv->plat->stmmac_rst) reset_control_assert(priv->plat->stmmac_rst); reset_control_assert(priv->plat->stmmac_ahb_rst); + + stmmac_pcs_clean(ndev); + if (priv->hw->pcs != STMMAC_PCS_TBI && priv->hw->pcs != STMMAC_PCS_RTBI) stmmac_mdio_unregister(ndev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 0542cfd1817e..aa43117134d3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -495,34 +495,57 @@ int stmmac_mdio_reset(struct mii_bus *bus) return 0; } -int stmmac_xpcs_setup(struct mii_bus *bus) +int stmmac_pcs_setup(struct net_device *ndev) { - struct net_device *ndev = bus->priv; + struct dw_xpcs *xpcs = NULL; struct stmmac_priv *priv; - struct dw_xpcs *xpcs; + int ret = -ENODEV; int mode, addr; priv = netdev_priv(ndev); mode = priv->plat->phy_interface; - /* Try to probe the XPCS by scanning all addresses. 
*/ - for (addr = 0; addr < PHY_MAX_ADDR; addr++) { - xpcs = xpcs_create_mdiodev(bus, addr, mode); - if (IS_ERR(xpcs)) - continue; - - priv->hw->xpcs = xpcs; - break; + if (priv->plat->pcs_init) { + ret = priv->plat->pcs_init(priv); + } else if (priv->plat->mdio_bus_data && + priv->plat->mdio_bus_data->has_xpcs) { + /* Try to probe the XPCS by scanning all addresses */ + for (addr = 0; addr < PHY_MAX_ADDR; addr++) { + xpcs = xpcs_create_mdiodev(priv->mii, addr, mode); + if (IS_ERR(xpcs)) + continue; + + ret = 0; + break; + } + } else { + return 0; } - if (!priv->hw->xpcs) { + if (ret) { dev_warn(priv->device, "No xPCS found\n"); - return -ENODEV; + return ret; } + priv->hw->xpcs = xpcs; + return 0; } +void stmmac_pcs_clean(struct net_device *ndev) +{ + struct stmmac_priv *priv = netdev_priv(ndev); + + if (priv->plat->pcs_exit) + priv->plat->pcs_exit(priv); + + if (!priv->hw->xpcs) + return; + + xpcs_destroy(priv->hw->xpcs); + priv->hw->xpcs = NULL; +} + /** * stmmac_mdio_register * @ndev: net device structure @@ -679,9 +702,6 @@ int stmmac_mdio_unregister(struct net_device *ndev) if (!priv->mii) return 0; - if (priv->hw->xpcs) - xpcs_destroy(priv->hw->xpcs); - mdiobus_unregister(priv->mii); priv->mii->priv = NULL; mdiobus_free(priv->mii); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index e04830a3a1fb..a6b1de9a251d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -68,13 +68,13 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) nsec = reminder; /* If EST is enabled, disabled it before adjust ptp time. */ - if (priv->plat->est && priv->plat->est->enable) { + if (priv->est && priv->est->enable) { est_rst = true; - mutex_lock(&priv->plat->est->lock); - priv->plat->est->enable = false; - stmmac_est_configure(priv, priv, priv->plat->est, + mutex_lock(&priv->est_lock); + priv->est->enable = false; + stmmac_est_configure(priv, priv, priv->est, priv->plat->clk_ptp_rate); - mutex_unlock(&priv->plat->est->lock); + mutex_unlock(&priv->est_lock); } write_lock_irqsave(&priv->ptp_lock, flags); @@ -87,24 +87,24 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) ktime_t current_time_ns, basetime; u64 cycle_time; - mutex_lock(&priv->plat->est->lock); + mutex_lock(&priv->est_lock); priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time); current_time_ns = timespec64_to_ktime(current_time); - time.tv_nsec = priv->plat->est->btr_reserve[0]; - time.tv_sec = priv->plat->est->btr_reserve[1]; + time.tv_nsec = priv->est->btr_reserve[0]; + time.tv_sec = priv->est->btr_reserve[1]; basetime = timespec64_to_ktime(time); - cycle_time = (u64)priv->plat->est->ctr[1] * NSEC_PER_SEC + - priv->plat->est->ctr[0]; + cycle_time = (u64)priv->est->ctr[1] * NSEC_PER_SEC + + priv->est->ctr[0]; time = stmmac_calc_tas_basetime(basetime, current_time_ns, cycle_time); - priv->plat->est->btr[0] = (u32)time.tv_nsec; - priv->plat->est->btr[1] = (u32)time.tv_sec; - priv->plat->est->enable = true; - ret = stmmac_est_configure(priv, priv, priv->plat->est, + priv->est->btr[0] = (u32)time.tv_nsec; + priv->est->btr[1] = (u32)time.tv_sec; + priv->est->enable = true; + ret = stmmac_est_configure(priv, priv, priv->est, priv->plat->clk_ptp_rate); - mutex_unlock(&priv->plat->est->lock); + mutex_unlock(&priv->est_lock); if (ret) netdev_err(priv->dev, "failed to configure EST\n"); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c 
b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index cce00719937d..222540b55480 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -918,7 +918,6 @@ struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time, static void tc_taprio_map_maxsdu_txq(struct stmmac_priv *priv, struct tc_taprio_qopt_offload *qopt) { - struct plat_stmmacenet_data *plat = priv->plat; u32 num_tc = qopt->mqprio.qopt.num_tc; u32 offset, count, i, j; @@ -933,7 +932,7 @@ static void tc_taprio_map_maxsdu_txq(struct stmmac_priv *priv, count = qopt->mqprio.qopt.count[i]; for (j = offset; j < offset + count; j++) - plat->est->max_sdu[j] = qopt->max_sdu[i] + ETH_HLEN - ETH_TLEN; + priv->est->max_sdu[j] = qopt->max_sdu[i] + ETH_HLEN - ETH_TLEN; } } @@ -941,7 +940,6 @@ static int tc_taprio_configure(struct stmmac_priv *priv, struct tc_taprio_qopt_offload *qopt) { u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep; - struct plat_stmmacenet_data *plat = priv->plat; struct timespec64 time, current_time, qopt_time; ktime_t current_time_ns; bool fpe = false; @@ -998,23 +996,25 @@ static int tc_taprio_configure(struct stmmac_priv *priv, if (qopt->cycle_time_extension >= BIT(wid + 7)) return -ERANGE; - if (!plat->est) { - plat->est = devm_kzalloc(priv->device, sizeof(*plat->est), + if (!priv->est) { + priv->est = devm_kzalloc(priv->device, sizeof(*priv->est), GFP_KERNEL); - if (!plat->est) + if (!priv->est) return -ENOMEM; - mutex_init(&priv->plat->est->lock); + mutex_init(&priv->est_lock); } else { - memset(plat->est, 0, sizeof(*plat->est)); + mutex_lock(&priv->est_lock); + memset(priv->est, 0, sizeof(*priv->est)); + mutex_unlock(&priv->est_lock); } size = qopt->num_entries; - mutex_lock(&priv->plat->est->lock); - priv->plat->est->gcl_size = size; - priv->plat->est->enable = qopt->cmd == TAPRIO_CMD_REPLACE; - mutex_unlock(&priv->plat->est->lock); + mutex_lock(&priv->est_lock); + priv->est->gcl_size = size; + priv->est->enable = qopt->cmd == TAPRIO_CMD_REPLACE; + mutex_unlock(&priv->est_lock); for (i = 0; i < size; i++) { s64 delta_ns = qopt->entries[i].interval; @@ -1042,33 +1042,33 @@ static int tc_taprio_configure(struct stmmac_priv *priv, return -EOPNOTSUPP; } - priv->plat->est->gcl[i] = delta_ns | (gates << wid); + priv->est->gcl[i] = delta_ns | (gates << wid); } - mutex_lock(&priv->plat->est->lock); + mutex_lock(&priv->est_lock); /* Adjust for real system time */ priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time); current_time_ns = timespec64_to_ktime(current_time); time = stmmac_calc_tas_basetime(qopt->base_time, current_time_ns, qopt->cycle_time); - priv->plat->est->btr[0] = (u32)time.tv_nsec; - priv->plat->est->btr[1] = (u32)time.tv_sec; + priv->est->btr[0] = (u32)time.tv_nsec; + priv->est->btr[1] = (u32)time.tv_sec; qopt_time = ktime_to_timespec64(qopt->base_time); - priv->plat->est->btr_reserve[0] = (u32)qopt_time.tv_nsec; - priv->plat->est->btr_reserve[1] = (u32)qopt_time.tv_sec; + priv->est->btr_reserve[0] = (u32)qopt_time.tv_nsec; + priv->est->btr_reserve[1] = (u32)qopt_time.tv_sec; ctr = qopt->cycle_time; - priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC); - priv->plat->est->ctr[1] = (u32)ctr; + priv->est->ctr[0] = do_div(ctr, NSEC_PER_SEC); + priv->est->ctr[1] = (u32)ctr; - priv->plat->est->ter = qopt->cycle_time_extension; + priv->est->ter = qopt->cycle_time_extension; tc_taprio_map_maxsdu_txq(priv, qopt); if (fpe && !priv->dma_cap.fpesel) { - mutex_unlock(&priv->plat->est->lock); + mutex_unlock(&priv->est_lock); 
return -EOPNOTSUPP; } @@ -1077,9 +1077,9 @@ static int tc_taprio_configure(struct stmmac_priv *priv, */ priv->plat->fpe_cfg->enable = fpe; - ret = stmmac_est_configure(priv, priv, priv->plat->est, + ret = stmmac_est_configure(priv, priv, priv->est, priv->plat->clk_ptp_rate); - mutex_unlock(&priv->plat->est->lock); + mutex_unlock(&priv->est_lock); if (ret) { netdev_err(priv->dev, "failed to configure EST\n"); goto disable; @@ -1095,17 +1095,17 @@ static int tc_taprio_configure(struct stmmac_priv *priv, return 0; disable: - if (priv->plat->est) { - mutex_lock(&priv->plat->est->lock); - priv->plat->est->enable = false; - stmmac_est_configure(priv, priv, priv->plat->est, + if (priv->est) { + mutex_lock(&priv->est_lock); + priv->est->enable = false; + stmmac_est_configure(priv, priv, priv->est, priv->plat->clk_ptp_rate); /* Reset taprio status */ for (i = 0; i < priv->plat->tx_queues_to_use; i++) { priv->xstats.max_sdu_txq_drop[i] = 0; priv->xstats.mtl_est_txq_hlbf[i] = 0; } - mutex_unlock(&priv->plat->est->lock); + mutex_unlock(&priv->est_lock); } priv->plat->fpe_cfg->enable = false; diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index bfb903506367..b8948d5b779a 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -73,6 +73,7 @@ #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> +#include <linux/skbuff_ref.h> #include <linux/ethtool.h> #include <linux/crc32.h> #include <linux/random.h> @@ -3803,7 +3804,7 @@ static int cas_change_mtu(struct net_device *dev, int new_mtu) { struct cas *cp = netdev_priv(dev); - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); if (!netif_running(dev) || !netif_device_present(dev)) return 0; diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index f68aa813d4fb..41a27ae58ced 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -6751,7 +6751,7 @@ static int niu_change_mtu(struct net_device *dev, int new_mtu) orig_jumbo = (dev->mtu > ETH_DATA_LEN); new_jumbo = (new_mtu > ETH_DATA_LEN); - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); if (!netif_running(dev) || (orig_jumbo == new_jumbo)) diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index 9bd1df8308d2..3e5f9b17c777 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -949,17 +949,6 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -#ifdef CONFIG_NET_POLL_CONTROLLER -static void gem_poll_controller(struct net_device *dev) -{ - struct gem *gp = netdev_priv(dev); - - disable_irq(gp->pdev->irq); - gem_interrupt(gp->pdev->irq, dev); - enable_irq(gp->pdev->irq); -} -#endif - static void gem_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct gem *gp = netdev_priv(dev); @@ -2499,7 +2488,7 @@ static int gem_change_mtu(struct net_device *dev, int new_mtu) { struct gem *gp = netdev_priv(dev); - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); /* We'll just catch it later when the device is up'd or resumed */ if (!netif_running(dev) || !netif_device_present(dev)) @@ -2839,9 +2828,6 @@ static const struct net_device_ops gem_netdev_ops = { .ndo_change_mtu = gem_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = gem_set_mac_address, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = gem_poll_controller, -#endif }; static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) diff --git 
a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c index 36b948820c1e..d1793b6154c7 100644 --- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c @@ -823,7 +823,7 @@ static int xlgmac_change_mtu(struct net_device *netdev, int mtu) return ret; pdata->rx_buf_size = ret; - netdev->mtu = mtu; + WRITE_ONCE(netdev->mtu, mtu); xlgmac_restart_dev(pdata); diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index ca409515ead5..ede5f7890fb4 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -756,7 +756,7 @@ static int bdx_change_mtu(struct net_device *ndev, int new_mtu) { ENTER; - ndev->mtu = new_mtu; + WRITE_ONCE(ndev->mtu, new_mtu); if (netif_running(ndev)) { bdx_close(ndev); bdx_open(ndev); diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 1530d13984d4..1729eb0e0b41 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -167,7 +167,7 @@ config TI_KEYSTONE_NETCP_ETHSS config TLAN tristate "TI ThunderLAN support" - depends on (PCI || EISA) + depends on (PCI || EISA) && HAS_IOPORT help If you have a PCI Ethernet network card based on the ThunderLAN chip which is supported by this driver, say Y here. @@ -198,6 +198,21 @@ config TI_ICSSG_PRUETH to support the Ethernet operation. Currently, it supports Ethernet with 1G and 100M link speed. +config TI_ICSSG_PRUETH_SR1 + tristate "TI Gigabit PRU SR1.0 Ethernet driver" + select PHYLIB + select TI_ICSS_IEP + select TI_K3_CPPI_DESC_POOL + depends on PRU_REMOTEPROC + depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER + help + Support dual Gigabit Ethernet ports over the ICSSG PRU Subsystem. + This subsystem is available on the AM65 SR1.0 platform. + + This driver requires firmware binaries which will run on the PRUs + to support the Ethernet operation. Currently, it supports Ethernet + with 1G, 100M and 10M link speed. 
+ config TI_ICSS_IEP tristate "TI PRU ICSS IEP driver" depends on PTP_1588_CLOCK_OPTIONAL diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile index d8590304f3df..6e086b4c0384 100644 --- a/drivers/net/ethernet/ti/Makefile +++ b/drivers/net/ethernet/ti/Makefile @@ -33,10 +33,19 @@ obj-$(CONFIG_TI_K3_AM65_CPTS) += am65-cpts.o obj-$(CONFIG_TI_ICSSG_PRUETH) += icssg-prueth.o icssg-prueth-y := icssg/icssg_prueth.o \ + icssg/icssg_common.o \ icssg/icssg_classifier.o \ icssg/icssg_queues.o \ icssg/icssg_config.o \ icssg/icssg_mii_cfg.o \ icssg/icssg_stats.o \ icssg/icssg_ethtool.o +obj-$(CONFIG_TI_ICSSG_PRUETH_SR1) += icssg-prueth-sr1.o +icssg-prueth-sr1-y := icssg/icssg_prueth_sr1.o \ + icssg/icssg_common.o \ + icssg/icssg_classifier.o \ + icssg/icssg_config.o \ + icssg/icssg_mii_cfg.o \ + icssg/icssg_stats.o \ + icssg/icssg_ethtool.o obj-$(CONFIG_TI_ICSS_IEP) += icssg/icss_iep.o diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c index d6ce2c9f0a8d..a1d0935d1ebe 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c +++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c @@ -695,6 +695,17 @@ static int am65_cpsw_get_ethtool_ts_info(struct net_device *ndev, struct ethtool_ts_info *info) { struct am65_cpsw_common *common = am65_ndev_to_common(ndev); + unsigned int ptp_v2_filter; + + ptp_v2_filter = BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ); if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS)) return ethtool_op_get_ts_info(ndev, info); @@ -708,7 +719,7 @@ static int am65_cpsw_get_ethtool_ts_info(struct net_device *ndev, SOF_TIMESTAMPING_RAW_HARDWARE; info->phc_index = am65_cpts_phc_index(common->cpts); info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); - info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL); + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | ptp_v2_filter; return 0; } diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 1d00e21808c1..4e50b3792888 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -5,6 +5,7 @@ * */ +#include <linux/bpf_trace.h> #include <linux/clk.h> #include <linux/etherdevice.h> #include <linux/if_vlan.h> @@ -30,6 +31,7 @@ #include <linux/sys_soc.h> #include <linux/dma/ti-cppi5.h> #include <linux/dma/k3-udma-glue.h> +#include <net/page_pool/helpers.h> #include <net/switchdev.h> #include "cpsw_ale.h" @@ -101,6 +103,12 @@ #define AM65_CPSW_PN_TS_CTL_TX_HOST_TS_EN BIT(11) #define AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT 16 +#define AM65_CPSW_PN_TS_CTL_RX_ANX_F_EN BIT(0) +#define AM65_CPSW_PN_TS_CTL_RX_VLAN_LT1_EN BIT(1) +#define AM65_CPSW_PN_TS_CTL_RX_VLAN_LT2_EN BIT(2) +#define AM65_CPSW_PN_TS_CTL_RX_ANX_D_EN BIT(3) +#define AM65_CPSW_PN_TS_CTL_RX_ANX_E_EN BIT(9) + /* AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG register fields */ #define AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT 16 @@ -124,6 +132,11 @@ AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN | \ AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN) +#define AM65_CPSW_TS_RX_ANX_ALL_EN \ + (AM65_CPSW_PN_TS_CTL_RX_ANX_D_EN | \ + AM65_CPSW_PN_TS_CTL_RX_ANX_E_EN | \ + AM65_CPSW_PN_TS_CTL_RX_ANX_F_EN) + #define AM65_CPSW_ALE_AGEOUT_DEFAULT 30 /* Number of 
TX/RX descriptors */ #define AM65_CPSW_MAX_TX_DESC 500 @@ -138,6 +151,18 @@ #define AM65_CPSW_DEFAULT_TX_CHNS 8 +/* CPPI streaming packet interface */ +#define AM65_CPSW_CPPI_TX_FLOW_ID 0x3FFF +#define AM65_CPSW_CPPI_TX_PKT_TYPE 0x7 + +/* XDP */ +#define AM65_CPSW_XDP_CONSUMED 2 +#define AM65_CPSW_XDP_REDIRECT 1 +#define AM65_CPSW_XDP_PASS 0 + +/* Include headroom compatible with both skb and xdpf */ +#define AM65_CPSW_HEADROOM (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN) + static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave, const u8 *dev_addr) { @@ -305,12 +330,11 @@ static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev, } static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common, - struct sk_buff *skb) + struct page *page) { struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns; struct cppi5_host_desc_t *desc_rx; struct device *dev = common->dev; - u32 pkt_len = skb_tailroom(skb); dma_addr_t desc_dma; dma_addr_t buf_dma; void *swdata; @@ -322,20 +346,22 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common, } desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx); - buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len, - DMA_FROM_DEVICE); + buf_dma = dma_map_single(rx_chn->dma_dev, + page_address(page) + AM65_CPSW_HEADROOM, + AM65_CPSW_MAX_PACKET_SIZE, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) { k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); - dev_err(dev, "Failed to map rx skb buffer\n"); + dev_err(dev, "Failed to map rx buffer\n"); return -EINVAL; } cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT, AM65_CPSW_NAV_PS_DATA_SIZE); k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma); - cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb)); + cppi5_hdesc_attach_buf(desc_rx, buf_dma, AM65_CPSW_MAX_PACKET_SIZE, + buf_dma, AM65_CPSW_MAX_PACKET_SIZE); swdata = cppi5_hdesc_get_swdata(desc_rx); - *((void **)swdata) = skb; + *((void **)swdata) = page_address(page); return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0, desc_rx, desc_dma); } @@ -369,25 +395,137 @@ static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common); static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port); static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port); +static void am65_cpsw_destroy_xdp_rxqs(struct am65_cpsw_common *common) +{ + struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns; + struct xdp_rxq_info *rxq; + int i; + + for (i = 0; i < common->port_num; i++) { + if (!common->ports[i].ndev) + continue; + + rxq = &common->ports[i].xdp_rxq; + + if (xdp_rxq_info_is_reg(rxq)) + xdp_rxq_info_unreg(rxq); + } + + if (rx_chn->page_pool) { + page_pool_destroy(rx_chn->page_pool); + rx_chn->page_pool = NULL; + } +} + +static int am65_cpsw_create_xdp_rxqs(struct am65_cpsw_common *common) +{ + struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns; + struct page_pool_params pp_params = { + .flags = PP_FLAG_DMA_MAP, + .order = 0, + .pool_size = AM65_CPSW_MAX_RX_DESC, + .nid = dev_to_node(common->dev), + .dev = common->dev, + .dma_dir = DMA_BIDIRECTIONAL, + .napi = &common->napi_rx, + }; + struct xdp_rxq_info *rxq; + struct page_pool *pool; + int i, ret; + + pool = page_pool_create(&pp_params); + if (IS_ERR(pool)) + return PTR_ERR(pool); + + rx_chn->page_pool = pool; + + for (i = 0; i < common->port_num; i++) { + if (!common->ports[i].ndev) + continue; + + rxq = &common->ports[i].xdp_rxq; + + ret = xdp_rxq_info_reg(rxq, 
common->ports[i].ndev, i, 0); + if (ret) + goto err; + + ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool); + if (ret) + goto err; + } + + return 0; + +err: + am65_cpsw_destroy_xdp_rxqs(common); + return ret; +} + +static int am65_cpsw_nuss_desc_idx(struct k3_cppi_desc_pool *desc_pool, + void *desc, + unsigned char dsize_log2) +{ + void *pool_addr = k3_cppi_desc_pool_cpuaddr(desc_pool); + + return (desc - pool_addr) >> dsize_log2; +} + +static void am65_cpsw_nuss_set_buf_type(struct am65_cpsw_tx_chn *tx_chn, + struct cppi5_host_desc_t *desc, + enum am65_cpsw_tx_buf_type buf_type) +{ + int desc_idx; + + desc_idx = am65_cpsw_nuss_desc_idx(tx_chn->desc_pool, desc, + tx_chn->dsize_log2); + k3_cppi_desc_pool_desc_info_set(tx_chn->desc_pool, desc_idx, + (void *)buf_type); +} + +static enum am65_cpsw_tx_buf_type am65_cpsw_nuss_buf_type(struct am65_cpsw_tx_chn *tx_chn, + dma_addr_t desc_dma) +{ + struct cppi5_host_desc_t *desc_tx; + int desc_idx; + + desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma); + desc_idx = am65_cpsw_nuss_desc_idx(tx_chn->desc_pool, desc_tx, + tx_chn->dsize_log2); + + return (enum am65_cpsw_tx_buf_type)k3_cppi_desc_pool_desc_info(tx_chn->desc_pool, + desc_idx); +} + +static inline void am65_cpsw_put_page(struct am65_cpsw_rx_chn *rx_chn, + struct page *page, + bool allow_direct, + int desc_idx) +{ + page_pool_put_full_page(rx_chn->page_pool, page, allow_direct); + rx_chn->pages[desc_idx] = NULL; +} + static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma) { struct am65_cpsw_rx_chn *rx_chn = data; struct cppi5_host_desc_t *desc_rx; - struct sk_buff *skb; dma_addr_t buf_dma; u32 buf_dma_len; + void *page_addr; void **swdata; + int desc_idx; desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma); swdata = cppi5_hdesc_get_swdata(desc_rx); - skb = *swdata; + page_addr = *swdata; cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len); k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma); - dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE); k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); - dev_kfree_skb_any(skb); + desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx, + rx_chn->dsize_log2); + am65_cpsw_put_page(rx_chn, virt_to_page(page_addr), false, desc_idx); } static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn, @@ -440,12 +578,32 @@ static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma) dev_kfree_skb_any(skb); } +static struct sk_buff *am65_cpsw_build_skb(void *page_addr, + struct net_device *ndev, + unsigned int len) +{ + struct sk_buff *skb; + + len += AM65_CPSW_HEADROOM; + + skb = build_skb(page_addr, len); + if (unlikely(!skb)) + return NULL; + + skb_reserve(skb, AM65_CPSW_HEADROOM); + skb->dev = ndev; + + return skb; +} + static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common) { struct am65_cpsw_host *host_p = am65_common_get_host(common); + struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns; + struct am65_cpsw_tx_chn *tx_chn = common->tx_chns; int port_idx, i, ret, tx; - struct sk_buff *skb; u32 val, port_mask; + struct page *page; if (common->usage_count) return 0; @@ -505,25 +663,29 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common) am65_cpsw_qos_tx_p0_rate_init(common); - for (i = 0; i < common->rx_chns.descs_num; i++) { - skb = __netdev_alloc_skb_ip_align(NULL, - AM65_CPSW_MAX_PACKET_SIZE, - GFP_KERNEL); - if (!skb) { + ret = am65_cpsw_create_xdp_rxqs(common); + if (ret) { + dev_err(common->dev, "Failed 
to create XDP rx queues\n"); + return ret; + } + + for (i = 0; i < rx_chn->descs_num; i++) { + page = page_pool_dev_alloc_pages(rx_chn->page_pool); + if (!page) { ret = -ENOMEM; - dev_err(common->dev, "cannot allocate skb\n"); if (i) goto fail_rx; return ret; } + rx_chn->pages[i] = page; - ret = am65_cpsw_nuss_rx_push(common, skb); + ret = am65_cpsw_nuss_rx_push(common, page); if (ret < 0) { dev_err(common->dev, - "cannot submit skb to channel rx, error %d\n", + "cannot submit page to channel rx: %d\n", ret); - kfree_skb(skb); + am65_cpsw_put_page(rx_chn, page, false, i); if (i) goto fail_rx; @@ -531,27 +693,27 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common) } } - ret = k3_udma_glue_enable_rx_chn(common->rx_chns.rx_chn); + ret = k3_udma_glue_enable_rx_chn(rx_chn->rx_chn); if (ret) { dev_err(common->dev, "couldn't enable rx chn: %d\n", ret); goto fail_rx; } for (tx = 0; tx < common->tx_ch_num; tx++) { - ret = k3_udma_glue_enable_tx_chn(common->tx_chns[tx].tx_chn); + ret = k3_udma_glue_enable_tx_chn(tx_chn[tx].tx_chn); if (ret) { dev_err(common->dev, "couldn't enable tx chn %d: %d\n", tx, ret); tx--; goto fail_tx; } - napi_enable(&common->tx_chns[tx].napi_tx); + napi_enable(&tx_chn[tx].napi_tx); } napi_enable(&common->napi_rx); if (common->rx_irq_disabled) { common->rx_irq_disabled = false; - enable_irq(common->rx_chns.irq); + enable_irq(rx_chn->irq); } dev_dbg(common->dev, "cpsw_nuss started\n"); @@ -559,22 +721,23 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common) fail_tx: while (tx >= 0) { - napi_disable(&common->tx_chns[tx].napi_tx); - k3_udma_glue_disable_tx_chn(common->tx_chns[tx].tx_chn); + napi_disable(&tx_chn[tx].napi_tx); + k3_udma_glue_disable_tx_chn(tx_chn[tx].tx_chn); tx--; } - k3_udma_glue_disable_rx_chn(common->rx_chns.rx_chn); + k3_udma_glue_disable_rx_chn(rx_chn->rx_chn); fail_rx: - k3_udma_glue_reset_rx_chn(common->rx_chns.rx_chn, 0, - &common->rx_chns, + k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, 0, rx_chn, am65_cpsw_nuss_rx_cleanup, 0); return ret; } static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common) { + struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns; + struct am65_cpsw_tx_chn *tx_chn = common->tx_chns; int i; if (common->usage_count != 1) @@ -590,26 +753,25 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common) reinit_completion(&common->tdown_complete); for (i = 0; i < common->tx_ch_num; i++) - k3_udma_glue_tdown_tx_chn(common->tx_chns[i].tx_chn, false); + k3_udma_glue_tdown_tx_chn(tx_chn[i].tx_chn, false); i = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000)); if (!i) dev_err(common->dev, "tx timeout\n"); for (i = 0; i < common->tx_ch_num; i++) { - napi_disable(&common->tx_chns[i].napi_tx); - hrtimer_cancel(&common->tx_chns[i].tx_hrtimer); + napi_disable(&tx_chn[i].napi_tx); + hrtimer_cancel(&tx_chn[i].tx_hrtimer); } for (i = 0; i < common->tx_ch_num; i++) { - k3_udma_glue_reset_tx_chn(common->tx_chns[i].tx_chn, - &common->tx_chns[i], + k3_udma_glue_reset_tx_chn(tx_chn[i].tx_chn, &tx_chn[i], am65_cpsw_nuss_tx_cleanup); - k3_udma_glue_disable_tx_chn(common->tx_chns[i].tx_chn); + k3_udma_glue_disable_tx_chn(tx_chn[i].tx_chn); } reinit_completion(&common->tdown_complete); - k3_udma_glue_tdown_rx_chn(common->rx_chns.rx_chn, true); + k3_udma_glue_tdown_rx_chn(rx_chn->rx_chn, true); if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) { i = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000)); @@ -621,17 +783,22 @@ static int 
am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common) hrtimer_cancel(&common->rx_hrtimer); for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++) - k3_udma_glue_reset_rx_chn(common->rx_chns.rx_chn, i, - &common->rx_chns, + k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn, am65_cpsw_nuss_rx_cleanup, !!i); - k3_udma_glue_disable_rx_chn(common->rx_chns.rx_chn); + k3_udma_glue_disable_rx_chn(rx_chn->rx_chn); cpsw_ale_stop(common->ale); writel(0, common->cpsw_base + AM65_CPSW_REG_CTL); writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN); + for (i = 0; i < rx_chn->descs_num; i++) { + if (rx_chn->pages[i]) + am65_cpsw_put_page(rx_chn, rx_chn->pages[i], false, i); + } + am65_cpsw_destroy_xdp_rxqs(common); + dev_dbg(common->dev, "cpsw_nuss stopped\n"); return 0; } @@ -749,16 +916,149 @@ runtime_put: return ret; } -static void am65_cpsw_nuss_rx_ts(struct sk_buff *skb, u32 *psdata) +static int am65_cpsw_xdp_tx_frame(struct net_device *ndev, + struct am65_cpsw_tx_chn *tx_chn, + struct xdp_frame *xdpf, + enum am65_cpsw_tx_buf_type buf_type) { - struct skb_shared_hwtstamps *ssh; - u64 ns; + struct am65_cpsw_common *common = am65_ndev_to_common(ndev); + struct am65_cpsw_port *port = am65_ndev_to_port(ndev); + struct cppi5_host_desc_t *host_desc; + struct netdev_queue *netif_txq; + dma_addr_t dma_desc, dma_buf; + u32 pkt_len = xdpf->len; + void **swdata; + int ret; + + host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool); + if (unlikely(!host_desc)) { + ndev->stats.tx_dropped++; + return -ENOMEM; + } + + am65_cpsw_nuss_set_buf_type(tx_chn, host_desc, buf_type); - ns = ((u64)psdata[1] << 32) | psdata[0]; + dma_buf = dma_map_single(tx_chn->dma_dev, xdpf->data, + pkt_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(tx_chn->dma_dev, dma_buf))) { + ndev->stats.tx_dropped++; + ret = -ENOMEM; + goto pool_free; + } + + cppi5_hdesc_init(host_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT, + AM65_CPSW_NAV_PS_DATA_SIZE); + cppi5_hdesc_set_pkttype(host_desc, AM65_CPSW_CPPI_TX_PKT_TYPE); + cppi5_hdesc_set_pktlen(host_desc, pkt_len); + cppi5_desc_set_pktids(&host_desc->hdr, 0, AM65_CPSW_CPPI_TX_FLOW_ID); + cppi5_desc_set_tags_ids(&host_desc->hdr, 0, port->port_id); + + k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &dma_buf); + cppi5_hdesc_attach_buf(host_desc, dma_buf, pkt_len, dma_buf, pkt_len); + + swdata = cppi5_hdesc_get_swdata(host_desc); + *(swdata) = xdpf; + + /* Report BQL before sending the packet */ + netif_txq = netdev_get_tx_queue(ndev, tx_chn->id); + netdev_tx_sent_queue(netif_txq, pkt_len); + + dma_desc = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, host_desc); + if (AM65_CPSW_IS_CPSW2G(common)) { + ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, host_desc, + dma_desc); + } else { + spin_lock_bh(&tx_chn->lock); + ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, host_desc, + dma_desc); + spin_unlock_bh(&tx_chn->lock); + } + if (ret) { + /* Inform BQL */ + netdev_tx_completed_queue(netif_txq, 1, pkt_len); + ndev->stats.tx_errors++; + goto dma_unmap; + } + + return 0; - ssh = skb_hwtstamps(skb); - memset(ssh, 0, sizeof(*ssh)); - ssh->hwtstamp = ns_to_ktime(ns); +dma_unmap: + k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &dma_buf); + dma_unmap_single(tx_chn->dma_dev, dma_buf, pkt_len, DMA_TO_DEVICE); +pool_free: + k3_cppi_desc_pool_free(tx_chn->desc_pool, host_desc); + return ret; +} + +static int am65_cpsw_run_xdp(struct am65_cpsw_common *common, + struct am65_cpsw_port *port, + struct xdp_buff *xdp, + int desc_idx, int cpu, int *len) +{ + struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns; + struct 
net_device *ndev = port->ndev; + int ret = AM65_CPSW_XDP_CONSUMED; + struct am65_cpsw_tx_chn *tx_chn; + struct netdev_queue *netif_txq; + struct xdp_frame *xdpf; + struct bpf_prog *prog; + struct page *page; + u32 act; + + prog = READ_ONCE(port->xdp_prog); + if (!prog) + return AM65_CPSW_XDP_PASS; + + act = bpf_prog_run_xdp(prog, xdp); + /* XDP prog might have changed packet data and boundaries */ + *len = xdp->data_end - xdp->data; + + switch (act) { + case XDP_PASS: + ret = AM65_CPSW_XDP_PASS; + goto out; + case XDP_TX: + tx_chn = &common->tx_chns[cpu % AM65_CPSW_MAX_TX_QUEUES]; + netif_txq = netdev_get_tx_queue(ndev, tx_chn->id); + + xdpf = xdp_convert_buff_to_frame(xdp); + if (unlikely(!xdpf)) + break; + + __netif_tx_lock(netif_txq, cpu); + ret = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf, + AM65_CPSW_TX_BUF_TYPE_XDP_TX); + __netif_tx_unlock(netif_txq); + if (ret) + break; + + ndev->stats.rx_bytes += *len; + ndev->stats.rx_packets++; + ret = AM65_CPSW_XDP_CONSUMED; + goto out; + case XDP_REDIRECT: + if (unlikely(xdp_do_redirect(ndev, xdp, prog))) + break; + + ndev->stats.rx_bytes += *len; + ndev->stats.rx_packets++; + ret = AM65_CPSW_XDP_REDIRECT; + goto out; + default: + bpf_warn_invalid_xdp_action(ndev, prog, act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(ndev, prog, act); + fallthrough; + case XDP_DROP: + ndev->stats.rx_dropped++; + } + + page = virt_to_head_page(xdp->data); + am65_cpsw_put_page(rx_chn, page, true, desc_idx); + +out: + return ret; } /* RX psdata[2] word format - checksum information */ @@ -795,7 +1095,7 @@ static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info) } static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common, - u32 flow_idx) + u32 flow_idx, int cpu) { struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns; u32 buf_dma_len, pkt_len, port_id = 0, csum_info; @@ -803,13 +1103,16 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common, struct am65_cpsw_ndev_stats *stats; struct cppi5_host_desc_t *desc_rx; struct device *dev = common->dev; - struct sk_buff *skb, *new_skb; + struct page *page, *new_page; dma_addr_t desc_dma, buf_dma; struct am65_cpsw_port *port; + int headroom, desc_idx, ret; struct net_device *ndev; + struct sk_buff *skb; + struct xdp_buff xdp; + void *page_addr; void **swdata; u32 *psdata; - int ret = 0; ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_idx, &desc_dma); if (ret) { @@ -830,7 +1133,8 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common, __func__, flow_idx, &desc_dma); swdata = cppi5_hdesc_get_swdata(desc_rx); - skb = *swdata; + page_addr = *swdata; + page = virt_to_page(page_addr); cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len); k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma); pkt_len = cppi5_hdesc_get_pktlen(desc_rx); @@ -838,12 +1142,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common, dev_dbg(dev, "%s rx port_id:%d\n", __func__, port_id); port = am65_common_get_port(common, port_id); ndev = port->ndev; - skb->dev = ndev; - psdata = cppi5_hdesc_get_psdata(desc_rx); - /* add RX timestamp */ - if (port->rx_ts_enabled) - am65_cpsw_nuss_rx_ts(skb, psdata); csum_info = psdata[2]; dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info); @@ -851,36 +1150,64 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common, k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); - new_skb = netdev_alloc_skb_ip_align(ndev, AM65_CPSW_MAX_PACKET_SIZE); - if (new_skb) { - ndev_priv = netdev_priv(ndev); - 
am65_cpsw_nuss_set_offload_fwd_mark(skb, ndev_priv->offload_fwd_mark); - skb_put(skb, pkt_len); - skb->protocol = eth_type_trans(skb, ndev); - am65_cpsw_nuss_rx_csum(skb, csum_info); - napi_gro_receive(&common->napi_rx, skb); - - stats = this_cpu_ptr(ndev_priv->stats); - - u64_stats_update_begin(&stats->syncp); - stats->rx_packets++; - stats->rx_bytes += pkt_len; - u64_stats_update_end(&stats->syncp); - kmemleak_not_leak(new_skb); - } else { - ndev->stats.rx_dropped++; - new_skb = skb; + desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx, + rx_chn->dsize_log2); + + skb = am65_cpsw_build_skb(page_addr, ndev, + AM65_CPSW_MAX_PACKET_SIZE); + if (unlikely(!skb)) { + new_page = page; + goto requeue; } + if (port->xdp_prog) { + xdp_init_buff(&xdp, AM65_CPSW_MAX_PACKET_SIZE, &port->xdp_rxq); + + xdp_prepare_buff(&xdp, page_addr, skb_headroom(skb), + pkt_len, false); + + ret = am65_cpsw_run_xdp(common, port, &xdp, desc_idx, + cpu, &pkt_len); + if (ret != AM65_CPSW_XDP_PASS) + return ret; + + /* Compute additional headroom to be reserved */ + headroom = (xdp.data - xdp.data_hard_start) - skb_headroom(skb); + skb_reserve(skb, headroom); + } + + ndev_priv = netdev_priv(ndev); + am65_cpsw_nuss_set_offload_fwd_mark(skb, ndev_priv->offload_fwd_mark); + skb_put(skb, pkt_len); + if (port->rx_ts_enabled) + am65_cpts_rx_timestamp(common->cpts, skb); + skb_mark_for_recycle(skb); + skb->protocol = eth_type_trans(skb, ndev); + am65_cpsw_nuss_rx_csum(skb, csum_info); + napi_gro_receive(&common->napi_rx, skb); + + stats = this_cpu_ptr(ndev_priv->stats); + + u64_stats_update_begin(&stats->syncp); + stats->rx_packets++; + stats->rx_bytes += pkt_len; + u64_stats_update_end(&stats->syncp); + + new_page = page_pool_dev_alloc_pages(rx_chn->page_pool); + if (unlikely(!new_page)) + return -ENOMEM; + rx_chn->pages[desc_idx] = new_page; + if (netif_dormant(ndev)) { - dev_kfree_skb_any(new_skb); + am65_cpsw_put_page(rx_chn, new_page, true, desc_idx); ndev->stats.rx_dropped++; return 0; } - ret = am65_cpsw_nuss_rx_push(common, new_skb); +requeue: + ret = am65_cpsw_nuss_rx_push(common, new_page); if (WARN_ON(ret < 0)) { - dev_kfree_skb_any(new_skb); + am65_cpsw_put_page(rx_chn, new_page, true, desc_idx); ndev->stats.rx_errors++; ndev->stats.rx_dropped++; } @@ -901,6 +1228,8 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget) { struct am65_cpsw_common *common = am65_cpsw_napi_to_common(napi_rx); int flow = AM65_CPSW_MAX_RX_FLOWS; + int cpu = smp_processor_id(); + bool xdp_redirect = false; int cur_budget, ret; int num_rx = 0; @@ -909,9 +1238,12 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget) cur_budget = budget - num_rx; while (cur_budget--) { - ret = am65_cpsw_nuss_rx_packets(common, flow); - if (ret) + ret = am65_cpsw_nuss_rx_packets(common, flow, cpu); + if (ret) { + if (ret == AM65_CPSW_XDP_REDIRECT) + xdp_redirect = true; break; + } num_rx++; } @@ -919,6 +1251,9 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget) break; } + if (xdp_redirect) + xdp_do_flush(); + dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget); if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) { @@ -938,8 +1273,8 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget) } static struct sk_buff * -am65_cpsw_nuss_tx_compl_packet(struct am65_cpsw_tx_chn *tx_chn, - dma_addr_t desc_dma) +am65_cpsw_nuss_tx_compl_packet_skb(struct am65_cpsw_tx_chn *tx_chn, + dma_addr_t desc_dma) { struct am65_cpsw_ndev_priv *ndev_priv; 
struct am65_cpsw_ndev_stats *stats; @@ -968,6 +1303,39 @@ am65_cpsw_nuss_tx_compl_packet(struct am65_cpsw_tx_chn *tx_chn, return skb; } +static struct xdp_frame * +am65_cpsw_nuss_tx_compl_packet_xdp(struct am65_cpsw_common *common, + struct am65_cpsw_tx_chn *tx_chn, + dma_addr_t desc_dma, + struct net_device **ndev) +{ + struct am65_cpsw_ndev_priv *ndev_priv; + struct am65_cpsw_ndev_stats *stats; + struct cppi5_host_desc_t *desc_tx; + struct am65_cpsw_port *port; + struct xdp_frame *xdpf; + u32 port_id = 0; + void **swdata; + + desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma); + cppi5_desc_get_tags_ids(&desc_tx->hdr, NULL, &port_id); + swdata = cppi5_hdesc_get_swdata(desc_tx); + xdpf = *(swdata); + am65_cpsw_nuss_xmit_free(tx_chn, desc_tx); + + port = am65_common_get_port(common, port_id); + *ndev = port->ndev; + + ndev_priv = netdev_priv(*ndev); + stats = this_cpu_ptr(ndev_priv->stats); + u64_stats_update_begin(&stats->syncp); + stats->tx_packets++; + stats->tx_bytes += xdpf->len; + u64_stats_update_end(&stats->syncp); + + return xdpf; +} + static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_device *ndev, struct netdev_queue *netif_txq) { @@ -988,11 +1356,13 @@ static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_d static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common, int chn, unsigned int budget, bool *tdown) { + enum am65_cpsw_tx_buf_type buf_type; struct device *dev = common->dev; struct am65_cpsw_tx_chn *tx_chn; struct netdev_queue *netif_txq; unsigned int total_bytes = 0; struct net_device *ndev; + struct xdp_frame *xdpf; struct sk_buff *skb; dma_addr_t desc_dma; int res, num_tx = 0; @@ -1013,10 +1383,21 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common, break; } - skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma); - total_bytes = skb->len; - ndev = skb->dev; - napi_consume_skb(skb, budget); + buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma); + if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) { + skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma); + ndev = skb->dev; + total_bytes = skb->len; + napi_consume_skb(skb, budget); + } else { + xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn, + desc_dma, &ndev); + total_bytes = xdpf->len; + if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX) + xdp_return_frame_rx_napi(xdpf); + else + xdp_return_frame(xdpf); + } num_tx++; netif_txq = netdev_get_tx_queue(ndev, chn); @@ -1034,11 +1415,13 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common, static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common, int chn, unsigned int budget, bool *tdown) { + enum am65_cpsw_tx_buf_type buf_type; struct device *dev = common->dev; struct am65_cpsw_tx_chn *tx_chn; struct netdev_queue *netif_txq; unsigned int total_bytes = 0; struct net_device *ndev; + struct xdp_frame *xdpf; struct sk_buff *skb; dma_addr_t desc_dma; int res, num_tx = 0; @@ -1057,11 +1440,21 @@ static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common, break; } - skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma); - - ndev = skb->dev; - total_bytes += skb->len; - napi_consume_skb(skb, budget); + buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma); + if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) { + skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma); + ndev = skb->dev; + total_bytes += skb->len; + napi_consume_skb(skb, budget); + } else { + xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, 
tx_chn, + desc_dma, &ndev); + total_bytes += xdpf->len; + if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX) + xdp_return_frame_rx_napi(xdpf); + else + xdp_return_frame(xdpf); + } num_tx++; } @@ -1183,10 +1576,13 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb, goto busy_stop_q; } + am65_cpsw_nuss_set_buf_type(tx_chn, first_desc, + AM65_CPSW_TX_BUF_TYPE_SKB); + cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT, AM65_CPSW_NAV_PS_DATA_SIZE); - cppi5_desc_set_pktids(&first_desc->hdr, 0, 0x3FFF); - cppi5_hdesc_set_pkttype(first_desc, 0x7); + cppi5_desc_set_pktids(&first_desc->hdr, 0, AM65_CPSW_CPPI_TX_FLOW_ID); + cppi5_hdesc_set_pkttype(first_desc, AM65_CPSW_CPPI_TX_PKT_TYPE); cppi5_desc_set_tags_ids(&first_desc->hdr, 0, port->port_id); k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma); @@ -1225,6 +1621,9 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb, goto busy_free_descs; } + am65_cpsw_nuss_set_buf_type(tx_chn, next_desc, + AM65_CPSW_TX_BUF_TYPE_SKB); + buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) { @@ -1334,7 +1733,6 @@ static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev, static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr) { - struct am65_cpsw_common *common = am65_ndev_to_common(ndev); struct am65_cpsw_port *port = am65_ndev_to_port(ndev); u32 ts_ctrl, seq_id, ts_ctrl_ltype2, ts_vlan_ltype; struct hwtstamp_config cfg; @@ -1358,11 +1756,6 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev, case HWTSTAMP_FILTER_NONE: port->rx_ts_enabled = false; break; - case HWTSTAMP_FILTER_ALL: - case HWTSTAMP_FILTER_SOME: - case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: @@ -1372,10 +1765,13 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev, case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: - case HWTSTAMP_FILTER_NTP_ALL: port->rx_ts_enabled = true; - cfg.rx_filter = HWTSTAMP_FILTER_ALL; + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + case HWTSTAMP_FILTER_NTP_ALL: + return -EOPNOTSUPP; default: return -ERANGE; } @@ -1405,6 +1801,10 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev, ts_ctrl |= AM65_CPSW_TS_TX_ANX_ALL_EN | AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN; + if (port->rx_ts_enabled) + ts_ctrl |= AM65_CPSW_TS_RX_ANX_ALL_EN | + AM65_CPSW_PN_TS_CTL_RX_VLAN_LT1_EN; + writel(seq_id, port->port_base + AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG); writel(ts_vlan_ltype, port->port_base + AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG); @@ -1412,9 +1812,6 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev, AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2); writel(ts_ctrl, port->port_base + AM65_CPSW_PORTN_REG_TS_CTL); - /* en/dis RX timestamp */ - am65_cpts_rx_enable(common->cpts, port->rx_ts_enabled); - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; } @@ -1431,7 +1828,7 @@ static int am65_cpsw_nuss_hwtstamp_get(struct net_device *ndev, cfg.tx_type = port->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; cfg.rx_filter = port->rx_ts_enabled ? 
- HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; + HWTSTAMP_FILTER_PTP_V2_EVENT : HWTSTAMP_FILTER_NONE; return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; } @@ -1488,6 +1885,59 @@ static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev, stats->tx_dropped = dev->stats.tx_dropped; } +static int am65_cpsw_xdp_prog_setup(struct net_device *ndev, + struct bpf_prog *prog) +{ + struct am65_cpsw_port *port = am65_ndev_to_port(ndev); + bool running = netif_running(ndev); + struct bpf_prog *old_prog; + + if (running) + am65_cpsw_nuss_ndo_slave_stop(ndev); + + old_prog = xchg(&port->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + if (running) + return am65_cpsw_nuss_ndo_slave_open(ndev); + + return 0; +} + +static int am65_cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf) +{ + switch (bpf->command) { + case XDP_SETUP_PROG: + return am65_cpsw_xdp_prog_setup(ndev, bpf->prog); + default: + return -EINVAL; + } +} + +static int am65_cpsw_ndo_xdp_xmit(struct net_device *ndev, int n, + struct xdp_frame **frames, u32 flags) +{ + struct am65_cpsw_tx_chn *tx_chn; + struct netdev_queue *netif_txq; + int cpu = smp_processor_id(); + int i, nxmit = 0; + + tx_chn = &am65_ndev_to_common(ndev)->tx_chns[cpu % AM65_CPSW_MAX_TX_QUEUES]; + netif_txq = netdev_get_tx_queue(ndev, tx_chn->id); + + __netif_tx_lock(netif_txq, cpu); + for (i = 0; i < n; i++) { + if (am65_cpsw_xdp_tx_frame(ndev, tx_chn, frames[i], + AM65_CPSW_TX_BUF_TYPE_XDP_NDO)) + break; + nxmit++; + } + __netif_tx_unlock(netif_txq); + + return nxmit; +} + static const struct net_device_ops am65_cpsw_nuss_netdev_ops = { .ndo_open = am65_cpsw_nuss_ndo_slave_open, .ndo_stop = am65_cpsw_nuss_ndo_slave_stop, @@ -1502,6 +1952,8 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops = { .ndo_eth_ioctl = am65_cpsw_nuss_ndo_slave_ioctl, .ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc, .ndo_set_tx_maxrate = am65_cpsw_qos_ndo_tx_p0_set_maxrate, + .ndo_bpf = am65_cpsw_ndo_bpf, + .ndo_xdp_xmit = am65_cpsw_ndo_xdp_xmit, }; static void am65_cpsw_disable_phy(struct phy *phy) @@ -1772,7 +2224,7 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common) .mode = K3_RINGACC_RING_MODE_RING, .flags = 0 }; - u32 hdesc_size; + u32 hdesc_size, hdesc_size_out; int i, ret = 0; hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE, @@ -1816,6 +2268,10 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common) goto err; } + hdesc_size_out = k3_cppi_desc_pool_desc_size(tx_chn->desc_pool); + tx_chn->dsize_log2 = __fls(hdesc_size_out); + WARN_ON(hdesc_size_out != (1 << tx_chn->dsize_log2)); + tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn); if (tx_chn->irq < 0) { dev_err(dev, "Failed to get tx dma irq %d\n", @@ -1862,8 +2318,8 @@ static void am65_cpsw_nuss_free_rx_chns(void *data) static void am65_cpsw_nuss_remove_rx_chns(void *data) { struct am65_cpsw_common *common = data; - struct am65_cpsw_rx_chn *rx_chn; struct device *dev = common->dev; + struct am65_cpsw_rx_chn *rx_chn; rx_chn = &common->rx_chns; devm_remove_action(dev, am65_cpsw_nuss_free_rx_chns, common); @@ -1873,11 +2329,7 @@ static void am65_cpsw_nuss_remove_rx_chns(void *data) netif_napi_del(&common->napi_rx); - if (!IS_ERR_OR_NULL(rx_chn->desc_pool)) - k3_cppi_desc_pool_destroy(rx_chn->desc_pool); - - if (!IS_ERR_OR_NULL(rx_chn->rx_chn)) - k3_udma_glue_release_rx_chn(rx_chn->rx_chn); + am65_cpsw_nuss_free_rx_chns(common); common->rx_flow_id_base = -1; } @@ -1888,7 +2340,7 @@ static int am65_cpsw_nuss_init_rx_chns(struct 
am65_cpsw_common *common) struct k3_udma_glue_rx_channel_cfg rx_cfg = { 0 }; u32 max_desc_num = AM65_CPSW_MAX_RX_DESC; struct device *dev = common->dev; - u32 hdesc_size; + u32 hdesc_size, hdesc_size_out; u32 fdqring_id; int i, ret = 0; @@ -1920,6 +2372,17 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common) goto err; } + hdesc_size_out = k3_cppi_desc_pool_desc_size(rx_chn->desc_pool); + rx_chn->dsize_log2 = __fls(hdesc_size_out); + WARN_ON(hdesc_size_out != (1 << rx_chn->dsize_log2)); + + rx_chn->page_pool = NULL; + + rx_chn->pages = devm_kcalloc(dev, rx_chn->descs_num, + sizeof(*rx_chn->pages), GFP_KERNEL); + if (!rx_chn->pages) + return -ENOMEM; + common->rx_flow_id_base = k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn); dev_info(dev, "set new flow-id-base %u\n", common->rx_flow_id_base); @@ -2252,6 +2715,9 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx) NETIF_F_HW_TC; port->ndev->features = port->ndev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; + port->ndev->xdp_features = NETDEV_XDP_ACT_BASIC | + NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_NDO_XMIT; port->ndev->vlan_features |= NETIF_F_SG; port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops; port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave; @@ -2315,6 +2781,8 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx) if (ret) dev_err(dev, "failed to add percpu stat free action %d\n", ret); + port->xdp_prog = NULL; + if (!common->dma_ndev) common->dma_ndev = port->ndev; @@ -2588,7 +3056,8 @@ static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port) } static int am65_cpsw_dl_switch_mode_set(struct devlink *dl, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct am65_cpsw_devlink *dl_priv = devlink_priv(dl); struct am65_cpsw_common *cpsw = dl_priv->common; @@ -2922,7 +3391,8 @@ static const struct am65_cpsw_pdata j784s4_cpswxg_pdata = { .quirks = 0, .ale_dev_id = "am64-cpswxg", .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE, - .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_USXGMII), + .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII) | + BIT(PHY_INTERFACE_MODE_USXGMII), }; static const struct of_device_id am65_cpsw_nuss_of_mtable[] = { @@ -2958,9 +3428,9 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev) struct device_node *node; struct resource *res; struct clk *clk; + int ale_entries; u64 id_temp; int ret, i; - int ale_entries; common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL); if (!common) @@ -3172,10 +3642,10 @@ static int am65_cpsw_nuss_suspend(struct device *dev) static int am65_cpsw_nuss_resume(struct device *dev) { struct am65_cpsw_common *common = dev_get_drvdata(dev); + struct am65_cpsw_host *host_p = am65_common_get_host(common); struct am65_cpsw_port *port; struct net_device *ndev; int i, ret; - struct am65_cpsw_host *host_p = am65_common_get_host(common); ret = am65_cpsw_nuss_init_tx_chns(common); if (ret) diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h index 7da0492dc091..d8ce88dc9c89 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h @@ -14,6 +14,7 @@ #include <linux/platform_device.h> #include <linux/soc/ti/k3-ringacc.h> #include <net/devlink.h> +#include <net/xdp.h> #include "am65-cpsw-qos.h" struct am65_cpts; @@ -56,10 +57,18 @@ struct am65_cpsw_port { bool rx_ts_enabled; 
struct am65_cpsw_qos qos; struct devlink_port devlink_port; + struct bpf_prog *xdp_prog; + struct xdp_rxq_info xdp_rxq; /* Only for suspend resume context */ u32 vid_context; }; +enum am65_cpsw_tx_buf_type { + AM65_CPSW_TX_BUF_TYPE_SKB, + AM65_CPSW_TX_BUF_TYPE_XDP_TX, + AM65_CPSW_TX_BUF_TYPE_XDP_NDO, +}; + struct am65_cpsw_host { struct am65_cpsw_common *common; void __iomem *port_base; @@ -80,6 +89,7 @@ struct am65_cpsw_tx_chn { int irq; u32 id; u32 descs_num; + unsigned char dsize_log2; char tx_chn_name[128]; u32 rate_mbps; }; @@ -89,7 +99,10 @@ struct am65_cpsw_rx_chn { struct device *dma_dev; struct k3_cppi_desc_pool *desc_pool; struct k3_udma_glue_rx_channel *rx_chn; + struct page_pool *page_pool; + struct page **pages; u32 descs_num; + unsigned char dsize_log2; int irq; }; diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c index 816e73a3d6e4..fa96db7c1a13 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-qos.c +++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c @@ -9,6 +9,7 @@ #include <linux/pm_runtime.h> #include <linux/math.h> +#include <linux/math64.h> #include <linux/time.h> #include <linux/units.h> #include <net/pkt_cls.h> @@ -837,6 +838,7 @@ static int am65_cpsw_taprio_replace(struct net_device *ndev, struct am65_cpsw_port *port = am65_ndev_to_port(ndev); struct am65_cpts *cpts = common->cpts; struct am65_cpsw_est *est_new; + u64 cur_time, n; int ret, tact; if (!netif_running(ndev)) { @@ -888,13 +890,21 @@ static int am65_cpsw_taprio_replace(struct net_device *ndev, if (tact == TACT_PROG) am65_cpsw_timer_stop(ndev); - if (!est_new->taprio.base_time) - est_new->taprio.base_time = am65_cpts_ns_gettime(cpts); - am65_cpsw_port_est_get_buf_num(ndev, est_new); am65_cpsw_est_set_sched_list(ndev, est_new); am65_cpsw_port_est_assign_buf_num(ndev, est_new->buf); + /* If the base-time is in the past, start schedule from the time: + * base_time + (N*cycle_time) + * where N is the smallest possible integer such that the above + * time is in the future. 
+ */ + cur_time = am65_cpts_ns_gettime(cpts); + if (est_new->taprio.base_time < cur_time) { + n = div64_u64(cur_time - est_new->taprio.base_time, est_new->taprio.cycle_time); + est_new->taprio.base_time += (n + 1) * est_new->taprio.cycle_time; + } + am65_cpsw_est_set(ndev, 1); if (tact == TACT_PROG) { @@ -1008,6 +1018,9 @@ static int am65_cpsw_qos_clsflower_add_policer(struct am65_cpsw_port *port, return -EOPNOTSUPP; } + if (flow_rule_match_has_control_flags(rule, extack)) + return -EOPNOTSUPP; + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address"); return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c index f89716b1cfb6..59d6ab989c55 100644 --- a/drivers/net/ethernet/ti/am65-cpts.c +++ b/drivers/net/ethernet/ti/am65-cpts.c @@ -275,15 +275,13 @@ static bool am65_cpts_fifo_pop_event(struct am65_cpts *cpts, return true; } -static int am65_cpts_fifo_read(struct am65_cpts *cpts) +static int __am65_cpts_fifo_read(struct am65_cpts *cpts) { struct ptp_clock_event pevent; struct am65_cpts_event *event; bool schedule = false; int i, type, ret = 0; - unsigned long flags; - spin_lock_irqsave(&cpts->lock, flags); for (i = 0; i < AM65_CPTS_FIFO_DEPTH; i++) { event = list_first_entry_or_null(&cpts->pool, struct am65_cpts_event, list); @@ -312,8 +310,7 @@ static int am65_cpts_fifo_read(struct am65_cpts *cpts) event->tmo = jiffies + msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT); - list_del_init(&event->list); - list_add_tail(&event->list, &cpts->events); + list_move_tail(&event->list, &cpts->events); dev_dbg(cpts->dev, "AM65_CPTS_EV_TX e1:%08x e2:%08x t:%lld\n", @@ -356,14 +353,24 @@ static int am65_cpts_fifo_read(struct am65_cpts *cpts) } out: - spin_unlock_irqrestore(&cpts->lock, flags); - if (schedule) ptp_schedule_worker(cpts->ptp_clock, 0); return ret; } +static int am65_cpts_fifo_read(struct am65_cpts *cpts) +{ + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&cpts->lock, flags); + ret = __am65_cpts_fifo_read(cpts); + spin_unlock_irqrestore(&cpts->lock, flags); + + return ret; +} + static u64 am65_cpts_gettime(struct am65_cpts *cpts, struct ptp_system_timestamp *sts) { @@ -864,29 +871,6 @@ static long am65_cpts_ts_work(struct ptp_clock_info *ptp) return delay; } -/** - * am65_cpts_rx_enable - enable rx timestamping - * @cpts: cpts handle - * @en: enable - * - * This functions enables rx packets timestamping. The CPTS can timestamp all - * rx packets. 
- */ -void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en) -{ - u32 val; - - mutex_lock(&cpts->ptp_clk_lock); - val = am65_cpts_read32(cpts, control); - if (en) - val |= AM65_CPTS_CONTROL_TSTAMP_EN; - else - val &= ~AM65_CPTS_CONTROL_TSTAMP_EN; - am65_cpts_write32(cpts, val, control); - mutex_unlock(&cpts->ptp_clk_lock); -} -EXPORT_SYMBOL_GPL(am65_cpts_rx_enable); - static int am65_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid) { unsigned int ptp_class = ptp_classify_raw(skb); @@ -911,6 +895,69 @@ static int am65_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid) return 1; } +static u64 am65_cpts_find_rx_ts(struct am65_cpts *cpts, u32 skb_mtype_seqid) +{ + struct list_head *this, *next; + struct am65_cpts_event *event; + unsigned long flags; + u32 mtype_seqid; + u64 ns = 0; + + spin_lock_irqsave(&cpts->lock, flags); + __am65_cpts_fifo_read(cpts); + list_for_each_safe(this, next, &cpts->events) { + event = list_entry(this, struct am65_cpts_event, list); + if (time_after(jiffies, event->tmo)) { + list_move(&event->list, &cpts->pool); + continue; + } + + mtype_seqid = event->event1 & + (AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK | + AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK | + AM65_CPTS_EVENT_1_EVENT_TYPE_MASK); + + if (mtype_seqid == skb_mtype_seqid) { + ns = event->timestamp; + list_move(&event->list, &cpts->pool); + break; + } + } + spin_unlock_irqrestore(&cpts->lock, flags); + + return ns; +} + +void am65_cpts_rx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb) +{ + struct am65_cpts_skb_cb_data *skb_cb = (struct am65_cpts_skb_cb_data *)skb->cb; + struct skb_shared_hwtstamps *ssh; + int ret; + u64 ns; + + /* am65_cpts_rx_timestamp() is called before eth_type_trans(), so + * skb MAC Hdr properties are not configured yet. Hence need to + * reset skb MAC header here + */ + skb_reset_mac_header(skb); + ret = am65_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid); + if (!ret) + return; /* if not PTP class packet */ + + skb_cb->skb_mtype_seqid |= (AM65_CPTS_EV_RX << AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT); + + dev_dbg(cpts->dev, "%s mtype seqid %08x\n", __func__, skb_cb->skb_mtype_seqid); + + ns = am65_cpts_find_rx_ts(cpts, skb_cb->skb_mtype_seqid); + if (!ns) + return; + + ssh = skb_hwtstamps(skb); + memset(ssh, 0, sizeof(*ssh)); + ssh->hwtstamp = ns_to_ktime(ns); +} +EXPORT_SYMBOL_GPL(am65_cpts_rx_timestamp); + /** * am65_cpts_tx_timestamp - save tx packet for timestamping * @cpts: cpts handle diff --git a/drivers/net/ethernet/ti/am65-cpts.h b/drivers/net/ethernet/ti/am65-cpts.h index 6e14df0be113..6099d772799d 100644 --- a/drivers/net/ethernet/ti/am65-cpts.h +++ b/drivers/net/ethernet/ti/am65-cpts.h @@ -22,9 +22,9 @@ void am65_cpts_release(struct am65_cpts *cpts); struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs, struct device_node *node); int am65_cpts_phc_index(struct am65_cpts *cpts); +void am65_cpts_rx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb); void am65_cpts_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb); void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb); -void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en); u64 am65_cpts_ns_gettime(struct am65_cpts *cpts); int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx, struct am65_cpts_estf_cfg *cfg); @@ -48,17 +48,18 @@ static inline int am65_cpts_phc_index(struct am65_cpts *cpts) return -1; } -static inline void am65_cpts_tx_timestamp(struct am65_cpts *cpts, +static inline void am65_cpts_rx_timestamp(struct am65_cpts *cpts, struct sk_buff 
*skb) { } -static inline void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts, - struct sk_buff *skb) +static inline void am65_cpts_tx_timestamp(struct am65_cpts *cpts, + struct sk_buff *skb) { } -static inline void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en) +static inline void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts, + struct sk_buff *skb) { } diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index 087dcb67505a..2baa198ebfa0 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -1625,7 +1625,8 @@ static int cpsw_dl_switch_mode_get(struct devlink *dl, u32 id, } static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct cpsw_devlink *dl_priv = devlink_priv(dl); struct cpsw_common *cpsw = dl_priv->cpsw; @@ -1762,7 +1763,8 @@ static int cpsw_dl_ale_ctrl_get(struct devlink *dl, u32 id, } static int cpsw_dl_ale_ctrl_set(struct devlink *dl, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct cpsw_devlink *dl_priv = devlink_priv(dl); struct cpsw_common *cpsw = dl_priv->cpsw; diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c index 764ed298b570..6fe4edabba44 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.c +++ b/drivers/net/ethernet/ti/cpsw_priv.c @@ -1404,6 +1404,9 @@ static int cpsw_qos_clsflower_add_policer(struct cpsw_priv *priv, return -EOPNOTSUPP; } + if (flow_rule_match_has_control_flags(rule, extack)) + return -EOPNOTSUPP; + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address"); return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/ti/icssg/icssg_classifier.c b/drivers/net/ethernet/ti/icssg/icssg_classifier.c index 6df53ab17fbc..79ba47bb3602 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_classifier.c +++ b/drivers/net/ethernet/ti/icssg/icssg_classifier.c @@ -274,6 +274,16 @@ static void rx_class_set_or(struct regmap *miig_rt, int slice, int n, regmap_write(miig_rt, offset, data); } +static u32 rx_class_get_or(struct regmap *miig_rt, int slice, int n) +{ + u32 offset, val; + + offset = RX_CLASS_N_REG(slice, n, RX_CLASS_OR_EN); + regmap_read(miig_rt, offset, &val); + + return val; +} + void icssg_class_set_host_mac_addr(struct regmap *miig_rt, const u8 *mac) { regmap_write(miig_rt, MAC_INTERFACE_0, (u32)(mac[0] | mac[1] << 8 | @@ -288,6 +298,26 @@ void icssg_class_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac) regmap_write(miig_rt, offs[slice].mac1, (u32)(mac[4] | mac[5] << 8)); } +static void icssg_class_ft1_add_mcast(struct regmap *miig_rt, int slice, + int slot, const u8 *addr, const u8 *mask) +{ + u32 val; + int i; + + WARN(slot >= FT1_NUM_SLOTS, "invalid slot: %d\n", slot); + + rx_class_ft1_set_da(miig_rt, slice, slot, addr); + rx_class_ft1_set_da_mask(miig_rt, slice, slot, mask); + rx_class_ft1_cfg_set_type(miig_rt, slice, slot, FT1_CFG_TYPE_EQ); + + /* Enable the FT1 slot in OR enable for all classifiers */ + for (i = 0; i < ICSSG_NUM_CLASSIFIERS_IN_USE; i++) { + val = rx_class_get_or(miig_rt, slice, i); + val |= RX_CLASS_FT_FT1_MATCH(slot); + rx_class_set_or(miig_rt, slice, i, val); + } +} + /* disable all RX traffic */ void icssg_class_disable(struct regmap *miig_rt, int slice) { @@ -331,30 +361,95 @@ void icssg_class_disable(struct regmap *miig_rt, int slice) 
regmap_write(miig_rt, offs[slice].rx_class_cfg2, 0); } -void icssg_class_default(struct regmap *miig_rt, int slice, bool allmulti) +void icssg_class_default(struct regmap *miig_rt, int slice, bool allmulti, + bool is_sr1) { + int num_classifiers = is_sr1 ? ICSSG_NUM_CLASSIFIERS_IN_USE : 1; u32 data; + int n; /* defaults */ icssg_class_disable(miig_rt, slice); /* Setup Classifier */ - /* match on Broadcast or MAC_PRU address */ - data = RX_CLASS_FT_BC | RX_CLASS_FT_DA_P; + for (n = 0; n < num_classifiers; n++) { + /* match on Broadcast or MAC_PRU address */ + data = RX_CLASS_FT_BC | RX_CLASS_FT_DA_P; - /* multicast */ - if (allmulti) - data |= RX_CLASS_FT_MC; + /* multicast */ + if (allmulti) + data |= RX_CLASS_FT_MC; - rx_class_set_or(miig_rt, slice, 0, data); + rx_class_set_or(miig_rt, slice, n, data); - /* set CFG1 for OR_OR_AND for classifier */ - rx_class_sel_set_type(miig_rt, slice, 0, RX_CLASS_SEL_TYPE_OR_OR_AND); + /* set CFG1 for OR_OR_AND for classifier */ + rx_class_sel_set_type(miig_rt, slice, n, + RX_CLASS_SEL_TYPE_OR_OR_AND); + } /* clear CFG2 */ regmap_write(miig_rt, offs[slice].rx_class_cfg2, 0); } +void icssg_class_promiscuous_sr1(struct regmap *miig_rt, int slice) +{ + u32 data, offset; + int n; + + /* defaults */ + icssg_class_disable(miig_rt, slice); + + /* Setup Classifier */ + for (n = 0; n < ICSSG_NUM_CLASSIFIERS_IN_USE; n++) { + /* set RAW_MASK to bypass filters */ + offset = RX_CLASS_GATES_N_REG(slice, n); + regmap_read(miig_rt, offset, &data); + data |= RX_CLASS_GATES_RAW_MASK; + regmap_write(miig_rt, offset, data); + } +} + +void icssg_class_add_mcast_sr1(struct regmap *miig_rt, int slice, + struct net_device *ndev) +{ + u8 mask_addr[6] = { 0, 0, 0, 0, 0, 0xff }; + struct netdev_hw_addr *ha; + int slot = 2; + + rx_class_ft1_set_start_len(miig_rt, slice, 0, 6); + /* reserve first 2 slots for + * 1) 01-80-C2-00-00-XX Known Service Ethernet Multicast addresses + * 2) 01-00-5e-00-00-XX Local Network Control Block + * (224.0.0.0 - 224.0.0.255 (224.0.0/24)) + */ + icssg_class_ft1_add_mcast(miig_rt, slice, 0, + eth_reserved_addr_base, mask_addr); + icssg_class_ft1_add_mcast(miig_rt, slice, 1, + eth_ipv4_mcast_addr_base, mask_addr); + mask_addr[5] = 0; + netdev_for_each_mc_addr(ha, ndev) { + /* skip addresses matching reserved slots */ + if (!memcmp(eth_reserved_addr_base, ha->addr, 5) || + !memcmp(eth_ipv4_mcast_addr_base, ha->addr, 5)) { + netdev_dbg(ndev, "mcast skip %pM\n", ha->addr); + continue; + } + + if (slot >= FT1_NUM_SLOTS) { + netdev_dbg(ndev, + "can't add more than %d MC addresses, enabling allmulti\n", + FT1_NUM_SLOTS); + icssg_class_default(miig_rt, slice, 1, true); + break; + } + + netdev_dbg(ndev, "mcast add %pM\n", ha->addr); + icssg_class_ft1_add_mcast(miig_rt, slice, slot, + ha->addr, mask_addr); + slot++; + } +} + /* required for SAV check */ void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr) { diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c new file mode 100644 index 000000000000..088ab8076db4 --- /dev/null +++ b/drivers/net/ethernet/ti/icssg/icssg_common.c @@ -0,0 +1,1252 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Texas Instruments ICSSG Ethernet Driver + * + * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/ + * Copyright (C) Siemens AG, 2024 + * + */ + +#include <linux/dma-mapping.h> +#include <linux/dma/ti-cppi5.h> +#include <linux/etherdevice.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/of.h> +#include 
<linux/of_mdio.h> +#include <linux/phy.h> +#include <linux/remoteproc/pruss.h> +#include <linux/regmap.h> +#include <linux/remoteproc.h> + +#include "icssg_prueth.h" +#include "../k3-cppi-desc-pool.h" + +/* Netif debug messages possible */ +#define PRUETH_EMAC_DEBUG (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | \ + NETIF_MSG_IFDOWN | \ + NETIF_MSG_IFUP | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR | \ + NETIF_MSG_TX_QUEUED | \ + NETIF_MSG_INTR | \ + NETIF_MSG_TX_DONE | \ + NETIF_MSG_RX_STATUS | \ + NETIF_MSG_PKTDATA | \ + NETIF_MSG_HW | \ + NETIF_MSG_WOL) + +#define prueth_napi_to_emac(napi) container_of(napi, struct prueth_emac, napi_rx) + +void prueth_cleanup_rx_chns(struct prueth_emac *emac, + struct prueth_rx_chn *rx_chn, + int max_rflows) +{ + if (rx_chn->desc_pool) + k3_cppi_desc_pool_destroy(rx_chn->desc_pool); + + if (rx_chn->rx_chn) + k3_udma_glue_release_rx_chn(rx_chn->rx_chn); +} + +void prueth_cleanup_tx_chns(struct prueth_emac *emac) +{ + int i; + + for (i = 0; i < emac->tx_ch_num; i++) { + struct prueth_tx_chn *tx_chn = &emac->tx_chns[i]; + + if (tx_chn->desc_pool) + k3_cppi_desc_pool_destroy(tx_chn->desc_pool); + + if (tx_chn->tx_chn) + k3_udma_glue_release_tx_chn(tx_chn->tx_chn); + + /* Assume prueth_cleanup_tx_chns() is called at the + * end after all channel resources are freed + */ + memset(tx_chn, 0, sizeof(*tx_chn)); + } +} + +void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num) +{ + int i; + + for (i = 0; i < num; i++) { + struct prueth_tx_chn *tx_chn = &emac->tx_chns[i]; + + if (tx_chn->irq) + free_irq(tx_chn->irq, tx_chn); + netif_napi_del(&tx_chn->napi_tx); + } +} + +void prueth_xmit_free(struct prueth_tx_chn *tx_chn, + struct cppi5_host_desc_t *desc) +{ + struct cppi5_host_desc_t *first_desc, *next_desc; + dma_addr_t buf_dma, next_desc_dma; + u32 buf_dma_len; + + first_desc = desc; + next_desc = first_desc; + + cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len); + k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma); + + dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len, + DMA_TO_DEVICE); + + next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc); + k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma); + while (next_desc_dma) { + next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, + next_desc_dma); + cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len); + k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma); + + dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len, + DMA_TO_DEVICE); + + next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc); + k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma); + + k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc); + } + + k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc); +} + +int emac_tx_complete_packets(struct prueth_emac *emac, int chn, + int budget, bool *tdown) +{ + struct net_device *ndev = emac->ndev; + struct cppi5_host_desc_t *desc_tx; + struct netdev_queue *netif_txq; + struct prueth_tx_chn *tx_chn; + unsigned int total_bytes = 0; + struct sk_buff *skb; + dma_addr_t desc_dma; + int res, num_tx = 0; + void **swdata; + + tx_chn = &emac->tx_chns[chn]; + + while (true) { + res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma); + if (res == -ENODATA) + break; + + /* teardown completion */ + if (cppi5_desc_is_tdcm(desc_dma)) { + if (atomic_dec_and_test(&emac->tdown_cnt)) + complete(&emac->tdown_complete); + *tdown = true; + break; + } + + desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, + desc_dma); 
+ swdata = cppi5_hdesc_get_swdata(desc_tx); + + /* was this command's TX complete? */ + if (emac->is_sr1 && *(swdata) == emac->cmd_data) { + prueth_xmit_free(tx_chn, desc_tx); + continue; + } + + skb = *(swdata); + prueth_xmit_free(tx_chn, desc_tx); + + ndev = skb->dev; + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += skb->len; + total_bytes += skb->len; + napi_consume_skb(skb, budget); + num_tx++; + } + + if (!num_tx) + return 0; + + netif_txq = netdev_get_tx_queue(ndev, chn); + netdev_tx_completed_queue(netif_txq, num_tx, total_bytes); + + if (netif_tx_queue_stopped(netif_txq)) { + /* If the TX queue was stopped, wake it now + * if we have enough room. + */ + __netif_tx_lock(netif_txq, smp_processor_id()); + if (netif_running(ndev) && + (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >= + MAX_SKB_FRAGS)) + netif_tx_wake_queue(netif_txq); + __netif_tx_unlock(netif_txq); + } + + return num_tx; +} + +static enum hrtimer_restart emac_tx_timer_callback(struct hrtimer *timer) +{ + struct prueth_tx_chn *tx_chns = + container_of(timer, struct prueth_tx_chn, tx_hrtimer); + + enable_irq(tx_chns->irq); + return HRTIMER_NORESTART; +} + +static int emac_napi_tx_poll(struct napi_struct *napi_tx, int budget) +{ + struct prueth_tx_chn *tx_chn = prueth_napi_to_tx_chn(napi_tx); + struct prueth_emac *emac = tx_chn->emac; + bool tdown = false; + int num_tx_packets; + + num_tx_packets = emac_tx_complete_packets(emac, tx_chn->id, budget, + &tdown); + + if (num_tx_packets >= budget) + return budget; + + if (napi_complete_done(napi_tx, num_tx_packets)) { + if (unlikely(tx_chn->tx_pace_timeout_ns && !tdown)) { + hrtimer_start(&tx_chn->tx_hrtimer, + ns_to_ktime(tx_chn->tx_pace_timeout_ns), + HRTIMER_MODE_REL_PINNED); + } else { + enable_irq(tx_chn->irq); + } + } + + return num_tx_packets; +} + +static irqreturn_t prueth_tx_irq(int irq, void *dev_id) +{ + struct prueth_tx_chn *tx_chn = dev_id; + + disable_irq_nosync(irq); + napi_schedule(&tx_chn->napi_tx); + + return IRQ_HANDLED; +} + +int prueth_ndev_add_tx_napi(struct prueth_emac *emac) +{ + struct prueth *prueth = emac->prueth; + int i, ret; + + for (i = 0; i < emac->tx_ch_num; i++) { + struct prueth_tx_chn *tx_chn = &emac->tx_chns[i]; + + netif_napi_add_tx(emac->ndev, &tx_chn->napi_tx, emac_napi_tx_poll); + hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL_PINNED); + tx_chn->tx_hrtimer.function = &emac_tx_timer_callback; + ret = request_irq(tx_chn->irq, prueth_tx_irq, + IRQF_TRIGGER_HIGH, tx_chn->name, + tx_chn); + if (ret) { + netif_napi_del(&tx_chn->napi_tx); + dev_err(prueth->dev, "unable to request TX IRQ %d\n", + tx_chn->irq); + goto fail; + } + } + + return 0; +fail: + prueth_ndev_del_tx_napi(emac, i); + return ret; +} + +int prueth_init_tx_chns(struct prueth_emac *emac) +{ + static const struct k3_ring_cfg ring_cfg = { + .elm_size = K3_RINGACC_RING_ELSIZE_8, + .mode = K3_RINGACC_RING_MODE_RING, + .flags = 0, + .size = PRUETH_MAX_TX_DESC, + }; + struct k3_udma_glue_tx_channel_cfg tx_cfg; + struct device *dev = emac->prueth->dev; + struct net_device *ndev = emac->ndev; + int ret, slice, i; + u32 hdesc_size; + + slice = prueth_emac_slice(emac); + if (slice < 0) + return slice; + + init_completion(&emac->tdown_complete); + + hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE, + PRUETH_NAV_SW_DATA_SIZE); + memset(&tx_cfg, 0, sizeof(tx_cfg)); + tx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE; + tx_cfg.tx_cfg = ring_cfg; + tx_cfg.txcq_cfg = ring_cfg; + + for (i = 0; i < emac->tx_ch_num; i++) { + struct prueth_tx_chn *tx_chn = 
&emac->tx_chns[i]; + + /* To differentiate channels for SLICE0 vs SLICE1 */ + snprintf(tx_chn->name, sizeof(tx_chn->name), + "tx%d-%d", slice, i); + + tx_chn->emac = emac; + tx_chn->id = i; + tx_chn->descs_num = PRUETH_MAX_TX_DESC; + + tx_chn->tx_chn = + k3_udma_glue_request_tx_chn(dev, tx_chn->name, + &tx_cfg); + if (IS_ERR(tx_chn->tx_chn)) { + ret = PTR_ERR(tx_chn->tx_chn); + tx_chn->tx_chn = NULL; + netdev_err(ndev, + "Failed to request tx dma ch: %d\n", ret); + goto fail; + } + + tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn); + tx_chn->desc_pool = + k3_cppi_desc_pool_create_name(tx_chn->dma_dev, + tx_chn->descs_num, + hdesc_size, + tx_chn->name); + if (IS_ERR(tx_chn->desc_pool)) { + ret = PTR_ERR(tx_chn->desc_pool); + tx_chn->desc_pool = NULL; + netdev_err(ndev, "Failed to create tx pool: %d\n", ret); + goto fail; + } + + ret = k3_udma_glue_tx_get_irq(tx_chn->tx_chn); + if (ret < 0) { + netdev_err(ndev, "failed to get tx irq\n"); + goto fail; + } + tx_chn->irq = ret; + + snprintf(tx_chn->name, sizeof(tx_chn->name), "%s-tx%d", + dev_name(dev), tx_chn->id); + } + + return 0; + +fail: + prueth_cleanup_tx_chns(emac); + return ret; +} + +int prueth_init_rx_chns(struct prueth_emac *emac, + struct prueth_rx_chn *rx_chn, + char *name, u32 max_rflows, + u32 max_desc_num) +{ + struct k3_udma_glue_rx_channel_cfg rx_cfg; + struct device *dev = emac->prueth->dev; + struct net_device *ndev = emac->ndev; + u32 fdqring_id, hdesc_size; + int i, ret = 0, slice; + int flow_id_base; + + slice = prueth_emac_slice(emac); + if (slice < 0) + return slice; + + /* To differentiate channels for SLICE0 vs SLICE1 */ + snprintf(rx_chn->name, sizeof(rx_chn->name), "%s%d", name, slice); + + hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE, + PRUETH_NAV_SW_DATA_SIZE); + memset(&rx_cfg, 0, sizeof(rx_cfg)); + rx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE; + rx_cfg.flow_id_num = max_rflows; + rx_cfg.flow_id_base = -1; /* udmax will auto select flow id base */ + + /* init all flows */ + rx_chn->dev = dev; + rx_chn->descs_num = max_desc_num; + + rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, rx_chn->name, + &rx_cfg); + if (IS_ERR(rx_chn->rx_chn)) { + ret = PTR_ERR(rx_chn->rx_chn); + rx_chn->rx_chn = NULL; + netdev_err(ndev, "Failed to request rx dma ch: %d\n", ret); + goto fail; + } + + rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn); + rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev, + rx_chn->descs_num, + hdesc_size, + rx_chn->name); + if (IS_ERR(rx_chn->desc_pool)) { + ret = PTR_ERR(rx_chn->desc_pool); + rx_chn->desc_pool = NULL; + netdev_err(ndev, "Failed to create rx pool: %d\n", ret); + goto fail; + } + + flow_id_base = k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn); + if (emac->is_sr1 && !strcmp(name, "rxmgm")) { + emac->rx_mgm_flow_id_base = flow_id_base; + netdev_dbg(ndev, "mgm flow id base = %d\n", flow_id_base); + } else { + emac->rx_flow_id_base = flow_id_base; + netdev_dbg(ndev, "flow id base = %d\n", flow_id_base); + } + + fdqring_id = K3_RINGACC_RING_ID_ANY; + for (i = 0; i < rx_cfg.flow_id_num; i++) { + struct k3_ring_cfg rxring_cfg = { + .elm_size = K3_RINGACC_RING_ELSIZE_8, + .mode = K3_RINGACC_RING_MODE_RING, + .flags = 0, + }; + struct k3_ring_cfg fdqring_cfg = { + .elm_size = K3_RINGACC_RING_ELSIZE_8, + .flags = K3_RINGACC_RING_SHARED, + }; + struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = { + .rx_cfg = rxring_cfg, + .rxfdq_cfg = fdqring_cfg, + .ring_rxq_id = K3_RINGACC_RING_ID_ANY, + .src_tag_lo_sel = + 
K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG, + }; + + rx_flow_cfg.ring_rxfdq0_id = fdqring_id; + rx_flow_cfg.rx_cfg.size = max_desc_num; + rx_flow_cfg.rxfdq_cfg.size = max_desc_num; + rx_flow_cfg.rxfdq_cfg.mode = emac->prueth->pdata.fdqring_mode; + + ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn, + i, &rx_flow_cfg); + if (ret) { + netdev_err(ndev, "Failed to init rx flow%d %d\n", + i, ret); + goto fail; + } + if (!i) + fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn, + i); + ret = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i); + if (ret <= 0) { + if (!ret) + ret = -ENXIO; + netdev_err(ndev, "Failed to get rx dma irq"); + goto fail; + } + rx_chn->irq[i] = ret; + } + + return 0; + +fail: + prueth_cleanup_rx_chns(emac, rx_chn, max_rflows); + return ret; +} + +int prueth_dma_rx_push(struct prueth_emac *emac, + struct sk_buff *skb, + struct prueth_rx_chn *rx_chn) +{ + struct net_device *ndev = emac->ndev; + struct cppi5_host_desc_t *desc_rx; + u32 pkt_len = skb_tailroom(skb); + dma_addr_t desc_dma; + dma_addr_t buf_dma; + void **swdata; + + desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool); + if (!desc_rx) { + netdev_err(ndev, "rx push: failed to allocate descriptor\n"); + return -ENOMEM; + } + desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx); + + buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) { + k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); + netdev_err(ndev, "rx push: failed to map rx pkt buffer\n"); + return -EINVAL; + } + + cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT, + PRUETH_NAV_PS_DATA_SIZE); + k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma); + cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb)); + + swdata = cppi5_hdesc_get_swdata(desc_rx); + *swdata = skb; + + return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0, + desc_rx, desc_dma); +} + +u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns) +{ + u32 iepcount_lo, iepcount_hi, hi_rollover_count; + u64 ns; + + iepcount_lo = lo & GENMASK(19, 0); + iepcount_hi = (hi & GENMASK(11, 0)) << 12 | lo >> 20; + hi_rollover_count = hi >> 11; + + ns = ((u64)hi_rollover_count) << 23 | (iepcount_hi + hi_sw); + ns = ns * cycle_time_ns + iepcount_lo; + + return ns; +} + +void emac_rx_timestamp(struct prueth_emac *emac, + struct sk_buff *skb, u32 *psdata) +{ + struct skb_shared_hwtstamps *ssh; + u64 ns; + + if (emac->is_sr1) { + ns = (u64)psdata[1] << 32 | psdata[0]; + } else { + u32 hi_sw = readl(emac->prueth->shram.va + + TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET); + ns = icssg_ts_to_ns(hi_sw, psdata[1], psdata[0], + IEP_DEFAULT_CYCLE_TIME_NS); + } + + ssh = skb_hwtstamps(skb); + memset(ssh, 0, sizeof(*ssh)); + ssh->hwtstamp = ns_to_ktime(ns); +} + +static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id) +{ + struct prueth_rx_chn *rx_chn = &emac->rx_chns; + u32 buf_dma_len, pkt_len, port_id = 0; + struct net_device *ndev = emac->ndev; + struct cppi5_host_desc_t *desc_rx; + struct sk_buff *skb, *new_skb; + dma_addr_t desc_dma, buf_dma; + void **swdata; + u32 *psdata; + int ret; + + ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma); + if (ret) { + if (ret != -ENODATA) + netdev_err(ndev, "rx pop: failed: %d\n", ret); + return ret; + } + + if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown ? 
*/ + return 0; + + desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma); + + swdata = cppi5_hdesc_get_swdata(desc_rx); + skb = *swdata; + + psdata = cppi5_hdesc_get_psdata(desc_rx); + /* RX HW timestamp */ + if (emac->rx_ts_enabled) + emac_rx_timestamp(emac, skb, psdata); + + cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len); + k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma); + pkt_len = cppi5_hdesc_get_pktlen(desc_rx); + /* firmware adds 4 CRC bytes, strip them */ + pkt_len -= 4; + cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL); + + dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE); + k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); + + skb->dev = ndev; + new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE); + /* if allocation fails we drop the packet but push the + * descriptor back to the ring with old skb to prevent a stall + */ + if (!new_skb) { + ndev->stats.rx_dropped++; + new_skb = skb; + } else { + /* send the filled skb up the n/w stack */ + skb_put(skb, pkt_len); + skb->protocol = eth_type_trans(skb, ndev); + napi_gro_receive(&emac->napi_rx, skb); + ndev->stats.rx_bytes += pkt_len; + ndev->stats.rx_packets++; + } + + /* queue another RX DMA */ + ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_chns); + if (WARN_ON(ret < 0)) { + dev_kfree_skb_any(new_skb); + ndev->stats.rx_errors++; + ndev->stats.rx_dropped++; + } + + return ret; +} + +static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma) +{ + struct prueth_rx_chn *rx_chn = data; + struct cppi5_host_desc_t *desc_rx; + struct sk_buff *skb; + dma_addr_t buf_dma; + u32 buf_dma_len; + void **swdata; + + desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma); + swdata = cppi5_hdesc_get_swdata(desc_rx); + skb = *swdata; + cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len); + k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma); + + dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, + DMA_FROM_DEVICE); + k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); + + dev_kfree_skb_any(skb); +} + +static int prueth_tx_ts_cookie_get(struct prueth_emac *emac) +{ + int i; + + /* search and get the next free slot */ + for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) { + if (!emac->tx_ts_skb[i]) { + emac->tx_ts_skb[i] = ERR_PTR(-EBUSY); /* reserve slot */ + return i; + } + } + + return -EBUSY; +} + +/** + * emac_ndo_start_xmit - EMAC Transmit function + * @skb: SKB pointer + * @ndev: EMAC network adapter + * + * Called by the system to transmit a packet - we queue the packet in + * EMAC hardware transmit queue + * Doesn't wait for completion we'll check for TX completion in + * emac_tx_complete_packets(). 
+ * + * Return: enum netdev_tx + */ +enum netdev_tx emac_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc; + struct prueth_emac *emac = netdev_priv(ndev); + struct netdev_queue *netif_txq; + struct prueth_tx_chn *tx_chn; + dma_addr_t desc_dma, buf_dma; + int i, ret = 0, q_idx; + bool in_tx_ts = 0; + int tx_ts_cookie; + void **swdata; + u32 pkt_len; + u32 *epib; + + pkt_len = skb_headlen(skb); + q_idx = skb_get_queue_mapping(skb); + + tx_chn = &emac->tx_chns[q_idx]; + netif_txq = netdev_get_tx_queue(ndev, q_idx); + + /* Map the linear buffer */ + buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len, DMA_TO_DEVICE); + if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) { + netdev_err(ndev, "tx: failed to map skb buffer\n"); + ret = NETDEV_TX_OK; + goto drop_free_skb; + } + + first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool); + if (!first_desc) { + netdev_dbg(ndev, "tx: failed to allocate descriptor\n"); + dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len, DMA_TO_DEVICE); + goto drop_stop_q_busy; + } + + cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT, + PRUETH_NAV_PS_DATA_SIZE); + cppi5_hdesc_set_pkttype(first_desc, 0); + epib = first_desc->epib; + epib[0] = 0; + epib[1] = 0; + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && + emac->tx_ts_enabled) { + tx_ts_cookie = prueth_tx_ts_cookie_get(emac); + if (tx_ts_cookie >= 0) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + /* Request TX timestamp */ + epib[0] = (u32)tx_ts_cookie; + epib[1] = 0x80000000; /* TX TS request */ + emac->tx_ts_skb[tx_ts_cookie] = skb_get(skb); + in_tx_ts = 1; + } + } + + /* set dst tag to indicate internal qid at the firmware which is at + * bit8..bit15. bit0..bit7 indicates port num for directed + * packets in case of switch mode operation + */ + cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8))); + k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma); + cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len); + swdata = cppi5_hdesc_get_swdata(first_desc); + *swdata = skb; + + /* Handle the case where skb is fragmented in pages */ + cur_desc = first_desc; + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + u32 frag_size = skb_frag_size(frag); + + next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool); + if (!next_desc) { + netdev_err(ndev, + "tx: failed to allocate frag. 
descriptor\n"); + goto free_desc_stop_q_busy_cleanup_tx_ts; + } + + buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size, + DMA_TO_DEVICE); + if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) { + netdev_err(ndev, "tx: Failed to map skb page\n"); + k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc); + ret = NETDEV_TX_OK; + goto cleanup_tx_ts; + } + + cppi5_hdesc_reset_hbdesc(next_desc); + k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma); + cppi5_hdesc_attach_buf(next_desc, + buf_dma, frag_size, buf_dma, frag_size); + + desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, + next_desc); + k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma); + cppi5_hdesc_link_hbdesc(cur_desc, desc_dma); + + pkt_len += frag_size; + cur_desc = next_desc; + } + WARN_ON_ONCE(pkt_len != skb->len); + + /* report bql before sending packet */ + netdev_tx_sent_queue(netif_txq, pkt_len); + + cppi5_hdesc_set_pktlen(first_desc, pkt_len); + desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc); + /* cppi5_desc_dump(first_desc, 64); */ + + skb_tx_timestamp(skb); /* SW timestamp if SKBTX_IN_PROGRESS not set */ + ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma); + if (ret) { + netdev_err(ndev, "tx: push failed: %d\n", ret); + goto drop_free_descs; + } + + if (in_tx_ts) + atomic_inc(&emac->tx_ts_pending); + + if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) { + netif_tx_stop_queue(netif_txq); + /* Barrier, so that stop_queue visible to other cpus */ + smp_mb__after_atomic(); + + if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >= + MAX_SKB_FRAGS) + netif_tx_wake_queue(netif_txq); + } + + return NETDEV_TX_OK; + +cleanup_tx_ts: + if (in_tx_ts) { + dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]); + emac->tx_ts_skb[tx_ts_cookie] = NULL; + } + +drop_free_descs: + prueth_xmit_free(tx_chn, first_desc); + +drop_free_skb: + dev_kfree_skb_any(skb); + + /* error */ + ndev->stats.tx_dropped++; + netdev_err(ndev, "tx: error: %d\n", ret); + + return ret; + +free_desc_stop_q_busy_cleanup_tx_ts: + if (in_tx_ts) { + dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]); + emac->tx_ts_skb[tx_ts_cookie] = NULL; + } + prueth_xmit_free(tx_chn, first_desc); + +drop_stop_q_busy: + netif_tx_stop_queue(netif_txq); + return NETDEV_TX_BUSY; +} + +static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma) +{ + struct prueth_tx_chn *tx_chn = data; + struct cppi5_host_desc_t *desc_tx; + struct sk_buff *skb; + void **swdata; + + desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma); + swdata = cppi5_hdesc_get_swdata(desc_tx); + skb = *(swdata); + prueth_xmit_free(tx_chn, desc_tx); + + dev_kfree_skb_any(skb); +} + +irqreturn_t prueth_rx_irq(int irq, void *dev_id) +{ + struct prueth_emac *emac = dev_id; + + disable_irq_nosync(irq); + napi_schedule(&emac->napi_rx); + + return IRQ_HANDLED; +} + +void prueth_emac_stop(struct prueth_emac *emac) +{ + struct prueth *prueth = emac->prueth; + int slice; + + switch (emac->port_id) { + case PRUETH_PORT_MII0: + slice = ICSS_SLICE0; + break; + case PRUETH_PORT_MII1: + slice = ICSS_SLICE1; + break; + default: + netdev_err(emac->ndev, "invalid port\n"); + return; + } + + emac->fw_running = 0; + if (!emac->is_sr1) + rproc_shutdown(prueth->txpru[slice]); + rproc_shutdown(prueth->rtu[slice]); + rproc_shutdown(prueth->pru[slice]); +} + +void prueth_cleanup_tx_ts(struct prueth_emac *emac) +{ + int i; + + for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) { + if (emac->tx_ts_skb[i]) { + dev_kfree_skb_any(emac->tx_ts_skb[i]); + 
emac->tx_ts_skb[i] = NULL; + } + } +} + +int emac_napi_rx_poll(struct napi_struct *napi_rx, int budget) +{ + struct prueth_emac *emac = prueth_napi_to_emac(napi_rx); + int rx_flow = emac->is_sr1 ? + PRUETH_RX_FLOW_DATA_SR1 : PRUETH_RX_FLOW_DATA; + int flow = emac->is_sr1 ? + PRUETH_MAX_RX_FLOWS_SR1 : PRUETH_MAX_RX_FLOWS; + int num_rx = 0; + int cur_budget; + int ret; + + while (flow--) { + cur_budget = budget - num_rx; + + while (cur_budget--) { + ret = emac_rx_packet(emac, flow); + if (ret) + break; + num_rx++; + } + + if (num_rx >= budget) + break; + } + + if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) { + if (unlikely(emac->rx_pace_timeout_ns)) { + hrtimer_start(&emac->rx_hrtimer, + ns_to_ktime(emac->rx_pace_timeout_ns), + HRTIMER_MODE_REL_PINNED); + } else { + enable_irq(emac->rx_chns.irq[rx_flow]); + } + } + + return num_rx; +} + +int prueth_prepare_rx_chan(struct prueth_emac *emac, + struct prueth_rx_chn *chn, + int buf_size) +{ + struct sk_buff *skb; + int i, ret; + + for (i = 0; i < chn->descs_num; i++) { + skb = __netdev_alloc_skb_ip_align(NULL, buf_size, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + ret = prueth_dma_rx_push(emac, skb, chn); + if (ret < 0) { + netdev_err(emac->ndev, + "cannot submit skb for rx chan %s ret %d\n", + chn->name, ret); + kfree_skb(skb); + return ret; + } + } + + return 0; +} + +void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num, + bool free_skb) +{ + int i; + + for (i = 0; i < ch_num; i++) { + if (free_skb) + k3_udma_glue_reset_tx_chn(emac->tx_chns[i].tx_chn, + &emac->tx_chns[i], + prueth_tx_cleanup); + k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn); + } +} + +void prueth_reset_rx_chan(struct prueth_rx_chn *chn, + int num_flows, bool disable) +{ + int i; + + for (i = 0; i < num_flows; i++) + k3_udma_glue_reset_rx_chn(chn->rx_chn, i, chn, + prueth_rx_cleanup, !!i); + if (disable) + k3_udma_glue_disable_rx_chn(chn->rx_chn); +} + +void emac_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue) +{ + ndev->stats.tx_errors++; +} + +static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr) +{ + struct prueth_emac *emac = netdev_priv(ndev); + struct hwtstamp_config config; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + emac->tx_ts_enabled = 0; + break; + case HWTSTAMP_TX_ON: + emac->tx_ts_enabled = 1; + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + emac->rx_ts_enabled = 0; + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_NTP_ALL: + emac->rx_ts_enabled = 1; + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +static int emac_get_ts_config(struct net_device *ndev, struct ifreq *ifr) +{ + struct prueth_emac *emac = netdev_priv(ndev); + struct hwtstamp_config config; + + config.flags = 0; + config.tx_type = emac->tx_ts_enabled ? 
HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + config.rx_filter = emac->rx_ts_enabled ? HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +int emac_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGHWTSTAMP: + return emac_get_ts_config(ndev, ifr); + case SIOCSHWTSTAMP: + return emac_set_ts_config(ndev, ifr); + default: + break; + } + + return phy_do_ioctl(ndev, ifr, cmd); +} + +void emac_ndo_get_stats64(struct net_device *ndev, + struct rtnl_link_stats64 *stats) +{ + struct prueth_emac *emac = netdev_priv(ndev); + + emac_update_hardware_stats(emac); + + stats->rx_packets = emac_get_stat_by_name(emac, "rx_packets"); + stats->rx_bytes = emac_get_stat_by_name(emac, "rx_bytes"); + stats->tx_packets = emac_get_stat_by_name(emac, "tx_packets"); + stats->tx_bytes = emac_get_stat_by_name(emac, "tx_bytes"); + stats->rx_crc_errors = emac_get_stat_by_name(emac, "rx_crc_errors"); + stats->rx_over_errors = emac_get_stat_by_name(emac, "rx_over_errors"); + stats->multicast = emac_get_stat_by_name(emac, "rx_multicast_frames"); + + stats->rx_errors = ndev->stats.rx_errors; + stats->rx_dropped = ndev->stats.rx_dropped; + stats->tx_errors = ndev->stats.tx_errors; + stats->tx_dropped = ndev->stats.tx_dropped; +} + +int emac_ndo_get_phys_port_name(struct net_device *ndev, char *name, + size_t len) +{ + struct prueth_emac *emac = netdev_priv(ndev); + int ret; + + ret = snprintf(name, len, "p%d", emac->port_id); + if (ret >= len) + return -EINVAL; + + return 0; +} + +/* get emac_port corresponding to eth_node name */ +int prueth_node_port(struct device_node *eth_node) +{ + u32 port_id; + int ret; + + ret = of_property_read_u32(eth_node, "reg", &port_id); + if (ret) + return ret; + + if (port_id == 0) + return PRUETH_PORT_MII0; + else if (port_id == 1) + return PRUETH_PORT_MII1; + else + return PRUETH_PORT_INVALID; +} + +/* get MAC instance corresponding to eth_node name */ +int prueth_node_mac(struct device_node *eth_node) +{ + u32 port_id; + int ret; + + ret = of_property_read_u32(eth_node, "reg", &port_id); + if (ret) + return ret; + + if (port_id == 0) + return PRUETH_MAC0; + else if (port_id == 1) + return PRUETH_MAC1; + else + return PRUETH_MAC_INVALID; +} + +void prueth_netdev_exit(struct prueth *prueth, + struct device_node *eth_node) +{ + struct prueth_emac *emac; + enum prueth_mac mac; + + mac = prueth_node_mac(eth_node); + if (mac == PRUETH_MAC_INVALID) + return; + + emac = prueth->emac[mac]; + if (!emac) + return; + + if (of_phy_is_fixed_link(emac->phy_node)) + of_phy_deregister_fixed_link(emac->phy_node); + + netif_napi_del(&emac->napi_rx); + + pruss_release_mem_region(prueth->pruss, &emac->dram); + destroy_workqueue(emac->cmd_wq); + free_netdev(emac->ndev); + prueth->emac[mac] = NULL; +} + +int prueth_get_cores(struct prueth *prueth, int slice, bool is_sr1) +{ + struct device *dev = prueth->dev; + enum pruss_pru_id pruss_id; + struct device_node *np; + int idx = -1, ret; + + np = dev->of_node; + + switch (slice) { + case ICSS_SLICE0: + idx = 0; + break; + case ICSS_SLICE1: + idx = is_sr1 ? 
2 : 3; + break; + default: + return -EINVAL; + } + + prueth->pru[slice] = pru_rproc_get(np, idx, &pruss_id); + if (IS_ERR(prueth->pru[slice])) { + ret = PTR_ERR(prueth->pru[slice]); + prueth->pru[slice] = NULL; + return dev_err_probe(dev, ret, "unable to get PRU%d\n", slice); + } + prueth->pru_id[slice] = pruss_id; + + idx++; + prueth->rtu[slice] = pru_rproc_get(np, idx, NULL); + if (IS_ERR(prueth->rtu[slice])) { + ret = PTR_ERR(prueth->rtu[slice]); + prueth->rtu[slice] = NULL; + return dev_err_probe(dev, ret, "unable to get RTU%d\n", slice); + } + + if (is_sr1) + return 0; + + idx++; + prueth->txpru[slice] = pru_rproc_get(np, idx, NULL); + if (IS_ERR(prueth->txpru[slice])) { + ret = PTR_ERR(prueth->txpru[slice]); + prueth->txpru[slice] = NULL; + return dev_err_probe(dev, ret, "unable to get TX_PRU%d\n", slice); + } + + return 0; +} + +void prueth_put_cores(struct prueth *prueth, int slice) +{ + if (prueth->txpru[slice]) + pru_rproc_put(prueth->txpru[slice]); + + if (prueth->rtu[slice]) + pru_rproc_put(prueth->rtu[slice]); + + if (prueth->pru[slice]) + pru_rproc_put(prueth->pru[slice]); +} + +#ifdef CONFIG_PM_SLEEP +static int prueth_suspend(struct device *dev) +{ + struct prueth *prueth = dev_get_drvdata(dev); + struct net_device *ndev; + int i, ret; + + for (i = 0; i < PRUETH_NUM_MACS; i++) { + ndev = prueth->registered_netdevs[i]; + + if (!ndev) + continue; + + if (netif_running(ndev)) { + netif_device_detach(ndev); + ret = ndev->netdev_ops->ndo_stop(ndev); + if (ret < 0) { + netdev_err(ndev, "failed to stop: %d", ret); + return ret; + } + } + } + + return 0; +} + +static int prueth_resume(struct device *dev) +{ + struct prueth *prueth = dev_get_drvdata(dev); + struct net_device *ndev; + int i, ret; + + for (i = 0; i < PRUETH_NUM_MACS; i++) { + ndev = prueth->registered_netdevs[i]; + + if (!ndev) + continue; + + if (netif_running(ndev)) { + ret = ndev->netdev_ops->ndo_open(ndev); + if (ret < 0) { + netdev_err(ndev, "failed to start: %d", ret); + return ret; + } + netif_device_attach(ndev); + } + } + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +const struct dev_pm_ops prueth_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(prueth_suspend, prueth_resume) +}; diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c index 99de8a40ed60..15f2235bf90f 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_config.c +++ b/drivers/net/ethernet/ti/icssg/icssg_config.c @@ -20,6 +20,8 @@ /* IPG is in core_clk cycles */ #define MII_RT_TX_IPG_100M 0x17 #define MII_RT_TX_IPG_1G 0xb +#define MII_RT_TX_IPG_100M_SR1 0x166 +#define MII_RT_TX_IPG_1G_SR1 0x1a #define ICSSG_QUEUES_MAX 64 #define ICSSG_QUEUE_OFFSET 0xd00 @@ -202,23 +204,29 @@ void icssg_config_ipg(struct prueth_emac *emac) { struct prueth *prueth = emac->prueth; int slice = prueth_emac_slice(emac); + u32 ipg; switch (emac->speed) { case SPEED_1000: - icssg_mii_update_ipg(prueth->mii_rt, slice, MII_RT_TX_IPG_1G); + ipg = emac->is_sr1 ? MII_RT_TX_IPG_1G_SR1 : MII_RT_TX_IPG_1G; break; case SPEED_100: - icssg_mii_update_ipg(prueth->mii_rt, slice, MII_RT_TX_IPG_100M); + ipg = emac->is_sr1 ? 
MII_RT_TX_IPG_100M_SR1 : MII_RT_TX_IPG_100M; break; case SPEED_10: + /* Firmware hardcodes IPG for SR1.0 */ + if (emac->is_sr1) + return; /* IPG for 10M is same as 100M */ - icssg_mii_update_ipg(prueth->mii_rt, slice, MII_RT_TX_IPG_100M); + ipg = MII_RT_TX_IPG_100M; break; default: /* Other links speeds not supported */ netdev_err(emac->ndev, "Unsupported link speed\n"); return; } + + icssg_mii_update_ipg(prueth->mii_rt, slice, ipg); } static void emac_r30_cmd_init(struct prueth_emac *emac) diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.h b/drivers/net/ethernet/ti/icssg/icssg_config.h index 43eb0922172a..cf2ea4bd22a2 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_config.h +++ b/drivers/net/ethernet/ti/icssg/icssg_config.h @@ -109,6 +109,62 @@ enum icssg_port_state_cmd { #define ICSSG_FLAG_MASK 0xff00ffff +/* SR1.0-specific bits */ +#define PRUETH_MAX_RX_FLOWS_SR1 4 /* excluding default flow */ +#define PRUETH_RX_FLOW_DATA_SR1 3 /* highest priority flow */ +#define PRUETH_MAX_RX_MGM_DESC_SR1 8 +#define PRUETH_MAX_RX_MGM_FLOWS_SR1 2 /* excluding default flow */ +#define PRUETH_RX_MGM_FLOW_RESPONSE_SR1 0 +#define PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1 1 + +#define PRUETH_NUM_BUF_POOLS_SR1 16 +#define PRUETH_EMAC_BUF_POOL_START_SR1 8 +#define PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1 128 +#define PRUETH_EMAC_BUF_SIZE_SR1 1536 +#define PRUETH_EMAC_NUM_BUF_SR1 4 +#define PRUETH_EMAC_BUF_POOL_SIZE_SR1 (PRUETH_EMAC_NUM_BUF_SR1 * \ + PRUETH_EMAC_BUF_SIZE_SR1) +#define MSMC_RAM_SIZE_SR1 (SZ_64K + SZ_32K + SZ_2K) /* 0x1880 x 8 x 2 */ + +struct icssg_sr1_config { + __le32 status; /* Firmware status */ + __le32 addr_lo; /* MSMC Buffer pool base address low. */ + __le32 addr_hi; /* MSMC Buffer pool base address high. Must be 0 */ + __le32 tx_buf_sz[16]; /* Array of buffer pool sizes */ + __le32 num_tx_threads; /* Number of active egress threads, 1 to 4 */ + __le32 tx_rate_lim_en; /* Bitmask: Egress rate limit en per thread */ + __le32 rx_flow_id; /* RX flow id for first rx ring */ + __le32 rx_mgr_flow_id; /* RX flow id for the first management ring */ + __le32 flags; /* TBD */ + __le32 n_burst; /* for debug */ + __le32 rtu_status; /* RTU status */ + __le32 info; /* reserved */ + __le32 reserve; + __le32 rand_seed; /* Used for the random number generation at fw */ +} __packed; + +/* SR1.0 shutdown command to stop processing at firmware. + * Command format: 0x8101ss00, where + * - ss: sequence number. Currently not used by driver. + */ +#define ICSSG_SHUTDOWN_CMD_SR1 0x81010000 + +/* SR1.0 pstate speed/duplex command to set speed and duplex settings + * in firmware. + * Command format: 0x8102ssPN, where + * - ss: sequence number. Currently not used by driver. + * - P: port number (for switch mode). + * - N: Speed/Duplex state: + * 0x0 - 10Mbps/Half duplex; + * 0x8 - 10Mbps/Full duplex; + * 0x2 - 100Mbps/Half duplex; + * 0xa - 100Mbps/Full duplex; + * 0xc - 1Gbps/Full duplex; + * NOTE: The above are the same value as bits [3..1](slice 0) + * or bits [7..5](slice 1) of RGMII CFG register. 
+ */ +#define ICSSG_PSTATE_SPEED_DUPLEX_CMD_SR1 0x81020000 + struct icssg_setclock_desc { u8 request; u8 restore; diff --git a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c index 9a7dd7efcf69..c8d0f45cc5b1 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c +++ b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c @@ -142,6 +142,9 @@ static int emac_set_channels(struct net_device *ndev, emac->tx_ch_num = ch->tx_count; + if (emac->is_sr1) + emac->tx_ch_num++; + return 0; } @@ -152,8 +155,17 @@ static void emac_get_channels(struct net_device *ndev, ch->max_rx = 1; ch->max_tx = PRUETH_MAX_TX_QUEUES; + + /* Disable multiple TX channels due to timeouts + * when using more than one queue */ + if (emac->is_sr1) + ch->max_tx = 1; + ch->rx_count = 1; ch->tx_count = emac->tx_ch_num; + + if (emac->is_sr1) + ch->tx_count--; } static const struct ethtool_rmon_hist_range emac_rmon_ranges[] = { @@ -189,6 +201,93 @@ static void emac_get_rmon_stats(struct net_device *ndev, rmon_stats->hist_tx[4] = emac_get_stat_by_name(emac, "tx_bucket5_frames"); } +static int emac_get_coalesce(struct net_device *ndev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct prueth_emac *emac = netdev_priv(ndev); + struct prueth_tx_chn *tx_chn; + + tx_chn = &emac->tx_chns[0]; + + coal->rx_coalesce_usecs = emac->rx_pace_timeout_ns / 1000; + coal->tx_coalesce_usecs = tx_chn->tx_pace_timeout_ns / 1000; + + return 0; +} + +static int emac_get_per_queue_coalesce(struct net_device *ndev, u32 queue, + struct ethtool_coalesce *coal) +{ + struct prueth_emac *emac = netdev_priv(ndev); + struct prueth_tx_chn *tx_chn; + + if (queue >= PRUETH_MAX_TX_QUEUES) + return -EINVAL; + + tx_chn = &emac->tx_chns[queue]; + + coal->tx_coalesce_usecs = tx_chn->tx_pace_timeout_ns / 1000; + + return 0; +} + +static int emac_set_coalesce(struct net_device *ndev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct prueth_emac *emac = netdev_priv(ndev); + struct prueth *prueth = emac->prueth; + struct prueth_tx_chn *tx_chn; + + tx_chn = &emac->tx_chns[0]; + + if (coal->rx_coalesce_usecs && + coal->rx_coalesce_usecs < ICSSG_MIN_COALESCE_USECS) { + dev_info(prueth->dev, "defaulting to min value of %dus for rx-usecs\n", + ICSSG_MIN_COALESCE_USECS); + coal->rx_coalesce_usecs = ICSSG_MIN_COALESCE_USECS; + } + + if (coal->tx_coalesce_usecs && + coal->tx_coalesce_usecs < ICSSG_MIN_COALESCE_USECS) { + dev_info(prueth->dev, "defaulting to min value of %dus for tx-usecs\n", + ICSSG_MIN_COALESCE_USECS); + coal->tx_coalesce_usecs = ICSSG_MIN_COALESCE_USECS; + } + + emac->rx_pace_timeout_ns = coal->rx_coalesce_usecs * 1000; + tx_chn->tx_pace_timeout_ns = coal->tx_coalesce_usecs * 1000; + + return 0; +} + +static int emac_set_per_queue_coalesce(struct net_device *ndev, u32 queue, + struct ethtool_coalesce *coal) +{ + struct prueth_emac *emac = netdev_priv(ndev); + struct prueth *prueth = emac->prueth; + struct prueth_tx_chn *tx_chn; + + if (queue >= PRUETH_MAX_TX_QUEUES) + return -EINVAL; + + tx_chn = &emac->tx_chns[queue]; + + if (coal->tx_coalesce_usecs && + coal->tx_coalesce_usecs < ICSSG_MIN_COALESCE_USECS) { + dev_info(prueth->dev, "defaulting to min value of %dus for tx-usecs for tx-%u\n", + ICSSG_MIN_COALESCE_USECS, queue); + coal->tx_coalesce_usecs = ICSSG_MIN_COALESCE_USECS; + } + + tx_chn->tx_pace_timeout_ns = coal->tx_coalesce_usecs * 1000; + + return 0; +} + const 
struct ethtool_ops icssg_ethtool_ops = { .get_drvinfo = emac_get_drvinfo, .get_msglevel = emac_get_msglevel, @@ -197,6 +296,12 @@ const struct ethtool_ops icssg_ethtool_ops = { .get_ethtool_stats = emac_get_ethtool_stats, .get_strings = emac_get_strings, .get_ts_info = emac_get_ts_info, + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | + ETHTOOL_COALESCE_TX_USECS, + .get_coalesce = emac_get_coalesce, + .set_coalesce = emac_set_coalesce, + .get_per_queue_coalesce = emac_get_per_queue_coalesce, + .set_per_queue_coalesce = emac_set_per_queue_coalesce, .get_channels = emac_get_channels, .set_channels = emac_set_channels, .get_link_ksettings = emac_get_link_ksettings, diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c index b69af69a1ccd..7c9e9518f555 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -34,570 +34,9 @@ #define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG Ethernet driver" -/* Netif debug messages possible */ -#define PRUETH_EMAC_DEBUG (NETIF_MSG_DRV | \ - NETIF_MSG_PROBE | \ - NETIF_MSG_LINK | \ - NETIF_MSG_TIMER | \ - NETIF_MSG_IFDOWN | \ - NETIF_MSG_IFUP | \ - NETIF_MSG_RX_ERR | \ - NETIF_MSG_TX_ERR | \ - NETIF_MSG_TX_QUEUED | \ - NETIF_MSG_INTR | \ - NETIF_MSG_TX_DONE | \ - NETIF_MSG_RX_STATUS | \ - NETIF_MSG_PKTDATA | \ - NETIF_MSG_HW | \ - NETIF_MSG_WOL) - -#define prueth_napi_to_emac(napi) container_of(napi, struct prueth_emac, napi_rx) - /* CTRLMMR_ICSSG_RGMII_CTRL register bits */ #define ICSSG_CTRL_RGMII_ID_MODE BIT(24) -#define IEP_DEFAULT_CYCLE_TIME_NS 1000000 /* 1 ms */ - -static void prueth_cleanup_rx_chns(struct prueth_emac *emac, - struct prueth_rx_chn *rx_chn, - int max_rflows) -{ - if (rx_chn->desc_pool) - k3_cppi_desc_pool_destroy(rx_chn->desc_pool); - - if (rx_chn->rx_chn) - k3_udma_glue_release_rx_chn(rx_chn->rx_chn); -} - -static void prueth_cleanup_tx_chns(struct prueth_emac *emac) -{ - int i; - - for (i = 0; i < emac->tx_ch_num; i++) { - struct prueth_tx_chn *tx_chn = &emac->tx_chns[i]; - - if (tx_chn->desc_pool) - k3_cppi_desc_pool_destroy(tx_chn->desc_pool); - - if (tx_chn->tx_chn) - k3_udma_glue_release_tx_chn(tx_chn->tx_chn); - - /* Assume prueth_cleanup_tx_chns() is called at the - * end after all channel resources are freed - */ - memset(tx_chn, 0, sizeof(*tx_chn)); - } -} - -static void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num) -{ - int i; - - for (i = 0; i < num; i++) { - struct prueth_tx_chn *tx_chn = &emac->tx_chns[i]; - - if (tx_chn->irq) - free_irq(tx_chn->irq, tx_chn); - netif_napi_del(&tx_chn->napi_tx); - } -} - -static void prueth_xmit_free(struct prueth_tx_chn *tx_chn, - struct cppi5_host_desc_t *desc) -{ - struct cppi5_host_desc_t *first_desc, *next_desc; - dma_addr_t buf_dma, next_desc_dma; - u32 buf_dma_len; - - first_desc = desc; - next_desc = first_desc; - - cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len); - k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma); - - dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len, - DMA_TO_DEVICE); - - next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc); - k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma); - while (next_desc_dma) { - next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, - next_desc_dma); - cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len); - k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma); - - dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len, - DMA_TO_DEVICE); - - next_desc_dma = 
cppi5_hdesc_get_next_hbdesc(next_desc); - k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma); - - k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc); - } - - k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc); -} - -static int emac_tx_complete_packets(struct prueth_emac *emac, int chn, - int budget) -{ - struct net_device *ndev = emac->ndev; - struct cppi5_host_desc_t *desc_tx; - struct netdev_queue *netif_txq; - struct prueth_tx_chn *tx_chn; - unsigned int total_bytes = 0; - struct sk_buff *skb; - dma_addr_t desc_dma; - int res, num_tx = 0; - void **swdata; - - tx_chn = &emac->tx_chns[chn]; - - while (true) { - res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma); - if (res == -ENODATA) - break; - - /* teardown completion */ - if (cppi5_desc_is_tdcm(desc_dma)) { - if (atomic_dec_and_test(&emac->tdown_cnt)) - complete(&emac->tdown_complete); - break; - } - - desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, - desc_dma); - swdata = cppi5_hdesc_get_swdata(desc_tx); - - skb = *(swdata); - prueth_xmit_free(tx_chn, desc_tx); - - ndev = skb->dev; - ndev->stats.tx_packets++; - ndev->stats.tx_bytes += skb->len; - total_bytes += skb->len; - napi_consume_skb(skb, budget); - num_tx++; - } - - if (!num_tx) - return 0; - - netif_txq = netdev_get_tx_queue(ndev, chn); - netdev_tx_completed_queue(netif_txq, num_tx, total_bytes); - - if (netif_tx_queue_stopped(netif_txq)) { - /* If the TX queue was stopped, wake it now - * if we have enough room. - */ - __netif_tx_lock(netif_txq, smp_processor_id()); - if (netif_running(ndev) && - (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >= - MAX_SKB_FRAGS)) - netif_tx_wake_queue(netif_txq); - __netif_tx_unlock(netif_txq); - } - - return num_tx; -} - -static int emac_napi_tx_poll(struct napi_struct *napi_tx, int budget) -{ - struct prueth_tx_chn *tx_chn = prueth_napi_to_tx_chn(napi_tx); - struct prueth_emac *emac = tx_chn->emac; - int num_tx_packets; - - num_tx_packets = emac_tx_complete_packets(emac, tx_chn->id, budget); - - if (num_tx_packets >= budget) - return budget; - - if (napi_complete_done(napi_tx, num_tx_packets)) - enable_irq(tx_chn->irq); - - return num_tx_packets; -} - -static irqreturn_t prueth_tx_irq(int irq, void *dev_id) -{ - struct prueth_tx_chn *tx_chn = dev_id; - - disable_irq_nosync(irq); - napi_schedule(&tx_chn->napi_tx); - - return IRQ_HANDLED; -} - -static int prueth_ndev_add_tx_napi(struct prueth_emac *emac) -{ - struct prueth *prueth = emac->prueth; - int i, ret; - - for (i = 0; i < emac->tx_ch_num; i++) { - struct prueth_tx_chn *tx_chn = &emac->tx_chns[i]; - - netif_napi_add_tx(emac->ndev, &tx_chn->napi_tx, emac_napi_tx_poll); - ret = request_irq(tx_chn->irq, prueth_tx_irq, - IRQF_TRIGGER_HIGH, tx_chn->name, - tx_chn); - if (ret) { - netif_napi_del(&tx_chn->napi_tx); - dev_err(prueth->dev, "unable to request TX IRQ %d\n", - tx_chn->irq); - goto fail; - } - } - - return 0; -fail: - prueth_ndev_del_tx_napi(emac, i); - return ret; -} - -static int prueth_init_tx_chns(struct prueth_emac *emac) -{ - static const struct k3_ring_cfg ring_cfg = { - .elm_size = K3_RINGACC_RING_ELSIZE_8, - .mode = K3_RINGACC_RING_MODE_RING, - .flags = 0, - .size = PRUETH_MAX_TX_DESC, - }; - struct k3_udma_glue_tx_channel_cfg tx_cfg; - struct device *dev = emac->prueth->dev; - struct net_device *ndev = emac->ndev; - int ret, slice, i; - u32 hdesc_size; - - slice = prueth_emac_slice(emac); - if (slice < 0) - return slice; - - init_completion(&emac->tdown_complete); - - hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE, - 
PRUETH_NAV_SW_DATA_SIZE); - memset(&tx_cfg, 0, sizeof(tx_cfg)); - tx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE; - tx_cfg.tx_cfg = ring_cfg; - tx_cfg.txcq_cfg = ring_cfg; - - for (i = 0; i < emac->tx_ch_num; i++) { - struct prueth_tx_chn *tx_chn = &emac->tx_chns[i]; - - /* To differentiate channels for SLICE0 vs SLICE1 */ - snprintf(tx_chn->name, sizeof(tx_chn->name), - "tx%d-%d", slice, i); - - tx_chn->emac = emac; - tx_chn->id = i; - tx_chn->descs_num = PRUETH_MAX_TX_DESC; - - tx_chn->tx_chn = - k3_udma_glue_request_tx_chn(dev, tx_chn->name, - &tx_cfg); - if (IS_ERR(tx_chn->tx_chn)) { - ret = PTR_ERR(tx_chn->tx_chn); - tx_chn->tx_chn = NULL; - netdev_err(ndev, - "Failed to request tx dma ch: %d\n", ret); - goto fail; - } - - tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn); - tx_chn->desc_pool = - k3_cppi_desc_pool_create_name(tx_chn->dma_dev, - tx_chn->descs_num, - hdesc_size, - tx_chn->name); - if (IS_ERR(tx_chn->desc_pool)) { - ret = PTR_ERR(tx_chn->desc_pool); - tx_chn->desc_pool = NULL; - netdev_err(ndev, "Failed to create tx pool: %d\n", ret); - goto fail; - } - - ret = k3_udma_glue_tx_get_irq(tx_chn->tx_chn); - if (ret < 0) { - netdev_err(ndev, "failed to get tx irq\n"); - goto fail; - } - tx_chn->irq = ret; - - snprintf(tx_chn->name, sizeof(tx_chn->name), "%s-tx%d", - dev_name(dev), tx_chn->id); - } - - return 0; - -fail: - prueth_cleanup_tx_chns(emac); - return ret; -} - -static int prueth_init_rx_chns(struct prueth_emac *emac, - struct prueth_rx_chn *rx_chn, - char *name, u32 max_rflows, - u32 max_desc_num) -{ - struct k3_udma_glue_rx_channel_cfg rx_cfg; - struct device *dev = emac->prueth->dev; - struct net_device *ndev = emac->ndev; - u32 fdqring_id, hdesc_size; - int i, ret = 0, slice; - - slice = prueth_emac_slice(emac); - if (slice < 0) - return slice; - - /* To differentiate channels for SLICE0 vs SLICE1 */ - snprintf(rx_chn->name, sizeof(rx_chn->name), "%s%d", name, slice); - - hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE, - PRUETH_NAV_SW_DATA_SIZE); - memset(&rx_cfg, 0, sizeof(rx_cfg)); - rx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE; - rx_cfg.flow_id_num = max_rflows; - rx_cfg.flow_id_base = -1; /* udmax will auto select flow id base */ - - /* init all flows */ - rx_chn->dev = dev; - rx_chn->descs_num = max_desc_num; - - rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, rx_chn->name, - &rx_cfg); - if (IS_ERR(rx_chn->rx_chn)) { - ret = PTR_ERR(rx_chn->rx_chn); - rx_chn->rx_chn = NULL; - netdev_err(ndev, "Failed to request rx dma ch: %d\n", ret); - goto fail; - } - - rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn); - rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev, - rx_chn->descs_num, - hdesc_size, - rx_chn->name); - if (IS_ERR(rx_chn->desc_pool)) { - ret = PTR_ERR(rx_chn->desc_pool); - rx_chn->desc_pool = NULL; - netdev_err(ndev, "Failed to create rx pool: %d\n", ret); - goto fail; - } - - emac->rx_flow_id_base = k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn); - netdev_dbg(ndev, "flow id base = %d\n", emac->rx_flow_id_base); - - fdqring_id = K3_RINGACC_RING_ID_ANY; - for (i = 0; i < rx_cfg.flow_id_num; i++) { - struct k3_ring_cfg rxring_cfg = { - .elm_size = K3_RINGACC_RING_ELSIZE_8, - .mode = K3_RINGACC_RING_MODE_RING, - .flags = 0, - }; - struct k3_ring_cfg fdqring_cfg = { - .elm_size = K3_RINGACC_RING_ELSIZE_8, - .flags = K3_RINGACC_RING_SHARED, - }; - struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = { - .rx_cfg = rxring_cfg, - .rxfdq_cfg = fdqring_cfg, - .ring_rxq_id = K3_RINGACC_RING_ID_ANY, - 
.src_tag_lo_sel = - K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG, - }; - - rx_flow_cfg.ring_rxfdq0_id = fdqring_id; - rx_flow_cfg.rx_cfg.size = max_desc_num; - rx_flow_cfg.rxfdq_cfg.size = max_desc_num; - rx_flow_cfg.rxfdq_cfg.mode = emac->prueth->pdata.fdqring_mode; - - ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn, - i, &rx_flow_cfg); - if (ret) { - netdev_err(ndev, "Failed to init rx flow%d %d\n", - i, ret); - goto fail; - } - if (!i) - fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn, - i); - ret = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i); - if (ret <= 0) { - if (!ret) - ret = -ENXIO; - netdev_err(ndev, "Failed to get rx dma irq"); - goto fail; - } - rx_chn->irq[i] = ret; - } - - return 0; - -fail: - prueth_cleanup_rx_chns(emac, rx_chn, max_rflows); - return ret; -} - -static int prueth_dma_rx_push(struct prueth_emac *emac, - struct sk_buff *skb, - struct prueth_rx_chn *rx_chn) -{ - struct net_device *ndev = emac->ndev; - struct cppi5_host_desc_t *desc_rx; - u32 pkt_len = skb_tailroom(skb); - dma_addr_t desc_dma; - dma_addr_t buf_dma; - void **swdata; - - desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool); - if (!desc_rx) { - netdev_err(ndev, "rx push: failed to allocate descriptor\n"); - return -ENOMEM; - } - desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx); - - buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) { - k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); - netdev_err(ndev, "rx push: failed to map rx pkt buffer\n"); - return -EINVAL; - } - - cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT, - PRUETH_NAV_PS_DATA_SIZE); - k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma); - cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb)); - - swdata = cppi5_hdesc_get_swdata(desc_rx); - *swdata = skb; - - return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0, - desc_rx, desc_dma); -} - -static u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns) -{ - u32 iepcount_lo, iepcount_hi, hi_rollover_count; - u64 ns; - - iepcount_lo = lo & GENMASK(19, 0); - iepcount_hi = (hi & GENMASK(11, 0)) << 12 | lo >> 20; - hi_rollover_count = hi >> 11; - - ns = ((u64)hi_rollover_count) << 23 | (iepcount_hi + hi_sw); - ns = ns * cycle_time_ns + iepcount_lo; - - return ns; -} - -static void emac_rx_timestamp(struct prueth_emac *emac, - struct sk_buff *skb, u32 *psdata) -{ - struct skb_shared_hwtstamps *ssh; - u64 ns; - - u32 hi_sw = readl(emac->prueth->shram.va + - TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET); - ns = icssg_ts_to_ns(hi_sw, psdata[1], psdata[0], - IEP_DEFAULT_CYCLE_TIME_NS); - - ssh = skb_hwtstamps(skb); - memset(ssh, 0, sizeof(*ssh)); - ssh->hwtstamp = ns_to_ktime(ns); -} - -static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id) -{ - struct prueth_rx_chn *rx_chn = &emac->rx_chns; - u32 buf_dma_len, pkt_len, port_id = 0; - struct net_device *ndev = emac->ndev; - struct cppi5_host_desc_t *desc_rx; - struct sk_buff *skb, *new_skb; - dma_addr_t desc_dma, buf_dma; - void **swdata; - u32 *psdata; - int ret; - - ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma); - if (ret) { - if (ret != -ENODATA) - netdev_err(ndev, "rx pop: failed: %d\n", ret); - return ret; - } - - if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown ? 
*/ - return 0; - - desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma); - - swdata = cppi5_hdesc_get_swdata(desc_rx); - skb = *swdata; - - psdata = cppi5_hdesc_get_psdata(desc_rx); - /* RX HW timestamp */ - if (emac->rx_ts_enabled) - emac_rx_timestamp(emac, skb, psdata); - - cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len); - k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma); - pkt_len = cppi5_hdesc_get_pktlen(desc_rx); - /* firmware adds 4 CRC bytes, strip them */ - pkt_len -= 4; - cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL); - - dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE); - k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); - - skb->dev = ndev; - new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE); - /* if allocation fails we drop the packet but push the - * descriptor back to the ring with old skb to prevent a stall - */ - if (!new_skb) { - ndev->stats.rx_dropped++; - new_skb = skb; - } else { - /* send the filled skb up the n/w stack */ - skb_put(skb, pkt_len); - skb->protocol = eth_type_trans(skb, ndev); - napi_gro_receive(&emac->napi_rx, skb); - ndev->stats.rx_bytes += pkt_len; - ndev->stats.rx_packets++; - } - - /* queue another RX DMA */ - ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_chns); - if (WARN_ON(ret < 0)) { - dev_kfree_skb_any(new_skb); - ndev->stats.rx_errors++; - ndev->stats.rx_dropped++; - } - - return ret; -} - -static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma) -{ - struct prueth_rx_chn *rx_chn = data; - struct cppi5_host_desc_t *desc_rx; - struct sk_buff *skb; - dma_addr_t buf_dma; - u32 buf_dma_len; - void **swdata; - - desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma); - swdata = cppi5_hdesc_get_swdata(desc_rx); - skb = *swdata; - cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len); - k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma); - - dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, - DMA_FROM_DEVICE); - k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); - - dev_kfree_skb_any(skb); -} - static int emac_get_tx_ts(struct prueth_emac *emac, struct emac_tx_ts_response *rsp) { @@ -663,208 +102,6 @@ static void tx_ts_work(struct prueth_emac *emac) } } -static int prueth_tx_ts_cookie_get(struct prueth_emac *emac) -{ - int i; - - /* search and get the next free slot */ - for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) { - if (!emac->tx_ts_skb[i]) { - emac->tx_ts_skb[i] = ERR_PTR(-EBUSY); /* reserve slot */ - return i; - } - } - - return -EBUSY; -} - -/** - * emac_ndo_start_xmit - EMAC Transmit function - * @skb: SKB pointer - * @ndev: EMAC network adapter - * - * Called by the system to transmit a packet - we queue the packet in - * EMAC hardware transmit queue - * Doesn't wait for completion we'll check for TX completion in - * emac_tx_complete_packets(). 
- * - * Return: enum netdev_tx - */ -static enum netdev_tx emac_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) -{ - struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc; - struct prueth_emac *emac = netdev_priv(ndev); - struct netdev_queue *netif_txq; - struct prueth_tx_chn *tx_chn; - dma_addr_t desc_dma, buf_dma; - int i, ret = 0, q_idx; - bool in_tx_ts = 0; - int tx_ts_cookie; - void **swdata; - u32 pkt_len; - u32 *epib; - - pkt_len = skb_headlen(skb); - q_idx = skb_get_queue_mapping(skb); - - tx_chn = &emac->tx_chns[q_idx]; - netif_txq = netdev_get_tx_queue(ndev, q_idx); - - /* Map the linear buffer */ - buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len, DMA_TO_DEVICE); - if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) { - netdev_err(ndev, "tx: failed to map skb buffer\n"); - ret = NETDEV_TX_OK; - goto drop_free_skb; - } - - first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool); - if (!first_desc) { - netdev_dbg(ndev, "tx: failed to allocate descriptor\n"); - dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len, DMA_TO_DEVICE); - goto drop_stop_q_busy; - } - - cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT, - PRUETH_NAV_PS_DATA_SIZE); - cppi5_hdesc_set_pkttype(first_desc, 0); - epib = first_desc->epib; - epib[0] = 0; - epib[1] = 0; - if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && - emac->tx_ts_enabled) { - tx_ts_cookie = prueth_tx_ts_cookie_get(emac); - if (tx_ts_cookie >= 0) { - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - /* Request TX timestamp */ - epib[0] = (u32)tx_ts_cookie; - epib[1] = 0x80000000; /* TX TS request */ - emac->tx_ts_skb[tx_ts_cookie] = skb_get(skb); - in_tx_ts = 1; - } - } - - /* set dst tag to indicate internal qid at the firmware which is at - * bit8..bit15. bit0..bit7 indicates port num for directed - * packets in case of switch mode operation - */ - cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8))); - k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma); - cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len); - swdata = cppi5_hdesc_get_swdata(first_desc); - *swdata = skb; - - /* Handle the case where skb is fragmented in pages */ - cur_desc = first_desc; - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - u32 frag_size = skb_frag_size(frag); - - next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool); - if (!next_desc) { - netdev_err(ndev, - "tx: failed to allocate frag. 
descriptor\n"); - goto free_desc_stop_q_busy_cleanup_tx_ts; - } - - buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size, - DMA_TO_DEVICE); - if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) { - netdev_err(ndev, "tx: Failed to map skb page\n"); - k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc); - ret = NETDEV_TX_OK; - goto cleanup_tx_ts; - } - - cppi5_hdesc_reset_hbdesc(next_desc); - k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma); - cppi5_hdesc_attach_buf(next_desc, - buf_dma, frag_size, buf_dma, frag_size); - - desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, - next_desc); - k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma); - cppi5_hdesc_link_hbdesc(cur_desc, desc_dma); - - pkt_len += frag_size; - cur_desc = next_desc; - } - WARN_ON_ONCE(pkt_len != skb->len); - - /* report bql before sending packet */ - netdev_tx_sent_queue(netif_txq, pkt_len); - - cppi5_hdesc_set_pktlen(first_desc, pkt_len); - desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc); - /* cppi5_desc_dump(first_desc, 64); */ - - skb_tx_timestamp(skb); /* SW timestamp if SKBTX_IN_PROGRESS not set */ - ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma); - if (ret) { - netdev_err(ndev, "tx: push failed: %d\n", ret); - goto drop_free_descs; - } - - if (in_tx_ts) - atomic_inc(&emac->tx_ts_pending); - - if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) { - netif_tx_stop_queue(netif_txq); - /* Barrier, so that stop_queue visible to other cpus */ - smp_mb__after_atomic(); - - if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >= - MAX_SKB_FRAGS) - netif_tx_wake_queue(netif_txq); - } - - return NETDEV_TX_OK; - -cleanup_tx_ts: - if (in_tx_ts) { - dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]); - emac->tx_ts_skb[tx_ts_cookie] = NULL; - } - -drop_free_descs: - prueth_xmit_free(tx_chn, first_desc); - -drop_free_skb: - dev_kfree_skb_any(skb); - - /* error */ - ndev->stats.tx_dropped++; - netdev_err(ndev, "tx: error: %d\n", ret); - - return ret; - -free_desc_stop_q_busy_cleanup_tx_ts: - if (in_tx_ts) { - dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]); - emac->tx_ts_skb[tx_ts_cookie] = NULL; - } - prueth_xmit_free(tx_chn, first_desc); - -drop_stop_q_busy: - netif_tx_stop_queue(netif_txq); - return NETDEV_TX_BUSY; -} - -static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma) -{ - struct prueth_tx_chn *tx_chn = data; - struct cppi5_host_desc_t *desc_tx; - struct sk_buff *skb; - void **swdata; - - desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma); - swdata = cppi5_hdesc_get_swdata(desc_tx); - skb = *(swdata); - prueth_xmit_free(tx_chn, desc_tx); - - dev_kfree_skb_any(skb); -} - static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id) { struct prueth_emac *emac = dev_id; @@ -875,22 +112,6 @@ static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id) return IRQ_HANDLED; } -static irqreturn_t prueth_rx_irq(int irq, void *dev_id) -{ - struct prueth_emac *emac = dev_id; - - disable_irq_nosync(irq); - napi_schedule(&emac->napi_rx); - - return IRQ_HANDLED; -} - -struct icssg_firmwares { - char *pru; - char *rtu; - char *txpru; -}; - static struct icssg_firmwares icssg_emac_firmwares[] = { { .pru = "ti-pruss/am65x-sr2-pru0-prueth-fw.elf", @@ -955,41 +176,6 @@ halt_pru: return ret; } -static void prueth_emac_stop(struct prueth_emac *emac) -{ - struct prueth *prueth = emac->prueth; - int slice; - - switch (emac->port_id) { - case PRUETH_PORT_MII0: - slice = ICSS_SLICE0; - break; - case PRUETH_PORT_MII1: - slice = ICSS_SLICE1; - break; 
- default: - netdev_err(emac->ndev, "invalid port\n"); - return; - } - - emac->fw_running = 0; - rproc_shutdown(prueth->txpru[slice]); - rproc_shutdown(prueth->rtu[slice]); - rproc_shutdown(prueth->pru[slice]); -} - -static void prueth_cleanup_tx_ts(struct prueth_emac *emac) -{ - int i; - - for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) { - if (emac->tx_ts_skb[i]) { - dev_kfree_skb_any(emac->tx_ts_skb[i]); - emac->tx_ts_skb[i] = NULL; - } - } -} - /* called back by PHY layer if there is change in link state of hw port*/ static void emac_adjust_link(struct net_device *ndev) { @@ -1057,84 +243,14 @@ static void emac_adjust_link(struct net_device *ndev) } } -static int emac_napi_rx_poll(struct napi_struct *napi_rx, int budget) +static enum hrtimer_restart emac_rx_timer_callback(struct hrtimer *timer) { - struct prueth_emac *emac = prueth_napi_to_emac(napi_rx); + struct prueth_emac *emac = + container_of(timer, struct prueth_emac, rx_hrtimer); int rx_flow = PRUETH_RX_FLOW_DATA; - int flow = PRUETH_MAX_RX_FLOWS; - int num_rx = 0; - int cur_budget; - int ret; - - while (flow--) { - cur_budget = budget - num_rx; - - while (cur_budget--) { - ret = emac_rx_packet(emac, flow); - if (ret) - break; - num_rx++; - } - - if (num_rx >= budget) - break; - } - if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) - enable_irq(emac->rx_chns.irq[rx_flow]); - - return num_rx; -} - -static int prueth_prepare_rx_chan(struct prueth_emac *emac, - struct prueth_rx_chn *chn, - int buf_size) -{ - struct sk_buff *skb; - int i, ret; - - for (i = 0; i < chn->descs_num; i++) { - skb = __netdev_alloc_skb_ip_align(NULL, buf_size, GFP_KERNEL); - if (!skb) - return -ENOMEM; - - ret = prueth_dma_rx_push(emac, skb, chn); - if (ret < 0) { - netdev_err(emac->ndev, - "cannot submit skb for rx chan %s ret %d\n", - chn->name, ret); - kfree_skb(skb); - return ret; - } - } - - return 0; -} - -static void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num, - bool free_skb) -{ - int i; - - for (i = 0; i < ch_num; i++) { - if (free_skb) - k3_udma_glue_reset_tx_chn(emac->tx_chns[i].tx_chn, - &emac->tx_chns[i], - prueth_tx_cleanup); - k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn); - } -} - -static void prueth_reset_rx_chan(struct prueth_rx_chn *chn, - int num_flows, bool disable) -{ - int i; - - for (i = 0; i < num_flows; i++) - k3_udma_glue_reset_rx_chn(chn->rx_chn, i, chn, - prueth_rx_cleanup, !!i); - if (disable) - k3_udma_glue_disable_rx_chn(chn->rx_chn); + enable_irq(emac->rx_chns.irq[rx_flow]); + return HRTIMER_NORESTART; } static int emac_phy_connect(struct prueth_emac *emac) @@ -1331,7 +447,7 @@ static int emac_ndo_open(struct net_device *ndev) icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr); icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr); - icssg_class_default(prueth->miig_rt, slice, 0); + icssg_class_default(prueth->miig_rt, slice, 0, false); /* Notify the stack of the actual queue counts. 
*/ ret = netif_set_real_num_tx_queues(ndev, num_data_chn); @@ -1476,8 +592,10 @@ static int emac_ndo_stop(struct net_device *ndev) netdev_err(ndev, "tx teardown timeout\n"); prueth_reset_tx_chan(emac, emac->tx_ch_num, true); - for (i = 0; i < emac->tx_ch_num; i++) + for (i = 0; i < emac->tx_ch_num; i++) { napi_disable(&emac->tx_chns[i].napi_tx); + hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer); + } max_rx_flows = PRUETH_MAX_RX_FLOWS; k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true); @@ -1485,6 +603,7 @@ static int emac_ndo_stop(struct net_device *ndev) prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true); napi_disable(&emac->napi_rx); + hrtimer_cancel(&emac->rx_hrtimer); cancel_work_sync(&emac->rx_mode_work); @@ -1510,11 +629,6 @@ static int emac_ndo_stop(struct net_device *ndev) return 0; } -static void emac_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue) -{ - ndev->stats.tx_errors++; -} - static void emac_ndo_set_rx_mode_work(struct work_struct *work) { struct prueth_emac *emac = container_of(work, struct prueth_emac, rx_mode_work); @@ -1560,116 +674,6 @@ static void emac_ndo_set_rx_mode(struct net_device *ndev) queue_work(emac->cmd_wq, &emac->rx_mode_work); } -static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr) -{ - struct prueth_emac *emac = netdev_priv(ndev); - struct hwtstamp_config config; - - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; - - switch (config.tx_type) { - case HWTSTAMP_TX_OFF: - emac->tx_ts_enabled = 0; - break; - case HWTSTAMP_TX_ON: - emac->tx_ts_enabled = 1; - break; - default: - return -ERANGE; - } - - switch (config.rx_filter) { - case HWTSTAMP_FILTER_NONE: - emac->rx_ts_enabled = 0; - break; - case HWTSTAMP_FILTER_ALL: - case HWTSTAMP_FILTER_SOME: - case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: - case HWTSTAMP_FILTER_NTP_ALL: - emac->rx_ts_enabled = 1; - config.rx_filter = HWTSTAMP_FILTER_ALL; - break; - default: - return -ERANGE; - } - - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; -} - -static int emac_get_ts_config(struct net_device *ndev, struct ifreq *ifr) -{ - struct prueth_emac *emac = netdev_priv(ndev); - struct hwtstamp_config config; - - config.flags = 0; - config.tx_type = emac->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; - config.rx_filter = emac->rx_ts_enabled ? HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; - - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
- -EFAULT : 0; -} - -static int emac_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) -{ - switch (cmd) { - case SIOCGHWTSTAMP: - return emac_get_ts_config(ndev, ifr); - case SIOCSHWTSTAMP: - return emac_set_ts_config(ndev, ifr); - default: - break; - } - - return phy_do_ioctl(ndev, ifr, cmd); -} - -static void emac_ndo_get_stats64(struct net_device *ndev, - struct rtnl_link_stats64 *stats) -{ - struct prueth_emac *emac = netdev_priv(ndev); - - emac_update_hardware_stats(emac); - - stats->rx_packets = emac_get_stat_by_name(emac, "rx_packets"); - stats->rx_bytes = emac_get_stat_by_name(emac, "rx_bytes"); - stats->tx_packets = emac_get_stat_by_name(emac, "tx_packets"); - stats->tx_bytes = emac_get_stat_by_name(emac, "tx_bytes"); - stats->rx_crc_errors = emac_get_stat_by_name(emac, "rx_crc_errors"); - stats->rx_over_errors = emac_get_stat_by_name(emac, "rx_over_errors"); - stats->multicast = emac_get_stat_by_name(emac, "rx_multicast_frames"); - - stats->rx_errors = ndev->stats.rx_errors; - stats->rx_dropped = ndev->stats.rx_dropped; - stats->tx_errors = ndev->stats.tx_errors; - stats->tx_dropped = ndev->stats.tx_dropped; -} - -static int emac_ndo_get_phys_port_name(struct net_device *ndev, char *name, - size_t len) -{ - struct prueth_emac *emac = netdev_priv(ndev); - int ret; - - ret = snprintf(name, len, "p%d", emac->port_id); - if (ret >= len) - return -EINVAL; - - return 0; -} - static const struct net_device_ops emac_netdev_ops = { .ndo_open = emac_ndo_open, .ndo_stop = emac_ndo_stop, @@ -1683,42 +687,6 @@ static const struct net_device_ops emac_netdev_ops = { .ndo_get_phys_port_name = emac_ndo_get_phys_port_name, }; -/* get emac_port corresponding to eth_node name */ -static int prueth_node_port(struct device_node *eth_node) -{ - u32 port_id; - int ret; - - ret = of_property_read_u32(eth_node, "reg", &port_id); - if (ret) - return ret; - - if (port_id == 0) - return PRUETH_PORT_MII0; - else if (port_id == 1) - return PRUETH_PORT_MII1; - else - return PRUETH_PORT_INVALID; -} - -/* get MAC instance corresponding to eth_node name */ -static int prueth_node_mac(struct device_node *eth_node) -{ - u32 port_id; - int ret; - - ret = of_property_read_u32(eth_node, "reg", &port_id); - if (ret) - return ret; - - if (port_id == 0) - return PRUETH_MAC0; - else if (port_id == 1) - return PRUETH_MAC1; - else - return PRUETH_MAC_INVALID; -} - static int prueth_netdev_init(struct prueth *prueth, struct device_node *eth_node) { @@ -1846,6 +814,9 @@ static int prueth_netdev_init(struct prueth *prueth, ndev->features = ndev->hw_features; netif_napi_add(ndev, &emac->napi_rx, emac_napi_rx_poll); + hrtimer_init(&emac->rx_hrtimer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL_PINNED); + emac->rx_hrtimer.function = &emac_rx_timer_callback; prueth->emac[mac] = emac; return 0; @@ -1862,90 +833,6 @@ free_ndev: return ret; } -static void prueth_netdev_exit(struct prueth *prueth, - struct device_node *eth_node) -{ - struct prueth_emac *emac; - enum prueth_mac mac; - - mac = prueth_node_mac(eth_node); - if (mac == PRUETH_MAC_INVALID) - return; - - emac = prueth->emac[mac]; - if (!emac) - return; - - if (of_phy_is_fixed_link(emac->phy_node)) - of_phy_deregister_fixed_link(emac->phy_node); - - netif_napi_del(&emac->napi_rx); - - pruss_release_mem_region(prueth->pruss, &emac->dram); - destroy_workqueue(emac->cmd_wq); - free_netdev(emac->ndev); - prueth->emac[mac] = NULL; -} - -static int prueth_get_cores(struct prueth *prueth, int slice) -{ - struct device *dev = prueth->dev; - enum pruss_pru_id pruss_id; - struct 
device_node *np; - int idx = -1, ret; - - np = dev->of_node; - - switch (slice) { - case ICSS_SLICE0: - idx = 0; - break; - case ICSS_SLICE1: - idx = 3; - break; - default: - return -EINVAL; - } - - prueth->pru[slice] = pru_rproc_get(np, idx, &pruss_id); - if (IS_ERR(prueth->pru[slice])) { - ret = PTR_ERR(prueth->pru[slice]); - prueth->pru[slice] = NULL; - return dev_err_probe(dev, ret, "unable to get PRU%d\n", slice); - } - prueth->pru_id[slice] = pruss_id; - - idx++; - prueth->rtu[slice] = pru_rproc_get(np, idx, NULL); - if (IS_ERR(prueth->rtu[slice])) { - ret = PTR_ERR(prueth->rtu[slice]); - prueth->rtu[slice] = NULL; - return dev_err_probe(dev, ret, "unable to get RTU%d\n", slice); - } - - idx++; - prueth->txpru[slice] = pru_rproc_get(np, idx, NULL); - if (IS_ERR(prueth->txpru[slice])) { - ret = PTR_ERR(prueth->txpru[slice]); - prueth->txpru[slice] = NULL; - return dev_err_probe(dev, ret, "unable to get TX_PRU%d\n", slice); - } - - return 0; -} - -static void prueth_put_cores(struct prueth *prueth, int slice) -{ - if (prueth->txpru[slice]) - pru_rproc_put(prueth->txpru[slice]); - - if (prueth->rtu[slice]) - pru_rproc_put(prueth->rtu[slice]); - - if (prueth->pru[slice]) - pru_rproc_put(prueth->pru[slice]); -} - static int prueth_probe(struct platform_device *pdev) { struct device_node *eth_node, *eth_ports_node; @@ -2036,13 +923,13 @@ static int prueth_probe(struct platform_device *pdev) } if (eth0_node) { - ret = prueth_get_cores(prueth, ICSS_SLICE0); + ret = prueth_get_cores(prueth, ICSS_SLICE0, false); if (ret) goto put_cores; } if (eth1_node) { - ret = prueth_get_cores(prueth, ICSS_SLICE1); + ret = prueth_get_cores(prueth, ICSS_SLICE1, false); if (ret) goto put_cores; } @@ -2275,62 +1162,6 @@ static void prueth_remove(struct platform_device *pdev) prueth_put_cores(prueth, ICSS_SLICE0); } -#ifdef CONFIG_PM_SLEEP -static int prueth_suspend(struct device *dev) -{ - struct prueth *prueth = dev_get_drvdata(dev); - struct net_device *ndev; - int i, ret; - - for (i = 0; i < PRUETH_NUM_MACS; i++) { - ndev = prueth->registered_netdevs[i]; - - if (!ndev) - continue; - - if (netif_running(ndev)) { - netif_device_detach(ndev); - ret = emac_ndo_stop(ndev); - if (ret < 0) { - netdev_err(ndev, "failed to stop: %d", ret); - return ret; - } - } - } - - return 0; -} - -static int prueth_resume(struct device *dev) -{ - struct prueth *prueth = dev_get_drvdata(dev); - struct net_device *ndev; - int i, ret; - - for (i = 0; i < PRUETH_NUM_MACS; i++) { - ndev = prueth->registered_netdevs[i]; - - if (!ndev) - continue; - - if (netif_running(ndev)) { - ret = emac_ndo_open(ndev); - if (ret < 0) { - netdev_err(ndev, "failed to start: %d", ret); - return ret; - } - netif_device_attach(ndev); - } - } - - return 0; -} -#endif /* CONFIG_PM_SLEEP */ - -static const struct dev_pm_ops prueth_dev_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(prueth_suspend, prueth_resume) -}; - static const struct prueth_pdata am654_icssg_pdata = { .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE, .quirk_10m_link_issue = 1, diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h index 8b6d6b497010..a78c5eb75fb8 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h @@ -55,6 +55,8 @@ #define ICSSG_NUM_STANDARD_STATS 31 #define ICSSG_NUM_ETHTOOL_STATS (ICSSG_NUM_STATS - ICSSG_NUM_STANDARD_STATS) +#define IEP_DEFAULT_CYCLE_TIME_NS 1000000 /* 1 ms */ + /* Firmware status codes */ #define ICSS_HS_FW_READY 0x55555555 #define ICSS_HS_FW_DEAD 0xDEAD0000 
/* lower 16 bits contain error code */ @@ -106,6 +108,8 @@ struct prueth_tx_chn { u32 descs_num; unsigned int irq; char name[32]; + struct hrtimer tx_hrtimer; + unsigned long tx_pace_timeout_ns; }; struct prueth_rx_chn { @@ -125,8 +129,12 @@ struct prueth_rx_chn { #define PRUETH_MAX_TX_TS_REQUESTS 50 /* Max simultaneous TX_TS requests */ +/* Minimum coalesce time in usecs for both Tx and Rx */ +#define ICSSG_MIN_COALESCE_USECS 20 + /* data for each emac port */ struct prueth_emac { + bool is_sr1; bool fw_running; struct prueth *prueth; struct net_device *ndev; @@ -155,6 +163,10 @@ struct prueth_emac { int rx_flow_id_base; int tx_ch_num; + /* SR1.0 Management channel */ + struct prueth_rx_chn rx_mgm_chn; + int rx_mgm_flow_id_base; + spinlock_t lock; /* serialize access */ /* TX HW Timestamping */ @@ -165,7 +177,7 @@ struct prueth_emac { u8 cmd_seq; /* shutdown related */ - u32 cmd_data[4]; + __le32 cmd_data[4]; struct completion cmd_complete; /* Mutex to serialize access to firmware command interface */ struct mutex cmd_lock; @@ -176,6 +188,10 @@ struct prueth_emac { struct delayed_work stats_work; u64 stats[ICSSG_NUM_STATS]; + + /* RX IRQ Coalescing Related */ + struct hrtimer rx_hrtimer; + unsigned long rx_pace_timeout_ns; }; /** @@ -188,6 +204,12 @@ struct prueth_pdata { u32 quirk_10m_link_issue:1; }; +struct icssg_firmwares { + char *pru; + char *rtu; + char *txpru; +}; + /** * struct prueth - PRUeth structure * @dev: device @@ -243,6 +265,13 @@ struct emac_tx_ts_response { u32 hi_ts; }; +struct emac_tx_ts_response_sr1 { + __le32 lo_ts; + __le32 hi_ts; + __le32 reserved; + __le32 cookie; +}; + /* get PRUSS SLICE number from prueth_emac */ static inline int prueth_emac_slice(struct prueth_emac *emac) { @@ -257,12 +286,17 @@ static inline int prueth_emac_slice(struct prueth_emac *emac) } extern const struct ethtool_ops icssg_ethtool_ops; +extern const struct dev_pm_ops prueth_dev_pm_ops; /* Classifier helpers */ void icssg_class_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac); void icssg_class_set_host_mac_addr(struct regmap *miig_rt, const u8 *mac); void icssg_class_disable(struct regmap *miig_rt, int slice); -void icssg_class_default(struct regmap *miig_rt, int slice, bool allmulti); +void icssg_class_default(struct regmap *miig_rt, int slice, bool allmulti, + bool is_sr1); +void icssg_class_promiscuous_sr1(struct regmap *miig_rt, int slice); +void icssg_class_add_mcast_sr1(struct regmap *miig_rt, int slice, + struct net_device *ndev); void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr); /* config helpers */ @@ -285,4 +319,54 @@ u32 icssg_queue_level(struct prueth *prueth, int queue); void emac_stats_work_handler(struct work_struct *work); void emac_update_hardware_stats(struct prueth_emac *emac); int emac_get_stat_by_name(struct prueth_emac *emac, char *stat_name); + +/* Common functions */ +void prueth_cleanup_rx_chns(struct prueth_emac *emac, + struct prueth_rx_chn *rx_chn, + int max_rflows); +void prueth_cleanup_tx_chns(struct prueth_emac *emac); +void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num); +void prueth_xmit_free(struct prueth_tx_chn *tx_chn, + struct cppi5_host_desc_t *desc); +int emac_tx_complete_packets(struct prueth_emac *emac, int chn, + int budget, bool *tdown); +int prueth_ndev_add_tx_napi(struct prueth_emac *emac); +int prueth_init_tx_chns(struct prueth_emac *emac); +int prueth_init_rx_chns(struct prueth_emac *emac, + struct prueth_rx_chn *rx_chn, + char *name, u32 max_rflows, + u32 max_desc_num); +int 
prueth_dma_rx_push(struct prueth_emac *emac, + struct sk_buff *skb, + struct prueth_rx_chn *rx_chn); +void emac_rx_timestamp(struct prueth_emac *emac, + struct sk_buff *skb, u32 *psdata); +enum netdev_tx emac_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev); +irqreturn_t prueth_rx_irq(int irq, void *dev_id); +void prueth_emac_stop(struct prueth_emac *emac); +void prueth_cleanup_tx_ts(struct prueth_emac *emac); +int emac_napi_rx_poll(struct napi_struct *napi_rx, int budget); +int prueth_prepare_rx_chan(struct prueth_emac *emac, + struct prueth_rx_chn *chn, + int buf_size); +void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num, + bool free_skb); +void prueth_reset_rx_chan(struct prueth_rx_chn *chn, + int num_flows, bool disable); +void emac_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue); +int emac_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd); +void emac_ndo_get_stats64(struct net_device *ndev, + struct rtnl_link_stats64 *stats); +int emac_ndo_get_phys_port_name(struct net_device *ndev, char *name, + size_t len); +int prueth_node_port(struct device_node *eth_node); +int prueth_node_mac(struct device_node *eth_node); +void prueth_netdev_exit(struct prueth *prueth, + struct device_node *eth_node); +int prueth_get_cores(struct prueth *prueth, int slice, bool is_sr1); +void prueth_put_cores(struct prueth *prueth, int slice); + +/* Revision specific helper */ +u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns); + #endif /* __NET_TI_ICSSG_PRUETH_H */ diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c new file mode 100644 index 000000000000..7b3304bbd7fc --- /dev/null +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c @@ -0,0 +1,1181 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Texas Instruments ICSSG SR1.0 Ethernet Driver + * + * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/ + * Copyright (c) Siemens AG, 2024 + * + */ + +#include <linux/etherdevice.h> +#include <linux/genalloc.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/phy.h> +#include <linux/remoteproc/pruss.h> +#include <linux/pruss_driver.h> + +#include "icssg_prueth.h" +#include "icssg_mii_rt.h" +#include "../k3-cppi-desc-pool.h" + +#define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG SR1.0 Ethernet driver" + +/* SR1: Set buffer sizes for the pools. There are 8 internal queues + * implemented in firmware, but only 4 tx channels/threads in the Egress + * direction to firmware. Need a high priority queue for management + * messages since they shouldn't be blocked even during high traffic + * situation. So use Q0-Q2 as data queues and Q3 as management queue + * in the max case. However for ease of configuration, use the max + * data queue + 1 for management message if we are not using max + * case. + * + * Allocate 4 MTU buffers per data queue. Firmware requires + * pool sizes to be set for internal queues. Set the upper 5 queue + * pool size to min size of 128 bytes since there are only 3 tx + * data channels and management queue requires only minimum buffer. + * i.e lower queues are used by driver and highest priority queue + * from that is used for management message. 
+ */ + +static int emac_egress_buf_pool_size[] = { + PRUETH_EMAC_BUF_POOL_SIZE_SR1, PRUETH_EMAC_BUF_POOL_SIZE_SR1, + PRUETH_EMAC_BUF_POOL_SIZE_SR1, PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1, + PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1, PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1, + PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1, PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1 +}; + +static void icssg_config_sr1(struct prueth *prueth, struct prueth_emac *emac, + int slice) +{ + struct icssg_sr1_config config; + void __iomem *va; + int i, index; + + memset(&config, 0, sizeof(config)); + config.addr_lo = cpu_to_le32(lower_32_bits(prueth->msmcram.pa)); + config.addr_hi = cpu_to_le32(upper_32_bits(prueth->msmcram.pa)); + config.rx_flow_id = cpu_to_le32(emac->rx_flow_id_base); /* flow id for host port */ + config.rx_mgr_flow_id = cpu_to_le32(emac->rx_mgm_flow_id_base); /* for mgm ch */ + config.rand_seed = cpu_to_le32(get_random_u32()); + + for (i = PRUETH_EMAC_BUF_POOL_START_SR1; i < PRUETH_NUM_BUF_POOLS_SR1; i++) { + index = i - PRUETH_EMAC_BUF_POOL_START_SR1; + config.tx_buf_sz[i] = cpu_to_le32(emac_egress_buf_pool_size[index]); + } + + va = prueth->shram.va + slice * ICSSG_CONFIG_OFFSET_SLICE1; + memcpy_toio(va, &config, sizeof(config)); + + emac->speed = SPEED_1000; + emac->duplex = DUPLEX_FULL; +} + +static int emac_send_command_sr1(struct prueth_emac *emac, u32 cmd) +{ + struct cppi5_host_desc_t *first_desc; + u32 pkt_len = sizeof(emac->cmd_data); + __le32 *data = emac->cmd_data; + dma_addr_t desc_dma, buf_dma; + struct prueth_tx_chn *tx_chn; + void **swdata; + int ret = 0; + u32 *epib; + + netdev_dbg(emac->ndev, "Sending cmd %x\n", cmd); + + /* only one command at a time allowed to firmware */ + mutex_lock(&emac->cmd_lock); + data[0] = cpu_to_le32(cmd); + + /* highest priority channel for management messages */ + tx_chn = &emac->tx_chns[emac->tx_ch_num - 1]; + + /* Map the linear buffer */ + buf_dma = dma_map_single(tx_chn->dma_dev, data, pkt_len, DMA_TO_DEVICE); + if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) { + netdev_err(emac->ndev, "cmd %x: failed to map cmd buffer\n", cmd); + ret = -EINVAL; + goto err_unlock; + } + + first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool); + if (!first_desc) { + netdev_err(emac->ndev, "cmd %x: failed to allocate descriptor\n", cmd); + dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len, DMA_TO_DEVICE); + ret = -ENOMEM; + goto err_unlock; + } + + cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT, + PRUETH_NAV_PS_DATA_SIZE); + cppi5_hdesc_set_pkttype(first_desc, PRUETH_PKT_TYPE_CMD); + epib = first_desc->epib; + epib[0] = 0; + epib[1] = 0; + + cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len); + swdata = cppi5_hdesc_get_swdata(first_desc); + *swdata = data; + + cppi5_hdesc_set_pktlen(first_desc, pkt_len); + desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc); + + /* send command */ + reinit_completion(&emac->cmd_complete); + ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma); + if (ret) { + netdev_err(emac->ndev, "cmd %x: push failed: %d\n", cmd, ret); + goto free_desc; + } + ret = wait_for_completion_timeout(&emac->cmd_complete, msecs_to_jiffies(100)); + if (!ret) + netdev_err(emac->ndev, "cmd %x: completion timeout\n", cmd); + + mutex_unlock(&emac->cmd_lock); + + return ret; +free_desc: + prueth_xmit_free(tx_chn, first_desc); +err_unlock: + mutex_unlock(&emac->cmd_lock); + + return ret; +} + +static void icssg_config_set_speed_sr1(struct prueth_emac *emac) +{ + u32 cmd = ICSSG_PSTATE_SPEED_DUPLEX_CMD_SR1, val; + struct prueth *prueth 
= emac->prueth; + int slice = prueth_emac_slice(emac); + + val = icssg_rgmii_get_speed(prueth->miig_rt, slice); + /* firmware expects speed settings in bit 2-1 */ + val <<= 1; + cmd |= val; + + val = icssg_rgmii_get_fullduplex(prueth->miig_rt, slice); + /* firmware expects full duplex settings in bit 3 */ + val <<= 3; + cmd |= val; + + emac_send_command_sr1(emac, cmd); +} + +/* called back by PHY layer if there is change in link state of hw port*/ +static void emac_adjust_link_sr1(struct net_device *ndev) +{ + struct prueth_emac *emac = netdev_priv(ndev); + struct phy_device *phydev = ndev->phydev; + struct prueth *prueth = emac->prueth; + bool new_state = false; + unsigned long flags; + + if (phydev->link) { + /* check the mode of operation - full/half duplex */ + if (phydev->duplex != emac->duplex) { + new_state = true; + emac->duplex = phydev->duplex; + } + if (phydev->speed != emac->speed) { + new_state = true; + emac->speed = phydev->speed; + } + if (!emac->link) { + new_state = true; + emac->link = 1; + } + } else if (emac->link) { + new_state = true; + emac->link = 0; + + /* f/w should support 100 & 1000 */ + emac->speed = SPEED_1000; + + /* half duplex may not be supported by f/w */ + emac->duplex = DUPLEX_FULL; + } + + if (new_state) { + phy_print_status(phydev); + + /* update RGMII and MII configuration based on PHY negotiated + * values + */ + if (emac->link) { + /* Set the RGMII cfg for gig en and full duplex */ + icssg_update_rgmii_cfg(prueth->miig_rt, emac); + + /* update the Tx IPG based on 100M/1G speed */ + spin_lock_irqsave(&emac->lock, flags); + icssg_config_ipg(emac); + spin_unlock_irqrestore(&emac->lock, flags); + icssg_config_set_speed_sr1(emac); + } + } + + if (emac->link) { + /* reactivate the transmit queue */ + netif_tx_wake_all_queues(ndev); + } else { + netif_tx_stop_all_queues(ndev); + prueth_cleanup_tx_ts(emac); + } +} + +static int emac_phy_connect(struct prueth_emac *emac) +{ + struct prueth *prueth = emac->prueth; + struct net_device *ndev = emac->ndev; + /* connect PHY */ + ndev->phydev = of_phy_connect(emac->ndev, emac->phy_node, + &emac_adjust_link_sr1, 0, + emac->phy_if); + if (!ndev->phydev) { + dev_err(prueth->dev, "couldn't connect to phy %s\n", + emac->phy_node->full_name); + return -ENODEV; + } + + if (!emac->half_duplex) { + dev_dbg(prueth->dev, "half duplex mode is not supported\n"); + phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); + } + + /* Remove 100Mbits half-duplex due to RGMII misreporting connection + * as full duplex */ + phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); + + /* remove unsupported modes */ + phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Pause_BIT); + phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT); + + if (emac->phy_if == PHY_INTERFACE_MODE_MII) + phy_set_max_speed(ndev->phydev, SPEED_100); + + return 0; +} + +/* get one packet from requested flow_id + * + * Returns skb pointer if packet found else NULL + * Caller must free the returned skb. 
+ */ +static struct sk_buff *prueth_process_rx_mgm(struct prueth_emac *emac, + u32 flow_id) +{ + struct prueth_rx_chn *rx_chn = &emac->rx_mgm_chn; + struct net_device *ndev = emac->ndev; + struct cppi5_host_desc_t *desc_rx; + struct sk_buff *skb, *new_skb; + dma_addr_t desc_dma, buf_dma; + u32 buf_dma_len, pkt_len; + void **swdata; + int ret; + + ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma); + if (ret) { + if (ret != -ENODATA) + netdev_err(ndev, "rx mgm pop: failed: %d\n", ret); + return NULL; + } + + if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown */ + return NULL; + + desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma); + + /* Fix FW bug about incorrect PSDATA size */ + if (cppi5_hdesc_get_psdata_size(desc_rx) != PRUETH_NAV_PS_DATA_SIZE) { + cppi5_hdesc_update_psdata_size(desc_rx, + PRUETH_NAV_PS_DATA_SIZE); + } + + swdata = cppi5_hdesc_get_swdata(desc_rx); + skb = *swdata; + cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len); + pkt_len = cppi5_hdesc_get_pktlen(desc_rx); + + dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE); + k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); + + new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE); + /* if allocation fails we drop the packet but push the + * descriptor back to the ring with old skb to prevent a stall + */ + if (!new_skb) { + netdev_err(ndev, + "skb alloc failed, dropped mgm pkt from flow %d\n", + flow_id); + new_skb = skb; + skb = NULL; /* return NULL */ + } else { + /* return the filled skb */ + skb_put(skb, pkt_len); + } + + /* queue another DMA */ + ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_mgm_chn); + if (WARN_ON(ret < 0)) + dev_kfree_skb_any(new_skb); + + return skb; +} + +static void prueth_tx_ts_sr1(struct prueth_emac *emac, + struct emac_tx_ts_response_sr1 *tsr) +{ + struct skb_shared_hwtstamps ssh; + u32 hi_ts, lo_ts, cookie; + struct sk_buff *skb; + u64 ns; + + hi_ts = le32_to_cpu(tsr->hi_ts); + lo_ts = le32_to_cpu(tsr->lo_ts); + + ns = (u64)hi_ts << 32 | lo_ts; + + cookie = le32_to_cpu(tsr->cookie); + if (cookie >= PRUETH_MAX_TX_TS_REQUESTS) { + netdev_dbg(emac->ndev, "Invalid TX TS cookie 0x%x\n", + cookie); + return; + } + + skb = emac->tx_ts_skb[cookie]; + emac->tx_ts_skb[cookie] = NULL; /* free slot */ + + memset(&ssh, 0, sizeof(ssh)); + ssh.hwtstamp = ns_to_ktime(ns); + + skb_tstamp_tx(skb, &ssh); + dev_consume_skb_any(skb); +} + +static irqreturn_t prueth_rx_mgm_ts_thread_sr1(int irq, void *dev_id) +{ + struct prueth_emac *emac = dev_id; + struct sk_buff *skb; + + skb = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1); + if (!skb) + return IRQ_NONE; + + prueth_tx_ts_sr1(emac, (void *)skb->data); + dev_kfree_skb_any(skb); + + return IRQ_HANDLED; +} + +static irqreturn_t prueth_rx_mgm_rsp_thread(int irq, void *dev_id) +{ + struct prueth_emac *emac = dev_id; + struct sk_buff *skb; + u32 rsp; + + skb = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_RESPONSE_SR1); + if (!skb) + return IRQ_NONE; + + /* Process command response */ + rsp = le32_to_cpu(*(__le32 *)skb->data) & 0xffff0000; + if (rsp == ICSSG_SHUTDOWN_CMD_SR1) { + netdev_dbg(emac->ndev, "f/w Shutdown cmd resp %x\n", rsp); + complete(&emac->cmd_complete); + } else if (rsp == ICSSG_PSTATE_SPEED_DUPLEX_CMD_SR1) { + netdev_dbg(emac->ndev, "f/w Speed/Duplex cmd rsp %x\n", rsp); + complete(&emac->cmd_complete); + } + + dev_kfree_skb_any(skb); + + return IRQ_HANDLED; +} + +static struct icssg_firmwares icssg_sr1_emac_firmwares[] = { + { + .pru = "ti-pruss/am65x-pru0-prueth-fw.elf", 
+ .rtu = "ti-pruss/am65x-rtu0-prueth-fw.elf", + }, + { + .pru = "ti-pruss/am65x-pru1-prueth-fw.elf", + .rtu = "ti-pruss/am65x-rtu1-prueth-fw.elf", + } +}; + +static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac) +{ + struct icssg_firmwares *firmwares; + struct device *dev = prueth->dev; + int slice, ret; + + firmwares = icssg_sr1_emac_firmwares; + + slice = prueth_emac_slice(emac); + if (slice < 0) { + netdev_err(emac->ndev, "invalid port\n"); + return -EINVAL; + } + + icssg_config_sr1(prueth, emac, slice); + + ret = rproc_set_firmware(prueth->pru[slice], firmwares[slice].pru); + ret = rproc_boot(prueth->pru[slice]); + if (ret) { + dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret); + return -EINVAL; + } + + ret = rproc_set_firmware(prueth->rtu[slice], firmwares[slice].rtu); + ret = rproc_boot(prueth->rtu[slice]); + if (ret) { + dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret); + goto halt_pru; + } + + emac->fw_running = 1; + return 0; + +halt_pru: + rproc_shutdown(prueth->pru[slice]); + + return ret; +} + +/** + * emac_ndo_open - EMAC device open + * @ndev: network adapter device + * + * Called when system wants to start the interface. + * + * Return: 0 for a successful open, or appropriate error code + */ +static int emac_ndo_open(struct net_device *ndev) +{ + struct prueth_emac *emac = netdev_priv(ndev); + int num_data_chn = emac->tx_ch_num - 1; + struct prueth *prueth = emac->prueth; + int slice = prueth_emac_slice(emac); + struct device *dev = prueth->dev; + int max_rx_flows, rx_flow; + int ret, i; + + /* clear SMEM and MSMC settings for all slices */ + if (!prueth->emacs_initialized) { + memset_io(prueth->msmcram.va, 0, prueth->msmcram.size); + memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS); + } + + /* set h/w MAC as user might have re-configured */ + ether_addr_copy(emac->mac_addr, ndev->dev_addr); + + icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr); + + icssg_class_default(prueth->miig_rt, slice, 0, true); + + /* Notify the stack of the actual queue counts. */ + ret = netif_set_real_num_tx_queues(ndev, num_data_chn); + if (ret) { + dev_err(dev, "cannot set real number of tx queues\n"); + return ret; + } + + init_completion(&emac->cmd_complete); + ret = prueth_init_tx_chns(emac); + if (ret) { + dev_err(dev, "failed to init tx channel: %d\n", ret); + return ret; + } + + max_rx_flows = PRUETH_MAX_RX_FLOWS_SR1; + ret = prueth_init_rx_chns(emac, &emac->rx_chns, "rx", + max_rx_flows, PRUETH_MAX_RX_DESC); + if (ret) { + dev_err(dev, "failed to init rx channel: %d\n", ret); + goto cleanup_tx; + } + + ret = prueth_init_rx_chns(emac, &emac->rx_mgm_chn, "rxmgm", + PRUETH_MAX_RX_MGM_FLOWS_SR1, + PRUETH_MAX_RX_MGM_DESC_SR1); + if (ret) { + dev_err(dev, "failed to init rx mgmt channel: %d\n", + ret); + goto cleanup_rx; + } + + ret = prueth_ndev_add_tx_napi(emac); + if (ret) + goto cleanup_rx_mgm; + + /* we use only the highest priority flow for now i.e. 
@irq[3] */ + rx_flow = PRUETH_RX_FLOW_DATA_SR1; + ret = request_irq(emac->rx_chns.irq[rx_flow], prueth_rx_irq, + IRQF_TRIGGER_HIGH, dev_name(dev), emac); + if (ret) { + dev_err(dev, "unable to request RX IRQ\n"); + goto cleanup_napi; + } + + ret = request_threaded_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_RESPONSE_SR1], + NULL, prueth_rx_mgm_rsp_thread, + IRQF_ONESHOT | IRQF_TRIGGER_HIGH, + dev_name(dev), emac); + if (ret) { + dev_err(dev, "unable to request RX Management RSP IRQ\n"); + goto free_rx_irq; + } + + ret = request_threaded_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1], + NULL, prueth_rx_mgm_ts_thread_sr1, + IRQF_ONESHOT | IRQF_TRIGGER_HIGH, + dev_name(dev), emac); + if (ret) { + dev_err(dev, "unable to request RX Management TS IRQ\n"); + goto free_rx_mgm_rsp_irq; + } + + /* reset and start PRU firmware */ + ret = prueth_emac_start(prueth, emac); + if (ret) + goto free_rx_mgmt_ts_irq; + + icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu); + + /* Prepare RX */ + ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE); + if (ret) + goto stop; + + ret = prueth_prepare_rx_chan(emac, &emac->rx_mgm_chn, 64); + if (ret) + goto reset_rx_chn; + + ret = k3_udma_glue_enable_rx_chn(emac->rx_mgm_chn.rx_chn); + if (ret) + goto reset_rx_chn; + + ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn); + if (ret) + goto reset_rx_mgm_chn; + + for (i = 0; i < emac->tx_ch_num; i++) { + ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn); + if (ret) + goto reset_tx_chan; + } + + /* Enable NAPI in Tx and Rx direction */ + for (i = 0; i < emac->tx_ch_num; i++) + napi_enable(&emac->tx_chns[i].napi_tx); + napi_enable(&emac->napi_rx); + + /* start PHY */ + phy_start(ndev->phydev); + + prueth->emacs_initialized++; + + queue_work(system_long_wq, &emac->stats_work.work); + + return 0; + +reset_tx_chan: + /* Since interface is not yet up, there is wouldn't be + * any SKB for completion. So set false to free_skb + */ + prueth_reset_tx_chan(emac, i, false); +reset_rx_mgm_chn: + prueth_reset_rx_chan(&emac->rx_mgm_chn, + PRUETH_MAX_RX_MGM_FLOWS_SR1, true); +reset_rx_chn: + prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false); +stop: + prueth_emac_stop(emac); +free_rx_mgmt_ts_irq: + free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1], + emac); +free_rx_mgm_rsp_irq: + free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_RESPONSE_SR1], + emac); +free_rx_irq: + free_irq(emac->rx_chns.irq[rx_flow], emac); +cleanup_napi: + prueth_ndev_del_tx_napi(emac, emac->tx_ch_num); +cleanup_rx_mgm: + prueth_cleanup_rx_chns(emac, &emac->rx_mgm_chn, + PRUETH_MAX_RX_MGM_FLOWS_SR1); +cleanup_rx: + prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows); +cleanup_tx: + prueth_cleanup_tx_chns(emac); + + return ret; +} + +/** + * emac_ndo_stop - EMAC device stop + * @ndev: network adapter device + * + * Called when system wants to stop or down the interface. + * + * Return: Always 0 (Success) + */ +static int emac_ndo_stop(struct net_device *ndev) +{ + struct prueth_emac *emac = netdev_priv(ndev); + int rx_flow = PRUETH_RX_FLOW_DATA_SR1; + struct prueth *prueth = emac->prueth; + int max_rx_flows; + int ret, i; + + /* inform the upper layers. 
*/ + netif_tx_stop_all_queues(ndev); + + /* block packets from wire */ + if (ndev->phydev) + phy_stop(ndev->phydev); + + icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac)); + + emac_send_command_sr1(emac, ICSSG_SHUTDOWN_CMD_SR1); + + atomic_set(&emac->tdown_cnt, emac->tx_ch_num); + /* ensure new tdown_cnt value is visible */ + smp_mb__after_atomic(); + /* tear down and disable UDMA channels */ + reinit_completion(&emac->tdown_complete); + for (i = 0; i < emac->tx_ch_num; i++) + k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false); + + ret = wait_for_completion_timeout(&emac->tdown_complete, + msecs_to_jiffies(1000)); + if (!ret) + netdev_err(ndev, "tx teardown timeout\n"); + + prueth_reset_tx_chan(emac, emac->tx_ch_num, true); + for (i = 0; i < emac->tx_ch_num; i++) + napi_disable(&emac->tx_chns[i].napi_tx); + + max_rx_flows = PRUETH_MAX_RX_FLOWS_SR1; + k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true); + + prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true); + /* Teardown RX MGM channel */ + k3_udma_glue_tdown_rx_chn(emac->rx_mgm_chn.rx_chn, true); + prueth_reset_rx_chan(&emac->rx_mgm_chn, + PRUETH_MAX_RX_MGM_FLOWS_SR1, true); + + napi_disable(&emac->napi_rx); + + /* Destroying the queued work in ndo_stop() */ + cancel_delayed_work_sync(&emac->stats_work); + + /* stop PRUs */ + prueth_emac_stop(emac); + + free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1], emac); + free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_RESPONSE_SR1], emac); + free_irq(emac->rx_chns.irq[rx_flow], emac); + prueth_ndev_del_tx_napi(emac, emac->tx_ch_num); + prueth_cleanup_tx_chns(emac); + + prueth_cleanup_rx_chns(emac, &emac->rx_mgm_chn, PRUETH_MAX_RX_MGM_FLOWS_SR1); + prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows); + + prueth->emacs_initialized--; + + return 0; +} + +static void emac_ndo_set_rx_mode_sr1(struct net_device *ndev) +{ + struct prueth_emac *emac = netdev_priv(ndev); + bool allmulti = ndev->flags & IFF_ALLMULTI; + bool promisc = ndev->flags & IFF_PROMISC; + struct prueth *prueth = emac->prueth; + int slice = prueth_emac_slice(emac); + + if (promisc) { + icssg_class_promiscuous_sr1(prueth->miig_rt, slice); + return; + } + + if (allmulti) { + icssg_class_default(prueth->miig_rt, slice, 1, true); + return; + } + + icssg_class_default(prueth->miig_rt, slice, 0, true); + if (!netdev_mc_empty(ndev)) { + /* program multicast address list into Classifier */ + icssg_class_add_mcast_sr1(prueth->miig_rt, slice, ndev); + } +} + +static const struct net_device_ops emac_netdev_ops = { + .ndo_open = emac_ndo_open, + .ndo_stop = emac_ndo_stop, + .ndo_start_xmit = emac_ndo_start_xmit, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_tx_timeout = emac_ndo_tx_timeout, + .ndo_set_rx_mode = emac_ndo_set_rx_mode_sr1, + .ndo_eth_ioctl = emac_ndo_ioctl, + .ndo_get_stats64 = emac_ndo_get_stats64, + .ndo_get_phys_port_name = emac_ndo_get_phys_port_name, +}; + +static int prueth_netdev_init(struct prueth *prueth, + struct device_node *eth_node) +{ + struct prueth_emac *emac; + struct net_device *ndev; + enum prueth_port port; + enum prueth_mac mac; + /* Only enable one TX channel due to timeouts when + * using multiple channels */ + int num_tx_chn = 1; + int ret; + + port = prueth_node_port(eth_node); + if (port == PRUETH_PORT_INVALID) + return -EINVAL; + + mac = prueth_node_mac(eth_node); + if (mac == PRUETH_MAC_INVALID) + return -EINVAL; + + ndev = alloc_etherdev_mq(sizeof(*emac), num_tx_chn); + if (!ndev) + return -ENOMEM; + + emac = 
netdev_priv(ndev); + emac->is_sr1 = 1; + emac->prueth = prueth; + emac->ndev = ndev; + emac->port_id = port; + emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq"); + if (!emac->cmd_wq) { + ret = -ENOMEM; + goto free_ndev; + } + + INIT_DELAYED_WORK(&emac->stats_work, emac_stats_work_handler); + + ret = pruss_request_mem_region(prueth->pruss, + port == PRUETH_PORT_MII0 ? + PRUSS_MEM_DRAM0 : PRUSS_MEM_DRAM1, + &emac->dram); + if (ret) { + dev_err(prueth->dev, "unable to get DRAM: %d\n", ret); + ret = -ENOMEM; + goto free_wq; + } + + /* SR1.0 uses a dedicated high priority channel + * to send commands to the firmware + */ + emac->tx_ch_num = 2; + + SET_NETDEV_DEV(ndev, prueth->dev); + spin_lock_init(&emac->lock); + mutex_init(&emac->cmd_lock); + + emac->phy_node = of_parse_phandle(eth_node, "phy-handle", 0); + if (!emac->phy_node && !of_phy_is_fixed_link(eth_node)) { + dev_err(prueth->dev, "couldn't find phy-handle\n"); + ret = -ENODEV; + goto free; + } else if (of_phy_is_fixed_link(eth_node)) { + ret = of_phy_register_fixed_link(eth_node); + if (ret) { + ret = dev_err_probe(prueth->dev, ret, + "failed to register fixed-link phy\n"); + goto free; + } + + emac->phy_node = eth_node; + } + + ret = of_get_phy_mode(eth_node, &emac->phy_if); + if (ret) { + dev_err(prueth->dev, "could not get phy-mode property\n"); + goto free; + } + + if (emac->phy_if != PHY_INTERFACE_MODE_MII && + !phy_interface_mode_is_rgmii(emac->phy_if)) { + dev_err(prueth->dev, "PHY mode unsupported %s\n", phy_modes(emac->phy_if)); + ret = -EINVAL; + goto free; + } + + /* AM65 SR2.0 has TX Internal delay always enabled by hardware + * and it is not possible to disable TX Internal delay. The below + * switch case block describes how we handle different phy modes + * based on hardware restriction. 
+ */ + switch (emac->phy_if) { + case PHY_INTERFACE_MODE_RGMII_ID: + emac->phy_if = PHY_INTERFACE_MODE_RGMII_RXID; + break; + case PHY_INTERFACE_MODE_RGMII_TXID: + emac->phy_if = PHY_INTERFACE_MODE_RGMII; + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_RXID: + dev_err(prueth->dev, "RGMII mode without TX delay is not supported"); + ret = -EINVAL; + goto free; + default: + break; + } + + /* get mac address from DT and set private and netdev addr */ + ret = of_get_ethdev_address(eth_node, ndev); + if (!is_valid_ether_addr(ndev->dev_addr)) { + eth_hw_addr_random(ndev); + dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n", + port, ndev->dev_addr); + } + ether_addr_copy(emac->mac_addr, ndev->dev_addr); + + ndev->min_mtu = PRUETH_MIN_PKT_SIZE; + ndev->max_mtu = PRUETH_MAX_MTU; + ndev->netdev_ops = &emac_netdev_ops; + ndev->ethtool_ops = &icssg_ethtool_ops; + ndev->hw_features = NETIF_F_SG; + ndev->features = ndev->hw_features; + + netif_napi_add(ndev, &emac->napi_rx, emac_napi_rx_poll); + prueth->emac[mac] = emac; + + return 0; + +free: + pruss_release_mem_region(prueth->pruss, &emac->dram); +free_wq: + destroy_workqueue(emac->cmd_wq); +free_ndev: + emac->ndev = NULL; + prueth->emac[mac] = NULL; + free_netdev(ndev); + + return ret; +} + +static int prueth_probe(struct platform_device *pdev) +{ + struct device_node *eth_node, *eth_ports_node; + struct device_node *eth0_node = NULL; + struct device_node *eth1_node = NULL; + struct device *dev = &pdev->dev; + struct device_node *np; + struct prueth *prueth; + struct pruss *pruss; + u32 msmc_ram_size; + int i, ret; + + np = dev->of_node; + + prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL); + if (!prueth) + return -ENOMEM; + + dev_set_drvdata(dev, prueth); + prueth->pdev = pdev; + prueth->pdata = *(const struct prueth_pdata *)device_get_match_data(dev); + + prueth->dev = dev; + eth_ports_node = of_get_child_by_name(np, "ethernet-ports"); + if (!eth_ports_node) + return -ENOENT; + + for_each_child_of_node(eth_ports_node, eth_node) { + u32 reg; + + if (strcmp(eth_node->name, "port")) + continue; + ret = of_property_read_u32(eth_node, "reg", ®); + if (ret < 0) { + dev_err(dev, "%pOF error reading port_id %d\n", + eth_node, ret); + } + + of_node_get(eth_node); + + if (reg == 0) { + eth0_node = eth_node; + if (!of_device_is_available(eth0_node)) { + of_node_put(eth0_node); + eth0_node = NULL; + } + } else if (reg == 1) { + eth1_node = eth_node; + if (!of_device_is_available(eth1_node)) { + of_node_put(eth1_node); + eth1_node = NULL; + } + } else { + dev_err(dev, "port reg should be 0 or 1\n"); + } + } + + of_node_put(eth_ports_node); + + /* At least one node must be present and available else we fail */ + if (!eth0_node && !eth1_node) { + dev_err(dev, "neither port0 nor port1 node available\n"); + return -ENODEV; + } + + if (eth0_node == eth1_node) { + dev_err(dev, "port0 and port1 can't have same reg\n"); + of_node_put(eth0_node); + return -ENODEV; + } + + prueth->eth_node[PRUETH_MAC0] = eth0_node; + prueth->eth_node[PRUETH_MAC1] = eth1_node; + + prueth->miig_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-g-rt"); + if (IS_ERR(prueth->miig_rt)) { + dev_err(dev, "couldn't get ti,mii-g-rt syscon regmap\n"); + return -ENODEV; + } + + prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-rt"); + if (IS_ERR(prueth->mii_rt)) { + dev_err(dev, "couldn't get ti,mii-rt syscon regmap\n"); + return -ENODEV; + } + + if (eth0_node) { + ret = prueth_get_cores(prueth, ICSS_SLICE0, true); + if (ret) + goto put_cores; + 
} + + if (eth1_node) { + ret = prueth_get_cores(prueth, ICSS_SLICE1, true); + if (ret) + goto put_cores; + } + + pruss = pruss_get(eth0_node ? + prueth->pru[ICSS_SLICE0] : prueth->pru[ICSS_SLICE1]); + if (IS_ERR(pruss)) { + ret = PTR_ERR(pruss); + dev_err(dev, "unable to get pruss handle\n"); + goto put_cores; + } + + prueth->pruss = pruss; + + ret = pruss_request_mem_region(pruss, PRUSS_MEM_SHRD_RAM2, + &prueth->shram); + if (ret) { + dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret); + goto put_pruss; + } + + prueth->sram_pool = of_gen_pool_get(np, "sram", 0); + if (!prueth->sram_pool) { + dev_err(dev, "unable to get SRAM pool\n"); + ret = -ENODEV; + + goto put_mem; + } + + msmc_ram_size = MSMC_RAM_SIZE_SR1; + + prueth->msmcram.va = (void __iomem *)gen_pool_alloc(prueth->sram_pool, + msmc_ram_size); + + if (!prueth->msmcram.va) { + ret = -ENOMEM; + dev_err(dev, "unable to allocate MSMC resource\n"); + goto put_mem; + } + prueth->msmcram.pa = gen_pool_virt_to_phys(prueth->sram_pool, + (unsigned long)prueth->msmcram.va); + prueth->msmcram.size = msmc_ram_size; + memset_io(prueth->msmcram.va, 0, msmc_ram_size); + dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa, + prueth->msmcram.va, prueth->msmcram.size); + + if (eth0_node) { + ret = prueth_netdev_init(prueth, eth0_node); + if (ret) { + dev_err_probe(dev, ret, "netdev init %s failed\n", + eth0_node->name); + goto free_pool; + } + + if (of_find_property(eth0_node, "ti,half-duplex-capable", NULL)) + prueth->emac[PRUETH_MAC0]->half_duplex = 1; + } + + if (eth1_node) { + ret = prueth_netdev_init(prueth, eth1_node); + if (ret) { + dev_err_probe(dev, ret, "netdev init %s failed\n", + eth1_node->name); + goto netdev_exit; + } + + if (of_find_property(eth1_node, "ti,half-duplex-capable", NULL)) + prueth->emac[PRUETH_MAC1]->half_duplex = 1; + } + + /* register the network devices */ + if (eth0_node) { + ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev); + if (ret) { + dev_err(dev, "can't register netdev for port MII0\n"); + goto netdev_exit; + } + + prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev; + emac_phy_connect(prueth->emac[PRUETH_MAC0]); + phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev); + } + + if (eth1_node) { + ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev); + if (ret) { + dev_err(dev, "can't register netdev for port MII1\n"); + goto netdev_unregister; + } + + prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev; + emac_phy_connect(prueth->emac[PRUETH_MAC1]); + phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev); + } + + dev_info(dev, "TI PRU SR1.0 ethernet driver initialized: %s EMAC mode\n", + (!eth0_node || !eth1_node) ? 
"single" : "dual"); + + if (eth1_node) + of_node_put(eth1_node); + if (eth0_node) + of_node_put(eth0_node); + + return 0; + +netdev_unregister: + for (i = 0; i < PRUETH_NUM_MACS; i++) { + if (!prueth->registered_netdevs[i]) + continue; + + if (prueth->emac[i]->ndev->phydev) { + phy_disconnect(prueth->emac[i]->ndev->phydev); + prueth->emac[i]->ndev->phydev = NULL; + } + unregister_netdev(prueth->registered_netdevs[i]); + } + +netdev_exit: + for (i = 0; i < PRUETH_NUM_MACS; i++) { + eth_node = prueth->eth_node[i]; + if (!eth_node) + continue; + + prueth_netdev_exit(prueth, eth_node); + } + +free_pool: + gen_pool_free(prueth->sram_pool, + (unsigned long)prueth->msmcram.va, msmc_ram_size); + +put_mem: + pruss_release_mem_region(prueth->pruss, &prueth->shram); + +put_pruss: + pruss_put(prueth->pruss); + +put_cores: + if (eth1_node) { + prueth_put_cores(prueth, ICSS_SLICE1); + of_node_put(eth1_node); + } + + if (eth0_node) { + prueth_put_cores(prueth, ICSS_SLICE0); + of_node_put(eth0_node); + } + + return ret; +} + +static void prueth_remove(struct platform_device *pdev) +{ + struct prueth *prueth = platform_get_drvdata(pdev); + struct device_node *eth_node; + int i; + + for (i = 0; i < PRUETH_NUM_MACS; i++) { + if (!prueth->registered_netdevs[i]) + continue; + phy_stop(prueth->emac[i]->ndev->phydev); + phy_disconnect(prueth->emac[i]->ndev->phydev); + prueth->emac[i]->ndev->phydev = NULL; + unregister_netdev(prueth->registered_netdevs[i]); + } + + for (i = 0; i < PRUETH_NUM_MACS; i++) { + eth_node = prueth->eth_node[i]; + if (!eth_node) + continue; + + prueth_netdev_exit(prueth, eth_node); + } + + gen_pool_free(prueth->sram_pool, + (unsigned long)prueth->msmcram.va, + MSMC_RAM_SIZE_SR1); + + pruss_release_mem_region(prueth->pruss, &prueth->shram); + + pruss_put(prueth->pruss); + + if (prueth->eth_node[PRUETH_MAC1]) + prueth_put_cores(prueth, ICSS_SLICE1); + + if (prueth->eth_node[PRUETH_MAC0]) + prueth_put_cores(prueth, ICSS_SLICE0); +} + +static const struct prueth_pdata am654_sr1_icssg_pdata = { + .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE, +}; + +static const struct of_device_id prueth_dt_match[] = { + { .compatible = "ti,am654-sr1-icssg-prueth", .data = &am654_sr1_icssg_pdata }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, prueth_dt_match); + +static struct platform_driver prueth_driver = { + .probe = prueth_probe, + .remove_new = prueth_remove, + .driver = { + .name = "icssg-prueth-sr1", + .of_match_table = prueth_dt_match, + .pm = &prueth_dev_pm_ops, + }, +}; +module_platform_driver(prueth_driver); + +MODULE_AUTHOR("Roger Quadros <[email protected]>"); +MODULE_AUTHOR("Md Danish Anwar <[email protected]>"); +MODULE_AUTHOR("Diogo Ivo <[email protected]>"); +MODULE_DESCRIPTION(PRUETH_MODULE_DESCRIPTION); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/ti/k3-cppi-desc-pool.c b/drivers/net/ethernet/ti/k3-cppi-desc-pool.c index 05cc7aab1ec8..739bae8e11ee 100644 --- a/drivers/net/ethernet/ti/k3-cppi-desc-pool.c +++ b/drivers/net/ethernet/ti/k3-cppi-desc-pool.c @@ -22,6 +22,7 @@ struct k3_cppi_desc_pool { size_t mem_size; size_t num_desc; struct gen_pool *gen_pool; + void **desc_infos; }; void k3_cppi_desc_pool_destroy(struct k3_cppi_desc_pool *pool) @@ -37,7 +38,11 @@ void k3_cppi_desc_pool_destroy(struct k3_cppi_desc_pool *pool) dma_free_coherent(pool->dev, pool->mem_size, pool->cpumem, pool->dma_addr); + kfree(pool->desc_infos); + gen_pool_destroy(pool->gen_pool); /* frees pool->name */ + + kfree(pool); } EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_destroy); @@ -50,7 +55,7 @@ 
k3_cppi_desc_pool_create_name(struct device *dev, size_t size, const char *pool_name = NULL; int ret = -ENOMEM; - pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL); + pool = kzalloc(sizeof(*pool), GFP_KERNEL); if (!pool) return ERR_PTR(ret); @@ -62,18 +67,21 @@ k3_cppi_desc_pool_create_name(struct device *dev, size_t size, pool_name = kstrdup_const(name ? name : dev_name(pool->dev), GFP_KERNEL); if (!pool_name) - return ERR_PTR(-ENOMEM); + goto gen_pool_create_fail; pool->gen_pool = gen_pool_create(ilog2(pool->desc_size), -1); if (!pool->gen_pool) { - ret = -ENOMEM; - dev_err(pool->dev, "pool create failed %d\n", ret); kfree_const(pool_name); goto gen_pool_create_fail; } pool->gen_pool->name = pool_name; + pool->desc_infos = kcalloc(pool->num_desc, + sizeof(*pool->desc_infos), GFP_KERNEL); + if (!pool->desc_infos) + goto gen_pool_desc_infos_alloc_fail; + pool->cpumem = dma_alloc_coherent(pool->dev, pool->mem_size, &pool->dma_addr, GFP_KERNEL); @@ -94,9 +102,11 @@ gen_pool_add_virt_fail: dma_free_coherent(pool->dev, pool->mem_size, pool->cpumem, pool->dma_addr); dma_alloc_fail: + kfree(pool->desc_infos); +gen_pool_desc_infos_alloc_fail: gen_pool_destroy(pool->gen_pool); /* frees pool->name */ gen_pool_create_fail: - devm_kfree(pool->dev, pool); + kfree(pool); return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_create_name); @@ -132,5 +142,31 @@ size_t k3_cppi_desc_pool_avail(struct k3_cppi_desc_pool *pool) } EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_avail); +size_t k3_cppi_desc_pool_desc_size(const struct k3_cppi_desc_pool *pool) +{ + return pool->desc_size; +} +EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_desc_size); + +void *k3_cppi_desc_pool_cpuaddr(const struct k3_cppi_desc_pool *pool) +{ + return pool->cpumem; +} +EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_cpuaddr); + +void k3_cppi_desc_pool_desc_info_set(struct k3_cppi_desc_pool *pool, + int desc_idx, void *info) +{ + pool->desc_infos[desc_idx] = info; +} +EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_desc_info_set); + +void *k3_cppi_desc_pool_desc_info(const struct k3_cppi_desc_pool *pool, + int desc_idx) +{ + return pool->desc_infos[desc_idx]; +} +EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_desc_info); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("TI K3 CPPI5 descriptors pool API"); diff --git a/drivers/net/ethernet/ti/k3-cppi-desc-pool.h b/drivers/net/ethernet/ti/k3-cppi-desc-pool.h index a7e3fa5e7b62..851d352b338b 100644 --- a/drivers/net/ethernet/ti/k3-cppi-desc-pool.h +++ b/drivers/net/ethernet/ti/k3-cppi-desc-pool.h @@ -26,5 +26,11 @@ k3_cppi_desc_pool_dma2virt(struct k3_cppi_desc_pool *pool, dma_addr_t dma); void *k3_cppi_desc_pool_alloc(struct k3_cppi_desc_pool *pool); void k3_cppi_desc_pool_free(struct k3_cppi_desc_pool *pool, void *addr); size_t k3_cppi_desc_pool_avail(struct k3_cppi_desc_pool *pool); +size_t k3_cppi_desc_pool_desc_size(const struct k3_cppi_desc_pool *pool); +void *k3_cppi_desc_pool_cpuaddr(const struct k3_cppi_desc_pool *pool); +void k3_cppi_desc_pool_desc_info_set(struct k3_cppi_desc_pool *pool, + int desc_idx, void *info); +void *k3_cppi_desc_pool_desc_info(const struct k3_cppi_desc_pool *pool, + int desc_idx); #endif /* K3_CPPI_DESC_POOL_H_ */ diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig index da287ef65be7..00773f5e4d7e 100644 --- a/drivers/net/ethernet/via/Kconfig +++ b/drivers/net/ethernet/via/Kconfig @@ -20,6 +20,7 @@ config VIA_RHINE tristate "VIA Rhine support" depends on PCI || (OF_IRQ && GENERIC_PCI_IOMAP) depends on PCI || ARCH_VT8500 || COMPILE_TEST + depends on HAS_IOPORT depends on 
HAS_DMA select CRC32 select MII diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 1c6b2a9bba08..55fff4d0d380 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -2294,7 +2294,7 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu) int ret = 0; if (!netif_running(dev)) { - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); goto out_0; } @@ -2336,7 +2336,7 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu) tmp_vptr->rx = rx; tmp_vptr->tx = tx; - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); velocity_init_registers(vptr, VELOCITY_INIT_COLD); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c index 945c13d1a982..3662483bfe2e 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -1408,7 +1408,7 @@ int wx_change_mtu(struct net_device *netdev, int new_mtu) { struct wx *wx = netdev_priv(netdev); - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); wx_set_rx_buffer_len(wx); return 0; diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c index 93295916b1d2..5f502265f0a6 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c @@ -302,7 +302,7 @@ irqreturn_t txgbe_link_irq_handler(int irq, void *data) status = rd32(wx, TXGBE_CFG_PORT_ST); up = !!(status & TXGBE_CFG_PORT_ST_LINK_UP); - phylink_mac_change(wx->phylink, up); + phylink_pcs_change(&txgbe->xpcs->pcs, up); return IRQ_HANDLED; } diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h index 807ead678551..fa5500decc96 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet.h +++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h @@ -359,6 +359,7 @@ * @app2: MM2S/S2MM User Application Field 2. * @app3: MM2S/S2MM User Application Field 3. * @app4: MM2S/S2MM User Application Field 4. + * @skb: Pointer to SKB transferred using DMA */ struct axidma_bd { u32 next; /* Physical address of next buffer descriptor */ @@ -399,7 +400,6 @@ struct skbuf_dma_descriptor { * struct axienet_local - axienet private per device data * @ndev: Pointer for net_device to which it will be attached. * @dev: Pointer to device structure - * @phy_node: Pointer to device node structure * @phylink: Pointer to phylink instance * @phylink_config: phylink configuration settings * @pcs_phy: Reference to PCS/PMA PHY if used @@ -537,7 +537,7 @@ struct axienet_local { }; /** - * struct axiethernet_option - Used to set axi ethernet hardware options + * struct axienet_option - Used to set axi ethernet hardware options * @opt: Option to be set. 
* @reg: Register offset to be written for setting the option * @m_or: Mask to be ORed for setting the option in the register diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index aaf780fd4f5e..c29809cd9201 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1641,7 +1641,7 @@ static int axienet_change_mtu(struct net_device *ndev, int new_mtu) XAE_TRL_SIZE) > lp->rxmem) return -EINVAL; - ndev->mtu = new_mtu; + WRITE_ONCE(ndev->mtu, new_mtu); return 0; } diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c index 2f07fde361aa..9ca2643c921e 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c @@ -20,7 +20,14 @@ #define DEFAULT_MDIO_FREQ 2500000 /* 2.5 MHz */ #define DEFAULT_HOST_CLOCK 150000000 /* 150 MHz */ -/* Wait till MDIO interface is ready to accept a new transaction.*/ +/** + * axienet_mdio_wait_until_ready - MDIO wait function + * @lp: Pointer to axienet local data structure. + * + * Return : 0 on success, Negative value on errors + * + * Wait till MDIO interface is ready to accept a new transaction. + */ static int axienet_mdio_wait_until_ready(struct axienet_local *lp) { u32 val; @@ -30,14 +37,24 @@ static int axienet_mdio_wait_until_ready(struct axienet_local *lp) 1, 20000); } -/* Enable the MDIO MDC. Called prior to a read/write operation */ +/** + * axienet_mdio_mdc_enable - MDIO MDC enable function + * @lp: Pointer to axienet local data structure. + * + * Enable the MDIO MDC. Called prior to a read/write operation + */ static void axienet_mdio_mdc_enable(struct axienet_local *lp) { axienet_iow(lp, XAE_MDIO_MC_OFFSET, ((u32)lp->mii_clk_div | XAE_MDIO_MC_MDIOEN_MASK)); } -/* Disable the MDIO MDC. Called after a read/write operation*/ +/** + * axienet_mdio_mdc_disable - MDIO MDC disable function + * @lp: Pointer to axienet local data structure. + * + * Disable the MDIO MDC. Called after a read/write operation + */ static void axienet_mdio_mdc_disable(struct axienet_local *lp) { u32 mc_reg; diff --git a/drivers/net/ethernet/xircom/Kconfig b/drivers/net/ethernet/xircom/Kconfig index 7497b9bea511..bfbdcf758afb 100644 --- a/drivers/net/ethernet/xircom/Kconfig +++ b/drivers/net/ethernet/xircom/Kconfig @@ -19,7 +19,7 @@ if NET_VENDOR_XIRCOM config PCMCIA_XIRC2PS tristate "Xircom 16-bit PCMCIA support" - depends on PCMCIA + depends on PCMCIA && HAS_IOPORT help Say Y here if you intend to attach a Xircom 16-bit PCMCIA (PC-card) Ethernet or Fast Ethernet card to your computer. 
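[Editor's note, not part of the diff] Several hunks in this series (via-velocity, libwx, axienet, ixp4xx, fjes, geneve, and the xirc2ps if_port change that follows) convert plain stores such as "dev->mtu = new_mtu" into WRITE_ONCE(). The point of the pattern is that these fields are now sampled by lockless readers, so the store must be a single, untorn write that the compiler cannot split or re-issue. The sketch below is a minimal illustration of the writer/reader pairing using the kernel's WRITE_ONCE()/READ_ONCE() macros; the function names example_change_mtu() and example_current_mtu() are hypothetical and only stand in for an ndo_change_mtu() implementation and a lockless fast-path reader.

/* Illustrative sketch (not from the diff): the writer still runs under
 * RTNL as before; WRITE_ONCE() protects and documents the concurrent
 * lockless readers that use READ_ONCE().
 */
#include <linux/compiler.h>
#include <linux/netdevice.h>

/* writer side, e.g. a driver's .ndo_change_mtu callback (hypothetical) */
static int example_change_mtu(struct net_device *dev, int new_mtu)
{
	WRITE_ONCE(dev->mtu, new_mtu);	/* single untorn store */
	return 0;
}

/* reader side, running without RTNL (hypothetical fast-path helper) */
static unsigned int example_current_mtu(const struct net_device *dev)
{
	return READ_ONCE(dev->mtu);	/* pairs with the WRITE_ONCE() above */
}

The existing locking on the writer side is unchanged by these hunks; the annotation only makes the read side safe (and visibly intentional) when it runs without the RTNL lock.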
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c index e9bc38fd2025..a31d5d5e6593 100644 --- a/drivers/net/ethernet/xircom/xirc2ps_cs.c +++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c @@ -1366,10 +1366,10 @@ do_config(struct net_device *dev, struct ifmap *map) return -EINVAL; if (!map->port) { local->probe_port = 1; - dev->if_port = 1; + WRITE_ONCE(dev->if_port, 1); } else { local->probe_port = 0; - dev->if_port = map->port; + WRITE_ONCE(dev->if_port, map->port); } netdev_info(dev, "switching to %s port\n", if_names[dev->if_port]); do_reset(dev,1); /* not the fine way :-) */ diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index e0d26148dfd9..8aff6a73ca0a 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -1233,7 +1233,7 @@ static int ixp4xx_eth_change_mtu(struct net_device *dev, int new_mtu) return ret; } - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return 0; } diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c index 1fef8a9b1a0f..0fbbb7286008 100644 --- a/drivers/net/fddi/defxx.c +++ b/drivers/net/fddi/defxx.c @@ -254,7 +254,7 @@ static const char version[] = #define DFX_BUS_TC(dev) 0 #endif -#if defined(CONFIG_EISA) || defined(CONFIG_PCI) +#ifdef CONFIG_HAS_IOPORT #define dfx_use_mmio bp->mmio #else #define dfx_use_mmio true diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index 5fbe33a09bb0..b3ddc9a629d9 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -156,7 +156,6 @@ static void fjes_acpi_remove(struct acpi_device *device) static struct acpi_driver fjes_acpi_driver = { .name = DRV_NAME, .class = DRV_NAME, - .owner = THIS_MODULE, .ids = fjes_acpi_ids, .ops = { .add = fjes_acpi_add, @@ -811,7 +810,7 @@ static int fjes_change_mtu(struct net_device *netdev, int new_mtu) netif_tx_stop_all_queues(netdev); } - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); if (running) { for (epidx = 0; epidx < hw->max_epid; epidx++) { diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 6c2835086b57..51495cb4b9be 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -225,10 +225,11 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs, void *oiph; if (ip_tunnel_collect_metadata() || gs->collect_md) { - __be16 flags; + IP_TUNNEL_DECLARE_FLAGS(flags) = { }; - flags = TUNNEL_KEY | (gnvh->oam ? TUNNEL_OAM : 0) | - (gnvh->critical ? TUNNEL_CRIT_OPT : 0); + __set_bit(IP_TUNNEL_KEY_BIT, flags); + __assign_bit(IP_TUNNEL_OAM_BIT, flags, gnvh->oam); + __assign_bit(IP_TUNNEL_CRIT_OPT_BIT, flags, gnvh->critical); tun_dst = udp_tun_rx_dst(skb, geneve_get_sk_family(gs), flags, vni_to_tunnel_id(gnvh->vni), @@ -238,9 +239,11 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs, goto drop; } /* Update tunnel dst according to Geneve options. */ + ip_tunnel_flags_zero(flags); + __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, flags); ip_tunnel_info_opts_set(&tun_dst->u.tun_info, gnvh->options, gnvh->opt_len * 4, - TUNNEL_GENEVE_OPT); + flags); } else { /* Drop packets w/ critical options, * since we don't support any... 
@@ -745,14 +748,15 @@ static void geneve_build_header(struct genevehdr *geneveh, { geneveh->ver = GENEVE_VER; geneveh->opt_len = info->options_len / 4; - geneveh->oam = !!(info->key.tun_flags & TUNNEL_OAM); - geneveh->critical = !!(info->key.tun_flags & TUNNEL_CRIT_OPT); + geneveh->oam = test_bit(IP_TUNNEL_OAM_BIT, info->key.tun_flags); + geneveh->critical = test_bit(IP_TUNNEL_CRIT_OPT_BIT, + info->key.tun_flags); geneveh->rsvd1 = 0; tunnel_id_to_vni(info->key.tun_id, geneveh->vni); geneveh->proto_type = inner_proto; geneveh->rsvd2 = 0; - if (info->key.tun_flags & TUNNEL_GENEVE_OPT) + if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, info->key.tun_flags)) ip_tunnel_info_opts_get(geneveh->options, info); } @@ -761,7 +765,7 @@ static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb, bool xnet, int ip_hdr_len, bool inner_proto_inherit) { - bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM); + bool udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags); struct genevehdr *gnvh; __be16 inner_proto; int min_headroom; @@ -878,7 +882,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (geneve->cfg.collect_md) { ttl = key->ttl; - df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; + df = test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, key->tun_flags) ? + htons(IP_DF) : 0; } else { if (geneve->cfg.ttl_inherit) ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb); @@ -910,7 +915,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, saddr, info->key.u.ipv4.dst, tos, ttl, df, sport, geneve->cfg.info.key.tp_dst, !net_eq(geneve->net, dev_net(geneve->dev)), - !(info->key.tun_flags & TUNNEL_CSUM)); + !test_bit(IP_TUNNEL_CSUM_BIT, + info->key.tun_flags)); return 0; } @@ -998,7 +1004,8 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev, &saddr, &key->u.ipv6.dst, prio, ttl, info->key.label, sport, geneve->cfg.info.key.tp_dst, - !(info->key.tun_flags & TUNNEL_CSUM)); + !test_bit(IP_TUNNEL_CSUM_BIT, + info->key.tun_flags)); return 0; } #endif @@ -1052,7 +1059,7 @@ static int geneve_change_mtu(struct net_device *dev, int new_mtu) else if (new_mtu < dev->min_mtu) new_mtu = dev->min_mtu; - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return 0; } @@ -1297,7 +1304,8 @@ static struct geneve_dev *geneve_find_dev(struct geneve_net *gn, static bool is_tnl_info_zero(const struct ip_tunnel_info *info) { - return !(info->key.tun_id || info->key.tun_flags || info->key.tos || + return !(info->key.tun_id || info->key.tos || + !ip_tunnel_flags_empty(info->key.tun_flags) || info->key.ttl || info->key.label || info->key.tp_src || memchr_inv(&info->key.u, 0, sizeof(info->key.u))); } @@ -1435,7 +1443,7 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[], "Remote IPv6 address cannot be Multicast"); return -EINVAL; } - info->key.tun_flags |= TUNNEL_CSUM; + __set_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags); cfg->use_udp6_rx_checksums = true; #else NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6], @@ -1510,7 +1518,7 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[], goto change_notsup; } if (nla_get_u8(data[IFLA_GENEVE_UDP_CSUM])) - info->key.tun_flags |= TUNNEL_CSUM; + __set_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags); } if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]) { @@ -1520,7 +1528,7 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[], goto change_notsup; } if 
(nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX])) - info->key.tun_flags &= ~TUNNEL_CSUM; + __clear_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags); #else NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX], "IPv6 support not enabled in the kernel"); @@ -1753,7 +1761,8 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) info->key.u.ipv4.dst)) goto nla_put_failure; if (nla_put_u8(skb, IFLA_GENEVE_UDP_CSUM, - !!(info->key.tun_flags & TUNNEL_CSUM))) + test_bit(IP_TUNNEL_CSUM_BIT, + info->key.tun_flags))) goto nla_put_failure; #if IS_ENABLED(CONFIG_IPV6) @@ -1762,7 +1771,8 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) &info->key.u.ipv6.dst)) goto nla_put_failure; if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_TX, - !(info->key.tun_flags & TUNNEL_CSUM))) + !test_bit(IP_TUNNEL_CSUM_BIT, + info->key.tun_flags))) goto nla_put_failure; #endif } diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index e62d6cbdf9bc..427b91aca50d 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -24,6 +24,7 @@ #include <net/net_namespace.h> #include <net/protocol.h> #include <net/ip.h> +#include <net/ipv6.h> #include <net/udp.h> #include <net/udp_tunnel.h> #include <net/icmp.h> @@ -50,8 +51,14 @@ struct pdp_ctx { u8 gtp_version; u16 af; - struct in_addr ms_addr_ip4; - struct in_addr peer_addr_ip4; + union { + struct in_addr addr; + struct in6_addr addr6; + } ms; + union { + struct in_addr addr; + struct in6_addr addr6; + } peer; struct sock *sk; struct net_device *dev; @@ -80,9 +87,15 @@ struct gtp_dev { }; struct echo_info { - struct in_addr ms_addr_ip4; - struct in_addr peer_addr_ip4; + u16 af; u8 gtp_version; + + union { + struct in_addr addr; + } ms; + union { + struct in_addr addr; + } peer; }; static unsigned int gtp_net_id __read_mostly; @@ -121,8 +134,14 @@ static inline u32 ipv4_hashfn(__be32 ip) return jhash_1word((__force u32)ip, gtp_h_initval); } +static u32 ipv6_hashfn(const struct in6_addr *ip6) +{ + return jhash_2words((__force u32)ip6->s6_addr32[0], + (__force u32)ip6->s6_addr32[1], gtp_h_initval); +} + /* Resolve a PDP context structure based on the 64bit TID. */ -static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid) +static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid, u16 family) { struct hlist_head *head; struct pdp_ctx *pdp; @@ -130,7 +149,8 @@ static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid) head = >p->tid_hash[gtp0_hashfn(tid) % gtp->hash_size]; hlist_for_each_entry_rcu(pdp, head, hlist_tid) { - if (pdp->gtp_version == GTP_V0 && + if (pdp->af == family && + pdp->gtp_version == GTP_V0 && pdp->u.v0.tid == tid) return pdp; } @@ -138,7 +158,7 @@ static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid) } /* Resolve a PDP context structure based on the 32bit TEI. 
*/ -static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid) +static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid, u16 family) { struct hlist_head *head; struct pdp_ctx *pdp; @@ -146,7 +166,8 @@ static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid) head = >p->tid_hash[gtp1u_hashfn(tid) % gtp->hash_size]; hlist_for_each_entry_rcu(pdp, head, hlist_tid) { - if (pdp->gtp_version == GTP_V1 && + if (pdp->af == family && + pdp->gtp_version == GTP_V1 && pdp->u.v1.i_tei == tid) return pdp; } @@ -163,7 +184,42 @@ static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr) hlist_for_each_entry_rcu(pdp, head, hlist_addr) { if (pdp->af == AF_INET && - pdp->ms_addr_ip4.s_addr == ms_addr) + pdp->ms.addr.s_addr == ms_addr) + return pdp; + } + + return NULL; +} + +/* 3GPP TS 29.060: PDN Connection: the association between a MS represented by + * [...] one IPv6 *prefix* and a PDN represented by an APN. + * + * Then, 3GPP TS 29.061, Section 11.2.1.3 says: The size of the prefix shall be + * according to the maximum prefix length for a global IPv6 address as + * specified in the IPv6 Addressing Architecture, see RFC 4291. + * + * Finally, RFC 4291 section 2.5.4 states: All Global Unicast addresses other + * than those that start with binary 000 have a 64-bit interface ID field + * (i.e., n + m = 64). + */ +static bool ipv6_pdp_addr_equal(const struct in6_addr *a, + const struct in6_addr *b) +{ + return a->s6_addr32[0] == b->s6_addr32[0] && + a->s6_addr32[1] == b->s6_addr32[1]; +} + +static struct pdp_ctx *ipv6_pdp_find(struct gtp_dev *gtp, + const struct in6_addr *ms_addr) +{ + struct hlist_head *head; + struct pdp_ctx *pdp; + + head = >p->addr_hash[ipv6_hashfn(ms_addr) % gtp->hash_size]; + + hlist_for_each_entry_rcu(pdp, head, hlist_addr) { + if (pdp->af == AF_INET6 && + ipv6_pdp_addr_equal(&pdp->ms.addr6, ms_addr)) return pdp; } @@ -181,34 +237,85 @@ static bool gtp_check_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx, iph = (struct iphdr *)(skb->data + hdrlen); if (role == GTP_ROLE_SGSN) - return iph->daddr == pctx->ms_addr_ip4.s_addr; + return iph->daddr == pctx->ms.addr.s_addr; else - return iph->saddr == pctx->ms_addr_ip4.s_addr; + return iph->saddr == pctx->ms.addr.s_addr; +} + +static bool gtp_check_ms_ipv6(struct sk_buff *skb, struct pdp_ctx *pctx, + unsigned int hdrlen, unsigned int role) +{ + struct ipv6hdr *ip6h; + int ret; + + if (!pskb_may_pull(skb, hdrlen + sizeof(struct ipv6hdr))) + return false; + + ip6h = (struct ipv6hdr *)(skb->data + hdrlen); + + if ((ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL) || + (ipv6_addr_type(&ip6h->daddr) & IPV6_ADDR_LINKLOCAL)) + return false; + + if (role == GTP_ROLE_SGSN) { + ret = ipv6_pdp_addr_equal(&ip6h->daddr, &pctx->ms.addr6); + } else { + ret = ipv6_pdp_addr_equal(&ip6h->saddr, &pctx->ms.addr6); + } + + return ret; } /* Check if the inner IP address in this packet is assigned to any * existing mobile subscriber. 
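The comment block above fixes the design point for the new IPv6 PDP lookup: a mobile subscriber is keyed by its /64 prefix, so ipv6_pdp_addr_equal() and the hash in ipv6_hashfn() only consider the first 64 bits of the address. The same comparison in isolation, as a standalone userspace program (illustrative addresses):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/* Compare only the 64-bit prefix, mirroring ipv6_pdp_addr_equal() above. */
static bool prefix64_equal(const struct in6_addr *a, const struct in6_addr *b)
{
        return memcmp(a, b, 8) == 0;
}

int main(void)
{
        struct in6_addr ms, pkt;

        inet_pton(AF_INET6, "2001:db8:1:2::1", &ms);
        inet_pton(AF_INET6, "2001:db8:1:2::dead:beef", &pkt);

        /* Same /64 prefix -> same PDP context, even though the
         * interface identifiers differ.
         */
        printf("same subscriber: %s\n",
               prefix64_equal(&ms, &pkt) ? "yes" : "no");
        return 0;
}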
*/ static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx, - unsigned int hdrlen, unsigned int role) + unsigned int hdrlen, unsigned int role, + __u16 inner_proto) { - switch (ntohs(skb->protocol)) { + switch (inner_proto) { case ETH_P_IP: return gtp_check_ms_ipv4(skb, pctx, hdrlen, role); + case ETH_P_IPV6: + return gtp_check_ms_ipv6(skb, pctx, hdrlen, role); } return false; } +static int gtp_inner_proto(struct sk_buff *skb, unsigned int hdrlen, + __u16 *inner_proto) +{ + __u8 *ip_version, _ip_version; + + ip_version = skb_header_pointer(skb, hdrlen, sizeof(*ip_version), + &_ip_version); + if (!ip_version) + return -1; + + switch (*ip_version & 0xf0) { + case 0x40: + *inner_proto = ETH_P_IP; + break; + case 0x60: + *inner_proto = ETH_P_IPV6; + break; + default: + return -1; + } + + return 0; +} + static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb, - unsigned int hdrlen, unsigned int role) + unsigned int hdrlen, unsigned int role, __u16 inner_proto) { - if (!gtp_check_ms(skb, pctx, hdrlen, role)) { + if (!gtp_check_ms(skb, pctx, hdrlen, role, inner_proto)) { netdev_dbg(pctx->dev, "No PDP ctx for this MS\n"); return 1; } /* Get rid of the GTP + UDP headers. */ - if (iptunnel_pull_header(skb, hdrlen, skb->protocol, + if (iptunnel_pull_header(skb, hdrlen, htons(inner_proto), !net_eq(sock_net(pctx->sk), dev_net(pctx->dev)))) { pctx->dev->stats.rx_length_errors++; goto err; @@ -250,6 +357,27 @@ static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4, return ip_route_output_key(sock_net(sk), fl4); } +static struct rt6_info *ip6_route_output_gtp(struct net *net, + struct flowi6 *fl6, + const struct sock *sk, + const struct in6_addr *daddr, + struct in6_addr *saddr) +{ + struct dst_entry *dst; + + memset(fl6, 0, sizeof(*fl6)); + fl6->flowi6_oif = sk->sk_bound_dev_if; + fl6->daddr = *daddr; + fl6->saddr = *saddr; + fl6->flowi6_proto = sk->sk_protocol; + + dst = ipv6_stub->ipv6_dst_lookup_flow(net, sk, fl6, NULL); + if (IS_ERR(dst)) + return ERR_PTR(-ENETUNREACH); + + return (struct rt6_info *)dst; +} + /* GSM TS 09.60. 7.3 * In all Path Management messages: * - TID: is not used and shall be set to 0. @@ -292,13 +420,39 @@ static void gtp0_build_echo_msg(struct gtp0_header *hdr, __u8 msg_type) hdr->length = 0; } +static int gtp0_send_echo_resp_ip(struct gtp_dev *gtp, struct sk_buff *skb) +{ + struct iphdr *iph = ip_hdr(skb); + struct flowi4 fl4; + struct rtable *rt; + + /* find route to the sender, + * src address becomes dst address and vice versa. + */ + rt = ip4_route_output_gtp(&fl4, gtp->sk0, iph->saddr, iph->daddr); + if (IS_ERR(rt)) { + netdev_dbg(gtp->dev, "no route for echo response from %pI4\n", + &iph->saddr); + return -1; + } + + udp_tunnel_xmit_skb(rt, gtp->sk0, skb, + fl4.saddr, fl4.daddr, + iph->tos, + ip4_dst_hoplimit(&rt->dst), + 0, + htons(GTP0_PORT), htons(GTP0_PORT), + !net_eq(sock_net(gtp->sk1u), + dev_net(gtp->dev)), + false); + + return 0; +} + static int gtp0_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb) { struct gtp0_packet *gtp_pkt; struct gtp0_header *gtp0; - struct rtable *rt; - struct flowi4 fl4; - struct iphdr *iph; __be16 seq; gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr)); @@ -325,27 +479,15 @@ static int gtp0_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb) gtp_pkt->ie.tag = GTPIE_RECOVERY; gtp_pkt->ie.val = gtp->restart_count; - iph = ip_hdr(skb); - - /* find route to the sender, - * src address becomes dst address and vice versa. 
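gtp_inner_proto() above classifies the encapsulated payload purely from the IP version nibble of its first byte, since GTP-U carries no ethertype for the inner packet. The same check in a self-contained form:

#include <stdint.h>
#include <stdio.h>

/* Returns 4 or 6 for an IP payload, -1 otherwise; mirrors the 0x40/0x60
 * version-nibble test in gtp_inner_proto() above.
 */
static int ip_version_of(const uint8_t *payload)
{
        switch (payload[0] & 0xf0) {
        case 0x40:
                return 4;
        case 0x60:
                return 6;
        default:
                return -1;
        }
}

int main(void)
{
        uint8_t v4_first_byte[] = { 0x45 };     /* IPv4, IHL 5 */
        uint8_t v6_first_byte[] = { 0x60 };     /* IPv6, traffic class 0 */

        printf("%d %d\n", ip_version_of(v4_first_byte),
               ip_version_of(v6_first_byte));
        return 0;
}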
- */ - rt = ip4_route_output_gtp(&fl4, gtp->sk0, iph->saddr, iph->daddr); - if (IS_ERR(rt)) { - netdev_dbg(gtp->dev, "no route for echo response from %pI4\n", - &iph->saddr); + switch (gtp->sk0->sk_family) { + case AF_INET: + if (gtp0_send_echo_resp_ip(gtp, skb) < 0) + return -1; + break; + case AF_INET6: return -1; } - udp_tunnel_xmit_skb(rt, gtp->sk0, skb, - fl4.saddr, fl4.daddr, - iph->tos, - ip4_dst_hoplimit(&rt->dst), - 0, - htons(GTP0_PORT), htons(GTP0_PORT), - !net_eq(sock_net(gtp->sk1u), - dev_net(gtp->dev)), - false); return 0; } @@ -360,8 +502,8 @@ static int gtp_genl_fill_echo(struct sk_buff *skb, u32 snd_portid, u32 snd_seq, goto failure; if (nla_put_u32(skb, GTPA_VERSION, echo.gtp_version) || - nla_put_be32(skb, GTPA_PEER_ADDRESS, echo.peer_addr_ip4.s_addr) || - nla_put_be32(skb, GTPA_MS_ADDRESS, echo.ms_addr_ip4.s_addr)) + nla_put_be32(skb, GTPA_PEER_ADDRESS, echo.peer.addr.s_addr) || + nla_put_be32(skb, GTPA_MS_ADDRESS, echo.ms.addr.s_addr)) goto failure; genlmsg_end(skb, genlh); @@ -372,12 +514,20 @@ failure: return -EMSGSIZE; } +static void gtp0_handle_echo_resp_ip(struct sk_buff *skb, struct echo_info *echo) +{ + struct iphdr *iph = ip_hdr(skb); + + echo->ms.addr.s_addr = iph->daddr; + echo->peer.addr.s_addr = iph->saddr; + echo->gtp_version = GTP_V0; +} + static int gtp0_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb) { struct gtp0_header *gtp0; struct echo_info echo; struct sk_buff *msg; - struct iphdr *iph; int ret; gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr)); @@ -385,10 +535,13 @@ static int gtp0_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb) if (!gtp0_validate_echo_hdr(gtp0)) return -1; - iph = ip_hdr(skb); - echo.ms_addr_ip4.s_addr = iph->daddr; - echo.peer_addr_ip4.s_addr = iph->saddr; - echo.gtp_version = GTP_V0; + switch (gtp->sk0->sk_family) { + case AF_INET: + gtp0_handle_echo_resp_ip(skb, &echo); + break; + case AF_INET6: + return -1; + } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!msg) @@ -404,6 +557,21 @@ static int gtp0_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb) msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC); } +static int gtp_proto_to_family(__u16 proto) +{ + switch (proto) { + case ETH_P_IP: + return AF_INET; + case ETH_P_IPV6: + return AF_INET6; + default: + WARN_ON_ONCE(1); + break; + } + + return AF_UNSPEC; +} + /* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. 
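The per-version receive helpers in this file keep the return convention stated in the closing comment above: 1 passes the packet up to the UDP stack, 0 means it was decapsulated, and -1 asks the caller to drop it. A hedged sketch of a dispatcher honouring that convention (handle_one_pdu() is a hypothetical stand-in, not the driver's code):

#include <linux/skbuff.h>
#include <net/sock.h>

static int handle_one_pdu(void *priv, struct sk_buff *skb);       /* hypothetical */

static int example_encap_recv(struct sock *sk, struct sk_buff *skb)
{
        void *priv = rcu_dereference_sk_user_data(sk);
        int ret;

        if (!priv)
                return 1;               /* socket not managed by the tunnel */

        ret = handle_one_pdu(priv, skb);
        if (ret > 0)
                return 1;               /* hand back to the normal UDP path */
        if (ret < 0)
                kfree_skb(skb);         /* malformed or no matching context */
        return 0;                       /* consumed either way */
}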
*/ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb) { @@ -411,6 +579,7 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb) sizeof(struct gtp0_header); struct gtp0_header *gtp0; struct pdp_ctx *pctx; + __u16 inner_proto; if (!pskb_may_pull(skb, hdrlen)) return -1; @@ -433,13 +602,19 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb) if (gtp0->type != GTP_TPDU) return 1; - pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid)); + if (gtp_inner_proto(skb, hdrlen, &inner_proto) < 0) { + netdev_dbg(gtp->dev, "GTP packet does not encapsulate an IP packet\n"); + return -1; + } + + pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid), + gtp_proto_to_family(inner_proto)); if (!pctx) { netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb); return 1; } - return gtp_rx(pctx, skb, hdrlen, gtp->role); + return gtp_rx(pctx, skb, hdrlen, gtp->role, inner_proto); } /* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */ @@ -549,8 +724,8 @@ static int gtp1u_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb) return -1; iph = ip_hdr(skb); - echo.ms_addr_ip4.s_addr = iph->daddr; - echo.peer_addr_ip4.s_addr = iph->saddr; + echo.ms.addr.s_addr = iph->daddr; + echo.peer.addr.s_addr = iph->saddr; echo.gtp_version = GTP_V1; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); @@ -567,12 +742,50 @@ static int gtp1u_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb) msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC); } +static int gtp_parse_exthdrs(struct sk_buff *skb, unsigned int *hdrlen) +{ + struct gtp_ext_hdr *gtp_exthdr, _gtp_exthdr; + unsigned int offset = *hdrlen; + __u8 *next_type, _next_type; + + /* From 29.060: "The Extension Header Length field specifies the length + * of the particular Extension header in 4 octets units." + * + * This length field includes length field size itself (1 byte), + * payload (variable length) and next type (1 byte). The extension + * header is aligned to to 4 bytes. + */ + + do { + gtp_exthdr = skb_header_pointer(skb, offset, sizeof(*gtp_exthdr), + &_gtp_exthdr); + if (!gtp_exthdr || !gtp_exthdr->len) + return -1; + + offset += gtp_exthdr->len * 4; + + /* From 29.060: "If no such Header follows, then the value of + * the Next Extension Header Type shall be 0." 
+ */ + next_type = skb_header_pointer(skb, offset - 1, + sizeof(_next_type), &_next_type); + if (!next_type) + return -1; + + } while (*next_type != 0); + + *hdrlen = offset; + + return 0; +} + static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb) { unsigned int hdrlen = sizeof(struct udphdr) + sizeof(struct gtp1_header); struct gtp1_header *gtp1; struct pdp_ctx *pctx; + __u16 inner_proto; if (!pskb_may_pull(skb, hdrlen)) return -1; @@ -608,15 +821,25 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb) if (!pskb_may_pull(skb, hdrlen)) return -1; + if (gtp_inner_proto(skb, hdrlen, &inner_proto) < 0) { + netdev_dbg(gtp->dev, "GTP packet does not encapsulate an IP packet\n"); + return -1; + } + gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr)); - pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid)); + pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid), + gtp_proto_to_family(inner_proto)); if (!pctx) { netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb); return 1; } - return gtp_rx(pctx, skb, hdrlen, gtp->role); + if (gtp1->flags & GTP1_F_EXTHDR && + gtp_parse_exthdrs(skb, &hdrlen) < 0) + return -1; + + return gtp_rx(pctx, skb, hdrlen, gtp->role, inner_proto); } static void __gtp_encap_destroy(struct sock *sk) @@ -760,11 +983,17 @@ static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx) struct gtp_pktinfo { struct sock *sk; - struct iphdr *iph; - struct flowi4 fl4; - struct rtable *rt; + union { + struct flowi4 fl4; + struct flowi6 fl6; + }; + union { + struct rtable *rt; + struct rt6_info *rt6; + }; struct pdp_ctx *pctx; struct net_device *dev; + __u8 tos; __be16 gtph_port; }; @@ -783,64 +1012,61 @@ static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo) } static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo, - struct sock *sk, struct iphdr *iph, + struct sock *sk, __u8 tos, struct pdp_ctx *pctx, struct rtable *rt, struct flowi4 *fl4, struct net_device *dev) { pktinfo->sk = sk; - pktinfo->iph = iph; + pktinfo->tos = tos; pktinfo->pctx = pctx; pktinfo->rt = rt; pktinfo->fl4 = *fl4; pktinfo->dev = dev; } -static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev, - struct gtp_pktinfo *pktinfo) +static void gtp_set_pktinfo_ipv6(struct gtp_pktinfo *pktinfo, + struct sock *sk, __u8 tos, + struct pdp_ctx *pctx, struct rt6_info *rt6, + struct flowi6 *fl6, + struct net_device *dev) +{ + pktinfo->sk = sk; + pktinfo->tos = tos; + pktinfo->pctx = pctx; + pktinfo->rt6 = rt6; + pktinfo->fl6 = *fl6; + pktinfo->dev = dev; +} + +static int gtp_build_skb_outer_ip4(struct sk_buff *skb, struct net_device *dev, + struct gtp_pktinfo *pktinfo, + struct pdp_ctx *pctx, __u8 tos, + __be16 frag_off) { - struct gtp_dev *gtp = netdev_priv(dev); - struct pdp_ctx *pctx; struct rtable *rt; struct flowi4 fl4; - struct iphdr *iph; __be16 df; int mtu; - /* Read the IP destination address and resolve the PDP context. - * Prepend PDP header with TEI/TID from PDP ctx. 
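gtp_parse_exthdrs() above walks the GTP-U extension-header chain: each header's first byte is its length in 4-octet units (covering the length byte, the payload and the trailing next-type byte), and a next-type of zero ends the chain. The same walk over a plain byte buffer, under those assumptions:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Advance *off past a chain of GTP extension headers starting at *off.
 * Returns 0 on success, -1 on truncation or a zero length field.
 */
static int skip_gtp_exthdrs(const uint8_t *buf, size_t buflen, size_t *off)
{
        uint8_t next;

        do {
                if (*off >= buflen || buf[*off] == 0)
                        return -1;
                *off += (size_t)buf[*off] * 4;  /* length is in 4-octet units */
                if (*off > buflen)
                        return -1;
                next = buf[*off - 1];           /* last byte: next-header type */
        } while (next != 0);

        return 0;
}

int main(void)
{
        /* One 4-byte extension header: len=1, two payload bytes, next=0,
         * followed by the first byte of an inner IPv4 packet.
         */
        const uint8_t pkt[] = { 0x01, 0xaa, 0xbb, 0x00, 0x45 };
        size_t off = 0;

        if (skip_gtp_exthdrs(pkt, sizeof(pkt), &off) == 0)
                printf("inner payload starts at offset %zu\n", off);
        return 0;
}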
- */ - iph = ip_hdr(skb); - if (gtp->role == GTP_ROLE_SGSN) - pctx = ipv4_pdp_find(gtp, iph->saddr); - else - pctx = ipv4_pdp_find(gtp, iph->daddr); - - if (!pctx) { - netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n", - &iph->daddr); - return -ENOENT; - } - netdev_dbg(dev, "found PDP context %p\n", pctx); - - rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr, + rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer.addr.s_addr, inet_sk(pctx->sk)->inet_saddr); if (IS_ERR(rt)) { netdev_dbg(dev, "no route to SSGN %pI4\n", - &pctx->peer_addr_ip4.s_addr); + &pctx->peer.addr.s_addr); dev->stats.tx_carrier_errors++; goto err; } if (rt->dst.dev == dev) { netdev_dbg(dev, "circular route to SSGN %pI4\n", - &pctx->peer_addr_ip4.s_addr); + &pctx->peer.addr.s_addr); dev->stats.collisions++; goto err_rt; } /* This is similar to tnl_update_pmtu(). */ - df = iph->frag_off; + df = frag_off; if (df) { mtu = dst_mtu(&rt->dst) - dev->hard_header_len - sizeof(struct iphdr) - sizeof(struct udphdr); @@ -858,7 +1084,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev, skb_dst_update_pmtu_no_confirm(skb, mtu); - if (iph->frag_off & htons(IP_DF) && + if (frag_off & htons(IP_DF) && ((!skb_is_gso(skb) && skb->len > mtu) || (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu)))) { netdev_dbg(dev, "packet too big, fragmentation needed\n"); @@ -867,7 +1093,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev, goto err_rt; } - gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, iph, pctx, rt, &fl4, dev); + gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, tos, pctx, rt, &fl4, dev); gtp_push_header(skb, pktinfo); return 0; @@ -877,6 +1103,162 @@ err: return -EBADMSG; } +static int gtp_build_skb_outer_ip6(struct net *net, struct sk_buff *skb, + struct net_device *dev, + struct gtp_pktinfo *pktinfo, + struct pdp_ctx *pctx, __u8 tos) +{ + struct dst_entry *dst; + struct rt6_info *rt; + struct flowi6 fl6; + int mtu; + + rt = ip6_route_output_gtp(net, &fl6, pctx->sk, &pctx->peer.addr6, + &inet6_sk(pctx->sk)->saddr); + if (IS_ERR(rt)) { + netdev_dbg(dev, "no route to SSGN %pI6\n", + &pctx->peer.addr6); + dev->stats.tx_carrier_errors++; + goto err; + } + dst = &rt->dst; + + if (rt->dst.dev == dev) { + netdev_dbg(dev, "circular route to SSGN %pI6\n", + &pctx->peer.addr6); + dev->stats.collisions++; + goto err_rt; + } + + mtu = dst_mtu(&rt->dst) - dev->hard_header_len - + sizeof(struct ipv6hdr) - sizeof(struct udphdr); + switch (pctx->gtp_version) { + case GTP_V0: + mtu -= sizeof(struct gtp0_header); + break; + case GTP_V1: + mtu -= sizeof(struct gtp1_header); + break; + } + + skb_dst_update_pmtu_no_confirm(skb, mtu); + + if ((!skb_is_gso(skb) && skb->len > mtu) || + (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu))) { + netdev_dbg(dev, "packet too big, fragmentation needed\n"); + icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); + goto err_rt; + } + + gtp_set_pktinfo_ipv6(pktinfo, pctx->sk, tos, pctx, rt, &fl6, dev); + gtp_push_header(skb, pktinfo); + + return 0; +err_rt: + dst_release(dst); +err: + return -EBADMSG; +} + +static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev, + struct gtp_pktinfo *pktinfo) +{ + struct gtp_dev *gtp = netdev_priv(dev); + struct net *net = gtp->net; + struct pdp_ctx *pctx; + struct iphdr *iph; + int ret; + + /* Read the IP destination address and resolve the PDP context. + * Prepend PDP header with TEI/TID from PDP ctx. 
+ */ + iph = ip_hdr(skb); + if (gtp->role == GTP_ROLE_SGSN) + pctx = ipv4_pdp_find(gtp, iph->saddr); + else + pctx = ipv4_pdp_find(gtp, iph->daddr); + + if (!pctx) { + netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n", + &iph->daddr); + return -ENOENT; + } + netdev_dbg(dev, "found PDP context %p\n", pctx); + + switch (pctx->sk->sk_family) { + case AF_INET: + ret = gtp_build_skb_outer_ip4(skb, dev, pktinfo, pctx, + iph->tos, iph->frag_off); + break; + case AF_INET6: + ret = gtp_build_skb_outer_ip6(net, skb, dev, pktinfo, pctx, + iph->tos); + break; + default: + ret = -1; + WARN_ON_ONCE(1); + break; + } + + if (ret < 0) + return ret; + + netdev_dbg(dev, "gtp -> IP src: %pI4 dst: %pI4\n", + &iph->saddr, &iph->daddr); + + return 0; +} + +static int gtp_build_skb_ip6(struct sk_buff *skb, struct net_device *dev, + struct gtp_pktinfo *pktinfo) +{ + struct gtp_dev *gtp = netdev_priv(dev); + struct net *net = gtp->net; + struct pdp_ctx *pctx; + struct ipv6hdr *ip6h; + __u8 tos; + int ret; + + /* Read the IP destination address and resolve the PDP context. + * Prepend PDP header with TEI/TID from PDP ctx. + */ + ip6h = ipv6_hdr(skb); + if (gtp->role == GTP_ROLE_SGSN) + pctx = ipv6_pdp_find(gtp, &ip6h->saddr); + else + pctx = ipv6_pdp_find(gtp, &ip6h->daddr); + + if (!pctx) { + netdev_dbg(dev, "no PDP ctx found for %pI6, skip\n", + &ip6h->daddr); + return -ENOENT; + } + netdev_dbg(dev, "found PDP context %p\n", pctx); + + tos = ipv6_get_dsfield(ip6h); + + switch (pctx->sk->sk_family) { + case AF_INET: + ret = gtp_build_skb_outer_ip4(skb, dev, pktinfo, pctx, tos, 0); + break; + case AF_INET6: + ret = gtp_build_skb_outer_ip6(net, skb, dev, pktinfo, pctx, tos); + break; + default: + ret = -1; + WARN_ON_ONCE(1); + break; + } + + if (ret < 0) + return ret; + + netdev_dbg(dev, "gtp -> IP src: %pI6 dst: %pI6\n", + &ip6h->saddr, &ip6h->daddr); + + return 0; +} + static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned int proto = ntohs(skb->protocol); @@ -895,6 +1277,9 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev) case ETH_P_IP: err = gtp_build_skb_ip4(skb, dev, &pktinfo); break; + case ETH_P_IPV6: + err = gtp_build_skb_ip6(skb, dev, &pktinfo); + break; default: err = -EOPNOTSUPP; break; @@ -904,13 +1289,11 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev) if (err < 0) goto tx_err; - switch (proto) { - case ETH_P_IP: - netdev_dbg(pktinfo.dev, "gtp -> IP src: %pI4 dst: %pI4\n", - &pktinfo.iph->saddr, &pktinfo.iph->daddr); + switch (pktinfo.pctx->sk->sk_family) { + case AF_INET: udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb, pktinfo.fl4.saddr, pktinfo.fl4.daddr, - pktinfo.iph->tos, + pktinfo.tos, ip4_dst_hoplimit(&pktinfo.rt->dst), 0, pktinfo.gtph_port, pktinfo.gtph_port, @@ -918,6 +1301,19 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev) dev_net(dev)), false); break; + case AF_INET6: +#if IS_ENABLED(CONFIG_IPV6) + udp_tunnel6_xmit_skb(&pktinfo.rt6->dst, pktinfo.sk, skb, dev, + &pktinfo.fl6.saddr, &pktinfo.fl6.daddr, + pktinfo.tos, + ip6_dst_hoplimit(&pktinfo.rt->dst), + 0, + pktinfo.gtph_port, pktinfo.gtph_port, + false); +#else + goto tx_err; +#endif + break; } return NETDEV_TX_OK; @@ -936,11 +1332,11 @@ static const struct device_type gtp_type = { .name = "gtp", }; +#define GTP_TH_MAXLEN (sizeof(struct udphdr) + sizeof(struct gtp0_header)) +#define GTP_IPV4_MAXLEN (sizeof(struct iphdr) + GTP_TH_MAXLEN) + static void gtp_link_setup(struct net_device *dev) { - unsigned int 
max_gtp_header_len = sizeof(struct iphdr) + - sizeof(struct udphdr) + - sizeof(struct gtp0_header); struct gtp_dev *gtp = netdev_priv(dev); dev->netdev_ops = >p_netdev_ops; @@ -949,7 +1345,7 @@ static void gtp_link_setup(struct net_device *dev) dev->hard_header_len = 0; dev->addr_len = 0; - dev->mtu = ETH_DATA_LEN - max_gtp_header_len; + dev->mtu = ETH_DATA_LEN - GTP_IPV4_MAXLEN; /* Zero header length. */ dev->type = ARPHRD_NONE; @@ -960,7 +1356,7 @@ static void gtp_link_setup(struct net_device *dev) dev->features |= NETIF_F_LLTX; netif_keep_dst(dev); - dev->needed_headroom = LL_MAX_HEADER + max_gtp_header_len; + dev->needed_headroom = LL_MAX_HEADER + GTP_IPV4_MAXLEN; gtp->dev = dev; } @@ -975,17 +1371,45 @@ static void gtp_destructor(struct net_device *dev) kfree(gtp->tid_hash); } -static struct sock *gtp_create_sock(int type, struct gtp_dev *gtp) +static int gtp_sock_udp_config(struct udp_port_cfg *udp_conf, + const struct nlattr *nla, int family) +{ + udp_conf->family = family; + + switch (udp_conf->family) { + case AF_INET: + udp_conf->local_ip.s_addr = nla_get_be32(nla); + break; +#if IS_ENABLED(CONFIG_IPV6) + case AF_INET6: + udp_conf->local_ip6 = nla_get_in6_addr(nla); + break; +#endif + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static struct sock *gtp_create_sock(int type, struct gtp_dev *gtp, + const struct nlattr *nla, int family) { struct udp_tunnel_sock_cfg tuncfg = {}; - struct udp_port_cfg udp_conf = { - .local_ip.s_addr = htonl(INADDR_ANY), - .family = AF_INET, - }; + struct udp_port_cfg udp_conf = {}; struct net *net = gtp->net; struct socket *sock; int err; + if (nla) { + err = gtp_sock_udp_config(&udp_conf, nla, family); + if (err < 0) + return ERR_PTR(err); + } else { + udp_conf.local_ip.s_addr = htonl(INADDR_ANY); + udp_conf.family = AF_INET; + } + if (type == UDP_ENCAP_GTP0) udp_conf.local_udp_port = htons(GTP0_PORT); else if (type == UDP_ENCAP_GTP1U) @@ -1007,16 +1431,17 @@ static struct sock *gtp_create_sock(int type, struct gtp_dev *gtp) return sock->sk; } -static int gtp_create_sockets(struct gtp_dev *gtp, struct nlattr *data[]) +static int gtp_create_sockets(struct gtp_dev *gtp, const struct nlattr *nla, + int family) { - struct sock *sk1u = NULL; - struct sock *sk0 = NULL; + struct sock *sk1u; + struct sock *sk0; - sk0 = gtp_create_sock(UDP_ENCAP_GTP0, gtp); + sk0 = gtp_create_sock(UDP_ENCAP_GTP0, gtp, nla, family); if (IS_ERR(sk0)) return PTR_ERR(sk0); - sk1u = gtp_create_sock(UDP_ENCAP_GTP1U, gtp); + sk1u = gtp_create_sock(UDP_ENCAP_GTP1U, gtp, nla, family); if (IS_ERR(sk1u)) { udp_tunnel_sock_release(sk0->sk_socket); return PTR_ERR(sk1u); @@ -1029,6 +1454,9 @@ static int gtp_create_sockets(struct gtp_dev *gtp, struct nlattr *data[]) return 0; } +#define GTP_TH_MAXLEN (sizeof(struct udphdr) + sizeof(struct gtp0_header)) +#define GTP_IPV6_MAXLEN (sizeof(struct ipv6hdr) + GTP_TH_MAXLEN) + static int gtp_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) @@ -1038,6 +1466,11 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, struct gtp_net *gn; int hashsize, err; +#if !IS_ENABLED(CONFIG_IPV6) + if (data[IFLA_GTP_LOCAL6]) + return -EAFNOSUPPORT; +#endif + gtp = netdev_priv(dev); if (!data[IFLA_GTP_PDP_HASHSIZE]) { @@ -1066,13 +1499,24 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, if (err < 0) return err; - if (data[IFLA_GTP_CREATE_SOCKETS]) - err = gtp_create_sockets(gtp, data); - else + if (data[IFLA_GTP_CREATE_SOCKETS]) { + if 
(data[IFLA_GTP_LOCAL6]) + err = gtp_create_sockets(gtp, data[IFLA_GTP_LOCAL6], AF_INET6); + else + err = gtp_create_sockets(gtp, data[IFLA_GTP_LOCAL], AF_INET); + } else { err = gtp_encap_enable(gtp, data); + } + if (err < 0) goto out_hashtable; + if ((gtp->sk0 && gtp->sk0->sk_family == AF_INET6) || + (gtp->sk1u && gtp->sk1u->sk_family == AF_INET6)) { + dev->mtu = ETH_DATA_LEN - GTP_IPV6_MAXLEN; + dev->needed_headroom = LL_MAX_HEADER + GTP_IPV6_MAXLEN; + } + err = register_netdevice(dev); if (err < 0) { netdev_dbg(dev, "failed to register new netdev %d\n", err); @@ -1117,6 +1561,8 @@ static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = { [IFLA_GTP_ROLE] = { .type = NLA_U32 }, [IFLA_GTP_CREATE_SOCKETS] = { .type = NLA_U8 }, [IFLA_GTP_RESTART_COUNT] = { .type = NLA_U8 }, + [IFLA_GTP_LOCAL] = { .type = NLA_U32 }, + [IFLA_GTP_LOCAL6] = { .len = sizeof(struct in6_addr) }, }; static int gtp_validate(struct nlattr *tb[], struct nlattr *data[], @@ -1216,6 +1662,12 @@ static struct sock *gtp_encap_enable_socket(int fd, int type, goto out_sock; } + if (sk->sk_family == AF_INET6 && + !sk->sk_ipv6only) { + sk = ERR_PTR(-EADDRNOTAVAIL); + goto out_sock; + } + lock_sock(sk); if (sk->sk_user_data) { sk = ERR_PTR(-EBUSY); @@ -1267,6 +1719,13 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]) gtp->sk0 = sk0; gtp->sk1u = sk1u; + if (sk0 && sk1u && + sk0->sk_family != sk1u->sk_family) { + gtp_encap_disable_sock(sk0); + gtp_encap_disable_sock(sk1u); + return -EINVAL; + } + return 0; } @@ -1296,14 +1755,9 @@ static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[]) return gtp; } -static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info) +static void gtp_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info) { pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]); - pctx->af = AF_INET; - pctx->peer_addr_ip4.s_addr = - nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]); - pctx->ms_addr_ip4.s_addr = - nla_get_be32(info->attrs[GTPA_MS_ADDRESS]); switch (pctx->gtp_version) { case GTP_V0: @@ -1323,29 +1777,102 @@ static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info) } } +static void ip_pdp_peer_fill(struct pdp_ctx *pctx, struct genl_info *info) +{ + if (info->attrs[GTPA_PEER_ADDRESS]) { + pctx->peer.addr.s_addr = + nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]); + } else if (info->attrs[GTPA_PEER_ADDR6]) { + pctx->peer.addr6 = nla_get_in6_addr(info->attrs[GTPA_PEER_ADDR6]); + } +} + +static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info) +{ + ip_pdp_peer_fill(pctx, info); + pctx->ms.addr.s_addr = + nla_get_be32(info->attrs[GTPA_MS_ADDRESS]); + gtp_pdp_fill(pctx, info); +} + +static bool ipv6_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info) +{ + ip_pdp_peer_fill(pctx, info); + pctx->ms.addr6 = nla_get_in6_addr(info->attrs[GTPA_MS_ADDR6]); + if (pctx->ms.addr6.s6_addr32[2] || + pctx->ms.addr6.s6_addr32[3]) + return false; + + gtp_pdp_fill(pctx, info); + + return true; +} + static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk, struct genl_info *info) { struct pdp_ctx *pctx, *pctx_tid = NULL; struct net_device *dev = gtp->dev; u32 hash_ms, hash_tid = 0; + struct in6_addr ms_addr6; unsigned int version; bool found = false; __be32 ms_addr; + int family; - ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]); - hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size; version = nla_get_u32(info->attrs[GTPA_VERSION]); - pctx = ipv4_pdp_find(gtp, ms_addr); + if (info->attrs[GTPA_FAMILY]) + family = 
nla_get_u8(info->attrs[GTPA_FAMILY]); + else + family = AF_INET; + +#if !IS_ENABLED(CONFIG_IPV6) + if (family == AF_INET6) + return ERR_PTR(-EAFNOSUPPORT); +#endif + if (!info->attrs[GTPA_PEER_ADDRESS] && + !info->attrs[GTPA_PEER_ADDR6]) + return ERR_PTR(-EINVAL); + + if ((info->attrs[GTPA_PEER_ADDRESS] && + sk->sk_family == AF_INET6) || + (info->attrs[GTPA_PEER_ADDR6] && + sk->sk_family == AF_INET)) + return ERR_PTR(-EAFNOSUPPORT); + + switch (family) { + case AF_INET: + if (!info->attrs[GTPA_MS_ADDRESS] || + info->attrs[GTPA_MS_ADDR6]) + return ERR_PTR(-EINVAL); + + ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]); + hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size; + pctx = ipv4_pdp_find(gtp, ms_addr); + break; + case AF_INET6: + if (!info->attrs[GTPA_MS_ADDR6] || + info->attrs[GTPA_MS_ADDRESS]) + return ERR_PTR(-EINVAL); + + ms_addr6 = nla_get_in6_addr(info->attrs[GTPA_MS_ADDR6]); + hash_ms = ipv6_hashfn(&ms_addr6) % gtp->hash_size; + pctx = ipv6_pdp_find(gtp, &ms_addr6); + break; + default: + return ERR_PTR(-EAFNOSUPPORT); + } if (pctx) found = true; if (version == GTP_V0) pctx_tid = gtp0_pdp_find(gtp, - nla_get_u64(info->attrs[GTPA_TID])); + nla_get_u64(info->attrs[GTPA_TID]), + family); else if (version == GTP_V1) pctx_tid = gtp1_pdp_find(gtp, - nla_get_u32(info->attrs[GTPA_I_TEI])); + nla_get_u32(info->attrs[GTPA_I_TEI]), + family); if (pctx_tid) found = true; @@ -1360,7 +1887,15 @@ static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk, if (!pctx) pctx = pctx_tid; - ipv4_pdp_fill(pctx, info); + switch (pctx->af) { + case AF_INET: + ipv4_pdp_fill(pctx, info); + break; + case AF_INET6: + if (!ipv6_pdp_fill(pctx, info)) + return ERR_PTR(-EADDRNOTAVAIL); + break; + } if (pctx->gtp_version == GTP_V0) netdev_dbg(dev, "GTPv0-U: update tunnel id = %llx (pdp %p)\n", @@ -1380,7 +1915,32 @@ static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk, sock_hold(sk); pctx->sk = sk; pctx->dev = gtp->dev; - ipv4_pdp_fill(pctx, info); + pctx->af = family; + + switch (pctx->af) { + case AF_INET: + if (!info->attrs[GTPA_MS_ADDRESS]) { + sock_put(sk); + kfree(pctx); + return ERR_PTR(-EINVAL); + } + + ipv4_pdp_fill(pctx, info); + break; + case AF_INET6: + if (!info->attrs[GTPA_MS_ADDR6]) { + sock_put(sk); + kfree(pctx); + return ERR_PTR(-EINVAL); + } + + if (!ipv6_pdp_fill(pctx, info)) { + sock_put(sk); + kfree(pctx); + return ERR_PTR(-EADDRNOTAVAIL); + } + break; + } atomic_set(&pctx->tx_seq, 0); switch (pctx->gtp_version) { @@ -1403,13 +1963,13 @@ static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk, switch (pctx->gtp_version) { case GTP_V0: netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx ssgn=%pI4 ms=%pI4 (pdp=%p)\n", - pctx->u.v0.tid, &pctx->peer_addr_ip4, - &pctx->ms_addr_ip4, pctx); + pctx->u.v0.tid, &pctx->peer.addr, + &pctx->ms.addr, pctx); break; case GTP_V1: netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x ssgn=%pI4 ms=%pI4 (pdp=%p)\n", pctx->u.v1.i_tei, pctx->u.v1.o_tei, - &pctx->peer_addr_ip4, &pctx->ms_addr_ip4, pctx); + &pctx->peer.addr, &pctx->ms.addr, pctx); break; } @@ -1442,9 +2002,7 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info) int err; if (!info->attrs[GTPA_VERSION] || - !info->attrs[GTPA_LINK] || - !info->attrs[GTPA_PEER_ADDRESS] || - !info->attrs[GTPA_MS_ADDRESS]) + !info->attrs[GTPA_LINK]) return -EINVAL; version = nla_get_u32(info->attrs[GTPA_VERSION]); @@ -1502,6 +2060,12 @@ static struct pdp_ctx *gtp_find_pdp_by_link(struct net *net, struct nlattr *nla[]) { struct gtp_dev *gtp; + int family; + + if 
(nla[GTPA_FAMILY]) + family = nla_get_u8(nla[GTPA_FAMILY]); + else + family = AF_INET; gtp = gtp_find_dev(net, nla); if (!gtp) @@ -1510,14 +2074,31 @@ static struct pdp_ctx *gtp_find_pdp_by_link(struct net *net, if (nla[GTPA_MS_ADDRESS]) { __be32 ip = nla_get_be32(nla[GTPA_MS_ADDRESS]); + if (family != AF_INET) + return ERR_PTR(-EINVAL); + return ipv4_pdp_find(gtp, ip); + } else if (nla[GTPA_MS_ADDR6]) { + struct in6_addr addr = nla_get_in6_addr(nla[GTPA_MS_ADDR6]); + + if (family != AF_INET6) + return ERR_PTR(-EINVAL); + + if (addr.s6_addr32[2] || + addr.s6_addr32[3]) + return ERR_PTR(-EADDRNOTAVAIL); + + return ipv6_pdp_find(gtp, &addr); } else if (nla[GTPA_VERSION]) { u32 gtp_version = nla_get_u32(nla[GTPA_VERSION]); - if (gtp_version == GTP_V0 && nla[GTPA_TID]) - return gtp0_pdp_find(gtp, nla_get_u64(nla[GTPA_TID])); - else if (gtp_version == GTP_V1 && nla[GTPA_I_TEI]) - return gtp1_pdp_find(gtp, nla_get_u32(nla[GTPA_I_TEI])); + if (gtp_version == GTP_V0 && nla[GTPA_TID]) { + return gtp0_pdp_find(gtp, nla_get_u64(nla[GTPA_TID]), + family); + } else if (gtp_version == GTP_V1 && nla[GTPA_I_TEI]) { + return gtp1_pdp_find(gtp, nla_get_u32(nla[GTPA_I_TEI]), + family); + } } return ERR_PTR(-EINVAL); @@ -1581,10 +2162,31 @@ static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq, if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) || nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) || - nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) || - nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr)) + nla_put_u8(skb, GTPA_FAMILY, pctx->af)) goto nla_put_failure; + switch (pctx->af) { + case AF_INET: + if (nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms.addr.s_addr)) + goto nla_put_failure; + break; + case AF_INET6: + if (nla_put_in6_addr(skb, GTPA_MS_ADDR6, &pctx->ms.addr6)) + goto nla_put_failure; + break; + } + + switch (pctx->sk->sk_family) { + case AF_INET: + if (nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer.addr.s_addr)) + goto nla_put_failure; + break; + case AF_INET6: + if (nla_put_in6_addr(skb, GTPA_PEER_ADDR6, &pctx->peer.addr6)) + goto nla_put_failure; + break; + } + switch (pctx->gtp_version) { case GTP_V0: if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) || @@ -1811,6 +2413,9 @@ static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = { [GTPA_NET_NS_FD] = { .type = NLA_U32, }, [GTPA_I_TEI] = { .type = NLA_U32, }, [GTPA_O_TEI] = { .type = NLA_U32, }, + [GTPA_PEER_ADDR6] = { .len = sizeof(struct in6_addr), }, + [GTPA_MS_ADDR6] = { .len = sizeof(struct in6_addr), }, + [GTPA_FAMILY] = { .type = NLA_U8, }, }; static const struct genl_small_ops gtp_genl_ops[] = { diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig index 25b1f929c422..36a9aade9f33 100644 --- a/drivers/net/hamradio/Kconfig +++ b/drivers/net/hamradio/Kconfig @@ -83,7 +83,7 @@ config SCC_TRXECHO config BAYCOM_SER_FDX tristate "BAYCOM ser12 fullduplex driver for AX.25" - depends on AX25 && !S390 + depends on AX25 && HAS_IOPORT select CRC_CCITT help This is one of two drivers for Baycom style simple amateur radio @@ -103,7 +103,7 @@ config BAYCOM_SER_FDX config BAYCOM_SER_HDX tristate "BAYCOM ser12 halfduplex driver for AX.25" - depends on AX25 && !S390 + depends on AX25 && HAS_IOPORT select CRC_CCITT help This is one of two drivers for Baycom style simple amateur radio @@ -150,7 +150,7 @@ config BAYCOM_EPP config YAM tristate "YAM driver for AX.25" - depends on AX25 && !S390 + depends on AX25 && HAS_IOPORT help The YAM is a modem for packet radio 
which connects to the serial port and includes some of the functions of a Terminal Node diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 11831a1c9762..44142245343d 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1233,14 +1233,14 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) if (ret) goto rollback_vf; - ndev->mtu = mtu; + WRITE_ONCE(ndev->mtu, mtu); ret = netvsc_attach(ndev, device_info); if (!ret) goto out; /* Attempt rollback to original MTU */ - ndev->mtu = orig_mtu; + WRITE_ONCE(ndev->mtu, orig_mtu); if (netvsc_attach(ndev, device_info)) netdev_err(ndev, "restoring mtu failed\n"); diff --git a/drivers/net/ipa/data/ipa_data-v3.1.c b/drivers/net/ipa/data/ipa_data-v3.1.c index 3380fb3483b2..e902d731776d 100644 --- a/drivers/net/ipa/data/ipa_data-v3.1.c +++ b/drivers/net/ipa/data/ipa_data-v3.1.c @@ -1,15 +1,16 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2021 Linaro Ltd. + * Copyright (C) 2019-2024 Linaro Ltd. */ +#include <linux/array_size.h> #include <linux/log2.h> -#include "../gsi.h" #include "../ipa_data.h" #include "../ipa_endpoint.h" #include "../ipa_mem.h" +#include "../ipa_version.h" /** enum ipa_resource_type - IPA resource types for an SoC having IPA v3.1 */ enum ipa_resource_type { diff --git a/drivers/net/ipa/data/ipa_data-v3.5.1.c b/drivers/net/ipa/data/ipa_data-v3.5.1.c index 4287114b24db..f632aab56f4c 100644 --- a/drivers/net/ipa/data/ipa_data-v3.5.1.c +++ b/drivers/net/ipa/data/ipa_data-v3.5.1.c @@ -1,15 +1,16 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2021 Linaro Ltd. + * Copyright (C) 2019-2024 Linaro Ltd. */ +#include <linux/array_size.h> #include <linux/log2.h> -#include "../gsi.h" #include "../ipa_data.h" #include "../ipa_endpoint.h" #include "../ipa_mem.h" +#include "../ipa_version.h" /** enum ipa_resource_type - IPA resource types for an SoC having IPA v3.5.1 */ enum ipa_resource_type { diff --git a/drivers/net/ipa/data/ipa_data-v4.11.c b/drivers/net/ipa/data/ipa_data-v4.11.c index 1b4b52501ee3..c1428483ca34 100644 --- a/drivers/net/ipa/data/ipa_data-v4.11.c +++ b/drivers/net/ipa/data/ipa_data-v4.11.c @@ -1,13 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2021 Linaro Ltd. */ +/* Copyright (C) 2021-2024 Linaro Ltd. */ +#include <linux/array_size.h> #include <linux/log2.h> -#include "../gsi.h" #include "../ipa_data.h" #include "../ipa_endpoint.h" #include "../ipa_mem.h" +#include "../ipa_version.h" /** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.11 */ enum ipa_resource_type { diff --git a/drivers/net/ipa/data/ipa_data-v4.2.c b/drivers/net/ipa/data/ipa_data-v4.2.c index 199ed0ed868b..2c7e8cb429b9 100644 --- a/drivers/net/ipa/data/ipa_data-v4.2.c +++ b/drivers/net/ipa/data/ipa_data-v4.2.c @@ -1,13 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2019-2021 Linaro Ltd. */ +/* Copyright (C) 2019-2024 Linaro Ltd. 
*/ +#include <linux/array_size.h> #include <linux/log2.h> -#include "../gsi.h" #include "../ipa_data.h" #include "../ipa_endpoint.h" #include "../ipa_mem.h" +#include "../ipa_version.h" /** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.2 */ enum ipa_resource_type { diff --git a/drivers/net/ipa/data/ipa_data-v4.5.c b/drivers/net/ipa/data/ipa_data-v4.5.c index 19b549f2998b..57dc78c526b0 100644 --- a/drivers/net/ipa/data/ipa_data-v4.5.c +++ b/drivers/net/ipa/data/ipa_data-v4.5.c @@ -1,13 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2021 Linaro Ltd. */ +/* Copyright (C) 2021-2024 Linaro Ltd. */ +#include <linux/array_size.h> #include <linux/log2.h> -#include "../gsi.h" #include "../ipa_data.h" #include "../ipa_endpoint.h" #include "../ipa_mem.h" +#include "../ipa_version.h" /** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.5 */ enum ipa_resource_type { diff --git a/drivers/net/ipa/data/ipa_data-v4.7.c b/drivers/net/ipa/data/ipa_data-v4.7.c index b83390c48615..c8c23d9be961 100644 --- a/drivers/net/ipa/data/ipa_data-v4.7.c +++ b/drivers/net/ipa/data/ipa_data-v4.7.c @@ -1,13 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2022 Linaro Ltd. */ +/* Copyright (C) 2022-2024 Linaro Ltd. */ +#include <linux/array_size.h> #include <linux/log2.h> -#include "../gsi.h" #include "../ipa_data.h" #include "../ipa_endpoint.h" #include "../ipa_mem.h" +#include "../ipa_version.h" /** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.7 */ enum ipa_resource_type { diff --git a/drivers/net/ipa/data/ipa_data-v4.9.c b/drivers/net/ipa/data/ipa_data-v4.9.c index d30fc1fe6ca2..4eb9c909d5b3 100644 --- a/drivers/net/ipa/data/ipa_data-v4.9.c +++ b/drivers/net/ipa/data/ipa_data-v4.9.c @@ -1,13 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2021 Linaro Ltd. */ +/* Copyright (C) 2021-2024 Linaro Ltd. */ +#include <linux/array_size.h> #include <linux/log2.h> -#include "../gsi.h" #include "../ipa_data.h" #include "../ipa_endpoint.h" #include "../ipa_mem.h" +#include "../ipa_version.h" /** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.9 */ enum ipa_resource_type { diff --git a/drivers/net/ipa/data/ipa_data-v5.0.c b/drivers/net/ipa/data/ipa_data-v5.0.c index 4d8171dae4cd..050580c99b65 100644 --- a/drivers/net/ipa/data/ipa_data-v5.0.c +++ b/drivers/net/ipa/data/ipa_data-v5.0.c @@ -1,13 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2023 Linaro Ltd. */ +/* Copyright (C) 2023-2024 Linaro Ltd. */ +#include <linux/array_size.h> #include <linux/log2.h> -#include "../gsi.h" #include "../ipa_data.h" #include "../ipa_endpoint.h" #include "../ipa_mem.h" +#include "../ipa_version.h" /** enum ipa_resource_type - IPA resource types for an SoC having IPA v5.0 */ enum ipa_resource_type { diff --git a/drivers/net/ipa/data/ipa_data-v5.5.c b/drivers/net/ipa/data/ipa_data-v5.5.c index 2c6390f11354..0e6663e22533 100644 --- a/drivers/net/ipa/data/ipa_data-v5.5.c +++ b/drivers/net/ipa/data/ipa_data-v5.5.c @@ -1,13 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2023 Linaro Ltd. */ +/* Copyright (C) 2023-2024 Linaro Ltd. 
*/ -#include <linux/kernel.h> +#include <linux/array_size.h> #include <linux/log2.h> #include "../ipa_data.h" #include "../ipa_endpoint.h" #include "../ipa_mem.h" +#include "../ipa_version.h" /** enum ipa_resource_type - IPA resource types for an SoC having IPA v5.5 */ enum ipa_resource_type { diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c index 9a0b1fe4a93a..4c3227e77898 100644 --- a/drivers/net/ipa/gsi.c +++ b/drivers/net/ipa/gsi.c @@ -1,28 +1,26 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2023 Linaro Ltd. + * Copyright (C) 2018-2024 Linaro Ltd. */ -#include <linux/types.h> #include <linux/bits.h> -#include <linux/bitfield.h> -#include <linux/mutex.h> -#include <linux/completion.h> -#include <linux/io.h> #include <linux/bug.h> +#include <linux/completion.h> #include <linux/interrupt.h> -#include <linux/platform_device.h> +#include <linux/mutex.h> #include <linux/netdevice.h> +#include <linux/platform_device.h> +#include <linux/types.h> #include "gsi.h" -#include "reg.h" -#include "gsi_reg.h" #include "gsi_private.h" +#include "gsi_reg.h" #include "gsi_trans.h" -#include "ipa_gsi.h" #include "ipa_data.h" +#include "ipa_gsi.h" #include "ipa_version.h" +#include "reg.h" /** * DOC: The IPA Generic Software Interface @@ -1730,10 +1728,10 @@ static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id) gsi_channel_program(channel, true); if (channel->toward_ipa) - netif_napi_add_tx(&gsi->dummy_dev, &channel->napi, + netif_napi_add_tx(gsi->dummy_dev, &channel->napi, gsi_channel_poll); else - netif_napi_add(&gsi->dummy_dev, &channel->napi, + netif_napi_add(gsi->dummy_dev, &channel->napi, gsi_channel_poll); return 0; @@ -2369,12 +2367,14 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, /* GSI uses NAPI on all channels. Create a dummy network device * for the channel NAPI contexts to be associated with. */ - init_dummy_netdev(&gsi->dummy_dev); + gsi->dummy_dev = alloc_netdev_dummy(0); + if (!gsi->dummy_dev) + return -ENOMEM; init_completion(&gsi->completion); ret = gsi_reg_init(gsi, pdev); if (ret) - return ret; + goto err_reg_exit; ret = gsi_irq_init(gsi, pdev); /* No matching exit required */ if (ret) @@ -2389,6 +2389,7 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, return 0; err_reg_exit: + free_netdev(gsi->dummy_dev); gsi_reg_exit(gsi); return ret; @@ -2399,6 +2400,7 @@ void gsi_exit(struct gsi *gsi) { mutex_destroy(&gsi->mutex); gsi_channel_exit(gsi); + free_netdev(gsi->dummy_dev); gsi_reg_exit(gsi); } diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h index 42063b227c18..9d8e05d950e3 100644 --- a/drivers/net/ipa/gsi.h +++ b/drivers/net/ipa/gsi.h @@ -1,17 +1,15 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2023 Linaro Ltd. + * Copyright (C) 2018-2024 Linaro Ltd. 
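The gsi.c/gsi.h hunks above convert the embedded NAPI anchor device (previously set up with init_dummy_netdev()) into a pointer returned by alloc_netdev_dummy(), so it now has to be released with free_netdev() on error paths and in gsi_exit(). A minimal sketch of that allocate/attach/free pattern with hypothetical names:

#include <linux/netdevice.h>

struct foo {                                    /* hypothetical driver state */
        struct net_device *dummy_dev;           /* exists only to host NAPI */
        struct napi_struct napi;
};

static int foo_poll(struct napi_struct *napi, int budget)
{
        return 0;                               /* stub for the sketch */
}

static int foo_init(struct foo *foo)
{
        foo->dummy_dev = alloc_netdev_dummy(0); /* heap-allocated dummy netdev */
        if (!foo->dummy_dev)
                return -ENOMEM;

        netif_napi_add(foo->dummy_dev, &foo->napi, foo_poll);
        return 0;
}

static void foo_exit(struct foo *foo)
{
        netif_napi_del(&foo->napi);
        free_netdev(foo->dummy_dev);            /* required now that it is allocated */
}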
*/ #ifndef _GSI_H_ #define _GSI_H_ -#include <linux/types.h> -#include <linux/spinlock.h> -#include <linux/mutex.h> #include <linux/completion.h> -#include <linux/platform_device.h> +#include <linux/mutex.h> #include <linux/netdevice.h> +#include <linux/types.h> #include "ipa_version.h" @@ -23,12 +21,10 @@ #define GSI_TLV_MAX 64 struct device; -struct scatterlist; struct platform_device; struct gsi; struct gsi_trans; -struct gsi_channel_data; struct ipa_gsi_endpoint_data; struct gsi_ring { @@ -155,7 +151,7 @@ struct gsi { struct mutex mutex; /* protects commands, programming */ struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX]; struct gsi_evt_ring evt_ring[GSI_EVT_RING_COUNT_MAX]; - struct net_device dummy_dev; /* needed for NAPI */ + struct net_device *dummy_dev; /* needed for NAPI */ }; /** diff --git a/drivers/net/ipa/gsi_private.h b/drivers/net/ipa/gsi_private.h index c65f7c5cdc8d..968ab1e596e8 100644 --- a/drivers/net/ipa/gsi_private.h +++ b/drivers/net/ipa/gsi_private.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2022 Linaro Ltd. + * Copyright (C) 2018-2024 Linaro Ltd. */ #ifndef _GSI_PRIVATE_H_ #define _GSI_PRIVATE_H_ @@ -10,9 +10,10 @@ #include <linux/types.h> -struct gsi_trans; -struct gsi_ring; +struct gsi; struct gsi_channel; +struct gsi_ring; +struct gsi_trans; #define GSI_RING_ELEMENT_SIZE 16 /* bytes; must be a power of 2 */ diff --git a/drivers/net/ipa/gsi_reg.c b/drivers/net/ipa/gsi_reg.c index 106c43884aef..825598661188 100644 --- a/drivers/net/ipa/gsi_reg.c +++ b/drivers/net/ipa/gsi_reg.c @@ -1,13 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2023 Linaro Ltd. */ +/* Copyright (C) 2023-2024 Linaro Ltd. */ -#include <linux/platform_device.h> #include <linux/io.h> +#include <linux/platform_device.h> #include "gsi.h" -#include "reg.h" #include "gsi_reg.h" +#include "reg.h" /* Is this register ID valid for the current GSI version? */ static bool gsi_reg_id_valid(struct gsi *gsi, enum gsi_reg_id reg_id) diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c index ee6fb00b71eb..19531883864a 100644 --- a/drivers/net/ipa/gsi_trans.c +++ b/drivers/net/ipa/gsi_trans.c @@ -1,22 +1,22 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2022 Linaro Ltd. + * Copyright (C) 2019-2024 Linaro Ltd. */ -#include <linux/types.h> -#include <linux/bits.h> #include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/dma-direction.h> #include <linux/refcount.h> #include <linux/scatterlist.h> -#include <linux/dma-direction.h> +#include <linux/types.h> #include "gsi.h" #include "gsi_private.h" #include "gsi_trans.h" -#include "ipa_gsi.h" -#include "ipa_data.h" #include "ipa_cmd.h" +#include "ipa_data.h" +#include "ipa_gsi.h" /** * DOC: GSI Transactions diff --git a/drivers/net/ipa/gsi_trans.h b/drivers/net/ipa/gsi_trans.h index 30c1c2dc77c6..c1b3386cbb9d 100644 --- a/drivers/net/ipa/gsi_trans.h +++ b/drivers/net/ipa/gsi_trans.h @@ -1,25 +1,24 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2022 Linaro Ltd. + * Copyright (C) 2019-2024 Linaro Ltd. 
*/ #ifndef _GSI_TRANS_H_ #define _GSI_TRANS_H_ -#include <linux/types.h> -#include <linux/refcount.h> #include <linux/completion.h> #include <linux/dma-direction.h> +#include <linux/refcount.h> +#include <linux/types.h> #include "ipa_cmd.h" +struct device; struct page; struct scatterlist; -struct device; struct sk_buff; struct gsi; -struct gsi_trans; struct gsi_trans_pool; /* Maximum number of TREs in an IPA immediate command transaction */ diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h index 334cd62cf286..7ef10a4ff35e 100644 --- a/drivers/net/ipa/ipa.h +++ b/drivers/net/ipa/ipa.h @@ -1,30 +1,25 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2022 Linaro Ltd. + * Copyright (C) 2018-2024 Linaro Ltd. */ #ifndef _IPA_H_ #define _IPA_H_ -#include <linux/types.h> -#include <linux/device.h> #include <linux/notifier.h> -#include <linux/pm_wakeup.h> +#include <linux/types.h> -#include "ipa_version.h" #include "gsi.h" +#include "ipa_endpoint.h" #include "ipa_mem.h" #include "ipa_qmi.h" -#include "ipa_endpoint.h" -#include "ipa_interrupt.h" +#include "ipa_version.h" -struct clk; -struct icc_path; struct net_device; +struct ipa_interrupt; struct ipa_power; struct ipa_smp2p; -struct ipa_interrupt; /** * struct ipa - IPA information diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c index 39219963dbb3..984311a9a5f2 100644 --- a/drivers/net/ipa/ipa_cmd.c +++ b/drivers/net/ipa/ipa_cmd.c @@ -1,22 +1,23 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2023 Linaro Ltd. + * Copyright (C) 2019-2024 Linaro Ltd. */ -#include <linux/types.h> -#include <linux/device.h> -#include <linux/slab.h> #include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/device.h> #include <linux/dma-direction.h> +#include <linux/types.h> #include "gsi.h" #include "gsi_trans.h" #include "ipa.h" -#include "ipa_endpoint.h" -#include "ipa_table.h" #include "ipa_cmd.h" +#include "ipa_endpoint.h" #include "ipa_mem.h" +#include "ipa_reg.h" +#include "ipa_table.h" /** * DOC: IPA Immediate Commands diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h index e2cf1c2b0ef2..2077fdbade99 100644 --- a/drivers/net/ipa/ipa_cmd.h +++ b/drivers/net/ipa/ipa_cmd.h @@ -1,21 +1,17 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2022 Linaro Ltd. + * Copyright (C) 2019-2024 Linaro Ltd. 
*/ #ifndef _IPA_CMD_H_ #define _IPA_CMD_H_ #include <linux/types.h> -#include <linux/dma-direction.h> - -struct sk_buff; -struct scatterlist; +struct gsi_channel; +struct gsi_trans; struct ipa; struct ipa_mem; -struct gsi_trans; -struct gsi_channel; /** * enum ipa_cmd_opcode: IPA immediate commands @@ -58,14 +54,6 @@ bool ipa_cmd_table_init_valid(struct ipa *ipa, const struct ipa_mem *mem, bool route); /** - * ipa_cmd_data_valid() - Validate command-realted configuration is valid - * @ipa: - IPA pointer - * - * Return: true if assumptions required for command are valid - */ -bool ipa_cmd_data_valid(struct ipa *ipa); - -/** * ipa_cmd_pool_init() - initialize command channel pools * @channel: AP->IPA command TX GSI channel pointer * @tre_count: Number of pool elements to allocate diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h index 2a1605e67b65..d88cbbbf18b7 100644 --- a/drivers/net/ipa/ipa_data.h +++ b/drivers/net/ipa/ipa_data.h @@ -1,16 +1,16 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2023 Linaro Ltd. + * Copyright (C) 2019-2024 Linaro Ltd. */ #ifndef _IPA_DATA_H_ #define _IPA_DATA_H_ #include <linux/types.h> -#include "ipa_version.h" #include "ipa_endpoint.h" #include "ipa_mem.h" +#include "ipa_version.h" /** * DOC: IPA/GSI Configuration Data diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index dd490941615e..0bd9b9fbbf56 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -1,27 +1,30 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2023 Linaro Ltd. + * Copyright (C) 2019-2024 Linaro Ltd. */ -#include <linux/types.h> -#include <linux/device.h> -#include <linux/slab.h> #include <linux/bitfield.h> -#include <linux/if_rmnet.h> +#include <linux/bits.h> +#include <linux/device.h> #include <linux/dma-direction.h> +#include <linux/if_rmnet.h> +#include <linux/types.h> #include "gsi.h" #include "gsi_trans.h" #include "ipa.h" +#include "ipa_cmd.h" #include "ipa_data.h" #include "ipa_endpoint.h" -#include "ipa_cmd.h" +#include "ipa_gsi.h" +#include "ipa_interrupt.h" #include "ipa_mem.h" #include "ipa_modem.h" -#include "ipa_table.h" -#include "ipa_gsi.h" #include "ipa_power.h" +#include "ipa_reg.h" +#include "ipa_table.h" +#include "ipa_version.h" /* Hardware is told about receive buffers once a "batch" has been queued */ #define IPA_REPLENISH_BATCH 16 /* Must be non-zero */ diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h index 3ad2e802040a..e7d8ae6c6f6a 100644 --- a/drivers/net/ipa/ipa_endpoint.h +++ b/drivers/net/ipa/ipa_endpoint.h @@ -1,21 +1,21 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2023 Linaro Ltd. + * Copyright (C) 2019-2024 Linaro Ltd. 
*/ #ifndef _IPA_ENDPOINT_H_ #define _IPA_ENDPOINT_H_ #include <linux/types.h> #include <linux/workqueue.h> -#include <linux/if_ether.h> -#include "gsi.h" #include "ipa_reg.h" +#include "ipa_version.h" struct net_device; struct sk_buff; +struct gsi_trans; struct ipa; struct ipa_gsi_endpoint_data; @@ -199,9 +199,9 @@ int ipa_endpoint_init(struct ipa *ipa, u32 count, const struct ipa_gsi_endpoint_data *data); void ipa_endpoint_exit(struct ipa *ipa); -void ipa_endpoint_trans_complete(struct ipa_endpoint *ipa, +void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint, struct gsi_trans *trans); -void ipa_endpoint_trans_release(struct ipa_endpoint *ipa, +void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint, struct gsi_trans *trans); #endif /* _IPA_ENDPOINT_H_ */ diff --git a/drivers/net/ipa/ipa_gsi.c b/drivers/net/ipa/ipa_gsi.c index d323adb03383..cb654c7b5498 100644 --- a/drivers/net/ipa/ipa_gsi.c +++ b/drivers/net/ipa/ipa_gsi.c @@ -1,16 +1,17 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2020 Linaro Ltd. + * Copyright (C) 2019-2024 Linaro Ltd. */ #include <linux/types.h> -#include "ipa_gsi.h" #include "gsi_trans.h" #include "ipa.h" -#include "ipa_endpoint.h" #include "ipa_data.h" +#include "ipa_endpoint.h" +#include "ipa_gsi.h" +#include "ipa_version.h" void ipa_gsi_trans_complete(struct gsi_trans *trans) { diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c index c3e8784d51d9..245a06997055 100644 --- a/drivers/net/ipa/ipa_interrupt.c +++ b/drivers/net/ipa/ipa_interrupt.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2022 Linaro Ltd. + * Copyright (C) 2018-2024 Linaro Ltd. */ /* DOC: IPA Interrupts @@ -19,29 +19,31 @@ * time only these three are supported. 
*/ -#include <linux/platform_device.h> -#include <linux/types.h> #include <linux/interrupt.h> +#include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/pm_wakeirq.h> +#include <linux/types.h> #include "ipa.h" -#include "ipa_reg.h" #include "ipa_endpoint.h" +#include "ipa_interrupt.h" #include "ipa_power.h" +#include "ipa_reg.h" #include "ipa_uc.h" -#include "ipa_interrupt.h" /** * struct ipa_interrupt - IPA interrupt information * @ipa: IPA pointer * @irq: Linux IRQ number used for IPA interrupts * @enabled: Mask indicating which interrupts are enabled + * @suspend_enabled: Bitmap of endpoints with the SUSPEND interrupt enabled */ struct ipa_interrupt { struct ipa *ipa; u32 irq; u32 enabled; + unsigned long *suspend_enabled; }; /* Clear the suspend interrupt for all endpoints that signaled it */ @@ -194,6 +196,7 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt, u32 mask = BIT(endpoint_id % 32); u32 unit = endpoint_id / 32; const struct reg *reg; + unsigned long weight; u32 offset; u32 val; @@ -203,6 +206,10 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt, if (ipa->version == IPA_VERSION_3_0) return; + weight = bitmap_weight(interrupt->suspend_enabled, ipa->endpoint_count); + if (weight == 1 && !enable) + ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND); + reg = ipa_reg(ipa, IRQ_SUSPEND_EN); offset = reg_n_offset(reg, unit); val = ioread32(ipa->reg_virt + offset); @@ -211,8 +218,12 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt, val |= mask; else val &= ~mask; + __change_bit(endpoint_id, interrupt->suspend_enabled); iowrite32(val, ipa->reg_virt + offset); + + if (!weight && enable) + ipa_interrupt_enable(ipa, IPA_IRQ_TX_SUSPEND); } /* Enable TX_SUSPEND for an endpoint */ @@ -246,7 +257,16 @@ int ipa_interrupt_config(struct ipa *ipa) interrupt->ipa = ipa; - /* Disable all IPA interrupt types */ + /* Initially all IPA interrupt types are disabled */ + interrupt->enabled = 0; + interrupt->suspend_enabled = bitmap_zalloc(ipa->endpoint_count, + GFP_KERNEL); + if (!interrupt->suspend_enabled) { + ret = -ENOMEM; + goto err_kfree; + } + + /* Disable IPA interrupt types */ reg = ipa_reg(ipa, IPA_IRQ_EN); iowrite32(0, ipa->reg_virt + reg_offset(reg)); @@ -254,22 +274,32 @@ int ipa_interrupt_config(struct ipa *ipa) "ipa", interrupt); if (ret) { dev_err(dev, "error %d requesting \"ipa\" IRQ\n", ret); - goto err_kfree; + goto err_free_bitmap; + } + + ret = device_init_wakeup(dev, true); + if (ret) { + dev_err(dev, "error %d enabling wakeup\n", ret); + goto err_free_irq; } ret = dev_pm_set_wake_irq(dev, irq); if (ret) { dev_err(dev, "error %d registering \"ipa\" IRQ as wakeirq\n", ret); - goto err_free_irq; + goto err_disable_wakeup; } ipa->interrupt = interrupt; return 0; +err_disable_wakeup: + (void)device_init_wakeup(dev, false); err_free_irq: free_irq(interrupt->irq, interrupt); +err_free_bitmap: + bitmap_free(interrupt->suspend_enabled); err_kfree: kfree(interrupt); @@ -285,22 +315,20 @@ void ipa_interrupt_deconfig(struct ipa *ipa) ipa->interrupt = NULL; dev_pm_clear_wake_irq(dev); + (void)device_init_wakeup(dev, false); free_irq(interrupt->irq, interrupt); + bitmap_free(interrupt->suspend_enabled); } /* Initialize the IPA interrupt structure */ struct ipa_interrupt *ipa_interrupt_init(struct platform_device *pdev) { - struct device *dev = &pdev->dev; struct ipa_interrupt *interrupt; int irq; irq = platform_get_irq_byname(pdev, "ipa"); - if (irq <= 0) { - dev_err(dev, "DT error %d getting 
\"ipa\" IRQ property\n", irq); - + if (irq <= 0) return ERR_PTR(irq ? : -EINVAL); - } interrupt = kzalloc(sizeof(*interrupt), GFP_KERNEL); if (!interrupt) diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h index f3f4f4330a59..d11c4af14fa2 100644 --- a/drivers/net/ipa/ipa_interrupt.h +++ b/drivers/net/ipa/ipa_interrupt.h @@ -1,16 +1,18 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2022 Linaro Ltd. + * Copyright (C) 2018-2024 Linaro Ltd. */ #ifndef _IPA_INTERRUPT_H_ #define _IPA_INTERRUPT_H_ #include <linux/types.h> -#include <linux/bits.h> + +struct platform_device; struct ipa; struct ipa_interrupt; + enum ipa_irq_id; /** diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c index 57b241417e8c..5f3dd5a2dcf4 100644 --- a/drivers/net/ipa/ipa_main.c +++ b/drivers/net/ipa/ipa_main.c @@ -1,38 +1,37 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2023 Linaro Ltd. + * Copyright (C) 2018-2024 Linaro Ltd. */ -#include <linux/types.h> -#include <linux/atomic.h> -#include <linux/bitfield.h> #include <linux/bug.h> -#include <linux/io.h> #include <linux/firmware.h> +#include <linux/io.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <linux/types.h> + #include <linux/firmware/qcom/qcom_scm.h> #include <linux/soc/qcom/mdt_loader.h> #include "ipa.h" -#include "ipa_power.h" +#include "ipa_cmd.h" #include "ipa_data.h" #include "ipa_endpoint.h" -#include "ipa_resource.h" -#include "ipa_cmd.h" -#include "ipa_reg.h" +#include "ipa_interrupt.h" #include "ipa_mem.h" -#include "ipa_table.h" -#include "ipa_smp2p.h" #include "ipa_modem.h" -#include "ipa_uc.h" -#include "ipa_interrupt.h" -#include "gsi_trans.h" +#include "ipa_power.h" +#include "ipa_reg.h" +#include "ipa_resource.h" +#include "ipa_smp2p.h" #include "ipa_sysfs.h" +#include "ipa_table.h" +#include "ipa_uc.h" +#include "ipa_version.h" /** * DOC: The IP Accelerator @@ -120,10 +119,6 @@ int ipa_setup(struct ipa *ipa) if (ret) return ret; - ret = ipa_power_setup(ipa); - if (ret) - goto err_gsi_teardown; - ipa_endpoint_setup(ipa); /* We need to use the AP command TX endpoint to perform other @@ -170,8 +165,6 @@ err_command_disable: ipa_endpoint_disable_one(command_endpoint); err_endpoint_teardown: ipa_endpoint_teardown(ipa); - ipa_power_teardown(ipa); -err_gsi_teardown: gsi_teardown(&ipa->gsi); return ret; @@ -196,7 +189,6 @@ static void ipa_teardown(struct ipa *ipa) command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]; ipa_endpoint_disable_one(command_endpoint); ipa_endpoint_teardown(ipa); - ipa_power_teardown(ipa); gsi_teardown(&ipa->gsi); } @@ -818,11 +810,6 @@ static int ipa_probe(struct platform_device *pdev) return -ENODEV; } - if (!ipa_version_supported(data->version)) { - dev_err(dev, "unsupported IPA version %u\n", data->version); - return -EINVAL; - } - if (!data->modem_route_count) { dev_err(dev, "modem_route_count cannot be zero\n"); return -EINVAL; @@ -873,6 +860,10 @@ static int ipa_probe(struct platform_device *pdev) if (ret) goto err_reg_exit; + ret = ipa_cmd_init(ipa); + if (ret) + goto err_mem_exit; + ret = gsi_init(&ipa->gsi, pdev, ipa->version, data->endpoint_count, data->endpoint_data); if (ret) diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c index 709f061ede61..dee985eb08cb 100644 --- 
a/drivers/net/ipa/ipa_mem.c +++ b/drivers/net/ipa/ipa_mem.c @@ -1,25 +1,24 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2023 Linaro Ltd. + * Copyright (C) 2019-2024 Linaro Ltd. */ -#include <linux/types.h> -#include <linux/bitfield.h> -#include <linux/bug.h> #include <linux/dma-mapping.h> +#include <linux/io.h> #include <linux/iommu.h> #include <linux/platform_device.h> -#include <linux/io.h> +#include <linux/types.h> + #include <linux/soc/qcom/smem.h> +#include "gsi_trans.h" #include "ipa.h" -#include "ipa_reg.h" -#include "ipa_data.h" #include "ipa_cmd.h" +#include "ipa_data.h" #include "ipa_mem.h" +#include "ipa_reg.h" #include "ipa_table.h" -#include "gsi_trans.h" /* "Canary" value placed between memory regions to detect overflow */ #define IPA_MEM_CANARY_VAL cpu_to_le32(0xdeadbeef) diff --git a/drivers/net/ipa/ipa_mem.h b/drivers/net/ipa/ipa_mem.h index 28aad00a151d..b25babade787 100644 --- a/drivers/net/ipa/ipa_mem.h +++ b/drivers/net/ipa/ipa_mem.h @@ -1,11 +1,13 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2023 Linaro Ltd. + * Copyright (C) 2019-2024 Linaro Ltd. */ #ifndef _IPA_MEM_H_ #define _IPA_MEM_H_ +#include <linux/types.h> + struct platform_device; struct ipa; diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c index c27ca3f27f7d..8fe0d0e1a00f 100644 --- a/drivers/net/ipa/ipa_modem.c +++ b/drivers/net/ipa/ipa_modem.c @@ -1,29 +1,27 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2022 Linaro Ltd. + * Copyright (C) 2018-2024 Linaro Ltd. */ #include <linux/errno.h> +#include <linux/etherdevice.h> #include <linux/if_arp.h> +#include <linux/if_rmnet.h> #include <linux/netdevice.h> +#include <linux/pm_runtime.h> #include <linux/skbuff.h> -#include <linux/if_rmnet.h> -#include <linux/etherdevice.h> #include <net/pkt_sched.h> -#include <linux/pm_runtime.h> + #include <linux/remoteproc/qcom_rproc.h> #include "ipa.h" -#include "ipa_data.h" #include "ipa_endpoint.h" -#include "ipa_table.h" #include "ipa_mem.h" #include "ipa_modem.h" #include "ipa_smp2p.h" -#include "ipa_qmi.h" +#include "ipa_table.h" #include "ipa_uc.h" -#include "ipa_power.h" #define IPA_NETDEV_NAME "rmnet_ipa%d" #define IPA_NETDEV_TAILROOM 0 /* for padding by mux layer */ diff --git a/drivers/net/ipa/ipa_modem.h b/drivers/net/ipa/ipa_modem.h index d85718db9a57..b1d2c80ed096 100644 --- a/drivers/net/ipa/ipa_modem.h +++ b/drivers/net/ipa/ipa_modem.h @@ -1,15 +1,16 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2022 Linaro Ltd. + * Copyright (C) 2018-2024 Linaro Ltd. */ #ifndef _IPA_MODEM_H_ #define _IPA_MODEM_H_ -struct ipa; struct net_device; struct sk_buff; +struct ipa; + int ipa_modem_start(struct ipa *ipa); int ipa_modem_stop(struct ipa *ipa); diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c index 41ca7ef5e20f..65fd14da0f86 100644 --- a/drivers/net/ipa/ipa_power.c +++ b/drivers/net/ipa/ipa_power.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2022 Linaro Ltd. + * Copyright (C) 2018-2024 Linaro Ltd. 
*/ #include <linux/clk.h> @@ -9,15 +9,15 @@ #include <linux/interconnect.h> #include <linux/pm.h> #include <linux/pm_runtime.h> -#include <linux/bitops.h> #include "linux/soc/qcom/qcom_aoss.h" #include "ipa.h" -#include "ipa_power.h" +#include "ipa_data.h" #include "ipa_endpoint.h" +#include "ipa_interrupt.h" #include "ipa_modem.h" -#include "ipa_data.h" +#include "ipa_power.h" /** * DOC: IPA Power Management @@ -232,25 +232,6 @@ void ipa_power_retention(struct ipa *ipa, bool enable) ret, enable ? "en" : "dis"); } -int ipa_power_setup(struct ipa *ipa) -{ - int ret; - - ipa_interrupt_enable(ipa, IPA_IRQ_TX_SUSPEND); - - ret = device_init_wakeup(ipa->dev, true); - if (ret) - ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND); - - return ret; -} - -void ipa_power_teardown(struct ipa *ipa) -{ - (void)device_init_wakeup(ipa->dev, false); - ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND); -} - /* Initialize IPA power management */ struct ipa_power * ipa_power_init(struct device *dev, const struct ipa_power_data *data) diff --git a/drivers/net/ipa/ipa_power.h b/drivers/net/ipa/ipa_power.h index 227cc04bea80..a83524a61c28 100644 --- a/drivers/net/ipa/ipa_power.h +++ b/drivers/net/ipa/ipa_power.h @@ -1,16 +1,17 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2022 Linaro Ltd. + * Copyright (C) 2018-2024 Linaro Ltd. */ #ifndef _IPA_POWER_H_ #define _IPA_POWER_H_ +#include <linux/types.h> + struct device; struct ipa; struct ipa_power_data; -enum ipa_irq_id; /* IPA device power management function block */ extern const struct dev_pm_ops ipa_pm_ops; @@ -31,20 +32,6 @@ u32 ipa_core_clock_rate(struct ipa *ipa); void ipa_power_retention(struct ipa *ipa, bool enable); /** - * ipa_power_setup() - Set up IPA power management - * @ipa: IPA pointer - * - * Return: 0 if successful, or a negative error code - */ -int ipa_power_setup(struct ipa *ipa); - -/** - * ipa_power_teardown() - Inverse of ipa_power_setup() - * @ipa: IPA pointer - */ -void ipa_power_teardown(struct ipa *ipa); - -/** * ipa_power_init() - Initialize IPA power management * @dev: IPA device * @data: Clock configuration data diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c index 65c40e207802..d771f3a71f94 100644 --- a/drivers/net/ipa/ipa_qmi.c +++ b/drivers/net/ipa/ipa_qmi.c @@ -1,19 +1,15 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2022 Linaro Ltd. + * Copyright (C) 2018-2024 Linaro Ltd. */ -#include <linux/types.h> -#include <linux/string.h> -#include <linux/slab.h> #include <linux/qrtr.h> -#include <linux/soc/qcom/qmi.h> +#include <linux/string.h> +#include <linux/types.h> #include "ipa.h" -#include "ipa_endpoint.h" #include "ipa_mem.h" -#include "ipa_table.h" #include "ipa_modem.h" #include "ipa_qmi_msg.h" diff --git a/drivers/net/ipa/ipa_qmi.h b/drivers/net/ipa/ipa_qmi.h index 1c236826c17a..fb15ea7f47e0 100644 --- a/drivers/net/ipa/ipa_qmi.h +++ b/drivers/net/ipa/ipa_qmi.h @@ -1,12 +1,14 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2022 Linaro Ltd. + * Copyright (C) 2018-2024 Linaro Ltd. 
*/ #ifndef _IPA_QMI_H_ #define _IPA_QMI_H_ #include <linux/types.h> +#include <linux/workqueue.h> + #include <linux/soc/qcom/qmi.h> struct ipa; diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c index 894f99517233..51dc13a577a5 100644 --- a/drivers/net/ipa/ipa_qmi_msg.c +++ b/drivers/net/ipa/ipa_qmi_msg.c @@ -1,9 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2022 Linaro Ltd. + * Copyright (C) 2018-2024 Linaro Ltd. */ #include <linux/stddef.h> + #include <linux/soc/qcom/qmi.h> #include "ipa_qmi_msg.h" diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h index b73503552c4d..644b8c27108b 100644 --- a/drivers/net/ipa/ipa_qmi_msg.h +++ b/drivers/net/ipa/ipa_qmi_msg.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2022 Linaro Ltd. + * Copyright (C) 2018-2024 Linaro Ltd. */ #ifndef _IPA_QMI_MSG_H_ #define _IPA_QMI_MSG_H_ @@ -9,6 +9,7 @@ /* === Only "ipa_qmi" and "ipa_qmi_msg.c" should include this file === */ #include <linux/types.h> + #include <linux/soc/qcom/qmi.h> /* Request/response/indication QMI message ids used for IPA. Receiving diff --git a/drivers/net/ipa/ipa_reg.c b/drivers/net/ipa/ipa_reg.c index 98625956e0bb..c574f798fdc9 100644 --- a/drivers/net/ipa/ipa_reg.c +++ b/drivers/net/ipa/ipa_reg.c @@ -1,11 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2023 Linaro Ltd. + * Copyright (C) 2019-2024 Linaro Ltd. */ -#include <linux/platform_device.h> #include <linux/io.h> +#include <linux/platform_device.h> #include "ipa.h" #include "ipa_reg.h" diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h index 62c62495b796..61b7c441ae95 100644 --- a/drivers/net/ipa/ipa_reg.h +++ b/drivers/net/ipa/ipa_reg.h @@ -1,15 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2023 Linaro Ltd. + * Copyright (C) 2018-2024 Linaro Ltd. */ #ifndef _IPA_REG_H_ #define _IPA_REG_H_ -#include <linux/bitfield.h> -#include <linux/bug.h> - -#include "ipa_version.h" #include "reg.h" struct platform_device; diff --git a/drivers/net/ipa/ipa_resource.c b/drivers/net/ipa/ipa_resource.c index 82c88a744d10..1b0c4695c32a 100644 --- a/drivers/net/ipa/ipa_resource.c +++ b/drivers/net/ipa/ipa_resource.c @@ -1,11 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2022 Linaro Ltd. + * Copyright (C) 2018-2024 Linaro Ltd. */ #include <linux/types.h> -#include <linux/kernel.h> #include "ipa.h" #include "ipa_data.h" diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c index 2f917582c423..fcaadd111a8a 100644 --- a/drivers/net/ipa/ipa_smp2p.c +++ b/drivers/net/ipa/ipa_smp2p.c @@ -1,20 +1,20 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2022 Linaro Ltd. + * Copyright (C) 2019-2024 Linaro Ltd. 
*/ -#include <linux/types.h> -#include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/notifier.h> #include <linux/panic_notifier.h> +#include <linux/platform_device.h> #include <linux/pm_runtime.h> -#include <linux/soc/qcom/smem.h> +#include <linux/types.h> + #include <linux/soc/qcom/smem_state.h> -#include "ipa_smp2p.h" #include "ipa.h" +#include "ipa_smp2p.h" #include "ipa_uc.h" /** diff --git a/drivers/net/ipa/ipa_sysfs.c b/drivers/net/ipa/ipa_sysfs.c index 2ff09ce343b7..a59bd215494c 100644 --- a/drivers/net/ipa/ipa_sysfs.c +++ b/drivers/net/ipa/ipa_sysfs.c @@ -1,15 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2021-2022 Linaro Ltd. */ +/* Copyright (C) 2021-2024 Linaro Ltd. */ -#include <linux/kernel.h> -#include <linux/types.h> #include <linux/device.h> #include <linux/sysfs.h> +#include <linux/types.h> #include "ipa.h" -#include "ipa_version.h" #include "ipa_sysfs.h" +#include "ipa_version.h" static const char *ipa_version_string(struct ipa *ipa) { diff --git a/drivers/net/ipa/ipa_sysfs.h b/drivers/net/ipa/ipa_sysfs.h index 58ba22810bab..43d9cb0722a4 100644 --- a/drivers/net/ipa/ipa_sysfs.h +++ b/drivers/net/ipa/ipa_sysfs.h @@ -1,13 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2022 Linaro Ltd. + * Copyright (C) 2019-2024 Linaro Ltd. */ #ifndef _IPA_SYSFS_H_ #define _IPA_SYSFS_H_ -struct attribute_group; - extern const struct attribute_group ipa_attribute_group; extern const struct attribute_group ipa_feature_attribute_group; extern const struct attribute_group ipa_endpoint_id_attribute_group; diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c index a24ac11b8893..4e4a3f8aa8e8 100644 --- a/drivers/net/ipa/ipa_table.c +++ b/drivers/net/ipa/ipa_table.c @@ -1,28 +1,25 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2023 Linaro Ltd. + * Copyright (C) 2018-2024 Linaro Ltd. */ -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/bits.h> #include <linux/bitops.h> -#include <linux/bitfield.h> -#include <linux/io.h> #include <linux/build_bug.h> #include <linux/device.h> #include <linux/dma-mapping.h> +#include <linux/io.h> +#include <linux/types.h> +#include "gsi.h" +#include "gsi_trans.h" #include "ipa.h" -#include "ipa_version.h" +#include "ipa_cmd.h" #include "ipa_endpoint.h" -#include "ipa_table.h" -#include "ipa_reg.h" #include "ipa_mem.h" -#include "ipa_cmd.h" -#include "gsi.h" -#include "gsi_trans.h" +#include "ipa_reg.h" +#include "ipa_table.h" +#include "ipa_version.h" /** * DOC: IPA Filter and Route Tables @@ -161,6 +158,12 @@ ipa_table_mem(struct ipa *ipa, bool filter, bool hashed, bool ipv6) return ipa_mem_find(ipa, mem_id); } +/* Return true if hashed tables are supported */ +bool ipa_table_hash_support(struct ipa *ipa) +{ + return ipa->version != IPA_VERSION_4_2; +} + bool ipa_filtered_valid(struct ipa *ipa, u64 filtered) { struct device *dev = ipa->dev; diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h index 7cc951904bb4..16d4d15df9e9 100644 --- a/drivers/net/ipa/ipa_table.h +++ b/drivers/net/ipa/ipa_table.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2022 Linaro Ltd. + * Copyright (C) 2019-2024 Linaro Ltd. 
*/ #ifndef _IPA_TABLE_H_ #define _IPA_TABLE_H_ @@ -23,10 +23,7 @@ bool ipa_filtered_valid(struct ipa *ipa, u64 filtered); * ipa_table_hash_support() - Return true if hashed tables are supported * @ipa: IPA pointer */ -static inline bool ipa_table_hash_support(struct ipa *ipa) -{ - return ipa->version != IPA_VERSION_4_2; -} +bool ipa_table_hash_support(struct ipa *ipa); /** * ipa_table_reset() - Reset filter and route tables entries to "none" diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c index bfd5dc6dab43..2963db83ab6b 100644 --- a/drivers/net/ipa/ipa_uc.c +++ b/drivers/net/ipa/ipa_uc.c @@ -1,17 +1,19 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2022 Linaro Ltd. + * Copyright (C) 2018-2024 Linaro Ltd. */ -#include <linux/types.h> -#include <linux/io.h> #include <linux/delay.h> +#include <linux/io.h> #include <linux/pm_runtime.h> +#include <linux/types.h> #include "ipa.h" -#include "ipa_uc.h" +#include "ipa_interrupt.h" #include "ipa_power.h" +#include "ipa_reg.h" +#include "ipa_uc.h" /** * DOC: The IPA embedded microcontroller diff --git a/drivers/net/ipa/ipa_uc.h b/drivers/net/ipa/ipa_uc.h index 85aa0df818c2..12997ecf5faa 100644 --- a/drivers/net/ipa/ipa_uc.h +++ b/drivers/net/ipa/ipa_uc.h @@ -1,13 +1,12 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2022 Linaro Ltd. + * Copyright (C) 2019-2024 Linaro Ltd. */ #ifndef _IPA_UC_H_ #define _IPA_UC_H_ struct ipa; -enum ipa_irq_id; /** * ipa_uc_interrupt_handler() - Handler for microcontroller IPA interrupts diff --git a/drivers/net/ipa/ipa_version.h b/drivers/net/ipa/ipa_version.h index 38150345b607..38c47f51a50c 100644 --- a/drivers/net/ipa/ipa_version.h +++ b/drivers/net/ipa/ipa_version.h @@ -1,11 +1,13 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2022 Linaro Ltd. + * Copyright (C) 2019-2024 Linaro Ltd. */ #ifndef _IPA_VERSION_H_ #define _IPA_VERSION_H_ +#include <linux/types.h> + /** * enum ipa_version * @IPA_VERSION_3_0: IPA version 3.0/GSI version 1.0 @@ -45,24 +47,6 @@ enum ipa_version { IPA_VERSION_COUNT, /* Last; not a version */ }; -static inline bool ipa_version_supported(enum ipa_version version) -{ - switch (version) { - case IPA_VERSION_3_1: - case IPA_VERSION_3_5_1: - case IPA_VERSION_4_2: - case IPA_VERSION_4_5: - case IPA_VERSION_4_7: - case IPA_VERSION_4_9: - case IPA_VERSION_4_11: - case IPA_VERSION_5_0: - case IPA_VERSION_5_5: - return true; - default: - return false; - } -} - /* Execution environment IDs */ enum gsi_ee_id { GSI_EE_AP = 0x0, diff --git a/drivers/net/ipa/reg.h b/drivers/net/ipa/reg.h index 2ee07eebca67..53c16e594ea4 100644 --- a/drivers/net/ipa/reg.h +++ b/drivers/net/ipa/reg.h @@ -1,13 +1,15 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* *Copyright (C) 2022-2023 Linaro Ltd. */ +/* Copyright (C) 2022-2024 Linaro Ltd. 
*/ #ifndef _REG_H_ #define _REG_H_ -#include <linux/types.h> -#include <linux/log2.h> +#include <linux/array_size.h> +#include <linux/bits.h> #include <linux/bug.h> +#include <linux/log2.h> +#include <linux/types.h> /** * struct reg - A register descriptor diff --git a/drivers/net/ipa/reg/gsi_reg-v3.1.c b/drivers/net/ipa/reg/gsi_reg-v3.1.c index e036805a7882..8c577b8b5c7a 100644 --- a/drivers/net/ipa/reg/gsi_reg-v3.1.c +++ b/drivers/net/ipa/reg/gsi_reg-v3.1.c @@ -1,12 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2023 Linaro Ltd. */ +/* Copyright (C) 2023-2024 Linaro Ltd. */ +#include <linux/array_size.h> +#include <linux/bits.h> #include <linux/types.h> -#include "../gsi.h" -#include "../reg.h" #include "../gsi_reg.h" +#include "../ipa_version.h" +#include "../reg.h" REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk, 0x0000c020 + 0x1000 * GSI_EE_AP); diff --git a/drivers/net/ipa/reg/gsi_reg-v3.5.1.c b/drivers/net/ipa/reg/gsi_reg-v3.5.1.c index 8c3ab3a5288e..a1c609f40d99 100644 --- a/drivers/net/ipa/reg/gsi_reg-v3.5.1.c +++ b/drivers/net/ipa/reg/gsi_reg-v3.5.1.c @@ -1,12 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2023 Linaro Ltd. */ +/* Copyright (C) 2023-2024 Linaro Ltd. */ +#include <linux/array_size.h> +#include <linux/bits.h> #include <linux/types.h> -#include "../gsi.h" -#include "../reg.h" #include "../gsi_reg.h" +#include "../ipa_version.h" +#include "../reg.h" REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk, 0x0000c020 + 0x1000 * GSI_EE_AP); diff --git a/drivers/net/ipa/reg/gsi_reg-v4.0.c b/drivers/net/ipa/reg/gsi_reg-v4.0.c index 7cc7a21d07f9..ff1fb1ca47dd 100644 --- a/drivers/net/ipa/reg/gsi_reg-v4.0.c +++ b/drivers/net/ipa/reg/gsi_reg-v4.0.c @@ -1,12 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2023 Linaro Ltd. */ +/* Copyright (C) 2023-2024 Linaro Ltd. */ +#include <linux/array_size.h> +#include <linux/bits.h> #include <linux/types.h> -#include "../gsi.h" -#include "../reg.h" #include "../gsi_reg.h" +#include "../ipa_version.h" +#include "../reg.h" REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk, 0x0000c020 + 0x1000 * GSI_EE_AP); diff --git a/drivers/net/ipa/reg/gsi_reg-v4.11.c b/drivers/net/ipa/reg/gsi_reg-v4.11.c index 01696519032f..ab9757ce42e7 100644 --- a/drivers/net/ipa/reg/gsi_reg-v4.11.c +++ b/drivers/net/ipa/reg/gsi_reg-v4.11.c @@ -1,12 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2023 Linaro Ltd. */ +/* Copyright (C) 2023-2024 Linaro Ltd. */ +#include <linux/array_size.h> +#include <linux/bits.h> #include <linux/types.h> -#include "../gsi.h" -#include "../reg.h" #include "../gsi_reg.h" +#include "../ipa_version.h" +#include "../reg.h" REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk, 0x0000c020 + 0x1000 * GSI_EE_AP); diff --git a/drivers/net/ipa/reg/gsi_reg-v4.5.c b/drivers/net/ipa/reg/gsi_reg-v4.5.c index 2900e5c3ff88..01b45f79c315 100644 --- a/drivers/net/ipa/reg/gsi_reg-v4.5.c +++ b/drivers/net/ipa/reg/gsi_reg-v4.5.c @@ -1,12 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2023 Linaro Ltd. */ +/* Copyright (C) 2023-2024 Linaro Ltd. 
*/ +#include <linux/array_size.h> +#include <linux/bits.h> #include <linux/types.h> -#include "../gsi.h" -#include "../reg.h" #include "../gsi_reg.h" +#include "../ipa_version.h" +#include "../reg.h" REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk, 0x0000c020 + 0x1000 * GSI_EE_AP); diff --git a/drivers/net/ipa/reg/gsi_reg-v4.9.c b/drivers/net/ipa/reg/gsi_reg-v4.9.c index 8b5d95425a76..783eaaee2936 100644 --- a/drivers/net/ipa/reg/gsi_reg-v4.9.c +++ b/drivers/net/ipa/reg/gsi_reg-v4.9.c @@ -1,12 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2023 Linaro Ltd. */ +/* Copyright (C) 2023-2024 Linaro Ltd. */ +#include <linux/array_size.h> +#include <linux/bits.h> #include <linux/types.h> -#include "../gsi.h" -#include "../reg.h" #include "../gsi_reg.h" +#include "../ipa_version.h" +#include "../reg.h" REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk, 0x0000c020 + 0x1000 * GSI_EE_AP); diff --git a/drivers/net/ipa/reg/gsi_reg-v5.0.c b/drivers/net/ipa/reg/gsi_reg-v5.0.c index 145eb0bd096d..36d1e65df71b 100644 --- a/drivers/net/ipa/reg/gsi_reg-v5.0.c +++ b/drivers/net/ipa/reg/gsi_reg-v5.0.c @@ -1,12 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2023 Linaro Ltd. */ +/* Copyright (C) 2023-2024 Linaro Ltd. */ +#include <linux/array_size.h> +#include <linux/bits.h> #include <linux/types.h> -#include "../gsi.h" -#include "../reg.h" #include "../gsi_reg.h" +#include "../ipa_version.h" +#include "../reg.h" REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk, 0x0000c01c + 0x1000 * GSI_EE_AP); diff --git a/drivers/net/ipa/reg/ipa_reg-v3.1.c b/drivers/net/ipa/reg/ipa_reg-v3.1.c index 648dbfe1fce3..a89103701583 100644 --- a/drivers/net/ipa/reg/ipa_reg-v3.1.c +++ b/drivers/net/ipa/reg/ipa_reg-v3.1.c @@ -1,11 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2022 Linaro Ltd. */ +/* Copyright (C) 2022-2024 Linaro Ltd. */ +#include <linux/array_size.h> +#include <linux/bits.h> #include <linux/types.h> -#include "../ipa.h" #include "../ipa_reg.h" +#include "../ipa_version.h" static const u32 reg_comp_cfg_fmask[] = { [COMP_CFG_ENABLE] = BIT(0), @@ -76,19 +78,6 @@ static const u32 reg_qsb_max_reads_fmask[] = { REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078); -static const u32 reg_filt_rout_hash_en_fmask[] = { - [IPV6_ROUTER_HASH] = BIT(0), - /* Bits 1-3 reserved */ - [IPV6_FILTER_HASH] = BIT(4), - /* Bits 5-7 reserved */ - [IPV4_ROUTER_HASH] = BIT(8), - /* Bits 9-11 reserved */ - [IPV4_FILTER_HASH] = BIT(12), - /* Bits 13-31 reserved */ -}; - -REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x000008c); - static const u32 reg_filt_rout_hash_flush_fmask[] = { [IPV6_ROUTER_HASH] = BIT(0), /* Bits 1-3 reserved */ @@ -403,7 +392,6 @@ static const struct reg *reg_array[] = { [SHARED_MEM_SIZE] = ®_shared_mem_size, [QSB_MAX_WRITES] = ®_qsb_max_writes, [QSB_MAX_READS] = ®_qsb_max_reads, - [FILT_ROUT_HASH_EN] = ®_filt_rout_hash_en, [FILT_ROUT_HASH_FLUSH] = ®_filt_rout_hash_flush, [STATE_AGGR_ACTIVE] = ®_state_aggr_active, [IPA_BCR] = ®_ipa_bcr, diff --git a/drivers/net/ipa/reg/ipa_reg-v3.5.1.c b/drivers/net/ipa/reg/ipa_reg-v3.5.1.c index 78b1bf60cd02..c81c48ec51f9 100644 --- a/drivers/net/ipa/reg/ipa_reg-v3.5.1.c +++ b/drivers/net/ipa/reg/ipa_reg-v3.5.1.c @@ -1,11 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2022 Linaro Ltd. */ +/* Copyright (C) 2022-2024 Linaro Ltd. 
*/ +#include <linux/array_size.h> +#include <linux/bits.h> #include <linux/types.h> -#include "../ipa.h" #include "../ipa_reg.h" +#include "../ipa_version.h" static const u32 reg_comp_cfg_fmask[] = { [COMP_CFG_ENABLE] = BIT(0), @@ -81,19 +83,6 @@ static const u32 reg_qsb_max_reads_fmask[] = { REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078); -static const u32 reg_filt_rout_hash_en_fmask[] = { - [IPV6_ROUTER_HASH] = BIT(0), - /* Bits 1-3 reserved */ - [IPV6_FILTER_HASH] = BIT(4), - /* Bits 5-7 reserved */ - [IPV4_ROUTER_HASH] = BIT(8), - /* Bits 9-11 reserved */ - [IPV4_FILTER_HASH] = BIT(12), - /* Bits 13-31 reserved */ -}; - -REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x000008c); - static const u32 reg_filt_rout_hash_flush_fmask[] = { [IPV6_ROUTER_HASH] = BIT(0), /* Bits 1-3 reserved */ @@ -414,7 +403,6 @@ static const struct reg *reg_array[] = { [SHARED_MEM_SIZE] = ®_shared_mem_size, [QSB_MAX_WRITES] = ®_qsb_max_writes, [QSB_MAX_READS] = ®_qsb_max_reads, - [FILT_ROUT_HASH_EN] = ®_filt_rout_hash_en, [FILT_ROUT_HASH_FLUSH] = ®_filt_rout_hash_flush, [STATE_AGGR_ACTIVE] = ®_state_aggr_active, [IPA_BCR] = ®_ipa_bcr, diff --git a/drivers/net/ipa/reg/ipa_reg-v4.11.c b/drivers/net/ipa/reg/ipa_reg-v4.11.c index 29e71cce4a84..18bddc32c931 100644 --- a/drivers/net/ipa/reg/ipa_reg-v4.11.c +++ b/drivers/net/ipa/reg/ipa_reg-v4.11.c @@ -1,11 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2022 Linaro Ltd. */ +/* Copyright (C) 2022-2024 Linaro Ltd. */ +#include <linux/array_size.h> +#include <linux/bits.h> #include <linux/types.h> -#include "../ipa.h" #include "../ipa_reg.h" +#include "../ipa_version.h" static const u32 reg_comp_cfg_fmask[] = { [RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0), @@ -113,19 +115,6 @@ static const u32 reg_qsb_max_reads_fmask[] = { REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078); -static const u32 reg_filt_rout_hash_en_fmask[] = { - [IPV6_ROUTER_HASH] = BIT(0), - /* Bits 1-3 reserved */ - [IPV6_FILTER_HASH] = BIT(4), - /* Bits 5-7 reserved */ - [IPV4_ROUTER_HASH] = BIT(8), - /* Bits 9-11 reserved */ - [IPV4_FILTER_HASH] = BIT(12), - /* Bits 13-31 reserved */ -}; - -REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148); - static const u32 reg_filt_rout_hash_flush_fmask[] = { [IPV6_ROUTER_HASH] = BIT(0), /* Bits 1-3 reserved */ @@ -470,7 +459,6 @@ static const struct reg *reg_array[] = { [SHARED_MEM_SIZE] = ®_shared_mem_size, [QSB_MAX_WRITES] = ®_qsb_max_writes, [QSB_MAX_READS] = ®_qsb_max_reads, - [FILT_ROUT_HASH_EN] = ®_filt_rout_hash_en, [FILT_ROUT_HASH_FLUSH] = ®_filt_rout_hash_flush, [STATE_AGGR_ACTIVE] = ®_state_aggr_active, [LOCAL_PKT_PROC_CNTXT] = ®_local_pkt_proc_cntxt, diff --git a/drivers/net/ipa/reg/ipa_reg-v4.2.c b/drivers/net/ipa/reg/ipa_reg-v4.2.c index bb7cf488144d..e78dd71e8b03 100644 --- a/drivers/net/ipa/reg/ipa_reg-v4.2.c +++ b/drivers/net/ipa/reg/ipa_reg-v4.2.c @@ -1,11 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2022 Linaro Ltd. */ +/* Copyright (C) 2022-2024 Linaro Ltd. */ +#include <linux/array_size.h> +#include <linux/bits.h> #include <linux/types.h> -#include "../ipa.h" #include "../ipa_reg.h" +#include "../ipa_version.h" static const u32 reg_comp_cfg_fmask[] = { /* Bit 0 reserved */ diff --git a/drivers/net/ipa/reg/ipa_reg-v4.5.c b/drivers/net/ipa/reg/ipa_reg-v4.5.c index 1c58f78851c2..8494731efdd3 100644 --- a/drivers/net/ipa/reg/ipa_reg-v4.5.c +++ b/drivers/net/ipa/reg/ipa_reg-v4.5.c @@ -1,11 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2022 Linaro Ltd. 
*/ +/* Copyright (C) 2022-2024 Linaro Ltd. */ +#include <linux/array_size.h> +#include <linux/bits.h> #include <linux/types.h> -#include "../ipa.h" #include "../ipa_reg.h" +#include "../ipa_version.h" static const u32 reg_comp_cfg_fmask[] = { /* Bit 0 reserved */ @@ -107,19 +109,6 @@ static const u32 reg_qsb_max_reads_fmask[] = { REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078); -static const u32 reg_filt_rout_hash_en_fmask[] = { - [IPV6_ROUTER_HASH] = BIT(0), - /* Bits 1-3 reserved */ - [IPV6_FILTER_HASH] = BIT(4), - /* Bits 5-7 reserved */ - [IPV4_ROUTER_HASH] = BIT(8), - /* Bits 9-11 reserved */ - [IPV4_FILTER_HASH] = BIT(12), - /* Bits 13-31 reserved */ -}; - -REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148); - static const u32 reg_filt_rout_hash_flush_fmask[] = { [IPV6_ROUTER_HASH] = BIT(0), /* Bits 1-3 reserved */ @@ -489,7 +478,6 @@ static const struct reg *reg_array[] = { [SHARED_MEM_SIZE] = ®_shared_mem_size, [QSB_MAX_WRITES] = ®_qsb_max_writes, [QSB_MAX_READS] = ®_qsb_max_reads, - [FILT_ROUT_HASH_EN] = ®_filt_rout_hash_en, [FILT_ROUT_HASH_FLUSH] = ®_filt_rout_hash_flush, [STATE_AGGR_ACTIVE] = ®_state_aggr_active, [LOCAL_PKT_PROC_CNTXT] = ®_local_pkt_proc_cntxt, diff --git a/drivers/net/ipa/reg/ipa_reg-v4.7.c b/drivers/net/ipa/reg/ipa_reg-v4.7.c index 731824fce1d4..2c161cf69193 100644 --- a/drivers/net/ipa/reg/ipa_reg-v4.7.c +++ b/drivers/net/ipa/reg/ipa_reg-v4.7.c @@ -1,11 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2022 Linaro Ltd. */ +/* Copyright (C) 2022-2024 Linaro Ltd. */ +#include <linux/array_size.h> +#include <linux/bits.h> #include <linux/types.h> -#include "../ipa.h" #include "../ipa_reg.h" +#include "../ipa_version.h" static const u32 reg_comp_cfg_fmask[] = { [RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0), @@ -107,19 +109,6 @@ static const u32 reg_qsb_max_reads_fmask[] = { REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078); -static const u32 reg_filt_rout_hash_en_fmask[] = { - [IPV6_ROUTER_HASH] = BIT(0), - /* Bits 1-3 reserved */ - [IPV6_FILTER_HASH] = BIT(4), - /* Bits 5-7 reserved */ - [IPV4_ROUTER_HASH] = BIT(8), - /* Bits 9-11 reserved */ - [IPV4_FILTER_HASH] = BIT(12), - /* Bits 13-31 reserved */ -}; - -REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148); - static const u32 reg_filt_rout_hash_flush_fmask[] = { [IPV6_ROUTER_HASH] = BIT(0), /* Bits 1-3 reserved */ @@ -462,7 +451,6 @@ static const struct reg *reg_array[] = { [SHARED_MEM_SIZE] = ®_shared_mem_size, [QSB_MAX_WRITES] = ®_qsb_max_writes, [QSB_MAX_READS] = ®_qsb_max_reads, - [FILT_ROUT_HASH_EN] = ®_filt_rout_hash_en, [FILT_ROUT_HASH_FLUSH] = ®_filt_rout_hash_flush, [STATE_AGGR_ACTIVE] = ®_state_aggr_active, [LOCAL_PKT_PROC_CNTXT] = ®_local_pkt_proc_cntxt, diff --git a/drivers/net/ipa/reg/ipa_reg-v4.9.c b/drivers/net/ipa/reg/ipa_reg-v4.9.c index 01f87b5290e0..fa6fd312e486 100644 --- a/drivers/net/ipa/reg/ipa_reg-v4.9.c +++ b/drivers/net/ipa/reg/ipa_reg-v4.9.c @@ -1,11 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2022 Linaro Ltd. */ +/* Copyright (C) 2022-2024 Linaro Ltd. 
*/ +#include <linux/array_size.h> +#include <linux/bits.h> #include <linux/types.h> -#include "../ipa.h" #include "../ipa_reg.h" +#include "../ipa_version.h" static const u32 reg_comp_cfg_fmask[] = { [RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0), @@ -112,19 +114,6 @@ static const u32 reg_qsb_max_reads_fmask[] = { REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078); -static const u32 reg_filt_rout_hash_en_fmask[] = { - [IPV6_ROUTER_HASH] = BIT(0), - /* Bits 1-3 reserved */ - [IPV6_FILTER_HASH] = BIT(4), - /* Bits 5-7 reserved */ - [IPV4_ROUTER_HASH] = BIT(8), - /* Bits 9-11 reserved */ - [IPV4_FILTER_HASH] = BIT(12), - /* Bits 13-31 reserved */ -}; - -REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148); - static const u32 reg_filt_rout_hash_flush_fmask[] = { [IPV6_ROUTER_HASH] = BIT(0), /* Bits 1-3 reserved */ @@ -467,7 +456,6 @@ static const struct reg *reg_array[] = { [SHARED_MEM_SIZE] = ®_shared_mem_size, [QSB_MAX_WRITES] = ®_qsb_max_writes, [QSB_MAX_READS] = ®_qsb_max_reads, - [FILT_ROUT_HASH_EN] = ®_filt_rout_hash_en, [FILT_ROUT_HASH_FLUSH] = ®_filt_rout_hash_flush, [STATE_AGGR_ACTIVE] = ®_state_aggr_active, [LOCAL_PKT_PROC_CNTXT] = ®_local_pkt_proc_cntxt, diff --git a/drivers/net/ipa/reg/ipa_reg-v5.0.c b/drivers/net/ipa/reg/ipa_reg-v5.0.c index 95e0edff4170..b26b5f57ac03 100644 --- a/drivers/net/ipa/reg/ipa_reg-v5.0.c +++ b/drivers/net/ipa/reg/ipa_reg-v5.0.c @@ -1,11 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2023 Linaro Ltd. */ +/* Copyright (C) 2023-2024 Linaro Ltd. */ +#include <linux/array_size.h> +#include <linux/bits.h> #include <linux/types.h> -#include "../ipa.h" #include "../ipa_reg.h" +#include "../ipa_version.h" static const u32 reg_flavor_0_fmask[] = { [MAX_PIPES] = GENMASK(7, 0), diff --git a/drivers/net/ipa/reg/ipa_reg-v5.5.c b/drivers/net/ipa/reg/ipa_reg-v5.5.c index 26ca9c9bac59..abb0c443ef66 100644 --- a/drivers/net/ipa/reg/ipa_reg-v5.5.c +++ b/drivers/net/ipa/reg/ipa_reg-v5.5.c @@ -1,10 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2023 Linaro Ltd. */ +/* Copyright (C) 2023-2024 Linaro Ltd. 
*/ -#include <linux/kernel.h> -#include <linux/types.h> +#include <linux/array_size.h> #include <linux/bits.h> +#include <linux/types.h> #include "../ipa_reg.h" #include "../ipa_version.h" diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 5920f7e63352..094f44dac5c8 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -735,6 +735,7 @@ static int ipvlan_device_event(struct notifier_block *unused, switch (event) { case NETDEV_UP: + case NETDEV_DOWN: case NETDEV_CHANGE: list_for_each_entry(ipvlan, &port->ipvlans, pnode) netif_stacked_transfer_operstate(ipvlan->phy_dev, diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index f6eab66c2660..2b486e7c749c 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -141,9 +141,6 @@ static const struct ethtool_ops loopback_ethtool_ops = { static int loopback_dev_init(struct net_device *dev) { - dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats); - if (!dev->lstats) - return -ENOMEM; netdev_lockdep_set_classes(dev); return 0; } @@ -151,7 +148,6 @@ static int loopback_dev_init(struct net_device *dev) static void loopback_dev_free(struct net_device *dev) { dev_net(dev)->loopback_dev = NULL; - free_percpu(dev->lstats); } static const struct net_device_ops loopback_ops = { @@ -191,6 +187,7 @@ static void gen_lo_setup(struct net_device *dev, dev->header_ops = hdr_ops; dev->netdev_ops = dev_ops; dev->needs_free_netdev = true; + dev->pcpu_stat_type = NETDEV_PCPU_STAT_LSTATS; dev->priv_destructor = dev_destructor; netif_set_tso_max_size(dev, GSO_MAX_SIZE); diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index ff016c11b4a0..2da70bc3dd86 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -3753,7 +3753,7 @@ static int macsec_change_mtu(struct net_device *dev, int new_mtu) if (macsec->real_dev->mtu - extra < new_mtu) return -ERANGE; - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return 0; } diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 0cec2783a3e7..67b7ef2d463f 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -865,7 +865,7 @@ static int macvlan_change_mtu(struct net_device *dev, int new_mtu) if (vlan->lowerdev->mtu < new_mtu) return -EINVAL; - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return 0; } diff --git a/drivers/net/mdio/mdio-gpio.c b/drivers/net/mdio/mdio-gpio.c index 778db310a28d..82088741debd 100644 --- a/drivers/net/mdio/mdio-gpio.c +++ b/drivers/net/mdio/mdio-gpio.c @@ -132,8 +132,7 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev, new_bus->phy_ignore_ta_mask = pdata->phy_ignore_ta_mask; } - if (dev->of_node && - of_device_is_compatible(dev->of_node, "microchip,mdio-smi0")) { + if (device_is_compatible(dev, "microchip,mdio-smi0")) { bitbang->ctrl.op_c22_read = 0; bitbang->ctrl.op_c22_write = 0; bitbang->ctrl.override_op_c22 = 1; diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c index d0c916a53d7c..963d8b4af28d 100644 --- a/drivers/net/net_failover.c +++ b/drivers/net/net_failover.c @@ -231,7 +231,7 @@ static int net_failover_change_mtu(struct net_device *dev, int new_mtu) } } - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return 0; } diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c index bd546d4d26c6..3f9c9327f149 100644 --- a/drivers/net/netdevsim/ethtool.c +++ b/drivers/net/netdevsim/ethtool.c @@ -140,6 +140,13 @@ nsim_set_fecparam(struct net_device *dev, struct ethtool_fecparam *fecparam) return 0; } 
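/*
 * Several of the MTU hunks above (macsec, macvlan, net_failover) replace a
 * plain "dev->mtu = new_mtu" store with WRITE_ONCE(dev->mtu, new_mtu).  A
 * minimal sketch of the pattern those hunks assume, using hypothetical
 * helpers (example_change_mtu / example_frame_fits are illustrative only,
 * not part of the patches above): the annotated write pairs with READ_ONCE()
 * in readers that look at the MTU without holding RTNL.
 */
#include <linux/netdevice.h>

static int example_change_mtu(struct net_device *dev, int new_mtu)
{
	/* Writer side: the caller holds rtnl_lock(); the annotation documents
	 * that concurrent lockless readers of dev->mtu are expected.
	 */
	WRITE_ONCE(dev->mtu, new_mtu);
	return 0;
}

static bool example_frame_fits(const struct net_device *dev, unsigned int len)
{
	/* Reader side: sample the MTU once, without taking any lock. */
	return len <= READ_ONCE(dev->mtu);
}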
+static void +nsim_get_fec_stats(struct net_device *dev, struct ethtool_fec_stats *fec_stats) +{ + fec_stats->corrected_blocks.total = 123; + fec_stats->uncorrectable_blocks.total = 4; +} + static int nsim_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) { @@ -163,6 +170,7 @@ static const struct ethtool_ops nsim_ethtool_ops = { .set_channels = nsim_set_channels, .get_fecparam = nsim_get_fecparam, .set_fecparam = nsim_set_fecparam, + .get_fec_stats = nsim_get_fec_stats, .get_ts_info = nsim_get_ts_info, }; @@ -182,6 +190,9 @@ void nsim_ethtool_init(struct netdevsim *ns) nsim_ethtool_ring_init(ns); + ns->ethtool.pauseparam.report_stats_rx = true; + ns->ethtool.pauseparam.report_stats_tx = true; + ns->ethtool.fec.fec = ETHTOOL_FEC_NONE; ns->ethtool.fec.active_fec = ETHTOOL_FEC_NONE; diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index 8330bc0bcb7e..c22897bf5509 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -19,6 +19,8 @@ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/slab.h> +#include <net/netdev_queues.h> +#include <net/page_pool/helpers.h> #include <net/netlink.h> #include <net/pkt_cls.h> #include <net/rtnetlink.h> @@ -26,11 +28,33 @@ #include "netdevsim.h" +#define NSIM_RING_SIZE 256 + +static int nsim_napi_rx(struct nsim_rq *rq, struct sk_buff *skb) +{ + if (skb_queue_len(&rq->skb_queue) > NSIM_RING_SIZE) { + dev_kfree_skb_any(skb); + return NET_RX_DROP; + } + + skb_queue_tail(&rq->skb_queue, skb); + return NET_RX_SUCCESS; +} + +static int nsim_forward_skb(struct net_device *dev, struct sk_buff *skb, + struct nsim_rq *rq) +{ + return __dev_forward_skb(dev, skb) ?: nsim_napi_rx(rq, skb); +} + static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct netdevsim *ns = netdev_priv(dev); + struct net_device *peer_dev; unsigned int len = skb->len; struct netdevsim *peer_ns; + struct nsim_rq *rq; + int rxq; rcu_read_lock(); if (!nsim_ipsec_tx(ns, skb)) @@ -40,10 +64,18 @@ static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev) if (!peer_ns) goto out_drop_free; + peer_dev = peer_ns->netdev; + rxq = skb_get_queue_mapping(skb); + if (rxq >= peer_dev->num_rx_queues) + rxq = rxq % peer_dev->num_rx_queues; + rq = &peer_ns->rq[rxq]; + skb_tx_timestamp(skb); - if (unlikely(dev_forward_skb(peer_ns->netdev, skb) == NET_RX_DROP)) + if (unlikely(nsim_forward_skb(peer_dev, skb, rq) == NET_RX_DROP)) goto out_drop_cnt; + napi_schedule(&rq->napi); + rcu_read_unlock(); u64_stats_update_begin(&ns->syncp); ns->tx_packets++; @@ -72,7 +104,7 @@ static int nsim_change_mtu(struct net_device *dev, int new_mtu) if (ns->xdp.prog && new_mtu > NSIM_XDP_MAX_MTU) return -EBUSY; - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return 0; } @@ -298,6 +330,150 @@ static int nsim_get_iflink(const struct net_device *dev) return iflink; } +static int nsim_rcv(struct nsim_rq *rq, int budget) +{ + struct sk_buff *skb; + int i; + + for (i = 0; i < budget; i++) { + if (skb_queue_empty(&rq->skb_queue)) + break; + + skb = skb_dequeue(&rq->skb_queue); + netif_receive_skb(skb); + } + + return i; +} + +static int nsim_poll(struct napi_struct *napi, int budget) +{ + struct nsim_rq *rq = container_of(napi, struct nsim_rq, napi); + int done; + + done = nsim_rcv(rq, budget); + napi_complete(napi); + + return done; +} + +static int nsim_create_page_pool(struct nsim_rq *rq) +{ + struct page_pool_params p = { + .order = 0, + .pool_size = NSIM_RING_SIZE, + .nid = NUMA_NO_NODE, + .dev 
= &rq->napi.dev->dev, + .napi = &rq->napi, + .dma_dir = DMA_BIDIRECTIONAL, + .netdev = rq->napi.dev, + }; + + rq->page_pool = page_pool_create(&p); + if (IS_ERR(rq->page_pool)) { + int err = PTR_ERR(rq->page_pool); + + rq->page_pool = NULL; + return err; + } + return 0; +} + +static int nsim_init_napi(struct netdevsim *ns) +{ + struct net_device *dev = ns->netdev; + struct nsim_rq *rq; + int err, i; + + for (i = 0; i < dev->num_rx_queues; i++) { + rq = &ns->rq[i]; + + netif_napi_add(dev, &rq->napi, nsim_poll); + } + + for (i = 0; i < dev->num_rx_queues; i++) { + rq = &ns->rq[i]; + + err = nsim_create_page_pool(rq); + if (err) + goto err_pp_destroy; + } + + return 0; + +err_pp_destroy: + while (i--) { + page_pool_destroy(ns->rq[i].page_pool); + ns->rq[i].page_pool = NULL; + } + + for (i = 0; i < dev->num_rx_queues; i++) + __netif_napi_del(&ns->rq[i].napi); + + return err; +} + +static void nsim_enable_napi(struct netdevsim *ns) +{ + struct net_device *dev = ns->netdev; + int i; + + for (i = 0; i < dev->num_rx_queues; i++) { + struct nsim_rq *rq = &ns->rq[i]; + + netif_queue_set_napi(dev, i, NETDEV_QUEUE_TYPE_RX, &rq->napi); + napi_enable(&rq->napi); + } +} + +static int nsim_open(struct net_device *dev) +{ + struct netdevsim *ns = netdev_priv(dev); + int err; + + err = nsim_init_napi(ns); + if (err) + return err; + + nsim_enable_napi(ns); + + return 0; +} + +static void nsim_del_napi(struct netdevsim *ns) +{ + struct net_device *dev = ns->netdev; + int i; + + for (i = 0; i < dev->num_rx_queues; i++) { + struct nsim_rq *rq = &ns->rq[i]; + + napi_disable(&rq->napi); + __netif_napi_del(&rq->napi); + } + synchronize_net(); + + for (i = 0; i < dev->num_rx_queues; i++) { + page_pool_destroy(ns->rq[i].page_pool); + ns->rq[i].page_pool = NULL; + } +} + +static int nsim_stop(struct net_device *dev) +{ + struct netdevsim *ns = netdev_priv(dev); + struct netdevsim *peer; + + netif_carrier_off(dev); + peer = rtnl_dereference(ns->peer); + if (peer) + netif_carrier_off(peer->netdev); + + nsim_del_napi(ns); + + return 0; +} + static const struct net_device_ops nsim_netdev_ops = { .ndo_start_xmit = nsim_start_xmit, .ndo_set_rx_mode = nsim_set_rx_mode, @@ -317,6 +493,8 @@ static const struct net_device_ops nsim_netdev_ops = { .ndo_set_features = nsim_set_features, .ndo_get_iflink = nsim_get_iflink, .ndo_bpf = nsim_bpf, + .ndo_open = nsim_open, + .ndo_stop = nsim_stop, }; static const struct net_device_ops nsim_vf_netdev_ops = { @@ -330,6 +508,107 @@ static const struct net_device_ops nsim_vf_netdev_ops = { .ndo_set_features = nsim_set_features, }; +/* We don't have true per-queue stats, yet, so do some random fakery here. + * Only report stuff for queue 0. 
+ */ +static void nsim_get_queue_stats_rx(struct net_device *dev, int idx, + struct netdev_queue_stats_rx *stats) +{ + struct rtnl_link_stats64 rtstats = {}; + + if (!idx) + nsim_get_stats64(dev, &rtstats); + + stats->packets = rtstats.rx_packets - !!rtstats.rx_packets; + stats->bytes = rtstats.rx_bytes; +} + +static void nsim_get_queue_stats_tx(struct net_device *dev, int idx, + struct netdev_queue_stats_tx *stats) +{ + struct rtnl_link_stats64 rtstats = {}; + + if (!idx) + nsim_get_stats64(dev, &rtstats); + + stats->packets = rtstats.tx_packets - !!rtstats.tx_packets; + stats->bytes = rtstats.tx_bytes; +} + +static void nsim_get_base_stats(struct net_device *dev, + struct netdev_queue_stats_rx *rx, + struct netdev_queue_stats_tx *tx) +{ + struct rtnl_link_stats64 rtstats = {}; + + nsim_get_stats64(dev, &rtstats); + + rx->packets = !!rtstats.rx_packets; + rx->bytes = 0; + tx->packets = !!rtstats.tx_packets; + tx->bytes = 0; +} + +static const struct netdev_stat_ops nsim_stat_ops = { + .get_queue_stats_tx = nsim_get_queue_stats_tx, + .get_queue_stats_rx = nsim_get_queue_stats_rx, + .get_base_stats = nsim_get_base_stats, +}; + +static ssize_t +nsim_pp_hold_read(struct file *file, char __user *data, + size_t count, loff_t *ppos) +{ + struct netdevsim *ns = file->private_data; + char buf[3] = "n\n"; + + if (ns->page) + buf[0] = 'y'; + + return simple_read_from_buffer(data, count, ppos, buf, 2); +} + +static ssize_t +nsim_pp_hold_write(struct file *file, const char __user *data, + size_t count, loff_t *ppos) +{ + struct netdevsim *ns = file->private_data; + ssize_t ret; + bool val; + + ret = kstrtobool_from_user(data, count, &val); + if (ret) + return ret; + + rtnl_lock(); + ret = count; + if (val == !!ns->page) + goto exit; + + if (!netif_running(ns->netdev) && val) { + ret = -ENETDOWN; + } else if (val) { + ns->page = page_pool_dev_alloc_pages(ns->rq[0].page_pool); + if (!ns->page) + ret = -ENOMEM; + } else { + page_pool_put_full_page(ns->page->pp, ns->page, false); + ns->page = NULL; + } + rtnl_unlock(); + +exit: + return count; +} + +static const struct file_operations nsim_pp_hold_fops = { + .open = simple_open, + .read = nsim_pp_hold_read, + .write = nsim_pp_hold_write, + .llseek = generic_file_llseek, + .owner = THIS_MODULE, +}; + static void nsim_setup(struct net_device *dev) { ether_setup(dev); @@ -349,6 +628,35 @@ static void nsim_setup(struct net_device *dev) dev->xdp_features = NETDEV_XDP_ACT_HW_OFFLOAD; } +static int nsim_queue_init(struct netdevsim *ns) +{ + struct net_device *dev = ns->netdev; + int i; + + ns->rq = kvcalloc(dev->num_rx_queues, sizeof(*ns->rq), + GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); + if (!ns->rq) + return -ENOMEM; + + for (i = 0; i < dev->num_rx_queues; i++) + skb_queue_head_init(&ns->rq[i].skb_queue); + + return 0; +} + +static void nsim_queue_free(struct netdevsim *ns) +{ + struct net_device *dev = ns->netdev; + int i; + + for (i = 0; i < dev->num_rx_queues; i++) + skb_queue_purge_reason(&ns->rq[i].skb_queue, + SKB_DROP_REASON_QUEUE_PURGE); + + kvfree(ns->rq); + ns->rq = NULL; +} + static int nsim_init_netdevsim(struct netdevsim *ns) { struct mock_phc *phc; @@ -360,16 +668,21 @@ static int nsim_init_netdevsim(struct netdevsim *ns) ns->phc = phc; ns->netdev->netdev_ops = &nsim_netdev_ops; + ns->netdev->stat_ops = &nsim_stat_ops; err = nsim_udp_tunnels_info_create(ns->nsim_dev, ns->netdev); if (err) goto err_phc_destroy; rtnl_lock(); - err = nsim_bpf_init(ns); + err = nsim_queue_init(ns); if (err) goto err_utn_destroy; + err = nsim_bpf_init(ns); + if (err) 
+ goto err_rq_destroy; + nsim_macsec_init(ns); nsim_ipsec_init(ns); @@ -383,6 +696,8 @@ err_ipsec_teardown: nsim_ipsec_teardown(ns); nsim_macsec_teardown(ns); nsim_bpf_uninit(ns); +err_rq_destroy: + nsim_queue_free(ns); err_utn_destroy: rtnl_unlock(); nsim_udp_tunnels_info_destroy(ns->netdev); @@ -436,6 +751,10 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port) err = nsim_init_netdevsim_vf(ns); if (err) goto err_free_netdev; + + ns->pp_dfs = debugfs_create_file("pp_hold", 0600, nsim_dev_port->ddir, + ns, &nsim_pp_hold_fops); + return ns; err_free_netdev: @@ -448,6 +767,8 @@ void nsim_destroy(struct netdevsim *ns) struct net_device *dev = ns->netdev; struct netdevsim *peer; + debugfs_remove(ns->pp_dfs); + rtnl_lock(); peer = rtnl_dereference(ns->peer); if (peer) @@ -458,10 +779,18 @@ void nsim_destroy(struct netdevsim *ns) nsim_macsec_teardown(ns); nsim_ipsec_teardown(ns); nsim_bpf_uninit(ns); + nsim_queue_free(ns); } rtnl_unlock(); if (nsim_dev_port_is_pf(ns->nsim_dev_port)) nsim_exit_netdevsim(ns); + + /* Put this intentionally late to exercise the orphaning path */ + if (ns->page) { + page_pool_put_full_page(ns->page->pp, ns->page, false); + ns->page = NULL; + } + free_netdev(dev); } diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index 553c4b9b4f63..bf02efa10956 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -90,11 +90,18 @@ struct nsim_ethtool { struct ethtool_fecparam fec; }; +struct nsim_rq { + struct napi_struct napi; + struct sk_buff_head skb_queue; + struct page_pool *page_pool; +}; + struct netdevsim { struct net_device *netdev; struct nsim_dev *nsim_dev; struct nsim_dev_port *nsim_dev_port; struct mock_phc *phc; + struct nsim_rq *rq; u64 tx_packets; u64 tx_bytes; @@ -125,6 +132,9 @@ struct netdevsim { struct debugfs_u32_array dfs_ports[2]; } udp_ports; + struct page *page; + struct dentry *pp_dfs; + struct nsim_ethtool ethtool; struct netdevsim __rcu *peer; }; diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c index 536bd6564f8b..ffe7f463e16e 100644 --- a/drivers/net/ntb_netdev.c +++ b/drivers/net/ntb_netdev.c @@ -306,7 +306,7 @@ static int ntb_netdev_change_mtu(struct net_device *ndev, int new_mtu) return -EINVAL; if (!netif_running(ndev)) { - ndev->mtu = new_mtu; + WRITE_ONCE(ndev->mtu, new_mtu); return 0; } @@ -335,7 +335,7 @@ static int ntb_netdev_change_mtu(struct net_device *ndev, int new_mtu) } } - ndev->mtu = new_mtu; + WRITE_ONCE(ndev->mtu, new_mtu); ntb_transport_link_up(dev->qp); diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c index 853b8c138718..b79aedad855b 100644 --- a/drivers/net/pcs/pcs-lynx.c +++ b/drivers/net/pcs/pcs-lynx.c @@ -61,11 +61,10 @@ static void lynx_pcs_get_state_usxgmii(struct mdio_device *pcs, static void lynx_pcs_get_state_2500basex(struct mdio_device *pcs, struct phylink_link_state *state) { - int bmsr, lpa; + int bmsr; bmsr = mdiodev_read(pcs, MII_BMSR); - lpa = mdiodev_read(pcs, MII_LPA); - if (bmsr < 0 || lpa < 0) { + if (bmsr < 0) { state->link = false; return; } diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c index 4bd66fdde367..d0a722d43368 100644 --- a/drivers/net/pcs/pcs-rzn1-miic.c +++ b/drivers/net/pcs/pcs-rzn1-miic.c @@ -279,10 +279,38 @@ static int miic_validate(struct phylink_pcs *pcs, unsigned long *supported, return -EINVAL; } +static int miic_pre_init(struct phylink_pcs *pcs) +{ + struct miic_port *miic_port = phylink_pcs_to_miic_port(pcs); + struct miic *miic = 
+		miic_port->miic;
+	u32 val, mask;
+
+	/* Start RX clock if required */
+	if (pcs->rxc_always_on) {
+		/* In MII through mode, the clock signals will be driven by the
+		 * external PHY, which might not be initialized yet. Set RMII
+		 * as default mode to ensure that a reference clock signal is
+		 * generated.
+		 */
+		miic_port->interface = PHY_INTERFACE_MODE_RMII;
+
+		val = FIELD_PREP(MIIC_CONVCTRL_CONV_MODE, CONV_MODE_RMII) |
+		      FIELD_PREP(MIIC_CONVCTRL_CONV_SPEED, CONV_MODE_100MBPS);
+		mask = MIIC_CONVCTRL_CONV_MODE | MIIC_CONVCTRL_CONV_SPEED;
+
+		miic_reg_rmw(miic, MIIC_CONVCTRL(miic_port->port), mask, val);
+
+		miic_converter_enable(miic, miic_port->port, 1);
+	}
+
+	return 0;
+}
+
 static const struct phylink_pcs_ops miic_phylink_ops = {
 	.pcs_validate = miic_validate,
 	.pcs_config = miic_config,
 	.pcs_link_up = miic_link_up,
+	.pcs_pre_init = miic_pre_init,
 };
 
 struct phylink_pcs *miic_create(struct device *dev, struct device_node *np)
diff --git a/drivers/net/pfcp.c b/drivers/net/pfcp.c
new file mode 100644
index 000000000000..69434fd13f96
--- /dev/null
+++ b/drivers/net/pfcp.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PFCP according to 3GPP TS 29.244
+ *
+ * Copyright (C) 2022, Intel Corporation.
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/rculist.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+#include <net/udp.h>
+#include <net/udp_tunnel.h>
+#include <net/pfcp.h>
+
+struct pfcp_dev {
+	struct list_head list;
+
+	struct socket *sock;
+	struct net_device *dev;
+	struct net *net;
+
+	struct gro_cells gro_cells;
+};
+
+static unsigned int pfcp_net_id __read_mostly;
+
+struct pfcp_net {
+	struct list_head pfcp_dev_list;
+};
+
+static void
+pfcp_session_recv(struct pfcp_dev *pfcp, struct sk_buff *skb,
+		  struct pfcp_metadata *md)
+{
+	struct pfcphdr_session *unparsed = pfcp_hdr_session(skb);
+
+	md->seid = unparsed->seid;
+	md->type = PFCP_TYPE_SESSION;
+}
+
+static void
+pfcp_node_recv(struct pfcp_dev *pfcp, struct sk_buff *skb,
+	       struct pfcp_metadata *md)
+{
+	md->type = PFCP_TYPE_NODE;
+}
+
+static int pfcp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
+	struct metadata_dst *tun_dst;
+	struct pfcp_metadata *md;
+	struct pfcphdr *unparsed;
+	struct pfcp_dev *pfcp;
+
+	if (unlikely(!pskb_may_pull(skb, PFCP_HLEN)))
+		goto drop;
+
+	pfcp = rcu_dereference_sk_user_data(sk);
+	if (unlikely(!pfcp))
+		goto drop;
+
+	unparsed = pfcp_hdr(skb);
+
+	ip_tunnel_flags_zero(flags);
+	tun_dst = udp_tun_rx_dst(skb, sk->sk_family, flags, 0,
+				 sizeof(*md));
+	if (unlikely(!tun_dst))
+		goto drop;
+
+	md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
+	if (unlikely(!md))
+		goto drop;
+
+	if (unparsed->flags & PFCP_SEID_FLAG)
+		pfcp_session_recv(pfcp, skb, md);
+	else
+		pfcp_node_recv(pfcp, skb, md);
+
+	__set_bit(IP_TUNNEL_PFCP_OPT_BIT, tun_dst->u.tun_info.key.tun_flags);
+	tun_dst->u.tun_info.options_len = sizeof(*md);
+
+	if (unlikely(iptunnel_pull_header(skb, PFCP_HLEN, skb->protocol,
+					  !net_eq(sock_net(sk),
+						  dev_net(pfcp->dev)))))
+		goto drop;
+
+	skb_dst_set(skb, (struct dst_entry *)tun_dst);
+
+	skb_reset_network_header(skb);
+	skb_reset_mac_header(skb);
+	skb->dev = pfcp->dev;
+
+	gro_cells_receive(&pfcp->gro_cells, skb);
+
+	return 0;
+drop:
+	kfree_skb(skb);
+	return 0;
+}
+
+static void pfcp_del_sock(struct pfcp_dev *pfcp)
+{
+	udp_tunnel_sock_release(pfcp->sock);
+	pfcp->sock = NULL;
+}
+
+static void pfcp_dev_uninit(struct net_device *dev)
+{
+	struct pfcp_dev *pfcp =
+		netdev_priv(dev);
+
+	gro_cells_destroy(&pfcp->gro_cells);
+	pfcp_del_sock(pfcp);
+}
+
+static int pfcp_dev_init(struct net_device *dev)
+{
+	struct pfcp_dev *pfcp = netdev_priv(dev);
+
+	pfcp->dev = dev;
+
+	return gro_cells_init(&pfcp->gro_cells, dev);
+}
+
+static const struct net_device_ops pfcp_netdev_ops = {
+	.ndo_init = pfcp_dev_init,
+	.ndo_uninit = pfcp_dev_uninit,
+	.ndo_get_stats64 = dev_get_tstats64,
+};
+
+static const struct device_type pfcp_type = {
+	.name = "pfcp",
+};
+
+static void pfcp_link_setup(struct net_device *dev)
+{
+	dev->netdev_ops = &pfcp_netdev_ops;
+	dev->needs_free_netdev = true;
+	SET_NETDEV_DEVTYPE(dev, &pfcp_type);
+
+	dev->hard_header_len = 0;
+	dev->addr_len = 0;
+
+	dev->type = ARPHRD_NONE;
+	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+	dev->priv_flags |= IFF_NO_QUEUE;
+
+	netif_keep_dst(dev);
+}
+
+static struct socket *pfcp_create_sock(struct pfcp_dev *pfcp)
+{
+	struct udp_tunnel_sock_cfg tuncfg = {};
+	struct udp_port_cfg udp_conf = {
+		.local_ip.s_addr = htonl(INADDR_ANY),
+		.family = AF_INET,
+	};
+	struct net *net = pfcp->net;
+	struct socket *sock;
+	int err;
+
+	udp_conf.local_udp_port = htons(PFCP_PORT);
+
+	err = udp_sock_create(net, &udp_conf, &sock);
+	if (err)
+		return ERR_PTR(err);
+
+	tuncfg.sk_user_data = pfcp;
+	tuncfg.encap_rcv = pfcp_encap_recv;
+	tuncfg.encap_type = 1;
+
+	setup_udp_tunnel_sock(net, sock, &tuncfg);
+
+	return sock;
+}
+
+static int pfcp_add_sock(struct pfcp_dev *pfcp)
+{
+	pfcp->sock = pfcp_create_sock(pfcp);
+
+	return PTR_ERR_OR_ZERO(pfcp->sock);
+}
+
+static int pfcp_newlink(struct net *net, struct net_device *dev,
+			struct nlattr *tb[], struct nlattr *data[],
+			struct netlink_ext_ack *extack)
+{
+	struct pfcp_dev *pfcp = netdev_priv(dev);
+	struct pfcp_net *pn;
+	int err;
+
+	pfcp->net = net;
+
+	err = pfcp_add_sock(pfcp);
+	if (err) {
+		netdev_dbg(dev, "failed to add pfcp socket %d\n", err);
+		goto exit_err;
+	}
+
+	err = register_netdevice(dev);
+	if (err) {
+		netdev_dbg(dev, "failed to register pfcp netdev %d\n", err);
+		goto exit_del_pfcp_sock;
+	}
+
+	pn = net_generic(dev_net(dev), pfcp_net_id);
+	list_add_rcu(&pfcp->list, &pn->pfcp_dev_list);
+
+	netdev_dbg(dev, "registered new PFCP interface\n");
+
+	return 0;
+
+exit_del_pfcp_sock:
+	pfcp_del_sock(pfcp);
+exit_err:
+	pfcp->net = NULL;
+	return err;
+}
+
+static void pfcp_dellink(struct net_device *dev, struct list_head *head)
+{
+	struct pfcp_dev *pfcp = netdev_priv(dev);
+
+	list_del_rcu(&pfcp->list);
+	unregister_netdevice_queue(dev, head);
+}
+
+static struct rtnl_link_ops pfcp_link_ops __read_mostly = {
+	.kind = "pfcp",
+	.priv_size = sizeof(struct pfcp_dev),
+	.setup = pfcp_link_setup,
+	.newlink = pfcp_newlink,
+	.dellink = pfcp_dellink,
+};
+
+static int __net_init pfcp_net_init(struct net *net)
+{
+	struct pfcp_net *pn = net_generic(net, pfcp_net_id);
+
+	INIT_LIST_HEAD(&pn->pfcp_dev_list);
+	return 0;
+}
+
+static void __net_exit pfcp_net_exit(struct net *net)
+{
+	struct pfcp_net *pn = net_generic(net, pfcp_net_id);
+	struct pfcp_dev *pfcp;
+	LIST_HEAD(list);
+
+	rtnl_lock();
+	list_for_each_entry(pfcp, &pn->pfcp_dev_list, list)
+		pfcp_dellink(pfcp->dev, &list);
+
+	unregister_netdevice_many(&list);
+	rtnl_unlock();
+}
+
+static struct pernet_operations pfcp_net_ops = {
+	.init = pfcp_net_init,
+	.exit = pfcp_net_exit,
+	.id = &pfcp_net_id,
+	.size = sizeof(struct pfcp_net),
+};
+
+static int __init pfcp_init(void)
+{
+	int err;
+
+	err = register_pernet_subsys(&pfcp_net_ops);
+	if (err)
+		goto exit_err;
+
+	err =
+		rtnl_link_register(&pfcp_link_ops);
+	if (err)
+		goto exit_unregister_subsys;
+	return 0;
+
+exit_unregister_subsys:
+	unregister_pernet_subsys(&pfcp_net_ops);
+exit_err:
+	pr_err("loading PFCP module failed: err %d\n", err);
+	return err;
+}
+late_initcall(pfcp_init);
+
+static void __exit pfcp_exit(void)
+{
+	rtnl_link_unregister(&pfcp_link_ops);
+	unregister_pernet_subsys(&pfcp_net_ops);
+
+	pr_info("PFCP module unloaded\n");
+}
+module_exit(pfcp_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Wojciech Drewek <[email protected]>");
+MODULE_DESCRIPTION("Interface driver for PFCP encapsulated traffic");
+MODULE_ALIAS_RTNL_LINK("pfcp");
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 1df0595c5ba9..7fddc8306d82 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -76,6 +76,11 @@ config SFP
 
 comment "MII PHY device drivers"
 
+config AIR_EN8811H_PHY
+	tristate "Airoha EN8811H 2.5 Gigabit PHY"
+	help
+	  Currently supports the Airoha EN8811H PHY.
+
 config AMD_PHY
 	tristate "AMD and Altima PHYs"
 	help
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 197acfa0b412..202ed7f450da 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -34,6 +34,7 @@ obj-y += $(sfp-obj-y) $(sfp-obj-m)
 
 obj-$(CONFIG_ADIN_PHY) += adin.o
 obj-$(CONFIG_ADIN1100_PHY) += adin1100.o
+obj-$(CONFIG_AIR_EN8811H_PHY) += air_en8811h.o
 obj-$(CONFIG_AMD_PHY) += amd.o
 obj-$(CONFIG_AQUANTIA_PHY) += aquantia/
 ifdef CONFIG_AX88796B_RUST_PHY
diff --git a/drivers/net/phy/air_en8811h.c b/drivers/net/phy/air_en8811h.c
new file mode 100644
index 000000000000..3cdc8c6b30b6
--- /dev/null
+++ b/drivers/net/phy/air_en8811h.c
@@ -0,0 +1,1090 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for the Airoha EN8811H 2.5 Gigabit PHY.
+ *
+ * Limitations of the EN8811H:
+ * - Only full duplex supported
+ * - Forced speed (AN off) is not supported by hardware (100Mbps)
+ *
+ * Source originated from airoha's en8811h.c and en8811h.h v1.2.1
+ *
+ * Copyright (C) 2023 Airoha Technology Corp.
+ */ + +#include <linux/phy.h> +#include <linux/firmware.h> +#include <linux/property.h> +#include <linux/wordpart.h> +#include <asm/unaligned.h> + +#define EN8811H_PHY_ID 0x03a2a411 + +#define EN8811H_MD32_DM "airoha/EthMD32.dm.bin" +#define EN8811H_MD32_DSP "airoha/EthMD32.DSP.bin" + +#define AIR_FW_ADDR_DM 0x00000000 +#define AIR_FW_ADDR_DSP 0x00100000 + +/* MII Registers */ +#define AIR_AUX_CTRL_STATUS 0x1d +#define AIR_AUX_CTRL_STATUS_SPEED_MASK GENMASK(4, 2) +#define AIR_AUX_CTRL_STATUS_SPEED_100 0x4 +#define AIR_AUX_CTRL_STATUS_SPEED_1000 0x8 +#define AIR_AUX_CTRL_STATUS_SPEED_2500 0xc + +#define AIR_EXT_PAGE_ACCESS 0x1f +#define AIR_PHY_PAGE_STANDARD 0x0000 +#define AIR_PHY_PAGE_EXTENDED_4 0x0004 + +/* MII Registers Page 4*/ +#define AIR_BPBUS_MODE 0x10 +#define AIR_BPBUS_MODE_ADDR_FIXED 0x0000 +#define AIR_BPBUS_MODE_ADDR_INCR BIT(15) +#define AIR_BPBUS_WR_ADDR_HIGH 0x11 +#define AIR_BPBUS_WR_ADDR_LOW 0x12 +#define AIR_BPBUS_WR_DATA_HIGH 0x13 +#define AIR_BPBUS_WR_DATA_LOW 0x14 +#define AIR_BPBUS_RD_ADDR_HIGH 0x15 +#define AIR_BPBUS_RD_ADDR_LOW 0x16 +#define AIR_BPBUS_RD_DATA_HIGH 0x17 +#define AIR_BPBUS_RD_DATA_LOW 0x18 + +/* Registers on MDIO_MMD_VEND1 */ +#define EN8811H_PHY_FW_STATUS 0x8009 +#define EN8811H_PHY_READY 0x02 + +#define AIR_PHY_MCU_CMD_1 0x800c +#define AIR_PHY_MCU_CMD_1_MODE1 0x0 +#define AIR_PHY_MCU_CMD_2 0x800d +#define AIR_PHY_MCU_CMD_2_MODE1 0x0 +#define AIR_PHY_MCU_CMD_3 0x800e +#define AIR_PHY_MCU_CMD_3_MODE1 0x1101 +#define AIR_PHY_MCU_CMD_3_DOCMD 0x1100 +#define AIR_PHY_MCU_CMD_4 0x800f +#define AIR_PHY_MCU_CMD_4_MODE1 0x0002 +#define AIR_PHY_MCU_CMD_4_INTCLR 0x00e4 + +/* Registers on MDIO_MMD_VEND2 */ +#define AIR_PHY_LED_BCR 0x021 +#define AIR_PHY_LED_BCR_MODE_MASK GENMASK(1, 0) +#define AIR_PHY_LED_BCR_TIME_TEST BIT(2) +#define AIR_PHY_LED_BCR_CLK_EN BIT(3) +#define AIR_PHY_LED_BCR_EXT_CTRL BIT(15) + +#define AIR_PHY_LED_DUR_ON 0x022 + +#define AIR_PHY_LED_DUR_BLINK 0x023 + +#define AIR_PHY_LED_ON(i) (0x024 + ((i) * 2)) +#define AIR_PHY_LED_ON_MASK (GENMASK(6, 0) | BIT(8)) +#define AIR_PHY_LED_ON_LINK1000 BIT(0) +#define AIR_PHY_LED_ON_LINK100 BIT(1) +#define AIR_PHY_LED_ON_LINK10 BIT(2) +#define AIR_PHY_LED_ON_LINKDOWN BIT(3) +#define AIR_PHY_LED_ON_FDX BIT(4) /* Full duplex */ +#define AIR_PHY_LED_ON_HDX BIT(5) /* Half duplex */ +#define AIR_PHY_LED_ON_FORCE_ON BIT(6) +#define AIR_PHY_LED_ON_LINK2500 BIT(8) +#define AIR_PHY_LED_ON_POLARITY BIT(14) +#define AIR_PHY_LED_ON_ENABLE BIT(15) + +#define AIR_PHY_LED_BLINK(i) (0x025 + ((i) * 2)) +#define AIR_PHY_LED_BLINK_1000TX BIT(0) +#define AIR_PHY_LED_BLINK_1000RX BIT(1) +#define AIR_PHY_LED_BLINK_100TX BIT(2) +#define AIR_PHY_LED_BLINK_100RX BIT(3) +#define AIR_PHY_LED_BLINK_10TX BIT(4) +#define AIR_PHY_LED_BLINK_10RX BIT(5) +#define AIR_PHY_LED_BLINK_COLLISION BIT(6) +#define AIR_PHY_LED_BLINK_RX_CRC_ERR BIT(7) +#define AIR_PHY_LED_BLINK_RX_IDLE_ERR BIT(8) +#define AIR_PHY_LED_BLINK_FORCE_BLINK BIT(9) +#define AIR_PHY_LED_BLINK_2500TX BIT(10) +#define AIR_PHY_LED_BLINK_2500RX BIT(11) + +/* Registers on BUCKPBUS */ +#define EN8811H_2P5G_LPA 0x3b30 +#define EN8811H_2P5G_LPA_2P5G BIT(0) + +#define EN8811H_FW_VERSION 0x3b3c + +#define EN8811H_POLARITY 0xca0f8 +#define EN8811H_POLARITY_TX_NORMAL BIT(0) +#define EN8811H_POLARITY_RX_REVERSE BIT(1) + +#define EN8811H_GPIO_OUTPUT 0xcf8b8 +#define EN8811H_GPIO_OUTPUT_345 (BIT(3) | BIT(4) | BIT(5)) + +#define EN8811H_FW_CTRL_1 0x0f0018 +#define EN8811H_FW_CTRL_1_START 0x0 +#define EN8811H_FW_CTRL_1_FINISH 0x1 +#define EN8811H_FW_CTRL_2 0x800000 +#define 
EN8811H_FW_CTRL_2_LOADING BIT(11) + +/* Led definitions */ +#define EN8811H_LED_COUNT 3 + +/* Default LED setup: + * GPIO5 <-> LED0 On: Link detected, blink Rx/Tx + * GPIO4 <-> LED1 On: Link detected at 2500 or 1000 Mbps + * GPIO3 <-> LED2 On: Link detected at 2500 or 100 Mbps + */ +#define AIR_DEFAULT_TRIGGER_LED0 (BIT(TRIGGER_NETDEV_LINK) | \ + BIT(TRIGGER_NETDEV_RX) | \ + BIT(TRIGGER_NETDEV_TX)) +#define AIR_DEFAULT_TRIGGER_LED1 (BIT(TRIGGER_NETDEV_LINK_2500) | \ + BIT(TRIGGER_NETDEV_LINK_1000)) +#define AIR_DEFAULT_TRIGGER_LED2 (BIT(TRIGGER_NETDEV_LINK_2500) | \ + BIT(TRIGGER_NETDEV_LINK_100)) + +struct led { + unsigned long rules; + unsigned long state; +}; + +struct en8811h_priv { + u32 firmware_version; + bool mcu_needs_restart; + struct led led[EN8811H_LED_COUNT]; +}; + +enum { + AIR_PHY_LED_STATE_FORCE_ON, + AIR_PHY_LED_STATE_FORCE_BLINK, +}; + +enum { + AIR_PHY_LED_DUR_BLINK_32MS, + AIR_PHY_LED_DUR_BLINK_64MS, + AIR_PHY_LED_DUR_BLINK_128MS, + AIR_PHY_LED_DUR_BLINK_256MS, + AIR_PHY_LED_DUR_BLINK_512MS, + AIR_PHY_LED_DUR_BLINK_1024MS, +}; + +enum { + AIR_LED_DISABLE, + AIR_LED_ENABLE, +}; + +enum { + AIR_ACTIVE_LOW, + AIR_ACTIVE_HIGH, +}; + +enum { + AIR_LED_MODE_DISABLE, + AIR_LED_MODE_USER_DEFINE, +}; + +#define AIR_PHY_LED_DUR_UNIT 1024 +#define AIR_PHY_LED_DUR (AIR_PHY_LED_DUR_UNIT << AIR_PHY_LED_DUR_BLINK_64MS) + +static const unsigned long en8811h_led_trig = BIT(TRIGGER_NETDEV_FULL_DUPLEX) | + BIT(TRIGGER_NETDEV_LINK) | + BIT(TRIGGER_NETDEV_LINK_10) | + BIT(TRIGGER_NETDEV_LINK_100) | + BIT(TRIGGER_NETDEV_LINK_1000) | + BIT(TRIGGER_NETDEV_LINK_2500) | + BIT(TRIGGER_NETDEV_RX) | + BIT(TRIGGER_NETDEV_TX); + +static int air_phy_read_page(struct phy_device *phydev) +{ + return __phy_read(phydev, AIR_EXT_PAGE_ACCESS); +} + +static int air_phy_write_page(struct phy_device *phydev, int page) +{ + return __phy_write(phydev, AIR_EXT_PAGE_ACCESS, page); +} + +static int __air_buckpbus_reg_write(struct phy_device *phydev, + u32 pbus_address, u32 pbus_data) +{ + int ret; + + ret = __phy_write(phydev, AIR_BPBUS_MODE, AIR_BPBUS_MODE_ADDR_FIXED); + if (ret < 0) + return ret; + + ret = __phy_write(phydev, AIR_BPBUS_WR_ADDR_HIGH, + upper_16_bits(pbus_address)); + if (ret < 0) + return ret; + + ret = __phy_write(phydev, AIR_BPBUS_WR_ADDR_LOW, + lower_16_bits(pbus_address)); + if (ret < 0) + return ret; + + ret = __phy_write(phydev, AIR_BPBUS_WR_DATA_HIGH, + upper_16_bits(pbus_data)); + if (ret < 0) + return ret; + + ret = __phy_write(phydev, AIR_BPBUS_WR_DATA_LOW, + lower_16_bits(pbus_data)); + if (ret < 0) + return ret; + + return 0; +} + +static int air_buckpbus_reg_write(struct phy_device *phydev, + u32 pbus_address, u32 pbus_data) +{ + int saved_page; + int ret = 0; + + saved_page = phy_select_page(phydev, AIR_PHY_PAGE_EXTENDED_4); + + if (saved_page >= 0) { + ret = __air_buckpbus_reg_write(phydev, pbus_address, + pbus_data); + if (ret < 0) + phydev_err(phydev, "%s 0x%08x failed: %d\n", __func__, + pbus_address, ret); + } + + return phy_restore_page(phydev, saved_page, ret); +} + +static int __air_buckpbus_reg_read(struct phy_device *phydev, + u32 pbus_address, u32 *pbus_data) +{ + int pbus_data_low, pbus_data_high; + int ret; + + ret = __phy_write(phydev, AIR_BPBUS_MODE, AIR_BPBUS_MODE_ADDR_FIXED); + if (ret < 0) + return ret; + + ret = __phy_write(phydev, AIR_BPBUS_RD_ADDR_HIGH, + upper_16_bits(pbus_address)); + if (ret < 0) + return ret; + + ret = __phy_write(phydev, AIR_BPBUS_RD_ADDR_LOW, + lower_16_bits(pbus_address)); + if (ret < 0) + return ret; + + pbus_data_high = 
__phy_read(phydev, AIR_BPBUS_RD_DATA_HIGH); + if (pbus_data_high < 0) + return pbus_data_high; + + pbus_data_low = __phy_read(phydev, AIR_BPBUS_RD_DATA_LOW); + if (pbus_data_low < 0) + return pbus_data_low; + + *pbus_data = pbus_data_low | (pbus_data_high << 16); + return 0; +} + +static int air_buckpbus_reg_read(struct phy_device *phydev, + u32 pbus_address, u32 *pbus_data) +{ + int saved_page; + int ret = 0; + + saved_page = phy_select_page(phydev, AIR_PHY_PAGE_EXTENDED_4); + + if (saved_page >= 0) { + ret = __air_buckpbus_reg_read(phydev, pbus_address, pbus_data); + if (ret < 0) + phydev_err(phydev, "%s 0x%08x failed: %d\n", __func__, + pbus_address, ret); + } + + return phy_restore_page(phydev, saved_page, ret); +} + +static int __air_buckpbus_reg_modify(struct phy_device *phydev, + u32 pbus_address, u32 mask, u32 set) +{ + int pbus_data_low, pbus_data_high; + u32 pbus_data_old, pbus_data_new; + int ret; + + ret = __phy_write(phydev, AIR_BPBUS_MODE, AIR_BPBUS_MODE_ADDR_FIXED); + if (ret < 0) + return ret; + + ret = __phy_write(phydev, AIR_BPBUS_RD_ADDR_HIGH, + upper_16_bits(pbus_address)); + if (ret < 0) + return ret; + + ret = __phy_write(phydev, AIR_BPBUS_RD_ADDR_LOW, + lower_16_bits(pbus_address)); + if (ret < 0) + return ret; + + pbus_data_high = __phy_read(phydev, AIR_BPBUS_RD_DATA_HIGH); + if (pbus_data_high < 0) + return pbus_data_high; + + pbus_data_low = __phy_read(phydev, AIR_BPBUS_RD_DATA_LOW); + if (pbus_data_low < 0) + return pbus_data_low; + + pbus_data_old = pbus_data_low | (pbus_data_high << 16); + pbus_data_new = (pbus_data_old & ~mask) | set; + if (pbus_data_new == pbus_data_old) + return 0; + + ret = __phy_write(phydev, AIR_BPBUS_WR_ADDR_HIGH, + upper_16_bits(pbus_address)); + if (ret < 0) + return ret; + + ret = __phy_write(phydev, AIR_BPBUS_WR_ADDR_LOW, + lower_16_bits(pbus_address)); + if (ret < 0) + return ret; + + ret = __phy_write(phydev, AIR_BPBUS_WR_DATA_HIGH, + upper_16_bits(pbus_data_new)); + if (ret < 0) + return ret; + + ret = __phy_write(phydev, AIR_BPBUS_WR_DATA_LOW, + lower_16_bits(pbus_data_new)); + if (ret < 0) + return ret; + + return 0; +} + +static int air_buckpbus_reg_modify(struct phy_device *phydev, + u32 pbus_address, u32 mask, u32 set) +{ + int saved_page; + int ret = 0; + + saved_page = phy_select_page(phydev, AIR_PHY_PAGE_EXTENDED_4); + + if (saved_page >= 0) { + ret = __air_buckpbus_reg_modify(phydev, pbus_address, mask, + set); + if (ret < 0) + phydev_err(phydev, "%s 0x%08x failed: %d\n", __func__, + pbus_address, ret); + } + + return phy_restore_page(phydev, saved_page, ret); +} + +static int __air_write_buf(struct phy_device *phydev, u32 address, + const struct firmware *fw) +{ + unsigned int offset; + int ret; + u16 val; + + ret = __phy_write(phydev, AIR_BPBUS_MODE, AIR_BPBUS_MODE_ADDR_INCR); + if (ret < 0) + return ret; + + ret = __phy_write(phydev, AIR_BPBUS_WR_ADDR_HIGH, + upper_16_bits(address)); + if (ret < 0) + return ret; + + ret = __phy_write(phydev, AIR_BPBUS_WR_ADDR_LOW, + lower_16_bits(address)); + if (ret < 0) + return ret; + + for (offset = 0; offset < fw->size; offset += 4) { + val = get_unaligned_le16(&fw->data[offset + 2]); + ret = __phy_write(phydev, AIR_BPBUS_WR_DATA_HIGH, val); + if (ret < 0) + return ret; + + val = get_unaligned_le16(&fw->data[offset]); + ret = __phy_write(phydev, AIR_BPBUS_WR_DATA_LOW, val); + if (ret < 0) + return ret; + } + + return 0; +} + +static int air_write_buf(struct phy_device *phydev, u32 address, + const struct firmware *fw) +{ + int saved_page; + int ret = 0; + + saved_page = 
phy_select_page(phydev, AIR_PHY_PAGE_EXTENDED_4); + + if (saved_page >= 0) { + ret = __air_write_buf(phydev, address, fw); + if (ret < 0) + phydev_err(phydev, "%s 0x%08x failed: %d\n", __func__, + address, ret); + } + + return phy_restore_page(phydev, saved_page, ret); +} + +static int en8811h_wait_mcu_ready(struct phy_device *phydev) +{ + int ret, reg_value; + + /* Because of mdio-lock, may have to wait for multiple loads */ + ret = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, + EN8811H_PHY_FW_STATUS, reg_value, + reg_value == EN8811H_PHY_READY, + 20000, 7500000, true); + if (ret) { + phydev_err(phydev, "MCU not ready: 0x%x\n", reg_value); + return -ENODEV; + } + + return 0; +} + +static int en8811h_load_firmware(struct phy_device *phydev) +{ + struct en8811h_priv *priv = phydev->priv; + struct device *dev = &phydev->mdio.dev; + const struct firmware *fw1, *fw2; + int ret; + + ret = request_firmware_direct(&fw1, EN8811H_MD32_DM, dev); + if (ret < 0) + return ret; + + ret = request_firmware_direct(&fw2, EN8811H_MD32_DSP, dev); + if (ret < 0) + goto en8811h_load_firmware_rel1; + + ret = air_buckpbus_reg_write(phydev, EN8811H_FW_CTRL_1, + EN8811H_FW_CTRL_1_START); + if (ret < 0) + goto en8811h_load_firmware_out; + + ret = air_buckpbus_reg_modify(phydev, EN8811H_FW_CTRL_2, + EN8811H_FW_CTRL_2_LOADING, + EN8811H_FW_CTRL_2_LOADING); + if (ret < 0) + goto en8811h_load_firmware_out; + + ret = air_write_buf(phydev, AIR_FW_ADDR_DM, fw1); + if (ret < 0) + goto en8811h_load_firmware_out; + + ret = air_write_buf(phydev, AIR_FW_ADDR_DSP, fw2); + if (ret < 0) + goto en8811h_load_firmware_out; + + ret = air_buckpbus_reg_modify(phydev, EN8811H_FW_CTRL_2, + EN8811H_FW_CTRL_2_LOADING, 0); + if (ret < 0) + goto en8811h_load_firmware_out; + + ret = air_buckpbus_reg_write(phydev, EN8811H_FW_CTRL_1, + EN8811H_FW_CTRL_1_FINISH); + if (ret < 0) + goto en8811h_load_firmware_out; + + ret = en8811h_wait_mcu_ready(phydev); + + air_buckpbus_reg_read(phydev, EN8811H_FW_VERSION, + &priv->firmware_version); + phydev_info(phydev, "MD32 firmware version: %08x\n", + priv->firmware_version); + +en8811h_load_firmware_out: + release_firmware(fw2); + +en8811h_load_firmware_rel1: + release_firmware(fw1); + + if (ret < 0) + phydev_err(phydev, "Load firmware failed: %d\n", ret); + + return ret; +} + +static int en8811h_restart_mcu(struct phy_device *phydev) +{ + int ret; + + ret = air_buckpbus_reg_write(phydev, EN8811H_FW_CTRL_1, + EN8811H_FW_CTRL_1_START); + if (ret < 0) + return ret; + + ret = air_buckpbus_reg_write(phydev, EN8811H_FW_CTRL_1, + EN8811H_FW_CTRL_1_FINISH); + if (ret < 0) + return ret; + + return en8811h_wait_mcu_ready(phydev); +} + +static int air_hw_led_on_set(struct phy_device *phydev, u8 index, bool on) +{ + struct en8811h_priv *priv = phydev->priv; + bool changed; + + if (index >= EN8811H_LED_COUNT) + return -EINVAL; + + if (on) + changed = !test_and_set_bit(AIR_PHY_LED_STATE_FORCE_ON, + &priv->led[index].state); + else + changed = !!test_and_clear_bit(AIR_PHY_LED_STATE_FORCE_ON, + &priv->led[index].state); + + changed |= (priv->led[index].rules != 0); + + /* clear netdev trigger rules in case LED_OFF has been set */ + if (!on) + priv->led[index].rules = 0; + + if (changed) + return phy_modify_mmd(phydev, MDIO_MMD_VEND2, + AIR_PHY_LED_ON(index), + AIR_PHY_LED_ON_MASK, + on ? 
AIR_PHY_LED_ON_FORCE_ON : 0); + + return 0; +} + +static int air_hw_led_blink_set(struct phy_device *phydev, u8 index, + bool blinking) +{ + struct en8811h_priv *priv = phydev->priv; + bool changed; + + if (index >= EN8811H_LED_COUNT) + return -EINVAL; + + if (blinking) + changed = !test_and_set_bit(AIR_PHY_LED_STATE_FORCE_BLINK, + &priv->led[index].state); + else + changed = !!test_and_clear_bit(AIR_PHY_LED_STATE_FORCE_BLINK, + &priv->led[index].state); + + changed |= (priv->led[index].rules != 0); + + if (changed) + return phy_write_mmd(phydev, MDIO_MMD_VEND2, + AIR_PHY_LED_BLINK(index), + blinking ? + AIR_PHY_LED_BLINK_FORCE_BLINK : 0); + else + return 0; +} + +static int air_led_blink_set(struct phy_device *phydev, u8 index, + unsigned long *delay_on, + unsigned long *delay_off) +{ + struct en8811h_priv *priv = phydev->priv; + bool blinking = false; + int err; + + if (index >= EN8811H_LED_COUNT) + return -EINVAL; + + if (delay_on && delay_off && (*delay_on > 0) && (*delay_off > 0)) { + blinking = true; + *delay_on = 50; + *delay_off = 50; + } + + err = air_hw_led_blink_set(phydev, index, blinking); + if (err) + return err; + + /* led-blink set, so switch led-on off */ + err = air_hw_led_on_set(phydev, index, false); + if (err) + return err; + + /* hw-control is off*/ + if (!!test_bit(AIR_PHY_LED_STATE_FORCE_BLINK, &priv->led[index].state)) + priv->led[index].rules = 0; + + return 0; +} + +static int air_led_brightness_set(struct phy_device *phydev, u8 index, + enum led_brightness value) +{ + struct en8811h_priv *priv = phydev->priv; + int err; + + if (index >= EN8811H_LED_COUNT) + return -EINVAL; + + /* led-on set, so switch led-blink off */ + err = air_hw_led_blink_set(phydev, index, false); + if (err) + return err; + + err = air_hw_led_on_set(phydev, index, (value != LED_OFF)); + if (err) + return err; + + /* hw-control is off */ + if (!!test_bit(AIR_PHY_LED_STATE_FORCE_ON, &priv->led[index].state)) + priv->led[index].rules = 0; + + return 0; +} + +static int air_led_hw_control_get(struct phy_device *phydev, u8 index, + unsigned long *rules) +{ + struct en8811h_priv *priv = phydev->priv; + + if (index >= EN8811H_LED_COUNT) + return -EINVAL; + + *rules = priv->led[index].rules; + + return 0; +}; + +static int air_led_hw_control_set(struct phy_device *phydev, u8 index, + unsigned long rules) +{ + struct en8811h_priv *priv = phydev->priv; + u16 on = 0, blink = 0; + int ret; + + if (index >= EN8811H_LED_COUNT) + return -EINVAL; + + priv->led[index].rules = rules; + + if (rules & BIT(TRIGGER_NETDEV_FULL_DUPLEX)) + on |= AIR_PHY_LED_ON_FDX; + + if (rules & (BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_LINK))) + on |= AIR_PHY_LED_ON_LINK10; + + if (rules & (BIT(TRIGGER_NETDEV_LINK_100) | BIT(TRIGGER_NETDEV_LINK))) + on |= AIR_PHY_LED_ON_LINK100; + + if (rules & (BIT(TRIGGER_NETDEV_LINK_1000) | BIT(TRIGGER_NETDEV_LINK))) + on |= AIR_PHY_LED_ON_LINK1000; + + if (rules & (BIT(TRIGGER_NETDEV_LINK_2500) | BIT(TRIGGER_NETDEV_LINK))) + on |= AIR_PHY_LED_ON_LINK2500; + + if (rules & BIT(TRIGGER_NETDEV_RX)) { + blink |= AIR_PHY_LED_BLINK_10RX | + AIR_PHY_LED_BLINK_100RX | + AIR_PHY_LED_BLINK_1000RX | + AIR_PHY_LED_BLINK_2500RX; + } + + if (rules & BIT(TRIGGER_NETDEV_TX)) { + blink |= AIR_PHY_LED_BLINK_10TX | + AIR_PHY_LED_BLINK_100TX | + AIR_PHY_LED_BLINK_1000TX | + AIR_PHY_LED_BLINK_2500TX; + } + + if (blink || on) { + /* switch hw-control on, so led-on and led-blink are off */ + clear_bit(AIR_PHY_LED_STATE_FORCE_ON, + &priv->led[index].state); + clear_bit(AIR_PHY_LED_STATE_FORCE_BLINK, + 
&priv->led[index].state); + } else { + priv->led[index].rules = 0; + } + + ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, AIR_PHY_LED_ON(index), + AIR_PHY_LED_ON_MASK, on); + + if (ret < 0) + return ret; + + return phy_write_mmd(phydev, MDIO_MMD_VEND2, AIR_PHY_LED_BLINK(index), + blink); +}; + +static int air_led_init(struct phy_device *phydev, u8 index, u8 state, u8 pol) +{ + int val = 0; + int err; + + if (index >= EN8811H_LED_COUNT) + return -EINVAL; + + if (state == AIR_LED_ENABLE) + val |= AIR_PHY_LED_ON_ENABLE; + else + val &= ~AIR_PHY_LED_ON_ENABLE; + + if (pol == AIR_ACTIVE_HIGH) + val |= AIR_PHY_LED_ON_POLARITY; + else + val &= ~AIR_PHY_LED_ON_POLARITY; + + err = phy_modify_mmd(phydev, MDIO_MMD_VEND2, AIR_PHY_LED_ON(index), + AIR_PHY_LED_ON_ENABLE | + AIR_PHY_LED_ON_POLARITY, val); + + if (err < 0) + return err; + + return 0; +} + +static int air_leds_init(struct phy_device *phydev, int num, int dur, int mode) +{ + struct en8811h_priv *priv = phydev->priv; + int ret, i; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, AIR_PHY_LED_DUR_BLINK, + dur); + if (ret < 0) + return ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, AIR_PHY_LED_DUR_ON, + dur >> 1); + if (ret < 0) + return ret; + + switch (mode) { + case AIR_LED_MODE_DISABLE: + ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, AIR_PHY_LED_BCR, + AIR_PHY_LED_BCR_EXT_CTRL | + AIR_PHY_LED_BCR_MODE_MASK, 0); + if (ret < 0) + return ret; + break; + case AIR_LED_MODE_USER_DEFINE: + ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, AIR_PHY_LED_BCR, + AIR_PHY_LED_BCR_EXT_CTRL | + AIR_PHY_LED_BCR_CLK_EN, + AIR_PHY_LED_BCR_EXT_CTRL | + AIR_PHY_LED_BCR_CLK_EN); + if (ret < 0) + return ret; + break; + default: + phydev_err(phydev, "LED mode %d is not supported\n", mode); + return -EINVAL; + } + + for (i = 0; i < num; ++i) { + ret = air_led_init(phydev, i, AIR_LED_ENABLE, AIR_ACTIVE_HIGH); + if (ret < 0) { + phydev_err(phydev, "LED%d init failed: %d\n", i, ret); + return ret; + } + air_led_hw_control_set(phydev, i, priv->led[i].rules); + } + + return 0; +} + +static int en8811h_led_hw_is_supported(struct phy_device *phydev, u8 index, + unsigned long rules) +{ + if (index >= EN8811H_LED_COUNT) + return -EINVAL; + + /* All combinations of the supported triggers are allowed */ + if (rules & ~en8811h_led_trig) + return -EOPNOTSUPP; + + return 0; +}; + +static int en8811h_probe(struct phy_device *phydev) +{ + struct en8811h_priv *priv; + int ret; + + priv = devm_kzalloc(&phydev->mdio.dev, sizeof(struct en8811h_priv), + GFP_KERNEL); + if (!priv) + return -ENOMEM; + phydev->priv = priv; + + ret = en8811h_load_firmware(phydev); + if (ret < 0) + return ret; + + /* mcu has just restarted after firmware load */ + priv->mcu_needs_restart = false; + + priv->led[0].rules = AIR_DEFAULT_TRIGGER_LED0; + priv->led[1].rules = AIR_DEFAULT_TRIGGER_LED1; + priv->led[2].rules = AIR_DEFAULT_TRIGGER_LED2; + + /* MDIO_DEVS1/2 empty, so set mmds_present bits here */ + phydev->c45_ids.mmds_present |= MDIO_DEVS_PMAPMD | MDIO_DEVS_AN; + + ret = air_leds_init(phydev, EN8811H_LED_COUNT, AIR_PHY_LED_DUR, + AIR_LED_MODE_DISABLE); + if (ret < 0) { + phydev_err(phydev, "Failed to disable leds: %d\n", ret); + return ret; + } + + /* Configure led gpio pins as output */ + ret = air_buckpbus_reg_modify(phydev, EN8811H_GPIO_OUTPUT, + EN8811H_GPIO_OUTPUT_345, + EN8811H_GPIO_OUTPUT_345); + if (ret < 0) + return ret; + + return 0; +} + +static int en8811h_config_init(struct phy_device *phydev) +{ + struct en8811h_priv *priv = phydev->priv; + struct device *dev = &phydev->mdio.dev; + u32 
pbus_value; + int ret; + + /* If restart happened in .probe(), no need to restart now */ + if (priv->mcu_needs_restart) { + ret = en8811h_restart_mcu(phydev); + if (ret < 0) + return ret; + } else { + /* Next calls to .config_init() mcu needs to restart */ + priv->mcu_needs_restart = true; + } + + /* Select mode 1, the only mode supported. + * Configures the SerDes for 2500Base-X with rate adaptation + */ + ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, AIR_PHY_MCU_CMD_1, + AIR_PHY_MCU_CMD_1_MODE1); + if (ret < 0) + return ret; + ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, AIR_PHY_MCU_CMD_2, + AIR_PHY_MCU_CMD_2_MODE1); + if (ret < 0) + return ret; + ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, AIR_PHY_MCU_CMD_3, + AIR_PHY_MCU_CMD_3_MODE1); + if (ret < 0) + return ret; + ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, AIR_PHY_MCU_CMD_4, + AIR_PHY_MCU_CMD_4_MODE1); + if (ret < 0) + return ret; + + /* Serdes polarity */ + pbus_value = 0; + if (device_property_read_bool(dev, "airoha,pnswap-rx")) + pbus_value |= EN8811H_POLARITY_RX_REVERSE; + else + pbus_value &= ~EN8811H_POLARITY_RX_REVERSE; + if (device_property_read_bool(dev, "airoha,pnswap-tx")) + pbus_value &= ~EN8811H_POLARITY_TX_NORMAL; + else + pbus_value |= EN8811H_POLARITY_TX_NORMAL; + ret = air_buckpbus_reg_modify(phydev, EN8811H_POLARITY, + EN8811H_POLARITY_RX_REVERSE | + EN8811H_POLARITY_TX_NORMAL, pbus_value); + if (ret < 0) + return ret; + + ret = air_leds_init(phydev, EN8811H_LED_COUNT, AIR_PHY_LED_DUR, + AIR_LED_MODE_USER_DEFINE); + if (ret < 0) { + phydev_err(phydev, "Failed to initialize leds: %d\n", ret); + return ret; + } + + return 0; +} + +static int en8811h_get_features(struct phy_device *phydev) +{ + linkmode_set_bit_array(phy_basic_ports_array, + ARRAY_SIZE(phy_basic_ports_array), + phydev->supported); + + return genphy_c45_pma_read_abilities(phydev); +} + +static int en8811h_get_rate_matching(struct phy_device *phydev, + phy_interface_t iface) +{ + return RATE_MATCH_PAUSE; +} + +static int en8811h_config_aneg(struct phy_device *phydev) +{ + bool changed = false; + int ret; + u32 adv; + + if (phydev->autoneg == AUTONEG_DISABLE) { + phydev_warn(phydev, "Disabling autoneg is not supported\n"); + return -EINVAL; + } + + adv = linkmode_adv_to_mii_10gbt_adv_t(phydev->advertising); + + ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL, + MDIO_AN_10GBT_CTRL_ADV2_5G, adv); + if (ret < 0) + return ret; + if (ret > 0) + changed = true; + + return __genphy_config_aneg(phydev, changed); +} + +static int en8811h_read_status(struct phy_device *phydev) +{ + struct en8811h_priv *priv = phydev->priv; + u32 pbus_value; + int ret, val; + + ret = genphy_update_link(phydev); + if (ret) + return ret; + + phydev->master_slave_get = MASTER_SLAVE_CFG_UNSUPPORTED; + phydev->master_slave_state = MASTER_SLAVE_STATE_UNSUPPORTED; + phydev->speed = SPEED_UNKNOWN; + phydev->duplex = DUPLEX_UNKNOWN; + phydev->pause = 0; + phydev->asym_pause = 0; + phydev->rate_matching = RATE_MATCH_PAUSE; + + ret = genphy_read_master_slave(phydev); + if (ret < 0) + return ret; + + ret = genphy_read_lpa(phydev); + if (ret < 0) + return ret; + + /* Get link partner 2.5GBASE-T ability from vendor register */ + ret = air_buckpbus_reg_read(phydev, EN8811H_2P5G_LPA, &pbus_value); + if (ret < 0) + return ret; + linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, + phydev->lp_advertising, + pbus_value & EN8811H_2P5G_LPA_2P5G); + + if (phydev->autoneg_complete) + phy_resolve_aneg_pause(phydev); + + if (!phydev->link) + return 0; + + /* Get real speed from vendor 
register */ + val = phy_read(phydev, AIR_AUX_CTRL_STATUS); + if (val < 0) + return val; + switch (val & AIR_AUX_CTRL_STATUS_SPEED_MASK) { + case AIR_AUX_CTRL_STATUS_SPEED_2500: + phydev->speed = SPEED_2500; + break; + case AIR_AUX_CTRL_STATUS_SPEED_1000: + phydev->speed = SPEED_1000; + break; + case AIR_AUX_CTRL_STATUS_SPEED_100: + phydev->speed = SPEED_100; + break; + } + + /* Firmware before version 24011202 has no vendor register 2P5G_LPA. + * Assume link partner advertised it if connected at 2500Mbps. + */ + if (priv->firmware_version < 0x24011202) { + linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, + phydev->lp_advertising, + phydev->speed == SPEED_2500); + } + + /* Only supports full duplex */ + phydev->duplex = DUPLEX_FULL; + + return 0; +} + +static int en8811h_clear_intr(struct phy_device *phydev) +{ + int ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, AIR_PHY_MCU_CMD_3, + AIR_PHY_MCU_CMD_3_DOCMD); + if (ret < 0) + return ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, AIR_PHY_MCU_CMD_4, + AIR_PHY_MCU_CMD_4_INTCLR); + if (ret < 0) + return ret; + + return 0; +} + +static irqreturn_t en8811h_handle_interrupt(struct phy_device *phydev) +{ + int ret; + + ret = en8811h_clear_intr(phydev); + if (ret < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; +} + +static struct phy_driver en8811h_driver[] = { +{ + PHY_ID_MATCH_MODEL(EN8811H_PHY_ID), + .name = "Airoha EN8811H", + .probe = en8811h_probe, + .get_features = en8811h_get_features, + .config_init = en8811h_config_init, + .get_rate_matching = en8811h_get_rate_matching, + .config_aneg = en8811h_config_aneg, + .read_status = en8811h_read_status, + .config_intr = en8811h_clear_intr, + .handle_interrupt = en8811h_handle_interrupt, + .led_hw_is_supported = en8811h_led_hw_is_supported, + .read_page = air_phy_read_page, + .write_page = air_phy_write_page, + .led_blink_set = air_led_blink_set, + .led_brightness_set = air_led_brightness_set, + .led_hw_control_set = air_led_hw_control_set, + .led_hw_control_get = air_led_hw_control_get, +} }; + +module_phy_driver(en8811h_driver); + +static struct mdio_device_id __maybe_unused en8811h_tbl[] = { + { PHY_ID_MATCH_MODEL(EN8811H_PHY_ID) }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, en8811h_tbl); +MODULE_FIRMWARE(EN8811H_MD32_DM); +MODULE_FIRMWARE(EN8811H_MD32_DSP); + +MODULE_DESCRIPTION("Airoha EN8811H PHY drivers"); +MODULE_AUTHOR("Airoha"); +MODULE_AUTHOR("Eric Woudstra <[email protected]>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/aquantia/aquantia_main.c b/drivers/net/phy/aquantia/aquantia_main.c index 71bfddb8f453..d34cdec47636 100644 --- a/drivers/net/phy/aquantia/aquantia_main.c +++ b/drivers/net/phy/aquantia/aquantia_main.c @@ -28,6 +28,7 @@ #define PHY_ID_AQR412 0x03a1b712 #define PHY_ID_AQR113 0x31c31c40 #define PHY_ID_AQR113C 0x31c31c12 +#define PHY_ID_AQR114C 0x31c31c22 #define PHY_ID_AQR813 0x31c31cb2 #define MDIO_PHYXS_VEND_IF_STATUS 0xe812 @@ -963,6 +964,25 @@ static struct phy_driver aqr_driver[] = { .link_change_notify = aqr107_link_change_notify, }, { + PHY_ID_MATCH_MODEL(PHY_ID_AQR114C), + .name = "Aquantia AQR114C", + .probe = aqr107_probe, + .get_rate_matching = aqr107_get_rate_matching, + .config_init = aqr111_config_init, + .config_aneg = aqr_config_aneg, + .config_intr = aqr_config_intr, + .handle_interrupt = aqr_handle_interrupt, + .read_status = aqr107_read_status, + .get_tunable = aqr107_get_tunable, + .set_tunable = aqr107_set_tunable, + .suspend = aqr107_suspend, + .resume = aqr107_resume, 
+ .get_sset_count = aqr107_get_sset_count, + .get_strings = aqr107_get_strings, + .get_stats = aqr107_get_stats, + .link_change_notify = aqr107_link_change_notify, +}, +{ PHY_ID_MATCH_MODEL(PHY_ID_AQR813), .name = "Aquantia AQR813", .probe = aqr107_probe, @@ -999,6 +1019,7 @@ static struct mdio_device_id __maybe_unused aqr_tbl[] = { { PHY_ID_MATCH_MODEL(PHY_ID_AQR412) }, { PHY_ID_MATCH_MODEL(PHY_ID_AQR113) }, { PHY_ID_MATCH_MODEL(PHY_ID_AQR113C) }, + { PHY_ID_MATCH_MODEL(PHY_ID_AQR114C) }, { PHY_ID_MATCH_MODEL(PHY_ID_AQR813) }, { } }; diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index c3426a17e6d0..efeb643c1373 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -140,10 +140,11 @@ struct dp83822_private { u16 fx_sd_enable; u8 cfg_dac_minus; u8 cfg_dac_plus; + struct ethtool_wolinfo wol; }; -static int dp83822_set_wol(struct phy_device *phydev, - struct ethtool_wolinfo *wol) +static int dp83822_config_wol(struct phy_device *phydev, + struct ethtool_wolinfo *wol) { struct net_device *ndev = phydev->attached_dev; u16 value; @@ -197,10 +198,25 @@ static int dp83822_set_wol(struct phy_device *phydev, MII_DP83822_WOL_CFG, value); } else { return phy_clear_bits_mmd(phydev, DP83822_DEVADDR, - MII_DP83822_WOL_CFG, DP83822_WOL_EN); + MII_DP83822_WOL_CFG, + DP83822_WOL_EN | + DP83822_WOL_MAGIC_EN | + DP83822_WOL_SECURE_ON); } } +static int dp83822_set_wol(struct phy_device *phydev, + struct ethtool_wolinfo *wol) +{ + struct dp83822_private *dp83822 = phydev->priv; + int ret; + + ret = dp83822_config_wol(phydev, wol); + if (!ret) + memcpy(&dp83822->wol, wol, sizeof(*wol)); + return ret; +} + static void dp83822_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) { @@ -346,13 +362,6 @@ static irqreturn_t dp83822_handle_interrupt(struct phy_device *phydev) return IRQ_HANDLED; } -static int dp8382x_disable_wol(struct phy_device *phydev) -{ - return phy_clear_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG, - DP83822_WOL_EN | DP83822_WOL_MAGIC_EN | - DP83822_WOL_SECURE_ON); -} - static int dp83822_read_status(struct phy_device *phydev) { struct dp83822_private *dp83822 = phydev->priv; @@ -496,7 +505,7 @@ static int dp83822_config_init(struct phy_device *phydev) return err; } } - return dp8382x_disable_wol(phydev); + return dp83822_config_wol(phydev, &dp83822->wol); } static int dp83826_config_rmii_mode(struct phy_device *phydev) @@ -575,12 +584,14 @@ static int dp83826_config_init(struct phy_device *phydev) return ret; } - return dp8382x_disable_wol(phydev); + return dp83822_config_wol(phydev, &dp83822->wol); } static int dp8382x_config_init(struct phy_device *phydev) { - return dp8382x_disable_wol(phydev); + struct dp83822_private *dp83822 = phydev->priv; + + return dp83822_config_wol(phydev, &dp83822->wol); } static int dp83822_phy_reset(struct phy_device *phydev) diff --git a/drivers/net/phy/marvell-88q2xxx.c b/drivers/net/phy/marvell-88q2xxx.c index 6b4bd9883304..c812f16eaa3a 100644 --- a/drivers/net/phy/marvell-88q2xxx.c +++ b/drivers/net/phy/marvell-88q2xxx.c @@ -12,6 +12,8 @@ #include <linux/hwmon.h> #define PHY_ID_88Q2220_REVB0 (MARVELL_PHY_ID_88Q2220 | 0x1) +#define PHY_ID_88Q2220_REVB1 (MARVELL_PHY_ID_88Q2220 | 0x2) +#define PHY_ID_88Q2220_REVB2 (MARVELL_PHY_ID_88Q2220 | 0x3) #define MDIO_MMD_AN_MV_STAT 32769 #define MDIO_MMD_AN_MV_STAT_ANEG 0x0100 @@ -129,6 +131,49 @@ static const struct mmd_val mv88q222x_revb0_init_seq1[] = { { MDIO_MMD_PCS, 0xfe05, 0x755c }, }; +static const struct mmd_val mv88q222x_revb1_init_seq0[] = { + { 
MDIO_MMD_PCS, 0xffe4, 0x0007 }, + { MDIO_MMD_AN, MDIO_AN_T1_CTRL, 0x0 }, + { MDIO_MMD_PCS, 0xffe3, 0x7000 }, + { MDIO_MMD_PMAPMD, MDIO_CTRL1, 0x0840 }, +}; + +static const struct mmd_val mv88q222x_revb2_init_seq0[] = { + { MDIO_MMD_PCS, 0xffe4, 0x0007 }, + { MDIO_MMD_AN, MDIO_AN_T1_CTRL, 0x0 }, + { MDIO_MMD_PMAPMD, MDIO_CTRL1, 0x0840 }, +}; + +static const struct mmd_val mv88q222x_revb1_revb2_init_seq1[] = { + { MDIO_MMD_PCS, 0xfe07, 0x125a }, + { MDIO_MMD_PCS, 0xfe09, 0x1288 }, + { MDIO_MMD_PCS, 0xfe08, 0x2588 }, + { MDIO_MMD_PCS, 0xfe72, 0x042c }, + { MDIO_MMD_PCS, 0xffe4, 0x0071 }, + { MDIO_MMD_PCS, 0xffe4, 0x0001 }, + { MDIO_MMD_PCS, 0xfe1b, 0x0048 }, + { MDIO_MMD_PMAPMD, 0x0000, 0x0000 }, + { MDIO_MMD_PCS, 0x0000, 0x0000 }, + { MDIO_MMD_PCS, 0xffdb, 0xfc10 }, + { MDIO_MMD_PCS, 0xfe1b, 0x58 }, + { MDIO_MMD_PCS, 0xfcad, 0x030c }, + { MDIO_MMD_PCS, 0x8032, 0x6001 }, + { MDIO_MMD_PCS, 0xfdff, 0x05a5 }, + { MDIO_MMD_PCS, 0xfdec, 0xdbaf }, + { MDIO_MMD_PCS, 0xfcab, 0x1054 }, + { MDIO_MMD_PCS, 0xfcac, 0x1483 }, + { MDIO_MMD_PCS, 0x8033, 0xc801 }, + { MDIO_MMD_AN, 0x8032, 0x2020 }, + { MDIO_MMD_AN, 0x8031, 0xa28 }, + { MDIO_MMD_AN, 0x8031, 0xc28 }, + { MDIO_MMD_PCS, 0xfbba, 0x0cb2 }, + { MDIO_MMD_PCS, 0xfbbb, 0x0c4a }, + { MDIO_MMD_PCS, 0xfe5f, 0xe8 }, + { MDIO_MMD_PCS, 0xfe05, 0x755c }, + { MDIO_MMD_PCS, 0xfa20, 0x002a }, + { MDIO_MMD_PCS, 0xfe11, 0x1105 }, +}; + static int mv88q2xxx_soft_reset(struct phy_device *phydev) { int ret; @@ -687,31 +732,72 @@ static int mv88q222x_soft_reset(struct phy_device *phydev) return 0; } -static int mv88q222x_revb0_config_init(struct phy_device *phydev) +static int mv88q222x_write_mmd_vals(struct phy_device *phydev, + const struct mmd_val *vals, size_t len) { - int ret, i; + int ret; - for (i = 0; i < ARRAY_SIZE(mv88q222x_revb0_init_seq0); i++) { - ret = phy_write_mmd(phydev, mv88q222x_revb0_init_seq0[i].devad, - mv88q222x_revb0_init_seq0[i].regnum, - mv88q222x_revb0_init_seq0[i].val); + for (; len; vals++, len--) { + ret = phy_write_mmd(phydev, vals->devad, vals->regnum, + vals->val); if (ret < 0) return ret; } + return 0; +} + +static int mv88q222x_revb0_config_init(struct phy_device *phydev) +{ + int ret; + + ret = mv88q222x_write_mmd_vals(phydev, mv88q222x_revb0_init_seq0, + ARRAY_SIZE(mv88q222x_revb0_init_seq0)); + if (ret < 0) + return ret; + usleep_range(5000, 10000); - for (i = 0; i < ARRAY_SIZE(mv88q222x_revb0_init_seq1); i++) { - ret = phy_write_mmd(phydev, mv88q222x_revb0_init_seq1[i].devad, - mv88q222x_revb0_init_seq1[i].regnum, - mv88q222x_revb0_init_seq1[i].val); - if (ret < 0) - return ret; - } + ret = mv88q222x_write_mmd_vals(phydev, mv88q222x_revb0_init_seq1, + ARRAY_SIZE(mv88q222x_revb0_init_seq1)); + if (ret < 0) + return ret; + + return mv88q2xxx_config_init(phydev); +} + +static int mv88q222x_revb1_revb2_config_init(struct phy_device *phydev) +{ + bool is_rev_b1 = phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] == PHY_ID_88Q2220_REVB1; + int ret; + + if (is_rev_b1) + ret = mv88q222x_write_mmd_vals(phydev, mv88q222x_revb1_init_seq0, + ARRAY_SIZE(mv88q222x_revb1_init_seq0)); + else + ret = mv88q222x_write_mmd_vals(phydev, mv88q222x_revb2_init_seq0, + ARRAY_SIZE(mv88q222x_revb2_init_seq0)); + if (ret < 0) + return ret; + + usleep_range(3000, 5000); + + ret = mv88q222x_write_mmd_vals(phydev, mv88q222x_revb1_revb2_init_seq1, + ARRAY_SIZE(mv88q222x_revb1_revb2_init_seq1)); + if (ret < 0) + return ret; return mv88q2xxx_config_init(phydev); } +static int mv88q222x_config_init(struct phy_device *phydev) +{ + if 
(phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] == PHY_ID_88Q2220_REVB0) + return mv88q222x_revb0_config_init(phydev); + else + return mv88q222x_revb1_revb2_config_init(phydev); +} + static int mv88q222x_cable_test_start(struct phy_device *phydev) { int ret; @@ -810,14 +896,15 @@ static struct phy_driver mv88q2xxx_driver[] = { .get_sqi_max = mv88q2xxx_get_sqi_max, }, { - PHY_ID_MATCH_EXACT(PHY_ID_88Q2220_REVB0), + .phy_id = MARVELL_PHY_ID_88Q2220, + .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "mv88q2220", .flags = PHY_POLL_CABLE_TEST, .probe = mv88q2xxx_probe, .get_features = mv88q2xxx_get_features, .config_aneg = mv88q2xxx_config_aneg, .aneg_done = genphy_c45_aneg_done, - .config_init = mv88q222x_revb0_config_init, + .config_init = mv88q222x_config_init, .read_status = mv88q2xxx_read_status, .soft_reset = mv88q222x_soft_reset, .config_intr = mv88q2xxx_config_intr, @@ -836,7 +923,7 @@ module_phy_driver(mv88q2xxx_driver); static struct mdio_device_id __maybe_unused mv88q2xxx_tbl[] = { { MARVELL_PHY_ID_88Q2110, MARVELL_PHY_ID_MASK }, - { PHY_ID_MATCH_EXACT(PHY_ID_88Q2220_REVB0), }, + { MARVELL_PHY_ID_88Q2220, MARVELL_PHY_ID_MASK }, { /*sentinel*/ } }; MODULE_DEVICE_TABLE(mdio, mv88q2xxx_tbl); diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 42ed013385bf..b89fbffa6a93 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -279,10 +279,29 @@ #define MII_VCT7_CTRL_METERS BIT(10) #define MII_VCT7_CTRL_CENTIMETERS 0 +#define MII_VCT_TXPINS 0x1A +#define MII_VCT_RXPINS 0x1B +#define MII_VCT_SR 0x1C +#define MII_VCT_TXPINS_ENVCT BIT(15) +#define MII_VCT_TXRXPINS_VCTTST GENMASK(14, 13) +#define MII_VCT_TXRXPINS_VCTTST_SHIFT 13 +#define MII_VCT_TXRXPINS_VCTTST_OK 0 +#define MII_VCT_TXRXPINS_VCTTST_SHORT 1 +#define MII_VCT_TXRXPINS_VCTTST_OPEN 2 +#define MII_VCT_TXRXPINS_VCTTST_FAIL 3 +#define MII_VCT_TXRXPINS_AMPRFLN GENMASK(12, 8) +#define MII_VCT_TXRXPINS_AMPRFLN_SHIFT 8 +#define MII_VCT_TXRXPINS_DISTRFLN GENMASK(7, 0) +#define MII_VCT_TXRXPINS_DISTRFLN_MAX 0xff + +#define M88E3082_PAIR_A BIT(0) +#define M88E3082_PAIR_B BIT(1) + #define LPA_PAUSE_FIBER 0x180 #define LPA_PAUSE_ASYM_FIBER 0x100 #define NB_FIBER_STATS 1 +#define NB_STAT_MAX 3 MODULE_DESCRIPTION("Marvell PHY driver"); MODULE_AUTHOR("Andy Fleming"); @@ -295,14 +314,37 @@ struct marvell_hw_stat { u8 bits; }; -static struct marvell_hw_stat marvell_hw_stats[] = { +static const struct marvell_hw_stat marvell_hw_stats[] = { { "phy_receive_errors_copper", 0, 21, 16}, { "phy_idle_errors", 0, 10, 8 }, { "phy_receive_errors_fiber", 1, 21, 16}, }; +static_assert(ARRAY_SIZE(marvell_hw_stats) <= NB_STAT_MAX); + +/* "simple" stat list + corresponding marvell_get_*_simple functions are used + * on PHYs without a page register + */ +struct marvell_hw_stat_simple { + const char *string; + u8 reg; + u8 bits; +}; + +static const struct marvell_hw_stat_simple marvell_hw_stats_simple[] = { + { "phy_receive_errors", 21, 16}, +}; + +static_assert(ARRAY_SIZE(marvell_hw_stats_simple) <= NB_STAT_MAX); + +enum { + M88E3082_VCT_OFF, + M88E3082_VCT_PHASE1, + M88E3082_VCT_PHASE2, +}; + struct marvell_priv { - u64 stats[ARRAY_SIZE(marvell_hw_stats)]; + u64 stats[NB_STAT_MAX]; char *hwmon_name; struct device *hwmon_dev; bool cable_test_tdr; @@ -310,6 +352,7 @@ struct marvell_priv { u32 last; u32 step; s8 pair; + u8 vct_phase; }; static int marvell_read_page(struct phy_device *phydev) @@ -1953,6 +1996,11 @@ static int marvell_get_sset_count(struct phy_device *phydev) return ARRAY_SIZE(marvell_hw_stats) - NB_FIBER_STATS; } 
+static int marvell_get_sset_count_simple(struct phy_device *phydev) +{ + return ARRAY_SIZE(marvell_hw_stats_simple); +} + static void marvell_get_strings(struct phy_device *phydev, u8 *data) { int count = marvell_get_sset_count(phydev); @@ -1964,6 +2012,17 @@ static void marvell_get_strings(struct phy_device *phydev, u8 *data) } } +static void marvell_get_strings_simple(struct phy_device *phydev, u8 *data) +{ + int count = marvell_get_sset_count_simple(phydev); + int i; + + for (i = 0; i < count; i++) { + strscpy(data + i * ETH_GSTRING_LEN, + marvell_hw_stats_simple[i].string, ETH_GSTRING_LEN); + } +} + static u64 marvell_get_stat(struct phy_device *phydev, int i) { struct marvell_hw_stat stat = marvell_hw_stats[i]; @@ -1983,6 +2042,25 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i) return ret; } +static u64 marvell_get_stat_simple(struct phy_device *phydev, int i) +{ + struct marvell_hw_stat_simple stat = marvell_hw_stats_simple[i]; + struct marvell_priv *priv = phydev->priv; + int val; + u64 ret; + + val = phy_read(phydev, stat.reg); + if (val < 0) { + ret = U64_MAX; + } else { + val = val & ((1 << stat.bits) - 1); + priv->stats[i] += val; + ret = priv->stats[i]; + } + + return ret; +} + static void marvell_get_stats(struct phy_device *phydev, struct ethtool_stats *stats, u64 *data) { @@ -1993,6 +2071,16 @@ static void marvell_get_stats(struct phy_device *phydev, data[i] = marvell_get_stat(phydev, i); } +static void marvell_get_stats_simple(struct phy_device *phydev, + struct ethtool_stats *stats, u64 *data) +{ + int count = marvell_get_sset_count_simple(phydev); + int i; + + for (i = 0; i < count; i++) + data[i] = marvell_get_stat_simple(phydev, i); +} + static int m88e1510_loopback(struct phy_device *phydev, bool enable) { int err; @@ -2417,6 +2505,274 @@ static int marvell_vct7_cable_test_get_status(struct phy_device *phydev, return 0; } +static int m88e3082_vct_cable_test_start(struct phy_device *phydev) +{ + struct marvell_priv *priv = phydev->priv; + int ret; + + /* It needs some magic workarounds described in VCT manual for this PHY. 
+ */ + ret = phy_write(phydev, 29, 0x0003); + if (ret < 0) + return ret; + + ret = phy_write(phydev, 30, 0x6440); + if (ret < 0) + return ret; + + if (priv->vct_phase == M88E3082_VCT_PHASE1) { + ret = phy_write(phydev, 29, 0x000a); + if (ret < 0) + return ret; + + ret = phy_write(phydev, 30, 0x0002); + if (ret < 0) + return ret; + } + + ret = phy_write(phydev, MII_BMCR, + BMCR_RESET | BMCR_SPEED100 | BMCR_FULLDPLX); + if (ret < 0) + return ret; + + ret = phy_write(phydev, MII_VCT_TXPINS, MII_VCT_TXPINS_ENVCT); + if (ret < 0) + return ret; + + ret = phy_write(phydev, 29, 0x0003); + if (ret < 0) + return ret; + + ret = phy_write(phydev, 30, 0x0); + if (ret < 0) + return ret; + + if (priv->vct_phase == M88E3082_VCT_OFF) { + priv->vct_phase = M88E3082_VCT_PHASE1; + priv->pair = 0; + + return 0; + } + + ret = phy_write(phydev, 29, 0x000a); + if (ret < 0) + return ret; + + ret = phy_write(phydev, 30, 0x0); + if (ret < 0) + return ret; + + priv->vct_phase = M88E3082_VCT_PHASE2; + + return 0; +} + +static int m88e3082_vct_cable_test_report_trans(int result, u8 distance) +{ + switch (result) { + case MII_VCT_TXRXPINS_VCTTST_OK: + if (distance == MII_VCT_TXRXPINS_DISTRFLN_MAX) + return ETHTOOL_A_CABLE_RESULT_CODE_OK; + return ETHTOOL_A_CABLE_RESULT_CODE_IMPEDANCE_MISMATCH; + case MII_VCT_TXRXPINS_VCTTST_SHORT: + return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT; + case MII_VCT_TXRXPINS_VCTTST_OPEN: + return ETHTOOL_A_CABLE_RESULT_CODE_OPEN; + default: + return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC; + } +} + +static u32 m88e3082_vct_distrfln_2_cm(u8 distrfln) +{ + if (distrfln < 24) + return 0; + + /* Original function for meters: y = 0.7861x - 18.862 */ + return (7861 * distrfln - 188620) / 100; +} + +static int m88e3082_vct_cable_test_get_status(struct phy_device *phydev, + bool *finished) +{ + u8 tx_vcttst_res, rx_vcttst_res, tx_distrfln, rx_distrfln; + struct marvell_priv *priv = phydev->priv; + int ret, tx_result, rx_result; + bool done_phase = true; + + *finished = false; + + ret = phy_read(phydev, MII_VCT_TXPINS); + if (ret < 0) + return ret; + else if (ret & MII_VCT_TXPINS_ENVCT) + return 0; + + tx_distrfln = ret & MII_VCT_TXRXPINS_DISTRFLN; + tx_vcttst_res = (ret & MII_VCT_TXRXPINS_VCTTST) >> + MII_VCT_TXRXPINS_VCTTST_SHIFT; + + ret = phy_read(phydev, MII_VCT_RXPINS); + if (ret < 0) + return ret; + + rx_distrfln = ret & MII_VCT_TXRXPINS_DISTRFLN; + rx_vcttst_res = (ret & MII_VCT_TXRXPINS_VCTTST) >> + MII_VCT_TXRXPINS_VCTTST_SHIFT; + + *finished = true; + + switch (priv->vct_phase) { + case M88E3082_VCT_PHASE1: + tx_result = m88e3082_vct_cable_test_report_trans(tx_vcttst_res, + tx_distrfln); + rx_result = m88e3082_vct_cable_test_report_trans(rx_vcttst_res, + rx_distrfln); + + ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A, + tx_result); + ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_B, + rx_result); + + if (tx_vcttst_res == MII_VCT_TXRXPINS_VCTTST_OPEN) { + done_phase = false; + priv->pair |= M88E3082_PAIR_A; + } else if (tx_distrfln < MII_VCT_TXRXPINS_DISTRFLN_MAX) { + u8 pair = ETHTOOL_A_CABLE_PAIR_A; + u32 cm = m88e3082_vct_distrfln_2_cm(tx_distrfln); + + ethnl_cable_test_fault_length(phydev, pair, cm); + } + + if (rx_vcttst_res == MII_VCT_TXRXPINS_VCTTST_OPEN) { + done_phase = false; + priv->pair |= M88E3082_PAIR_B; + } else if (rx_distrfln < MII_VCT_TXRXPINS_DISTRFLN_MAX) { + u8 pair = ETHTOOL_A_CABLE_PAIR_B; + u32 cm = m88e3082_vct_distrfln_2_cm(rx_distrfln); + + ethnl_cable_test_fault_length(phydev, pair, cm); + } + + break; + case M88E3082_VCT_PHASE2: + if (priv->pair & 
M88E3082_PAIR_A && + tx_vcttst_res == MII_VCT_TXRXPINS_VCTTST_OPEN && + tx_distrfln < MII_VCT_TXRXPINS_DISTRFLN_MAX) { + u8 pair = ETHTOOL_A_CABLE_PAIR_A; + u32 cm = m88e3082_vct_distrfln_2_cm(tx_distrfln); + + ethnl_cable_test_fault_length(phydev, pair, cm); + } + if (priv->pair & M88E3082_PAIR_B && + rx_vcttst_res == MII_VCT_TXRXPINS_VCTTST_OPEN && + rx_distrfln < MII_VCT_TXRXPINS_DISTRFLN_MAX) { + u8 pair = ETHTOOL_A_CABLE_PAIR_B; + u32 cm = m88e3082_vct_distrfln_2_cm(rx_distrfln); + + ethnl_cable_test_fault_length(phydev, pair, cm); + } + + break; + default: + return -EINVAL; + } + + if (!done_phase) { + *finished = false; + return m88e3082_vct_cable_test_start(phydev); + } + if (*finished) + priv->vct_phase = M88E3082_VCT_OFF; + return 0; +} + +static int m88e1111_vct_cable_test_start(struct phy_device *phydev) +{ + int ret; + + ret = marvell_cable_test_start_common(phydev); + if (ret) + return ret; + + /* It needs some magic workarounds described in VCT manual for this PHY. + */ + ret = phy_write(phydev, 29, 0x0018); + if (ret < 0) + return ret; + + ret = phy_write(phydev, 30, 0x00c2); + if (ret < 0) + return ret; + + ret = phy_write(phydev, 30, 0x00ca); + if (ret < 0) + return ret; + + ret = phy_write(phydev, 30, 0x00c2); + if (ret < 0) + return ret; + + ret = phy_write_paged(phydev, MII_MARVELL_COPPER_PAGE, MII_VCT_SR, + MII_VCT_TXPINS_ENVCT); + if (ret < 0) + return ret; + + ret = phy_write(phydev, 29, 0x0018); + if (ret < 0) + return ret; + + ret = phy_write(phydev, 30, 0x0042); + if (ret < 0) + return ret; + + return 0; +} + +static u32 m88e1111_vct_distrfln_2_cm(u8 distrfln) +{ + if (distrfln < 36) + return 0; + + /* Original function for meters: y = 0.8018x - 28.751 */ + return (8018 * distrfln - 287510) / 100; +} + +static int m88e1111_vct_cable_test_get_status(struct phy_device *phydev, + bool *finished) +{ + u8 vcttst_res, distrfln; + int ret, result; + + *finished = false; + + /* Each pair use one page: A-0, B-1, C-2, D-3 */ + for (u8 i = 0; i < 4; i++) { + ret = phy_read_paged(phydev, i, MII_VCT_SR); + if (ret < 0) + return ret; + else if (i == 0 && ret & MII_VCT_TXPINS_ENVCT) + return 0; + + distrfln = ret & MII_VCT_TXRXPINS_DISTRFLN; + vcttst_res = (ret & MII_VCT_TXRXPINS_VCTTST) >> + MII_VCT_TXRXPINS_VCTTST_SHIFT; + + result = m88e3082_vct_cable_test_report_trans(vcttst_res, + distrfln); + ethnl_cable_test_result(phydev, i, result); + + if (distrfln < MII_VCT_TXRXPINS_DISTRFLN_MAX) { + u32 cm = m88e1111_vct_distrfln_2_cm(distrfln); + + ethnl_cable_test_fault_length(phydev, i, cm); + } + } + + *finished = true; + return 0; +} + #ifdef CONFIG_HWMON struct marvell_hwmon_ops { int (*config)(struct phy_device *phydev); @@ -3290,6 +3646,20 @@ static struct phy_driver marvell_drivers[] = { .get_stats = marvell_get_stats, }, { + .phy_id = MARVELL_PHY_ID_88E3082, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E308X/88E609X Family", + /* PHY_BASIC_FEATURES */ + .probe = marvell_probe, + .config_init = marvell_config_init, + .aneg_done = marvell_aneg_done, + .read_status = marvell_read_status, + .resume = genphy_resume, + .suspend = genphy_suspend, + .cable_test_start = m88e3082_vct_cable_test_start, + .cable_test_get_status = m88e3082_vct_cable_test_get_status, + }, + { .phy_id = MARVELL_PHY_ID_88E1112, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1112", @@ -3314,6 +3684,7 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1111", /* PHY_GBIT_FEATURES */ + .flags = PHY_POLL_CABLE_TEST, .probe = 
marvell_probe, .config_init = m88e1111gbe_config_init, .config_aneg = m88e1111_config_aneg, @@ -3329,6 +3700,8 @@ static struct phy_driver marvell_drivers[] = { .get_stats = marvell_get_stats, .get_tunable = m88e1111_get_tunable, .set_tunable = m88e1111_set_tunable, + .cable_test_start = m88e1111_vct_cable_test_start, + .cable_test_get_status = m88e1111_vct_cable_test_get_status, }, { .phy_id = MARVELL_PHY_ID_88E1111_FINISAR, @@ -3422,6 +3795,7 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1145", /* PHY_GBIT_FEATURES */ + .flags = PHY_POLL_CABLE_TEST, .probe = marvell_probe, .config_init = m88e1145_config_init, .config_aneg = m88e1101_config_aneg, @@ -3436,6 +3810,8 @@ static struct phy_driver marvell_drivers[] = { .get_stats = marvell_get_stats, .get_tunable = m88e1111_get_tunable, .set_tunable = m88e1111_set_tunable, + .cable_test_start = m88e1111_vct_cable_test_start, + .cable_test_get_status = m88e1111_vct_cable_test_get_status, }, { .phy_id = MARVELL_PHY_ID_88E1149R, @@ -3610,6 +3986,21 @@ static struct phy_driver marvell_drivers[] = { .get_stats = marvell_get_stats, }, { + .phy_id = MARVELL_PHY_ID_88E6250_FAMILY, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E6250 Family", + /* PHY_BASIC_FEATURES */ + .probe = marvell_probe, + .aneg_done = marvell_aneg_done, + .config_intr = marvell_config_intr, + .handle_interrupt = marvell_handle_interrupt, + .resume = genphy_resume, + .suspend = genphy_suspend, + .get_sset_count = marvell_get_sset_count_simple, + .get_strings = marvell_get_strings_simple, + .get_stats = marvell_get_stats_simple, + }, + { .phy_id = MARVELL_PHY_ID_88E6341_FAMILY, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E6341 Family", @@ -3742,6 +4133,7 @@ module_phy_driver(marvell_drivers); static struct mdio_device_id __maybe_unused marvell_tbl[] = { { MARVELL_PHY_ID_88E1101, MARVELL_PHY_ID_MASK }, + { MARVELL_PHY_ID_88E3082, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1112, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1111, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1111_FINISAR, MARVELL_PHY_ID_MASK }, @@ -3756,6 +4148,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = { { MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1545, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK }, + { MARVELL_PHY_ID_88E6250_FAMILY, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E6341_FAMILY, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E6390_FAMILY, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E6393_FAMILY, MARVELL_PHY_ID_MASK }, diff --git a/drivers/net/phy/mediatek-ge.c b/drivers/net/phy/mediatek-ge.c index a493ae01b267..54ea64a37ab3 100644 --- a/drivers/net/phy/mediatek-ge.c +++ b/drivers/net/phy/mediatek-ge.c @@ -23,9 +23,6 @@ static int mtk_gephy_write_page(struct phy_device *phydev, int page) static void mtk_gephy_config_init(struct phy_device *phydev) { - /* Disable EEE */ - phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0); - /* Enable HW auto downshift */ phy_modify_paged(phydev, MTK_PHY_PAGE_EXTENDED, 0x14, 0, BIT(4)); diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index ddb50a0e2bc8..13e30ea7eec5 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -167,6 +167,9 @@ #define PTP_CMD_CTL_PTP_LTC_STEP_SEC_ BIT(5) #define PTP_CMD_CTL_PTP_LTC_STEP_NSEC_ BIT(6) +#define PTP_COMMON_INT_ENA 0x0204 +#define PTP_COMMON_INT_ENA_GPIO_CAP_EN BIT(2) + #define PTP_CLOCK_SET_SEC_HI 0x0205 #define PTP_CLOCK_SET_SEC_MID 0x0206 #define 
PTP_CLOCK_SET_SEC_LO 0x0207 @@ -179,6 +182,27 @@ #define PTP_CLOCK_READ_NS_HI 0x022C #define PTP_CLOCK_READ_NS_LO 0x022D +#define PTP_GPIO_SEL 0x0230 +#define PTP_GPIO_SEL_GPIO_SEL(pin) ((pin) << 8) +#define PTP_GPIO_CAP_MAP_LO 0x0232 + +#define PTP_GPIO_CAP_EN 0x0233 +#define PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(gpio) BIT(gpio) +#define PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(gpio) (BIT(gpio) << 8) + +#define PTP_GPIO_RE_LTC_SEC_HI_CAP 0x0235 +#define PTP_GPIO_RE_LTC_SEC_LO_CAP 0x0236 +#define PTP_GPIO_RE_LTC_NS_HI_CAP 0x0237 +#define PTP_GPIO_RE_LTC_NS_LO_CAP 0x0238 +#define PTP_GPIO_FE_LTC_SEC_HI_CAP 0x0239 +#define PTP_GPIO_FE_LTC_SEC_LO_CAP 0x023A +#define PTP_GPIO_FE_LTC_NS_HI_CAP 0x023B +#define PTP_GPIO_FE_LTC_NS_LO_CAP 0x023C + +#define PTP_GPIO_CAP_STS 0x023D +#define PTP_GPIO_CAP_STS_PTP_GPIO_RE_STS(gpio) BIT(gpio) +#define PTP_GPIO_CAP_STS_PTP_GPIO_FE_STS(gpio) (BIT(gpio) << 8) + #define PTP_OPERATING_MODE 0x0241 #define PTP_OPERATING_MODE_STANDALONE_ BIT(0) @@ -272,6 +296,67 @@ #define PS_TO_REG 200 #define FIFO_SIZE 8 +#define LAN8814_PTP_GPIO_NUM 24 +#define LAN8814_PTP_PEROUT_NUM 2 +#define LAN8814_PTP_EXTTS_NUM 3 + +#define LAN8814_BUFFER_TIME 2 + +#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_200MS 13 +#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_100MS 12 +#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_50MS 11 +#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_10MS 10 +#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_5MS 9 +#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_1MS 8 +#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_500US 7 +#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_100US 6 +#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_50US 5 +#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_10US 4 +#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_5US 3 +#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_1US 2 +#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_500NS 1 +#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_100NS 0 + +#define LAN8814_GPIO_EN1 0x20 +#define LAN8814_GPIO_EN2 0x21 +#define LAN8814_GPIO_DIR1 0x22 +#define LAN8814_GPIO_DIR2 0x23 +#define LAN8814_GPIO_BUF1 0x24 +#define LAN8814_GPIO_BUF2 0x25 + +#define LAN8814_GPIO_EN_ADDR(pin) \ + ((pin) > 15 ? LAN8814_GPIO_EN1 : LAN8814_GPIO_EN2) +#define LAN8814_GPIO_EN_BIT(pin) BIT(pin) +#define LAN8814_GPIO_DIR_ADDR(pin) \ + ((pin) > 15 ? LAN8814_GPIO_DIR1 : LAN8814_GPIO_DIR2) +#define LAN8814_GPIO_DIR_BIT(pin) BIT(pin) +#define LAN8814_GPIO_BUF_ADDR(pin) \ + ((pin) > 15 ? LAN8814_GPIO_BUF1 : LAN8814_GPIO_BUF2) +#define LAN8814_GPIO_BUF_BIT(pin) BIT(pin) + +#define LAN8814_EVENT_A 0 +#define LAN8814_EVENT_B 1 + +#define LAN8814_PTP_GENERAL_CONFIG 0x0201 +#define LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_MASK(event) \ + ((event) ? GENMASK(11, 8) : GENMASK(7, 4)) +#define LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_SET(event, value) \ + (((value) & GENMASK(3, 0)) << (4 + ((event) << 2))) +#define LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event) \ + ((event) ? BIT(2) : BIT(0)) +#define LAN8814_PTP_GENERAL_CONFIG_POLARITY_X(event) \ + ((event) ? BIT(3) : BIT(1)) + +#define LAN8814_PTP_CLOCK_TARGET_SEC_HI(event) ((event) ? 0x21F : 0x215) +#define LAN8814_PTP_CLOCK_TARGET_SEC_LO(event) ((event) ? 0x220 : 0x216) +#define LAN8814_PTP_CLOCK_TARGET_NS_HI(event) ((event) ? 0x221 : 0x217) +#define LAN8814_PTP_CLOCK_TARGET_NS_LO(event) ((event) ? 0x222 : 0x218) + +#define LAN8814_PTP_CLOCK_TARGET_RELOAD_SEC_HI(event) ((event) ? 0x223 : 0x219) +#define LAN8814_PTP_CLOCK_TARGET_RELOAD_SEC_LO(event) ((event) ? 
0x224 : 0x21A) +#define LAN8814_PTP_CLOCK_TARGET_RELOAD_NS_HI(event) ((event) ? 0x225 : 0x21B) +#define LAN8814_PTP_CLOCK_TARGET_RELOAD_NS_LO(event) ((event) ? 0x226 : 0x21C) + /* Delay used to get the second part from the LTC */ #define LAN8841_GET_SEC_LTC_DELAY (500 * NSEC_PER_MSEC) @@ -304,13 +389,9 @@ struct lan8814_shared_priv { struct phy_device *phydev; struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_clock_info; + struct ptp_pin_desc *pin_config; - /* Reference counter to how many ports in the package are enabling the - * timestamping - */ - u8 ref; - - /* Lock for ptp_clock and ref */ + /* Lock for ptp_clock */ struct mutex shared_lock; }; @@ -2426,8 +2507,6 @@ static int lan8814_hwtstamp(struct mii_timestamper *mii_ts, { struct kszphy_ptp_priv *ptp_priv = container_of(mii_ts, struct kszphy_ptp_priv, mii_ts); - struct phy_device *phydev = ptp_priv->phydev; - struct lan8814_shared_priv *shared = phydev->shared->priv; struct lan8814_ptp_rx_ts *rx_ts, *tmp; int txcfg = 0, rxcfg = 0; int pkt_ts_enable; @@ -2492,20 +2571,6 @@ static int lan8814_hwtstamp(struct mii_timestamper *mii_ts, else lan8814_config_ts_intr(ptp_priv->phydev, false); - mutex_lock(&shared->shared_lock); - if (config->rx_filter != HWTSTAMP_FILTER_NONE) - shared->ref++; - else - shared->ref--; - - if (shared->ref) - lanphy_write_page_reg(ptp_priv->phydev, 4, PTP_CMD_CTL, - PTP_CMD_CTL_PTP_ENABLE_); - else - lanphy_write_page_reg(ptp_priv->phydev, 4, PTP_CMD_CTL, - PTP_CMD_CTL_PTP_DISABLE_); - mutex_unlock(&shared->shared_lock); - /* In case of multiple starts and stops, these needs to be cleared */ list_for_each_entry_safe(rx_ts, tmp, &ptp_priv->rx_ts_list, list) { list_del(&rx_ts->list); @@ -2677,6 +2742,29 @@ static int lan8814_ptpci_settime64(struct ptp_clock_info *ptpci, return 0; } +static void lan8814_ptp_set_target(struct phy_device *phydev, int event, + s64 start_sec, u32 start_nsec) +{ + /* Set the start time */ + lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_SEC_LO(event), + lower_16_bits(start_sec)); + lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_SEC_HI(event), + upper_16_bits(start_sec)); + + lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_NS_LO(event), + lower_16_bits(start_nsec)); + lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_NS_HI(event), + upper_16_bits(start_nsec) & 0x3fff); +} + +static void lan8814_ptp_update_target(struct phy_device *phydev, time64_t sec) +{ + lan8814_ptp_set_target(phydev, LAN8814_EVENT_A, + sec + LAN8814_BUFFER_TIME, 0); + lan8814_ptp_set_target(phydev, LAN8814_EVENT_B, + sec + LAN8814_BUFFER_TIME, 0); +} + static void lan8814_ptp_clock_step(struct phy_device *phydev, s64 time_step_ns) { @@ -2698,6 +2786,7 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev, nano_seconds -= 1000000000; } lan8814_ptp_clock_set(phydev, set_seconds, nano_seconds); + lan8814_ptp_update_target(phydev, set_seconds); return; } else if (time_step_ns < -15000000000LL) { /* convert to clock set */ @@ -2713,6 +2802,7 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev, } nano_seconds -= nano_seconds_step; lan8814_ptp_clock_set(phydev, set_seconds, nano_seconds); + lan8814_ptp_update_target(phydev, set_seconds); return; } @@ -2749,6 +2839,8 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev, } while (seconds) { + u32 nsec; + if (seconds > 0) { u32 adjustment_value = (u32)seconds; u16 adjustment_value_lo, adjustment_value_hi; @@ -2765,6 +2857,10 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev, 
PTP_LTC_STEP_ADJ_DIR_ | adjustment_value_hi); seconds -= ((s32)adjustment_value); + + lan8814_ptp_clock_get(phydev, &set_seconds, &nsec); + set_seconds -= adjustment_value; + lan8814_ptp_update_target(phydev, set_seconds); } else { u32 adjustment_value = (u32)(-seconds); u16 adjustment_value_lo, adjustment_value_hi; @@ -2780,6 +2876,10 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev, lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_HI, adjustment_value_hi); seconds += ((s32)adjustment_value); + + lan8814_ptp_clock_get(phydev, &set_seconds, &nsec); + set_seconds += adjustment_value; + lan8814_ptp_update_target(phydev, set_seconds); } lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_LTC_STEP_SEC_); @@ -2845,6 +2945,335 @@ static int lan8814_ptpci_adjfine(struct ptp_clock_info *ptpci, long scaled_ppm) return 0; } +static void lan8814_ptp_set_reload(struct phy_device *phydev, int event, + s64 period_sec, u32 period_nsec) +{ + lanphy_write_page_reg(phydev, 4, + LAN8814_PTP_CLOCK_TARGET_RELOAD_SEC_LO(event), + lower_16_bits(period_sec)); + lanphy_write_page_reg(phydev, 4, + LAN8814_PTP_CLOCK_TARGET_RELOAD_SEC_HI(event), + upper_16_bits(period_sec)); + + lanphy_write_page_reg(phydev, 4, + LAN8814_PTP_CLOCK_TARGET_RELOAD_NS_LO(event), + lower_16_bits(period_nsec)); + lanphy_write_page_reg(phydev, 4, + LAN8814_PTP_CLOCK_TARGET_RELOAD_NS_HI(event), + upper_16_bits(period_nsec) & 0x3fff); +} + +static void lan8814_ptp_enable_event(struct phy_device *phydev, int event, + int pulse_width) +{ + u16 val; + + val = lanphy_read_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG); + /* Set the pulse width of the event */ + val &= ~(LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_MASK(event)); + /* Make sure that the target clock will be incremented each time when + * local time reaches or pass it + */ + val |= LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_SET(event, pulse_width); + val &= ~(LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event)); + /* Set the polarity high */ + val |= LAN8814_PTP_GENERAL_CONFIG_POLARITY_X(event); + lanphy_write_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG, val); +} + +static void lan8814_ptp_disable_event(struct phy_device *phydev, int event) +{ + u16 val; + + /* Set target to too far in the future, effectively disabling it */ + lan8814_ptp_set_target(phydev, event, 0xFFFFFFFF, 0); + + /* And then reload once it recheas the target */ + val = lanphy_read_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG); + val |= LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event); + lanphy_write_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG, val); +} + +static void lan8814_ptp_perout_off(struct phy_device *phydev, int pin) +{ + u16 val; + + /* Disable gpio alternate function, + * 1: select as gpio, + * 0: select alt func + */ + val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin)); + val |= LAN8814_GPIO_EN_BIT(pin); + lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin), val); + + val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin)); + val &= ~LAN8814_GPIO_DIR_BIT(pin); + lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin), val); + + val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin)); + val &= ~LAN8814_GPIO_BUF_BIT(pin); + lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin), val); +} + +static void lan8814_ptp_perout_on(struct phy_device *phydev, int pin) +{ + int val; + + /* Set as gpio output */ + val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin)); + val |= LAN8814_GPIO_DIR_BIT(pin); + lanphy_write_page_reg(phydev, 4, 
LAN8814_GPIO_DIR_ADDR(pin), val); + + /* Enable gpio 0:for alternate function, 1:gpio */ + val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin)); + val &= ~LAN8814_GPIO_EN_BIT(pin); + lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin), val); + + /* Set buffer type to push pull */ + val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin)); + val |= LAN8814_GPIO_BUF_BIT(pin); + lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin), val); +} + +static int lan8814_ptp_perout(struct ptp_clock_info *ptpci, + struct ptp_clock_request *rq, int on) +{ + struct lan8814_shared_priv *shared = container_of(ptpci, struct lan8814_shared_priv, + ptp_clock_info); + struct phy_device *phydev = shared->phydev; + struct timespec64 ts_on, ts_period; + s64 on_nsec, period_nsec; + int pulse_width; + int pin, event; + + /* Reject requests with unsupported flags */ + if (rq->perout.flags & ~PTP_PEROUT_DUTY_CYCLE) + return -EOPNOTSUPP; + + mutex_lock(&shared->shared_lock); + event = rq->perout.index; + pin = ptp_find_pin(shared->ptp_clock, PTP_PF_PEROUT, event); + if (pin < 0 || pin >= LAN8814_PTP_PEROUT_NUM) { + mutex_unlock(&shared->shared_lock); + return -EBUSY; + } + + if (!on) { + lan8814_ptp_perout_off(phydev, pin); + lan8814_ptp_disable_event(phydev, event); + mutex_unlock(&shared->shared_lock); + return 0; + } + + ts_on.tv_sec = rq->perout.on.sec; + ts_on.tv_nsec = rq->perout.on.nsec; + on_nsec = timespec64_to_ns(&ts_on); + + ts_period.tv_sec = rq->perout.period.sec; + ts_period.tv_nsec = rq->perout.period.nsec; + period_nsec = timespec64_to_ns(&ts_period); + + if (period_nsec < 200) { + pr_warn_ratelimited("%s: perout period too small, minimum is 200 nsec\n", + phydev_name(phydev)); + mutex_unlock(&shared->shared_lock); + return -EOPNOTSUPP; + } + + if (on_nsec >= period_nsec) { + pr_warn_ratelimited("%s: pulse width must be smaller than period\n", + phydev_name(phydev)); + mutex_unlock(&shared->shared_lock); + return -EINVAL; + } + + switch (on_nsec) { + case 200000000: + pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_200MS; + break; + case 100000000: + pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_100MS; + break; + case 50000000: + pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_50MS; + break; + case 10000000: + pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_10MS; + break; + case 5000000: + pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_5MS; + break; + case 1000000: + pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_1MS; + break; + case 500000: + pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_500US; + break; + case 100000: + pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_100US; + break; + case 50000: + pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_50US; + break; + case 10000: + pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_10US; + break; + case 5000: + pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_5US; + break; + case 1000: + pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_1US; + break; + case 500: + pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_500NS; + break; + case 100: + pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_100NS; + break; + default: + pr_warn_ratelimited("%s: Use default duty cycle of 100ns\n", + phydev_name(phydev)); + pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_100NS; + break; + } + + /* Configure to pulse every period */ + lan8814_ptp_enable_event(phydev, event, pulse_width); + lan8814_ptp_set_target(phydev, event, rq->perout.start.sec, + rq->perout.start.nsec); + 
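/* At this point the event generator is armed: the pulse width has been
 * programmed into LAN8814_PTP_GENERAL_CONFIG and the requested start time
 * sits in the clock-target registers. The two calls below load the period
 * into the reload registers (the hardware adds it to the target each time
 * the LTC reaches it, as configured in lan8814_ptp_enable_event()) and
 * switch the pin to a push-pull GPIO output, so the signal keeps pulsing
 * once per period with no further software involvement.
 *
 * Seen from user space this is an ordinary PTP_PEROUT_REQUEST2 ioctl; a
 * minimal sketch, assuming /dev/ptpN is this clock, the channel has been
 * routed to a pin with PTP_PIN_SETFUNC2 and start_sec lies a little in
 * the future:
 *
 *	struct ptp_perout_request req = {
 *		.start  = { .sec = start_sec },
 *		.period = { .sec = 1 },
 *		.index  = 0,
 *		.flags  = PTP_PEROUT_DUTY_CYCLE,
 *		.on     = { .nsec = 100000000 },
 *	};
 *	ioctl(ptp_fd, PTP_PEROUT_REQUEST2, &req);
 *
 * Here .on.nsec = 100000000 selects the 100 ms pulse width from the table
 * above; start_sec and ptp_fd are placeholders.
 */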
lan8814_ptp_set_reload(phydev, event, rq->perout.period.sec, + rq->perout.period.nsec); + lan8814_ptp_perout_on(phydev, pin); + mutex_unlock(&shared->shared_lock); + + return 0; +} + +static void lan8814_ptp_extts_on(struct phy_device *phydev, int pin, u32 flags) +{ + u16 tmp; + + /* Set as gpio input */ + tmp = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin)); + tmp &= ~LAN8814_GPIO_DIR_BIT(pin); + lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin), tmp); + + /* Map the pin to ltc pin 0 of the capture map registers */ + tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO); + tmp |= pin; + lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO, tmp); + + /* Enable capture on the edges of the ltc pin */ + tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_EN); + if (flags & PTP_RISING_EDGE) + tmp |= PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(0); + if (flags & PTP_FALLING_EDGE) + tmp |= PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(0); + lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_EN, tmp); + + /* Enable interrupt top interrupt */ + tmp = lanphy_read_page_reg(phydev, 4, PTP_COMMON_INT_ENA); + tmp |= PTP_COMMON_INT_ENA_GPIO_CAP_EN; + lanphy_write_page_reg(phydev, 4, PTP_COMMON_INT_ENA, tmp); +} + +static void lan8814_ptp_extts_off(struct phy_device *phydev, int pin) +{ + u16 tmp; + + /* Set as gpio out */ + tmp = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin)); + tmp |= LAN8814_GPIO_DIR_BIT(pin); + lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin), tmp); + + /* Enable alternate, 0:for alternate function, 1:gpio */ + tmp = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin)); + tmp &= ~LAN8814_GPIO_EN_BIT(pin); + lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin), tmp); + + /* Clear the mapping of pin to registers 0 of the capture registers */ + tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO); + tmp &= ~GENMASK(3, 0); + lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO, tmp); + + /* Disable capture on both of the edges */ + tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_EN); + tmp &= ~PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(pin); + tmp &= ~PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(pin); + lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_EN, tmp); + + /* Disable interrupt top interrupt */ + tmp = lanphy_read_page_reg(phydev, 4, PTP_COMMON_INT_ENA); + tmp &= ~PTP_COMMON_INT_ENA_GPIO_CAP_EN; + lanphy_write_page_reg(phydev, 4, PTP_COMMON_INT_ENA, tmp); +} + +static int lan8814_ptp_extts(struct ptp_clock_info *ptpci, + struct ptp_clock_request *rq, int on) +{ + struct lan8814_shared_priv *shared = container_of(ptpci, struct lan8814_shared_priv, + ptp_clock_info); + struct phy_device *phydev = shared->phydev; + int pin; + + if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | + PTP_EXTTS_EDGES | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; + + pin = ptp_find_pin(shared->ptp_clock, PTP_PF_EXTTS, + rq->extts.index); + if (pin == -1 || pin != LAN8814_PTP_EXTTS_NUM) + return -EINVAL; + + mutex_lock(&shared->shared_lock); + if (on) + lan8814_ptp_extts_on(phydev, pin, rq->extts.flags); + else + lan8814_ptp_extts_off(phydev, pin); + + mutex_unlock(&shared->shared_lock); + + return 0; +} + +static int lan8814_ptpci_enable(struct ptp_clock_info *ptpci, + struct ptp_clock_request *rq, int on) +{ + switch (rq->type) { + case PTP_CLK_REQ_PEROUT: + return lan8814_ptp_perout(ptpci, rq, on); + case PTP_CLK_REQ_EXTTS: + return lan8814_ptp_extts(ptpci, rq, on); + default: + return -EINVAL; + } +} + +static int lan8814_ptpci_verify(struct ptp_clock_info *ptp, 
unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + switch (func) { + case PTP_PF_NONE: + case PTP_PF_PEROUT: + /* Only pins 0 and 1 can generate perout signals. And for pin 0 + * there is only chan 0 (event A) and for pin 1 there is only + * chan 1 (event B) + */ + if (pin >= LAN8814_PTP_PEROUT_NUM || pin != chan) + return -1; + break; + case PTP_PF_EXTTS: + if (pin != LAN8814_PTP_EXTTS_NUM) + return -1; + break; + default: + return -1; + } + + return 0; +} + static bool lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig) { struct ptp_header *ptp_header; @@ -3010,6 +3439,64 @@ static void lan8814_handle_ptp_interrupt(struct phy_device *phydev, u16 status) } } +static int lan8814_gpio_process_cap(struct lan8814_shared_priv *shared) +{ + struct phy_device *phydev = shared->phydev; + struct ptp_clock_event ptp_event = {0}; + unsigned long nsec; + s64 sec; + u16 tmp; + + /* This is 0 because whatever was the input pin it was mapped it to + * ltc gpio pin 0 + */ + tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_SEL); + tmp |= PTP_GPIO_SEL_GPIO_SEL(0); + lanphy_write_page_reg(phydev, 4, PTP_GPIO_SEL, tmp); + + tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_STS); + if (!(tmp & PTP_GPIO_CAP_STS_PTP_GPIO_RE_STS(0)) && + !(tmp & PTP_GPIO_CAP_STS_PTP_GPIO_FE_STS(0))) + return -1; + + if (tmp & BIT(0)) { + sec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_SEC_HI_CAP); + sec <<= 16; + sec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_SEC_LO_CAP); + + nsec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_NS_HI_CAP) & 0x3fff; + nsec <<= 16; + nsec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_NS_LO_CAP); + } else { + sec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_FE_LTC_SEC_HI_CAP); + sec <<= 16; + sec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_FE_LTC_SEC_LO_CAP); + + nsec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_FE_LTC_NS_HI_CAP) & 0x3fff; + nsec <<= 16; + nsec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_NS_LO_CAP); + } + + ptp_event.index = 0; + ptp_event.timestamp = ktime_set(sec, nsec); + ptp_event.type = PTP_CLOCK_EXTTS; + ptp_clock_event(shared->ptp_clock, &ptp_event); + + return 0; +} + +static int lan8814_handle_gpio_interrupt(struct phy_device *phydev, u16 status) +{ + struct lan8814_shared_priv *shared = phydev->shared->priv; + int ret; + + mutex_lock(&shared->shared_lock); + ret = lan8814_gpio_process_cap(shared); + mutex_unlock(&shared->shared_lock); + + return ret; +} + static int lan8804_config_init(struct phy_device *phydev) { int val; @@ -3114,6 +3601,9 @@ static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev) ret = IRQ_HANDLED; } + if (!lan8814_handle_gpio_interrupt(phydev, irq_status)) + ret = IRQ_HANDLED; + return ret; } @@ -3210,19 +3700,39 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev) /* Initialise shared lock for clock*/ mutex_init(&shared->shared_lock); + shared->pin_config = devm_kmalloc_array(&phydev->mdio.dev, + LAN8814_PTP_GPIO_NUM, + sizeof(*shared->pin_config), + GFP_KERNEL); + if (!shared->pin_config) + return -ENOMEM; + + for (int i = 0; i < LAN8814_PTP_GPIO_NUM; i++) { + struct ptp_pin_desc *ptp_pin = &shared->pin_config[i]; + + memset(ptp_pin, 0, sizeof(*ptp_pin)); + snprintf(ptp_pin->name, + sizeof(ptp_pin->name), "lan8814_ptp_pin_%02d", i); + ptp_pin->index = i; + ptp_pin->func = PTP_PF_NONE; + } + shared->ptp_clock_info.owner = THIS_MODULE; snprintf(shared->ptp_clock_info.name, 30, "%s", phydev->drv->name); shared->ptp_clock_info.max_adj = 31249999; shared->ptp_clock_info.n_alarm = 0; - 
shared->ptp_clock_info.n_ext_ts = 0; - shared->ptp_clock_info.n_pins = 0; + shared->ptp_clock_info.n_ext_ts = LAN8814_PTP_EXTTS_NUM; + shared->ptp_clock_info.n_pins = LAN8814_PTP_GPIO_NUM; shared->ptp_clock_info.pps = 0; - shared->ptp_clock_info.pin_config = NULL; + shared->ptp_clock_info.pin_config = shared->pin_config; + shared->ptp_clock_info.n_per_out = LAN8814_PTP_PEROUT_NUM; shared->ptp_clock_info.adjfine = lan8814_ptpci_adjfine; shared->ptp_clock_info.adjtime = lan8814_ptpci_adjtime; shared->ptp_clock_info.gettime64 = lan8814_ptpci_gettime64; shared->ptp_clock_info.settime64 = lan8814_ptpci_settime64; shared->ptp_clock_info.getcrosststamp = NULL; + shared->ptp_clock_info.enable = lan8814_ptpci_enable; + shared->ptp_clock_info.verify = lan8814_ptpci_verify; shared->ptp_clock = ptp_clock_register(&shared->ptp_clock_info, &phydev->mdio.dev); @@ -3247,6 +3757,9 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev) lanphy_write_page_reg(phydev, 4, PTP_OPERATING_MODE, PTP_OPERATING_MODE_STANDALONE_); + /* Enable ptp to run LTC clock for ptp and gpio 1PPS operation */ + lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_ENABLE_); + return 0; } @@ -4676,7 +5189,8 @@ static int lan8841_suspend(struct phy_device *phydev) struct kszphy_priv *priv = phydev->priv; struct kszphy_ptp_priv *ptp_priv = &priv->ptp_priv; - ptp_cancel_worker_sync(ptp_priv->ptp_clock); + if (ptp_priv->ptp_clock) + ptp_cancel_worker_sync(ptp_priv->ptp_clock); return genphy_suspend(phydev); } diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 503fd7c40523..994471fad833 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1042,6 +1042,21 @@ static void phylink_pcs_poll_start(struct phylink *pl) mod_timer(&pl->link_poll, jiffies + HZ); } +int phylink_pcs_pre_init(struct phylink *pl, struct phylink_pcs *pcs) +{ + int ret = 0; + + /* Signal to PCS driver that MAC requires RX clock for init */ + if (pl->config->mac_requires_rxc) + pcs->rxc_always_on = true; + + if (pcs->ops->pcs_pre_init) + ret = pcs->ops->pcs_pre_init(pcs); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_pcs_pre_init); + static void phylink_mac_config(struct phylink *pl, const struct phylink_link_state *state) { @@ -1823,6 +1838,9 @@ static int phylink_validate_phy(struct phylink *pl, struct phy_device *phy, interfaces); } + phylink_dbg(pl, "PHY %s doesn't supply possible interfaces\n", + phydev_name(phy)); + /* Check whether we would use rate matching for the proposed interface * mode. 
*/ @@ -1923,6 +1941,8 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy, static int phylink_attach_phy(struct phylink *pl, struct phy_device *phy, phy_interface_t interface) { + u32 flags = 0; + if (WARN_ON(pl->cfg_link_an_mode == MLO_AN_FIXED || (pl->cfg_link_an_mode == MLO_AN_INBAND && phy_interface_mode_is_8023z(interface) && !pl->sfp_bus))) @@ -1931,7 +1951,10 @@ static int phylink_attach_phy(struct phylink *pl, struct phy_device *phy, if (pl->phydev) return -EBUSY; - return phy_attach_direct(pl->netdev, phy, 0, interface); + if (pl->config->mac_requires_rxc) + flags |= PHY_F_RXC_ALWAYS_ON; + + return phy_attach_direct(pl->netdev, phy, flags, interface); } /** @@ -2034,6 +2057,9 @@ int phylink_fwnode_phy_connect(struct phylink *pl, pl->link_config.interface = pl->link_interface; } + if (pl->config->mac_requires_rxc) + flags |= PHY_F_RXC_ALWAYS_ON; + ret = phy_attach_direct(pl->netdev, phy_dev, flags, pl->link_interface); phy_device_free(phy_dev); diff --git a/drivers/net/phy/qcom/at803x.c b/drivers/net/phy/qcom/at803x.c index e79657f76bea..c8f83e5f78ab 100644 --- a/drivers/net/phy/qcom/at803x.c +++ b/drivers/net/phy/qcom/at803x.c @@ -426,7 +426,8 @@ static int at803x_hibernation_mode_config(struct phy_device *phydev) /* The default after hardware reset is hibernation mode enabled. After * software reset, the value is retained. */ - if (!(priv->flags & AT803X_DISABLE_HIBERNATION_MODE)) + if (!(priv->flags & AT803X_DISABLE_HIBERNATION_MODE) && + !(phydev->dev_flags & PHY_F_RXC_ALWAYS_ON)) return 0; return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL, diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 1fa70427b2a2..7ab41f95dae5 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -54,6 +54,25 @@ RTL8201F_ISR_LINK) #define RTL8201F_IER 0x13 +#define RTL822X_VND1_SERDES_OPTION 0x697a +#define RTL822X_VND1_SERDES_OPTION_MODE_MASK GENMASK(5, 0) +#define RTL822X_VND1_SERDES_OPTION_MODE_2500BASEX_SGMII 0 +#define RTL822X_VND1_SERDES_OPTION_MODE_2500BASEX 2 + +#define RTL822X_VND1_SERDES_CTRL3 0x7580 +#define RTL822X_VND1_SERDES_CTRL3_MODE_MASK GENMASK(5, 0) +#define RTL822X_VND1_SERDES_CTRL3_MODE_SGMII 0x02 +#define RTL822X_VND1_SERDES_CTRL3_MODE_2500BASEX 0x16 + +/* RTL822X_VND2_XXXXX registers are only accessible when phydev->is_c45 + * is set, they cannot be accessed by C45-over-C22. 
+ */ +#define RTL822X_VND2_GBCR 0xa412 + +#define RTL822X_VND2_GANLPAR 0xa414 + +#define RTL822X_VND2_PHYSR 0xa434 + #define RTL8366RB_POWER_SAVE 0x15 #define RTL8366RB_POWER_SAVE_ON BIT(12) @@ -64,6 +83,9 @@ #define RTL_GENERIC_PHYID 0x001cc800 #define RTL_8211FVD_PHYID 0x001cc878 +#define RTL_8221B_VB_CG 0x001cc849 +#define RTL_8221B_VN_CG 0x001cc84a +#define RTL_8251B 0x001cc862 MODULE_DESCRIPTION("Realtek PHY driver"); MODULE_AUTHOR("Johnson Leung"); @@ -531,17 +553,8 @@ static int rtl8366rb_config_init(struct phy_device *phydev) } /* get actual speed to cover the downshift case */ -static int rtlgen_get_speed(struct phy_device *phydev) +static void rtlgen_decode_speed(struct phy_device *phydev, int val) { - int val; - - if (!phydev->link) - return 0; - - val = phy_read_paged(phydev, 0xa43, 0x12); - if (val < 0) - return val; - switch (val & RTLGEN_SPEED_MASK) { case 0x0000: phydev->speed = SPEED_10; @@ -564,19 +577,26 @@ static int rtlgen_get_speed(struct phy_device *phydev) default: break; } - - return 0; } static int rtlgen_read_status(struct phy_device *phydev) { - int ret; + int ret, val; ret = genphy_read_status(phydev); if (ret < 0) return ret; - return rtlgen_get_speed(phydev); + if (!phydev->link) + return 0; + + val = phy_read_paged(phydev, 0xa43, 0x12); + if (val < 0) + return val; + + rtlgen_decode_speed(phydev, val); + + return 0; } static int rtlgen_read_mmd(struct phy_device *phydev, int devnum, u16 regnum) @@ -659,6 +679,84 @@ static int rtl822x_write_mmd(struct phy_device *phydev, int devnum, u16 regnum, return ret; } +static int rtl822xb_config_init(struct phy_device *phydev) +{ + bool has_2500, has_sgmii; + u16 mode; + int ret; + + has_2500 = test_bit(PHY_INTERFACE_MODE_2500BASEX, + phydev->host_interfaces) || + phydev->interface == PHY_INTERFACE_MODE_2500BASEX; + + has_sgmii = test_bit(PHY_INTERFACE_MODE_SGMII, + phydev->host_interfaces) || + phydev->interface == PHY_INTERFACE_MODE_SGMII; + + /* fill in possible interfaces */ + __assign_bit(PHY_INTERFACE_MODE_2500BASEX, phydev->possible_interfaces, + has_2500); + __assign_bit(PHY_INTERFACE_MODE_SGMII, phydev->possible_interfaces, + has_sgmii); + + if (!has_2500 && !has_sgmii) + return 0; + + /* determine SerDes option mode */ + if (has_2500 && !has_sgmii) { + mode = RTL822X_VND1_SERDES_OPTION_MODE_2500BASEX; + phydev->rate_matching = RATE_MATCH_PAUSE; + } else { + mode = RTL822X_VND1_SERDES_OPTION_MODE_2500BASEX_SGMII; + phydev->rate_matching = RATE_MATCH_NONE; + } + + /* the following sequence with magic numbers sets up the SerDes + * option mode + */ + ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x75f3, 0); + if (ret < 0) + return ret; + + ret = phy_modify_mmd_changed(phydev, MDIO_MMD_VEND1, + RTL822X_VND1_SERDES_OPTION, + RTL822X_VND1_SERDES_OPTION_MODE_MASK, + mode); + if (ret < 0) + return ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x6a04, 0x0503); + if (ret < 0) + return ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x6f10, 0xd455); + if (ret < 0) + return ret; + + return phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x6f11, 0x8020); +} + +static int rtl822xb_get_rate_matching(struct phy_device *phydev, + phy_interface_t iface) +{ + int val; + + /* Only rate matching at 2500base-x */ + if (iface != PHY_INTERFACE_MODE_2500BASEX) + return RATE_MATCH_NONE; + + val = phy_read_mmd(phydev, MDIO_MMD_VEND1, RTL822X_VND1_SERDES_OPTION); + if (val < 0) + return val; + + if ((val & RTL822X_VND1_SERDES_OPTION_MODE_MASK) == + RTL822X_VND1_SERDES_OPTION_MODE_2500BASEX) + return RATE_MATCH_PAUSE; + + /* 
RTL822X_VND1_SERDES_OPTION_MODE_2500BASEX_SGMII */ + return RATE_MATCH_NONE; +} + static int rtl822x_get_features(struct phy_device *phydev) { int val; @@ -695,10 +793,30 @@ static int rtl822x_config_aneg(struct phy_device *phydev) return __genphy_config_aneg(phydev, ret); } -static int rtl822x_read_status(struct phy_device *phydev) +static void rtl822xb_update_interface(struct phy_device *phydev) { - int ret; + int val; + + if (!phydev->link) + return; + + /* Change interface according to serdes mode */ + val = phy_read_mmd(phydev, MDIO_MMD_VEND1, RTL822X_VND1_SERDES_CTRL3); + if (val < 0) + return; + switch (val & RTL822X_VND1_SERDES_CTRL3_MODE_MASK) { + case RTL822X_VND1_SERDES_CTRL3_MODE_2500BASEX: + phydev->interface = PHY_INTERFACE_MODE_2500BASEX; + break; + case RTL822X_VND1_SERDES_CTRL3_MODE_SGMII: + phydev->interface = PHY_INTERFACE_MODE_SGMII; + break; + } +} + +static int rtl822x_read_status(struct phy_device *phydev) +{ if (phydev->autoneg == AUTONEG_ENABLE) { int lpadv = phy_read_paged(phydev, 0xa5d, 0x13); @@ -709,11 +827,99 @@ static int rtl822x_read_status(struct phy_device *phydev) lpadv); } - ret = genphy_read_status(phydev); + return rtlgen_read_status(phydev); +} + +static int rtl822xb_read_status(struct phy_device *phydev) +{ + int ret; + + ret = rtl822x_read_status(phydev); + if (ret < 0) + return ret; + + rtl822xb_update_interface(phydev); + + return 0; +} + +static int rtl822x_c45_get_features(struct phy_device *phydev) +{ + linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, + phydev->supported); + + return genphy_c45_pma_read_abilities(phydev); +} + +static int rtl822x_c45_config_aneg(struct phy_device *phydev) +{ + bool changed = false; + int ret, val; + + if (phydev->autoneg == AUTONEG_DISABLE) + return genphy_c45_pma_setup_forced(phydev); + + ret = genphy_c45_an_config_aneg(phydev); + if (ret < 0) + return ret; + if (ret > 0) + changed = true; + + val = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising); + + /* Vendor register as C45 has no standardized support for 1000BaseT */ + ret = phy_modify_mmd_changed(phydev, MDIO_MMD_VEND2, RTL822X_VND2_GBCR, + ADVERTISE_1000FULL, val); + if (ret < 0) + return ret; + if (ret > 0) + changed = true; + + return genphy_c45_check_and_restart_aneg(phydev, changed); +} + +static int rtl822x_c45_read_status(struct phy_device *phydev) +{ + int ret, val; + + ret = genphy_c45_read_status(phydev); + if (ret < 0) + return ret; + + /* Vendor register as C45 has no standardized support for 1000BaseT */ + if (phydev->autoneg == AUTONEG_ENABLE) { + val = phy_read_mmd(phydev, MDIO_MMD_VEND2, + RTL822X_VND2_GANLPAR); + if (val < 0) + return val; + + mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, val); + } + + if (!phydev->link) + return 0; + + /* Read actual speed from vendor register. 
*/ + val = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL822X_VND2_PHYSR); + if (val < 0) + return val; + + rtlgen_decode_speed(phydev, val); + + return 0; +} + +static int rtl822xb_c45_read_status(struct phy_device *phydev) +{ + int ret; + + ret = rtl822x_c45_read_status(phydev); if (ret < 0) return ret; - return rtlgen_get_speed(phydev); + rtl822xb_update_interface(phydev); + + return 0; } static bool rtlgen_supports_2_5gbps(struct phy_device *phydev) @@ -739,6 +945,35 @@ static int rtl8226_match_phy_device(struct phy_device *phydev) rtlgen_supports_2_5gbps(phydev); } +static int rtlgen_is_c45_match(struct phy_device *phydev, unsigned int id, + bool is_c45) +{ + if (phydev->is_c45) + return is_c45 && (id == phydev->c45_ids.device_ids[1]); + else + return !is_c45 && (id == phydev->phy_id); +} + +static int rtl8221b_vb_cg_c22_match_phy_device(struct phy_device *phydev) +{ + return rtlgen_is_c45_match(phydev, RTL_8221B_VB_CG, false); +} + +static int rtl8221b_vb_cg_c45_match_phy_device(struct phy_device *phydev) +{ + return rtlgen_is_c45_match(phydev, RTL_8221B_VB_CG, true); +} + +static int rtl8221b_vn_cg_c22_match_phy_device(struct phy_device *phydev) +{ + return rtlgen_is_c45_match(phydev, RTL_8221B_VN_CG, false); +} + +static int rtl8221b_vn_cg_c45_match_phy_device(struct phy_device *phydev) +{ + return rtlgen_is_c45_match(phydev, RTL_8221B_VN_CG, true); +} + static int rtlgen_resume(struct phy_device *phydev) { int ret = genphy_resume(phydev); @@ -749,6 +984,15 @@ static int rtlgen_resume(struct phy_device *phydev) return ret; } +static int rtlgen_c45_resume(struct phy_device *phydev) +{ + int ret = genphy_c45_pma_resume(phydev); + + msleep(20); + + return ret; +} + static int rtl9000a_config_init(struct phy_device *phydev) { phydev->autoneg = AUTONEG_DISABLE; @@ -988,7 +1232,9 @@ static struct phy_driver realtek_drvs[] = { .name = "RTL8226B_RTL8221B 2.5Gbps PHY", .get_features = rtl822x_get_features, .config_aneg = rtl822x_config_aneg, - .read_status = rtl822x_read_status, + .config_init = rtl822xb_config_init, + .get_rate_matching = rtl822xb_get_rate_matching, + .read_status = rtl822xb_read_status, .suspend = genphy_suspend, .resume = rtlgen_resume, .read_page = rtl821x_read_page, @@ -1010,32 +1256,58 @@ static struct phy_driver realtek_drvs[] = { .name = "RTL8226B-CG_RTL8221B-CG 2.5Gbps PHY", .get_features = rtl822x_get_features, .config_aneg = rtl822x_config_aneg, - .read_status = rtl822x_read_status, + .config_init = rtl822xb_config_init, + .get_rate_matching = rtl822xb_get_rate_matching, + .read_status = rtl822xb_read_status, .suspend = genphy_suspend, .resume = rtlgen_resume, .read_page = rtl821x_read_page, .write_page = rtl821x_write_page, }, { - PHY_ID_MATCH_EXACT(0x001cc849), - .name = "RTL8221B-VB-CG 2.5Gbps PHY", + .match_phy_device = rtl8221b_vb_cg_c22_match_phy_device, + .name = "RTL8221B-VB-CG 2.5Gbps PHY (C22)", .get_features = rtl822x_get_features, .config_aneg = rtl822x_config_aneg, - .read_status = rtl822x_read_status, + .config_init = rtl822xb_config_init, + .get_rate_matching = rtl822xb_get_rate_matching, + .read_status = rtl822xb_read_status, .suspend = genphy_suspend, .resume = rtlgen_resume, .read_page = rtl821x_read_page, .write_page = rtl821x_write_page, }, { - PHY_ID_MATCH_EXACT(0x001cc84a), - .name = "RTL8221B-VM-CG 2.5Gbps PHY", + .match_phy_device = rtl8221b_vb_cg_c45_match_phy_device, + .name = "RTL8221B-VB-CG 2.5Gbps PHY (C45)", + .config_init = rtl822xb_config_init, + .get_rate_matching = rtl822xb_get_rate_matching, + .get_features = rtl822x_c45_get_features, 
+ .config_aneg = rtl822x_c45_config_aneg, + .read_status = rtl822xb_c45_read_status, + .suspend = genphy_c45_pma_suspend, + .resume = rtlgen_c45_resume, + }, { + .match_phy_device = rtl8221b_vn_cg_c22_match_phy_device, + .name = "RTL8221B-VM-CG 2.5Gbps PHY (C22)", .get_features = rtl822x_get_features, .config_aneg = rtl822x_config_aneg, - .read_status = rtl822x_read_status, + .config_init = rtl822xb_config_init, + .get_rate_matching = rtl822xb_get_rate_matching, + .read_status = rtl822xb_read_status, .suspend = genphy_suspend, .resume = rtlgen_resume, .read_page = rtl821x_read_page, .write_page = rtl821x_write_page, }, { + .match_phy_device = rtl8221b_vn_cg_c45_match_phy_device, + .name = "RTL8221B-VN-CG 2.5Gbps PHY (C45)", + .config_init = rtl822xb_config_init, + .get_rate_matching = rtl822xb_get_rate_matching, + .get_features = rtl822x_c45_get_features, + .config_aneg = rtl822x_c45_config_aneg, + .read_status = rtl822xb_c45_read_status, + .suspend = genphy_c45_pma_suspend, + .resume = rtlgen_c45_resume, + }, { PHY_ID_MATCH_EXACT(0x001cc862), .name = "RTL8251B 5Gbps PHY", .get_features = rtl822x_get_features, diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index db39dec7f247..2f44fc51848f 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -355,7 +355,7 @@ EXPORT_SYMBOL_GPL(sfp_parse_support); * modes mask. */ phy_interface_t sfp_select_interface(struct sfp_bus *bus, - unsigned long *link_modes) + const unsigned long *link_modes) { if (phylink_test(link_modes, 25000baseCR_Full) || phylink_test(link_modes, 25000baseKR_Full) || @@ -373,7 +373,8 @@ phy_interface_t sfp_select_interface(struct sfp_bus *bus, if (phylink_test(link_modes, 5000baseT_Full)) return PHY_INTERFACE_MODE_5GBASER; - if (phylink_test(link_modes, 2500baseX_Full)) + if (phylink_test(link_modes, 2500baseX_Full) || + phylink_test(link_modes, 2500baseT_Full)) return PHY_INTERFACE_MODE_2500BASEX; if (phylink_test(link_modes, 1000baseT_Half) || diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index f75c9eb3958e..3f9cbd797fd6 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -385,18 +385,23 @@ static void sfp_fixup_rollball(struct sfp *sfp) sfp->phy_t_retry = msecs_to_jiffies(1000); } -static void sfp_fixup_fs_10gt(struct sfp *sfp) +static void sfp_fixup_fs_2_5gt(struct sfp *sfp) { - sfp_fixup_10gbaset_30m(sfp); sfp_fixup_rollball(sfp); - /* The RollBall fixup is not enough for FS modules, the AQR chip inside + /* The RollBall fixup is not enough for FS modules, the PHY chip inside * them does not return 0xffff for PHY ID registers in all MMDs for the * while initializing. They need a 4 second wait before accessing PHY. */ sfp->module_t_wait = msecs_to_jiffies(4000); } +static void sfp_fixup_fs_10gt(struct sfp *sfp) +{ + sfp_fixup_10gbaset_30m(sfp); + sfp_fixup_fs_2_5gt(sfp); +} + static void sfp_fixup_halny_gsfp(struct sfp *sfp) { /* Ignore the TX_FAULT and LOS signals on this module. @@ -468,10 +473,15 @@ static const struct sfp_quirk sfp_quirks[] = { SFP_QUIRK("ALCATELLUCENT", "3FE46541AA", sfp_quirk_2500basex, sfp_fixup_nokia), - // Fiberstore SFP-10G-T doesn't identify as copper, and uses the - // Rollball protocol to talk to the PHY. + // Fiberstore SFP-10G-T doesn't identify as copper, uses the Rollball + // protocol to talk to the PHY and needs 4 sec wait before probing the + // PHY. SFP_QUIRK_F("FS", "SFP-10G-T", sfp_fixup_fs_10gt), + // Fiberstore SFP-2.5G-T uses Rollball protocol to talk to the PHY and + // needs 4 sec wait before probing the PHY. 
+ SFP_QUIRK_F("FS", "SFP-2.5G-T", sfp_fixup_fs_2_5gt), + // Fiberstore GPON-ONU-34-20BI can operate at 2500base-X, but report 1.2GBd // NRZ in their EEPROM SFP_QUIRK("FS", "GPON-ONU-34-20BI", sfp_quirk_2500basex, @@ -488,9 +498,6 @@ static const struct sfp_quirk sfp_quirks[] = { SFP_QUIRK("HUAWEI", "MA5671A", sfp_quirk_2500basex, sfp_fixup_ignore_tx_fault), - // FS 2.5G Base-T - SFP_QUIRK_M("FS", "SFP-2.5G-T", sfp_quirk_oem_2_5g), - // Lantech 8330-262D-E can operate at 2500base-X, but incorrectly report // 2500MBd NRZ in their EEPROM SFP_QUIRK_M("Lantech", "8330-262D-E", sfp_quirk_2500basex), @@ -502,10 +509,14 @@ static const struct sfp_quirk sfp_quirks[] = { SFP_QUIRK_F("Walsun", "HXSX-ATRC-1", sfp_fixup_fs_10gt), SFP_QUIRK_F("Walsun", "HXSX-ATRI-1", sfp_fixup_fs_10gt), + // OEM SFP-GE-T is a 1000Base-T module with broken TX_FAULT indicator + SFP_QUIRK_F("OEM", "SFP-GE-T", sfp_fixup_ignore_tx_fault), + SFP_QUIRK_F("OEM", "SFP-10G-T", sfp_fixup_rollball_cc), SFP_QUIRK_M("OEM", "SFP-2.5G-T", sfp_quirk_oem_2_5g), SFP_QUIRK_F("OEM", "RTSFP-10", sfp_fixup_rollball_cc), SFP_QUIRK_F("OEM", "RTSFP-10G", sfp_fixup_rollball_cc), + SFP_QUIRK_F("Turris", "RTSFP-2.5G", sfp_fixup_rollball), SFP_QUIRK_F("Turris", "RTSFP-10", sfp_fixup_rollball), SFP_QUIRK_F("Turris", "RTSFP-10G", sfp_fixup_rollball), }; diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index fe380fe196e7..0a65b6d690fe 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -1357,7 +1357,7 @@ static struct net *ppp_nl_get_link_net(const struct net_device *dev) { struct ppp *ppp = netdev_priv(dev); - return ppp->ppp_net; + return READ_ONCE(ppp->ppp_net); } static struct rtnl_link_ops ppp_link_ops __read_mostly = { diff --git a/drivers/net/pse-pd/Kconfig b/drivers/net/pse-pd/Kconfig index 687dec49c1e1..577ea904b3d9 100644 --- a/drivers/net/pse-pd/Kconfig +++ b/drivers/net/pse-pd/Kconfig @@ -5,6 +5,7 @@ menuconfig PSE_CONTROLLER bool "Ethernet Power Sourcing Equipment Support" + depends on REGULATOR help Generic Power Sourcing Equipment Controller support. @@ -14,10 +15,29 @@ if PSE_CONTROLLER config PSE_REGULATOR tristate "Regulator based PSE controller" - depends on REGULATOR || COMPILE_TEST help This module provides support for simple regulator based Ethernet Power Sourcing Equipment without automatic classification support. For example for basic implementation of PoDL (802.3bu) specification. +config PSE_PD692X0 + tristate "PD692X0 PSE controller" + depends on I2C + select FW_UPLOAD + help + This module provides support for PD692x0 regulator based Ethernet + Power Sourcing Equipment. + + To compile this driver as a module, choose M here: the + module will be called pd692x0. + +config PSE_TPS23881 + tristate "TPS23881 PSE controller" + depends on I2C + help + This module provides support for TPS23881 regulator based Ethernet + Power Sourcing Equipment. + + To compile this driver as a module, choose M here: the + module will be called tps23881. 
endif diff --git a/drivers/net/pse-pd/Makefile b/drivers/net/pse-pd/Makefile index 1b8aa4c70f0b..9d2898b36737 100644 --- a/drivers/net/pse-pd/Makefile +++ b/drivers/net/pse-pd/Makefile @@ -4,3 +4,5 @@ obj-$(CONFIG_PSE_CONTROLLER) += pse_core.o obj-$(CONFIG_PSE_REGULATOR) += pse_regulator.o +obj-$(CONFIG_PSE_PD692X0) += pd692x0.o +obj-$(CONFIG_PSE_TPS23881) += tps23881.o diff --git a/drivers/net/pse-pd/pd692x0.c b/drivers/net/pse-pd/pd692x0.c new file mode 100644 index 000000000000..6488b941703c --- /dev/null +++ b/drivers/net/pse-pd/pd692x0.c @@ -0,0 +1,1223 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Driver for the Microchip PD692X0 PoE PSE Controller driver (I2C bus) + * + * Copyright (c) 2023 Bootlin, Kory Maincent <[email protected]> + */ + +#include <linux/delay.h> +#include <linux/firmware.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pse-pd/pse.h> + +#define PD692X0_PSE_NAME "pd692x0_pse" + +#define PD692X0_MAX_PIS 48 +#define PD692X0_MAX_MANAGERS 12 +#define PD692X0_MAX_MANAGER_PORTS 8 +#define PD692X0_MAX_HW_PORTS (PD692X0_MAX_MANAGERS * PD692X0_MAX_MANAGER_PORTS) + +#define PD69200_BT_PROD_VER 24 +#define PD69210_BT_PROD_VER 26 +#define PD69220_BT_PROD_VER 29 + +#define PD692X0_FW_MAJ_VER 3 +#define PD692X0_FW_MIN_VER 5 +#define PD692X0_FW_PATCH_VER 5 + +enum pd692x0_fw_state { + PD692X0_FW_UNKNOWN, + PD692X0_FW_OK, + PD692X0_FW_BROKEN, + PD692X0_FW_NEED_UPDATE, + PD692X0_FW_PREPARE, + PD692X0_FW_WRITE, + PD692X0_FW_COMPLETE, +}; + +struct pd692x0_msg { + u8 key; + u8 echo; + u8 sub[3]; + u8 data[8]; + __be16 chksum; +} __packed; + +struct pd692x0_msg_ver { + u8 prod; + u8 maj_sw_ver; + u8 min_sw_ver; + u8 pa_sw_ver; + u8 param; + u8 build; +}; + +enum { + PD692X0_KEY_CMD, + PD692X0_KEY_PRG, + PD692X0_KEY_REQ, + PD692X0_KEY_TLM, + PD692X0_KEY_TEST, + PD692X0_KEY_REPORT = 0x52 +}; + +enum { + PD692X0_MSG_RESET, + PD692X0_MSG_GET_SYS_STATUS, + PD692X0_MSG_GET_SW_VER, + PD692X0_MSG_SET_TMP_PORT_MATRIX, + PD692X0_MSG_PRG_PORT_MATRIX, + PD692X0_MSG_SET_PORT_PARAM, + PD692X0_MSG_GET_PORT_STATUS, + PD692X0_MSG_DOWNLOAD_CMD, + + /* add new message above here */ + PD692X0_MSG_CNT +}; + +struct pd692x0_priv { + struct i2c_client *client; + struct pse_controller_dev pcdev; + struct device_node *np; + + enum pd692x0_fw_state fw_state; + struct fw_upload *fwl; + bool cancel_request; + + u8 msg_id; + bool last_cmd_key; + unsigned long last_cmd_key_time; + + enum ethtool_c33_pse_admin_state admin_state[PD692X0_MAX_PIS]; +}; + +/* Template list of communication messages. The non-null bytes defined here + * constitute the fixed portion of the messages. The remaining bytes will + * be configured later within the functions. Refer to the "PD692x0 BT Serial + * Communication Protocol User Guide" for comprehensive details on messages + * content. 
+ */ +static const struct pd692x0_msg pd692x0_msg_template_list[PD692X0_MSG_CNT] = { + [PD692X0_MSG_RESET] = { + .key = PD692X0_KEY_CMD, + .sub = {0x07, 0x55, 0x00}, + .data = {0x55, 0x00, 0x55, 0x4e, + 0x4e, 0x4e, 0x4e, 0x4e}, + }, + [PD692X0_MSG_GET_SYS_STATUS] = { + .key = PD692X0_KEY_REQ, + .sub = {0x07, 0xd0, 0x4e}, + .data = {0x4e, 0x4e, 0x4e, 0x4e, + 0x4e, 0x4e, 0x4e, 0x4e}, + }, + [PD692X0_MSG_GET_SW_VER] = { + .key = PD692X0_KEY_REQ, + .sub = {0x07, 0x1e, 0x21}, + .data = {0x4e, 0x4e, 0x4e, 0x4e, + 0x4e, 0x4e, 0x4e, 0x4e}, + }, + [PD692X0_MSG_SET_TMP_PORT_MATRIX] = { + .key = PD692X0_KEY_CMD, + .sub = {0x05, 0x43}, + .data = { 0, 0x4e, 0x4e, 0x4e, + 0x4e, 0x4e, 0x4e, 0x4e}, + }, + [PD692X0_MSG_PRG_PORT_MATRIX] = { + .key = PD692X0_KEY_CMD, + .sub = {0x07, 0x43, 0x4e}, + .data = {0x4e, 0x4e, 0x4e, 0x4e, + 0x4e, 0x4e, 0x4e, 0x4e}, + }, + [PD692X0_MSG_SET_PORT_PARAM] = { + .key = PD692X0_KEY_CMD, + .sub = {0x05, 0xc0}, + .data = { 0, 0xff, 0xff, 0xff, + 0x4e, 0x4e, 0x4e, 0x4e}, + }, + [PD692X0_MSG_GET_PORT_STATUS] = { + .key = PD692X0_KEY_REQ, + .sub = {0x05, 0xc1}, + .data = {0x4e, 0x4e, 0x4e, 0x4e, + 0x4e, 0x4e, 0x4e, 0x4e}, + }, + [PD692X0_MSG_DOWNLOAD_CMD] = { + .key = PD692X0_KEY_PRG, + .sub = {0xff, 0x99, 0x15}, + .data = {0x16, 0x16, 0x99, 0x4e, + 0x4e, 0x4e, 0x4e, 0x4e}, + }, +}; + +static u8 pd692x0_build_msg(struct pd692x0_msg *msg, u8 echo) +{ + u8 *data = (u8 *)msg; + u16 chksum = 0; + int i; + + msg->echo = echo++; + if (echo == 0xff) + echo = 0; + + for (i = 0; i < sizeof(*msg) - sizeof(msg->chksum); i++) + chksum += data[i]; + + msg->chksum = cpu_to_be16(chksum); + + return echo; +} + +static int pd692x0_send_msg(struct pd692x0_priv *priv, struct pd692x0_msg *msg) +{ + const struct i2c_client *client = priv->client; + int ret; + + if (msg->key == PD692X0_KEY_CMD && priv->last_cmd_key) { + int cmd_msleep; + + cmd_msleep = 30 - jiffies_to_msecs(jiffies - priv->last_cmd_key_time); + if (cmd_msleep > 0) + msleep(cmd_msleep); + } + + /* Add echo and checksum bytes to the message */ + priv->msg_id = pd692x0_build_msg(msg, priv->msg_id); + + ret = i2c_master_send(client, (u8 *)msg, sizeof(*msg)); + if (ret != sizeof(*msg)) + return -EIO; + + return 0; +} + +static int pd692x0_reset(struct pd692x0_priv *priv) +{ + const struct i2c_client *client = priv->client; + struct pd692x0_msg msg, buf = {0}; + int ret; + + msg = pd692x0_msg_template_list[PD692X0_MSG_RESET]; + ret = pd692x0_send_msg(priv, &msg); + if (ret) { + dev_err(&client->dev, + "Failed to reset the controller (%pe)\n", ERR_PTR(ret)); + return ret; + } + + msleep(30); + + ret = i2c_master_recv(client, (u8 *)&buf, sizeof(buf)); + if (ret != sizeof(buf)) + return ret < 0 ? ret : -EIO; + + /* Is the reply a successful report message */ + if (buf.key != PD692X0_KEY_REPORT || buf.sub[0] || buf.sub[1]) + return -EIO; + + msleep(300); + + ret = i2c_master_recv(client, (u8 *)&buf, sizeof(buf)); + if (ret != sizeof(buf)) + return ret < 0 ? 
ret : -EIO; + + /* Is the boot status without error */ + if (buf.key != 0x03 || buf.echo != 0xff || buf.sub[0] & 0x1) { + dev_err(&client->dev, "PSE controller error\n"); + return -EIO; + } + + return 0; +} + +static bool pd692x0_try_recv_msg(const struct i2c_client *client, + struct pd692x0_msg *msg, + struct pd692x0_msg *buf) +{ + /* Wait 30ms before readback as mandated by the protocol */ + msleep(30); + + memset(buf, 0, sizeof(*buf)); + i2c_master_recv(client, (u8 *)buf, sizeof(*buf)); + if (buf->key) + return 0; + + msleep(100); + + memset(buf, 0, sizeof(*buf)); + i2c_master_recv(client, (u8 *)buf, sizeof(*buf)); + if (buf->key) + return 0; + + return 1; +} + +/* Implementation of I2C communication, specifically addressing scenarios + * involving communication loss. Refer to the "Synchronization During + * Communication Loss" section in the Communication Protocol document for + * further details. + */ +static int pd692x0_recv_msg(struct pd692x0_priv *priv, + struct pd692x0_msg *msg, + struct pd692x0_msg *buf) +{ + const struct i2c_client *client = priv->client; + int ret; + + ret = pd692x0_try_recv_msg(client, msg, buf); + if (!ret) + goto out_success; + + dev_warn(&client->dev, + "Communication lost, rtnl is locked until communication is back!"); + + ret = pd692x0_send_msg(priv, msg); + if (ret) + return ret; + + ret = pd692x0_try_recv_msg(client, msg, buf); + if (!ret) + goto out_success2; + + msleep(10000); + + ret = pd692x0_send_msg(priv, msg); + if (ret) + return ret; + + ret = pd692x0_try_recv_msg(client, msg, buf); + if (!ret) + goto out_success2; + + return pd692x0_reset(priv); + +out_success2: + dev_warn(&client->dev, "Communication is back, rtnl is unlocked!"); +out_success: + if (msg->key == PD692X0_KEY_CMD) { + priv->last_cmd_key = true; + priv->last_cmd_key_time = jiffies; + } else { + priv->last_cmd_key = false; + } + + return 0; +} + +static int pd692x0_sendrecv_msg(struct pd692x0_priv *priv, + struct pd692x0_msg *msg, + struct pd692x0_msg *buf) +{ + struct device *dev = &priv->client->dev; + int ret; + + ret = pd692x0_send_msg(priv, msg); + if (ret) + return ret; + + ret = pd692x0_recv_msg(priv, msg, buf); + if (ret) + return ret; + + if (msg->echo != buf->echo) { + dev_err(dev, + "Wrong match in message ID, expect %d received %d.\n", + msg->echo, buf->echo); + return -EIO; + } + + /* If the reply is a report message is it successful */ + if (buf->key == PD692X0_KEY_REPORT && + (buf->sub[0] || buf->sub[1])) { + return -EIO; + } + + return 0; +} + +static struct pd692x0_priv *to_pd692x0_priv(struct pse_controller_dev *pcdev) +{ + return container_of(pcdev, struct pd692x0_priv, pcdev); +} + +static int pd692x0_fw_unavailable(struct pd692x0_priv *priv) +{ + switch (priv->fw_state) { + case PD692X0_FW_OK: + return 0; + case PD692X0_FW_PREPARE: + case PD692X0_FW_WRITE: + case PD692X0_FW_COMPLETE: + dev_err(&priv->client->dev, "Firmware update in progress!\n"); + return -EBUSY; + case PD692X0_FW_BROKEN: + case PD692X0_FW_NEED_UPDATE: + default: + dev_err(&priv->client->dev, + "Firmware issue. 
Please update it!\n"); + return -EOPNOTSUPP; + } +} + +static int pd692x0_pi_enable(struct pse_controller_dev *pcdev, int id) +{ + struct pd692x0_priv *priv = to_pd692x0_priv(pcdev); + struct pd692x0_msg msg, buf = {0}; + int ret; + + ret = pd692x0_fw_unavailable(priv); + if (ret) + return ret; + + if (priv->admin_state[id] == ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED) + return 0; + + msg = pd692x0_msg_template_list[PD692X0_MSG_SET_PORT_PARAM]; + msg.data[0] = 0x1; + msg.sub[2] = id; + ret = pd692x0_sendrecv_msg(priv, &msg, &buf); + if (ret < 0) + return ret; + + priv->admin_state[id] = ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED; + + return 0; +} + +static int pd692x0_pi_disable(struct pse_controller_dev *pcdev, int id) +{ + struct pd692x0_priv *priv = to_pd692x0_priv(pcdev); + struct pd692x0_msg msg, buf = {0}; + int ret; + + ret = pd692x0_fw_unavailable(priv); + if (ret) + return ret; + + if (priv->admin_state[id] == ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED) + return 0; + + msg = pd692x0_msg_template_list[PD692X0_MSG_SET_PORT_PARAM]; + msg.data[0] = 0x0; + msg.sub[2] = id; + ret = pd692x0_sendrecv_msg(priv, &msg, &buf); + if (ret < 0) + return ret; + + priv->admin_state[id] = ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED; + + return 0; +} + +static int pd692x0_pi_is_enabled(struct pse_controller_dev *pcdev, int id) +{ + struct pd692x0_priv *priv = to_pd692x0_priv(pcdev); + struct pd692x0_msg msg, buf = {0}; + int ret; + + ret = pd692x0_fw_unavailable(priv); + if (ret) + return ret; + + msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_STATUS]; + msg.sub[2] = id; + ret = pd692x0_sendrecv_msg(priv, &msg, &buf); + if (ret < 0) + return ret; + + if (buf.sub[1]) { + priv->admin_state[id] = ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED; + return 1; + } else { + priv->admin_state[id] = ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED; + return 0; + } +} + +static int pd692x0_ethtool_get_status(struct pse_controller_dev *pcdev, + unsigned long id, + struct netlink_ext_ack *extack, + struct pse_control_status *status) +{ + struct pd692x0_priv *priv = to_pd692x0_priv(pcdev); + struct pd692x0_msg msg, buf = {0}; + int ret; + + ret = pd692x0_fw_unavailable(priv); + if (ret) + return ret; + + msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_STATUS]; + msg.sub[2] = id; + ret = pd692x0_sendrecv_msg(priv, &msg, &buf); + if (ret < 0) + return ret; + + /* Compare Port Status (Communication Protocol Document par. 
7.1) */ + if ((buf.sub[0] & 0xf0) == 0x80 || (buf.sub[0] & 0xf0) == 0x90) + status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING; + else if (buf.sub[0] == 0x1b || buf.sub[0] == 0x22) + status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_SEARCHING; + else if (buf.sub[0] == 0x12) + status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_FAULT; + else + status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED; + + if (buf.sub[1]) + status->c33_admin_state = ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED; + else + status->c33_admin_state = ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED; + + priv->admin_state[id] = status->c33_admin_state; + + return 0; +} + +static struct pd692x0_msg_ver pd692x0_get_sw_version(struct pd692x0_priv *priv) +{ + struct device *dev = &priv->client->dev; + struct pd692x0_msg msg, buf = {0}; + struct pd692x0_msg_ver ver = {0}; + int ret; + + msg = pd692x0_msg_template_list[PD692X0_MSG_GET_SW_VER]; + ret = pd692x0_sendrecv_msg(priv, &msg, &buf); + if (ret < 0) { + dev_err(dev, "Failed to get PSE version (%pe)\n", ERR_PTR(ret)); + return ver; + } + + /* Extract version from the message */ + ver.prod = buf.sub[2]; + ver.maj_sw_ver = (buf.data[0] << 8 | buf.data[1]) / 100; + ver.min_sw_ver = ((buf.data[0] << 8 | buf.data[1]) / 10) % 10; + ver.pa_sw_ver = (buf.data[0] << 8 | buf.data[1]) % 10; + ver.param = buf.data[2]; + ver.build = buf.data[3]; + + return ver; +} + +struct pd692x0_manager { + struct device_node *port_node[PD692X0_MAX_MANAGER_PORTS]; + int nports; +}; + +struct pd692x0_matrix { + u8 hw_port_a; + u8 hw_port_b; +}; + +static int +pd692x0_of_get_ports_manager(struct pd692x0_priv *priv, + struct pd692x0_manager *manager, + struct device_node *np) +{ + struct device_node *node; + int ret, nports, i; + + nports = 0; + for_each_child_of_node(np, node) { + u32 port; + + if (!of_node_name_eq(node, "port")) + continue; + + ret = of_property_read_u32(node, "reg", &port); + if (ret) + goto out; + + if (port >= PD692X0_MAX_MANAGER_PORTS || port != nports) { + dev_err(&priv->client->dev, + "wrong number or order of manager ports (%d)\n", + port); + ret = -EINVAL; + goto out; + } + + of_node_get(node); + manager->port_node[port] = node; + nports++; + } + + manager->nports = nports; + return 0; + +out: + for (i = 0; i < nports; i++) { + of_node_put(manager->port_node[i]); + manager->port_node[i] = NULL; + } + of_node_put(node); + return ret; +} + +static int +pd692x0_of_get_managers(struct pd692x0_priv *priv, + struct pd692x0_manager manager[PD692X0_MAX_MANAGERS]) +{ + struct device_node *managers_node, *node; + int ret, nmanagers, i, j; + + if (!priv->np) + return -EINVAL; + + nmanagers = 0; + managers_node = of_get_child_by_name(priv->np, "managers"); + if (!managers_node) + return -EINVAL; + + for_each_child_of_node(managers_node, node) { + u32 manager_id; + + if (!of_node_name_eq(node, "manager")) + continue; + + ret = of_property_read_u32(node, "reg", &manager_id); + if (ret) + goto out; + + if (manager_id >= PD692X0_MAX_MANAGERS || + manager_id != nmanagers) { + dev_err(&priv->client->dev, + "wrong number or order of managers (%d)\n", + manager_id); + ret = -EINVAL; + goto out; + } + + ret = pd692x0_of_get_ports_manager(priv, &manager[manager_id], + node); + if (ret) + goto out; + + nmanagers++; + } + + of_node_put(managers_node); + return nmanagers; + +out: + for (i = 0; i < nmanagers; i++) { + for (j = 0; j < manager[i].nports; j++) { + of_node_put(manager[i].port_node[j]); + manager[i].port_node[j] = NULL; + } + } + + of_node_put(node); + of_node_put(managers_node); + 
return ret; +} + +static int +pd692x0_set_port_matrix(const struct pse_pi_pairset *pairset, + const struct pd692x0_manager *manager, + int nmanagers, struct pd692x0_matrix *port_matrix) +{ + int i, j, port_cnt; + bool found = false; + + if (!pairset->np) + return 0; + + /* Look on every managers */ + port_cnt = 0; + for (i = 0; i < nmanagers; i++) { + /* Look on every ports of the manager */ + for (j = 0; j < manager[i].nports; j++) { + if (pairset->np == manager[i].port_node[j]) { + found = true; + break; + } + } + port_cnt += j; + + if (found) + break; + } + + if (!found) + return -ENODEV; + + if (pairset->pinout == ALTERNATIVE_A) + port_matrix->hw_port_a = port_cnt; + else if (pairset->pinout == ALTERNATIVE_B) + port_matrix->hw_port_b = port_cnt; + + return 0; +} + +static int +pd692x0_set_ports_matrix(struct pd692x0_priv *priv, + const struct pd692x0_manager *manager, + int nmanagers, + struct pd692x0_matrix port_matrix[PD692X0_MAX_PIS]) +{ + struct pse_controller_dev *pcdev = &priv->pcdev; + int i, ret; + + /* Init Matrix */ + for (i = 0; i < PD692X0_MAX_PIS; i++) { + port_matrix[i].hw_port_a = 0xff; + port_matrix[i].hw_port_b = 0xff; + } + + /* Update with values for every PSE PIs */ + for (i = 0; i < pcdev->nr_lines; i++) { + ret = pd692x0_set_port_matrix(&pcdev->pi[i].pairset[0], + manager, nmanagers, + &port_matrix[i]); + if (ret) { + dev_err(&priv->client->dev, + "unable to configure pi %d pairset 0", i); + return ret; + } + + ret = pd692x0_set_port_matrix(&pcdev->pi[i].pairset[1], + manager, nmanagers, + &port_matrix[i]); + if (ret) { + dev_err(&priv->client->dev, + "unable to configure pi %d pairset 1", i); + return ret; + } + } + + return 0; +} + +static int +pd692x0_write_ports_matrix(struct pd692x0_priv *priv, + const struct pd692x0_matrix port_matrix[PD692X0_MAX_PIS]) +{ + struct pd692x0_msg msg, buf; + int ret, i; + + /* Write temporary Matrix */ + msg = pd692x0_msg_template_list[PD692X0_MSG_SET_TMP_PORT_MATRIX]; + for (i = 0; i < PD692X0_MAX_PIS; i++) { + msg.sub[2] = i; + msg.data[0] = port_matrix[i].hw_port_b; + msg.data[1] = port_matrix[i].hw_port_a; + + ret = pd692x0_sendrecv_msg(priv, &msg, &buf); + if (ret < 0) + return ret; + } + + /* Program Matrix */ + msg = pd692x0_msg_template_list[PD692X0_MSG_PRG_PORT_MATRIX]; + ret = pd692x0_sendrecv_msg(priv, &msg, &buf); + if (ret < 0) + return ret; + + return 0; +} + +static int pd692x0_setup_pi_matrix(struct pse_controller_dev *pcdev) +{ + struct pd692x0_manager manager[PD692X0_MAX_MANAGERS] = {0}; + struct pd692x0_priv *priv = to_pd692x0_priv(pcdev); + struct pd692x0_matrix port_matrix[PD692X0_MAX_PIS]; + int ret, i, j, nmanagers; + + /* Should we flash the port matrix */ + if (priv->fw_state != PD692X0_FW_OK && + priv->fw_state != PD692X0_FW_COMPLETE) + return 0; + + ret = pd692x0_of_get_managers(priv, manager); + if (ret < 0) + return ret; + + nmanagers = ret; + ret = pd692x0_set_ports_matrix(priv, manager, nmanagers, port_matrix); + if (ret) + goto out; + + ret = pd692x0_write_ports_matrix(priv, port_matrix); + if (ret) + goto out; + +out: + for (i = 0; i < nmanagers; i++) { + for (j = 0; j < manager[i].nports; j++) + of_node_put(manager[i].port_node[j]); + } + return ret; +} + +static const struct pse_controller_ops pd692x0_ops = { + .setup_pi_matrix = pd692x0_setup_pi_matrix, + .ethtool_get_status = pd692x0_ethtool_get_status, + .pi_enable = pd692x0_pi_enable, + .pi_disable = pd692x0_pi_disable, + .pi_is_enabled = pd692x0_pi_is_enabled, +}; + +#define PD692X0_FW_LINE_MAX_SZ 0xff +static int 
pd692x0_fw_get_next_line(const u8 *data, + char *line, size_t size) +{ + size_t line_size; + int i; + + line_size = min_t(size_t, size, PD692X0_FW_LINE_MAX_SZ); + + memset(line, 0, PD692X0_FW_LINE_MAX_SZ); + for (i = 0; i < line_size - 1; i++) { + if (*data == '\r' && *(data + 1) == '\n') { + line[i] = '\r'; + line[i + 1] = '\n'; + return i + 2; + } + line[i] = *data; + data++; + } + + return -EIO; +} + +static enum fw_upload_err +pd692x0_fw_recv_resp(const struct i2c_client *client, unsigned long ms_timeout, + const char *msg_ok, unsigned int msg_size) +{ + /* Maximum controller response size */ + char fw_msg_buf[5] = {0}; + unsigned long timeout; + int ret; + + if (msg_size > sizeof(fw_msg_buf)) + return FW_UPLOAD_ERR_RW_ERROR; + + /* Read until we get something */ + timeout = msecs_to_jiffies(ms_timeout) + jiffies; + while (true) { + if (time_is_before_jiffies(timeout)) + return FW_UPLOAD_ERR_TIMEOUT; + + ret = i2c_master_recv(client, fw_msg_buf, 1); + if (ret < 0 || *fw_msg_buf == 0) { + usleep_range(1000, 2000); + continue; + } else { + break; + } + } + + /* Read remaining characters */ + ret = i2c_master_recv(client, fw_msg_buf + 1, msg_size - 1); + if (strncmp(fw_msg_buf, msg_ok, msg_size)) { + dev_err(&client->dev, + "Wrong FW download process answer (%*pE)\n", + msg_size, fw_msg_buf); + return FW_UPLOAD_ERR_HW_ERROR; + } + + return FW_UPLOAD_ERR_NONE; +} + +static int pd692x0_fw_write_line(const struct i2c_client *client, + const char line[PD692X0_FW_LINE_MAX_SZ], + const bool last_line) +{ + int ret; + + while (*line != 0) { + ret = i2c_master_send(client, line, 1); + if (ret < 0) + return FW_UPLOAD_ERR_RW_ERROR; + line++; + } + + if (last_line) { + ret = pd692x0_fw_recv_resp(client, 100, "TP\r\n", + sizeof("TP\r\n") - 1); + if (ret) + return ret; + } else { + ret = pd692x0_fw_recv_resp(client, 100, "T*\r\n", + sizeof("T*\r\n") - 1); + if (ret) + return ret; + } + + return FW_UPLOAD_ERR_NONE; +} + +static enum fw_upload_err pd692x0_fw_reset(const struct i2c_client *client) +{ + const struct pd692x0_msg zero = {0}; + struct pd692x0_msg buf = {0}; + unsigned long timeout; + char cmd[] = "RST"; + int ret; + + ret = i2c_master_send(client, cmd, strlen(cmd)); + if (ret < 0) { + dev_err(&client->dev, + "Failed to reset the controller (%pe)\n", + ERR_PTR(ret)); + return ret; + } + + timeout = msecs_to_jiffies(10000) + jiffies; + while (true) { + if (time_is_before_jiffies(timeout)) + return FW_UPLOAD_ERR_TIMEOUT; + + ret = i2c_master_recv(client, (u8 *)&buf, sizeof(buf)); + if (ret < 0 || + !memcmp(&buf, &zero, sizeof(buf))) + usleep_range(1000, 2000); + else + break; + } + + /* Is the reply a successful report message */ + if (buf.key != PD692X0_KEY_TLM || buf.echo != 0xff || + buf.sub[0] & 0x01) { + dev_err(&client->dev, "PSE controller error\n"); + return FW_UPLOAD_ERR_HW_ERROR; + } + + /* Is the firmware operational */ + if (buf.sub[0] & 0x02) { + dev_err(&client->dev, + "PSE firmware error. 
Please update it.\n"); + return FW_UPLOAD_ERR_HW_ERROR; + } + + return FW_UPLOAD_ERR_NONE; +} + +static enum fw_upload_err pd692x0_fw_prepare(struct fw_upload *fwl, + const u8 *data, u32 size) +{ + struct pd692x0_priv *priv = fwl->dd_handle; + const struct i2c_client *client = priv->client; + enum pd692x0_fw_state last_fw_state; + int ret; + + priv->cancel_request = false; + last_fw_state = priv->fw_state; + + priv->fw_state = PD692X0_FW_PREPARE; + + /* Enter program mode */ + if (last_fw_state == PD692X0_FW_BROKEN) { + const char *msg = "ENTR"; + const char *c; + + c = msg; + do { + ret = i2c_master_send(client, c, 1); + if (ret < 0) + return FW_UPLOAD_ERR_RW_ERROR; + if (*(c + 1)) + usleep_range(10000, 20000); + } while (*(++c)); + } else { + struct pd692x0_msg msg, buf; + + msg = pd692x0_msg_template_list[PD692X0_MSG_DOWNLOAD_CMD]; + ret = pd692x0_sendrecv_msg(priv, &msg, &buf); + if (ret < 0) { + dev_err(&client->dev, + "Failed to enter programming mode (%pe)\n", + ERR_PTR(ret)); + return FW_UPLOAD_ERR_RW_ERROR; + } + } + + ret = pd692x0_fw_recv_resp(client, 100, "TPE\r\n", sizeof("TPE\r\n") - 1); + if (ret) + goto err_out; + + if (priv->cancel_request) { + ret = FW_UPLOAD_ERR_CANCELED; + goto err_out; + } + + return FW_UPLOAD_ERR_NONE; + +err_out: + pd692x0_fw_reset(priv->client); + priv->fw_state = last_fw_state; + return ret; +} + +static enum fw_upload_err pd692x0_fw_write(struct fw_upload *fwl, + const u8 *data, u32 offset, + u32 size, u32 *written) +{ + struct pd692x0_priv *priv = fwl->dd_handle; + char line[PD692X0_FW_LINE_MAX_SZ]; + const struct i2c_client *client; + int ret, i; + char cmd; + + client = priv->client; + priv->fw_state = PD692X0_FW_WRITE; + + /* Erase */ + cmd = 'E'; + ret = i2c_master_send(client, &cmd, 1); + if (ret < 0) { + dev_err(&client->dev, + "Failed to boot programming mode (%pe)\n", + ERR_PTR(ret)); + return FW_UPLOAD_ERR_RW_ERROR; + } + + ret = pd692x0_fw_recv_resp(client, 100, "TOE\r\n", sizeof("TOE\r\n") - 1); + if (ret) + return ret; + + ret = pd692x0_fw_recv_resp(client, 5000, "TE\r\n", sizeof("TE\r\n") - 1); + if (ret) + dev_warn(&client->dev, + "Failed to erase internal memory, however still try to write Firmware\n"); + + ret = pd692x0_fw_recv_resp(client, 100, "TPE\r\n", sizeof("TPE\r\n") - 1); + if (ret) + dev_warn(&client->dev, + "Failed to erase internal memory, however still try to write Firmware\n"); + + if (priv->cancel_request) + return FW_UPLOAD_ERR_CANCELED; + + /* Program */ + cmd = 'P'; + ret = i2c_master_send(client, &cmd, sizeof(char)); + if (ret < 0) { + dev_err(&client->dev, + "Failed to boot programming mode (%pe)\n", + ERR_PTR(ret)); + return ret; + } + + ret = pd692x0_fw_recv_resp(client, 100, "TOP\r\n", sizeof("TOP\r\n") - 1); + if (ret) + return ret; + + i = 0; + while (i < size) { + ret = pd692x0_fw_get_next_line(data, line, size - i); + if (ret < 0) { + ret = FW_UPLOAD_ERR_FW_INVALID; + goto err; + } + + i += ret; + data += ret; + if (line[0] == 'S' && line[1] == '0') { + continue; + } else if (line[0] == 'S' && line[1] == '7') { + ret = pd692x0_fw_write_line(client, line, true); + if (ret) + goto err; + } else { + ret = pd692x0_fw_write_line(client, line, false); + if (ret) + goto err; + } + + if (priv->cancel_request) { + ret = FW_UPLOAD_ERR_CANCELED; + goto err; + } + } + *written = i; + + msleep(400); + + return FW_UPLOAD_ERR_NONE; + +err: + strscpy_pad(line, "S7\r\n", sizeof(line)); + pd692x0_fw_write_line(client, line, true); + return ret; +} + +static enum fw_upload_err pd692x0_fw_poll_complete(struct fw_upload *fwl) 
+{ + struct pd692x0_priv *priv = fwl->dd_handle; + const struct i2c_client *client = priv->client; + struct pd692x0_msg_ver ver; + int ret; + + priv->fw_state = PD692X0_FW_COMPLETE; + + ret = pd692x0_fw_reset(client); + if (ret) + return ret; + + ver = pd692x0_get_sw_version(priv); + if (ver.maj_sw_ver < PD692X0_FW_MAJ_VER) { + dev_err(&client->dev, + "Too old firmware version. Please update it\n"); + priv->fw_state = PD692X0_FW_NEED_UPDATE; + return FW_UPLOAD_ERR_FW_INVALID; + } + + ret = pd692x0_setup_pi_matrix(&priv->pcdev); + if (ret < 0) { + dev_err(&client->dev, "Error configuring ports matrix (%pe)\n", + ERR_PTR(ret)); + priv->fw_state = PD692X0_FW_NEED_UPDATE; + return FW_UPLOAD_ERR_HW_ERROR; + } + + priv->fw_state = PD692X0_FW_OK; + return FW_UPLOAD_ERR_NONE; +} + +static void pd692x0_fw_cancel(struct fw_upload *fwl) +{ + struct pd692x0_priv *priv = fwl->dd_handle; + + priv->cancel_request = true; +} + +static void pd692x0_fw_cleanup(struct fw_upload *fwl) +{ + struct pd692x0_priv *priv = fwl->dd_handle; + + switch (priv->fw_state) { + case PD692X0_FW_WRITE: + pd692x0_fw_reset(priv->client); + fallthrough; + case PD692X0_FW_COMPLETE: + priv->fw_state = PD692X0_FW_BROKEN; + break; + default: + break; + } +} + +static const struct fw_upload_ops pd692x0_fw_ops = { + .prepare = pd692x0_fw_prepare, + .write = pd692x0_fw_write, + .poll_complete = pd692x0_fw_poll_complete, + .cancel = pd692x0_fw_cancel, + .cleanup = pd692x0_fw_cleanup, +}; + +static int pd692x0_i2c_probe(struct i2c_client *client) +{ + struct pd692x0_msg msg, buf = {0}, zero = {0}; + struct device *dev = &client->dev; + struct pd692x0_msg_ver ver; + struct pd692x0_priv *priv; + struct fw_upload *fwl; + int ret; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + dev_err(dev, "i2c check functionality failed\n"); + return -ENXIO; + } + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->client = client; + i2c_set_clientdata(client, priv); + + ret = i2c_master_recv(client, (u8 *)&buf, sizeof(buf)); + if (ret != sizeof(buf)) { + dev_err(dev, "Failed to get device status\n"); + return -EIO; + } + + /* Probe has been already run and the status dumped */ + if (!memcmp(&buf, &zero, sizeof(buf))) { + /* Ask again the controller status */ + msg = pd692x0_msg_template_list[PD692X0_MSG_GET_SYS_STATUS]; + ret = pd692x0_sendrecv_msg(priv, &msg, &buf); + if (ret < 0) { + dev_err(dev, "Failed to get device status\n"); + return ret; + } + } + + if (buf.key != 0x03 || buf.sub[0] & 0x01) { + dev_err(dev, "PSE controller error\n"); + return -EIO; + } + if (buf.sub[0] & 0x02) { + dev_err(dev, "PSE firmware error. Please update it.\n"); + priv->fw_state = PD692X0_FW_BROKEN; + } else { + ver = pd692x0_get_sw_version(priv); + dev_info(&client->dev, "Software version %d.%02d.%d.%d\n", + ver.prod, ver.maj_sw_ver, ver.min_sw_ver, + ver.pa_sw_ver); + + if (ver.maj_sw_ver < PD692X0_FW_MAJ_VER) { + dev_err(dev, "Too old firmware version. 
Please update it\n"); + priv->fw_state = PD692X0_FW_NEED_UPDATE; + } else { + priv->fw_state = PD692X0_FW_OK; + } + } + + priv->np = dev->of_node; + priv->pcdev.nr_lines = PD692X0_MAX_PIS; + priv->pcdev.owner = THIS_MODULE; + priv->pcdev.ops = &pd692x0_ops; + priv->pcdev.dev = dev; + priv->pcdev.types = ETHTOOL_PSE_C33; + ret = devm_pse_controller_register(dev, &priv->pcdev); + if (ret) + return dev_err_probe(dev, ret, + "failed to register PSE controller\n"); + + fwl = firmware_upload_register(THIS_MODULE, dev, dev_name(dev), + &pd692x0_fw_ops, priv); + if (IS_ERR(fwl)) + return dev_err_probe(dev, PTR_ERR(fwl), + "failed to register to the Firmware Upload API\n"); + priv->fwl = fwl; + + return 0; +} + +static void pd692x0_i2c_remove(struct i2c_client *client) +{ + struct pd692x0_priv *priv = i2c_get_clientdata(client); + + firmware_upload_unregister(priv->fwl); +} + +static const struct i2c_device_id pd692x0_id[] = { + { PD692X0_PSE_NAME, 0 }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, pd692x0_id); + +static const struct of_device_id pd692x0_of_match[] = { + { .compatible = "microchip,pd69200", }, + { .compatible = "microchip,pd69210", }, + { .compatible = "microchip,pd69220", }, + { }, +}; +MODULE_DEVICE_TABLE(of, pd692x0_of_match); + +static struct i2c_driver pd692x0_driver = { + .probe = pd692x0_i2c_probe, + .remove = pd692x0_i2c_remove, + .id_table = pd692x0_id, + .driver = { + .name = PD692X0_PSE_NAME, + .of_match_table = pd692x0_of_match, + }, +}; +module_i2c_driver(pd692x0_driver); + +MODULE_AUTHOR("Kory Maincent <[email protected]>"); +MODULE_DESCRIPTION("Microchip PD692x0 PoE PSE Controller driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/pse-pd/pse_core.c b/drivers/net/pse-pd/pse_core.c index 146b81f08a89..795ab264eaf2 100644 --- a/drivers/net/pse-pd/pse_core.c +++ b/drivers/net/pse-pd/pse_core.c @@ -8,6 +8,8 @@ #include <linux/device.h> #include <linux/of.h> #include <linux/pse-pd/pse.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/machine.h> static DEFINE_MUTEX(pse_list_mutex); static LIST_HEAD(pse_controller_list); @@ -16,67 +18,357 @@ static LIST_HEAD(pse_controller_list); * struct pse_control - a PSE control * @pcdev: a pointer to the PSE controller device * this PSE control belongs to + * @ps: PSE PI supply of the PSE control * @list: list entry for the pcdev's PSE controller list * @id: ID of the PSE line in the PSE controller device * @refcnt: Number of gets of this pse_control */ struct pse_control { struct pse_controller_dev *pcdev; + struct regulator *ps; struct list_head list; unsigned int id; struct kref refcnt; }; +static int of_load_single_pse_pi_pairset(struct device_node *node, + struct pse_pi *pi, + int pairset_num) +{ + struct device_node *pairset_np; + const char *name; + int ret; + + ret = of_property_read_string_index(node, "pairset-names", + pairset_num, &name); + if (ret) + return ret; + + if (!strcmp(name, "alternative-a")) { + pi->pairset[pairset_num].pinout = ALTERNATIVE_A; + } else if (!strcmp(name, "alternative-b")) { + pi->pairset[pairset_num].pinout = ALTERNATIVE_B; + } else { + pr_err("pse: wrong pairset-names value %s (%pOF)\n", + name, node); + return -EINVAL; + } + + pairset_np = of_parse_phandle(node, "pairsets", pairset_num); + if (!pairset_np) + return -ENODEV; + + pi->pairset[pairset_num].np = pairset_np; + + return 0; +} + /** - * of_pse_zero_xlate - dummy function for controllers with one only control - * @pcdev: a pointer to the PSE controller device - * @pse_spec: PSE line specifier as found in the device tree + * 
of_load_pse_pi_pairsets - load PSE PI pairsets pinout and polarity + * @node: a pointer of the device node + * @pi: a pointer of the PSE PI to fill + * @npairsets: the number of pairsets (1 or 2) used by the PI * - * This static translation function is used by default if of_xlate in - * :c:type:`pse_controller_dev` is not set. It is useful for all PSE - * controllers with #pse-cells = <0>. + * Return: 0 on success and failure value on error */ -static int of_pse_zero_xlate(struct pse_controller_dev *pcdev, - const struct of_phandle_args *pse_spec) +static int of_load_pse_pi_pairsets(struct device_node *node, + struct pse_pi *pi, + int npairsets) { - return 0; + int i, ret; + + ret = of_property_count_strings(node, "pairset-names"); + if (ret != npairsets) { + pr_err("pse: amount of pairsets and pairset-names is not equal %d != %d (%pOF)\n", + npairsets, ret, node); + return -EINVAL; + } + + for (i = 0; i < npairsets; i++) { + ret = of_load_single_pse_pi_pairset(node, pi, i); + if (ret) + goto out; + } + + if (npairsets == 2 && + pi->pairset[0].pinout == pi->pairset[1].pinout) { + pr_err("pse: two PI pairsets can not have identical pinout (%pOF)", + node); + ret = -EINVAL; + } + +out: + /* If an error appears, release all the pairset device node kref */ + if (ret) { + of_node_put(pi->pairset[0].np); + pi->pairset[0].np = NULL; + of_node_put(pi->pairset[1].np); + pi->pairset[1].np = NULL; + } + + return ret; +} + +static void pse_release_pis(struct pse_controller_dev *pcdev) +{ + int i; + + for (i = 0; i <= pcdev->nr_lines; i++) { + of_node_put(pcdev->pi[i].pairset[0].np); + of_node_put(pcdev->pi[i].pairset[1].np); + of_node_put(pcdev->pi[i].np); + } + kfree(pcdev->pi); } /** - * of_pse_simple_xlate - translate pse_spec to the PSE line number + * of_load_pse_pis - load all the PSE PIs * @pcdev: a pointer to the PSE controller device - * @pse_spec: PSE line specifier as found in the device tree * - * This static translation function is used by default if of_xlate in - * :c:type:`pse_controller_dev` is not set. It is useful for all PSE - * controllers with 1:1 mapping, where PSE lines can be indexed by number - * without gaps. + * Return: 0 on success and failure value on error */ -static int of_pse_simple_xlate(struct pse_controller_dev *pcdev, - const struct of_phandle_args *pse_spec) +static int of_load_pse_pis(struct pse_controller_dev *pcdev) { - if (pse_spec->args[0] >= pcdev->nr_lines) - return -EINVAL; + struct device_node *np = pcdev->dev->of_node; + struct device_node *node, *pis; + int ret; - return pse_spec->args[0]; + if (!np) + return -ENODEV; + + pcdev->pi = kcalloc(pcdev->nr_lines, sizeof(*pcdev->pi), GFP_KERNEL); + if (!pcdev->pi) + return -ENOMEM; + + pis = of_get_child_by_name(np, "pse-pis"); + if (!pis) { + /* no description of PSE PIs */ + pcdev->no_of_pse_pi = true; + return 0; + } + + for_each_child_of_node(pis, node) { + struct pse_pi pi = {0}; + u32 id; + + if (!of_node_name_eq(node, "pse-pi")) + continue; + + ret = of_property_read_u32(node, "reg", &id); + if (ret) { + dev_err(pcdev->dev, + "can't get reg property for node '%pOF'", + node); + goto out; + } + + if (id >= pcdev->nr_lines) { + dev_err(pcdev->dev, + "reg value (%u) is out of range (%u) (%pOF)\n", + id, pcdev->nr_lines, node); + ret = -EINVAL; + goto out; + } + + if (pcdev->pi[id].np) { + dev_err(pcdev->dev, + "other node with same reg value was already registered. 
%pOF : %pOF\n", + pcdev->pi[id].np, node); + ret = -EINVAL; + goto out; + } + + ret = of_count_phandle_with_args(node, "pairsets", NULL); + /* npairsets is limited to value one or two */ + if (ret == 1 || ret == 2) { + ret = of_load_pse_pi_pairsets(node, &pi, ret); + if (ret) + goto out; + } else if (ret != ENOENT) { + dev_err(pcdev->dev, + "error: wrong number of pairsets. Should be 1 or 2, got %d (%pOF)\n", + ret, node); + ret = -EINVAL; + goto out; + } + + of_node_get(node); + pi.np = node; + memcpy(&pcdev->pi[id], &pi, sizeof(pi)); + } + + of_node_put(pis); + return 0; + +out: + pse_release_pis(pcdev); + of_node_put(node); + of_node_put(pis); + return ret; +} + +static int pse_pi_is_enabled(struct regulator_dev *rdev) +{ + struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev); + const struct pse_controller_ops *ops; + int id, ret; + + ops = pcdev->ops; + if (!ops->pi_is_enabled) + return -EOPNOTSUPP; + + id = rdev_get_id(rdev); + mutex_lock(&pcdev->lock); + ret = ops->pi_is_enabled(pcdev, id); + mutex_unlock(&pcdev->lock); + + return ret; +} + +static int pse_pi_enable(struct regulator_dev *rdev) +{ + struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev); + const struct pse_controller_ops *ops; + int id, ret; + + ops = pcdev->ops; + if (!ops->pi_enable) + return -EOPNOTSUPP; + + id = rdev_get_id(rdev); + mutex_lock(&pcdev->lock); + ret = ops->pi_enable(pcdev, id); + if (!ret) + pcdev->pi[id].admin_state_enabled = 1; + mutex_unlock(&pcdev->lock); + + return ret; +} + +static int pse_pi_disable(struct regulator_dev *rdev) +{ + struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev); + const struct pse_controller_ops *ops; + int id, ret; + + ops = pcdev->ops; + if (!ops->pi_disable) + return -EOPNOTSUPP; + + id = rdev_get_id(rdev); + mutex_lock(&pcdev->lock); + ret = ops->pi_disable(pcdev, id); + if (!ret) + pcdev->pi[id].admin_state_enabled = 0; + mutex_unlock(&pcdev->lock); + + return ret; +} + +static const struct regulator_ops pse_pi_ops = { + .is_enabled = pse_pi_is_enabled, + .enable = pse_pi_enable, + .disable = pse_pi_disable, +}; + +static int +devm_pse_pi_regulator_register(struct pse_controller_dev *pcdev, + char *name, int id) +{ + struct regulator_init_data *rinit_data; + struct regulator_config rconfig = {0}; + struct regulator_desc *rdesc; + struct regulator_dev *rdev; + + rinit_data = devm_kzalloc(pcdev->dev, sizeof(*rinit_data), + GFP_KERNEL); + if (!rinit_data) + return -ENOMEM; + + rdesc = devm_kzalloc(pcdev->dev, sizeof(*rdesc), GFP_KERNEL); + if (!rdesc) + return -ENOMEM; + + /* Regulator descriptor id have to be the same as its associated + * PSE PI id for the well functioning of the PSE controls. 
+ */ + rdesc->id = id; + rdesc->name = name; + rdesc->type = REGULATOR_VOLTAGE; + rdesc->ops = &pse_pi_ops; + rdesc->owner = pcdev->owner; + + rinit_data->constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS; + rinit_data->supply_regulator = "vpwr"; + + rconfig.dev = pcdev->dev; + rconfig.driver_data = pcdev; + rconfig.init_data = rinit_data; + + rdev = devm_regulator_register(pcdev->dev, rdesc, &rconfig); + if (IS_ERR(rdev)) { + dev_err_probe(pcdev->dev, PTR_ERR(rdev), + "Failed to register regulator\n"); + return PTR_ERR(rdev); + } + + pcdev->pi[id].rdev = rdev; + + return 0; } /** * pse_controller_register - register a PSE controller device * @pcdev: a pointer to the initialized PSE controller device + * + * Return: 0 on success and failure value on error */ int pse_controller_register(struct pse_controller_dev *pcdev) { - if (!pcdev->of_xlate) { - if (pcdev->of_pse_n_cells == 0) - pcdev->of_xlate = of_pse_zero_xlate; - else if (pcdev->of_pse_n_cells == 1) - pcdev->of_xlate = of_pse_simple_xlate; - } + size_t reg_name_len; + int ret, i; mutex_init(&pcdev->lock); INIT_LIST_HEAD(&pcdev->pse_control_head); + if (!pcdev->nr_lines) + pcdev->nr_lines = 1; + + ret = of_load_pse_pis(pcdev); + if (ret) + return ret; + + if (pcdev->ops->setup_pi_matrix) { + ret = pcdev->ops->setup_pi_matrix(pcdev); + if (ret) + return ret; + } + + /* Each regulator name len is pcdev dev name + 7 char + + * int max digit number (10) + 1 + */ + reg_name_len = strlen(dev_name(pcdev->dev)) + 18; + + /* Register PI regulators */ + for (i = 0; i < pcdev->nr_lines; i++) { + char *reg_name; + + /* Do not register regulator for PIs not described */ + if (!pcdev->no_of_pse_pi && !pcdev->pi[i].np) + continue; + + reg_name = devm_kzalloc(pcdev->dev, reg_name_len, GFP_KERNEL); + if (!reg_name) + return -ENOMEM; + + snprintf(reg_name, reg_name_len, "pse-%s_pi%d", + dev_name(pcdev->dev), i); + + ret = devm_pse_pi_regulator_register(pcdev, reg_name, i); + if (ret) + return ret; + } + mutex_lock(&pse_list_mutex); list_add(&pcdev->list, &pse_controller_list); mutex_unlock(&pse_list_mutex); @@ -91,6 +383,7 @@ EXPORT_SYMBOL_GPL(pse_controller_register); */ void pse_controller_unregister(struct pse_controller_dev *pcdev) { + pse_release_pis(pcdev); mutex_lock(&pse_list_mutex); list_del(&pcdev->list); mutex_unlock(&pse_list_mutex); @@ -110,6 +403,8 @@ static void devm_pse_controller_release(struct device *dev, void *res) * Managed pse_controller_register(). For PSE controllers registered by * this function, pse_controller_unregister() is automatically called on * driver detach. See pse_controller_register() for more information. 
+ * + * Return: 0 on success and failure value on error */ int devm_pse_controller_register(struct device *dev, struct pse_controller_dev *pcdev) @@ -144,6 +439,10 @@ static void __pse_control_release(struct kref *kref) lockdep_assert_held(&pse_list_mutex); + if (psec->pcdev->pi[psec->id].admin_state_enabled) + regulator_disable(psec->ps); + devm_regulator_put(psec->ps); + module_put(psec->pcdev->owner); list_del(&psec->list); @@ -176,6 +475,7 @@ static struct pse_control * pse_control_get_internal(struct pse_controller_dev *pcdev, unsigned int index) { struct pse_control *psec; + int ret; lockdep_assert_held(&pse_list_mutex); @@ -191,20 +491,82 @@ pse_control_get_internal(struct pse_controller_dev *pcdev, unsigned int index) return ERR_PTR(-ENOMEM); if (!try_module_get(pcdev->owner)) { - kfree(psec); - return ERR_PTR(-ENODEV); + ret = -ENODEV; + goto free_psec; } + psec->ps = devm_regulator_get_exclusive(pcdev->dev, + rdev_get_name(pcdev->pi[index].rdev)); + if (IS_ERR(psec->ps)) { + ret = PTR_ERR(psec->ps); + goto put_module; + } + + ret = regulator_is_enabled(psec->ps); + if (ret < 0) + goto regulator_put; + + pcdev->pi[index].admin_state_enabled = ret; + psec->pcdev = pcdev; list_add(&psec->list, &pcdev->pse_control_head); psec->id = index; kref_init(&psec->refcnt); return psec; + +regulator_put: + devm_regulator_put(psec->ps); +put_module: + module_put(pcdev->owner); +free_psec: + kfree(psec); + + return ERR_PTR(ret); +} + +/** + * of_pse_match_pi - Find the PSE PI id matching the device node phandle + * @pcdev: a pointer to the PSE controller device + * @np: a pointer to the device node + * + * Return: id of the PSE PI, -EINVAL if not found + */ +static int of_pse_match_pi(struct pse_controller_dev *pcdev, + struct device_node *np) +{ + int i; + + for (i = 0; i <= pcdev->nr_lines; i++) { + if (pcdev->pi[i].np == np) + return i; + } + + return -EINVAL; +} + +/** + * psec_id_xlate - translate pse_spec to the PSE line number according + * to the number of pse-cells in case of no pse_pi node + * @pcdev: a pointer to the PSE controller device + * @pse_spec: PSE line specifier as found in the device tree + * + * Return: 0 if #pse-cells = <0>. Return PSE line number otherwise. 
+ */ +static int psec_id_xlate(struct pse_controller_dev *pcdev, + const struct of_phandle_args *pse_spec) +{ + if (!pcdev->of_pse_n_cells) + return 0; + + if (pcdev->of_pse_n_cells > 1 || + pse_spec->args[0] >= pcdev->nr_lines) + return -EINVAL; + + return pse_spec->args[0]; } -struct pse_control * -of_pse_control_get(struct device_node *node) +struct pse_control *of_pse_control_get(struct device_node *node) { struct pse_controller_dev *r, *pcdev; struct of_phandle_args args; @@ -222,7 +584,14 @@ of_pse_control_get(struct device_node *node) mutex_lock(&pse_list_mutex); pcdev = NULL; list_for_each_entry(r, &pse_controller_list, list) { - if (args.np == r->dev->of_node) { + if (!r->no_of_pse_pi) { + ret = of_pse_match_pi(r, args.np); + if (ret >= 0) { + pcdev = r; + psec_id = ret; + break; + } + } else if (args.np == r->dev->of_node) { pcdev = r; break; } @@ -238,10 +607,12 @@ of_pse_control_get(struct device_node *node) goto out; } - psec_id = pcdev->of_xlate(pcdev, &args); - if (psec_id < 0) { - psec = ERR_PTR(psec_id); - goto out; + if (pcdev->no_of_pse_pi) { + psec_id = psec_id_xlate(pcdev, &args); + if (psec_id < 0) { + psec = ERR_PTR(psec_id); + goto out; + } } /* pse_list_mutex also protects the pcdev's pse_control list */ @@ -260,6 +631,8 @@ EXPORT_SYMBOL_GPL(of_pse_control_get); * @psec: PSE control pointer * @extack: extack for reporting useful error messages * @status: struct to store PSE status + * + * Return: 0 on success and failure value on error */ int pse_ethtool_get_status(struct pse_control *psec, struct netlink_ext_ack *extack, @@ -284,31 +657,89 @@ int pse_ethtool_get_status(struct pse_control *psec, } EXPORT_SYMBOL_GPL(pse_ethtool_get_status); +static int pse_ethtool_c33_set_config(struct pse_control *psec, + const struct pse_control_config *config) +{ + int err = 0; + + /* Look at admin_state_enabled status to not call regulator_enable + * or regulator_disable twice creating a regulator counter mismatch + */ + switch (config->c33_admin_control) { + case ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED: + if (!psec->pcdev->pi[psec->id].admin_state_enabled) + err = regulator_enable(psec->ps); + break; + case ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED: + if (psec->pcdev->pi[psec->id].admin_state_enabled) + err = regulator_disable(psec->ps); + break; + default: + err = -EOPNOTSUPP; + } + + return err; +} + +static int pse_ethtool_podl_set_config(struct pse_control *psec, + const struct pse_control_config *config) +{ + int err = 0; + + /* Look at admin_state_enabled status to not call regulator_enable + * or regulator_disable twice creating a regulator counter mismatch + */ + switch (config->podl_admin_control) { + case ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED: + if (!psec->pcdev->pi[psec->id].admin_state_enabled) + err = regulator_enable(psec->ps); + break; + case ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED: + if (psec->pcdev->pi[psec->id].admin_state_enabled) + err = regulator_disable(psec->ps); + break; + default: + err = -EOPNOTSUPP; + } + + return err; +} + /** * pse_ethtool_set_config - set PSE control configuration * @psec: PSE control pointer * @extack: extack for reporting useful error messages * @config: Configuration of the test to run + * + * Return: 0 on success and failure value on error */ int pse_ethtool_set_config(struct pse_control *psec, struct netlink_ext_ack *extack, const struct pse_control_config *config) { - const struct pse_controller_ops *ops; - int err; - - ops = psec->pcdev->ops; + int err = 0; - if (!ops->ethtool_set_config) { - NL_SET_ERR_MSG(extack, - "PSE driver does 
not configuration"); - return -EOPNOTSUPP; + if (pse_has_c33(psec)) { + err = pse_ethtool_c33_set_config(psec, config); + if (err) + return err; } - mutex_lock(&psec->pcdev->lock); - err = ops->ethtool_set_config(psec->pcdev, psec->id, extack, config); - mutex_unlock(&psec->pcdev->lock); + if (pse_has_podl(psec)) + err = pse_ethtool_podl_set_config(psec, config); return err; } EXPORT_SYMBOL_GPL(pse_ethtool_set_config); + +bool pse_has_podl(struct pse_control *psec) +{ + return psec->pcdev->types & ETHTOOL_PSE_PODL; +} +EXPORT_SYMBOL_GPL(pse_has_podl); + +bool pse_has_c33(struct pse_control *psec) +{ + return psec->pcdev->types & ETHTOOL_PSE_C33; +} +EXPORT_SYMBOL_GPL(pse_has_c33); diff --git a/drivers/net/pse-pd/pse_regulator.c b/drivers/net/pse-pd/pse_regulator.c index e2bf8306ca90..64ab36974fe0 100644 --- a/drivers/net/pse-pd/pse_regulator.c +++ b/drivers/net/pse-pd/pse_regulator.c @@ -24,38 +24,42 @@ static struct pse_reg_priv *to_pse_reg(struct pse_controller_dev *pcdev) } static int -pse_reg_ethtool_set_config(struct pse_controller_dev *pcdev, unsigned long id, - struct netlink_ext_ack *extack, - const struct pse_control_config *config) +pse_reg_pi_enable(struct pse_controller_dev *pcdev, int id) { struct pse_reg_priv *priv = to_pse_reg(pcdev); int ret; - if (priv->admin_state == config->admin_cotrol) - return 0; - - switch (config->admin_cotrol) { - case ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED: - ret = regulator_enable(priv->ps); - break; - case ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED: - ret = regulator_disable(priv->ps); - break; - default: - dev_err(pcdev->dev, "Unknown admin state %i\n", - config->admin_cotrol); - ret = -ENOTSUPP; - } - + ret = regulator_enable(priv->ps); if (ret) return ret; - priv->admin_state = config->admin_cotrol; + priv->admin_state = ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED; + return 0; +} + +static int +pse_reg_pi_disable(struct pse_controller_dev *pcdev, int id) +{ + struct pse_reg_priv *priv = to_pse_reg(pcdev); + int ret; + ret = regulator_disable(priv->ps); + if (ret) + return ret; + + priv->admin_state = ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED; return 0; } static int +pse_reg_pi_is_enabled(struct pse_controller_dev *pcdev, int id) +{ + struct pse_reg_priv *priv = to_pse_reg(pcdev); + + return regulator_is_enabled(priv->ps); +} + +static int pse_reg_ethtool_get_status(struct pse_controller_dev *pcdev, unsigned long id, struct netlink_ext_ack *extack, struct pse_control_status *status) @@ -80,7 +84,9 @@ pse_reg_ethtool_get_status(struct pse_controller_dev *pcdev, unsigned long id, static const struct pse_controller_ops pse_reg_ops = { .ethtool_get_status = pse_reg_ethtool_get_status, - .ethtool_set_config = pse_reg_ethtool_set_config, + .pi_enable = pse_reg_pi_enable, + .pi_is_enabled = pse_reg_pi_is_enabled, + .pi_disable = pse_reg_pi_disable, }; static int @@ -116,6 +122,7 @@ pse_reg_probe(struct platform_device *pdev) priv->pcdev.owner = THIS_MODULE; priv->pcdev.ops = &pse_reg_ops; priv->pcdev.dev = dev; + priv->pcdev.types = ETHTOOL_PSE_PODL; ret = devm_pse_controller_register(dev, &priv->pcdev); if (ret) { dev_err(dev, "failed to register PSE controller (%pe)\n", diff --git a/drivers/net/pse-pd/tps23881.c b/drivers/net/pse-pd/tps23881.c new file mode 100644 index 000000000000..98ffbb1bbf13 --- /dev/null +++ b/drivers/net/pse-pd/tps23881.c @@ -0,0 +1,820 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Driver for the TI TPS23881 PoE PSE Controller driver (I2C bus) + * + * Copyright (c) 2023 Bootlin, Kory Maincent <[email protected]> + */ + +#include 
<linux/delay.h> +#include <linux/firmware.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pse-pd/pse.h> + +#define TPS23881_MAX_CHANS 8 + +#define TPS23881_REG_PW_STATUS 0x10 +#define TPS23881_REG_OP_MODE 0x12 +#define TPS23881_OP_MODE_SEMIAUTO 0xaaaa +#define TPS23881_REG_DIS_EN 0x13 +#define TPS23881_REG_DET_CLA_EN 0x14 +#define TPS23881_REG_GEN_MASK 0x17 +#define TPS23881_REG_NBITACC BIT(5) +#define TPS23881_REG_PW_EN 0x19 +#define TPS23881_REG_PORT_MAP 0x26 +#define TPS23881_REG_PORT_POWER 0x29 +#define TPS23881_REG_POEPLUS 0x40 +#define TPS23881_REG_TPON BIT(0) +#define TPS23881_REG_FWREV 0x41 +#define TPS23881_REG_DEVID 0x43 +#define TPS23881_REG_SRAM_CTRL 0x60 +#define TPS23881_REG_SRAM_DATA 0x61 + +struct tps23881_port_desc { + u8 chan[2]; + bool is_4p; +}; + +struct tps23881_priv { + struct i2c_client *client; + struct pse_controller_dev pcdev; + struct device_node *np; + struct tps23881_port_desc port[TPS23881_MAX_CHANS]; +}; + +static struct tps23881_priv *to_tps23881_priv(struct pse_controller_dev *pcdev) +{ + return container_of(pcdev, struct tps23881_priv, pcdev); +} + +static int tps23881_pi_enable(struct pse_controller_dev *pcdev, int id) +{ + struct tps23881_priv *priv = to_tps23881_priv(pcdev); + struct i2c_client *client = priv->client; + u8 chan; + u16 val; + int ret; + + if (id >= TPS23881_MAX_CHANS) + return -ERANGE; + + ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS); + if (ret < 0) + return ret; + + chan = priv->port[id].chan[0]; + if (chan < 4) + val = (u16)(ret | BIT(chan)); + else + val = (u16)(ret | BIT(chan + 4)); + + if (priv->port[id].is_4p) { + chan = priv->port[id].chan[1]; + if (chan < 4) + val |= BIT(chan); + else + val |= BIT(chan + 4); + } + + ret = i2c_smbus_write_word_data(client, TPS23881_REG_PW_EN, val); + if (ret) + return ret; + + return 0; +} + +static int tps23881_pi_disable(struct pse_controller_dev *pcdev, int id) +{ + struct tps23881_priv *priv = to_tps23881_priv(pcdev); + struct i2c_client *client = priv->client; + u8 chan; + u16 val; + int ret; + + if (id >= TPS23881_MAX_CHANS) + return -ERANGE; + + ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS); + if (ret < 0) + return ret; + + chan = priv->port[id].chan[0]; + if (chan < 4) + val = (u16)(ret | BIT(chan + 4)); + else + val = (u16)(ret | BIT(chan + 8)); + + if (priv->port[id].is_4p) { + chan = priv->port[id].chan[1]; + if (chan < 4) + val |= BIT(chan + 4); + else + val |= BIT(chan + 8); + } + + ret = i2c_smbus_write_word_data(client, TPS23881_REG_PW_EN, val); + if (ret) + return ret; + + return 0; +} + +static int tps23881_pi_is_enabled(struct pse_controller_dev *pcdev, int id) +{ + struct tps23881_priv *priv = to_tps23881_priv(pcdev); + struct i2c_client *client = priv->client; + bool enabled; + u8 chan; + int ret; + + ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS); + if (ret < 0) + return ret; + + chan = priv->port[id].chan[0]; + if (chan < 4) + enabled = ret & BIT(chan); + else + enabled = ret & BIT(chan + 4); + + if (priv->port[id].is_4p) { + chan = priv->port[id].chan[1]; + if (chan < 4) + enabled &= !!(ret & BIT(chan)); + else + enabled &= !!(ret & BIT(chan + 4)); + } + + /* Return enabled status only if both channel are on this state */ + return enabled; +} + +static int tps23881_ethtool_get_status(struct pse_controller_dev *pcdev, + unsigned long id, + struct netlink_ext_ack *extack, + struct pse_control_status *status) +{ + struct tps23881_priv *priv = 
to_tps23881_priv(pcdev); + struct i2c_client *client = priv->client; + bool enabled, delivering; + u8 chan; + int ret; + + ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS); + if (ret < 0) + return ret; + + chan = priv->port[id].chan[0]; + if (chan < 4) { + enabled = ret & BIT(chan); + delivering = ret & BIT(chan + 4); + } else { + enabled = ret & BIT(chan + 4); + delivering = ret & BIT(chan + 8); + } + + if (priv->port[id].is_4p) { + chan = priv->port[id].chan[1]; + if (chan < 4) { + enabled &= !!(ret & BIT(chan)); + delivering &= !!(ret & BIT(chan + 4)); + } else { + enabled &= !!(ret & BIT(chan + 4)); + delivering &= !!(ret & BIT(chan + 8)); + } + } + + /* Return delivering status only if both channel are on this state */ + if (delivering) + status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING; + else + status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED; + + /* Return enabled status only if both channel are on this state */ + if (enabled) + status->c33_admin_state = ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED; + else + status->c33_admin_state = ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED; + + return 0; +} + +/* Parse managers subnode into a array of device node */ +static int +tps23881_get_of_channels(struct tps23881_priv *priv, + struct device_node *chan_node[TPS23881_MAX_CHANS]) +{ + struct device_node *channels_node, *node; + int i, ret; + + if (!priv->np) + return -EINVAL; + + channels_node = of_find_node_by_name(priv->np, "channels"); + if (!channels_node) + return -EINVAL; + + for_each_child_of_node(channels_node, node) { + u32 chan_id; + + if (!of_node_name_eq(node, "channel")) + continue; + + ret = of_property_read_u32(node, "reg", &chan_id); + if (ret) { + ret = -EINVAL; + goto out; + } + + if (chan_id >= TPS23881_MAX_CHANS || chan_node[chan_id]) { + dev_err(&priv->client->dev, + "wrong number of port (%d)\n", chan_id); + ret = -EINVAL; + goto out; + } + + of_node_get(node); + chan_node[chan_id] = node; + } + + of_node_put(channels_node); + return 0; + +out: + for (i = 0; i < TPS23881_MAX_CHANS; i++) { + of_node_put(chan_node[i]); + chan_node[i] = NULL; + } + + of_node_put(node); + of_node_put(channels_node); + return ret; +} + +struct tps23881_port_matrix { + u8 pi_id; + u8 lgcl_chan[2]; + u8 hw_chan[2]; + bool is_4p; + bool exist; +}; + +static int +tps23881_match_channel(const struct pse_pi_pairset *pairset, + struct device_node *chan_node[TPS23881_MAX_CHANS]) +{ + int i; + + /* Look on every channels */ + for (i = 0; i < TPS23881_MAX_CHANS; i++) { + if (pairset->np == chan_node[i]) + return i; + } + + return -ENODEV; +} + +static bool +tps23881_is_chan_free(struct tps23881_port_matrix port_matrix[TPS23881_MAX_CHANS], + int chan) +{ + int i; + + for (i = 0; i < TPS23881_MAX_CHANS; i++) { + if (port_matrix[i].exist && + (port_matrix[i].hw_chan[0] == chan || + port_matrix[i].hw_chan[1] == chan)) + return false; + } + + return true; +} + +/* Fill port matrix with the matching channels */ +static int +tps23881_match_port_matrix(struct pse_pi *pi, int pi_id, + struct device_node *chan_node[TPS23881_MAX_CHANS], + struct tps23881_port_matrix port_matrix[TPS23881_MAX_CHANS]) +{ + int ret; + + if (!pi->pairset[0].np) + return 0; + + ret = tps23881_match_channel(&pi->pairset[0], chan_node); + if (ret < 0) + return ret; + + if (!tps23881_is_chan_free(port_matrix, ret)) { + pr_err("tps23881: channel %d already used\n", ret); + return -ENODEV; + } + + port_matrix[pi_id].hw_chan[0] = ret; + port_matrix[pi_id].exist = true; + + if (!pi->pairset[1].np) + return 0; + + ret 
= tps23881_match_channel(&pi->pairset[1], chan_node); + if (ret < 0) + return ret; + + if (!tps23881_is_chan_free(port_matrix, ret)) { + pr_err("tps23881: channel %d already used\n", ret); + return -ENODEV; + } + + if (port_matrix[pi_id].hw_chan[0] / 4 != ret / 4) { + pr_err("tps23881: 4-pair PSE can only be set within the same 4 ports group"); + return -ENODEV; + } + + port_matrix[pi_id].hw_chan[1] = ret; + port_matrix[pi_id].is_4p = true; + + return 0; +} + +static int +tps23881_get_unused_chan(struct tps23881_port_matrix port_matrix[TPS23881_MAX_CHANS], + int port_cnt) +{ + bool used; + int i, j; + + for (i = 0; i < TPS23881_MAX_CHANS; i++) { + used = false; + + for (j = 0; j < port_cnt; j++) { + if (port_matrix[j].hw_chan[0] == i) { + used = true; + break; + } + + if (port_matrix[j].is_4p && + port_matrix[j].hw_chan[1] == i) { + used = true; + break; + } + } + + if (!used) + return i; + } + + return -ENODEV; +} + +/* Sort the port matrix to following particular hardware ports matrix + * specification of the tps23881. The device has two 4-ports groups and + * each 4-pair powered device has to be configured to use two consecutive + * logical channel in each 4 ports group (1 and 2 or 3 and 4). Also the + * hardware matrix has to be fully configured even with unused chan to be + * valid. + */ +static int +tps23881_sort_port_matrix(struct tps23881_port_matrix port_matrix[TPS23881_MAX_CHANS]) +{ + struct tps23881_port_matrix tmp_port_matrix[TPS23881_MAX_CHANS] = {0}; + int i, ret, port_cnt = 0, cnt_4ch_grp1 = 0, cnt_4ch_grp2 = 4; + + /* Configure 4p port matrix */ + for (i = 0; i < TPS23881_MAX_CHANS; i++) { + int *cnt; + + if (!port_matrix[i].exist || !port_matrix[i].is_4p) + continue; + + if (port_matrix[i].hw_chan[0] < 4) + cnt = &cnt_4ch_grp1; + else + cnt = &cnt_4ch_grp2; + + tmp_port_matrix[port_cnt].exist = true; + tmp_port_matrix[port_cnt].is_4p = true; + tmp_port_matrix[port_cnt].pi_id = i; + tmp_port_matrix[port_cnt].hw_chan[0] = port_matrix[i].hw_chan[0]; + tmp_port_matrix[port_cnt].hw_chan[1] = port_matrix[i].hw_chan[1]; + + /* 4-pair ports have to be configured with consecutive + * logical channels 0 and 1, 2 and 3. 
+ */ + tmp_port_matrix[port_cnt].lgcl_chan[0] = (*cnt)++; + tmp_port_matrix[port_cnt].lgcl_chan[1] = (*cnt)++; + + port_cnt++; + } + + /* Configure 2p port matrix */ + for (i = 0; i < TPS23881_MAX_CHANS; i++) { + int *cnt; + + if (!port_matrix[i].exist || port_matrix[i].is_4p) + continue; + + if (port_matrix[i].hw_chan[0] < 4) + cnt = &cnt_4ch_grp1; + else + cnt = &cnt_4ch_grp2; + + tmp_port_matrix[port_cnt].exist = true; + tmp_port_matrix[port_cnt].pi_id = i; + tmp_port_matrix[port_cnt].lgcl_chan[0] = (*cnt)++; + tmp_port_matrix[port_cnt].hw_chan[0] = port_matrix[i].hw_chan[0]; + + port_cnt++; + } + + /* Complete the rest of the first 4 port group matrix even if + * channels are unused + */ + while (cnt_4ch_grp1 < 4) { + ret = tps23881_get_unused_chan(tmp_port_matrix, port_cnt); + if (ret < 0) { + pr_err("tps23881: port matrix issue, no chan available\n"); + return ret; + } + + if (port_cnt >= TPS23881_MAX_CHANS) { + pr_err("tps23881: wrong number of channels\n"); + return -ENODEV; + } + tmp_port_matrix[port_cnt].lgcl_chan[0] = cnt_4ch_grp1; + tmp_port_matrix[port_cnt].hw_chan[0] = ret; + cnt_4ch_grp1++; + port_cnt++; + } + + /* Complete the rest of the second 4 port group matrix even if + * channels are unused + */ + while (cnt_4ch_grp2 < 8) { + ret = tps23881_get_unused_chan(tmp_port_matrix, port_cnt); + if (ret < 0) { + pr_err("tps23881: port matrix issue, no chan available\n"); + return -ENODEV; + } + + if (port_cnt >= TPS23881_MAX_CHANS) { + pr_err("tps23881: wrong number of channels\n"); + return -ENODEV; + } + tmp_port_matrix[port_cnt].lgcl_chan[0] = cnt_4ch_grp2; + tmp_port_matrix[port_cnt].hw_chan[0] = ret; + cnt_4ch_grp2++; + port_cnt++; + } + + memcpy(port_matrix, tmp_port_matrix, sizeof(tmp_port_matrix)); + + return port_cnt; +} + +/* Write port matrix to the hardware port matrix and the software port + * matrix. 
+ */ +static int +tps23881_write_port_matrix(struct tps23881_priv *priv, + struct tps23881_port_matrix port_matrix[TPS23881_MAX_CHANS], + int port_cnt) +{ + struct i2c_client *client = priv->client; + u8 pi_id, lgcl_chan, hw_chan; + u16 val = 0; + int i, ret; + + for (i = 0; i < port_cnt; i++) { + pi_id = port_matrix[i].pi_id; + lgcl_chan = port_matrix[i].lgcl_chan[0]; + hw_chan = port_matrix[i].hw_chan[0] % 4; + + /* Set software port matrix for existing ports */ + if (port_matrix[i].exist) + priv->port[pi_id].chan[0] = lgcl_chan; + + /* Set hardware port matrix for all ports */ + val |= hw_chan << (lgcl_chan * 2); + + if (!port_matrix[i].is_4p) + continue; + + lgcl_chan = port_matrix[i].lgcl_chan[1]; + hw_chan = port_matrix[i].hw_chan[1] % 4; + + /* Set software port matrix for existing ports */ + if (port_matrix[i].exist) { + priv->port[pi_id].is_4p = true; + priv->port[pi_id].chan[1] = lgcl_chan; + } + + /* Set hardware port matrix for all ports */ + val |= hw_chan << (lgcl_chan * 2); + } + + /* Write hardware ports matrix */ + ret = i2c_smbus_write_word_data(client, TPS23881_REG_PORT_MAP, val); + if (ret) + return ret; + + return 0; +} + +static int +tps23881_set_ports_conf(struct tps23881_priv *priv, + struct tps23881_port_matrix port_matrix[TPS23881_MAX_CHANS]) +{ + struct i2c_client *client = priv->client; + int i, ret; + u16 val; + + /* Set operating mode */ + ret = i2c_smbus_write_word_data(client, TPS23881_REG_OP_MODE, + TPS23881_OP_MODE_SEMIAUTO); + if (ret) + return ret; + + /* Disable DC disconnect */ + ret = i2c_smbus_write_word_data(client, TPS23881_REG_DIS_EN, 0x0); + if (ret) + return ret; + + /* Set port power allocation */ + val = 0; + for (i = 0; i < TPS23881_MAX_CHANS; i++) { + if (!port_matrix[i].exist) + continue; + + if (port_matrix[i].is_4p) + val |= 0xf << ((port_matrix[i].lgcl_chan[0] / 2) * 4); + else + val |= 0x3 << ((port_matrix[i].lgcl_chan[0] / 2) * 4); + } + ret = i2c_smbus_write_word_data(client, TPS23881_REG_PORT_POWER, val); + if (ret) + return ret; + + /* Enable detection and classification */ + val = 0; + for (i = 0; i < TPS23881_MAX_CHANS; i++) { + if (!port_matrix[i].exist) + continue; + + val |= BIT(port_matrix[i].lgcl_chan[0]) | + BIT(port_matrix[i].lgcl_chan[0] + 4); + if (port_matrix[i].is_4p) + val |= BIT(port_matrix[i].lgcl_chan[1]) | + BIT(port_matrix[i].lgcl_chan[1] + 4); + } + ret = i2c_smbus_write_word_data(client, TPS23881_REG_DET_CLA_EN, val); + if (ret) + return ret; + + return 0; +} + +static int +tps23881_set_ports_matrix(struct tps23881_priv *priv, + struct device_node *chan_node[TPS23881_MAX_CHANS]) +{ + struct tps23881_port_matrix port_matrix[TPS23881_MAX_CHANS] = {0}; + int i, ret; + + /* Update with values for every PSE PIs */ + for (i = 0; i < TPS23881_MAX_CHANS; i++) { + ret = tps23881_match_port_matrix(&priv->pcdev.pi[i], i, + chan_node, port_matrix); + if (ret) + return ret; + } + + ret = tps23881_sort_port_matrix(port_matrix); + if (ret < 0) + return ret; + + ret = tps23881_write_port_matrix(priv, port_matrix, ret); + if (ret) + return ret; + + ret = tps23881_set_ports_conf(priv, port_matrix); + if (ret) + return ret; + + return 0; +} + +static int tps23881_setup_pi_matrix(struct pse_controller_dev *pcdev) +{ + struct device_node *chan_node[TPS23881_MAX_CHANS] = {NULL}; + struct tps23881_priv *priv = to_tps23881_priv(pcdev); + int ret, i; + + ret = tps23881_get_of_channels(priv, chan_node); + if (ret < 0) { + dev_warn(&priv->client->dev, + "Unable to parse port-matrix, default matrix will be used\n"); + return 0; + } + + ret 
= tps23881_set_ports_matrix(priv, chan_node); + + for (i = 0; i < TPS23881_MAX_CHANS; i++) + of_node_put(chan_node[i]); + + return ret; +} + +static const struct pse_controller_ops tps23881_ops = { + .setup_pi_matrix = tps23881_setup_pi_matrix, + .pi_enable = tps23881_pi_enable, + .pi_disable = tps23881_pi_disable, + .pi_is_enabled = tps23881_pi_is_enabled, + .ethtool_get_status = tps23881_ethtool_get_status, +}; + +static const char fw_parity_name[] = "ti/tps23881/tps23881-parity-14.bin"; +static const char fw_sram_name[] = "ti/tps23881/tps23881-sram-14.bin"; + +struct tps23881_fw_conf { + u8 reg; + u8 val; +}; + +static const struct tps23881_fw_conf tps23881_fw_parity_conf[] = { + {.reg = 0x60, .val = 0x01}, + {.reg = 0x62, .val = 0x00}, + {.reg = 0x63, .val = 0x80}, + {.reg = 0x60, .val = 0xC4}, + {.reg = 0x1D, .val = 0xBC}, + {.reg = 0xD7, .val = 0x02}, + {.reg = 0x91, .val = 0x00}, + {.reg = 0x90, .val = 0x00}, + {.reg = 0xD7, .val = 0x00}, + {.reg = 0x1D, .val = 0x00}, + { /* sentinel */ } +}; + +static const struct tps23881_fw_conf tps23881_fw_sram_conf[] = { + {.reg = 0x60, .val = 0xC5}, + {.reg = 0x62, .val = 0x00}, + {.reg = 0x63, .val = 0x80}, + {.reg = 0x60, .val = 0xC0}, + {.reg = 0x1D, .val = 0xBC}, + {.reg = 0xD7, .val = 0x02}, + {.reg = 0x91, .val = 0x00}, + {.reg = 0x90, .val = 0x00}, + {.reg = 0xD7, .val = 0x00}, + {.reg = 0x1D, .val = 0x00}, + { /* sentinel */ } +}; + +static int tps23881_flash_sram_fw_part(struct i2c_client *client, + const char *fw_name, + const struct tps23881_fw_conf *fw_conf) +{ + const struct firmware *fw = NULL; + int i, ret; + + ret = request_firmware(&fw, fw_name, &client->dev); + if (ret) + return ret; + + dev_dbg(&client->dev, "Flashing %s\n", fw_name); + + /* Prepare device for RAM download */ + while (fw_conf->reg) { + ret = i2c_smbus_write_byte_data(client, fw_conf->reg, + fw_conf->val); + if (ret) + goto out; + + fw_conf++; + } + + /* Flash the firmware file */ + for (i = 0; i < fw->size; i++) { + ret = i2c_smbus_write_byte_data(client, + TPS23881_REG_SRAM_DATA, + fw->data[i]); + if (ret) + goto out; + } + +out: + release_firmware(fw); + return ret; +} + +static int tps23881_flash_sram_fw(struct i2c_client *client) +{ + int ret; + + ret = tps23881_flash_sram_fw_part(client, fw_parity_name, + tps23881_fw_parity_conf); + if (ret) + return ret; + + ret = tps23881_flash_sram_fw_part(client, fw_sram_name, + tps23881_fw_sram_conf); + if (ret) + return ret; + + ret = i2c_smbus_write_byte_data(client, TPS23881_REG_SRAM_CTRL, 0x18); + if (ret) + return ret; + + mdelay(12); + + return 0; +} + +static int tps23881_i2c_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct tps23881_priv *priv; + int ret; + u8 val; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { + dev_err(dev, "i2c check functionality failed\n"); + return -ENXIO; + } + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + ret = i2c_smbus_read_byte_data(client, TPS23881_REG_DEVID); + if (ret < 0) + return ret; + + if (ret != 0x22) { + dev_err(dev, "Wrong device ID\n"); + return -ENXIO; + } + + ret = tps23881_flash_sram_fw(client); + if (ret < 0) + return ret; + + ret = i2c_smbus_read_byte_data(client, TPS23881_REG_FWREV); + if (ret < 0) + return ret; + + dev_info(&client->dev, "Firmware revision 0x%x\n", ret); + + /* Set configuration B, 16 bit access on a single device address */ + ret = i2c_smbus_read_byte_data(client, TPS23881_REG_GEN_MASK); + if (ret < 0) + return ret; + + val = ret | 
TPS23881_REG_NBITACC; + ret = i2c_smbus_write_byte_data(client, TPS23881_REG_GEN_MASK, val); + if (ret) + return ret; + + priv->client = client; + i2c_set_clientdata(client, priv); + priv->np = dev->of_node; + + priv->pcdev.owner = THIS_MODULE; + priv->pcdev.ops = &tps23881_ops; + priv->pcdev.dev = dev; + priv->pcdev.types = ETHTOOL_PSE_C33; + priv->pcdev.nr_lines = TPS23881_MAX_CHANS; + ret = devm_pse_controller_register(dev, &priv->pcdev); + if (ret) { + return dev_err_probe(dev, ret, + "failed to register PSE controller\n"); + } + + return ret; +} + +static const struct i2c_device_id tps23881_id[] = { + { "tps23881", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, tps23881_id); + +static const struct of_device_id tps23881_of_match[] = { + { .compatible = "ti,tps23881", }, + { }, +}; +MODULE_DEVICE_TABLE(of, tps23881_of_match); + +static struct i2c_driver tps23881_driver = { + .probe = tps23881_i2c_probe, + .id_table = tps23881_id, + .driver = { + .name = "tps23881", + .of_match_table = tps23881_of_match, + }, +}; +module_i2c_driver(tps23881_driver); + +MODULE_AUTHOR("Kory Maincent <[email protected]>"); +MODULE_DESCRIPTION("TI TPS23881 PoE PSE Controller driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c index 0aba3569ccc0..fb362ee248ff 100644 --- a/drivers/net/slip/slip.c +++ b/drivers/net/slip/slip.c @@ -286,7 +286,7 @@ static int sl_realloc_bufs(struct slip *sl, int mtu) } } sl->mtu = mtu; - dev->mtu = mtu; + WRITE_ONCE(dev->mtu, mtu); sl->buffsize = len; err = 0; diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 9f0495e8df4d..bfdd3875fe86 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -754,7 +754,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control, skb_zcopy_init(skb, msg_control); } else if (msg_control) { struct ubuf_info *uarg = msg_control; - uarg->callback(NULL, uarg, false); + uarg->ops->complete(NULL, uarg, false); } dev_queue_xmit(skb); diff --git a/drivers/net/team/Makefile b/drivers/net/team/Makefile index f582d81a5091..7a5aa20d286b 100644 --- a/drivers/net/team/Makefile +++ b/drivers/net/team/Makefile @@ -3,6 +3,7 @@ # Makefile for the network team driver # +team-y:= team_core.o team_nl.o obj-$(CONFIG_NET_TEAM) += team.o obj-$(CONFIG_NET_TEAM_MODE_BROADCAST) += team_mode_broadcast.o obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o diff --git a/drivers/net/team/team.c b/drivers/net/team/team_core.c index 0a44bbdcfb7b..ab1935a4aa2c 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team_core.c @@ -27,6 +27,8 @@ #include <net/sch_generic.h> #include <linux/if_team.h> +#include "team_nl.h" + #define DRV_NAME "team" @@ -1829,7 +1831,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu) team->port_mtu_change_allowed = false; mutex_unlock(&team->lock); - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return 0; @@ -2254,28 +2256,7 @@ static struct rtnl_link_ops team_link_ops __read_mostly = { static struct genl_family team_nl_family; -static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = { - [TEAM_ATTR_UNSPEC] = { .type = NLA_UNSPEC, }, - [TEAM_ATTR_TEAM_IFINDEX] = { .type = NLA_U32 }, - [TEAM_ATTR_LIST_OPTION] = { .type = NLA_NESTED }, - [TEAM_ATTR_LIST_PORT] = { .type = NLA_NESTED }, -}; - -static const struct nla_policy -team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = { - [TEAM_ATTR_OPTION_UNSPEC] = { .type = NLA_UNSPEC, }, - [TEAM_ATTR_OPTION_NAME] = { - .type = NLA_STRING, - .len = TEAM_STRING_MAX_LEN, - }, - [TEAM_ATTR_OPTION_CHANGED] = 
{ .type = NLA_FLAG }, - [TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8 }, - [TEAM_ATTR_OPTION_DATA] = { .type = NLA_BINARY }, - [TEAM_ATTR_OPTION_PORT_IFINDEX] = { .type = NLA_U32 }, - [TEAM_ATTR_OPTION_ARRAY_INDEX] = { .type = NLA_U32 }, -}; - -static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info) +int team_nl_noop_doit(struct sk_buff *skb, struct genl_info *info) { struct sk_buff *msg; void *hdr; @@ -2513,7 +2494,7 @@ errout: return err; } -static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info) +int team_nl_options_get_doit(struct sk_buff *skb, struct genl_info *info) { struct team *team; struct team_option_inst *opt_inst; @@ -2538,7 +2519,7 @@ static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info) static int team_nl_send_event_options_get(struct team *team, struct list_head *sel_opt_inst_list); -static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) +int team_nl_options_set_doit(struct sk_buff *skb, struct genl_info *info) { struct team *team; int err = 0; @@ -2579,7 +2560,7 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) err = nla_parse_nested_deprecated(opt_attrs, TEAM_ATTR_OPTION_MAX, nl_option, - team_nl_option_policy, + team_attr_option_nl_policy, info->extack); if (err) goto team_put; @@ -2802,8 +2783,8 @@ errout: return err; } -static int team_nl_cmd_port_list_get(struct sk_buff *skb, - struct genl_info *info) +int team_nl_port_list_get_doit(struct sk_buff *skb, + struct genl_info *info) { struct team *team; int err; @@ -2820,32 +2801,6 @@ static int team_nl_cmd_port_list_get(struct sk_buff *skb, return err; } -static const struct genl_small_ops team_nl_ops[] = { - { - .cmd = TEAM_CMD_NOOP, - .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .doit = team_nl_cmd_noop, - }, - { - .cmd = TEAM_CMD_OPTIONS_SET, - .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .doit = team_nl_cmd_options_set, - .flags = GENL_ADMIN_PERM, - }, - { - .cmd = TEAM_CMD_OPTIONS_GET, - .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .doit = team_nl_cmd_options_get, - .flags = GENL_ADMIN_PERM, - }, - { - .cmd = TEAM_CMD_PORT_LIST_GET, - .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .doit = team_nl_cmd_port_list_get, - .flags = GENL_ADMIN_PERM, - }, -}; - static const struct genl_multicast_group team_nl_mcgrps[] = { { .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, }, }; @@ -2853,7 +2808,7 @@ static const struct genl_multicast_group team_nl_mcgrps[] = { static struct genl_family team_nl_family __ro_after_init = { .name = TEAM_GENL_NAME, .version = TEAM_GENL_VERSION, - .maxattr = TEAM_ATTR_MAX, + .maxattr = ARRAY_SIZE(team_nl_policy) - 1, .policy = team_nl_policy, .netnsok = true, .module = THIS_MODULE, diff --git a/drivers/net/team/team_nl.c b/drivers/net/team/team_nl.c new file mode 100644 index 000000000000..208424ab78f5 --- /dev/null +++ b/drivers/net/team/team_nl.c @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) +/* Do not edit directly, auto-generated from: */ +/* Documentation/netlink/specs/team.yaml */ +/* YNL-GEN kernel source */ + +#include <net/netlink.h> +#include <net/genetlink.h> + +#include "team_nl.h" + +#include <uapi/linux/if_team.h> + +/* Common nested types */ +const struct nla_policy team_attr_option_nl_policy[TEAM_ATTR_OPTION_ARRAY_INDEX + 1] = { + [TEAM_ATTR_OPTION_NAME] = { .type = NLA_STRING, .len = TEAM_STRING_MAX_LEN, }, + 
[TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG, }, + [TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8, }, + [TEAM_ATTR_OPTION_DATA] = { .type = NLA_BINARY, }, + [TEAM_ATTR_OPTION_REMOVED] = { .type = NLA_FLAG, }, + [TEAM_ATTR_OPTION_PORT_IFINDEX] = { .type = NLA_U32, }, + [TEAM_ATTR_OPTION_ARRAY_INDEX] = { .type = NLA_U32, }, +}; + +const struct nla_policy team_item_option_nl_policy[TEAM_ATTR_ITEM_OPTION + 1] = { + [TEAM_ATTR_ITEM_OPTION] = NLA_POLICY_NESTED(team_attr_option_nl_policy), +}; + +/* Global operation policy for team */ +const struct nla_policy team_nl_policy[TEAM_ATTR_LIST_OPTION + 1] = { + [TEAM_ATTR_TEAM_IFINDEX] = { .type = NLA_U32, }, + [TEAM_ATTR_LIST_OPTION] = NLA_POLICY_NESTED(team_item_option_nl_policy), +}; + +/* Ops table for team */ +const struct genl_small_ops team_nl_ops[4] = { + { + .cmd = TEAM_CMD_NOOP, + .validate = GENL_DONT_VALIDATE_STRICT, + .doit = team_nl_noop_doit, + }, + { + .cmd = TEAM_CMD_OPTIONS_SET, + .validate = GENL_DONT_VALIDATE_STRICT, + .doit = team_nl_options_set_doit, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = TEAM_CMD_OPTIONS_GET, + .validate = GENL_DONT_VALIDATE_STRICT, + .doit = team_nl_options_get_doit, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = TEAM_CMD_PORT_LIST_GET, + .validate = GENL_DONT_VALIDATE_STRICT, + .doit = team_nl_port_list_get_doit, + .flags = GENL_ADMIN_PERM, + }, +}; diff --git a/drivers/net/team/team_nl.h b/drivers/net/team/team_nl.h new file mode 100644 index 000000000000..c9ec1b22ac4d --- /dev/null +++ b/drivers/net/team/team_nl.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* Do not edit directly, auto-generated from: */ +/* Documentation/netlink/specs/team.yaml */ +/* YNL-GEN kernel header */ + +#ifndef _LINUX_TEAM_GEN_H +#define _LINUX_TEAM_GEN_H + +#include <net/netlink.h> +#include <net/genetlink.h> + +#include <uapi/linux/if_team.h> + +/* Common nested types */ +extern const struct nla_policy team_attr_option_nl_policy[TEAM_ATTR_OPTION_ARRAY_INDEX + 1]; +extern const struct nla_policy team_item_option_nl_policy[TEAM_ATTR_ITEM_OPTION + 1]; + +/* Global operation policy for team */ +extern const struct nla_policy team_nl_policy[TEAM_ATTR_LIST_OPTION + 1]; + +/* Ops table for team */ +extern const struct genl_small_ops team_nl_ops[4]; + +int team_nl_noop_doit(struct sk_buff *skb, struct genl_info *info); +int team_nl_options_set_doit(struct sk_buff *skb, struct genl_info *info); +int team_nl_options_get_doit(struct sk_buff *skb, struct genl_info *info); +int team_nl_port_list_get_doit(struct sk_buff *skb, struct genl_info *info); + +#endif /* _LINUX_TEAM_GEN_H */ diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 92da8c03d960..9254bca2813d 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1906,7 +1906,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, skb_zcopy_init(skb, msg_control); } else if (msg_control) { struct ubuf_info *uarg = msg_control; - uarg->callback(NULL, uarg, false); + uarg->ops->complete(NULL, uarg, false); } skb_reset_network_header(skb); diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c index 7b8afa589a53..ff5be2cbf17b 100644 --- a/drivers/net/usb/aqc111.c +++ b/drivers/net/usb/aqc111.c @@ -424,7 +424,7 @@ static int aqc111_change_mtu(struct net_device *net, int new_mtu) u16 reg16 = 0; u8 buf[5]; - net->mtu = new_mtu; + WRITE_ONCE(net->mtu, new_mtu); dev->hard_mtu = net->mtu + net->hard_header_len; aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, @@ -1141,17 +1141,15 
@@ static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb) continue; } - /* Clone SKB */ - new_skb = skb_clone(skb, GFP_ATOMIC); + new_skb = netdev_alloc_skb_ip_align(dev->net, pkt_len); if (!new_skb) goto err; - new_skb->len = pkt_len; + skb_put(new_skb, pkt_len); + memcpy(new_skb->data, skb->data, pkt_len); skb_pull(new_skb, AQ_RX_HW_PAD); - skb_set_tail_pointer(new_skb, new_skb->len); - new_skb->truesize = SKB_TRUESIZE(new_skb->len); if (aqc111_data->rx_checksum) aqc111_rx_checksum(new_skb, pkt_desc); diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index f7cff58fe044..57d6e5abc30e 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -1230,7 +1230,7 @@ static int ax88178_change_mtu(struct net_device *net, int new_mtu) if ((ll_mtu % dev->maxpacket) == 0) return -EDOM; - net->mtu = new_mtu; + WRITE_ONCE(net->mtu, new_mtu); dev->hard_mtu = net->mtu + net->hard_header_len; ax88178_set_mfb(dev); diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index df9d767cb524..51c295e1e823 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -174,6 +174,7 @@ struct ax88179_data { u32 wol_supported; u32 wolopts; u8 disconnecting; + u8 initialized; }; struct ax88179_int_data { @@ -943,7 +944,7 @@ static int ax88179_change_mtu(struct net_device *net, int new_mtu) struct usbnet *dev = netdev_priv(net); u16 tmp16; - net->mtu = new_mtu; + WRITE_ONCE(net->mtu, new_mtu); dev->hard_mtu = net->mtu + net->hard_header_len; if (net->mtu > 1500) { @@ -1277,7 +1278,6 @@ static void ax88179_get_mac_addr(struct usbnet *dev) dev->net->addr_assign_type = NET_ADDR_PERM; } else { netdev_info(dev->net, "invalid MAC address, using random\n"); - eth_hw_addr_random(dev->net); } ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN, ETH_ALEN, @@ -1287,8 +1287,11 @@ static void ax88179_get_mac_addr(struct usbnet *dev) static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf) { struct ax88179_data *ax179_data; + int ret; - usbnet_get_endpoints(dev, intf); + ret = usbnet_get_endpoints(dev, intf); + if (ret < 0) + return ret; ax179_data = kzalloc(sizeof(*ax179_data), GFP_KERNEL); if (!ax179_data) @@ -1673,6 +1676,18 @@ static int ax88179_reset(struct usbnet *dev) return 0; } +static int ax88179_net_reset(struct usbnet *dev) +{ + struct ax88179_data *ax179_data = dev->driver_priv; + + if (ax179_data->initialized) + ax88179_reset(dev); + else + ax179_data->initialized = 1; + + return 0; +} + static int ax88179_stop(struct usbnet *dev) { u16 tmp16; @@ -1692,6 +1707,7 @@ static const struct driver_info ax88179_info = { .unbind = ax88179_unbind, .status = ax88179_status, .link_reset = ax88179_link_reset, + .reset = ax88179_net_reset, .stop = ax88179_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX, .rx_fixup = ax88179_rx_fixup, @@ -1704,6 +1720,7 @@ static const struct driver_info ax88178a_info = { .unbind = ax88179_unbind, .status = ax88179_status, .link_reset = ax88179_link_reset, + .reset = ax88179_net_reset, .stop = ax88179_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX, .rx_fixup = ax88179_rx_fixup, @@ -1716,7 +1733,7 @@ static const struct driver_info cypress_GX3_info = { .unbind = ax88179_unbind, .status = ax88179_status, .link_reset = ax88179_link_reset, - .reset = ax88179_reset, + .reset = ax88179_net_reset, .stop = ax88179_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX, .rx_fixup = ax88179_rx_fixup, @@ -1729,7 +1746,7 @@ static const struct driver_info dlink_dub1312_info = { .unbind = 
ax88179_unbind, .status = ax88179_status, .link_reset = ax88179_link_reset, - .reset = ax88179_reset, + .reset = ax88179_net_reset, .stop = ax88179_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX, .rx_fixup = ax88179_rx_fixup, @@ -1742,7 +1759,7 @@ static const struct driver_info sitecom_info = { .unbind = ax88179_unbind, .status = ax88179_status, .link_reset = ax88179_link_reset, - .reset = ax88179_reset, + .reset = ax88179_net_reset, .stop = ax88179_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX, .rx_fixup = ax88179_rx_fixup, @@ -1755,7 +1772,7 @@ static const struct driver_info samsung_info = { .unbind = ax88179_unbind, .status = ax88179_status, .link_reset = ax88179_link_reset, - .reset = ax88179_reset, + .reset = ax88179_net_reset, .stop = ax88179_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX, .rx_fixup = ax88179_rx_fixup, @@ -1768,7 +1785,7 @@ static const struct driver_info lenovo_info = { .unbind = ax88179_unbind, .status = ax88179_status, .link_reset = ax88179_link_reset, - .reset = ax88179_reset, + .reset = ax88179_net_reset, .stop = ax88179_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX, .rx_fixup = ax88179_rx_fixup, @@ -1781,7 +1798,7 @@ static const struct driver_info belkin_info = { .unbind = ax88179_unbind, .status = ax88179_status, .link_reset = ax88179_link_reset, - .reset = ax88179_reset, + .reset = ax88179_net_reset, .stop = ax88179_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX, .rx_fixup = ax88179_rx_fixup, @@ -1794,7 +1811,7 @@ static const struct driver_info toshiba_info = { .unbind = ax88179_unbind, .status = ax88179_status, .link_reset = ax88179_link_reset, - .reset = ax88179_reset, + .reset = ax88179_net_reset, .stop = ax88179_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX, .rx_fixup = ax88179_rx_fixup, @@ -1807,7 +1824,7 @@ static const struct driver_info mct_info = { .unbind = ax88179_unbind, .status = ax88179_status, .link_reset = ax88179_link_reset, - .reset = ax88179_reset, + .reset = ax88179_net_reset, .stop = ax88179_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX, .rx_fixup = ax88179_rx_fixup, @@ -1820,7 +1837,7 @@ static const struct driver_info at_umc2000_info = { .unbind = ax88179_unbind, .status = ax88179_status, .link_reset = ax88179_link_reset, - .reset = ax88179_reset, + .reset = ax88179_net_reset, .stop = ax88179_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX, .rx_fixup = ax88179_rx_fixup, @@ -1833,7 +1850,7 @@ static const struct driver_info at_umc200_info = { .unbind = ax88179_unbind, .status = ax88179_status, .link_reset = ax88179_link_reset, - .reset = ax88179_reset, + .reset = ax88179_net_reset, .stop = ax88179_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX, .rx_fixup = ax88179_rx_fixup, @@ -1846,7 +1863,7 @@ static const struct driver_info at_umc2000sp_info = { .unbind = ax88179_unbind, .status = ax88179_status, .link_reset = ax88179_link_reset, - .reset = ax88179_reset, + .reset = ax88179_net_reset, .stop = ax88179_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX, .rx_fixup = ax88179_rx_fixup, diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index db05622f1f70..bf76ecccc2e6 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -798,7 +798,7 @@ int cdc_ncm_change_mtu(struct net_device *net, int new_mtu) { struct usbnet *dev = netdev_priv(net); - net->mtu = new_mtu; + WRITE_ONCE(net->mtu, new_mtu); cdc_ncm_set_dgram_size(dev, new_mtu + cdc_ncm_eth_hlen(dev)); return 0; diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 80ee4fcdfb36..5a2c38b63012 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c 
@@ -1692,15 +1692,10 @@ static int lan78xx_get_eee(struct net_device *net, struct ethtool_keee *edata) ret = lan78xx_read_reg(dev, MAC_CR, &buf); if (buf & MAC_CR_EEE_EN_) { - edata->eee_enabled = true; - edata->tx_lpi_enabled = true; /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */ ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf); edata->tx_lpi_timer = buf; } else { - edata->eee_enabled = false; - edata->eee_active = false; - edata->tx_lpi_enabled = false; edata->tx_lpi_timer = 0; } @@ -1721,24 +1716,16 @@ static int lan78xx_set_eee(struct net_device *net, struct ethtool_keee *edata) if (ret < 0) return ret; - if (edata->eee_enabled) { - ret = lan78xx_read_reg(dev, MAC_CR, &buf); - buf |= MAC_CR_EEE_EN_; - ret = lan78xx_write_reg(dev, MAC_CR, buf); - - phy_ethtool_set_eee(net->phydev, edata); - - buf = (u32)edata->tx_lpi_timer; - ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf); - } else { - ret = lan78xx_read_reg(dev, MAC_CR, &buf); - buf &= ~MAC_CR_EEE_EN_; - ret = lan78xx_write_reg(dev, MAC_CR, buf); - } + ret = phy_ethtool_set_eee(net->phydev, edata); + if (ret < 0) + goto out; + buf = (u32)edata->tx_lpi_timer; + ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf); +out: usb_autopm_put_interface(dev->intf); - return 0; + return ret; } static u32 lan78xx_get_link(struct net_device *net) @@ -2114,7 +2101,20 @@ static void lan78xx_remove_mdio(struct lan78xx_net *dev) static void lan78xx_link_status_change(struct net_device *net) { + struct lan78xx_net *dev = netdev_priv(net); struct phy_device *phydev = net->phydev; + u32 data; + int ret; + + ret = lan78xx_read_reg(dev, MAC_CR, &data); + if (ret < 0) + return; + + if (phydev->enable_tx_lpi) + data |= MAC_CR_EEE_EN_; + else + data &= ~MAC_CR_EEE_EN_; + lan78xx_write_reg(dev, MAC_CR, data); phy_print_status(phydev); } @@ -2408,6 +2408,8 @@ static int lan78xx_phy_init(struct lan78xx_net *dev) mii_adv_to_linkmode_adv_t(fc, mii_adv); linkmode_or(phydev->advertising, fc, phydev->advertising); + phy_support_eee(phydev); + if (phydev->mdio.dev.of_node) { u32 reg; int len; @@ -2526,7 +2528,7 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu) ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len); if (!ret) - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); usb_autopm_put_interface(dev->intf); diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index a5469cf5cf67..663e46348ce3 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -121,7 +121,6 @@ static const struct net_device_ops qmimux_netdev_ops = { .ndo_open = qmimux_open, .ndo_stop = qmimux_stop, .ndo_start_xmit = qmimux_start_xmit, - .ndo_get_stats64 = dev_get_tstats64, }; static void qmimux_setup(struct net_device *dev) @@ -133,6 +132,7 @@ static void qmimux_setup(struct net_device *dev) dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; dev->netdev_ops = &qmimux_netdev_ops; dev->mtu = 1500; + dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; dev->needs_free_netdev = true; } @@ -257,12 +257,6 @@ static int qmimux_register_device(struct net_device *real_dev, u8 mux_id) priv->mux_id = mux_id; priv->real_dev = real_dev; - new_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); - if (!new_dev->tstats) { - err = -ENOBUFS; - goto out_free_newdev; - } - new_dev->sysfs_groups[0] = &qmi_wwan_sysfs_qmimux_attr_group; err = register_netdevice(new_dev); @@ -295,7 +289,6 @@ static void qmimux_unregister_device(struct net_device *dev, struct qmimux_priv *priv = netdev_priv(dev); struct 
net_device *real_dev = priv->real_dev; - free_percpu(dev->tstats); netdev_upper_dev_unlink(real_dev, dev); unregister_netdevice_queue(dev, head); @@ -644,7 +637,6 @@ static const struct net_device_ops qmi_wwan_netdev_ops = { .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, - .ndo_get_stats64 = dev_get_tstats64, .ndo_set_mac_address = qmi_wwan_mac_addr, .ndo_validate_addr = eth_validate_addr, }; @@ -1383,6 +1375,8 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */ {QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */ {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */ + {QMI_QUIRK_SET_DTR(0x1c9e, 0x9b05, 4)}, /* Longsung U8300 */ + {QMI_QUIRK_SET_DTR(0x1c9e, 0x9b3c, 4)}, /* Longsung U9300 */ {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */ {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */ {QMI_FIXED_INTF(0x0b3c, 0xc002, 4)}, /* Olivetti Olicard 140 */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 5d6aeb086fc7..19df1cd9f072 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -5274,7 +5274,7 @@ post_fw: rtl_reset_ocp_base(tp); strscpy(rtl_fw->version, fw_hdr->version, RTL_VER_SIZE); - dev_info(&tp->intf->dev, "load %s successfully\n", rtl_fw->version); + dev_dbg(&tp->intf->dev, "load %s successfully\n", rtl_fw->version); } static void rtl8152_release_firmware(struct r8152 *tp) @@ -9365,7 +9365,7 @@ static int rtl8152_change_mtu(struct net_device *dev, int new_mtu) case RTL_VER_01: case RTL_VER_02: case RTL_VER_07: - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return 0; default: break; @@ -9377,7 +9377,7 @@ static int rtl8152_change_mtu(struct net_device *dev, int new_mtu) mutex_lock(&tp->control); - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); if (netif_running(dev)) { if (tp->rtl_ops.change_mtu) diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 78ad2da3ee29..0726e18bee6f 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -2234,27 +2234,23 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) rx_cmd_b); skb_trim(skb, skb->len - 4); /* remove fcs */ - skb->truesize = size + sizeof(struct sk_buff); return 1; } - ax_skb = skb_clone(skb, GFP_ATOMIC); + /* Use "size - 4" to remove fcs */ + ax_skb = netdev_alloc_skb_ip_align(dev->net, size - 4); if (unlikely(!ax_skb)) { netdev_warn(dev->net, "Error allocating skb\n"); return 0; } - ax_skb->len = size; - ax_skb->data = packet; - skb_set_tail_pointer(ax_skb, size); + skb_put(ax_skb, size - 4); + memcpy(ax_skb->data, packet, size - 4); smsc75xx_rx_csum_offload(dev, ax_skb, rx_cmd_a, rx_cmd_b); - skb_trim(ax_skb, ax_skb->len - 4); /* remove fcs */ - ax_skb->truesize = size + sizeof(struct sk_buff); - usbnet_skb_return(dev, ax_skb); } diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 2fa46baa589e..cbea24666479 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1810,9 +1810,11 @@ static int smsc95xx_reset_resume(struct usb_interface *intf) static void smsc95xx_rx_csum_offload(struct sk_buff *skb) { - skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2); + u16 *csum_ptr = (u16 *)(skb_tail_pointer(skb) - 2); + + skb->csum = (__force __wsum)get_unaligned(csum_ptr); skb->ip_summed = CHECKSUM_COMPLETE; - skb_trim(skb, skb->len - 2); + skb_trim(skb, skb->len - 2); /* remove csum */ } static int 
smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) @@ -1870,25 +1872,22 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) if (dev->net->features & NETIF_F_RXCSUM) smsc95xx_rx_csum_offload(skb); skb_trim(skb, skb->len - 4); /* remove fcs */ - skb->truesize = size + sizeof(struct sk_buff); return 1; } - ax_skb = skb_clone(skb, GFP_ATOMIC); + ax_skb = netdev_alloc_skb_ip_align(dev->net, size); if (unlikely(!ax_skb)) { netdev_warn(dev->net, "Error allocating skb\n"); return 0; } - ax_skb->len = size; - ax_skb->data = packet; - skb_set_tail_pointer(ax_skb, size); + skb_put(ax_skb, size); + memcpy(ax_skb->data, packet, size); if (dev->net->features & NETIF_F_RXCSUM) smsc95xx_rx_csum_offload(ax_skb); skb_trim(ax_skb, ax_skb->len - 4); /* remove fcs */ - ax_skb->truesize = size + sizeof(struct sk_buff); usbnet_skb_return(dev, ax_skb); } diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c index 3164451e1010..0a662e42ed96 100644 --- a/drivers/net/usb/sr9700.c +++ b/drivers/net/usb/sr9700.c @@ -421,19 +421,15 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb) skb_pull(skb, 3); skb->len = len; skb_set_tail_pointer(skb, len); - skb->truesize = len + sizeof(struct sk_buff); return 2; } - /* skb_clone is used for address align */ - sr_skb = skb_clone(skb, GFP_ATOMIC); + sr_skb = netdev_alloc_skb_ip_align(dev->net, len); if (!sr_skb) return 0; - sr_skb->len = len; - sr_skb->data = skb->data + 3; - skb_set_tail_pointer(sr_skb, len); - sr_skb->truesize = len + sizeof(struct sk_buff); + skb_put(sr_skb, len); + memcpy(sr_skb->data, skb->data + 3, len); usbnet_skb_return(dev, sr_skb); skb_pull(skb, len + SR_RX_OVERHEAD); diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index e84efa661589..9fd516e8bb10 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -398,7 +398,7 @@ int usbnet_change_mtu (struct net_device *net, int new_mtu) // no second zero-length packet read wanted after mtu-sized packets if ((ll_mtu % dev->maxpacket) == 0) return -EDOM; - net->mtu = new_mtu; + WRITE_ONCE(net->mtu, new_mtu); dev->hard_mtu = net->mtu + net->hard_header_len; if (dev->rx_urb_size == old_hard_mtu) { @@ -1733,6 +1733,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) dev->hard_mtu = net->mtu + net->hard_header_len; net->min_mtu = 0; net->max_mtu = ETH_MAX_MTU; + net->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; net->netdev_ops = &usbnet_netdev_ops; net->watchdog_timeo = TX_TIMEOUT_JIFFIES; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index bcdfbf61eb66..426e68a95067 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -26,6 +26,7 @@ #include <linux/ptr_ring.h> #include <linux/bpf_trace.h> #include <linux/net_tstamp.h> +#include <linux/skbuff_ref.h> #include <net/page_pool/helpers.h> #define DRV_NAME "veth" diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 115c3c5414f2..19a9b50646c7 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -24,6 +24,7 @@ #include <net/xdp.h> #include <net/net_failover.h> #include <net/netdev_rx_queue.h> +#include <net/netdev_queues.h> static int napi_weight = NAPI_POLL_WEIGHT; module_param(napi_weight, int, 0444); @@ -78,6 +79,7 @@ static const unsigned long guest_offloads[] = { struct virtnet_stat_desc { char desc[ETH_GSTRING_LEN]; size_t offset; + size_t qstat_offset; }; struct virtnet_sq_free_stats { @@ -93,6 +95,8 @@ struct virtnet_sq_stats { u64_stats_t xdp_tx_drops; u64_stats_t kicks; u64_stats_t tx_timeouts; + 
u64_stats_t stop; + u64_stats_t wake; }; struct virtnet_rq_stats { @@ -107,31 +111,159 @@ struct virtnet_rq_stats { u64_stats_t kicks; }; -#define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m) -#define VIRTNET_RQ_STAT(m) offsetof(struct virtnet_rq_stats, m) +#define VIRTNET_SQ_STAT(name, m) {name, offsetof(struct virtnet_sq_stats, m), -1} +#define VIRTNET_RQ_STAT(name, m) {name, offsetof(struct virtnet_rq_stats, m), -1} + +#define VIRTNET_SQ_STAT_QSTAT(name, m) \ + { \ + name, \ + offsetof(struct virtnet_sq_stats, m), \ + offsetof(struct netdev_queue_stats_tx, m), \ + } + +#define VIRTNET_RQ_STAT_QSTAT(name, m) \ + { \ + name, \ + offsetof(struct virtnet_rq_stats, m), \ + offsetof(struct netdev_queue_stats_rx, m), \ + } static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = { - { "packets", VIRTNET_SQ_STAT(packets) }, - { "bytes", VIRTNET_SQ_STAT(bytes) }, - { "xdp_tx", VIRTNET_SQ_STAT(xdp_tx) }, - { "xdp_tx_drops", VIRTNET_SQ_STAT(xdp_tx_drops) }, - { "kicks", VIRTNET_SQ_STAT(kicks) }, - { "tx_timeouts", VIRTNET_SQ_STAT(tx_timeouts) }, + VIRTNET_SQ_STAT("xdp_tx", xdp_tx), + VIRTNET_SQ_STAT("xdp_tx_drops", xdp_tx_drops), + VIRTNET_SQ_STAT("kicks", kicks), + VIRTNET_SQ_STAT("tx_timeouts", tx_timeouts), }; static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = { - { "packets", VIRTNET_RQ_STAT(packets) }, - { "bytes", VIRTNET_RQ_STAT(bytes) }, - { "drops", VIRTNET_RQ_STAT(drops) }, - { "xdp_packets", VIRTNET_RQ_STAT(xdp_packets) }, - { "xdp_tx", VIRTNET_RQ_STAT(xdp_tx) }, - { "xdp_redirects", VIRTNET_RQ_STAT(xdp_redirects) }, - { "xdp_drops", VIRTNET_RQ_STAT(xdp_drops) }, - { "kicks", VIRTNET_RQ_STAT(kicks) }, + VIRTNET_RQ_STAT("drops", drops), + VIRTNET_RQ_STAT("xdp_packets", xdp_packets), + VIRTNET_RQ_STAT("xdp_tx", xdp_tx), + VIRTNET_RQ_STAT("xdp_redirects", xdp_redirects), + VIRTNET_RQ_STAT("xdp_drops", xdp_drops), + VIRTNET_RQ_STAT("kicks", kicks), +}; + +static const struct virtnet_stat_desc virtnet_sq_stats_desc_qstat[] = { + VIRTNET_SQ_STAT_QSTAT("packets", packets), + VIRTNET_SQ_STAT_QSTAT("bytes", bytes), + VIRTNET_SQ_STAT_QSTAT("stop", stop), + VIRTNET_SQ_STAT_QSTAT("wake", wake), +}; + +static const struct virtnet_stat_desc virtnet_rq_stats_desc_qstat[] = { + VIRTNET_RQ_STAT_QSTAT("packets", packets), + VIRTNET_RQ_STAT_QSTAT("bytes", bytes), +}; + +#define VIRTNET_STATS_DESC_CQ(name) \ + {#name, offsetof(struct virtio_net_stats_cvq, name), -1} + +#define VIRTNET_STATS_DESC_RX(class, name) \ + {#name, offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name), -1} + +#define VIRTNET_STATS_DESC_TX(class, name) \ + {#name, offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name), -1} + + +static const struct virtnet_stat_desc virtnet_stats_cvq_desc[] = { + VIRTNET_STATS_DESC_CQ(command_num), + VIRTNET_STATS_DESC_CQ(ok_num), +}; + +static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc[] = { + VIRTNET_STATS_DESC_RX(basic, packets), + VIRTNET_STATS_DESC_RX(basic, bytes), + + VIRTNET_STATS_DESC_RX(basic, notifications), + VIRTNET_STATS_DESC_RX(basic, interrupts), +}; + +static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc[] = { + VIRTNET_STATS_DESC_TX(basic, packets), + VIRTNET_STATS_DESC_TX(basic, bytes), + + VIRTNET_STATS_DESC_TX(basic, notifications), + VIRTNET_STATS_DESC_TX(basic, interrupts), +}; + +static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc[] = { + VIRTNET_STATS_DESC_RX(csum, needs_csum), +}; + +static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc[] = { + VIRTNET_STATS_DESC_TX(gso, 
gso_packets_noseg), + VIRTNET_STATS_DESC_TX(gso, gso_bytes_noseg), +}; + +static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc[] = { + VIRTNET_STATS_DESC_RX(speed, ratelimit_bytes), +}; + +static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc[] = { + VIRTNET_STATS_DESC_TX(speed, ratelimit_bytes), +}; + +#define VIRTNET_STATS_DESC_RX_QSTAT(class, name, qstat_field) \ + { \ + #name, \ + offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name), \ + offsetof(struct netdev_queue_stats_rx, qstat_field), \ + } + +#define VIRTNET_STATS_DESC_TX_QSTAT(class, name, qstat_field) \ + { \ + #name, \ + offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name), \ + offsetof(struct netdev_queue_stats_tx, qstat_field), \ + } + +static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc_qstat[] = { + VIRTNET_STATS_DESC_RX_QSTAT(basic, drops, hw_drops), + VIRTNET_STATS_DESC_RX_QSTAT(basic, drop_overruns, hw_drop_overruns), +}; + +static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc_qstat[] = { + VIRTNET_STATS_DESC_TX_QSTAT(basic, drops, hw_drops), + VIRTNET_STATS_DESC_TX_QSTAT(basic, drop_malformed, hw_drop_errors), }; -#define VIRTNET_SQ_STATS_LEN ARRAY_SIZE(virtnet_sq_stats_desc) -#define VIRTNET_RQ_STATS_LEN ARRAY_SIZE(virtnet_rq_stats_desc) +static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc_qstat[] = { + VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_valid, csum_unnecessary), + VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_none, csum_none), + VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_bad, csum_bad), +}; + +static const struct virtnet_stat_desc virtnet_stats_tx_csum_desc_qstat[] = { + VIRTNET_STATS_DESC_TX_QSTAT(csum, csum_none, csum_none), + VIRTNET_STATS_DESC_TX_QSTAT(csum, needs_csum, needs_csum), +}; + +static const struct virtnet_stat_desc virtnet_stats_rx_gso_desc_qstat[] = { + VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_packets, hw_gro_packets), + VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_bytes, hw_gro_bytes), + VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_packets_coalesced, hw_gro_wire_packets), + VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_bytes_coalesced, hw_gro_wire_bytes), +}; + +static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc_qstat[] = { + VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_packets, hw_gso_packets), + VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_bytes, hw_gso_bytes), + VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_segments, hw_gso_wire_packets), + VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_segments_bytes, hw_gso_wire_bytes), +}; + +static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc_qstat[] = { + VIRTNET_STATS_DESC_RX_QSTAT(speed, ratelimit_packets, hw_drop_ratelimits), +}; + +static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc_qstat[] = { + VIRTNET_STATS_DESC_TX_QSTAT(speed, ratelimit_packets, hw_drop_ratelimits), +}; + +#define VIRTNET_Q_TYPE_RX 0 +#define VIRTNET_Q_TYPE_TX 1 +#define VIRTNET_Q_TYPE_CQ 2 struct virtnet_interrupt_coalesce { u32 max_packets; @@ -184,6 +316,9 @@ struct receive_queue { /* Is dynamic interrupt moderation enabled? */ bool dim_enabled; + /* Used to protect dim_enabled and inter_coal */ + struct mutex dim_lock; + /* Dynamic Interrupt Moderation */ struct dim dim; @@ -213,9 +348,6 @@ struct receive_queue { /* Record the last dma info to free after new pages is allocated. 
*/ struct virtnet_rq_dma *last_dma; - - /* Do dma by self */ - bool do_dma; }; /* This structure can contain rss message with maximum settings for indirection table and keysize @@ -240,15 +372,6 @@ struct virtio_net_ctrl_rss { struct control_buf { struct virtio_net_ctrl_hdr hdr; virtio_net_ctrl_ack status; - struct virtio_net_ctrl_mq mq; - u8 promisc; - u8 allmulti; - __virtio16 vid; - __virtio64 offloads; - struct virtio_net_ctrl_rss rss; - struct virtio_net_ctrl_coal_tx coal_tx; - struct virtio_net_ctrl_coal_rx coal_rx; - struct virtio_net_ctrl_coal_vq coal_vq; }; struct virtnet_info { @@ -287,10 +410,14 @@ struct virtnet_info { u16 rss_indir_table_size; u32 rss_hash_types_supported; u32 rss_hash_types_saved; + struct virtio_net_ctrl_rss rss; /* Has control virtqueue */ bool has_cvq; + /* Lock to protect the control VQ */ + struct mutex cvq_lock; + /* Host can handle any s/g split between our header and packet data */ bool any_header_sg; @@ -340,6 +467,8 @@ struct virtnet_info { /* failover when STANDBY feature enabled */ struct failover *failover; + + u64 device_stats_cap; }; struct padded_vnet_hdr { @@ -425,6 +554,17 @@ static int rxq2vq(int rxq) return rxq * 2; } +static int vq_type(struct virtnet_info *vi, int qid) +{ + if (qid == vi->max_queue_pairs * 2) + return VIRTNET_Q_TYPE_CQ; + + if (qid % 2) + return VIRTNET_Q_TYPE_TX; + + return VIRTNET_Q_TYPE_RX; +} + static inline struct virtio_net_common_hdr * skb_vnet_common_hdr(struct sk_buff *skb) { @@ -603,7 +743,6 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - /* copy small packet so we can reuse these pages */ if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) { skb = virtnet_build_skb(buf, truesize, p - buf, len); if (unlikely(!skb)) @@ -707,7 +846,7 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx) void *buf; buf = virtqueue_get_buf_ctx(rq->vq, len, ctx); - if (buf && rq->do_dma) + if (buf) virtnet_rq_unmap(rq, buf, *len); return buf; @@ -720,11 +859,6 @@ static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len) u32 offset; void *head; - if (!rq->do_dma) { - sg_init_one(rq->sg, buf, len); - return; - } - head = page_address(rq->alloc_frag.page); offset = buf - head; @@ -750,44 +884,42 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp) head = page_address(alloc_frag->page); - if (rq->do_dma) { - dma = head; - - /* new pages */ - if (!alloc_frag->offset) { - if (rq->last_dma) { - /* Now, the new page is allocated, the last dma - * will not be used. So the dma can be unmapped - * if the ref is 0. - */ - virtnet_rq_unmap(rq, rq->last_dma, 0); - rq->last_dma = NULL; - } + dma = head; - dma->len = alloc_frag->size - sizeof(*dma); + /* new pages */ + if (!alloc_frag->offset) { + if (rq->last_dma) { + /* Now, the new page is allocated, the last dma + * will not be used. So the dma can be unmapped + * if the ref is 0. 
+ */ + virtnet_rq_unmap(rq, rq->last_dma, 0); + rq->last_dma = NULL; + } - addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1, - dma->len, DMA_FROM_DEVICE, 0); - if (virtqueue_dma_mapping_error(rq->vq, addr)) - return NULL; + dma->len = alloc_frag->size - sizeof(*dma); - dma->addr = addr; - dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr); + addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1, + dma->len, DMA_FROM_DEVICE, 0); + if (virtqueue_dma_mapping_error(rq->vq, addr)) + return NULL; - /* Add a reference to dma to prevent the entire dma from - * being released during error handling. This reference - * will be freed after the pages are no longer used. - */ - get_page(alloc_frag->page); - dma->ref = 1; - alloc_frag->offset = sizeof(*dma); + dma->addr = addr; + dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr); - rq->last_dma = dma; - } + /* Add a reference to dma to prevent the entire dma from + * being released during error handling. This reference + * will be freed after the pages are no longer used. + */ + get_page(alloc_frag->page); + dma->ref = 1; + alloc_frag->offset = sizeof(*dma); - ++dma->ref; + rq->last_dma = dma; } + ++dma->ref; + buf = head + alloc_frag->offset; get_page(alloc_frag->page); @@ -804,12 +936,9 @@ static void virtnet_rq_set_premapped(struct virtnet_info *vi) if (!vi->mergeable_rx_bufs && vi->big_packets) return; - for (i = 0; i < vi->max_queue_pairs; i++) { - if (virtqueue_set_dma_premapped(vi->rq[i].vq)) - continue; - - vi->rq[i].do_dma = true; - } + for (i = 0; i < vi->max_queue_pairs; i++) + /* error should never happen */ + BUG_ON(virtqueue_set_dma_premapped(vi->rq[i].vq)); } static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf) @@ -820,7 +949,7 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf) rq = &vi->rq[i]; - if (rq->do_dma) + if (!vi->big_packets || vi->mergeable_rx_bufs) virtnet_rq_unmap(rq, buf, 0); virtnet_rq_free_buf(vi, rq, buf); @@ -875,6 +1004,9 @@ static void check_sq_full_and_disable(struct virtnet_info *vi, */ if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { netif_stop_subqueue(dev, qnum); + u64_stats_update_begin(&sq->stats.syncp); + u64_stats_inc(&sq->stats.stop); + u64_stats_update_end(&sq->stats.syncp); if (use_napi) { if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) virtqueue_napi_schedule(&sq->napi, sq->vq); @@ -883,6 +1015,9 @@ static void check_sq_full_and_disable(struct virtnet_info *vi, free_old_xmit(sq, false); if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { netif_start_subqueue(dev, qnum); + u64_stats_update_begin(&sq->stats.syncp); + u64_stats_inc(&sq->stats.wake); + u64_stats_update_end(&sq->stats.syncp); virtqueue_disable_cb(sq->vq); } } @@ -1881,8 +2016,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); if (err < 0) { - if (rq->do_dma) - virtnet_rq_unmap(rq, buf, 0); + virtnet_rq_unmap(rq, buf, 0); put_page(virt_to_head_page(buf)); } @@ -1996,8 +2130,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, ctx = mergeable_len_to_ctx(len + room, headroom); err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); if (err < 0) { - if (rq->do_dma) - virtnet_rq_unmap(rq, buf, 0); + virtnet_rq_unmap(rq, buf, 0); put_page(virt_to_head_page(buf)); } @@ -2128,7 +2261,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget, } } else { while (packets < budget && - (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) { + (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) 
{ receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats); packets++; } @@ -2145,7 +2278,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget, u64_stats_set(&stats.packets, packets); u64_stats_update_begin(&rq->stats.syncp); - for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) { + for (i = 0; i < ARRAY_SIZE(virtnet_rq_stats_desc); i++) { size_t offset = virtnet_rq_stats_desc[i].offset; u64_stats_t *item, *src; @@ -2153,6 +2286,10 @@ static int virtnet_receive(struct receive_queue *rq, int budget, src = (u64_stats_t *)((u8 *)&stats + offset); u64_stats_add(item, u64_stats_read(src)); } + + u64_stats_add(&rq->stats.packets, u64_stats_read(&stats.packets)); + u64_stats_add(&rq->stats.bytes, u64_stats_read(&stats.bytes)); + u64_stats_update_end(&rq->stats.syncp); return packets; @@ -2179,8 +2316,14 @@ static void virtnet_poll_cleantx(struct receive_queue *rq) free_old_xmit(sq, true); } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq))); - if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) + if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) { + if (netif_tx_queue_stopped(txq)) { + u64_stats_update_begin(&sq->stats.syncp); + u64_stats_inc(&sq->stats.wake); + u64_stats_update_end(&sq->stats.syncp); + } netif_tx_wake_queue(txq); + } __netif_tx_unlock(txq); } @@ -2225,6 +2368,10 @@ static int virtnet_poll(struct napi_struct *napi, int budget) /* Out of packets? */ if (received < budget) { napi_complete = virtqueue_napi_complete(napi, rq->vq, received); + /* Intentionally not taking dim_lock here. This may result in a + * spurious net_dim call. But if that happens virtnet_rx_dim_work + * will not act on the scheduled work. + */ if (napi_complete && rq->dim_enabled) virtnet_rx_dim_update(vi, rq); } @@ -2326,8 +2473,14 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget) virtqueue_disable_cb(sq->vq); free_old_xmit(sq, true); - if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) + if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) { + if (netif_tx_queue_stopped(txq)) { + u64_stats_update_begin(&sq->stats.syncp); + u64_stats_inc(&sq->stats.wake); + u64_stats_update_end(&sq->stats.syncp); + } netif_tx_wake_queue(txq); + } opaque = virtqueue_enable_cb_prepare(sq->vq); @@ -2527,16 +2680,18 @@ static int virtnet_tx_resize(struct virtnet_info *vi, * supported by the hypervisor, as indicated by feature bits, should * never fail unless improperly formatted. */ -static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, - struct scatterlist *out) +static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class, u8 cmd, + struct scatterlist *out, + struct scatterlist *in) { - struct scatterlist *sgs[4], hdr, stat; - unsigned out_num = 0, tmp; + struct scatterlist *sgs[5], hdr, stat; + u32 out_num = 0, tmp, in_num = 0; int ret; /* Caller should know better */ BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); + mutex_lock(&vi->cvq_lock); vi->ctrl->status = ~0; vi->ctrl->hdr.class = class; vi->ctrl->hdr.cmd = cmd; @@ -2549,18 +2704,22 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, /* Add return status. 
*/ sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status)); - sgs[out_num] = &stat; + sgs[out_num + in_num++] = &stat; + + if (in) + sgs[out_num + in_num++] = in; - BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); - ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); + BUG_ON(out_num + in_num > ARRAY_SIZE(sgs)); + ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC); if (ret < 0) { dev_warn(&vi->vdev->dev, "Failed to add sgs for command vq: %d\n.", ret); + mutex_unlock(&vi->cvq_lock); return false; } if (unlikely(!virtqueue_kick(vi->cvq))) - return vi->ctrl->status == VIRTIO_NET_OK; + goto unlock; /* Spin for a response, the kick causes an ioport write, trapping * into the hypervisor, so the request should be handled immediately. @@ -2571,9 +2730,17 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, cpu_relax(); } +unlock: + mutex_unlock(&vi->cvq_lock); return vi->ctrl->status == VIRTIO_NET_OK; } +static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, + struct scatterlist *out) +{ + return virtnet_send_command_reply(vi, class, cmd, out, NULL); +} + static int virtnet_set_mac_address(struct net_device *dev, void *p) { struct virtnet_info *vi = netdev_priv(dev); @@ -2663,23 +2830,26 @@ static void virtnet_stats(struct net_device *dev, static void virtnet_ack_link_announce(struct virtnet_info *vi) { - rtnl_lock(); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL)) dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); - rtnl_unlock(); } -static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) +static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) { + struct virtio_net_ctrl_mq *mq __free(kfree) = NULL; struct scatterlist sg; struct net_device *dev = vi->dev; if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) return 0; - vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); - sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq)); + mq = kzalloc(sizeof(*mq), GFP_KERNEL); + if (!mq) + return -ENOMEM; + + mq->virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); + sg_init_one(&sg, mq, sizeof(*mq)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { @@ -2696,16 +2866,6 @@ static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) return 0; } -static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) -{ - int err; - - rtnl_lock(); - err = _virtnet_set_queues(vi, queue_pairs); - rtnl_unlock(); - return err; -} - static int virtnet_close(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); @@ -2728,6 +2888,7 @@ static void virtnet_rx_mode_work(struct work_struct *work) { struct virtnet_info *vi = container_of(work, struct virtnet_info, rx_mode_work); + u8 *promisc_allmulti __free(kfree) = NULL; struct net_device *dev = vi->dev; struct scatterlist sg[2]; struct virtio_net_ctrl_mac *mac_data; @@ -2743,22 +2904,27 @@ static void virtnet_rx_mode_work(struct work_struct *work) rtnl_lock(); - vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0); - vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0); + promisc_allmulti = kzalloc(sizeof(*promisc_allmulti), GFP_ATOMIC); + if (!promisc_allmulti) { + dev_warn(&dev->dev, "Failed to set RX mode, no memory.\n"); + return; + } - sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc)); + *promisc_allmulti = !!(dev->flags & IFF_PROMISC); + sg_init_one(sg, promisc_allmulti, 
sizeof(*promisc_allmulti)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_PROMISC, sg)) dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", - vi->ctrl->promisc ? "en" : "dis"); + *promisc_allmulti ? "en" : "dis"); - sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti)); + *promisc_allmulti = !!(dev->flags & IFF_ALLMULTI); + sg_init_one(sg, promisc_allmulti, sizeof(*promisc_allmulti)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", - vi->ctrl->allmulti ? "en" : "dis"); + *promisc_allmulti ? "en" : "dis"); netif_addr_lock_bh(dev); @@ -2819,10 +2985,15 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) { struct virtnet_info *vi = netdev_priv(dev); + __virtio16 *_vid __free(kfree) = NULL; struct scatterlist sg; - vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); - sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); + _vid = kzalloc(sizeof(*_vid), GFP_KERNEL); + if (!_vid) + return -ENOMEM; + + *_vid = cpu_to_virtio16(vi->vdev, vid); + sg_init_one(&sg, _vid, sizeof(*_vid)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_ADD, &sg)) @@ -2834,10 +3005,15 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) { struct virtnet_info *vi = netdev_priv(dev); + __virtio16 *_vid __free(kfree) = NULL; struct scatterlist sg; - vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); - sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); + _vid = kzalloc(sizeof(*_vid), GFP_KERNEL); + if (!_vid) + return -ENOMEM; + + *_vid = cpu_to_virtio16(vi->vdev, vid); + sg_init_one(&sg, _vid, sizeof(*_vid)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_DEL, &sg)) @@ -2950,12 +3126,17 @@ static void virtnet_cpu_notif_remove(struct virtnet_info *vi) static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi, u16 vqn, u32 max_usecs, u32 max_packets) { + struct virtio_net_ctrl_coal_vq *coal_vq __free(kfree) = NULL; struct scatterlist sgs; - vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn); - vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs); - vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets); - sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq)); + coal_vq = kzalloc(sizeof(*coal_vq), GFP_KERNEL); + if (!coal_vq) + return -ENOMEM; + + coal_vq->vqn = cpu_to_le16(vqn); + coal_vq->coal.max_usecs = cpu_to_le32(max_usecs); + coal_vq->coal.max_packets = cpu_to_le32(max_packets); + sg_init_one(&sgs, coal_vq, sizeof(*coal_vq)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET, @@ -3066,9 +3247,11 @@ static int virtnet_set_ringparam(struct net_device *dev, return err; /* The reason is same as the transmit virtqueue reset */ + mutex_lock(&vi->rq[i].dim_lock); err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i, vi->intr_coal_rx.max_usecs, vi->intr_coal_rx.max_packets); + mutex_unlock(&vi->rq[i].dim_lock); if (err) return err; } @@ -3087,25 +3270,29 @@ static bool virtnet_commit_rss_command(struct virtnet_info *vi) sg_init_table(sgs, 4); sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table); - sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size); + sg_set_buf(&sgs[0], &vi->rss, sg_buf_size); - sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1); - sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size); + sg_buf_size = sizeof(uint16_t) * (vi->rss.indirection_table_mask + 1); + 
sg_set_buf(&sgs[1], vi->rss.indirection_table, sg_buf_size); sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key) - offsetof(struct virtio_net_ctrl_rss, max_tx_vq); - sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size); + sg_set_buf(&sgs[2], &vi->rss.max_tx_vq, sg_buf_size); sg_buf_size = vi->rss_key_size; - sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size); + sg_set_buf(&sgs[3], vi->rss.key, sg_buf_size); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG - : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) { - dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n"); - return false; - } + : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) + goto err; + return true; + +err: + dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n"); + return false; + } static void virtnet_init_default_rss(struct virtnet_info *vi) @@ -3113,21 +3300,21 @@ static void virtnet_init_default_rss(struct virtnet_info *vi) u32 indir_val = 0; int i = 0; - vi->ctrl->rss.hash_types = vi->rss_hash_types_supported; + vi->rss.hash_types = vi->rss_hash_types_supported; vi->rss_hash_types_saved = vi->rss_hash_types_supported; - vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size + vi->rss.indirection_table_mask = vi->rss_indir_table_size ? vi->rss_indir_table_size - 1 : 0; - vi->ctrl->rss.unclassified_queue = 0; + vi->rss.unclassified_queue = 0; for (; i < vi->rss_indir_table_size; ++i) { indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs); - vi->ctrl->rss.indirection_table[i] = indir_val; + vi->rss.indirection_table[i] = indir_val; } - vi->ctrl->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0; - vi->ctrl->rss.hash_key_length = vi->rss_key_size; + vi->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0; + vi->rss.hash_key_length = vi->rss_key_size; - netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size); + netdev_rss_key_fill(vi->rss.key, vi->rss_key_size); } static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info) @@ -3238,7 +3425,7 @@ static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc * if (new_hashtypes != vi->rss_hash_types_saved) { vi->rss_hash_types_saved = new_hashtypes; - vi->ctrl->rss.hash_types = vi->rss_hash_types_saved; + vi->rss.hash_types = vi->rss_hash_types_saved; if (vi->dev->features & NETIF_F_RXHASH) return virtnet_commit_rss_command(vi); } @@ -3283,7 +3470,7 @@ static int virtnet_set_channels(struct net_device *dev, return -EINVAL; cpus_read_lock(); - err = _virtnet_set_queues(vi, queue_pairs); + err = virtnet_set_queues(vi, queue_pairs); if (err) { cpus_read_unlock(); goto err; @@ -3297,25 +3484,654 @@ static int virtnet_set_channels(struct net_device *dev, return err; } +static void virtnet_stats_sprintf(u8 **p, const char *fmt, const char *noq_fmt, + int num, int qid, const struct virtnet_stat_desc *desc) +{ + int i; + + if (qid < 0) { + for (i = 0; i < num; ++i) + ethtool_sprintf(p, noq_fmt, desc[i].desc); + } else { + for (i = 0; i < num; ++i) + ethtool_sprintf(p, fmt, qid, desc[i].desc); + } +} + +/* qid == -1: for rx/tx queue total field */ +static void virtnet_get_stats_string(struct virtnet_info *vi, int type, int qid, u8 **data) +{ + const struct virtnet_stat_desc *desc; + const char *fmt, *noq_fmt; + u8 *p = *data; + u32 num; + + if (type == VIRTNET_Q_TYPE_CQ && qid >= 0) { + noq_fmt = "cq_hw_%s"; + + if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) { + desc = &virtnet_stats_cvq_desc[0]; + num = ARRAY_SIZE(virtnet_stats_cvq_desc); + + 
virtnet_stats_sprintf(&p, NULL, noq_fmt, num, -1, desc); + } + } + + if (type == VIRTNET_Q_TYPE_RX) { + fmt = "rx%u_%s"; + noq_fmt = "rx_%s"; + + desc = &virtnet_rq_stats_desc[0]; + num = ARRAY_SIZE(virtnet_rq_stats_desc); + + virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); + + fmt = "rx%u_hw_%s"; + noq_fmt = "rx_hw_%s"; + + if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { + desc = &virtnet_stats_rx_basic_desc[0]; + num = ARRAY_SIZE(virtnet_stats_rx_basic_desc); + + virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); + } + + if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { + desc = &virtnet_stats_rx_csum_desc[0]; + num = ARRAY_SIZE(virtnet_stats_rx_csum_desc); + + virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); + } + + if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) { + desc = &virtnet_stats_rx_speed_desc[0]; + num = ARRAY_SIZE(virtnet_stats_rx_speed_desc); + + virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); + } + } + + if (type == VIRTNET_Q_TYPE_TX) { + fmt = "tx%u_%s"; + noq_fmt = "tx_%s"; + + desc = &virtnet_sq_stats_desc[0]; + num = ARRAY_SIZE(virtnet_sq_stats_desc); + + virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); + + fmt = "tx%u_hw_%s"; + noq_fmt = "tx_hw_%s"; + + if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { + desc = &virtnet_stats_tx_basic_desc[0]; + num = ARRAY_SIZE(virtnet_stats_tx_basic_desc); + + virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); + } + + if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { + desc = &virtnet_stats_tx_gso_desc[0]; + num = ARRAY_SIZE(virtnet_stats_tx_gso_desc); + + virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); + } + + if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) { + desc = &virtnet_stats_tx_speed_desc[0]; + num = ARRAY_SIZE(virtnet_stats_tx_speed_desc); + + virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc); + } + } + + *data = p; +} + +struct virtnet_stats_ctx { + /* The stats are write to qstats or ethtool -S */ + bool to_qstat; + + /* Used to calculate the offset inside the output buffer. */ + u32 desc_num[3]; + + /* The actual supported stat types. */ + u32 bitmap[3]; + + /* Used to calculate the reply buffer size. */ + u32 size[3]; + + /* Record the output buffer. 
*/ + u64 *data; +}; + +static void virtnet_stats_ctx_init(struct virtnet_info *vi, + struct virtnet_stats_ctx *ctx, + u64 *data, bool to_qstat) +{ + u32 queue_type; + + ctx->data = data; + ctx->to_qstat = to_qstat; + + if (to_qstat) { + ctx->desc_num[VIRTNET_Q_TYPE_RX] = ARRAY_SIZE(virtnet_rq_stats_desc_qstat); + ctx->desc_num[VIRTNET_Q_TYPE_TX] = ARRAY_SIZE(virtnet_sq_stats_desc_qstat); + + queue_type = VIRTNET_Q_TYPE_RX; + + if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { + ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_BASIC; + ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_basic_desc_qstat); + ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_basic); + } + + if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { + ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_CSUM; + ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_csum_desc_qstat); + ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_csum); + } + + if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) { + ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_GSO; + ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_gso_desc_qstat); + ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_gso); + } + + if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) { + ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_SPEED; + ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_speed_desc_qstat); + ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_speed); + } + + queue_type = VIRTNET_Q_TYPE_TX; + + if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { + ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_BASIC; + ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_basic_desc_qstat); + ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_basic); + } + + if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) { + ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_CSUM; + ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_csum_desc_qstat); + ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_csum); + } + + if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { + ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_GSO; + ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_gso_desc_qstat); + ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_gso); + } + + if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) { + ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_SPEED; + ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_speed_desc_qstat); + ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_speed); + } + + return; + } + + ctx->desc_num[VIRTNET_Q_TYPE_RX] = ARRAY_SIZE(virtnet_rq_stats_desc); + ctx->desc_num[VIRTNET_Q_TYPE_TX] = ARRAY_SIZE(virtnet_sq_stats_desc); + + if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) { + queue_type = VIRTNET_Q_TYPE_CQ; + + ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_CVQ; + ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_cvq_desc); + ctx->size[queue_type] += sizeof(struct virtio_net_stats_cvq); + } + + queue_type = VIRTNET_Q_TYPE_RX; + + if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { + ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_BASIC; + ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_basic_desc); + ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_basic); + } + + if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { + ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_CSUM; + 
ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_csum_desc); + ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_csum); + } + + if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) { + ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_SPEED; + ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_speed_desc); + ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_speed); + } + + queue_type = VIRTNET_Q_TYPE_TX; + + if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { + ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_BASIC; + ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_basic_desc); + ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_basic); + } + + if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { + ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_GSO; + ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_gso_desc); + ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_gso); + } + + if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) { + ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_SPEED; + ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_speed_desc); + ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_speed); + } +} + +/* stats_sum_queue - Calculate the sum of the same fields in sq or rq. + * @sum: the position to store the sum values + * @num: field num + * @q_value: the first queue fields + * @q_num: number of the queues + */ +static void stats_sum_queue(u64 *sum, u32 num, u64 *q_value, u32 q_num) +{ + u32 step = num; + int i, j; + u64 *p; + + for (i = 0; i < num; ++i) { + p = sum + i; + *p = 0; + + for (j = 0; j < q_num; ++j) + *p += *(q_value + i + j * step); + } +} + +static void virtnet_fill_total_fields(struct virtnet_info *vi, + struct virtnet_stats_ctx *ctx) +{ + u64 *data, *first_rx_q, *first_tx_q; + u32 num_cq, num_rx, num_tx; + + num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ]; + num_rx = ctx->desc_num[VIRTNET_Q_TYPE_RX]; + num_tx = ctx->desc_num[VIRTNET_Q_TYPE_TX]; + + first_rx_q = ctx->data + num_rx + num_tx + num_cq; + first_tx_q = first_rx_q + vi->curr_queue_pairs * num_rx; + + data = ctx->data; + + stats_sum_queue(data, num_rx, first_rx_q, vi->curr_queue_pairs); + + data = ctx->data + num_rx; + + stats_sum_queue(data, num_tx, first_tx_q, vi->curr_queue_pairs); +} + +static void virtnet_fill_stats_qstat(struct virtnet_info *vi, u32 qid, + struct virtnet_stats_ctx *ctx, + const u8 *base, bool drv_stats, u8 reply_type) +{ + const struct virtnet_stat_desc *desc; + const u64_stats_t *v_stat; + u64 offset, bitmap; + const __le64 *v; + u32 queue_type; + int i, num; + + queue_type = vq_type(vi, qid); + bitmap = ctx->bitmap[queue_type]; + + if (drv_stats) { + if (queue_type == VIRTNET_Q_TYPE_RX) { + desc = &virtnet_rq_stats_desc_qstat[0]; + num = ARRAY_SIZE(virtnet_rq_stats_desc_qstat); + } else { + desc = &virtnet_sq_stats_desc_qstat[0]; + num = ARRAY_SIZE(virtnet_sq_stats_desc_qstat); + } + + for (i = 0; i < num; ++i) { + offset = desc[i].qstat_offset / sizeof(*ctx->data); + v_stat = (const u64_stats_t *)(base + desc[i].offset); + ctx->data[offset] = u64_stats_read(v_stat); + } + return; + } + + if (bitmap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { + desc = &virtnet_stats_rx_basic_desc_qstat[0]; + num = ARRAY_SIZE(virtnet_stats_rx_basic_desc_qstat); + if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_BASIC) + goto found; + } + + if (bitmap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { + desc = &virtnet_stats_rx_csum_desc_qstat[0]; + num = ARRAY_SIZE(virtnet_stats_rx_csum_desc_qstat); 
+ if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_CSUM) + goto found; + } + + if (bitmap & VIRTIO_NET_STATS_TYPE_RX_GSO) { + desc = &virtnet_stats_rx_gso_desc_qstat[0]; + num = ARRAY_SIZE(virtnet_stats_rx_gso_desc_qstat); + if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_GSO) + goto found; + } + + if (bitmap & VIRTIO_NET_STATS_TYPE_RX_SPEED) { + desc = &virtnet_stats_rx_speed_desc_qstat[0]; + num = ARRAY_SIZE(virtnet_stats_rx_speed_desc_qstat); + if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_SPEED) + goto found; + } + + if (bitmap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { + desc = &virtnet_stats_tx_basic_desc_qstat[0]; + num = ARRAY_SIZE(virtnet_stats_tx_basic_desc_qstat); + if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_BASIC) + goto found; + } + + if (bitmap & VIRTIO_NET_STATS_TYPE_TX_CSUM) { + desc = &virtnet_stats_tx_csum_desc_qstat[0]; + num = ARRAY_SIZE(virtnet_stats_tx_csum_desc_qstat); + if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_CSUM) + goto found; + } + + if (bitmap & VIRTIO_NET_STATS_TYPE_TX_GSO) { + desc = &virtnet_stats_tx_gso_desc_qstat[0]; + num = ARRAY_SIZE(virtnet_stats_tx_gso_desc_qstat); + if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_GSO) + goto found; + } + + if (bitmap & VIRTIO_NET_STATS_TYPE_TX_SPEED) { + desc = &virtnet_stats_tx_speed_desc_qstat[0]; + num = ARRAY_SIZE(virtnet_stats_tx_speed_desc_qstat); + if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_SPEED) + goto found; + } + + return; + +found: + for (i = 0; i < num; ++i) { + offset = desc[i].qstat_offset / sizeof(*ctx->data); + v = (const __le64 *)(base + desc[i].offset); + ctx->data[offset] = le64_to_cpu(*v); + } +} + +/* virtnet_fill_stats - copy the stats to qstats or ethtool -S + * The stats source is the device or the driver. + * + * @vi: virtio net info + * @qid: the vq id + * @ctx: stats ctx (initiated by virtnet_stats_ctx_init()) + * @base: pointer to the device reply or the driver stats structure. 
+ * @drv_stats: designate the base type (device reply, driver stats) + * @type: the type of the device reply (if drv_stats is true, this must be zero) + */ +static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid, + struct virtnet_stats_ctx *ctx, + const u8 *base, bool drv_stats, u8 reply_type) +{ + u32 queue_type, num_rx, num_tx, num_cq; + const struct virtnet_stat_desc *desc; + const u64_stats_t *v_stat; + u64 offset, bitmap; + const __le64 *v; + int i, num; + + if (ctx->to_qstat) + return virtnet_fill_stats_qstat(vi, qid, ctx, base, drv_stats, reply_type); + + num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ]; + num_rx = ctx->desc_num[VIRTNET_Q_TYPE_RX]; + num_tx = ctx->desc_num[VIRTNET_Q_TYPE_TX]; + + queue_type = vq_type(vi, qid); + bitmap = ctx->bitmap[queue_type]; + + /* skip the total fields of pairs */ + offset = num_rx + num_tx; + + if (queue_type == VIRTNET_Q_TYPE_TX) { + offset += num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2); + + num = ARRAY_SIZE(virtnet_sq_stats_desc); + if (drv_stats) { + desc = &virtnet_sq_stats_desc[0]; + goto drv_stats; + } + + offset += num; + + } else if (queue_type == VIRTNET_Q_TYPE_RX) { + offset += num_cq + num_rx * (qid / 2); + + num = ARRAY_SIZE(virtnet_rq_stats_desc); + if (drv_stats) { + desc = &virtnet_rq_stats_desc[0]; + goto drv_stats; + } + + offset += num; + } + + if (bitmap & VIRTIO_NET_STATS_TYPE_CVQ) { + desc = &virtnet_stats_cvq_desc[0]; + num = ARRAY_SIZE(virtnet_stats_cvq_desc); + if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_CVQ) + goto found; + + offset += num; + } + + if (bitmap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { + desc = &virtnet_stats_rx_basic_desc[0]; + num = ARRAY_SIZE(virtnet_stats_rx_basic_desc); + if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_BASIC) + goto found; + + offset += num; + } + + if (bitmap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { + desc = &virtnet_stats_rx_csum_desc[0]; + num = ARRAY_SIZE(virtnet_stats_rx_csum_desc); + if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_CSUM) + goto found; + + offset += num; + } + + if (bitmap & VIRTIO_NET_STATS_TYPE_RX_SPEED) { + desc = &virtnet_stats_rx_speed_desc[0]; + num = ARRAY_SIZE(virtnet_stats_rx_speed_desc); + if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_SPEED) + goto found; + + offset += num; + } + + if (bitmap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { + desc = &virtnet_stats_tx_basic_desc[0]; + num = ARRAY_SIZE(virtnet_stats_tx_basic_desc); + if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_BASIC) + goto found; + + offset += num; + } + + if (bitmap & VIRTIO_NET_STATS_TYPE_TX_GSO) { + desc = &virtnet_stats_tx_gso_desc[0]; + num = ARRAY_SIZE(virtnet_stats_tx_gso_desc); + if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_GSO) + goto found; + + offset += num; + } + + if (bitmap & VIRTIO_NET_STATS_TYPE_TX_SPEED) { + desc = &virtnet_stats_tx_speed_desc[0]; + num = ARRAY_SIZE(virtnet_stats_tx_speed_desc); + if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_SPEED) + goto found; + + offset += num; + } + + return; + +found: + for (i = 0; i < num; ++i) { + v = (const __le64 *)(base + desc[i].offset); + ctx->data[offset + i] = le64_to_cpu(*v); + } + + return; + +drv_stats: + for (i = 0; i < num; ++i) { + v_stat = (const u64_stats_t *)(base + desc[i].offset); + ctx->data[offset + i] = u64_stats_read(v_stat); + } +} + +static int __virtnet_get_hw_stats(struct virtnet_info *vi, + struct virtnet_stats_ctx *ctx, + struct virtio_net_ctrl_queue_stats *req, + int req_size, void *reply, int res_size) +{ + struct virtio_net_stats_reply_hdr *hdr; + struct scatterlist sgs_in, sgs_out; + 
void *p; + u32 qid; + int ok; + + sg_init_one(&sgs_out, req, req_size); + sg_init_one(&sgs_in, reply, res_size); + + ok = virtnet_send_command_reply(vi, VIRTIO_NET_CTRL_STATS, + VIRTIO_NET_CTRL_STATS_GET, + &sgs_out, &sgs_in); + + if (!ok) + return ok; + + for (p = reply; p - reply < res_size; p += le16_to_cpu(hdr->size)) { + hdr = p; + qid = le16_to_cpu(hdr->vq_index); + virtnet_fill_stats(vi, qid, ctx, p, false, hdr->type); + } + + return 0; +} + +static void virtnet_make_stat_req(struct virtnet_info *vi, + struct virtnet_stats_ctx *ctx, + struct virtio_net_ctrl_queue_stats *req, + int qid, int *idx) +{ + int qtype = vq_type(vi, qid); + u64 bitmap = ctx->bitmap[qtype]; + + if (!bitmap) + return; + + req->stats[*idx].vq_index = cpu_to_le16(qid); + req->stats[*idx].types_bitmap[0] = cpu_to_le64(bitmap); + *idx += 1; +} + +/* qid: -1: get stats of all vq. + * > 0: get the stats for the special vq. This must not be cvq. + */ +static int virtnet_get_hw_stats(struct virtnet_info *vi, + struct virtnet_stats_ctx *ctx, int qid) +{ + int qnum, i, j, res_size, qtype, last_vq, first_vq; + struct virtio_net_ctrl_queue_stats *req; + bool enable_cvq; + void *reply; + int ok; + + if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS)) + return 0; + + if (qid == -1) { + last_vq = vi->curr_queue_pairs * 2 - 1; + first_vq = 0; + enable_cvq = true; + } else { + last_vq = qid; + first_vq = qid; + enable_cvq = false; + } + + qnum = 0; + res_size = 0; + for (i = first_vq; i <= last_vq ; ++i) { + qtype = vq_type(vi, i); + if (ctx->bitmap[qtype]) { + ++qnum; + res_size += ctx->size[qtype]; + } + } + + if (enable_cvq && ctx->bitmap[VIRTNET_Q_TYPE_CQ]) { + res_size += ctx->size[VIRTNET_Q_TYPE_CQ]; + qnum += 1; + } + + req = kcalloc(qnum, sizeof(*req), GFP_KERNEL); + if (!req) + return -ENOMEM; + + reply = kmalloc(res_size, GFP_KERNEL); + if (!reply) { + kfree(req); + return -ENOMEM; + } + + j = 0; + for (i = first_vq; i <= last_vq ; ++i) + virtnet_make_stat_req(vi, ctx, req, i, &j); + + if (enable_cvq) + virtnet_make_stat_req(vi, ctx, req, vi->max_queue_pairs * 2, &j); + + ok = __virtnet_get_hw_stats(vi, ctx, req, sizeof(*req) * j, reply, res_size); + + kfree(req); + kfree(reply); + + return ok; +} + static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data) { struct virtnet_info *vi = netdev_priv(dev); - unsigned int i, j; + unsigned int i; u8 *p = data; switch (stringset) { case ETH_SS_STATS: - for (i = 0; i < vi->curr_queue_pairs; i++) { - for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) - ethtool_sprintf(&p, "rx_queue_%u_%s", i, - virtnet_rq_stats_desc[j].desc); - } + /* Generate the total field names. 
*/ + virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_RX, -1, &p); + virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_TX, -1, &p); - for (i = 0; i < vi->curr_queue_pairs; i++) { - for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) - ethtool_sprintf(&p, "tx_queue_%u_%s", i, - virtnet_sq_stats_desc[j].desc); - } + virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_CQ, 0, &p); + + for (i = 0; i < vi->curr_queue_pairs; ++i) + virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_RX, i, &p); + + for (i = 0; i < vi->curr_queue_pairs; ++i) + virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_TX, i, &p); break; } } @@ -3323,11 +4139,17 @@ static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data) static int virtnet_get_sset_count(struct net_device *dev, int sset) { struct virtnet_info *vi = netdev_priv(dev); + struct virtnet_stats_ctx ctx = {0}; + u32 pair_count; switch (sset) { case ETH_SS_STATS: - return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN + - VIRTNET_SQ_STATS_LEN); + virtnet_stats_ctx_init(vi, &ctx, NULL, false); + + pair_count = ctx.desc_num[VIRTNET_Q_TYPE_RX] + ctx.desc_num[VIRTNET_Q_TYPE_TX]; + + return pair_count + ctx.desc_num[VIRTNET_Q_TYPE_CQ] + + vi->curr_queue_pairs * pair_count; default: return -EOPNOTSUPP; } @@ -3337,40 +4159,32 @@ static void virtnet_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct virtnet_info *vi = netdev_priv(dev); - unsigned int idx = 0, start, i, j; + struct virtnet_stats_ctx ctx = {0}; + unsigned int start, i; const u8 *stats_base; - const u64_stats_t *p; - size_t offset; + + virtnet_stats_ctx_init(vi, &ctx, data, false); + if (virtnet_get_hw_stats(vi, &ctx, -1)) + dev_warn(&vi->dev->dev, "Failed to get hw stats.\n"); for (i = 0; i < vi->curr_queue_pairs; i++) { struct receive_queue *rq = &vi->rq[i]; + struct send_queue *sq = &vi->sq[i]; stats_base = (const u8 *)&rq->stats; do { start = u64_stats_fetch_begin(&rq->stats.syncp); - for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) { - offset = virtnet_rq_stats_desc[j].offset; - p = (const u64_stats_t *)(stats_base + offset); - data[idx + j] = u64_stats_read(p); - } + virtnet_fill_stats(vi, i * 2, &ctx, stats_base, true, 0); } while (u64_stats_fetch_retry(&rq->stats.syncp, start)); - idx += VIRTNET_RQ_STATS_LEN; - } - - for (i = 0; i < vi->curr_queue_pairs; i++) { - struct send_queue *sq = &vi->sq[i]; stats_base = (const u8 *)&sq->stats; do { start = u64_stats_fetch_begin(&sq->stats.syncp); - for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) { - offset = virtnet_sq_stats_desc[j].offset; - p = (const u64_stats_t *)(stats_base + offset); - data[idx + j] = u64_stats_read(p); - } + virtnet_fill_stats(vi, i * 2 + 1, &ctx, stats_base, true, 0); } while (u64_stats_fetch_retry(&sq->stats.syncp, start)); - idx += VIRTNET_SQ_STATS_LEN; } + + virtnet_fill_total_fields(vi, &ctx); } static void virtnet_get_channels(struct net_device *dev, @@ -3410,12 +4224,17 @@ static int virtnet_get_link_ksettings(struct net_device *dev, static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi, struct ethtool_coalesce *ec) { + struct virtio_net_ctrl_coal_tx *coal_tx __free(kfree) = NULL; struct scatterlist sgs_tx; int i; - vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs); - vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames); - sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx)); + coal_tx = kzalloc(sizeof(*coal_tx), GFP_KERNEL); + if (!coal_tx) + return -ENOMEM; + + coal_tx->tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs); + coal_tx->tx_max_packets = 
cpu_to_le32(ec->tx_max_coalesced_frames); + sg_init_one(&sgs_tx, coal_tx, sizeof(*coal_tx)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, VIRTIO_NET_CTRL_NOTF_COAL_TX_SET, @@ -3435,8 +4254,10 @@ static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi, static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi, struct ethtool_coalesce *ec) { + struct virtio_net_ctrl_coal_rx *coal_rx __free(kfree) = NULL; bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce; struct scatterlist sgs_rx; + int ret = 0; int i; if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) @@ -3446,11 +4267,21 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi, ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets)) return -EINVAL; + /* Acquire all queues dim_locks */ + for (i = 0; i < vi->max_queue_pairs; i++) + mutex_lock(&vi->rq[i].dim_lock); + if (rx_ctrl_dim_on && !vi->rx_dim_enabled) { vi->rx_dim_enabled = true; for (i = 0; i < vi->max_queue_pairs; i++) vi->rq[i].dim_enabled = true; - return 0; + goto unlock; + } + + coal_rx = kzalloc(sizeof(*coal_rx), GFP_KERNEL); + if (!coal_rx) { + ret = -ENOMEM; + goto unlock; } if (!rx_ctrl_dim_on && vi->rx_dim_enabled) { @@ -3463,14 +4294,16 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi, * we need apply the global new params even if they * are not updated. */ - vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs); - vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames); - sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx)); + coal_rx->rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs); + coal_rx->rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames); + sg_init_one(&sgs_rx, coal_rx, sizeof(*coal_rx)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, VIRTIO_NET_CTRL_NOTF_COAL_RX_SET, - &sgs_rx)) - return -EINVAL; + &sgs_rx)) { + ret = -EINVAL; + goto unlock; + } vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs; vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames; @@ -3478,8 +4311,11 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi, vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs; vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames; } +unlock: + for (i = vi->max_queue_pairs - 1; i >= 0; i--) + mutex_unlock(&vi->rq[i].dim_lock); - return 0; + return ret; } static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi, @@ -3503,19 +4339,24 @@ static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi, u16 queue) { bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce; - bool cur_rx_dim = vi->rq[queue].dim_enabled; u32 max_usecs, max_packets; + bool cur_rx_dim; int err; + mutex_lock(&vi->rq[queue].dim_lock); + cur_rx_dim = vi->rq[queue].dim_enabled; max_usecs = vi->rq[queue].intr_coal.max_usecs; max_packets = vi->rq[queue].intr_coal.max_packets; if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != max_usecs || - ec->rx_max_coalesced_frames != max_packets)) + ec->rx_max_coalesced_frames != max_packets)) { + mutex_unlock(&vi->rq[queue].dim_lock); return -EINVAL; + } if (rx_ctrl_dim_on && !cur_rx_dim) { vi->rq[queue].dim_enabled = true; + mutex_unlock(&vi->rq[queue].dim_lock); return 0; } @@ -3528,10 +4369,8 @@ static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi, err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, queue, ec->rx_coalesce_usecs, ec->rx_max_coalesced_frames); - if (err) - return err; - - return 0; + mutex_unlock(&vi->rq[queue].dim_lock); + return err; } static 
int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi, @@ -3561,39 +4400,27 @@ static void virtnet_rx_dim_work(struct work_struct *work) struct virtnet_info *vi = rq->vq->vdev->priv; struct net_device *dev = vi->dev; struct dim_cq_moder update_moder; - int i, qnum, err; + int qnum, err; - if (!rtnl_trylock()) - return; + qnum = rq - vi->rq; - /* Each rxq's work is queued by "net_dim()->schedule_work()" - * in response to NAPI traffic changes. Note that dim->profile_ix - * for each rxq is updated prior to the queuing action. - * So we only need to traverse and update profiles for all rxqs - * in the work which is holding rtnl_lock. - */ - for (i = 0; i < vi->curr_queue_pairs; i++) { - rq = &vi->rq[i]; - dim = &rq->dim; - qnum = rq - vi->rq; - - if (!rq->dim_enabled) - continue; - - update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix); - if (update_moder.usec != rq->intr_coal.max_usecs || - update_moder.pkts != rq->intr_coal.max_packets) { - err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum, - update_moder.usec, - update_moder.pkts); - if (err) - pr_debug("%s: Failed to send dim parameters on rxq%d\n", - dev->name, qnum); - dim->state = DIM_START_MEASURE; - } - } + mutex_lock(&rq->dim_lock); + if (!rq->dim_enabled) + goto out; - rtnl_unlock(); + update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix); + if (update_moder.usec != rq->intr_coal.max_usecs || + update_moder.pkts != rq->intr_coal.max_packets) { + err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum, + update_moder.usec, + update_moder.pkts); + if (err) + pr_debug("%s: Failed to send dim parameters on rxq%d\n", + dev->name, qnum); + dim->state = DIM_START_MEASURE; + } +out: + mutex_unlock(&rq->dim_lock); } static int virtnet_coal_params_supported(struct ethtool_coalesce *ec) @@ -3731,11 +4558,13 @@ static int virtnet_get_per_queue_coalesce(struct net_device *dev, return -EINVAL; if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) { + mutex_lock(&vi->rq[queue].dim_lock); ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs; ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs; ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets; ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets; ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled; + mutex_unlock(&vi->rq[queue].dim_lock); } else { ec->rx_max_coalesced_frames = 1; @@ -3791,11 +4620,11 @@ static int virtnet_get_rxfh(struct net_device *dev, if (rxfh->indir) { for (i = 0; i < vi->rss_indir_table_size; ++i) - rxfh->indir[i] = vi->ctrl->rss.indirection_table[i]; + rxfh->indir[i] = vi->rss.indirection_table[i]; } if (rxfh->key) - memcpy(rxfh->key, vi->ctrl->rss.key, vi->rss_key_size); + memcpy(rxfh->key, vi->rss.key, vi->rss_key_size); rxfh->hfunc = ETH_RSS_HASH_TOP; @@ -3819,7 +4648,7 @@ static int virtnet_set_rxfh(struct net_device *dev, return -EOPNOTSUPP; for (i = 0; i < vi->rss_indir_table_size; ++i) - vi->ctrl->rss.indirection_table[i] = rxfh->indir[i]; + vi->rss.indirection_table[i] = rxfh->indir[i]; update = true; } @@ -3831,7 +4660,7 @@ static int virtnet_set_rxfh(struct net_device *dev, if (!vi->has_rss && !vi->has_rss_hash_report) return -EOPNOTSUPP; - memcpy(vi->ctrl->rss.key, rxfh->key, vi->rss_key_size); + memcpy(vi->rss.key, rxfh->key, vi->rss_key_size); update = true; } @@ -3905,6 +4734,97 @@ static const struct ethtool_ops virtnet_ethtool_ops = { .set_rxnfc = virtnet_set_rxnfc, }; +static void virtnet_get_queue_stats_rx(struct net_device *dev, int i, + struct netdev_queue_stats_rx *stats) +{ + 
struct virtnet_info *vi = netdev_priv(dev); + struct receive_queue *rq = &vi->rq[i]; + struct virtnet_stats_ctx ctx = {0}; + + virtnet_stats_ctx_init(vi, &ctx, (void *)stats, true); + + virtnet_get_hw_stats(vi, &ctx, i * 2); + virtnet_fill_stats(vi, i * 2, &ctx, (void *)&rq->stats, true, 0); +} + +static void virtnet_get_queue_stats_tx(struct net_device *dev, int i, + struct netdev_queue_stats_tx *stats) +{ + struct virtnet_info *vi = netdev_priv(dev); + struct send_queue *sq = &vi->sq[i]; + struct virtnet_stats_ctx ctx = {0}; + + virtnet_stats_ctx_init(vi, &ctx, (void *)stats, true); + + virtnet_get_hw_stats(vi, &ctx, i * 2 + 1); + virtnet_fill_stats(vi, i * 2 + 1, &ctx, (void *)&sq->stats, true, 0); +} + +static void virtnet_get_base_stats(struct net_device *dev, + struct netdev_queue_stats_rx *rx, + struct netdev_queue_stats_tx *tx) +{ + struct virtnet_info *vi = netdev_priv(dev); + + /* The queue stats of the virtio-net will not be reset. So here we + * return 0. + */ + rx->bytes = 0; + rx->packets = 0; + + if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) { + rx->hw_drops = 0; + rx->hw_drop_overruns = 0; + } + + if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) { + rx->csum_unnecessary = 0; + rx->csum_none = 0; + rx->csum_bad = 0; + } + + if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) { + rx->hw_gro_packets = 0; + rx->hw_gro_bytes = 0; + rx->hw_gro_wire_packets = 0; + rx->hw_gro_wire_bytes = 0; + } + + if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) + rx->hw_drop_ratelimits = 0; + + tx->bytes = 0; + tx->packets = 0; + tx->stop = 0; + tx->wake = 0; + + if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) { + tx->hw_drops = 0; + tx->hw_drop_errors = 0; + } + + if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) { + tx->csum_none = 0; + tx->needs_csum = 0; + } + + if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) { + tx->hw_gso_packets = 0; + tx->hw_gso_bytes = 0; + tx->hw_gso_wire_packets = 0; + tx->hw_gso_wire_bytes = 0; + } + + if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) + tx->hw_drop_ratelimits = 0; +} + +static const struct netdev_stat_ops virtnet_stat_ops = { + .get_queue_stats_rx = virtnet_get_queue_stats_rx, + .get_queue_stats_tx = virtnet_get_queue_stats_tx, + .get_base_stats = virtnet_get_base_stats, +}; + static void virtnet_freeze_down(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; @@ -3951,10 +4871,16 @@ static int virtnet_restore_up(struct virtio_device *vdev) static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) { + __virtio64 *_offloads __free(kfree) = NULL; struct scatterlist sg; - vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads); - sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads)); + _offloads = kzalloc(sizeof(*_offloads), GFP_KERNEL); + if (!_offloads) + return -ENOMEM; + + *_offloads = cpu_to_virtio64(vi->vdev, offloads); + + sg_init_one(&sg, _offloads, sizeof(*_offloads)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) { @@ -4054,7 +4980,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, synchronize_net(); } - err = _virtnet_set_queues(vi, curr_qp + xdp_qp); + err = virtnet_set_queues(vi, curr_qp + xdp_qp); if (err) goto err; netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); @@ -4156,9 +5082,9 @@ static int virtnet_set_features(struct net_device *dev, if ((dev->features ^ features) & NETIF_F_RXHASH) { if (features & NETIF_F_RXHASH) - 
vi->ctrl->rss.hash_types = vi->rss_hash_types_saved; + vi->rss.hash_types = vi->rss_hash_types_saved; else - vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE; + vi->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE; if (!virtnet_commit_rss_command(vi)) return -EINVAL; @@ -4287,7 +5213,7 @@ static void free_receive_page_frags(struct virtnet_info *vi) int i; for (i = 0; i < vi->max_queue_pairs; i++) if (vi->rq[i].alloc_frag.page) { - if (vi->rq[i].do_dma && vi->rq[i].last_dma) + if (vi->rq[i].last_dma) virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0); put_page(vi->rq[i].alloc_frag.page); } @@ -4470,6 +5396,7 @@ static int virtnet_alloc_queues(struct virtnet_info *vi) u64_stats_init(&vi->rq[i].stats.syncp); u64_stats_init(&vi->sq[i].stats.syncp); + mutex_init(&vi->rq[i].dim_lock); } return 0; @@ -4637,6 +5564,48 @@ static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu) } } +#define VIRTIO_NET_HASH_REPORT_MAX_TABLE 10 +static enum xdp_rss_hash_type +virtnet_xdp_rss_type[VIRTIO_NET_HASH_REPORT_MAX_TABLE] = { + [VIRTIO_NET_HASH_REPORT_NONE] = XDP_RSS_TYPE_NONE, + [VIRTIO_NET_HASH_REPORT_IPv4] = XDP_RSS_TYPE_L3_IPV4, + [VIRTIO_NET_HASH_REPORT_TCPv4] = XDP_RSS_TYPE_L4_IPV4_TCP, + [VIRTIO_NET_HASH_REPORT_UDPv4] = XDP_RSS_TYPE_L4_IPV4_UDP, + [VIRTIO_NET_HASH_REPORT_IPv6] = XDP_RSS_TYPE_L3_IPV6, + [VIRTIO_NET_HASH_REPORT_TCPv6] = XDP_RSS_TYPE_L4_IPV6_TCP, + [VIRTIO_NET_HASH_REPORT_UDPv6] = XDP_RSS_TYPE_L4_IPV6_UDP, + [VIRTIO_NET_HASH_REPORT_IPv6_EX] = XDP_RSS_TYPE_L3_IPV6_EX, + [VIRTIO_NET_HASH_REPORT_TCPv6_EX] = XDP_RSS_TYPE_L4_IPV6_TCP_EX, + [VIRTIO_NET_HASH_REPORT_UDPv6_EX] = XDP_RSS_TYPE_L4_IPV6_UDP_EX +}; + +static int virtnet_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash, + enum xdp_rss_hash_type *rss_type) +{ + const struct xdp_buff *xdp = (void *)_ctx; + struct virtio_net_hdr_v1_hash *hdr_hash; + struct virtnet_info *vi; + u16 hash_report; + + if (!(xdp->rxq->dev->features & NETIF_F_RXHASH)) + return -ENODATA; + + vi = netdev_priv(xdp->rxq->dev); + hdr_hash = (struct virtio_net_hdr_v1_hash *)(xdp->data - vi->hdr_len); + hash_report = __le16_to_cpu(hdr_hash->hash_report); + + if (hash_report >= VIRTIO_NET_HASH_REPORT_MAX_TABLE) + hash_report = VIRTIO_NET_HASH_REPORT_NONE; + + *rss_type = virtnet_xdp_rss_type[hash_report]; + *hash = __le32_to_cpu(hdr_hash->hash_value); + return 0; +} + +static const struct xdp_metadata_ops virtnet_xdp_metadata_ops = { + .xmo_rx_hash = virtnet_xdp_rx_hash, +}; + static int virtnet_probe(struct virtio_device *vdev) { int i, err = -ENOMEM; @@ -4666,6 +5635,7 @@ static int virtnet_probe(struct virtio_device *vdev) dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE | IFF_TX_SKB_NO_LINEAR; dev->netdev_ops = &virtnet_netdev; + dev->stat_ops = &virtnet_stat_ops; dev->features = NETIF_F_HIGHDMA; dev->ethtool_ops = &virtnet_ethtool_ops; @@ -4765,6 +5735,7 @@ static int virtnet_probe(struct virtio_device *vdev) VIRTIO_NET_RSS_HASH_TYPE_UDP_EX); dev->hw_features |= NETIF_F_RXHASH; + dev->xdp_metadata_ops = &virtnet_xdp_metadata_ops; } if (vi->has_rss_hash_report) @@ -4782,6 +5753,8 @@ static int virtnet_probe(struct virtio_device *vdev) if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) vi->has_cvq = true; + mutex_init(&vi->cvq_lock); + if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) { mtu = virtio_cread16(vdev, offsetof(struct virtio_net_config, @@ -4873,7 +5846,7 @@ static int virtnet_probe(struct virtio_device *vdev) virtio_device_ready(vdev); - _virtnet_set_queues(vi, vi->curr_queue_pairs); + virtnet_set_queues(vi, 
vi->curr_queue_pairs); /* a random MAC address has been assigned, notify the device. * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is not there @@ -4893,6 +5866,33 @@ static int virtnet_probe(struct virtio_device *vdev) } } + if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS)) { + struct virtio_net_stats_capabilities *stats_cap __free(kfree) = NULL; + struct scatterlist sg; + __le64 v; + + stats_cap = kzalloc(sizeof(*stats_cap), GFP_KERNEL); + if (!stats_cap) { + rtnl_unlock(); + err = -ENOMEM; + goto free_unregister_netdev; + } + + sg_init_one(&sg, stats_cap, sizeof(*stats_cap)); + + if (!virtnet_send_command_reply(vi, VIRTIO_NET_CTRL_STATS, + VIRTIO_NET_CTRL_STATS_QUERY, + NULL, &sg)) { + pr_debug("virtio_net: fail to get stats capability\n"); + rtnl_unlock(); + err = -EINVAL; + goto free_unregister_netdev; + } + + v = stats_cap->supported_stats_types[0]; + vi->device_stats_cap = le64_to_cpu(v); + } + rtnl_unlock(); err = virtnet_cpu_notif_add(vi); @@ -5021,7 +6021,7 @@ static struct virtio_device_id id_table[] = { VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \ VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \ VIRTIO_NET_F_VQ_NOTF_COAL, \ - VIRTIO_NET_F_GUEST_HDRLEN + VIRTIO_NET_F_GUEST_HDRLEN, VIRTIO_NET_F_DEVICE_STATS static unsigned int features[] = { VIRTNET_FEATURES, diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 0578864792b6..89ca6e75fcc6 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -3457,7 +3457,7 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu) struct vmxnet3_adapter *adapter = netdev_priv(netdev); int err = 0; - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); /* * Reset_work may be in the middle of resetting the device, wait for its diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index bb95ce43cd97..3a252ac5dd28 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -653,7 +653,7 @@ static int vrf_finish_output6(struct net *net, struct sock *sk, skb->dev = dev; rcu_read_lock(); - nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr); + nexthop = rt6_nexthop(dst_rt6_info(dst), &ipv6_hdr(skb)->daddr); neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop); if (unlikely(!neigh)) neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false); @@ -860,7 +860,7 @@ static int vrf_rt6_create(struct net_device *dev) static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); - struct rtable *rt = (struct rtable *)dst; + struct rtable *rt = dst_rtable(dst); struct net_device *dev = dst->dev; unsigned int hh_len = LL_RESERVED_SPACE(dev); struct neighbour *neigh; @@ -1971,7 +1971,7 @@ static int vrf_netns_init_sysctl(struct net *net, struct netns_vrf *nn_vrf) static void vrf_netns_exit_sysctl(struct net *net) { struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id); - struct ctl_table *table; + const struct ctl_table *table; table = nn_vrf->ctl_hdr->ctl_table_arg; unregister_net_sysctl_table(nn_vrf->ctl_hdr); diff --git a/drivers/net/vsockmon.c b/drivers/net/vsockmon.c index a1ba5169ed5d..4c260074c091 100644 --- a/drivers/net/vsockmon.c +++ b/drivers/net/vsockmon.c @@ -58,7 +58,7 @@ static int vsockmon_change_mtu(struct net_device *dev, int new_mtu) if (!vsockmon_is_valid_mtu(new_mtu)) return -EINVAL; - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return 0; } diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c index 
ba319fc21957..f78dd0438843 100644 --- a/drivers/net/vxlan/vxlan_core.c +++ b/drivers/net/vxlan/vxlan_core.c @@ -1584,7 +1584,8 @@ static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed, tun_dst = (struct metadata_dst *)skb_dst(skb); if (tun_dst) { - tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT; + __set_bit(IP_TUNNEL_VXLAN_OPT_BIT, + tun_dst->u.tun_info.key.tun_flags); tun_dst->u.tun_info.options_len = sizeof(*md); } if (gbp->dont_learn) @@ -1674,6 +1675,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) bool raw_proto = false; void *oiph; __be32 vni = 0; + int nh; /* Need UDP and VXLAN header to be present */ if (!pskb_may_pull(skb, VXLAN_HLEN)) @@ -1720,9 +1722,11 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) goto drop; if (vxlan_collect_metadata(vs)) { + IP_TUNNEL_DECLARE_FLAGS(flags) = { }; struct metadata_dst *tun_dst; - tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY, + __set_bit(IP_TUNNEL_KEY_BIT, flags); + tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), flags, key32_to_tunnel_id(vni), sizeof(*md)); if (!tun_dst) @@ -1762,12 +1766,28 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) skb->pkt_type = PACKET_HOST; } - oiph = skb_network_header(skb); + /* Save offset of outer header relative to skb->head, + * because we are going to reset the network header to the inner header + * and might change skb->head. + */ + nh = skb_network_header(skb) - skb->head; + skb_reset_network_header(skb); + if (!pskb_inet_may_pull(skb)) { + DEV_STATS_INC(vxlan->dev, rx_length_errors); + DEV_STATS_INC(vxlan->dev, rx_errors); + vxlan_vnifilter_count(vxlan, vni, vninode, + VXLAN_VNI_STATS_RX_ERRORS, 0); + goto drop; + } + + /* Get the outer header. */ + oiph = skb->head + nh; + if (!vxlan_ecn_decapsulate(vs, oiph, skb)) { - ++vxlan->dev->stats.rx_frame_errors; - ++vxlan->dev->stats.rx_errors; + DEV_STATS_INC(vxlan->dev, rx_frame_errors); + DEV_STATS_INC(vxlan->dev, rx_errors); vxlan_vnifilter_count(vxlan, vni, vninode, VXLAN_VNI_STATS_RX_ERRORS, 0); goto drop; @@ -1837,7 +1857,9 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) goto out; if (!pskb_may_pull(skb, arp_hdr_len(dev))) { - dev->stats.tx_dropped++; + dev_core_stats_tx_dropped_inc(dev); + vxlan_vnifilter_count(vxlan, vni, NULL, + VXLAN_VNI_STATS_TX_DROPS, 0); goto out; } parp = arp_hdr(skb); @@ -1893,7 +1915,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) reply->pkt_type = PACKET_HOST; if (netif_rx(reply) == NET_RX_DROP) { - dev->stats.rx_dropped++; + dev_core_stats_rx_dropped_inc(dev); vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_RX_DROPS, 0); } @@ -2052,7 +2074,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) goto out; if (netif_rx(reply) == NET_RX_DROP) { - dev->stats.rx_dropped++; + dev_core_stats_rx_dropped_inc(dev); vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_RX_DROPS, 0); } @@ -2263,7 +2285,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, len); } else { drop: - dev->stats.rx_dropped++; + dev_core_stats_rx_dropped_inc(dev); vxlan_vnifilter_count(dst_vxlan, vni, NULL, VXLAN_VNI_STATS_RX_DROPS, 0); } @@ -2295,7 +2317,7 @@ static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, addr_family, dst_port, vxlan->cfg.flags); if (!dst_vxlan) { - dev->stats.tx_errors++; + DEV_STATS_INC(dev, tx_errors); vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_ERRORS, 0); kfree_skb(skb); @@ 
-2407,14 +2429,14 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, vni = tunnel_id_to_key32(info->key.tun_id); ifindex = 0; dst_cache = &info->dst_cache; - if (info->key.tun_flags & TUNNEL_VXLAN_OPT) { + if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, info->key.tun_flags)) { if (info->options_len < sizeof(*md)) goto drop; md = ip_tunnel_info_opts(info); } ttl = info->key.ttl; tos = info->key.tos; - udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM); + udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags); } src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, vxlan->cfg.port_max, true); @@ -2455,7 +2477,8 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, old_iph->frag_off & htons(IP_DF))) df = htons(IP_DF); } - } else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) { + } else if (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, + info->key.tun_flags)) { df = htons(IP_DF); } @@ -2509,7 +2532,7 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } if (!info) { - u32 rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags; + u32 rt6i_flags = dst_rt6_info(ndst)->rt6i_flags; err = encap_bypass_if_local(skb, dev, vxlan, AF_INET6, dst_port, ifindex, vni, @@ -2559,7 +2582,7 @@ out_unlock: return; drop: - dev->stats.tx_dropped++; + dev_core_stats_tx_dropped_inc(dev); vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0); dev_kfree_skb(skb); return; @@ -2567,11 +2590,11 @@ drop: tx_error: rcu_read_unlock(); if (err == -ELOOP) - dev->stats.collisions++; + DEV_STATS_INC(dev, collisions); else if (err == -ENETUNREACH) - dev->stats.tx_carrier_errors++; + DEV_STATS_INC(dev, tx_carrier_errors); dst_release(ndst); - dev->stats.tx_errors++; + DEV_STATS_INC(dev, tx_errors); vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_ERRORS, 0); kfree_skb(skb); } @@ -2604,7 +2627,7 @@ static void vxlan_xmit_nh(struct sk_buff *skb, struct net_device *dev, return; drop: - dev->stats.tx_dropped++; + dev_core_stats_tx_dropped_inc(dev); vxlan_vnifilter_count(netdev_priv(dev), vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0); dev_kfree_skb(skb); @@ -2642,7 +2665,7 @@ static netdev_tx_t vxlan_xmit_nhid(struct sk_buff *skb, struct net_device *dev, return NETDEV_TX_OK; drop: - dev->stats.tx_dropped++; + dev_core_stats_tx_dropped_inc(dev); vxlan_vnifilter_count(netdev_priv(dev), vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0); dev_kfree_skb(skb); @@ -2739,7 +2762,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) !is_multicast_ether_addr(eth->h_dest)) vxlan_fdb_miss(vxlan, eth->h_dest); - dev->stats.tx_dropped++; + dev_core_stats_tx_dropped_inc(dev); vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0); kfree_skb(skb); @@ -3158,7 +3181,7 @@ static int vxlan_change_mtu(struct net_device *dev, int new_mtu) return -EINVAL; } - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return 0; } @@ -4546,7 +4569,7 @@ static struct net *vxlan_get_link_net(const struct net_device *dev) { struct vxlan_dev *vxlan = netdev_priv(dev); - return vxlan->net; + return READ_ONCE(vxlan->net); } static struct rtnl_link_ops vxlan_link_ops __read_mostly = { diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig index 31ab2136cdf1..67be9857c86c 100644 --- a/drivers/net/wan/Kconfig +++ b/drivers/net/wan/Kconfig @@ -180,7 +180,7 @@ config C101 config FARSYNC tristate "FarSync T-Series support" - depends on HDLC && PCI + depends on HDLC && PCI && HAS_IOPORT help Support for the FarSync T-Series X.21 (and V.35/V.24) cards by FarSite Communications Ltd. 
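
The vxlan hunks above convert the drop and error paths from direct dev->stats field increments to the dev_core_stats_*_inc() helpers and DEV_STATS_INC(), so concurrent updaters no longer race on the plain net_device_stats counters. A minimal sketch of that accounting pattern, in kernel-style C for a hypothetical tunnel driver (the mytun_* names are illustrative and not part of this patch):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Drop path: bump the per-CPU core drop counter instead of the old
 * racy dev->stats.tx_dropped++.
 */
static void mytun_tx_drop(struct net_device *dev, struct sk_buff *skb)
{
	dev_core_stats_tx_dropped_inc(dev);
	kfree_skb(skb);
}

/* Error path: DEV_STATS_INC() updates the dev->stats fields with
 * atomic operations, mirroring the vxlan tx_error handling above.
 */
static void mytun_tx_error(struct net_device *dev, struct sk_buff *skb, int err)
{
	if (err == -ELOOP)
		DEV_STATS_INC(dev, collisions);
	else if (err == -ENETUNREACH)
		DEV_STATS_INC(dev, tx_carrier_errors);

	DEV_STATS_INC(dev, tx_errors);
	kfree_skb(skb);
}

The same idea appears in the receive paths of the vxlan hunks, where a failed netif_rx() now bumps dev_core_stats_rx_dropped_inc() rather than dev->stats.rx_dropped.
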
diff --git a/drivers/net/wan/fsl_qmc_hdlc.c b/drivers/net/wan/fsl_qmc_hdlc.c index f69b1f579a0c..c5e7ca793c43 100644 --- a/drivers/net/wan/fsl_qmc_hdlc.c +++ b/drivers/net/wan/fsl_qmc_hdlc.c @@ -765,15 +765,13 @@ framer_exit: return ret; } -static int qmc_hdlc_remove(struct platform_device *pdev) +static void qmc_hdlc_remove(struct platform_device *pdev) { struct qmc_hdlc *qmc_hdlc = platform_get_drvdata(pdev); unregister_hdlc_device(qmc_hdlc->netdev); free_netdev(qmc_hdlc->netdev); qmc_hdlc_framer_exit(qmc_hdlc); - - return 0; } static const struct of_device_id qmc_hdlc_id_table[] = { @@ -788,7 +786,7 @@ static struct platform_driver qmc_hdlc_driver = { .of_match_table = qmc_hdlc_id_table, }, .probe = qmc_hdlc_probe, - .remove = qmc_hdlc_remove, + .remove_new = qmc_hdlc_remove, }; module_platform_driver(qmc_hdlc_driver); diff --git a/drivers/net/wireguard/main.c b/drivers/net/wireguard/main.c index ee4da9ab8013..a00671b58701 100644 --- a/drivers/net/wireguard/main.c +++ b/drivers/net/wireguard/main.c @@ -14,7 +14,7 @@ #include <linux/init.h> #include <linux/module.h> -#include <linux/genetlink.h> +#include <net/genetlink.h> #include <net/rtnetlink.h> static int __init wg_mod_init(void) diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c index 815f8f599f5d..5a55db349cb5 100644 --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c @@ -1594,6 +1594,20 @@ static int ar5523_probe(struct usb_interface *intf, struct ar5523 *ar; int error = -ENOMEM; + static const u8 bulk_ep_addr[] = { + AR5523_CMD_TX_PIPE | USB_DIR_OUT, + AR5523_DATA_TX_PIPE | USB_DIR_OUT, + AR5523_CMD_RX_PIPE | USB_DIR_IN, + AR5523_DATA_RX_PIPE | USB_DIR_IN, + 0}; + + if (!usb_check_bulk_endpoints(intf, bulk_ep_addr)) { + dev_err(&dev->dev, + "Could not find all expected endpoints\n"); + error = -ENODEV; + goto out; + } + /* * Load firmware if the device requires it. 
This will return * -ENXIO on success and we'll get called back afer the usb diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h index f02a308a9ffc..34654f710d8a 100644 --- a/drivers/net/wireless/ath/ath.h +++ b/drivers/net/wireless/ath/ath.h @@ -171,8 +171,10 @@ struct ath_common { unsigned int clockrate; spinlock_t cc_lock; - struct ath_cycle_counters cc_ani; - struct ath_cycle_counters cc_survey; + struct_group(cc, + struct ath_cycle_counters cc_ani; + struct ath_cycle_counters cc_survey; + ); struct ath_regulatory regulatory; struct ath_regulatory reg_world_copy; diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 9ce6f49ab261..bdf0552cd1c3 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -75,7 +75,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .cal_data_len = 2116, .fw = { .dir = QCA988X_HW_2_0_FW_DIR, - .board = QCA988X_HW_2_0_BOARD_DATA_FILE, .board_size = QCA988X_BOARD_DATA_SZ, .board_ext_size = QCA988X_BOARD_EXT_DATA_SZ, }, @@ -116,7 +115,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .cal_data_len = 2116, .fw = { .dir = QCA988X_HW_2_0_FW_DIR, - .board = QCA988X_HW_2_0_BOARD_DATA_FILE, .board_size = QCA988X_BOARD_DATA_SZ, .board_ext_size = QCA988X_BOARD_EXT_DATA_SZ, }, @@ -158,7 +156,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .cal_data_len = 2116, .fw = { .dir = QCA9887_HW_1_0_FW_DIR, - .board = QCA9887_HW_1_0_BOARD_DATA_FILE, .board_size = QCA9887_BOARD_DATA_SZ, .board_ext_size = QCA9887_BOARD_EXT_DATA_SZ, }, @@ -199,7 +196,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .cal_data_len = 0, .fw = { .dir = QCA6174_HW_3_0_FW_DIR, - .board = QCA6174_HW_3_0_BOARD_DATA_FILE, .board_size = QCA6174_BOARD_DATA_SZ, .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, }, @@ -236,7 +232,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .cal_data_len = 8124, .fw = { .dir = QCA6174_HW_2_1_FW_DIR, - .board = QCA6174_HW_2_1_BOARD_DATA_FILE, .board_size = QCA6174_BOARD_DATA_SZ, .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, }, @@ -277,7 +272,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .cal_data_len = 8124, .fw = { .dir = QCA6174_HW_2_1_FW_DIR, - .board = QCA6174_HW_2_1_BOARD_DATA_FILE, .board_size = QCA6174_BOARD_DATA_SZ, .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, }, @@ -318,7 +312,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .cal_data_len = 8124, .fw = { .dir = QCA6174_HW_3_0_FW_DIR, - .board = QCA6174_HW_3_0_BOARD_DATA_FILE, .board_size = QCA6174_BOARD_DATA_SZ, .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, }, @@ -360,7 +353,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .fw = { /* uses same binaries as hw3.0 */ .dir = QCA6174_HW_3_0_FW_DIR, - .board = QCA6174_HW_3_0_BOARD_DATA_FILE, .board_size = QCA6174_BOARD_DATA_SZ, .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, }, @@ -409,7 +401,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .cal_data_len = 12064, .fw = { .dir = QCA99X0_HW_2_0_FW_DIR, - .board = QCA99X0_HW_2_0_BOARD_DATA_FILE, .board_size = QCA99X0_BOARD_DATA_SZ, .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ, }, @@ -457,8 +448,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .cal_data_len = 12064, .fw = { .dir = QCA9984_HW_1_0_FW_DIR, - .board = QCA9984_HW_1_0_BOARD_DATA_FILE, - .eboard = QCA9984_HW_1_0_EBOARD_DATA_FILE, .board_size = 
QCA99X0_BOARD_DATA_SZ, .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ, .ext_board_size = QCA99X0_EXT_BOARD_DATA_SZ, @@ -510,7 +499,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .cal_data_len = 12064, .fw = { .dir = QCA9888_HW_2_0_FW_DIR, - .board = QCA9888_HW_2_0_BOARD_DATA_FILE, .board_size = QCA99X0_BOARD_DATA_SZ, .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ, }, @@ -556,7 +544,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .cal_data_len = 8124, .fw = { .dir = QCA9377_HW_1_0_FW_DIR, - .board = QCA9377_HW_1_0_BOARD_DATA_FILE, .board_size = QCA9377_BOARD_DATA_SZ, .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ, }, @@ -597,7 +584,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .cal_data_len = 8124, .fw = { .dir = QCA9377_HW_1_0_FW_DIR, - .board = QCA9377_HW_1_0_BOARD_DATA_FILE, .board_size = QCA9377_BOARD_DATA_SZ, .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ, }, @@ -640,7 +626,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .cal_data_len = 8124, .fw = { .dir = QCA9377_HW_1_0_FW_DIR, - .board = QCA9377_HW_1_0_BOARD_DATA_FILE, .board_size = QCA9377_BOARD_DATA_SZ, .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ, }, @@ -680,7 +665,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .cal_data_len = 12064, .fw = { .dir = QCA4019_HW_1_0_FW_DIR, - .board = QCA4019_HW_1_0_BOARD_DATA_FILE, .board_size = QCA4019_BOARD_DATA_SZ, .board_ext_size = QCA4019_BOARD_EXT_DATA_SZ, }, @@ -720,6 +704,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .max_spatial_stream = 4, .fw = { .dir = WCN3990_HW_1_0_FW_DIR, + .board_size = WCN3990_BOARD_DATA_SZ, + .board_ext_size = WCN3990_BOARD_EXT_DATA_SZ, }, .sw_decrypt_mcast_mgmt = true, .rx_desc_ops = &wcn3990_rx_desc_ops, @@ -942,11 +928,20 @@ static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar, if (dir == NULL) dir = "."; + if (ar->board_name) { + snprintf(filename, sizeof(filename), "%s/%s/%s", + dir, ar->board_name, file); + ret = firmware_request_nowarn(&fw, filename, ar->dev); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot fw request '%s': %d\n", + filename, ret); + if (!ret) + return fw; + } + snprintf(filename, sizeof(filename), "%s/%s", dir, file); ret = firmware_request_nowarn(&fw, filename, ar->dev); ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot fw request '%s': %d\n", filename, ret); - if (ret) return ERR_PTR(ret); @@ -1288,11 +1283,6 @@ static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar, int bd_ie_type) char boardname[100]; if (bd_ie_type == ATH10K_BD_IE_BOARD) { - if (!ar->hw_params.fw.board) { - ath10k_err(ar, "failed to find board file fw entry\n"); - return -EINVAL; - } - scnprintf(boardname, sizeof(boardname), "board-%s-%s.bin", ath10k_bus_str(ar->hif.bus), dev_name(ar->dev)); @@ -1302,7 +1292,7 @@ static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar, int bd_ie_type) if (IS_ERR(ar->normal_mode_fw.board)) { fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, - ar->hw_params.fw.board); + ATH10K_BOARD_DATA_FILE); ar->normal_mode_fw.board = fw; } @@ -1312,13 +1302,8 @@ static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar, int bd_ie_type) ar->normal_mode_fw.board_data = ar->normal_mode_fw.board->data; ar->normal_mode_fw.board_len = ar->normal_mode_fw.board->size; } else if (bd_ie_type == ATH10K_BD_IE_BOARD_EXT) { - if (!ar->hw_params.fw.eboard) { - ath10k_err(ar, "failed to find eboard file fw entry\n"); - return -EINVAL; - } - fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, - ar->hw_params.fw.eboard); + 
ATH10K_EBOARD_DATA_FILE); ar->normal_mode_fw.ext_board = fw; if (IS_ERR(ar->normal_mode_fw.ext_board)) return PTR_ERR(ar->normal_mode_fw.ext_board); @@ -3673,11 +3658,13 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, INIT_WORK(&ar->set_coverage_class_work, ath10k_core_set_coverage_class_work); - init_dummy_netdev(&ar->napi_dev); + ar->napi_dev = alloc_netdev_dummy(0); + if (!ar->napi_dev) + goto err_free_tx_complete; ret = ath10k_coredump_create(ar); if (ret) - goto err_free_tx_complete; + goto err_free_netdev; ret = ath10k_debug_create(ar); if (ret) @@ -3687,6 +3674,8 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, err_free_coredump: ath10k_coredump_destroy(ar); +err_free_netdev: + free_netdev(ar->napi_dev); err_free_tx_complete: destroy_workqueue(ar->workqueue_tx_complete); err_free_aux_wq: @@ -3708,6 +3697,7 @@ void ath10k_core_destroy(struct ath10k *ar) destroy_workqueue(ar->workqueue_tx_complete); + free_netdev(ar->napi_dev); ath10k_debug_destroy(ar); ath10k_coredump_destroy(ar); ath10k_htt_tx_destroy(&ar->htt); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index c110d15528bd..b00099f0b24e 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -1081,6 +1081,8 @@ struct ath10k { */ const struct ath10k_fw_components *running_fw; + const char *board_name; + const struct firmware *pre_cal_file; const struct firmware *cal_file; @@ -1269,7 +1271,7 @@ struct ath10k { struct ath10k_per_peer_tx_stats peer_tx_stats; /* NAPI */ - struct net_device napi_dev; + struct net_device *napi_dev; struct napi_struct napi; struct work_struct set_coverage_class_work; diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c index 394bf3c32abf..0f6de862c3a9 100644 --- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c +++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c @@ -439,7 +439,7 @@ ath10k_dbg_sta_write_peer_debug_trigger(struct file *file, } out: mutex_unlock(&ar->conf_mutex); - return count; + return ret ?: count; } static const struct file_operations fops_peer_debug_trigger = { diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 93c073091996..48897e5eca06 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -39,14 +39,12 @@ enum ath10k_bus { #define QCA988X_HW_2_0_VERSION 0x4100016c #define QCA988X_HW_2_0_CHIP_ID_REV 0x2 #define QCA988X_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA988X/hw2.0" -#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin" #define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234 /* QCA9887 1.0 definitions */ #define QCA9887_HW_1_0_VERSION 0x4100016d #define QCA9887_HW_1_0_CHIP_ID_REV 0 #define QCA9887_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9887/hw1.0" -#define QCA9887_HW_1_0_BOARD_DATA_FILE "board.bin" #define QCA9887_HW_1_0_PATCH_LOAD_ADDR 0x1234 /* QCA6174 target BMI version signatures */ @@ -85,11 +83,9 @@ enum qca9377_chip_id_rev { }; #define QCA6174_HW_2_1_FW_DIR ATH10K_FW_DIR "/QCA6174/hw2.1" -#define QCA6174_HW_2_1_BOARD_DATA_FILE "board.bin" #define QCA6174_HW_2_1_PATCH_LOAD_ADDR 0x1234 #define QCA6174_HW_3_0_FW_DIR ATH10K_FW_DIR "/QCA6174/hw3.0" -#define QCA6174_HW_3_0_BOARD_DATA_FILE "board.bin" #define QCA6174_HW_3_0_PATCH_LOAD_ADDR 0x1234 /* QCA99X0 1.0 definitions (unsupported) */ @@ -99,7 +95,6 @@ enum qca9377_chip_id_rev { #define QCA99X0_HW_2_0_DEV_VERSION 0x01000000 #define QCA99X0_HW_2_0_CHIP_ID_REV 0x1 #define 
QCA99X0_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA99X0/hw2.0" -#define QCA99X0_HW_2_0_BOARD_DATA_FILE "board.bin" #define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234 /* QCA9984 1.0 defines */ @@ -107,8 +102,6 @@ enum qca9377_chip_id_rev { #define QCA9984_HW_DEV_TYPE 0xa #define QCA9984_HW_1_0_CHIP_ID_REV 0x0 #define QCA9984_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9984/hw1.0" -#define QCA9984_HW_1_0_BOARD_DATA_FILE "board.bin" -#define QCA9984_HW_1_0_EBOARD_DATA_FILE "eboard.bin" #define QCA9984_HW_1_0_PATCH_LOAD_ADDR 0x1234 /* QCA9888 2.0 defines */ @@ -116,18 +109,15 @@ enum qca9377_chip_id_rev { #define QCA9888_HW_DEV_TYPE 0xc #define QCA9888_HW_2_0_CHIP_ID_REV 0x0 #define QCA9888_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA9888/hw2.0" -#define QCA9888_HW_2_0_BOARD_DATA_FILE "board.bin" #define QCA9888_HW_2_0_PATCH_LOAD_ADDR 0x1234 /* QCA9377 1.0 definitions */ #define QCA9377_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9377/hw1.0" -#define QCA9377_HW_1_0_BOARD_DATA_FILE "board.bin" #define QCA9377_HW_1_0_PATCH_LOAD_ADDR 0x1234 /* QCA4019 1.0 definitions */ #define QCA4019_HW_1_0_DEV_VERSION 0x01000000 #define QCA4019_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA4019/hw1.0" -#define QCA4019_HW_1_0_BOARD_DATA_FILE "board.bin" #define QCA4019_HW_1_0_PATCH_LOAD_ADDR 0x1234 /* WCN3990 1.0 definitions */ @@ -159,7 +149,9 @@ enum qca9377_chip_id_rev { #define ATH10K_FIRMWARE_MAGIC "QCA-ATH10K" #define ATH10K_BOARD_MAGIC "QCA-ATH10K-BOARD" +#define ATH10K_BOARD_DATA_FILE "board.bin" #define ATH10K_BOARD_API2_FILE "board-2.bin" +#define ATH10K_EBOARD_DATA_FILE "eboard.bin" #define REG_DUMP_COUNT_QCA988X 60 @@ -553,9 +545,7 @@ struct ath10k_hw_params { struct ath10k_hw_params_fw { const char *dir; - const char *board; size_t board_size; - const char *eboard; size_t ext_board_size; size_t board_ext_size; } fw; diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 5c34b156b4ff..3777fe5d7d69 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -3217,7 +3217,7 @@ static void ath10k_pci_free_irq(struct ath10k *ar) void ath10k_pci_init_napi(struct ath10k *ar) { - netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll); + netif_napi_add(ar->napi_dev, &ar->napi, ath10k_pci_napi_poll); } static int ath10k_pci_init_irq(struct ath10k *ar) @@ -3826,28 +3826,28 @@ MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE); -MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE); +MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE); /* QCA9887 1.0 firmware files */ MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE); -MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" QCA9887_HW_1_0_BOARD_DATA_FILE); +MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE); MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE); /* QCA6174 2.1 firmware files */ MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE); MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE); -MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE); +MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_DATA_FILE); MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE); /* QCA6174 3.1 firmware files */ MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" 
ATH10K_FW_API4_FILE); MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE); MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API6_FILE); -MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE); +MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE); MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE); /* QCA9377 1.0 firmware files */ MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API6_FILE); MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE); -MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE); +MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE); diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c index 0ab5433f6cf6..e28f2fe1101b 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -2532,7 +2532,7 @@ static int ath10k_sdio_probe(struct sdio_func *func, return -ENOMEM; } - netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll); + netif_napi_add(ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll); ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n", diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index 2c39bad7ebfb..8530550cf5df 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -935,7 +935,7 @@ static int ath10k_snoc_hif_start(struct ath10k *ar) bitmap_clear(ar_snoc->pending_ce_irqs, 0, CE_COUNT_MAX); - dev_set_threaded(&ar->napi_dev, true); + dev_set_threaded(ar->napi_dev, true); ath10k_core_napi_enable(ar); ath10k_snoc_irq_enable(ar); ath10k_snoc_rx_post(ar); @@ -1253,7 +1253,7 @@ static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget) static void ath10k_snoc_init_napi(struct ath10k *ar) { - netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll); + netif_napi_add(ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll); } static int ath10k_snoc_request_irq(struct ath10k *ar) @@ -1338,6 +1338,9 @@ static void ath10k_snoc_quirks_init(struct ath10k *ar) struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); struct device *dev = &ar_snoc->dev->dev; + /* ignore errors, keep NULL if there is no property */ + of_property_read_string(dev->of_node, "firmware-name", &ar->board_name); + if (of_property_read_bool(dev->of_node, "qcom,snoc-host-cap-8bit-quirk")) set_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags); } diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h index ec556bb88d65..ba37e6c7ced0 100644 --- a/drivers/net/wireless/ath/ath10k/targaddrs.h +++ b/drivers/net/wireless/ath/ath10k/targaddrs.h @@ -491,4 +491,7 @@ struct host_interest { #define QCA4019_BOARD_DATA_SZ 12064 #define QCA4019_BOARD_EXT_DATA_SZ 0 +#define WCN3990_BOARD_DATA_SZ 26328 +#define WCN3990_BOARD_EXT_DATA_SZ 0 + #endif /* __TARGADDRS_H__ */ diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c index 31c8d7fbb095..8b15ec07b107 100644 --- a/drivers/net/wireless/ath/ath10k/thermal.c +++ b/drivers/net/wireless/ath/ath10k/thermal.c @@ -100,7 +100,7 @@ static ssize_t ath10k_thermal_show_temp(struct device *dev, spin_unlock_bh(&ar->data_lock); /* display in millidegree celsius */ - ret = snprintf(buf, PAGE_SIZE, "%d\n", temperature * 1000); + ret = sysfs_emit(buf, "%d\n", temperature * 1000); out: mutex_unlock(&ar->conf_mutex); return ret; diff --git 
a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c index 3c482baacec1..3b51b7f52130 100644 --- a/drivers/net/wireless/ath/ath10k/usb.c +++ b/drivers/net/wireless/ath/ath10k/usb.c @@ -1014,7 +1014,7 @@ static int ath10k_usb_probe(struct usb_interface *interface, return -ENOMEM; } - netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_usb_napi_poll); + netif_napi_add(ar->napi_dev, &ar->napi, ath10k_usb_napi_poll); usb_get_dev(dev); vendor_id = le16_to_cpu(dev->descriptor.idVendor); diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 2e9661f4bea8..80d255aaff1b 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -1763,12 +1763,32 @@ void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch, int ath10k_wmi_wait_for_service_ready(struct ath10k *ar) { - unsigned long time_left; + unsigned long time_left, i; time_left = wait_for_completion_timeout(&ar->wmi.service_ready, WMI_SERVICE_READY_TIMEOUT_HZ); - if (!time_left) - return -ETIMEDOUT; + if (!time_left) { + /* Sometimes the PCI HIF doesn't receive interrupt + * for the service ready message even if the buffer + * was completed. PCIe sniffer shows that it's + * because the corresponding CE ring doesn't fires + * it. Workaround here by polling CE rings once. + */ + ath10k_warn(ar, "failed to receive service ready completion, polling..\n"); + + for (i = 0; i < CE_COUNT; i++) + ath10k_hif_send_complete_check(ar, i, 1); + + time_left = wait_for_completion_timeout(&ar->wmi.service_ready, + WMI_SERVICE_READY_TIMEOUT_HZ); + if (!time_left) { + ath10k_warn(ar, "polling timed out\n"); + return -ETIMEDOUT; + } + + ath10k_warn(ar, "service ready completion received, continuing normally\n"); + } + return 0; } diff --git a/drivers/net/wireless/ath/ath11k/Makefile b/drivers/net/wireless/ath/ath11k/Makefile index 2c94d50ae36f..43d2d8ddcdc0 100644 --- a/drivers/net/wireless/ath/ath11k/Makefile +++ b/drivers/net/wireless/ath/ath11k/Makefile @@ -18,7 +18,8 @@ ath11k-y += core.o \ dbring.o \ hw.o \ pcic.o \ - fw.o + fw.o \ + p2p.o ath11k-$(CONFIG_ATH11K_DEBUGFS) += debugfs.o debugfs_htt_stats.o debugfs_sta.o ath11k-$(CONFIG_NL80211_TESTMODE) += testmode.o diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c index 7c0a23517949..ca0f17ddebba 100644 --- a/drivers/net/wireless/ath/ath11k/ahb.c +++ b/drivers/net/wireless/ath/ath11k/ahb.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include <linux/module.h> @@ -413,7 +413,7 @@ static int ath11k_ahb_power_up(struct ath11k_base *ab) return ret; } -static void ath11k_ahb_power_down(struct ath11k_base *ab) +static void ath11k_ahb_power_down(struct ath11k_base *ab, bool is_suspend) { struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); @@ -442,6 +442,7 @@ static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab) free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp); netif_napi_del(&irq_grp->napi); + free_netdev(irq_grp->napi_ndev); } } @@ -533,8 +534,12 @@ static int ath11k_ahb_config_ext_irq(struct ath11k_base *ab) irq_grp->ab = ab; irq_grp->grp_id = i; - init_dummy_netdev(&irq_grp->napi_ndev); - netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi, + + irq_grp->napi_ndev = alloc_netdev_dummy(0); + if (!irq_grp->napi_ndev) + return -ENOMEM; + + netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi, ath11k_ahb_ext_grp_napi_poll); for (j = 0; j < ATH11K_EXT_IRQ_NUM_MAX; j++) { @@ -1256,7 +1261,7 @@ static void ath11k_ahb_remove(struct platform_device *pdev) struct ath11k_base *ab = platform_get_drvdata(pdev); if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) { - ath11k_ahb_power_down(ab); + ath11k_ahb_power_down(ab, false); ath11k_debugfs_soc_destroy(ab); ath11k_qmi_deinit_service(ab); goto qmi_fail; diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index c78bce19bd75..3cc817a3b4a4 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -247,7 +247,10 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { }, .interface_modes = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_AP), + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_DEVICE) | + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO), .supports_monitor = false, .full_monitor_mode = false, .supports_shadow_regs = true, @@ -416,7 +419,10 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { }, .interface_modes = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_AP), + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_DEVICE) | + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO), .supports_monitor = false, .full_monitor_mode = false, .supports_shadow_regs = true, @@ -501,7 +507,10 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { }, .interface_modes = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_AP), + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_DEVICE) | + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO), .supports_monitor = false, .supports_shadow_regs = true, .idle_ps = true, @@ -750,7 +759,10 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { }, .interface_modes = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_AP), + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_DEVICE) | + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO), .supports_monitor = false, .full_monitor_mode = false, .supports_shadow_regs = true, @@ -894,12 +906,6 @@ int ath11k_core_suspend(struct ath11k_base *ab) return ret; } - ret = ath11k_wow_enable(ab); - if (ret) { - ath11k_warn(ab, "failed to enable wow during suspend: %d\n", ret); - return ret; - } - ret = ath11k_dp_rx_pktlog_stop(ab, false); if (ret) { ath11k_warn(ab, "failed to stop dp rx pktlog during suspend: %d\n", @@ -910,29 +916,85 @@ int ath11k_core_suspend(struct ath11k_base *ab) ath11k_ce_stop_shadow_timers(ab); ath11k_dp_stop_shadow_timers(ab); + /* PM framework skips suspend_late/resume_early callbacks + * if other devices report errors in 
their suspend callbacks. + * However ath11k_core_resume() would still be called because + * here we return success thus kernel put us on dpm_suspended_list. + * Since we won't go through a power down/up cycle, there is + * no chance to call complete(&ab->restart_completed) in + * ath11k_core_restart(), making ath11k_core_resume() timeout. + * So call it here to avoid this issue. This also works in case + * no error happens thus suspend_late/resume_early get called, + * because it will be reinitialized in ath11k_core_resume_early(). + */ + complete(&ab->restart_completed); + + return 0; +} +EXPORT_SYMBOL(ath11k_core_suspend); + +int ath11k_core_suspend_late(struct ath11k_base *ab) +{ + struct ath11k_pdev *pdev; + struct ath11k *ar; + + if (!ab->hw_params.supports_suspend) + return -EOPNOTSUPP; + + /* so far single_pdev_only chips have supports_suspend as true + * and only the first pdev is valid. + */ + pdev = ath11k_core_get_single_pdev(ab); + ar = pdev->ar; + if (!ar || ar->state != ATH11K_STATE_OFF) + return 0; + ath11k_hif_irq_disable(ab); ath11k_hif_ce_irq_disable(ab); - ret = ath11k_hif_suspend(ab); - if (ret) { - ath11k_warn(ab, "failed to suspend hif: %d\n", ret); - return ret; - } + ath11k_hif_power_down(ab, true); return 0; } -EXPORT_SYMBOL(ath11k_core_suspend); +EXPORT_SYMBOL(ath11k_core_suspend_late); + +int ath11k_core_resume_early(struct ath11k_base *ab) +{ + int ret; + struct ath11k_pdev *pdev; + struct ath11k *ar; + + if (!ab->hw_params.supports_suspend) + return -EOPNOTSUPP; + + /* so far single_pdev_only chips have supports_suspend as true + * and only the first pdev is valid. + */ + pdev = ath11k_core_get_single_pdev(ab); + ar = pdev->ar; + if (!ar || ar->state != ATH11K_STATE_OFF) + return 0; + + reinit_completion(&ab->restart_completed); + ret = ath11k_hif_power_up(ab); + if (ret) + ath11k_warn(ab, "failed to power up hif during resume: %d\n", ret); + + return ret; +} +EXPORT_SYMBOL(ath11k_core_resume_early); int ath11k_core_resume(struct ath11k_base *ab) { int ret; struct ath11k_pdev *pdev; struct ath11k *ar; + long time_left; if (!ab->hw_params.supports_suspend) return -EOPNOTSUPP; - /* so far signle_pdev_only chips have supports_suspend as true + /* so far single_pdev_only chips have supports_suspend as true * and only the first pdev is valid. 
*/ pdev = ath11k_core_get_single_pdev(ab); @@ -940,29 +1002,19 @@ int ath11k_core_resume(struct ath11k_base *ab) if (!ar || ar->state != ATH11K_STATE_OFF) return 0; - ret = ath11k_hif_resume(ab); - if (ret) { - ath11k_warn(ab, "failed to resume hif during resume: %d\n", ret); - return ret; + time_left = wait_for_completion_timeout(&ab->restart_completed, + ATH11K_RESET_TIMEOUT_HZ); + if (time_left == 0) { + ath11k_warn(ab, "timeout while waiting for restart complete"); + return -ETIMEDOUT; } - ath11k_hif_ce_irq_enable(ab); - ath11k_hif_irq_enable(ab); - ret = ath11k_dp_rx_pktlog_start(ab); - if (ret) { + if (ret) ath11k_warn(ab, "failed to start rx pktlog during resume: %d\n", ret); - return ret; - } - ret = ath11k_wow_wakeup(ab); - if (ret) { - ath11k_warn(ab, "failed to wakeup wow during resume: %d\n", ret); - return ret; - } - - return 0; + return ret; } EXPORT_SYMBOL(ath11k_core_resume); @@ -2060,6 +2112,8 @@ static void ath11k_core_restart(struct work_struct *work) if (!ab->is_reset) ath11k_core_post_reconfigure_recovery(ab); + + complete(&ab->restart_completed); } static void ath11k_core_reset(struct work_struct *work) @@ -2129,7 +2183,7 @@ static void ath11k_core_reset(struct work_struct *work) ath11k_hif_irq_disable(ab); ath11k_hif_ce_irq_disable(ab); - ath11k_hif_power_down(ab); + ath11k_hif_power_down(ab, false); ath11k_hif_power_up(ab); ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset started\n"); @@ -2202,7 +2256,7 @@ void ath11k_core_deinit(struct ath11k_base *ab) mutex_unlock(&ab->core_lock); - ath11k_hif_power_down(ab); + ath11k_hif_power_down(ab, false); ath11k_mac_destroy(ab); ath11k_core_soc_destroy(ab); ath11k_fw_destroy(ab); @@ -2255,6 +2309,7 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size, timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0); init_completion(&ab->htc_suspend); init_completion(&ab->wow.wakeup_completed); + init_completion(&ab->restart_completed); ab->dev = dev; ab->hif.bus = bus; diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index b3fb74a226fb..205f40ee6b66 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef ATH11K_CORE_H @@ -174,7 +174,7 @@ struct ath11k_ext_irq_grp { u64 timestamp; bool napi_enabled; struct napi_struct napi; - struct net_device napi_ndev; + struct net_device *napi_ndev; }; enum ath11k_smbios_cc_type { @@ -1033,6 +1033,8 @@ struct ath11k_base { DECLARE_BITMAP(fw_features, ATH11K_FW_FEATURE_COUNT); } fw; + struct completion restart_completed; + #ifdef CONFIG_NL80211_TESTMODE struct { u32 data_pos; @@ -1232,8 +1234,10 @@ void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd); int ath11k_core_check_dt(struct ath11k_base *ath11k); int ath11k_core_check_smbios(struct ath11k_base *ab); void ath11k_core_halt(struct ath11k *ar); +int ath11k_core_resume_early(struct ath11k_base *ab); int ath11k_core_resume(struct ath11k_base *ab); int ath11k_core_suspend(struct ath11k_base *ab); +int ath11k_core_suspend_late(struct ath11k_base *ab); void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab); bool ath11k_core_coldboot_cal_support(struct ath11k_base *ab); diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c index a48e737ef35d..414a5ce279f7 100644 --- a/drivers/net/wireless/ath/ath11k/debugfs.c +++ b/drivers/net/wireless/ath/ath11k/debugfs.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #include <linux/vmalloc.h> @@ -980,7 +980,7 @@ int ath11k_debugfs_pdev_create(struct ath11k_base *ab) debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab, &fops_simulate_fw_crash); - debugfs_create_file("soc_dp_stats", 0600, ab->debugfs_soc, ab, + debugfs_create_file("soc_dp_stats", 0400, ab->debugfs_soc, ab, &fops_soc_dp_stats); if (ab->hw_params.sram_dump.start != 0) diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h index 65e8f244ebb9..e453c137385e 100644 --- a/drivers/net/wireless/ath/ath11k/hal.h +++ b/drivers/net/wireless/ath/ath11k/hal.h @@ -664,7 +664,7 @@ struct hal_srng_config { }; /** - * enum hal_rx_buf_return_buf_manager + * enum hal_rx_buf_return_buf_manager - manager for returned rx buffers * * @HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST: Buffer returned to WBM idle buffer list * @HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST: Descriptor returned to WBM idle diff --git a/drivers/net/wireless/ath/ath11k/hif.h b/drivers/net/wireless/ath/ath11k/hif.h index 877a4073fed6..c4c6cc09c7c1 100644 --- a/drivers/net/wireless/ath/ath11k/hif.h +++ b/drivers/net/wireless/ath/ath11k/hif.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. - * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef _HIF_H_ @@ -18,7 +18,7 @@ struct ath11k_hif_ops { int (*start)(struct ath11k_base *ab); void (*stop)(struct ath11k_base *ab); int (*power_up)(struct ath11k_base *ab); - void (*power_down)(struct ath11k_base *ab); + void (*power_down)(struct ath11k_base *ab, bool is_suspend); int (*suspend)(struct ath11k_base *ab); int (*resume)(struct ath11k_base *ab); int (*map_service_to_pipe)(struct ath11k_base *ab, u16 service_id, @@ -67,12 +67,18 @@ static inline void ath11k_hif_irq_disable(struct ath11k_base *ab) static inline int ath11k_hif_power_up(struct ath11k_base *ab) { + if (!ab->hif.ops->power_up) + return -EOPNOTSUPP; + return ab->hif.ops->power_up(ab); } -static inline void ath11k_hif_power_down(struct ath11k_base *ab) +static inline void ath11k_hif_power_down(struct ath11k_base *ab, bool is_suspend) { - ab->hif.ops->power_down(ab); + if (!ab->hif.ops->power_down) + return; + + ab->hif.ops->power_down(ab, is_suspend); } static inline int ath11k_hif_suspend(struct ath11k_base *ab) diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 9f4bf41a3d41..4f62e38ba48b 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -1231,14 +1231,7 @@ static int ath11k_mac_vif_setup_ps(struct ath11k_vif *arvif) enable_ps = arvif->ps; - if (!arvif->is_started) { - /* mac80211 can update vif powersave state while disconnected. - * Firmware doesn't behave nicely and consumes more power than - * necessary if PS is disabled on a non-started vdev. Hence - * force-enable PS for non-running vdevs. - */ - psmode = WMI_STA_PS_MODE_ENABLED; - } else if (enable_ps) { + if (enable_ps) { psmode = WMI_STA_PS_MODE_ENABLED; param = WMI_STA_PS_PARAM_INACTIVITY_TIME; @@ -1430,10 +1423,67 @@ static bool ath11k_mac_set_nontx_vif_params(struct ath11k_vif *tx_arvif, return false; } -static void ath11k_mac_set_vif_params(struct ath11k_vif *arvif, - struct sk_buff *bcn) +static int ath11k_mac_setup_bcn_p2p_ie(struct ath11k_vif *arvif, + struct sk_buff *bcn) +{ + struct ath11k *ar = arvif->ar; + struct ieee80211_mgmt *mgmt; + const u8 *p2p_ie; + int ret; + + mgmt = (void *)bcn->data; + p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, + mgmt->u.beacon.variable, + bcn->len - (mgmt->u.beacon.variable - + bcn->data)); + if (!p2p_ie) + return -ENOENT; + + ret = ath11k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie); + if (ret) { + ath11k_warn(ar->ab, "failed to submit P2P GO bcn ie for vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + + return ret; +} + +static int ath11k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui, + u8 oui_type, size_t ie_offset) { + size_t len; + const u8 *next, *end; + u8 *ie; + + if (WARN_ON(skb->len < ie_offset)) + return -EINVAL; + + ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type, + skb->data + ie_offset, + skb->len - ie_offset); + if (!ie) + return -ENOENT; + + len = ie[1] + 2; + end = skb->data + skb->len; + next = ie + len; + + if (WARN_ON(next > end)) + return -EINVAL; + + memmove(ie, next, end - next); + skb_trim(skb, skb->len - len); + + return 0; +} + +static int ath11k_mac_set_vif_params(struct ath11k_vif *arvif, + struct sk_buff *bcn) +{ + struct ath11k_base *ab = arvif->ar->ab; struct ieee80211_mgmt *mgmt; + int ret = 0; u8 *ies; ies = bcn->data + ieee80211_get_hdrlen_from_skb(bcn); @@ -1451,6 +1501,32 @@ static void ath11k_mac_set_vif_params(struct ath11k_vif *arvif, arvif->wpaie_present = true; else arvif->wpaie_present = false; + + if (arvif->vdev_subtype != 
WMI_VDEV_SUBTYPE_P2P_GO) + return ret; + + ret = ath11k_mac_setup_bcn_p2p_ie(arvif, bcn); + if (ret) { + ath11k_warn(ab, "failed to setup P2P GO bcn ie: %d\n", + ret); + return ret; + } + + /* P2P IE is inserted by firmware automatically (as + * configured above) so remove it from the base beacon + * template to avoid duplicate P2P IEs in beacon frames. + */ + ret = ath11k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, + WLAN_OUI_TYPE_WFA_P2P, + offsetof(struct ieee80211_mgmt, + u.beacon.variable)); + if (ret) { + ath11k_warn(ab, "failed to remove P2P vendor ie: %d\n", + ret); + return ret; + } + + return ret; } static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif) @@ -1472,10 +1548,12 @@ static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif) return -EPERM; } - if (tx_arvif == arvif) - ath11k_mac_set_vif_params(tx_arvif, beacons->bcn[0].skb); - else + if (tx_arvif == arvif) { + if (ath11k_mac_set_vif_params(tx_arvif, beacons->bcn[0].skb)) + return -EINVAL; + } else { arvif->wpaie_present = tx_arvif->wpaie_present; + } for (i = 0; i < beacons->cnt; i++) { if (tx_arvif != arvif && !nontx_vif_params_set) @@ -1534,10 +1612,12 @@ static int ath11k_mac_setup_bcn_tmpl_mbssid(struct ath11k_vif *arvif) return -EPERM; } - if (tx_arvif == arvif) - ath11k_mac_set_vif_params(tx_arvif, bcn); - else if (!ath11k_mac_set_nontx_vif_params(tx_arvif, arvif, bcn)) + if (tx_arvif == arvif) { + if (ath11k_mac_set_vif_params(tx_arvif, bcn)) + return -EINVAL; + } else if (!ath11k_mac_set_nontx_vif_params(tx_arvif, arvif, bcn)) { return -EINVAL; + } ret = ath11k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn, 0); kfree_skb(bcn); @@ -1579,7 +1659,7 @@ void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif) if (vif->bss_conf.color_change_active && ieee80211_beacon_cntdwn_is_complete(vif, 0)) { arvif->bcca_zero_sent = true; - ieee80211_color_change_finish(vif); + ieee80211_color_change_finish(vif, 0); return; } @@ -3996,6 +4076,9 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw, arg->vdev_id = arvif->vdev_id; arg->scan_id = ATH11K_SCAN_ID; + if (ar->ab->hw_params.single_pdev_only) + arg->scan_f_filter_prb_req = 1; + if (req->ie_len) { arg->extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL); if (!arg->extraie.ptr) { @@ -6570,17 +6653,26 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw, case NL80211_IFTYPE_UNSPECIFIED: case NL80211_IFTYPE_STATION: arvif->vdev_type = WMI_VDEV_TYPE_STA; + if (vif->p2p) + arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT; break; case NL80211_IFTYPE_MESH_POINT: arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S; fallthrough; case NL80211_IFTYPE_AP: arvif->vdev_type = WMI_VDEV_TYPE_AP; + if (vif->p2p) + arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO; break; case NL80211_IFTYPE_MONITOR: arvif->vdev_type = WMI_VDEV_TYPE_MONITOR; ar->monitor_vdev_id = bit; break; + case NL80211_IFTYPE_P2P_DEVICE: + arvif->vdev_type = WMI_VDEV_TYPE_STA; + arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE; + break; + default: WARN_ON(1); break; @@ -9255,9 +9347,11 @@ static int ath11k_mac_op_remain_on_channel(struct ieee80211_hw *hw, arg->dwell_time_passive = scan_time_msec; arg->max_scan_time = scan_time_msec; arg->scan_f_passive = 1; - arg->scan_f_filter_prb_req = 1; arg->burst_duration = duration; + if (!ar->ab->hw_params.single_pdev_only) + arg->scan_f_filter_prb_req = 1; + ret = ath11k_start_scan(ar, arg); if (ret) { ath11k_warn(ar->ab, "failed to start roc scan: %d\n", ret); @@ -9859,12 +9953,18 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar) 
struct ieee80211_iface_combination *combinations; struct ieee80211_iface_limit *limits; int n_limits; + bool p2p; + + p2p = ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_P2P_DEVICE); combinations = kzalloc(sizeof(*combinations), GFP_KERNEL); if (!combinations) return -ENOMEM; - n_limits = 2; + if (p2p) + n_limits = 3; + else + n_limits = 2; limits = kcalloc(n_limits, sizeof(*limits), GFP_KERNEL); if (!limits) { @@ -9872,39 +9972,29 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar) return -ENOMEM; } + limits[0].types |= BIT(NL80211_IFTYPE_STATION); + limits[1].types |= BIT(NL80211_IFTYPE_AP); + if (IS_ENABLED(CONFIG_MAC80211_MESH) && + ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT)) + limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT); + + combinations[0].limits = limits; + combinations[0].n_limits = n_limits; + combinations[0].beacon_int_infra_match = true; + combinations[0].beacon_int_min_gcd = 100; + if (ab->hw_params.support_dual_stations) { limits[0].max = 2; - limits[0].types |= BIT(NL80211_IFTYPE_STATION); - limits[1].max = 1; - limits[1].types |= BIT(NL80211_IFTYPE_AP); - if (IS_ENABLED(CONFIG_MAC80211_MESH) && - ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT)) - limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT); - combinations[0].limits = limits; - combinations[0].n_limits = 2; combinations[0].max_interfaces = ab->hw_params.num_vdevs; combinations[0].num_different_channels = 2; - combinations[0].beacon_int_infra_match = true; - combinations[0].beacon_int_min_gcd = 100; } else { limits[0].max = 1; - limits[0].types |= BIT(NL80211_IFTYPE_STATION); - limits[1].max = 16; - limits[1].types |= BIT(NL80211_IFTYPE_AP); - - if (IS_ENABLED(CONFIG_MAC80211_MESH) && - ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT)) - limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT); - combinations[0].limits = limits; - combinations[0].n_limits = 2; combinations[0].max_interfaces = 16; combinations[0].num_different_channels = 1; - combinations[0].beacon_int_infra_match = true; - combinations[0].beacon_int_min_gcd = 100; combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | BIT(NL80211_CHAN_WIDTH_20) | BIT(NL80211_CHAN_WIDTH_40) | @@ -9913,6 +10003,13 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar) BIT(NL80211_CHAN_WIDTH_160); } + if (p2p) { + limits[1].types |= BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO); + limits[2].max = 1; + limits[2].types |= BIT(NL80211_IFTYPE_P2P_DEVICE); + } + ar->hw->wiphy->iface_combinations = combinations; ar->hw->wiphy->n_iface_combinations = 1; @@ -10029,6 +10126,7 @@ static int __ath11k_mac_register(struct ath11k *ar) if (ret) goto err; + wiphy_read_of_freq_limits(ar->hw->wiphy); ath11k_mac_setup_ht_vht_cap(ar, cap, &ht_cap); ath11k_mac_setup_he_cap(ar, cap); diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c index fb4ecf9a103e..ab182690aed3 100644 --- a/drivers/net/wireless/ath/ath11k/mhi.c +++ b/drivers/net/wireless/ath/ath11k/mhi.c @@ -19,6 +19,7 @@ #define MHI_TIMEOUT_DEFAULT_MS 20000 #define RDDM_DUMP_SIZE 0x420000 +#define MHI_CB_INVALID 0xff static const struct mhi_channel_config ath11k_mhi_channels_qca6390[] = { { @@ -158,9 +159,8 @@ void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab) ath11k_dbg(ab, ATH11K_DBG_PCI, "mhistatus 0x%x\n", val); - /* Observed on QCA6390 that after SOC_GLOBAL_RESET, MHISTATUS - * has SYSERR bit set and thus need to set MHICTRL_RESET - * to clear SYSERR. 
+ /* After SOC_GLOBAL_RESET, MHISTATUS may still have SYSERR bit set + * and thus need to set MHICTRL_RESET to clear SYSERR. */ ath11k_pcic_write32(ab, MHICTRL, MHICTRL_RESET_MASK); @@ -269,6 +269,7 @@ static void ath11k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl, enum mhi_callback cb) { struct ath11k_base *ab = dev_get_drvdata(mhi_cntrl->cntrl_dev); + struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); ath11k_dbg(ab, ATH11K_DBG_BOOT, "notify status reason %s\n", ath11k_mhi_op_callback_to_str(cb)); @@ -279,12 +280,21 @@ static void ath11k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl, break; case MHI_CB_EE_RDDM: ath11k_warn(ab, "firmware crashed: MHI_CB_EE_RDDM\n"); + if (ab_pci->mhi_pre_cb == MHI_CB_EE_RDDM) { + ath11k_dbg(ab, ATH11K_DBG_BOOT, + "do not queue again for consecutive RDDM event\n"); + break; + } + if (!(test_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags))) queue_work(ab->workqueue_aux, &ab->reset_work); + break; default: break; } + + ab_pci->mhi_pre_cb = cb; } static int ath11k_mhi_op_read_reg(struct mhi_controller *mhi_cntrl, @@ -397,6 +407,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci) goto free_controller; } + ab_pci->mhi_pre_cb = MHI_CB_INVALID; ret = mhi_register_controller(mhi_ctrl, ath11k_mhi_config); if (ret) { ath11k_err(ab, "failed to register to mhi bus, err = %d\n", ret); @@ -442,9 +453,17 @@ int ath11k_mhi_start(struct ath11k_pci *ab_pci) return 0; } -void ath11k_mhi_stop(struct ath11k_pci *ab_pci) +void ath11k_mhi_stop(struct ath11k_pci *ab_pci, bool is_suspend) { - mhi_power_down(ab_pci->mhi_ctrl, true); + /* During suspend we need to use mhi_power_down_keep_dev() + * workaround, otherwise ath11k_core_resume() will timeout + * during resume. + */ + if (is_suspend) + mhi_power_down_keep_dev(ab_pci->mhi_ctrl, true); + else + mhi_power_down(ab_pci->mhi_ctrl, true); + mhi_unprepare_after_power_down(ab_pci->mhi_ctrl); } diff --git a/drivers/net/wireless/ath/ath11k/mhi.h b/drivers/net/wireless/ath/ath11k/mhi.h index f81fba2644a4..2d567705e732 100644 --- a/drivers/net/wireless/ath/ath11k/mhi.h +++ b/drivers/net/wireless/ath/ath11k/mhi.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2020 The Linux Foundation. All rights reserved. - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef _ATH11K_MHI_H #define _ATH11K_MHI_H @@ -18,7 +18,7 @@ #define MHICTRL_RESET_MASK 0x2 int ath11k_mhi_start(struct ath11k_pci *ar_pci); -void ath11k_mhi_stop(struct ath11k_pci *ar_pci); +void ath11k_mhi_stop(struct ath11k_pci *ar_pci, bool is_suspend); int ath11k_mhi_register(struct ath11k_pci *ar_pci); void ath11k_mhi_unregister(struct ath11k_pci *ar_pci); void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab); @@ -26,5 +26,4 @@ void ath11k_mhi_clear_vector(struct ath11k_base *ab); int ath11k_mhi_suspend(struct ath11k_pci *ar_pci); int ath11k_mhi_resume(struct ath11k_pci *ar_pci); - #endif diff --git a/drivers/net/wireless/ath/ath11k/p2p.c b/drivers/net/wireless/ath/ath11k/p2p.c new file mode 100644 index 000000000000..01e14523f1fe --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/p2p.c @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include "core.h" +#include "wmi.h" +#include "mac.h" +#include "p2p.h" + +static void ath11k_p2p_noa_ie_fill(u8 *data, size_t len, + const struct ath11k_wmi_p2p_noa_info *noa) +{ + struct ieee80211_p2p_noa_attr *noa_attr; + u8 noa_descriptors, ctwindow; + bool oppps; + __le16 *noa_attr_len; + u16 attr_len; + int i; + + ctwindow = u32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_CTWIN_TU); + oppps = u32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_OPP_PS); + noa_descriptors = u32_get_bits(noa->noa_attr, + WMI_P2P_NOA_INFO_DESC_NUM); + + /* P2P IE */ + data[0] = WLAN_EID_VENDOR_SPECIFIC; + data[1] = len - 2; + data[2] = (WLAN_OUI_WFA >> 16) & 0xff; + data[3] = (WLAN_OUI_WFA >> 8) & 0xff; + data[4] = (WLAN_OUI_WFA >> 0) & 0xff; + data[5] = WLAN_OUI_TYPE_WFA_P2P; + + /* NOA ATTR */ + data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE; + noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */ + noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9]; + + noa_attr->index = u32_get_bits(noa->noa_attr, + WMI_P2P_NOA_INFO_INDEX); + noa_attr->oppps_ctwindow = ctwindow; + if (oppps) + noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT; + + for (i = 0; i < noa_descriptors; i++) { + noa_attr->desc[i].count = noa->descriptors[i].type_count; + noa_attr->desc[i].duration = + cpu_to_le32(noa->descriptors[i].duration); + noa_attr->desc[i].interval = + cpu_to_le32(noa->descriptors[i].interval); + noa_attr->desc[i].start_time = + cpu_to_le32(noa->descriptors[i].start_time); + } + + attr_len = 2; /* index + oppps_ctwindow */ + attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc); + *noa_attr_len = __cpu_to_le16(attr_len); +} + +static size_t +ath11k_p2p_noa_ie_len_compute(const struct ath11k_wmi_p2p_noa_info *noa) +{ + size_t len = 0; + u8 noa_descriptors = u32_get_bits(noa->noa_attr, + WMI_P2P_NOA_INFO_DESC_NUM); + + if (!(noa_descriptors) && + !(u32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_OPP_PS))) + return 0; + + len += 1 + 1 + 4; /* EID + len + OUI */ + len += 1 + 2; /* noa attr + attr len */ + len += 1 + 1; /* index + oppps_ctwindow */ + len += noa_descriptors * + sizeof(struct ieee80211_p2p_noa_desc); + + return len; +} + +static void ath11k_p2p_noa_ie_assign(struct ath11k_vif *arvif, void *ie, + size_t len) +{ + struct ath11k *ar = arvif->ar; + + lockdep_assert_held(&ar->data_lock); + + kfree(arvif->u.ap.noa_data); + + arvif->u.ap.noa_data = ie; + arvif->u.ap.noa_len = len; +} + +static void __ath11k_p2p_noa_update(struct ath11k_vif *arvif, + const struct ath11k_wmi_p2p_noa_info *noa) +{ + struct ath11k *ar = arvif->ar; + void *ie; + size_t len; + + lockdep_assert_held(&ar->data_lock); + + ath11k_p2p_noa_ie_assign(arvif, NULL, 0); + + len = ath11k_p2p_noa_ie_len_compute(noa); + if (!len) + return; + + ie = kmalloc(len, GFP_ATOMIC); + if (!ie) + return; + + ath11k_p2p_noa_ie_fill(ie, len, noa); + ath11k_p2p_noa_ie_assign(arvif, ie, len); } + +void ath11k_p2p_noa_update(struct ath11k_vif *arvif, + const struct ath11k_wmi_p2p_noa_info *noa) +{ + struct ath11k *ar = arvif->ar; + + spin_lock_bh(&ar->data_lock); + __ath11k_p2p_noa_update(arvif, noa); + spin_unlock_bh(&ar->data_lock); +} + +static void ath11k_p2p_noa_update_vdev_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); + struct ath11k_p2p_noa_arg *arg = data; + + if (arvif->vdev_id != arg->vdev_id) + return; + + ath11k_p2p_noa_update(arvif, arg->noa); +} + +void ath11k_p2p_noa_update_by_vdev_id(struct ath11k *ar, u32 vdev_id, + const struct ath11k_wmi_p2p_noa_info *noa) +{ + struct 
ath11k_p2p_noa_arg arg = { + .vdev_id = vdev_id, + .noa = noa, + }; + + ieee80211_iterate_active_interfaces_atomic(ar->hw, + IEEE80211_IFACE_ITER_NORMAL, + ath11k_p2p_noa_update_vdev_iter, + &arg); +} diff --git a/drivers/net/wireless/ath/ath11k/p2p.h b/drivers/net/wireless/ath/ath11k/p2p.h new file mode 100644 index 000000000000..d907940a9b09 --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/p2p.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef ATH11K_P2P_H +#define ATH11K_P2P_H + +#include "wmi.h" + +struct ath11k_wmi_p2p_noa_info; + +struct ath11k_p2p_noa_arg { + u32 vdev_id; + const struct ath11k_wmi_p2p_noa_info *noa; +}; + +void ath11k_p2p_noa_update(struct ath11k_vif *arvif, + const struct ath11k_wmi_p2p_noa_info *noa); +void ath11k_p2p_noa_update_by_vdev_id(struct ath11k *ar, u32 vdev_id, + const struct ath11k_wmi_p2p_noa_info *noa); +#endif diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c index be9d2c69cc41..8d63b84d1261 100644 --- a/drivers/net/wireless/ath/ath11k/pci.c +++ b/drivers/net/wireless/ath/ath11k/pci.c @@ -638,7 +638,7 @@ static int ath11k_pci_power_up(struct ath11k_base *ab) return 0; } -static void ath11k_pci_power_down(struct ath11k_base *ab) +static void ath11k_pci_power_down(struct ath11k_base *ab, bool is_suspend) { struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); @@ -649,7 +649,7 @@ static void ath11k_pci_power_down(struct ath11k_base *ab) ath11k_pci_msi_disable(ab_pci); - ath11k_mhi_stop(ab_pci); + ath11k_mhi_stop(ab_pci, is_suspend); clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags); ath11k_pci_sw_reset(ab_pci->ab, false); } @@ -970,7 +970,7 @@ static void ath11k_pci_remove(struct pci_dev *pdev) ath11k_pci_set_irq_affinity_hint(ab_pci, NULL); if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) { - ath11k_pci_power_down(ab); + ath11k_pci_power_down(ab, false); ath11k_debugfs_soc_destroy(ab); ath11k_qmi_deinit_service(ab); goto qmi_fail; @@ -998,7 +998,7 @@ static void ath11k_pci_shutdown(struct pci_dev *pdev) struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); ath11k_pci_set_irq_affinity_hint(ab_pci, NULL); - ath11k_pci_power_down(ab); + ath11k_pci_power_down(ab, false); } static __maybe_unused int ath11k_pci_pm_suspend(struct device *dev) @@ -1035,9 +1035,39 @@ static __maybe_unused int ath11k_pci_pm_resume(struct device *dev) return ret; } -static SIMPLE_DEV_PM_OPS(ath11k_pci_pm_ops, - ath11k_pci_pm_suspend, - ath11k_pci_pm_resume); +static __maybe_unused int ath11k_pci_pm_suspend_late(struct device *dev) +{ + struct ath11k_base *ab = dev_get_drvdata(dev); + int ret; + + ret = ath11k_core_suspend_late(ab); + if (ret) + ath11k_warn(ab, "failed to late suspend core: %d\n", ret); + + /* Similar to ath11k_pci_pm_suspend(), we return success here + * even error happens, to allow system suspend/hibernation survive. 
+ */ + return 0; +} + +static __maybe_unused int ath11k_pci_pm_resume_early(struct device *dev) +{ + struct ath11k_base *ab = dev_get_drvdata(dev); + int ret; + + ret = ath11k_core_resume_early(ab); + if (ret) + ath11k_warn(ab, "failed to early resume core: %d\n", ret); + + return ret; +} + +static const struct dev_pm_ops __maybe_unused ath11k_pci_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(ath11k_pci_pm_suspend, + ath11k_pci_pm_resume) + SET_LATE_SYSTEM_SLEEP_PM_OPS(ath11k_pci_pm_suspend_late, + ath11k_pci_pm_resume_early) +}; static struct pci_driver ath11k_pci_driver = { .name = "ath11k_pci", diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h index 6be73333d90b..c33c7865145c 100644 --- a/drivers/net/wireless/ath/ath11k/pci.h +++ b/drivers/net/wireless/ath/ath11k/pci.h @@ -64,6 +64,7 @@ struct ath11k_pci { char amss_path[100]; struct mhi_controller *mhi_ctrl; const struct ath11k_msi_config *msi_config; + enum mhi_callback mhi_pre_cb; u32 register_window; /* protects register_window above */ diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c index add4db4c50bc..79eb3f9c902f 100644 --- a/drivers/net/wireless/ath/ath11k/pcic.c +++ b/drivers/net/wireless/ath/ath11k/pcic.c @@ -316,6 +316,7 @@ static void ath11k_pcic_free_ext_irq(struct ath11k_base *ab) free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp); netif_napi_del(&irq_grp->napi); + free_netdev(irq_grp->napi_ndev); } } @@ -558,7 +559,7 @@ ath11k_pcic_get_msi_irq(struct ath11k_base *ab, unsigned int vector) static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab) { - int i, j, ret, num_vectors = 0; + int i, j, n, ret, num_vectors = 0; u32 user_base_data = 0, base_vector = 0; unsigned long irq_flags; @@ -578,8 +579,11 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab) irq_grp->ab = ab; irq_grp->grp_id = i; - init_dummy_netdev(&irq_grp->napi_ndev); - netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi, + irq_grp->napi_ndev = alloc_netdev_dummy(0); + if (!irq_grp->napi_ndev) + return -ENOMEM; + + netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi, ath11k_pcic_ext_grp_napi_poll); if (ab->hw_params.ring_mask->tx[i] || @@ -601,8 +605,13 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab) int vector = (i % num_vectors) + base_vector; int irq = ath11k_pcic_get_msi_irq(ab, vector); - if (irq < 0) + if (irq < 0) { + for (n = 0; n <= i; n++) { + irq_grp = &ab->ext_irq_grp[n]; + free_netdev(irq_grp->napi_ndev); + } return irq; + } ab->irq_num[irq_idx] = irq; @@ -615,6 +624,10 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab) if (ret) { ath11k_err(ab, "failed request irq %d: %d\n", vector, ret); + for (n = 0; n <= i; n++) { + irq_grp = &ab->ext_irq_grp[n]; + free_netdev(irq_grp->napi_ndev); + } return ret; } } diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c index 5006f81f779b..d4a243b64f6c 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.c +++ b/drivers/net/wireless/ath/ath11k/qmi.c @@ -2877,7 +2877,7 @@ int ath11k_qmi_fwreset_from_cold_boot(struct ath11k_base *ab) } /* reset the firmware */ - ath11k_hif_power_down(ab); + ath11k_hif_power_down(ab, false); ath11k_hif_power_up(ab); ath11k_dbg(ab, ATH11K_DBG_QMI, "exit wait for cold boot done\n"); return 0; diff --git a/drivers/net/wireless/ath/ath11k/thermal.c b/drivers/net/wireless/ath/ath11k/thermal.c index 41e7499f075f..18d6eab5cce3 100644 --- a/drivers/net/wireless/ath/ath11k/thermal.c +++ b/drivers/net/wireless/ath/ath11k/thermal.c @@ -101,7 
+101,7 @@ static ssize_t ath11k_thermal_show_temp(struct device *dev, spin_unlock_bh(&ar->data_lock); /* display in millidegree Celsius */ - ret = snprintf(buf, PAGE_SIZE, "%d\n", temperature * 1000); + ret = sysfs_emit(buf, "%d\n", temperature * 1000); out: mutex_unlock(&ar->conf_mutex); return ret; diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 34ab9631ff36..6ff01c45f165 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -20,6 +20,7 @@ #include "hw.h" #include "peer.h" #include "testmode.h" +#include "p2p.h" struct wmi_tlv_policy { size_t min_len; @@ -154,6 +155,10 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = { .min_len = sizeof(struct wmi_per_chain_rssi_stats) }, [WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT] = { .min_len = sizeof(struct wmi_twt_add_dialog_event) }, + [WMI_TAG_P2P_NOA_INFO] = { + .min_len = sizeof(struct ath11k_wmi_p2p_noa_info) }, + [WMI_TAG_P2P_NOA_EVENT] = { + .min_len = sizeof(struct wmi_p2p_noa_event) }, }; #define PRIMAP(_hw_mode_) \ @@ -981,7 +986,7 @@ int ath11k_wmi_vdev_start(struct ath11k *ar, struct wmi_vdev_start_req_arg *arg, FIELD_PREP(WMI_TLV_LEN, 0); /* Note: This is a nested TLV containing: - * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv].. + * [wmi_tlv][ath11k_wmi_p2p_noa_descriptor][wmi_tlv].. */ ptr += sizeof(*tlv); @@ -1704,6 +1709,45 @@ int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar, return ret; } +int ath11k_wmi_p2p_go_bcn_ie(struct ath11k *ar, u32 vdev_id, + const u8 *p2p_ie) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_p2p_go_set_beacon_ie_cmd *cmd; + size_t p2p_ie_len, aligned_len; + struct wmi_tlv *tlv; + struct sk_buff *skb; + int ret, len; + + p2p_ie_len = p2p_ie[1] + 2; + aligned_len = roundup(p2p_ie_len, 4); + + len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_p2p_go_set_beacon_ie_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_P2P_GO_SET_BEACON_IE) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + cmd->vdev_id = vdev_id; + cmd->ie_buf_len = p2p_ie_len; + + tlv = (struct wmi_tlv *)cmd->tlv; + tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | + FIELD_PREP(WMI_TLV_LEN, aligned_len); + memcpy(tlv->value, p2p_ie, p2p_ie_len); + + ret = ath11k_wmi_cmd_send(wmi, skb, WMI_P2P_GO_SET_BEACON_IE); + if (ret) { + ath11k_warn(ar->ab, "failed to send WMI_P2P_GO_SET_BEACON_IE\n"); + dev_kfree_skb(skb); + } + + return ret; +} + int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id, struct ieee80211_mutable_offsets *offs, struct sk_buff *bcn, u32 ema_params) @@ -4020,7 +4064,8 @@ ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *sk switch (ev->evt_type) { case WMI_BSS_COLOR_COLLISION_DETECTION: - ieee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap); + ieee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap, + 0); ath11k_dbg(ab, ATH11K_DBG_WMI, "OBSS color collision detected vdev:%d, event:%d, bitmap:%08llx\n", ev->vdev_id, ev->evt_type, ev->obss_color_bitmap); @@ -8606,6 +8651,58 @@ exit: kfree(tb); } +static void ath11k_wmi_p2p_noa_event(struct ath11k_base *ab, + struct sk_buff *skb) +{ + const void **tb; + const struct wmi_p2p_noa_event *ev; + const struct ath11k_wmi_p2p_noa_info *noa; + struct ath11k *ar; + int vdev_id; + u8 noa_descriptors; + + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); + if (IS_ERR(tb)) { + 
ath11k_warn(ab, "failed to parse tlv: %ld\n", PTR_ERR(tb)); + return; + } + + ev = tb[WMI_TAG_P2P_NOA_EVENT]; + noa = tb[WMI_TAG_P2P_NOA_INFO]; + + if (!ev || !noa) + goto out; + + vdev_id = ev->vdev_id; + noa_descriptors = u32_get_bits(noa->noa_attr, + WMI_P2P_NOA_INFO_DESC_NUM); + + if (noa_descriptors > WMI_P2P_MAX_NOA_DESCRIPTORS) { + ath11k_warn(ab, "invalid descriptor num %d in P2P NoA event\n", + noa_descriptors); + goto out; + } + + ath11k_dbg(ab, ATH11K_DBG_WMI, + "wmi tlv p2p noa vdev_id %i descriptors %u\n", + vdev_id, noa_descriptors); + + rcu_read_lock(); + ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id); + if (!ar) { + ath11k_warn(ab, "invalid vdev id %d in P2P NoA event\n", + vdev_id); + goto unlock; + } + + ath11k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa); + +unlock: + rcu_read_unlock(); +out: + kfree(tb); +} + static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_cmd_hdr *cmd_hdr; @@ -8733,6 +8830,9 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb) case WMI_GTK_OFFLOAD_STATUS_EVENTID: ath11k_wmi_gtk_offload_status_event(ab, skb); break; + case WMI_P2P_NOA_EVENTID: + ath11k_wmi_p2p_noa_event(ab, skb); + break; default: ath11k_dbg(ab, ATH11K_DBG_WMI, "unsupported event id 0x%x\n", id); break; diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h index bb419e3abb00..8982b909c821 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.h +++ b/drivers/net/wireless/ath/ath11k/wmi.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef ATH11K_WMI_H @@ -60,13 +60,9 @@ struct wmi_tlv { #define WLAN_SCAN_MAX_HINT_BSSID 10 #define MAX_RNR_BSS 5 -#define WLAN_SCAN_MAX_HINT_S_SSID 10 -#define WLAN_SCAN_MAX_HINT_BSSID 10 -#define MAX_RNR_BSS 5 - #define WLAN_SCAN_PARAMS_MAX_SSID 16 #define WLAN_SCAN_PARAMS_MAX_BSSID 4 -#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256 +#define WLAN_SCAN_PARAMS_MAX_IE_LEN 512 #define WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG 1 @@ -3444,34 +3440,6 @@ struct wmi_bssid_arg { const u8 *bssid; }; -struct wmi_start_scan_arg { - u32 scan_id; - u32 scan_req_id; - u32 vdev_id; - u32 scan_priority; - u32 notify_scan_events; - u32 dwell_time_active; - u32 dwell_time_passive; - u32 min_rest_time; - u32 max_rest_time; - u32 repeat_probe_time; - u32 probe_spacing_time; - u32 idle_time; - u32 max_scan_time; - u32 probe_delay; - u32 scan_ctrl_flags; - - u32 ie_len; - u32 n_channels; - u32 n_ssids; - u32 n_bssids; - - u8 ie[WLAN_SCAN_PARAMS_MAX_IE_LEN]; - u32 channels[64]; - struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID]; - struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID]; -}; - #define WMI_SCAN_STOP_ONE 0x00000000 #define WMI_SCN_STOP_VAP_ALL 0x01000000 #define WMI_SCAN_STOP_ALL 0x04000000 @@ -3630,6 +3598,37 @@ struct wmi_ftm_event_msg { u8 data[]; } __packed; +#define WMI_P2P_MAX_NOA_DESCRIPTORS 4 + +struct wmi_p2p_noa_event { + u32 vdev_id; +} __packed; + +struct ath11k_wmi_p2p_noa_descriptor { + u32 type_count; /* 255: continuous schedule, 0: reserved */ + u32 duration; /* Absent period duration in micro seconds */ + u32 interval; /* Absent period interval in micro seconds */ + u32 start_time; /* 32 bit tsf time when in starts */ +} __packed; + +#define WMI_P2P_NOA_INFO_CHANGED_FLAG BIT(0) +#define WMI_P2P_NOA_INFO_INDEX GENMASK(15, 8) +#define WMI_P2P_NOA_INFO_OPP_PS BIT(16) +#define WMI_P2P_NOA_INFO_CTWIN_TU GENMASK(23, 17) +#define WMI_P2P_NOA_INFO_DESC_NUM GENMASK(31, 24) + +struct ath11k_wmi_p2p_noa_info { + /* Bit 0 - Flag to indicate an update in NOA schedule + * Bits 7-1 - Reserved + * Bits 15-8 - Index (identifies the instance of NOA sub element) + * Bit 16 - Opp PS state of the AP + * Bits 23-17 - Ctwindow in TUs + * Bits 31-24 - Number of NOA descriptors + */ + u32 noa_attr; + struct ath11k_wmi_p2p_noa_descriptor descriptors[WMI_P2P_MAX_NOA_DESCRIPTORS]; +} __packed; + #define WMI_BEACON_TX_BUFFER_SIZE 512 #define WMI_EMA_TMPL_IDX_SHIFT 8 @@ -3653,6 +3652,13 @@ struct wmi_bcn_tmpl_cmd { u32 ema_params; } __packed; +struct wmi_p2p_go_set_beacon_ie_cmd { + u32 tlv_header; + u32 vdev_id; + u32 ie_buf_len; + u8 tlv[]; +} __packed; + struct wmi_key_seq_counter { u32 key_seq_counter_l; u32 key_seq_counter_h; @@ -5740,8 +5746,6 @@ struct wmi_debug_log_config_cmd_fixed_param { u32 value; } __packed; -#define WMI_MAX_MEM_REQS 32 - #define MAX_RADIOS 3 #define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ) @@ -6349,6 +6353,8 @@ int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len); int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id, struct sk_buff *frame); +int ath11k_wmi_p2p_go_bcn_ie(struct ath11k *ar, u32 vdev_id, + const u8 *p2p_ie); int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id, struct ieee80211_mutable_offsets *offs, struct sk_buff *bcn, u32 ema_param); diff --git a/drivers/net/wireless/ath/ath12k/Kconfig b/drivers/net/wireless/ath/ath12k/Kconfig index e135d2b1b61d..eceab9153e98 100644 --- a/drivers/net/wireless/ath/ath12k/Kconfig +++ b/drivers/net/wireless/ath/ath12k/Kconfig 
@@ -24,6 +24,15 @@ config ATH12K_DEBUG If unsure, say Y to make it easier to debug problems. But if you want optimal performance choose N. +config ATH12K_DEBUGFS + bool "QTI ath12k debugfs support" + depends on ATH12K && MAC80211_DEBUGFS + help + Enable ath12k debugfs support + + If unsure, say Y to make it easier to debug problems. But if + you want optimal performance choose N. + config ATH12K_TRACING bool "ath12k tracing support" depends on ATH12K && EVENT_TRACING diff --git a/drivers/net/wireless/ath/ath12k/Makefile b/drivers/net/wireless/ath/ath12k/Makefile index 71669f94ff75..d42480db7463 100644 --- a/drivers/net/wireless/ath/ath12k/Makefile +++ b/drivers/net/wireless/ath/ath12k/Makefile @@ -23,6 +23,8 @@ ath12k-y += core.o \ fw.o \ p2p.o +ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o +ath12k-$(CONFIG_ACPI) += acpi.o ath12k-$(CONFIG_ATH12K_TRACING) += trace.o # for tracing framework to find trace.h diff --git a/drivers/net/wireless/ath/ath12k/acpi.c b/drivers/net/wireless/ath/ath12k/acpi.c new file mode 100644 index 000000000000..443ba12e01f3 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/acpi.c @@ -0,0 +1,394 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include "core.h" +#include "acpi.h" +#include "debug.h" + +static int ath12k_acpi_dsm_get_data(struct ath12k_base *ab, int func) +{ + union acpi_object *obj; + acpi_handle root_handle; + int ret; + + root_handle = ACPI_HANDLE(ab->dev); + if (!root_handle) { + ath12k_dbg(ab, ATH12K_DBG_BOOT, "invalid acpi handler\n"); + return -EOPNOTSUPP; + } + + obj = acpi_evaluate_dsm(root_handle, ab->hw_params->acpi_guid, 0, func, + NULL); + + if (!obj) { + ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi_evaluate_dsm() failed\n"); + return -ENOENT; + } + + if (obj->type == ACPI_TYPE_INTEGER) { + ab->acpi.func_bit = obj->integer.value; + } else if (obj->type == ACPI_TYPE_BUFFER) { + switch (func) { + case ATH12K_ACPI_DSM_FUNC_TAS_CFG: + if (obj->buffer.length != ATH12K_ACPI_DSM_TAS_CFG_SIZE) { + ath12k_warn(ab, "invalid ACPI DSM TAS config size: %d\n", + obj->buffer.length); + ret = -EINVAL; + goto out; + } + + memcpy(&ab->acpi.tas_cfg, obj->buffer.pointer, + obj->buffer.length); + + break; + case ATH12K_ACPI_DSM_FUNC_TAS_DATA: + if (obj->buffer.length != ATH12K_ACPI_DSM_TAS_DATA_SIZE) { + ath12k_warn(ab, "invalid ACPI DSM TAS data size: %d\n", + obj->buffer.length); + ret = -EINVAL; + goto out; + } + + memcpy(&ab->acpi.tas_sar_power_table, obj->buffer.pointer, + obj->buffer.length); + + break; + case ATH12K_ACPI_DSM_FUNC_BIOS_SAR: + if (obj->buffer.length != ATH12K_ACPI_DSM_BIOS_SAR_DATA_SIZE) { + ath12k_warn(ab, "invalid ACPI BIOS SAR data size: %d\n", + obj->buffer.length); + ret = -EINVAL; + goto out; + } + + memcpy(&ab->acpi.bios_sar_data, obj->buffer.pointer, + obj->buffer.length); + + break; + case ATH12K_ACPI_DSM_FUNC_GEO_OFFSET: + if (obj->buffer.length != ATH12K_ACPI_DSM_GEO_OFFSET_DATA_SIZE) { + ath12k_warn(ab, "invalid ACPI GEO OFFSET data size: %d\n", + obj->buffer.length); + ret = -EINVAL; + goto out; + } + + memcpy(&ab->acpi.geo_offset_data, obj->buffer.pointer, + obj->buffer.length); + + break; + case ATH12K_ACPI_DSM_FUNC_INDEX_CCA: + if (obj->buffer.length != ATH12K_ACPI_DSM_CCA_DATA_SIZE) { + ath12k_warn(ab, "invalid ACPI DSM CCA data size: %d\n", + obj->buffer.length); + ret = -EINVAL; + goto out; + } + + memcpy(&ab->acpi.cca_data, obj->buffer.pointer, + 
obj->buffer.length); + + break; + case ATH12K_ACPI_DSM_FUNC_INDEX_BAND_EDGE: + if (obj->buffer.length != ATH12K_ACPI_DSM_BAND_EDGE_DATA_SIZE) { + ath12k_warn(ab, "invalid ACPI DSM band edge data size: %d\n", + obj->buffer.length); + ret = -EINVAL; + goto out; + } + + memcpy(&ab->acpi.band_edge_power, obj->buffer.pointer, + obj->buffer.length); + + break; + } + } else { + ath12k_warn(ab, "ACPI DSM method returned an unsupported object type: %d\n", + obj->type); + ret = -EINVAL; + goto out; + } + + ret = 0; + +out: + ACPI_FREE(obj); + return ret; +} + +static int ath12k_acpi_set_power_limit(struct ath12k_base *ab) +{ + const u8 *tas_sar_power_table = ab->acpi.tas_sar_power_table; + int ret; + + if (tas_sar_power_table[0] != ATH12K_ACPI_TAS_DATA_VERSION || + tas_sar_power_table[1] != ATH12K_ACPI_TAS_DATA_ENABLE) { + ath12k_warn(ab, "latest ACPI TAS data is invalid\n"); + return -EINVAL; + } + + ret = ath12k_wmi_set_bios_cmd(ab, WMI_BIOS_PARAM_TAS_DATA_TYPE, + tas_sar_power_table, + ATH12K_ACPI_DSM_TAS_DATA_SIZE); + if (ret) { + ath12k_warn(ab, "failed to send ACPI TAS data table: %d\n", ret); + return ret; + } + + return ret; +} + +static int ath12k_acpi_set_bios_sar_power(struct ath12k_base *ab) +{ + int ret; + + if (ab->acpi.bios_sar_data[0] != ATH12K_ACPI_POWER_LIMIT_VERSION || + ab->acpi.bios_sar_data[1] != ATH12K_ACPI_POWER_LIMIT_ENABLE_FLAG) { + ath12k_warn(ab, "invalid latest ACPI BIOS SAR data\n"); + return -EINVAL; + } + + ret = ath12k_wmi_set_bios_sar_cmd(ab, ab->acpi.bios_sar_data); + if (ret) { + ath12k_warn(ab, "failed to set ACPI BIOS SAR table: %d\n", ret); + return ret; + } + + return 0; +} + +static void ath12k_acpi_dsm_notify(acpi_handle handle, u32 event, void *data) +{ + int ret; + struct ath12k_base *ab = data; + + if (event == ATH12K_ACPI_NOTIFY_EVENT) { + ath12k_warn(ab, "unknown acpi notify %u\n", event); + return; + } + + if (!ab->acpi.acpi_tas_enable) { + ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi_tas_enable is false\n"); + return; + } + + ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_TAS_DATA); + if (ret) { + ath12k_warn(ab, "failed to update ACPI TAS data table: %d\n", ret); + return; + } + + ret = ath12k_acpi_set_power_limit(ab); + if (ret) { + ath12k_warn(ab, "failed to set ACPI TAS power limit data: %d", ret); + return; + } + + if (!ab->acpi.acpi_bios_sar_enable) + return; + + ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_BIOS_SAR); + if (ret) { + ath12k_warn(ab, "failed to update BIOS SAR: %d\n", ret); + return; + } + + ret = ath12k_acpi_set_bios_sar_power(ab); + if (ret) { + ath12k_warn(ab, "failed to set BIOS SAR power limit: %d\n", ret); + return; + } +} + +static int ath12k_acpi_set_bios_sar_params(struct ath12k_base *ab) +{ + int ret; + + ret = ath12k_wmi_set_bios_sar_cmd(ab, ab->acpi.bios_sar_data); + if (ret) { + ath12k_warn(ab, "failed to set ACPI BIOS SAR table: %d\n", ret); + return ret; + } + + ret = ath12k_wmi_set_bios_geo_cmd(ab, ab->acpi.geo_offset_data); + if (ret) { + ath12k_warn(ab, "failed to set ACPI BIOS GEO table: %d\n", ret); + return ret; + } + + return 0; +} + +static int ath12k_acpi_set_tas_params(struct ath12k_base *ab) +{ + int ret; + + ret = ath12k_wmi_set_bios_cmd(ab, WMI_BIOS_PARAM_TAS_CONFIG_TYPE, + ab->acpi.tas_cfg, + ATH12K_ACPI_DSM_TAS_CFG_SIZE); + if (ret) { + ath12k_warn(ab, "failed to send ACPI TAS config table parameter: %d\n", + ret); + return ret; + } + + ret = ath12k_wmi_set_bios_cmd(ab, WMI_BIOS_PARAM_TAS_DATA_TYPE, + ab->acpi.tas_sar_power_table, + ATH12K_ACPI_DSM_TAS_DATA_SIZE); + if (ret) { + 
ath12k_warn(ab, "failed to send ACPI TAS data table parameter: %d\n", + ret); + return ret; + } + + return 0; +} + +int ath12k_acpi_start(struct ath12k_base *ab) +{ + acpi_status status; + u8 *buf; + int ret; + + if (!ab->hw_params->acpi_guid) + /* not supported with this hardware */ + return 0; + + ab->acpi.acpi_tas_enable = false; + + ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS); + if (ret) { + ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to get ACPI DSM data: %d\n", ret); + return ret; + } + + if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_TAS_CFG)) { + ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_TAS_CFG); + if (ret) { + ath12k_warn(ab, "failed to get ACPI TAS config table: %d\n", ret); + return ret; + } + } + + if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_TAS_DATA)) { + ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_TAS_DATA); + if (ret) { + ath12k_warn(ab, "failed to get ACPI TAS data table: %d\n", ret); + return ret; + } + + if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_TAS_CFG) && + ab->acpi.tas_sar_power_table[0] == ATH12K_ACPI_TAS_DATA_VERSION && + ab->acpi.tas_sar_power_table[1] == ATH12K_ACPI_TAS_DATA_ENABLE) + ab->acpi.acpi_tas_enable = true; + } + + if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_BIOS_SAR)) { + ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_BIOS_SAR); + if (ret) { + ath12k_warn(ab, "failed to get ACPI bios sar data: %d\n", ret); + return ret; + } + } + + if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_GEO_OFFSET)) { + ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_GEO_OFFSET); + if (ret) { + ath12k_warn(ab, "failed to get ACPI geo offset data: %d\n", ret); + return ret; + } + + if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_BIOS_SAR) && + ab->acpi.bios_sar_data[0] == ATH12K_ACPI_POWER_LIMIT_VERSION && + ab->acpi.bios_sar_data[1] == ATH12K_ACPI_POWER_LIMIT_ENABLE_FLAG && + !ab->acpi.acpi_tas_enable) + ab->acpi.acpi_bios_sar_enable = true; + } + + if (ab->acpi.acpi_tas_enable) { + ret = ath12k_acpi_set_tas_params(ab); + if (ret) { + ath12k_warn(ab, "failed to send ACPI parameters: %d\n", ret); + return ret; + } + } + + if (ab->acpi.acpi_bios_sar_enable) { + ret = ath12k_acpi_set_bios_sar_params(ab); + if (ret) + return ret; + } + + if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_CCA)) { + ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_INDEX_CCA); + if (ret) { + ath12k_warn(ab, "failed to get ACPI DSM CCA threshold configuration: %d\n", + ret); + return ret; + } + + if (ab->acpi.cca_data[0] == ATH12K_ACPI_CCA_THR_VERSION && + ab->acpi.cca_data[ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET] == + ATH12K_ACPI_CCA_THR_ENABLE_FLAG) { + buf = ab->acpi.cca_data + ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET; + ret = ath12k_wmi_set_bios_cmd(ab, + WMI_BIOS_PARAM_CCA_THRESHOLD_TYPE, + buf, + ATH12K_ACPI_CCA_THR_OFFSET_LEN); + if (ret) { + ath12k_warn(ab, "failed to set ACPI DSM CCA threshold: %d\n", + ret); + return ret; + } + } + } + + if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, + ATH12K_ACPI_FUNC_BIT_BAND_EDGE_CHAN_POWER)) { + ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_INDEX_BAND_EDGE); + if (ret) { + ath12k_warn(ab, "failed to get ACPI DSM band edge channel power: %d\n", + ret); + return ret; + } + + if (ab->acpi.band_edge_power[0] == ATH12K_ACPI_BAND_EDGE_VERSION && + ab->acpi.band_edge_power[1] == ATH12K_ACPI_BAND_EDGE_ENABLE_FLAG) { + ret = ath12k_wmi_set_bios_cmd(ab, + WMI_BIOS_PARAM_TYPE_BANDEDGE, 
+ ab->acpi.band_edge_power, + sizeof(ab->acpi.band_edge_power)); + if (ret) { + ath12k_warn(ab, + "failed to set ACPI DSM band edge channel power: %d\n", + ret); + return ret; + } + } + } + + status = acpi_install_notify_handler(ACPI_HANDLE(ab->dev), + ACPI_DEVICE_NOTIFY, + ath12k_acpi_dsm_notify, ab); + if (ACPI_FAILURE(status)) { + ath12k_warn(ab, "failed to install DSM notify callback: %d\n", status); + return -EIO; + } + + ab->acpi.started = true; + + return 0; +} + +void ath12k_acpi_stop(struct ath12k_base *ab) +{ + if (!ab->acpi.started) + return; + + acpi_remove_notify_handler(ACPI_HANDLE(ab->dev), + ACPI_DEVICE_NOTIFY, + ath12k_acpi_dsm_notify); +} diff --git a/drivers/net/wireless/ath/ath12k/acpi.h b/drivers/net/wireless/ath/ath12k/acpi.h new file mode 100644 index 000000000000..39e003d86a48 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/acpi.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ +#ifndef ATH12K_ACPI_H +#define ATH12K_ACPI_H + +#include <linux/acpi.h> + +#define ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS 0 +#define ATH12K_ACPI_DSM_FUNC_BIOS_SAR 4 +#define ATH12K_ACPI_DSM_FUNC_GEO_OFFSET 5 +#define ATH12K_ACPI_DSM_FUNC_INDEX_CCA 6 +#define ATH12K_ACPI_DSM_FUNC_TAS_CFG 8 +#define ATH12K_ACPI_DSM_FUNC_TAS_DATA 9 +#define ATH12K_ACPI_DSM_FUNC_INDEX_BAND_EDGE 10 + +#define ATH12K_ACPI_FUNC_BIT_BIOS_SAR BIT(3) +#define ATH12K_ACPI_FUNC_BIT_GEO_OFFSET BIT(4) +#define ATH12K_ACPI_FUNC_BIT_CCA BIT(5) +#define ATH12K_ACPI_FUNC_BIT_TAS_CFG BIT(7) +#define ATH12K_ACPI_FUNC_BIT_TAS_DATA BIT(8) +#define ATH12K_ACPI_FUNC_BIT_BAND_EDGE_CHAN_POWER BIT(9) + +#define ATH12K_ACPI_NOTIFY_EVENT 0x86 +#define ATH12K_ACPI_FUNC_BIT_VALID(_acdata, _func) (((_acdata).func_bit) & (_func)) + +#define ATH12K_ACPI_TAS_DATA_VERSION 0x1 +#define ATH12K_ACPI_TAS_DATA_ENABLE 0x1 +#define ATH12K_ACPI_POWER_LIMIT_VERSION 0x1 +#define ATH12K_ACPI_POWER_LIMIT_ENABLE_FLAG 0x1 +#define ATH12K_ACPI_CCA_THR_VERSION 0x1 +#define ATH12K_ACPI_CCA_THR_ENABLE_FLAG 0x1 +#define ATH12K_ACPI_BAND_EDGE_VERSION 0x1 +#define ATH12K_ACPI_BAND_EDGE_ENABLE_FLAG 0x1 + +#define ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET 1 +#define ATH12K_ACPI_DBS_BACKOFF_DATA_OFFSET 2 +#define ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET 5 +#define ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN 10 +#define ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET 12 +#define ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN 18 +#define ATH12K_ACPI_BIOS_SAR_TABLE_LEN 22 +#define ATH12K_ACPI_CCA_THR_OFFSET_LEN 36 + +#define ATH12K_ACPI_DSM_TAS_DATA_SIZE 69 +#define ATH12K_ACPI_DSM_BAND_EDGE_DATA_SIZE 100 +#define ATH12K_ACPI_DSM_TAS_CFG_SIZE 108 + +#define ATH12K_ACPI_DSM_GEO_OFFSET_DATA_SIZE (ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET + \ + ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN) +#define ATH12K_ACPI_DSM_BIOS_SAR_DATA_SIZE (ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET + \ + ATH12K_ACPI_BIOS_SAR_TABLE_LEN) +#define ATH12K_ACPI_DSM_CCA_DATA_SIZE (ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET + \ + ATH12K_ACPI_CCA_THR_OFFSET_LEN) + +#ifdef CONFIG_ACPI + +int ath12k_acpi_start(struct ath12k_base *ab); +void ath12k_acpi_stop(struct ath12k_base *ab); + +#else + +static inline int ath12k_acpi_start(struct ath12k_base *ab) +{ + return 0; +} + +static inline void ath12k_acpi_stop(struct ath12k_base *ab) +{ +} + +#endif /* CONFIG_ACPI */ + +#endif /* ATH12K_ACPI_H */ diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c index 
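The acpi.h header above encodes the convention that _DSM function index N is advertised by bit (N - 1) of the "supported functions" bitmask returned by function 0 and stored in ab->acpi.func_bit (for example, function 9, ATH12K_ACPI_DSM_FUNC_TAS_DATA, is advertised by BIT(8), ATH12K_ACPI_FUNC_BIT_TAS_DATA). A minimal sketch of that check, using only the macros and fields introduced in this patch; the helper name is illustrative:

	/* Illustrative only: true if the BIOS advertises the given _DSM function. */
	static bool ath12k_acpi_example_func_supported(struct ath12k_base *ab,
						       unsigned int func_index)
	{
		/* func_bit is filled from ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS (0) */
		return ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, BIT(func_index - 1));
	}
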
391b6fb2bd42..6663f4e1792d 100644 --- a/drivers/net/wireless/ath/ath12k/core.c +++ b/drivers/net/wireless/ath/ath12k/core.c @@ -15,6 +15,7 @@ #include "debug.h" #include "hif.h" #include "fw.h" +#include "debugfs.h" unsigned int ath12k_debug_mask; module_param_named(debug_mask, ath12k_debug_mask, uint, 0644); @@ -43,67 +44,90 @@ static int ath12k_core_rfkill_config(struct ath12k_base *ab) int ath12k_core_suspend(struct ath12k_base *ab) { - int ret; + struct ath12k *ar; + int ret, i; if (!ab->hw_params->supports_suspend) return -EOPNOTSUPP; - /* TODO: there can frames in queues so for now add delay as a hack. - * Need to implement to handle and remove this delay. + rcu_read_lock(); + for (i = 0; i < ab->num_radios; i++) { + ar = ath12k_mac_get_ar_by_pdev_id(ab, i); + if (!ar) + continue; + ret = ath12k_mac_wait_tx_complete(ar); + if (ret) { + ath12k_warn(ab, "failed to wait tx complete: %d\n", ret); + rcu_read_unlock(); + return ret; + } + } + rcu_read_unlock(); + + /* PM framework skips suspend_late/resume_early callbacks + * if other devices report errors in their suspend callbacks. + * However ath12k_core_resume() would still be called because + * here we return success thus kernel put us on dpm_suspended_list. + * Since we won't go through a power down/up cycle, there is + * no chance to call complete(&ab->restart_completed) in + * ath12k_core_restart(), making ath12k_core_resume() timeout. + * So call it here to avoid this issue. This also works in case + * no error happens thus suspend_late/resume_early get called, + * because it will be reinitialized in ath12k_core_resume_early(). */ - msleep(500); + complete(&ab->restart_completed); - ret = ath12k_dp_rx_pktlog_stop(ab, true); - if (ret) { - ath12k_warn(ab, "failed to stop dp rx (and timer) pktlog during suspend: %d\n", - ret); - return ret; - } + return 0; +} +EXPORT_SYMBOL(ath12k_core_suspend); - ret = ath12k_dp_rx_pktlog_stop(ab, false); - if (ret) { - ath12k_warn(ab, "failed to stop dp rx pktlog during suspend: %d\n", - ret); - return ret; - } +int ath12k_core_suspend_late(struct ath12k_base *ab) +{ + if (!ab->hw_params->supports_suspend) + return -EOPNOTSUPP; ath12k_hif_irq_disable(ab); ath12k_hif_ce_irq_disable(ab); - ret = ath12k_hif_suspend(ab); - if (ret) { - ath12k_warn(ab, "failed to suspend hif: %d\n", ret); - return ret; - } + ath12k_hif_power_down(ab, true); return 0; } +EXPORT_SYMBOL(ath12k_core_suspend_late); -int ath12k_core_resume(struct ath12k_base *ab) +int ath12k_core_resume_early(struct ath12k_base *ab) { int ret; if (!ab->hw_params->supports_suspend) return -EOPNOTSUPP; - ret = ath12k_hif_resume(ab); - if (ret) { - ath12k_warn(ab, "failed to resume hif during resume: %d\n", ret); - return ret; - } + reinit_completion(&ab->restart_completed); + ret = ath12k_hif_power_up(ab); + if (ret) + ath12k_warn(ab, "failed to power up hif during resume: %d\n", ret); - ath12k_hif_ce_irq_enable(ab); - ath12k_hif_irq_enable(ab); + return ret; +} +EXPORT_SYMBOL(ath12k_core_resume_early); - ret = ath12k_dp_rx_pktlog_start(ab); - if (ret) { - ath12k_warn(ab, "failed to start rx pktlog during resume: %d\n", - ret); - return ret; +int ath12k_core_resume(struct ath12k_base *ab) +{ + long time_left; + + if (!ab->hw_params->supports_suspend) + return -EOPNOTSUPP; + + time_left = wait_for_completion_timeout(&ab->restart_completed, + ATH12K_RESET_TIMEOUT_HZ); + if (time_left == 0) { + ath12k_warn(ab, "timeout while waiting for restart complete"); + return -ETIMEDOUT; } return 0; } +EXPORT_SYMBOL(ath12k_core_resume); static int 
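The suspend path is now split across the standard PM phases: ath12k_core_suspend() drains pending TX and completes restart_completed, ath12k_core_suspend_late() disables interrupts and powers the HIF down, ath12k_core_resume_early() powers it back up, and ath12k_core_resume() waits for the firmware restart to complete. A minimal sketch of how bus glue could hook these into dev_pm_ops; the wrapper names and the drvdata lookup are illustrative assumptions, not taken from this patch:

	static int example_ath12k_pm_suspend(struct device *dev)
	{
		struct ath12k_base *ab = dev_get_drvdata(dev);

		return ath12k_core_suspend(ab);
	}

	static int example_ath12k_pm_suspend_late(struct device *dev)
	{
		struct ath12k_base *ab = dev_get_drvdata(dev);

		return ath12k_core_suspend_late(ab);
	}

	static int example_ath12k_pm_resume_early(struct device *dev)
	{
		struct ath12k_base *ab = dev_get_drvdata(dev);

		return ath12k_core_resume_early(ab);
	}

	static int example_ath12k_pm_resume(struct device *dev)
	{
		struct ath12k_base *ab = dev_get_drvdata(dev);

		return ath12k_core_resume(ab);
	}

	static const struct dev_pm_ops example_ath12k_pm_ops = {
		.suspend	= example_ath12k_pm_suspend,
		.suspend_late	= example_ath12k_pm_suspend_late,
		.resume_early	= example_ath12k_pm_resume_early,
		.resume		= example_ath12k_pm_resume,
	};
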
__ath12k_core_create_board_name(struct ath12k_base *ab, char *name, size_t name_len, bool with_variant, @@ -542,6 +566,8 @@ static void ath12k_core_stop(struct ath12k_base *ab) if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags)) ath12k_qmi_firmware_stop(ab); + ath12k_acpi_stop(ab); + ath12k_hif_stop(ab); ath12k_wmi_detach(ab); ath12k_dp_rx_pdev_reo_cleanup(ab); @@ -628,6 +654,8 @@ static int ath12k_core_soc_create(struct ath12k_base *ab) return ret; } + ath12k_debugfs_soc_create(ab); + ret = ath12k_hif_power_up(ab); if (ret) { ath12k_err(ab, "failed to power up :%d\n", ret); @@ -637,6 +665,7 @@ static int ath12k_core_soc_create(struct ath12k_base *ab) return 0; err_qmi_deinit: + ath12k_debugfs_soc_destroy(ab); ath12k_qmi_deinit_service(ab); return ret; } @@ -645,6 +674,7 @@ static void ath12k_core_soc_destroy(struct ath12k_base *ab) { ath12k_dp_free(ab); ath12k_reg_free(ab); + ath12k_debugfs_soc_destroy(ab); ath12k_qmi_deinit_service(ab); } @@ -779,6 +809,11 @@ static int ath12k_core_start(struct ath12k_base *ab, goto err_reo_cleanup; } + ret = ath12k_acpi_start(ab); + if (ret) + /* ACPI is optional so continue in case of an error */ + ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi failed: %d\n", ret); + return 0; err_reo_cleanup: @@ -874,9 +909,8 @@ static int ath12k_core_reconfigure_on_crash(struct ath12k_base *ab) int ret; mutex_lock(&ab->core_lock); - ath12k_hif_irq_disable(ab); ath12k_dp_pdev_free(ab); - ath12k_hif_stop(ab); + ath12k_ce_cleanup_pipes(ab); ath12k_wmi_detach(ab); ath12k_dp_rx_pdev_reo_cleanup(ab); mutex_unlock(&ab->core_lock); @@ -1052,9 +1086,6 @@ static void ath12k_core_restart(struct work_struct *work) struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work); int ret; - if (!ab->is_reset) - ath12k_core_pre_reconfigure_recovery(ab); - ret = ath12k_core_reconfigure_on_crash(ab); if (ret) { ath12k_err(ab, "failed to reconfigure driver on crash recovery\n"); @@ -1064,8 +1095,7 @@ static void ath12k_core_restart(struct work_struct *work) if (ab->is_reset) complete_all(&ab->reconfigure_complete); - if (!ab->is_reset) - ath12k_core_post_reconfigure_recovery(ab); + complete(&ab->restart_completed); } static void ath12k_core_reset(struct work_struct *work) @@ -1131,8 +1161,10 @@ static void ath12k_core_reset(struct work_struct *work) time_left = wait_for_completion_timeout(&ab->recovery_start, ATH12K_RECOVER_START_TIMEOUT_HZ); - ath12k_hif_power_down(ab); - ath12k_qmi_free_resource(ab); + ath12k_hif_irq_disable(ab); + ath12k_hif_ce_irq_disable(ab); + + ath12k_hif_power_down(ab, false); ath12k_hif_power_up(ab); ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n"); @@ -1175,7 +1207,7 @@ void ath12k_core_deinit(struct ath12k_base *ab) mutex_unlock(&ab->core_lock); - ath12k_hif_power_down(ab); + ath12k_hif_power_down(ab, false); ath12k_mac_destroy(ab); ath12k_core_soc_destroy(ab); ath12k_fw_unmap(ab); @@ -1223,11 +1255,12 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size, timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0); init_completion(&ab->htc_suspend); + init_completion(&ab->restart_completed); ab->dev = dev; ab->hif.bus = bus; ab->qmi.num_radios = U8_MAX; - ab->slo_capable = true; + ab->mlo_capable_flags = ATH12K_INTRA_DEVICE_MLO_SUPPORT; return ab; diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h index 97e5a0ccd233..47dde4401210 100644 --- a/drivers/net/wireless/ath/ath12k/core.h +++ b/drivers/net/wireless/ath/ath12k/core.h @@ -26,6 +26,7 @@ #include "reg.h" #include 
"dbring.h" #include "fw.h" +#include "acpi.h" #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK) @@ -46,6 +47,7 @@ #define ATH12K_SMBIOS_BDF_EXT_MAGIC "BDF_" #define ATH12K_INVALID_HW_MAC_ID 0xFF +#define ATH12K_CONNECTION_LOSS_HZ (3 * HZ) #define ATH12K_RX_RATE_TABLE_NUM 320 #define ATH12K_RX_RATE_TABLE_11AX_NUM 576 @@ -214,6 +216,24 @@ enum ath12k_monitor_flags { ATH12K_FLAG_MONITOR_ENABLED, }; +struct ath12k_tx_conf { + bool changed; + u16 ac; + struct ieee80211_tx_queue_params tx_queue_params; +}; + +struct ath12k_key_conf { + bool changed; + enum set_key_cmd cmd; + struct ieee80211_key_conf *key; +}; + +struct ath12k_vif_cache { + struct ath12k_tx_conf tx_conf; + struct ath12k_key_conf key_conf; + u32 bss_conf_changed; +}; + struct ath12k_vif { u32 vdev_id; enum wmi_vdev_type vdev_type; @@ -251,11 +271,13 @@ struct ath12k_vif { } ap; } u; + bool is_created; bool is_started; bool is_up; u32 aid; u8 bssid[ETH_ALEN]; struct cfg80211_bitrate_mask bitrate_mask; + struct delayed_work connection_loss_work; int num_legacy_stations; int rtscts_prot_mode; int txpower; @@ -267,10 +289,12 @@ struct ath12k_vif { u8 vdev_stats_id; u32 punct_bitmap; bool ps; + struct ath12k_vif_cache *cache; }; struct ath12k_vif_iter { u32 vdev_id; + struct ath12k *ar; struct ath12k_vif *arvif; }; @@ -453,6 +477,10 @@ struct ath12k_fw_stats { struct list_head bcn; }; +struct ath12k_debug { + struct dentry *debugfs_pdev; +}; + struct ath12k_per_peer_tx_stats { u32 succ_bytes; u32 retry_bytes; @@ -592,16 +620,24 @@ struct ath12k { struct ath12k_per_peer_tx_stats cached_stats; u32 last_ppdu_id; u32 cached_ppdu_id; +#ifdef CONFIG_ATH12K_DEBUGFS + struct ath12k_debug debug; +#endif bool dfs_block_radar_events; bool monitor_conf_enabled; bool monitor_vdev_created; bool monitor_started; int monitor_vdev_id; + + u32 freq_low; + u32 freq_high; }; struct ath12k_hw { struct ieee80211_hw *hw; + bool regd_updated; + bool use_6ghz_regd; u8 num_radio; struct ath12k radio[] __aligned(sizeof(void *)); @@ -688,6 +724,21 @@ struct ath12k_soc_dp_stats { struct ath12k_soc_dp_tx_err_stats tx_err; }; +/** + * enum ath12k_link_capable_flags - link capable flags + * + * Single/Multi link capability information + * + * @ATH12K_INTRA_DEVICE_MLO_SUPPORT: SLO/MLO form between the radio, where all + * the links (radios) present within a device. + * @ATH12K_INTER_DEVICE_MLO_SUPPORT: SLO/MLO form between the radio, where all + * the links (radios) present across the devices. + */ +enum ath12k_link_capable_flags { + ATH12K_INTRA_DEVICE_MLO_SUPPORT = BIT(0), + ATH12K_INTER_DEVICE_MLO_SUPPORT = BIT(1), +}; + /* Master structure to hold the hw data which may be used in core module */ struct ath12k_base { enum ath12k_hw_rev hw_rev; @@ -782,6 +833,9 @@ struct ath12k_base { /* Current DFS Regulatory */ enum ath12k_dfs_region dfs_region; struct ath12k_soc_dp_stats soc_stats; +#ifdef CONFIG_ATH12K_DEBUGFS + struct dentry *debugfs_soc; +#endif unsigned long dev_flags; struct completion driver_recovery; @@ -843,10 +897,31 @@ struct ath12k_base { const struct hal_rx_ops *hal_rx_ops; - /* slo_capable denotes if the single/multi link operation - * is supported within the same chip (SoC). + /* mlo_capable_flags denotes the single/multi link operation + * capabilities of the Device. 
+ * + * See enum ath12k_link_capable_flags */ - bool slo_capable; + u8 mlo_capable_flags; + + struct completion restart_completed; + +#ifdef CONFIG_ACPI + + struct { + bool started; + u32 func_bit; + bool acpi_tas_enable; + bool acpi_bios_sar_enable; + u8 tas_cfg[ATH12K_ACPI_DSM_TAS_CFG_SIZE]; + u8 tas_sar_power_table[ATH12K_ACPI_DSM_TAS_DATA_SIZE]; + u8 bios_sar_data[ATH12K_ACPI_DSM_BIOS_SAR_DATA_SIZE]; + u8 geo_offset_data[ATH12K_ACPI_DSM_GEO_OFFSET_DATA_SIZE]; + u8 cca_data[ATH12K_ACPI_DSM_CCA_DATA_SIZE]; + u8 band_edge_power[ATH12K_ACPI_DSM_BAND_EDGE_DATA_SIZE]; + } acpi; + +#endif /* CONFIG_ACPI */ /* must be last */ u8 drv_priv[] __aligned(sizeof(void *)); @@ -874,8 +949,10 @@ int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd int ath12k_core_check_dt(struct ath12k_base *ath12k); int ath12k_core_check_smbios(struct ath12k_base *ab); void ath12k_core_halt(struct ath12k *ar); +int ath12k_core_resume_early(struct ath12k_base *ab); int ath12k_core_resume(struct ath12k_base *ab); int ath12k_core_suspend(struct ath12k_base *ab); +int ath12k_core_suspend_late(struct ath12k_base *ab); const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab, const char *filename); @@ -951,13 +1028,21 @@ static inline struct ath12k_hw *ath12k_hw_to_ah(struct ieee80211_hw *hw) return hw->priv; } -static inline struct ath12k *ath12k_ah_to_ar(struct ath12k_hw *ah) +static inline struct ath12k *ath12k_ah_to_ar(struct ath12k_hw *ah, u8 hw_link_id) { - return ah->radio; + if (WARN(hw_link_id >= ah->num_radio, + "bad hw link id %d, so switch to default link\n", hw_link_id)) + hw_link_id = 0; + + return &ah->radio[hw_link_id]; } static inline struct ieee80211_hw *ath12k_ar_to_hw(struct ath12k *ar) { return ar->ah->hw; } + +#define for_each_ar(ah, ar, index) \ + for ((index) = 0; ((index) < (ah)->num_radio && \ + ((ar) = &(ah)->radio[(index)])); (index)++) #endif /* _CORE_H_ */ diff --git a/drivers/net/wireless/ath/ath12k/debugfs.c b/drivers/net/wireless/ath/ath12k/debugfs.c new file mode 100644 index 000000000000..8d8ba951093b --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/debugfs.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
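The for_each_ar() iterator above walks every struct ath12k embedded in a struct ath12k_hw; together with the new per-radio freq_low/freq_high range it is what the multi-radio paths later in this patch use to pick the right pdev for a channel or scan request. A minimal usage sketch; the function name is illustrative:

	/* Illustrative only: find the radio whose frequency range covers freq. */
	static struct ath12k *example_ar_for_freq(struct ath12k_hw *ah, u32 freq)
	{
		struct ath12k *ar;
		int i;

		for_each_ar(ah, ar, i) {
			if (freq >= ar->freq_low && freq <= ar->freq_high)
				return ar;
		}

		return NULL;
	}
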
+ */ + +#include "core.h" +#include "debugfs.h" + +static ssize_t ath12k_write_simulate_radar(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath12k *ar = file->private_data; + int ret; + + mutex_lock(&ar->conf_mutex); + ret = ath12k_wmi_simulate_radar(ar); + if (ret) + goto exit; + + ret = count; +exit: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static const struct file_operations fops_simulate_radar = { + .write = ath12k_write_simulate_radar, + .open = simple_open +}; + +void ath12k_debugfs_soc_create(struct ath12k_base *ab) +{ + bool dput_needed; + char soc_name[64] = { 0 }; + struct dentry *debugfs_ath12k; + + debugfs_ath12k = debugfs_lookup("ath12k", NULL); + if (debugfs_ath12k) { + /* a dentry from lookup() needs dput() after we don't use it */ + dput_needed = true; + } else { + debugfs_ath12k = debugfs_create_dir("ath12k", NULL); + if (IS_ERR_OR_NULL(debugfs_ath12k)) + return; + dput_needed = false; + } + + scnprintf(soc_name, sizeof(soc_name), "%s-%s", ath12k_bus_str(ab->hif.bus), + dev_name(ab->dev)); + + ab->debugfs_soc = debugfs_create_dir(soc_name, debugfs_ath12k); + + if (dput_needed) + dput(debugfs_ath12k); +} + +void ath12k_debugfs_soc_destroy(struct ath12k_base *ab) +{ + debugfs_remove_recursive(ab->debugfs_soc); + ab->debugfs_soc = NULL; + /* We are not removing ath12k directory on purpose, even if it + * would be empty. This simplifies the directory handling and it's + * a minor cosmetic issue to leave an empty ath12k directory to + * debugfs. + */ +} + +void ath12k_debugfs_register(struct ath12k *ar) +{ + struct ath12k_base *ab = ar->ab; + struct ieee80211_hw *hw = ar->ah->hw; + char pdev_name[5]; + char buf[100] = {0}; + + scnprintf(pdev_name, sizeof(pdev_name), "%s%d", "mac", ar->pdev_idx); + + ar->debug.debugfs_pdev = debugfs_create_dir(pdev_name, ab->debugfs_soc); + + /* Create a symlink under ieee80211/phy* */ + scnprintf(buf, sizeof(buf), "../../ath12k/%pd2", ar->debug.debugfs_pdev); + debugfs_create_symlink("ath12k", hw->wiphy->debugfsdir, buf); + + if (ar->mac.sbands[NL80211_BAND_5GHZ].channels) { + debugfs_create_file("dfs_simulate_radar", 0200, + ar->debug.debugfs_pdev, ar, + &fops_simulate_radar); + } +} diff --git a/drivers/net/wireless/ath/ath12k/debugfs.h b/drivers/net/wireless/ath/ath12k/debugfs.h new file mode 100644 index 000000000000..a62f2a550b23 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/debugfs.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _ATH12K_DEBUGFS_H_ +#define _ATH12K_DEBUGFS_H_ + +#ifdef CONFIG_ATH12K_DEBUGFS +void ath12k_debugfs_soc_create(struct ath12k_base *ab); +void ath12k_debugfs_soc_destroy(struct ath12k_base *ab); +void ath12k_debugfs_register(struct ath12k *ar); + +#else +static inline void ath12k_debugfs_soc_create(struct ath12k_base *ab) +{ +} + +static inline void ath12k_debugfs_soc_destroy(struct ath12k_base *ab) +{ +} + +static inline void ath12k_debugfs_register(struct ath12k *ar) +{ +} + +#endif /* CONFIG_ATH12K_DEBUGFS */ + +#endif /* _ATH12K_DEBUGFS_H_ */ diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c index c8e1b244b69e..7843c76a82c1 100644 --- a/drivers/net/wireless/ath/ath12k/dp.c +++ b/drivers/net/wireless/ath/ath12k/dp.c @@ -14,6 +14,11 @@ #include "peer.h" #include "dp_mon.h" +enum ath12k_dp_desc_type { + ATH12K_DP_TX_DESC, + ATH12K_DP_RX_DESC, +}; + static void ath12k_dp_htt_htc_tx_complete(struct ath12k_base *ab, struct sk_buff *skb) { @@ -960,8 +965,9 @@ int ath12k_dp_service_srng(struct ath12k_base *ab, if (ab->hw_params->ring_mask->host2rxdma[grp_id]) { struct ath12k_dp *dp = &ab->dp; struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; + LIST_HEAD(list); - ath12k_dp_rx_bufs_replenish(ab, rx_ring, 0); + ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0); } /* TODO: Implement handler for other interrupts */ @@ -1146,11 +1152,13 @@ void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_vif *arvif) static void ath12k_dp_cc_cleanup(struct ath12k_base *ab) { - struct ath12k_rx_desc_info *desc_info, *tmp; + struct ath12k_rx_desc_info *desc_info; struct ath12k_tx_desc_info *tx_desc_info, *tmp1; struct ath12k_dp *dp = &ab->dp; + struct ath12k_skb_cb *skb_cb; struct sk_buff *skb; - int i; + struct ath12k *ar; + int i, j; u32 pool_id, tx_spt_page; if (!dp->spt_info) @@ -1159,16 +1167,23 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab) /* RX Descriptor cleanup */ spin_lock_bh(&dp->rx_desc_lock); - list_for_each_entry_safe(desc_info, tmp, &dp->rx_desc_used_list, list) { - list_del(&desc_info->list); - skb = desc_info->skb; + for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) { + desc_info = dp->spt_info->rxbaddr[i]; - if (!skb) - continue; + for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) { + if (!desc_info[j].in_use) { + list_del(&desc_info[j].list); + continue; + } + + skb = desc_info[j].skb; + if (!skb) + continue; - dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr, - skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); - dev_kfree_skb_any(skb); + dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr, + skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + } } for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) { @@ -1193,6 +1208,11 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab) if (!skb) continue; + skb_cb = ATH12K_SKB_CB(skb); + ar = skb_cb->ar; + if (atomic_dec_and_test(&ar->dp.num_tx_pending)) + wake_up(&ar->dp.tx_empty_waitq); + dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr, skb->len, DMA_TO_DEVICE); dev_kfree_skb_any(skb); @@ -1336,12 +1356,16 @@ struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab, u32 cookie) { struct ath12k_rx_desc_info **desc_addr_ptr; - u16 ppt_idx, spt_idx; + u16 start_ppt_idx, end_ppt_idx, ppt_idx, spt_idx; ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT); - spt_idx = u32_get_bits(cookie, ATH12k_DP_CC_COOKIE_SPT); + spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT); + + start_ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET; + end_ppt_idx = 
start_ppt_idx + ATH12K_NUM_RX_SPT_PAGES; - if (ppt_idx > ATH12K_NUM_RX_SPT_PAGES || + if (ppt_idx < start_ppt_idx || + ppt_idx >= end_ppt_idx || spt_idx > ATH12K_MAX_SPT_ENTRIES) return NULL; @@ -1354,13 +1378,17 @@ struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab, u32 cookie) { struct ath12k_tx_desc_info **desc_addr_ptr; - u16 ppt_idx, spt_idx; + u16 start_ppt_idx, end_ppt_idx, ppt_idx, spt_idx; ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT); - spt_idx = u32_get_bits(cookie, ATH12k_DP_CC_COOKIE_SPT); + spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT); - if (ppt_idx < ATH12K_NUM_RX_SPT_PAGES || - ppt_idx > ab->dp.num_spt_pages || + start_ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET; + end_ppt_idx = start_ppt_idx + + (ATH12K_TX_SPT_PAGES_PER_POOL * ATH12K_HW_MAX_QUEUES); + + if (ppt_idx < start_ppt_idx || + ppt_idx >= end_ppt_idx || spt_idx > ATH12K_MAX_SPT_ENTRIES) return NULL; @@ -1389,15 +1417,16 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab) return -ENOMEM; } + ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET + i; dp->spt_info->rxbaddr[i] = &rx_descs[0]; for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) { - rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(i, j); + rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(ppt_idx, j); rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC; list_add_tail(&rx_descs[j].list, &dp->rx_desc_free_list); /* Update descriptor VA in SPT */ - rx_desc_addr = ath12k_dp_cc_get_desc_addr_ptr(ab, i, j); + rx_desc_addr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, j); *rx_desc_addr = &rx_descs[j]; } } @@ -1417,10 +1446,11 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab) } tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL; + ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET + tx_spt_page; + dp->spt_info->txbaddr[tx_spt_page] = &tx_descs[0]; for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) { - ppt_idx = ATH12K_NUM_RX_SPT_PAGES + tx_spt_page; tx_descs[j].desc_id = ath12k_dp_cc_cookie_gen(ppt_idx, j); tx_descs[j].pool_id = pool_id; list_add_tail(&tx_descs[j].list, @@ -1437,14 +1467,43 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab) return 0; } +static int ath12k_dp_cmem_init(struct ath12k_base *ab, + struct ath12k_dp *dp, + enum ath12k_dp_desc_type type) +{ + u32 cmem_base; + int i, start, end; + + cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start; + + switch (type) { + case ATH12K_DP_TX_DESC: + start = ATH12K_TX_SPT_PAGE_OFFSET; + end = start + ATH12K_NUM_TX_SPT_PAGES; + break; + case ATH12K_DP_RX_DESC: + start = ATH12K_RX_SPT_PAGE_OFFSET; + end = start + ATH12K_NUM_RX_SPT_PAGES; + break; + default: + ath12k_err(ab, "invalid descriptor type %d in cmem init\n", type); + return -EINVAL; + } + + /* Write to PPT in CMEM */ + for (i = start; i < end; i++) + ath12k_hif_write32(ab, cmem_base + ATH12K_PPT_ADDR_OFFSET(i), + dp->spt_info[i].paddr >> ATH12K_SPT_4K_ALIGN_OFFSET); + + return 0; +} + static int ath12k_dp_cc_init(struct ath12k_base *ab) { struct ath12k_dp *dp = &ab->dp; int i, ret = 0; - u32 cmem_base; INIT_LIST_HEAD(&dp->rx_desc_free_list); - INIT_LIST_HEAD(&dp->rx_desc_used_list); spin_lock_init(&dp->rx_desc_lock); for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) { @@ -1465,8 +1524,6 @@ static int ath12k_dp_cc_init(struct ath12k_base *ab) return -ENOMEM; } - cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start; - for (i = 0; i < dp->num_spt_pages; i++) { dp->spt_info[i].vaddr = dma_alloc_coherent(ab->dev, ATH12K_PAGE_SIZE, @@ -1483,10 +1540,18 @@ static int ath12k_dp_cc_init(struct ath12k_base *ab) ret = -EINVAL; goto 
free; } + } - /* Write to PPT in CMEM */ - ath12k_hif_write32(ab, cmem_base + ATH12K_PPT_ADDR_OFFSET(i), - dp->spt_info[i].paddr >> ATH12K_SPT_4K_ALIGN_OFFSET); + ret = ath12k_dp_cmem_init(ab, dp, ATH12K_DP_TX_DESC); + if (ret) { + ath12k_warn(ab, "HW CC Tx cmem init failed %d", ret); + goto free; + } + + ret = ath12k_dp_cmem_init(ab, dp, ATH12K_DP_RX_DESC); + if (ret) { + ath12k_warn(ab, "HW CC Rx cmem init failed %d", ret); + goto free; } ret = ath12k_dp_cc_desc_init(ab); diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h index eb2dd408e081..5cf0d21ef184 100644 --- a/drivers/net/wireless/ath/ath12k/dp.h +++ b/drivers/net/wireless/ath/ath12k/dp.h @@ -223,6 +223,9 @@ struct ath12k_pdev_dp { #define ATH12K_NUM_TX_SPT_PAGES (ATH12K_TX_SPT_PAGES_PER_POOL * ATH12K_HW_MAX_QUEUES) #define ATH12K_NUM_SPT_PAGES (ATH12K_NUM_RX_SPT_PAGES + ATH12K_NUM_TX_SPT_PAGES) +#define ATH12K_TX_SPT_PAGE_OFFSET 0 +#define ATH12K_RX_SPT_PAGE_OFFSET ATH12K_NUM_TX_SPT_PAGES + /* The SPT pages are divided for RX and TX, first block for RX * and remaining for TX */ @@ -245,7 +248,7 @@ struct ath12k_pdev_dp { #define ATH12K_CC_SPT_MSB 8 #define ATH12K_CC_PPT_MSB 19 #define ATH12K_CC_PPT_SHIFT 9 -#define ATH12k_DP_CC_COOKIE_SPT GENMASK(8, 0) +#define ATH12K_DP_CC_COOKIE_SPT GENMASK(8, 0) #define ATH12K_DP_CC_COOKIE_PPT GENMASK(19, 9) #define DP_REO_QREF_NUM GENMASK(31, 16) @@ -282,6 +285,8 @@ struct ath12k_rx_desc_info { struct sk_buff *skb; u32 cookie; u32 magic; + u8 in_use : 1, + reserved : 7; }; struct ath12k_tx_desc_info { @@ -347,8 +352,7 @@ struct ath12k_dp { struct ath12k_spt_info *spt_info; u32 num_spt_pages; struct list_head rx_desc_free_list; - struct list_head rx_desc_used_list; - /* protects the free and used desc list */ + /* protects the free desc list */ spinlock_t rx_desc_lock; struct list_head tx_desc_free_list[ATH12K_HW_MAX_QUEUES]; @@ -377,8 +381,6 @@ struct ath12k_dp { /* peer meta data */ #define HTT_TCL_META_DATA_PEER_ID GENMASK(15, 2) -#define HTT_TX_WBM_COMP_STATUS_OFFSET 8 - /* HTT tx completion is overlaid in wbm_release_ring */ #define HTT_TX_WBM_COMP_INFO0_STATUS GENMASK(16, 13) #define HTT_TX_WBM_COMP_INFO1_REINJECT_REASON GENMASK(3, 0) diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c index 2d56913a75d0..6b0b72477540 100644 --- a/drivers/net/wireless/ath/ath12k/dp_mon.c +++ b/drivers/net/wireless/ath/ath12k/dp_mon.c @@ -944,7 +944,7 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, goto err_merge_fail; ath12k_dbg(ab, ATH12K_DBG_DATA, - "mpdu_buf %pK mpdu_buf->len %u", + "mpdu_buf %p mpdu_buf->len %u", prev_buf, prev_buf->len); } else { ath12k_dbg(ab, ATH12K_DBG_DATA, @@ -958,7 +958,7 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, err_merge_fail: if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) { ath12k_dbg(ab, ATH12K_DBG_DATA, - "err_merge_fail mpdu_buf %pK", mpdu_buf); + "err_merge_fail mpdu_buf %p", mpdu_buf); /* Free the head buffer */ dev_kfree_skb_any(mpdu_buf); } @@ -1092,7 +1092,7 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct spin_unlock_bh(&ar->ab->base_lock); ath12k_dbg(ar->ab, ATH12K_DBG_DATA, - "rx skb %pK len %u peer %pM %u %s %s%s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", + "rx skb %p len %u peer %pM %u %s %s%s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", msdu, msdu->len, peer ? 
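With ATH12K_TX_SPT_PAGE_OFFSET placing the TX pool at the start of the PPT and ATH12K_RX_SPT_PAGE_OFFSET placing the RX pool right after it, a converted cookie now splits into a page index (ATH12K_DP_CC_COOKIE_PPT) and an entry index (ATH12K_DP_CC_COOKIE_SPT) that must fall inside the owning pool's range, as the reworked ath12k_dp_get_rx_desc()/ath12k_dp_get_tx_desc() checks above show. A small sketch of that range test; the helper name is illustrative:

	/* Illustrative only: does this cookie point into the RX descriptor pool? */
	static bool example_cookie_is_rx_desc(u32 cookie)
	{
		u16 ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);

		/* RX pages follow the TX pages in the PPT */
		return ppt_idx >= ATH12K_RX_SPT_PAGE_OFFSET &&
		       ppt_idx < ATH12K_RX_SPT_PAGE_OFFSET + ATH12K_NUM_RX_SPT_PAGES;
	}
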
peer->addr : NULL, diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c index ca76c018dd0c..75df622f25d8 100644 --- a/drivers/net/wireless/ath/ath12k/dp_rx.c +++ b/drivers/net/wireless/ath/ath12k/dp_rx.c @@ -239,31 +239,61 @@ static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_base *ab, return ab->hal_rx_ops->rx_desc_get_msdu_src_link_id(desc); } -static int ath12k_dp_purge_mon_ring(struct ath12k_base *ab) +static void ath12k_dp_clean_up_skb_list(struct sk_buff_head *skb_list) { - int i, reaped = 0; - unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS); + struct sk_buff *skb; + + while ((skb = __skb_dequeue(skb_list))) + dev_kfree_skb_any(skb); +} + +static size_t ath12k_dp_list_cut_nodes(struct list_head *list, + struct list_head *head, + size_t count) +{ + struct list_head *cur; + struct ath12k_rx_desc_info *rx_desc; + size_t nodes = 0; + + if (!count) { + INIT_LIST_HEAD(list); + goto out; + } + + list_for_each(cur, head) { + if (!count) + break; + + rx_desc = list_entry(cur, struct ath12k_rx_desc_info, list); + rx_desc->in_use = true; - do { - for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) - reaped += ath12k_dp_mon_process_ring(ab, i, NULL, - DP_MON_SERVICE_BUDGET, - ATH12K_DP_RX_MONITOR_MODE); + count--; + nodes++; + } - /* nothing more to reap */ - if (reaped < DP_MON_SERVICE_BUDGET) - return 0; + list_cut_before(list, head, cur); +out: + return nodes; +} - } while (time_before(jiffies, timeout)); +static void ath12k_dp_rx_enqueue_free(struct ath12k_dp *dp, + struct list_head *used_list) +{ + struct ath12k_rx_desc_info *rx_desc, *safe; - ath12k_warn(ab, "dp mon ring purge timeout"); + /* Reset the use flag */ + list_for_each_entry_safe(rx_desc, safe, used_list, list) + rx_desc->in_use = false; - return -ETIMEDOUT; + spin_lock_bh(&dp->rx_desc_lock); + list_splice_tail(used_list, &dp->rx_desc_free_list); + spin_unlock_bh(&dp->rx_desc_lock); } /* Returns number of Rx buffers replenished */ int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, struct dp_rxdma_ring *rx_ring, + struct list_head *used_list, int req_entries) { struct ath12k_buffer_addr *desc; @@ -292,6 +322,19 @@ int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, req_entries = min(num_free, req_entries); num_remain = req_entries; + if (!num_remain) + goto out; + + /* Get the descriptor from free list */ + if (list_empty(used_list)) { + spin_lock_bh(&dp->rx_desc_lock); + req_entries = ath12k_dp_list_cut_nodes(used_list, + &dp->rx_desc_free_list, + num_remain); + spin_unlock_bh(&dp->rx_desc_lock); + num_remain = req_entries; + } + while (num_remain > 0) { skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + DP_RX_BUFFER_ALIGN_SIZE); @@ -311,33 +354,20 @@ int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, if (dma_mapping_error(ab->dev, paddr)) goto fail_free_skb; - spin_lock_bh(&dp->rx_desc_lock); - - /* Get desc from free list and store in used list - * for cleanup purposes - * - * TODO: pass the removed descs rather than - * add/read to optimize - */ - rx_desc = list_first_entry_or_null(&dp->rx_desc_free_list, + rx_desc = list_first_entry_or_null(used_list, struct ath12k_rx_desc_info, list); - if (!rx_desc) { - spin_unlock_bh(&dp->rx_desc_lock); + if (!rx_desc) goto fail_dma_unmap; - } rx_desc->skb = skb; cookie = rx_desc->cookie; - list_del(&rx_desc->list); - list_add_tail(&rx_desc->list, &dp->rx_desc_used_list); - - spin_unlock_bh(&dp->rx_desc_lock); desc = ath12k_hal_srng_src_get_next_entry(ab, srng); if (!desc) - goto fail_buf_unassign; + 
goto fail_dma_unmap; + list_del(&rx_desc->list); ATH12K_SKB_RXCB(skb)->paddr = paddr; num_remain--; @@ -345,26 +375,19 @@ int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); } - ath12k_hal_srng_access_end(ab, srng); - - spin_unlock_bh(&srng->lock); + goto out; - return req_entries - num_remain; - -fail_buf_unassign: - spin_lock_bh(&dp->rx_desc_lock); - list_del(&rx_desc->list); - list_add_tail(&rx_desc->list, &dp->rx_desc_free_list); - rx_desc->skb = NULL; - spin_unlock_bh(&dp->rx_desc_lock); fail_dma_unmap: dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); fail_free_skb: dev_kfree_skb_any(skb); - +out: ath12k_hal_srng_access_end(ab, srng); + if (!list_empty(used_list)) + ath12k_dp_rx_enqueue_free(dp, used_list); + spin_unlock_bh(&srng->lock); return req_entries - num_remain; @@ -422,13 +445,12 @@ static int ath12k_dp_rxdma_mon_ring_buf_setup(struct ath12k_base *ab, static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab, struct dp_rxdma_ring *rx_ring) { - int num_entries; + LIST_HEAD(list); - num_entries = rx_ring->refill_buf_ring.size / - ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_BUF); + rx_ring->bufs_max = rx_ring->refill_buf_ring.size / + ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_BUF); - rx_ring->bufs_max = num_entries; - ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_entries); + ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0); return 0; } @@ -2423,7 +2445,7 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap spin_unlock_bh(&ab->base_lock); ath12k_dbg(ab, ATH12K_DBG_DATA, - "rx skb %pK len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", + "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", msdu, msdu->len, peer ? 
peer->addr : NULL, @@ -2585,6 +2607,7 @@ static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab, int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id, struct napi_struct *napi, int budget) { + LIST_HEAD(rx_desc_used_list); struct ath12k_rx_desc_info *desc_info; struct ath12k_dp *dp = &ab->dp; struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; @@ -2637,9 +2660,7 @@ try_again: msdu = desc_info->skb; desc_info->skb = NULL; - spin_lock_bh(&dp->rx_desc_lock); - list_move_tail(&desc_info->list, &dp->rx_desc_free_list); - spin_unlock_bh(&dp->rx_desc_lock); + list_add_tail(&desc_info->list, &rx_desc_used_list); rxcb = ATH12K_SKB_RXCB(msdu); dma_unmap_single(ab->dev, rxcb->paddr, @@ -2700,7 +2721,8 @@ try_again: if (!total_msdu_reaped) goto exit; - ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_buffs_reaped); + ath12k_dp_rx_bufs_replenish(ab, rx_ring, &rx_desc_used_list, + num_buffs_reaped); ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list, ring_id); @@ -3021,9 +3043,9 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar, } desc_info->skb = defrag_skb; + desc_info->in_use = true; list_del(&desc_info->list); - list_add_tail(&desc_info->list, &dp->rx_desc_used_list); spin_unlock_bh(&dp->rx_desc_lock); ATH12K_SKB_RXCB(defrag_skb)->paddr = buf_paddr; @@ -3085,9 +3107,9 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar, err_free_desc: spin_lock_bh(&dp->rx_desc_lock); - list_del(&desc_info->list); - list_add_tail(&desc_info->list, &dp->rx_desc_free_list); + desc_info->in_use = false; desc_info->skb = NULL; + list_add_tail(&desc_info->list, &dp->rx_desc_free_list); spin_unlock_bh(&dp->rx_desc_lock); err_unmap_dma: dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb), @@ -3304,6 +3326,7 @@ out_unlock: static int ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc, + struct list_head *used_list, bool drop, u32 cookie) { struct ath12k_base *ab = ar->ab; @@ -3333,9 +3356,8 @@ ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc, msdu = desc_info->skb; desc_info->skb = NULL; - spin_lock_bh(&ab->dp.rx_desc_lock); - list_move_tail(&desc_info->list, &ab->dp.rx_desc_free_list); - spin_unlock_bh(&ab->dp.rx_desc_lock); + + list_add_tail(&desc_info->list, used_list); rxcb = ATH12K_SKB_RXCB(msdu); dma_unmap_single(ar->ab->dev, rxcb->paddr, @@ -3391,6 +3413,7 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, struct hal_reo_dest_ring *reo_desc; struct dp_rxdma_ring *rx_ring; struct dp_srng *reo_except; + LIST_HEAD(rx_desc_used_list); u32 desc_bank, num_msdus; struct hal_srng *srng; struct ath12k_dp *dp; @@ -3458,7 +3481,9 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id); ar = ab->pdevs[pdev_id].ar; - if (!ath12k_dp_process_rx_err_buf(ar, reo_desc, drop, + if (!ath12k_dp_process_rx_err_buf(ar, reo_desc, + &rx_desc_used_list, + drop, msdu_cookies[i])) tot_n_bufs_reaped++; } @@ -3478,7 +3503,8 @@ exit: rx_ring = &dp->rx_refill_buf_ring; - ath12k_dp_rx_bufs_replenish(ab, rx_ring, tot_n_bufs_reaped); + ath12k_dp_rx_bufs_replenish(ab, rx_ring, &rx_desc_used_list, + tot_n_bufs_reaped); return tot_n_bufs_reaped; } @@ -3695,25 +3721,27 @@ static void ath12k_dp_rx_wbm_err(struct ath12k *ar, int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab, struct napi_struct *napi, int budget) { + LIST_HEAD(rx_desc_used_list); struct ath12k *ar; struct ath12k_dp *dp = 
&ab->dp; struct dp_rxdma_ring *rx_ring; struct hal_rx_wbm_rel_info err_info; struct hal_srng *srng; struct sk_buff *msdu; - struct sk_buff_head msdu_list; + struct sk_buff_head msdu_list, scatter_msdu_list; struct ath12k_skb_rxcb *rxcb; void *rx_desc; u8 mac_id; int num_buffs_reaped = 0; struct ath12k_rx_desc_info *desc_info; int ret, pdev_id; + struct hal_rx_desc *msdu_data; __skb_queue_head_init(&msdu_list); + __skb_queue_head_init(&scatter_msdu_list); srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id]; rx_ring = &dp->rx_refill_buf_ring; - spin_lock_bh(&srng->lock); ath12k_hal_srng_access_begin(ab, srng); @@ -3748,9 +3776,7 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab, msdu = desc_info->skb; desc_info->skb = NULL; - spin_lock_bh(&dp->rx_desc_lock); - list_move_tail(&desc_info->list, &dp->rx_desc_free_list); - spin_unlock_bh(&dp->rx_desc_lock); + list_add_tail(&desc_info->list, &rx_desc_used_list); rxcb = ATH12K_SKB_RXCB(msdu); dma_unmap_single(ab->dev, rxcb->paddr, @@ -3768,17 +3794,53 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab, continue; } + msdu_data = (struct hal_rx_desc *)msdu->data; rxcb->err_rel_src = err_info.err_rel_src; rxcb->err_code = err_info.err_code; - rxcb->rx_desc = (struct hal_rx_desc *)msdu->data; - - __skb_queue_tail(&msdu_list, msdu); - rxcb->is_first_msdu = err_info.first_msdu; rxcb->is_last_msdu = err_info.last_msdu; rxcb->is_continuation = err_info.continuation; + rxcb->rx_desc = msdu_data; + + if (err_info.continuation) { + __skb_queue_tail(&scatter_msdu_list, msdu); + continue; + } + + mac_id = ath12k_dp_rx_get_msdu_src_link(ab, + msdu_data); + if (mac_id >= MAX_RADIOS) { + dev_kfree_skb_any(msdu); + + /* In any case continuation bit is set + * in the previous record, cleanup scatter_msdu_list + */ + ath12k_dp_clean_up_skb_list(&scatter_msdu_list); + continue; + } + + if (!skb_queue_empty(&scatter_msdu_list)) { + struct sk_buff *msdu; + + skb_queue_walk(&scatter_msdu_list, msdu) { + rxcb = ATH12K_SKB_RXCB(msdu); + rxcb->mac_id = mac_id; + } + + skb_queue_splice_tail_init(&scatter_msdu_list, + &msdu_list); + } + + rxcb = ATH12K_SKB_RXCB(msdu); + rxcb->mac_id = mac_id; + __skb_queue_tail(&msdu_list, msdu); } + /* In any case continuation bit is set in the + * last record, cleanup scatter_msdu_list + */ + ath12k_dp_clean_up_skb_list(&scatter_msdu_list); + ath12k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); @@ -3786,12 +3848,14 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab, if (!num_buffs_reaped) goto done; - ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_buffs_reaped); + ath12k_dp_rx_bufs_replenish(ab, rx_ring, &rx_desc_used_list, + num_buffs_reaped); rcu_read_lock(); while ((msdu = __skb_dequeue(&msdu_list))) { - mac_id = ath12k_dp_rx_get_msdu_src_link(ab, - (struct hal_rx_desc *)msdu->data); + rxcb = ATH12K_SKB_RXCB(msdu); + mac_id = rxcb->mac_id; + pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id); ar = ab->pdevs[pdev_id].ar; @@ -4224,29 +4288,3 @@ int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar) return 0; } - -int ath12k_dp_rx_pktlog_start(struct ath12k_base *ab) -{ - /* start reap timer */ - mod_timer(&ab->mon_reap_timer, - jiffies + msecs_to_jiffies(ATH12K_MON_TIMER_INTERVAL)); - - return 0; -} - -int ath12k_dp_rx_pktlog_stop(struct ath12k_base *ab, bool stop_timer) -{ - int ret; - - if (stop_timer) - del_timer_sync(&ab->mon_reap_timer); - - /* reap all the monitor related rings */ - ret = ath12k_dp_purge_mon_ring(ab); - if (ret) { - ath12k_warn(ab, "failed to purge dp mon ring: %d\n", ret); 
- return ret; - } - - return 0; -} diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.h b/drivers/net/wireless/ath/ath12k/dp_rx.h index 05b3d5581dbe..2ff421160181 100644 --- a/drivers/net/wireless/ath/ath12k/dp_rx.h +++ b/drivers/net/wireless/ath/ath12k/dp_rx.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH12K_DP_RX_H #define ATH12K_DP_RX_H @@ -118,12 +118,11 @@ int ath12k_dp_rx_process(struct ath12k_base *ab, int mac_id, int budget); int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, struct dp_rxdma_ring *rx_ring, + struct list_head *used_list, int req_entries); int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar); int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id); -int ath12k_dp_rx_pktlog_start(struct ath12k_base *ab); -int ath12k_dp_rx_pktlog_stop(struct ath12k_base *ab, bool stop_timer); u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab, struct hal_rx_desc *desc); struct ath12k_peer * diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c index 572b87153647..9b6d7d72f57c 100644 --- a/drivers/net/wireless/ath/ath12k/dp_tx.c +++ b/drivers/net/wireless/ath/ath12k/dp_tx.c @@ -414,7 +414,7 @@ ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab, struct ath12k_dp_htt_wbm_tx_status ts = {0}; enum hal_wbm_htt_tx_comp_status wbm_status; - status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET; + status_desc = desc; wbm_status = le32_get_bits(status_desc->info0, HTT_TX_WBM_COMP_INFO0_STATUS); diff --git a/drivers/net/wireless/ath/ath12k/hal.h b/drivers/net/wireless/ath/ath12k/hal.h index 107927d64bbb..dbb9205bfa10 100644 --- a/drivers/net/wireless/ath/ath12k/hal.h +++ b/drivers/net/wireless/ath/ath12k/hal.h @@ -767,7 +767,7 @@ struct hal_srng_config { }; /** - * enum hal_rx_buf_return_buf_manager + * enum hal_rx_buf_return_buf_manager - manager for returned rx buffers * * @HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST: Buffer returned to WBM idle buffer list * @HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST: Descriptor returned to WBM idle diff --git a/drivers/net/wireless/ath/ath12k/hif.h b/drivers/net/wireless/ath/ath12k/hif.h index c653ca1f59b2..7f0926fe751d 100644 --- a/drivers/net/wireless/ath/ath12k/hif.h +++ b/drivers/net/wireless/ath/ath12k/hif.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
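ath12k_dp_rx_bufs_replenish() now takes a caller-owned list of descriptors reaped in the current pass instead of moving every descriptor through a global used list under rx_desc_lock; an empty list with req_entries == 0 means "cut fresh descriptors from the free list", as in ath12k_dp_rxdma_ring_buf_setup(). A minimal sketch of the new calling convention, condensed from the reap loops above; the function name is illustrative:

	/* Illustrative only: refill the ring with descriptors reaped this pass. */
	static void example_refill_after_reap(struct ath12k_base *ab,
					      struct dp_rxdma_ring *rx_ring,
					      struct ath12k_rx_desc_info *desc_info)
	{
		LIST_HEAD(used_list);
		int num_buffs_reaped = 0;

		/* each reaped descriptor goes onto the local list */
		list_add_tail(&desc_info->list, &used_list);
		num_buffs_reaped++;

		/* replenish reuses these descriptors first and returns any
		 * leftovers to dp->rx_desc_free_list internally
		 */
		ath12k_dp_rx_bufs_replenish(ab, rx_ring, &used_list,
					    num_buffs_reaped);
	}
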
*/ #ifndef ATH12K_HIF_H @@ -17,7 +17,7 @@ struct ath12k_hif_ops { int (*start)(struct ath12k_base *ab); void (*stop)(struct ath12k_base *ab); int (*power_up)(struct ath12k_base *ab); - void (*power_down)(struct ath12k_base *ab); + void (*power_down)(struct ath12k_base *ab, bool is_suspend); int (*suspend)(struct ath12k_base *ab); int (*resume)(struct ath12k_base *ab); int (*map_service_to_pipe)(struct ath12k_base *ab, u16 service_id, @@ -133,12 +133,18 @@ static inline void ath12k_hif_write32(struct ath12k_base *ab, u32 address, static inline int ath12k_hif_power_up(struct ath12k_base *ab) { + if (!ab->hif.ops->power_up) + return -EOPNOTSUPP; + return ab->hif.ops->power_up(ab); } -static inline void ath12k_hif_power_down(struct ath12k_base *ab) +static inline void ath12k_hif_power_down(struct ath12k_base *ab, bool is_suspend) { - ab->hif.ops->power_down(ab); + if (!ab->hif.ops->power_down) + return; + + ab->hif.ops->power_down(ab, is_suspend); } #endif /* ATH12K_HIF_H */ diff --git a/drivers/net/wireless/ath/ath12k/htc.c b/drivers/net/wireless/ath/ath12k/htc.c index 23f7428abd95..2f2230f565bb 100644 --- a/drivers/net/wireless/ath/ath12k/htc.c +++ b/drivers/net/wireless/ath/ath12k/htc.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #include <linux/skbuff.h> #include <linux/ctype.h> @@ -358,7 +358,7 @@ void ath12k_htc_rx_completion_handler(struct ath12k_base *ab, goto out; } - ath12k_dbg(ab, ATH12K_DBG_HTC, "htc rx completion ep %d skb %pK\n", + ath12k_dbg(ab, ATH12K_DBG_HTC, "htc rx completion ep %d skb %p\n", eid, skb); ep->ep_ops.ep_rx_complete(ab, skb); diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c index 0b17dfd47856..f4c827015821 100644 --- a/drivers/net/wireless/ath/ath12k/hw.c +++ b/drivers/net/wireless/ath/ath12k/hw.c @@ -15,6 +15,10 @@ #include "mhi.h" #include "dp_rx.h" +static const guid_t wcn7850_uuid = GUID_INIT(0xf634f534, 0x6147, 0x11ec, + 0x90, 0xd6, 0x02, 0x42, + 0xac, 0x12, 0x00, 0x03); + static u8 ath12k_hw_qcn9274_mac_from_pdev_id(int pdev_idx) { return pdev_idx; @@ -920,6 +924,8 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .otp_board_id_register = QCN9274_QFPROM_RAW_RFA_PDET_ROW13_LSB, .supports_sta_ps = false, + + .acpi_guid = NULL, }, { .name = "wcn7850 hw2.0", @@ -964,7 +970,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .idle_ps = true, .download_calib = false, - .supports_suspend = false, + .supports_suspend = true, .tcl_ring_retry = false, .reoq_lut_support = false, .supports_shadow_regs = true, @@ -993,6 +999,8 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .otp_board_id_register = 0, .supports_sta_ps = true, + + .acpi_guid = &wcn7850_uuid, }, { .name = "qcn9274 hw2.0", @@ -1061,6 +1069,8 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .otp_board_id_register = QCN9274_QFPROM_RAW_RFA_PDET_ROW13_LSB, .supports_sta_ps = false, + + .acpi_guid = NULL, }, }; diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h index 87965980b938..3f450ee93f34 100644 --- a/drivers/net/wireless/ath/ath12k/hw.h +++ b/drivers/net/wireless/ath/ath12k/hw.h @@ -8,6 +8,7 @@ #define ATH12K_HW_H #include <linux/mhi.h> +#include <linux/uuid.h> #include "wmi.h" #include "hal.h" @@ -80,6 +81,7 @@ 
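hw.c now carries a per-chip _DSM GUID (wcn7850_uuid) exposed through hw_params->acpi_guid, and a NULL GUID marks hardware without ACPI support. A minimal sketch of evaluating one _DSM function against that GUID with the generic ACPI helpers; the helper name, revision 0 and the absence of a package argument are assumptions for illustration, not taken from this patch:

	/* Illustrative only: caller must ACPI_FREE() the returned object. */
	static union acpi_object *example_acpi_dsm_eval(struct ath12k_base *ab,
							u64 func)
	{
		const guid_t *guid = ab->hw_params->acpi_guid;

		if (!guid)	/* e.g. qcn9274: .acpi_guid = NULL */
			return NULL;

		return acpi_evaluate_dsm(ACPI_HANDLE(ab->dev), guid, 0, func, NULL);
	}
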
#define TARGET_RX_PEER_METADATA_VER_V1A 2 #define TARGET_RX_PEER_METADATA_VER_V1B 3 +#define ATH12K_HW_DEFAULT_QUEUE 0 #define ATH12K_HW_MAX_QUEUES 4 #define ATH12K_QUEUE_LEN 4096 @@ -211,6 +213,8 @@ struct ath12k_hw_params { u32 otp_board_id_register; bool supports_sta_ps; + + const guid_t *acpi_guid; }; struct ath12k_hw_ops { diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c index 52a5fb8b03e9..805cb084484a 100644 --- a/drivers/net/wireless/ath/ath12k/mac.c +++ b/drivers/net/wireless/ath/ath12k/mac.c @@ -14,6 +14,7 @@ #include "dp_tx.h" #include "dp_rx.h" #include "peer.h" +#include "debugfs.h" #define CHAN2G(_channel, _freq, _flags) { \ .band = NL80211_BAND_2GHZ, \ @@ -243,6 +244,9 @@ static const u32 ath12k_smps_map[] = { static int ath12k_start_vdev_delay(struct ath12k *ar, struct ath12k_vif *arvif); +static void ath12k_mac_stop(struct ath12k *ar); +static int ath12k_mac_vdev_create(struct ath12k *ar, struct ieee80211_vif *vif); +static int ath12k_mac_vdev_delete(struct ath12k *ar, struct ieee80211_vif *vif); static const char *ath12k_mac_phymode_str(enum wmi_phy_mode mode) { @@ -530,7 +534,8 @@ static void ath12k_get_arvif_iter(void *data, u8 *mac, struct ath12k_vif_iter *arvif_iter = data; struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); - if (arvif->vdev_id == arvif_iter->vdev_id) + if (arvif->vdev_id == arvif_iter->vdev_id && + arvif->ar == arvif_iter->ar) arvif_iter->arvif = arvif; } @@ -540,6 +545,7 @@ struct ath12k_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id) u32 flags; arvif_iter.vdev_id = vdev_id; + arvif_iter.ar = ar; flags = IEEE80211_IFACE_ITER_RESUME_ALL; ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar), @@ -613,6 +619,53 @@ struct ath12k *ath12k_mac_get_ar_by_pdev_id(struct ath12k_base *ab, u32 pdev_id) return NULL; } +static struct ath12k *ath12k_mac_get_ar_by_chan(struct ieee80211_hw *hw, + struct ieee80211_channel *channel) +{ + struct ath12k_hw *ah = hw->priv; + struct ath12k *ar; + int i; + + ar = ah->radio; + + if (ah->num_radio == 1) + return ar; + + for_each_ar(ah, ar, i) { + if (channel->center_freq >= ar->freq_low && + channel->center_freq <= ar->freq_high) + return ar; + } + return NULL; +} + +static struct ath12k *ath12k_get_ar_by_ctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx) +{ + if (!ctx) + return NULL; + + return ath12k_mac_get_ar_by_chan(hw, ctx->def.chan); +} + +static struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + + /* If there is one pdev within ah, then we return + * ar directly. 
+ */ + if (ah->num_radio == 1) + return ah->radio; + + if (arvif->is_created) + return arvif->ar; + + return NULL; +} + static void ath12k_pdev_caps_update(struct ath12k *ar) { struct ath12k_base *ab = ar->ab; @@ -1169,7 +1222,7 @@ static int ath12k_mac_op_config(struct ieee80211_hw *hw, u32 changed) struct ath12k *ar; int ret; - ar = ath12k_ah_to_ar(ah); + ar = ath12k_ah_to_ar(ah, 0); ret = ath12k_mac_config(ar, changed); if (ret) @@ -1345,6 +1398,75 @@ static void ath12k_control_beaconing(struct ath12k_vif *arvif, ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id); } +static void ath12k_mac_handle_beacon_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct sk_buff *skb = data; + struct ieee80211_mgmt *mgmt = (void *)skb->data; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + + if (vif->type != NL80211_IFTYPE_STATION) + return; + + if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid)) + return; + + cancel_delayed_work(&arvif->connection_loss_work); +} + +void ath12k_mac_handle_beacon(struct ath12k *ar, struct sk_buff *skb) +{ + ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar), + IEEE80211_IFACE_ITER_NORMAL, + ath12k_mac_handle_beacon_iter, + skb); +} + +static void ath12k_mac_handle_beacon_miss_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + u32 *vdev_id = data; + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k *ar = arvif->ar; + struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); + + if (arvif->vdev_id != *vdev_id) + return; + + if (!arvif->is_up) + return; + + ieee80211_beacon_loss(vif); + + /* Firmware doesn't report beacon loss events repeatedly. If AP probe + * (done by mac80211) succeeds but beacons do not resume then it + * doesn't make sense to continue operation. Queue connection loss work + * which can be cancelled when beacon is received. 
+ */ + ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work, + ATH12K_CONNECTION_LOSS_HZ); +} + +void ath12k_mac_handle_beacon_miss(struct ath12k *ar, u32 vdev_id) +{ + ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar), + IEEE80211_IFACE_ITER_NORMAL, + ath12k_mac_handle_beacon_miss_iter, + &vdev_id); +} + +static void ath12k_mac_vif_sta_connection_loss_work(struct work_struct *work) +{ + struct ath12k_vif *arvif = container_of(work, struct ath12k_vif, + connection_loss_work.work); + struct ieee80211_vif *vif = arvif->vif; + + if (!arvif->is_up) + return; + + ieee80211_connection_loss(vif); +} + static void ath12k_peer_assoc_h_basic(struct ath12k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -2517,7 +2639,7 @@ static void ath12k_bss_disassoc(struct ath12k *ar, arvif->is_up = false; - /* TODO: cancel connection_loss_work */ + cancel_delayed_work(&arvif->connection_loss_work); } static u32 ath12k_mac_get_rate_hw_value(int bitrate) @@ -2961,16 +3083,42 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar, } } +static struct ath12k_vif_cache *ath12k_arvif_get_cache(struct ath12k_vif *arvif) +{ + if (!arvif->cache) + arvif->cache = kzalloc(sizeof(*arvif->cache), GFP_KERNEL); + + return arvif->cache; +} + +static void ath12k_arvif_put_cache(struct ath12k_vif *arvif) +{ + kfree(arvif->cache); + arvif->cache = NULL; +} + static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, u64 changed) { - struct ath12k_hw *ah = ath12k_hw_to_ah(hw); struct ath12k *ar; struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_vif_cache *cache; - ar = ath12k_ah_to_ar(ah); + ar = ath12k_get_ar_by_vif(hw, vif); + + /* if the vdev is not created on a certain radio, + * cache the info to be updated later on vdev creation + */ + + if (!ar) { + cache = ath12k_arvif_get_cache(arvif); + if (!cache) + return; + arvif->cache->bss_conf_changed |= changed; + return; + } mutex_lock(&ar->conf_mutex); @@ -2979,6 +3127,42 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw, mutex_unlock(&ar->conf_mutex); } +static struct ath12k* +ath12k_mac_select_scan_device(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_scan_request *req) +{ + struct ath12k_hw *ah = hw->priv; + enum nl80211_band band; + struct ath12k *ar; + int i; + + if (ah->num_radio == 1) + return ah->radio; + + /* Currently mac80211 supports splitting scan requests into + * multiple scan requests per band. 
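Beacon-miss handling above relies on a per-vif delayed work: a firmware beacon-miss event arms connection_loss_work for ATH12K_CONNECTION_LOSS_HZ, a beacon received from the BSS cancels it, and if it fires the work calls ieee80211_connection_loss(). A compact sketch of that lifecycle, assuming the work is initialized when the interface is added (that hunk is outside this excerpt); the function name is illustrative:

	/* Illustrative only: arm/disarm flow of the connection-loss work. */
	static void example_connection_loss_flow(struct ieee80211_hw *hw,
						 struct ath12k_vif *arvif)
	{
		/* typically done once at interface creation time */
		INIT_DELAYED_WORK(&arvif->connection_loss_work,
				  ath12k_mac_vif_sta_connection_loss_work);

		/* beacon miss reported by firmware: start the countdown */
		ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
					     ATH12K_CONNECTION_LOSS_HZ);

		/* beacon from our BSS seen again: stop the countdown */
		cancel_delayed_work(&arvif->connection_loss_work);
	}
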
+ * Loop through first channel and determine the scan radio + * TODO: There could be 5 GHz low/high channels in that case + * split the hw request and perform multiple scans + */ + + if (req->req.channels[0]->center_freq < ATH12K_MIN_5G_FREQ) + band = NL80211_BAND_2GHZ; + else if (req->req.channels[0]->center_freq < ATH12K_MIN_6G_FREQ) + band = NL80211_BAND_5GHZ; + else + band = NL80211_BAND_6GHZ; + + for_each_ar(ah, ar, i) { + /* TODO 5 GHz low high split changes */ + if (ar->mac.sbands[band].channels) + return ar; + } + + return NULL; +} + void __ath12k_mac_scan_finish(struct ath12k *ar) { struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); @@ -3148,15 +3332,68 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw, struct ieee80211_scan_request *hw_req) { struct ath12k_hw *ah = ath12k_hw_to_ah(hw); - struct ath12k *ar; + struct ath12k *ar, *prev_ar; struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct cfg80211_scan_request *req = &hw_req->req; struct ath12k_wmi_scan_req_arg arg = {}; int ret; int i; + bool create = true; + + if (ah->num_radio == 1) { + WARN_ON(!arvif->is_created); + ar = ath12k_ah_to_ar(ah, 0); + goto scan; + } + + /* Since the targeted scan device could depend on the frequency + * requested in the hw_req, select the corresponding radio + */ + ar = ath12k_mac_select_scan_device(hw, vif, hw_req); + if (!ar) + return -EINVAL; - ar = ath12k_ah_to_ar(ah); + /* If the vif is already assigned to a specific vdev of an ar, + * check whether its already started, vdev which is started + * are not allowed to switch to a new radio. + * If the vdev is not started, but was earlier created on a + * different ar, delete that vdev and create a new one. We don't + * delete at the scan stop as an optimization to avoid redundant + * delete-create vdev's for the same ar, in case the request is + * always on the same band for the vif + */ + if (arvif->is_created) { + if (WARN_ON(!arvif->ar)) + return -EINVAL; + if (ar != arvif->ar && arvif->is_started) + return -EINVAL; + + if (ar != arvif->ar) { + /* backup the previously used ar ptr, since the vdev delete + * would assign the arvif->ar to NULL after the call + */ + prev_ar = arvif->ar; + mutex_lock(&prev_ar->conf_mutex); + ret = ath12k_mac_vdev_delete(prev_ar, vif); + mutex_unlock(&prev_ar->conf_mutex); + if (ret) + ath12k_warn(prev_ar->ab, + "unable to delete scan vdev %d\n", ret); + } else { + create = false; + } + } + if (create) { + mutex_lock(&ar->conf_mutex); + ret = ath12k_mac_vdev_create(ar, vif); + mutex_unlock(&ar->conf_mutex); + if (ret) { + ath12k_warn(ar->ab, "unable to create scan vdev %d\n", ret); + return -EINVAL; + } + } +scan: mutex_lock(&ar->conf_mutex); spin_lock_bh(&ar->data_lock); @@ -3242,10 +3479,13 @@ exit: static void ath12k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { - struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct ath12k *ar; - ar = ath12k_ah_to_ar(ah); + if (!arvif->is_created) + return; + + ar = arvif->ar; mutex_lock(&ar->conf_mutex); ath12k_scan_abort(ar); @@ -3369,13 +3609,11 @@ static int ath12k_clear_peer_keys(struct ath12k_vif *arvif, return first_errno; } -static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, - struct ieee80211_vif *vif, struct ieee80211_sta *sta, - struct ieee80211_key_conf *key) +static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) { - struct ath12k_hw *ah = 
ath12k_hw_to_ah(hw); - struct ath12k *ar; - struct ath12k_base *ab; + struct ath12k_base *ab = ar->ab; struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct ath12k_peer *peer; struct ath12k_sta *arsta; @@ -3383,24 +3621,11 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, int ret = 0; u32 flags = 0; - /* BIP needs to be done in software */ - if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || - key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || - key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 || - key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256) - return 1; - - ar = ath12k_ah_to_ar(ah); - ab = ar->ab; + lockdep_assert_held(&ar->conf_mutex); - if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) + if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags)) return 1; - if (key->keyidx > WMI_MAX_KEY_INDEX) - return -ENOSPC; - - mutex_lock(&ar->conf_mutex); - if (sta) peer_addr = sta->addr; else if (arvif->vdev_type == WMI_VDEV_TYPE_STA) @@ -3492,6 +3717,47 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, spin_unlock_bh(&ab->base_lock); exit: + return ret; +} + +static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_vif_cache *cache; + struct ath12k *ar; + int ret; + + /* BIP needs to be done in software */ + if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || + key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || + key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 || + key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256) + return 1; + + if (key->keyidx > WMI_MAX_KEY_INDEX) + return -ENOSPC; + + ar = ath12k_get_ar_by_vif(hw, vif); + if (!ar) { + /* ar is expected to be valid when sta ptr is available */ + if (sta) { + WARN_ON_ONCE(1); + return -EINVAL; + } + + cache = ath12k_arvif_get_cache(arvif); + if (!cache) + return -ENOSPC; + cache->key_conf.cmd = cmd; + cache->key_conf.key = key; + cache->key_conf.changed = true; + return 0; + } + + mutex_lock(&ar->conf_mutex); + ret = ath12k_mac_set_key(ar, cmd, vif, sta, key); mutex_unlock(&ar->conf_mutex); return ret; } @@ -3968,7 +4234,6 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw, enum ieee80211_sta_state old_state, enum ieee80211_sta_state new_state) { - struct ath12k_hw *ah = ath12k_hw_to_ah(hw); struct ath12k *ar; struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); @@ -3980,7 +4245,11 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw, new_state == IEEE80211_STA_NOTEXIST)) cancel_work_sync(&arsta->update_wk); - ar = ath12k_ah_to_ar(ah); + ar = ath12k_get_ar_by_vif(hw, vif); + if (!ar) { + WARN_ON_ONCE(1); + return -EINVAL; + } mutex_lock(&ar->conf_mutex); @@ -4109,7 +4378,7 @@ static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw, if (txpwr > ATH12K_TX_POWER_MAX_VAL || txpwr < ATH12K_TX_POWER_MIN_VAL) return -EINVAL; - ar = ath12k_ah_to_ar(ah); + ar = ath12k_ah_to_ar(ah, 0); mutex_lock(&ar->conf_mutex); @@ -4131,14 +4400,17 @@ static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u32 changed) { - struct ath12k_hw *ah = ath12k_hw_to_ah(hw); struct ath12k *ar; struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct ath12k_peer *peer; u32 bw, smps; - ar = ath12k_ah_to_ar(ah); + ar = ath12k_get_ar_by_vif(hw, vif); + if 
(!ar) { + WARN_ON_ONCE(1); + return; + } spin_lock_bh(&ar->ab->base_lock); @@ -4314,12 +4586,22 @@ static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw, unsigned int link_id, u16 ac, const struct ieee80211_tx_queue_params *params) { - struct ath12k_hw *ah = ath12k_hw_to_ah(hw); struct ath12k *ar; struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_vif_cache *cache = arvif->cache; int ret; - ar = ath12k_ah_to_ar(ah); + ar = ath12k_get_ar_by_vif(hw, vif); + if (!ar) { + /* cache the info and apply after vdev is created */ + cache = ath12k_arvif_get_cache(arvif); + if (!cache) + return -ENOSPC; + cache->tx_conf.changed = true; + cache->tx_conf.ac = ac; + cache->tx_conf.tx_queue_params = *params; + return 0; + } mutex_lock(&ar->conf_mutex); ret = ath12k_mac_conf_tx(arvif, link_id, ac, params); @@ -4997,6 +5279,13 @@ static int __ath12k_set_antenna(struct ath12k *ar, u32 tx_ant, u32 rx_ant) if (ath12k_check_chain_mask(ar, rx_ant, false)) return -EINVAL; + /* Since we advertised the max cap of all radios combined during wiphy + * registration, ensure we don't set the antenna config higher than the + * limits + */ + tx_ant = min_t(u32, tx_ant, ar->pdev->cap.tx_chain_mask); + rx_ant = min_t(u32, rx_ant, ar->pdev->cap.rx_chain_mask); + ar->cfg_tx_chainmask = tx_ant; ar->cfg_rx_chainmask = rx_ant; @@ -5443,23 +5732,39 @@ err: return ret; } +static void ath12k_drain_tx(struct ath12k_hw *ah) +{ + struct ath12k *ar; + int i; + + for_each_ar(ah, ar, i) + ath12k_mac_drain_tx(ar); +} + static int ath12k_mac_op_start(struct ieee80211_hw *hw) { struct ath12k_hw *ah = ath12k_hw_to_ah(hw); - struct ath12k *ar = ath12k_ah_to_ar(ah); - struct ath12k_base *ab = ar->ab; - int ret; + struct ath12k *ar; + int ret, i; - ath12k_mac_drain_tx(ar); + ath12k_drain_tx(ah); - ret = ath12k_mac_start(ar); - if (ret) { - ath12k_err(ab, "fail to start mac operations in pdev idx %d ret %d\n", - ar->pdev_idx, ret); - return ret; + for_each_ar(ah, ar, i) { + ret = ath12k_mac_start(ar); + if (ret) { + ath12k_err(ar->ab, "fail to start mac operations in pdev idx %d ret %d\n", + ar->pdev_idx, ret); + goto fail_start; + } } return 0; +fail_start: + for (; i > 0; i--) { + ar = ath12k_ah_to_ar(ah, i - 1); + ath12k_mac_stop(ar); + } + return ret; } int ath12k_mac_rfkill_config(struct ath12k *ar) @@ -5555,11 +5860,13 @@ static void ath12k_mac_stop(struct ath12k *ar) static void ath12k_mac_op_stop(struct ieee80211_hw *hw) { struct ath12k_hw *ah = ath12k_hw_to_ah(hw); - struct ath12k *ar = ath12k_ah_to_ar(ah); + struct ath12k *ar; + int i; - ath12k_mac_drain_tx(ar); + ath12k_drain_tx(ah); - ath12k_mac_stop(ar); + for_each_ar(ah, ar, i) + ath12k_mac_stop(ar); } static u8 @@ -5732,64 +6039,24 @@ static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw, ath12k_mac_update_vif_offload(arvif); } -static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +static int ath12k_mac_vdev_create(struct ath12k *ar, struct ieee80211_vif *vif) { - struct ath12k_hw *ah = ath12k_hw_to_ah(hw); - struct ath12k *ar; - struct ath12k_base *ab; + struct ath12k_hw *ah = ar->ah; + struct ath12k_base *ab = ar->ab; + struct ieee80211_hw *hw = ah->hw; struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct ath12k_wmi_vdev_create_arg vdev_arg = {0}; struct ath12k_wmi_peer_create_arg peer_param; u32 param_id, param_value; u16 nss; int i; - int ret; - int bit; - - vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; - - ar = ath12k_ah_to_ar(ah); - ab = ar->ab; - - mutex_lock(&ar->conf_mutex); + int ret, 
vdev_id; - if (vif->type == NL80211_IFTYPE_AP && - ar->num_peers > (ar->max_num_peers - 1)) { - ath12k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n"); - ret = -ENOBUFS; - goto err; - } - - if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) { - ath12k_warn(ab, "failed to create vdev, reached max vdev limit %d\n", - TARGET_NUM_VDEVS); - ret = -EBUSY; - goto err; - } - - memset(arvif, 0, sizeof(*arvif)); + lockdep_assert_held(&ar->conf_mutex); arvif->ar = ar; - arvif->vif = vif; - - INIT_LIST_HEAD(&arvif->list); - - /* Should we initialize any worker to handle connection loss indication - * from firmware in sta mode? - */ - - for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) { - arvif->bitrate_mask.control[i].legacy = 0xffffffff; - memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff, - sizeof(arvif->bitrate_mask.control[i].ht_mcs)); - memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff, - sizeof(arvif->bitrate_mask.control[i].vht_mcs)); - } - - bit = __ffs64(ab->free_vdev_map); - - arvif->vdev_id = bit; + vdev_id = __ffs64(ab->free_vdev_map); + arvif->vdev_id = vdev_id; arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE; switch (vif->type) { @@ -5813,7 +6080,7 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw, break; case NL80211_IFTYPE_MONITOR: arvif->vdev_type = WMI_VDEV_TYPE_MONITOR; - ar->monitor_vdev_id = bit; + ar->monitor_vdev_id = vdev_id; break; case NL80211_IFTYPE_P2P_DEVICE: arvif->vdev_type = WMI_VDEV_TYPE_STA; @@ -5824,7 +6091,7 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw, break; } - ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac add interface id %d type %d subtype %d map %llx\n", + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev create id %d type %d subtype %d map %llx\n", arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype, ab->free_vdev_map); @@ -5842,6 +6109,7 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw, } ar->num_created_vdevs++; + arvif->is_created = true; ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM created, vdev_id %d\n", vif->addr, arvif->vdev_id); ar->allocated_vdev_map |= 1LL << arvif->vdev_id; @@ -5942,8 +6210,7 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw, if (vif->type != NL80211_IFTYPE_MONITOR && ar->monitor_conf_enabled) ath12k_mac_monitor_vdev_create(ar); - mutex_unlock(&ar->conf_mutex); - + arvif->ar = ar; return ret; err_peer_del: @@ -5969,6 +6236,8 @@ err_peer_del: err_vdev_del: ath12k_wmi_vdev_delete(ar, arvif->vdev_id); ar->num_created_vdevs--; + arvif->is_created = false; + arvif->ar = NULL; ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id); ab->free_vdev_map |= 1LL << arvif->vdev_id; ab->free_vdev_stats_id_map &= ~(1LL << arvif->vdev_stats_id); @@ -5977,9 +6246,171 @@ err_vdev_del: spin_unlock_bh(&ar->data_lock); err: + arvif->ar = NULL; + return ret; +} + +static void ath12k_mac_vif_cache_flush(struct ath12k *ar, struct ieee80211_vif *vif) +{ + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_vif_cache *cache = arvif->cache; + struct ath12k_base *ab = ar->ab; + + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + if (!cache) + return; + + if (cache->tx_conf.changed) { + ret = ath12k_mac_conf_tx(arvif, 0, cache->tx_conf.ac, + &cache->tx_conf.tx_queue_params); + if (ret) + ath12k_warn(ab, + "unable to apply tx config parameters to vdev %d\n", + ret); + } + + if (cache->bss_conf_changed) { + ath12k_mac_bss_info_changed(ar, arvif, &vif->bss_conf, + cache->bss_conf_changed); + } + + if (cache->key_conf.changed) { + ret = 
ath12k_mac_set_key(ar, cache->key_conf.cmd, vif, NULL, + cache->key_conf.key); + if (ret) + ath12k_warn(ab, "unable to apply set key param to vdev %d ret %d\n", + arvif->vdev_id, ret); + } + ath12k_arvif_put_cache(arvif); +} + +static struct ath12k *ath12k_mac_assign_vif_to_vdev(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_chanctx_conf *ctx) +{ + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_hw *ah = hw->priv; + struct ath12k *ar, *prev_ar; + struct ath12k_base *ab; + int ret; + + if (ah->num_radio == 1) + ar = ah->radio; + else if (ctx) + ar = ath12k_get_ar_by_ctx(hw, ctx); + else + return NULL; + + if (!ar) + return NULL; + + if (arvif->ar) { + /* This is not expected really */ + if (WARN_ON(!arvif->is_created)) { + arvif->ar = NULL; + return NULL; + } + + if (ah->num_radio == 1) + return arvif->ar; + + /* This can happen as scan vdev gets created during multiple scans + * across different radios before a vdev is brought up in + * a certain radio. + */ + if (ar != arvif->ar) { + if (WARN_ON(arvif->is_started)) + return NULL; + + /* backup the previously used ar ptr since arvif->ar would + * be set to NULL after vdev delete is done + */ + prev_ar = arvif->ar; + mutex_lock(&prev_ar->conf_mutex); + ret = ath12k_mac_vdev_delete(prev_ar, vif); + + if (ret) + ath12k_warn(prev_ar->ab, "unable to delete vdev %d\n", + ret); + mutex_unlock(&prev_ar->conf_mutex); + } + } + + ab = ar->ab; + + mutex_lock(&ar->conf_mutex); + + if (arvif->is_created) + goto flush; + + if (vif->type == NL80211_IFTYPE_AP && + ar->num_peers > (ar->max_num_peers - 1)) { + ath12k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n"); + goto unlock; + } + + if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) { + ath12k_warn(ab, "failed to create vdev, reached max vdev limit %d\n", + TARGET_NUM_VDEVS); + goto unlock; + } + + ret = ath12k_mac_vdev_create(ar, vif); + if (ret) { + ath12k_warn(ab, "failed to create vdev %pM ret %d", vif->addr, ret); + goto unlock; + } + +flush: + /* If the vdev is created during channel assign and not during + * add_interface(), Apply any parameters for the vdev which were received + * after add_interface, corresponding to this vif. 
+ */ + ath12k_mac_vif_cache_flush(ar, vif); +unlock: mutex_unlock(&ar->conf_mutex); + return arvif->ar; +} - return ret; +static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + int i; + + memset(arvif, 0, sizeof(*arvif)); + + arvif->vif = vif; + + INIT_LIST_HEAD(&arvif->list); + INIT_DELAYED_WORK(&arvif->connection_loss_work, + ath12k_mac_vif_sta_connection_loss_work); + + for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) { + arvif->bitrate_mask.control[i].legacy = 0xffffffff; + memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff, + sizeof(arvif->bitrate_mask.control[i].ht_mcs)); + memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff, + sizeof(arvif->bitrate_mask.control[i].vht_mcs)); + } + + /* Allocate Default Queue now and reassign during actual vdev create */ + vif->cab_queue = ATH12K_HW_DEFAULT_QUEUE; + for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++) + vif->hw_queue[i] = ATH12K_HW_DEFAULT_QUEUE; + + vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; + + /* For single radio wiphy(i.e ah->num_radio is 1), create the vdev + * during add_interface itself, for multi radio wiphy, defer the vdev + * creation until channel_assign to determine the radio on which the + * vdev needs to be created + */ + ath12k_mac_assign_vif_to_vdev(hw, vif, NULL); + return 0; } static void ath12k_mac_vif_unref(struct ath12k_dp *dp, struct ieee80211_vif *vif) @@ -6007,31 +6438,14 @@ static void ath12k_mac_vif_unref(struct ath12k_dp *dp, struct ieee80211_vif *vif } } -static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +static int ath12k_mac_vdev_delete(struct ath12k *ar, struct ieee80211_vif *vif) { - struct ath12k_hw *ah = ath12k_hw_to_ah(hw); - struct ath12k *ar; struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); - struct ath12k_base *ab; + struct ath12k_base *ab = ar->ab; unsigned long time_left; int ret; - ar = ath12k_ah_to_ar(ah); - ab = ar->ab; - - mutex_lock(&ar->conf_mutex); - - ath12k_dbg(ab, ATH12K_DBG_MAC, "mac remove interface (vdev %d)\n", - arvif->vdev_id); - - if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { - ret = ath12k_peer_delete(ar, arvif->vdev_id, vif->addr); - if (ret) - ath12k_warn(ab, "failed to submit AP self-peer removal on vdev %d: %d\n", - arvif->vdev_id, ret); - } - + lockdep_assert_held(&ar->conf_mutex); reinit_completion(&ar->vdev_delete_done); ret = ath12k_wmi_vdev_delete(ar, arvif->vdev_id); @@ -6048,6 +6462,10 @@ static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw, goto err_vdev_del; } + ab->free_vdev_map |= 1LL << arvif->vdev_id; + ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id); + ar->num_created_vdevs--; + if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { ar->monitor_vdev_id = -1; ar->monitor_vdev_created = false; @@ -6055,11 +6473,6 @@ static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw, ret = ath12k_mac_monitor_vdev_delete(ar); } - ab->free_vdev_map |= 1LL << (arvif->vdev_id); - ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id); - ab->free_vdev_stats_id_map &= ~(1LL << arvif->vdev_stats_id); - ar->num_created_vdevs--; - ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM deleted, vdev_id %d\n", vif->addr, arvif->vdev_id); @@ -6069,6 +6482,7 @@ err_vdev_del: spin_unlock_bh(&ar->data_lock); ath12k_peer_cleanup(ar, arvif->vdev_id); + ath12k_arvif_put_cache(arvif); idr_for_each(&ar->txmgmt_idr, ath12k_mac_vif_txmgmt_idr_remove, vif); @@ -6081,6 +6495,46 @@ err_vdev_del: clear_bit(ATH12K_FLAG_MONITOR_ENABLED, 
&ar->monitor_flags); /* TODO: recal traffic pause state based on the available vdevs */ + arvif->is_created = false; + arvif->ar = NULL; + + return ret; +} + +static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_base *ab; + struct ath12k *ar; + int ret; + + if (!arvif->is_created) { + /* if we cached some config but never received assign chanctx, + * free the allocated cache. + */ + ath12k_arvif_put_cache(arvif); + return; + } + + ar = arvif->ar; + ab = ar->ab; + + cancel_delayed_work_sync(&arvif->connection_loss_work); + + mutex_lock(&ar->conf_mutex); + + ath12k_dbg(ab, ATH12K_DBG_MAC, "mac remove interface (vdev %d)\n", + arvif->vdev_id); + + if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { + ret = ath12k_peer_delete(ar, arvif->vdev_id, vif->addr); + if (ret) + ath12k_warn(ab, "failed to submit AP self-peer removal on vdev %d: %d\n", + arvif->vdev_id, ret); + } + + ath12k_mac_vdev_delete(ar, vif); mutex_unlock(&ar->conf_mutex); } @@ -6132,7 +6586,7 @@ static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw, struct ath12k_hw *ah = ath12k_hw_to_ah(hw); struct ath12k *ar; - ar = ath12k_ah_to_ar(ah); + ar = ath12k_ah_to_ar(ah, 0); mutex_lock(&ar->conf_mutex); @@ -6145,16 +6599,19 @@ static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw, static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) { struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + int antennas_rx = 0, antennas_tx = 0; struct ath12k *ar; + int i; - ar = ath12k_ah_to_ar(ah); - - mutex_lock(&ar->conf_mutex); - - *tx_ant = ar->cfg_tx_chainmask; - *rx_ant = ar->cfg_rx_chainmask; + for_each_ar(ah, ar, i) { + mutex_lock(&ar->conf_mutex); + antennas_rx = max_t(u32, antennas_rx, ar->cfg_rx_chainmask); + antennas_tx = max_t(u32, antennas_tx, ar->cfg_tx_chainmask); + mutex_unlock(&ar->conf_mutex); + } - mutex_unlock(&ar->conf_mutex); + *tx_ant = antennas_tx; + *rx_ant = antennas_rx; return 0; } @@ -6163,13 +6620,16 @@ static int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx { struct ath12k_hw *ah = ath12k_hw_to_ah(hw); struct ath12k *ar; - int ret; - - ar = ath12k_ah_to_ar(ah); + int ret = 0; + int i; - mutex_lock(&ar->conf_mutex); - ret = __ath12k_set_antenna(ar, tx_ant, rx_ant); - mutex_unlock(&ar->conf_mutex); + for_each_ar(ah, ar, i) { + mutex_lock(&ar->conf_mutex); + ret = __ath12k_set_antenna(ar, tx_ant, rx_ant); + mutex_unlock(&ar->conf_mutex); + if (ret) + break; + } return ret; } @@ -6213,7 +6673,11 @@ static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw, struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); int ret = -EINVAL; - ar = ath12k_ah_to_ar(ah); + ar = ath12k_get_ar_by_vif(hw, vif); + if (!ar) + return -EINVAL; + + ar = ath12k_ah_to_ar(ah, 0); mutex_lock(&ar->conf_mutex); ret = ath12k_mac_ampdu_action(arvif, params); @@ -6229,15 +6693,17 @@ static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw, static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { - struct ath12k_hw *ah = ath12k_hw_to_ah(hw); struct ath12k *ar; struct ath12k_base *ab; - ar = ath12k_ah_to_ar(ah); + ar = ath12k_get_ar_by_ctx(hw, ctx); + if (!ar) + return -EINVAL; + ab = ar->ab; ath12k_dbg(ab, ATH12K_DBG_MAC, - "mac chanctx add freq %u width %d ptr %pK\n", + "mac chanctx add freq %u width %d ptr %p\n", ctx->def.chan->center_freq, ctx->def.width, ctx); mutex_lock(&ar->conf_mutex); @@ -6257,15 +6723,17 @@ static 
int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw, static void ath12k_mac_op_remove_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { - struct ath12k_hw *ah = ath12k_hw_to_ah(hw); struct ath12k *ar; struct ath12k_base *ab; - ar = ath12k_ah_to_ar(ah); + ar = ath12k_get_ar_by_ctx(hw, ctx); + if (!ar) + return; + ab = ar->ab; ath12k_dbg(ab, ATH12K_DBG_MAC, - "mac chanctx remove freq %u width %d ptr %pK\n", + "mac chanctx remove freq %u width %d ptr %p\n", ctx->def.chan->center_freq, ctx->def.width, ctx); mutex_lock(&ar->conf_mutex); @@ -6286,14 +6754,24 @@ ath12k_mac_check_down_grade_phy_mode(struct ath12k *ar, enum nl80211_band band, enum nl80211_iftype type) { - struct ieee80211_sta_eht_cap *eht_cap; + struct ieee80211_sta_eht_cap *eht_cap = NULL; enum wmi_phy_mode down_mode; + int n = ar->mac.sbands[band].n_iftype_data; + int i; + struct ieee80211_sband_iftype_data *data; if (mode < MODE_11BE_EHT20) return mode; - eht_cap = &ar->mac.iftype[band][type].eht_cap; - if (eht_cap->has_eht) + data = ar->mac.iftype[band]; + for (i = 0; i < n; i++) { + if (data[i].types_mask & BIT(type)) { + eht_cap = &data[i].eht_cap; + break; + } + } + + if (eht_cap && eht_cap->has_eht) return mode; switch (mode) { @@ -6468,14 +6946,19 @@ struct ath12k_mac_change_chanctx_arg { struct ieee80211_vif_chanctx_switch *vifs; int n_vifs; int next_vif; + struct ath12k *ar; }; static void ath12k_mac_change_chanctx_cnt_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct ath12k_mac_change_chanctx_arg *arg = data; + if (arvif->ar != arg->ar) + return; + if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != arg->ctx) return; @@ -6486,9 +6969,13 @@ static void ath12k_mac_change_chanctx_fill_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct ath12k_mac_change_chanctx_arg *arg = data; struct ieee80211_chanctx_conf *ctx; + if (arvif->ar != arg->ar) + return; + ctx = rcu_access_pointer(vif->bss_conf.chanctx_conf); if (ctx != arg->ctx) return; @@ -6502,6 +6989,57 @@ ath12k_mac_change_chanctx_fill_iter(void *data, u8 *mac, arg->next_vif++; } +static u32 ath12k_mac_nlwidth_to_wmiwidth(enum nl80211_chan_width width) +{ + switch (width) { + case NL80211_CHAN_WIDTH_20: + return WMI_CHAN_WIDTH_20; + case NL80211_CHAN_WIDTH_40: + return WMI_CHAN_WIDTH_40; + case NL80211_CHAN_WIDTH_80: + return WMI_CHAN_WIDTH_80; + case NL80211_CHAN_WIDTH_160: + return WMI_CHAN_WIDTH_160; + case NL80211_CHAN_WIDTH_80P80: + return WMI_CHAN_WIDTH_80P80; + case NL80211_CHAN_WIDTH_5: + return WMI_CHAN_WIDTH_5; + case NL80211_CHAN_WIDTH_10: + return WMI_CHAN_WIDTH_10; + case NL80211_CHAN_WIDTH_320: + return WMI_CHAN_WIDTH_320; + default: + WARN_ON(1); + return WMI_CHAN_WIDTH_20; + } +} + +static int ath12k_mac_update_peer_puncturing_width(struct ath12k *ar, + struct ath12k_vif *arvif, + struct cfg80211_chan_def def) +{ + u32 param_id, param_value; + int ret; + + if (arvif->vdev_type != WMI_VDEV_TYPE_STA) + return 0; + + param_id = WMI_PEER_CHWIDTH_PUNCTURE_20MHZ_BITMAP; + param_value = ath12k_mac_nlwidth_to_wmiwidth(def.width) | + u32_encode_bits((~def.punctured), + WMI_PEER_PUNCTURE_BITMAP); + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, + "punctured bitmap %02x width %d vdev %d\n", + def.punctured, def.width, arvif->vdev_id); + + ret = ath12k_wmi_set_peer_param(ar, arvif->bssid, + arvif->vdev_id, param_id, + param_value); + + return ret; +} + static void ath12k_mac_update_vif_chan(struct ath12k *ar, struct 
ieee80211_vif_chanctx_switch *vifs, @@ -6594,6 +7132,16 @@ ath12k_mac_update_vif_chan(struct ath12k *ar, arvif->vdev_id, ret); continue; } + + ret = ath12k_mac_update_peer_puncturing_width(arvif->ar, arvif, + vifs[i].new_ctx->def); + if (ret) { + ath12k_warn(ar->ab, + "failed to update puncturing bitmap %02x and width %d: %d\n", + vifs[i].new_ctx->def.punctured, + vifs[i].new_ctx->def.width, ret); + continue; + } } /* Restart the internal monitor vdev on new channel */ @@ -6607,7 +7155,7 @@ static void ath12k_mac_update_active_vif_chan(struct ath12k *ar, struct ieee80211_chanctx_conf *ctx) { - struct ath12k_mac_change_chanctx_arg arg = { .ctx = ctx }; + struct ath12k_mac_change_chanctx_arg arg = { .ctx = ctx, .ar = ar }; struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); lockdep_assert_held(&ar->conf_mutex); @@ -6637,17 +7185,19 @@ static void ath12k_mac_op_change_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx, u32 changed) { - struct ath12k_hw *ah = ath12k_hw_to_ah(hw); struct ath12k *ar; struct ath12k_base *ab; - ar = ath12k_ah_to_ar(ah); + ar = ath12k_get_ar_by_ctx(hw, ctx); + if (!ar) + return; + ab = ar->ab; mutex_lock(&ar->conf_mutex); ath12k_dbg(ab, ATH12K_DBG_MAC, - "mac chanctx change freq %u width %d ptr %pK changed %x\n", + "mac chanctx change freq %u width %d ptr %p changed %x\n", ctx->def.chan->center_freq, ctx->def.width, ctx, changed); /* This shouldn't really happen because channel switching should use @@ -6705,20 +7255,27 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx_conf *ctx) { - struct ath12k_hw *ah = ath12k_hw_to_ah(hw); struct ath12k *ar; struct ath12k_base *ab; struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); int ret; struct ath12k_wmi_peer_create_arg param; - ar = ath12k_ah_to_ar(ah); + /* For multi radio wiphy, the vdev was not created during add_interface + * create now since we have a channel ctx now to assign to a specific ar/fw + */ + ar = ath12k_mac_assign_vif_to_vdev(hw, vif, ctx); + if (!ar) { + WARN_ON(1); + return -EINVAL; + } + ab = ar->ab; mutex_lock(&ar->conf_mutex); ath12k_dbg(ab, ATH12K_DBG_MAC, - "mac chanctx assign ptr %pK vdev_id %i\n", + "mac chanctx assign ptr %p vdev_id %i\n", ctx, arvif->vdev_id); arvif->punct_bitmap = ctx->def.punctured; @@ -6788,19 +7345,28 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx_conf *ctx) { - struct ath12k_hw *ah = ath12k_hw_to_ah(hw); struct ath12k *ar; struct ath12k_base *ab; struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); int ret; - ar = ath12k_ah_to_ar(ah); + /* The vif is expected to be attached to an ar's VDEV. 
+ * We leave the vif/vdev in this function as is + * and not delete the vdev symmetric to assign_vif_chanctx() + * the VDEV will be deleted and unassigned either during + * remove_interface() or when there is a change in channel + * that moves the vif to a new ar + */ + if (!arvif->is_created) + return; + + ar = arvif->ar; ab = ar->ab; mutex_lock(&ar->conf_mutex); ath12k_dbg(ab, ATH12K_DBG_MAC, - "mac chanctx unassign ptr %pK vdev_id %i\n", + "mac chanctx unassign ptr %p vdev_id %i\n", ctx, arvif->vdev_id); WARN_ON(!arvif->is_started); @@ -6846,13 +7412,20 @@ ath12k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw, int n_vifs, enum ieee80211_chanctx_switch_mode mode) { - struct ath12k_hw *ah = ath12k_hw_to_ah(hw); struct ath12k *ar; - ar = ath12k_ah_to_ar(ah); + ar = ath12k_get_ar_by_ctx(hw, vifs->old_ctx); + if (!ar) + return -EINVAL; mutex_lock(&ar->conf_mutex); + /* Switching channels across radio is not allowed */ + if (ar != ath12k_get_ar_by_ctx(hw, vifs->new_ctx)) { + mutex_unlock(&ar->conf_mutex); + return -EINVAL; + } + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac chanctx switch n_vifs %d mode %d\n", n_vifs, mode); @@ -6893,11 +7466,21 @@ static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value) { struct ath12k_hw *ah = ath12k_hw_to_ah(hw); struct ath12k *ar; - int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD, ret; + int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD, ret = 0, i; - ar = ath12k_ah_to_ar(ah); - - ret = ath12k_set_vdev_param_to_all_vifs(ar, param_id, value); + /* Currently we set the rts threshold value to all the vifs across + * all radios of the single wiphy. + * TODO Once support for vif specific RTS threshold in mac80211 is + * available, ath12k can make use of it. + */ + for_each_ar(ah, ar, i) { + ret = ath12k_set_vdev_param_to_all_vifs(ar, param_id, value); + if (ret) { + ath12k_warn(ar->ab, "failed to set RTS config for all vdevs of pdev %d", + ar->pdev->pdev_id); + break; + } + } return ret; } @@ -6917,33 +7500,62 @@ static int ath12k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value) return -EOPNOTSUPP; } -static void ath12k_mac_flush(struct ath12k *ar) +static int ath12k_mac_flush(struct ath12k *ar) { long time_left; + int ret = 0; time_left = wait_event_timeout(ar->dp.tx_empty_waitq, (atomic_read(&ar->dp.num_tx_pending) == 0), ATH12K_FLUSH_TIMEOUT); - if (time_left == 0) - ath12k_warn(ar->ab, "failed to flush transmit queue %ld\n", time_left); + if (time_left == 0) { + ath12k_warn(ar->ab, + "failed to flush transmit queue, data pkts pending %d\n", + atomic_read(&ar->dp.num_tx_pending)); + ret = -ETIMEDOUT; + } time_left = wait_event_timeout(ar->txmgmt_empty_waitq, (atomic_read(&ar->num_pending_mgmt_tx) == 0), ATH12K_FLUSH_TIMEOUT); - if (time_left == 0) - ath12k_warn(ar->ab, "failed to flush mgmt transmit queue %ld\n", - time_left); + if (time_left == 0) { + ath12k_warn(ar->ab, + "failed to flush mgmt transmit queue, mgmt pkts pending %d\n", + atomic_read(&ar->num_pending_mgmt_tx)); + ret = -ETIMEDOUT; + } + + return ret; +} + +int ath12k_mac_wait_tx_complete(struct ath12k *ar) +{ + ath12k_mac_drain_tx(ar); + return ath12k_mac_flush(ar); } static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop) { struct ath12k_hw *ah = ath12k_hw_to_ah(hw); - struct ath12k *ar = ath12k_ah_to_ar(ah); + struct ath12k *ar; + int i; if (drop) return; + /* vif can be NULL when flush() is considered for hw */ + if (!vif) { + for_each_ar(ah, ar, i) + ath12k_mac_flush(ar); + return; + } + + ar = 
ath12k_get_ar_by_vif(hw, vif); + + if (!ar) + return; + ath12k_mac_flush(ar); } @@ -7145,6 +7757,9 @@ static void ath12k_mac_set_bitrate_mask_iter(void *data, struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); struct ath12k *ar = arvif->ar; + if (arsta->arvif != arvif) + return; + spin_lock_bh(&ar->data_lock); arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED; spin_unlock_bh(&ar->data_lock); @@ -7155,10 +7770,14 @@ static void ath12k_mac_set_bitrate_mask_iter(void *data, static void ath12k_mac_disable_peer_fixed_rate(void *data, struct ieee80211_sta *sta) { + struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); struct ath12k_vif *arvif = data; struct ath12k *ar = arvif->ar; int ret; + if (arsta->arvif != arvif) + return; + ret = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_PARAM_FIXED_RATE, @@ -7306,7 +7925,7 @@ ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw, if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART) return; - ar = ath12k_ah_to_ar(ah); + ar = ath12k_ah_to_ar(ah, 0); ab = ar->ab; mutex_lock(&ar->conf_mutex); @@ -7392,21 +8011,13 @@ ath12k_mac_update_bss_chan_survey(struct ath12k *ar, static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey) { - struct ath12k_hw *ah = ath12k_hw_to_ah(hw); struct ath12k *ar; struct ieee80211_supported_band *sband; struct survey_info *ar_survey; - int ret = 0; if (idx >= ATH12K_NUM_CHANS) return -ENOENT; - ar = ath12k_ah_to_ar(ah); - - ar_survey = &ar->survey[idx]; - - mutex_lock(&ar->conf_mutex); - sband = hw->wiphy->bands[NL80211_BAND_2GHZ]; if (sband && idx >= sband->n_channels) { idx -= sband->n_channels; @@ -7416,11 +8027,22 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx, if (!sband) sband = hw->wiphy->bands[NL80211_BAND_5GHZ]; - if (!sband || idx >= sband->n_channels) { - ret = -ENOENT; - goto exit; + if (!sband || idx >= sband->n_channels) + return -ENOENT; + + ar = ath12k_mac_get_ar_by_chan(hw, &sband->channels[idx]); + if (!ar) { + if (sband->channels[idx].flags & IEEE80211_CHAN_DISABLED) { + memset(survey, 0, sizeof(*survey)); + return 0; + } + return -ENOENT; } + ar_survey = &ar->survey[idx]; + + mutex_lock(&ar->conf_mutex); + ath12k_mac_update_bss_chan_survey(ar, &sband->channels[idx]); spin_lock_bh(&ar->data_lock); @@ -7432,10 +8054,8 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx, if (ar->rx_channel == survey->channel) survey->filled |= SURVEY_INFO_IN_USE; -exit: mutex_unlock(&ar->conf_mutex); - - return ret; + return 0; } static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw, @@ -7478,7 +8098,7 @@ static int ath12k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw, struct ath12k_hw *ah = ath12k_hw_to_ah(hw); struct ath12k *ar; - ar = ath12k_ah_to_ar(ah); + ar = ath12k_ah_to_ar(ah, 0); mutex_lock(&ar->conf_mutex); @@ -7508,7 +8128,7 @@ static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw, u32 scan_time_msec; int ret; - ar = ath12k_ah_to_ar(ah); + ar = ath12k_ah_to_ar(ah, 0); mutex_lock(&ar->conf_mutex); spin_lock_bh(&ar->data_lock); @@ -7643,6 +8263,9 @@ static void ath12k_mac_update_ch_list(struct ath12k *ar, band->channels[i].center_freq > freq_high) band->channels[i].flags |= IEEE80211_CHAN_DISABLED; } + + ar->freq_low = freq_low; + ar->freq_high = freq_high; } static u32 ath12k_get_phy_id(struct ath12k *ar, u32 band) @@ -7667,6 +8290,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar, { struct ieee80211_supported_band *band; struct 
ath12k_wmi_hal_reg_capabilities_ext_arg *reg_cap; + struct ath12k_hw *ah = ar->ah; void *channels; u32 phy_id; @@ -7721,6 +8345,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar, ath12k_mac_update_ch_list(ar, band, reg_cap->low_5ghz_chan, reg_cap->high_5ghz_chan); + ah->use_6ghz_regd = true; } if (reg_cap->low_5ghz_chan < ATH12K_MIN_6G_FREQ) { @@ -7757,10 +8382,12 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar, static u16 ath12k_mac_get_ifmodes(struct ath12k_hw *ah) { - struct ath12k *ar = ath12k_ah_to_ar(ah); + struct ath12k *ar; + int i; u16 interface_modes = U16_MAX; - interface_modes &= ar->ab->hw_params->interface_modes; + for_each_ar(ah, ar, i) + interface_modes &= ar->ab->hw_params->interface_modes; return interface_modes == U16_MAX ? 0 : interface_modes; } @@ -7768,15 +8395,19 @@ static u16 ath12k_mac_get_ifmodes(struct ath12k_hw *ah) static bool ath12k_mac_is_iface_mode_enable(struct ath12k_hw *ah, enum nl80211_iftype type) { - struct ath12k *ar = ath12k_ah_to_ar(ah); + struct ath12k *ar; + int i; u16 interface_modes, mode; bool is_enable = true; mode = BIT(type); - - interface_modes = ar->ab->hw_params->interface_modes; - if (!(interface_modes & mode)) - is_enable = false; + for_each_ar(ah, ar, i) { + interface_modes = ar->ab->hw_params->interface_modes; + if (!(interface_modes & mode)) { + is_enable = false; + break; + } + } return is_enable; } @@ -7906,13 +8537,16 @@ static void ath12k_mac_hw_unregister(struct ath12k_hw *ah) { struct ieee80211_hw *hw = ah->hw; struct wiphy *wiphy = hw->wiphy; - struct ath12k *ar = ath12k_ah_to_ar(ah); + struct ath12k *ar; + int i; - cancel_work_sync(&ar->regd_update_work); + for_each_ar(ah, ar, i) + cancel_work_sync(&ar->regd_update_work); ieee80211_unregister_hw(hw); - ath12k_mac_cleanup_unregister(ar); + for_each_ar(ah, ar, i) + ath12k_mac_cleanup_unregister(ar); kfree(wiphy->iface_combinations[0].limits); kfree(wiphy->iface_combinations); @@ -7952,7 +8586,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah) { struct ieee80211_hw *hw = ah->hw; struct wiphy *wiphy = hw->wiphy; - struct ath12k *ar = ath12k_ah_to_ar(ah); + struct ath12k *ar = ath12k_ah_to_ar(ah, 0); struct ath12k_base *ab = ar->ab; struct ath12k_pdev *pdev; struct ath12k_pdev_cap *cap; @@ -7967,39 +8601,71 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah) WLAN_CIPHER_SUITE_GCMP_256, WLAN_CIPHER_SUITE_CCMP_256, }; - int ret; - u32 ht_cap = 0; + int ret, i, j; + u32 ht_cap = U32_MAX, antennas_rx = 0, antennas_tx = 0; + bool is_6ghz = false, is_raw_mode = false, is_monitor_disable = false; + u8 *mac_addr = NULL; - pdev = ar->pdev; + wiphy->max_ap_assoc_sta = 0; - if (ab->pdevs_macaddr_valid) - ether_addr_copy(ar->mac_addr, pdev->mac_addr); - else - ether_addr_copy(ar->mac_addr, ab->mac_addr); + for_each_ar(ah, ar, i) { + u32 ht_cap_info = 0; - ret = ath12k_mac_setup_register(ar, &ht_cap, hw->wiphy->bands); - if (ret) - goto out; + pdev = ar->pdev; + if (ar->ab->pdevs_macaddr_valid) { + ether_addr_copy(ar->mac_addr, pdev->mac_addr); + } else { + ether_addr_copy(ar->mac_addr, ar->ab->mac_addr); + ar->mac_addr[4] += ar->pdev_idx; + } + + ret = ath12k_mac_setup_register(ar, &ht_cap_info, hw->wiphy->bands); + if (ret) + goto err_cleanup_unregister; + + ht_cap &= ht_cap_info; + wiphy->max_ap_assoc_sta += ar->max_num_stations; + + /* Advertise the max antenna support of all radios, driver can handle + * per pdev specific antenna setting based on pdev cap when antenna + * changes are made + */ + cap = &pdev->cap; + + antennas_rx = 
max_t(u32, antennas_rx, cap->rx_chain_mask); + antennas_tx = max_t(u32, antennas_tx, cap->tx_chain_mask); - wiphy->max_ap_assoc_sta = ar->max_num_stations; + if (ar->supports_6ghz) + is_6ghz = true; - cap = &pdev->cap; + if (test_bit(ATH12K_FLAG_RAW_MODE, &ar->ab->dev_flags)) + is_raw_mode = true; + + if (!ar->ab->hw_params->supports_monitor) + is_monitor_disable = true; + + if (i == 0) + mac_addr = ar->mac_addr; + else + mac_addr = ab->mac_addr; + } - wiphy->available_antennas_rx = cap->rx_chain_mask; - wiphy->available_antennas_tx = cap->tx_chain_mask; + wiphy->available_antennas_rx = antennas_rx; + wiphy->available_antennas_tx = antennas_tx; - SET_IEEE80211_PERM_ADDR(hw, ar->mac_addr); + SET_IEEE80211_PERM_ADDR(hw, mac_addr); SET_IEEE80211_DEV(hw, ab->dev); ret = ath12k_mac_setup_iface_combinations(ah); if (ret) { ath12k_err(ab, "failed to setup interface combinations: %d\n", ret); - goto err_cleanup_unregister; + goto err_complete_cleanup_unregister; } wiphy->interface_modes = ath12k_mac_get_ifmodes(ah); - if (wiphy->bands[NL80211_BAND_2GHZ] && + if (ah->num_radio == 1 && + wiphy->bands[NL80211_BAND_2GHZ] && wiphy->bands[NL80211_BAND_5GHZ] && wiphy->bands[NL80211_BAND_6GHZ]) ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); @@ -8050,6 +8716,12 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah) wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | NL80211_FEATURE_AP_SCAN; + /* MLO is not yet supported so disable Wireless Extensions for now + * to make sure ath12k users don't use it. This flag can be removed + * once WIPHY_FLAG_SUPPORTS_MLO is enabled. + */ + wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT; + hw->queues = ATH12K_HW_MAX_QUEUES; wiphy->tx_queue_len = ATH12K_QUEUE_LEN; hw->offchannel_tx_hw_queue = ATH12K_HW_MAX_QUEUES - 1; @@ -8067,7 +8739,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah) wiphy->iftype_ext_capab = ath12k_iftypes_ext_capa; wiphy->num_iftype_ext_capab = ARRAY_SIZE(ath12k_iftypes_ext_capa); - if (ar->supports_6ghz) { + if (is_6ghz) { wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_FILS_DISCOVERY); wiphy_ext_feature_set(wiphy, @@ -8078,7 +8750,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah) ath12k_reg_init(hw); - if (!test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) { + if (!is_raw_mode) { hw->netdev_features = NETIF_F_HW_CSUM; ieee80211_hw_set(hw, SW_CRYPTO_CONTROL); ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); @@ -8090,7 +8762,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah) goto err_free_if_combs; } - if (!ab->hw_params->supports_monitor) + if (is_monitor_disable) /* There's a race between calling ieee80211_register_hw() * and here where the monitor mode is enabled for a little * while. 
But that time is so short and in practise it make @@ -8098,13 +8770,17 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah) */ wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR); - /* Apply the regd received during initialization */ - ret = ath12k_regd_update(ar, true); - if (ret) { - ath12k_err(ar->ab, "ath12k regd update failed: %d\n", ret); - goto err_unregister_hw; + for_each_ar(ah, ar, i) { + /* Apply the regd received during initialization */ + ret = ath12k_regd_update(ar, true); + if (ret) { + ath12k_err(ar->ab, "ath12k regd update failed: %d\n", ret); + goto err_unregister_hw; + } } + ath12k_debugfs_register(ar); + return 0; err_unregister_hw: @@ -8114,10 +8790,15 @@ err_free_if_combs: kfree(wiphy->iface_combinations[0].limits); kfree(wiphy->iface_combinations); +err_complete_cleanup_unregister: + i = ah->num_radio; + err_cleanup_unregister: - ath12k_mac_cleanup_unregister(ar); + for (j = 0; j < i; j++) { + ar = ath12k_ah_to_ar(ah, j); + ath12k_mac_cleanup_unregister(ar); + } -out: SET_IEEE80211_DEV(hw, NULL); return ret; @@ -8243,7 +8924,7 @@ static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_base *ab, pdev_idx = pdev_map[i].pdev_idx; pdev = &ab->pdevs[pdev_idx]; - ar = ath12k_ah_to_ar(ah); + ar = ath12k_ah_to_ar(ah, i); ar->ah = ah; ar->ab = ab; ar->hw_link_id = i; diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h index 3f5e1be0dff9..69fd282b9dd3 100644 --- a/drivers/net/wireless/ath/ath12k/mac.h +++ b/drivers/net/wireless/ath/ath12k/mac.h @@ -78,4 +78,8 @@ enum ath12k_supported_bw ath12k_mac_mac80211_bw_to_ath12k_bw(enum rate_info_bw b enum hal_encrypt_type ath12k_dp_tx_get_encrypt_type(u32 cipher); int ath12k_mac_rfkill_enable_radio(struct ath12k *ar, bool enable); int ath12k_mac_rfkill_config(struct ath12k *ar); +int ath12k_mac_wait_tx_complete(struct ath12k *ar); +void ath12k_mac_handle_beacon(struct ath12k *ar, struct sk_buff *skb); +void ath12k_mac_handle_beacon_miss(struct ath12k *ar, u32 vdev_id); + #endif diff --git a/drivers/net/wireless/ath/ath12k/mhi.c b/drivers/net/wireless/ath/ath12k/mhi.c index adb8c3ec1950..fef2f7622033 100644 --- a/drivers/net/wireless/ath/ath12k/mhi.c +++ b/drivers/net/wireless/ath/ath12k/mhi.c @@ -19,34 +19,6 @@ static const struct mhi_channel_config ath12k_mhi_channels_qcn9274[] = { { - .num = 0, - .name = "LOOPBACK", - .num_elements = 32, - .event_ring = 1, - .dir = DMA_TO_DEVICE, - .ee_mask = 0x4, - .pollcfg = 0, - .doorbell = MHI_DB_BRST_DISABLE, - .lpm_notify = false, - .offload_channel = false, - .doorbell_mode_switch = false, - .auto_queue = false, - }, - { - .num = 1, - .name = "LOOPBACK", - .num_elements = 32, - .event_ring = 1, - .dir = DMA_FROM_DEVICE, - .ee_mask = 0x4, - .pollcfg = 0, - .doorbell = MHI_DB_BRST_DISABLE, - .lpm_notify = false, - .offload_channel = false, - .doorbell_mode_switch = false, - .auto_queue = false, - }, - { .num = 20, .name = "IPCR", .num_elements = 32, @@ -112,34 +84,6 @@ const struct mhi_controller_config ath12k_mhi_config_qcn9274 = { static const struct mhi_channel_config ath12k_mhi_channels_wcn7850[] = { { - .num = 0, - .name = "LOOPBACK", - .num_elements = 32, - .event_ring = 0, - .dir = DMA_TO_DEVICE, - .ee_mask = 0x4, - .pollcfg = 0, - .doorbell = MHI_DB_BRST_DISABLE, - .lpm_notify = false, - .offload_channel = false, - .doorbell_mode_switch = false, - .auto_queue = false, - }, - { - .num = 1, - .name = "LOOPBACK", - .num_elements = 32, - .event_ring = 0, - .dir = DMA_FROM_DEVICE, - .ee_mask = 0x4, - .pollcfg = 0, - .doorbell = 
MHI_DB_BRST_DISABLE, - .lpm_notify = false, - .offload_channel = false, - .doorbell_mode_switch = false, - .auto_queue = false, - }, - { .num = 20, .name = "IPCR", .num_elements = 64, @@ -196,7 +140,7 @@ const struct mhi_controller_config ath12k_mhi_config_wcn7850 = { .max_channels = 128, .timeout_ms = 2000, .use_bounce_buf = false, - .buf_len = 0, + .buf_len = 8192, .num_channels = ARRAY_SIZE(ath12k_mhi_channels_wcn7850), .ch_cfg = ath12k_mhi_channels_wcn7850, .num_events = ARRAY_SIZE(ath12k_mhi_events_wcn7850), @@ -385,7 +329,6 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci) "failed to read board id\n"); } else if (board_id & OTP_VALID_DUALMAC_BOARD_ID_MASK) { dualmac = true; - ab->slo_capable = false; ath12k_dbg(ab, ATH12K_DBG_BOOT, "dualmac fw selected for board id: %x\n", board_id); } @@ -470,6 +413,8 @@ static char *ath12k_mhi_state_to_str(enum ath12k_mhi_state mhi_state) return "POWER_ON"; case ATH12K_MHI_POWER_OFF: return "POWER_OFF"; + case ATH12K_MHI_POWER_OFF_KEEP_DEV: + return "POWER_OFF_KEEP_DEV"; case ATH12K_MHI_FORCE_POWER_OFF: return "FORCE_POWER_OFF"; case ATH12K_MHI_SUSPEND: @@ -501,6 +446,7 @@ static void ath12k_mhi_set_state_bit(struct ath12k_pci *ab_pci, set_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state); break; case ATH12K_MHI_POWER_OFF: + case ATH12K_MHI_POWER_OFF_KEEP_DEV: case ATH12K_MHI_FORCE_POWER_OFF: clear_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state); clear_bit(ATH12K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state); @@ -544,6 +490,7 @@ static int ath12k_mhi_check_state_bit(struct ath12k_pci *ab_pci, return 0; break; case ATH12K_MHI_POWER_OFF: + case ATH12K_MHI_POWER_OFF_KEEP_DEV: case ATH12K_MHI_SUSPEND: if (test_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state) && !test_bit(ATH12K_MHI_SUSPEND, &ab_pci->mhi_state)) @@ -594,12 +541,27 @@ static int ath12k_mhi_set_state(struct ath12k_pci *ab_pci, ret = 0; break; case ATH12K_MHI_POWER_ON: - ret = mhi_async_power_up(ab_pci->mhi_ctrl); + /* In case of resume, QRTR's resume_early() is called + * right after ath12k' resume_early(). Since QRTR requires + * MHI mission mode state when preparing IPCR channels + * (see ee_mask of that channel), we need to use the 'sync' + * version here to make sure MHI is in that state when we + * return. Or QRTR might resume before that state comes, + * and as a result it fails. + * + * The 'sync' version works for non-resume (normal power on) + * case as well. + */ + ret = mhi_sync_power_up(ab_pci->mhi_ctrl); break; case ATH12K_MHI_POWER_OFF: mhi_power_down(ab_pci->mhi_ctrl, true); ret = 0; break; + case ATH12K_MHI_POWER_OFF_KEEP_DEV: + mhi_power_down_keep_dev(ab_pci->mhi_ctrl, true); + ret = 0; + break; case ATH12K_MHI_FORCE_POWER_OFF: mhi_power_down(ab_pci->mhi_ctrl, false); ret = 0; @@ -653,9 +615,17 @@ out: return ret; } -void ath12k_mhi_stop(struct ath12k_pci *ab_pci) +void ath12k_mhi_stop(struct ath12k_pci *ab_pci, bool is_suspend) { - ath12k_mhi_set_state(ab_pci, ATH12K_MHI_POWER_OFF); + /* During suspend we need to use mhi_power_down_keep_dev() + * workaround, otherwise ath12k_core_resume() will timeout + * during resume. 
+ */ + if (is_suspend) + ath12k_mhi_set_state(ab_pci, ATH12K_MHI_POWER_OFF_KEEP_DEV); + else + ath12k_mhi_set_state(ab_pci, ATH12K_MHI_POWER_OFF); + ath12k_mhi_set_state(ab_pci, ATH12K_MHI_DEINIT); } diff --git a/drivers/net/wireless/ath/ath12k/mhi.h b/drivers/net/wireless/ath/ath12k/mhi.h index ebc23640ce7a..9362ad1958c3 100644 --- a/drivers/net/wireless/ath/ath12k/mhi.h +++ b/drivers/net/wireless/ath/ath12k/mhi.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef _ATH12K_MHI_H #define _ATH12K_MHI_H @@ -22,6 +22,7 @@ enum ath12k_mhi_state { ATH12K_MHI_DEINIT, ATH12K_MHI_POWER_ON, ATH12K_MHI_POWER_OFF, + ATH12K_MHI_POWER_OFF_KEEP_DEV, ATH12K_MHI_FORCE_POWER_OFF, ATH12K_MHI_SUSPEND, ATH12K_MHI_RESUME, @@ -34,7 +35,7 @@ extern const struct mhi_controller_config ath12k_mhi_config_qcn9274; extern const struct mhi_controller_config ath12k_mhi_config_wcn7850; int ath12k_mhi_start(struct ath12k_pci *ar_pci); -void ath12k_mhi_stop(struct ath12k_pci *ar_pci); +void ath12k_mhi_stop(struct ath12k_pci *ar_pci, bool is_suspend); int ath12k_mhi_register(struct ath12k_pci *ar_pci); void ath12k_mhi_unregister(struct ath12k_pci *ar_pci); void ath12k_mhi_set_mhictrl_reset(struct ath12k_base *ab); diff --git a/drivers/net/wireless/ath/ath12k/p2p.c b/drivers/net/wireless/ath/ath12k/p2p.c index d334df720032..3a851ee15b2f 100644 --- a/drivers/net/wireless/ath/ath12k/p2p.c +++ b/drivers/net/wireless/ath/ath12k/p2p.c @@ -121,7 +121,7 @@ static void ath12k_p2p_noa_update_vdev_iter(void *data, u8 *mac, struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct ath12k_p2p_noa_arg *arg = data; - if (arvif->vdev_id != arg->vdev_id) + if (arvif->ar != arg->ar || arvif->vdev_id != arg->vdev_id) return; ath12k_p2p_noa_update(arvif, arg->noa); @@ -132,6 +132,7 @@ void ath12k_p2p_noa_update_by_vdev_id(struct ath12k *ar, u32 vdev_id, { struct ath12k_p2p_noa_arg arg = { .vdev_id = vdev_id, + .ar = ar, .noa = noa, }; diff --git a/drivers/net/wireless/ath/ath12k/p2p.h b/drivers/net/wireless/ath/ath12k/p2p.h index 5768139a7844..b2eec51a9900 100644 --- a/drivers/net/wireless/ath/ath12k/p2p.h +++ b/drivers/net/wireless/ath/ath12k/p2p.h @@ -12,6 +12,7 @@ struct ath12k_wmi_p2p_noa_info; struct ath12k_p2p_noa_arg { u32 vdev_id; + struct ath12k *ar; const struct ath12k_wmi_p2p_noa_info *noa; }; diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c index 14954bc05144..16af046c33d9 100644 --- a/drivers/net/wireless/ath/ath12k/pci.c +++ b/drivers/net/wireless/ath/ath12k/pci.c @@ -872,7 +872,7 @@ static int ath12k_pci_claim(struct ath12k_pci *ab_pci, struct pci_dev *pdev) goto release_region; } - ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot pci_mem 0x%pK\n", ab->mem); + ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot pci_mem 0x%p\n", ab->mem); return 0; release_region: @@ -1271,7 +1271,7 @@ int ath12k_pci_power_up(struct ath12k_base *ab) return 0; } -void ath12k_pci_power_down(struct ath12k_base *ab) +void ath12k_pci_power_down(struct ath12k_base *ab, bool is_suspend) { struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); @@ -1280,7 +1280,7 @@ void ath12k_pci_power_down(struct ath12k_base *ab) ath12k_pci_force_wake(ab_pci->ab); ath12k_pci_msi_disable(ab_pci); - ath12k_mhi_stop(ab_pci); + ath12k_mhi_stop(ab_pci, is_suspend); 
clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags); ath12k_pci_sw_reset(ab_pci->ab, false); } @@ -1503,7 +1503,7 @@ static void ath12k_pci_remove(struct pci_dev *pdev) ath12k_pci_set_irq_affinity_hint(ab_pci, NULL); if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) { - ath12k_pci_power_down(ab); + ath12k_pci_power_down(ab, false); ath12k_qmi_deinit_service(ab); goto qmi_fail; } @@ -1531,7 +1531,7 @@ static void ath12k_pci_shutdown(struct pci_dev *pdev) struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); ath12k_pci_set_irq_affinity_hint(ab_pci, NULL); - ath12k_pci_power_down(ab); + ath12k_pci_power_down(ab, false); } static __maybe_unused int ath12k_pci_pm_suspend(struct device *dev) @@ -1558,9 +1558,36 @@ static __maybe_unused int ath12k_pci_pm_resume(struct device *dev) return ret; } -static SIMPLE_DEV_PM_OPS(ath12k_pci_pm_ops, - ath12k_pci_pm_suspend, - ath12k_pci_pm_resume); +static __maybe_unused int ath12k_pci_pm_suspend_late(struct device *dev) +{ + struct ath12k_base *ab = dev_get_drvdata(dev); + int ret; + + ret = ath12k_core_suspend_late(ab); + if (ret) + ath12k_warn(ab, "failed to late suspend core: %d\n", ret); + + return ret; +} + +static __maybe_unused int ath12k_pci_pm_resume_early(struct device *dev) +{ + struct ath12k_base *ab = dev_get_drvdata(dev); + int ret; + + ret = ath12k_core_resume_early(ab); + if (ret) + ath12k_warn(ab, "failed to early resume core: %d\n", ret); + + return ret; +} + +static const struct dev_pm_ops __maybe_unused ath12k_pci_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(ath12k_pci_pm_suspend, + ath12k_pci_pm_resume) + SET_LATE_SYSTEM_SLEEP_PM_OPS(ath12k_pci_pm_suspend_late, + ath12k_pci_pm_resume_early) +}; static struct pci_driver ath12k_pci_driver = { .name = "ath12k_pci", diff --git a/drivers/net/wireless/ath/ath12k/pci.h b/drivers/net/wireless/ath/ath12k/pci.h index ca93693ba4e9..6186a78038cf 100644 --- a/drivers/net/wireless/ath/ath12k/pci.h +++ b/drivers/net/wireless/ath/ath12k/pci.h @@ -143,5 +143,5 @@ int ath12k_pci_hif_resume(struct ath12k_base *ab); void ath12k_pci_stop(struct ath12k_base *ab); int ath12k_pci_start(struct ath12k_base *ab); int ath12k_pci_power_up(struct ath12k_base *ab); -void ath12k_pci_power_down(struct ath12k_base *ab); +void ath12k_pci_power_down(struct ath12k_base *ab, bool is_suspend); #endif /* ATH12K_PCI_H */ diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c index 92845ffff44a..5484112859a6 100644 --- a/drivers/net/wireless/ath/ath12k/qmi.c +++ b/drivers/net/wireless/ath/ath12k/qmi.c @@ -583,6 +583,24 @@ static const struct qmi_elem_info qmi_wlanfw_phy_cap_resp_msg_v01_ei[] = { board_id), }, { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01, + single_chip_mlo_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01, + single_chip_mlo_support), + }, + { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, @@ -2005,7 +2023,15 @@ static void ath12k_host_cap_parse_mlo(struct ath12k_base *ab, u8 hw_link_id = 0; int i; + if (!(ab->mlo_capable_flags & ATH12K_INTRA_DEVICE_MLO_SUPPORT)) { + ath12k_dbg(ab, ATH12K_DBG_QMI, + "intra device MLO is disabled hence skip QMI MLO cap"); + return; + } + if (!ab->qmi.num_radios || ab->qmi.num_radios == U8_MAX) { + ab->mlo_capable_flags = 0; + 
ath12k_dbg(ab, ATH12K_DBG_QMI, "skip QMI MLO cap due to invalid num_radio %d\n", ab->qmi.num_radios); @@ -2124,9 +2150,6 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab) struct qmi_txn txn; int ret; - if (!ab->slo_capable) - goto out; - ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlanfw_phy_cap_resp_msg_v01_ei, &resp); if (ret < 0) @@ -2151,6 +2174,13 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab) goto out; } + if (resp.single_chip_mlo_support_valid) { + if (resp.single_chip_mlo_support) + ab->mlo_capable_flags |= ATH12K_INTRA_DEVICE_MLO_SUPPORT; + else + ab->mlo_capable_flags &= ~ATH12K_INTRA_DEVICE_MLO_SUPPORT; + } + if (!resp.num_phy_valid) { ret = -ENODATA; goto out; @@ -2158,9 +2188,11 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab) ab->qmi.num_radios = resp.num_phy; - ath12k_dbg(ab, ATH12K_DBG_QMI, "phy capability resp valid %d num_phy %d valid %d board_id %d\n", + ath12k_dbg(ab, ATH12K_DBG_QMI, + "phy capability resp valid %d num_phy %d valid %d board_id %d valid %d single_chip_mlo_support %d\n", resp.num_phy_valid, resp.num_phy, - resp.board_id_valid, resp.board_id); + resp.board_id_valid, resp.board_id, + resp.single_chip_mlo_support_valid, resp.single_chip_mlo_support); return; @@ -2325,8 +2357,9 @@ static void ath12k_qmi_free_target_mem_chunk(struct ath12k_base *ab) for (i = 0; i < ab->qmi.mem_seg_count; i++) { if (!ab->qmi.target_mem[i].v.addr) continue; + dma_free_coherent(ab->dev, - ab->qmi.target_mem[i].size, + ab->qmi.target_mem[i].prev_size, ab->qmi.target_mem[i].v.addr, ab->qmi.target_mem[i].paddr); ab->qmi.target_mem[i].v.addr = NULL; @@ -2352,6 +2385,20 @@ static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab) case M3_DUMP_REGION_TYPE: case PAGEABLE_MEM_REGION_TYPE: case CALDB_MEM_REGION_TYPE: + /* Firmware reloads in recovery/resume. + * In such cases, no need to allocate memory for FW again. 
+ */ + if (chunk->v.addr) { + if (chunk->prev_type == chunk->type && + chunk->prev_size == chunk->size) + goto this_chunk_done; + + /* cannot reuse the existing chunk */ + dma_free_coherent(ab->dev, chunk->prev_size, + chunk->v.addr, chunk->paddr); + chunk->v.addr = NULL; + } + chunk->v.addr = dma_alloc_coherent(ab->dev, chunk->size, &chunk->paddr, @@ -2370,6 +2417,10 @@ static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab) chunk->type, chunk->size); return -ENOMEM; } + + chunk->prev_type = chunk->type; + chunk->prev_size = chunk->size; +this_chunk_done: break; default: ath12k_warn(ab, "memory type %u not supported\n", @@ -2666,6 +2717,19 @@ out: return ret; } +static void ath12k_qmi_m3_free(struct ath12k_base *ab) +{ + struct m3_mem_region *m3_mem = &ab->qmi.m3_mem; + + if (!m3_mem->vaddr) + return; + + dma_free_coherent(ab->dev, m3_mem->size, + m3_mem->vaddr, m3_mem->paddr); + m3_mem->vaddr = NULL; + m3_mem->size = 0; +} + static int ath12k_qmi_m3_load(struct ath12k_base *ab) { struct m3_mem_region *m3_mem = &ab->qmi.m3_mem; @@ -2675,10 +2739,6 @@ static int ath12k_qmi_m3_load(struct ath12k_base *ab) size_t m3_len; int ret; - if (m3_mem->vaddr) - /* m3 firmware buffer is already available in the DMA buffer */ - return 0; - if (ab->fw.m3_data && ab->fw.m3_len > 0) { /* firmware-N.bin had a m3 firmware file so use that */ m3_data = ab->fw.m3_data; @@ -2700,6 +2760,15 @@ static int ath12k_qmi_m3_load(struct ath12k_base *ab) m3_len = fw->size; } + /* In recovery/resume cases, M3 buffer is not freed, try to reuse that */ + if (m3_mem->vaddr) { + if (m3_mem->size >= m3_len) + goto skip_m3_alloc; + + /* Old buffer is too small, free and reallocate */ + ath12k_qmi_m3_free(ab); + } + m3_mem->vaddr = dma_alloc_coherent(ab->dev, m3_len, &m3_mem->paddr, GFP_KERNEL); @@ -2710,6 +2779,7 @@ static int ath12k_qmi_m3_load(struct ath12k_base *ab) goto out; } +skip_m3_alloc: memcpy(m3_mem->vaddr, m3_data, m3_len); m3_mem->size = m3_len; @@ -2721,19 +2791,6 @@ out: return ret; } -static void ath12k_qmi_m3_free(struct ath12k_base *ab) -{ - struct m3_mem_region *m3_mem = &ab->qmi.m3_mem; - - if (!m3_mem->vaddr) - return; - - dma_free_coherent(ab->dev, m3_mem->size, - m3_mem->vaddr, m3_mem->paddr); - m3_mem->vaddr = NULL; - m3_mem->size = 0; -} - static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab) { struct m3_mem_region *m3_mem = &ab->qmi.m3_mem; @@ -3178,6 +3235,9 @@ static const struct qmi_msg_handler ath12k_qmi_msg_handlers[] = { .decoded_size = sizeof(struct qmi_wlanfw_fw_ready_ind_msg_v01), .fn = ath12k_qmi_msg_fw_ready_cb, }, + + /* end of list */ + {}, }; static int ath12k_qmi_ops_new_server(struct qmi_handle *qmi_hdl, @@ -3261,7 +3321,8 @@ static void ath12k_qmi_driver_event_work(struct work_struct *work) case ATH12K_QMI_EVENT_FW_READY: clear_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags); if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) { - ath12k_hal_dump_srng_stats(ab); + if (ab->is_reset) + ath12k_hal_dump_srng_stats(ab); queue_work(ab->workqueue, &ab->restart_work); break; } diff --git a/drivers/net/wireless/ath/ath12k/qmi.h b/drivers/net/wireless/ath/ath12k/qmi.h index 6ee33c9851c6..0dfcbd8cb59b 100644 --- a/drivers/net/wireless/ath/ath12k/qmi.h +++ b/drivers/net/wireless/ath/ath12k/qmi.h @@ -96,6 +96,8 @@ struct ath12k_qmi_event_msg { struct target_mem_chunk { u32 size; u32 type; + u32 prev_size; + u32 prev_type; dma_addr_t paddr; union { void __iomem *ioaddr; @@ -265,6 +267,8 @@ struct qmi_wlanfw_phy_cap_resp_msg_v01 { u8 num_phy; u8 board_id_valid; u32 board_id; + 
u8 single_chip_mlo_support_valid; + u8 single_chip_mlo_support; }; #define QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN 54 diff --git a/drivers/net/wireless/ath/ath12k/reg.c b/drivers/net/wireless/ath/ath12k/reg.c index f308e9a6ed55..fbf38044938c 100644 --- a/drivers/net/wireless/ath/ath12k/reg.c +++ b/drivers/net/wireless/ath/ath12k/reg.c @@ -49,8 +49,8 @@ ath12k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct ath12k_wmi_init_country_arg arg; struct ath12k_hw *ah = ath12k_hw_to_ah(hw); - struct ath12k *ar = ath12k_ah_to_ar(ah); - int ret; + struct ath12k *ar = ath12k_ah_to_ar(ah, 0); + int ret, i; ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Regulatory Notification received for %s\n", wiphy_name(wiphy)); @@ -85,10 +85,16 @@ ath12k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) memcpy(&arg.cc_info.alpha2, request->alpha2, 2); arg.cc_info.alpha2[2] = 0; - ret = ath12k_wmi_send_init_country_cmd(ar, &arg); - if (ret) - ath12k_warn(ar->ab, - "INIT Country code set to fw failed : %d\n", ret); + /* Allow fresh updates to wiphy regd */ + ah->regd_updated = false; + + /* Send the reg change request to all the radios */ + for_each_ar(ah, ar, i) { + ret = ath12k_wmi_send_init_country_cmd(ar, &arg); + if (ret) + ath12k_warn(ar->ab, + "INIT Country code set to fw failed : %d\n", ret); + } } int ath12k_reg_update_chan_list(struct ath12k *ar) @@ -202,10 +208,32 @@ int ath12k_regd_update(struct ath12k *ar, bool init) { struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); struct ieee80211_regdomain *regd, *regd_copy = NULL; + struct ath12k_hw *ah = ar->ah; int ret, regd_len, pdev_id; struct ath12k_base *ab; + int i; ab = ar->ab; + + /* If one of the radios within ah has already updated the regd for + * the wiphy, then avoid setting regd again + */ + if (ah->regd_updated) + return 0; + + /* firmware provides reg rules which are similar for 2 GHz and 5 GHz + * pdev but 6 GHz pdev has superset of all rules including rules for + * all bands, we prefer 6 GHz pdev's rules to be used for setup of + * the wiphy regd. + * If 6 GHz pdev was part of the ath12k_hw, wait for the 6 GHz pdev, + * else pick the first pdev which calls this function and use its + * regd to update global hw regd. + * The regd_updated flag set at the end will not allow any further + * updates. + */ + if (ah->use_6ghz_regd && !ar->supports_6ghz) + return 0; + pdev_id = ar->pdev_idx; spin_lock_bh(&ab->base_lock); @@ -258,10 +286,17 @@ int ath12k_regd_update(struct ath12k *ar, bool init) if (ret) goto err; - if (ar->state == ATH12K_STATE_ON) { - ret = ath12k_reg_update_chan_list(ar); - if (ret) - goto err; + ah->regd_updated = true; + /* Apply the new regd to all the radios, this is expected to be received only once + * since we check for ah->regd_updated and allow here only once. 
+ */ + for_each_ar(ah, ar, i) { + if (ar->state == ATH12K_STATE_ON) { + ab = ar->ab; + ret = ath12k_reg_update_chan_list(ar); + if (ret) + goto err; + } } return 0; diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c index 9d69a1769926..7a52d2082b79 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.c +++ b/drivers/net/wireless/ath/ath12k/wmi.c @@ -858,20 +858,20 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr, len = sizeof(*txrx_streams); txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS, len); - txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G; + txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_2G); txrx_streams->supported_tx_streams = - args->chains[NL80211_BAND_2GHZ].tx; + cpu_to_le32(args->chains[NL80211_BAND_2GHZ].tx); txrx_streams->supported_rx_streams = - args->chains[NL80211_BAND_2GHZ].rx; + cpu_to_le32(args->chains[NL80211_BAND_2GHZ].rx); txrx_streams++; txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS, len); - txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G; + txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_5G); txrx_streams->supported_tx_streams = - args->chains[NL80211_BAND_5GHZ].tx; + cpu_to_le32(args->chains[NL80211_BAND_5GHZ].tx); txrx_streams->supported_rx_streams = - args->chains[NL80211_BAND_5GHZ].rx; + cpu_to_le32(args->chains[NL80211_BAND_5GHZ].rx); ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n", @@ -1900,7 +1900,7 @@ static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd, if (arg->bw_160) cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ); if (arg->bw_320) - cmd->peer_flags |= cpu_to_le32(WMI_PEER_EXT_320MHZ); + cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_320MHZ); /* Typically if STBC is enabled for VHT it should be enabled * for HT as well @@ -2723,6 +2723,149 @@ int ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath12k *ar, return ret; } +int ath12k_wmi_set_bios_cmd(struct ath12k_base *ab, u32 param_id, + const u8 *buf, size_t buf_len) +{ + struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab; + struct wmi_pdev_set_bios_interface_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + u8 *ptr; + u32 len, len_aligned; + int ret; + + len_aligned = roundup(buf_len, sizeof(u32)); + len = sizeof(*cmd) + TLV_HDR_SIZE + len_aligned; + + skb = ath12k_wmi_alloc_skb(wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_pdev_set_bios_interface_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD, + sizeof(*cmd)); + cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC); + cmd->param_type_id = cpu_to_le32(param_id); + cmd->length = cpu_to_le32(buf_len); + + ptr = skb->data + sizeof(*cmd); + tlv = (struct wmi_tlv *)ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len_aligned); + ptr += TLV_HDR_SIZE; + memcpy(ptr, buf, buf_len); + + ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], + skb, + WMI_PDEV_SET_BIOS_INTERFACE_CMDID); + if (ret) { + ath12k_warn(ab, + "failed to send WMI_PDEV_SET_BIOS_INTERFACE_CMDID parameter id %d: %d\n", + param_id, ret); + dev_kfree_skb(skb); + } + + return 0; +} + +int ath12k_wmi_set_bios_sar_cmd(struct ath12k_base *ab, const u8 *psar_table) +{ + struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab; + struct wmi_pdev_set_bios_sar_table_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + int ret; + u8 *buf_ptr; + u32 len, sar_table_len_aligned, sar_dbs_backoff_len_aligned; + const u8 
*psar_value = psar_table + ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET; + const u8 *pdbs_value = psar_table + ATH12K_ACPI_DBS_BACKOFF_DATA_OFFSET; + + sar_table_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_TABLE_LEN, sizeof(u32)); + sar_dbs_backoff_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN, + sizeof(u32)); + len = sizeof(*cmd) + TLV_HDR_SIZE + sar_table_len_aligned + + TLV_HDR_SIZE + sar_dbs_backoff_len_aligned; + + skb = ath12k_wmi_alloc_skb(wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_pdev_set_bios_sar_table_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD, + sizeof(*cmd)); + cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC); + cmd->sar_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_TABLE_LEN); + cmd->dbs_backoff_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN); + + buf_ptr = skb->data + sizeof(*cmd); + tlv = (struct wmi_tlv *)buf_ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, + sar_table_len_aligned); + buf_ptr += TLV_HDR_SIZE; + memcpy(buf_ptr, psar_value, ATH12K_ACPI_BIOS_SAR_TABLE_LEN); + + buf_ptr += sar_table_len_aligned; + tlv = (struct wmi_tlv *)buf_ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, + sar_dbs_backoff_len_aligned); + buf_ptr += TLV_HDR_SIZE; + memcpy(buf_ptr, pdbs_value, ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN); + + ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], + skb, + WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID); + if (ret) { + ath12k_warn(ab, + "failed to send WMI_PDEV_SET_BIOS_INTERFACE_CMDID %d\n", + ret); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath12k_wmi_set_bios_geo_cmd(struct ath12k_base *ab, const u8 *pgeo_table) +{ + struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab; + struct wmi_pdev_set_bios_geo_table_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + int ret; + u8 *buf_ptr; + u32 len, sar_geo_len_aligned; + const u8 *pgeo_value = pgeo_table + ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET; + + sar_geo_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN, sizeof(u32)); + len = sizeof(*cmd) + TLV_HDR_SIZE + sar_geo_len_aligned; + + skb = ath12k_wmi_alloc_skb(wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_pdev_set_bios_geo_table_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD, + sizeof(*cmd)); + cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC); + cmd->geo_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN); + + buf_ptr = skb->data + sizeof(*cmd); + tlv = (struct wmi_tlv *)buf_ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, sar_geo_len_aligned); + buf_ptr += TLV_HDR_SIZE; + memcpy(buf_ptr, pgeo_value, ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN); + + ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], + skb, + WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID); + if (ret) { + ath12k_warn(ab, + "failed to send WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID %d\n", + ret); + dev_kfree_skb(skb); + } + + return ret; +} + int ath12k_wmi_delba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac, u32 tid, u32 initiator, u32 reason) { @@ -3324,7 +3467,8 @@ ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cf wmi_cfg->bpf_instruction_size = cpu_to_le32(tg_cfg->bpf_instruction_size); wmi_cfg->max_bssid_rx_filters = cpu_to_le32(tg_cfg->max_bssid_rx_filters); wmi_cfg->use_pdev_id = cpu_to_le32(tg_cfg->use_pdev_id); - wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config); + wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config | + WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64); wmi_cfg->peer_map_unmap_version = 
cpu_to_le32(tg_cfg->peer_map_unmap_version); wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params); wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count); @@ -4041,6 +4185,7 @@ static void ath12k_wmi_free_dbring_caps(struct ath12k_base *ab) { kfree(ab->db_caps); ab->db_caps = NULL; + ab->num_db_cap = 0; } static int ath12k_wmi_dma_ring_caps(struct ath12k_base *ab, @@ -5927,13 +6072,11 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb) } } - /* TODO: Pending handle beacon implementation - *if (ieee80211_is_beacon(hdr->frame_control)) - * ath12k_mac_handle_beacon(ar, skb); - */ + if (ieee80211_is_beacon(hdr->frame_control)) + ath12k_mac_handle_beacon(ar, skb); ath12k_dbg(ab, ATH12K_DBG_MGMT, - "event mgmt rx skb %pK len %d ftype %02x stype %02x\n", + "event mgmt rx skb %p len %d ftype %02x stype %02x\n", skb, skb->len, fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE); @@ -6137,42 +6280,44 @@ static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb) { struct wmi_roam_event roam_ev = {}; struct ath12k *ar; + u32 vdev_id; + u8 roam_reason; if (ath12k_pull_roam_ev(ab, skb, &roam_ev) != 0) { ath12k_warn(ab, "failed to extract roam event"); return; } + vdev_id = le32_to_cpu(roam_ev.vdev_id); + roam_reason = u32_get_bits(le32_to_cpu(roam_ev.reason), + WMI_ROAM_REASON_MASK); + ath12k_dbg(ab, ATH12K_DBG_WMI, - "wmi roam event vdev %u reason 0x%08x rssi %d\n", - roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi); + "wmi roam event vdev %u reason %d rssi %d\n", + vdev_id, roam_reason, roam_ev.rssi); rcu_read_lock(); - ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(roam_ev.vdev_id)); + ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id); if (!ar) { - ath12k_warn(ab, "invalid vdev id in roam ev %d", - roam_ev.vdev_id); + ath12k_warn(ab, "invalid vdev id in roam ev %d", vdev_id); rcu_read_unlock(); return; } - if (le32_to_cpu(roam_ev.reason) >= WMI_ROAM_REASON_MAX) + if (roam_reason >= WMI_ROAM_REASON_MAX) ath12k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n", - roam_ev.reason, roam_ev.vdev_id); + roam_reason, vdev_id); - switch (le32_to_cpu(roam_ev.reason)) { + switch (roam_reason) { case WMI_ROAM_REASON_BEACON_MISS: - /* TODO: Pending beacon miss and connection_loss_work - * implementation - * ath12k_mac_handle_beacon_miss(ar, vdev_id); - */ + ath12k_mac_handle_beacon_miss(ar, vdev_id); break; case WMI_ROAM_REASON_BETTER_AP: case WMI_ROAM_REASON_LOW_RSSI: case WMI_ROAM_REASON_SUITABLE_AP_FOUND: case WMI_ROAM_REASON_HO_FAILED: ath12k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n", - roam_ev.reason, roam_ev.vdev_id); + roam_reason, vdev_id); break; } diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h index 103462feb935..496866673aea 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.h +++ b/drivers/net/wireless/ath/ath12k/wmi.h @@ -164,10 +164,6 @@ struct wmi_tlv { #define WLAN_SCAN_MAX_HINT_BSSID 10 #define MAX_RNR_BSS 5 -#define WLAN_SCAN_MAX_HINT_S_SSID 10 -#define WLAN_SCAN_MAX_HINT_BSSID 10 -#define MAX_RNR_BSS 5 - #define WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG 1 #define WMI_BA_MODE_BUFFER_SIZE_256 3 @@ -357,6 +353,9 @@ enum wmi_tlv_cmd_id { WMI_PDEV_DMA_RING_CFG_REQ_CMDID, WMI_PDEV_HE_TB_ACTION_FRM_CMDID, WMI_PDEV_PKTLOG_FILTER_CMDID, + WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID = 0x4044, + WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID = 0x4045, + WMI_PDEV_SET_BIOS_INTERFACE_CMDID = 0x404A, WMI_VDEV_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_VDEV), WMI_VDEV_DELETE_CMDID, 
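/* Illustrative sketch, not part of the diff: with WMI_ROAM_REASON_MASK and
 * WMI_ROAM_SUBNET_STATUS_MASK (defined further below) the packed reason
 * word of the roam event is expected to be decoded roughly as:
 *
 *	u32 val = le32_to_cpu(roam_ev.reason);
 *	u8 reason = u32_get_bits(val, WMI_ROAM_REASON_MASK);
 *	u8 subnet_status = u32_get_bits(val, WMI_ROAM_SUBNET_STATUS_MASK);
 *
 * which matches how ath12k_roam_event() extracts roam_reason above; the
 * subnet-status read is shown only as an assumed use of the second mask.
 */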
WMI_VDEV_START_REQUEST_CMDID, @@ -1929,6 +1928,9 @@ enum wmi_tlv_tag { WMI_TAG_REGULATORY_RULE_EXT_STRUCT = 0x3A9, WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT, WMI_TAG_EHT_RATE_SET = 0x3C4, + WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD = 0x3D8, + WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD = 0x3D9, + WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD = 0x3FB, WMI_TAG_MAX }; @@ -2199,8 +2201,11 @@ enum wmi_peer_param { WMI_PEER_SET_MAX_TX_RATE = 17, WMI_PEER_SET_MIN_TX_RATE = 18, WMI_PEER_SET_DEFAULT_ROUTING = 19, + WMI_PEER_CHWIDTH_PUNCTURE_20MHZ_BITMAP = 39, }; +#define WMI_PEER_PUNCTURE_BITMAP GENMASK(23, 8) + enum wmi_slot_time { WMI_VDEV_SLOT_TIME_LONG = 1, WMI_VDEV_SLOT_TIME_SHORT = 2, @@ -2404,6 +2409,7 @@ struct wmi_init_cmd { #define WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT 4 #define WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION GENMASK(5, 4) +#define WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64 BIT(5) struct ath12k_wmi_resource_config_params { __le32 tlv_header; @@ -2604,6 +2610,19 @@ struct ath12k_wmi_soc_hal_reg_caps_params { __le32 num_phy; } __packed; +enum wmi_channel_width { + WMI_CHAN_WIDTH_20 = 0, + WMI_CHAN_WIDTH_40 = 1, + WMI_CHAN_WIDTH_80 = 2, + WMI_CHAN_WIDTH_160 = 3, + WMI_CHAN_WIDTH_80P80 = 4, + WMI_CHAN_WIDTH_5 = 5, + WMI_CHAN_WIDTH_10 = 6, + WMI_CHAN_WIDTH_165 = 7, + WMI_CHAN_WIDTH_160P160 = 8, + WMI_CHAN_WIDTH_320 = 9, +}; + #define WMI_MAX_EHTCAP_MAC_SIZE 2 #define WMI_MAX_EHTCAP_PHY_SIZE 3 #define WMI_MAX_EHTCAP_RATE_SET 3 @@ -2728,9 +2747,9 @@ struct wmi_vdev_create_cmd { struct ath12k_wmi_vdev_txrx_streams_params { __le32 tlv_header; - u32 band; - u32 supported_tx_streams; - u32 supported_rx_streams; + __le32 band; + __le32 supported_tx_streams; + __le32 supported_rx_streams; } __packed; struct wmi_vdev_delete_cmd { @@ -3357,34 +3376,6 @@ struct wmi_bssid_arg { const u8 *bssid; }; -struct wmi_start_scan_arg { - u32 scan_id; - u32 scan_req_id; - u32 vdev_id; - u32 scan_priority; - u32 notify_scan_events; - u32 dwell_time_active; - u32 dwell_time_passive; - u32 min_rest_time; - u32 max_rest_time; - u32 repeat_probe_time; - u32 probe_spacing_time; - u32 idle_time; - u32 max_scan_time; - u32 probe_delay; - u32 scan_ctrl_flags; - - u32 ie_len; - u32 n_channels; - u32 n_ssids; - u32 n_bssids; - - u8 ie[WLAN_SCAN_PARAMS_MAX_IE_LEN]; - u32 channels[64]; - struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID]; - struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID]; -}; - #define WMI_SCAN_STOP_ONE 0x00000000 #define WMI_SCAN_STOP_VAP_ALL 0x01000000 #define WMI_SCAN_STOP_ALL 0x04000000 @@ -4214,6 +4205,9 @@ struct wmi_peer_sta_kickout_event { struct ath12k_wmi_mac_addr_params peer_macaddr; } __packed; +#define WMI_ROAM_REASON_MASK GENMASK(3, 0) +#define WMI_ROAM_SUBNET_STATUS_MASK GENMASK(5, 4) + enum wmi_roam_reason { WMI_ROAM_REASON_BETTER_AP = 1, WMI_ROAM_REASON_BEACON_MISS = 2, @@ -4776,8 +4770,6 @@ struct wmi_probe_tmpl_cmd { __le32 buf_len; } __packed; -#define WMI_MAX_MEM_REQS 32 - #define MAX_RADIOS 3 #define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ) @@ -4808,6 +4800,37 @@ struct ath12k_wmi_base { struct ath12k_wmi_target_cap_arg *targ_cap; }; +struct wmi_pdev_set_bios_interface_cmd { + __le32 tlv_header; + __le32 pdev_id; + __le32 param_type_id; + __le32 length; +} __packed; + +enum wmi_bios_param_type { + WMI_BIOS_PARAM_CCA_THRESHOLD_TYPE = 0, + WMI_BIOS_PARAM_TAS_CONFIG_TYPE = 1, + WMI_BIOS_PARAM_TAS_DATA_TYPE = 2, + + /* bandedge control power */ + WMI_BIOS_PARAM_TYPE_BANDEDGE = 3, + + WMI_BIOS_PARAM_TYPE_MAX, +}; + +struct wmi_pdev_set_bios_sar_table_cmd { + __le32 tlv_header; + __le32 pdev_id; + 
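/* Illustrative note on the buffer built by ath12k_wmi_set_bios_sar_cmd()
 * above (both payload lengths rounded up to a multiple of sizeof(u32)):
 *
 *	[ wmi_pdev_set_bios_sar_table_cmd            ]
 *	[ WMI_TAG_ARRAY_BYTE TLV: BIOS SAR table     ]
 *	[ WMI_TAG_ARRAY_BYTE TLV: DBS backoff table  ]
 */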
__le32 sar_len; + __le32 dbs_backoff_len; +} __packed; + +struct wmi_pdev_set_bios_geo_table_cmd { + __le32 tlv_header; + __le32 pdev_id; + __le32 geo_len; +} __packed; + #define ATH12K_FW_STATS_BUF_SIZE (1024 * 1024) enum wmi_sys_cap_info_flags { @@ -4966,6 +4989,10 @@ int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id, struct sk_buff *tmpl); int ath12k_wmi_set_hw_mode(struct ath12k_base *ab, enum wmi_host_hw_mode_config_type mode); +int ath12k_wmi_set_bios_cmd(struct ath12k_base *ab, u32 param_id, + const u8 *buf, size_t buf_len); +int ath12k_wmi_set_bios_sar_cmd(struct ath12k_base *ab, const u8 *psar_table); +int ath12k_wmi_set_bios_geo_cmd(struct ath12k_base *ab, const u8 *pgeo_table); static inline u32 ath12k_wmi_caps_ext_get_pdev_id(const struct ath12k_wmi_caps_ext_params *param) diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c index 1963d3145481..fb5144e2d86c 100644 --- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c +++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c @@ -364,8 +364,7 @@ static void ath6kl_htc_tx_prep_pkt(struct htc_packet *packet, u8 flags, packet->buf -= HTC_HDR_LENGTH; hdr = (struct htc_frame_hdr *)packet->buf; - /* Endianess? */ - put_unaligned((u16)packet->act_len, &hdr->payld_len); + put_unaligned_le16(packet->act_len, &hdr->payld_len); hdr->flags = flags; hdr->eid = packet->endpoint; hdr->ctrl[0] = ctrl0; diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c index 9b88d96bfe96..2f2edfe43761 100644 --- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c +++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c @@ -237,8 +237,7 @@ static int htc_issue_packets(struct htc_target *target, packet->info.tx.flags |= HTC_FLAGS_TX_FIXUP_NETBUF; - /* Endianess? 
*/ - put_unaligned((u16) payload_len, &htc_hdr->payld_len); + put_unaligned_le16(payload_len, &htc_hdr->payld_len); htc_hdr->flags = packet->info.tx.flags; htc_hdr->eid = (u8) packet->endpoint; htc_hdr->ctrl[0] = 0; diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c index 8a43c48ec1cf..9ab091044706 100644 --- a/drivers/net/wireless/ath/ath6kl/sdio.c +++ b/drivers/net/wireless/ath/ath6kl/sdio.c @@ -1427,25 +1427,7 @@ static struct sdio_driver ath6kl_sdio_driver = { .remove = ath6kl_sdio_remove, .drv.pm = ATH6KL_SDIO_PM_OPS, }; - -static int __init ath6kl_sdio_init(void) -{ - int ret; - - ret = sdio_register_driver(&ath6kl_sdio_driver); - if (ret) - ath6kl_err("sdio driver registration failed: %d\n", ret); - - return ret; -} - -static void __exit ath6kl_sdio_exit(void) -{ - sdio_unregister_driver(&ath6kl_sdio_driver); -} - -module_init(ath6kl_sdio_init); -module_exit(ath6kl_sdio_exit); +module_sdio_driver(ath6kl_sdio_driver); MODULE_AUTHOR("Atheros Communications, Inc."); MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices"); diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 668fc07b3073..29ca65a732a6 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -39,6 +39,7 @@ extern int ath9k_modparam_nohwcrypt; extern int ath9k_led_blink; extern bool is_ath9k_unloaded; extern int ath9k_use_chanctx; +extern int ath9k_use_msi; /*************************/ /* Descriptor Management */ diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c index e8c2cc03be0c..27b860b0c769 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c @@ -76,7 +76,7 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) static u32 ath9k_dump_4k_modal_eeprom(char *buf, u32 len, u32 size, struct modal_eep_4k_header *modal_hdr) { - PR_EEP("Chain0 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[0])); + PR_EEP("Chain0 Ant. Control", le32_to_cpu(modal_hdr->antCtrlChain[0])); PR_EEP("Ant. Common Control", le32_to_cpu(modal_hdr->antCtrlCommon)); PR_EEP("Chain0 Ant. Gain", modal_hdr->antennaGainCh[0]); PR_EEP("Switch Settle", modal_hdr->switchSettling); diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c index fd5312c2a7e3..d85472ee4d85 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c @@ -79,8 +79,8 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah) static u32 ar9287_dump_modal_eeprom(char *buf, u32 len, u32 size, struct modal_eep_ar9287_header *modal_hdr) { - PR_EEP("Chain0 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[0])); - PR_EEP("Chain1 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[1])); + PR_EEP("Chain0 Ant. Control", le32_to_cpu(modal_hdr->antCtrlChain[0])); + PR_EEP("Chain1 Ant. Control", le32_to_cpu(modal_hdr->antCtrlChain[1])); PR_EEP("Ant. Common Control", le32_to_cpu(modal_hdr->antCtrlCommon)); PR_EEP("Chain0 Ant. Gain", modal_hdr->antennaGainCh[0]); PR_EEP("Chain1 Ant. 
Gain", modal_hdr->antennaGainCh[1]); diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c index 7685f8ab371e..84b31caf8ca6 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_def.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c @@ -135,9 +135,9 @@ static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah) static u32 ath9k_def_dump_modal_eeprom(char *buf, u32 len, u32 size, struct modal_eep_header *modal_hdr) { - PR_EEP("Chain0 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[0])); - PR_EEP("Chain1 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[1])); - PR_EEP("Chain2 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[2])); + PR_EEP("Chain0 Ant. Control", le32_to_cpu(modal_hdr->antCtrlChain[0])); + PR_EEP("Chain1 Ant. Control", le32_to_cpu(modal_hdr->antCtrlChain[1])); + PR_EEP("Chain2 Ant. Control", le32_to_cpu(modal_hdr->antCtrlChain[2])); PR_EEP("Ant. Common Control", le32_to_cpu(modal_hdr->antCtrlCommon)); PR_EEP("Chain0 Ant. Gain", modal_hdr->antennaGainCh[0]); PR_EEP("Chain1 Ant. Gain", modal_hdr->antennaGainCh[1]); diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index a2943aaecb20..01173aac3045 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -135,8 +135,7 @@ void ath9k_ps_wakeup(struct ath_softc *sc) if (power_mode != ATH9K_PM_AWAKE) { spin_lock(&common->cc_lock); ath_hw_cycle_counters_update(common); - memset(&common->cc_survey, 0, sizeof(common->cc_survey)); - memset(&common->cc_ani, 0, sizeof(common->cc_ani)); + memset(&common->cc, 0, sizeof(common->cc)); spin_unlock(&common->cc_lock); } diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index e655cd8bbf94..1ff53520f0a3 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -21,8 +21,6 @@ #include <linux/module.h> #include "ath9k.h" -extern int ath9k_use_msi; - static const struct pci_device_id ath_pci_id_table[] = { { PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI */ { PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */ diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index d519b676a109..35aa47a9db90 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1674,8 +1674,14 @@ static void ath9k_set_moredata(struct ath_softc *sc, struct ath_buf *bf, bool val) { struct ieee80211_hdr *hdr; - u16 mask = cpu_to_le16(IEEE80211_FCTL_MOREDATA); - u16 mask_val = mask * val; + __le16 mask, mask_val; + + mask = cpu_to_le16(IEEE80211_FCTL_MOREDATA); + + if (val) + mask_val = mask; + else + mask_val = 0; hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data; if ((hdr->frame_control & mask) != mask_val) { diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c index e902ca80eba7..0226c31a6cae 100644 --- a/drivers/net/wireless/ath/carl9170/tx.c +++ b/drivers/net/wireless/ath/carl9170/tx.c @@ -280,7 +280,8 @@ static void carl9170_tx_release(struct kref *ref) * carl9170_tx_fill_rateinfo() has filled the rate information * before we get to this point. 
*/ - memset_after(&txinfo->status, 0, rates); + memset(&txinfo->pad, 0, sizeof(txinfo->pad)); + memset(&txinfo->rate_driver_data, 0, sizeof(txinfo->rate_driver_data)); if (atomic_read(&ar->tx_total_queued)) ar->tx_schedule = true; diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c index c4edf8355941..a3e03580cd9f 100644 --- a/drivers/net/wireless/ath/carl9170/usb.c +++ b/drivers/net/wireless/ath/carl9170/usb.c @@ -1069,6 +1069,38 @@ static int carl9170_usb_probe(struct usb_interface *intf, ar->usb_ep_cmd_is_bulk = true; } + /* Verify that all expected endpoints are present */ + if (ar->usb_ep_cmd_is_bulk) { + u8 bulk_ep_addr[] = { + AR9170_USB_EP_RX | USB_DIR_IN, + AR9170_USB_EP_TX | USB_DIR_OUT, + AR9170_USB_EP_CMD | USB_DIR_OUT, + 0}; + u8 int_ep_addr[] = { + AR9170_USB_EP_IRQ | USB_DIR_IN, + 0}; + if (!usb_check_bulk_endpoints(intf, bulk_ep_addr) || + !usb_check_int_endpoints(intf, int_ep_addr)) + err = -ENODEV; + } else { + u8 bulk_ep_addr[] = { + AR9170_USB_EP_RX | USB_DIR_IN, + AR9170_USB_EP_TX | USB_DIR_OUT, + 0}; + u8 int_ep_addr[] = { + AR9170_USB_EP_IRQ | USB_DIR_IN, + AR9170_USB_EP_CMD | USB_DIR_OUT, + 0}; + if (!usb_check_bulk_endpoints(intf, bulk_ep_addr) || + !usb_check_int_endpoints(intf, int_ep_addr)) + err = -ENODEV; + } + + if (err) { + carl9170_free(ar); + return err; + } + usb_set_intfdata(intf, ar); SET_IEEE80211_DEV(ar->hw, &intf->dev); diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index bfbd3c7a70b3..e760d8002e09 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -756,9 +756,9 @@ static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta, if (sta->deflink.vht_cap.vht_supported) { sta_priv->supported_rates.op_rate_mode = STA_11ac; sta_priv->supported_rates.vht_rx_mcs_map = - sta->deflink.vht_cap.vht_mcs.rx_mcs_map; + le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map); sta_priv->supported_rates.vht_tx_mcs_map = - sta->deflink.vht_cap.vht_mcs.tx_mcs_map; + le16_to_cpu(sta->deflink.vht_cap.vht_mcs.tx_mcs_map); } } diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c index 0802ed728824..8826998797d6 100644 --- a/drivers/net/wireless/ath/wcn36xx/txrx.c +++ b/drivers/net/wireless/ath/wcn36xx/txrx.c @@ -318,7 +318,7 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) memset(&status, 0, sizeof(status)); bd = (struct wcn36xx_rx_bd *)skb->data; - buff_to_be((u32 *)bd, sizeof(*bd)/sizeof(u32)); + buff_to_be(bd, sizeof(*bd)/sizeof(u32)); wcn36xx_dbg_dump(WCN36XX_DBG_RX_DUMP, "BD <<< ", (char *)bd, sizeof(struct wcn36xx_rx_bd)); @@ -692,7 +692,7 @@ int wcn36xx_start_tx(struct wcn36xx *wcn, /* MGMT and CTRL frames are handeld here*/ wcn36xx_set_tx_mgmt(&bd, wcn, &vif_priv, skb, bcast); - buff_to_be((u32 *)&bd, sizeof(bd)/sizeof(u32)); + buff_to_be(&bd, sizeof(bd)/sizeof(u32)); bd.tx_bd_sign = 0xbdbdbdbd; ret = wcn36xx_dxe_tx_frame(wcn, vif_priv, &bd, skb, is_low); diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h index ff4a8e5d7209..bccc27de848d 100644 --- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h +++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h @@ -100,11 +100,14 @@ enum wcn36xx_ampdu_state { #define RF_IRIS_WCN3660 0x3660 #define RF_IRIS_WCN3680 0x3680 -static inline void buff_to_be(u32 *buf, size_t len) +static inline void buff_to_be(void *buf, size_t len) { + __be32 *to = buf; + u32 *from = buf; int i; + for (i = 0; i < 
len; i++) - buf[i] = cpu_to_be32(buf[i]); + to[i] = cpu_to_be32(from[i]); } struct nv_data { diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index dbe4b3478f03..e8f1d30a8d73 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -892,10 +892,8 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, struct wil6210_priv *wil = wiphy_to_wil(wiphy); struct wireless_dev *wdev = request->wdev; struct wil6210_vif *vif = wdev_to_vif(wil, wdev); - struct { - struct wmi_start_scan_cmd cmd; - u16 chnl[4]; - } __packed cmd; + DEFINE_FLEX(struct wmi_start_scan_cmd, cmd, + channel_list, num_channels, 4); uint i, n; int rc; @@ -977,9 +975,8 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, vif->scan_request = request; mod_timer(&vif->scan_timer, jiffies + WIL6210_SCAN_TO); - memset(&cmd, 0, sizeof(cmd)); - cmd.cmd.scan_type = WMI_ACTIVE_SCAN; - cmd.cmd.num_channels = 0; + cmd->scan_type = WMI_ACTIVE_SCAN; + cmd->num_channels = 0; n = min(request->n_channels, 4U); for (i = 0; i < n; i++) { int ch = request->channels[i]->hw_value; @@ -991,7 +988,8 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, continue; } /* 0-based channel indexes */ - cmd.cmd.channel_list[cmd.cmd.num_channels++].channel = ch - 1; + cmd->num_channels++; + cmd->channel_list[cmd->num_channels - 1].channel = ch - 1; wil_dbg_misc(wil, "Scan for ch %d : %d MHz\n", ch, request->channels[i]->center_freq); } @@ -1007,16 +1005,15 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, if (rc) goto out_restore; - if (wil->discovery_mode && cmd.cmd.scan_type == WMI_ACTIVE_SCAN) { - cmd.cmd.discovery_mode = 1; + if (wil->discovery_mode && cmd->scan_type == WMI_ACTIVE_SCAN) { + cmd->discovery_mode = 1; wil_dbg_misc(wil, "active scan with discovery_mode=1\n"); } if (vif->mid == 0) wil->radio_wdev = wdev; rc = wmi_send(wil, WMI_START_SCAN_CMDID, vif->mid, - &cmd, sizeof(cmd.cmd) + - cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0])); + cmd, struct_size(cmd, channel_list, cmd->num_channels)); out_restore: if (rc) { @@ -2735,7 +2732,7 @@ int wil_cfg80211_iface_combinations_from_fw( return 0; } - combo = conc->combos; + combo = (const struct wil_fw_concurrency_combo *)(conc + 1); n_combos = le16_to_cpu(conc->n_combos); for (i = 0; i < n_combos; i++) { total_limits += combo->n_limits; @@ -2751,7 +2748,7 @@ int wil_cfg80211_iface_combinations_from_fw( return -ENOMEM; iface_limit = (struct ieee80211_iface_limit *)(iface_combinations + n_combos); - combo = conc->combos; + combo = (const struct wil_fw_concurrency_combo *)(conc + 1); for (i = 0; i < n_combos; i++) { iface_combinations[i].max_interfaces = combo->max_interfaces; iface_combinations[i].num_different_channels = diff --git a/drivers/net/wireless/ath/wil6210/fw.h b/drivers/net/wireless/ath/wil6210/fw.h index aa1620e0d24f..2079a90ec260 100644 --- a/drivers/net/wireless/ath/wil6210/fw.h +++ b/drivers/net/wireless/ath/wil6210/fw.h @@ -93,7 +93,6 @@ struct wil_fw_record_concurrency { /* type == wil_fw_type_comment */ /* number of concurrency combinations that follow */ __le16 n_combos; /* keep last - combinations, variable size by n_combos */ - struct wil_fw_concurrency_combo combos[]; } __packed; /* brd file info encoded inside a comment record */ diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c index fbc84c03406b..c3c0b289dcf3 100644 --- a/drivers/net/wireless/ath/wil6210/fw_inc.c +++ b/drivers/net/wireless/ath/wil6210/fw_inc.c @@ -212,8 
+212,8 @@ fw_handle_concurrency(struct wil6210_priv *wil, const void *data, } n_combos = le16_to_cpu(rec->n_combos); - remain = size - offsetof(struct wil_fw_record_concurrency, combos); - combo = rec->combos; + remain = size - sizeof(struct wil_fw_record_concurrency); + combo = (const struct wil_fw_concurrency_combo *)(rec + 1); for (i = 0; i < n_combos; i++) { if (remain < sizeof(*combo)) goto out_short; diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 6fdb77d4c59e..8ff69dc72fb9 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -4015,27 +4015,22 @@ int wmi_set_cqm_rssi_config(struct wil6210_priv *wil, struct wil6210_vif *vif = ndev_to_vif(ndev); int rc; struct { - struct wmi_set_link_monitor_cmd cmd; - s8 rssi_thold; - } __packed cmd = { - .cmd = { - .rssi_hyst = rssi_hyst, - .rssi_thresholds_list_size = 1, - }, - .rssi_thold = rssi_thold, - }; - struct { struct wmi_cmd_hdr hdr; struct wmi_set_link_monitor_event evt; } __packed reply = { .evt = {.status = WMI_FW_STATUS_FAILURE}, }; + DEFINE_FLEX(struct wmi_set_link_monitor_cmd, cmd, + rssi_thresholds_list, rssi_thresholds_list_size, 1); + + cmd->rssi_hyst = rssi_hyst; + cmd->rssi_thresholds_list[0] = rssi_thold; if (rssi_thold > S8_MAX || rssi_thold < S8_MIN || rssi_hyst > U8_MAX) return -EINVAL; - rc = wmi_call(wil, WMI_SET_LINK_MONITOR_CMDID, vif->mid, &cmd, - sizeof(cmd), WMI_SET_LINK_MONITOR_EVENTID, + rc = wmi_call(wil, WMI_SET_LINK_MONITOR_CMDID, vif->mid, cmd, + __struct_size(cmd), WMI_SET_LINK_MONITOR_EVENTID, &reply, sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS); if (rc) { wil_err(wil, "WMI_SET_LINK_MONITOR_CMDID failed, rc %d\n", rc); diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index 71bf2ae27a98..38f64524019e 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -474,7 +474,7 @@ struct wmi_start_scan_cmd { struct { u8 channel; u8 reserved; - } channel_list[]; + } channel_list[] __counted_by(num_channels); } __packed; #define WMI_MAX_PNO_SSID_NUM (16) @@ -3320,7 +3320,7 @@ struct wmi_set_link_monitor_cmd { u8 rssi_hyst; u8 reserved[12]; u8 rssi_thresholds_list_size; - s8 rssi_thresholds_list[]; + s8 rssi_thresholds_list[] __counted_by(rssi_thresholds_list_size); } __packed; /* wmi_link_monitor_event_type */ diff --git a/drivers/net/wireless/broadcom/b43/sysfs.c b/drivers/net/wireless/broadcom/b43/sysfs.c index 0679d132968f..261b2b746a9c 100644 --- a/drivers/net/wireless/broadcom/b43/sysfs.c +++ b/drivers/net/wireless/broadcom/b43/sysfs.c @@ -53,19 +53,14 @@ static ssize_t b43_attr_interfmode_show(struct device *dev, switch (wldev->phy.g->interfmode) { case B43_INTERFMODE_NONE: - count = - snprintf(buf, PAGE_SIZE, - "0 (No Interference Mitigation)\n"); + count = sysfs_emit(buf, "0 (No Interference Mitigation)\n"); break; case B43_INTERFMODE_NONWLAN: - count = - snprintf(buf, PAGE_SIZE, - "1 (Non-WLAN Interference Mitigation)\n"); + count = sysfs_emit(buf, + "1 (Non-WLAN Interference Mitigation)\n"); break; case B43_INTERFMODE_MANUALWLAN: - count = - snprintf(buf, PAGE_SIZE, - "2 (WLAN Interference Mitigation)\n"); + count = sysfs_emit(buf, "2 (WLAN Interference Mitigation)\n"); break; default: B43_WARN_ON(1); diff --git a/drivers/net/wireless/broadcom/b43legacy/sysfs.c b/drivers/net/wireless/broadcom/b43legacy/sysfs.c index eec087ca30e6..d988fe541bf7 100644 --- a/drivers/net/wireless/broadcom/b43legacy/sysfs.c +++ 
b/drivers/net/wireless/broadcom/b43legacy/sysfs.c @@ -75,16 +75,14 @@ static ssize_t b43legacy_attr_interfmode_show(struct device *dev, switch (wldev->phy.interfmode) { case B43legacy_INTERFMODE_NONE: - count = snprintf(buf, PAGE_SIZE, "0 (No Interference" - " Mitigation)\n"); + count = sysfs_emit(buf, "0 (No Interference Mitigation)\n"); break; case B43legacy_INTERFMODE_NONWLAN: - count = snprintf(buf, PAGE_SIZE, "1 (Non-WLAN Interference" - " Mitigation)\n"); + count = sysfs_emit(buf, + "1 (Non-WLAN Interference Mitigation)\n"); break; case B43legacy_INTERFMODE_MANUALWLAN: - count = snprintf(buf, PAGE_SIZE, "2 (WLAN Interference" - " Mitigation)\n"); + count = sysfs_emit(buf, "2 (WLAN Interference Mitigation)\n"); break; default: B43legacy_WARN_ON(1); @@ -155,11 +153,9 @@ static ssize_t b43legacy_attr_preamble_show(struct device *dev, mutex_lock(&wldev->wl->mutex); if (wldev->short_preamble) - count = snprintf(buf, PAGE_SIZE, "1 (Short Preamble" - " enabled)\n"); + count = sysfs_emit(buf, "1 (Short Preamble enabled)\n"); else - count = snprintf(buf, PAGE_SIZE, "0 (Short Preamble" - " disabled)\n"); + count = sysfs_emit(buf, "0 (Short Preamble disabled)\n"); mutex_unlock(&wldev->wl->mutex); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index b99aa66dc5a9..5fe0e671ecb3 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -4549,7 +4549,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp, if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) { err = -EINVAL; - bphy_err(drvr, "ivalid OUI\n"); + bphy_err(drvr, "invalid OUI\n"); goto exit; } offset += TLV_OUI_LEN; @@ -4588,7 +4588,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp, for (i = 0; i < count; i++) { if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) { err = -EINVAL; - bphy_err(drvr, "ivalid OUI\n"); + bphy_err(drvr, "invalid OUI\n"); goto exit; } offset += TLV_OUI_LEN; @@ -4622,7 +4622,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp, for (i = 0; i < count; i++) { if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) { err = -EINVAL; - bphy_err(drvr, "ivalid OUI\n"); + bphy_err(drvr, "invalid OUI\n"); goto exit; } offset += TLV_OUI_LEN; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index d7fb88bb6ae1..06698a714b52 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -1675,6 +1675,15 @@ struct brcmf_random_seed_footer { #define BRCMF_RANDOM_SEED_MAGIC 0xfeedc0de #define BRCMF_RANDOM_SEED_LENGTH 0x100 +static noinline_for_stack void +brcmf_pcie_provide_random_bytes(struct brcmf_pciedev_info *devinfo, u32 address) +{ + u8 randbuf[BRCMF_RANDOM_SEED_LENGTH]; + + get_random_bytes(randbuf, BRCMF_RANDOM_SEED_LENGTH); + memcpy_toio(devinfo->tcm + address, randbuf, BRCMF_RANDOM_SEED_LENGTH); +} + static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo, const struct firmware *fw, void *nvram, u32 nvram_len) @@ -1717,7 +1726,6 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo, .length = cpu_to_le32(rand_len), .magic = cpu_to_le32(BRCMF_RANDOM_SEED_MAGIC), }; - void *randbuf; /* Some Apple chips/firmwares expect a buffer of random * data to be present before NVRAM @@ -1729,10 +1737,7 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo, 
sizeof(footer)); address -= rand_len; - randbuf = kzalloc(rand_len, GFP_KERNEL); - get_random_bytes(randbuf, rand_len); - memcpy_toio(devinfo->tcm + address, randbuf, rand_len); - kfree(randbuf); + brcmf_pcie_provide_random_bytes(devinfo, address); } } else { brcmf_dbg(PCIE, "No matching NVRAM file found %s\n", diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index 0ccf735316c2..9a105e6debe1 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -117,13 +117,6 @@ struct bootrom_id_le { __le32 boardrev; /* Board revision */ }; -struct brcmf_usb_image { - struct list_head list; - s8 *fwname; - u8 *image; - int image_len; -}; - struct brcmf_usbdev_info { struct brcmf_usbdev bus_pub; /* MUST BE FIRST */ spinlock_t qlock; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c index e859075db716..c3376f887114 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c @@ -143,12 +143,6 @@ struct ampdu_info { struct brcms_fifo_info fifo_tb[NUM_FFPLD_FIFO]; }; -/* used for flushing ampdu packets */ -struct cb_del_ampdu_pars { - struct ieee80211_sta *sta; - u16 tid; -}; - static void brcms_c_scb_ampdu_update_max_txlen(struct ampdu_info *ampdu, u8 dur) { u32 rate, mcs; diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c index eca1457caa0c..bc98b87cf2a1 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c @@ -10,7 +10,7 @@ #include "fw/api/txq.h" /* Highest firmware API version supported */ -#define IWL_BZ_UCODE_API_MAX 89 +#define IWL_BZ_UCODE_API_MAX 90 /* Lowest firmware API version supported */ #define IWL_BZ_UCODE_API_MIN 80 @@ -149,6 +149,8 @@ const struct iwl_cfg_trans_params iwl_bz_trans_cfg = { }; const char iwl_bz_name[] = "Intel(R) TBD Bz device"; +const char iwl_fm_name[] = "Intel(R) Wi-Fi 7 BE201 320MHz"; +const char iwl_gl_name[] = "Intel(R) Wi-Fi 7 BE200 320MHz"; const char iwl_mtp_name[] = "Intel(R) Wi-Fi 7 BE202 160MHz"; const struct iwl_cfg iwl_cfg_bz = { diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c index dbbcb2d0968c..9b79279fd76c 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c @@ -10,7 +10,7 @@ #include "fw/api/txq.h" /* Highest firmware API version supported */ -#define IWL_SC_UCODE_API_MAX 89 +#define IWL_SC_UCODE_API_MAX 90 /* Lowest firmware API version supported */ #define IWL_SC_UCODE_API_MIN 82 diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index 4caf2e25a297..fa339791223b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2019-2023 Intel Corporation + * Copyright (C) 2019-2024 Intel Corporation */ #include <linux/uuid.h> #include "iwl-drv.h" @@ -960,3 +960,37 @@ out_free: kfree(data); } IWL_EXPORT_SYMBOL(iwl_acpi_get_guid_lock_status); + +int iwl_acpi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value) +{ + union acpi_object *wifi_pkg, *data; + int ret = -ENOENT; + int tbl_rev; + + data = iwl_acpi_get_object(fwrt->dev, 
ACPI_WBEM_METHOD); + if (IS_ERR(data)) + return ret; + + wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, + ACPI_WBEM_WIFI_DATA_SIZE, + &tbl_rev); + if (IS_ERR(wifi_pkg)) + goto out_free; + + if (tbl_rev != IWL_ACPI_WBEM_REVISION) { + IWL_DEBUG_RADIO(fwrt, "Unsupported ACPI WBEM revision:%d\n", + tbl_rev); + goto out_free; + } + + if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) + goto out_free; + + *value = wifi_pkg->package.elements[1].integer.value & + IWL_ACPI_WBEM_REV0_MASK; + IWL_DEBUG_RADIO(fwrt, "Loaded WBEM config from ACPI\n"); + ret = 0; +out_free: + kfree(data); + return ret; +} diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h index 1d32b82f73db..bb88398a6987 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h @@ -27,6 +27,7 @@ #define ACPI_WTAS_METHOD "WTAS" #define ACPI_WPFC_METHOD "WPFC" #define ACPI_GLAI_METHOD "GLAI" +#define ACPI_WBEM_METHOD "WBEM" #define ACPI_WIFI_DOMAIN (0x07) @@ -67,6 +68,12 @@ #define ACPI_WRDD_WIFI_DATA_SIZE 2 #define ACPI_SPLC_WIFI_DATA_SIZE 2 #define ACPI_ECKV_WIFI_DATA_SIZE 2 + +/* + * One element for domain type, + * and one for enablement of Wi-Fi 320MHz per MCC + */ +#define ACPI_WBEM_WIFI_DATA_SIZE 2 /* * One element for domain type, * and one for the status @@ -94,6 +101,9 @@ #define ACPI_DSM_REV 0 +#define IWL_ACPI_WBEM_REV0_MASK (BIT(0) | BIT(1)) +#define IWL_ACPI_WBEM_REVISION 0 + #ifdef CONFIG_ACPI struct iwl_fw_runtime; @@ -142,6 +152,7 @@ void iwl_acpi_get_guid_lock_status(struct iwl_fw_runtime *fwrt); int iwl_acpi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func, u32 *value); +int iwl_acpi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value); #else /* CONFIG_ACPI */ static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev, @@ -205,6 +216,11 @@ static inline int iwl_acpi_get_dsm(struct iwl_fw_runtime *fwrt, { return -ENOENT; } + +static inline int iwl_acpi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value) +{ + return -ENOENT; +} #endif /* CONFIG_ACPI */ #endif /* __iwl_fw_acpi__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h index d2a74beed3a1..bbaaf3c73115 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -843,6 +843,52 @@ struct iwl_wowlan_info_notif_v2 { u8 reserved2[2]; } __packed; /* WOWLAN_INFO_NTFY_API_S_VER_2 */ +/* MAX MLO keys of non-active links that can arrive in the notification */ +#define WOWLAN_MAX_MLO_KEYS 18 + +/** + * enum iwl_wowlan_mlo_gtk_type - GTK types + * @WOWLAN_MLO_GTK_KEY_TYPE_GTK: GTK + * @WOWLAN_MLO_GTK_KEY_TYPE_IGTK: IGTK + * @WOWLAN_MLO_GTK_KEY_TYPE_BIGTK: BIGTK + * @WOWLAN_MLO_GTK_KEY_NUM_TYPES: number of key types + */ +enum iwl_wowlan_mlo_gtk_type { + WOWLAN_MLO_GTK_KEY_TYPE_GTK, + WOWLAN_MLO_GTK_KEY_TYPE_IGTK, + WOWLAN_MLO_GTK_KEY_TYPE_BIGTK, + WOWLAN_MLO_GTK_KEY_NUM_TYPES +}; /* WOWLAN_MLO_GTK_KEY_TYPE_API_E_VER_1 */ + +/** + * enum iwl_wowlan_mlo_gtk_flag - MLO GTK flags + * @WOWLAN_MLO_GTK_FLAG_KEY_LEN_MSK: 0 for len 16, 1 for len 32 + * @WOWLAN_MLO_GTK_FLAG_KEY_ID_MSK: key id (ranges from 0 to 7) + * 
@WOWLAN_MLO_GTK_FLAG_LINK_ID_MSK: spec link id of the key + * @WOWLAN_MLO_GTK_FLAG_KEY_TYPE_MSK: &enum iwl_wowlan_mlo_gtk_type + * @WOWLAN_MLO_GTK_FLAG_LAST_KEY_MSK: is this the last given key per + * key-type / link-id - the currently used key + */ +enum iwl_wowlan_mlo_gtk_flag { + WOWLAN_MLO_GTK_FLAG_KEY_LEN_MSK = 0x0001, + WOWLAN_MLO_GTK_FLAG_KEY_ID_MSK = 0x000E, + WOWLAN_MLO_GTK_FLAG_LINK_ID_MSK = 0x00F0, + WOWLAN_MLO_GTK_FLAG_KEY_TYPE_MSK = 0x0300, + WOWLAN_MLO_GTK_FLAG_LAST_KEY_MSK = 0x0400 +}; /* WOWLAN_MLO_GTK_FLAG_API_E_VER_1 */ + +/** + * struct iwl_wowlan_mlo_gtk - MLO GTK info + * @key: key material + * @flags: &enum iwl_wowlan_mlo_gtk_flag + * @pn: packet number + */ +struct iwl_wowlan_mlo_gtk { + u8 key[WOWLAN_KEY_MAX_SIZE]; + __le16 flags; + u8 pn[6]; +} __packed; /* WOWLAN_MLO_GTK_KEY_API_S_VER_1 */ + /** * struct iwl_wowlan_info_notif - WoWLAN information notification * @gtk: GTK data @@ -859,7 +905,10 @@ struct iwl_wowlan_info_notif_v2 { * @tid_tear_down: bit mask of tids whose BA sessions were closed * in suspend state * @station_id: station id + * @num_mlo_link_keys: number of &struct iwl_wowlan_mlo_gtk structs + * following this notif, or reserved in version < 4 * @reserved2: reserved + * @mlo_gtks: array of GTKs of size num_mlo_link_keys for version >= 4 */ struct iwl_wowlan_info_notif { struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM]; @@ -875,8 +924,10 @@ struct iwl_wowlan_info_notif { __le32 received_beacons; u8 tid_tear_down; u8 station_id; - u8 reserved2[2]; -} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_3 */ + u8 num_mlo_link_keys; + u8 reserved2; + struct iwl_wowlan_mlo_gtk mlo_gtks[]; +} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_3, _VER_4 */ /** * struct iwl_wowlan_wake_pkt_notif - WoWLAN wake packet notification diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h index 0f7903c5a4df..f272b6a4e72e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* + * Copyright (C) 2024 Intel Corporation * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH @@ -90,6 +91,12 @@ enum iwl_data_path_subcmd_ids { SEC_KEY_CMD = 0x18, /** + * @ESR_MODE_NOTIF: notification to recommend/force a wanted esr mode, + * uses &struct iwl_mvm_esr_mode_notif + */ + ESR_MODE_NOTIF = 0xF3, + + /** * @MONITOR_NOTIF: Datapath monitoring notification, using * &struct iwl_datapath_monitor_notif */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h index c6d1f5644638..754c5d655ad0 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2019, 2021-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2019, 2021-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -642,4 +642,25 @@ struct iwl_mvm_sta_disable_tx_cmd { __le32 disable; } __packed; /* STA_DISABLE_TX_API_S_VER_1 */ +/** + * enum iwl_mvm_fw_esr_recommendation - FW recommendation code + * @ESR_RECOMMEND_LEAVE: recommendation to leave esr + * @ESR_FORCE_LEAVE: force exiting esr + * 
@ESR_RECOMMEND_ENTER: recommendation to enter esr + */ +enum iwl_mvm_fw_esr_recommendation { + ESR_RECOMMEND_LEAVE, + ESR_FORCE_LEAVE, + ESR_RECOMMEND_ENTER, +}; /* ESR_MODE_RECOMMENDATION_CODE_API_E_VER_1 */ + +/** + * struct iwl_mvm_esr_mode_notif - FWs recommendation/force for esr mode + * + * @action: the action to apply on esr state. See &iwl_mvm_fw_esr_recommendation + */ +struct iwl_mvm_esr_mode_notif { + __le32 action; +} __packed; /* ESR_MODE_RECOMMENDATION_NTFY_API_S_VER_1 */ + #endif /* __iwl_fw_api_mac_cfg_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h index 58034dfa7e70..a08497a04733 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h @@ -7,7 +7,6 @@ #ifndef __iwl_fw_api_nvm_reg_h__ #define __iwl_fw_api_nvm_reg_h__ -#include "fw/regulatory.h" /** * enum iwl_regulatory_and_nvm_subcmd_ids - regulatory/NVM commands */ @@ -23,8 +22,9 @@ enum iwl_regulatory_and_nvm_subcmd_ids { * &struct iwl_lari_config_change_cmd_v3, * &struct iwl_lari_config_change_cmd_v4, * &struct iwl_lari_config_change_cmd_v5, - * &struct iwl_lari_config_change_cmd_v6 or - * &struct iwl_lari_config_change_cmd_v7 + * &struct iwl_lari_config_change_cmd_v6, + * &struct iwl_lari_config_change_cmd_v7 or + * &struct iwl_lari_config_change_cmd */ LARI_CONFIG_CHANGE = 0x1, @@ -46,9 +46,9 @@ enum iwl_regulatory_and_nvm_subcmd_ids { SAR_OFFSET_MAPPING_TABLE_CMD = 0x4, /** - * @UATS_TABLE_CMD: &struct iwl_uats_table_cmd + * @MCC_ALLOWED_AP_TYPE_CMD: &struct iwl_mcc_allowed_ap_type_cmd */ - UATS_TABLE_CMD = 0x5, + MCC_ALLOWED_AP_TYPE_CMD = 0x5, /** * @PNVM_INIT_COMPLETE_NTFY: &struct iwl_pnvm_init_complete_ntfy @@ -439,6 +439,7 @@ enum iwl_mcc_source { MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11, }; +#define IWL_WTAS_BLACK_LIST_MAX 16 /** * struct iwl_tas_config_cmd_common - configures the TAS. * This is also the v2 structure. @@ -609,7 +610,7 @@ struct iwl_lari_config_change_cmd_v6 { /** * struct iwl_lari_config_change_cmd_v7 - change LARI configuration - * This structure is used also for lari cmd version 8. + * This structure is used also for lari cmd version 8 and 9. * @config_bitmap: Bitmap of the config commands. Each bit will trigger a * different predefined FW config operation. * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets. @@ -619,6 +620,8 @@ struct iwl_lari_config_change_cmd_v6 { * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs.There are two bits * per country, one to indicate whether to override and the other to * indicate allow/disallow unii4 channels. + * For LARI cmd version 4 to 8 - bits 0:3 are supported. + * For LARI cmd version 9 - bits 0:5 are supported. * @chan_state_active_bitmap: Bitmap to enable different bands per country * or region. * Each bit represents a country or region, and a band to activate @@ -642,6 +645,46 @@ struct iwl_lari_config_change_cmd_v7 { } __packed; /* LARI_CHANGE_CONF_CMD_S_VER_7 */ /* LARI_CHANGE_CONF_CMD_S_VER_8 */ +/* LARI_CHANGE_CONF_CMD_S_VER_9 */ + +/** + * struct iwl_lari_config_change_cmd - change LARI configuration + * @config_bitmap: Bitmap of the config commands. Each bit will trigger a + * different predefined FW config operation. + * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets. + * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits + * per country, one to indicate whether to override and the other to + * indicate the value to use. 
+ * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs.There are two bits + * per country, one to indicate whether to override and the other to + * indicate allow/disallow unii4 channels. + * For LARI cmd version 10 - bits 0:5 are supported. + * @chan_state_active_bitmap: Bitmap to enable different bands per country + * or region. + * Each bit represents a country or region, and a band to activate + * according to the BIOS definitions. + * For LARI cmd version 10 - bits 0:4 are supported. + * @force_disable_channels_bitmap: Bitmap of disabled bands/channels. + * Each bit represents a set of channels in a specific band that should be + * disabled + * @edt_bitmap: Bitmap of energy detection threshold table. + * Disable/enable the EDT optimization method for different band. + * @oem_320mhz_allow_bitmap: 320Mhz bandwidth enablement bitmap per MCC. + * bit0: enable 320Mhz in Japan. + * bit1: enable 320Mhz in South Korea. + * bit 2 - 31: reserved. + */ +struct iwl_lari_config_change_cmd { + __le32 config_bitmap; + __le32 oem_uhb_allow_bitmap; + __le32 oem_11ax_allow_bitmap; + __le32 oem_unii4_allow_bitmap; + __le32 chan_state_active_bitmap; + __le32 force_disable_channels_bitmap; + __le32 edt_bitmap; + __le32 oem_320mhz_allow_bitmap; +} __packed; +/* LARI_CHANGE_CONF_CMD_S_VER_10 */ /* Activate UNII-1 (5.2GHz) for World Wide */ #define ACTIVATE_5G2_IN_WW_MASK BIT(4) @@ -658,13 +701,13 @@ struct iwl_pnvm_init_complete_ntfy { #define UATS_TABLE_COL_SIZE 13 /** - * struct iwl_uats_table_cmd - struct for UATS_TABLE_CMD + * struct iwl_mcc_allowed_ap_type_cmd - struct for MCC_ALLOWED_AP_TYPE_CMD * @offset_map: mapping a mcc to UHB AP type support (UATS) allowed * @reserved: reserved */ -struct iwl_uats_table_cmd { +struct iwl_mcc_allowed_ap_type_cmd { u8 offset_map[UATS_TABLE_ROW_SIZE][UATS_TABLE_COL_SIZE]; __le16 reserved; -} __packed; /* UATS_TABLE_CMD_S_VER_1 */ +} __packed; /* MCC_ALLOWED_AP_TYPE_CMD_API_S_VER_1 */ #endif /* __iwl_fw_api_nvm_reg_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h index 2d2b9c8c36ea..2ed7acc09e5a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h @@ -3,7 +3,7 @@ * Copyright (C) 2012-2014 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH - * Copyright (C) 2021-2023 Intel Corporation + * Copyright (C) 2021-2024 Intel Corporation */ #ifndef __iwl_fw_api_offload_h__ #define __iwl_fw_api_offload_h__ @@ -20,7 +20,7 @@ enum iwl_prot_offload_subcmd_ids { /** * @WOWLAN_INFO_NOTIFICATION: Notification in * &struct iwl_wowlan_info_notif_v1, &struct iwl_wowlan_info_notif_v2, - * or iwl_wowlan_info_notif + * or &struct iwl_wowlan_info_notif */ WOWLAN_INFO_NOTIFICATION = 0xFD, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h index 5a3f30e5e06d..92e4b62c119f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2019-2022 Intel Corporation + * Copyright (C) 2012-2014, 2019-2022, 2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -43,6 +43,11 @@ enum iwl_phy_ops_subcmd_ids { PER_PLATFORM_ANT_GAIN_CMD = 0x07, /** + * @AP_TX_POWER_CONSTRAINTS_CMD: &struct 
iwl_txpower_constraints_cmd + */ + AP_TX_POWER_CONSTRAINTS_CMD = 0x0C, + + /** * @CT_KILL_NOTIFICATION: &struct ct_kill_notif */ CT_KILL_NOTIFICATION = 0xFE, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h index 0bf38243f88a..532d5cfa9162 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -385,6 +385,33 @@ struct iwl_dev_tx_power_cmd_v7 { __le32 timer_period; __le32 flags; } __packed; /* TX_REDUCED_POWER_API_S_VER_7 */ + +/** + * struct iwl_dev_tx_power_cmd_v8 - TX power reduction command version 8 + * @per_chain: per chain restrictions + * @enable_ack_reduction: enable or disable close range ack TX power + * reduction. + * @per_chain_restriction_changed: is per_chain_restriction has changed + * from last command. used if set_mode is + * IWL_TX_POWER_MODE_SET_SAR_TIMER. + * note: if not changed, the command is used for keep alive only. + * @reserved: reserved (padding) + * @timer_period: timer in milliseconds. if expires FW will change to default + * BIOS values. relevant if setMode is IWL_TX_POWER_MODE_SET_SAR_TIMER + * @flags: reduce power flags. + * @tpc_vlp_backoff_level: user backoff of UNII5,7 VLP channels in USA. + * Not in use. + */ +struct iwl_dev_tx_power_cmd_v8 { + __le16 per_chain[IWL_NUM_CHAIN_TABLES_V2][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2]; + u8 enable_ack_reduction; + u8 per_chain_restriction_changed; + u8 reserved[2]; + __le32 timer_period; + __le32 flags; + __le32 tpc_vlp_backoff_level; +} __packed; /* TX_REDUCED_POWER_API_S_VER_8 */ + /** * struct iwl_dev_tx_power_cmd - TX power reduction command (multiversion) * @common: common part of the command @@ -392,6 +419,8 @@ struct iwl_dev_tx_power_cmd_v7 { * @v4: version 4 part of the command * @v5: version 5 part of the command * @v6: version 6 part of the command + * @v7: version 7 part of the command + * @v8: version 8 part of the command */ struct iwl_dev_tx_power_cmd { struct iwl_dev_tx_power_common common; @@ -401,6 +430,7 @@ struct iwl_dev_tx_power_cmd { struct iwl_dev_tx_power_cmd_v5 v5; struct iwl_dev_tx_power_cmd_v6 v6; struct iwl_dev_tx_power_cmd_v7 v7; + struct iwl_dev_tx_power_cmd_v8 v8; }; }; @@ -537,7 +567,7 @@ enum iwl_ppag_flags { * union iwl_ppag_table_cmd - union for all versions of PPAG command * @v1: version 1 * @v2: version 2 - * version 3, 4 and 5 are the same structure as v2, + * version 3, 4, 5 and 6 are the same structure as v2, * but has a different format of the flags bitmap * @flags: values from &enum iwl_ppag_flags * @gain: table of antenna gain values per chain and sub-band @@ -702,4 +732,44 @@ struct iwl_beacon_filter_cmd { #define IWL_BF_CMD_CONFIG_DEFAULTS IWL_BF_CMD_CONFIG(_DEFAULT) #define IWL_BF_CMD_CONFIG_D0I3 IWL_BF_CMD_CONFIG(_D0I3) + +#define DEFAULT_TPE_TX_POWER 0x7F + +/* + * Bandwidth: 20/40/80/(160/80+80)/320 + */ +#define IWL_MAX_TX_EIRP_PWR_MAX_SIZE 5 +#define IWL_MAX_TX_EIRP_PSD_PWR_MAX_SIZE 16 + +enum iwl_6ghz_ap_type { + IWL_6GHZ_AP_TYPE_LPI, + IWL_6GHZ_AP_TYPE_SP, + IWL_6GHZ_AP_TYPE_VLP, +}; /* PHY_AP_TYPE_API_E_VER_1 */ + +/** + * struct iwl_txpower_constraints_cmd + * AP_TX_POWER_CONSTRAINTS_CMD + * Used for VLP/LPI/AFC Access Point power 
constraints for 6GHz channels + * @link_id: linkId + * @ap_type: see &enum iwl_ap_type + * @eirp_pwr: 8-bit 2s complement signed integer in the range + * -64 dBm to 63 dBm with a 0.5 dB step + * default &DEFAULT_TPE_TX_POWER (no maximum limit) + * @psd_pwr: 8-bit 2s complement signed integer in the range + * -63.5 to +63 dBm/MHz with a 0.5 step + * value - 128 indicates that the corresponding 20 + * MHz channel cannot be used for transmission. + * value +127 indicates that no maximum PSD limit + * is specified for the corresponding 20 MHz channel + * default &DEFAULT_TPE_TX_POWER (no maximum limit) + * @reserved: reserved (padding) + */ +struct iwl_txpower_constraints_cmd { + __le16 link_id; + __le16 ap_type; + __s8 eirp_pwr[IWL_MAX_TX_EIRP_PWR_MAX_SIZE]; + __s8 psd_pwr[IWL_MAX_TX_EIRP_PSD_PWR_MAX_SIZE]; + u8 reserved[3]; +} __packed; /* PHY_AP_TX_POWER_CONSTRAINTS_CMD_API_S_VER_1 */ #endif /* __iwl_fw_api_power_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h index 93078f8cc08c..6684506f4fc4 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -14,6 +14,10 @@ */ enum iwl_scan_subcmd_ids { /** + * @CHANNEL_SURVEY_NOTIF: &struct iwl_umac_scan_channel_survey_notif + */ + CHANNEL_SURVEY_NOTIF = 0xFB, + /** * @OFFLOAD_MATCH_INFO_NOTIF: &struct iwl_scan_offload_match_info */ OFFLOAD_MATCH_INFO_NOTIF = 0xFC, @@ -62,6 +66,8 @@ struct iwl_ssid_ie { #define IWL_FAST_SCHED_SCAN_ITERATIONS 3 #define IWL_MAX_SCHED_SCAN_PLANS 2 +#define IWL_MAX_NUM_NOISE_RESULTS 22 + enum scan_framework_client { SCAN_CLIENT_SCHED_SCAN = BIT(0), SCAN_CLIENT_NETDETECT = BIT(1), @@ -642,10 +648,13 @@ enum iwl_umac_scan_general_flags { * notification per channel or not. * @IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER: Whether to allow channel * reorder optimization or not. + * @IWL_UMAC_SCAN_GEN_FLAGS2_COLLECT_CHANNEL_STATS: Enable channel statistics + * collection when #IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE is set. 
*/ enum iwl_umac_scan_general_flags2 { IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL = BIT(0), IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER = BIT(1), + IWL_UMAC_SCAN_GEN_FLAGS2_COLLECT_CHANNEL_STATS = BIT(3), }; /** @@ -1258,4 +1267,26 @@ struct iwl_umac_scan_iter_complete_notif { struct iwl_scan_results_notif results[]; } __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_2 */ +/** + * struct iwl_umac_scan_channel_survey_notif - data for survey + * @channel: the channel scanned + * @band: band of channel + * @noise: noise floor measurements in negative dBm, invalid 0xff + * @reserved: for future use and alignment + * @active_time: time in ms the radio was turned on (on the channel) + * @busy_time: time in ms the channel was sensed busy, 0 for a clean channel + * @tx_time: time the radio spent transmitting data + * @rx_time: time the radio spent receiving data + */ +struct iwl_umac_scan_channel_survey_notif { + __le32 channel; + __le32 band; + u8 noise[IWL_MAX_NUM_NOISE_RESULTS]; + u8 reserved[2]; + __le32 active_time; + __le32 busy_time; + __le32 tx_time; + __le32 rx_time; +} __packed; /* SCAN_CHANNEL_SURVEY_NTF_API_S_VER_1 */ + #endif /* __iwl_fw_api_scan_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h index d9e4c75403b8..bbd176d88820 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2024 Intel Corporation * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_tx_h__ @@ -793,7 +793,8 @@ enum iwl_mac_beacon_flags { * @reserved: reserved * @link_id: the firmware id of the link that will use this beacon * @tim_idx: the offset of the tim IE in the beacon - * @tim_size: the length of the tim IE + * @tim_size: the length of the tim IE (version < 14) + * @btwt_offset: offset to the broadcast TWT IE if present (version >= 14) * @ecsa_offset: offset to the ECSA IE if present * @csa_offset: offset to the CSA IE if present * @frame: the template of the beacon frame @@ -805,14 +806,18 @@ struct iwl_mac_beacon_cmd { __le32 reserved; __le32 link_id; __le32 tim_idx; - __le32 tim_size; + union { + __le32 tim_size; + __le32 btwt_offset; + }; __le32 ecsa_offset; __le32 csa_offset; struct ieee80211_hdr frame[]; } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_10, * BEACON_TEMPLATE_CMD_API_S_VER_11, * BEACON_TEMPLATE_CMD_API_S_VER_12, - * BEACON_TEMPLATE_CMD_API_S_VER_13 + * BEACON_TEMPLATE_CMD_API_S_VER_13, + * BEACON_TEMPLATE_CMD_API_S_VER_14 */ struct iwl_beacon_notif { diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index c3bdf433d8f7..945ffc083d25 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -1026,17 +1026,12 @@ static int iwl_dump_ini_prph_mac_iter_common(struct iwl_fw_runtime *fwrt, { struct iwl_fw_ini_error_dump_range *range = range_ptr; __le32 *val = range->data; - u32 prph_val; int i; range->internal_base_addr = cpu_to_le32(addr); range->range_data_size = size; - for (i = 0; i < le32_to_cpu(size); i += 4) { - prph_val = iwl_read_prph(fwrt->trans, addr + i); - if (iwl_trans_is_hw_error_value(prph_val)) - return -EBUSY; - *val++ = cpu_to_le32(prph_val); - } + for (i = 0; i < le32_to_cpu(size); i += 4) + *val++ = cpu_to_le32(iwl_read_prph(fwrt->trans, addr + i)); return sizeof(*range) 
+ le32_to_cpu(range->range_data_size); } @@ -3084,6 +3079,7 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx) if (!test_bit(wk_idx, &fwrt->dump.active_wks)) return; + /* also checks 'desc' for pre-ini mode, since that shadows in union */ if (!dump_data->trig) { IWL_ERR(fwrt, "dump trigger data is not set\n"); goto out; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index f69d29e531c8..ae05227b6153 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -395,6 +395,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_SPP_AMSDU_SUPPORT: Support SPP (signaling and payload * protected) A-MSDU. * @IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT: Support secure LTF measurement. + * @IWL_UCODE_TLV_CAPA_MONITOR_PASSIVE_CHANS: Support monitor mode on otherwise + * passive channels * * @NUM_IWL_UCODE_TLV_CAPA: number of bits used */ @@ -494,6 +496,7 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_SNIFF_VALIDATE_SUPPORT = (__force iwl_ucode_tlv_capa_t)116, IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT = (__force iwl_ucode_tlv_capa_t)117, IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT = (__force iwl_ucode_tlv_capa_t)121, + IWL_UCODE_TLV_CAPA_MONITOR_PASSIVE_CHANS = (__force iwl_ucode_tlv_capa_t)122, NUM_IWL_UCODE_TLV_CAPA /* * This construction make both sparse (which cannot increment the previous diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c index 36d506463e0e..b9bb3636e88f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c @@ -38,6 +38,7 @@ IWL_BIOS_TABLE_LOADER_DATA(tas_table, struct iwl_tas_data); IWL_BIOS_TABLE_LOADER_DATA(pwr_limit, u64); IWL_BIOS_TABLE_LOADER_DATA(mcc, char); IWL_BIOS_TABLE_LOADER_DATA(eckv, u32); +IWL_BIOS_TABLE_LOADER_DATA(wbem, u32); static const struct dmi_system_id dmi_ppag_approved_list[] = { @@ -347,7 +348,7 @@ int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt, "PPAG table rev is %d, send truncated table\n", fwrt->ppag_ver); } - } else if (cmd_ver >= 2 && cmd_ver <= 5) { + } else if (cmd_ver >= 2 && cmd_ver <= 6) { num_sub_bands = IWL_NUM_SUB_BANDS_V2; gain = cmd->v2.gain[0]; *cmd_size = sizeof(cmd->v2); @@ -443,7 +444,7 @@ int iwl_parse_tas_selection(struct iwl_fw_runtime *fwrt, return enabled; } -__le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt) +static __le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt) { int ret; u32 val; @@ -490,7 +491,127 @@ __le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt) return config_bitmap; } -IWL_EXPORT_SYMBOL(iwl_get_lari_config_bitmap); + +static size_t iwl_get_lari_config_cmd_size(u8 cmd_ver) +{ + size_t cmd_size; + + switch (cmd_ver) { + case 10: + cmd_size = sizeof(struct iwl_lari_config_change_cmd); + break; + case 9: + case 8: + case 7: + cmd_size = sizeof(struct iwl_lari_config_change_cmd_v7); + break; + case 6: + cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6); + break; + case 5: + cmd_size = sizeof(struct iwl_lari_config_change_cmd_v5); + break; + case 4: + cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4); + break; + case 3: + cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3); + break; + case 2: + cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2); + break; + default: + cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1); + break; + } + return cmd_size; +} + +int 
iwl_fill_lari_config(struct iwl_fw_runtime *fwrt, + struct iwl_lari_config_change_cmd *cmd, + size_t *cmd_size) +{ + int ret; + u32 value; + u8 cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw, + WIDE_ID(REGULATORY_AND_NVM_GROUP, + LARI_CONFIG_CHANGE), 1); + + memset(cmd, 0, sizeof(*cmd)); + *cmd_size = iwl_get_lari_config_cmd_size(cmd_ver); + + cmd->config_bitmap = iwl_get_lari_config_bitmap(fwrt); + + ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_11AX_ENABLEMENT, &value); + if (!ret) + cmd->oem_11ax_allow_bitmap = cpu_to_le32(value); + + ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_UNII4_CHAN, &value); + if (!ret) { + if (cmd_ver < 9) + value &= DSM_UNII4_ALLOW_BITMAP_CMD_V8; + else + value &= DSM_UNII4_ALLOW_BITMAP; + + cmd->oem_unii4_allow_bitmap = cpu_to_le32(value); + } + + ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ACTIVATE_CHANNEL, &value); + if (!ret) { + if (cmd_ver < 8) + value &= ~ACTIVATE_5G2_IN_WW_MASK; + cmd->chan_state_active_bitmap = cpu_to_le32(value); + } + + ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_6E, &value); + if (!ret) + cmd->oem_uhb_allow_bitmap = cpu_to_le32(value); + + ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_FORCE_DISABLE_CHANNELS, &value); + if (!ret) + cmd->force_disable_channels_bitmap = cpu_to_le32(value); + + ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENERGY_DETECTION_THRESHOLD, + &value); + if (!ret) + cmd->edt_bitmap = cpu_to_le32(value); + + ret = iwl_bios_get_wbem(fwrt, &value); + if (!ret) + cmd->oem_320mhz_allow_bitmap = cpu_to_le32(value); + + if (cmd->config_bitmap || + cmd->oem_uhb_allow_bitmap || + cmd->oem_11ax_allow_bitmap || + cmd->oem_unii4_allow_bitmap || + cmd->chan_state_active_bitmap || + cmd->force_disable_channels_bitmap || + cmd->edt_bitmap || + cmd->oem_320mhz_allow_bitmap) { + IWL_DEBUG_RADIO(fwrt, + "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n", + le32_to_cpu(cmd->config_bitmap), + le32_to_cpu(cmd->oem_11ax_allow_bitmap)); + IWL_DEBUG_RADIO(fwrt, + "sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, chan_state_active_bitmap=0x%x, cmd_ver=%d\n", + le32_to_cpu(cmd->oem_unii4_allow_bitmap), + le32_to_cpu(cmd->chan_state_active_bitmap), + cmd_ver); + IWL_DEBUG_RADIO(fwrt, + "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n", + le32_to_cpu(cmd->oem_uhb_allow_bitmap), + le32_to_cpu(cmd->force_disable_channels_bitmap)); + IWL_DEBUG_RADIO(fwrt, + "sending LARI_CONFIG_CHANGE, edt_bitmap=0x%x, oem_320mhz_allow_bitmap=0x%x\n", + le32_to_cpu(cmd->edt_bitmap), + le32_to_cpu(cmd->oem_320mhz_allow_bitmap)); + } else { + return 1; + } + + return 0; +} +IWL_EXPORT_SYMBOL(iwl_fill_lari_config); int iwl_bios_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func, u32 *value) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h index 28e774766847..633c9ad9af84 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2023 Intel Corporation + * Copyright (C) 2023-2024 Intel Corporation */ #ifndef __fw_regulatory_h__ @@ -11,6 +11,7 @@ #include "fw/api/power.h" #include "fw/api/phy.h" #include "fw/api/config.h" +#include "fw/api/nvm-reg.h" #include "fw/img.h" #include "iwl-trans.h" @@ -39,7 +40,6 @@ #define IWL_PPAG_ETSI_CHINA_MASK 3 #define IWL_PPAG_REV3_MASK 0x7FF -#define IWL_WTAS_BLACK_LIST_MAX 16 #define IWL_WTAS_ENABLED_MSK 0x1 #define IWL_WTAS_OVERRIDE_IEC_MSK 0x2 #define 
IWL_WTAS_ENABLE_IEC_MSK 0x4 @@ -132,6 +132,23 @@ enum iwl_dsm_values_indonesia { DSM_VALUE_INDONESIA_MAX }; +enum iwl_dsm_unii4_bitmap { + DSM_VALUE_UNII4_US_OVERRIDE_MSK = BIT(0), + DSM_VALUE_UNII4_US_EN_MSK = BIT(1), + DSM_VALUE_UNII4_ETSI_OVERRIDE_MSK = BIT(2), + DSM_VALUE_UNII4_ETSI_EN_MSK = BIT(3), + DSM_VALUE_UNII4_CANADA_OVERRIDE_MSK = BIT(4), + DSM_VALUE_UNII4_CANADA_EN_MSK = BIT(5), +}; + +#define DSM_UNII4_ALLOW_BITMAP_CMD_V8 (DSM_VALUE_UNII4_US_OVERRIDE_MSK | \ + DSM_VALUE_UNII4_US_EN_MSK | \ + DSM_VALUE_UNII4_ETSI_OVERRIDE_MSK | \ + DSM_VALUE_UNII4_ETSI_EN_MSK) +#define DSM_UNII4_ALLOW_BITMAP (DSM_UNII4_ALLOW_BITMAP_CMD_V8 | \ + DSM_VALUE_UNII4_CANADA_OVERRIDE_MSK | \ + DSM_VALUE_UNII4_CANADA_EN_MSK) + enum iwl_dsm_values_rfi { DSM_VALUE_RFI_DLVR_DISABLE = BIT(0), DSM_VALUE_RFI_DDR_DISABLE = BIT(1), @@ -184,8 +201,11 @@ int iwl_bios_get_pwr_limit(struct iwl_fw_runtime *fwrt, int iwl_bios_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc); int iwl_bios_get_eckv(struct iwl_fw_runtime *fwrt, u32 *ext_clk); +int iwl_bios_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value); -__le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt); +int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt, + struct iwl_lari_config_change_cmd *cmd, + size_t *cmd_size); int iwl_bios_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func, u32 *value); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index b2bc4fd37abf..9122f9a1260a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -46,6 +46,10 @@ struct iwl_fwrt_shared_mem_cfg { * struct iwl_fwrt_dump_data - dump data * @trig: trigger the worker was scheduled upon * @fw_pkt: packet received from FW + * + * Note that the decision which part of the union is used + * is based on iwl_trans_dbg_ini_valid(): the 'trig' part + * is used if it is %true, the 'desc' part otherwise. 
*/ struct iwl_fwrt_dump_data { union { @@ -54,6 +58,7 @@ struct iwl_fwrt_dump_data { struct iwl_rx_packet *fw_pkt; }; struct { + /* must be first to be same as 'trig' */ const struct iwl_fw_dump_desc *desc; bool monitor_only; }; @@ -177,7 +182,7 @@ struct iwl_fw_runtime { u8 ppag_ver; struct iwl_sar_offset_mapping_cmd sgom_table; bool sgom_enabled; - struct iwl_uats_table_cmd uats_table; + struct iwl_mcc_allowed_ap_type_cmd uats_table; u8 uefi_tables_lock_status; bool uats_enabled; }; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c index e81fc0129b9d..fb982d4fe851 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c @@ -674,6 +674,29 @@ out: return ret; } +int iwl_uefi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value) +{ + struct uefi_cnv_wlan_wbem_data *data; + int ret = 0; + + data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WBEM_NAME, + "WBEM", sizeof(*data), NULL); + if (IS_ERR(data)) + return -EINVAL; + + if (data->revision != IWL_UEFI_WBEM_REVISION) { + ret = -EINVAL; + IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WBEM revision:%d\n", + data->revision); + goto out; + } + *value = data->wbem_320mhz_per_mcc & IWL_UEFI_WBEM_REV0_MASK; + IWL_DEBUG_RADIO(fwrt, "Loaded WBEM config from UEFI\n"); +out: + kfree(data); + return ret; +} + int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func, u32 *value) { diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h index 303cc299d1bc..1f8884ca8997 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright(c) 2021-2023 Intel Corporation + * Copyright(c) 2021-2024 Intel Corporation */ #ifndef __iwl_fw_uefi__ #define __iwl_fw_uefi__ @@ -21,6 +21,7 @@ #define IWL_UEFI_WRDD_NAME L"UefiCnvWlanWRDD" #define IWL_UEFI_ECKV_NAME L"UefiCnvWlanECKV" #define IWL_UEFI_DSM_NAME L"UefiCnvWlanGeneralCfg" +#define IWL_UEFI_WBEM_NAME L"UefiCnvWlanWBEM" #define IWL_SGOM_MAP_SIZE 339 @@ -35,6 +36,7 @@ #define IWL_UEFI_SPLC_REVISION 0 #define IWL_UEFI_WRDD_REVISION 0 #define IWL_UEFI_ECKV_REVISION 0 +#define IWL_UEFI_WBEM_REVISION 0 #define IWL_UEFI_DSM_REVISION 4 struct pnvm_sku_package { @@ -178,6 +180,20 @@ struct uefi_cnv_var_general_cfg { u32 functions[UEFI_MAX_DSM_FUNCS]; } __packed; +#define IWL_UEFI_WBEM_REV0_MASK (BIT(0) | BIT(1)) +/* struct uefi_cnv_wlan_wbem_data - Bandwidth enablement per MCC as defined + * in UEFI + * @revision: the revision of the table + * @wbem_320mhz_per_mcc: enablement of 320MHz bandwidth per MCC + * bit 0 - if set, 320MHz is enabled for Japan + * bit 1 - if set, 320MHz is enabled for South Korea + * bit 2- 31, Reserved + */ +struct uefi_cnv_wlan_wbem_data { + u8 revision; + u32 wbem_320mhz_per_mcc; +} __packed; + /* * This is known to be broken on v4.19 and to work on v5.4. 
Until we * figure out why this is the case and how to make it work, simply @@ -202,6 +218,7 @@ int iwl_uefi_get_pwr_limit(struct iwl_fw_runtime *fwrt, u64 *dflt_pwr_limit); int iwl_uefi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc); int iwl_uefi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk); +int iwl_uefi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value); int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func, u32 *value); void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt); @@ -281,6 +298,11 @@ static inline int iwl_uefi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk) return -ENOENT; } +static inline int iwl_uefi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value) +{ + return -ENOENT; +} + static inline int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func, u32 *value) { diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 6aa4f7f9c708..732889f96ca2 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -11,6 +11,7 @@ #include <linux/netdevice.h> #include <linux/ieee80211.h> #include <linux/nl80211.h> +#include <linux/mod_devicetable.h> #include "iwl-csr.h" #include "iwl-drv.h" @@ -421,6 +422,7 @@ struct iwl_cfg { #define IWL_CFG_MAC_TYPE_SC 0x48 #define IWL_CFG_MAC_TYPE_SC2 0x49 #define IWL_CFG_MAC_TYPE_SC2F 0x4A +#define IWL_CFG_MAC_TYPE_BZ_W 0x4B #define IWL_CFG_RF_TYPE_TH 0x105 #define IWL_CFG_RF_TYPE_TH1 0x108 @@ -429,8 +431,6 @@ struct iwl_cfg { #define IWL_CFG_RF_TYPE_HR2 0x10A #define IWL_CFG_RF_TYPE_HR1 0x10C #define IWL_CFG_RF_TYPE_GF 0x10D -#define IWL_CFG_RF_TYPE_MR 0x110 -#define IWL_CFG_RF_TYPE_MS 0x111 #define IWL_CFG_RF_TYPE_FM 0x112 #define IWL_CFG_RF_TYPE_WH 0x113 @@ -484,6 +484,7 @@ const struct iwl_dev_info * iwl_pci_find_dev_info(u16 device, u16 subsystem_device, u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb, u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step); +extern const struct pci_device_id iwl_hw_card_ids[]; #endif /* @@ -541,6 +542,8 @@ extern const char iwl_ax221_name[]; extern const char iwl_ax231_name[]; extern const char iwl_ax411_name[]; extern const char iwl_bz_name[]; +extern const char iwl_fm_name[]; +extern const char iwl_gl_name[]; extern const char iwl_mtp_name[]; extern const char iwl_sc_name[]; extern const char iwl_sc2_name[]; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h index 1379dc2d231b..5b62933134cf 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2018, 2020-2023 Intel Corporation + * Copyright (C) 2018, 2020-2024 Intel Corporation */ #ifndef __iwl_context_info_file_gen3_h__ #define __iwl_context_info_file_gen3_h__ @@ -56,6 +56,8 @@ enum iwl_prph_scratch_mtr_format { * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K: 8kB RB size * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K: 12kB RB size * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K: 16kB RB size + * @IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE: Indicate fw to set SCU_FORCE_ACTIVE + * upon reset. 
*/ enum iwl_prph_scratch_flags { IWL_PRPH_SCRATCH_IMR_DEBUG_EN = BIT(1), @@ -71,6 +73,7 @@ enum iwl_prph_scratch_flags { IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K = 8 << 20, IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K = 9 << 20, IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K = 10 << 20, + IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE = BIT(29), }; /* diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 4696d73c8971..33654f228ee8 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -192,12 +192,6 @@ const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf) case IWL_CFG_RF_TYPE_GF: rf = "gf"; break; - case IWL_CFG_RF_TYPE_MR: - rf = "mr"; - break; - case IWL_CFG_RF_TYPE_MS: - rf = "ms"; - break; case IWL_CFG_RF_TYPE_FM: rf = "fm"; break; @@ -1484,7 +1478,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX]; u32 api_ver; int i; - bool load_module = false; bool usniffer_images = false; bool failure = true; @@ -1732,19 +1725,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) goto out_unbind; } } else { - load_module = true; + request_module_nowait("%s", op->name); } mutex_unlock(&iwlwifi_opmode_table_mtx); complete(&drv->request_firmware_complete); - /* - * Load the module last so we don't block anything - * else from proceeding if the module fails to load - * or hangs loading. - */ - if (load_module) - request_module("%s", op->name); failure = false; goto free; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index baa39a18087a..149903f52567 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -392,11 +392,14 @@ static enum nl80211_band iwl_nl80211_band_from_channel_idx(int ch_idx) return NL80211_BAND_2GHZ; } -static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, +static int iwl_init_channel_map(struct iwl_trans *trans, + const struct iwl_fw *fw, struct iwl_nvm_data *data, const void * const nvm_ch_flags, u32 sbands_flags, bool v4) { + const struct iwl_cfg *cfg = trans->cfg; + struct device *dev = trans->dev; int ch_idx; int n_channels = 0; struct ieee80211_channel *channel; @@ -478,11 +481,10 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, else channel->flags = 0; - /* TODO: Don't put limitations on UHB devices as we still don't - * have NVM for them - */ - if (cfg->uhb_supported) - channel->flags = 0; + if (fw_has_capa(&fw->ucode_capa, + IWL_UCODE_TLV_CAPA_MONITOR_PASSIVE_CHANS)) + channel->flags |= IEEE80211_CHAN_CAN_MONITOR; + iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM, channel->hw_value, ch_flags); IWL_DEBUG_EEPROM(dev, "Ch. 
%d: %ddBm\n", @@ -597,7 +599,8 @@ static const u8 iwl_vendor_caps[] = { static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = { { - .types_mask = BIT(NL80211_IFTYPE_STATION), + .types_mask = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_P2P_CLIENT), .he_cap = { .has_he = true, .he_cap_elem = { @@ -753,7 +756,8 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = { }, }, { - .types_mask = BIT(NL80211_IFTYPE_AP), + .types_mask = BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_GO), .he_cap = { .has_he = true, .he_cap_elem = { @@ -906,7 +910,8 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, u8 tx_chains, u8 rx_chains, const struct iwl_fw *fw) { - bool is_ap = iftype_data->types_mask & BIT(NL80211_IFTYPE_AP); + bool is_ap = iftype_data->types_mask & (BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_GO)); bool no_320; no_320 = (!trans->trans_cfg->integrated && @@ -1023,8 +1028,6 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) { case IWL_CFG_RF_TYPE_GF: - case IWL_CFG_RF_TYPE_MR: - case IWL_CFG_RF_TYPE_MS: case IWL_CFG_RF_TYPE_FM: case IWL_CFG_RF_TYPE_WH: iftype_data->he_cap.he_cap_elem.phy_cap_info[9] |= @@ -1176,12 +1179,11 @@ static void iwl_init_sbands(struct iwl_trans *trans, const struct iwl_fw *fw) { struct device *dev = trans->dev; - const struct iwl_cfg *cfg = trans->cfg; int n_channels; int n_used = 0; struct ieee80211_supported_band *sband; - n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags, + n_channels = iwl_init_channel_map(trans, fw, data, nvm_ch_flags, sbands_flags, v4); sband = &data->bands[NL80211_BAND_2GHZ]; sband->band = NL80211_BAND_2GHZ; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index a7d44df06eab..898e22e0d1ab 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2023 Intel Corporation + * Copyright (C) 2005-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016 Intel Deutschland GmbH */ @@ -371,7 +371,10 @@ enum { #define CNVI_AUX_MISC_CHIP 0xA200B0 #define CNVI_AUX_MISC_CHIP_MAC_STEP(_val) (((_val) & 0xf000000) >> 24) #define CNVI_AUX_MISC_CHIP_PROD_TYPE(_val) ((_val) & 0xfff) +#define CNVI_AUX_MISC_CHIP_PROD_TYPE_GL 0x910 #define CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_U 0x930 +#define CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_I 0x900 +#define CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_W 0x901 #define CNVR_AUX_MISC_CHIP 0xA2B800 #define CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM 0xA29890 @@ -453,11 +456,7 @@ enum { #define REG_CRF_ID_TYPE_HR_NONE_CDB_1X1 0x501 #define REG_CRF_ID_TYPE_HR_NONE_CDB_CCP 0x532 #define REG_CRF_ID_TYPE_GF 0x410 -#define REG_CRF_ID_TYPE_GF_TC 0xF08 -#define REG_CRF_ID_TYPE_MR 0x810 #define REG_CRF_ID_TYPE_FM 0x910 -#define REG_CRF_ID_TYPE_FMI 0x930 -#define REG_CRF_ID_TYPE_FMR 0x900 #define REG_CRF_ID_TYPE_WHP 0xA10 #define HPM_DEBUG 0xA03440 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile index 593fe28d89cf..5c754b87ea20 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_IWLMVM) += iwlmvm.o +obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += tests/ iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o 
iwlmvm-y += utils.o rx.o rxmq.o tx.o binding.o quota.o sta.o sf.o iwlmvm-y += scan.o time-event.o rs.o rs-fw.o @@ -15,4 +16,4 @@ iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o iwlmvm-$(CONFIG_PM) += d3.o iwlmvm-$(CONFIG_IWLMEI) += vendor-cmd.o -ccflags-y += -I $(srctree)/$(src)/../ +subdir-ccflags-y += -I $(srctree)/$(src)/../ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c index 535edb51d1c0..ad3e14a0d043 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2013-2014, 2018-2020, 2022-2023 Intel Corporation + * Copyright (C) 2013-2014, 2018-2020, 2022-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH */ #include <linux/ieee80211.h> @@ -219,15 +219,13 @@ struct iwl_bt_iterator_data { static inline void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, + struct iwl_mvm_vif_link_info *link_info, bool enable, int rssi) { - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - mvmvif->bf_data.last_bt_coex_event = rssi; - mvmvif->bf_data.bt_coex_max_thold = + link_info->bf_data.last_bt_coex_event = rssi; + link_info->bf_data.bt_coex_max_thold = enable ? -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH : 0; - mvmvif->bf_data.bt_coex_min_thold = + link_info->bf_data.bt_coex_min_thold = enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0; } @@ -255,44 +253,6 @@ static void iwl_mvm_bt_coex_tcm_based_ci(struct iwl_mvm *mvm, swap(data->primary, data->secondary); } -static void iwl_mvm_bt_coex_enable_esr(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, bool enable) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int link_id; - - lockdep_assert_held(&mvm->mutex); - - if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif)) - return; - - /* Done already */ - if (mvmvif->bt_coex_esr_disabled == !enable) - return; - - mvmvif->bt_coex_esr_disabled = !enable; - - /* Nothing to do */ - if (mvmvif->esr_active == enable) - return; - - if (enable) { - /* Try to re-enable eSR*/ - iwl_mvm_mld_select_links(mvm, vif, false); - return; - } - - /* - * Find the primary link, as we want to switch to it and drop the - * secondary one. 
- */ - link_id = iwl_mvm_mld_get_primary_link(mvm, vif, vif->active_links); - WARN_ON(link_id < 0); - - ieee80211_set_active_links_async(vif, - vif->active_links & BIT(link_id)); -} - /* * This function receives the LB link id and checks if eSR should be * enabled or disabled (due to BT coex) @@ -300,35 +260,25 @@ static void iwl_mvm_bt_coex_enable_esr(struct iwl_mvm *mvm, bool iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - int link_id, int primary_link) + s32 link_rssi, + bool primary) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id]; bool have_wifi_loss_rate = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, BT_PROFILE_NOTIFICATION, 0) > 4; - s8 link_rssi = 0; u8 wifi_loss_rate; - lockdep_assert_held(&mvm->mutex); - if (mvm->last_bt_notif.wifi_loss_low_rssi == BT_OFF) return true; - /* If LB link is the primary one we should always disable eSR */ - if (link_id == primary_link) + if (primary) return false; /* The feature is not supported */ if (!have_wifi_loss_rate) return true; - /* - * We might not have a link_info when checking whether we can - * (re)enable eSR - the LB link might not exist yet - */ - if (link_info) - link_rssi = (s8)link_info->beacon_stats.avg_signal; /* * In case we don't know the RSSI - take the lower wifi loss, @@ -338,7 +288,7 @@ iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm, if (!link_rssi) wifi_loss_rate = mvm->last_bt_notif.wifi_loss_mid_high_rssi; - else if (!mvmvif->bt_coex_esr_disabled) + else if (mvmvif->esr_active) /* RSSI needs to get really low to disable eSR... */ wifi_loss_rate = link_rssi <= -IWL_MVM_BT_COEX_DISABLE_ESR_THRESH ? @@ -354,23 +304,24 @@ iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm, return wifi_loss_rate <= IWL_MVM_BT_COEX_WIFI_LOSS_THRESH; } -void iwl_mvm_bt_coex_update_vif_esr(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - int link_id) +void iwl_mvm_bt_coex_update_link_esr(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + int link_id) { - unsigned long usable_links = ieee80211_vif_usable_links(vif); - int primary_link = iwl_mvm_mld_get_primary_link(mvm, vif, - usable_links); - bool enable; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_vif_link_info *link = mvmvif->link[link_id]; - /* Not assoc, not MLD vif or only one usable link */ - if (primary_link < 0) + if (!ieee80211_vif_is_mld(vif) || + !iwl_mvm_vif_from_mac80211(vif)->authorized || + WARN_ON(!link)) return; - enable = iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif, link_id, - primary_link); - - iwl_mvm_bt_coex_enable_esr(mvm, vif, enable); + if (!iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif, + (s8)link->beacon_stats.avg_signal, + link_id == iwl_mvm_get_primary_link(vif))) + /* In case we decided to exit eSR - stay with the primary */ + iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_COEX, + iwl_mvm_get_primary_link(vif)); } static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm, @@ -412,13 +363,13 @@ static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm, smps_mode, link_id); iwl_mvm_bt_coex_reduced_txp(mvm, link_info->ap_sta_id, false); - /* FIXME: should this be per link? 
*/ - iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0); + iwl_mvm_bt_coex_enable_rssi_event(mvm, link_info, false, + 0); } return; } - iwl_mvm_bt_coex_update_vif_esr(mvm, vif, link_id); + iwl_mvm_bt_coex_update_link_esr(mvm, vif, link_id); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2)) min_ag_for_static_smps = BT_VERY_HIGH_TRAFFIC; @@ -508,13 +459,12 @@ static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm, le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF || !vif->cfg.assoc) { iwl_mvm_bt_coex_reduced_txp(mvm, link_info->ap_sta_id, false); - /* FIXME: should this be per link? */ - iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0); + iwl_mvm_bt_coex_enable_rssi_event(mvm, link_info, false, 0); return; } /* try to get the avg rssi from fw */ - ave_rssi = mvmvif->bf_data.ave_beacon_signal; + ave_rssi = link_info->bf_data.ave_beacon_signal; /* if the RSSI isn't valid, fake it is very low */ if (!ave_rssi) @@ -530,7 +480,7 @@ static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm, } /* Begin to monitor the RSSI: it may influence the reduced Tx power */ - iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi); + iwl_mvm_bt_coex_enable_rssi_event(mvm, link_info, true, ave_rssi); } /* must be called under rcu_read_lock */ @@ -555,10 +505,6 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, return; } - /* When BT is off this will be 0 */ - if (data->notif->wifi_loss_low_rssi == BT_OFF) - iwl_mvm_bt_coex_enable_esr(mvm, vif, true); - for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) iwl_mvm_bt_notif_per_link(mvm, vif, data, link_id); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index f5122c4678a1..3cbeaddf4358 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2013-2015 Intel Mobile Communications GmbH - * Copyright (C) 2013-2014, 2018-2023 Intel Corporation + * Copyright (C) 2013-2014, 2018-2024 Intel Corporation * Copyright (C) 2015 Intel Deutschland GmbH */ #ifndef __MVM_CONSTANTS_H @@ -14,6 +14,8 @@ #define IWL_MVM_BT_COEX_DISABLE_ESR_THRESH 69 #define IWL_MVM_BT_COEX_ENABLE_ESR_THRESH 63 #define IWL_MVM_BT_COEX_WIFI_LOSS_THRESH 0 +#define IWL_MVM_TRIGGER_LINK_SEL_TIME_SEC 30 +#define IWL_MVM_TPT_COUNT_WINDOW_SEC 5 #define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC) #define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC) @@ -123,5 +125,16 @@ #define IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT 3000 /* in seconds */ #define IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT 60 /* in seconds */ #define IWL_MVM_AUTO_EML_ENABLE true +#define IWL_MVM_MISSED_BEACONS_EXIT_ESR_THRESH 7 +#define IWL_MVM_HIGH_RSSI_THRESH_20MHZ -67 +#define IWL_MVM_LOW_RSSI_THRESH_20MHZ -71 +#define IWL_MVM_HIGH_RSSI_THRESH_40MHZ -64 +#define IWL_MVM_LOW_RSSI_THRESH_40MHZ -67 +#define IWL_MVM_HIGH_RSSI_THRESH_80MHZ -61 +#define IWL_MVM_LOW_RSSI_THRESH_80MHZ -74 +#define IWL_MVM_HIGH_RSSI_THRESH_160MHZ -58 +#define IWL_MVM_LOW_RSSI_THRESH_160MHZ -61 + +#define IWL_MVM_ENTER_ESR_TPT_THRESH 400 #endif /* __MVM_CONSTANTS_H */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 52518a47554e..71e6b06481a9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -1152,7 +1152,8 @@ 
iwl_mvm_wowlan_config(struct iwl_mvm *mvm, if (ret) return ret; - return iwl_mvm_send_proto_offload(mvm, vif, false, true, 0); + return iwl_mvm_send_proto_offload(mvm, vif, false, true, 0, + mvm_link->ap_sta_id); } static int @@ -1242,7 +1243,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, .data[0] = &d3_cfg_cmd_data, .len[0] = sizeof(d3_cfg_cmd_data), }; - int ret, primary_link; + int ret; int len __maybe_unused; bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); @@ -1260,28 +1261,9 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, if (IS_ERR_OR_NULL(vif)) return 1; - if (hweight16(vif->active_links) > 1) { - /* - * Select the 'best' link. - * May need to revisit, it seems better to not optimize - * for throughput but rather range, reliability and - * power here - and select 2.4 GHz ... - */ - primary_link = iwl_mvm_mld_get_primary_link(mvm, vif, - vif->active_links); - - if (WARN_ONCE(primary_link < 0, "no primary link in 0x%x\n", - vif->active_links)) - primary_link = __ffs(vif->active_links); - - ret = ieee80211_set_active_links(vif, BIT(primary_link)); - if (ret) - return ret; - } else if (vif->active_links) { - primary_link = __ffs(vif->active_links); - } else { - primary_link = 0; - } + ret = iwl_mvm_block_esr_sync(mvm, vif, IWL_MVM_ESR_BLOCKED_WOWLAN); + if (ret) + return ret; mutex_lock(&mvm->mutex); @@ -1291,7 +1273,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, mvmvif = iwl_mvm_vif_from_mac80211(vif); - mvm_link = mvmvif->link[primary_link]; + mvm_link = mvmvif->link[iwl_mvm_get_primary_link(vif)]; if (WARN_ON_ONCE(!mvm_link)) { ret = -EINVAL; goto out_noreset; @@ -1472,6 +1454,9 @@ struct iwl_wowlan_status_data { struct iwl_multicast_key_data igtk; struct iwl_multicast_key_data bigtk[WOWLAN_BIGTK_KEYS_NUM]; + int num_mlo_keys; + struct iwl_wowlan_mlo_gtk mlo_keys[WOWLAN_MAX_MLO_KEYS]; + u8 *wake_packet; }; @@ -1830,6 +1815,10 @@ static void iwl_mvm_d3_find_last_keys(struct ieee80211_hw *hw, void *_data) { struct iwl_mvm_d3_gtk_iter_data *data = _data; + int link_id = vif->active_links ? __ffs(vif->active_links) : -1; + + if (link_id >= 0 && key->link_id >= 0 && link_id != key->link_id) + return; if (data->unhandled_cipher) return; @@ -1918,6 +1907,10 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw, struct iwl_mvm_d3_gtk_iter_data *data = _data; struct iwl_wowlan_status_data *status = data->status; s8 keyidx; + int link_id = vif->active_links ? 
__ffs(vif->active_links) : -1; + + if (link_id >= 0 && key->link_id >= 0 && link_id != key->link_id) + return; if (data->unhandled_cipher) return; @@ -1973,6 +1966,169 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw, } } +struct iwl_mvm_d3_mlo_old_keys { + u32 cipher[IEEE80211_MLD_MAX_NUM_LINKS][WOWLAN_MLO_GTK_KEY_NUM_TYPES]; + struct ieee80211_key_conf *key[IEEE80211_MLD_MAX_NUM_LINKS][8]; +}; + +static void iwl_mvm_mlo_key_ciphers(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *data) +{ + struct iwl_mvm_d3_mlo_old_keys *old_keys = data; + enum iwl_wowlan_mlo_gtk_type key_type; + + if (key->link_id < 0) + return; + + if (WARN_ON(key->link_id >= IEEE80211_MLD_MAX_NUM_LINKS || + key->keyidx >= 8)) + return; + + if (WARN_ON(old_keys->key[key->link_id][key->keyidx])) + return; + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + key_type = WOWLAN_MLO_GTK_KEY_TYPE_GTK; + break; + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + case WLAN_CIPHER_SUITE_AES_CMAC: + if (key->keyidx == 4 || key->keyidx == 5) { + key_type = WOWLAN_MLO_GTK_KEY_TYPE_IGTK; + break; + } else if (key->keyidx == 6 || key->keyidx == 7) { + key_type = WOWLAN_MLO_GTK_KEY_TYPE_BIGTK; + break; + } + return; + default: + /* ignore WEP/TKIP or unknown ciphers */ + return; + } + + old_keys->cipher[key->link_id][key_type] = key->cipher; + old_keys->key[key->link_id][key->keyidx] = key; +} + +static bool iwl_mvm_mlo_gtk_rekey(struct iwl_wowlan_status_data *status, + struct ieee80211_vif *vif, + struct iwl_mvm *mvm) +{ + int i; + struct iwl_mvm_d3_mlo_old_keys *old_keys; + bool ret = true; + + IWL_DEBUG_WOWLAN(mvm, "Num of MLO Keys: %d\n", status->num_mlo_keys); + if (!status->num_mlo_keys) + return true; + + old_keys = kzalloc(sizeof(*old_keys), GFP_KERNEL); + if (!old_keys) + return false; + + /* find the cipher for each mlo key */ + ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_mlo_key_ciphers, old_keys); + + for (i = 0; i < status->num_mlo_keys; i++) { + struct iwl_wowlan_mlo_gtk *mlo_key = &status->mlo_keys[i]; + struct ieee80211_key_conf *key, *old_key; + struct ieee80211_key_seq seq; + struct { + struct ieee80211_key_conf conf; + u8 key[32]; + } conf = {}; + u16 flags = le16_to_cpu(mlo_key->flags); + int j, link_id, key_id, key_type; + + link_id = u16_get_bits(flags, WOWLAN_MLO_GTK_FLAG_LINK_ID_MSK); + key_id = u16_get_bits(flags, WOWLAN_MLO_GTK_FLAG_KEY_ID_MSK); + key_type = u16_get_bits(flags, + WOWLAN_MLO_GTK_FLAG_KEY_TYPE_MSK); + + if (!(vif->valid_links & BIT(link_id))) + continue; + + if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS || + key_id >= 8 || + key_type >= WOWLAN_MLO_GTK_KEY_NUM_TYPES)) + continue; + + conf.conf.cipher = old_keys->cipher[link_id][key_type]; + /* WARN_ON? 
*/ + if (!conf.conf.cipher) + continue; + + conf.conf.keylen = 0; + switch (conf.conf.cipher) { + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + conf.conf.keylen = WLAN_KEY_LEN_CCMP; + break; + case WLAN_CIPHER_SUITE_GCMP_256: + conf.conf.keylen = WLAN_KEY_LEN_GCMP_256; + break; + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_128; + break; + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_256; + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + conf.conf.keylen = WLAN_KEY_LEN_AES_CMAC; + break; + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + conf.conf.keylen = WLAN_KEY_LEN_BIP_CMAC_256; + break; + } + + if (WARN_ON(!conf.conf.keylen || + conf.conf.keylen > sizeof(conf.key))) + continue; + + memcpy(conf.conf.key, mlo_key->key, conf.conf.keylen); + conf.conf.keyidx = key_id; + + old_key = old_keys->key[link_id][key_id]; + if (old_key) { + IWL_DEBUG_WOWLAN(mvm, + "Remove MLO key id %d, link id %d\n", + key_id, link_id); + ieee80211_remove_key(old_key); + } + + IWL_DEBUG_WOWLAN(mvm, "Add MLO key id %d, link id %d\n", + key_id, link_id); + key = ieee80211_gtk_rekey_add(vif, &conf.conf, link_id); + if (WARN_ON(IS_ERR(key))) { + ret = false; + goto out; + } + + /* + * mac80211 expects the pn in big-endian + * also note that seq is a union of all cipher types + * (ccmp, gcmp, cmac, gmac), and they all have the same + * pn field (of length 6) so just copy it to ccmp.pn. + */ + for (j = 5; j >= 0; j--) + seq.ccmp.pn[5 - j] = mlo_key->pn[j]; + + /* group keys are non-QoS and use TID 0 */ + ieee80211_set_key_rx_seq(key, 0, &seq); + } + +out: + kfree(old_keys); + return ret; +} + static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status, struct ieee80211_vif *vif, struct iwl_mvm *mvm, u32 gtk_cipher) @@ -2176,6 +2332,9 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, return false; } + if (!iwl_mvm_mlo_gtk_rekey(status, vif, mvm)) + return false; + ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid, (void *)&replay_ctr, GFP_KERNEL); } @@ -2303,9 +2462,10 @@ static void iwl_mvm_convert_bigtk(struct iwl_wowlan_status_data *status, static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm, struct iwl_wowlan_info_notif *data, struct iwl_wowlan_status_data *status, - u32 len) + u32 len, bool has_mlo_keys) { u32 i; + u32 expected_len = sizeof(*data); if (!data) { IWL_ERR(mvm, "iwl_wowlan_info_notif data is NULL\n"); @@ -2313,7 +2473,11 @@ static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm, return; } - if (len < sizeof(*data)) { + if (has_mlo_keys) + expected_len += (data->num_mlo_link_keys * + sizeof(status->mlo_keys[0])); + + if (len < expected_len) { IWL_ERR(mvm, "Invalid WoWLAN info notification!\n"); status = NULL; return; @@ -2333,6 +2497,17 @@ static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm, le32_to_cpu(data->num_of_gtk_rekeys); status->received_beacons = le32_to_cpu(data->received_beacons); status->tid_tear_down = data->tid_tear_down; + + if (has_mlo_keys && data->num_mlo_link_keys) { + status->num_mlo_keys = data->num_mlo_link_keys; + if (IWL_FW_CHECK(mvm, + status->num_mlo_keys > WOWLAN_MAX_MLO_KEYS, + "Too many mlo keys: %d, max %d\n", + status->num_mlo_keys, WOWLAN_MAX_MLO_KEYS)) + status->num_mlo_keys = WOWLAN_MAX_MLO_KEYS; + memcpy(status->mlo_keys, data->mlo_gtks, + status->num_mlo_keys * sizeof(status->mlo_keys[0])); + } } static void @@ -2553,6 +2728,12 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, int i; bool keep = false; struct iwl_mvm_sta 
*mvm_ap_sta; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int link_id = vif->active_links ? __ffs(vif->active_links) : 0; + struct iwl_mvm_vif_link_info *mvm_link = mvmvif->link[link_id]; + + if (WARN_ON(!mvm_link)) + goto out_unlock; if (!status) goto out_unlock; @@ -2560,8 +2741,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, IWL_DEBUG_WOWLAN(mvm, "wakeup reason 0x%x\n", status->wakeup_reasons); - /* still at hard-coded place 0 for D3 image */ - mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0); + mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, mvm_link->ap_sta_id); if (!mvm_ap_sta) goto out_unlock; @@ -3074,7 +3254,8 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait, (void *)pkt->data; iwl_mvm_parse_wowlan_info_notif(mvm, notif, - d3_data->status, len); + d3_data->status, len, + wowlan_info_ver > 3); } d3_data->notif_received |= IWL_D3_NOTIF_WOWLAN_INFO; @@ -3272,6 +3453,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) goto err; } + iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_WOWLAN); + /* after the successful handshake, we're out of D3 */ mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index 7fe57ecd0682..17c97dfbc62a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -407,7 +407,7 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file, }; iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd); - if (mvmvif->bf_data.bf_enabled) + if (mvmvif->bf_enabled) cmd.bf_enable_beacon_filter = cpu_to_le32(1); else cmd.bf_enable_beacon_filter = 0; @@ -692,6 +692,96 @@ static ssize_t iwl_dbgfs_quota_min_read(struct file *file, return simple_read_from_buffer(user_buf, count, ppos, buf, len); } +static ssize_t iwl_dbgfs_int_mlo_scan_write(struct ieee80211_vif *vif, + char *buf, size_t count, + loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + u32 action; + int ret; + + if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif)) + return -EINVAL; + + if (kstrtou32(buf, 0, &action)) + return -EINVAL; + + mutex_lock(&mvm->mutex); + + if (!action) { + ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_INT_MLO, false); + } else if (action == 1) { + ret = iwl_mvm_int_mlo_scan(mvm, vif); + } else { + ret = -EINVAL; + } + + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + +static ssize_t iwl_dbgfs_esr_disable_reason_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + unsigned long esr_mask; + char *buf; + int bufsz, pos, i; + ssize_t rv; + + mutex_lock(&mvm->mutex); + esr_mask = mvmvif->esr_disable_reason; + mutex_unlock(&mvm->mutex); + + bufsz = hweight32(esr_mask) * 32 + 40; + buf = kmalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos = scnprintf(buf, bufsz, "EMLSR state: '0x%lx'\nreasons:\n", + esr_mask); + for_each_set_bit(i, &esr_mask, BITS_PER_LONG) + pos += scnprintf(buf + pos, bufsz - pos, " - %s\n", + iwl_get_esr_state_string(BIT(i))); + + rv = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return rv; +} + +static ssize_t iwl_dbgfs_esr_disable_reason_write(struct ieee80211_vif *vif, + char *buf, size_t count, + loff_t *ppos) 
+{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + u32 reason; + u8 block; + int ret; + + ret = sscanf(buf, "%u %hhu", &reason, &block); + if (ret < 0) + return ret; + + if (hweight16(reason) != 1 || !(reason & IWL_MVM_BLOCK_ESR_REASONS)) + return -EINVAL; + + mutex_lock(&mvm->mutex); + if (block) + iwl_mvm_block_esr(mvm, vif, reason, + iwl_mvm_get_primary_link(vif)); + else + iwl_mvm_unblock_esr(mvm, vif, reason); + mutex_unlock(&mvm->mutex); + + return count; +} + #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif) #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ @@ -711,6 +801,8 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20); MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10); MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32); MVM_DEBUGFS_READ_FILE_OPS(os_device_timediff); +MVM_DEBUGFS_WRITE_FILE_OPS(int_mlo_scan, 32); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(esr_disable_reason, 32); void iwl_mvm_vif_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@ -738,6 +830,10 @@ void iwl_mvm_vif_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif) MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir, 0600); MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir, 0600); MVM_DEBUGFS_ADD_FILE_VIF(os_device_timediff, mvmvif->dbgfs_dir, 0400); + debugfs_create_bool("ftm_unprotected", 0200, mvmvif->dbgfs_dir, + &mvmvif->ftm_unprotected); + MVM_DEBUGFS_ADD_FILE_VIF(int_mlo_scan, mvmvif->dbgfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE_VIF(esr_disable_reason, mvmvif->dbgfs_dir, 0600); if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p && mvmvif == mvm->bf_allowed_vif) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c index d84d7e955bb0..72a3d71f46f0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2023 Intel Corporation + * Copyright (C) 2018-2024 Intel Corporation */ #include <linux/etherdevice.h> #include <linux/math64.h> @@ -553,6 +553,15 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif, break; } rcu_read_unlock(); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (mvmvif->ftm_unprotected) { + target->sta_id = IWL_MVM_INVALID_STA; + target->initiator_ap_flags &= + ~cpu_to_le32(IWL_INITIATOR_AP_FLAGS_PMF); + } + +#endif } else { target->sta_id = IWL_MVM_INVALID_STA; } @@ -715,6 +724,12 @@ iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif, { struct iwl_mvm_ftm_pasn_entry *entry; u32 flags = le32_to_cpu(target->initiator_ap_flags); +#ifdef CONFIG_IWLWIFI_DEBUGFS + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (mvmvif->ftm_unprotected) + return; +#endif if (!(flags & (IWL_INITIATOR_AP_FLAGS_NON_TB | IWL_INITIATOR_AP_FLAGS_TB))) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index e1c2b7fc92ab..e7f5978ef2d7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -494,7 +494,7 @@ static void iwl_mvm_uats_init(struct iwl_mvm *mvm) int ret; struct iwl_host_cmd cmd = { .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, - UATS_TABLE_CMD), + MCC_ALLOWED_AP_TYPE_CMD), .flags = 0, .data[0] = &mvm->fwrt.uats_table, .len[0] = 
sizeof(mvm->fwrt.uats_table), @@ -516,7 +516,7 @@ static void iwl_mvm_uats_init(struct iwl_mvm *mvm) IWL_FW_CMD_VER_UNKNOWN); if (cmd_ver != 1) { IWL_DEBUG_RADIO(mvm, - "UATS_TABLE_CMD ver %d not supported\n", + "MCC_ALLOWED_AP_TYPE_CMD ver %d not supported\n", cmd_ver); return; } @@ -529,9 +529,10 @@ static void iwl_mvm_uats_init(struct iwl_mvm *mvm) ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret < 0) - IWL_ERR(mvm, "failed to send UATS_TABLE_CMD (%d)\n", ret); + IWL_ERR(mvm, "failed to send MCC_ALLOWED_AP_TYPE_CMD (%d)\n", + ret); else - IWL_DEBUG_RADIO(mvm, "UATS_TABLE_CMD sent to FW\n"); + IWL_DEBUG_RADIO(mvm, "MCC_ALLOWED_AP_TYPE_CMD sent to FW\n"); } static int iwl_mvm_sgom_init(struct iwl_mvm *mvm) @@ -666,7 +667,7 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm) iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE, NULL); - if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ) + if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) mvm->trans->step_urm = !!(iwl_read_umac_prph(mvm->trans, CNVI_PMU_STEP_FLOW) & CNVI_PMU_STEP_FLOW_FORCE_URM); @@ -896,11 +897,13 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) u32 n_subbands; u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, IWL_FW_CMD_VER_UNKNOWN); - if (cmd_ver == 7) { + if (cmd_ver >= 7) { len = sizeof(cmd.v7); n_subbands = IWL_NUM_SUB_BANDS_V2; per_chain = cmd.v7.per_chain[0][0]; cmd.v7.flags = cpu_to_le32(mvm->fwrt.reduced_power_flags); + if (cmd_ver == 8) + len = sizeof(cmd.v8); } else if (cmd_ver == 6) { len = sizeof(cmd.v6); n_subbands = IWL_NUM_SUB_BANDS_V2; @@ -1223,94 +1226,12 @@ static bool iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm) static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) { + struct iwl_lari_config_change_cmd cmd; + size_t cmd_size; int ret; - u32 value; - struct iwl_lari_config_change_cmd_v7 cmd = {}; - u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, - WIDE_ID(REGULATORY_AND_NVM_GROUP, - LARI_CONFIG_CHANGE), 1); - - cmd.config_bitmap = iwl_get_lari_config_bitmap(&mvm->fwrt); - - ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_11AX_ENABLEMENT, &value); - if (!ret) - cmd.oem_11ax_allow_bitmap = cpu_to_le32(value); - ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ENABLE_UNII4_CHAN, &value); - if (!ret) - cmd.oem_unii4_allow_bitmap = cpu_to_le32(value); - - ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ACTIVATE_CHANNEL, &value); + ret = iwl_fill_lari_config(&mvm->fwrt, &cmd, &cmd_size); if (!ret) { - if (cmd_ver < 8) - value &= ~ACTIVATE_5G2_IN_WW_MASK; - cmd.chan_state_active_bitmap = cpu_to_le32(value); - } - - ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ENABLE_6E, &value); - if (!ret) - cmd.oem_uhb_allow_bitmap = cpu_to_le32(value); - - ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_FORCE_DISABLE_CHANNELS, - &value); - if (!ret) - cmd.force_disable_channels_bitmap = cpu_to_le32(value); - - ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ENERGY_DETECTION_THRESHOLD, - &value); - if (!ret) - cmd.edt_bitmap = cpu_to_le32(value); - - if (cmd.config_bitmap || - cmd.oem_uhb_allow_bitmap || - cmd.oem_11ax_allow_bitmap || - cmd.oem_unii4_allow_bitmap || - cmd.chan_state_active_bitmap || - cmd.force_disable_channels_bitmap || - cmd.edt_bitmap) { - size_t cmd_size; - - switch (cmd_ver) { - case 8: - case 7: - cmd_size = sizeof(struct iwl_lari_config_change_cmd_v7); - break; - case 6: - cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6); - break; - case 5: - cmd_size = sizeof(struct iwl_lari_config_change_cmd_v5); - break; - case 4: - cmd_size = sizeof(struct 
iwl_lari_config_change_cmd_v4); - break; - case 3: - cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3); - break; - case 2: - cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2); - break; - default: - cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1); - break; - } - - IWL_DEBUG_RADIO(mvm, - "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n", - le32_to_cpu(cmd.config_bitmap), - le32_to_cpu(cmd.oem_11ax_allow_bitmap)); - IWL_DEBUG_RADIO(mvm, - "sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, chan_state_active_bitmap=0x%x, cmd_ver=%d\n", - le32_to_cpu(cmd.oem_unii4_allow_bitmap), - le32_to_cpu(cmd.chan_state_active_bitmap), - cmd_ver); - IWL_DEBUG_RADIO(mvm, - "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n", - le32_to_cpu(cmd.oem_uhb_allow_bitmap), - le32_to_cpu(cmd.force_disable_channels_bitmap)); - IWL_DEBUG_RADIO(mvm, - "sending LARI_CONFIG_CHANGE, edt_bitmap=0x%x\n", - le32_to_cpu(cmd.edt_bitmap)); ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP, LARI_CONFIG_CHANGE), diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c index fe5bba8561d0..6ec9a8e21a34 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c @@ -5,6 +5,48 @@ #include "mvm.h" #include "time-event.h" +#define HANDLE_ESR_REASONS(HOW) \ + HOW(BLOCKED_PREVENTION) \ + HOW(BLOCKED_WOWLAN) \ + HOW(BLOCKED_TPT) \ + HOW(BLOCKED_FW) \ + HOW(BLOCKED_NON_BSS) \ + HOW(EXIT_MISSED_BEACON) \ + HOW(EXIT_LOW_RSSI) \ + HOW(EXIT_COEX) \ + HOW(EXIT_BANDWIDTH) \ + HOW(EXIT_CSA) \ + HOW(EXIT_LINK_USAGE) + +static const char *const iwl_mvm_esr_states_names[] = { +#define NAME_ENTRY(x) [ilog2(IWL_MVM_ESR_##x)] = #x, + HANDLE_ESR_REASONS(NAME_ENTRY) +}; + +const char *iwl_get_esr_state_string(enum iwl_mvm_esr_state state) +{ + int offs = ilog2(state); + + if (offs >= ARRAY_SIZE(iwl_mvm_esr_states_names) || + !iwl_mvm_esr_states_names[offs]) + return "UNKNOWN"; + + return iwl_mvm_esr_states_names[offs]; +} + +static void iwl_mvm_print_esr_state(struct iwl_mvm *mvm, u32 mask) +{ +#define NAME_FMT(x) "%s" +#define NAME_PR(x) (mask & IWL_MVM_ESR_##x) ? "[" #x "]" : "", + IWL_DEBUG_INFO(mvm, + "EMLSR state = " HANDLE_ESR_REASONS(NAME_FMT) + " (0x%x)\n", + HANDLE_ESR_REASONS(NAME_PR) + mask); +#undef NAME_FMT +#undef NAME_PR +} + static u32 iwl_mvm_get_free_fw_link_id(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvm_vif) { @@ -108,6 +150,65 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_ADD); } +struct iwl_mvm_esr_iter_data { + struct ieee80211_vif *vif; + unsigned int link_id; + bool lift_block; +}; + +static void iwl_mvm_esr_vif_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_esr_iter_data *data = _data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int link_id; + + if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION) + return; + + for_each_mvm_vif_valid_link(mvmvif, link_id) { + struct iwl_mvm_vif_link_info *link_info = + mvmvif->link[link_id]; + if (vif == data->vif && link_id == data->link_id) + continue; + if (link_info->active) + data->lift_block = false; + } +} + +int iwl_mvm_esr_non_bss_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + unsigned int link_id, bool active) +{ + /* An active link of a non-station vif blocks EMLSR. Upon activation + * block EMLSR on the bss vif. 
Upon deactivation, check if this link + * was the last non-station link active, and if so unblock the bss vif + */ + struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm); + struct iwl_mvm_esr_iter_data data = { + .vif = vif, + .link_id = link_id, + .lift_block = true, + }; + + if (IS_ERR_OR_NULL(bss_vif)) + return 0; + + if (active) + return iwl_mvm_block_esr_sync(mvm, bss_vif, + IWL_MVM_ESR_BLOCKED_NON_BSS); + + ieee80211_iterate_active_interfaces(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_esr_vif_iterator, &data); + if (data.lift_block) { + mutex_lock(&mvm->mutex); + iwl_mvm_unblock_esr(mvm, bss_vif, IWL_MVM_ESR_BLOCKED_NON_BSS); + mutex_unlock(&mvm->mutex); + } + + return 0; +} + int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf, u32 changes, bool active) @@ -329,3 +430,702 @@ int iwl_mvm_disable_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return ret; } + +struct iwl_mvm_rssi_to_grade { + s8 rssi[2]; + u16 grade; +}; + +#define RSSI_TO_GRADE_LINE(_lb, _hb_uhb, _grade) \ + { \ + .rssi = {_lb, _hb_uhb}, \ + .grade = _grade \ + } + +/* + * This array must be sorted by increasing RSSI for proper functionality. + * The grades are actually estimated throughput, represented as fixed-point + * with a scale factor of 1/10. + */ +static const struct iwl_mvm_rssi_to_grade rssi_to_grade_map[] = { + RSSI_TO_GRADE_LINE(-85, -89, 177), + RSSI_TO_GRADE_LINE(-83, -86, 344), + RSSI_TO_GRADE_LINE(-82, -85, 516), + RSSI_TO_GRADE_LINE(-80, -83, 688), + RSSI_TO_GRADE_LINE(-77, -79, 1032), + RSSI_TO_GRADE_LINE(-73, -76, 1376), + RSSI_TO_GRADE_LINE(-70, -74, 1548), + RSSI_TO_GRADE_LINE(-69, -72, 1750), + RSSI_TO_GRADE_LINE(-65, -68, 2064), + RSSI_TO_GRADE_LINE(-61, -66, 2294), + RSSI_TO_GRADE_LINE(-58, -61, 2580), + RSSI_TO_GRADE_LINE(-55, -58, 2868), + RSSI_TO_GRADE_LINE(-46, -55, 3098), + RSSI_TO_GRADE_LINE(-43, -54, 3442) +}; + +#define MAX_GRADE (rssi_to_grade_map[ARRAY_SIZE(rssi_to_grade_map) - 1].grade) + +#define DEFAULT_CHAN_LOAD_LB 30 +#define DEFAULT_CHAN_LOAD_HB 15 +#define DEFAULT_CHAN_LOAD_UHB 0 + +/* Factors calculation is done with fixed-point with a scaling factor of 1/256 */ +#define SCALE_FACTOR 256 + +/* Convert a percentage from [0,100] to [0,255] */ +#define NORMALIZE_PERCENT_TO_255(percentage) ((percentage) * SCALE_FACTOR / 100) + +static unsigned int +iwl_mvm_get_puncturing_factor(const struct ieee80211_bss_conf *link_conf) +{ + enum nl80211_chan_width chan_width = + link_conf->chanreq.oper.width; + int mhz = nl80211_chan_width_to_mhz(chan_width); + unsigned int n_subchannels, n_punctured, puncturing_penalty; + + if (WARN_ONCE(mhz < 20 || mhz > 320, + "Invalid channel width : (%d)\n", mhz)) + return SCALE_FACTOR; + + /* No puncturing, no penalty */ + if (mhz < 80) + return SCALE_FACTOR; + + /* total number of subchannels */ + n_subchannels = mhz / 20; + /* how many of these are punctured */ + n_punctured = hweight16(link_conf->chanreq.oper.punctured); + + puncturing_penalty = n_punctured * SCALE_FACTOR / n_subchannels; + return SCALE_FACTOR - puncturing_penalty; +} + +static unsigned int +iwl_mvm_get_chan_load(struct ieee80211_bss_conf *link_conf) +{ + struct iwl_mvm_vif_link_info *mvm_link = + iwl_mvm_vif_from_mac80211(link_conf->vif)->link[link_conf->link_id]; + const struct element *bss_load_elem; + const struct ieee80211_bss_load_elem *bss_load; + enum nl80211_band band = link_conf->chanreq.oper.chan->band; + unsigned int chan_load; + u32 chan_load_by_us; + + rcu_read_lock(); + bss_load_elem = 
ieee80211_bss_get_elem(link_conf->bss, + WLAN_EID_QBSS_LOAD); + + /* If there isn't BSS Load element, take the defaults */ + if (!bss_load_elem || + bss_load_elem->datalen != sizeof(*bss_load)) { + rcu_read_unlock(); + switch (band) { + case NL80211_BAND_2GHZ: + chan_load = DEFAULT_CHAN_LOAD_LB; + break; + case NL80211_BAND_5GHZ: + chan_load = DEFAULT_CHAN_LOAD_HB; + break; + case NL80211_BAND_6GHZ: + chan_load = DEFAULT_CHAN_LOAD_UHB; + break; + default: + chan_load = 0; + break; + } + /* The defaults are given in percentage */ + return NORMALIZE_PERCENT_TO_255(chan_load); + } + + bss_load = (const void *)bss_load_elem->data; + /* Channel util is in range 0-255 */ + chan_load = bss_load->channel_util; + rcu_read_unlock(); + + if (!mvm_link || !mvm_link->active) + return chan_load; + + if (WARN_ONCE(!mvm_link->phy_ctxt, + "Active link (%u) without phy ctxt assigned!\n", + link_conf->link_id)) + return chan_load; + + /* channel load by us is given in percentage */ + chan_load_by_us = + NORMALIZE_PERCENT_TO_255(mvm_link->phy_ctxt->channel_load_by_us); + + /* Use only values that firmware sends that can possibly be valid */ + if (chan_load_by_us <= chan_load) + chan_load -= chan_load_by_us; + + return chan_load; +} + +static unsigned int +iwl_mvm_get_chan_load_factor(struct ieee80211_bss_conf *link_conf) +{ + return SCALE_FACTOR - iwl_mvm_get_chan_load(link_conf); +} + +/* This function calculates the grade of a link. Returns 0 in error case */ +VISIBLE_IF_IWLWIFI_KUNIT +unsigned int iwl_mvm_get_link_grade(struct ieee80211_bss_conf *link_conf) +{ + enum nl80211_band band; + int i, rssi_idx; + s32 link_rssi; + unsigned int grade = MAX_GRADE; + + if (WARN_ON_ONCE(!link_conf)) + return 0; + + band = link_conf->chanreq.oper.chan->band; + if (WARN_ONCE(band != NL80211_BAND_2GHZ && + band != NL80211_BAND_5GHZ && + band != NL80211_BAND_6GHZ, + "Invalid band (%u)\n", band)) + return 0; + + link_rssi = MBM_TO_DBM(link_conf->bss->signal); + /* + * For 6 GHz the RSSI of the beacons is lower than + * the RSSI of the data. + */ + if (band == NL80211_BAND_6GHZ) + link_rssi += 4; + + rssi_idx = band == NL80211_BAND_2GHZ ? 
0 : 1; + + /* No valid RSSI - take the lowest grade */ + if (!link_rssi) + link_rssi = rssi_to_grade_map[0].rssi[rssi_idx]; + + /* Get grade based on RSSI */ + for (i = 0; i < ARRAY_SIZE(rssi_to_grade_map); i++) { + const struct iwl_mvm_rssi_to_grade *line = + &rssi_to_grade_map[i]; + + if (link_rssi > line->rssi[rssi_idx]) + continue; + grade = line->grade; + break; + } + + /* apply the channel load and puncturing factors */ + grade = grade * iwl_mvm_get_chan_load_factor(link_conf) / SCALE_FACTOR; + grade = grade * iwl_mvm_get_puncturing_factor(link_conf) / SCALE_FACTOR; + return grade; +} +EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_get_link_grade); + +static +u8 iwl_mvm_set_link_selection_data(struct ieee80211_vif *vif, + struct iwl_mvm_link_sel_data *data, + unsigned long usable_links, + u8 *best_link_idx) +{ + u8 n_data = 0; + u16 max_grade = 0; + unsigned long link_id; + + /* TODO: don't select links that weren't discovered in the last scan */ + for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) { + struct ieee80211_bss_conf *link_conf = + link_conf_dereference_protected(vif, link_id); + + if (WARN_ON_ONCE(!link_conf)) + continue; + + data[n_data].link_id = link_id; + data[n_data].chandef = &link_conf->chanreq.oper; + data[n_data].signal = link_conf->bss->signal / 100; + data[n_data].grade = iwl_mvm_get_link_grade(link_conf); + + if (data[n_data].grade > max_grade) { + max_grade = data[n_data].grade; + *best_link_idx = n_data; + } + n_data++; + } + + return n_data; +} + +struct iwl_mvm_bw_to_rssi_threshs { + s8 low; + s8 high; +}; + +#define BW_TO_RSSI_THRESHOLDS(_bw) \ + [IWL_PHY_CHANNEL_MODE ## _bw] = { \ + .low = IWL_MVM_LOW_RSSI_THRESH_##_bw##MHZ, \ + .high = IWL_MVM_HIGH_RSSI_THRESH_##_bw##MHZ \ + } + +s8 iwl_mvm_get_esr_rssi_thresh(struct iwl_mvm *mvm, + const struct cfg80211_chan_def *chandef, + bool low) +{ + const struct iwl_mvm_bw_to_rssi_threshs bw_to_rssi_threshs_map[] = { + BW_TO_RSSI_THRESHOLDS(20), + BW_TO_RSSI_THRESHOLDS(40), + BW_TO_RSSI_THRESHOLDS(80), + BW_TO_RSSI_THRESHOLDS(160) + /* 320 MHz has the same thresholds as 20 MHz */ + }; + const struct iwl_mvm_bw_to_rssi_threshs *threshs; + u8 chan_width = iwl_mvm_get_channel_width(chandef); + + if (WARN_ON(chandef->chan->band != NL80211_BAND_2GHZ && + chandef->chan->band != NL80211_BAND_5GHZ && + chandef->chan->band != NL80211_BAND_6GHZ)) + return S8_MAX; + + /* 6 GHz will always use 20 MHz thresholds, regardless of the BW */ + if (chan_width == IWL_PHY_CHANNEL_MODE320) + chan_width = IWL_PHY_CHANNEL_MODE20; + + threshs = &bw_to_rssi_threshs_map[chan_width]; + + return low ? 
threshs->low : threshs->high; +} + +static u32 +iwl_mvm_esr_disallowed_with_link(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + const struct iwl_mvm_link_sel_data *link, + bool primary) +{ + struct wiphy *wiphy = mvm->hw->wiphy; + struct ieee80211_bss_conf *conf; + enum iwl_mvm_esr_state ret = 0; + s8 thresh; + + conf = wiphy_dereference(wiphy, vif->link_conf[link->link_id]); + if (WARN_ON_ONCE(!conf)) + return false; + + /* BT Coex effects eSR mode only if one of the links is on LB */ + if (link->chandef->chan->band == NL80211_BAND_2GHZ && + (!iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif, link->signal, + primary))) + ret |= IWL_MVM_ESR_EXIT_COEX; + + thresh = iwl_mvm_get_esr_rssi_thresh(mvm, link->chandef, + false); + + if (link->signal < thresh) + ret |= IWL_MVM_ESR_EXIT_LOW_RSSI; + + if (conf->csa_active) + ret |= IWL_MVM_ESR_EXIT_CSA; + + if (ret) { + IWL_DEBUG_INFO(mvm, + "Link %d is not allowed for esr\n", + link->link_id); + iwl_mvm_print_esr_state(mvm, ret); + } + return ret; +} + +VISIBLE_IF_IWLWIFI_KUNIT +bool iwl_mvm_mld_valid_link_pair(struct ieee80211_vif *vif, + const struct iwl_mvm_link_sel_data *a, + const struct iwl_mvm_link_sel_data *b) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + enum iwl_mvm_esr_state ret = 0; + + /* Per-link considerations */ + if (iwl_mvm_esr_disallowed_with_link(mvm, vif, a, true) || + iwl_mvm_esr_disallowed_with_link(mvm, vif, b, false)) + return false; + + if (a->chandef->width != b->chandef->width || + !(a->chandef->chan->band == NL80211_BAND_6GHZ && + b->chandef->chan->band == NL80211_BAND_5GHZ)) + ret |= IWL_MVM_ESR_EXIT_BANDWIDTH; + + if (ret) { + IWL_DEBUG_INFO(mvm, + "Links %d and %d are not a valid pair for EMLSR\n", + a->link_id, b->link_id); + iwl_mvm_print_esr_state(mvm, ret); + return false; + } + + return true; + +} +EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_mld_valid_link_pair); + +/* + * Returns the combined eSR grade of two given links. + * Returns 0 if eSR is not allowed with these 2 links. 
+ */ +static +unsigned int iwl_mvm_get_esr_grade(struct ieee80211_vif *vif, + const struct iwl_mvm_link_sel_data *a, + const struct iwl_mvm_link_sel_data *b, + u8 *primary_id) +{ + struct ieee80211_bss_conf *primary_conf; + struct wiphy *wiphy = ieee80211_vif_to_wdev(vif)->wiphy; + unsigned int primary_load; + + lockdep_assert_wiphy(wiphy); + + /* a is always primary, b is always secondary */ + if (b->grade > a->grade) + swap(a, b); + + *primary_id = a->link_id; + + if (!iwl_mvm_mld_valid_link_pair(vif, a, b)) + return 0; + + primary_conf = wiphy_dereference(wiphy, vif->link_conf[*primary_id]); + + if (WARN_ON_ONCE(!primary_conf)) + return 0; + + primary_load = iwl_mvm_get_chan_load(primary_conf); + + return a->grade + + ((b->grade * primary_load) / SCALE_FACTOR); +} + +void iwl_mvm_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS]; + struct iwl_mvm_link_sel_data *best_link; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u32 max_active_links = iwl_mvm_max_active_links(mvm, vif); + u16 usable_links = ieee80211_vif_usable_links(vif); + u8 best, primary_link, best_in_pair, n_data; + u16 max_esr_grade = 0, new_active_links; + + lockdep_assert_wiphy(mvm->hw->wiphy); + + if (!mvmvif->authorized || !ieee80211_vif_is_mld(vif)) + return; + + if (!IWL_MVM_AUTO_EML_ENABLE) + return; + + /* The logic below is a simple version that doesn't suit more than 2 + * links + */ + WARN_ON_ONCE(max_active_links > 2); + + n_data = iwl_mvm_set_link_selection_data(vif, data, usable_links, + &best); + + if (WARN(!n_data, "Couldn't find a valid grade for any link!\n")) + return; + + best_link = &data[best]; + primary_link = best_link->link_id; + new_active_links = BIT(best_link->link_id); + + /* eSR is not supported/blocked, or only one usable link */ + if (max_active_links == 1 || !iwl_mvm_vif_has_esr_cap(mvm, vif) || + mvmvif->esr_disable_reason || n_data == 1) + goto set_active; + + for (u8 a = 0; a < n_data; a++) + for (u8 b = a + 1; b < n_data; b++) { + u16 esr_grade = iwl_mvm_get_esr_grade(vif, &data[a], + &data[b], + &best_in_pair); + + if (esr_grade <= max_esr_grade) + continue; + + max_esr_grade = esr_grade; + primary_link = best_in_pair; + new_active_links = BIT(data[a].link_id) | + BIT(data[b].link_id); + } + + /* No valid pair was found, go with the best link */ + if (hweight16(new_active_links) <= 1) + goto set_active; + + /* For equal grade - prefer EMLSR */ + if (best_link->grade > max_esr_grade) { + primary_link = best_link->link_id; + new_active_links = BIT(best_link->link_id); + } +set_active: + IWL_DEBUG_INFO(mvm, "Link selection result: 0x%x. 
Primary = %d\n", + new_active_links, primary_link); + ieee80211_set_active_links_async(vif, new_active_links); + mvmvif->link_selection_res = new_active_links; + mvmvif->link_selection_primary = primary_link; +} + +u8 iwl_mvm_get_primary_link(struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + /* relevant data is written with both locks held, so read with either */ + lockdep_assert(lockdep_is_held(&mvmvif->mvm->mutex) || + lockdep_is_held(&mvmvif->mvm->hw->wiphy->mtx)); + + if (!ieee80211_vif_is_mld(vif)) + return 0; + + /* In AP mode, there is no primary link */ + if (vif->type == NL80211_IFTYPE_AP) + return __ffs(vif->active_links); + + if (mvmvif->esr_active && + !WARN_ON(!(BIT(mvmvif->primary_link) & vif->active_links))) + return mvmvif->primary_link; + + return __ffs(vif->active_links); +} + +/* + * For non-MLO/single link, this will return the deflink/single active link, + * respectively + */ +u8 iwl_mvm_get_other_link(struct ieee80211_vif *vif, u8 link_id) +{ + switch (hweight16(vif->active_links)) { + case 0: + return 0; + default: + WARN_ON(1); + fallthrough; + case 1: + return __ffs(vif->active_links); + case 2: + return __ffs(vif->active_links & ~BIT(link_id)); + } +} + +/* Reasons that can cause esr prevention */ +#define IWL_MVM_ESR_PREVENT_REASONS IWL_MVM_ESR_EXIT_MISSED_BEACON +#define IWL_MVM_PREVENT_ESR_TIMEOUT (HZ * 400) +#define IWL_MVM_ESR_PREVENT_SHORT (HZ * 300) +#define IWL_MVM_ESR_PREVENT_LONG (HZ * 600) + +static bool iwl_mvm_check_esr_prevention(struct iwl_mvm *mvm, + struct iwl_mvm_vif *mvmvif, + enum iwl_mvm_esr_state reason) +{ + bool timeout_expired = time_after(jiffies, + mvmvif->last_esr_exit.ts + + IWL_MVM_PREVENT_ESR_TIMEOUT); + unsigned long delay; + + lockdep_assert_held(&mvm->mutex); + + /* Only handle reasons that can cause prevention */ + if (!(reason & IWL_MVM_ESR_PREVENT_REASONS)) + return false; + + /* + * Reset the counter if more than 400 seconds have passed between one + * exit and the other, or if we exited due to a different reason. + * Will also reset the counter after the long prevention is done. + */ + if (timeout_expired || mvmvif->last_esr_exit.reason != reason) { + mvmvif->exit_same_reason_count = 1; + return false; + } + + mvmvif->exit_same_reason_count++; + if (WARN_ON(mvmvif->exit_same_reason_count < 2 || + mvmvif->exit_same_reason_count > 3)) + return false; + + mvmvif->esr_disable_reason |= IWL_MVM_ESR_BLOCKED_PREVENTION; + + /* + * For the second exit, use a short prevention, and for the third one, + * use a long prevention. + */ + delay = mvmvif->exit_same_reason_count == 2 ? 
+ IWL_MVM_ESR_PREVENT_SHORT : + IWL_MVM_ESR_PREVENT_LONG; + + IWL_DEBUG_INFO(mvm, + "Preventing EMLSR for %ld seconds due to %u exits with the reason = %s (0x%x)\n", + delay / HZ, mvmvif->exit_same_reason_count, + iwl_get_esr_state_string(reason), reason); + + wiphy_delayed_work_queue(mvm->hw->wiphy, + &mvmvif->prevent_esr_done_wk, delay); + return true; +} + +#define IWL_MVM_TRIGGER_LINK_SEL_TIME (IWL_MVM_TRIGGER_LINK_SEL_TIME_SEC * HZ) + +/* API to exit eSR mode */ +void iwl_mvm_exit_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + enum iwl_mvm_esr_state reason, + u8 link_to_keep) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u16 new_active_links; + bool prevented; + + lockdep_assert_held(&mvm->mutex); + + /* Nothing to do */ + if (!mvmvif->esr_active) + return; + + if (WARN_ON(!ieee80211_vif_is_mld(vif) || !mvmvif->authorized)) + return; + + if (WARN_ON(!(vif->active_links & BIT(link_to_keep)))) + link_to_keep = __ffs(vif->active_links); + + new_active_links = BIT(link_to_keep); + IWL_DEBUG_INFO(mvm, + "Exiting EMLSR. reason = %s (0x%x). Current active links=0x%x, new active links = 0x%x\n", + iwl_get_esr_state_string(reason), reason, + vif->active_links, new_active_links); + + ieee80211_set_active_links_async(vif, new_active_links); + + /* Prevent EMLSR if needed */ + prevented = iwl_mvm_check_esr_prevention(mvm, mvmvif, reason); + + /* Remember why and when we exited EMLSR */ + mvmvif->last_esr_exit.ts = jiffies; + mvmvif->last_esr_exit.reason = reason; + + /* + * If EMLSR is prevented now - don't try to get back to EMLSR. + * If we exited due to a blocking event, we will try to get back to + * EMLSR when the corresponding unblocking event will happen. + */ + if (prevented || reason & IWL_MVM_BLOCK_ESR_REASONS) + return; + + /* If EMLSR is not blocked - try enabling it again in 30 seconds */ + wiphy_delayed_work_queue(mvm->hw->wiphy, + &mvmvif->mlo_int_scan_wk, + round_jiffies_relative(IWL_MVM_TRIGGER_LINK_SEL_TIME)); +} + +void iwl_mvm_block_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + enum iwl_mvm_esr_state reason, + u8 link_to_keep) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + lockdep_assert_held(&mvm->mutex); + + /* This should be called only with disable reasons */ + if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS))) + return; + + if (!(mvmvif->esr_disable_reason & reason)) { + IWL_DEBUG_INFO(mvm, + "Blocking EMLSR mode. 
reason = %s (0x%x)\n", + iwl_get_esr_state_string(reason), reason); + iwl_mvm_print_esr_state(mvm, mvmvif->esr_disable_reason); + } + + mvmvif->esr_disable_reason |= reason; + + iwl_mvm_exit_esr(mvm, vif, reason, link_to_keep); +} + +int iwl_mvm_block_esr_sync(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + enum iwl_mvm_esr_state reason) +{ + int primary_link = iwl_mvm_get_primary_link(vif); + int ret; + + if (!IWL_MVM_AUTO_EML_ENABLE || !ieee80211_vif_is_mld(vif)) + return 0; + + /* This should be called only with blocking reasons */ + if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS))) + return 0; + + /* leave ESR immediately, not only async with iwl_mvm_block_esr() */ + ret = ieee80211_set_active_links(vif, BIT(primary_link)); + if (ret) + return ret; + + mutex_lock(&mvm->mutex); + /* only additionally block for consistency and to avoid concurrency */ + iwl_mvm_block_esr(mvm, vif, reason, primary_link); + mutex_unlock(&mvm->mutex); + + return 0; +} + +static void iwl_mvm_esr_unblocked(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + bool need_new_sel = time_after(jiffies, mvmvif->last_esr_exit.ts + + IWL_MVM_TRIGGER_LINK_SEL_TIME); + + lockdep_assert_held(&mvm->mutex); + + if (!ieee80211_vif_is_mld(vif) || !mvmvif->authorized || + mvmvif->esr_active) + return; + + IWL_DEBUG_INFO(mvm, "EMLSR is unblocked\n"); + + /* + * If EMLSR was blocked for more than 30 seconds, or the last link + * selection decided to not enter EMLSR, trigger a new scan. + */ + if (need_new_sel || hweight16(mvmvif->link_selection_res) < 2) { + IWL_DEBUG_INFO(mvm, "Trigger MLO scan\n"); + wiphy_delayed_work_queue(mvm->hw->wiphy, + &mvmvif->mlo_int_scan_wk, 0); + /* + * If EMLSR was blocked for less than 30 seconds, and the last link + * selection decided to use EMLSR, activate EMLSR using the previous + * link selection result. + */ + } else { + IWL_DEBUG_INFO(mvm, + "Use the latest link selection result: 0x%x\n", + mvmvif->link_selection_res); + ieee80211_set_active_links_async(vif, + mvmvif->link_selection_res); + } +} + +void iwl_mvm_unblock_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + enum iwl_mvm_esr_state reason) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + lockdep_assert_held(&mvm->mutex); + + /* This should be called only with disable reasons */ + if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS))) + return; + + /* No Change */ + if (!(mvmvif->esr_disable_reason & reason)) + return; + + mvmvif->esr_disable_reason &= ~reason; + + IWL_DEBUG_INFO(mvm, + "Unblocking EMLSR mode. 
reason = %s (0x%x)\n", + iwl_get_esr_state_string(reason), reason); + iwl_mvm_print_esr_state(mvm, mvmvif->esr_disable_reason); + + if (!mvmvif->esr_disable_reason) + iwl_mvm_esr_unblocked(mvm, vif); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 228ede7b8957..5a06f887769a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -1163,6 +1163,13 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm, WLAN_EID_EXT_CHANSWITCH_ANN, beacon->len)); + if (vif->type == NL80211_IFTYPE_AP && + iwl_fw_lookup_cmd_ver(mvm->fw, BEACON_TEMPLATE_CMD, 0) >= 14) + beacon_cmd.btwt_offset = + cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, + WLAN_EID_S1G_TWT, + beacon->len)); + return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, sizeof(beacon_cmd)); } @@ -1591,23 +1598,23 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, u32 id = le32_to_cpu(mb->link_id); union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt }; u32 mac_type; + int link_id = -1; u8 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, MISSED_BEACONS_NOTIFICATION, 0); - rcu_read_lock(); - /* before version four the ID in the notification refers to mac ID */ if (notif_ver < 4) { - vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true); + vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, false); } else { struct ieee80211_bss_conf *bss_conf = - iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, id, true); + iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, id, false); if (!bss_conf) - goto out; + return; vif = bss_conf->vif; + link_id = bss_conf->link_id; } IWL_DEBUG_INFO(mvm, @@ -1620,7 +1627,7 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, le32_to_cpu(mb->num_expected_beacons)); if (!vif) - goto out; + return; mac_type = iwl_mvm_get_mac_type(vif); @@ -1647,6 +1654,10 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, "missed_beacons:%d, missed_beacons_since_rx:%d\n", rx_missed_bcon, rx_missed_bcon_since_rx); } + } else if (rx_missed_bcon >= IWL_MVM_MISSED_BEACONS_EXIT_ESR_THRESH && + link_id >= 0 && hweight16(vif->active_links) > 1) { + iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_MISSED_BEACON, + iwl_mvm_get_other_link(vif, link_id)); } else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD) { if (!iwl_mvm_has_new_tx_api(mvm)) ieee80211_beacon_loss(vif); @@ -1660,7 +1671,7 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, trigger = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_MISSED_BEACONS); if (!trigger) - goto out; + return; bcon_trig = (void *)trigger->data; stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon); @@ -1672,9 +1683,6 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx || rx_missed_bcon >= stop_trig_missed_bcon) iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL); - -out: - rcu_read_unlock(); } void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 8f4b063d6243..486a6b8f3c97 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -359,8 +359,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) /* Set this early since we need to have it for the check below */ if (mvm->mld_api_is_used && mvm->nvm_data->sku_cap_11be_enable 
&& !iwlwifi_mod_params.disable_11ax && - !iwlwifi_mod_params.disable_11be) + !iwlwifi_mod_params.disable_11be) { hw->wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT; + /* we handle this already earlier, but need it for MLO */ + ieee80211_hw_set(hw, HANDLES_QUIET_CSA); + } /* With MLD FW API, it tracks timing by itself, * no need for any timing from the host @@ -720,6 +723,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; + ieee80211_hw_set(hw, DISALLOW_PUNCTURING_5GHZ); + #ifdef CONFIG_PM_SLEEP if ((unified || mvm->fw->img[IWL_UCODE_WOWLAN].num_sec) && mvm->trans->ops->d3_suspend && @@ -903,6 +908,8 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq) &mvmtxq->state) && !test_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &mvmtxq->state) && + !test_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA, + &mvmtxq->state) && !test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))) { skb = ieee80211_tx_dequeue(hw, txq); @@ -1097,15 +1104,21 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data); spin_unlock_bh(&mvm->time_event_lock); - memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data)); + mvmvif->bf_enabled = false; + mvmvif->ba_enabled = false; mvmvif->ap_sta = NULL; + mvmvif->esr_active = false; + vif->driver_flags &= ~IEEE80211_VIF_EML_ACTIVE; + for_each_mvm_vif_valid_link(mvmvif, link_id) { mvmvif->link[link_id]->ap_sta_id = IWL_MVM_INVALID_STA; mvmvif->link[link_id]->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID; mvmvif->link[link_id]->phy_ctxt = NULL; mvmvif->link[link_id]->active = 0; mvmvif->link[link_id]->igtk = NULL; + memset(&mvmvif->link[link_id]->bf_data, 0, + sizeof(mvmvif->link[link_id]->bf_data)); } probe_data = rcu_dereference_protected(mvmvif->deflink.probe_resp_data, @@ -1332,6 +1345,13 @@ void iwl_mvm_mac_stop(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + /* Stop internal MLO scan, if running */ + mutex_lock(&mvm->mutex); + iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_INT_MLO, false); + mutex_unlock(&mvm->mutex); + + wiphy_work_cancel(mvm->hw->wiphy, &mvm->trig_link_selection_wk); + wiphy_work_flush(mvm->hw->wiphy, &mvm->async_handlers_wiphy_wk); flush_work(&mvm->async_handlers_wk); flush_work(&mvm->add_stream_wk); @@ -1399,7 +1419,9 @@ int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (tx_power == IWL_DEFAULT_MAX_TX_POWER) cmd.common.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER); - if (cmd_ver == 7) + if (cmd_ver == 8) + len = sizeof(cmd.v8); + else if (cmd_ver == 7) len = sizeof(cmd.v7); else if (cmd_ver == 6) len = sizeof(cmd.v6); @@ -1418,6 +1440,20 @@ int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd); } +static void iwl_mvm_post_csa_tx(void *data, struct ieee80211_sta *sta) +{ + struct ieee80211_hw *hw = data; + int i; + + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) { + struct iwl_mvm_txq *mvmtxq = + iwl_mvm_txq_from_mac80211(sta->txq[i]); + + clear_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA, &mvmtxq->state); + iwl_mvm_mac_itxq_xmit(hw, sta->txq[i]); + } +} + int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf) @@ -1434,6 +1470,7 @@ int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, u8 ap_sta_id = mvmvif->link[link_id]->ap_sta_id; mvmvif->csa_bcn_pending = false; + mvmvif->csa_blocks_tx = false; mvmsta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id); if (WARN_ON(!mvmsta)) { @@ 
-1455,6 +1492,18 @@ int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, iwl_mvm_stop_session_protection(mvm, vif); } + } else if (vif->type == NL80211_IFTYPE_AP && mvmvif->csa_blocks_tx) { + struct iwl_mvm_txq *mvmtxq = + iwl_mvm_txq_from_mac80211(vif->txq); + + clear_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA, &mvmtxq->state); + + local_bh_disable(); + iwl_mvm_mac_itxq_xmit(hw, vif->txq); + ieee80211_iterate_stations_atomic(hw, iwl_mvm_post_csa_tx, hw); + local_bh_enable(); + + mvmvif->csa_blocks_tx = false; } mvmvif->ps_disabled = false; @@ -1564,6 +1613,65 @@ static int iwl_mvm_alloc_bcast_mcast_sta(struct iwl_mvm *mvm, IWL_STA_MULTICAST); } +static void iwl_mvm_prevent_esr_done_wk(struct wiphy *wiphy, + struct wiphy_work *wk) +{ + struct iwl_mvm_vif *mvmvif = + container_of(wk, struct iwl_mvm_vif, prevent_esr_done_wk.work); + struct iwl_mvm *mvm = mvmvif->mvm; + struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm); + + mutex_lock(&mvm->mutex); + iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_PREVENTION); + mutex_unlock(&mvm->mutex); +} + +static void iwl_mvm_mlo_int_scan_wk(struct wiphy *wiphy, struct wiphy_work *wk) +{ + struct iwl_mvm_vif *mvmvif = container_of(wk, struct iwl_mvm_vif, + mlo_int_scan_wk.work); + struct ieee80211_vif *vif = + container_of((void *)mvmvif, struct ieee80211_vif, drv_priv); + + mutex_lock(&mvmvif->mvm->mutex); + + iwl_mvm_int_mlo_scan(mvmvif->mvm, vif); + + mutex_unlock(&mvmvif->mvm->mutex); +} + +static void iwl_mvm_unblock_esr_tpt(struct wiphy *wiphy, struct wiphy_work *wk) +{ + struct iwl_mvm_vif *mvmvif = + container_of(wk, struct iwl_mvm_vif, unblock_esr_tpt_wk); + struct iwl_mvm *mvm = mvmvif->mvm; + struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm); + + mutex_lock(&mvm->mutex); + iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_TPT); + mutex_unlock(&mvm->mutex); +} + +void iwl_mvm_mac_init_mvmvif(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif) +{ + lockdep_assert_held(&mvm->mutex); + + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + return; + + INIT_DELAYED_WORK(&mvmvif->csa_work, + iwl_mvm_channel_switch_disconnect_wk); + + wiphy_delayed_work_init(&mvmvif->prevent_esr_done_wk, + iwl_mvm_prevent_esr_done_wk); + + wiphy_delayed_work_init(&mvmvif->mlo_int_scan_wk, + iwl_mvm_mlo_int_scan_wk); + + wiphy_work_init(&mvmvif->unblock_esr_tpt_wk, + iwl_mvm_unblock_esr_tpt); +} + static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@ -1574,6 +1682,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); + iwl_mvm_mac_init_mvmvif(mvm, mvmvif); + mvmvif->mvm = mvm; /* the first link always points to the default one */ @@ -1651,15 +1761,10 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, IEEE80211_VIF_SUPPORTS_CQM_RSSI; } - if (vif->p2p || iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1) < 5) - vif->driver_flags |= IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW; - if (vif->type == NL80211_IFTYPE_P2P_DEVICE) mvm->p2p_device_vif = vif; iwl_mvm_tcm_add_vif(mvm, vif); - INIT_DELAYED_WORK(&mvmvif->csa_work, - iwl_mvm_channel_switch_disconnect_wk); if (vif->type == NL80211_IFTYPE_MONITOR) { mvm->monitor_on = true; @@ -1697,6 +1802,8 @@ out: void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { /* * Flush the ROC worker which will flush the OFFCHANNEL queue. 
@@ -1705,6 +1812,16 @@ void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, */ flush_work(&mvm->roc_done_wk); } + + wiphy_delayed_work_cancel(mvm->hw->wiphy, + &mvmvif->prevent_esr_done_wk); + + wiphy_delayed_work_cancel(mvm->hw->wiphy, + &mvmvif->mlo_int_scan_wk); + + wiphy_work_cancel(mvm->hw->wiphy, &mvmvif->unblock_esr_tpt_wk); + + cancel_delayed_work_sync(&mvmvif->csa_work); } /* This function is doing the common part of removing the interface for @@ -2545,6 +2662,7 @@ void iwl_mvm_bss_info_changed_station_assoc(struct iwl_mvm *mvm, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; + int link_id; /* The firmware tracks the MU-MIMO group on its own. * However, on HW restart we should restore this data. @@ -2560,7 +2678,8 @@ void iwl_mvm_bss_info_changed_station_assoc(struct iwl_mvm *mvm, iwl_mvm_recalc_multicast(mvm); /* reset rssi values */ - mvmvif->bf_data.ave_beacon_signal = 0; + for_each_mvm_vif_valid_link(mvmvif, link_id) + mvmvif->link[link_id]->bf_data.ave_beacon_signal = 0; iwl_mvm_bt_coex_vif_change(mvm); iwl_mvm_update_smps_on_active_links(mvm, vif, IWL_MVM_SMPS_REQ_TT, @@ -2601,10 +2720,14 @@ iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm, } if (changes & BSS_CHANGED_CQM) { - IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n"); - /* reset cqm events tracking */ - mvmvif->bf_data.last_cqm_event = 0; - if (mvmvif->bf_data.bf_enabled) { + struct iwl_mvm_vif_link_info *link_info = + mvmvif->link[link_conf->link_id]; + + IWL_DEBUG_MAC80211(mvm, "CQM info_changed\n"); + if (link_info) + link_info->bf_data.last_cqm_event = 0; + + if (mvmvif->bf_enabled) { /* FIXME: need to update per link when FW API will * support it */ @@ -3816,15 +3939,29 @@ iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm, mvmvif->authorized = 1; + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + mvmvif->link_selection_res = vif->active_links; + mvmvif->link_selection_primary = + vif->active_links ? __ffs(vif->active_links) : 0; + } + callbacks->mac_ctxt_changed(mvm, vif, false); iwl_mvm_mei_host_associated(mvm, vif, mvm_sta); + memset(&mvmvif->last_esr_exit, 0, + sizeof(mvmvif->last_esr_exit)); + + iwl_mvm_block_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_TPT, 0); + + /* Block until FW notif will arrive */ + iwl_mvm_block_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW, 0); + /* when client is authorized (AP station marked as such), - * try to enable more links + * try to enable the best link(s). */ if (vif->type == NL80211_IFTYPE_STATION && !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) - iwl_mvm_mld_select_links(mvm, vif, false); + iwl_mvm_select_links(mvm, vif); } mvm_sta->authorized = true; @@ -3868,9 +4005,22 @@ iwl_mvm_sta_state_authorized_to_assoc(struct iwl_mvm *mvm, * time. 
*/ mvmvif->authorized = 0; + mvmvif->link_selection_res = 0; /* disable beacon filtering */ iwl_mvm_disable_beacon_filter(mvm, vif); + + wiphy_delayed_work_cancel(mvm->hw->wiphy, + &mvmvif->prevent_esr_done_wk); + + wiphy_delayed_work_cancel(mvm->hw->wiphy, + &mvmvif->mlo_int_scan_wk); + + wiphy_work_cancel(mvm->hw->wiphy, &mvmvif->unblock_esr_tpt_wk); + + /* No need for the periodic statistics anymore */ + if (ieee80211_vif_is_mld(vif) && mvmvif->esr_active) + iwl_mvm_request_periodic_system_statistics(mvm, false); } return 0; @@ -4684,6 +4834,10 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif, */ flush_work(&mvm->roc_done_wk); + ret = iwl_mvm_esr_non_bss_link(mvm, vif, 0, true); + if (ret) + return ret; + mutex_lock(&mvm->mutex); switch (vif->type) { @@ -4727,9 +4881,7 @@ int iwl_mvm_cancel_roc(struct ieee80211_hw *hw, IWL_DEBUG_MAC80211(mvm, "enter\n"); - mutex_lock(&mvm->mutex); iwl_mvm_stop_roc(mvm, vif); - mutex_unlock(&mvm->mutex); IWL_DEBUG_MAC80211(mvm, "leave\n"); return 0; @@ -5398,7 +5550,7 @@ static int iwl_mvm_old_pre_chan_sw_sta(struct iwl_mvm *mvm, if (chsw->block_tx) iwl_mvm_csa_client_absent(mvm, vif); - if (mvmvif->bf_data.bf_enabled) { + if (mvmvif->bf_enabled) { int ret = iwl_mvm_disable_beacon_filter(mvm, vif); if (ret) @@ -5411,19 +5563,32 @@ static int iwl_mvm_old_pre_chan_sw_sta(struct iwl_mvm *mvm, return 0; } +static void iwl_mvm_csa_block_txqs(void *data, struct ieee80211_sta *sta) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) { + struct iwl_mvm_txq *mvmtxq = + iwl_mvm_txq_from_mac80211(sta->txq[i]); + + set_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA, &mvmtxq->state); + } +} + #define IWL_MAX_CSA_BLOCK_TX 1500 -int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, +int iwl_mvm_pre_channel_switch(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_channel_switch *chsw) { - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct ieee80211_vif *csa_vif; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_txq *mvmtxq; int ret; - mutex_lock(&mvm->mutex); + lockdep_assert_held(&mvm->mutex); mvmvif->csa_failed = false; + mvmvif->csa_blocks_tx = false; IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n", chsw->chandef.center_freq1); @@ -5438,30 +5603,38 @@ int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, rcu_dereference_protected(mvm->csa_vif, lockdep_is_held(&mvm->mutex)); if (WARN_ONCE(csa_vif && csa_vif->bss_conf.csa_active, - "Another CSA is already in progress")) { - ret = -EBUSY; - goto out_unlock; - } + "Another CSA is already in progress")) + return -EBUSY; /* we still didn't unblock tx. 
prevent new CS meanwhile */ if (rcu_dereference_protected(mvm->csa_tx_blocked_vif, - lockdep_is_held(&mvm->mutex))) { - ret = -EBUSY; - goto out_unlock; - } + lockdep_is_held(&mvm->mutex))) + return -EBUSY; rcu_assign_pointer(mvm->csa_vif, vif); if (WARN_ONCE(mvmvif->csa_countdown, - "Previous CSA countdown didn't complete")) { - ret = -EBUSY; - goto out_unlock; - } + "Previous CSA countdown didn't complete")) + return -EBUSY; mvmvif->csa_target_freq = chsw->chandef.chan->center_freq; + if (!chsw->block_tx) + break; + /* don't need blocking in driver otherwise - mac80211 will do */ + if (!ieee80211_hw_check(mvm->hw, HANDLES_QUIET_CSA)) + break; + + mvmvif->csa_blocks_tx = true; + mvmtxq = iwl_mvm_txq_from_mac80211(vif->txq); + set_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA, &mvmtxq->state); + ieee80211_iterate_stations_atomic(mvm->hw, + iwl_mvm_csa_block_txqs, + NULL); break; case NL80211_IFTYPE_STATION: + mvmvif->csa_blocks_tx = chsw->block_tx; + /* * In the new flow FW is in charge of timing the switch so there * is no need for all of this @@ -5476,10 +5649,8 @@ int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, * we don't know the dtim period. In this case, the firmware can't * track the beacons. */ - if (!vif->cfg.assoc || !vif->bss_conf.dtim_period) { - ret = -EBUSY; - goto out_unlock; - } + if (!vif->cfg.assoc || !vif->bss_conf.dtim_period) + return -EBUSY; if (chsw->delay > IWL_MAX_CSA_BLOCK_TX && hweight16(vif->valid_links) <= 1) @@ -5501,7 +5672,7 @@ int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { ret = iwl_mvm_old_pre_chan_sw_sta(mvm, vif, chsw); if (ret) - goto out_unlock; + return ret; } else { iwl_mvm_schedule_client_csa(mvm, vif, chsw); } @@ -5517,12 +5688,23 @@ int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, ret = iwl_mvm_power_update_ps(mvm); if (ret) - goto out_unlock; + return ret; /* we won't be on this channel any longer */ iwl_mvm_teardown_tdls_peers(mvm); -out_unlock: + return ret; +} + +static int iwl_mvm_mac_pre_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel_switch *chsw) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_pre_channel_switch(mvm, vif, chsw); mutex_unlock(&mvm->mutex); return ret; @@ -5626,8 +5808,8 @@ static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop) void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop) { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_vif *mvmvif; struct iwl_mvm_sta *mvmsta; struct ieee80211_sta *sta; bool ap_sta_done = false; @@ -5639,11 +5821,22 @@ void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, return; } + if (!drop && hweight16(vif->active_links) <= 1) { + int link_id = vif->active_links ? 
__ffs(vif->active_links) : 0; + struct ieee80211_bss_conf *link_conf; + + link_conf = wiphy_dereference(hw->wiphy, + vif->link_conf[link_id]); + if (WARN_ON(!link_conf)) + return; + if (link_conf->csa_active && mvmvif->csa_blocks_tx) + drop = true; + } + /* Make sure we're done with the deferred traffic before flushing */ flush_work(&mvm->add_stream_wk); mutex_lock(&mvm->mutex); - mvmvif = iwl_mvm_vif_from_mac80211(vif); /* flush the AP-station and all TDLS peers */ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { @@ -5706,6 +5899,65 @@ void iwl_mvm_mac_flush_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mutex_unlock(&mvm->mutex); } +static int iwl_mvm_mac_get_acs_survey(struct iwl_mvm *mvm, int idx, + struct survey_info *survey) +{ + int chan_idx; + enum nl80211_band band; + int ret; + + mutex_lock(&mvm->mutex); + + if (!mvm->acs_survey) { + ret = -ENOENT; + goto out; + } + + /* Find and return the next entry that has a non-zero active time */ + for (band = 0; band < NUM_NL80211_BANDS; band++) { + struct ieee80211_supported_band *sband = + mvm->hw->wiphy->bands[band]; + + if (!sband) + continue; + + for (chan_idx = 0; chan_idx < sband->n_channels; chan_idx++) { + struct iwl_mvm_acs_survey_channel *info = + &mvm->acs_survey->bands[band][chan_idx]; + + if (!info->time) + continue; + + /* Found (the next) channel to report */ + survey->channel = &sband->channels[chan_idx]; + survey->filled = SURVEY_INFO_TIME | + SURVEY_INFO_TIME_BUSY | + SURVEY_INFO_TIME_RX | + SURVEY_INFO_TIME_TX; + survey->time = info->time; + survey->time_busy = info->time_busy; + survey->time_rx = info->time_rx; + survey->time_tx = info->time_tx; + survey->noise = info->noise; + if (survey->noise < 0) + survey->filled |= SURVEY_INFO_NOISE_DBM; + + /* Clear time so that channel is only reported once */ + info->time = 0; + + ret = 0; + goto out; + } + } + + ret = -ENOENT; + +out: + mutex_unlock(&mvm->mutex); + + return ret; +} + int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey) { @@ -5718,14 +5970,18 @@ int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, memset(survey, 0, sizeof(*survey)); - /* only support global statistics right now */ - if (idx != 0) - return -ENOENT; - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) return -ENOENT; + /* + * Return the beacon stats at index zero and pass on following indices + * to the function returning the full survey, most likely for ACS + * (Automatic Channel Selection). + */ + if (idx > 0) + return iwl_mvm_mac_get_acs_survey(mvm, idx - 1, survey); + mutex_lock(&mvm->mutex); if (iwl_mvm_firmware_running(mvm)) { @@ -6297,7 +6553,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = { .set_tim = iwl_mvm_set_tim, .channel_switch = iwl_mvm_channel_switch, - .pre_channel_switch = iwl_mvm_pre_channel_switch, + .pre_channel_switch = iwl_mvm_mac_pre_channel_switch, .post_channel_switch = iwl_mvm_post_channel_switch, .abort_channel_switch = iwl_mvm_abort_channel_switch, .channel_switch_rx_beacon = iwl_mvm_channel_switch_rx_beacon, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c index 084314bf6f36..0a3b7284eedd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c @@ -14,6 +14,8 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); + iwl_mvm_mac_init_mvmvif(mvm, mvmvif); + mvmvif->mvm = mvm; /* Not much to do here. 
The stack will not allow interface @@ -92,6 +94,9 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, mvm->csme_vif = vif; } + if (vif->p2p || iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1) < 5) + vif->driver_flags |= IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW; + goto out_unlock; out_free_bf: @@ -189,23 +194,43 @@ static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw, mutex_unlock(&mvm->mutex); } -static unsigned int iwl_mvm_mld_count_active_links(struct ieee80211_vif *vif) +static unsigned int iwl_mvm_mld_count_active_links(struct iwl_mvm_vif *mvmvif) { unsigned int n_active = 0; int i; for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) { - struct ieee80211_bss_conf *link_conf; - - link_conf = link_conf_dereference_protected(vif, i); - if (link_conf && - rcu_access_pointer(link_conf->chanctx_conf)) + if (mvmvif->link[i] && mvmvif->link[i]->phy_ctxt) n_active++; } return n_active; } +static void iwl_mvm_restart_mpdu_count(struct iwl_mvm *mvm, + struct iwl_mvm_vif *mvmvif) +{ + struct ieee80211_sta *ap_sta = mvmvif->ap_sta; + struct iwl_mvm_sta *mvmsta; + + lockdep_assert_held(&mvm->mutex); + + if (!ap_sta) + return; + + mvmsta = iwl_mvm_sta_from_mac80211(ap_sta); + if (!mvmsta->mpdu_counters) + return; + + for (int q = 0; q < mvm->trans->num_rx_queues; q++) { + spin_lock_bh(&mvmsta->mpdu_counters[q].lock); + memset(mvmsta->mpdu_counters[q].per_link, 0, + sizeof(mvmsta->mpdu_counters[q].per_link)); + mvmsta->mpdu_counters[q].window_start = jiffies; + spin_unlock_bh(&mvmsta->mpdu_counters[q].lock); + } +} + static int iwl_mvm_esr_mode_active(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { @@ -233,6 +258,22 @@ static int iwl_mvm_esr_mode_active(struct iwl_mvm *mvm, link->phy_ctxt->rlc_disabled = true; } + if (vif->active_links == mvmvif->link_selection_res && + !WARN_ON(!(vif->active_links & BIT(mvmvif->link_selection_primary)))) + mvmvif->primary_link = mvmvif->link_selection_primary; + else + mvmvif->primary_link = __ffs(vif->active_links); + + /* Needed for tracking RSSI */ + iwl_mvm_request_periodic_system_statistics(mvm, true); + + /* + * Restart the MPDU counters and the counting window, so when the + * statistics arrive (which is where we look at the counters) we + * will be at the end of the window. + */ + iwl_mvm_restart_mpdu_count(mvm, mvmvif); + return ret; } @@ -245,18 +286,18 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm, { u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; - unsigned int n_active = iwl_mvm_mld_count_active_links(vif); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + unsigned int n_active = iwl_mvm_mld_count_active_links(mvmvif); unsigned int link_id = link_conf->link_id; int ret; - /* if the assigned one was not counted yet, count it now */ - if (!rcu_access_pointer(link_conf->chanctx_conf)) - n_active++; - if (WARN_ON_ONCE(!mvmvif->link[link_id])) return -EINVAL; + /* if the assigned one was not counted yet, count it now */ + if (!mvmvif->link[link_id]->phy_ctxt) + n_active++; + /* mac parameters such as HE support can change at this stage * For sta, need first to configure correct state from drv_sta_state * and only after that update mac config. 
@@ -276,6 +317,7 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm, ret = iwl_mvm_esr_mode_active(mvm, vif); if (ret) { IWL_ERR(mvm, "failed to activate ESR mode (%d)\n", ret); + iwl_mvm_request_periodic_system_statistics(mvm, false); goto out; } } @@ -296,13 +338,8 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm, * this needs the phy context assigned (and in FW?), and we cannot * do it later because it needs to be initialized as soon as we're * able to TX on the link, i.e. when active. - * - * Firmware restart isn't quite correct yet for MLO, but we don't - * need to do it in that case anyway since it will happen from the - * normal station state callback. */ - if (mvmvif->ap_sta && - !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + if (mvmvif->ap_sta) { struct ieee80211_link_sta *link_sta; rcu_read_lock(); @@ -354,6 +391,18 @@ static int iwl_mvm_mld_assign_vif_chanctx(struct ieee80211_hw *hw, struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; + /* update EMLSR mode */ + if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) { + ret = iwl_mvm_esr_non_bss_link(mvm, vif, link_conf->link_id, + true); + /* + * Don't activate this link if failed to exit EMLSR in + * the BSS interface + */ + if (ret) + return ret; + } + mutex_lock(&mvm->mutex); ret = __iwl_mvm_mld_assign_vif_chanctx(mvm, vif, link_conf, ctx, false); mutex_unlock(&mvm->mutex); @@ -404,6 +453,11 @@ static int iwl_mvm_esr_mode_inactive(struct iwl_mvm *mvm, break; } + iwl_mvm_request_periodic_system_statistics(mvm, false); + + /* Start a new counting window */ + iwl_mvm_restart_mpdu_count(mvm, mvmvif); + return ret; } @@ -416,7 +470,7 @@ __iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - unsigned int n_active = iwl_mvm_mld_count_active_links(vif); + unsigned int n_active = iwl_mvm_mld_count_active_links(mvmvif); unsigned int link_id = link_conf->link_id; /* shouldn't happen, but verify link_id is valid before accessing */ @@ -472,6 +526,56 @@ static void iwl_mvm_mld_unassign_vif_chanctx(struct ieee80211_hw *hw, iwl_mvm_add_link(mvm, vif, link_conf); } mutex_unlock(&mvm->mutex); + + /* update EMLSR mode */ + if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) + iwl_mvm_esr_non_bss_link(mvm, vif, link_conf->link_id, false); +} + +static void +iwl_mvm_send_ap_tx_power_constraint_cmd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf) +{ + struct iwl_txpower_constraints_cmd cmd = {}; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_vif_link_info *link_info = + mvmvif->link[bss_conf->link_id]; + u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, AP_TX_POWER_CONSTRAINTS_CMD); + u32 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, + IWL_FW_CMD_VER_UNKNOWN); + int ret; + + lockdep_assert_held(&mvm->mutex); + + if (cmd_ver == IWL_FW_CMD_VER_UNKNOWN) + return; + + if (!link_info->active || + link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID) + return; + + if (bss_conf->chanreq.oper.chan->band != NL80211_BAND_6GHZ || + bss_conf->chanreq.oper.chan->flags & + IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT) + return; + + cmd.link_id = cpu_to_le16(link_info->fw_link_id); + /* + * Currently supporting VLP Soft AP only. 
+ */ + cmd.ap_type = cpu_to_le16(IWL_6GHZ_AP_TYPE_VLP); + memset(cmd.psd_pwr, DEFAULT_TPE_TX_POWER, sizeof(cmd.psd_pwr)); + memset(cmd.eirp_pwr, DEFAULT_TPE_TX_POWER, sizeof(cmd.eirp_pwr)); + + ret = iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(PHY_OPS_GROUP, + AP_TX_POWER_CONSTRAINTS_CMD), + 0, sizeof(cmd), &cmd); + if (ret) + IWL_ERR(mvm, + "failed to send AP_TX_POWER_CONSTRAINTS_CMD (%d)\n", + ret); } static int iwl_mvm_mld_start_ap_ibss(struct ieee80211_hw *hw, @@ -483,6 +587,10 @@ static int iwl_mvm_mld_start_ap_ibss(struct ieee80211_hw *hw, int ret; mutex_lock(&mvm->mutex); + + if (vif->type == NL80211_IFTYPE_AP) + iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif, link_conf); + /* Send the beacon template */ ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif, link_conf); if (ret) @@ -601,126 +709,23 @@ static int iwl_mvm_mld_mac_sta_state(struct ieee80211_hw *hw, &callbacks); } -struct iwl_mvm_link_sel_data { - u8 link_id; - enum nl80211_band band; - enum nl80211_chan_width width; - bool active; -}; - -static bool iwl_mvm_mld_valid_link_pair(struct iwl_mvm_link_sel_data *a, - struct iwl_mvm_link_sel_data *b) -{ - return a->band != b->band; -} - -void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - bool valid_links_changed) +static bool iwl_mvm_esr_bw_criteria(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) { - struct iwl_mvm_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS]; - unsigned long usable_links = ieee80211_vif_usable_links(vif); - u32 max_active_links = iwl_mvm_max_active_links(mvm, vif); - u16 new_active_links; - u8 link_id, n_data = 0, i, j; - - if (!IWL_MVM_AUTO_EML_ENABLE) - return; - - if (!ieee80211_vif_is_mld(vif) || usable_links == 1) - return; - - /* The logic below is a simple version that doesn't suit more than 2 - * links - */ - WARN_ON_ONCE(max_active_links > 2); - - /* if only a single active link is supported, assume that the one - * selected by higher layer for connection establishment is the best. - */ - if (max_active_links == 1 && !valid_links_changed) - return; - - /* If we are already using the maximal number of active links, don't do - * any change. This can later be optimized to pick a 'better' link pair. 
- */ - if (hweight16(vif->active_links) == max_active_links) - return; - - rcu_read_lock(); - - for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) { - struct ieee80211_bss_conf *link_conf = - rcu_dereference(vif->link_conf[link_id]); + struct ieee80211_bss_conf *other_link; + int link_id; - if (WARN_ON_ONCE(!link_conf)) + /* Exit EMLSR if links don't have equal bandwidths */ + for_each_vif_active_link(vif, other_link, link_id) { + if (link_id == link_conf->link_id) continue; - - data[n_data].link_id = link_id; - data[n_data].band = link_conf->chanreq.oper.chan->band; - data[n_data].width = link_conf->chanreq.oper.width; - data[n_data].active = vif->active_links & BIT(link_id); - n_data++; - } - - rcu_read_unlock(); - - /* this is expected to be the current active link */ - if (n_data == 1) - return; - - new_active_links = 0; - - /* Assume that after association only a single link is active, thus, - * select only the 2nd link - */ - if (!valid_links_changed) { - for (i = 0; i < n_data; i++) { - if (data[i].active) - break; - } - - if (WARN_ON_ONCE(i == n_data)) - return; - - for (j = 0; j < n_data; j++) { - if (i == j) - continue; - - if (iwl_mvm_mld_valid_link_pair(&data[i], &data[j])) - break; - } - - if (j != n_data) - new_active_links = BIT(data[i].link_id) | - BIT(data[j].link_id); - } else { - /* Try to find a valid link pair for EMLSR operation. If a pair - * is not found continue using the current active link. - */ - for (i = 0; i < n_data; i++) { - for (j = 0; j < n_data; j++) { - if (i == j) - continue; - - if (iwl_mvm_mld_valid_link_pair(&data[i], - &data[j])) - break; - } - - /* found a valid pair for EMLSR, use it */ - if (j != n_data) { - new_active_links = BIT(data[i].link_id) | - BIT(data[j].link_id); - break; - } - } + if (link_conf->chanreq.oper.width == + other_link->chanreq.oper.width) + return true; } - if (!new_active_links) - return; - - if (vif->active_links != new_active_links) - ieee80211_set_active_links_async(vif, new_active_links); + return false; } static void @@ -752,6 +757,14 @@ iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm, link_changes |= LINK_CONTEXT_MODIFY_HE_PARAMS; } + if ((changes & BSS_CHANGED_BANDWIDTH) && + ieee80211_vif_link_active(vif, link_conf->link_id) && + mvmvif->esr_active && + !iwl_mvm_esr_bw_criteria(mvm, vif, link_conf)) + iwl_mvm_exit_esr(mvm, vif, + IWL_MVM_ESR_EXIT_BANDWIDTH, + iwl_mvm_get_primary_link(vif)); + /* if associated, maybe puncturing changed - we'll check later */ if (vif->cfg.assoc) link_changes |= LINK_CONTEXT_MODIFY_EHT_PARAMS; @@ -767,9 +780,6 @@ iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm, if (ret) IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); - if (changes & BSS_CHANGED_MLD_VALID_LINKS) - iwl_mvm_mld_select_links(mvm, vif, true); - memcpy(mvmvif->link[link_conf->link_id]->bssid, link_conf->bssid, ETH_ALEN); @@ -912,6 +922,11 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm, if (ret) IWL_ERR(mvm, "failed to update power mode\n"); } + + if (changes & (BSS_CHANGED_MLD_VALID_LINKS | BSS_CHANGED_MLD_TTLM) && + ieee80211_vif_is_mld(vif) && mvmvif->authorized) + wiphy_delayed_work_queue(mvm->hw->wiphy, + &mvmvif->mlo_int_scan_wk, 0); } static void @@ -1190,6 +1205,14 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw, if (new_links == 0) { mvmvif->link[0] = &mvmvif->deflink; err = iwl_mvm_add_link(mvm, vif, &vif->bss_conf); + if (err == 0) + mvmvif->primary_link = 0; + } else if (!(new_links & BIT(mvmvif->primary_link))) { + /* + * Ensure we 
always have a valid primary_link, the real + * decision happens later when PHY is activated. + */ + mvmvif->primary_link = __ffs(new_links); } out_err: @@ -1218,68 +1241,14 @@ iwl_mvm_mld_change_sta_links(struct ieee80211_hw *hw, return ret; } -/* - * This function receives a subset of the usable links bitmap and - * returns the primary link id, and -1 if such link doesn't exist - * (e.g. non-MLO connection) or wasn't found. - */ -int iwl_mvm_mld_get_primary_link(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - unsigned long usable_links) -{ - struct iwl_mvm_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS]; - u8 link_id, n_data = 0; - - if (!ieee80211_vif_is_mld(vif) || !vif->cfg.assoc) - return -1; - - for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) { - struct ieee80211_bss_conf *link_conf = - link_conf_dereference_protected(vif, link_id); - - if (WARN_ON_ONCE(!link_conf)) - continue; - - data[n_data].link_id = link_id; - data[n_data].band = link_conf->chanreq.oper.chan->band; - data[n_data].width = link_conf->chanreq.oper.width; - data[n_data].active = true; - n_data++; - } - - if (n_data <= 1) - return -1; - - /* The logic should be modified to handle more than 2 links */ - WARN_ON_ONCE(n_data > 2); - - /* Primary link is the link with the wider bandwidth or higher band */ - if (data[0].width > data[1].width) - return data[0].link_id; - if (data[0].width < data[1].width) - return data[1].link_id; - if (data[0].band >= data[1].band) - return data[0].link_id; - - return data[1].link_id; -} - -/* - * This function receives a bitmap of usable links and check if we can enter - * eSR on those links. - */ -static bool iwl_mvm_can_enter_esr(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - unsigned long desired_links) +bool iwl_mvm_vif_has_esr_cap(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int primary_link = iwl_mvm_mld_get_primary_link(mvm, vif, - desired_links); const struct wiphy_iftype_ext_capab *ext_capa; - bool ret = true; - int link_id; - if (primary_link < 0) + lockdep_assert_held(&mvm->mutex); + + if (!ieee80211_vif_is_mld(vif) || !vif->cfg.assoc || + hweight16(ieee80211_vif_usable_links(vif)) == 1) return false; if (!(vif->cfg.eml_cap & IEEE80211_EML_CAP_EMLSR_SUPP)) @@ -1287,30 +1256,8 @@ static bool iwl_mvm_can_enter_esr(struct iwl_mvm *mvm, ext_capa = cfg80211_get_iftype_ext_capa(mvm->hw->wiphy, ieee80211_vif_type_p2p(vif)); - if (!ext_capa || - !(ext_capa->eml_capabilities & IEEE80211_EML_CAP_EMLSR_SUPP)) - return false; - - for_each_set_bit(link_id, &desired_links, IEEE80211_MLD_MAX_NUM_LINKS) { - struct ieee80211_bss_conf *link_conf = - link_conf_dereference_protected(vif, link_id); - - if (WARN_ON_ONCE(!link_conf)) - continue; - - /* BT Coex effects eSR mode only if one of the link is on LB */ - if (link_conf->chanreq.oper.chan->band != NL80211_BAND_2GHZ) - continue; - - ret = iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif, link_id, - primary_link); - // Mark eSR as disabled for the next time - if (!ret) - mvmvif->bt_coex_esr_disabled = true; - break; - } - - return ret; + return (ext_capa && + (ext_capa->eml_capabilities & IEEE80211_EML_CAP_EMLSR_SUPP)); } static bool iwl_mvm_mld_can_activate_links(struct ieee80211_hw *hw, @@ -1333,8 +1280,9 @@ static bool iwl_mvm_mld_can_activate_links(struct ieee80211_hw *hw, } /* If it is an eSR device, check that we can enter eSR */ - if (iwl_mvm_is_esr_supported(mvm->fwrt.trans)) - ret = iwl_mvm_can_enter_esr(mvm, vif, desired_links); + ret = 
iwl_mvm_is_esr_supported(mvm->fwrt.trans) && + iwl_mvm_vif_has_esr_cap(mvm, vif); + unlock: mutex_unlock(&mvm->mutex); return ret; @@ -1358,6 +1306,45 @@ iwl_mvm_mld_can_neg_ttlm(struct ieee80211_hw *hw, struct ieee80211_vif *vif, return NEG_TTLM_RES_ACCEPT; } +static int +iwl_mvm_mld_mac_pre_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel_switch *chsw) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + + mutex_lock(&mvm->mutex); + if (mvmvif->esr_active) { + u8 primary = iwl_mvm_get_primary_link(vif); + int selected; + + /* prefer primary unless quiet CSA on it */ + if (chsw->link_id == primary && chsw->block_tx) + selected = iwl_mvm_get_other_link(vif, primary); + else + selected = primary; + + iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_CSA, selected); + mutex_unlock(&mvm->mutex); + + /* + * If we've not kept the link active that's doing the CSA + * then we don't need to do anything else, just return. + */ + if (selected != chsw->link_id) + return 0; + + mutex_lock(&mvm->mutex); + } + + ret = iwl_mvm_pre_channel_switch(mvm, vif, chsw); + mutex_unlock(&mvm->mutex); + + return ret; +} + const struct ieee80211_ops iwl_mvm_mld_hw_ops = { .tx = iwl_mvm_mac_tx, .wake_tx_queue = iwl_mvm_mac_wake_tx_queue, @@ -1411,7 +1398,7 @@ const struct ieee80211_ops iwl_mvm_mld_hw_ops = { .tx_last_beacon = iwl_mvm_tx_last_beacon, .channel_switch = iwl_mvm_channel_switch, - .pre_channel_switch = iwl_mvm_pre_channel_switch, + .pre_channel_switch = iwl_mvm_mld_mac_pre_channel_switch, .post_channel_switch = iwl_mvm_post_channel_switch, .abort_channel_switch = iwl_mvm_abort_channel_switch, .channel_switch_rx_beacon = iwl_mvm_channel_switch_rx_beacon, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c index 23e64a757cfe..b7a461dba41e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2022-2023 Intel Corporation + * Copyright (C) 2022-2024 Intel Corporation */ #include "mvm.h" #include "time-sync.h" @@ -9,7 +9,9 @@ u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int filter_link_id) { + struct ieee80211_link_sta *link_sta; struct iwl_mvm_sta *mvmsta; + struct ieee80211_vif *vif; unsigned int link_id; u32 result = 0; @@ -17,26 +19,27 @@ u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta, return 0; mvmsta = iwl_mvm_sta_from_mac80211(sta); + vif = mvmsta->vif; /* it's easy when the STA is not an MLD */ if (!sta->valid_links) return BIT(mvmsta->deflink.sta_id); /* but if it is an MLD, get the mask of all the FW STAs it has ... 
*/ - for (link_id = 0; link_id < ARRAY_SIZE(mvmsta->link); link_id++) { - struct iwl_mvm_link_sta *link_sta; + for_each_sta_active_link(vif, sta, link_sta, link_id) { + struct iwl_mvm_link_sta *mvm_link_sta; /* unless we have a specific link in mind */ if (filter_link_id >= 0 && link_id != filter_link_id) continue; - link_sta = + mvm_link_sta = rcu_dereference_check(mvmsta->link[link_id], lockdep_is_held(&mvm->mutex)); - if (!link_sta) + if (!mvm_link_sta) continue; - result |= BIT(link_sta->sta_id); + result |= BIT(mvm_link_sta->sta_id); } return result; @@ -582,14 +585,14 @@ static int iwl_mvm_mld_alloc_sta_links(struct iwl_mvm *mvm, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + struct ieee80211_link_sta *link_sta; unsigned int link_id; int ret; lockdep_assert_held(&mvm->mutex); - for (link_id = 0; link_id < ARRAY_SIZE(sta->link); link_id++) { - if (!rcu_access_pointer(sta->link[link_id]) || - mvm_sta->link[link_id]) + for_each_sta_active_link(vif, sta, link_sta, link_id) { + if (WARN_ON(mvm_sta->link[link_id])) continue; ret = iwl_mvm_mld_alloc_sta_link(mvm, vif, sta, link_id); @@ -616,9 +619,6 @@ static void iwl_mvm_mld_set_ap_sta_id(struct ieee80211_sta *sta, } } -/* FIXME: consider waiting for mac80211 to add the STA instead of allocating - * queues here - */ static int iwl_mvm_alloc_sta_after_restart(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) @@ -723,7 +723,6 @@ int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_mld_set_ap_sta_id(sta, mvm_vif->link[link_id], mvm_link_sta); } - return 0; err: @@ -849,6 +848,8 @@ int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta, link_id, stay_in_fw); } + kfree(mvm_sta->mpdu_counters); + mvm_sta->mpdu_counters = NULL; return ret; } @@ -989,6 +990,10 @@ static int iwl_mvm_mld_update_sta_baids(struct iwl_mvm *mvm, u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD); int baid; + /* mac80211 will remove sessions later, but we ignore all that */ + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + return 0; + BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid)); for (baid = 0; baid < ARRAY_SIZE(mvm->baid_map); baid++) { @@ -1122,10 +1127,21 @@ int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm, } if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { - if (WARN_ON(!mvm_sta->link[link_id])) { + struct iwl_mvm_link_sta *mvm_link_sta = + rcu_dereference_protected(mvm_sta->link[link_id], + lockdep_is_held(&mvm->mutex)); + u32 sta_id; + + if (WARN_ON(!mvm_link_sta)) { ret = -EINVAL; goto err; } + + sta_id = mvm_link_sta->sta_id; + + rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta); + rcu_assign_pointer(mvm->fw_id_to_link_sta[sta_id], + link_sta); } else { if (WARN_ON(mvm_sta->link[link_id])) { ret = -EINVAL; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index f0b24f00938b..1f58c727fa63 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -255,18 +255,14 @@ enum iwl_mvm_low_latency_cause { }; /** -* struct iwl_mvm_vif_bf_data - beacon filtering related data -* @bf_enabled: indicates if beacon filtering is enabled -* @ba_enabled: indicated if beacon abort is enabled +* struct iwl_mvm_link_bf_data - beacon filtering related data * @ave_beacon_signal: average beacon signal * @last_cqm_event: rssi of the last cqm event * 
@bt_coex_min_thold: minimum threshold for BT coex * @bt_coex_max_thold: maximum threshold for BT coex * @last_bt_coex_event: rssi of the last BT coex event */ -struct iwl_mvm_vif_bf_data { - bool bf_enabled; - bool ba_enabled; +struct iwl_mvm_link_bf_data { int ave_beacon_signal; int last_cqm_event; int bt_coex_min_thold; @@ -309,6 +305,7 @@ struct iwl_probe_resp_data { * @listen_lmac: indicates this link is allocated to the listen LMAC * @mcast_sta: multicast station * @phy_ctxt: phy context allocated to this link, if any + * @bf_data: beacon filtering data */ struct iwl_mvm_vif_link_info { u8 bssid[ETH_ALEN]; @@ -344,6 +341,63 @@ struct iwl_mvm_vif_link_info { struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS]; u16 mgmt_queue; + + struct iwl_mvm_link_bf_data bf_data; +}; + +/** + * enum iwl_mvm_esr_state - defines reasons for which the EMLSR is exited or + * blocked. + * The low 16 bits are used for blocking reasons, and the 16 higher bits + * are used for exit reasons. + * For the blocking reasons - use iwl_mvm_(un)block_esr(), and for the exit + * reasons - use iwl_mvm_exit_esr(). + * + * Note: new reasons shall be added to HANDLE_ESR_REASONS as well (for logs) + * + * @IWL_MVM_ESR_BLOCKED_PREVENTION: Prevent EMLSR to avoid entering and exiting + * in a loop. + * @IWL_MVM_ESR_BLOCKED_WOWLAN: WOWLAN is preventing the enablement of EMLSR + * @IWL_MVM_ESR_BLOCKED_TPT: block EMLSR when there is not enough traffic + * @IWL_MVM_ESR_BLOCKED_FW: FW didn't recommended/forced exit from EMLSR + * @IWL_MVM_ESR_BLOCKED_NON_BSS: An active non-bssid link's preventing EMLSR + * @IWL_MVM_ESR_EXIT_MISSED_BEACON: exited EMLSR due to missed beacons + * @IWL_MVM_ESR_EXIT_LOW_RSSI: link is deactivated/not allowed for EMLSR + * due to low RSSI. + * @IWL_MVM_ESR_EXIT_COEX: link is deactivated/not allowed for EMLSR + * due to BT Coex. + * @IWL_MVM_ESR_EXIT_BANDWIDTH: Bandwidths of primary and secondry links + * preventing the enablement of EMLSR + * @IWL_MVM_ESR_EXIT_CSA: CSA happened, so exit EMLSR + * @IWL_MVM_ESR_EXIT_LINK_USAGE: Exit EMLSR due to low tpt on secondary link + */ +enum iwl_mvm_esr_state { + IWL_MVM_ESR_BLOCKED_PREVENTION = 0x1, + IWL_MVM_ESR_BLOCKED_WOWLAN = 0x2, + IWL_MVM_ESR_BLOCKED_TPT = 0x4, + IWL_MVM_ESR_BLOCKED_FW = 0x8, + IWL_MVM_ESR_BLOCKED_NON_BSS = 0x10, + IWL_MVM_ESR_EXIT_MISSED_BEACON = 0x10000, + IWL_MVM_ESR_EXIT_LOW_RSSI = 0x20000, + IWL_MVM_ESR_EXIT_COEX = 0x40000, + IWL_MVM_ESR_EXIT_BANDWIDTH = 0x80000, + IWL_MVM_ESR_EXIT_CSA = 0x100000, + IWL_MVM_ESR_EXIT_LINK_USAGE = 0x200000, +}; + +#define IWL_MVM_BLOCK_ESR_REASONS 0xffff + +const char *iwl_get_esr_state_string(enum iwl_mvm_esr_state state); + +/** + * struct iwl_mvm_esr_exit - details of the last exit from EMLSR mode. + * @reason: The reason for the last exit from EMLSR. + * &iwl_mvm_prevent_esr_reasons. Will be 0 before exiting EMLSR. + * @ts: the time stamp of the last time we existed EMLSR. + */ +struct iwl_mvm_esr_exit { + unsigned long ts; + enum iwl_mvm_esr_state reason; }; /** @@ -361,7 +415,6 @@ struct iwl_mvm_vif_link_info { * @pm_enabled - indicate if MAC power management is allowed * @monitor_active: indicates that monitor context is configured, and that the * interface should get quota etc. - * @bt_coex_esr_disabled: indicates if esr is disabled due to bt coex * @low_latency: bit flags for low latency * see enum &iwl_mvm_low_latency_cause for causes. 
* @low_latency_actual: boolean, indicates low latency is set, @@ -371,14 +424,30 @@ struct iwl_mvm_vif_link_info { * @csa_countdown: indicates that CSA countdown may be started * @csa_failed: CSA failed to schedule time event, report an error later * @csa_bcn_pending: indicates that we are waiting for a beacon on a new channel + * @csa_blocks_tx: CSA is blocking TX * @features: hw features active for this vif * @ap_beacon_time: AP beacon time for synchronisation (on older FW) + * @bf_enabled: indicates if beacon filtering is enabled + * @ba_enabled: indicated if beacon abort is enabled * @bcn_prot: beacon protection data (keys; FIXME: needs to be per link) - * @bf_data: beacon filtering data * @deflink: default link data for use in non-MLO * @link: link data for each link in MLO * @esr_active: indicates eSR mode is active + * @esr_disable_reason: a bitmap of &enum iwl_mvm_esr_state * @pm_enabled: indicates powersave is enabled + * @link_selection_res: bitmap of active links as it was decided in the last + * link selection. Valid only for a MLO vif after assoc. 0 if there wasn't + * any link selection yet. + * @link_selection_primary: primary link selected by link selection + * @primary_link: primary link in eSR. Valid only for an associated MLD vif, + * and in eSR mode. Valid only for a STA. + * @last_esr_exit: Details of the last exit from EMLSR. + * @exit_same_reason_count: The number of times we exited due to the specified + * @last_esr_exit::reason, only counting exits due to + * &IWL_MVM_ESR_PREVENT_REASONS. + * @prevent_esr_done_wk: work that should be done when esr prevention ends. + * @mlo_int_scan_wk: work for the internal MLO scan. + * @unblock_esr_tpt_wk: work for unblocking EMLSR when tpt is high enough. */ struct iwl_mvm_vif { struct iwl_mvm *mvm; @@ -392,7 +461,6 @@ struct iwl_mvm_vif { bool pm_enabled; bool monitor_active; bool esr_active; - bool bt_coex_esr_disabled; u8 low_latency: 6; u8 low_latency_actual: 1; @@ -400,8 +468,10 @@ struct iwl_mvm_vif { u8 authorized:1; bool ps_disabled; + u32 esr_disable_reason; u32 ap_beacon_time; - struct iwl_mvm_vif_bf_data bf_data; + bool bf_enabled; + bool ba_enabled; #ifdef CONFIG_PM /* WoWLAN GTK rekey data */ @@ -435,6 +505,7 @@ struct iwl_mvm_vif { struct iwl_dbgfs_bf dbgfs_bf; struct iwl_mac_power_cmd mac_pwr_cmd; int dbgfs_quota_min; + bool ftm_unprotected; #endif /* FW identified misbehaving AP */ @@ -444,6 +515,7 @@ struct iwl_mvm_vif { bool csa_countdown; bool csa_failed; bool csa_bcn_pending; + bool csa_blocks_tx; u16 csa_target_freq; u16 csa_count; u16 csa_misbehave; @@ -466,6 +538,15 @@ struct iwl_mvm_vif { struct ieee80211_key_conf __rcu *keys[2]; } bcn_prot; + u16 link_selection_res; + u8 link_selection_primary; + u8 primary_link; + struct iwl_mvm_esr_exit last_esr_exit; + u8 exit_same_reason_count; + struct wiphy_delayed_work prevent_esr_done_wk; + struct wiphy_delayed_work mlo_int_scan_wk; + struct wiphy_work unblock_esr_tpt_wk; + struct iwl_mvm_vif_link_info deflink; struct iwl_mvm_vif_link_info *link[IEEE80211_MLD_MAX_NUM_LINKS]; }; @@ -490,10 +571,12 @@ enum iwl_scan_status { IWL_MVM_SCAN_REGULAR = BIT(0), IWL_MVM_SCAN_SCHED = BIT(1), IWL_MVM_SCAN_NETDETECT = BIT(2), + IWL_MVM_SCAN_INT_MLO = BIT(3), IWL_MVM_SCAN_STOPPING_REGULAR = BIT(8), IWL_MVM_SCAN_STOPPING_SCHED = BIT(9), IWL_MVM_SCAN_STOPPING_NETDETECT = BIT(10), + IWL_MVM_SCAN_STOPPING_INT_MLO = BIT(11), IWL_MVM_SCAN_REGULAR_MASK = IWL_MVM_SCAN_REGULAR | IWL_MVM_SCAN_STOPPING_REGULAR, @@ -501,6 +584,8 @@ enum iwl_scan_status { IWL_MVM_SCAN_STOPPING_SCHED, 
IWL_MVM_SCAN_NETDETECT_MASK = IWL_MVM_SCAN_NETDETECT | IWL_MVM_SCAN_STOPPING_NETDETECT, + IWL_MVM_SCAN_INT_MLO_MASK = IWL_MVM_SCAN_INT_MLO | + IWL_MVM_SCAN_STOPPING_INT_MLO, IWL_MVM_SCAN_STOPPING_MASK = 0xff << IWL_MVM_SCAN_STOPPING_SHIFT, IWL_MVM_SCAN_MASK = 0xff, @@ -754,9 +839,10 @@ struct iwl_mvm_txq { struct list_head list; u16 txq_id; atomic_t tx_request; -#define IWL_MVM_TXQ_STATE_STOP_FULL 0 -#define IWL_MVM_TXQ_STATE_STOP_REDIRECT 1 -#define IWL_MVM_TXQ_STATE_READY 2 +#define IWL_MVM_TXQ_STATE_READY 0 +#define IWL_MVM_TXQ_STATE_STOP_FULL 1 +#define IWL_MVM_TXQ_STATE_STOP_REDIRECT 2 +#define IWL_MVM_TXQ_STATE_STOP_AP_CSA 3 unsigned long state; }; @@ -834,6 +920,35 @@ struct iwl_mei_scan_filter { struct work_struct scan_work; }; +/** + * struct iwl_mvm_acs_survey_channel - per-channel survey information + * + * Stripped down version of &struct survey_info. + * + * @time: time in ms the radio was on the channel + * @time_busy: time in ms the channel was sensed busy + * @time_tx: time in ms spent transmitting data + * @time_rx: time in ms spent receiving data + * @noise: channel noise in dBm + */ +struct iwl_mvm_acs_survey_channel { + u32 time; + u32 time_busy; + u32 time_tx; + u32 time_rx; + s8 noise; +}; + +struct iwl_mvm_acs_survey { + struct iwl_mvm_acs_survey_channel *bands[NUM_NL80211_BANDS]; + + /* Overall number of channels */ + int n_channels; + + /* Storage space for per-channel information follows */ + struct iwl_mvm_acs_survey_channel channels[] __counted_by(n_channels); +}; + struct iwl_mvm { /* for logger access */ struct device *dev; @@ -853,6 +968,8 @@ struct iwl_mvm { /* For async rx handlers that require the wiphy lock */ struct wiphy_work async_handlers_wiphy_wk; + struct wiphy_work trig_link_selection_wk; + struct work_struct roc_done_wk; unsigned long init_status; @@ -1201,6 +1318,8 @@ struct iwl_mvm { struct iwl_mei_scan_filter mei_scan_filter; + struct iwl_mvm_acs_survey *acs_survey; + bool statistics_clear; }; @@ -1570,17 +1689,14 @@ static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_trans *trans = mvm->fwrt.trans; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - lockdep_assert_held(&mvm->mutex); if (vif->type == NL80211_IFTYPE_AP) return mvm->fw->ucode_capa.num_beacons; - if ((iwl_mvm_is_esr_supported(trans) && - !mvmvif->bt_coex_esr_disabled) || - ((CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM && - CSR_HW_RFID_IS_CDB(trans->hw_rf_id)))) + /* Check if HW supports eSR or STR */ + if (iwl_mvm_is_esr_supported(trans) || + (CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM && + CSR_HW_RFID_IS_CDB(trans->hw_rf_id))) return IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM; return 1; @@ -1720,6 +1836,8 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear); +int iwl_mvm_request_periodic_system_statistics(struct iwl_mvm *mvm, + bool enable); void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm); /* NVM */ @@ -1776,6 +1894,8 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm); int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm); +void iwl_mvm_mac_init_mvmvif(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif); + /* * FW notifications / CMD responses handlers * Convention: iwl_mvm_rx_<NAME OF THE CMD> @@ -1930,6 +2050,26 @@ int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, int iwl_mvm_disable_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 
struct ieee80211_bss_conf *link_conf); +void iwl_mvm_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +u8 iwl_mvm_get_primary_link(struct ieee80211_vif *vif); +u8 iwl_mvm_get_other_link(struct ieee80211_vif *vif, u8 link_id); + +struct iwl_mvm_link_sel_data { + u8 link_id; + const struct cfg80211_chan_def *chandef; + s32 signal; + u16 grade; +}; + +#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS) +unsigned int iwl_mvm_get_link_grade(struct ieee80211_bss_conf *link_conf); +bool iwl_mvm_mld_valid_link_pair(struct ieee80211_vif *vif, + const struct iwl_mvm_link_sel_data *a, + const struct iwl_mvm_link_sel_data *b); + +s8 iwl_mvm_average_dbm_values(const struct iwl_umac_scan_channel_survey_notif *notif); +#endif + /* AP and IBSS */ bool iwl_mvm_start_ap_ibss_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *ret); @@ -2005,9 +2145,13 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_scan_ies *ies); size_t iwl_mvm_scan_size(struct iwl_mvm *mvm); int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify); + int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm); void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm); void iwl_mvm_scan_timeout_wk(struct work_struct *work); +int iwl_mvm_int_mlo_scan(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +void iwl_mvm_rx_channel_survey_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); /* Scheduled scan */ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, @@ -2113,7 +2257,8 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool disable_offloading, bool offload_ns, - u32 cmd_flags); + u32 cmd_flags, + u8 sta_id); /* BT Coex */ int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm); @@ -2133,12 +2278,6 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, u8 iwl_mvm_bt_coex_get_single_ant_msk(struct iwl_mvm *mvm, u8 enabled_ants); u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, struct ieee80211_tx_info *info, u8 ac); -bool iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - int link_id, int primary_link); -void iwl_mvm_bt_coex_update_vif_esr(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - int link_id); /* beacon filtering */ #ifdef CONFIG_IWLWIFI_DEBUGFS @@ -2450,6 +2589,21 @@ static inline u8 iwl_mvm_phy_band_from_nl80211(enum nl80211_band band) } } +static inline u8 iwl_mvm_nl80211_band_from_phy(u8 phy_band) +{ + switch (phy_band) { + case PHY_BAND_24: + return NL80211_BAND_2GHZ; + case PHY_BAND_5: + return NL80211_BAND_5GHZ; + case PHY_BAND_6: + return NL80211_BAND_6GHZ; + default: + WARN_ONCE(1, "Unsupported phy band (%u)\n", phy_band); + return NL80211_BAND_5GHZ; + } +} + /* Channel Switch */ void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk); int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, @@ -2711,7 +2865,7 @@ void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw); void iwl_mvm_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel_switch *chsw); -int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, +int iwl_mvm_pre_channel_switch(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_channel_switch *chsw); void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw, @@ -2753,12 +2907,6 @@ int iwl_mvm_set_hw_timestamp(struct ieee80211_hw *hw, int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, struct ieee80211_vif *vif); bool 
iwl_mvm_enable_fils(struct iwl_mvm *mvm, struct ieee80211_chanctx_conf *ctx); -void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - bool valid_links_changed); -int iwl_mvm_mld_get_primary_link(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - unsigned long usable_links); - bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm, struct ieee80211_chanctx_conf *ctx); @@ -2779,4 +2927,30 @@ int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm, struct ieee80211_channel *channel, struct ieee80211_vif *vif, int duration, u32 activity); + +/* EMLSR */ +bool iwl_mvm_vif_has_esr_cap(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +void iwl_mvm_block_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + enum iwl_mvm_esr_state reason, + u8 link_to_keep); +int iwl_mvm_block_esr_sync(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + enum iwl_mvm_esr_state reason); +void iwl_mvm_unblock_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + enum iwl_mvm_esr_state reason); +void iwl_mvm_exit_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + enum iwl_mvm_esr_state reason, + u8 link_to_keep); +s8 iwl_mvm_get_esr_rssi_thresh(struct iwl_mvm *mvm, + const struct cfg80211_chan_def *chandef, + bool low); +void iwl_mvm_bt_coex_update_link_esr(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + int link_id); +bool +iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + s32 link_rssi, + bool primary); +int iwl_mvm_esr_non_bss_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + unsigned int link_id, bool active); #endif /* __IWL_MVM_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c index dfb16ca5b438..1eb21fe861e5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2021-2022 Intel Corporation + * Copyright (C) 2012-2014, 2021-2022, 2024 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015 Intel Deutschland GmbH */ @@ -30,7 +30,8 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool disable_offloading, bool offload_ns, - u32 cmd_flags) + u32 cmd_flags, + u8 sta_id) { union { struct iwl_proto_offload_cmd_v1 v1; @@ -205,6 +206,9 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, if (!disable_offloading) common->enabled = cpu_to_le32(enabled); + if (ver >= 4) + cmd.v4.sta_id = cpu_to_le32(sta_id); + hcmd.len[0] = size; return iwl_mvm_send_cmd(mvm, &hcmd); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index a93981cb9714..53283d052e18 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -145,6 +145,24 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS); } +static void iwl_mvm_rx_esr_mode_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_mvm_esr_mode_notif *notif = (void *)pkt->data; + struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm); + + /* FW recommendations is only for entering EMLSR */ + if (!vif || iwl_mvm_vif_from_mac80211(vif)->esr_active) + return; + + if (le32_to_cpu(notif->action) == ESR_RECOMMEND_ENTER) + iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW); + else + 
iwl_mvm_block_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW, + iwl_mvm_get_primary_link(vif)); +} + static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { @@ -365,13 +383,15 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { iwl_mvm_rx_scan_match_found, RX_HANDLER_SYNC), RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif, - RX_HANDLER_ASYNC_LOCKED, struct iwl_umac_scan_complete), + RX_HANDLER_ASYNC_LOCKED, + struct iwl_umac_scan_complete), RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC, struct iwl_umac_scan_iter_complete_notif), RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif, - RX_HANDLER_SYNC, struct iwl_missed_beacons_notif), + RX_HANDLER_ASYNC_LOCKED_WIPHY, + struct iwl_missed_beacons_notif), RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC, struct iwl_error_resp), @@ -423,6 +443,12 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { iwl_mvm_channel_switch_error_notif, RX_HANDLER_ASYNC_UNLOCKED, struct iwl_channel_switch_error_notif), + + RX_HANDLER_GRP(DATA_PATH_GROUP, ESR_MODE_NOTIF, + iwl_mvm_rx_esr_mode_notif, + RX_HANDLER_ASYNC_LOCKED_WIPHY, + struct iwl_mvm_esr_mode_notif), + RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF, iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED, struct iwl_datapath_monitor_notif), @@ -447,6 +473,9 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER_GRP(MAC_CONF_GROUP, ROC_NOTIF, iwl_mvm_rx_roc_notif, RX_HANDLER_SYNC, struct iwl_roc_notif), + RX_HANDLER_GRP(SCAN_GROUP, CHANNEL_SURVEY_NOTIF, + iwl_mvm_rx_channel_survey_notif, RX_HANDLER_ASYNC_LOCKED, + struct iwl_umac_scan_channel_survey_notif), }; #undef RX_HANDLER #undef RX_HANDLER_GRP @@ -586,6 +615,7 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = { HCMD_NAME(CTDP_CONFIG_CMD), HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD), HCMD_NAME(PER_CHAIN_LIMIT_OFFSET_CMD), + HCMD_NAME(AP_TX_POWER_CONSTRAINTS_CMD), HCMD_NAME(CT_KILL_NOTIFICATION), HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE), }; @@ -604,6 +634,7 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = { HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD), HCMD_NAME(SCD_QUEUE_CONFIG_CMD), HCMD_NAME(SEC_KEY_CMD), + HCMD_NAME(ESR_MODE_NOTIF), HCMD_NAME(MONITOR_NOTIF), HCMD_NAME(THERMAL_DUAL_CHAIN_REQUEST), HCMD_NAME(STA_PM_NOTIF), @@ -623,6 +654,7 @@ static const struct iwl_hcmd_names iwl_mvm_statistics_names[] = { * Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_scan_names[] = { + HCMD_NAME(CHANNEL_SURVEY_NOTIF), HCMD_NAME(OFFLOAD_MATCH_INFO_NOTIF), }; @@ -1143,6 +1175,27 @@ static const struct iwl_mei_ops mei_ops = { .nic_stolen = iwl_mvm_mei_nic_stolen, }; +static void iwl_mvm_find_link_selection_vif(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (ieee80211_vif_is_mld(vif) && mvmvif->authorized) + iwl_mvm_select_links(mvmvif->mvm, vif); +} + +static void iwl_mvm_trig_link_selection(struct wiphy *wiphy, + struct wiphy_work *wk) +{ + struct iwl_mvm *mvm = + container_of(wk, struct iwl_mvm, trig_link_selection_wk); + + ieee80211_iterate_active_interfaces(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_find_link_selection_vif, + NULL); +} + static struct iwl_op_mode * iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, const struct iwl_fw *fw, struct dentry *dbgfs_dir) @@ -1274,6 +1327,10 @@ iwl_op_mode_mvm_start(struct iwl_trans 
*trans, const struct iwl_cfg *cfg, wiphy_work_init(&mvm->async_handlers_wiphy_wk, iwl_mvm_async_handlers_wiphy_wk); + + wiphy_work_init(&mvm->trig_link_selection_wk, + iwl_mvm_trig_link_selection); + init_waitqueue_head(&mvm->rx_sync_waitq); mvm->queue_sync_state = 0; @@ -1528,6 +1585,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) kfree(mvm->temp_nvm_data); for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++) kfree(mvm->nvm_sections[i].data); + kfree(mvm->acs_survey); cancel_delayed_work_sync(&mvm->tcm.work); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c index 41e68aa6bec8..568f53c56199 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -79,7 +79,7 @@ void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm, cmd->bf_roaming_state = cpu_to_le32(-vif->bss_conf.cqm_rssi_thold); } - cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->bf_data.ba_enabled); + cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->ba_enabled); } static void iwl_mvm_power_log(struct iwl_mvm *mvm, @@ -826,7 +826,7 @@ static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd); if (!ret) - mvmvif->bf_data.bf_enabled = true; + mvmvif->bf_enabled = true; return ret; } @@ -855,7 +855,7 @@ static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd); if (!ret) - mvmvif->bf_data.bf_enabled = false; + mvmvif->bf_enabled = false; return ret; } @@ -903,16 +903,16 @@ static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm, .bf_enable_beacon_filter = cpu_to_le32(1), }; - if (!mvmvif->bf_data.bf_enabled) + if (!mvmvif->bf_enabled) return 0; if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3); - mvmvif->bf_data.ba_enabled = !(!mvmvif->pm_enabled || - mvm->ps_disabled || - !vif->cfg.ps || - iwl_mvm_vif_low_latency(mvmvif)); + mvmvif->ba_enabled = !(!mvmvif->pm_enabled || + mvm->ps_disabled || + !vif->cfg.ps || + iwl_mvm_vif_low_latency(mvmvif)); return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index 00860feefa7a..3ba62fb2c85e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -654,10 +654,7 @@ void iwl_mvm_rs_fw_rate_init(struct iwl_mvm *mvm, */ sta->deflink.agg.max_amsdu_len = max_amsdu_len; - cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, - WIDE_ID(DATA_PATH_GROUP, - TLC_MNG_CONFIG_CMD), - 0); + cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 0); IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, sta_id=%d, max_ch_width=%d, mode=%d\n", cfg_cmd.sta_id, cfg_cmd.max_ch_width, cfg_cmd.mode); IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, chains=0x%X, ch_wid_supp=%d, flags=0x%X\n", @@ -693,9 +690,7 @@ void iwl_mvm_rs_fw_rate_init(struct iwl_mvm *mvm, u16 cmd_size = sizeof(cfg_cmd_v3); /* In old versions of the API the struct is 4 bytes smaller */ - if (iwl_fw_lookup_cmd_ver(mvm->fw, - WIDE_ID(DATA_PATH_GROUP, - TLC_MNG_CONFIG_CMD), 0) < 3) + if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 0) < 3) cmd_size -= 4; ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, cmd_size, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index b1add7942c5b..4fa8066a89b6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ 
b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -556,36 +556,40 @@ struct iwl_mvm_stat_data_all_macs { struct iwl_stats_ntfy_per_mac *per_mac; }; -static void iwl_mvm_update_vif_sig(struct ieee80211_vif *vif, int sig) +static void iwl_mvm_update_link_sig(struct ieee80211_vif *vif, int sig, + struct iwl_mvm_vif_link_info *link_info) { - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - int thold = vif->bss_conf.cqm_rssi_thold; - int hyst = vif->bss_conf.cqm_rssi_hyst; + struct iwl_mvm *mvm = iwl_mvm_vif_from_mac80211(vif)->mvm; + struct ieee80211_bss_conf *bss_conf = + iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, link_info->fw_link_id, + false); + int thold = bss_conf->cqm_rssi_thold; + int hyst = bss_conf->cqm_rssi_hyst; int last_event; + s8 exit_esr_thresh; if (sig == 0) { IWL_DEBUG_RX(mvm, "RSSI is 0 - skip signal based decision\n"); return; } - mvmvif->bf_data.ave_beacon_signal = sig; + link_info->bf_data.ave_beacon_signal = sig; /* BT Coex */ - if (mvmvif->bf_data.bt_coex_min_thold != - mvmvif->bf_data.bt_coex_max_thold) { - last_event = mvmvif->bf_data.last_bt_coex_event; - if (sig > mvmvif->bf_data.bt_coex_max_thold && - (last_event <= mvmvif->bf_data.bt_coex_min_thold || + if (link_info->bf_data.bt_coex_min_thold != + link_info->bf_data.bt_coex_max_thold) { + last_event = link_info->bf_data.last_bt_coex_event; + if (sig > link_info->bf_data.bt_coex_max_thold && + (last_event <= link_info->bf_data.bt_coex_min_thold || last_event == 0)) { - mvmvif->bf_data.last_bt_coex_event = sig; + link_info->bf_data.last_bt_coex_event = sig; IWL_DEBUG_RX(mvm, "cqm_iterator bt coex high %d\n", sig); iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_HIGH); - } else if (sig < mvmvif->bf_data.bt_coex_min_thold && - (last_event >= mvmvif->bf_data.bt_coex_max_thold || + } else if (sig < link_info->bf_data.bt_coex_min_thold && + (last_event >= link_info->bf_data.bt_coex_max_thold || last_event == 0)) { - mvmvif->bf_data.last_bt_coex_event = sig; + link_info->bf_data.last_bt_coex_event = sig; IWL_DEBUG_RX(mvm, "cqm_iterator bt coex low %d\n", sig); iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_LOW); @@ -596,10 +600,10 @@ static void iwl_mvm_update_vif_sig(struct ieee80211_vif *vif, int sig) return; /* CQM Notification */ - last_event = mvmvif->bf_data.last_cqm_event; + last_event = link_info->bf_data.last_cqm_event; if (thold && sig < thold && (last_event == 0 || sig < last_event - hyst)) { - mvmvif->bf_data.last_cqm_event = sig; + link_info->bf_data.last_cqm_event = sig; IWL_DEBUG_RX(mvm, "cqm_iterator cqm low %d\n", sig); ieee80211_cqm_rssi_notify( @@ -609,7 +613,7 @@ static void iwl_mvm_update_vif_sig(struct ieee80211_vif *vif, int sig) GFP_KERNEL); } else if (sig > thold && (last_event == 0 || sig > last_event + hyst)) { - mvmvif->bf_data.last_cqm_event = sig; + link_info->bf_data.last_cqm_event = sig; IWL_DEBUG_RX(mvm, "cqm_iterator cqm high %d\n", sig); ieee80211_cqm_rssi_notify( @@ -618,6 +622,20 @@ static void iwl_mvm_update_vif_sig(struct ieee80211_vif *vif, int sig) sig, GFP_KERNEL); } + + /* ESR recalculation */ + if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif)) + return; + + exit_esr_thresh = + iwl_mvm_get_esr_rssi_thresh(mvm, + &bss_conf->chanreq.oper, + true); + + if (sig < exit_esr_thresh) + iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_LOW_RSSI, + iwl_mvm_get_other_link(vif, + bss_conf->link_id)); } static void iwl_mvm_stat_iterator(void *_data, u8 *mac, @@ -651,7 +669,8 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac, 
mvmvif->deflink.beacon_stats.accu_num_beacons += mvmvif->deflink.beacon_stats.num_beacons; - iwl_mvm_update_vif_sig(vif, sig); + /* This is used in pre-MLO API so use deflink */ + iwl_mvm_update_link_sig(vif, sig, &mvmvif->deflink); } static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac, @@ -684,7 +703,9 @@ static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac, mvmvif->deflink.beacon_stats.num_beacons; sig = -le32_to_cpu(mac_stats->beacon_filter_average_energy); - iwl_mvm_update_vif_sig(vif, sig); + + /* This is used in pre-MLO API so use deflink */ + iwl_mvm_update_link_sig(vif, sig, &mvmvif->deflink); } static inline void @@ -889,8 +910,8 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm, if (link_info->phy_ctxt && link_info->phy_ctxt->channel->band == NL80211_BAND_2GHZ) - iwl_mvm_bt_coex_update_vif_esr(mvm, bss_conf->vif, - link_id); + iwl_mvm_bt_coex_update_link_esr(mvm, bss_conf->vif, + link_id); /* make sure that beacon statistics don't go backwards with TCM * request to clear statistics @@ -900,7 +921,7 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm, mvmvif->link[link_id]->beacon_stats.num_beacons; sig = -le32_to_cpu(link_stats->beacon_filter_average_energy); - iwl_mvm_update_vif_sig(bss_conf->vif, sig); + iwl_mvm_update_link_sig(bss_conf->vif, sig, link_info); if (WARN_ONCE(mvmvif->id >= MAC_INDEX_AUX, "invalid mvmvif id: %d", mvmvif->id)) @@ -930,6 +951,89 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm, } } +#define SEC_LINK_MIN_PERC 10 +#define SEC_LINK_MIN_TX 3000 +#define SEC_LINK_MIN_RX 400 + +static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm) +{ + struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm); + struct iwl_mvm_vif *mvmvif; + struct iwl_mvm_sta *mvmsta; + unsigned long total_tx = 0, total_rx = 0; + unsigned long sec_link_tx = 0, sec_link_rx = 0; + u8 sec_link_tx_perc, sec_link_rx_perc; + u8 sec_link; + + lockdep_assert_held(&mvm->mutex); + + if (!bss_vif) + return; + + mvmvif = iwl_mvm_vif_from_mac80211(bss_vif); + + if (!mvmvif->esr_active || !mvmvif->ap_sta) + return; + + mvmsta = iwl_mvm_sta_from_mac80211(mvmvif->ap_sta); + /* We only count for the AP sta in a MLO connection */ + if (!mvmsta->mpdu_counters) + return; + + /* Get the FW ID of the secondary link */ + sec_link = iwl_mvm_get_other_link(bss_vif, + iwl_mvm_get_primary_link(bss_vif)); + if (WARN_ON(!mvmvif->link[sec_link])) + return; + sec_link = mvmvif->link[sec_link]->fw_link_id; + + /* Sum up RX and TX MPDUs from the different queues/links */ + for (int q = 0; q < mvm->trans->num_rx_queues; q++) { + spin_lock_bh(&mvmsta->mpdu_counters[q].lock); + + /* The link IDs that doesn't exist will contain 0 */ + for (int link = 0; link < IWL_MVM_FW_MAX_LINK_ID; link++) { + total_tx += mvmsta->mpdu_counters[q].per_link[link].tx; + total_rx += mvmsta->mpdu_counters[q].per_link[link].rx; + } + + sec_link_tx += mvmsta->mpdu_counters[q].per_link[sec_link].tx; + sec_link_rx += mvmsta->mpdu_counters[q].per_link[sec_link].rx; + + /* + * In EMLSR we have statistics every 5 seconds, so we can reset + * the counters upon every statistics notification. 
+ */ + memset(mvmsta->mpdu_counters[q].per_link, 0, + sizeof(mvmsta->mpdu_counters[q].per_link)); + + spin_unlock_bh(&mvmsta->mpdu_counters[q].lock); + } + + /* If we don't have enough MPDUs - exit EMLSR */ + if (total_tx < IWL_MVM_ENTER_ESR_TPT_THRESH && + total_rx < IWL_MVM_ENTER_ESR_TPT_THRESH) { + iwl_mvm_block_esr(mvm, bss_vif, IWL_MVM_ESR_BLOCKED_TPT, + iwl_mvm_get_primary_link(bss_vif)); + return; + } + + /* Calculate the percentage of the secondary link TX/RX */ + sec_link_tx_perc = total_tx ? sec_link_tx * 100 / total_tx : 0; + sec_link_rx_perc = total_rx ? sec_link_rx * 100 / total_rx : 0; + + /* + * The TX/RX percentage is checked only if it exceeds the required + * minimum. In addition, RX is checked only if the TX check failed. + */ + if ((total_tx > SEC_LINK_MIN_TX && + sec_link_tx_perc < SEC_LINK_MIN_PERC) || + (total_rx > SEC_LINK_MIN_RX && + sec_link_rx_perc < SEC_LINK_MIN_PERC)) + iwl_mvm_exit_esr(mvm, bss_vif, IWL_MVM_ESR_EXIT_LINK_USAGE, + iwl_mvm_get_primary_link(bss_vif)); +} + void iwl_mvm_handle_rx_system_oper_stats(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { @@ -957,6 +1061,8 @@ void iwl_mvm_handle_rx_system_oper_stats(struct iwl_mvm *mvm, ieee80211_iterate_stations_atomic(mvm->hw, iwl_mvm_stats_energy_iter, average_energy); iwl_mvm_handle_per_phy_stats(mvm, stats->per_phy); + + iwl_mvm_update_esr_mode_tpt(mvm); } void iwl_mvm_handle_rx_system_oper_part1_stats(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index ce8d83c771a7..d78af2928152 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -1890,21 +1890,6 @@ static void iwl_mvm_decode_lsig(struct sk_buff *skb, } } -static inline u8 iwl_mvm_nl80211_band_from_rx_msdu(u8 phy_band) -{ - switch (phy_band) { - case PHY_BAND_24: - return NL80211_BAND_2GHZ; - case PHY_BAND_5: - return NL80211_BAND_5GHZ; - case PHY_BAND_6: - return NL80211_BAND_6GHZ; - default: - WARN_ONCE(1, "Unsupported phy band (%u)\n", phy_band); - return NL80211_BAND_5GHZ; - } -} - struct iwl_rx_sta_csa { bool all_sta_unblocked; struct ieee80211_vif *vif; @@ -2050,6 +2035,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct ieee80211_link_sta *link_sta = NULL; struct sk_buff *skb; u8 crypt_len = 0; + u8 sta_id = le32_get_bits(desc->status, IWL_RX_MPDU_STATUS_STA_ID); size_t desc_size; struct iwl_mvm_rx_phy_data phy_data = {}; u32 format; @@ -2168,7 +2154,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, if (iwl_mvm_is_band_in_rx_supported(mvm)) { u8 band = BAND_IN_RX_STATUS(desc->mac_phy_idx); - rx_status->band = iwl_mvm_nl80211_band_from_rx_msdu(band); + rx_status->band = iwl_mvm_nl80211_band_from_phy(band); } else { rx_status->band = phy_data.channel > 14 ? 
NL80211_BAND_5GHZ : NL80211_BAND_2GHZ; @@ -2198,13 +2184,11 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rcu_read_lock(); if (desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) { - u8 id = le32_get_bits(desc->status, IWL_RX_MPDU_STATUS_STA_ID); - - if (!WARN_ON_ONCE(id >= mvm->fw->ucode_capa.num_stations)) { - sta = rcu_dereference(mvm->fw_id_to_mac_id[id]); + if (!WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations)) { + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); if (IS_ERR(sta)) sta = NULL; - link_sta = rcu_dereference(mvm->fw_id_to_link_sta[id]); + link_sta = rcu_dereference(mvm->fw_id_to_link_sta[sta_id]); if (sta && sta->valid_links && link_sta) { rx_status->link_valid = 1; @@ -2325,6 +2309,16 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, iwl_mvm_agg_rx_received(mvm, reorder_data, baid); } + + if (ieee80211_is_data(hdr->frame_control)) { + u8 sub_frame_idx = desc->amsdu_info & + IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK; + + /* 0 means not an A-MSDU, and 1 means a new A-MSDU */ + if (!sub_frame_idx || sub_frame_idx == 1) + iwl_mvm_count_mpdu(mvmsta, sta_id, 1, false, + queue); + } } /* management stuff on default queue */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 11559563ae38..a7ec172eeade 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -226,6 +226,14 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, .global_cnt = 0, }; + /* + * A scanning AP interface probably wants to generate a survey to do + * ACS (automatic channel selection). + * Force a non-fragmented scan in that case. + */ + if (vif && ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_AP) + return IWL_SCAN_TYPE_WILD; + ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_scan_iterator, @@ -852,11 +860,13 @@ static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm, * 4. it's not a p2p find operation. * 5. we are not in low latency mode, * or if fragmented ebs is supported by the FW + * 6. the VIF is not an AP interface (scan wants survey results) */ return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) && mvm->last_ebs_successful && IWL_MVM_ENABLE_EBS && vif->type != NL80211_IFTYPE_P2P_DEVICE && - (!low_latency || iwl_mvm_is_frag_ebs_supported(mvm))); + (!low_latency || iwl_mvm_is_frag_ebs_supported(mvm)) && + ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_AP); } static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params) @@ -1377,11 +1387,14 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2); } -static u32 iwl_mvm_scan_umac_ooc_priority(struct iwl_mvm_scan_params *params) +static u32 iwl_mvm_scan_umac_ooc_priority(int type) { - return iwl_mvm_is_regular_scan(params) ? 
- IWL_SCAN_PRIORITY_EXT_6 : - IWL_SCAN_PRIORITY_EXT_2; + if (type == IWL_MVM_SCAN_REGULAR) + return IWL_SCAN_PRIORITY_EXT_6; + if (type == IWL_MVM_SCAN_INT_MLO) + return IWL_SCAN_PRIORITY_EXT_4; + + return IWL_SCAN_PRIORITY_EXT_2; } static void @@ -1747,8 +1760,9 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm, &cp->channel_config[ch_cnt]; u32 s_ssid_bitmap = 0, bssid_bitmap = 0, flags = 0; - u8 j, k, s_max = 0, b_max = 0, n_used_bssid_entries; - bool force_passive, found = false, allow_passive = true, + u8 j, k, n_s_ssids = 0, n_bssids = 0; + u8 max_s_ssids, max_bssids; + bool force_passive = false, found = false, allow_passive = true, unsolicited_probe_on_chan = false, psc_no_listen = false; s8 psd_20 = IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED; @@ -1771,20 +1785,15 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm, cfg->v5.iter_count = 1; cfg->v5.iter_interval = 0; - /* - * The optimize the scan time, i.e., reduce the scan dwell time - * on each channel, the below logic tries to set 3 direct BSSID - * probe requests for each broadcast probe request with a short - * SSID. - * TODO: improve this logic - */ - n_used_bssid_entries = 3; for (j = 0; j < params->n_6ghz_params; j++) { s8 tmp_psd_20; if (!(scan_6ghz_params[j].channel_idx == i)) continue; + unsolicited_probe_on_chan |= + scan_6ghz_params[j].unsolicited_probe; + /* Use the highest PSD value allowed as advertised by * APs for this channel */ @@ -1796,12 +1805,69 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm, psd_20 < tmp_psd_20)) psd_20 = tmp_psd_20; - found = false; - unsolicited_probe_on_chan |= - scan_6ghz_params[j].unsolicited_probe; psc_no_listen |= scan_6ghz_params[j].psc_no_listen; + } - for (k = 0; k < pp->short_ssid_num; k++) { + /* + * In the following cases apply passive scan: + * 1. Non fragmented scan: + * - PSC channel with NO_LISTEN_FLAG on should be treated + * like non PSC channel + * - Non PSC channel with more than 3 short SSIDs or more + * than 9 BSSIDs. + * - Non PSC Channel with unsolicited probe response and + * more than 2 short SSIDs or more than 6 BSSIDs. + * - PSC channel with more than 2 short SSIDs or more than + * 6 BSSIDs. + * 3. Fragmented scan: + * - PSC channel with more than 1 SSID or 3 BSSIDs. + * - Non PSC channel with more than 2 SSIDs or 6 BSSIDs. + * - Non PSC channel with unsolicited probe response and + * more than 1 SSID or more than 3 BSSIDs. + */ + if (!iwl_mvm_is_scan_fragmented(params->type)) { + if (!cfg80211_channel_is_psc(params->channels[i]) || + flags & IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN) { + if (unsolicited_probe_on_chan) { + max_s_ssids = 2; + max_bssids = 6; + } else { + max_s_ssids = 3; + max_bssids = 9; + } + } else { + max_s_ssids = 2; + max_bssids = 6; + } + } else if (cfg80211_channel_is_psc(params->channels[i])) { + max_s_ssids = 1; + max_bssids = 3; + } else { + if (unsolicited_probe_on_chan) { + max_s_ssids = 1; + max_bssids = 3; + } else { + max_s_ssids = 2; + max_bssids = 6; + } + } + + /* + * The optimize the scan time, i.e., reduce the scan dwell time + * on each channel, the below logic tries to set 3 direct BSSID + * probe requests for each broadcast probe request with a short + * SSID. 
+ * TODO: improve this logic + */ + for (j = 0; j < params->n_6ghz_params; j++) { + if (!(scan_6ghz_params[j].channel_idx == i)) + continue; + + found = false; + + for (k = 0; + k < pp->short_ssid_num && n_s_ssids < max_s_ssids; + k++) { if (!scan_6ghz_params[j].unsolicited_probe && le32_to_cpu(pp->short_ssid[k]) == scan_6ghz_params[j].short_ssid) { @@ -1812,25 +1878,25 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm, } /* - * Use short SSID only to create a new - * iteration during channel dwell or in - * case that the short SSID has a - * matching SSID, i.e., scan for hidden - * APs. + * Prefer creating BSSID entries unless + * the short SSID probe can be done in + * the same channel dwell iteration. + * + * We also need to create a short SSID + * entry for any hidden AP. */ - if (n_used_bssid_entries >= 3) { - s_ssid_bitmap |= BIT(k); - s_max++; - n_used_bssid_entries -= 3; - found = true; + if (3 * n_s_ssids > n_bssids && + !pp->direct_scan[k].len) break; - } else if (pp->direct_scan[k].len) { - s_ssid_bitmap |= BIT(k); - s_max++; - found = true; + + /* Hidden AP, cannot do passive scan */ + if (pp->direct_scan[k].len) allow_passive = false; - break; - } + + s_ssid_bitmap |= BIT(k); + n_s_ssids++; + found = true; + break; } } @@ -1842,9 +1908,12 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm, scan_6ghz_params[j].bssid, ETH_ALEN)) { if (!(bssid_bitmap & BIT(k))) { - bssid_bitmap |= BIT(k); - b_max++; - n_used_bssid_entries++; + if (n_bssids < max_bssids) { + bssid_bitmap |= BIT(k); + n_bssids++; + } else { + force_passive = TRUE; + } } break; } @@ -1858,39 +1927,6 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm, if (unsolicited_probe_on_chan) flags |= IWL_UHB_CHAN_CFG_FLAG_UNSOLICITED_PROBE_RES; - /* - * In the following cases apply passive scan: - * 1. Non fragmented scan: - * - PSC channel with NO_LISTEN_FLAG on should be treated - * like non PSC channel - * - Non PSC channel with more than 3 short SSIDs or more - * than 9 BSSIDs. - * - Non PSC Channel with unsolicited probe response and - * more than 2 short SSIDs or more than 6 BSSIDs. - * - PSC channel with more than 2 short SSIDs or more than - * 6 BSSIDs. - * 3. Fragmented scan: - * - PSC channel with more than 1 SSID or 3 BSSIDs. - * - Non PSC channel with more than 2 SSIDs or 6 BSSIDs. - * - Non PSC channel with unsolicited probe response and - * more than 1 SSID or more than 3 BSSIDs. 
- */ - if (!iwl_mvm_is_scan_fragmented(params->type)) { - if (!cfg80211_channel_is_psc(params->channels[i]) || - flags & IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN) { - force_passive = (s_max > 3 || b_max > 9); - force_passive |= (unsolicited_probe_on_chan && - (s_max > 2 || b_max > 6)); - } else { - force_passive = (s_max > 2 || b_max > 6); - } - } else if (cfg80211_channel_is_psc(params->channels[i])) { - force_passive = (s_max > 1 || b_max > 3); - } else { - force_passive = (s_max > 2 || b_max > 6); - force_passive |= (unsolicited_probe_on_chan && - (s_max > 1 || b_max > 3)); - } if ((allow_passive && force_passive) || (!(bssid_bitmap | s_ssid_bitmap) && !cfg80211_channel_is_psc(params->channels[i]))) @@ -2098,7 +2134,8 @@ static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm, static u8 iwl_mvm_scan_umac_flags2(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params, - struct ieee80211_vif *vif, int type) + struct ieee80211_vif *vif, int type, + u16 gen_flags) { u8 flags = 0; @@ -2118,6 +2155,13 @@ static u8 iwl_mvm_scan_umac_flags2(struct iwl_mvm *mvm, IWL_UCODE_TLV_CAPA_SCAN_DONT_TOGGLE_ANT)) flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_DONT_TOGGLE_ANT; + /* Passive and AP interface -> ACS (automatic channel selection) */ + if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE && + ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_AP && + iwl_fw_lookup_notif_ver(mvm->fw, SCAN_GROUP, CHANNEL_SURVEY_NOTIF, + 0) >= 1) + flags |= IWL_UMAC_SCAN_GEN_FLAGS2_COLLECT_CHANNEL_STATS; + return flags; } @@ -2255,8 +2299,6 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_scan_umac_dwell(mvm, cmd, params); - mvm->scan_uid_status[uid] = type; - cmd->uid = cpu_to_le32(uid); gen_flags = iwl_mvm_scan_umac_flags(mvm, params, vif); cmd->general_flags = cpu_to_le16(gen_flags); @@ -2297,10 +2339,8 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ret = iwl_mvm_fill_scan_sched_params(params, tail_v2->schedule, &tail_v2->delay); - if (ret) { - mvm->scan_uid_status[uid] = 0; + if (ret) return ret; - } if (iwl_mvm_is_scan_ext_chan_supported(mvm)) { tail_v2->preq = params->preq; @@ -2450,9 +2490,7 @@ static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif, int ret; u16 gen_flags; - mvm->scan_uid_status[uid] = type; - - cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params)); + cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(type)); cmd->uid = cpu_to_le32(uid); gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type); @@ -2487,15 +2525,14 @@ static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm, u8 gen_flags2; u32 bitmap_ssid = 0; - mvm->scan_uid_status[uid] = type; - - cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params)); + cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(type)); cmd->uid = cpu_to_le32(uid); gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type); if (version >= 15) - gen_flags2 = iwl_mvm_scan_umac_flags2(mvm, params, vif, type); + gen_flags2 = iwl_mvm_scan_umac_flags2(mvm, params, vif, type, + gen_flags); else gen_flags2 = 0; @@ -2532,10 +2569,8 @@ static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm, params->n_channels, pb, cp, vif->type, version); - if (!cp->count) { - mvm->scan_uid_status[uid] = 0; + if (!cp->count) return -EINVAL; - } if (!params->n_ssids || (params->n_ssids == 1 && !params->ssids[0].ssid_len)) @@ -2915,9 +2950,11 @@ static void iwl_mvm_fill_respect_p2p_go(struct iwl_mvm *mvm, } } -int 
iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct cfg80211_scan_request *req, - struct ieee80211_scan_ies *ies) +static int _iwl_mvm_single_scan_start(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct cfg80211_scan_request *req, + struct ieee80211_scan_ies *ies, + int type) { struct iwl_host_cmd hcmd = { .len = { iwl_mvm_scan_size(mvm), }, @@ -2935,7 +2972,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return -EBUSY; } - ret = iwl_mvm_check_running_scans(mvm, IWL_MVM_SCAN_REGULAR); + ret = iwl_mvm_check_running_scans(mvm, type); if (ret) return ret; @@ -2984,8 +3021,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_scan_6ghz_passive_scan(mvm, ¶ms, vif); - uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, ¶ms, - IWL_MVM_SCAN_REGULAR); + uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, ¶ms, type); if (uid < 0) return uid; @@ -3000,23 +3036,35 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, */ IWL_ERR(mvm, "Scan failed! ret %d\n", ret); iwl_mvm_resume_tcm(mvm); - mvm->scan_uid_status[uid] = 0; return ret; } - IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n"); - mvm->scan_status |= IWL_MVM_SCAN_REGULAR; - mvm->scan_vif = iwl_mvm_vif_from_mac80211(vif); + IWL_DEBUG_SCAN(mvm, "Scan request send success: type=%u, uid=%u\n", + type, uid); + + mvm->scan_uid_status[uid] = type; + mvm->scan_status |= type; + + if (type == IWL_MVM_SCAN_REGULAR) { + mvm->scan_vif = iwl_mvm_vif_from_mac80211(vif); + schedule_delayed_work(&mvm->scan_timeout_dwork, + msecs_to_jiffies(SCAN_TIMEOUT)); + } if (params.enable_6ghz_passive) mvm->last_6ghz_passive_scan_jiffies = jiffies; - schedule_delayed_work(&mvm->scan_timeout_dwork, - msecs_to_jiffies(SCAN_TIMEOUT)); - return 0; } +int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct cfg80211_scan_request *req, + struct ieee80211_scan_ies *ies) +{ + return _iwl_mvm_single_scan_start(mvm, vif, req, ies, + IWL_MVM_SCAN_REGULAR); +} + int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_sched_scan_request *req, @@ -3133,7 +3181,9 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, ret = iwl_mvm_send_cmd(mvm, &hcmd); if (!ret) { IWL_DEBUG_SCAN(mvm, - "Sched scan request was sent successfully\n"); + "Sched scan request send success: type=%u, uid=%u\n", + type, uid); + mvm->scan_uid_status[uid] = type; mvm->scan_status |= type; } else { /* If the scan failed, it usually means that the FW was unable @@ -3141,7 +3191,6 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, * should try to send the command again with different params. */ IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret); - mvm->scan_uid_status[uid] = 0; mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; } @@ -3155,9 +3204,25 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, struct iwl_umac_scan_complete *notif = (void *)pkt->data; u32 uid = __le32_to_cpu(notif->uid); bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED); + bool select_links = false; mvm->mei_scan_filter.is_mei_limited_scan = false; + IWL_DEBUG_SCAN(mvm, + "Scan completed: uid=%u type=%u, status=%s, EBS=%s\n", + uid, mvm->scan_uid_status[uid], + notif->status == IWL_SCAN_OFFLOAD_COMPLETED ? 
+ "completed" : "aborted", + iwl_mvm_ebs_status_str(notif->ebs_status)); + + IWL_DEBUG_SCAN(mvm, "Scan completed: scan_status=0x%x\n", + mvm->scan_status); + + IWL_DEBUG_SCAN(mvm, + "Scan completed: line=%u, iter=%u, elapsed time=%u\n", + notif->last_schedule, notif->last_iter, + __le32_to_cpu(notif->time_from_last_iter)); + if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status))) return; @@ -3171,8 +3236,13 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, struct iwl_mvm_vif_link_info *link_info = scan_vif->link[mvm->scan_link_id]; - if (!WARN_ON(!link_info)) + /* It is possible that by the time the scan is complete the link + * was already removed and is not valid. + */ + if (link_info) memcpy(info.tsf_bssid, link_info->bssid, ETH_ALEN); + else + IWL_DEBUG_SCAN(mvm, "Scan link is no longer valid\n"); ieee80211_scan_completed(mvm->hw, &info); mvm->scan_vif = NULL; @@ -3181,25 +3251,28 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) { ieee80211_sched_scan_stopped(mvm->hw); mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; + } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_INT_MLO) { + IWL_DEBUG_SCAN(mvm, "Internal MLO scan completed\n"); + /* + * Other scan types won't necessarily scan for the MLD links channels. + * Therefore, only select links after successful internal scan. + */ + select_links = notif->status == IWL_SCAN_OFFLOAD_COMPLETED; } mvm->scan_status &= ~mvm->scan_uid_status[uid]; - IWL_DEBUG_SCAN(mvm, - "Scan completed, uid %u type %u, status %s, EBS status %s\n", - uid, mvm->scan_uid_status[uid], - notif->status == IWL_SCAN_OFFLOAD_COMPLETED ? - "completed" : "aborted", - iwl_mvm_ebs_status_str(notif->ebs_status)); - IWL_DEBUG_SCAN(mvm, - "Last line %d, Last iteration %d, Time from last iteration %d\n", - notif->last_schedule, notif->last_iter, - __le32_to_cpu(notif->time_from_last_iter)); + + IWL_DEBUG_SCAN(mvm, "Scan completed: after update: scan_status=0x%x\n", + mvm->scan_status); if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS && notif->ebs_status != IWL_SCAN_EBS_INACTIVE) mvm->last_ebs_successful = false; mvm->scan_uid_status[uid] = 0; + + if (select_links) + wiphy_work_queue(mvm->hw->wiphy, &mvm->trig_link_selection_wk); } void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, @@ -3367,6 +3440,12 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm) mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; mvm->scan_uid_status[uid] = 0; } + uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_INT_MLO); + if (uid >= 0) { + IWL_DEBUG_SCAN(mvm, "Internal MLO scan aborted\n"); + mvm->scan_uid_status[uid] = 0; + } + uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_STOPPING_REGULAR); if (uid >= 0) @@ -3377,6 +3456,11 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm) if (uid >= 0) mvm->scan_uid_status[uid] = 0; + uid = iwl_mvm_scan_uid_by_status(mvm, + IWL_MVM_SCAN_STOPPING_INT_MLO); + if (uid >= 0) + mvm->scan_uid_status[uid] = 0; + /* We shouldn't have any UIDs still set. Loop over all the * UIDs to make sure there's nothing left there and warn if * any is found. 
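The scan.c hunks above move the mvm->scan_uid_status[uid] and mvm->scan_status bookkeeping so it is only recorded once the scan command has actually been sent to the firmware; the failure paths no longer reset the UID slot, and the completion, abort and stop paths simply clear whatever was recorded. Below is a minimal standalone C sketch of that ownership model, not driver code: the helper names (fake_send_cmd, scan_start, scan_complete) and the two type bits are invented purely for illustration.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define SCAN_TYPE_REGULAR 0x1	/* illustrative type bits, not the real enum values */
#define SCAN_TYPE_INT_MLO 0x8
#define MAX_UIDS 4

static unsigned int scan_status;		/* bitmask of scan types in flight */
static unsigned int scan_uid_status[MAX_UIDS];	/* scan type owned by each firmware UID */

/* stand-in for sending the scan command to the firmware */
static int fake_send_cmd(bool ok)
{
	return ok ? 0 : -EIO;
}

static int scan_start(unsigned int uid, unsigned int type, bool send_ok)
{
	int ret = fake_send_cmd(send_ok);

	if (ret)
		return ret;			/* nothing was recorded, nothing to undo */

	scan_uid_status[uid] = type;		/* claim the UID only after a successful send */
	scan_status |= type;
	return 0;
}

static void scan_complete(unsigned int uid)
{
	scan_status &= ~scan_uid_status[uid];	/* completion clears both records */
	scan_uid_status[uid] = 0;
}

int main(void)
{
	scan_start(0, SCAN_TYPE_REGULAR, false);		/* failed send leaves no state */
	printf("after failed start: 0x%x\n", scan_status);	/* 0x0 */

	scan_start(1, SCAN_TYPE_INT_MLO, true);
	printf("after MLO start:    0x%x\n", scan_status);	/* 0x8 */

	scan_complete(1);
	printf("after completion:   0x%x\n", scan_status);	/* 0x0 */
	return 0;
}

The same ordering is used by both _iwl_mvm_single_scan_start() and iwl_mvm_sched_scan_start() in the hunks above, which is why their error paths can drop the mvm->scan_uid_status[uid] = 0 cleanup.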
@@ -3413,6 +3497,10 @@ int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify) { int ret; + IWL_DEBUG_SCAN(mvm, + "Request to stop scan: type=0x%x, status=0x%x\n", + type, mvm->scan_status); + if (!(mvm->scan_status & type)) return 0; @@ -3424,6 +3512,9 @@ int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify) ret = iwl_mvm_scan_stop_wait(mvm, type); if (!ret) mvm->scan_status |= type << IWL_MVM_SCAN_STOPPING_SHIFT; + else + IWL_DEBUG_SCAN(mvm, "Failed to stop scan\n"); + out: /* Clear the scan status so the next scan requests will * succeed and mark the scan as stopping, so that the Rx @@ -3448,3 +3539,276 @@ out: return ret; } + +static int iwl_mvm_int_mlo_scan_start(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_channel **channels, + size_t n_channels) +{ + struct cfg80211_scan_request *req = NULL; + struct ieee80211_scan_ies ies = {}; + size_t size, i; + int ret; + + lockdep_assert_held(&mvm->mutex); + + IWL_DEBUG_SCAN(mvm, "Starting Internal MLO scan: n_channels=%zu\n", + n_channels); + + if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif)) + return -EINVAL; + + size = struct_size(req, channels, n_channels); + req = kzalloc(size, GFP_KERNEL); + if (!req) + return -ENOMEM; + + /* set the requested channels */ + for (i = 0; i < n_channels; i++) + req->channels[i] = channels[i]; + + req->n_channels = n_channels; + + /* set the rates */ + for (i = 0; i < NUM_NL80211_BANDS; i++) + if (mvm->hw->wiphy->bands[i]) + req->rates[i] = + (1 << mvm->hw->wiphy->bands[i]->n_bitrates) - 1; + + req->wdev = ieee80211_vif_to_wdev(vif); + req->wiphy = mvm->hw->wiphy; + req->scan_start = jiffies; + req->tsf_report_link_id = -1; + + ret = _iwl_mvm_single_scan_start(mvm, vif, req, &ies, + IWL_MVM_SCAN_INT_MLO); + kfree(req); + + IWL_DEBUG_SCAN(mvm, "Internal MLO scan: ret=%d\n", ret); + return ret; +} + +int iwl_mvm_int_mlo_scan(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct ieee80211_channel *channels[IEEE80211_MLD_MAX_NUM_LINKS]; + unsigned long usable_links = ieee80211_vif_usable_links(vif); + size_t n_channels = 0; + u8 link_id; + + lockdep_assert_held(&mvm->mutex); + + if (mvm->scan_status & IWL_MVM_SCAN_INT_MLO) { + IWL_DEBUG_SCAN(mvm, "Internal MLO scan is already running\n"); + return -EBUSY; + } + + rcu_read_lock(); + + for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) { + struct ieee80211_bss_conf *link_conf = + rcu_dereference(vif->link_conf[link_id]); + + if (WARN_ON_ONCE(!link_conf)) + continue; + + channels[n_channels++] = link_conf->chanreq.oper.chan; + } + + rcu_read_unlock(); + + if (!n_channels) + return -EINVAL; + + return iwl_mvm_int_mlo_scan_start(mvm, vif, channels, n_channels); +} + +static int iwl_mvm_chanidx_from_phy(struct iwl_mvm *mvm, + enum nl80211_band band, + u16 phy_chan_num) +{ + struct ieee80211_supported_band *sband = mvm->hw->wiphy->bands[band]; + int chan_idx; + + if (WARN_ON_ONCE(!sband)) + return -EINVAL; + + for (chan_idx = 0; chan_idx < sband->n_channels; chan_idx++) { + struct ieee80211_channel *channel = &sband->channels[chan_idx]; + + if (channel->hw_value == phy_chan_num) + return chan_idx; + } + + return -EINVAL; +} + +static u32 iwl_mvm_div_by_db(u32 value, u8 db) +{ + /* + * 2^32 * 10**(i / 10) for i = [1, 10], skipping 0 and simply stopping + * at 10 dB and looping instead of using a much larger table. + * + * Using 64 bit math is overkill, but means the helper does not require + * a limit on the input range. 
+ */ + static const u32 db_to_val[] = { + 0xcb59185e, 0xa1866ba8, 0x804dce7a, 0x65ea59fe, 0x50f44d89, + 0x404de61f, 0x331426af, 0x2892c18b, 0x203a7e5b, 0x1999999a, + }; + + while (value && db > 0) { + u8 change = min_t(u8, db, ARRAY_SIZE(db_to_val)); + + value = (((u64)value) * db_to_val[change - 1]) >> 32; + + db -= change; + } + + return value; +} + +VISIBLE_IF_IWLWIFI_KUNIT s8 +iwl_mvm_average_dbm_values(const struct iwl_umac_scan_channel_survey_notif *notif) +{ + s8 average_magnitude; + u32 average_factor; + s8 sum_magnitude = -128; + u32 sum_factor = 0; + int i, count = 0; + + /* + * To properly average the decibel values (signal values given in dBm) + * we need to do the math in linear space. Doing a linear average of + * dB (dBm) values is a bit annoying though due to the large range of + * at least -10 to -110 dBm that will not fit into a 32 bit integer. + * + * A 64 bit integer should be sufficient, but then we still have the + * problem that there are no directly usable utility functions + * available. + * + * So, lets not deal with that and instead do much of the calculation + * with a 16.16 fixed point integer along with a base in dBm. 16.16 bit + * gives us plenty of head-room for adding up a few values and even + * doing some math on it. And the tail should be accurate enough too + * (1/2^16 is somewhere around -48 dB, so effectively zero). + * + * i.e. the real value of sum is: + * sum = sum_factor / 2^16 * 10^(sum_magnitude / 10) mW + * + * However, that does mean we need to be able to bring two values to + * a common base, so we need a helper for that. + * + * Note that this function takes an input with unsigned negative dBm + * values but returns a signed dBm (i.e. a negative value). + */ + + for (i = 0; i < ARRAY_SIZE(notif->noise); i++) { + s8 val_magnitude; + u32 val_factor; + + if (notif->noise[i] == 0xff) + continue; + + val_factor = 0x10000; + val_magnitude = -notif->noise[i]; + + if (val_magnitude <= sum_magnitude) { + u8 div_db = sum_magnitude - val_magnitude; + + val_factor = iwl_mvm_div_by_db(val_factor, div_db); + val_magnitude = sum_magnitude; + } else { + u8 div_db = val_magnitude - sum_magnitude; + + sum_factor = iwl_mvm_div_by_db(sum_factor, div_db); + sum_magnitude = val_magnitude; + } + + sum_factor += val_factor; + count++; + } + + /* No valid noise measurement, return a very high noise level */ + if (count == 0) + return 0; + + average_magnitude = sum_magnitude; + average_factor = sum_factor / count; + + /* + * average_factor will be a number smaller than 1.0 (0x10000) at this + * point. What we need to do now is to adjust average_magnitude so that + * average_factor is between -0.5 dB and 0.5 dB. + * + * Just do -1 dB steps and find the point where + * -0.5 dB * -i dB = 0x10000 * 10^(-0.5/10) / i dB + * = div_by_db(0xe429, i) + * is smaller than average_factor. 
+ */ + for (i = 0; average_factor < iwl_mvm_div_by_db(0xe429, i); i++) { + /* nothing */ + } + + return average_magnitude - i; +} +EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_average_dbm_values); + +void iwl_mvm_rx_channel_survey_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + const struct iwl_umac_scan_channel_survey_notif *notif = + (void *)pkt->data; + struct iwl_mvm_acs_survey_channel *info; + enum nl80211_band band; + int chan_idx; + + lockdep_assert_held(&mvm->mutex); + + if (!mvm->acs_survey) { + size_t n_channels = 0; + + for (band = 0; band < NUM_NL80211_BANDS; band++) { + if (!mvm->hw->wiphy->bands[band]) + continue; + + n_channels += mvm->hw->wiphy->bands[band]->n_channels; + } + + mvm->acs_survey = kzalloc(struct_size(mvm->acs_survey, + channels, n_channels), + GFP_KERNEL); + + if (!mvm->acs_survey) + return; + + mvm->acs_survey->n_channels = n_channels; + n_channels = 0; + for (band = 0; band < NUM_NL80211_BANDS; band++) { + if (!mvm->hw->wiphy->bands[band]) + continue; + + mvm->acs_survey->bands[band] = + &mvm->acs_survey->channels[n_channels]; + n_channels += mvm->hw->wiphy->bands[band]->n_channels; + } + } + + band = iwl_mvm_nl80211_band_from_phy(le32_to_cpu(notif->band)); + chan_idx = iwl_mvm_chanidx_from_phy(mvm, band, + le32_to_cpu(notif->channel)); + if (WARN_ON_ONCE(chan_idx < 0)) + return; + + IWL_DEBUG_SCAN(mvm, "channel survey received for freq %d\n", + mvm->hw->wiphy->bands[band]->channels[chan_idx].center_freq); + + info = &mvm->acs_survey->bands[band][chan_idx]; + + /* Times are all in ms */ + info->time = le32_to_cpu(notif->active_time); + info->time_busy = le32_to_cpu(notif->busy_time); + info->time_rx = le32_to_cpu(notif->rx_time); + info->time_tx = le32_to_cpu(notif->tx_time); + info->noise = iwl_mvm_average_dbm_values(notif); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 491c449fd431..20d4968d692a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -1847,6 +1847,18 @@ int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant); + /* MPDUs are counted only when EMLSR is possible */ + if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p && + !sta->tdls && ieee80211_vif_is_mld(vif)) { + mvm_sta->mpdu_counters = + kcalloc(mvm->trans->num_rx_queues, + sizeof(*mvm_sta->mpdu_counters), + GFP_KERNEL); + if (mvm_sta->mpdu_counters) + for (int q = 0; q < mvm->trans->num_rx_queues; q++) + spin_lock_init(&mvm_sta->mpdu_counters[q].lock); + } + return 0; } @@ -4392,3 +4404,77 @@ void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm, if (ret) IWL_ERR(mvm, "Failed to cancel the channel switch\n"); } + +static int iwl_mvm_fw_sta_id_to_fw_link_id(struct iwl_mvm_vif *mvmvif, + u8 fw_sta_id) +{ + struct ieee80211_link_sta *link_sta = + rcu_dereference(mvmvif->mvm->fw_id_to_link_sta[fw_sta_id]); + struct iwl_mvm_vif_link_info *link; + + if (WARN_ON_ONCE(!link_sta)) + return -EINVAL; + + link = mvmvif->link[link_sta->link_id]; + + if (WARN_ON_ONCE(!link)) + return -EINVAL; + + return link->fw_link_id; +} + +#define IWL_MVM_TPT_COUNT_WINDOW (IWL_MVM_TPT_COUNT_WINDOW_SEC * HZ) + +void iwl_mvm_count_mpdu(struct iwl_mvm_sta *mvm_sta, u8 fw_sta_id, u32 count, + bool tx, int queue) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvm_sta->vif); + struct iwl_mvm_tpt_counter *queue_counter; + struct iwl_mvm_mpdu_counter *link_counter; + u32 
total_mpdus = 0; + int fw_link_id; + + /* Count only for a BSS sta, and only when EMLSR is possible */ + if (!mvm_sta->mpdu_counters) + return; + + /* Map sta id to link id */ + fw_link_id = iwl_mvm_fw_sta_id_to_fw_link_id(mvmvif, fw_sta_id); + if (fw_link_id < 0) + return; + + queue_counter = &mvm_sta->mpdu_counters[queue]; + link_counter = &queue_counter->per_link[fw_link_id]; + + spin_lock_bh(&queue_counter->lock); + + if (tx) + link_counter->tx += count; + else + link_counter->rx += count; + + /* + * When not in EMLSR, the window and the decision to enter EMLSR are + * handled during counting, when in EMLSR - in the statistics flow + */ + if (mvmvif->esr_active) + goto out; + + if (time_is_before_jiffies(queue_counter->window_start + + IWL_MVM_TPT_COUNT_WINDOW)) { + memset(queue_counter->per_link, 0, + sizeof(queue_counter->per_link)); + queue_counter->window_start = jiffies; + } + + for (int i = 0; i < IWL_MVM_FW_MAX_LINK_ID; i++) + total_mpdus += tx ? queue_counter->per_link[i].tx : + queue_counter->per_link[i].rx; + + if (total_mpdus > IWL_MVM_ENTER_ESR_TPT_THRESH) + wiphy_work_queue(mvmvif->mvm->hw->wiphy, + &mvmvif->unblock_esr_tpt_wk); + +out: + spin_unlock_bh(&queue_counter->lock); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index b3450569864e..264f1f9394b6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -347,6 +347,24 @@ struct iwl_mvm_link_sta { u8 avg_energy; }; +struct iwl_mvm_mpdu_counter { + u32 tx; + u32 rx; +}; + +/** + * struct iwl_mvm_tpt_counter - per-queue MPDU counter + * + * @lock: Needed to protect the counters when modified from statistics. + * @per_link: per-link counters. + * @window_start: timestamp of the counting-window start + */ +struct iwl_mvm_tpt_counter { + spinlock_t lock; + struct iwl_mvm_mpdu_counter per_link[IWL_MVM_FW_MAX_LINK_ID]; + unsigned long window_start; +} ____cacheline_aligned_in_smp; + /** * struct iwl_mvm_sta - representation of a station in the driver * @vif: the interface the station belongs to @@ -394,6 +412,7 @@ struct iwl_mvm_link_sta { * @link: per link sta entries. For non-MLO only link[0] holds data. For MLO, * link[0] points to deflink and link[link_id] is allocated when new link * sta is added. + * @mpdu_counters: RX/TX MPDUs counters for each queue. * * When mac80211 creates a station it reserves some space (hw->sta_data_size) * in the structure for use by driver. 
This structure is placed in that @@ -433,6 +452,8 @@ struct iwl_mvm_sta { struct iwl_mvm_link_sta deflink; struct iwl_mvm_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS]; + + struct iwl_mvm_tpt_counter *mpdu_counters; }; u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data); @@ -514,6 +535,9 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_count_mpdu(struct iwl_mvm_sta *mvm_sta, u8 fw_sta_id, u32 count, + bool tx, int queue); + /* AMPDU */ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int tid, u16 ssn, bool start, u16 buf_size, u16 timeout); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile new file mode 100644 index 000000000000..6bd56a28cffd --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile @@ -0,0 +1,3 @@ +iwlmvm-tests-y += module.o links.o scan.o + +obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += iwlmvm-tests.o diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c b/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c new file mode 100644 index 000000000000..f49e3c98b1ba --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c @@ -0,0 +1,435 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * KUnit tests for channel helper functions + * + * Copyright (C) 2024 Intel Corporation + */ +#include <net/mac80211.h> +#include "../mvm.h" +#include <kunit/test.h> + +MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING); + +static struct wiphy wiphy = { + .mtx = __MUTEX_INITIALIZER(wiphy.mtx), +}; + +static struct ieee80211_hw hw = { + .wiphy = &wiphy, +}; + +static struct ieee80211_channel chan_5ghz = { + .band = NL80211_BAND_5GHZ, +}; + +static struct ieee80211_channel chan_6ghz = { + .band = NL80211_BAND_6GHZ, +}; + +static struct ieee80211_channel chan_2ghz = { + .band = NL80211_BAND_2GHZ, +}; + +static struct cfg80211_chan_def chandef_a = {}; + +static struct cfg80211_chan_def chandef_b = {}; + +static struct iwl_mvm_phy_ctxt ctx = {}; + +static struct iwl_mvm_vif_link_info mvm_link = { + .phy_ctxt = &ctx, + .active = true +}; + +static struct cfg80211_bss bss = {}; + +static struct ieee80211_bss_conf link_conf = {.bss = &bss}; + +static const struct iwl_fw_cmd_version entry = { + .group = LEGACY_GROUP, + .cmd = BT_PROFILE_NOTIFICATION, + .notif_ver = 4 +}; + +static struct iwl_fw fw = { + .ucode_capa = { + .n_cmd_versions = 1, + .cmd_versions = &entry, + }, +}; + +static struct iwl_mvm mvm = { + .hw = &hw, + .fw = &fw, +}; + +static const struct link_grading_case { + const char *desc; + const struct cfg80211_chan_def chandef; + s32 signal; + s16 channel_util; + int chan_load_by_us; + unsigned int grade; +} link_grading_cases[] = { + { + .desc = "UHB, RSSI below range, no factors", + .chandef = { + .chan = &chan_6ghz, + .width = NL80211_CHAN_WIDTH_20, + }, + .signal = -100, + .grade = 177, + }, + { + .desc = "LB, RSSI in range, no factors", + .chandef = { + .chan = &chan_2ghz, + .width = NL80211_CHAN_WIDTH_20, + }, + .signal = -84, + .grade = 344, + }, + { + .desc = "HB, RSSI above range, no factors", + .chandef = { + .chan = &chan_5ghz, + .width = NL80211_CHAN_WIDTH_20, + }, + .signal = -50, + .grade = 3442, + }, + { + .desc = "HB, BSS Load IE (20 percent), inactive link, no puncturing factor", + .chandef = { + .chan = &chan_5ghz, + .width = NL80211_CHAN_WIDTH_20, + }, + .signal = -66, + .channel_util = 51, + .grade = 1836, + }, + { + 
.desc = "LB, BSS Load IE (20 percent), active link, chan_load_by_us=10 percent. No puncturing factor", + .chandef = { + .chan = &chan_2ghz, + .width = NL80211_CHAN_WIDTH_20, + }, + .signal = -61, + .channel_util = 51, + .chan_load_by_us = 10, + .grade = 2061, + }, + { + .desc = "UHB, BSS Load IE (40 percent), active link, chan_load_by_us=50 (invalid) percent. No puncturing factor", + .chandef = { + .chan = &chan_6ghz, + .width = NL80211_CHAN_WIDTH_20, + }, + .signal = -66, + .channel_util = 102, + .chan_load_by_us = 50, + .grade = 1552, + }, + { .desc = "HB, 80 MHz, no channel load factor, punctured percentage 0", + .chandef = { + .chan = &chan_5ghz, + .width = NL80211_CHAN_WIDTH_80, + .punctured = 0x0000 + }, + .signal = -72, + .grade = 1750, + }, + { .desc = "HB, 160 MHz, no channel load factor, punctured percentage 25", + .chandef = { + .chan = &chan_5ghz, + .width = NL80211_CHAN_WIDTH_160, + .punctured = 0x3 + }, + .signal = -72, + .grade = 1312, + }, + { .desc = "UHB, 320 MHz, no channel load factor, punctured percentage 12.5 (2/16)", + .chandef = { + .chan = &chan_6ghz, + .width = NL80211_CHAN_WIDTH_320, + .punctured = 0x3 + }, + .signal = -72, + .grade = 1806, + }, + { .desc = "HB, 160 MHz, channel load 20, channel load by us 10, punctured percentage 25", + .chandef = { + .chan = &chan_5ghz, + .width = NL80211_CHAN_WIDTH_160, + .punctured = 0x3 + }, + .channel_util = 51, + .chan_load_by_us = 10, + .signal = -72, + .grade = 1179, + }, +}; + +KUNIT_ARRAY_PARAM_DESC(link_grading, link_grading_cases, desc) + +static void setup_link_conf(struct kunit *test) +{ + const struct link_grading_case *params = test->param_value; + size_t vif_size = sizeof(struct ieee80211_vif) + + sizeof(struct iwl_mvm_vif); + struct ieee80211_vif *vif = kunit_kzalloc(test, vif_size, GFP_KERNEL); + struct ieee80211_bss_load_elem *bss_load; + struct element *element; + size_t ies_size = sizeof(struct cfg80211_bss_ies) + sizeof(*bss_load) + sizeof(element); + struct cfg80211_bss_ies *ies; + struct iwl_mvm_vif *mvmvif; + + KUNIT_ASSERT_NOT_NULL(test, vif); + + mvmvif = iwl_mvm_vif_from_mac80211(vif); + if (params->chan_load_by_us > 0) { + ctx.channel_load_by_us = params->chan_load_by_us; + mvmvif->link[0] = &mvm_link; + } + + link_conf.vif = vif; + link_conf.chanreq.oper = params->chandef; + bss.signal = DBM_TO_MBM(params->signal); + + ies = kunit_kzalloc(test, ies_size, GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, ies); + ies->len = sizeof(*bss_load) + sizeof(struct element); + + element = (void *)ies->data; + element->datalen = sizeof(*bss_load); + element->id = 11; + + bss_load = (void *)element->data; + bss_load->channel_util = params->channel_util; + + rcu_assign_pointer(bss.ies, ies); +} + +static void test_link_grading(struct kunit *test) +{ + const struct link_grading_case *params = test->param_value; + unsigned int ret; + + setup_link_conf(test); + + rcu_read_lock(); + ret = iwl_mvm_get_link_grade(&link_conf); + rcu_read_unlock(); + + KUNIT_EXPECT_EQ(test, ret, params->grade); + + kunit_kfree(test, link_conf.vif); + RCU_INIT_POINTER(bss.ies, NULL); +} + +static struct kunit_case link_grading_test_cases[] = { + KUNIT_CASE_PARAM(test_link_grading, link_grading_gen_params), + {} +}; + +static struct kunit_suite link_grading = { + .name = "iwlmvm-link-grading", + .test_cases = link_grading_test_cases, +}; + +kunit_test_suite(link_grading); + +static const struct valid_link_pair_case { + const char *desc; + bool bt; + struct ieee80211_channel *chan_a; + struct ieee80211_channel *chan_b; + enum nl80211_chan_width 
cw_a; + enum nl80211_chan_width cw_b; + s32 sig_a; + s32 sig_b; + bool csa_a; + bool valid; +} valid_link_pair_cases[] = { + { + .desc = "HB + UHB, valid.", + .chan_a = &chan_6ghz, + .chan_b = &chan_5ghz, + .valid = true, + }, + { + .desc = "LB + HB, no BT.", + .chan_a = &chan_2ghz, + .chan_b = &chan_5ghz, + .valid = false, + }, + { + .desc = "LB + HB, with BT.", + .bt = true, + .chan_a = &chan_2ghz, + .chan_b = &chan_5ghz, + .valid = false, + }, + { + .desc = "Same band", + .chan_a = &chan_2ghz, + .chan_b = &chan_2ghz, + .valid = false, + }, + { + .desc = "RSSI: LB, 20 MHz, low", + .chan_a = &chan_2ghz, + .cw_a = NL80211_CHAN_WIDTH_20, + .sig_a = -68, + .chan_b = &chan_5ghz, + .valid = false, + }, + { + .desc = "RSSI: UHB, 20 MHz, high", + .chan_a = &chan_6ghz, + .cw_a = NL80211_CHAN_WIDTH_20, + .sig_a = -66, + .chan_b = &chan_5ghz, + .cw_b = NL80211_CHAN_WIDTH_20, + .valid = true, + }, + { + .desc = "RSSI: UHB, 40 MHz, low", + .chan_a = &chan_6ghz, + .cw_a = NL80211_CHAN_WIDTH_40, + .sig_a = -65, + .chan_b = &chan_5ghz, + .cw_b = NL80211_CHAN_WIDTH_40, + .valid = false, + }, + { + .desc = "RSSI: UHB, 40 MHz, high", + .chan_a = &chan_6ghz, + .cw_a = NL80211_CHAN_WIDTH_40, + .sig_a = -63, + .chan_b = &chan_5ghz, + .cw_b = NL80211_CHAN_WIDTH_40, + .valid = true, + }, + { + .desc = "RSSI: UHB, 80 MHz, low", + .chan_a = &chan_6ghz, + .cw_a = NL80211_CHAN_WIDTH_80, + .sig_a = -62, + .chan_b = &chan_5ghz, + .cw_b = NL80211_CHAN_WIDTH_80, + .valid = false, + }, + { + .desc = "RSSI: UHB, 80 MHz, high", + .chan_a = &chan_6ghz, + .cw_a = NL80211_CHAN_WIDTH_80, + .sig_a = -60, + .chan_b = &chan_5ghz, + .cw_b = NL80211_CHAN_WIDTH_80, + .valid = true, + }, + { + .desc = "RSSI: UHB, 160 MHz, low", + .chan_a = &chan_6ghz, + .cw_a = NL80211_CHAN_WIDTH_160, + .sig_a = -59, + .chan_b = &chan_5ghz, + .cw_b = NL80211_CHAN_WIDTH_160, + .valid = false, + }, + { + .desc = "RSSI: HB, 160 MHz, high", + .chan_a = &chan_6ghz, + .cw_a = NL80211_CHAN_WIDTH_160, + .sig_a = -5, + .chan_b = &chan_5ghz, + .cw_b = NL80211_CHAN_WIDTH_160, + .valid = true, + }, + { + .desc = "CSA active", + .chan_a = &chan_6ghz, + .cw_a = NL80211_CHAN_WIDTH_160, + .sig_a = -5, + .chan_b = &chan_5ghz, + .cw_b = NL80211_CHAN_WIDTH_160, + .valid = false, + /* same as previous entry with valid=true except for CSA */ + .csa_a = true, + }, +}; + +KUNIT_ARRAY_PARAM_DESC(valid_link_pair, valid_link_pair_cases, desc) + +static void test_valid_link_pair(struct kunit *test) +{ + const struct valid_link_pair_case *params = test->param_value; + size_t vif_size = sizeof(struct ieee80211_vif) + + sizeof(struct iwl_mvm_vif); + struct ieee80211_vif *vif = kunit_kzalloc(test, vif_size, GFP_KERNEL); + struct iwl_trans *trans = kunit_kzalloc(test, sizeof(struct iwl_trans), + GFP_KERNEL); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_link_sel_data link_a = { + .chandef = &chandef_a, + .link_id = 1, + .signal = params->sig_a, + }; + struct iwl_mvm_link_sel_data link_b = { + .chandef = &chandef_b, + .link_id = 5, + .signal = params->sig_b, + }; + struct ieee80211_bss_conf *conf; + bool result; + + KUNIT_ASSERT_NOT_NULL(test, vif); + KUNIT_ASSERT_NOT_NULL(test, trans); + + chandef_a.chan = params->chan_a; + chandef_b.chan = params->chan_b; + + chandef_a.width = params->cw_a ?: NL80211_CHAN_WIDTH_20; + chandef_b.width = params->cw_b ?: NL80211_CHAN_WIDTH_20; + +#ifdef CONFIG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES + trans->dbg_cfg = default_dbg_config; +#endif + mvm.trans = trans; + + mvm.last_bt_notif.wifi_loss_low_rssi = params->bt; + 
mvmvif->mvm = &mvm; + + conf = kunit_kzalloc(test, sizeof(*vif->link_conf[0]), GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, conf); + conf->chanreq.oper = chandef_a; + conf->csa_active = params->csa_a; + vif->link_conf[link_a.link_id] = (void __rcu *)conf; + + conf = kunit_kzalloc(test, sizeof(*vif->link_conf[0]), GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, conf); + conf->chanreq.oper = chandef_b; + vif->link_conf[link_b.link_id] = (void __rcu *)conf; + + wiphy_lock(&wiphy); + result = iwl_mvm_mld_valid_link_pair(vif, &link_a, &link_b); + wiphy_unlock(&wiphy); + + KUNIT_EXPECT_EQ(test, result, params->valid); + + kunit_kfree(test, vif); + kunit_kfree(test, trans); +} + +static struct kunit_case valid_link_pair_test_cases[] = { + KUNIT_CASE_PARAM(test_valid_link_pair, valid_link_pair_gen_params), + {}, +}; + +static struct kunit_suite valid_link_pair = { + .name = "iwlmvm-valid-link-pair", + .test_cases = valid_link_pair_test_cases, +}; + +kunit_test_suite(valid_link_pair); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/module.c b/drivers/net/wireless/intel/iwlwifi/mvm/tests/module.c new file mode 100644 index 000000000000..f556acbac77f --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/module.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * This is just module boilerplate for the iwlmvm kunit module. + * + * Copyright (C) 2024 Intel Corporation + */ +#include <linux/module.h> + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("kunit tests for iwlmvm"); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/tests/scan.c new file mode 100644 index 000000000000..d3b6a57c3ebe --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/scan.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * KUnit tests for channel helper functions + * + * Copyright (C) 2024 Intel Corporation + */ +#include <net/mac80211.h> +#include "../mvm.h" +#include <kunit/test.h> + +MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING); + +static const struct acs_average_db_case { + const char *desc; + u8 neg_dbm[22]; + s8 result; +} acs_average_db_cases[] = { + { + .desc = "Smallest possible value, all filled", + .neg_dbm = { + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 128, 128 + }, + .result = -128, + }, + { + .desc = "Biggest possible value, all filled", + .neg_dbm = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, + }, + .result = 0, + }, + { + .desc = "Smallest possible value, partial filled", + .neg_dbm = { + 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, + 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, + }, + .result = -128, + }, + { + .desc = "Biggest possible value, partial filled", + .neg_dbm = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, + }, + .result = 0, + }, + { + .desc = "Adding -80dBm to -75dBm until it is still rounded to -79dBm", + .neg_dbm = { + 75, 80, 80, 80, 80, 80, 80, 80, 80, 80, + 80, 80, 80, 80, 80, 80, 80, 0xff, 0xff, 0xff, + 0xff, 0xff, + }, + .result = -79, + }, + { + .desc = "Adding -80dBm to -75dBm until it is just rounded to -80dBm", + .neg_dbm = { + 75, 80, 80, 80, 80, 80, 80, 80, 80, 80, + 80, 80, 80, 80, 80, 80, 80, 80, 0xff, 0xff, + 0xff, 0xff, + }, + .result = -80, + }, +}; + +KUNIT_ARRAY_PARAM_DESC(acs_average_db, acs_average_db_cases, desc) + +static void test_acs_average_db(struct kunit *test) +{ + 
const struct acs_average_db_case *params = test->param_value; + struct iwl_umac_scan_channel_survey_notif notif; + int i; + + /* Test the values in the given order */ + for (i = 0; i < ARRAY_SIZE(params->neg_dbm); i++) + notif.noise[i] = params->neg_dbm[i]; + KUNIT_ASSERT_EQ(test, + iwl_mvm_average_dbm_values(&notif), + params->result); + + /* Test in reverse order */ + for (i = 0; i < ARRAY_SIZE(params->neg_dbm); i++) + notif.noise[ARRAY_SIZE(params->neg_dbm) - i - 1] = + params->neg_dbm[i]; + KUNIT_ASSERT_EQ(test, + iwl_mvm_average_dbm_values(&notif), + params->result); +} + +static struct kunit_case acs_average_db_case[] = { + KUNIT_CASE_PARAM(test_acs_average_db, acs_average_db_gen_params), + {} +}; + +static struct kunit_suite acs_average_db = { + .name = "iwlmvm-acs-average-db", + .test_cases = acs_average_db_case, +}; + +kunit_test_suite(acs_average_db); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index ad960faceb0d..8ee4498f4245 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -47,6 +47,10 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm, static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm) { + struct ieee80211_vif *vif = mvm->p2p_device_vif; + + lockdep_assert_held(&mvm->mutex); + /* * Clear the ROC_RUNNING status bit. * This will cause the TX path to drop offchannel transmissions. @@ -70,9 +74,7 @@ static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm) * not really racy. */ - if (!WARN_ON(!mvm->p2p_device_vif)) { - struct ieee80211_vif *vif = mvm->p2p_device_vif; - + if (!WARN_ON(!vif)) { mvmvif = iwl_mvm_vif_from_mac80211(vif); iwl_mvm_flush_sta(mvm, mvmvif->deflink.bcast_sta.sta_id, mvmvif->deflink.bcast_sta.tfd_queue_msk); @@ -106,6 +108,7 @@ static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm) if (mvm->mld_api_is_used) { iwl_mvm_mld_rm_aux_sta(mvm); + mutex_unlock(&mvm->mutex); return; } @@ -115,6 +118,10 @@ static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm) if (iwl_mvm_has_new_station_api(mvm->fw)) iwl_mvm_rm_aux_sta(mvm); } + + mutex_unlock(&mvm->mutex); + if (vif) + iwl_mvm_esr_non_bss_link(mvm, vif, 0, false); } void iwl_mvm_roc_done_wk(struct work_struct *wk) @@ -122,8 +129,8 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk) struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk); mutex_lock(&mvm->mutex); + /* Mutex is released inside */ iwl_mvm_cleanup_roc(mvm); - mutex_unlock(&mvm->mutex); } static void iwl_mvm_roc_finished(struct iwl_mvm *mvm) @@ -1220,6 +1227,8 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif) struct iwl_mvm_vif *mvmvif; struct iwl_mvm_time_event_data *te_data; + mutex_lock(&mvm->mutex); + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) { mvmvif = iwl_mvm_vif_from_mac80211(vif); @@ -1263,6 +1272,8 @@ cleanup_roc: set_bit(vif->type == NL80211_IFTYPE_P2P_DEVICE ? 
IWL_MVM_STATUS_ROC_RUNNING : IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status); + + /* Mutex is released inside this function */ iwl_mvm_cleanup_roc(mvm); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 782ddc8c296b..1d695ece93e9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -1870,6 +1870,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, IWL_DEBUG_TX_REPLY(mvm, "Next reclaimed packet:%d\n", next_reclaimed); + iwl_mvm_count_mpdu(mvmsta, sta_id, 1, true, 0); } else { IWL_DEBUG_TX_REPLY(mvm, "NDP - don't update next_reclaimed\n"); @@ -2247,9 +2248,13 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) le32_to_cpu(ba_res->tx_rate), false); } - if (mvmsta) + if (mvmsta) { iwl_mvm_tx_airtime(mvm, mvmsta, le32_to_cpu(ba_res->wireless_time)); + + iwl_mvm_count_mpdu(mvmsta, sta_id, + le16_to_cpu(ba_res->txed), true, 0); + } rcu_read_unlock(); return; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index ab56ff87c6f9..47283a358ffd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -344,6 +344,26 @@ static bool iwl_wait_stats_complete(struct iwl_notif_wait_data *notif_wait, return true; } +#define PERIODIC_STAT_RATE 5 + +int iwl_mvm_request_periodic_system_statistics(struct iwl_mvm *mvm, bool enable) +{ + u32 flags = enable ? 0 : IWL_STATS_CFG_FLG_DISABLE_NTFY_MSK; + u32 type = enable ? (IWL_STATS_NTFY_TYPE_ID_OPER | + IWL_STATS_NTFY_TYPE_ID_OPER_PART1) : 0; + struct iwl_system_statistics_cmd system_cmd = { + .cfg_mask = cpu_to_le32(flags), + .config_time_sec = cpu_to_le32(enable ? 
+ PERIODIC_STAT_RATE : 0), + .type_id_mask = cpu_to_le32(type), + }; + + return iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(SYSTEM_GROUP, + SYSTEM_STATISTICS_CMD), + 0, sizeof(system_cmd), &system_cmd); +} + static int iwl_mvm_request_system_statistics(struct iwl_mvm *mvm, bool clear, u8 cmd_ver) { @@ -415,6 +435,13 @@ int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear) IWL_FW_CMD_VER_UNKNOWN); int ret; + /* + * Don't request statistics during restart, they'll not have any useful + * information right after restart, nor is clearing needed + */ + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + return 0; + if (cmd_ver != IWL_FW_CMD_VER_UNKNOWN) return iwl_mvm_request_system_statistics(mvm, clear, cmd_ver); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c index c8fc8b4fd85c..ebf11f276b20 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c @@ -1,13 +1,34 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2018-2023 Intel Corporation + * Copyright (C) 2018-2024 Intel Corporation */ +#include <linux/dmi.h> #include "iwl-trans.h" #include "iwl-fh.h" #include "iwl-context-info-gen3.h" #include "internal.h" #include "iwl-prph.h" +static const struct dmi_system_id dmi_force_scu_active_approved_list[] = { + { .ident = "DELL", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + }, + }, + { .ident = "DELL", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Alienware"), + }, + }, + /* keep last */ + {} +}; + +static bool iwl_is_force_scu_active_approved(void) +{ + return !!dmi_check_system(dmi_force_scu_active_approved_list); +} + static void iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans, struct iwl_prph_scratch_hwm_cfg *dbg_cfg, @@ -128,6 +149,14 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, if (trans->trans_cfg->imr_enabled) control_flags |= IWL_PRPH_SCRATCH_IMR_DEBUG_EN; + if (CSR_HW_REV_TYPE(trans->hw_rev) == IWL_CFG_MAC_TYPE_GL && + iwl_is_force_scu_active_approved()) { + control_flags |= IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE; + IWL_DEBUG_FW(trans, + "Context Info: Set SCU_FORCE_ACTIVE (0x%x) in control_flags\n", + IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE); + } + /* initialize RX default queue */ prph_sc_ctrl->rbd_cfg.free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 4a657036b9d6..fed2754be680 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -33,7 +33,7 @@ extern int _invalid_type; .driver_data = _ASSIGN_CFG(cfg) /* Hardware specific file defines the PCI IDs table for that hardware module */ -static const struct pci_device_id iwl_hw_card_ids[] = { +VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = { #if IS_ENABLED(CONFIG_IWLDVM) {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */ @@ -492,7 +492,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x7AF0, PCI_ANY_ID, iwl_so_trans_cfg)}, {IWL_PCI_DEVICE(0x51F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)}, {IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_imr_trans_cfg)}, - {IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)}, {IWL_PCI_DEVICE(0x54F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)}, 
{IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_so_trans_cfg)}, @@ -506,6 +505,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_bz_trans_cfg)}, {IWL_PCI_DEVICE(0xA840, PCI_ANY_ID, iwl_bz_trans_cfg)}, {IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0x4D40, PCI_ANY_ID, iwl_bz_trans_cfg)}, /* Sc devices */ {IWL_PCI_DEVICE(0xE440, PCI_ANY_ID, iwl_sc_trans_cfg)}, @@ -517,6 +517,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {0} }; MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); +EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_hw_card_ids); #define _IWL_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \ _rf_id, _rf_step, _no_160, _cores, _cdb, _cfg, _name) \ @@ -946,11 +947,6 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = { iwl_cfg_ma, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_ma, iwl_ax221_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_ma, iwl_ax231_name), @@ -1002,18 +998,25 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = { iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name), /* Bz */ +/* FIXME: need to change the naming according to the actual CRF */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_bz, iwl_bz_name), + iwl_cfg_bz, iwl_fm_name), + + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_bz, iwl_fm_name), /* Ga (Gl) */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_320, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_gl, iwl_bz_name), + iwl_cfg_gl, iwl_gl_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, @@ -1100,24 +1103,6 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = { IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), -/* MsP */ -/* For now we use the same FW as MR, but this will change in the future. 
*/ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_so_a0_ms_a0, iwl_ax204_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_so_a0_ms_a0, iwl_ax204_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_ma, iwl_ax204_name), - /* Sc */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY, @@ -1150,6 +1135,7 @@ static void get_crf_id(struct iwl_trans *iwl_trans) { u32 sd_reg_ver_addr; u32 val = 0; + u8 step; if (iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) sd_reg_ver_addr = SD_REG_VER_GEN2; @@ -1168,16 +1154,23 @@ static void get_crf_id(struct iwl_trans *iwl_trans) iwl_trans->hw_cnv_id = iwl_read_prph_no_grab(iwl_trans, CNVI_AUX_MISC_CHIP); + /* For BZ-W, take B step also when A step is indicated */ + if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ_W) + step = SILICON_B_STEP; + /* In BZ, the MAC step must be read from the CNVI aux register */ if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ) { - u8 step = CNVI_AUX_MISC_CHIP_MAC_STEP(iwl_trans->hw_cnv_id); + step = CNVI_AUX_MISC_CHIP_MAC_STEP(iwl_trans->hw_cnv_id); /* For BZ-U, take B step also when A step is indicated */ if ((CNVI_AUX_MISC_CHIP_PROD_TYPE(iwl_trans->hw_cnv_id) == CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_U) && step == SILICON_A_STEP) step = SILICON_B_STEP; + } + if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ || + CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ_W) { iwl_trans->hw_rev_step = step; iwl_trans->hw_rev |= step; } @@ -1224,12 +1217,7 @@ static int map_crf_id(struct iwl_trans *iwl_trans) case REG_CRF_ID_TYPE_GF: iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_GF << 12); break; - case REG_CRF_ID_TYPE_MR: - iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_MR << 12); - break; case REG_CRF_ID_TYPE_FM: - case REG_CRF_ID_TYPE_FMI: - case REG_CRF_ID_TYPE_FMR: iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_FM << 12); break; case REG_CRF_ID_TYPE_WHP: diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 7805a42948af..a7eebe400b5b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -386,7 +386,7 @@ struct iwl_trans_pcie { dma_addr_t iml_dma_addr; struct iwl_trans *trans; - struct net_device napi_dev; + struct net_device *napi_dev; /* INT ICT Table */ __le32 *ict_tbl; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 9c2461ba13c5..984d7bcd381f 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1000,6 +1000,11 @@ void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget); +static inline struct iwl_trans_pcie *iwl_netdev_to_trans_pcie(struct net_device *dev) +{ + return *(struct iwl_trans_pcie **)netdev_priv(dev); +} + static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget) { struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi); @@ -1007,7 +1012,7 @@ static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget) struct 
iwl_trans *trans; int ret; - trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev); + trans_pcie = iwl_netdev_to_trans_pcie(napi->dev); trans = trans_pcie->trans; ret = iwl_pcie_rx_handle(trans, rxq->id, budget); @@ -1034,7 +1039,7 @@ static int iwl_pcie_napi_poll_msix(struct napi_struct *napi, int budget) struct iwl_trans *trans; int ret; - trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev); + trans_pcie = iwl_netdev_to_trans_pcie(napi->dev); trans = trans_pcie->trans; ret = iwl_pcie_rx_handle(trans, rxq->id, budget); @@ -1131,7 +1136,7 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans) if (trans_pcie->msix_enabled) poll = iwl_pcie_napi_poll_msix; - netif_napi_add(&trans_pcie->napi_dev, &rxq->napi, + netif_napi_add(trans_pcie->napi_dev, &rxq->napi, poll); napi_enable(&rxq->napi); } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 6c76b2dd6878..d5a887b3a4bb 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1986,13 +1986,6 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, trans->command_groups = trans_cfg->command_groups; trans->command_groups_size = trans_cfg->command_groups_size; - /* Initialize NAPI here - it should be before registering to mac80211 - * in the opmode but after the HW struct is allocated. - * As this function may be called again in some corner cases don't - * do anything if NAPI was already initialized. - */ - if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY) - init_dummy_netdev(&trans_pcie->napi_dev); trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake; } @@ -2074,6 +2067,8 @@ void iwl_trans_pcie_free(struct iwl_trans *trans) iwl_pcie_free_ict(trans); } + free_netdev(trans_pcie->napi_dev); + iwl_pcie_free_invalid_tx_cmd(trans); iwl_pcie_free_fw_monitor(trans); @@ -3594,7 +3589,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, const struct pci_device_id *ent, const struct iwl_cfg_trans_params *cfg_trans) { - struct iwl_trans_pcie *trans_pcie; + struct iwl_trans_pcie *trans_pcie, **priv; struct iwl_trans *trans; int ret, addr_size; const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2; @@ -3623,6 +3618,18 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + /* Initialize NAPI here - it should be before registering to mac80211 + * in the opmode but after the HW struct is allocated. 
+ */ + trans_pcie->napi_dev = alloc_netdev_dummy(sizeof(struct iwl_trans_pcie *)); + if (!trans_pcie->napi_dev) { + ret = -ENOMEM; + goto out_free_trans; + } + /* The private struct in netdev is a pointer to struct iwl_trans_pcie */ + priv = netdev_priv(trans_pcie->napi_dev); + *priv = trans_pcie; + trans_pcie->trans = trans; trans_pcie->opmode_down = true; spin_lock_init(&trans_pcie->irq_lock); @@ -3637,7 +3644,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, WQ_HIGHPRI | WQ_UNBOUND, 0); if (!trans_pcie->rba.alloc_wq) { ret = -ENOMEM; - goto out_free_trans; + goto out_free_ndev; } INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work); @@ -3757,6 +3764,8 @@ out_free_ict: iwl_pcie_free_ict(trans); out_no_pci: destroy_workqueue(trans_pcie->rba.alloc_wq); +out_free_ndev: + free_netdev(trans_pcie->napi_dev); out_free_trans: iwl_trans_free(trans); return ERR_PTR(ret); diff --git a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c index 7aa47fce6e2d..7361b6d0cdb8 100644 --- a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c +++ b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c @@ -2,9 +2,10 @@ /* * KUnit tests for the iwlwifi device info table * - * Copyright (C) 2023 Intel Corporation + * Copyright (C) 2023-2024 Intel Corporation */ #include <kunit/test.h> +#include <linux/pci.h> #include "iwl-drv.h" #include "iwl-config.h" @@ -41,8 +42,31 @@ static void devinfo_table_order(struct kunit *test) } } +static void devinfo_pci_ids(struct kunit *test) +{ + struct pci_dev *dev; + + dev = kunit_kmalloc(test, sizeof(*dev), GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, dev); + + for (int i = 0; iwl_hw_card_ids[i].vendor; i++) { + const struct pci_device_id *s, *t; + + s = &iwl_hw_card_ids[i]; + dev->vendor = s->vendor; + dev->device = s->device; + dev->subsystem_vendor = s->subvendor; + dev->subsystem_device = s->subdevice; + dev->class = s->class; + + t = pci_match_id(iwl_hw_card_ids, dev); + KUNIT_EXPECT_PTR_EQ(test, t, s); + } +} + static struct kunit_case devinfo_test_cases[] = { KUNIT_CASE(devinfo_table_order), + KUNIT_CASE(devinfo_pci_ids), {} }; diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index 75f53c2f1e1f..f41048b5cd3c 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -3183,7 +3183,7 @@ static struct mwifiex_if_ops sdio_ops = { .up_dev = mwifiex_sdio_up_dev, }; -module_driver(mwifiex_sdio, sdio_register_driver, sdio_unregister_driver); +module_sdio_driver(mwifiex_sdio); MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("Marvell WiFi-Ex SDIO Driver version " SDIO_VERSION); @@ -3192,6 +3192,7 @@ MODULE_LICENSE("GPL v2"); MODULE_FIRMWARE(SD8786_DEFAULT_FW_NAME); MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME); MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME); +MODULE_FIRMWARE(SD8801_DEFAULT_FW_NAME); MODULE_FIRMWARE(SD8897_DEFAULT_FW_NAME); MODULE_FIRMWARE(SD8887_DEFAULT_FW_NAME); MODULE_FIRMWARE(SD8977_DEFAULT_FW_NAME); diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c index ce8fea76dbb2..d3d07bb26335 100644 --- a/drivers/net/wireless/marvell/mwl8k.c +++ b/drivers/net/wireless/marvell/mwl8k.c @@ -587,12 +587,14 @@ static int mwl8k_request_firmware(struct mwl8k_priv *priv, char *fw_image, } struct mwl8k_cmd_pkt { - __le16 code; - __le16 length; - __u8 seq_num; - __u8 macid; - __le16 result; - char payload[]; + __struct_group(mwl8k_cmd_pkt_hdr, hdr, __packed, + 
__le16 code; + __le16 length; + __u8 seq_num; + __u8 macid; + __le16 result; + ); + char payload[]; } __packed; /* @@ -2201,7 +2203,7 @@ static void mwl8k_enable_bsses(struct ieee80211_hw *hw, bool enable, /* Timeout firmware commands after 10s */ #define MWL8K_CMD_TIMEOUT_MS 10000 -static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd) +static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt_hdr *cmd) { DECLARE_COMPLETION_ONSTACK(cmd_wait); struct mwl8k_priv *priv = hw->priv; @@ -2298,7 +2300,7 @@ exit: static int mwl8k_post_pervif_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct mwl8k_cmd_pkt *cmd) + struct mwl8k_cmd_pkt_hdr *cmd) { if (vif != NULL) cmd->macid = MWL8K_VIF(vif)->macid; @@ -2350,7 +2352,7 @@ static void mwl8k_setup_5ghz_band(struct ieee80211_hw *hw) * CMD_GET_HW_SPEC (STA version). */ struct mwl8k_cmd_get_hw_spec_sta { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; __u8 hw_rev; __u8 host_interface; __le16 num_mcaddrs; @@ -2499,7 +2501,7 @@ static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw) * CMD_GET_HW_SPEC (AP version). */ struct mwl8k_cmd_get_hw_spec_ap { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; __u8 hw_rev; __u8 host_interface; __le16 num_wcb; @@ -2593,7 +2595,7 @@ done: * CMD_SET_HW_SPEC. */ struct mwl8k_cmd_set_hw_spec { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; __u8 hw_rev; __u8 host_interface; __le16 num_mcaddrs; @@ -2670,7 +2672,7 @@ static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw) * CMD_MAC_MULTICAST_ADR. */ struct mwl8k_cmd_mac_multicast_adr { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; __le16 action; __le16 numaddr; __u8 addr[][ETH_ALEN]; @@ -2681,7 +2683,7 @@ struct mwl8k_cmd_mac_multicast_adr { #define MWL8K_ENABLE_RX_ALL_MULTICAST 0x0004 #define MWL8K_ENABLE_RX_BROADCAST 0x0008 -static struct mwl8k_cmd_pkt * +static struct mwl8k_cmd_pkt_hdr * __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti, struct netdev_hw_addr_list *mc_list) { @@ -2718,7 +2720,7 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti, cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_MULTICAST); cmd->numaddr = cpu_to_le16(mc_count); netdev_hw_addr_list_for_each(ha, mc_list) { - memcpy(cmd->addr[i], ha->addr, ETH_ALEN); + memcpy(cmd->addr[i++], ha->addr, ETH_ALEN); } } @@ -2729,7 +2731,7 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti, * CMD_GET_STAT. */ struct mwl8k_cmd_get_stat { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; __le32 stats[64]; } __packed; @@ -2771,7 +2773,7 @@ static int mwl8k_cmd_get_stat(struct ieee80211_hw *hw, * CMD_RADIO_CONTROL. */ struct mwl8k_cmd_radio_control { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; __le16 action; __le16 control; __le16 radio_on; @@ -2832,7 +2834,7 @@ mwl8k_set_radio_preamble(struct ieee80211_hw *hw, bool short_preamble) #define MWL8K_RF_TX_POWER_LEVEL_TOTAL 8 struct mwl8k_cmd_rf_tx_power { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; __le16 action; __le16 support_level; __le16 current_level; @@ -2866,7 +2868,7 @@ static int mwl8k_cmd_rf_tx_power(struct ieee80211_hw *hw, int dBm) #define MWL8K_TX_POWER_LEVEL_TOTAL 12 struct mwl8k_cmd_tx_power { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; __le16 action; __le16 band; __le16 channel; @@ -2925,7 +2927,7 @@ static int mwl8k_cmd_tx_power(struct ieee80211_hw *hw, * CMD_RF_ANTENNA. 
*/ struct mwl8k_cmd_rf_antenna { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; __le16 antenna; __le16 mode; } __packed; @@ -2958,7 +2960,7 @@ mwl8k_cmd_rf_antenna(struct ieee80211_hw *hw, int antenna, int mask) * CMD_SET_BEACON. */ struct mwl8k_cmd_set_beacon { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; __le16 beacon_len; __u8 beacon[]; }; @@ -2988,7 +2990,7 @@ static int mwl8k_cmd_set_beacon(struct ieee80211_hw *hw, * CMD_SET_PRE_SCAN. */ struct mwl8k_cmd_set_pre_scan { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; } __packed; static int mwl8k_cmd_set_pre_scan(struct ieee80211_hw *hw) @@ -3013,7 +3015,7 @@ static int mwl8k_cmd_set_pre_scan(struct ieee80211_hw *hw) * CMD_BBP_REG_ACCESS. */ struct mwl8k_cmd_bbp_reg_access { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; __le16 action; __le16 offset; u8 value; @@ -3054,7 +3056,7 @@ mwl8k_cmd_bbp_reg_access(struct ieee80211_hw *hw, * CMD_SET_POST_SCAN. */ struct mwl8k_cmd_set_post_scan { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; __le32 isibss; __u8 bssid[ETH_ALEN]; } __packed; @@ -3142,7 +3144,7 @@ static void mwl8k_update_survey(struct mwl8k_priv *priv, * CMD_SET_RF_CHANNEL. */ struct mwl8k_cmd_set_rf_channel { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; __le16 action; __u8 current_channel; __le32 channel_flags; @@ -3211,7 +3213,7 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw, #define MWL8K_FRAME_PROT_11N_HT_ALL 0x06 struct mwl8k_cmd_update_set_aid { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; __le16 aid; /* AP's MAC address (BSSID) */ @@ -3283,7 +3285,7 @@ mwl8k_cmd_set_aid(struct ieee80211_hw *hw, * CMD_SET_RATE. */ struct mwl8k_cmd_set_rate { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; __u8 legacy_rates[14]; /* Bitmap for supported MCS codes. */ @@ -3319,7 +3321,7 @@ mwl8k_cmd_set_rate(struct ieee80211_hw *hw, struct ieee80211_vif *vif, #define MWL8K_FJ_BEACON_MAXLEN 128 struct mwl8k_cmd_finalize_join { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; __le32 sleep_interval; /* Number of beacon periods to sleep */ __u8 beacon_data[MWL8K_FJ_BEACON_MAXLEN]; } __packed; @@ -3358,7 +3360,7 @@ static int mwl8k_cmd_finalize_join(struct ieee80211_hw *hw, void *frame, * CMD_SET_RTS_THRESHOLD. */ struct mwl8k_cmd_set_rts_threshold { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; __le16 action; __le16 threshold; } __packed; @@ -3388,7 +3390,7 @@ mwl8k_cmd_set_rts_threshold(struct ieee80211_hw *hw, int rts_thresh) * CMD_SET_SLOT. */ struct mwl8k_cmd_set_slot { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; __le16 action; __u8 short_slot; } __packed; @@ -3417,7 +3419,7 @@ static int mwl8k_cmd_set_slot(struct ieee80211_hw *hw, bool short_slot_time) * CMD_SET_EDCA_PARAMS. */ struct mwl8k_cmd_set_edca_params { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; /* See MWL8K_SET_EDCA_XXX below */ __le16 action; @@ -3502,7 +3504,7 @@ mwl8k_cmd_set_edca_params(struct ieee80211_hw *hw, __u8 qnum, * CMD_SET_WMM_MODE. */ struct mwl8k_cmd_set_wmm_mode { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; __le16 action; } __packed; @@ -3533,7 +3535,7 @@ static int mwl8k_cmd_set_wmm_mode(struct ieee80211_hw *hw, bool enable) * CMD_MIMO_CONFIG. 
*/ struct mwl8k_cmd_mimo_config { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; __le32 action; __u8 rx_antenna_map; __u8 tx_antenna_map; @@ -3564,7 +3566,7 @@ static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx) * CMD_USE_FIXED_RATE (STA version). */ struct mwl8k_cmd_use_fixed_rate_sta { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; __le32 action; __le32 allow_rate_drop; __le32 num_rates; @@ -3606,7 +3608,7 @@ static int mwl8k_cmd_use_fixed_rate_sta(struct ieee80211_hw *hw) * CMD_USE_FIXED_RATE (AP version). */ struct mwl8k_cmd_use_fixed_rate_ap { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; __le32 action; __le32 allow_rate_drop; __le32 num_rates; @@ -3647,7 +3649,7 @@ mwl8k_cmd_use_fixed_rate_ap(struct ieee80211_hw *hw, int mcast, int mgmt) * CMD_ENABLE_SNIFFER. */ struct mwl8k_cmd_enable_sniffer { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; __le32 action; } __packed; @@ -3671,7 +3673,7 @@ static int mwl8k_cmd_enable_sniffer(struct ieee80211_hw *hw, bool enable) } struct mwl8k_cmd_update_mac_addr { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; union { struct { __le16 mac_type; @@ -3756,7 +3758,7 @@ static inline int mwl8k_cmd_del_mac_addr(struct ieee80211_hw *hw, * CMD_SET_RATEADAPT_MODE. */ struct mwl8k_cmd_set_rate_adapt_mode { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; __le16 action; __le16 mode; } __packed; @@ -3785,7 +3787,7 @@ static int mwl8k_cmd_set_rateadapt_mode(struct ieee80211_hw *hw, __u16 mode) * CMD_GET_WATCHDOG_BITMAP. */ struct mwl8k_cmd_get_watchdog_bitmap { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; u8 bitmap; } __packed; @@ -3865,7 +3867,7 @@ done: * CMD_BSS_START. */ struct mwl8k_cmd_bss_start { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; __le32 enable; } __packed; @@ -3960,7 +3962,7 @@ struct mwl8k_destroy_ba_stream { } __packed; struct mwl8k_cmd_bastream { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; __le32 action; union { struct mwl8k_create_ba_stream create_params; @@ -4070,7 +4072,7 @@ static void mwl8k_destroy_ba(struct ieee80211_hw *hw, * CMD_SET_NEW_STN. 
*/ struct mwl8k_cmd_set_new_stn { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; __le16 aid; __u8 mac_addr[6]; __le16 stn_id; @@ -4206,7 +4208,7 @@ static int mwl8k_cmd_set_new_stn_del(struct ieee80211_hw *hw, #define MIC_KEY_LENGTH 8 struct mwl8k_cmd_update_encryption { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; __le32 action; __le32 reserved; @@ -4216,7 +4218,7 @@ struct mwl8k_cmd_update_encryption { } __packed; struct mwl8k_cmd_set_key { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; __le32 action; __le32 reserved; @@ -4504,7 +4506,7 @@ struct peer_capability_info { } __packed; struct mwl8k_cmd_update_stadb { - struct mwl8k_cmd_pkt header; + struct mwl8k_cmd_pkt_hdr header; /* See STADB_ACTION_TYPE */ __le32 action; @@ -5174,7 +5176,7 @@ mwl8k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw, struct netdev_hw_addr_list *mc_list) { - struct mwl8k_cmd_pkt *cmd; + struct mwl8k_cmd_pkt_hdr *cmd; /* * Synthesize and return a command packet that programs the @@ -5234,7 +5236,7 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw, u64 multicast) { struct mwl8k_priv *priv = hw->priv; - struct mwl8k_cmd_pkt *cmd = (void *)(unsigned long)multicast; + struct mwl8k_cmd_pkt_hdr *cmd = (void *)(unsigned long)multicast; /* * AP firmware doesn't allow fine-grained control over diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 72a7bd5a8576..f4f88c444e21 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -532,7 +532,7 @@ error: } static int -mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, +mt76_dma_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q, enum mt76_txq_id qid, struct sk_buff *skb, struct mt76_wcid *wcid, struct ieee80211_sta *sta) { @@ -542,6 +542,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, struct mt76_tx_info tx_info = { .skb = skb, }; + struct mt76_dev *dev = phy->dev; struct ieee80211_hw *hw; int len, n = 0, ret = -ENOMEM; struct mt76_txwi_cache *t; @@ -549,7 +550,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, dma_addr_t addr; u8 *txwi; - if (test_bit(MT76_RESET, &dev->phy.state)) + if (test_bit(MT76_RESET, &phy->state)) goto free_skb; t = mt76_get_txwi(dev); diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 068206e48aec..e8ba2e4e8484 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -465,6 +465,7 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw) ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS); ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); + ieee80211_hw_set(hw, SPECTRUM_MGMT); if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD) && hw->max_tx_fragments > 1) { diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index a91f6ddacbd9..11b9f22ca7f3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -256,7 +256,7 @@ struct mt76_queue_ops { int idx, int n_desc, int bufsize, u32 ring_base); - int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q, + int (*tx_queue_skb)(struct mt76_phy *phy, struct mt76_queue *q, enum mt76_txq_id qid, struct sk_buff *skb, struct mt76_wcid *wcid, struct 
ieee80211_sta *sta); @@ -1127,7 +1127,7 @@ static inline int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q, #define mt76_init_queues(dev, ...) (dev)->mt76.queue_ops->init(&((dev)->mt76), __VA_ARGS__) #define mt76_queue_alloc(dev, ...) (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__) #define mt76_tx_queue_skb_raw(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__) -#define mt76_tx_queue_skb(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__) +#define mt76_tx_queue_skb(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mphy), __VA_ARGS__) #define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__) #define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__) #define mt76_queue_rx_cleanup(dev, ...) (dev)->mt76.queue_ops->rx_cleanup(&((dev)->mt76), __VA_ARGS__) diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c index 7a2f5d38562b..14304b063715 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c @@ -4,6 +4,13 @@ #include "mac.h" #include "../dma.h" +static const u8 wmm_queue_map[] = { + [IEEE80211_AC_BK] = 0, + [IEEE80211_AC_BE] = 1, + [IEEE80211_AC_VI] = 2, + [IEEE80211_AC_VO] = 3, +}; + static void mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb) { @@ -22,10 +29,10 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb) struct ieee80211_sta *sta; struct mt7603_sta *msta; struct mt76_wcid *wcid; + u8 tid = 0, hwq = 0; void *priv; int idx; u32 val; - u8 tid = 0; if (skb->len < MT_TXD_SIZE + sizeof(struct ieee80211_hdr)) goto free; @@ -42,19 +49,36 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb) goto free; priv = msta = container_of(wcid, struct mt7603_sta, wcid); - val = le32_to_cpu(txd[0]); - val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX); - val |= FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_HW_QUEUE_MGMT); - txd[0] = cpu_to_le32(val); sta = container_of(priv, struct ieee80211_sta, drv_priv); hdr = (struct ieee80211_hdr *)&skb->data[MT_TXD_SIZE]; - if (ieee80211_is_data_qos(hdr->frame_control)) + + hwq = wmm_queue_map[IEEE80211_AC_BE]; + if (ieee80211_is_data_qos(hdr->frame_control)) { tid = *ieee80211_get_qos_ctl(hdr) & - IEEE80211_QOS_CTL_TAG1D_MASK; - skb_set_queue_mapping(skb, tid_to_ac[tid]); + IEEE80211_QOS_CTL_TAG1D_MASK; + u8 qid = tid_to_ac[tid]; + hwq = wmm_queue_map[qid]; + skb_set_queue_mapping(skb, qid); + } else if (ieee80211_is_data(hdr->frame_control)) { + skb_set_queue_mapping(skb, IEEE80211_AC_BE); + hwq = wmm_queue_map[IEEE80211_AC_BE]; + } else { + skb_pull(skb, MT_TXD_SIZE); + if (!ieee80211_is_bufferable_mmpdu(skb)) + goto free; + skb_push(skb, MT_TXD_SIZE); + skb_set_queue_mapping(skb, MT_TXQ_PSD); + hwq = MT_TX_HW_QUEUE_MGMT; + } + ieee80211_sta_set_buffered(sta, tid, true); + val = le32_to_cpu(txd[0]); + val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX); + val |= FIELD_PREP(MT_TXD0_Q_IDX, hwq); + txd[0] = cpu_to_le32(val); + spin_lock_bh(&dev->ps_lock); __skb_queue_tail(&msta->psq, skb); if (skb_queue_len(&msta->psq) >= 64) { @@ -151,12 +175,6 @@ static int mt7603_poll_tx(struct napi_struct *napi, int budget) int mt7603_dma_init(struct mt7603_dev *dev) { - static const u8 wmm_queue_map[] = { - [IEEE80211_AC_BK] = 0, - [IEEE80211_AC_BE] = 1, - [IEEE80211_AC_VI] = 2, - [IEEE80211_AC_VO] = 3, - }; int ret; int i; diff --git 
a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c index cf21d06257e5..dc8a77f0a1cc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c @@ -1393,6 +1393,7 @@ void mt7603_pse_client_reset(struct mt7603_dev *dev) MT_CLIENT_RESET_TX_R_E_2_S); /* Start PSE client TX abort */ + mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF); mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_1); mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_1_S, MT_CLIENT_RESET_TX_R_E_1_S, 500); diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h index 98d64d3d2993..445d0f0ab779 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h @@ -177,6 +177,11 @@ static inline bool is_mt7925(struct mt76_dev *dev) return mt76_chip(dev) == 0x7925; } +static inline bool is_mt7920(struct mt76_dev *dev) +{ + return mt76_chip(dev) == 0x7920; +} + static inline bool is_mt7922(struct mt76_dev *dev) { return mt76_chip(dev) == 0x7922; @@ -184,7 +189,7 @@ static inline bool is_mt7922(struct mt76_dev *dev) static inline bool is_mt7921(struct mt76_dev *dev) { - return mt76_chip(dev) == 0x7961 || is_mt7922(dev); + return mt76_chip(dev) == 0x7961 || is_mt7922(dev) || is_mt7920(dev); } static inline bool is_mt7663(struct mt76_dev *dev) @@ -259,6 +264,7 @@ static inline bool is_mt76_fw_txp(struct mt76_dev *dev) { switch (mt76_chip(dev)) { case 0x7961: + case 0x7920: case 0x7922: case 0x7925: case 0x7663: @@ -445,4 +451,6 @@ void mt76_connac2_tx_token_put(struct mt76_dev *dev); /* connac3 */ void mt76_connac3_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u8 mode); +void mt76_connac3_mac_decode_eht_radiotap(struct sk_buff *skb, __le32 *rxv, + u8 mode); #endif /* __MT76_CONNAC_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c index 73e9f283d0ae..92ad1ecf6c9d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c @@ -6,8 +6,11 @@ #include "dma.h" #define HE_BITS(f) cpu_to_le16(IEEE80211_RADIOTAP_HE_##f) +#define EHT_BITS(f) cpu_to_le32(IEEE80211_RADIOTAP_EHT_##f) #define HE_PREP(f, m, v) le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\ IEEE80211_RADIOTAP_HE_##f) +#define EHT_PREP(f, m, v) le32_encode_bits(le32_get_bits(v, MT_CRXV_EHT_##m),\ + IEEE80211_RADIOTAP_EHT_##f) static void mt76_connac3_mac_decode_he_radiotap_ru(struct mt76_rx_status *status, @@ -180,3 +183,85 @@ void mt76_connac3_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, } } EXPORT_SYMBOL_GPL(mt76_connac3_mac_decode_he_radiotap); + +static void * +mt76_connac3_mac_radiotap_push_tlv(struct sk_buff *skb, u16 type, u16 len) +{ + struct ieee80211_radiotap_tlv *tlv; + + tlv = skb_push(skb, sizeof(*tlv) + len); + tlv->type = cpu_to_le16(type); + tlv->len = cpu_to_le16(len); + memset(tlv->data, 0, len); + + return tlv->data; +} + +void mt76_connac3_mac_decode_eht_radiotap(struct sk_buff *skb, __le32 *rxv, + u8 mode) +{ + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; + struct ieee80211_radiotap_eht_usig *usig; + struct ieee80211_radiotap_eht *eht; + u32 ltf_size = le32_get_bits(rxv[4], MT_CRXV_HE_LTF_SIZE) + 1; + u8 bw = FIELD_GET(MT_PRXV_FRAME_MODE, le32_to_cpu(rxv[2])); + + if (WARN_ONCE(skb_mac_header(skb) != skb->data, + "Should push tlv at the top of mac hdr")) + 
return; + + eht = mt76_connac3_mac_radiotap_push_tlv(skb, IEEE80211_RADIOTAP_EHT, + sizeof(*eht) + sizeof(u32)); + usig = mt76_connac3_mac_radiotap_push_tlv(skb, IEEE80211_RADIOTAP_EHT_USIG, + sizeof(*usig)); + + status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END; + + eht->known |= EHT_BITS(KNOWN_SPATIAL_REUSE) | + EHT_BITS(KNOWN_GI) | + EHT_BITS(KNOWN_EHT_LTF) | + EHT_BITS(KNOWN_LDPC_EXTRA_SYM_OM) | + EHT_BITS(KNOWN_PE_DISAMBIGUITY_OM) | + EHT_BITS(KNOWN_NSS_S); + + eht->data[0] |= + EHT_PREP(DATA0_SPATIAL_REUSE, SR_MASK, rxv[13]) | + cpu_to_le32(FIELD_PREP(IEEE80211_RADIOTAP_EHT_DATA0_GI, status->eht.gi) | + FIELD_PREP(IEEE80211_RADIOTAP_EHT_DATA0_LTF, ltf_size)) | + EHT_PREP(DATA0_PE_DISAMBIGUITY_OM, PE_DISAMBIG, rxv[5]) | + EHT_PREP(DATA0_LDPC_EXTRA_SYM_OM, LDPC_EXT_SYM, rxv[4]); + + eht->data[7] |= le32_encode_bits(status->nss, IEEE80211_RADIOTAP_EHT_DATA7_NSS_S); + + eht->user_info[0] |= + EHT_BITS(USER_INFO_MCS_KNOWN) | + EHT_BITS(USER_INFO_CODING_KNOWN) | + EHT_BITS(USER_INFO_NSS_KNOWN_O) | + EHT_BITS(USER_INFO_BEAMFORMING_KNOWN_O) | + EHT_BITS(USER_INFO_DATA_FOR_USER) | + le32_encode_bits(status->rate_idx, IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) | + le32_encode_bits(status->nss, IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O); + + if (le32_to_cpu(rxv[0]) & MT_PRXV_TXBF) + eht->user_info[0] |= EHT_BITS(USER_INFO_BEAMFORMING_O); + + if (le32_to_cpu(rxv[0]) & MT_PRXV_HT_AD_CODE) + eht->user_info[0] |= EHT_BITS(USER_INFO_CODING); + + if (mode == MT_PHY_TYPE_EHT_MU) + eht->user_info[0] |= EHT_BITS(USER_INFO_STA_ID_KNOWN) | + EHT_PREP(USER_INFO_STA_ID, MU_AID, rxv[8]); + + usig->common |= + EHT_BITS(USIG_COMMON_PHY_VER_KNOWN) | + EHT_BITS(USIG_COMMON_BW_KNOWN) | + EHT_BITS(USIG_COMMON_UL_DL_KNOWN) | + EHT_BITS(USIG_COMMON_BSS_COLOR_KNOWN) | + EHT_BITS(USIG_COMMON_TXOP_KNOWN) | + le32_encode_bits(0, IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER) | + le32_encode_bits(bw, IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW) | + EHT_PREP(USIG_COMMON_UL_DL, UPLINK, rxv[5]) | + EHT_PREP(USIG_COMMON_BSS_COLOR, BSS_COLOR, rxv[9]) | + EHT_PREP(USIG_COMMON_TXOP, TXOP_DUR, rxv[9]); +} +EXPORT_SYMBOL_GPL(mt76_connac3_mac_decode_eht_radiotap); diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h index 83dcd964bfd0..353e66069840 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h @@ -142,6 +142,28 @@ enum { #define MT_CRXV_HE_RU3_L GENMASK(31, 27) #define MT_CRXV_HE_RU3_H GENMASK(3, 0) +#define MT_CRXV_EHT_NUM_USER GENMASK(26, 20) +#define MT_CRXV_EHT_LTF_SIZE GENMASK(28, 27) +#define MT_CRXV_EHT_LDPC_EXT_SYM BIT(30) +#define MT_CRXV_EHT_PE_DISAMBIG BIT(1) +#define MT_CRXV_EHT_UPLINK BIT(2) +#define MT_CRXV_EHT_MU_AID GENMASK(27, 17) +#define MT_CRXV_EHT_BEAM_CHNG BIT(29) +#define MT_CRXV_EHT_DOPPLER BIT(0) +#define MT_CRXV_EHT_BSS_COLOR GENMASK(15, 10) +#define MT_CRXV_EHT_TXOP_DUR GENMASK(23, 17) +#define MT_CRXV_EHT_SR_MASK GENMASK(11, 8) +#define MT_CRXV_EHT_SR1_MASK GENMASK(15, 12) +#define MT_CRXV_EHT_SR2_MASK GENMASK(19, 16) +#define MT_CRXV_EHT_SR3_MASK GENMASK(23, 20) +#define MT_CRXV_EHT_RU0 GENMASK(8, 0) +#define MT_CRXV_EHT_RU1 GENMASK(17, 9) +#define MT_CRXV_EHT_RU2 GENMASK(26, 18) +#define MT_CRXV_EHT_RU3_L GENMASK(31, 27) +#define MT_CRXV_EHT_RU3_H GENMASK(3, 0) +#define MT_CRXV_EHT_SIG_MCS GENMASK(19, 18) +#define MT_CRXV_EHT_LTF_SYM GENMASK(22, 20) + enum tx_header_format { MT_HDR_FORMAT_802_3, MT_HDR_FORMAT_CMD, diff --git 
a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c index af0c2b2aacb0..162c57fb7954 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c @@ -257,7 +257,7 @@ mt76_connac_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len, }; u16 ntlv; - ptlv = skb_put(skb, len); + ptlv = skb_put_zero(skb, len); memcpy(ptlv, &tlv, sizeof(tlv)); ntlv = le16_to_cpu(ntlv_hdr->tlv_num); @@ -283,7 +283,7 @@ __mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif, }; struct sk_buff *skb; - if (is_mt799x(dev) && !wcid->sta) + if (wcid && !wcid->sta) hdr.muar_idx = 0xe; mt76_connac_mcu_get_wlan_idx(dev, wcid, &hdr.wlan_idx_lo, @@ -392,7 +392,14 @@ void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb, if (!sta) { basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC); - eth_broadcast_addr(basic->peer_addr); + + if (vif->type == NL80211_IFTYPE_STATION && + !is_zero_ether_addr(vif->bss_conf.bssid)) { + memcpy(basic->peer_addr, vif->bss_conf.bssid, ETH_ALEN); + basic->aid = cpu_to_le16(vif->cfg.aid); + } else { + eth_broadcast_addr(basic->peer_addr); + } return; } @@ -1670,7 +1677,7 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, set_bit(MT76_HW_SCANNING, &phy->state); mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f; - req = (struct mt76_connac_hw_scan_req *)skb_put(skb, sizeof(*req)); + req = (struct mt76_connac_hw_scan_req *)skb_put_zero(skb, sizeof(*req)); req->seq_num = mvif->scan_seq_num | mvif->band_idx << 7; req->bss_idx = mvif->idx; @@ -1798,7 +1805,7 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy, mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f; - req = (struct mt76_connac_sched_scan_req *)skb_put(skb, sizeof(*req)); + req = (struct mt76_connac_sched_scan_req *)skb_put_zero(skb, sizeof(*req)); req->version = 1; req->seq_num = mvif->scan_seq_num | mvif->band_idx << 7; @@ -2321,7 +2328,7 @@ int mt76_connac_mcu_update_gtk_rekey(struct ieee80211_hw *hw, return -ENOMEM; skb_put_data(skb, &hdr, sizeof(hdr)); - gtk_tlv = (struct mt76_connac_gtk_rekey_tlv *)skb_put(skb, + gtk_tlv = (struct mt76_connac_gtk_rekey_tlv *)skb_put_zero(skb, sizeof(*gtk_tlv)); gtk_tlv->tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_GTK_REKEY); gtk_tlv->len = cpu_to_le16(sizeof(*gtk_tlv)); @@ -2446,7 +2453,7 @@ mt76_connac_mcu_set_wow_pattern(struct mt76_dev *dev, return -ENOMEM; skb_put_data(skb, &hdr, sizeof(hdr)); - ptlv = (struct mt76_connac_wow_pattern_tlv *)skb_put(skb, sizeof(*ptlv)); + ptlv = (struct mt76_connac_wow_pattern_tlv *)skb_put_zero(skb, sizeof(*ptlv)); ptlv->tag = cpu_to_le16(UNI_SUSPEND_WOW_PATTERN); ptlv->len = cpu_to_le16(sizeof(*ptlv)); ptlv->data_len = pattern->pattern_len; @@ -2527,6 +2534,7 @@ int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend) __le16 tag; __le16 len; u8 suspend; + u8 pad[7]; } __packed hif_suspend; } req = { .hif_suspend = { diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h index 657a4d1f856b..6873ce14d299 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h @@ -610,6 +610,12 @@ struct sta_rec_ra_fixed { u8 mmps_mode; } __packed; +struct sta_rec_tx_proc { + __le16 tag; + __le16 len; + __le32 flag; +} __packed; + /* wtbl_rec */ struct wtbl_req_hdr { @@ -777,6 +783,7 @@ struct wtbl_raw { sizeof(struct sta_rec_ra_fixed) + \ 
sizeof(struct sta_rec_he_6g_capa) + \ sizeof(struct sta_rec_pn_info) + \ + sizeof(struct sta_rec_tx_proc) + \ sizeof(struct tlv) + \ MT76_CONNAC_WTBL_UPDATE_MAX_SIZE) @@ -1004,6 +1011,7 @@ enum { MCU_EVENT_CH_PRIVILEGE = 0x18, MCU_EVENT_SCHED_SCAN_DONE = 0x23, MCU_EVENT_DBG_MSG = 0x27, + MCU_EVENT_RSSI_NOTIFY = 0x96, MCU_EVENT_TXPWR = 0xd0, MCU_EVENT_EXT = 0xed, MCU_EVENT_RESTART_DL = 0xef, @@ -1182,6 +1190,7 @@ enum { MCU_EXT_CMD_EFUSE_ACCESS = 0x01, MCU_EXT_CMD_RF_REG_ACCESS = 0x02, MCU_EXT_CMD_RF_TEST = 0x04, + MCU_EXT_CMD_ID_RADIO_ON_OFF_CTRL = 0x05, MCU_EXT_CMD_PM_STATE_CTRL = 0x07, MCU_EXT_CMD_CHANNEL_SWITCH = 0x08, MCU_EXT_CMD_SET_TX_POWER_CTRL = 0x11, @@ -1213,6 +1222,7 @@ enum { MCU_EXT_CMD_TXDPD_CAL = 0x60, MCU_EXT_CMD_CAL_CACHE = 0x67, MCU_EXT_CMD_RED_ENABLE = 0x68, + MCU_EXT_CMD_CP_SUPPORT = 0x75, MCU_EXT_CMD_SET_RADAR_TH = 0x7c, MCU_EXT_CMD_SET_RDD_PATTERN = 0x7d, MCU_EXT_CMD_MWDS_SUPPORT = 0x80, @@ -1303,6 +1313,7 @@ enum { MCU_CE_CMD_SCHED_SCAN_ENABLE = 0x61, MCU_CE_CMD_SCHED_SCAN_REQ = 0x62, MCU_CE_CMD_GET_NIC_CAPAB = 0x8a, + MCU_CE_CMD_RSSI_MONITOR = 0xa1, MCU_CE_CMD_SET_MU_EDCA_PARMS = 0xb0, MCU_CE_CMD_REG_WRITE = 0xc0, MCU_CE_CMD_REG_READ = 0xc0, @@ -1448,6 +1459,10 @@ struct mt76_connac_beacon_loss_event { u8 pad[2]; } __packed; +struct mt76_connac_rssi_notify_event { + __le32 rssi[4]; +} __packed; + struct mt76_connac_mcu_bss_event { u8 bss_idx; u8 is_absent; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c index 6c3696c8c700..578013884e43 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c @@ -523,7 +523,8 @@ mt7915_fw_debug_wm_set(void *data, u64 val) /* WM CPU info record control */ mt76_clear(dev, MT_CPU_UTIL_CTRL, BIT(0)); - mt76_wr(dev, MT_DIC_CMD_REG_CMD, BIT(2) | BIT(13) | !dev->fw.debug_wm); + mt76_wr(dev, MT_DIC_CMD_REG_CMD, BIT(2) | BIT(13) | + (dev->fw.debug_wm ? 0 : BIT(0))); mt76_wr(dev, MT_MCU_WM_CIRQ_IRQ_MASK_CLR_ADDR, BIT(5)); mt76_wr(dev, MT_MCU_WM_CIRQ_IRQ_SOFT_ADDR, BIT(5)); @@ -1049,6 +1050,7 @@ static ssize_t mt7915_rate_txpower_set(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { + int i, ret, pwr, pwr160 = 0, pwr80 = 0, pwr40 = 0, pwr20 = 0; struct mt7915_phy *phy = file->private_data; struct mt7915_dev *dev = phy->dev; struct mt76_phy *mphy = phy->mt76; @@ -1057,7 +1059,6 @@ mt7915_rate_txpower_set(struct file *file, const char __user *user_buf, .band_idx = phy->mt76->band_idx, }; char buf[100]; - int i, ret, pwr160 = 0, pwr80 = 0, pwr40 = 0, pwr20 = 0; enum mac80211_rx_encoding mode; u32 offs = 0, len = 0; @@ -1130,8 +1131,8 @@ skip: if (ret) goto out; - mphy->txpower_cur = max(mphy->txpower_cur, - max(pwr160, max(pwr80, max(pwr40, pwr20)))); + pwr = max3(pwr80, pwr40, pwr20); + mphy->txpower_cur = max3(mphy->txpower_cur, pwr160, pwr); out: mutex_unlock(&dev->mt76.mutex); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c index 3bb2643d1b26..bfdbc15abaa9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c @@ -9,28 +9,34 @@ static int mt7915_eeprom_load_precal(struct mt7915_dev *dev) { struct mt76_dev *mdev = &dev->mt76; u8 *eeprom = mdev->eeprom.data; - u32 val = eeprom[MT_EE_DO_PRE_CAL]; - u32 offs; + u32 offs = is_mt7915(&dev->mt76) ? 
MT_EE_DO_PRE_CAL : MT_EE_DO_PRE_CAL_V2; + u32 size, val = eeprom[offs]; int ret; - if (!dev->flash_mode) + if (!dev->flash_mode || !val) return 0; - if (val != (MT_EE_WIFI_CAL_DPD | MT_EE_WIFI_CAL_GROUP)) - return 0; + size = mt7915_get_cal_group_size(dev) + mt7915_get_cal_dpd_size(dev); - val = MT_EE_CAL_GROUP_SIZE + MT_EE_CAL_DPD_SIZE; - dev->cal = devm_kzalloc(mdev->dev, val, GFP_KERNEL); + dev->cal = devm_kzalloc(mdev->dev, size, GFP_KERNEL); if (!dev->cal) return -ENOMEM; offs = is_mt7915(&dev->mt76) ? MT_EE_PRECAL : MT_EE_PRECAL_V2; - ret = mt76_get_of_data_from_mtd(mdev, dev->cal, offs, val); + ret = mt76_get_of_data_from_mtd(mdev, dev->cal, offs, size); if (!ret) return ret; - return mt76_get_of_data_from_nvmem(mdev, dev->cal, "precal", val); + ret = mt76_get_of_data_from_nvmem(mdev, dev->cal, "precal", size); + if (!ret) + return ret; + + dev_warn(mdev->dev, "missing precal data, size=%d\n", size); + devm_kfree(mdev->dev, dev->cal); + dev->cal = NULL; + + return ret; } static int mt7915_check_eeprom(struct mt7915_dev *dev) @@ -256,10 +262,7 @@ int mt7915_eeprom_init(struct mt7915_dev *dev) return ret; } - ret = mt7915_eeprom_load_precal(dev); - if (ret) - return ret; - + mt7915_eeprom_load_precal(dev); mt7915_eeprom_parse_hw_cap(dev, &dev->phy); memcpy(dev->mphy.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR, ETH_ALEN); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h index adc26a222823..509fb43d8a68 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h @@ -19,6 +19,7 @@ enum mt7915_eeprom_field { MT_EE_DDIE_FT_VERSION = 0x050, MT_EE_DO_PRE_CAL = 0x062, MT_EE_WIFI_CONF = 0x190, + MT_EE_DO_PRE_CAL_V2 = 0x19a, MT_EE_RATE_DELTA_2G = 0x252, MT_EE_RATE_DELTA_5G = 0x29d, MT_EE_TX0_POWER_2G = 0x2fc, @@ -39,10 +40,19 @@ enum mt7915_eeprom_field { }; #define MT_EE_WIFI_CAL_GROUP BIT(0) -#define MT_EE_WIFI_CAL_DPD GENMASK(2, 1) +#define MT_EE_WIFI_CAL_DPD_2G BIT(2) +#define MT_EE_WIFI_CAL_DPD_5G BIT(1) +#define MT_EE_WIFI_CAL_DPD_6G BIT(3) +#define MT_EE_WIFI_CAL_DPD GENMASK(3, 1) #define MT_EE_CAL_UNIT 1024 -#define MT_EE_CAL_GROUP_SIZE (49 * MT_EE_CAL_UNIT + 16) -#define MT_EE_CAL_DPD_SIZE (54 * MT_EE_CAL_UNIT) +#define MT_EE_CAL_GROUP_SIZE_7915 (49 * MT_EE_CAL_UNIT + 16) +#define MT_EE_CAL_GROUP_SIZE_7916 (54 * MT_EE_CAL_UNIT + 16) +#define MT_EE_CAL_GROUP_SIZE_7975 (54 * MT_EE_CAL_UNIT + 16) +#define MT_EE_CAL_GROUP_SIZE_7976 (94 * MT_EE_CAL_UNIT + 16) +#define MT_EE_CAL_GROUP_SIZE_7916_6G (94 * MT_EE_CAL_UNIT + 16) +#define MT_EE_CAL_DPD_SIZE_V1 (54 * MT_EE_CAL_UNIT) +#define MT_EE_CAL_DPD_SIZE_V2 (300 * MT_EE_CAL_UNIT) +#define MT_EE_CAL_DPD_SIZE_V2_7981 (102 * MT_EE_CAL_UNIT) /* no 6g dpd data */ #define MT_EE_WIFI_CONF0_TX_PATH GENMASK(2, 0) #define MT_EE_WIFI_CONF0_BAND_SEL GENMASK(7, 6) @@ -156,6 +166,37 @@ mt7915_tssi_enabled(struct mt7915_dev *dev, enum nl80211_band band) return val & MT_EE_WIFI_CONF7_TSSI0_5G; } +static inline u32 +mt7915_get_cal_group_size(struct mt7915_dev *dev) +{ + u8 *eep = dev->mt76.eeprom.data; + u32 val; + + if (is_mt7915(&dev->mt76)) { + return MT_EE_CAL_GROUP_SIZE_7915; + } else if (is_mt7916(&dev->mt76)) { + val = eep[MT_EE_WIFI_CONF + 1]; + val = FIELD_GET(MT_EE_WIFI_CONF0_BAND_SEL, val); + return (val == MT_EE_V2_BAND_SEL_6GHZ) ? 
MT_EE_CAL_GROUP_SIZE_7916_6G : + MT_EE_CAL_GROUP_SIZE_7916; + } else if (mt7915_check_adie(dev, false)) { + return MT_EE_CAL_GROUP_SIZE_7976; + } else { + return MT_EE_CAL_GROUP_SIZE_7975; + } +} + +static inline u32 +mt7915_get_cal_dpd_size(struct mt7915_dev *dev) +{ + if (is_mt7915(&dev->mt76)) + return MT_EE_CAL_DPD_SIZE_V1; + else if (is_mt7981(&dev->mt76)) + return MT_EE_CAL_DPD_SIZE_V2_7981; + else + return MT_EE_CAL_DPD_SIZE_V2; +} + extern const u8 mt7915_sku_group_len[MAX_SKU_RATE_GROUP_NUM]; #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c index cea2f6d9050a..a978f434dc5e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c @@ -823,7 +823,7 @@ mt7915_init_hardware(struct mt7915_dev *dev, struct mt7915_phy *phy2) if (ret < 0) return ret; - if (dev->flash_mode) { + if (dev->cal) { ret = mt7915_mcu_apply_group_cal(dev); if (ret) return ret; @@ -934,11 +934,10 @@ mt7915_set_stream_he_txbf_caps(struct mt7915_phy *phy, /* the maximum cap is 4 x 3, (Nr, Nc) = (3, 2) */ elem->phy_cap_info[7] |= min_t(int, sts - 1, 2) << 3; - if (vif != NL80211_IFTYPE_AP) + if (vif != NL80211_IFTYPE_AP && vif != NL80211_IFTYPE_STATION) return; elem->phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER; - elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER; c = FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK, sts - 1); @@ -947,6 +946,11 @@ mt7915_set_stream_he_txbf_caps(struct mt7915_phy *phy, sts_160 - 1); elem->phy_cap_info[5] |= c; + if (vif != NL80211_IFTYPE_AP) + return; + + elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER; + c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB | IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB; elem->phy_cap_info[6] |= c; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c index e45361111f9b..8008ce3fa6c7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -140,8 +140,15 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev) msta->airtime_ac[i] = mt76_rr(dev, addr); msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4); - tx_time[i] = msta->airtime_ac[i] - tx_last; - rx_time[i] = msta->airtime_ac[i + 4] - rx_last; + if (msta->airtime_ac[i] <= tx_last) + tx_time[i] = 0; + else + tx_time[i] = msta->airtime_ac[i] - tx_last; + + if (msta->airtime_ac[i + 4] <= rx_last) + rx_time[i] = 0; + else + rx_time[i] = msta->airtime_ac[i + 4] - rx_last; if ((tx_last | rx_last) & BIT(30)) clear = true; @@ -1338,10 +1345,8 @@ mt7915_mac_restart(struct mt7915_dev *dev) set_bit(MT76_RESET, &dev->mphy.state); set_bit(MT76_MCU_RESET, &dev->mphy.state); wake_up(&dev->mt76.mcu.wait); - if (ext_phy) { + if (ext_phy) set_bit(MT76_RESET, &ext_phy->state); - set_bit(MT76_MCU_RESET, &ext_phy->state); - } /* lock/unlock all queues to ensure that no tx is pending */ mt76_txq_schedule_all(&dev->mphy); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c index 3709d18da0e6..2624edbb59a1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c @@ -329,7 +329,7 @@ int mt7915_set_channel(struct mt7915_phy *phy) mt76_set_channel(phy->mt76); - if (dev->flash_mode) { + if (dev->cal) { ret = mt7915_mcu_apply_tx_dpd(phy); if (ret) goto out; @@ -744,6 +744,7 @@ int 
mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; bool ext_phy = mvif->phy != &dev->phy; int ret, idx; + u32 addr; idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA); if (idx < 0) @@ -767,6 +768,9 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, if (ret) return ret; + addr = mt7915_mac_wtbl_lmac_addr(dev, msta->wcid.idx, 30); + mt76_rmw_field(dev, addr, GENMASK(7, 0), 0xa0); + return mt7915_mcu_add_rate_ctrl(dev, vif, sta, false); } @@ -1657,6 +1661,10 @@ mt7915_net_fill_forward_path(struct ieee80211_hw *hw, #endif const struct ieee80211_ops mt7915_ops = { + .add_chanctx = ieee80211_emulate_add_chanctx, + .remove_chanctx = ieee80211_emulate_remove_chanctx, + .change_chanctx = ieee80211_emulate_change_chanctx, + .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx, .tx = mt7915_tx, .start = mt7915_start, .stop = mt7915_stop, diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index d90f98c50039..9599adf104b1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -331,7 +331,7 @@ mt7915_mcu_cca_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) if (!vif->bss_conf.color_change_active || vif->type == NL80211_IFTYPE_STATION) return; - ieee80211_color_change_finish(vif); + ieee80211_color_change_finish(vif, 0); } static void @@ -424,7 +424,7 @@ mt7915_mcu_add_nested_subtlv(struct sk_buff *skb, int sub_tag, int sub_len, .len = cpu_to_le16(sub_len), }; - ptlv = skb_put(skb, sub_len); + ptlv = skb_put_zero(skb, sub_len); memcpy(ptlv, &tlv, sizeof(tlv)); le16_add_cpu(sub_ntlv, 1); @@ -1193,7 +1193,7 @@ mt7915_mcu_sta_bfer_tlv(struct mt7915_dev *dev, struct sk_buff *skb, int tx_ant = hweight8(phy->mt76->chainmask) - 1; struct sta_rec_bf *bf; struct tlv *tlv; - const u8 matrix[4][4] = { + static const u8 matrix[4][4] = { {0, 0, 0, 0}, {1, 1, 0, 0}, /* 2x1, 2x2, 2x3, 2x4 */ {2, 4, 4, 0}, /* 3x1, 3x2, 3x3, 3x4 */ @@ -1919,8 +1919,7 @@ mt7915_mcu_add_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif, bcn = (struct bss_info_bcn *)tlv; bcn->enable = true; - if (changed & BSS_CHANGED_FILS_DISCOVERY && - vif->bss_conf.fils_discovery.max_interval) { + if (changed & BSS_CHANGED_FILS_DISCOVERY) { interval = vif->bss_conf.fils_discovery.max_interval; skb = ieee80211_get_fils_discovery_tmpl(hw, vif); } else if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP && @@ -1957,7 +1956,7 @@ mt7915_mcu_add_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif, discov->tx_type = !!(changed & BSS_CHANGED_FILS_DISCOVERY); discov->tx_interval = interval; discov->prob_rsp_len = cpu_to_le16(MT_TXD_SIZE + skb->len); - discov->enable = true; + discov->enable = !!interval; buf = (u8 *)sub_tlv + sizeof(*discov); @@ -2904,9 +2903,10 @@ static int mt7915_mcu_set_pre_cal(struct mt7915_dev *dev, u8 idx, int mt7915_mcu_apply_group_cal(struct mt7915_dev *dev) { u8 idx = 0, *cal = dev->cal, *eep = dev->mt76.eeprom.data; - u32 total = MT_EE_CAL_GROUP_SIZE; + u32 total = mt7915_get_cal_group_size(dev); + u32 offs = is_mt7915(&dev->mt76) ? 
MT_EE_DO_PRE_CAL : MT_EE_DO_PRE_CAL_V2; - if (!(eep[MT_EE_DO_PRE_CAL] & MT_EE_WIFI_CAL_GROUP)) + if (!(eep[offs] & MT_EE_WIFI_CAL_GROUP)) return 0; /* @@ -2942,9 +2942,9 @@ static int mt7915_find_freq_idx(const u16 *freqs, int n_freqs, u16 cur) return -1; } -static int mt7915_dpd_freq_idx(u16 freq, u8 bw) +static int mt7915_dpd_freq_idx(struct mt7915_dev *dev, u16 freq, u8 bw) { - static const u16 freq_list[] = { + static const u16 freq_list_v1[] = { 5180, 5200, 5220, 5240, 5260, 5280, 5300, 5320, 5500, 5520, 5540, 5560, @@ -2952,65 +2952,135 @@ static int mt7915_dpd_freq_idx(u16 freq, u8 bw) 5660, 5680, 5700, 5745, 5765, 5785, 5805, 5825 }; - int offset_2g = ARRAY_SIZE(freq_list); + static const u16 freq_list_v2[] = { + /* 6G BW20*/ + 5955, 5975, 5995, 6015, + 6035, 6055, 6075, 6095, + 6115, 6135, 6155, 6175, + 6195, 6215, 6235, 6255, + 6275, 6295, 6315, 6335, + 6355, 6375, 6395, 6415, + 6435, 6455, 6475, 6495, + 6515, 6535, 6555, 6575, + 6595, 6615, 6635, 6655, + 6675, 6695, 6715, 6735, + 6755, 6775, 6795, 6815, + 6835, 6855, 6875, 6895, + 6915, 6935, 6955, 6975, + 6995, 7015, 7035, 7055, + 7075, 7095, 7115, + /* 6G BW160 */ + 6025, 6185, 6345, 6505, + 6665, 6825, 6985, + /* 5G BW20 */ + 5180, 5200, 5220, 5240, + 5260, 5280, 5300, 5320, + 5500, 5520, 5540, 5560, + 5580, 5600, 5620, 5640, + 5660, 5680, 5700, 5720, + 5745, 5765, 5785, 5805, + 5825, 5845, 5865, 5885, + /* 5G BW160 */ + 5250, 5570, 5815 + }; + static const u16 freq_list_v2_7981[] = { + /* 5G BW20 */ + 5180, 5200, 5220, 5240, + 5260, 5280, 5300, 5320, + 5500, 5520, 5540, 5560, + 5580, 5600, 5620, 5640, + 5660, 5680, 5700, 5720, + 5745, 5765, 5785, 5805, + 5825, 5845, 5865, 5885, + /* 5G BW160 */ + 5250, 5570, 5815 + }; + const u16 *freq_list = freq_list_v1; + int n_freqs = ARRAY_SIZE(freq_list_v1); int idx; + if (!is_mt7915(&dev->mt76)) { + if (is_mt7981(&dev->mt76)) { + freq_list = freq_list_v2_7981; + n_freqs = ARRAY_SIZE(freq_list_v2_7981); + } else { + freq_list = freq_list_v2; + n_freqs = ARRAY_SIZE(freq_list_v2); + } + } + if (freq < 4000) { if (freq < 2432) - return offset_2g; + return n_freqs; if (freq < 2457) - return offset_2g + 1; + return n_freqs + 1; - return offset_2g + 2; + return n_freqs + 2; } - if (bw == NL80211_CHAN_WIDTH_80P80 || bw == NL80211_CHAN_WIDTH_160) + if (bw == NL80211_CHAN_WIDTH_80P80) return -1; if (bw != NL80211_CHAN_WIDTH_20) { - idx = mt7915_find_freq_idx(freq_list, ARRAY_SIZE(freq_list), - freq + 10); + idx = mt7915_find_freq_idx(freq_list, n_freqs, freq + 10); if (idx >= 0) return idx; - idx = mt7915_find_freq_idx(freq_list, ARRAY_SIZE(freq_list), - freq - 10); + idx = mt7915_find_freq_idx(freq_list, n_freqs, freq - 10); if (idx >= 0) return idx; } - return mt7915_find_freq_idx(freq_list, ARRAY_SIZE(freq_list), freq); + return mt7915_find_freq_idx(freq_list, n_freqs, freq); } int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy) { struct mt7915_dev *dev = phy->dev; struct cfg80211_chan_def *chandef = &phy->mt76->chandef; - u16 total = 2, center_freq = chandef->center_freq1; + enum nl80211_band band = chandef->chan->band; + u32 offs = is_mt7915(&dev->mt76) ? MT_EE_DO_PRE_CAL : MT_EE_DO_PRE_CAL_V2; + u16 center_freq = chandef->center_freq1; u8 *cal = dev->cal, *eep = dev->mt76.eeprom.data; + u8 dpd_mask, cal_num = is_mt7915(&dev->mt76) ? 
2 : 3; int idx; - if (!(eep[MT_EE_DO_PRE_CAL] & MT_EE_WIFI_CAL_DPD)) + switch (band) { + case NL80211_BAND_2GHZ: + dpd_mask = MT_EE_WIFI_CAL_DPD_2G; + break; + case NL80211_BAND_5GHZ: + dpd_mask = MT_EE_WIFI_CAL_DPD_5G; + break; + case NL80211_BAND_6GHZ: + dpd_mask = MT_EE_WIFI_CAL_DPD_6G; + break; + default: + dpd_mask = 0; + break; + } + + if (!(eep[offs] & dpd_mask)) return 0; - idx = mt7915_dpd_freq_idx(center_freq, chandef->width); + idx = mt7915_dpd_freq_idx(dev, center_freq, chandef->width); if (idx < 0) return -EINVAL; /* Items: Tx DPD, Tx Flatness */ - idx = idx * 2; - cal += MT_EE_CAL_GROUP_SIZE; + idx = idx * cal_num; + cal += mt7915_get_cal_group_size(dev) + (idx * MT_EE_CAL_UNIT); - while (total--) { + while (cal_num--) { int ret; - cal += (idx * MT_EE_CAL_UNIT); ret = mt7915_mcu_set_pre_cal(dev, idx, cal, MT_EE_CAL_UNIT, MCU_EXT_CMD(DPD_PRE_CAL_INFO)); if (ret) return ret; idx++; + cal += MT_EE_CAL_UNIT; } return 0; @@ -3801,30 +3871,38 @@ int mt7915_mcu_wed_wa_tx_stats(struct mt7915_dev *dev, u16 wlan_idx) { struct { __le32 cmd; - __le32 num; - __le32 __rsv; - __le16 wlan_idx; - } req = { + __le32 arg0; + __le32 arg1; + __le16 arg2; + } __packed req = { .cmd = cpu_to_le32(0x15), - .num = cpu_to_le32(1), - .wlan_idx = cpu_to_le16(wlan_idx), }; struct mt7915_mcu_wa_tx_stat { - __le16 wlan_idx; - u8 __rsv[2]; + __le16 wcid; + u8 __rsv2[2]; /* tx_bytes is deprecated since WA byte counter uses u32, * which easily leads to overflow. */ __le32 tx_bytes; __le32 tx_packets; - } *res; + } __packed *res; struct mt76_wcid *wcid; struct sk_buff *skb; - int ret; + int ret, len; + u16 ret_wcid; + + if (is_mt7915(&dev->mt76)) { + req.arg0 = cpu_to_le32(wlan_idx); + len = sizeof(req) - sizeof(req.arg2); + } else { + req.arg0 = cpu_to_le32(1); + req.arg2 = cpu_to_le16(wlan_idx); + len = sizeof(req); + } ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_WA_PARAM_CMD(QUERY), - &req, sizeof(req), true, &skb); + &req, len, true, &skb); if (ret) return ret; @@ -3833,7 +3911,11 @@ int mt7915_mcu_wed_wa_tx_stats(struct mt7915_dev *dev, u16 wlan_idx) res = (struct mt7915_mcu_wa_tx_stat *)skb->data; - if (le16_to_cpu(res->wlan_idx) != wlan_idx) { + ret_wcid = le16_to_cpu(res->wcid); + if (is_mt7915(&dev->mt76)) + ret_wcid &= 0xff; + + if (ret_wcid != wlan_idx) { ret = -EINVAL; goto out; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h index 6e79bc65f5a5..a30d08eb0656 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h @@ -302,6 +302,10 @@ struct mt7915_dev { struct rchan *relay_fwlog; void *cal; + u32 cur_prek_offset; + u8 dpd_chan_num_2g; + u8 dpd_chan_num_5g; + u8 dpd_chan_num_6g; struct { u8 debug_wm; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c index f5b99917c08e..90a6f61d1089 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c @@ -7,7 +7,6 @@ #include <linux/pinctrl/consumer.h> #include <linux/of.h> #include <linux/of_reserved_mem.h> -#include <linux/of_gpio.h> #include <linux/iopoll.h> #include <linux/reset.h> #include <linux/of_net.h> diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c index 867e14f6b93a..73e42ef42983 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c @@ -663,6 +663,7 @@ void 
mt7921_mac_reset_work(struct work_struct *work) int i, ret; dev_dbg(dev->mt76.dev, "chip reset\n"); + set_bit(MT76_RESET, &dev->mphy.state); dev->hw_full_reset = true; ieee80211_stop_queues(hw); @@ -691,6 +692,7 @@ void mt7921_mac_reset_work(struct work_struct *work) } dev->hw_full_reset = false; + clear_bit(MT76_RESET, &dev->mphy.state); pm->suspended = false; ieee80211_wake_queues(hw); ieee80211_iterate_active_interfaces(hw, diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index ca36de34171b..3e3ad3518d85 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -242,6 +242,15 @@ int __mt7921_start(struct mt792x_phy *phy) ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, MT792x_WATCHDOG_TIME); + if (mt76_is_mmio(mphy->dev)) { + err = mt7921_mcu_radio_led_ctrl(phy->dev, EXT_CMD_RADIO_LED_CTRL_ENABLE); + if (err) + return err; + + err = mt7921_mcu_radio_led_ctrl(phy->dev, EXT_CMD_RADIO_ON_LED); + if (err) + return err; + } return 0; } @@ -259,6 +268,22 @@ static int mt7921_start(struct ieee80211_hw *hw) return err; } +static void mt7921_stop(struct ieee80211_hw *hw) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + int err = 0; + + if (mt76_is_mmio(&dev->mt76)) { + mt792x_mutex_acquire(dev); + err = mt7921_mcu_radio_led_ctrl(dev, EXT_CMD_RADIO_OFF_LED); + mt792x_mutex_release(dev); + if (err) + return; + } + + mt792x_stop(hw); +} + static int mt7921_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@ -310,6 +335,8 @@ mt7921_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) } vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; + if (phy->chip_cap & MT792x_CHIP_CAP_RSSI_NOTIFY_EVT_EN) + vif->driver_flags |= IEEE80211_VIF_SUPPORTS_CQM_RSSI; out: mt792x_mutex_release(dev); @@ -679,6 +706,9 @@ static void mt7921_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_PS) mt7921_mcu_uni_bss_ps(dev, vif); + if (changed & BSS_CHANGED_CQM) + mt7921_mcu_set_rssimonitor(dev, vif); + if (changed & BSS_CHANGED_ASSOC) { mt7921_mcu_sta_update(dev, NULL, vif, true, MT76_STA_INFO_STATE_ASSOC); @@ -1372,7 +1402,7 @@ static void mt7921_mgd_complete_tx(struct ieee80211_hw *hw, const struct ieee80211_ops mt7921_ops = { .tx = mt792x_tx, .start = mt7921_start, - .stop = mt792x_stop, + .stop = mt7921_stop, .add_interface = mt7921_add_interface, .remove_interface = mt792x_remove_interface, .config = mt7921_config, diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c index 8b4ce32a2cd1..bdd8b5f19b24 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c @@ -254,6 +254,42 @@ mt7921_mcu_tx_done_event(struct mt792x_dev *dev, struct sk_buff *skb) } static void +mt7921_mcu_rssi_monitor_iter(void *priv, u8 *mac, + struct ieee80211_vif *vif) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt76_connac_rssi_notify_event *event = priv; + enum nl80211_cqm_rssi_threshold_event nl_event; + s32 rssi = le32_to_cpu(event->rssi[mvif->mt76.idx]); + + if (!rssi) + return; + + if (!(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)) + return; + + if (rssi > vif->bss_conf.cqm_rssi_thold) + nl_event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH; + else + nl_event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW; + + ieee80211_cqm_rssi_notify(vif, nl_event, rssi, GFP_KERNEL); +} + +static void +mt7921_mcu_rssi_monitor_event(struct 
mt792x_dev *dev, struct sk_buff *skb) +{ + struct mt76_connac_rssi_notify_event *event; + + skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd)); + event = (struct mt76_connac_rssi_notify_event *)skb->data; + + ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7921_mcu_rssi_monitor_iter, event); +} + +static void mt7921_mcu_rx_unsolicited_event(struct mt792x_dev *dev, struct sk_buff *skb) { struct mt76_connac2_mcu_rxd *rxd; @@ -281,6 +317,9 @@ mt7921_mcu_rx_unsolicited_event(struct mt792x_dev *dev, struct sk_buff *skb) case MCU_EVENT_TX_DONE: mt7921_mcu_tx_done_event(dev, skb); break; + case MCU_EVENT_RSSI_NOTIFY: + mt7921_mcu_rssi_monitor_event(dev, skb); + break; default: break; } @@ -327,6 +366,7 @@ void mt7921_mcu_rx_event(struct mt792x_dev *dev, struct sk_buff *skb) if (rxd->ext_eid == MCU_EXT_EVENT_RATE_REPORT || rxd->eid == MCU_EVENT_BSS_BEACON_LOSS || rxd->eid == MCU_EVENT_SCHED_SCAN_DONE || + rxd->eid == MCU_EVENT_RSSI_NOTIFY || rxd->eid == MCU_EVENT_SCAN_DONE || rxd->eid == MCU_EVENT_TX_DONE || rxd->eid == MCU_EVENT_DBG_MSG || @@ -606,6 +646,20 @@ int mt7921_run_firmware(struct mt792x_dev *dev) } EXPORT_SYMBOL_GPL(mt7921_run_firmware); +int mt7921_mcu_radio_led_ctrl(struct mt792x_dev *dev, u8 value) +{ + struct { + u8 ctrlid; + u8 rsv[3]; + } __packed req = { + .ctrlid = value, + }; + + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ID_RADIO_ON_OFF_CTRL), + &req, sizeof(req), false); +} +EXPORT_SYMBOL_GPL(mt7921_mcu_radio_led_ctrl); + int mt7921_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; @@ -1100,12 +1154,12 @@ int mt7921_mcu_config_sniffer(struct mt792x_vif *vif, { struct cfg80211_chan_def *chandef = &ctx->def; int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2; - const u8 ch_band[] = { + static const u8 ch_band[] = { [NL80211_BAND_2GHZ] = 1, [NL80211_BAND_5GHZ] = 2, [NL80211_BAND_6GHZ] = 3, }; - const u8 ch_width[] = { + static const u8 ch_width[] = { [NL80211_CHAN_WIDTH_20_NOHT] = 0, [NL80211_CHAN_WIDTH_20] = 0, [NL80211_CHAN_WIDTH_40] = 0, @@ -1391,3 +1445,24 @@ int mt7921_mcu_set_rxfilter(struct mt792x_dev *dev, u32 fif, return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_RX_FILTER), &data, sizeof(data), false); } + +int mt7921_mcu_set_rssimonitor(struct mt792x_dev *dev, struct ieee80211_vif *vif) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct { + u8 enable; + s8 cqm_rssi_high; + s8 cqm_rssi_low; + u8 bss_idx; + u16 duration; + u8 rsv2[2]; + } __packed data = { + .enable = vif->cfg.assoc, + .cqm_rssi_high = vif->bss_conf.cqm_rssi_thold + vif->bss_conf.cqm_rssi_hyst, + .cqm_rssi_low = vif->bss_conf.cqm_rssi_thold - vif->bss_conf.cqm_rssi_hyst, + .bss_idx = mvif->mt76.idx, + }; + + return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(RSSI_MONITOR), + &data, sizeof(data), false); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h index 3016636d18c6..6c5392c5d207 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h @@ -27,6 +27,10 @@ #define MCU_UNI_EVENT_ROC 0x27 #define MCU_UNI_EVENT_CLC 0x80 +#define EXT_CMD_RADIO_LED_CTRL_ENABLE 0x1 +#define EXT_CMD_RADIO_ON_LED 0x2 +#define EXT_CMD_RADIO_OFF_LED 0x3 + enum { UNI_ROC_ACQUIRE, UNI_ROC_ABORT, @@ -196,6 +200,7 @@ int mt7921_mcu_fw_log_2_host(struct mt792x_dev *dev, u8 ctrl); void mt7921_mcu_rx_event(struct mt792x_dev *dev, struct 
sk_buff *skb); int mt7921_mcu_set_rxfilter(struct mt792x_dev *dev, u32 fif, u8 bit_op, u32 bit_map); +int mt7921_mcu_radio_led_ctrl(struct mt792x_dev *dev, u8 value); static inline u32 mt7921_reg_map_l1(struct mt792x_dev *dev, u32 addr) @@ -323,4 +328,5 @@ int mt7921_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, int mt7921_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, u8 token_id); void mt7921_roc_abort_sync(struct mt792x_dev *dev); +int mt7921_mcu_set_rssimonitor(struct mt792x_dev *dev, struct ieee80211_vif *vif); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c index cda853e86676..f768e9389ac6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c @@ -24,6 +24,8 @@ static const struct pci_device_id mt7921_pci_device_table[] = { .driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM }, { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0616), .driver_data = (kernel_ulong_t)MT7922_FIRMWARE_WM }, + { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7920), + .driver_data = (kernel_ulong_t)MT7920_FIRMWARE_WM }, { }, }; @@ -269,9 +271,9 @@ static int mt7921_pci_probe(struct pci_dev *pdev, struct mt76_bus_ops *bus_ops; struct mt792x_dev *dev; struct mt76_dev *mdev; + u16 cmd, chipid; u8 features; int ret; - u16 cmd; ret = pcim_enable_device(pdev); if (ret) @@ -345,7 +347,10 @@ static int mt7921_pci_probe(struct pci_dev *pdev, if (ret) goto err_free_dev; - mdev->rev = (mt7921_l1_rr(dev, MT_HW_CHIPID) << 16) | + chipid = mt7921_l1_rr(dev, MT_HW_CHIPID); + if (chipid == 0x7961 && (mt7921_l1_rr(dev, MT_HW_BOUND) & BIT(7))) + chipid = 0x7920; + mdev->rev = (chipid << 16) | (mt7921_l1_rr(dev, MT_HW_REV) & 0xff); dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev); @@ -422,6 +427,10 @@ static int mt7921_pci_suspend(struct device *device) wait_event_timeout(dev->wait, !dev->regd_in_progress, 5 * HZ); + err = mt7921_mcu_radio_led_ctrl(dev, EXT_CMD_RADIO_OFF_LED); + if (err < 0) + goto restore_suspend; + err = mt76_connac_mcu_set_hif_suspend(mdev, true); if (err) goto restore_suspend; @@ -520,9 +529,11 @@ static int mt7921_pci_resume(struct device *device) mt76_connac_mcu_set_deep_sleep(&dev->mt76, false); err = mt76_connac_mcu_set_hif_suspend(mdev, false); + if (err < 0) + goto failed; mt7921_regd_update(dev); - + err = mt7921_mcu_radio_led_ctrl(dev, EXT_CMD_RADIO_ON_LED); failed: pm->suspended = false; @@ -551,6 +562,8 @@ static struct pci_driver mt7921_pci_driver = { module_pci_driver(mt7921_pci_driver); MODULE_DEVICE_TABLE(pci, mt7921_pci_device_table); +MODULE_FIRMWARE(MT7920_FIRMWARE_WM); +MODULE_FIRMWARE(MT7920_ROM_PATCH); MODULE_FIRMWARE(MT7921_FIRMWARE_WM); MODULE_FIRMWARE(MT7921_ROM_PATCH); MODULE_FIRMWARE(MT7922_FIRMWARE_WM); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c index c866144ff061..031ba9aaa4e2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c @@ -64,7 +64,6 @@ int mt7921e_mac_reset(struct mt792x_dev *dev) mt76_wr(dev, dev->irq_map->host_irq_enable, 0); mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0); - set_bit(MT76_RESET, &dev->mphy.state); set_bit(MT76_MCU_RESET, &dev->mphy.state); wake_up(&dev->mt76.mcu.wait); skb_queue_purge(&dev->mt76.mcu.res_q); @@ -115,7 +114,6 @@ int mt7921e_mac_reset(struct mt792x_dev *dev) err = __mt7921_start(&dev->phy); out: - clear_bit(MT76_RESET, &dev->mphy.state); local_bh_disable(); 
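/* Aside (not part of the patch, inferred from the hunks above): this series
 * moves ownership of the MT76_RESET flag into mt7921_mac_reset_work(), which
 * now sets the bit before the full chip reset and clears it once the reset
 * has finished, so the bus-specific reset handlers - PCIe here and SDIO just
 * below - drop their own set_bit()/clear_bit() calls on dev->mphy.state.
 */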
napi_enable(&dev->mt76.tx_napi); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c index 389eb0903807..1f77cf71ca70 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c @@ -98,7 +98,6 @@ int mt7921s_mac_reset(struct mt792x_dev *dev) mt76_connac_free_pending_tx_skbs(&dev->pm, NULL); mt76_txq_schedule_all(&dev->mphy); mt76_worker_disable(&dev->mt76.tx_worker); - set_bit(MT76_RESET, &dev->mphy.state); set_bit(MT76_MCU_RESET, &dev->mphy.state); wake_up(&dev->mt76.mcu.wait); skb_queue_purge(&dev->mt76.mcu.res_q); @@ -135,7 +134,6 @@ int mt7921s_mac_reset(struct mt792x_dev *dev) err = __mt7921_start(&dev->phy); out: - clear_bit(MT76_RESET, &dev->mphy.state); mt76_worker_enable(&dev->mt76.tx_worker); diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c index 1b9fbd9a140d..c2460ef4993d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c @@ -590,14 +590,25 @@ mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb) seq_ctrl = le16_to_cpu(hdr->seq_ctrl); qos_ctl = *ieee80211_get_qos_ctl(hdr); } + skb_set_mac_header(skb, (unsigned char *)hdr - skb->data); } else { status->flag |= RX_FLAG_8023; } mt792x_mac_assoc_rssi(dev, skb); - if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023)) - mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode); + if (rxv && !(status->flag & RX_FLAG_8023)) { + switch (status->encoding) { + case RX_ENC_EHT: + mt76_connac3_mac_decode_eht_radiotap(skb, rxv, mode); + break; + case RX_ENC_HE: + mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode); + break; + default: + break; + } + } if (!status->wcid || !ieee80211_is_data_qos(fc)) return 0; diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c index bd37cb8d734b..652a9accc43c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c @@ -1768,12 +1768,12 @@ int mt7925_mcu_config_sniffer(struct mt792x_vif *vif, struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &mphy->chandef; int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2; - const u8 ch_band[] = { + static const u8 ch_band[] = { [NL80211_BAND_2GHZ] = 1, [NL80211_BAND_5GHZ] = 2, [NL80211_BAND_6GHZ] = 3, }; - const u8 ch_width[] = { + static const u8 ch_width[] = { [NL80211_CHAN_WIDTH_20_NOHT] = 0, [NL80211_CHAN_WIDTH_20] = 0, [NL80211_CHAN_WIDTH_40] = 0, diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h index 2a0bbfe7bfa5..b8315a89f4a9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h @@ -535,7 +535,7 @@ struct mt7925_wow_pattern_tlv { u8 offset; u8 mask[MT76_CONNAC_WOW_MASK_MAX_LEN]; u8 pattern[MT76_CONNAC_WOW_PATTEN_MAX_LEN]; - u8 rsv[4]; + u8 rsv[7]; } __packed; static inline enum connac3_mcu_cipher_type diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h index a8556de3d480..20578497a405 100644 --- a/drivers/net/wireless/mediatek/mt76/mt792x.h +++ b/drivers/net/wireless/mediatek/mt76/mt792x.h @@ -26,6 +26,7 @@ #define MT792x_FW_CAP_CNM BIT(7) #define MT792x_CHIP_CAP_CLC_EVT_EN BIT(0) +#define MT792x_CHIP_CAP_RSSI_NOTIFY_EVT_EN BIT(1) /* NOTE: used to map mt76_rates. 
idx may change if firmware expands table */ #define MT792x_BASIC_RATES_TBL 11 @@ -36,10 +37,12 @@ #define MT792x_MCU_INIT_RETRY_COUNT 10 #define MT792x_WFSYS_INIT_RETRY_COUNT 2 +#define MT7920_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7961_1a.bin" #define MT7921_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7961_1.bin" #define MT7922_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7922_1.bin" #define MT7925_FIRMWARE_WM "mediatek/mt7925/WIFI_RAM_CODE_MT7925_1_1.bin" +#define MT7920_ROM_PATCH "mediatek/WIFI_MT7961_patch_mcu_1a_2_hdr.bin" #define MT7921_ROM_PATCH "mediatek/WIFI_MT7961_patch_mcu_1_2_hdr.bin" #define MT7922_ROM_PATCH "mediatek/WIFI_MT7922_patch_mcu_1_1_hdr.bin" #define MT7925_ROM_PATCH "mediatek/mt7925/WIFI_MT7925_PATCH_MCU_1_1_hdr.bin" @@ -326,6 +329,8 @@ int mt792x_mcu_fw_pmctrl(struct mt792x_dev *dev); static inline char *mt792x_ram_name(struct mt792x_dev *dev) { switch (mt76_chip(&dev->mt76)) { + case 0x7920: + return MT7920_FIRMWARE_WM; case 0x7922: return MT7922_FIRMWARE_WM; case 0x7925: @@ -338,6 +343,8 @@ static inline char *mt792x_ram_name(struct mt792x_dev *dev) static inline char *mt792x_patch_name(struct mt792x_dev *dev) { switch (mt76_chip(&dev->mt76)) { + case 0x7920: + return MT7920_ROM_PATCH; case 0x7922: return MT7922_ROM_PATCH; case 0x7925: diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c index 9bd953586b04..62c03d088925 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c @@ -225,6 +225,11 @@ mt7996_radar_trigger(void *data, u64 val) if (val > MT_RX_SEL2) return -EINVAL; + if (val == MT_RX_SEL2 && !dev->rdd2_phy) { + dev_err(dev->mt76.dev, "Background radar is not enabled\n"); + return -EINVAL; + } + return mt7996_mcu_rdd_cmd(dev, RDD_RADAR_EMULATE, val, 0, 0); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c index 0384fb059ddf..bc7111a71f98 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c @@ -1655,14 +1655,10 @@ mt7996_mac_restart(struct mt7996_dev *dev) set_bit(MT76_RESET, &dev->mphy.state); set_bit(MT76_MCU_RESET, &dev->mphy.state); wake_up(&dev->mt76.mcu.wait); - if (phy2) { + if (phy2) set_bit(MT76_RESET, &phy2->mt76->state); - set_bit(MT76_MCU_RESET, &phy2->mt76->state); - } - if (phy3) { + if (phy3) set_bit(MT76_RESET, &phy3->mt76->state); - set_bit(MT76_MCU_RESET, &phy3->mt76->state); - } /* lock/unlock all queues to ensure that no tx is pending */ mt76_txq_schedule_all(&dev->mphy); diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c index f7da8d6dd903..7c97140d8255 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c @@ -35,6 +35,14 @@ int mt7996_run(struct ieee80211_hw *hw) ret = mt7996_mcu_set_hdr_trans(dev, true); if (ret) goto out; + + if (is_mt7992(&dev->mt76)) { + u8 queue = mt76_connac_lmac_mapping(IEEE80211_AC_VI); + + ret = mt7996_mcu_cp_support(dev, queue); + if (ret) + goto out; + } } mt7996_mac_enable_nf(dev, phy->mt76->band_idx); @@ -238,7 +246,11 @@ static int mt7996_add_interface(struct ieee80211_hw *hw, mt7996_init_bitrate_mask(vif); mt7996_mcu_add_bss_info(phy, vif, true); - mt7996_mcu_add_sta(dev, vif, NULL, true); + /* defer the first STA_REC of BMC entry to BSS_CHANGED_BSSID for STA + * interface, since firmware only records BSSID when the entry is new + */ + if 
(vif->type != NL80211_IFTYPE_STATION) + mt7996_mcu_add_sta(dev, vif, NULL, true, true); rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid); out: @@ -256,7 +268,7 @@ static void mt7996_remove_interface(struct ieee80211_hw *hw, struct mt7996_phy *phy = mt7996_hw_phy(hw); int idx = msta->wcid.idx; - mt7996_mcu_add_sta(dev, vif, NULL, false); + mt7996_mcu_add_sta(dev, vif, NULL, false, false); mt7996_mcu_add_bss_info(phy, vif, false); if (vif == phy->monitor_vif) @@ -340,10 +352,6 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, /* fall back to sw encryption for unsupported ciphers */ switch (key->cipher) { - case WLAN_CIPHER_SUITE_AES_CMAC: - wcid_keyidx = &wcid->hw_key_idx2; - key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE; - break; case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: @@ -351,6 +359,11 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, case WLAN_CIPHER_SUITE_GCMP_256: case WLAN_CIPHER_SUITE_SMS4: break; + case WLAN_CIPHER_SUITE_AES_CMAC: + wcid_keyidx = &wcid->hw_key_idx2; + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE; + fallthrough; + case WLAN_CIPHER_SUITE_BIP_CMAC_256: case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: if (key->keyidx == 6 || key->keyidx == 7) @@ -438,7 +451,7 @@ mt7996_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct ieee80211_tx_queue_params *params) { struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; - const u8 mq_to_aci[] = { + static const u8 mq_to_aci[] = { [IEEE80211_AC_VO] = 3, [IEEE80211_AC_VI] = 2, [IEEE80211_AC_BE] = 0, @@ -599,7 +612,8 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw, (changed & BSS_CHANGED_ASSOC && vif->cfg.assoc) || (changed & BSS_CHANGED_BEACON_ENABLED && info->enable_beacon)) { mt7996_mcu_add_bss_info(phy, vif, true); - mt7996_mcu_add_sta(dev, vif, NULL, true); + mt7996_mcu_add_sta(dev, vif, NULL, true, + !!(changed & BSS_CHANGED_BSSID)); } if (changed & BSS_CHANGED_ERP_CTS_PROT) @@ -688,7 +702,7 @@ int mt7996_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, mt7996_mac_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); - ret = mt7996_mcu_add_sta(dev, vif, sta, true); + ret = mt7996_mcu_add_sta(dev, vif, sta, true, true); if (ret) return ret; @@ -702,7 +716,7 @@ void mt7996_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; int i; - mt7996_mcu_add_sta(dev, vif, sta, false); + mt7996_mcu_add_sta(dev, vif, sta, false, false); mt7996_mac_wtbl_update(dev, msta->wcid.idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c index b44abe2acc81..2c8578677800 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c @@ -355,7 +355,10 @@ mt7996_mcu_rx_radar_detected(struct mt7996_dev *dev, struct sk_buff *skb) if (r->band_idx >= ARRAY_SIZE(dev->mt76.phys)) return; - if (dev->rdd2_phy && r->band_idx == MT_RX_SEL2) + if (r->band_idx == MT_RX_SEL2 && !dev->rdd2_phy) + return; + + if (r->band_idx == MT_RX_SEL2) mphy = dev->rdd2_phy->mt76; else mphy = dev->mt76.phys[r->band_idx]; @@ -418,7 +421,7 @@ mt7996_mcu_cca_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) if (!vif->bss_conf.color_change_active || vif->type == NL80211_IFTYPE_STATION) return; - ieee80211_color_change_finish(vif); + ieee80211_color_change_finish(vif, 0); } 
static void @@ -819,11 +822,14 @@ mt7996_mcu_bss_mbssid_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, struct bss_info_uni_mbssid *mbssid; struct tlv *tlv; + if (!vif->bss_conf.bssid_indicator) + return; + tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_11V_MBSSID, sizeof(*mbssid)); mbssid = (struct bss_info_uni_mbssid *)tlv; - if (enable && vif->bss_conf.bssid_indicator) { + if (enable) { mbssid->max_indicator = vif->bss_conf.bssid_indicator; mbssid->mbss_idx = vif->bss_conf.bssid_index; mbssid->tx_bss_omac_idx = 0; @@ -1650,7 +1656,7 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb, int tx_ant = hweight8(phy->mt76->chainmask) - 1; struct sta_rec_bf *bf; struct tlv *tlv; - const u8 matrix[4][4] = { + static const u8 matrix[4][4] = { {0, 0, 0, 0}, {1, 1, 0, 0}, /* 2x1, 2x2, 2x3, 2x4 */ {2, 4, 4, 0}, /* 3x1, 3x2, 3x3, 3x4 */ @@ -1749,6 +1755,18 @@ mt7996_mcu_sta_bfee_tlv(struct mt7996_dev *dev, struct sk_buff *skb, } static void +mt7996_mcu_sta_tx_proc_tlv(struct sk_buff *skb) +{ + struct sta_rec_tx_proc *tx_proc; + struct tlv *tlv; + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_TX_PROC, sizeof(*tx_proc)); + + tx_proc = (struct sta_rec_tx_proc *)tlv; + tx_proc->flag = cpu_to_le32(0); +} + +static void mt7996_mcu_sta_hdrt_tlv(struct mt7996_dev *dev, struct sk_buff *skb) { struct sta_rec_hdrt *hdrt; @@ -1778,10 +1796,10 @@ mt7996_mcu_sta_hdr_trans_tlv(struct mt7996_dev *dev, struct sk_buff *skb, else hdr_trans->from_ds = true; - wcid = (struct mt76_wcid *)sta->drv_priv; - if (!wcid) + if (!sta) return; + wcid = (struct mt76_wcid *)sta->drv_priv; hdr_trans->dis_rx_hdr_tran = !test_bit(MT_WCID_FLAG_HDR_TRANS, &wcid->flags); if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags)) { hdr_trans->to_ds = true; @@ -1968,6 +1986,7 @@ static void mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { +#define INIT_RCPI 180 struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; struct mt76_phy *mphy = mvif->phy->mt76; struct cfg80211_chan_def *chandef = &mphy->chandef; @@ -2065,6 +2084,8 @@ mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev, IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP); } ra->sta_cap = cpu_to_le32(cap); + + memset(ra->rx_rcpi, INIT_RCPI, sizeof(ra->rx_rcpi)); } int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct ieee80211_vif *vif, @@ -2133,7 +2154,7 @@ mt7996_mcu_add_group(struct mt7996_dev *dev, struct ieee80211_vif *vif, } int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, bool enable) + struct ieee80211_sta *sta, bool enable, bool newly) { struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; struct mt7996_sta *msta; @@ -2149,11 +2170,16 @@ int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif, return PTR_ERR(skb); /* starec basic */ - mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, sta, enable, - !rcu_access_pointer(dev->mt76.wcid[msta->wcid.idx])); + mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, sta, enable, newly); + if (!enable) goto out; + /* starec hdr trans */ + mt7996_mcu_sta_hdr_trans_tlv(dev, skb, vif, sta); + /* starec tx proc */ + mt7996_mcu_sta_tx_proc_tlv(skb); + /* tag order is in accordance with firmware dependency. 
*/ if (sta) { /* starec hdrt mode */ @@ -2178,8 +2204,6 @@ int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif, mt7996_mcu_sta_muru_tlv(dev, skb, vif, sta); /* starec bfee */ mt7996_mcu_sta_bfee_tlv(dev, skb, vif, sta); - /* starec hdr trans */ - mt7996_mcu_sta_hdr_trans_tlv(dev, skb, vif, sta); } ret = mt7996_mcu_add_group(dev, vif, sta); @@ -3729,6 +3753,7 @@ int mt7996_mcu_get_temperature(struct mt7996_phy *phy) } __packed * res; struct sk_buff *skb; int ret; + u32 temp; ret = mt76_mcu_send_and_get_msg(&phy->dev->mt76, MCU_WM_UNI_CMD(THERMAL), &req, sizeof(req), true, &skb); @@ -3736,8 +3761,10 @@ int mt7996_mcu_get_temperature(struct mt7996_phy *phy) return ret; res = (void *)skb->data; + temp = le32_to_cpu(res->temperature); + dev_kfree_skb(skb); - return le32_to_cpu(res->temperature); + return temp; } int mt7996_mcu_set_thermal_throttling(struct mt7996_phy *phy, u8 state) @@ -4464,7 +4491,7 @@ int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy) u8 band_idx; } __packed req = { .tag = cpu_to_le16(UNI_TXPOWER_POWER_LIMIT_TABLE_CTRL), - .len = cpu_to_le16(sizeof(req) + MT7996_SKU_RATE_NUM - 4), + .len = cpu_to_le16(sizeof(req) + MT7996_SKU_PATH_NUM - 4), .power_ctrl_id = UNI_TXPOWER_POWER_LIMIT_TABLE_CTRL, .power_limit_type = TX_POWER_LIMIT_TABLE_RATE, .band_idx = phy->mt76->band_idx, @@ -4479,7 +4506,7 @@ int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy) mphy->txpower_cur = tx_power; skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, - sizeof(req) + MT7996_SKU_RATE_NUM); + sizeof(req) + MT7996_SKU_PATH_NUM); if (!skb) return -ENOMEM; @@ -4503,6 +4530,22 @@ int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy) /* eht */ skb_put_data(skb, &la.eht[0], sizeof(la.eht)); + /* padding */ + skb_put_zero(skb, MT7996_SKU_PATH_NUM - MT7996_SKU_RATE_NUM); + return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_WM_UNI_CMD(TXPOWER), true); } + +int mt7996_mcu_cp_support(struct mt7996_dev *dev, u8 mode) +{ + __le32 cp_mode; + + if (mode < mt76_connac_lmac_mapping(IEEE80211_AC_BE) || + mode > mt76_connac_lmac_mapping(IEEE80211_AC_VO)) + return -EINVAL; + + cp_mode = cpu_to_le32(mode); + return mt76_mcu_send_msg(&dev->mt76, MCU_WA_EXT_CMD(CP_SUPPORT), + &cp_mode, sizeof(cp_mode), true); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c index 304e5fd14803..928a9663b49e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c @@ -519,7 +519,7 @@ static void mt7996_irq_tasklet(struct tasklet_struct *t) struct mt7996_dev *dev = from_tasklet(dev, t, mt76.irq_tasklet); struct mtk_wed_device *wed = &dev->mt76.mmio.wed; struct mtk_wed_device *wed_hif2 = &dev->mt76.mmio.wed_hif2; - u32 i, intr, mask, intr1; + u32 i, intr, mask, intr1 = 0; if (dev->hif2 && mtk_wed_device_active(wed_hif2)) { mtk_wed_device_irq_set_mask(wed_hif2, 0); diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h index 36d1f247d55a..177cfff31120 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h @@ -50,6 +50,7 @@ #define MT7996_CFEND_RATE_11B 0x03 /* 11B LP, 11M */ #define MT7996_SKU_RATE_NUM 417 +#define MT7996_SKU_PATH_NUM 494 #define MT7996_MAX_TWT_AGRT 16 #define MT7996_MAX_STA_TWT_AGRT 8 @@ -450,7 +451,7 @@ int mt7996_mcu_add_dev_info(struct mt7996_phy *phy, int mt7996_mcu_add_bss_info(struct mt7996_phy *phy, struct ieee80211_vif *vif, int enable); int 
mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, bool enable); + struct ieee80211_sta *sta, bool enable, bool newly); int mt7996_mcu_add_tx_ba(struct mt7996_dev *dev, struct ieee80211_ampdu_params *params, bool add); @@ -612,6 +613,7 @@ int mt7996_mcu_bcn_prot_enable(struct mt7996_dev *dev, struct ieee80211_vif *vif int mt7996_mcu_wtbl_update_hdr_trans(struct mt7996_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); +int mt7996_mcu_cp_support(struct mt7996_dev *dev, u8 mode); #ifdef CONFIG_MAC80211_DEBUGFS void mt7996_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct dentry *dir); diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c index 3e88798df017..8e9576747052 100644 --- a/drivers/net/wireless/mediatek/mt76/sdio.c +++ b/drivers/net/wireless/mediatek/mt76/sdio.c @@ -499,7 +499,8 @@ static void mt76s_tx_status_data(struct mt76_worker *worker) dev = container_of(sdio, struct mt76_dev, sdio); while (true) { - if (test_bit(MT76_REMOVED, &dev->phy.state)) + if (test_bit(MT76_RESET, &dev->phy.state) || + test_bit(MT76_REMOVED, &dev->phy.state)) break; if (!dev->drv->tx_status_data(dev, &update)) @@ -514,13 +515,14 @@ static void mt76s_tx_status_data(struct mt76_worker *worker) } static int -mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, +mt76s_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q, enum mt76_txq_id qid, struct sk_buff *skb, struct mt76_wcid *wcid, struct ieee80211_sta *sta) { struct mt76_tx_info tx_info = { .skb = skb, }; + struct mt76_dev *dev = phy->dev; int err, len = skb->len; u16 idx = q->head; @@ -548,10 +550,7 @@ static int mt76s_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q, struct sk_buff *skb, u32 tx_info) { - int ret = -ENOSPC, len = skb->len, pad; - - if (q->queued == q->ndesc) - goto error; + int ret, len = skb->len, pad; pad = round_up(skb->len, 4) - skb->len; ret = mt76_skb_adjust_pad(skb, pad); @@ -560,6 +559,12 @@ mt76s_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q, spin_lock_bh(&q->lock); + if (q->queued == q->ndesc) { + ret = -ENOSPC; + spin_unlock_bh(&q->lock); + goto error; + } + q->entry[q->head].buf_sz = len; q->entry[q->head].skb = skb; diff --git a/drivers/net/wireless/mediatek/mt76/testmode.c b/drivers/net/wireless/mediatek/mt76/testmode.c index 4644dace9bb3..ca4feccf38ca 100644 --- a/drivers/net/wireless/mediatek/mt76/testmode.c +++ b/drivers/net/wireless/mediatek/mt76/testmode.c @@ -53,7 +53,7 @@ void mt76_testmode_tx_pending(struct mt76_phy *phy) q->queued < q->ndesc / 2) { int ret; - ret = dev->queue_ops->tx_queue_skb(dev, q, qid, skb_get(skb), + ret = dev->queue_ops->tx_queue_skb(phy, q, qid, skb_get(skb), wcid, NULL); if (ret < 0) break; diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index 1809b03292c3..5cf6edee4d13 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -308,7 +308,7 @@ __mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb, int idx; non_aql = !info->tx_time_est; - idx = dev->queue_ops->tx_queue_skb(dev, q, qid, skb, wcid, sta); + idx = dev->queue_ops->tx_queue_skb(phy, q, qid, skb, wcid, sta); if (idx < 0 || !sta) return idx; diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c index 342c3aea549d..58ff06823389 100644 --- a/drivers/net/wireless/mediatek/mt76/usb.c +++ 
b/drivers/net/wireless/mediatek/mt76/usb.c @@ -850,13 +850,14 @@ mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb, } static int -mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, +mt76u_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q, enum mt76_txq_id qid, struct sk_buff *skb, struct mt76_wcid *wcid, struct ieee80211_sta *sta) { struct mt76_tx_info tx_info = { .skb = skb, }; + struct mt76_dev *dev = phy->dev; u16 idx = q->head; int err; diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c index 089102ed9ae5..7d9fb9f2d527 100644 --- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c +++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c @@ -237,12 +237,11 @@ static int set_channel(struct wiphy *wiphy, struct wilc_vif *vif; u32 channelnum; int result; - int srcu_idx; - srcu_idx = srcu_read_lock(&wl->srcu); + rcu_read_lock(); vif = wilc_get_wl_to_vif(wl); if (IS_ERR(vif)) { - srcu_read_unlock(&wl->srcu, srcu_idx); + rcu_read_unlock(); return PTR_ERR(vif); } @@ -253,7 +252,7 @@ static int set_channel(struct wiphy *wiphy, if (result) netdev_err(vif->ndev, "Error in setting channel\n"); - srcu_read_unlock(&wl->srcu, srcu_idx); + rcu_read_unlock(); return result; } @@ -806,9 +805,8 @@ static int set_wiphy_params(struct wiphy *wiphy, u32 changed) struct wilc *wl = wiphy_priv(wiphy); struct wilc_vif *vif; struct wilc_priv *priv; - int srcu_idx; - srcu_idx = srcu_read_lock(&wl->srcu); + rcu_read_lock(); vif = wilc_get_wl_to_vif(wl); if (IS_ERR(vif)) goto out; @@ -863,7 +861,7 @@ static int set_wiphy_params(struct wiphy *wiphy, u32 changed) netdev_err(priv->dev, "Error in setting WIPHY PARAMS\n"); out: - srcu_read_unlock(&wl->srcu, srcu_idx); + rcu_read_unlock(); return ret; } @@ -1539,20 +1537,19 @@ static struct wireless_dev *add_virtual_intf(struct wiphy *wiphy, if (type == NL80211_IFTYPE_MONITOR) { struct net_device *ndev; - int srcu_idx; - srcu_idx = srcu_read_lock(&wl->srcu); + rcu_read_lock(); vif = wilc_get_vif_from_type(wl, WILC_AP_MODE); if (!vif) { vif = wilc_get_vif_from_type(wl, WILC_GO_MODE); if (!vif) { - srcu_read_unlock(&wl->srcu, srcu_idx); + rcu_read_unlock(); goto validate_interface; } } if (vif->monitor_flag) { - srcu_read_unlock(&wl->srcu, srcu_idx); + rcu_read_unlock(); goto validate_interface; } @@ -1560,12 +1557,12 @@ static struct wireless_dev *add_virtual_intf(struct wiphy *wiphy, if (ndev) { vif->monitor_flag = 1; } else { - srcu_read_unlock(&wl->srcu, srcu_idx); + rcu_read_unlock(); return ERR_PTR(-EINVAL); } wdev = &vif->priv.wdev; - srcu_read_unlock(&wl->srcu, srcu_idx); + rcu_read_unlock(); return wdev; } @@ -1613,7 +1610,7 @@ static int del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) list_del_rcu(&vif->list); wl->vif_num--; mutex_unlock(&wl->vif_mutex); - synchronize_srcu(&wl->srcu); + synchronize_rcu(); return 0; } @@ -1638,25 +1635,23 @@ static void wilc_set_wakeup(struct wiphy *wiphy, bool enabled) { struct wilc *wl = wiphy_priv(wiphy); struct wilc_vif *vif; - int srcu_idx; - srcu_idx = srcu_read_lock(&wl->srcu); + rcu_read_lock(); vif = wilc_get_wl_to_vif(wl); if (IS_ERR(vif)) { - srcu_read_unlock(&wl->srcu, srcu_idx); + rcu_read_unlock(); return; } netdev_info(vif->ndev, "cfg set wake up = %d\n", enabled); wilc_set_wowlan_trigger(vif, enabled); - srcu_read_unlock(&wl->srcu, srcu_idx); + rcu_read_unlock(); } static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, enum nl80211_tx_power_setting type, int mbm) { int ret; - int 
srcu_idx; s32 tx_power = MBM_TO_DBM(mbm); struct wilc *wl = wiphy_priv(wiphy); struct wilc_vif *vif; @@ -1664,10 +1659,10 @@ static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, if (!wl->initialized) return -EIO; - srcu_idx = srcu_read_lock(&wl->srcu); + rcu_read_lock(); vif = wilc_get_wl_to_vif(wl); if (IS_ERR(vif)) { - srcu_read_unlock(&wl->srcu, srcu_idx); + rcu_read_unlock(); return -EINVAL; } @@ -1679,7 +1674,7 @@ static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, ret = wilc_set_tx_power(vif, tx_power); if (ret) netdev_err(vif->ndev, "Failed to set tx power\n"); - srcu_read_unlock(&wl->srcu, srcu_idx); + rcu_read_unlock(); return ret; } @@ -1762,7 +1757,6 @@ static void wlan_init_locks(struct wilc *wl) init_completion(&wl->cfg_event); init_completion(&wl->sync_event); init_completion(&wl->txq_thread_started); - init_srcu_struct(&wl->srcu); } void wlan_deinit_locks(struct wilc *wilc) @@ -1773,7 +1767,6 @@ void wlan_deinit_locks(struct wilc *wilc) mutex_destroy(&wilc->txq_add_to_head_cs); mutex_destroy(&wilc->vif_mutex); mutex_destroy(&wilc->deinit_lock); - cleanup_srcu_struct(&wilc->srcu); } int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type, diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c index f1085ccb7eed..919de6ffb821 100644 --- a/drivers/net/wireless/microchip/wilc1000/hif.c +++ b/drivers/net/wireless/microchip/wilc1000/hif.c @@ -1570,12 +1570,11 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length) struct host_if_drv *hif_drv; struct host_if_msg *msg; struct wilc_vif *vif; - int srcu_idx; int result; int id; id = get_unaligned_le32(&buffer[length - 4]); - srcu_idx = srcu_read_lock(&wilc->srcu); + rcu_read_lock(); vif = wilc_get_vif_from_idx(wilc, id); if (!vif) goto out; @@ -1594,7 +1593,7 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length) msg->body.net_info.rssi = buffer[8]; msg->body.net_info.mgmt = kmemdup(&buffer[9], msg->body.net_info.frame_len, - GFP_KERNEL); + GFP_ATOMIC); if (!msg->body.net_info.mgmt) { kfree(msg); goto out; @@ -1607,7 +1606,7 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length) kfree(msg); } out: - srcu_read_unlock(&wilc->srcu, srcu_idx); + rcu_read_unlock(); } void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length) @@ -1615,14 +1614,13 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length) struct host_if_drv *hif_drv; struct host_if_msg *msg; struct wilc_vif *vif; - int srcu_idx; int result; int id; mutex_lock(&wilc->deinit_lock); id = get_unaligned_le32(&buffer[length - 4]); - srcu_idx = srcu_read_lock(&wilc->srcu); + rcu_read_lock(); vif = wilc_get_vif_from_idx(wilc, id); if (!vif) goto out; @@ -1649,7 +1647,7 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length) kfree(msg); } out: - srcu_read_unlock(&wilc->srcu, srcu_idx); + rcu_read_unlock(); mutex_unlock(&wilc->deinit_lock); } @@ -1657,12 +1655,11 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length) { struct host_if_drv *hif_drv; struct wilc_vif *vif; - int srcu_idx; int result; int id; id = get_unaligned_le32(&buffer[length - 4]); - srcu_idx = srcu_read_lock(&wilc->srcu); + rcu_read_lock(); vif = wilc_get_vif_from_idx(wilc, id); if (!vif) goto out; @@ -1687,7 +1684,7 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length) } } out: - srcu_read_unlock(&wilc->srcu, srcu_idx); + 
rcu_read_unlock(); } int wilc_remain_on_channel(struct wilc_vif *vif, u64 cookie, u16 chan, diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c index 710e29bea560..73f56f7b002b 100644 --- a/drivers/net/wireless/microchip/wilc1000/netdev.c +++ b/drivers/net/wireless/microchip/wilc1000/netdev.c @@ -127,30 +127,28 @@ void wilc_wlan_set_bssid(struct net_device *wilc_netdev, const u8 *bssid, int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc) { - int srcu_idx; u8 ret_val = 0; struct wilc_vif *vif; - srcu_idx = srcu_read_lock(&wilc->srcu); + rcu_read_lock(); wilc_for_each_vif(wilc, vif) { if (!is_zero_ether_addr(vif->bssid)) ret_val++; } - srcu_read_unlock(&wilc->srcu, srcu_idx); + rcu_read_unlock(); return ret_val; } static void wilc_wake_tx_queues(struct wilc *wl) { - int srcu_idx; struct wilc_vif *ifc; - srcu_idx = srcu_read_lock(&wl->srcu); + rcu_read_lock(); wilc_for_each_vif(wl, ifc) { if (ifc->mac_opened && netif_queue_stopped(ifc->ndev)) netif_wake_queue(ifc->ndev); } - srcu_read_unlock(&wl->srcu, srcu_idx); + rcu_read_unlock(); } static int wilc_txq_task(void *vp) @@ -655,7 +653,6 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p) struct sockaddr *addr = (struct sockaddr *)p; unsigned char mac_addr[ETH_ALEN]; struct wilc_vif *tmp_vif; - int srcu_idx; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; @@ -667,19 +664,19 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p) /* Verify MAC Address is not already in use: */ - srcu_idx = srcu_read_lock(&wilc->srcu); + rcu_read_lock(); wilc_for_each_vif(wilc, tmp_vif) { wilc_get_mac_address(tmp_vif, mac_addr); if (ether_addr_equal(addr->sa_data, mac_addr)) { if (vif != tmp_vif) { - srcu_read_unlock(&wilc->srcu, srcu_idx); + rcu_read_unlock(); return -EADDRNOTAVAIL; } - srcu_read_unlock(&wilc->srcu, srcu_idx); + rcu_read_unlock(); return 0; } } - srcu_read_unlock(&wilc->srcu, srcu_idx); + rcu_read_unlock(); result = wilc_set_mac_address(vif, (u8 *)addr->sa_data); if (result) @@ -767,15 +764,14 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev) wilc_tx_complete); if (queue_count > FLOW_CONTROL_UPPER_THRESHOLD) { - int srcu_idx; struct wilc_vif *vif; - srcu_idx = srcu_read_lock(&wilc->srcu); + rcu_read_lock(); wilc_for_each_vif(wilc, vif) { if (vif->mac_opened) netif_stop_queue(vif->ndev); } - srcu_read_unlock(&wilc->srcu, srcu_idx); + rcu_read_unlock(); } return NETDEV_TX_OK; @@ -819,13 +815,12 @@ void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size, unsigned int frame_len = 0; struct wilc_vif *vif; struct sk_buff *skb; - int srcu_idx; int stats; if (!wilc) return; - srcu_idx = srcu_read_lock(&wilc->srcu); + rcu_read_lock(); wilc_netdev = get_if_handler(wilc, buff); if (!wilc_netdev) goto out; @@ -853,15 +848,14 @@ void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size, netdev_dbg(wilc_netdev, "netif_rx ret value is: %d\n", stats); } out: - srcu_read_unlock(&wilc->srcu, srcu_idx); + rcu_read_unlock(); } void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size, bool is_auth) { - int srcu_idx; struct wilc_vif *vif; - srcu_idx = srcu_read_lock(&wilc->srcu); + rcu_read_lock(); wilc_for_each_vif(wilc, vif) { struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buff; u16 type = le16_to_cpup((__le16 *)buff); @@ -882,7 +876,7 @@ void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size, bool is_auth) if (vif->monitor_flag) wilc_wfi_monitor_rx(wilc->monitor_dev, buff, size); } - srcu_read_unlock(&wilc->srcu, 
srcu_idx); + rcu_read_unlock(); } static const struct net_device_ops wilc_netdev_ops = { @@ -912,7 +906,7 @@ void wilc_netdev_cleanup(struct wilc *wilc) list_del_rcu(&vif->list); wilc->vif_num--; mutex_unlock(&wilc->vif_mutex); - synchronize_srcu(&wilc->srcu); + synchronize_rcu(); if (vif->ndev) unregister_netdev(vif->ndev); } @@ -931,16 +925,15 @@ static u8 wilc_get_available_idx(struct wilc *wl) { int idx = 0; struct wilc_vif *vif; - int srcu_idx; - srcu_idx = srcu_read_lock(&wl->srcu); + rcu_read_lock(); wilc_for_each_vif(wl, vif) { if (vif->idx == 0) idx = 1; else idx = 0; } - srcu_read_unlock(&wl->srcu, srcu_idx); + rcu_read_unlock(); return idx; } @@ -990,7 +983,7 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name, list_add_tail_rcu(&vif->list, &wl->vif_list); wl->vif_num += 1; mutex_unlock(&wl->vif_mutex); - synchronize_srcu(&wl->srcu); + synchronize_rcu(); return vif; diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h index 5937d6d45695..eecee3973d6a 100644 --- a/drivers/net/wireless/microchip/wilc1000/netdev.h +++ b/drivers/net/wireless/microchip/wilc1000/netdev.h @@ -32,8 +32,8 @@ #define wilc_for_each_vif(w, v) \ struct wilc *_w = w; \ - list_for_each_entry_srcu(v, &_w->vif_list, list, \ - srcu_read_lock_held(&_w->srcu)) + list_for_each_entry_rcu(v, &_w->vif_list, list, \ + rcu_read_lock_held()) struct wilc_wfi_stats { unsigned long rx_packets; @@ -220,7 +220,6 @@ struct wilc { /* protect vif list */ struct mutex vif_mutex; - struct srcu_struct srcu; u8 open_ifcs; /* protect head of transmit queue */ diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c index d6d394693090..52a770c5e76f 100644 --- a/drivers/net/wireless/microchip/wilc1000/sdio.c +++ b/drivers/net/wireless/microchip/wilc1000/sdio.c @@ -981,8 +981,7 @@ static struct sdio_driver wilc_sdio_driver = { .of_match_table = wilc_of_match, } }; -module_driver(wilc_sdio_driver, - sdio_register_driver, - sdio_unregister_driver); +module_sdio_driver(wilc_sdio_driver); + MODULE_DESCRIPTION("Atmel WILC1000 SDIO wireless driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c index a9e872a7b2c3..37c32d17856e 100644 --- a/drivers/net/wireless/microchip/wilc1000/wlan.c +++ b/drivers/net/wireless/microchip/wilc1000/wlan.c @@ -712,7 +712,6 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count) u32 *vmm_table = wilc->vmm_table; u8 ac_pkt_num_to_chip[NQUEUES] = {0, 0, 0, 0}; const struct wilc_hif_func *func; - int srcu_idx; u8 *txb = wilc->tx_buffer; struct wilc_vif *vif; @@ -724,10 +723,10 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count) mutex_lock(&wilc->txq_add_to_head_cs); - srcu_idx = srcu_read_lock(&wilc->srcu); + rcu_read_lock(); wilc_for_each_vif(wilc, vif) wilc_wlan_txq_filter_dup_tcp_ack(vif->ndev); - srcu_read_unlock(&wilc->srcu, srcu_idx); + rcu_read_unlock(); for (ac = 0; ac < NQUEUES; ac++) tqe_q[ac] = wilc_wlan_txq_get_first(wilc, ac); diff --git a/drivers/net/wireless/quantenna/qtnfmac/bus.h b/drivers/net/wireless/quantenna/qtnfmac/bus.h index 3334c45aac13..7f8646e77ee0 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/bus.h +++ b/drivers/net/wireless/quantenna/qtnfmac/bus.h @@ -59,7 +59,7 @@ struct qtnf_bus { struct qtnf_qlink_transport trans; struct qtnf_hw_info hw_info; struct napi_struct mux_napi; - struct net_device mux_dev; + struct net_device *mux_dev; struct 
workqueue_struct *workqueue; struct workqueue_struct *hprio_workqueue; struct work_struct fw_work; diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c index 677bac835330..825b05dd3271 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.c +++ b/drivers/net/wireless/quantenna/qtnfmac/core.c @@ -196,27 +196,12 @@ static int qtnf_netdev_port_parent_id(struct net_device *ndev, return 0; } -static int qtnf_netdev_alloc_pcpu_stats(struct net_device *dev) -{ - dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); - - return dev->tstats ? 0 : -ENOMEM; -} - -static void qtnf_netdev_free_pcpu_stats(struct net_device *dev) -{ - free_percpu(dev->tstats); -} - /* Network device ops handlers */ const struct net_device_ops qtnf_netdev_ops = { - .ndo_init = qtnf_netdev_alloc_pcpu_stats, - .ndo_uninit = qtnf_netdev_free_pcpu_stats, .ndo_open = qtnf_netdev_open, .ndo_stop = qtnf_netdev_close, .ndo_start_xmit = qtnf_netdev_hard_start_xmit, .ndo_tx_timeout = qtnf_netdev_tx_timeout, - .ndo_get_stats64 = dev_get_tstats64, .ndo_set_mac_address = qtnf_netdev_set_mac_address, .ndo_get_port_parent_id = qtnf_netdev_port_parent_id, }; @@ -483,6 +468,7 @@ int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *vif, dev->watchdog_timeo = QTNF_DEF_WDOG_TIMEOUT; dev->tx_queue_len = 100; dev->ethtool_ops = &qtnf_ethtool_ops; + dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; if (qtnf_hwcap_is_set(&mac->bus->hw_info, QLINK_HW_CAPAB_HW_BRIDGE)) dev->needed_tailroom = sizeof(struct qtnf_frame_meta_info); diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c index 9ad4c120fa28..f66eb43094d4 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c @@ -372,7 +372,12 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto error; } - init_dummy_netdev(&bus->mux_dev); + bus->mux_dev = alloc_netdev_dummy(0); + if (!bus->mux_dev) { + ret = -ENOMEM; + goto error; + } + qtnf_pcie_init_irq(pcie_priv, use_msi); pcie_priv->sysctl_bar = sysctl_bar; pcie_priv->dmareg_bar = dmareg_bar; @@ -381,11 +386,13 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) ret = pcie_priv->probe_cb(bus, tx_bd_size_param, rx_bd_size_param); if (ret) - goto error; + goto error_free; qtnf_pcie_bringup_fw_async(bus); return 0; +error_free: + free_netdev(bus->mux_dev); error: destroy_workqueue(pcie_priv->workqueue); pci_set_drvdata(pdev, NULL); @@ -417,6 +424,7 @@ static void qtnf_pcie_remove(struct pci_dev *dev) netif_napi_del(&bus->mux_napi); destroy_workqueue(priv->workqueue); tasklet_kill(&priv->reclaim_tq); + free_netdev(bus->mux_dev); qtnf_pcie_free_shm_ipc(priv); qtnf_debugfs_remove(bus); diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c index 8c23a77d1671..c1a53e1ba3be 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c @@ -761,12 +761,12 @@ static int qtnf_pcie_pearl_rx_poll(struct napi_struct *napi, int budget) napi_gro_receive(napi, skb); } else { pr_debug("drop untagged skb\n"); - bus->mux_dev.stats.rx_dropped++; + bus->mux_dev->stats.rx_dropped++; dev_kfree_skb_any(skb); } } else { if (skb) { - bus->mux_dev.stats.rx_dropped++; + bus->mux_dev->stats.rx_dropped++; dev_kfree_skb_any(skb); } } @@ -1146,7 +1146,7 @@ static int 
qtnf_pcie_pearl_probe(struct qtnf_bus *bus, unsigned int tx_bd_size, } tasklet_setup(&ps->base.reclaim_tq, qtnf_pearl_reclaim_tasklet_fn); - netif_napi_add_weight(&bus->mux_dev, &bus->mux_napi, + netif_napi_add_weight(bus->mux_dev, &bus->mux_napi, qtnf_pcie_pearl_rx_poll, 10); ipc_int.fn = qtnf_pcie_pearl_ipc_gen_ep_int; diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c index d83362578374..ef5c069542d4 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c @@ -667,12 +667,12 @@ static int qtnf_topaz_rx_poll(struct napi_struct *napi, int budget) netif_receive_skb(skb); } else { pr_debug("drop untagged skb\n"); - bus->mux_dev.stats.rx_dropped++; + bus->mux_dev->stats.rx_dropped++; dev_kfree_skb_any(skb); } } else { if (skb) { - bus->mux_dev.stats.rx_dropped++; + bus->mux_dev->stats.rx_dropped++; dev_kfree_skb_any(skb); } } @@ -1159,7 +1159,7 @@ static int qtnf_pcie_topaz_probe(struct qtnf_bus *bus, } tasklet_setup(&ts->base.reclaim_tq, qtnf_reclaim_tasklet_fn); - netif_napi_add_weight(&bus->mux_dev, &bus->mux_napi, + netif_napi_add_weight(bus->mux_dev, &bus->mux_napi, qtnf_topaz_rx_poll, 10); ipc_int.fn = qtnf_topaz_ipc_gen_ep_int; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c b/drivers/net/wireless/realtek/rtl8xxxu/8188e.c index afe9cc1b49dc..3d04df0f5bf4 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/8188e.c @@ -13,24 +13,8 @@ * additional 8xxx chips like the 8192cu, 8188cus, etc. */ -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/errno.h> -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/spinlock.h> -#include <linux/list.h> -#include <linux/usb.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/ethtool.h> -#include <linux/wireless.h> -#include <linux/firmware.h> -#include <linux/moduleparam.h> -#include <net/mac80211.h> +#include "regs.h" #include "rtl8xxxu.h" -#include "rtl8xxxu_regs.h" static const struct rtl8xxxu_reg8val rtl8188e_mac_init_table[] = { {0x026, 0x41}, {0x027, 0x35}, {0x040, 0x00}, {0x421, 0x0f}, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c b/drivers/net/wireless/realtek/rtl8xxxu/8188f.c index 464216d007ce..bd5a0603b4a2 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/8188f.c @@ -11,24 +11,8 @@ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. 
*/ -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/errno.h> -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/spinlock.h> -#include <linux/list.h> -#include <linux/usb.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/ethtool.h> -#include <linux/wireless.h> -#include <linux/firmware.h> -#include <linux/moduleparam.h> -#include <net/mac80211.h> +#include "regs.h" #include "rtl8xxxu.h" -#include "rtl8xxxu_regs.h" static const struct rtl8xxxu_reg8val rtl8188f_mac_init_table[] = { {0x024, 0xDF}, {0x025, 0x07}, {0x02B, 0x1C}, {0x283, 0x20}, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c b/drivers/net/wireless/realtek/rtl8xxxu/8192c.c index 3ee7d8f87da6..0abb1b092bc2 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/8192c.c @@ -13,24 +13,8 @@ * additional 8xxx chips like the 8192cu, 8188cus, etc. */ -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/errno.h> -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/spinlock.h> -#include <linux/list.h> -#include <linux/usb.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/ethtool.h> -#include <linux/wireless.h> -#include <linux/firmware.h> -#include <linux/moduleparam.h> -#include <net/mac80211.h> +#include "regs.h" #include "rtl8xxxu.h" -#include "rtl8xxxu_regs.h" #ifdef CONFIG_RTL8XXXU_UNTESTED static struct rtl8xxxu_power_base rtl8192c_power_base = { @@ -77,6 +61,32 @@ static struct rtl8xxxu_power_base rtl8188r_power_base = { .reg_0868 = 0x00020204, }; +static const struct rtl8xxxu_reg8val rtl8192cu_mac_init_table[] = { + {0x420, 0x80}, {0x423, 0x00}, {0x430, 0x00}, {0x431, 0x00}, + {0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05}, + {0x436, 0x06}, {0x437, 0x07}, {0x438, 0x00}, {0x439, 0x00}, + {0x43a, 0x00}, {0x43b, 0x01}, {0x43c, 0x04}, {0x43d, 0x05}, + {0x43e, 0x06}, {0x43f, 0x07}, {0x440, 0x5d}, {0x441, 0x01}, + {0x442, 0x00}, {0x444, 0x15}, {0x445, 0xf0}, {0x446, 0x0f}, + {0x447, 0x00}, {0x458, 0x41}, {0x459, 0xa8}, {0x45a, 0x72}, + {0x45b, 0xb9}, {0x460, 0x66}, {0x461, 0x66}, {0x462, 0x08}, + {0x463, 0x03}, {0x4c8, 0xff}, {0x4c9, 0x08}, {0x4cc, 0xff}, + {0x4cd, 0xff}, {0x4ce, 0x01}, {0x500, 0x26}, {0x501, 0xa2}, + {0x502, 0x2f}, {0x503, 0x00}, {0x504, 0x28}, {0x505, 0xa3}, + {0x506, 0x5e}, {0x507, 0x00}, {0x508, 0x2b}, {0x509, 0xa4}, + {0x50a, 0x5e}, {0x50b, 0x00}, {0x50c, 0x4f}, {0x50d, 0xa4}, + {0x50e, 0x00}, {0x50f, 0x00}, {0x512, 0x1c}, {0x514, 0x0a}, + {0x515, 0x10}, {0x516, 0x0a}, {0x517, 0x10}, {0x51a, 0x16}, + {0x524, 0x0f}, {0x525, 0x4f}, {0x546, 0x40}, {0x547, 0x00}, + {0x550, 0x10}, {0x551, 0x10}, {0x559, 0x02}, {0x55a, 0x02}, + {0x55d, 0xff}, {0x605, 0x30}, {0x608, 0x0e}, {0x609, 0x2a}, + {0x652, 0x20}, {0x652, 0x20}, {0x63c, 0x08}, {0x63d, 0x08}, + {0x63e, 0x0c}, {0x63f, 0x0c}, {0x66e, 0x05}, {0x700, 0x21}, + {0x701, 0x43}, {0x702, 0x65}, {0x703, 0x87}, {0x708, 0x21}, + {0x709, 0x43}, {0x70a, 0x65}, {0x70b, 0x87}, + {0xffff, 0xff}, +}; + static const struct rtl8xxxu_rfregval rtl8192cu_radioa_2t_init_table[] = { {0x00, 0x00030159}, {0x01, 0x00031284}, {0x02, 0x00098000}, {0x03, 0x00018c63}, @@ -583,6 +593,26 @@ static int rtl8192cu_power_on(struct rtl8xxxu_priv *priv) return 0; } +static int rtl8192cu_led_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct rtl8xxxu_priv *priv = container_of(led_cdev, + struct 
rtl8xxxu_priv, + led_cdev); + u8 ledcfg = rtl8xxxu_read8(priv, REG_LEDCFG0); + + if (brightness == LED_OFF) + ledcfg = LEDCFG2_SW_LED_CONTROL | LEDCFG2_SW_LED_DISABLE; + else if (brightness == LED_ON) + ledcfg = LEDCFG2_SW_LED_CONTROL; + else if (brightness == RTL8XXXU_HW_LED_CONTROL) + ledcfg = LEDCFG2_HW_LED_CONTROL | LEDCFG2_HW_LED_ENABLE; + + rtl8xxxu_write8(priv, REG_LEDCFG0, ledcfg); + + return 0; +} + struct rtl8xxxu_fileops rtl8192cu_fops = { .identify_chip = rtl8192cu_identify_chip, .parse_efuse = rtl8192cu_parse_efuse, @@ -609,6 +639,7 @@ struct rtl8xxxu_fileops rtl8192cu_fops = { .report_rssi = rtl8xxxu_gen1_report_rssi, .fill_txdesc = rtl8xxxu_fill_txdesc_v1, .cck_rssi = rtl8723a_cck_rssi, + .led_classdev_brightness_set = rtl8192cu_led_brightness_set, .writeN_block_size = 128, .rx_agg_buf_size = 16000, .tx_desc_size = sizeof(struct rtl8xxxu_txdesc32), @@ -621,7 +652,7 @@ struct rtl8xxxu_fileops rtl8192cu_fops = { .trxff_boundary = 0x27ff, .pbp_rx = PBP_PAGE_SIZE_128, .pbp_tx = PBP_PAGE_SIZE_128, - .mactable = rtl8xxxu_gen1_mac_init_table, + .mactable = rtl8192cu_mac_init_table, .total_page_num = TX_TOTAL_PAGE_NUM, .page_num_hi = TX_PAGE_NUM_HI_PQ, .page_num_lo = TX_PAGE_NUM_LO_PQ, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/8192e.c index 63b73ace27ec..8e123bbfc665 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/8192e.c @@ -13,24 +13,8 @@ * additional 8xxx chips like the 8192cu, 8188cus, etc. */ -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/errno.h> -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/spinlock.h> -#include <linux/list.h> -#include <linux/usb.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/ethtool.h> -#include <linux/wireless.h> -#include <linux/firmware.h> -#include <linux/moduleparam.h> -#include <net/mac80211.h> +#include "regs.h" #include "rtl8xxxu.h" -#include "rtl8xxxu_regs.h" static const struct rtl8xxxu_reg8val rtl8192e_mac_init_table[] = { {0x011, 0xeb}, {0x012, 0x07}, {0x014, 0x75}, {0x303, 0xa7}, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c b/drivers/net/wireless/realtek/rtl8xxxu/8192f.c index 21e4204769d0..cd2156b7a57a 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/8192f.c @@ -11,24 +11,8 @@ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. */ -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/errno.h> -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/spinlock.h> -#include <linux/list.h> -#include <linux/usb.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/ethtool.h> -#include <linux/wireless.h> -#include <linux/firmware.h> -#include <linux/moduleparam.h> -#include <net/mac80211.h> +#include "regs.h" #include "rtl8xxxu.h" -#include "rtl8xxxu_regs.h" static const struct rtl8xxxu_reg8val rtl8192f_mac_init_table[] = { {0x420, 0x00}, {0x422, 0x78}, {0x428, 0x0a}, {0x429, 0x10}, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c b/drivers/net/wireless/realtek/rtl8xxxu/8710b.c index 46d57510e9fc..11c63c320eae 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/8710b.c @@ -11,24 +11,8 @@ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. 
*/ -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/errno.h> -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/spinlock.h> -#include <linux/list.h> -#include <linux/usb.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/ethtool.h> -#include <linux/wireless.h> -#include <linux/firmware.h> -#include <linux/moduleparam.h> -#include <net/mac80211.h> +#include "regs.h" #include "rtl8xxxu.h" -#include "rtl8xxxu_regs.h" static const struct rtl8xxxu_reg8val rtl8710b_mac_init_table[] = { {0x421, 0x0F}, {0x428, 0x0A}, {0x429, 0x10}, {0x430, 0x00}, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c b/drivers/net/wireless/realtek/rtl8xxxu/8723a.c index ad1bb9377ca2..ecbc324e4609 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/8723a.c @@ -13,24 +13,8 @@ * additional 8xxx chips like the 8192cu, 8188cus, etc. */ -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/errno.h> -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/spinlock.h> -#include <linux/list.h> -#include <linux/usb.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/ethtool.h> -#include <linux/wireless.h> -#include <linux/firmware.h> -#include <linux/moduleparam.h> -#include <net/mac80211.h> +#include "regs.h" #include "rtl8xxxu.h" -#include "rtl8xxxu_regs.h" static struct rtl8xxxu_power_base rtl8723a_power_base = { .reg_0e00 = 0x0a0c0c0c, @@ -54,6 +38,31 @@ static struct rtl8xxxu_power_base rtl8723a_power_base = { .reg_0868 = 0x02040608, }; +static const struct rtl8xxxu_reg8val rtl8723au_mac_init_table[] = { + {0x420, 0x80}, {0x423, 0x00}, {0x430, 0x00}, {0x431, 0x00}, + {0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05}, + {0x436, 0x06}, {0x437, 0x07}, {0x438, 0x00}, {0x439, 0x00}, + {0x43a, 0x00}, {0x43b, 0x01}, {0x43c, 0x04}, {0x43d, 0x05}, + {0x43e, 0x06}, {0x43f, 0x07}, {0x440, 0x5d}, {0x441, 0x01}, + {0x442, 0x00}, {0x444, 0x15}, {0x445, 0xf0}, {0x446, 0x0f}, + {0x447, 0x00}, {0x458, 0x41}, {0x459, 0xa8}, {0x45a, 0x72}, + {0x45b, 0xb9}, {0x460, 0x66}, {0x461, 0x66}, {0x462, 0x08}, + {0x463, 0x03}, {0x4c8, 0xff}, {0x4c9, 0x08}, {0x4cc, 0xff}, + {0x4cd, 0xff}, {0x4ce, 0x01}, {0x500, 0x26}, {0x501, 0xa2}, + {0x502, 0x2f}, {0x503, 0x00}, {0x504, 0x28}, {0x505, 0xa3}, + {0x506, 0x5e}, {0x507, 0x00}, {0x508, 0x2b}, {0x509, 0xa4}, + {0x50a, 0x5e}, {0x50b, 0x00}, {0x50c, 0x4f}, {0x50d, 0xa4}, + {0x50e, 0x00}, {0x50f, 0x00}, {0x512, 0x1c}, {0x514, 0x0a}, + {0x515, 0x10}, {0x516, 0x0a}, {0x517, 0x10}, {0x51a, 0x16}, + {0x524, 0x0f}, {0x525, 0x4f}, {0x546, 0x40}, {0x547, 0x00}, + {0x550, 0x10}, {0x551, 0x10}, {0x559, 0x02}, {0x55a, 0x02}, + {0x55d, 0xff}, {0x605, 0x30}, {0x608, 0x0e}, {0x609, 0x2a}, + {0x652, 0x20}, {0x63c, 0x0a}, {0x63d, 0x0a}, {0x63e, 0x0e}, + {0x63f, 0x0e}, {0x66e, 0x05}, {0x700, 0x21}, {0x701, 0x43}, + {0x702, 0x65}, {0x703, 0x87}, {0x708, 0x21}, {0x709, 0x43}, + {0x70a, 0x65}, {0x70b, 0x87}, {0xffff, 0xff}, +}; + static const struct rtl8xxxu_rfregval rtl8723au_radioa_1t_init_table[] = { {0x00, 0x00030159}, {0x01, 0x00031284}, {0x02, 0x00098000}, {0x03, 0x00039c63}, @@ -518,7 +527,7 @@ struct rtl8xxxu_fileops rtl8723au_fops = { .trxff_boundary = 0x27ff, .pbp_rx = PBP_PAGE_SIZE_128, .pbp_tx = PBP_PAGE_SIZE_128, - .mactable = rtl8xxxu_gen1_mac_init_table, + .mactable = rtl8723au_mac_init_table, .total_page_num = TX_TOTAL_PAGE_NUM, .page_num_hi = 
TX_PAGE_NUM_HI_PQ, .page_num_lo = TX_PAGE_NUM_LO_PQ, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/8723b.c index 9640c841d20a..cc2e60b06f64 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/8723b.c @@ -13,24 +13,8 @@ * additional 8xxx chips like the 8192cu, 8188cus, etc. */ -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/errno.h> -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/spinlock.h> -#include <linux/list.h> -#include <linux/usb.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/ethtool.h> -#include <linux/wireless.h> -#include <linux/firmware.h> -#include <linux/moduleparam.h> -#include <net/mac80211.h> +#include "regs.h" #include "rtl8xxxu.h" -#include "rtl8xxxu_regs.h" static const struct rtl8xxxu_reg8val rtl8723b_mac_init_table[] = { {0x02f, 0x30}, {0x035, 0x00}, {0x039, 0x08}, {0x04e, 0xe0}, @@ -1701,6 +1685,28 @@ static s8 rtl8723b_cck_rssi(struct rtl8xxxu_priv *priv, struct rtl8723au_phy_sta return rx_pwr_all; } +static int rtl8723bu_led_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct rtl8xxxu_priv *priv = container_of(led_cdev, + struct rtl8xxxu_priv, + led_cdev); + u8 ledcfg = rtl8xxxu_read8(priv, REG_LEDCFG2); + + ledcfg &= LEDCFG2_DPDT_SELECT; + + if (brightness == LED_OFF) + ledcfg |= LEDCFG2_SW_LED_CONTROL | LEDCFG2_SW_LED_DISABLE; + else if (brightness == LED_ON) + ledcfg |= LEDCFG2_SW_LED_CONTROL; + else if (brightness == RTL8XXXU_HW_LED_CONTROL) + ledcfg |= LEDCFG2_HW_LED_CONTROL | LEDCFG2_HW_LED_ENABLE; + + rtl8xxxu_write8(priv, REG_LEDCFG2, ledcfg); + + return 0; +} + struct rtl8xxxu_fileops rtl8723bu_fops = { .identify_chip = rtl8723bu_identify_chip, .parse_efuse = rtl8723bu_parse_efuse, @@ -1731,6 +1737,7 @@ struct rtl8xxxu_fileops rtl8723bu_fops = { .fill_txdesc = rtl8xxxu_fill_txdesc_v2, .set_crystal_cap = rtl8723a_set_crystal_cap, .cck_rssi = rtl8723b_cck_rssi, + .led_classdev_brightness_set = rtl8723bu_led_brightness_set, .writeN_block_size = 1024, .tx_desc_size = sizeof(struct rtl8xxxu_txdesc40), .rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24), diff --git a/drivers/net/wireless/realtek/rtl8xxxu/Makefile b/drivers/net/wireless/realtek/rtl8xxxu/Makefile index fa466589eccb..580a2fa675ee 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/Makefile +++ b/drivers/net/wireless/realtek/rtl8xxxu/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_RTL8XXXU) += rtl8xxxu.o -rtl8xxxu-y := rtl8xxxu_core.o rtl8xxxu_8192e.o rtl8xxxu_8723b.o \ - rtl8xxxu_8723a.o rtl8xxxu_8192c.o rtl8xxxu_8188f.o \ - rtl8xxxu_8188e.o rtl8xxxu_8710b.o rtl8xxxu_8192f.o +rtl8xxxu-y := core.o 8192e.o 8723b.o \ + 8723a.o 8192c.o 8188f.o \ + 8188e.o 8710b.o 8192f.o diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/core.c index 4a49f8f9d80f..89a841b4e8d5 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c @@ -13,24 +13,9 @@ * additional 8xxx chips like the 8192cu, 8188cus, etc. 
*/ -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/errno.h> -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/spinlock.h> -#include <linux/list.h> -#include <linux/usb.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/ethtool.h> -#include <linux/wireless.h> #include <linux/firmware.h> -#include <linux/moduleparam.h> -#include <net/mac80211.h> +#include "regs.h" #include "rtl8xxxu.h" -#include "rtl8xxxu_regs.h" #define DRIVER_NAME "rtl8xxxu" @@ -132,31 +117,6 @@ static struct ieee80211_supported_band rtl8xxxu_supported_band = { .n_bitrates = ARRAY_SIZE(rtl8xxxu_rates), }; -const struct rtl8xxxu_reg8val rtl8xxxu_gen1_mac_init_table[] = { - {0x420, 0x80}, {0x423, 0x00}, {0x430, 0x00}, {0x431, 0x00}, - {0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05}, - {0x436, 0x06}, {0x437, 0x07}, {0x438, 0x00}, {0x439, 0x00}, - {0x43a, 0x00}, {0x43b, 0x01}, {0x43c, 0x04}, {0x43d, 0x05}, - {0x43e, 0x06}, {0x43f, 0x07}, {0x440, 0x5d}, {0x441, 0x01}, - {0x442, 0x00}, {0x444, 0x15}, {0x445, 0xf0}, {0x446, 0x0f}, - {0x447, 0x00}, {0x458, 0x41}, {0x459, 0xa8}, {0x45a, 0x72}, - {0x45b, 0xb9}, {0x460, 0x66}, {0x461, 0x66}, {0x462, 0x08}, - {0x463, 0x03}, {0x4c8, 0xff}, {0x4c9, 0x08}, {0x4cc, 0xff}, - {0x4cd, 0xff}, {0x4ce, 0x01}, {0x500, 0x26}, {0x501, 0xa2}, - {0x502, 0x2f}, {0x503, 0x00}, {0x504, 0x28}, {0x505, 0xa3}, - {0x506, 0x5e}, {0x507, 0x00}, {0x508, 0x2b}, {0x509, 0xa4}, - {0x50a, 0x5e}, {0x50b, 0x00}, {0x50c, 0x4f}, {0x50d, 0xa4}, - {0x50e, 0x00}, {0x50f, 0x00}, {0x512, 0x1c}, {0x514, 0x0a}, - {0x515, 0x10}, {0x516, 0x0a}, {0x517, 0x10}, {0x51a, 0x16}, - {0x524, 0x0f}, {0x525, 0x4f}, {0x546, 0x40}, {0x547, 0x00}, - {0x550, 0x10}, {0x551, 0x10}, {0x559, 0x02}, {0x55a, 0x02}, - {0x55d, 0xff}, {0x605, 0x30}, {0x608, 0x0e}, {0x609, 0x2a}, - {0x652, 0x20}, {0x63c, 0x0a}, {0x63d, 0x0a}, {0x63e, 0x0e}, - {0x63f, 0x0e}, {0x66e, 0x05}, {0x700, 0x21}, {0x701, 0x43}, - {0x702, 0x65}, {0x703, 0x87}, {0x708, 0x21}, {0x709, 0x43}, - {0x70a, 0x65}, {0x70b, 0x87}, {0xffff, 0xff}, -}; - static const struct rtl8xxxu_reg32val rtl8723a_phy_1t_init_table[] = { {0x800, 0x80040000}, {0x804, 0x00000003}, {0x808, 0x0000fc00}, {0x80c, 0x0000000a}, @@ -1505,13 +1465,13 @@ rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) u8 cck[RTL8723A_MAX_RF_PATHS], ofdm[RTL8723A_MAX_RF_PATHS]; u8 ofdmbase[RTL8723A_MAX_RF_PATHS], mcsbase[RTL8723A_MAX_RF_PATHS]; u32 val32, ofdm_a, ofdm_b, mcs_a, mcs_b; - u8 val8; + u8 val8, base; int group, i; group = rtl8xxxu_gen1_channel_to_group(channel); - cck[0] = priv->cck_tx_power_index_A[group] - 1; - cck[1] = priv->cck_tx_power_index_B[group] - 1; + cck[0] = priv->cck_tx_power_index_A[group]; + cck[1] = priv->cck_tx_power_index_B[group]; if (priv->hi_pa) { if (cck[0] > 0x20) @@ -1522,10 +1482,6 @@ rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) ofdm[0] = priv->ht40_1s_tx_power_index_A[group]; ofdm[1] = priv->ht40_1s_tx_power_index_B[group]; - if (ofdm[0]) - ofdm[0] -= 1; - if (ofdm[1]) - ofdm[1] -= 1; ofdmbase[0] = ofdm[0] + priv->ofdm_tx_power_index_diff[group].a; ofdmbase[1] = ofdm[1] + priv->ofdm_tx_power_index_diff[group].b; @@ -1614,20 +1570,19 @@ rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS15_MCS12, mcs_a + power_base->reg_0e1c); + val8 = u32_get_bits(mcs_a + power_base->reg_0e1c, 0xff000000); for (i = 0; i < 3; i++) { - if (i != 2) - val8 = (mcsbase[0] > 8) ? 
(mcsbase[0] - 8) : 0; - else - val8 = (mcsbase[0] > 6) ? (mcsbase[0] - 6) : 0; + base = i != 2 ? 8 : 6; + val8 = max_t(int, val8 - base, 0); rtl8xxxu_write8(priv, REG_OFDM0_XC_TX_IQ_IMBALANCE + i, val8); } + rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS15_MCS12, mcs_b + power_base->reg_0868); + val8 = u32_get_bits(mcs_b + power_base->reg_0868, 0xff000000); for (i = 0; i < 3; i++) { - if (i != 2) - val8 = (mcsbase[1] > 8) ? (mcsbase[1] - 8) : 0; - else - val8 = (mcsbase[1] > 6) ? (mcsbase[1] - 6) : 0; + base = i != 2 ? 8 : 6; + val8 = max_t(int, val8 - base, 0); rtl8xxxu_write8(priv, REG_OFDM0_XD_TX_IQ_IMBALANCE + i, val8); } } @@ -4385,7 +4340,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) /* Let the 8051 take control of antenna setting */ if (priv->rtl_chip != RTL8192E && priv->rtl_chip != RTL8188F && - priv->rtl_chip != RTL8710B) { + priv->rtl_chip != RTL8710B && priv->rtl_chip != RTL8192C) { val8 = rtl8xxxu_read8(priv, REG_LEDCFG2); val8 |= LEDCFG2_DPDT_SELECT; rtl8xxxu_write8(priv, REG_LEDCFG2, val8); @@ -6473,7 +6428,8 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb) rx_status->mactime = rx_desc->tsfl; rx_status->flag |= RX_FLAG_MACTIME_START; - if (!rx_desc->swdec) + if (!rx_desc->swdec && + rx_desc->security != RX_DESC_ENC_NONE) rx_status->flag |= RX_FLAG_DECRYPTED; if (rx_desc->crc32) rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; @@ -6578,7 +6534,8 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb) rx_status->mactime = rx_desc->tsfl; rx_status->flag |= RX_FLAG_MACTIME_START; - if (!rx_desc->swdec) + if (!rx_desc->swdec && + rx_desc->security != RX_DESC_ENC_NONE) rx_status->flag |= RX_FLAG_DECRYPTED; if (rx_desc->crc32) rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; @@ -7998,6 +7955,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface, ieee80211_hw_set(hw, HAS_RATE_CONTROL); ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); ieee80211_hw_set(hw, AMPDU_AGGREGATION); + ieee80211_hw_set(hw, MFP_CAPABLE); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/regs.h index 61c0c0ec07b3..61c0c0ec07b3 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/regs.h diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index fd92d23c43d9..f42463e595cc 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -5,8 +5,9 @@ * Register definitions taken from original Realtek rtl8723au driver */ -#include <asm/byteorder.h> #include <linux/average.h> +#include <linux/usb.h> +#include <net/mac80211.h> #define RTL8XXXU_DEBUG_REG_WRITE 0x01 #define RTL8XXXU_DEBUG_REG_READ 0x02 @@ -122,6 +123,15 @@ enum rtl8xxxu_rx_type { RX_TYPE_ERROR = -1 }; +enum rtl8xxxu_rx_desc_enc { + RX_DESC_ENC_NONE = 0, + RX_DESC_ENC_WEP40 = 1, + RX_DESC_ENC_TKIP_WO_MIC = 2, + RX_DESC_ENC_TKIP_MIC = 3, + RX_DESC_ENC_AES = 4, + RX_DESC_ENC_WEP104 = 5, +}; + struct rtl8xxxu_rxdesc16 { #ifdef __LITTLE_ENDIAN u32 pktlen:14; @@ -2022,7 +2032,6 @@ struct rtl8xxxu_fileops { extern int rtl8xxxu_debug; -extern const struct rtl8xxxu_reg8val rtl8xxxu_gen1_mac_init_table[]; extern const u32 rtl8xxxu_iqk_phy_iq_bb_reg[]; u8 rtl8xxxu_read8(struct rtl8xxxu_priv *priv, u16 addr); u16 rtl8xxxu_read16(struct rtl8xxxu_priv *priv, u16 addr); diff --git 
a/drivers/net/wireless/realtek/rtlwifi/Kconfig b/drivers/net/wireless/realtek/rtlwifi/Kconfig index 9f6a4e35543c..cfe63f7b28d9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/Kconfig +++ b/drivers/net/wireless/realtek/rtlwifi/Kconfig @@ -37,6 +37,7 @@ config RTL8192SE config RTL8192DE tristate "Realtek RTL8192DE/RTL8188DE PCIe Wireless Network Adapter" depends on PCI + select RTL8192D_COMMON select RTLWIFI select RTLWIFI_PCI help @@ -142,6 +143,9 @@ config RTL8192C_COMMON depends on RTL8192CE || RTL8192CU default y +config RTL8192D_COMMON + tristate + config RTL8723_COMMON tristate depends on RTL8723AE || RTL8723BE diff --git a/drivers/net/wireless/realtek/rtlwifi/Makefile b/drivers/net/wireless/realtek/rtlwifi/Makefile index 09c30e428375..423981b148df 100644 --- a/drivers/net/wireless/realtek/rtlwifi/Makefile +++ b/drivers/net/wireless/realtek/rtlwifi/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c/ obj-$(CONFIG_RTL8192CE) += rtl8192ce/ obj-$(CONFIG_RTL8192CU) += rtl8192cu/ obj-$(CONFIG_RTL8192SE) += rtl8192se/ +obj-$(CONFIG_RTL8192D_COMMON) += rtl8192d/ obj-$(CONFIG_RTL8192DE) += rtl8192de/ obj-$(CONFIG_RTL8723AE) += rtl8723ae/ obj-$(CONFIG_RTL8723BE) += rtl8723be/ diff --git a/drivers/net/wireless/realtek/rtlwifi/cam.c b/drivers/net/wireless/realtek/rtlwifi/cam.c index 32970ea4b4e7..f9d0d1394442 100644 --- a/drivers/net/wireless/realtek/rtlwifi/cam.c +++ b/drivers/net/wireless/realtek/rtlwifi/cam.c @@ -18,7 +18,8 @@ void rtl_cam_reset_sec_info(struct ieee80211_hw *hw) } static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no, - u8 *mac_addr, u8 *key_cont_128, u16 us_config) + const u8 *mac_addr, u8 *key_cont_128, + u16 us_config) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -94,7 +95,7 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no, "after set key, usconfig:%x\n", us_config); } -u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr, +u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, const u8 *mac_addr, u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg, u32 ul_default_key, u8 *key_content) { diff --git a/drivers/net/wireless/realtek/rtlwifi/cam.h b/drivers/net/wireless/realtek/rtlwifi/cam.h index 2461fa9afda0..144807a405b7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/cam.h +++ b/drivers/net/wireless/realtek/rtlwifi/cam.h @@ -14,9 +14,9 @@ #define CAM_CONFIG_NO_USEDK 0 void rtl_cam_reset_all_entry(struct ieee80211_hw *hw); -u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr, - u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg, - u32 ul_default_key, u8 *key_content); +u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, const u8 *mac_addr, + u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg, + u32 ul_default_key, u8 *key_content); int rtl_cam_delete_one_entry(struct ieee80211_hw *hw, u8 *mac_addr, u32 ul_key_id); void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index); diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c index c1fbc29d5ca1..82cf5fb5175f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/efuse.c +++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c @@ -1211,7 +1211,7 @@ static u8 efuse_calculate_word_cnts(u8 word_en) } int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv, - int max_size, u8 *hwinfo, int *params) + int max_size, u8 *hwinfo, const int *params) { struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.h 
b/drivers/net/wireless/realtek/rtlwifi/efuse.h index 4821625ad1e5..e250ffb0f4b2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/efuse.h +++ b/drivers/net/wireless/realtek/rtlwifi/efuse.h @@ -89,7 +89,7 @@ void efuse_force_write_vendor_id(struct ieee80211_hw *hw); void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx); void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate); int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv, - int max_size, u8 *hwinfo, int *params); + int max_size, u8 *hwinfo, const int *params); void rtl_fill_dummy(u8 *pfwbuf, u32 *pfwlen); void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, u8 *buffer, u32 size); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c index 4217c9a08d01..0195c9a3e9e8 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c @@ -489,7 +489,6 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw) } static void _rtl92cu_init_queue_reserved_page(struct ieee80211_hw *hw, - bool wmm_enable, u8 out_ep_num, u8 queue_sel) { @@ -505,66 +504,39 @@ static void _rtl92cu_init_queue_reserved_page(struct ieee80211_hw *hw, u8 value8; u32 txqpagenum, txqpageunit, txqremaininpage; - if (!wmm_enable) { - numpubq = (ischipn) ? CHIP_B_PAGE_NUM_PUBQ : - CHIP_A_PAGE_NUM_PUBQ; - txqpagenum = TX_TOTAL_PAGE_NUMBER - numpubq; - - txqpageunit = txqpagenum / outepnum; - txqremaininpage = txqpagenum % outepnum; - if (queue_sel & TX_SELE_HQ) - numhq = txqpageunit; - if (queue_sel & TX_SELE_LQ) - numlq = txqpageunit; - /* HIGH priority queue always present in the configuration of - * 2 out-ep. Remainder pages have assigned to High queue */ - if (outepnum > 1 && txqremaininpage) - numhq += txqremaininpage; - /* NOTE: This step done before writing REG_RQPN. */ - if (ischipn) { - if (queue_sel & TX_SELE_NQ) - numnq = txqpageunit; - value8 = (u8)_NPQ(numnq); - rtl_write_byte(rtlpriv, REG_RQPN_NPQ, value8); - } - } else { - /* for WMM ,number of out-ep must more than or equal to 2! */ - numpubq = ischipn ? WMM_CHIP_B_PAGE_NUM_PUBQ : - WMM_CHIP_A_PAGE_NUM_PUBQ; - if (queue_sel & TX_SELE_HQ) { - numhq = ischipn ? WMM_CHIP_B_PAGE_NUM_HPQ : - WMM_CHIP_A_PAGE_NUM_HPQ; - } - if (queue_sel & TX_SELE_LQ) { - numlq = ischipn ? WMM_CHIP_B_PAGE_NUM_LPQ : - WMM_CHIP_A_PAGE_NUM_LPQ; - } - /* NOTE: This step done before writing REG_RQPN. */ - if (ischipn) { - if (queue_sel & TX_SELE_NQ) - numnq = WMM_CHIP_B_PAGE_NUM_NPQ; - value8 = (u8)_NPQ(numnq); - rtl_write_byte(rtlpriv, REG_RQPN_NPQ, value8); - } + numpubq = (ischipn) ? CHIP_B_PAGE_NUM_PUBQ : + CHIP_A_PAGE_NUM_PUBQ; + txqpagenum = TX_TOTAL_PAGE_NUMBER - numpubq; + + txqpageunit = txqpagenum / outepnum; + txqremaininpage = txqpagenum % outepnum; + if (queue_sel & TX_SELE_HQ) + numhq = txqpageunit; + if (queue_sel & TX_SELE_LQ) + numlq = txqpageunit; + /* HIGH priority queue always present in the configuration of + * 2 out-ep. Remainder pages have assigned to High queue. + */ + if (outepnum > 1 && txqremaininpage) + numhq += txqremaininpage; + /* NOTE: This step done before writing REG_RQPN. 
*/ + if (ischipn) { + if (queue_sel & TX_SELE_NQ) + numnq = txqpageunit; + value8 = (u8)_NPQ(numnq); + rtl_write_byte(rtlpriv, REG_RQPN_NPQ, value8); } /* TX DMA */ value32 = _HPQ(numhq) | _LPQ(numlq) | _PUBQ(numpubq) | LD_RQPN; rtl_write_dword(rtlpriv, REG_RQPN, value32); } -static void _rtl92c_init_trx_buffer(struct ieee80211_hw *hw, bool wmm_enable) +static void _rtl92c_init_trx_buffer(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - u8 txpktbuf_bndy; + u8 txpktbuf_bndy = TX_PAGE_BOUNDARY; u8 value8; - if (!wmm_enable) - txpktbuf_bndy = TX_PAGE_BOUNDARY; - else /* for WMM */ - txpktbuf_bndy = (IS_NORMAL_CHIP(rtlhal->version)) - ? WMM_CHIP_B_TX_PAGE_BOUNDARY - : WMM_CHIP_A_TX_PAGE_BOUNDARY; rtl_write_byte(rtlpriv, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy); rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy); rtl_write_byte(rtlpriv, REG_TXPKTBUF_WMAC_LBK_BF_HD, txpktbuf_bndy); @@ -589,7 +561,6 @@ static void _rtl92c_init_chipn_reg_priority(struct ieee80211_hw *hw, u16 beq, } static void _rtl92cu_init_chipn_one_out_ep_priority(struct ieee80211_hw *hw, - bool wmm_enable, u8 queue_sel) { u16 value; @@ -614,7 +585,6 @@ static void _rtl92cu_init_chipn_one_out_ep_priority(struct ieee80211_hw *hw, } static void _rtl92cu_init_chipn_two_out_ep_priority(struct ieee80211_hw *hw, - bool wmm_enable, u8 queue_sel) { u16 beq, bkq, viq, voq, mgtq, hiq; @@ -638,67 +608,47 @@ static void _rtl92cu_init_chipn_two_out_ep_priority(struct ieee80211_hw *hw, valuelow = QUEUE_NORMAL; break; } - if (!wmm_enable) { - beq = valuelow; - bkq = valuelow; - viq = valuehi; - voq = valuehi; - mgtq = valuehi; - hiq = valuehi; - } else {/* for WMM ,CONFIG_OUT_EP_WIFI_MODE */ - beq = valuehi; - bkq = valuelow; - viq = valuelow; - voq = valuehi; - mgtq = valuehi; - hiq = valuehi; - } + + beq = valuelow; + bkq = valuelow; + viq = valuehi; + voq = valuehi; + mgtq = valuehi; + hiq = valuehi; + _rtl92c_init_chipn_reg_priority(hw, beq, bkq, viq, voq, mgtq, hiq); pr_info("Tx queue select: 0x%02x\n", queue_sel); } static void _rtl92cu_init_chipn_three_out_ep_priority(struct ieee80211_hw *hw, - bool wmm_enable, u8 queue_sel) { u16 beq, bkq, viq, voq, mgtq, hiq; - if (!wmm_enable) { /* typical setting */ - beq = QUEUE_LOW; - bkq = QUEUE_LOW; - viq = QUEUE_NORMAL; - voq = QUEUE_HIGH; - mgtq = QUEUE_HIGH; - hiq = QUEUE_HIGH; - } else { /* for WMM */ - beq = QUEUE_LOW; - bkq = QUEUE_NORMAL; - viq = QUEUE_NORMAL; - voq = QUEUE_HIGH; - mgtq = QUEUE_HIGH; - hiq = QUEUE_HIGH; - } + beq = QUEUE_LOW; + bkq = QUEUE_LOW; + viq = QUEUE_NORMAL; + voq = QUEUE_HIGH; + mgtq = QUEUE_HIGH; + hiq = QUEUE_HIGH; + _rtl92c_init_chipn_reg_priority(hw, beq, bkq, viq, voq, mgtq, hiq); pr_info("Tx queue select :0x%02x..\n", queue_sel); } static void _rtl92cu_init_chipn_queue_priority(struct ieee80211_hw *hw, - bool wmm_enable, u8 out_ep_num, u8 queue_sel) { switch (out_ep_num) { case 1: - _rtl92cu_init_chipn_one_out_ep_priority(hw, wmm_enable, - queue_sel); + _rtl92cu_init_chipn_one_out_ep_priority(hw, queue_sel); break; case 2: - _rtl92cu_init_chipn_two_out_ep_priority(hw, wmm_enable, - queue_sel); + _rtl92cu_init_chipn_two_out_ep_priority(hw, queue_sel); break; case 3: - _rtl92cu_init_chipn_three_out_ep_priority(hw, wmm_enable, - queue_sel); + _rtl92cu_init_chipn_three_out_ep_priority(hw, queue_sel); break; default: WARN_ON(1); /* Shall not reach here! 
*/ @@ -707,7 +657,6 @@ static void _rtl92cu_init_chipn_queue_priority(struct ieee80211_hw *hw, } static void _rtl92cu_init_chipt_queue_priority(struct ieee80211_hw *hw, - bool wmm_enable, u8 out_ep_num, u8 queue_sel) { @@ -716,12 +665,7 @@ static void _rtl92cu_init_chipt_queue_priority(struct ieee80211_hw *hw, switch (out_ep_num) { case 2: /* (TX_SELE_HQ|TX_SELE_LQ) */ - if (!wmm_enable) /* typical setting */ - hq_sele = HQSEL_VOQ | HQSEL_VIQ | HQSEL_MGTQ | - HQSEL_HIQ; - else /* for WMM */ - hq_sele = HQSEL_VOQ | HQSEL_BEQ | HQSEL_MGTQ | - HQSEL_HIQ; + hq_sele = HQSEL_VOQ | HQSEL_VIQ | HQSEL_MGTQ | HQSEL_HIQ; break; case 1: if (TX_SELE_LQ == queue_sel) { @@ -742,18 +686,15 @@ static void _rtl92cu_init_chipt_queue_priority(struct ieee80211_hw *hw, } static void _rtl92cu_init_queue_priority(struct ieee80211_hw *hw, - bool wmm_enable, u8 out_ep_num, u8 queue_sel) { struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); if (IS_NORMAL_CHIP(rtlhal->version)) - _rtl92cu_init_chipn_queue_priority(hw, wmm_enable, out_ep_num, - queue_sel); + _rtl92cu_init_chipn_queue_priority(hw, out_ep_num, queue_sel); else - _rtl92cu_init_chipt_queue_priority(hw, wmm_enable, out_ep_num, - queue_sel); + _rtl92cu_init_chipt_queue_priority(hw, out_ep_num, queue_sel); } static void _rtl92cu_init_wmac_setting(struct ieee80211_hw *hw) @@ -810,8 +751,7 @@ static int _rtl92cu_init_mac(struct ieee80211_hw *hw) struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw); struct rtl_usb *rtlusb = rtl_usbdev(usb_priv); int err = 0; - u32 boundary = 0; - u8 wmm_enable = false; /* TODO */ + u32 boundary = TX_PAGE_BOUNDARY; u8 out_ep_nums = rtlusb->out_ep_nums; u8 queue_sel = rtlusb->out_queue_sel; @@ -821,22 +761,13 @@ static int _rtl92cu_init_mac(struct ieee80211_hw *hw) pr_err("Failed to init power on!\n"); return err; } - if (!wmm_enable) { - boundary = TX_PAGE_BOUNDARY; - } else { /* for WMM */ - boundary = (IS_NORMAL_CHIP(rtlhal->version)) - ? WMM_CHIP_B_TX_PAGE_BOUNDARY - : WMM_CHIP_A_TX_PAGE_BOUNDARY; - } if (!rtl92c_init_llt_table(hw, boundary)) { pr_err("Failed to init LLT Table!\n"); return -EINVAL; } - _rtl92cu_init_queue_reserved_page(hw, wmm_enable, out_ep_nums, - queue_sel); - _rtl92c_init_trx_buffer(hw, wmm_enable); - _rtl92cu_init_queue_priority(hw, wmm_enable, out_ep_nums, - queue_sel); + _rtl92cu_init_queue_reserved_page(hw, out_ep_nums, queue_sel); + _rtl92c_init_trx_buffer(hw); + _rtl92cu_init_queue_priority(hw, out_ep_nums, queue_sel); /* Get Rx PHY status in order to report RSSI and others. 
*/ rtl92c_init_driver_info_size(hw, RTL92C_DRIVER_INFO_SIZE); rtl92c_init_interrupt(hw); @@ -1553,7 +1484,6 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - enum wireless_mode wirelessmode = mac->mode; u8 idx = 0; switch (variable) { @@ -1605,36 +1535,15 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) } case HW_VAR_SLOT_TIME:{ u8 e_aci; - u8 QOS_MODE = 1; rtl_write_byte(rtlpriv, REG_SLOT, val[0]); rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD, "HW_VAR_SLOT_TIME %x\n", val[0]); - if (QOS_MODE) { - for (e_aci = 0; e_aci < AC_MAX; e_aci++) - rtlpriv->cfg->ops->set_hw_reg(hw, - HW_VAR_AC_PARAM, - &e_aci); - } else { - u8 sifstime = 0; - u8 u1baifs; - if (IS_WIRELESS_MODE_A(wirelessmode) || - IS_WIRELESS_MODE_N_24G(wirelessmode) || - IS_WIRELESS_MODE_N_5G(wirelessmode)) - sifstime = 16; - else - sifstime = 10; - u1baifs = sifstime + (2 * val[0]); - rtl_write_byte(rtlpriv, REG_EDCA_VO_PARAM, - u1baifs); - rtl_write_byte(rtlpriv, REG_EDCA_VI_PARAM, - u1baifs); - rtl_write_byte(rtlpriv, REG_EDCA_BE_PARAM, - u1baifs); - rtl_write_byte(rtlpriv, REG_EDCA_BK_PARAM, - u1baifs); - } + for (e_aci = 0; e_aci < AC_MAX; e_aci++) + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_AC_PARAM, + &e_aci); break; } case HW_VAR_ACK_PREAMBLE:{ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/Makefile new file mode 100644 index 000000000000..beebdfa3f7ff --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/Makefile @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 +rtl8192d-common-objs := \ + dm_common.o \ + fw_common.o \ + hw_common.o \ + main.o \ + phy_common.o \ + rf_common.o \ + trx_common.o + +obj-$(CONFIG_RTL8192D_COMMON) += rtl8192d-common.o diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/def.h index 21726d9b4aef..21726d9b4aef 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/def.h diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/dm_common.c new file mode 100644 index 000000000000..20373ce998bf --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/dm_common.c @@ -0,0 +1,1061 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ + +#include "../wifi.h" +#include "../base.h" +#include "../core.h" +#include "reg.h" +#include "def.h" +#include "phy_common.h" +#include "dm_common.h" + +static const u32 ofdmswing_table[OFDM_TABLE_SIZE_92D] = { + 0x7f8001fe, /* 0, +6.0dB */ + 0x788001e2, /* 1, +5.5dB */ + 0x71c001c7, /* 2, +5.0dB */ + 0x6b8001ae, /* 3, +4.5dB */ + 0x65400195, /* 4, +4.0dB */ + 0x5fc0017f, /* 5, +3.5dB */ + 0x5a400169, /* 6, +3.0dB */ + 0x55400155, /* 7, +2.5dB */ + 0x50800142, /* 8, +2.0dB */ + 0x4c000130, /* 9, +1.5dB */ + 0x47c0011f, /* 10, +1.0dB */ + 0x43c0010f, /* 11, +0.5dB */ + 0x40000100, /* 12, +0dB */ + 0x3c8000f2, /* 13, -0.5dB */ + 0x390000e4, /* 14, -1.0dB */ + 0x35c000d7, /* 15, -1.5dB */ + 0x32c000cb, /* 16, -2.0dB */ + 0x300000c0, /* 17, -2.5dB */ + 0x2d4000b5, /* 18, -3.0dB */ + 0x2ac000ab, /* 19, -3.5dB */ + 0x288000a2, /* 20, -4.0dB */ + 0x26000098, /* 21, -4.5dB */ + 0x24000090, /* 22, -5.0dB */ + 0x22000088, /* 23, -5.5dB */ + 0x20000080, /* 24, -6.0dB */ + 
0x1e400079, /* 25, -6.5dB */ + 0x1c800072, /* 26, -7.0dB */ + 0x1b00006c, /* 27. -7.5dB */ + 0x19800066, /* 28, -8.0dB */ + 0x18000060, /* 29, -8.5dB */ + 0x16c0005b, /* 30, -9.0dB */ + 0x15800056, /* 31, -9.5dB */ + 0x14400051, /* 32, -10.0dB */ + 0x1300004c, /* 33, -10.5dB */ + 0x12000048, /* 34, -11.0dB */ + 0x11000044, /* 35, -11.5dB */ + 0x10000040, /* 36, -12.0dB */ + 0x0f00003c, /* 37, -12.5dB */ + 0x0e400039, /* 38, -13.0dB */ + 0x0d800036, /* 39, -13.5dB */ + 0x0cc00033, /* 40, -14.0dB */ + 0x0c000030, /* 41, -14.5dB */ + 0x0b40002d, /* 42, -15.0dB */ +}; + +static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = { + {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, /* 0, +0dB */ + {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 1, -0.5dB */ + {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 2, -1.0dB */ + {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 3, -1.5dB */ + {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 4, -2.0dB */ + {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 5, -2.5dB */ + {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 6, -3.0dB */ + {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 7, -3.5dB */ + {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 8, -4.0dB */ + {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 9, -4.5dB */ + {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 10, -5.0dB */ + {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 11, -5.5dB */ + {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 12, -6.0dB */ + {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 13, -6.5dB */ + {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 14, -7.0dB */ + {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 15, -7.5dB */ + {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */ + {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 17, -8.5dB */ + {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 18, -9.0dB */ + {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 19, -9.5dB */ + {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 20, -10.0dB */ + {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 21, -10.5dB */ + {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 22, -11.0dB */ + {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 23, -11.5dB */ + {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 24, -12.0dB */ + {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 25, -12.5dB */ + {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 26, -13.0dB */ + {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 27, -13.5dB */ + {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 28, -14.0dB */ + {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 29, -14.5dB */ + {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 30, -15.0dB */ + {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 31, -15.5dB */ + {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} /* 32, -16.0dB */ +}; + +static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = { + {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, /* 0, +0dB */ + {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 1, -0.5dB */ + {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 2, -1.0dB */ + {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 3, -1.5dB */ + {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 4, -2.0dB */ + {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 5, -2.5dB */ + {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 6, -3.0dB */ + {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 7, -3.5dB */ + {0x22, 0x21, 0x1d, 0x11, 0x00, 
0x00, 0x00, 0x00}, /* 8, -4.0dB */ + {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 9, -4.5dB */ + {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 10, -5.0dB */ + {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 11, -5.5dB */ + {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 12, -6.0dB */ + {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 13, -6.5dB */ + {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 14, -7.0dB */ + {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 15, -7.5dB */ + {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */ + {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 17, -8.5dB */ + {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 18, -9.0dB */ + {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 19, -9.5dB */ + {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 20, -10.0dB */ + {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 21, -10.5dB */ + {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 22, -11.0dB */ + {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 23, -11.5dB */ + {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 24, -12.0dB */ + {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 25, -12.5dB */ + {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 26, -13.0dB */ + {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 27, -13.5dB */ + {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 28, -14.0dB */ + {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 29, -14.5dB */ + {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 30, -15.0dB */ + {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 31, -15.5dB */ + {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} /* 32, -16.0dB */ +}; + +static void rtl92d_dm_rxgain_tracking_thermalmeter(struct ieee80211_hw *hw) +{ + static const u8 index_mapping[RX_INDEX_MAPPING_NUM] = { + 0x0f, 0x0f, 0x0d, 0x0c, 0x0b, + 0x0a, 0x09, 0x08, 0x07, 0x06, + 0x05, 0x04, 0x04, 0x03, 0x02 + }; + struct rtl_priv *rtlpriv = rtl_priv(hw); + int i, idx; + u32 u4tmp; + + idx = rtlpriv->efuse.eeprom_thermalmeter - rtlpriv->dm.thermalvalue_rxgain; + u4tmp = index_mapping[idx] << 12; + + rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "===> Rx Gain %x\n", u4tmp); + + for (i = RF90_PATH_A; i < rtlpriv->phy.num_total_rfpath; i++) + rtl_set_rfreg(hw, i, 0x3C, RFREG_OFFSET_MASK, + (rtlpriv->phy.reg_rf3c[i] & ~0xF000) | u4tmp); +} + +static void rtl92d_bandtype_2_4G(struct ieee80211_hw *hw, long *temp_cckg, + u8 *cck_index_old) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + unsigned long flag = 0; + const u8 *cckswing; + long temp_cck; + int i; + + /* Query CCK default setting From 0xa24 */ + rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag); + temp_cck = rtl_get_bbreg(hw, RCCK0_TXFILTER2, + MASKDWORD) & MASKCCK; + rtl92d_release_cckandrw_pagea_ctl(hw, &flag); + + for (i = 0; i < CCK_TABLE_LENGTH; i++) { + if (rtlpriv->dm.cck_inch14) + cckswing = &cckswing_table_ch14[i][2]; + else + cckswing = &cckswing_table_ch1ch13[i][2]; + + if (temp_cck == le32_to_cpu(*((__le32 *)cckswing))) { + *cck_index_old = (u8)i; + rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch14 %d\n", + RCCK0_TXFILTER2, temp_cck, + *cck_index_old, + rtlpriv->dm.cck_inch14); + break; + } + } + *temp_cckg = temp_cck; +} + +static void rtl92d_bandtype_5G(struct rtl_hal *rtlhal, u8 *ofdm_index, + bool *internal_pa, u8 thermalvalue, u8 delta, + u8 rf, struct rtl_efuse *rtlefuse, + struct rtl_priv *rtlpriv, struct rtl_phy *rtlphy, + const u8 index_mapping[5][INDEX_MAPPING_NUM], + 
const u8 index_mapping_pa[8][INDEX_MAPPING_NUM]) +{ + u8 offset = 0; + u8 index; + int i; + + for (i = 0; i < rf; i++) { + if (rtlhal->macphymode == DUALMAC_DUALPHY && + rtlhal->interfaceindex == 1) /* MAC 1 5G */ + *internal_pa = rtlefuse->internal_pa_5g[1]; + else + *internal_pa = rtlefuse->internal_pa_5g[i]; + + if (*internal_pa) { + if (rtlhal->interfaceindex == 1 || i == rf) + offset = 4; + else + offset = 0; + if (rtlphy->current_channel >= 100 && + rtlphy->current_channel <= 165) + offset += 2; + } else { + if (rtlhal->interfaceindex == 1 || i == rf) + offset = 2; + else + offset = 0; + } + + if (thermalvalue > rtlefuse->eeprom_thermalmeter) + offset++; + + if (*internal_pa) { + if (delta > INDEX_MAPPING_NUM - 1) + index = index_mapping_pa[offset] + [INDEX_MAPPING_NUM - 1]; + else + index = + index_mapping_pa[offset][delta]; + } else { + if (delta > INDEX_MAPPING_NUM - 1) + index = + index_mapping[offset][INDEX_MAPPING_NUM - 1]; + else + index = index_mapping[offset][delta]; + } + + if (thermalvalue > rtlefuse->eeprom_thermalmeter) { + if (*internal_pa && thermalvalue > 0x12) { + ofdm_index[i] = rtlpriv->dm.ofdm_index[i] - + ((delta / 2) * 3 + (delta % 2)); + } else { + ofdm_index[i] -= index; + } + } else { + ofdm_index[i] += index; + } + } +} + +static void +rtl92d_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw *hw) +{ + static const u8 index_mapping[5][INDEX_MAPPING_NUM] = { + /* 5G, path A/MAC 0, decrease power */ + {0, 1, 3, 6, 8, 9, 11, 13, 14, 16, 17, 18, 18}, + /* 5G, path A/MAC 0, increase power */ + {0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18}, + /* 5G, path B/MAC 1, decrease power */ + {0, 2, 3, 6, 8, 9, 11, 13, 14, 16, 17, 18, 18}, + /* 5G, path B/MAC 1, increase power */ + {0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18}, + /* 2.4G, for decreas power */ + {0, 1, 2, 3, 4, 5, 6, 7, 7, 8, 9, 10, 10}, + }; + static const u8 index_mapping_internal_pa[8][INDEX_MAPPING_NUM] = { + /* 5G, path A/MAC 0, ch36-64, decrease power */ + {0, 1, 2, 4, 6, 7, 9, 11, 12, 14, 15, 16, 16}, + /* 5G, path A/MAC 0, ch36-64, increase power */ + {0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18}, + /* 5G, path A/MAC 0, ch100-165, decrease power */ + {0, 1, 2, 3, 5, 6, 8, 10, 11, 13, 14, 15, 15}, + /* 5G, path A/MAC 0, ch100-165, increase power */ + {0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18}, + /* 5G, path B/MAC 1, ch36-64, decrease power */ + {0, 1, 2, 4, 6, 7, 9, 11, 12, 14, 15, 16, 16}, + /* 5G, path B/MAC 1, ch36-64, increase power */ + {0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18}, + /* 5G, path B/MAC 1, ch100-165, decrease power */ + {0, 1, 2, 3, 5, 6, 8, 9, 10, 12, 13, 14, 14}, + /* 5G, path B/MAC 1, ch100-165, increase power */ + {0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18}, + }; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_dm *dm = &rtlpriv->dm; + u8 thermalvalue, delta, delta_lck, delta_iqk, delta_rxgain; + u8 ofdm_min_index = 6, ofdm_min_index_internal_pa = 3, rf; + long ele_a = 0, ele_d, temp_cck, val_x, value32; + bool is2t = IS_92D_SINGLEPHY(rtlhal->version); + u8 offset, thermalvalue_avg_count = 0; + u8 ofdm_index_old[2] = {0, 0}; + u32 thermalvalue_avg = 0; + bool internal_pa = false; + long val_y, ele_c = 0; + s8 cck_index_old = 0; + u8 indexforchannel; + u8 ofdm_index[2]; + s8 cck_index = 0; + u8 index, swing; + int i; + + indexforchannel = rtl92d_get_rightchnlplace_for_iqk(rtlphy->current_channel); + + 
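A note on the index_mapping tables above: the tracking callback clamps the thermal-meter delta to the table width and reads a swing-index correction from the row selected by band, RF path and PA type. A minimal standalone sketch of that clamped lookup, using the 2.4G decrease-power row copied from the table and made-up thermal readings:

#include <stdio.h>
#include <stdint.h>

#define INDEX_MAPPING_NUM 13

/* 2.4G decrease-power row from the index_mapping table above. */
static const uint8_t index_mapping_24g[INDEX_MAPPING_NUM] = {
	0, 1, 2, 3, 4, 5, 6, 7, 7, 8, 9, 10, 10
};

/* Clamp the delta to the table width and look up the correction,
 * as the tracking callback does before adjusting the OFDM index. */
static uint8_t swing_correction(uint8_t delta)
{
	if (delta > INDEX_MAPPING_NUM - 1)
		delta = INDEX_MAPPING_NUM - 1;
	return index_mapping_24g[delta];
}

int main(void)
{
	uint8_t eeprom_thermal = 0x18, readback = 0x1d;	/* sample values */
	uint8_t delta = readback > eeprom_thermal ?
			readback - eeprom_thermal : eeprom_thermal - readback;

	printf("delta=%u correction=%u\n", delta, swing_correction(delta));
	return 0;
}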
dm->txpower_trackinginit = true; + + rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "\n"); + + thermalvalue = (u8)rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0xf800); + + rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n", + thermalvalue, + dm->thermalvalue, rtlefuse->eeprom_thermalmeter); + + if (!thermalvalue) + goto exit; + + if (is2t) + rf = 2; + else + rf = 1; + + if (dm->thermalvalue && !rtlhal->reloadtxpowerindex) + goto old_index_done; + + ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD) & MASKOFDM_D; + + for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) { + if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) { + ofdm_index_old[0] = (u8)i; + + rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n", + ROFDM0_XATXIQIMBALANCE, + ele_d, ofdm_index_old[0]); + break; + } + } + + if (is2t) { + ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, MASKDWORD); + ele_d &= MASKOFDM_D; + + for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) { + if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) { + ofdm_index_old[1] = (u8)i; + + rtl_dbg(rtlpriv, COMP_POWER_TRACKING, + DBG_LOUD, + "Initial pathB ele_d reg 0x%x = 0x%lx, ofdm_index = 0x%x\n", + ROFDM0_XBTXIQIMBALANCE, ele_d, + ofdm_index_old[1]); + break; + } + } + } + + if (rtlhal->current_bandtype == BAND_ON_2_4G) { + rtl92d_bandtype_2_4G(hw, &temp_cck, &cck_index_old); + } else { + temp_cck = 0x090e1317; + cck_index_old = 12; + } + + if (!dm->thermalvalue) { + dm->thermalvalue = rtlefuse->eeprom_thermalmeter; + dm->thermalvalue_lck = thermalvalue; + dm->thermalvalue_iqk = thermalvalue; + dm->thermalvalue_rxgain = rtlefuse->eeprom_thermalmeter; + + for (i = 0; i < rf; i++) + dm->ofdm_index[i] = ofdm_index_old[i]; + + dm->cck_index = cck_index_old; + } + + if (rtlhal->reloadtxpowerindex) { + for (i = 0; i < rf; i++) + dm->ofdm_index[i] = ofdm_index_old[i]; + + dm->cck_index = cck_index_old; + + rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "reload ofdm index for band switch\n"); + } + +old_index_done: + for (i = 0; i < rf; i++) + ofdm_index[i] = dm->ofdm_index[i]; + + dm->thermalvalue_avg[dm->thermalvalue_avg_index] = thermalvalue; + dm->thermalvalue_avg_index++; + + if (dm->thermalvalue_avg_index == AVG_THERMAL_NUM) + dm->thermalvalue_avg_index = 0; + + for (i = 0; i < AVG_THERMAL_NUM; i++) { + if (dm->thermalvalue_avg[i]) { + thermalvalue_avg += dm->thermalvalue_avg[i]; + thermalvalue_avg_count++; + } + } + + if (thermalvalue_avg_count) + thermalvalue = (u8)(thermalvalue_avg / thermalvalue_avg_count); + + if (rtlhal->reloadtxpowerindex) { + delta = abs_diff(thermalvalue, rtlefuse->eeprom_thermalmeter); + rtlhal->reloadtxpowerindex = false; + dm->done_txpower = false; + } else if (dm->done_txpower) { + delta = abs_diff(thermalvalue, dm->thermalvalue); + } else { + delta = abs_diff(thermalvalue, rtlefuse->eeprom_thermalmeter); + } + + delta_lck = abs_diff(thermalvalue, dm->thermalvalue_lck); + delta_iqk = abs_diff(thermalvalue, dm->thermalvalue_iqk); + delta_rxgain = abs_diff(thermalvalue, dm->thermalvalue_rxgain); + + rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n", + thermalvalue, dm->thermalvalue, rtlefuse->eeprom_thermalmeter, + delta, delta_lck, delta_iqk); + + if (delta_lck > rtlefuse->delta_lck && rtlefuse->delta_lck != 0) { + dm->thermalvalue_lck = thermalvalue; + 
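The averaging step above keeps the most recent readings in dm->thermalvalue_avg and averages only the slots that have been filled before computing any deltas. A compact sketch of that smoothing, with the slot count assumed to be 8 (AVG_THERMAL_NUM is defined elsewhere in the driver):

#include <stdio.h>
#include <stdint.h>

#define AVG_THERMAL_NUM 8	/* assumed slot count for illustration */

struct thermal_avg {
	uint8_t slot[AVG_THERMAL_NUM];
	uint8_t index;
};

/* Store the newest reading and return the average of the filled slots,
 * mirroring the thermalvalue_avg handling in the tracking callback. */
static uint8_t thermal_avg_update(struct thermal_avg *t, uint8_t value)
{
	uint32_t sum = 0, count = 0;
	int i;

	t->slot[t->index] = value;
	t->index = (t->index + 1) % AVG_THERMAL_NUM;

	for (i = 0; i < AVG_THERMAL_NUM; i++) {
		if (t->slot[i]) {
			sum += t->slot[i];
			count++;
		}
	}
	return count ? sum / count : value;
}

int main(void)
{
	struct thermal_avg t = { 0 };
	uint8_t readings[] = { 0x1a, 0x1b, 0x1d, 0x1c };	/* sample data */
	size_t i;

	for (i = 0; i < sizeof(readings); i++)
		printf("avg after %zu readings: 0x%x\n",
		       i + 1, thermal_avg_update(&t, readings[i]));
	return 0;
}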
rtlpriv->cfg->ops->phy_lc_calibrate(hw, is2t); + } + + if (delta == 0 || !dm->txpower_track_control) + goto check_delta; + + dm->done_txpower = true; + delta = abs_diff(thermalvalue, rtlefuse->eeprom_thermalmeter); + + if (rtlhal->current_bandtype == BAND_ON_2_4G) { + offset = 4; + if (delta > INDEX_MAPPING_NUM - 1) + index = index_mapping[offset][INDEX_MAPPING_NUM - 1]; + else + index = index_mapping[offset][delta]; + + if (thermalvalue > dm->thermalvalue) { + for (i = 0; i < rf; i++) + ofdm_index[i] -= delta; + + cck_index -= delta; + } else { + for (i = 0; i < rf; i++) + ofdm_index[i] += index; + + cck_index += index; + } + } else if (rtlhal->current_bandtype == BAND_ON_5G) { + rtl92d_bandtype_5G(rtlhal, ofdm_index, &internal_pa, + thermalvalue, delta, rf, rtlefuse, rtlpriv, + rtlphy, index_mapping, + index_mapping_internal_pa); + } + + if (is2t) { + rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "temp OFDM_A_index=0x%x, OFDM_B_index = 0x%x, cck_index=0x%x\n", + dm->ofdm_index[0], dm->ofdm_index[1], dm->cck_index); + } else { + rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "temp OFDM_A_index=0x%x, cck_index = 0x%x\n", + dm->ofdm_index[0], dm->cck_index); + } + + for (i = 0; i < rf; i++) { + if (ofdm_index[i] > OFDM_TABLE_SIZE_92D - 1) { + ofdm_index[i] = OFDM_TABLE_SIZE_92D - 1; + } else if (internal_pa || + rtlhal->current_bandtype == BAND_ON_2_4G) { + if (ofdm_index[i] < ofdm_min_index_internal_pa) + ofdm_index[i] = ofdm_min_index_internal_pa; + } else if (ofdm_index[i] < ofdm_min_index) { + ofdm_index[i] = ofdm_min_index; + } + } + + if (rtlhal->current_bandtype == BAND_ON_2_4G) { + if (cck_index > CCK_TABLE_SIZE - 1) + cck_index = CCK_TABLE_SIZE - 1; + else if (cck_index < 0) + cck_index = 0; + } + + if (is2t) { + rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "new OFDM_A_index=0x%x, OFDM_B_index = 0x%x, cck_index=0x%x\n", + ofdm_index[0], ofdm_index[1], cck_index); + } else { + rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "new OFDM_A_index=0x%x, cck_index = 0x%x\n", + ofdm_index[0], cck_index); + } + + ele_d = (ofdmswing_table[ofdm_index[0]] & 0xFFC00000) >> 22; + val_x = rtlphy->iqk_matrix[indexforchannel].value[0][0]; + val_y = rtlphy->iqk_matrix[indexforchannel].value[0][1]; + + if (val_x != 0) { + if ((val_x & 0x00000200) != 0) + val_x = val_x | 0xFFFFFC00; + ele_a = ((val_x * ele_d) >> 8) & 0x000003FF; + + /* new element C = element D x Y */ + if ((val_y & 0x00000200) != 0) + val_y = val_y | 0xFFFFFC00; + ele_c = ((val_y * ele_d) >> 8) & 0x000003FF; + + /* write new elements A, C, D to regC80 and + * regC94, element B is always 0 + */ + value32 = (ele_d << 22) | ((ele_c & 0x3F) << 16) | ele_a; + rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, + MASKDWORD, value32); + + value32 = (ele_c & 0x000003C0) >> 6; + rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, value32); + + value32 = ((val_x * ele_d) >> 7) & 0x01; + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24), value32); + + } else { + rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD, + ofdmswing_table[ofdm_index[0]]); + rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, 0x00); + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24), 0x00); + } + + rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "TxPwrTracking for interface %d path A: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xe94 = 0x%lx 0xe9c = 0x%lx\n", + rtlhal->interfaceindex, + val_x, val_y, ele_a, ele_c, ele_d, + val_x, val_y); + + if (rtlhal->current_bandtype == BAND_ON_2_4G) { + /* Adjust CCK according to IQK result */ + for (i = 0; i < 8; i++) { + 
if (dm->cck_inch14) + swing = cckswing_table_ch14[cck_index][i]; + else + swing = cckswing_table_ch1ch13[cck_index][i]; + + rtl_write_byte(rtlpriv, 0xa22 + i, swing); + } + } + + if (is2t) { + ele_d = (ofdmswing_table[ofdm_index[1]] & 0xFFC00000) >> 22; + val_x = rtlphy->iqk_matrix[indexforchannel].value[0][4]; + val_y = rtlphy->iqk_matrix[indexforchannel].value[0][5]; + + if (val_x != 0) { + if ((val_x & 0x00000200) != 0) + /* consider minus */ + val_x = val_x | 0xFFFFFC00; + ele_a = ((val_x * ele_d) >> 8) & 0x000003FF; + + /* new element C = element D x Y */ + if ((val_y & 0x00000200) != 0) + val_y = val_y | 0xFFFFFC00; + ele_c = ((val_y * ele_d) >> 8) & 0x00003FF; + + /* write new elements A, C, D to regC88 + * and regC9C, element B is always 0 + */ + value32 = (ele_d << 22) | ((ele_c & 0x3F) << 16) | ele_a; + rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, + MASKDWORD, value32); + + value32 = (ele_c & 0x000003C0) >> 6; + rtl_set_bbreg(hw, ROFDM0_XDTXAFE, MASKH4BITS, value32); + + value32 = ((val_x * ele_d) >> 7) & 0x01; + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(28), value32); + } else { + rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, + MASKDWORD, ofdmswing_table[ofdm_index[1]]); + rtl_set_bbreg(hw, ROFDM0_XDTXAFE, MASKH4BITS, 0x00); + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(28), 0x00); + } + + rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "TxPwrTracking path B: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xeb4 = 0x%lx 0xebc = 0x%lx\n", + val_x, val_y, ele_a, ele_c, ele_d, val_x, val_y); + } + + rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "TxPwrTracking 0xc80 = 0x%x, 0xc94 = 0x%x RF 0x24 = 0x%x\n", + rtl_get_bbreg(hw, 0xc80, MASKDWORD), + rtl_get_bbreg(hw, 0xc94, MASKDWORD), + rtl_get_rfreg(hw, RF90_PATH_A, 0x24, RFREG_OFFSET_MASK)); + +check_delta: + if (delta_iqk > rtlefuse->delta_iqk && rtlefuse->delta_iqk != 0) { + rtl92d_phy_reset_iqk_result(hw); + dm->thermalvalue_iqk = thermalvalue; + rtlpriv->cfg->ops->phy_iq_calibrate(hw); + } + + if (delta_rxgain > 0 && rtlhal->current_bandtype == BAND_ON_5G && + thermalvalue <= rtlefuse->eeprom_thermalmeter) { + dm->thermalvalue_rxgain = thermalvalue; + rtl92d_dm_rxgain_tracking_thermalmeter(hw); + } + + if (dm->txpower_track_control) + dm->thermalvalue = thermalvalue; + +exit: + rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===\n"); +} + +void rtl92d_dm_initialize_txpower_tracking(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtlpriv->dm.txpower_tracking = true; + rtlpriv->dm.txpower_trackinginit = false; + rtlpriv->dm.txpower_track_control = true; + rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "pMgntInfo->txpower_tracking = %d\n", + rtlpriv->dm.txpower_tracking); +} +EXPORT_SYMBOL_GPL(rtl92d_dm_initialize_txpower_tracking); + +void rtl92d_dm_check_txpower_tracking_thermal_meter(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (!rtlpriv->dm.txpower_tracking) + return; + + if (!rtlpriv->dm.tm_trigger) { + rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, BIT(17) | + BIT(16), 0x03); + rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "Trigger 92S Thermal Meter!!\n"); + rtlpriv->dm.tm_trigger = 1; + } else { + rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "Schedule TxPowerTracking direct call!!\n"); + rtl92d_dm_txpower_tracking_callback_thermalmeter(hw); + rtlpriv->dm.tm_trigger = 0; + } +} +EXPORT_SYMBOL_GPL(rtl92d_dm_check_txpower_tracking_thermal_meter); + +void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw) +{ + struct rtl_priv 
*rtlpriv = rtl_priv(hw); + struct false_alarm_statistics *falsealm_cnt = &rtlpriv->falsealm_cnt; + unsigned long flag = 0; + u32 ret_value; + + /* hold ofdm counter */ + rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 1); /* hold page C counter */ + rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 1); /* hold page D counter */ + + ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD); + falsealm_cnt->cnt_fast_fsync_fail = ret_value & 0xffff; + falsealm_cnt->cnt_sb_search_fail = (ret_value & 0xffff0000) >> 16; + + ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD); + falsealm_cnt->cnt_parity_fail = (ret_value & 0xffff0000) >> 16; + + ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD); + falsealm_cnt->cnt_rate_illegal = ret_value & 0xffff; + falsealm_cnt->cnt_crc8_fail = (ret_value & 0xffff0000) >> 16; + + ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD); + falsealm_cnt->cnt_mcs_fail = ret_value & 0xffff; + + falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail + + falsealm_cnt->cnt_rate_illegal + + falsealm_cnt->cnt_crc8_fail + + falsealm_cnt->cnt_mcs_fail + + falsealm_cnt->cnt_fast_fsync_fail + + falsealm_cnt->cnt_sb_search_fail; + + if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) { + rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag); + ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0); + falsealm_cnt->cnt_cck_fail = ret_value; + ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3); + falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8; + rtl92d_release_cckandrw_pagea_ctl(hw, &flag); + } else { + falsealm_cnt->cnt_cck_fail = 0; + } + + falsealm_cnt->cnt_all = falsealm_cnt->cnt_ofdm_fail + + falsealm_cnt->cnt_cck_fail; + + /* reset false alarm counter registers */ + rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1); + rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0); + + /* update ofdm counter */ + rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 0); /* update page C counter */ + rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 0); /* update page D counter */ + + if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) { + /* reset cck counter */ + rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag); + rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0); + /* enable cck counter */ + rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2); + rtl92d_release_cckandrw_pagea_ctl(hw, &flag); + } + + rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, + "Cnt_Fast_Fsync_fail = %x, Cnt_SB_Search_fail = %x\n", + falsealm_cnt->cnt_fast_fsync_fail, + falsealm_cnt->cnt_sb_search_fail); + + rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, + "Cnt_Parity_Fail = %x, Cnt_Rate_Illegal = %x, Cnt_Crc8_fail = %x, Cnt_Mcs_fail = %x\n", + falsealm_cnt->cnt_parity_fail, + falsealm_cnt->cnt_rate_illegal, + falsealm_cnt->cnt_crc8_fail, + falsealm_cnt->cnt_mcs_fail); + + rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, + "Cnt_Ofdm_fail = %x, Cnt_Cck_fail = %x, Cnt_all = %x\n", + falsealm_cnt->cnt_ofdm_fail, + falsealm_cnt->cnt_cck_fail, + falsealm_cnt->cnt_all); +} +EXPORT_SYMBOL_GPL(rtl92d_dm_false_alarm_counter_statistics); + +void rtl92d_dm_find_minimum_rssi(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *de_digtable = &rtlpriv->dm_digtable; + struct rtl_mac *mac = rtl_mac(rtlpriv); + + /* Determine the minimum RSSI */ + if (mac->link_state < MAC80211_LINKED && + rtlpriv->dm.entry_min_undec_sm_pwdb == 0) { + de_digtable->min_undec_pwdb_for_dm = 0; + rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, + "Not connected to any\n"); + } + if (mac->link_state >= MAC80211_LINKED) { + if (mac->opmode == 
NL80211_IFTYPE_AP || + mac->opmode == NL80211_IFTYPE_ADHOC) { + de_digtable->min_undec_pwdb_for_dm = + rtlpriv->dm.entry_min_undec_sm_pwdb; + rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, + "AP Client PWDB = 0x%lx\n", + rtlpriv->dm.entry_min_undec_sm_pwdb); + } else { + de_digtable->min_undec_pwdb_for_dm = + rtlpriv->dm.undec_sm_pwdb; + rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, + "STA Default Port PWDB = 0x%x\n", + de_digtable->min_undec_pwdb_for_dm); + } + } else { + de_digtable->min_undec_pwdb_for_dm = + rtlpriv->dm.entry_min_undec_sm_pwdb; + rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, + "AP Ext Port or disconnect PWDB = 0x%x\n", + de_digtable->min_undec_pwdb_for_dm); + } + + rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n", + de_digtable->min_undec_pwdb_for_dm); +} +EXPORT_SYMBOL_GPL(rtl92d_dm_find_minimum_rssi); + +static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *de_digtable = &rtlpriv->dm_digtable; + unsigned long flag = 0; + + if (de_digtable->cursta_cstate == DIG_STA_CONNECT) { + if (de_digtable->pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) { + if (de_digtable->min_undec_pwdb_for_dm <= 25) + de_digtable->cur_cck_pd_state = + CCK_PD_STAGE_LOWRSSI; + else + de_digtable->cur_cck_pd_state = + CCK_PD_STAGE_HIGHRSSI; + } else { + if (de_digtable->min_undec_pwdb_for_dm <= 20) + de_digtable->cur_cck_pd_state = + CCK_PD_STAGE_LOWRSSI; + else + de_digtable->cur_cck_pd_state = + CCK_PD_STAGE_HIGHRSSI; + } + } else { + de_digtable->cur_cck_pd_state = CCK_PD_STAGE_LOWRSSI; + } + if (de_digtable->pre_cck_pd_state != de_digtable->cur_cck_pd_state) { + if (de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI) { + rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag); + rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0x83); + rtl92d_release_cckandrw_pagea_ctl(hw, &flag); + } else { + rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag); + rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd); + rtl92d_release_cckandrw_pagea_ctl(hw, &flag); + } + de_digtable->pre_cck_pd_state = de_digtable->cur_cck_pd_state; + } + rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n", + de_digtable->cursta_cstate == DIG_STA_CONNECT ? + "DIG_STA_CONNECT " : "DIG_STA_DISCONNECT"); + rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n", + de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ? 
+ "Low RSSI " : "High RSSI "); + rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "is92d single phy =%x\n", + IS_92D_SINGLEPHY(rtlpriv->rtlhal.version)); +} + +void rtl92d_dm_write_dig(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *de_digtable = &rtlpriv->dm_digtable; + + rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, + "cur_igvalue = 0x%x, pre_igvalue = 0x%x, back_val = %d\n", + de_digtable->cur_igvalue, de_digtable->pre_igvalue, + de_digtable->back_val); + if (!de_digtable->dig_enable_flag) { + rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "DIG is disabled\n"); + de_digtable->pre_igvalue = 0x17; + return; + } + if (de_digtable->pre_igvalue != de_digtable->cur_igvalue) { + rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f, + de_digtable->cur_igvalue); + rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f, + de_digtable->cur_igvalue); + de_digtable->pre_igvalue = de_digtable->cur_igvalue; + } +} +EXPORT_SYMBOL_GPL(rtl92d_dm_write_dig); + +static void rtl92d_early_mode_enabled(struct rtl_priv *rtlpriv) +{ + struct dig_t *de_digtable = &rtlpriv->dm_digtable; + + if (rtlpriv->mac80211.link_state >= MAC80211_LINKED && + rtlpriv->mac80211.vendor == PEER_CISCO) { + rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "IOT_PEER = CISCO\n"); + if (de_digtable->last_min_undec_pwdb_for_dm >= 50 && + de_digtable->min_undec_pwdb_for_dm < 50) { + rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x00); + rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, + "Early Mode Off\n"); + } else if (de_digtable->last_min_undec_pwdb_for_dm <= 55 && + de_digtable->min_undec_pwdb_for_dm > 55) { + rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f); + rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, + "Early Mode On\n"); + } + } else if (!(rtl_read_byte(rtlpriv, REG_EARLY_MODE_CONTROL) & 0xf)) { + rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f); + rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "Early Mode On\n"); + } +} + +void rtl92d_dm_dig(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *de_digtable = &rtlpriv->dm_digtable; + u8 value_igi = de_digtable->cur_igvalue; + struct false_alarm_statistics *falsealm_cnt = &rtlpriv->falsealm_cnt; + + rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "==>\n"); + if (rtlpriv->rtlhal.earlymode_enable) { + rtl92d_early_mode_enabled(rtlpriv); + de_digtable->last_min_undec_pwdb_for_dm = + de_digtable->min_undec_pwdb_for_dm; + } + if (!rtlpriv->dm.dm_initialgain_enable) + return; + + /* because we will send data pkt when scanning + * this will cause some ap like gear-3700 wep TP + * lower if we return here, this is the diff of + * mac80211 driver vs ieee80211 driver + */ + /* if (rtlpriv->mac80211.act_scanning) + * return; + */ + + /* Not STA mode return tmp */ + if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION) + return; + rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n"); + /* Decide the current status and if modify initial gain or not */ + if (rtlpriv->mac80211.link_state >= MAC80211_LINKED) + de_digtable->cursta_cstate = DIG_STA_CONNECT; + else + de_digtable->cursta_cstate = DIG_STA_DISCONNECT; + + /* adjust initial gain according to false alarm counter */ + if (falsealm_cnt->cnt_all < DM_DIG_FA_TH0) + value_igi--; + else if (falsealm_cnt->cnt_all < DM_DIG_FA_TH1) + value_igi += 0; + else if (falsealm_cnt->cnt_all < DM_DIG_FA_TH2) + value_igi++; + else if (falsealm_cnt->cnt_all >= DM_DIG_FA_TH2) + value_igi += 2; + rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, + "dm_DIG() Before: large_fa_hit=%d, forbidden_igi=%x\n", + de_digtable->large_fa_hit, de_digtable->forbidden_igi); + rtl_dbg(rtlpriv, COMP_DIG, 
DBG_LOUD, + "dm_DIG() Before: Recover_cnt=%d, rx_gain_min=%x\n", + de_digtable->recover_cnt, de_digtable->rx_gain_min); + + /* deal with abnormally large false alarm */ + if (falsealm_cnt->cnt_all > 10000) { + rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, + "dm_DIG(): Abnormally false alarm case\n"); + + de_digtable->large_fa_hit++; + if (de_digtable->forbidden_igi < de_digtable->cur_igvalue) { + de_digtable->forbidden_igi = de_digtable->cur_igvalue; + de_digtable->large_fa_hit = 1; + } + if (de_digtable->large_fa_hit >= 3) { + if ((de_digtable->forbidden_igi + 1) > DM_DIG_MAX) + de_digtable->rx_gain_min = DM_DIG_MAX; + else + de_digtable->rx_gain_min = + (de_digtable->forbidden_igi + 1); + de_digtable->recover_cnt = 3600; /* 3600=2hr */ + } + } else { + /* Recovery mechanism for IGI lower bound */ + if (de_digtable->recover_cnt != 0) { + de_digtable->recover_cnt--; + } else { + if (de_digtable->large_fa_hit == 0) { + if ((de_digtable->forbidden_igi - 1) < + DM_DIG_FA_LOWER) { + de_digtable->forbidden_igi = + DM_DIG_FA_LOWER; + de_digtable->rx_gain_min = + DM_DIG_FA_LOWER; + + } else { + de_digtable->forbidden_igi--; + de_digtable->rx_gain_min = + (de_digtable->forbidden_igi + 1); + } + } else if (de_digtable->large_fa_hit == 3) { + de_digtable->large_fa_hit = 0; + } + } + } + rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, + "dm_DIG() After: large_fa_hit=%d, forbidden_igi=%x\n", + de_digtable->large_fa_hit, de_digtable->forbidden_igi); + rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, + "dm_DIG() After: recover_cnt=%d, rx_gain_min=%x\n", + de_digtable->recover_cnt, de_digtable->rx_gain_min); + + if (value_igi > DM_DIG_MAX) + value_igi = DM_DIG_MAX; + else if (value_igi < de_digtable->rx_gain_min) + value_igi = de_digtable->rx_gain_min; + de_digtable->cur_igvalue = value_igi; + rtl92d_dm_write_dig(hw); + if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) + rtl92d_dm_cck_packet_detection_thresh(hw); + rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "<<==\n"); +} +EXPORT_SYMBOL_GPL(rtl92d_dm_dig); + +void rtl92d_dm_init_edca_turbo(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtlpriv->dm.current_turbo_edca = false; + rtlpriv->dm.is_any_nonbepkts = false; + rtlpriv->dm.is_cur_rdlstate = false; +} +EXPORT_SYMBOL_GPL(rtl92d_dm_init_edca_turbo); + +void rtl92d_dm_check_edca_turbo(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + const u32 edca_be_ul = 0x5ea42b; + const u32 edca_be_dl = 0x5ea42b; + static u64 last_txok_cnt; + static u64 last_rxok_cnt; + u64 cur_txok_cnt; + u64 cur_rxok_cnt; + + if (mac->link_state != MAC80211_LINKED) { + rtlpriv->dm.current_turbo_edca = false; + goto exit; + } + + if (!rtlpriv->dm.is_any_nonbepkts && + !rtlpriv->dm.disable_framebursting) { + cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt; + cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt; + if (cur_rxok_cnt > 4 * cur_txok_cnt) { + if (!rtlpriv->dm.is_cur_rdlstate || + !rtlpriv->dm.current_turbo_edca) { + rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, + edca_be_dl); + rtlpriv->dm.is_cur_rdlstate = true; + } + } else { + if (rtlpriv->dm.is_cur_rdlstate || + !rtlpriv->dm.current_turbo_edca) { + rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, + edca_be_ul); + rtlpriv->dm.is_cur_rdlstate = false; + } + } + rtlpriv->dm.current_turbo_edca = true; + } else { + if (rtlpriv->dm.current_turbo_edca) { + u8 tmp = AC0_BE; + + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, + &tmp); + rtlpriv->dm.current_turbo_edca = false; + } + } + +exit: + 
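For the EDCA-turbo check above: the driver compares the unicast TX and RX byte deltas accumulated since the previous poll, treats a link where RX exceeds four times TX as download-dominated, and programs the matching BE parameter set. A minimal sketch of that direction decision (the register write is replaced by a print; both parameter values are 0x5ea42b here, as in the hunk above):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define EDCA_BE_UL 0x5ea42bu
#define EDCA_BE_DL 0x5ea42bu	/* same value as the uplink set in this driver */

/* Pick the BE EDCA parameter from the per-interval byte counters,
 * mirroring the rx > 4 * tx test in the turbo check above. */
static uint32_t pick_edca_be(uint64_t tx_delta, uint64_t rx_delta,
			     bool *is_downlink)
{
	*is_downlink = rx_delta > 4 * tx_delta;
	return *is_downlink ? EDCA_BE_DL : EDCA_BE_UL;
}

int main(void)
{
	bool dl;
	uint32_t reg = pick_edca_be(1000000, 9000000, &dl);	/* sample counts */

	printf("REG_EDCA_BE_PARAM <- 0x%06x (%s)\n",
	       reg, dl ? "downlink" : "uplink");
	return 0;
}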
rtlpriv->dm.is_any_nonbepkts = false; + last_txok_cnt = rtlpriv->stats.txbytesunicast; + last_rxok_cnt = rtlpriv->stats.rxbytesunicast; +} +EXPORT_SYMBOL_GPL(rtl92d_dm_check_edca_turbo); + +void rtl92d_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rate_adaptive *ra = &rtlpriv->ra; + + ra->ratr_state = DM_RATR_STA_INIT; + ra->pre_ratr_state = DM_RATR_STA_INIT; + if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER) + rtlpriv->dm.useramask = true; + else + rtlpriv->dm.useramask = false; +} +EXPORT_SYMBOL_GPL(rtl92d_dm_init_rate_adaptive_mask); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/dm_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/dm_common.h new file mode 100644 index 000000000000..a146fc975421 --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/dm_common.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ + +#ifndef __RTL92D_DM_COMMON_H__ +#define __RTL92D_DM_COMMON_H__ + +#define HAL_DM_DIG_DISABLE BIT(0) +#define HAL_DM_HIPWR_DISABLE BIT(1) + +#define OFDM_TABLE_LENGTH 37 +#define OFDM_TABLE_SIZE_92D 43 +#define CCK_TABLE_LENGTH 33 + +#define CCK_TABLE_SIZE 33 + +#define BW_AUTO_SWITCH_HIGH_LOW 25 +#define BW_AUTO_SWITCH_LOW_HIGH 30 + +#define DM_DIG_FA_UPPER 0x32 +#define DM_DIG_FA_LOWER 0x20 +#define DM_DIG_FA_TH0 0x100 +#define DM_DIG_FA_TH1 0x400 +#define DM_DIG_FA_TH2 0x600 + +#define RXPATHSELECTION_SS_TH_LOW 30 +#define RXPATHSELECTION_DIFF_TH 18 + +#define DM_RATR_STA_INIT 0 +#define DM_RATR_STA_HIGH 1 +#define DM_RATR_STA_MIDDLE 2 +#define DM_RATR_STA_LOW 3 + +#define CTS2SELF_THVAL 30 +#define REGC38_TH 20 + +#define WAIOTTHVAL 25 + +#define TXHIGHPWRLEVEL_NORMAL 0 +#define TXHIGHPWRLEVEL_LEVEL1 1 +#define TXHIGHPWRLEVEL_LEVEL2 2 +#define TXHIGHPWRLEVEL_BT1 3 +#define TXHIGHPWRLEVEL_BT2 4 + +#define DM_TYPE_BYFW 0 +#define DM_TYPE_BYDRIVER 1 + +#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74 +#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67 +#define INDEX_MAPPING_NUM 13 + +enum dm_1r_cca { + CCA_1R = 0, + CCA_2R = 1, + CCA_MAX = 2, +}; + +enum dm_rf { + RF_SAVE = 0, + RF_NORMAL = 1, + RF_MAX = 2, +}; + +enum dm_sw_ant_switch { + ANS_ANTENNA_B = 1, + ANS_ANTENNA_A = 2, + ANS_ANTENNA_MAX = 3, +}; + +void rtl92d_dm_initialize_txpower_tracking(struct ieee80211_hw *hw); +void rtl92d_dm_check_txpower_tracking_thermal_meter(struct ieee80211_hw *hw); +void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw); +void rtl92d_dm_find_minimum_rssi(struct ieee80211_hw *hw); +void rtl92d_dm_write_dig(struct ieee80211_hw *hw); +void rtl92d_dm_dig(struct ieee80211_hw *hw); +void rtl92d_dm_init_edca_turbo(struct ieee80211_hw *hw); +void rtl92d_dm_check_edca_turbo(struct ieee80211_hw *hw); +void rtl92d_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw); + +#endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/fw_common.c new file mode 100644 index 000000000000..aa54dbde6ea8 --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/fw_common.c @@ -0,0 +1,370 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ + +#include "../wifi.h" +#include "../pci.h" +#include "../base.h" +#include "../efuse.h" +#include "def.h" +#include "reg.h" +#include "fw_common.h" + +bool rtl92d_is_fw_downloaded(struct rtl_priv *rtlpriv) +{ + return !!(rtl_read_dword(rtlpriv, REG_MCUFWDL) & MCUFWDL_RDY); +} +EXPORT_SYMBOL_GPL(rtl92d_is_fw_downloaded); + 
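The IS_FW_HEADER_EXIST() macro in the new fw_common.h identifies a firmware image by the 16-bit signature held in the first little-endian dword of the header. A standalone sketch of that check, with a made-up sample blob standing in for a real 8192D firmware image:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Little-endian load of the low 16 bits of the first header dword,
 * standing in for the le32_get_bits() accessors in fw_common.h. */
static uint16_t fw_hdr_signature(const uint8_t *fw)
{
	return (uint16_t)(fw[0] | (fw[1] << 8));
}

/* Accept the same signature set as IS_FW_HEADER_EXIST(). */
static bool fw_header_exists(const uint8_t *fw)
{
	uint16_t sig = fw_hdr_signature(fw);

	if ((sig & 0xFFF0) == 0x92C0 || (sig & 0xFFF0) == 0x88C0)
		return true;
	return sig == 0x92D0 || sig == 0x92D1 ||
	       sig == 0x92D2 || sig == 0x92D3;
}

int main(void)
{
	/* First bytes of a hypothetical 8192D firmware image. */
	uint8_t blob[8] = { 0xd1, 0x92, 0x00, 0x00, 0x30, 0x00, 0x05, 0x00 };

	printf("header present: %s\n", fw_header_exists(blob) ? "yes" : "no");
	printf("signature: 0x%04x\n", fw_hdr_signature(blob));
	return 0;
}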
+void rtl92d_enable_fw_download(struct ieee80211_hw *hw, bool enable) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 tmp; + + if (enable) { + tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp | 0x04); + tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL); + rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp | 0x01); + tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2); + rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7); + } else { + tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL); + rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe); + /* Reserved for fw extension. + * 0x81[7] is used for mac0 status , + * so don't write this reg here + * rtl_write_byte(rtlpriv, REG_MCUFWDL + 1, 0x00); + */ + } +} +EXPORT_SYMBOL_GPL(rtl92d_enable_fw_download); + +void rtl92d_write_fw(struct ieee80211_hw *hw, + enum version_8192d version, u8 *buffer, u32 size) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 *bufferptr = buffer; + u32 pagenums, remainsize; + u32 page, offset; + + rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) + rtl_fill_dummy(bufferptr, &size); + + pagenums = size / FW_8192D_PAGE_SIZE; + remainsize = size % FW_8192D_PAGE_SIZE; + + if (pagenums > 8) + pr_err("Page numbers should not greater then 8\n"); + + for (page = 0; page < pagenums; page++) { + offset = page * FW_8192D_PAGE_SIZE; + rtl_fw_page_write(hw, page, (bufferptr + offset), + FW_8192D_PAGE_SIZE); + } + + if (remainsize) { + offset = pagenums * FW_8192D_PAGE_SIZE; + page = pagenums; + rtl_fw_page_write(hw, page, (bufferptr + offset), remainsize); + } +} +EXPORT_SYMBOL_GPL(rtl92d_write_fw); + +int rtl92d_fw_free_to_go(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 counter = 0; + u32 value32; + + do { + value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); + } while ((counter++ < FW_8192D_POLLING_TIMEOUT_COUNT) && + (!(value32 & FWDL_CHKSUM_RPT))); + + if (counter >= FW_8192D_POLLING_TIMEOUT_COUNT) { + pr_err("chksum report fail! REG_MCUFWDL:0x%08x\n", + value32); + return -EIO; + } + + value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); + value32 |= MCUFWDL_RDY; + rtl_write_dword(rtlpriv, REG_MCUFWDL, value32); + + return 0; +} +EXPORT_SYMBOL_GPL(rtl92d_fw_free_to_go); + +#define RTL_USB_DELAY_FACTOR 60 + +void rtl92d_firmware_selfreset(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + u8 u1b_tmp; + u8 delay = 100; + + if (rtlhal->interface == INTF_USB) { + delay *= RTL_USB_DELAY_FACTOR; + + rtl_write_byte(rtlpriv, REG_FSIMR, 0); + + /* We need to disable other HRCV INT to influence 8051 reset. */ + rtl_write_byte(rtlpriv, REG_FWIMR, 0x20); + + /* Close mask to prevent incorrect FW write operation. 
*/ + rtl_write_byte(rtlpriv, REG_FTIMR, 0); + } + + /* Set (REG_HMETFR + 3) to 0x20 is reset 8051 */ + rtl_write_byte(rtlpriv, REG_HMETFR + 3, 0x20); + + u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); + + while (u1b_tmp & (FEN_CPUEN >> 8)) { + delay--; + if (delay == 0) + break; + udelay(50); + u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); + } + + if (rtlhal->interface == INTF_USB) { + if ((u1b_tmp & (FEN_CPUEN >> 8)) && delay == 0) + rtl_write_byte(rtlpriv, REG_FWIMR, 0); + } + + WARN_ONCE((delay <= 0), "rtl8192de: 8051 reset failed!\n"); + rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG, + "=====> 8051 reset success (%d)\n", delay); +} +EXPORT_SYMBOL_GPL(rtl92d_firmware_selfreset); + +int rtl92d_fw_init(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u32 counter; + + rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG, "FW already have download\n"); + /* polling for FW ready */ + counter = 0; + do { + if (rtlhal->interfaceindex == 0) { + if (rtl_read_byte(rtlpriv, FW_MAC0_READY) & + MAC0_READY) { + rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG, + "Polling FW ready success!! REG_MCUFWDL: 0x%x\n", + rtl_read_byte(rtlpriv, + FW_MAC0_READY)); + return 0; + } + udelay(5); + } else { + if (rtl_read_byte(rtlpriv, FW_MAC1_READY) & + MAC1_READY) { + rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG, + "Polling FW ready success!! REG_MCUFWDL: 0x%x\n", + rtl_read_byte(rtlpriv, + FW_MAC1_READY)); + return 0; + } + udelay(5); + } + } while (counter++ < POLLING_READY_TIMEOUT_COUNT); + + if (rtlhal->interfaceindex == 0) { + rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG, + "Polling FW ready fail!! MAC0 FW init not ready: 0x%x\n", + rtl_read_byte(rtlpriv, FW_MAC0_READY)); + } else { + rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG, + "Polling FW ready fail!! MAC1 FW init not ready: 0x%x\n", + rtl_read_byte(rtlpriv, FW_MAC1_READY)); + } + rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG, + "Polling FW ready fail!! REG_MCUFWDL:0x%08x\n", + rtl_read_dword(rtlpriv, REG_MCUFWDL)); + return -1; +} +EXPORT_SYMBOL_GPL(rtl92d_fw_init); + +static bool _rtl92d_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 val_hmetfr; + bool result = false; + + val_hmetfr = rtl_read_byte(rtlpriv, REG_HMETFR); + if (((val_hmetfr >> boxnum) & BIT(0)) == 0) + result = true; + return result; +} + +void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw, + u8 element_id, u32 cmd_len, u8 *cmdbuffer) +{ + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 boxcontent[4], boxextcontent[2]; + u16 box_reg = 0, box_extreg = 0; + u8 wait_writeh2c_limmit = 100; + bool bwrite_success = false; + u8 wait_h2c_limmit = 100; + u32 h2c_waitcounter = 0; + bool isfw_read = false; + unsigned long flag; + u8 u1b_tmp; + u8 boxnum; + u8 idx; + + if (ppsc->rfpwr_state == ERFOFF || ppsc->inactive_pwrstate == ERFOFF) { + rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, + "Return as RF is off!!!\n"); + return; + } + + rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n"); + + while (true) { + spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag); + if (rtlhal->h2c_setinprogress) { + rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, + "H2C set in progress! 
Wait to set..element_id(%d)\n", + element_id); + + while (rtlhal->h2c_setinprogress) { + spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, + flag); + h2c_waitcounter++; + rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, + "Wait 100 us (%d times)...\n", + h2c_waitcounter); + udelay(100); + + if (h2c_waitcounter > 1000) + return; + + spin_lock_irqsave(&rtlpriv->locks.h2c_lock, + flag); + } + spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); + } else { + rtlhal->h2c_setinprogress = true; + spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); + break; + } + } + + while (!bwrite_success) { + wait_writeh2c_limmit--; + if (wait_writeh2c_limmit == 0) { + pr_err("Write H2C fail because no trigger for FW INT!\n"); + break; + } + + boxnum = rtlhal->last_hmeboxnum; + if (boxnum > 3) { + pr_err("boxnum %#x too big\n", boxnum); + break; + } + + box_reg = REG_HMEBOX_0 + boxnum * SIZE_OF_REG_HMEBOX; + box_extreg = REG_HMEBOX_EXT_0 + boxnum * SIZE_OF_REG_HMEBOX_EXT; + + isfw_read = _rtl92d_check_fw_read_last_h2c(hw, boxnum); + while (!isfw_read) { + wait_h2c_limmit--; + if (wait_h2c_limmit == 0) { + rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, + "Waiting too long for FW read clear HMEBox(%d)!\n", + boxnum); + break; + } + + udelay(10); + + isfw_read = _rtl92d_check_fw_read_last_h2c(hw, boxnum); + u1b_tmp = rtl_read_byte(rtlpriv, 0x1BF); + rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, + "Waiting for FW read clear HMEBox(%d)!!! 0x1BF = %2x\n", + boxnum, u1b_tmp); + } + + if (!isfw_read) { + rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, + "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n", + boxnum); + break; + } + + memset(boxcontent, 0, sizeof(boxcontent)); + memset(boxextcontent, 0, sizeof(boxextcontent)); + boxcontent[0] = element_id; + + rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, + "Write element_id box_reg(%4x) = %2x\n", + box_reg, element_id); + + switch (cmd_len) { + case 1 ... 3: + /* BOX: | ID | A0 | A1 | A2 | + * BOX_EXT: --- N/A ------ + */ + boxcontent[0] &= ~BIT(7); + memcpy(boxcontent + 1, cmdbuffer, cmd_len); + + for (idx = 0; idx < 4; idx++) + rtl_write_byte(rtlpriv, box_reg + idx, + boxcontent[idx]); + break; + case 4 ... 
5: + /* * ID ext = ID | BIT(7) + * BOX: | ID ext | A2 | A3 | A4 | + * BOX_EXT: | A0 | A1 | + */ + boxcontent[0] |= BIT(7); + memcpy(boxextcontent, cmdbuffer, 2); + memcpy(boxcontent + 1, cmdbuffer + 2, cmd_len - 2); + + for (idx = 0; idx < 2; idx++) + rtl_write_byte(rtlpriv, box_extreg + idx, + boxextcontent[idx]); + + for (idx = 0; idx < 4; idx++) + rtl_write_byte(rtlpriv, box_reg + idx, + boxcontent[idx]); + break; + default: + pr_err("switch case %#x not processed\n", cmd_len); + break; + } + + bwrite_success = true; + rtlhal->last_hmeboxnum = boxnum + 1; + if (rtlhal->last_hmeboxnum == 4) + rtlhal->last_hmeboxnum = 0; + + rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, + "pHalData->last_hmeboxnum = %d\n", + rtlhal->last_hmeboxnum); + } + spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag); + rtlhal->h2c_setinprogress = false; + spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); + rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n"); +} +EXPORT_SYMBOL_GPL(rtl92d_fill_h2c_cmd); + +void rtl92d_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus) +{ + u8 u1_joinbssrpt_parm[1] = {0}; + + u1_joinbssrpt_parm[0] = mstatus; + rtl92d_fill_h2c_cmd(hw, H2C_JOINBSSRPT, 1, u1_joinbssrpt_parm); +} +EXPORT_SYMBOL_GPL(rtl92d_set_fw_joinbss_report_cmd); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/fw_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/fw_common.h new file mode 100644 index 000000000000..4b73e0bd4ac4 --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/fw_common.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ + +#ifndef __RTL92D_FW_COMMON_H__ +#define __RTL92D_FW_COMMON_H__ + +#define FW_8192D_START_ADDRESS 0x1000 +#define FW_8192D_PAGE_SIZE 4096 +#define FW_8192D_POLLING_TIMEOUT_COUNT 1000 + +#define IS_FW_HEADER_EXIST(_pfwhdr) \ + ((GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFF0) == 0x92C0 || \ + (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFF0) == 0x88C0 || \ + (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D0 || \ + (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D1 || \ + (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D2 || \ + (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D3) + +/* Firmware Header(8-byte alinment required) */ +/* --- LONG WORD 0 ---- */ +#define GET_FIRMWARE_HDR_SIGNATURE(__fwhdr) \ + le32_get_bits(*(__le32 *)__fwhdr, GENMASK(15, 0)) +#define GET_FIRMWARE_HDR_VERSION(__fwhdr) \ + le32_get_bits(*(__le32 *)((__fwhdr) + 4), GENMASK(15, 0)) +#define GET_FIRMWARE_HDR_SUB_VER(__fwhdr) \ + le32_get_bits(*(__le32 *)((__fwhdr) + 4), GENMASK(23, 16)) + +#define RAID_MASK GENMASK(31, 28) +#define RATE_MASK_MASK GENMASK(27, 0) +#define SHORT_GI_MASK BIT(5) +#define MACID_MASK GENMASK(4, 0) + +struct rtl92d_rate_mask_h2c { + __le32 rate_mask_and_raid; + u8 macid_and_short_gi; +} __packed; + +bool rtl92d_is_fw_downloaded(struct rtl_priv *rtlpriv); +void rtl92d_enable_fw_download(struct ieee80211_hw *hw, bool enable); +void rtl92d_write_fw(struct ieee80211_hw *hw, + enum version_8192d version, u8 *buffer, u32 size); +int rtl92d_fw_free_to_go(struct ieee80211_hw *hw); +void rtl92d_firmware_selfreset(struct ieee80211_hw *hw); +int rtl92d_fw_init(struct ieee80211_hw *hw); +void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id, + u32 cmd_len, u8 *p_cmdbuffer); +void rtl92d_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus); + +#endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.c 
b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.c new file mode 100644 index 000000000000..6570d5e168e9 --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.c @@ -0,0 +1,1225 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ + +#include "../wifi.h" +#include "../base.h" +#include "../cam.h" +#include "../efuse.h" +#include "../pci.h" +#include "../regd.h" +#include "def.h" +#include "reg.h" +#include "dm_common.h" +#include "fw_common.h" +#include "hw_common.h" +#include "phy_common.h" + +void rtl92de_stop_tx_beacon(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 tmp1byte; + + tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2); + rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte & (~BIT(6))); + rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xff); + rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64); + tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2); + tmp1byte &= ~(BIT(0)); + rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte); +} +EXPORT_SYMBOL_GPL(rtl92de_stop_tx_beacon); + +void rtl92de_resume_tx_beacon(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 tmp1byte; + + tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2); + rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte | BIT(6)); + rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0x0a); + rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff); + tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2); + tmp1byte |= BIT(0); + rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte); +} +EXPORT_SYMBOL_GPL(rtl92de_resume_tx_beacon); + +void rtl92d_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + + switch (variable) { + case HW_VAR_RF_STATE: + *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state; + break; + case HW_VAR_FWLPS_RF_ON:{ + enum rf_pwrstate rfstate; + u32 val_rcr; + + rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE, + (u8 *)(&rfstate)); + if (rfstate == ERFOFF) { + *((bool *)(val)) = true; + } else { + val_rcr = rtl_read_dword(rtlpriv, REG_RCR); + val_rcr &= 0x00070000; + if (val_rcr) + *((bool *)(val)) = false; + else + *((bool *)(val)) = true; + } + break; + } + case HW_VAR_FW_PSMODE_STATUS: + *((bool *)(val)) = ppsc->fw_current_inpsmode; + break; + case HW_VAR_CORRECT_TSF:{ + u64 tsf; + u32 *ptsf_low = (u32 *)&tsf; + u32 *ptsf_high = ((u32 *)&tsf) + 1; + + *ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4)); + *ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR); + *((u64 *)(val)) = tsf; + break; + } + case HW_VAR_INT_MIGRATION: + *((bool *)(val)) = rtlpriv->dm.interrupt_migration; + break; + case HW_VAR_INT_AC: + *((bool *)(val)) = rtlpriv->dm.disable_tx_int; + break; + case HAL_DEF_WOWLAN: + break; + default: + pr_err("switch case %#x not processed\n", variable); + break; + } +} +EXPORT_SYMBOL_GPL(rtl92d_get_hw_reg); + +void rtl92d_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + u8 idx; + + switch (variable) { + case HW_VAR_ETHER_ADDR: + for (idx = 0; idx < ETH_ALEN; idx++) { + rtl_write_byte(rtlpriv, (REG_MACID + idx), + val[idx]); + } + break; + case HW_VAR_BASIC_RATE: { + u16 rate_cfg = ((u16 *)val)[0]; + u8 
rate_index = 0; + + rate_cfg = rate_cfg & 0x15f; + if (mac->vendor == PEER_CISCO && + ((rate_cfg & 0x150) == 0)) + rate_cfg |= 0x01; + rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff); + rtl_write_byte(rtlpriv, REG_RRSR + 1, + (rate_cfg >> 8) & 0xff); + while (rate_cfg > 0x1) { + rate_cfg = (rate_cfg >> 1); + rate_index++; + } + if (rtlhal->fw_version > 0xe) + rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, + rate_index); + break; + } + case HW_VAR_BSSID: + for (idx = 0; idx < ETH_ALEN; idx++) { + rtl_write_byte(rtlpriv, (REG_BSSID + idx), + val[idx]); + } + break; + case HW_VAR_SIFS: + rtl_write_byte(rtlpriv, REG_SIFS_CTX + 1, val[0]); + rtl_write_byte(rtlpriv, REG_SIFS_TRX + 1, val[1]); + rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]); + rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]); + if (!mac->ht_enable) + rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM, + 0x0e0e); + else + rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM, + *((u16 *)val)); + break; + case HW_VAR_SLOT_TIME: { + u8 e_aci; + + rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD, + "HW_VAR_SLOT_TIME %x\n", val[0]); + rtl_write_byte(rtlpriv, REG_SLOT, val[0]); + for (e_aci = 0; e_aci < AC_MAX; e_aci++) + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_AC_PARAM, + (&e_aci)); + break; + } + case HW_VAR_ACK_PREAMBLE: { + u8 reg_tmp; + u8 short_preamble = (bool)(*val); + + reg_tmp = (mac->cur_40_prime_sc) << 5; + if (short_preamble) + reg_tmp |= 0x80; + rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_tmp); + break; + } + case HW_VAR_AMPDU_MIN_SPACE: { + u8 min_spacing_to_set; + + min_spacing_to_set = *val; + if (min_spacing_to_set <= 7) { + mac->min_space_cfg = ((mac->min_space_cfg & 0xf8) | + min_spacing_to_set); + *val = min_spacing_to_set; + rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD, + "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n", + mac->min_space_cfg); + rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, + mac->min_space_cfg); + } + break; + } + case HW_VAR_SHORTGI_DENSITY: { + u8 density_to_set; + + density_to_set = *val; + mac->min_space_cfg = rtlpriv->rtlhal.minspace_cfg; + mac->min_space_cfg |= (density_to_set << 3); + rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD, + "Set HW_VAR_SHORTGI_DENSITY: %#x\n", + mac->min_space_cfg); + rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, + mac->min_space_cfg); + break; + } + case HW_VAR_AMPDU_FACTOR: { + u8 factor_toset; + u32 regtoset; + u8 *ptmp_byte = NULL; + u8 index; + + if (rtlhal->macphymode == DUALMAC_DUALPHY) + regtoset = 0xb9726641; + else if (rtlhal->macphymode == DUALMAC_SINGLEPHY) + regtoset = 0x66626641; + else + regtoset = 0xb972a841; + factor_toset = *val; + if (factor_toset <= 3) { + factor_toset = (1 << (factor_toset + 2)); + if (factor_toset > 0xf) + factor_toset = 0xf; + for (index = 0; index < 4; index++) { + ptmp_byte = (u8 *)(&regtoset) + index; + if ((*ptmp_byte & 0xf0) > + (factor_toset << 4)) + *ptmp_byte = (*ptmp_byte & 0x0f) + | (factor_toset << 4); + if ((*ptmp_byte & 0x0f) > factor_toset) + *ptmp_byte = (*ptmp_byte & 0xf0) + | (factor_toset); + } + rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, regtoset); + rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD, + "Set HW_VAR_AMPDU_FACTOR: %#x\n", + factor_toset); + } + break; + } + case HW_VAR_RETRY_LIMIT: { + u8 retry_limit = val[0]; + + rtl_write_word(rtlpriv, REG_RL, + retry_limit << RETRY_LIMIT_SHORT_SHIFT | + retry_limit << RETRY_LIMIT_LONG_SHIFT); + break; + } + case HW_VAR_DUAL_TSF_RST: + rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1))); + break; + case HW_VAR_EFUSE_BYTES: + rtlefuse->efuse_usedbytes = *((u16 *)val); + break; + case HW_VAR_EFUSE_USAGE: + 
rtlefuse->efuse_usedpercentage = *val; + break; + case HW_VAR_IO_CMD: + rtl92d_phy_set_io_cmd(hw, (*(enum io_type *)val)); + break; + case HW_VAR_WPA_CONFIG: + rtl_write_byte(rtlpriv, REG_SECCFG, *val); + break; + case HW_VAR_SET_RPWM: + rtl92d_fill_h2c_cmd(hw, H2C_PWRM, 1, (val)); + break; + case HW_VAR_H2C_FW_PWRMODE: + break; + case HW_VAR_FW_PSMODE_STATUS: + ppsc->fw_current_inpsmode = *((bool *)val); + break; + case HW_VAR_AID: { + u16 u2btmp; + + u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT); + u2btmp &= 0xC000; + rtl_write_word(rtlpriv, REG_BCN_PSR_RPT, (u2btmp | + mac->assoc_id)); + break; + } + default: + pr_err("switch case %#x not processed\n", variable); + break; + } +} +EXPORT_SYMBOL_GPL(rtl92d_set_hw_reg); + +bool rtl92de_llt_write(struct ieee80211_hw *hw, u32 address, u32 data) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + bool status = true; + long count = 0; + u32 value = _LLT_INIT_ADDR(address) | + _LLT_INIT_DATA(data) | _LLT_OP(_LLT_WRITE_ACCESS); + + rtl_write_dword(rtlpriv, REG_LLT_INIT, value); + do { + value = rtl_read_dword(rtlpriv, REG_LLT_INIT); + if (_LLT_OP_VALUE(value) == _LLT_NO_ACTIVE) + break; + if (count > POLLING_LLT_THRESHOLD) { + pr_err("Failed to polling write LLT done at address %d!\n", + address); + status = false; + break; + } + } while (++count); + return status; +} +EXPORT_SYMBOL_GPL(rtl92de_llt_write); + +void rtl92de_enable_hw_security_config(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 sec_reg_value; + + rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, + "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n", + rtlpriv->sec.pairwise_enc_algorithm, + rtlpriv->sec.group_enc_algorithm); + if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) { + rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, + "not open hw encryption\n"); + return; + } + sec_reg_value = SCR_TXENCENABLE | SCR_RXENCENABLE; + if (rtlpriv->sec.use_defaultkey) { + sec_reg_value |= SCR_TXUSEDK; + sec_reg_value |= SCR_RXUSEDK; + } + sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK); + rtl_write_byte(rtlpriv, REG_CR + 1, 0x02); + rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD, + "The SECR-value %x\n", sec_reg_value); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value); +} +EXPORT_SYMBOL_GPL(rtl92de_enable_hw_security_config); + +/* don't set REG_EDCA_BE_PARAM here because + * mac80211 will send pkt when scan + */ +void rtl92de_set_qos(struct ieee80211_hw *hw, int aci) +{ + rtl92d_dm_init_edca_turbo(hw); +} +EXPORT_SYMBOL_GPL(rtl92de_set_qos); + +static enum version_8192d _rtl92d_read_chip_version(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + enum version_8192d version = VERSION_NORMAL_CHIP_92D_SINGLEPHY; + u32 value32; + + value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG); + if (!(value32 & 0x000f0000)) { + version = VERSION_TEST_CHIP_92D_SINGLEPHY; + rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "TEST CHIP!!!\n"); + } else { + version = VERSION_NORMAL_CHIP_92D_SINGLEPHY; + rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Normal CHIP!!!\n"); + } + return version; +} + +static void _rtl92de_readpowervalue_fromprom(struct txpower_info *pwrinfo, + u8 *efuse, bool autoloadfail) +{ + u32 rfpath, eeaddr, group, offset, offset1, offset2; + u8 i, val8; + + memset(pwrinfo, 0, sizeof(struct txpower_info)); + if (autoloadfail) { + for (group = 0; group < CHANNEL_GROUP_MAX; group++) { + for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) { + if (group < CHANNEL_GROUP_MAX_2G) { + pwrinfo->cck_index[rfpath][group] = + EEPROM_DEFAULT_TXPOWERLEVEL_2G; + 
pwrinfo->ht40_1sindex[rfpath][group] = + EEPROM_DEFAULT_TXPOWERLEVEL_2G; + } else { + pwrinfo->ht40_1sindex[rfpath][group] = + EEPROM_DEFAULT_TXPOWERLEVEL_5G; + } + pwrinfo->ht40_2sindexdiff[rfpath][group] = + EEPROM_DEFAULT_HT40_2SDIFF; + pwrinfo->ht20indexdiff[rfpath][group] = + EEPROM_DEFAULT_HT20_DIFF; + pwrinfo->ofdmindexdiff[rfpath][group] = + EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF; + pwrinfo->ht40maxoffset[rfpath][group] = + EEPROM_DEFAULT_HT40_PWRMAXOFFSET; + pwrinfo->ht20maxoffset[rfpath][group] = + EEPROM_DEFAULT_HT20_PWRMAXOFFSET; + } + } + for (i = 0; i < 3; i++) { + pwrinfo->tssi_a[i] = EEPROM_DEFAULT_TSSI; + pwrinfo->tssi_b[i] = EEPROM_DEFAULT_TSSI; + } + return; + } + + /* Maybe autoload OK,buf the tx power index value is not filled. + * If we find it, we set it to default value. + */ + for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) { + for (group = 0; group < CHANNEL_GROUP_MAX_2G; group++) { + eeaddr = EEPROM_CCK_TX_PWR_INX_2G + (rfpath * 3) + group; + + pwrinfo->cck_index[rfpath][group] = + efuse[eeaddr] == 0xFF ? + (eeaddr > 0x7B ? + EEPROM_DEFAULT_TXPOWERLEVEL_5G : + EEPROM_DEFAULT_TXPOWERLEVEL_2G) : + efuse[eeaddr]; + } + } + for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) { + for (group = 0; group < CHANNEL_GROUP_MAX; group++) { + offset1 = group / 3; + offset2 = group % 3; + eeaddr = EEPROM_HT40_1S_TX_PWR_INX_2G + (rfpath * 3); + eeaddr += offset2 + offset1 * 21; + + pwrinfo->ht40_1sindex[rfpath][group] = + efuse[eeaddr] == 0xFF ? + (eeaddr > 0x7B ? + EEPROM_DEFAULT_TXPOWERLEVEL_5G : + EEPROM_DEFAULT_TXPOWERLEVEL_2G) : + efuse[eeaddr]; + } + } + + /* These just for 92D efuse offset. */ + for (group = 0; group < CHANNEL_GROUP_MAX; group++) { + for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) { + offset1 = group / 3; + offset2 = group % 3; + offset = offset2 + offset1 * 21; + + val8 = efuse[EEPROM_HT40_2S_TX_PWR_INX_DIFF_2G + offset]; + if (val8 != 0xFF) + pwrinfo->ht40_2sindexdiff[rfpath][group] = + (val8 >> (rfpath * 4)) & 0xF; + else + pwrinfo->ht40_2sindexdiff[rfpath][group] = + EEPROM_DEFAULT_HT40_2SDIFF; + + val8 = efuse[EEPROM_HT20_TX_PWR_INX_DIFF_2G + offset]; + if (val8 != 0xFF) + pwrinfo->ht20indexdiff[rfpath][group] = + (val8 >> (rfpath * 4)) & 0xF; + else + pwrinfo->ht20indexdiff[rfpath][group] = + EEPROM_DEFAULT_HT20_DIFF; + + val8 = efuse[EEPROM_OFDM_TX_PWR_INX_DIFF_2G + offset]; + if (val8 != 0xFF) + pwrinfo->ofdmindexdiff[rfpath][group] = + (val8 >> (rfpath * 4)) & 0xF; + else + pwrinfo->ofdmindexdiff[rfpath][group] = + EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF; + + val8 = efuse[EEPROM_HT40_MAX_PWR_OFFSET_2G + offset]; + if (val8 != 0xFF) + pwrinfo->ht40maxoffset[rfpath][group] = + (val8 >> (rfpath * 4)) & 0xF; + else + pwrinfo->ht40maxoffset[rfpath][group] = + EEPROM_DEFAULT_HT40_PWRMAXOFFSET; + + val8 = efuse[EEPROM_HT20_MAX_PWR_OFFSET_2G + offset]; + if (val8 != 0xFF) + pwrinfo->ht20maxoffset[rfpath][group] = + (val8 >> (rfpath * 4)) & 0xF; + else + pwrinfo->ht20maxoffset[rfpath][group] = + EEPROM_DEFAULT_HT20_PWRMAXOFFSET; + } + } + + if (efuse[EEPROM_TSSI_A_5G] != 0xFF) { + /* 5GL */ + pwrinfo->tssi_a[0] = efuse[EEPROM_TSSI_A_5G] & 0x3F; + pwrinfo->tssi_b[0] = efuse[EEPROM_TSSI_B_5G] & 0x3F; + /* 5GM */ + pwrinfo->tssi_a[1] = efuse[EEPROM_TSSI_AB_5G] & 0x3F; + pwrinfo->tssi_b[1] = (efuse[EEPROM_TSSI_AB_5G] & 0xC0) >> 6 | + (efuse[EEPROM_TSSI_AB_5G + 1] & 0x0F) << 2; + /* 5GH */ + pwrinfo->tssi_a[2] = (efuse[EEPROM_TSSI_AB_5G + 1] & 0xF0) >> 4 | + (efuse[EEPROM_TSSI_AB_5G + 2] & 0x03) << 4; + pwrinfo->tssi_b[2] = (efuse[EEPROM_TSSI_AB_5G + 2] & 
0xFC) >> 2; + } else { + for (i = 0; i < 3; i++) { + pwrinfo->tssi_a[i] = EEPROM_DEFAULT_TSSI; + pwrinfo->tssi_b[i] = EEPROM_DEFAULT_TSSI; + } + } +} + +static void _rtl92de_read_txpower_info(struct ieee80211_hw *hw, + bool autoload_fail, u8 *hwinfo) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct txpower_info pwrinfo; + u8 tempval[2], i, pwr, diff; + u32 ch, rfpath, group; + + _rtl92de_readpowervalue_fromprom(&pwrinfo, hwinfo, autoload_fail); + if (!autoload_fail) { + /* bit0~2 */ + rtlefuse->eeprom_regulatory = (hwinfo[EEPROM_RF_OPT1] & 0x7); + rtlefuse->eeprom_thermalmeter = + hwinfo[EEPROM_THERMAL_METER] & 0x1f; + rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_K]; + tempval[0] = hwinfo[EEPROM_IQK_DELTA] & 0x03; + tempval[1] = (hwinfo[EEPROM_LCK_DELTA] & 0x0C) >> 2; + rtlefuse->txpwr_fromeprom = true; + if (IS_92D_D_CUT(rtlpriv->rtlhal.version) || + IS_92D_E_CUT(rtlpriv->rtlhal.version)) { + rtlefuse->internal_pa_5g[0] = + !((hwinfo[EEPROM_TSSI_A_5G] & BIT(6)) >> 6); + rtlefuse->internal_pa_5g[1] = + !((hwinfo[EEPROM_TSSI_B_5G] & BIT(6)) >> 6); + rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, + "Is D cut,Internal PA0 %d Internal PA1 %d\n", + rtlefuse->internal_pa_5g[0], + rtlefuse->internal_pa_5g[1]); + } + rtlefuse->eeprom_c9 = hwinfo[EEPROM_RF_OPT6]; + rtlefuse->eeprom_cc = hwinfo[EEPROM_RF_OPT7]; + } else { + rtlefuse->eeprom_regulatory = 0; + rtlefuse->eeprom_thermalmeter = EEPROM_DEFAULT_THERMALMETER; + rtlefuse->crystalcap = EEPROM_DEFAULT_CRYSTALCAP; + tempval[0] = 3; + tempval[1] = tempval[0]; + } + + /* Use default value to fill parameters if + * efuse is not filled on some place. + */ + + /* ThermalMeter from EEPROM */ + if (rtlefuse->eeprom_thermalmeter < 0x06 || + rtlefuse->eeprom_thermalmeter > 0x1c) + rtlefuse->eeprom_thermalmeter = 0x12; + rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter; + + /* check XTAL_K */ + if (rtlefuse->crystalcap == 0xFF) + rtlefuse->crystalcap = 0; + if (rtlefuse->eeprom_regulatory > 3) + rtlefuse->eeprom_regulatory = 0; + + for (i = 0; i < 2; i++) { + switch (tempval[i]) { + case 0: + tempval[i] = 5; + break; + case 1: + tempval[i] = 4; + break; + case 2: + tempval[i] = 3; + break; + case 3: + default: + tempval[i] = 0; + break; + } + } + + rtlefuse->delta_iqk = tempval[0]; + if (tempval[1] > 0) + rtlefuse->delta_lck = tempval[1] - 1; + if (rtlefuse->eeprom_c9 == 0xFF) + rtlefuse->eeprom_c9 = 0x00; + rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD, + "EEPROMRegulatory = 0x%x\n", rtlefuse->eeprom_regulatory); + rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD, + "ThermalMeter = 0x%x\n", rtlefuse->eeprom_thermalmeter); + rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD, + "CrystalCap = 0x%x\n", rtlefuse->crystalcap); + rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD, + "Delta_IQK = 0x%x Delta_LCK = 0x%x\n", + rtlefuse->delta_iqk, rtlefuse->delta_lck); + + for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) { + for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) { + group = rtl92d_get_chnlgroup_fromarray((u8)ch); + if (ch < CHANNEL_MAX_NUMBER_2G) + rtlefuse->txpwrlevel_cck[rfpath][ch] = + pwrinfo.cck_index[rfpath][group]; + rtlefuse->txpwrlevel_ht40_1s[rfpath][ch] = + pwrinfo.ht40_1sindex[rfpath][group]; + rtlefuse->txpwr_ht20diff[rfpath][ch] = + pwrinfo.ht20indexdiff[rfpath][group]; + rtlefuse->txpwr_legacyhtdiff[rfpath][ch] = + pwrinfo.ofdmindexdiff[rfpath][group]; + rtlefuse->pwrgroup_ht20[rfpath][ch] = + pwrinfo.ht20maxoffset[rfpath][group]; + rtlefuse->pwrgroup_ht40[rfpath][ch] = + pwrinfo.ht40maxoffset[rfpath][group]; + pwr = 
pwrinfo.ht40_1sindex[rfpath][group]; + diff = pwrinfo.ht40_2sindexdiff[rfpath][group]; + rtlefuse->txpwrlevel_ht40_2s[rfpath][ch] = + (pwr > diff) ? (pwr - diff) : 0; + } + } +} + +static void _rtl92de_read_macphymode_from_prom(struct ieee80211_hw *hw, + u8 *content) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + bool is_single_mac = true; + + if (rtlhal->interface == INTF_PCI) + is_single_mac = !!(content[EEPROM_MAC_FUNCTION] & BIT(3)); + else if (rtlhal->interface == INTF_USB) + is_single_mac = !(content[EEPROM_ENDPOINT_SETTING] & BIT(0)); + + if (is_single_mac) { + rtlhal->macphymode = SINGLEMAC_SINGLEPHY; + rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, + "MacPhyMode SINGLEMAC_SINGLEPHY\n"); + } else { + rtlhal->macphymode = DUALMAC_DUALPHY; + rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, + "MacPhyMode DUALMAC_DUALPHY\n"); + } +} + +static void _rtl92de_read_macphymode_and_bandtype(struct ieee80211_hw *hw, + u8 *content) +{ + _rtl92de_read_macphymode_from_prom(hw, content); + rtl92d_phy_config_macphymode(hw); + rtl92d_phy_config_macphymode_info(hw); +} + +static void _rtl92de_efuse_update_chip_version(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + enum version_8192d chipver = rtlpriv->rtlhal.version; + u8 cutvalue[2]; + u16 chipvalue; + + read_efuse_byte(hw, EEPROME_CHIP_VERSION_H, &cutvalue[1]); + read_efuse_byte(hw, EEPROME_CHIP_VERSION_L, &cutvalue[0]); + chipvalue = (cutvalue[1] << 8) | cutvalue[0]; + switch (chipvalue) { + case 0xAA55: + chipver |= CHIP_92D_C_CUT; + rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "C-CUT!!!\n"); + break; + case 0x9966: + chipver |= CHIP_92D_D_CUT; + rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "D-CUT!!!\n"); + break; + case 0xCC33: + case 0x33CC: + chipver |= CHIP_92D_E_CUT; + rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "E-CUT!!!\n"); + break; + default: + chipver |= CHIP_92D_D_CUT; + pr_err("Unknown CUT!\n"); + break; + } + rtlpriv->rtlhal.version = chipver; +} + +static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw) +{ + static const int params_pci[] = { + RTL8190_EEPROM_ID, EEPROM_VID, EEPROM_DID, + EEPROM_SVID, EEPROM_SMID, EEPROM_MAC_ADDR_MAC0_92D, + EEPROM_CHANNEL_PLAN, EEPROM_VERSION, EEPROM_CUSTOMER_ID, + COUNTRY_CODE_WORLD_WIDE_13 + }; + static const int params_usb[] = { + RTL8190_EEPROM_ID, EEPROM_VID_USB, EEPROM_PID_USB, + EEPROM_VID_USB, EEPROM_PID_USB, EEPROM_MAC_ADDR_MAC0_92DU, + EEPROM_CHANNEL_PLAN, EEPROM_VERSION, EEPROM_CUSTOMER_ID, + COUNTRY_CODE_WORLD_WIDE_13 + }; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + const int *params = params_pci; + u8 *hwinfo; + + if (rtlhal->interface == INTF_USB) + params = params_usb; + + hwinfo = kzalloc(HWSET_MAX_SIZE, GFP_KERNEL); + if (!hwinfo) + return; + + if (rtl_get_hwinfo(hw, rtlpriv, HWSET_MAX_SIZE, hwinfo, params)) + goto exit; + + _rtl92de_efuse_update_chip_version(hw); + _rtl92de_read_macphymode_and_bandtype(hw, hwinfo); + + /* Read Permanent MAC address for 2nd interface */ + if (rtlhal->interfaceindex != 0) + ether_addr_copy(rtlefuse->dev_addr, + &hwinfo[EEPROM_MAC_ADDR_MAC1_92D]); + + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, + rtlefuse->dev_addr); + rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr); + _rtl92de_read_txpower_info(hw, rtlefuse->autoload_failflag, hwinfo); + + /* Read Channel Plan */ + switch (rtlhal->bandset) { + case BAND_ON_2_4G: + rtlefuse->channel_plan = COUNTRY_CODE_TELEC; + break; + 
case BAND_ON_5G: + rtlefuse->channel_plan = COUNTRY_CODE_FCC; + break; + case BAND_ON_BOTH: + rtlefuse->channel_plan = COUNTRY_CODE_FCC; + break; + default: + rtlefuse->channel_plan = COUNTRY_CODE_FCC; + break; + } + rtlefuse->txpwr_fromeprom = true; +exit: + kfree(hwinfo); +} + +void rtl92de_read_eeprom_info(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 tmp_u1b; + + rtlhal->version = _rtl92d_read_chip_version(hw); + tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR); + rtlefuse->autoload_status = tmp_u1b; + if (tmp_u1b & BIT(4)) { + rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n"); + rtlefuse->epromtype = EEPROM_93C46; + } else { + rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n"); + rtlefuse->epromtype = EEPROM_BOOT_EFUSE; + } + if (tmp_u1b & BIT(5)) { + rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n"); + + rtlefuse->autoload_failflag = false; + _rtl92de_read_adapter_info(hw); + } else { + pr_err("Autoload ERR!!\n"); + } +} +EXPORT_SYMBOL_GPL(rtl92de_read_eeprom_info); + +static void rtl92de_update_hal_rate_table(struct ieee80211_hw *hw, + struct ieee80211_sta *sta) +{ + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + enum wireless_mode wirelessmode; + u8 mimo_ps = IEEE80211_SMPS_OFF; + u8 curtxbw_40mhz = mac->bw_40; + u8 nmode = mac->ht_enable; + u8 curshortgi_40mhz; + u8 curshortgi_20mhz; + u32 tmp_ratr_value; + u8 ratr_index = 0; + u16 shortgi_rate; + u32 ratr_value; + + curshortgi_40mhz = !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40); + curshortgi_20mhz = !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20); + wirelessmode = mac->mode; + + if (rtlhal->current_bandtype == BAND_ON_5G) + ratr_value = sta->deflink.supp_rates[1] << 4; + else + ratr_value = sta->deflink.supp_rates[0]; + ratr_value |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 | + sta->deflink.ht_cap.mcs.rx_mask[0] << 12); + switch (wirelessmode) { + case WIRELESS_MODE_A: + ratr_value &= 0x00000FF0; + break; + case WIRELESS_MODE_B: + if (ratr_value & 0x0000000c) + ratr_value &= 0x0000000d; + else + ratr_value &= 0x0000000f; + break; + case WIRELESS_MODE_G: + ratr_value &= 0x00000FF5; + break; + case WIRELESS_MODE_N_24G: + case WIRELESS_MODE_N_5G: + nmode = 1; + if (mimo_ps == IEEE80211_SMPS_STATIC) { + ratr_value &= 0x0007F005; + } else { + u32 ratr_mask; + + if (get_rf_type(rtlphy) == RF_1T2R || + get_rf_type(rtlphy) == RF_1T1R) { + ratr_mask = 0x000ff005; + } else { + ratr_mask = 0x0f0ff005; + } + + ratr_value &= ratr_mask; + } + break; + default: + if (rtlphy->rf_type == RF_1T2R) + ratr_value &= 0x000ff0ff; + else + ratr_value &= 0x0f0ff0ff; + + break; + } + ratr_value &= 0x0FFFFFFF; + if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) || + (!curtxbw_40mhz && curshortgi_20mhz))) { + ratr_value |= 0x10000000; + tmp_ratr_value = (ratr_value >> 12); + for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) { + if ((1 << shortgi_rate) & tmp_ratr_value) + break; + } + shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) | + (shortgi_rate << 4) | (shortgi_rate); + } + rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value); + rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n", + rtl_read_dword(rtlpriv, REG_ARFR0)); +} + +static void rtl92de_update_hal_rate_mask(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + u8 
rssi_level, bool update_bw) +{ + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl92d_rate_mask_h2c rate_mask = {}; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_sta_info *sta_entry = NULL; + enum wireless_mode wirelessmode; + bool shortgi = false; + u8 curshortgi_40mhz; + u8 curshortgi_20mhz; + u8 curtxbw_40mhz; + u32 ratr_bitmap; + u8 ratr_index; + u8 macid = 0; + u8 mimo_ps; + + curtxbw_40mhz = sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40; + curshortgi_40mhz = !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40); + curshortgi_20mhz = !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20); + + sta_entry = (struct rtl_sta_info *)sta->drv_priv; + mimo_ps = sta_entry->mimo_ps; + wirelessmode = sta_entry->wireless_mode; + + if (mac->opmode == NL80211_IFTYPE_STATION) + curtxbw_40mhz = mac->bw_40; + else if (mac->opmode == NL80211_IFTYPE_AP || + mac->opmode == NL80211_IFTYPE_ADHOC) + macid = sta->aid + 1; + + if (rtlhal->current_bandtype == BAND_ON_5G) + ratr_bitmap = sta->deflink.supp_rates[1] << 4; + else + ratr_bitmap = sta->deflink.supp_rates[0]; + ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 | + sta->deflink.ht_cap.mcs.rx_mask[0] << 12); + + switch (wirelessmode) { + case WIRELESS_MODE_B: + ratr_index = RATR_INX_WIRELESS_B; + if (ratr_bitmap & 0x0000000c) + ratr_bitmap &= 0x0000000d; + else + ratr_bitmap &= 0x0000000f; + break; + case WIRELESS_MODE_G: + ratr_index = RATR_INX_WIRELESS_GB; + + if (rssi_level == 1) + ratr_bitmap &= 0x00000f00; + else if (rssi_level == 2) + ratr_bitmap &= 0x00000ff0; + else + ratr_bitmap &= 0x00000ff5; + break; + case WIRELESS_MODE_A: + ratr_index = RATR_INX_WIRELESS_G; + ratr_bitmap &= 0x00000ff0; + break; + case WIRELESS_MODE_N_24G: + case WIRELESS_MODE_N_5G: + if (wirelessmode == WIRELESS_MODE_N_24G) + ratr_index = RATR_INX_WIRELESS_NGB; + else + ratr_index = RATR_INX_WIRELESS_NG; + + if (mimo_ps == IEEE80211_SMPS_STATIC) { + if (rssi_level == 1) + ratr_bitmap &= 0x00070000; + else if (rssi_level == 2) + ratr_bitmap &= 0x0007f000; + else + ratr_bitmap &= 0x0007f005; + } else { + if (rtlphy->rf_type == RF_1T2R || + rtlphy->rf_type == RF_1T1R) { + if (curtxbw_40mhz) { + if (rssi_level == 1) + ratr_bitmap &= 0x000f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x000ff000; + else + ratr_bitmap &= 0x000ff015; + } else { + if (rssi_level == 1) + ratr_bitmap &= 0x000f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x000ff000; + else + ratr_bitmap &= 0x000ff005; + } + } else { + if (curtxbw_40mhz) { + if (rssi_level == 1) + ratr_bitmap &= 0x0f0f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x0f0ff000; + else + ratr_bitmap &= 0x0f0ff015; + } else { + if (rssi_level == 1) + ratr_bitmap &= 0x0f0f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x0f0ff000; + else + ratr_bitmap &= 0x0f0ff005; + } + } + } + + if ((curtxbw_40mhz && curshortgi_40mhz) || + (!curtxbw_40mhz && curshortgi_20mhz)) { + if (macid == 0) + shortgi = true; + else if (macid == 1) + shortgi = false; + } + break; + default: + ratr_index = RATR_INX_WIRELESS_NGB; + + if (rtlphy->rf_type == RF_1T2R) + ratr_bitmap &= 0x000ff0ff; + else + ratr_bitmap &= 0x0f0ff0ff; + break; + } + + le32p_replace_bits(&rate_mask.rate_mask_and_raid, ratr_bitmap, RATE_MASK_MASK); + le32p_replace_bits(&rate_mask.rate_mask_and_raid, ratr_index, RAID_MASK); + u8p_replace_bits(&rate_mask.macid_and_short_gi, macid, MACID_MASK); + u8p_replace_bits(&rate_mask.macid_and_short_gi, shortgi, 
SHORT_GI_MASK); + u8p_replace_bits(&rate_mask.macid_and_short_gi, 1, BIT(7)); + + rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG, + "Rate_index:%x, ratr_val:%x, %5phC\n", + ratr_index, ratr_bitmap, &rate_mask); + + if (rtlhal->interface == INTF_PCI) { + rtl92d_fill_h2c_cmd(hw, H2C_RA_MASK, sizeof(rate_mask), + (u8 *)&rate_mask); + } else { + /* rtl92d_fill_h2c_cmd() does USB I/O and will result in a + * "scheduled while atomic" if called directly + */ + memcpy(rtlpriv->rate_mask, &rate_mask, + sizeof(rtlpriv->rate_mask)); + schedule_work(&rtlpriv->works.fill_h2c_cmd); + } + + if (macid != 0) + sta_entry->ratr_index = ratr_index; +} + +void rtl92de_update_hal_rate_tbl(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + u8 rssi_level, bool update_bw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (rtlpriv->dm.useramask) + rtl92de_update_hal_rate_mask(hw, sta, rssi_level, update_bw); + else + rtl92de_update_hal_rate_table(hw, sta); +} +EXPORT_SYMBOL_GPL(rtl92de_update_hal_rate_tbl); + +void rtl92de_update_channel_access_setting(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + u16 sifs_timer; + + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, + &mac->slot_time); + if (!mac->ht_enable) + sifs_timer = 0x0a0a; + else + sifs_timer = 0x1010; + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer); +} +EXPORT_SYMBOL_GPL(rtl92de_update_channel_access_setting); + +bool rtl92de_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + enum rf_pwrstate e_rfpowerstate_toset; + u8 u1tmp; + bool actuallyset = false; + unsigned long flag; + + if (rtlpriv->rtlhal.interface == INTF_PCI && + rtlpci->being_init_adapter) + return false; + if (ppsc->swrf_processing) + return false; + spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag); + if (ppsc->rfchange_inprogress) { + spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag); + return false; + } + + ppsc->rfchange_inprogress = true; + spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag); + + rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, + rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG) & ~(BIT(3))); + u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL); + e_rfpowerstate_toset = (u1tmp & BIT(3)) ? 
ERFON : ERFOFF; + if (ppsc->hwradiooff && e_rfpowerstate_toset == ERFON) { + rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG, + "GPIOChangeRF - HW Radio ON, RF ON\n"); + e_rfpowerstate_toset = ERFON; + ppsc->hwradiooff = false; + actuallyset = true; + } else if (!ppsc->hwradiooff && e_rfpowerstate_toset == ERFOFF) { + rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG, + "GPIOChangeRF - HW Radio OFF, RF OFF\n"); + e_rfpowerstate_toset = ERFOFF; + ppsc->hwradiooff = true; + actuallyset = true; + } + if (actuallyset) { + spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag); + ppsc->rfchange_inprogress = false; + spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag); + } else { + if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) + RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); + spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag); + ppsc->rfchange_inprogress = false; + spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag); + } + *valid = 1; + return !ppsc->hwradiooff; +} +EXPORT_SYMBOL_GPL(rtl92de_gpio_radio_on_off_checking); + +void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index, + u8 *p_macaddr, bool is_group, u8 enc_algo, + bool is_wepkey, bool clear_all) +{ + static const u8 cam_const_addr[4][6] = { + {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x02}, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x03} + }; + static const u8 cam_const_broad[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + }; + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_priv *rtlpriv = rtl_priv(hw); + const u8 *macaddr = p_macaddr; + bool is_pairwise = false; + u32 entry_id; + + if (clear_all) { + u8 idx; + u8 cam_offset = 0; + u8 clear_number = 5; + + rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n"); + for (idx = 0; idx < clear_number; idx++) { + rtl_cam_mark_invalid(hw, cam_offset + idx); + rtl_cam_empty_entry(hw, cam_offset + idx); + + if (idx < 5) { + memset(rtlpriv->sec.key_buf[idx], 0, + MAX_KEY_LEN); + rtlpriv->sec.key_len[idx] = 0; + } + } + + return; + } + + switch (enc_algo) { + case WEP40_ENCRYPTION: + enc_algo = CAM_WEP40; + break; + case WEP104_ENCRYPTION: + enc_algo = CAM_WEP104; + break; + case TKIP_ENCRYPTION: + enc_algo = CAM_TKIP; + break; + case AESCCMP_ENCRYPTION: + enc_algo = CAM_AES; + break; + default: + pr_err("switch case %#x not processed\n", + enc_algo); + enc_algo = CAM_TKIP; + break; + } + if (is_wepkey || rtlpriv->sec.use_defaultkey) { + macaddr = cam_const_addr[key_index]; + entry_id = key_index; + } else { + if (is_group) { + macaddr = cam_const_broad; + entry_id = key_index; + } else { + if (mac->opmode == NL80211_IFTYPE_AP) { + entry_id = rtl_cam_get_free_entry(hw, p_macaddr); + if (entry_id >= TOTAL_CAM_ENTRY) { + pr_err("Can not find free hw security cam entry\n"); + return; + } + } else { + entry_id = CAM_PAIRWISE_KEY_POSITION; + } + key_index = PAIRWISE_KEYIDX; + is_pairwise = true; + } + } + if (rtlpriv->sec.key_len[key_index] == 0) { + rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, + "delete one entry, entry_id is %d\n", + entry_id); + if (mac->opmode == NL80211_IFTYPE_AP) + rtl_cam_del_entry(hw, p_macaddr); + rtl_cam_delete_one_entry(hw, p_macaddr, entry_id); + } else { + rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD, + "The insert KEY length is %d\n", + rtlpriv->sec.key_len[PAIRWISE_KEYIDX]); + rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD, + "The insert KEY is %x %x\n", + rtlpriv->sec.key_buf[0][0], + rtlpriv->sec.key_buf[0][1]); + rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, + "add one entry\n"); + if 
(is_pairwise) { + RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD, + "Pairwise Key content", + rtlpriv->sec.pairwise_key, + rtlpriv->sec.key_len[PAIRWISE_KEYIDX]); + rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, + "set Pairwise key\n"); + rtl_cam_add_one_entry(hw, macaddr, key_index, + entry_id, enc_algo, + CAM_CONFIG_NO_USEDK, + rtlpriv->sec.key_buf[key_index]); + } else { + rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, + "set group key\n"); + if (mac->opmode == NL80211_IFTYPE_ADHOC) { + rtl_cam_add_one_entry(hw, + rtlefuse->dev_addr, + PAIRWISE_KEYIDX, + CAM_PAIRWISE_KEY_POSITION, + enc_algo, CAM_CONFIG_NO_USEDK, + rtlpriv->sec.key_buf[entry_id]); + } + rtl_cam_add_one_entry(hw, macaddr, key_index, + entry_id, enc_algo, + CAM_CONFIG_NO_USEDK, + rtlpriv->sec.key_buf + [entry_id]); + } + } +} +EXPORT_SYMBOL_GPL(rtl92de_set_key); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.h new file mode 100644 index 000000000000..2c07f5cc5766 --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ + +#ifndef __RTL92D_HW_COMMON_H__ +#define __RTL92D_HW_COMMON_H__ + +void rtl92de_stop_tx_beacon(struct ieee80211_hw *hw); +void rtl92de_resume_tx_beacon(struct ieee80211_hw *hw); +void rtl92d_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); +void rtl92d_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); +bool rtl92de_llt_write(struct ieee80211_hw *hw, u32 address, u32 data); +void rtl92de_enable_hw_security_config(struct ieee80211_hw *hw); +void rtl92de_set_qos(struct ieee80211_hw *hw, int aci); +void rtl92de_read_eeprom_info(struct ieee80211_hw *hw); +void rtl92de_update_hal_rate_tbl(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + u8 rssi_level, bool update_bw); +void rtl92de_update_channel_access_setting(struct ieee80211_hw *hw); +bool rtl92de_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid); +void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index, + u8 *p_macaddr, bool is_group, u8 enc_algo, + bool is_wepkey, bool clear_all); + +#endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/main.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/main.c new file mode 100644 index 000000000000..e58dc4000c19 --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/main.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ + +#include "../wifi.h" +#include <linux/module.h> + +MODULE_AUTHOR("Realtek WlanFAE <[email protected]>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Realtek 8192D 802.11n common routines"); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/phy_common.c new file mode 100644 index 000000000000..228c84ab5b90 --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/phy_common.c @@ -0,0 +1,856 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ + +#include "../wifi.h" +#include "../core.h" +#include "def.h" +#include "reg.h" +#include "dm_common.h" +#include "phy_common.h" +#include "rf_common.h" + +static const u8 channel_all[59] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, + 60, 62, 64, 100, 102, 104, 106, 108, 110, 112, + 114, 116, 118, 120, 122, 124, 126, 128, 130, + 132, 134, 136, 138, 140, 149, 151, 153, 155, + 157, 159, 
161, 163, 165 +}; + +static u32 _rtl92d_phy_rf_serial_read(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 offset) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath]; + u32 newoffset; + u32 tmplong, tmplong2; + u8 rfpi_enable = 0; + u32 retvalue; + + newoffset = offset; + tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD); + if (rfpath == RF90_PATH_A) + tmplong2 = tmplong; + else + tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD); + tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) | + (newoffset << 23) | BLSSIREADEDGE; + rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD, + tmplong & (~BLSSIREADEDGE)); + udelay(10); + rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2); + udelay(100); + rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD, + tmplong | BLSSIREADEDGE); + udelay(10); + if (rfpath == RF90_PATH_A) + rfpi_enable = (u8)rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1, + BIT(8)); + else if (rfpath == RF90_PATH_B) + rfpi_enable = (u8)rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1, + BIT(8)); + if (rfpi_enable) + retvalue = rtl_get_bbreg(hw, pphyreg->rf_rbpi, + BLSSIREADBACKDATA); + else + retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb, + BLSSIREADBACKDATA); + rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x] = 0x%x\n", + rfpath, pphyreg->rf_rb, retvalue); + return retvalue; +} + +static void _rtl92d_phy_rf_serial_write(struct ieee80211_hw *hw, + enum radio_path rfpath, + u32 offset, u32 data) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath]; + u32 data_and_addr; + u32 newoffset; + + newoffset = offset; + /* T65 RF */ + data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff; + rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr); + rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n", + rfpath, pphyreg->rf3wire_offset, data_and_addr); +} + +u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath, + u32 regaddr, u32 bitmask) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 original_value, readback_value, bitshift; + + rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, + "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n", + regaddr, rfpath, bitmask); + rtl92d_pci_lock(rtlpriv); + original_value = _rtl92d_phy_rf_serial_read(hw, rfpath, regaddr); + bitshift = calculate_bit_shift(bitmask); + readback_value = (original_value & bitmask) >> bitshift; + rtl92d_pci_unlock(rtlpriv); + rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, + "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n", + regaddr, rfpath, bitmask, original_value); + return readback_value; +} +EXPORT_SYMBOL_GPL(rtl92d_phy_query_rf_reg); + +void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath, + u32 regaddr, u32 bitmask, u32 data) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u32 original_value, bitshift; + + rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, + "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n", + regaddr, bitmask, data, rfpath); + if (bitmask == 0) + return; + rtl92d_pci_lock(rtlpriv); + if (rtlphy->rf_mode != RF_OP_BY_FW) { + if (bitmask != RFREG_OFFSET_MASK) { + original_value = _rtl92d_phy_rf_serial_read(hw, + rfpath, + regaddr); + bitshift = calculate_bit_shift(bitmask); + data = ((original_value & (~bitmask)) | + (data << bitshift)); + } + _rtl92d_phy_rf_serial_write(hw, rfpath, 
regaddr, data); + } + rtl92d_pci_unlock(rtlpriv); + rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, + "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n", + regaddr, bitmask, data, rfpath); +} +EXPORT_SYMBOL_GPL(rtl92d_phy_set_rf_reg); + +void rtl92d_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + + /* RF Interface Sowrtware Control */ + /* 16 LSBs if read 32-bit from 0x870 */ + rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW; + /* 16 MSBs if read 32-bit from 0x870 (16-bit for 0x872) */ + rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW; + /* 16 LSBs if read 32-bit from 0x874 */ + rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW; + /* 16 MSBs if read 32-bit from 0x874 (16-bit for 0x876) */ + + rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW; + /* RF Interface Readback Value */ + /* 16 LSBs if read 32-bit from 0x8E0 */ + rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB; + /* 16 MSBs if read 32-bit from 0x8E0 (16-bit for 0x8E2) */ + rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB; + /* 16 LSBs if read 32-bit from 0x8E4 */ + rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB; + /* 16 MSBs if read 32-bit from 0x8E4 (16-bit for 0x8E6) */ + rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB; + + /* RF Interface Output (and Enable) */ + /* 16 LSBs if read 32-bit from 0x860 */ + rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE; + /* 16 LSBs if read 32-bit from 0x864 */ + rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE; + + /* RF Interface (Output and) Enable */ + /* 16 MSBs if read 32-bit from 0x860 (16-bit for 0x862) */ + rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE; + /* 16 MSBs if read 32-bit from 0x864 (16-bit for 0x866) */ + rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE; + + /* Addr of LSSI. Write RF register by driver */ + /* LSSI Parameter */ + rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset = + RFPGA0_XA_LSSIPARAMETER; + rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset = + RFPGA0_XB_LSSIPARAMETER; + + /* RF parameter */ + /* BB Band Select */ + rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = RFPGA0_XAB_RFPARAMETER; + rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = RFPGA0_XAB_RFPARAMETER; + rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = RFPGA0_XCD_RFPARAMETER; + rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = RFPGA0_XCD_RFPARAMETER; + + /* Tx AGC Gain Stage (same for all path. Should we remove this?) 
*/ + /* Tx gain stage */ + rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE; + /* Tx gain stage */ + rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE; + /* Tx gain stage */ + rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE; + /* Tx gain stage */ + rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE; + + /* Transceiver A~D HSSI Parameter-1 */ + /* wire control parameter1 */ + rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1; + /* wire control parameter1 */ + rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1; + + /* Transceiver A~D HSSI Parameter-2 */ + /* wire control parameter2 */ + rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2; + /* wire control parameter2 */ + rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2; + + /* RF switch Control */ + /* TR/Ant switch control */ + rtlphy->phyreg_def[RF90_PATH_A].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL; + rtlphy->phyreg_def[RF90_PATH_B].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL; + rtlphy->phyreg_def[RF90_PATH_C].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL; + rtlphy->phyreg_def[RF90_PATH_D].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL; + + /* AGC control 1 */ + rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1; + rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1; + rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1; + rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1; + + /* AGC control 2 */ + rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2; + rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2; + rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2; + rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2; + + /* RX AFE control 1 */ + rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbal = ROFDM0_XARXIQIMBALANCE; + rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbal = ROFDM0_XBRXIQIMBALANCE; + rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbal = ROFDM0_XCRXIQIMBALANCE; + rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbal = ROFDM0_XDRXIQIMBALANCE; + + /*RX AFE control 1 */ + rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE; + rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE; + rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE; + rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE; + + /* Tx AFE control 1 */ + rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATXIQIMBALANCE; + rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTXIQIMBALANCE; + rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTXIQIMBALANCE; + rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTXIQIMBALANCE; + + /* Tx AFE control 2 */ + rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE; + rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE; + rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE; + rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE; + + /* Transceiver LSSI Readback SI mode */ + rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK; + rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RFPGA0_XB_LSSIREADBACK; + rtlphy->phyreg_def[RF90_PATH_C].rf_rb = RFPGA0_XC_LSSIREADBACK; + rtlphy->phyreg_def[RF90_PATH_D].rf_rb = RFPGA0_XD_LSSIREADBACK; + + /* Transceiver LSSI Readback PI mode */ + rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = TRANSCEIVERA_HSPI_READBACK; + rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = TRANSCEIVERB_HSPI_READBACK; +} 
+EXPORT_SYMBOL_GPL(rtl92d_phy_init_bb_rf_register_definition); + +void rtl92d_store_pwrindex_diffrate_offset(struct ieee80211_hw *hw, + u32 regaddr, u32 bitmask, u32 data) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + int index; + + if (regaddr == RTXAGC_A_RATE18_06) + index = 0; + else if (regaddr == RTXAGC_A_RATE54_24) + index = 1; + else if (regaddr == RTXAGC_A_CCK1_MCS32) + index = 6; + else if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00) + index = 7; + else if (regaddr == RTXAGC_A_MCS03_MCS00) + index = 2; + else if (regaddr == RTXAGC_A_MCS07_MCS04) + index = 3; + else if (regaddr == RTXAGC_A_MCS11_MCS08) + index = 4; + else if (regaddr == RTXAGC_A_MCS15_MCS12) + index = 5; + else if (regaddr == RTXAGC_B_RATE18_06) + index = 8; + else if (regaddr == RTXAGC_B_RATE54_24) + index = 9; + else if (regaddr == RTXAGC_B_CCK1_55_MCS32) + index = 14; + else if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) + index = 15; + else if (regaddr == RTXAGC_B_MCS03_MCS00) + index = 10; + else if (regaddr == RTXAGC_B_MCS07_MCS04) + index = 11; + else if (regaddr == RTXAGC_B_MCS11_MCS08) + index = 12; + else if (regaddr == RTXAGC_B_MCS15_MCS12) + index = 13; + else + return; + + rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index] = data; + rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, + "MCSTxPowerLevelOriginalOffset[%d][%d] = 0x%x\n", + rtlphy->pwrgroup_cnt, index, + rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index]); + if (index == 13) + rtlphy->pwrgroup_cnt++; +} +EXPORT_SYMBOL_GPL(rtl92d_store_pwrindex_diffrate_offset); + +void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + + rtlphy->default_initialgain[0] = + rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0); + rtlphy->default_initialgain[1] = + rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0); + rtlphy->default_initialgain[2] = + rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0); + rtlphy->default_initialgain[3] = + rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0); + rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, + "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n", + rtlphy->default_initialgain[0], + rtlphy->default_initialgain[1], + rtlphy->default_initialgain[2], + rtlphy->default_initialgain[3]); + rtlphy->framesync = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3, MASKBYTE0); + rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2, MASKDWORD); + rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, + "Default framesync (0x%x) = 0x%x\n", + ROFDM0_RXDETECTOR3, rtlphy->framesync); +} +EXPORT_SYMBOL_GPL(rtl92d_phy_get_hw_reg_originalvalue); + +static void _rtl92d_get_txpower_index(struct ieee80211_hw *hw, u8 channel, + u8 *cckpowerlevel, u8 *ofdmpowerlevel) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_hal *rtlhal = &rtlpriv->rtlhal; + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u8 index = channel - 1; + + /* 1. CCK */ + if (rtlhal->current_bandtype == BAND_ON_2_4G) { + /* RF-A */ + cckpowerlevel[RF90_PATH_A] = + rtlefuse->txpwrlevel_cck[RF90_PATH_A][index]; + /* RF-B */ + cckpowerlevel[RF90_PATH_B] = + rtlefuse->txpwrlevel_cck[RF90_PATH_B][index]; + } else { + cckpowerlevel[RF90_PATH_A] = 0; + cckpowerlevel[RF90_PATH_B] = 0; + } + /* 2. 
OFDM for 1S or 2S */ + if (rtlphy->rf_type == RF_1T2R || rtlphy->rf_type == RF_1T1R) { + /* Read HT 40 OFDM TX power */ + ofdmpowerlevel[RF90_PATH_A] = + rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_A][index]; + ofdmpowerlevel[RF90_PATH_B] = + rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_B][index]; + } else if (rtlphy->rf_type == RF_2T2R) { + /* Read HT 40 OFDM TX power */ + ofdmpowerlevel[RF90_PATH_A] = + rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_A][index]; + ofdmpowerlevel[RF90_PATH_B] = + rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_B][index]; + } +} + +static void _rtl92d_ccxpower_index_check(struct ieee80211_hw *hw, + u8 channel, u8 *cckpowerlevel, + u8 *ofdmpowerlevel) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + + rtlphy->cur_cck_txpwridx = cckpowerlevel[0]; + rtlphy->cur_ofdm24g_txpwridx = ofdmpowerlevel[0]; +} + +static u8 _rtl92c_phy_get_rightchnlplace(u8 chnl) +{ + u8 place = chnl; + + if (chnl > 14) { + for (place = 14; place < ARRAY_SIZE(channel_all); place++) { + if (channel_all[place] == chnl) { + place++; + break; + } + } + } + return place; +} + +void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel) +{ + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 cckpowerlevel[2], ofdmpowerlevel[2]; + + if (!rtlefuse->txpwr_fromeprom) + return; + channel = _rtl92c_phy_get_rightchnlplace(channel); + _rtl92d_get_txpower_index(hw, channel, &cckpowerlevel[0], + &ofdmpowerlevel[0]); + if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G) + _rtl92d_ccxpower_index_check(hw, channel, &cckpowerlevel[0], + &ofdmpowerlevel[0]); + if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G) + rtl92d_phy_rf6052_set_cck_txpower(hw, &cckpowerlevel[0]); + rtl92d_phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0], channel); +} +EXPORT_SYMBOL_GPL(rtl92d_phy_set_txpower_level); + +void rtl92d_phy_enable_rf_env(struct ieee80211_hw *hw, u8 rfpath, + u32 *pu4_regval) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath]; + + rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "====>\n"); + /*----Store original RFENV control type----*/ + switch (rfpath) { + case RF90_PATH_A: + case RF90_PATH_C: + *pu4_regval = rtl_get_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV); + break; + case RF90_PATH_B: + case RF90_PATH_D: + *pu4_regval = + rtl_get_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV << 16); + break; + } + /*----Set RF_ENV enable----*/ + rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1); + udelay(1); + /*----Set RF_ENV output high----*/ + rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1); + udelay(1); + /* Set bit number of Address and Data for RF register */ + /* Set 1 to 4 bits for 8255 */ + rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREADDRESSLENGTH, 0x0); + udelay(1); + /*Set 0 to 12 bits for 8255 */ + rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0); + udelay(1); + rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "<====\n"); +} +EXPORT_SYMBOL_GPL(rtl92d_phy_enable_rf_env); + +void rtl92d_phy_restore_rf_env(struct ieee80211_hw *hw, u8 rfpath, + u32 *pu4_regval) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath]; + + rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "=====>\n"); + /*----Restore RFENV control type----*/ + switch (rfpath) { + case RF90_PATH_A: + case RF90_PATH_C: + rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV, *pu4_regval); + break; + case 
RF90_PATH_B: + case RF90_PATH_D: + rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV << 16, + *pu4_regval); + break; + } + rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "<=====\n"); +} +EXPORT_SYMBOL_GPL(rtl92d_phy_restore_rf_env); + +u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl) +{ + u8 place; + + if (chnl > 14) { + for (place = 14; place < ARRAY_SIZE(channel_all); place++) { + if (channel_all[place] == chnl) + return place - 13; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(rtl92d_get_rightchnlplace_for_iqk); + +void rtl92d_phy_save_adda_registers(struct ieee80211_hw *hw, const u32 *adda_reg, + u32 *adda_backup, u32 regnum) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i; + + RTPRINT(rtlpriv, FINIT, INIT_IQK, "Save ADDA parameters.\n"); + for (i = 0; i < regnum; i++) + adda_backup[i] = rtl_get_bbreg(hw, adda_reg[i], MASKDWORD); +} +EXPORT_SYMBOL_GPL(rtl92d_phy_save_adda_registers); + +void rtl92d_phy_save_mac_registers(struct ieee80211_hw *hw, + const u32 *macreg, u32 *macbackup) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i; + + RTPRINT(rtlpriv, FINIT, INIT_IQK, "Save MAC parameters.\n"); + for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) + macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]); + macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]); +} +EXPORT_SYMBOL_GPL(rtl92d_phy_save_mac_registers); + +void rtl92d_phy_path_adda_on(struct ieee80211_hw *hw, + const u32 *adda_reg, bool patha_on, bool is2t) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 pathon; + u32 i; + + RTPRINT(rtlpriv, FINIT, INIT_IQK, "ADDA ON.\n"); + pathon = patha_on ? 0x04db25a4 : 0x0b1b25a4; + if (patha_on) + pathon = rtlpriv->rtlhal.interfaceindex == 0 ? + 0x04db25a4 : 0x0b1b25a4; + for (i = 0; i < IQK_ADDA_REG_NUM; i++) + rtl_set_bbreg(hw, adda_reg[i], MASKDWORD, pathon); +} +EXPORT_SYMBOL_GPL(rtl92d_phy_path_adda_on); + +void rtl92d_phy_mac_setting_calibration(struct ieee80211_hw *hw, + const u32 *macreg, u32 *macbackup) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i; + + RTPRINT(rtlpriv, FINIT, INIT_IQK, "MAC settings for Calibration.\n"); + rtl_write_byte(rtlpriv, macreg[0], 0x3F); + + for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++) + rtl_write_byte(rtlpriv, macreg[i], (u8)(macbackup[i] & + (~BIT(3)))); + rtl_write_byte(rtlpriv, macreg[i], (u8)(macbackup[i] & (~BIT(5)))); +} +EXPORT_SYMBOL_GPL(rtl92d_phy_mac_setting_calibration); + +static u32 _rtl92d_phy_get_abs(u32 val1, u32 val2) +{ + u32 ret; + + if (val1 >= val2) + ret = val1 - val2; + else + ret = val2 - val1; + return ret; +} + +static bool _rtl92d_is_legal_5g_channel(struct ieee80211_hw *hw, u8 channel) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(channel5g); i++) + if (channel == channel5g[i]) + return true; + return false; +} + +void rtl92d_phy_calc_curvindex(struct ieee80211_hw *hw, + const u32 *targetchnl, u32 *curvecount_val, + bool is5g, u32 *curveindex) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 smallest_abs_val = 0xffffffff, u4tmp; + u8 i, j; + u8 chnl_num = is5g ? 
TARGET_CHNL_NUM_5G : TARGET_CHNL_NUM_2G; + + for (i = 0; i < chnl_num; i++) { + if (is5g && !_rtl92d_is_legal_5g_channel(hw, i + 1)) + continue; + curveindex[i] = 0; + for (j = 0; j < (CV_CURVE_CNT * 2); j++) { + u4tmp = _rtl92d_phy_get_abs(targetchnl[i], + curvecount_val[j]); + + if (u4tmp < smallest_abs_val) { + curveindex[i] = j; + smallest_abs_val = u4tmp; + } + } + smallest_abs_val = 0xffffffff; + RTPRINT(rtlpriv, FINIT, INIT_IQK, "curveindex[%d] = %x\n", + i, curveindex[i]); + } +} +EXPORT_SYMBOL_GPL(rtl92d_phy_calc_curvindex); + +void rtl92d_phy_reset_iqk_result(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 i; + + rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, + "settings regs %zu default regs %d\n", + ARRAY_SIZE(rtlphy->iqk_matrix), + IQK_MATRIX_REG_NUM); + /* 0xe94, 0xe9c, 0xea4, 0xeac, 0xeb4, 0xebc, 0xec4, 0xecc */ + for (i = 0; i < IQK_MATRIX_SETTINGS_NUM; i++) { + rtlphy->iqk_matrix[i].value[0][0] = 0x100; + rtlphy->iqk_matrix[i].value[0][2] = 0x100; + rtlphy->iqk_matrix[i].value[0][4] = 0x100; + rtlphy->iqk_matrix[i].value[0][6] = 0x100; + rtlphy->iqk_matrix[i].value[0][1] = 0x0; + rtlphy->iqk_matrix[i].value[0][3] = 0x0; + rtlphy->iqk_matrix[i].value[0][5] = 0x0; + rtlphy->iqk_matrix[i].value[0][7] = 0x0; + rtlphy->iqk_matrix[i].iqk_done = false; + } +} +EXPORT_SYMBOL_GPL(rtl92d_phy_reset_iqk_result); + +static void rtl92d_phy_set_io(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *de_digtable = &rtlpriv->dm_digtable; + struct rtl_phy *rtlphy = &rtlpriv->phy; + + rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, + "--->Cmd(%#x), set_io_inprogress(%d)\n", + rtlphy->current_io_type, rtlphy->set_io_inprogress); + + switch (rtlphy->current_io_type) { + case IO_CMD_RESUME_DM_BY_SCAN: + de_digtable->cur_igvalue = rtlphy->initgain_backup.xaagccore1; + rtl92d_dm_write_dig(hw); + rtl92d_phy_set_txpower_level(hw, rtlphy->current_channel); + break; + case IO_CMD_PAUSE_DM_BY_SCAN: + rtlphy->initgain_backup.xaagccore1 = de_digtable->cur_igvalue; + de_digtable->cur_igvalue = 0x37; + if (rtlpriv->rtlhal.interface == INTF_USB) + de_digtable->cur_igvalue = 0x17; + rtl92d_dm_write_dig(hw); + break; + default: + pr_err("switch case %#x not processed\n", + rtlphy->current_io_type); + break; + } + + rtlphy->set_io_inprogress = false; + rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, "<---(%#x)\n", + rtlphy->current_io_type); +} + +bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + bool postprocessing = false; + + rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, + "-->IO Cmd(%#x), set_io_inprogress(%d)\n", + iotype, rtlphy->set_io_inprogress); + + do { + switch (iotype) { + case IO_CMD_RESUME_DM_BY_SCAN: + rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, + "[IO CMD] Resume DM after scan\n"); + postprocessing = true; + break; + case IO_CMD_PAUSE_DM_BY_SCAN: + rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, + "[IO CMD] Pause DM before scan\n"); + postprocessing = true; + break; + default: + pr_err("switch case %#x not processed\n", + iotype); + break; + } + } while (false); + + if (postprocessing && !rtlphy->set_io_inprogress) { + rtlphy->set_io_inprogress = true; + rtlphy->current_io_type = iotype; + } else { + return false; + } + + rtl92d_phy_set_io(hw); + rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, "<--IO Type(%#x)\n", iotype); + return true; +} +EXPORT_SYMBOL_GPL(rtl92d_phy_set_io_cmd); + +void rtl92d_phy_config_macphymode(struct ieee80211_hw 
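rtl92d_phy_calc_curvindex() above is essentially a nearest-value search: for every target channel value it scans the measured curve table, records the index whose entry has the smallest absolute distance to the target, and resets the running minimum before the next channel. A self-contained sketch of that inner search (table size and values are made up for illustration) shows the idea:

#include <stdio.h>
#include <stdint.h>

/* Absolute difference of two unsigned values, as in _rtl92d_phy_get_abs(). */
static uint32_t abs_diff(uint32_t a, uint32_t b)
{
	return a >= b ? a - b : b - a;
}

/* Return the index of the curve entry closest to @target. */
static unsigned int nearest_curve_index(uint32_t target,
					const uint32_t *curve, unsigned int n)
{
	uint32_t best = 0xffffffff;
	unsigned int i, idx = 0;

	for (i = 0; i < n; i++) {
		uint32_t d = abs_diff(target, curve[i]);

		if (d < best) {
			best = d;
			idx = i;
		}
	}
	return idx;
}

int main(void)
{
	/* made-up curve readback values */
	const uint32_t curve[] = { 10, 25, 40, 55, 70 };

	printf("target 43 -> index %u\n",
	       nearest_curve_index(43, curve, 5));	/* index 2 (value 40) */
	return 0;
}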
*hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 offset = REG_MAC_PHY_CTRL_NORMAL; + u8 phy_ctrl = 0xf0; + + if (rtlhal->interface == INTF_USB) { + phy_ctrl = rtl_read_byte(rtlpriv, offset); + phy_ctrl &= ~(BIT(0) | BIT(1) | BIT(2)); + } + + switch (rtlhal->macphymode) { + case DUALMAC_DUALPHY: + rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, + "MacPhyMode: DUALMAC_DUALPHY\n"); + rtl_write_byte(rtlpriv, offset, phy_ctrl | BIT(0) | BIT(1)); + break; + case SINGLEMAC_SINGLEPHY: + rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, + "MacPhyMode: SINGLEMAC_SINGLEPHY\n"); + rtl_write_byte(rtlpriv, offset, phy_ctrl | BIT(2)); + break; + case DUALMAC_SINGLEPHY: + rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, + "MacPhyMode: DUALMAC_SINGLEPHY\n"); + rtl_write_byte(rtlpriv, offset, phy_ctrl | BIT(0)); + break; + } +} +EXPORT_SYMBOL_GPL(rtl92d_phy_config_macphymode); + +void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_phy *rtlphy = &rtlpriv->phy; + + switch (rtlhal->macphymode) { + case DUALMAC_SINGLEPHY: + rtlphy->rf_type = RF_2T2R; + rtlhal->version |= RF_TYPE_2T2R; + rtlhal->bandset = BAND_ON_BOTH; + rtlhal->current_bandtype = BAND_ON_2_4G; + break; + + case SINGLEMAC_SINGLEPHY: + rtlphy->rf_type = RF_2T2R; + rtlhal->version |= RF_TYPE_2T2R; + rtlhal->bandset = BAND_ON_BOTH; + rtlhal->current_bandtype = BAND_ON_2_4G; + break; + + case DUALMAC_DUALPHY: + rtlphy->rf_type = RF_1T1R; + rtlhal->version &= RF_TYPE_1T1R; + /* Now we let MAC0 run on 5G band. */ + if (rtlhal->interfaceindex == 0) { + rtlhal->bandset = BAND_ON_5G; + rtlhal->current_bandtype = BAND_ON_5G; + } else { + rtlhal->bandset = BAND_ON_2_4G; + rtlhal->current_bandtype = BAND_ON_2_4G; + } + break; + default: + break; + } +} +EXPORT_SYMBOL_GPL(rtl92d_phy_config_macphymode_info); + +u8 rtl92d_get_chnlgroup_fromarray(u8 chnl) +{ + u8 group; + + if (channel_all[chnl] <= 3) + group = 0; + else if (channel_all[chnl] <= 9) + group = 1; + else if (channel_all[chnl] <= 14) + group = 2; + else if (channel_all[chnl] <= 44) + group = 3; + else if (channel_all[chnl] <= 54) + group = 4; + else if (channel_all[chnl] <= 64) + group = 5; + else if (channel_all[chnl] <= 112) + group = 6; + else if (channel_all[chnl] <= 126) + group = 7; + else if (channel_all[chnl] <= 140) + group = 8; + else if (channel_all[chnl] <= 153) + group = 9; + else if (channel_all[chnl] <= 159) + group = 10; + else + group = 11; + return group; +} +EXPORT_SYMBOL_GPL(rtl92d_get_chnlgroup_fromarray); + +u8 rtl92d_phy_get_chnlgroup_bypg(u8 chnlindex) +{ + u8 group; + + if (channel_all[chnlindex] <= 3) /* Chanel 1-3 */ + group = 0; + else if (channel_all[chnlindex] <= 9) /* Channel 4-9 */ + group = 1; + else if (channel_all[chnlindex] <= 14) /* Channel 10-14 */ + group = 2; + else if (channel_all[chnlindex] <= 64) + group = 6; + else if (channel_all[chnlindex] <= 140) + group = 7; + else + group = 8; + return group; +} + +void rtl92d_phy_config_maccoexist_rfpage(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + switch (rtlpriv->rtlhal.macphymode) { + case DUALMAC_DUALPHY: + rtl_write_byte(rtlpriv, REG_DMC, 0x0); + rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x08); + rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, 0x13ff); + break; + case DUALMAC_SINGLEPHY: + rtl_write_byte(rtlpriv, REG_DMC, 0xf8); + rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x08); + rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, 0x13ff); + break; + 
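Both channel-group helpers above take an index into channel_all[] rather than a raw channel number, and bucket the resulting channel into the power-calibration groups stored in the EEPROM. A rough stand-alone illustration of the bucketing, applied directly to a plain channel value with the channel_all[] indirection dropped for brevity:

#include <stdio.h>

/* Same thresholds as rtl92d_get_chnlgroup_fromarray(), but taking the
 * channel number itself instead of a channel_all[] index. */
static unsigned int chnl_to_group(unsigned int ch)
{
	if (ch <= 3)	return 0;
	if (ch <= 9)	return 1;
	if (ch <= 14)	return 2;
	if (ch <= 44)	return 3;
	if (ch <= 54)	return 4;
	if (ch <= 64)	return 5;
	if (ch <= 112)	return 6;
	if (ch <= 126)	return 7;
	if (ch <= 140)	return 8;
	if (ch <= 153)	return 9;
	if (ch <= 159)	return 10;
	return 11;
}

int main(void)
{
	printf("channel 6   -> group %u\n", chnl_to_group(6));		/* 1 */
	printf("channel 52  -> group %u\n", chnl_to_group(52));	/* 4 */
	printf("channel 149 -> group %u\n", chnl_to_group(149));	/* 9 */
	return 0;
}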
case SINGLEMAC_SINGLEPHY: + rtl_write_byte(rtlpriv, REG_DMC, 0x0); + rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x10); + rtl_write_word(rtlpriv, (REG_TRXFF_BNDY + 2), 0x27FF); + break; + default: + break; + } +} +EXPORT_SYMBOL_GPL(rtl92d_phy_config_maccoexist_rfpage); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/phy_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/phy_common.h new file mode 100644 index 000000000000..0f794557af47 --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/phy_common.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ + +#ifndef __RTL92D_PHY_COMMON_H__ +#define __RTL92D_PHY_COMMON_H__ + +#define TARGET_CHNL_NUM_5G 221 +#define TARGET_CHNL_NUM_2G 14 +#define CV_CURVE_CNT 64 +#define RT_CANNOT_IO(hw) false +#define RX_INDEX_MAPPING_NUM 15 +#define IQK_BB_REG_NUM 10 + +#define IQK_DELAY_TIME 1 +#define MAX_TOLERANCE 5 +#define MAX_TOLERANCE_92D 3 + +enum baseband_config_type { + BASEBAND_CONFIG_PHY_REG = 0, + BASEBAND_CONFIG_AGC_TAB = 1, +}; + +enum rf_content { + radioa_txt = 0, + radiob_txt = 1, + radioc_txt = 2, + radiod_txt = 3 +}; + +static inline void rtl92d_acquire_cckandrw_pagea_ctl(struct ieee80211_hw *hw, + unsigned long *flag) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (rtlpriv->rtlhal.interface == INTF_USB) + return; + + if (rtlpriv->rtlhal.interfaceindex == 1) + spin_lock_irqsave(&rtlpriv->locks.cck_and_rw_pagea_lock, *flag); +} + +static inline void rtl92d_release_cckandrw_pagea_ctl(struct ieee80211_hw *hw, + unsigned long *flag) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (rtlpriv->rtlhal.interface == INTF_USB) + return; + + if (rtlpriv->rtlhal.interfaceindex == 1) + spin_unlock_irqrestore(&rtlpriv->locks.cck_and_rw_pagea_lock, + *flag); +} + +u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath, + u32 regaddr, u32 bitmask); +void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath, + u32 regaddr, u32 bitmask, u32 data); +void rtl92d_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw); +void rtl92d_store_pwrindex_diffrate_offset(struct ieee80211_hw *hw, + u32 regaddr, u32 bitmask, u32 data); +void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw); +void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel); +void rtl92d_phy_enable_rf_env(struct ieee80211_hw *hw, u8 rfpath, + u32 *pu4_regval); +void rtl92d_phy_restore_rf_env(struct ieee80211_hw *hw, u8 rfpath, + u32 *pu4_regval); +u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl); +void rtl92d_phy_save_adda_registers(struct ieee80211_hw *hw, const u32 *adda_reg, + u32 *adda_backup, u32 regnum); +void rtl92d_phy_save_mac_registers(struct ieee80211_hw *hw, + const u32 *macreg, u32 *macbackup); +void rtl92d_phy_path_adda_on(struct ieee80211_hw *hw, + const u32 *adda_reg, bool patha_on, bool is2t); +void rtl92d_phy_mac_setting_calibration(struct ieee80211_hw *hw, + const u32 *macreg, u32 *macbackup); +void rtl92d_phy_calc_curvindex(struct ieee80211_hw *hw, + const u32 *targetchnl, u32 *curvecount_val, + bool is5g, u32 *curveindex); +void rtl92d_phy_reset_iqk_result(struct ieee80211_hw *hw); +bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype); +void rtl92d_phy_config_macphymode(struct ieee80211_hw *hw); +void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw); +u8 rtl92d_get_chnlgroup_fromarray(u8 chnl); +u8 rtl92d_phy_get_chnlgroup_bypg(u8 chnlindex); +void rtl92d_phy_config_maccoexist_rfpage(struct 
ieee80211_hw *hw); +/* Without these declarations sparse warns about context imbalance. */ +void rtl92d_acquire_cckandrw_pagea_ctl(struct ieee80211_hw *hw, + unsigned long *flag); +void rtl92d_release_cckandrw_pagea_ctl(struct ieee80211_hw *hw, + unsigned long *flag); + +/* Without these helpers and the declarations sparse warns about + * context imbalance. + */ +static inline void rtl92d_pci_lock(struct rtl_priv *rtlpriv) +{ + if (rtlpriv->rtlhal.interface == INTF_PCI) + spin_lock(&rtlpriv->locks.rf_lock); +} + +static inline void rtl92d_pci_unlock(struct rtl_priv *rtlpriv) +{ + if (rtlpriv->rtlhal.interface == INTF_PCI) + spin_unlock(&rtlpriv->locks.rf_lock); +} + +void rtl92d_pci_lock(struct rtl_priv *rtlpriv); +void rtl92d_pci_unlock(struct rtl_priv *rtlpriv); + +#endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/reg.h index 2783d7e7b227..b5b906b799cb 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/reg.h @@ -50,6 +50,9 @@ #define REG_HMEBOX_EXT_1 0x008A #define REG_HMEBOX_EXT_2 0x008C #define REG_HMEBOX_EXT_3 0x008E +#define SIZE_OF_REG_HMEBOX_EXT 2 + +#define REG_EFUSE_ACCESS 0x00CF #define REG_BIST_SCAN 0x00D0 #define REG_BIST_RPT 0x00D4 @@ -86,6 +89,7 @@ #define REG_CPWM 0x012F #define REG_FWIMR 0x0130 #define REG_FWISR 0x0134 +#define REG_FTIMR 0x0138 #define REG_PKTBUF_DBG_CTRL 0x0140 #define REG_PKTBUF_DBG_DATA_L 0x0144 #define REG_PKTBUF_DBG_DATA_H 0x0148 @@ -109,6 +113,7 @@ #define REG_HMEBOX_1 0x01D4 #define REG_HMEBOX_2 0x01D8 #define REG_HMEBOX_3 0x01DC +#define SIZE_OF_REG_HMEBOX 4 #define REG_LLT_INIT 0x01E0 #define REG_BB_ACCEESS_CTRL 0x01E8 @@ -197,6 +202,8 @@ #define REG_POWER_STAGE1 0x04B4 #define REG_POWER_STAGE2 0x04B8 #define REG_PKT_LIFE_TIME 0x04C0 +#define REG_PKT_VO_VI_LIFE_TIME 0x04C0 +#define REG_PKT_BE_BK_LIFE_TIME 0x04C2 #define REG_STBC_SETTING 0x04C4 #define REG_PROT_MODE_CTRL 0x04C8 #define REG_MAX_AGGR_NUM 0x04CA @@ -233,6 +240,7 @@ #define REG_RD_NAV_NXT 0x0544 #define REG_NAV_PROT_LEN 0x0546 #define REG_BCN_CTRL 0x0550 +#define REG_BCN_CTRL_1 0x0551 #define REG_MBID_NUM 0x0552 #define REG_DUAL_TSF_RST 0x0553 #define REG_BCN_INTERVAL 0x0554 @@ -319,6 +327,8 @@ #define REG_BT_COEX_TABLE 0x06C0 #define REG_WMAC_RESP_TXINFO 0x06D8 +#define REG_USB_Queue_Select_MAC0 0xFE44 +#define REG_USB_Queue_Select_MAC1 0xFE47 /* ----------------------------------------------------- */ /* Redifine 8192C register definition for compatibility */ @@ -355,27 +365,27 @@ #define RRSR_RSC_UPSUBCHNL 0x400000 #define RRSR_RSC_LOWSUBCHNL 0x200000 #define RRSR_SHORT 0x800000 -#define RRSR_1M BIT0 -#define RRSR_2M BIT1 -#define RRSR_5_5M BIT2 -#define RRSR_11M BIT3 -#define RRSR_6M BIT4 -#define RRSR_9M BIT5 -#define RRSR_12M BIT6 -#define RRSR_18M BIT7 -#define RRSR_24M BIT8 -#define RRSR_36M BIT9 -#define RRSR_48M BIT10 -#define RRSR_54M BIT11 -#define RRSR_MCS0 BIT12 -#define RRSR_MCS1 BIT13 -#define RRSR_MCS2 BIT14 -#define RRSR_MCS3 BIT15 -#define RRSR_MCS4 BIT16 -#define RRSR_MCS5 BIT17 -#define RRSR_MCS6 BIT18 -#define RRSR_MCS7 BIT19 -#define BRSR_ACKSHORTPMB BIT23 +#define RRSR_1M BIT(0) +#define RRSR_2M BIT(1) +#define RRSR_5_5M BIT(2) +#define RRSR_11M BIT(3) +#define RRSR_6M BIT(4) +#define RRSR_9M BIT(5) +#define RRSR_12M BIT(6) +#define RRSR_18M BIT(7) +#define RRSR_24M BIT(8) +#define RRSR_36M BIT(9) +#define RRSR_48M BIT(10) +#define RRSR_54M BIT(11) +#define RRSR_MCS0 BIT(12) +#define RRSR_MCS1 BIT(13) +#define RRSR_MCS2 
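The RRSR_* rate bits above move from the old BIT0/BIT1-style macros to the kernel's BIT() helper; the numeric values are unchanged. As a quick illustration of how such a mask is composed, here is a stand-alone sketch (with a local BIT() definition) that builds a basic-rate mask from the mandatory CCK and OFDM rates:

#include <stdio.h>

#define BIT(n)		(1U << (n))

#define RRSR_1M		BIT(0)
#define RRSR_2M		BIT(1)
#define RRSR_5_5M	BIT(2)
#define RRSR_11M	BIT(3)
#define RRSR_6M		BIT(4)
#define RRSR_12M	BIT(6)
#define RRSR_24M	BIT(8)

int main(void)
{
	/* 1/2/5.5/11 Mbps CCK plus the 6/12/24 Mbps mandatory OFDM rates */
	unsigned int basic_rates = RRSR_1M | RRSR_2M | RRSR_5_5M | RRSR_11M |
				   RRSR_6M | RRSR_12M | RRSR_24M;

	printf("basic rate mask = 0x%03x\n", basic_rates);	/* 0x15f */
	return 0;
}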
BIT(14) +#define RRSR_MCS3 BIT(15) +#define RRSR_MCS4 BIT(16) +#define RRSR_MCS5 BIT(17) +#define RRSR_MCS6 BIT(18) +#define RRSR_MCS7 BIT(19) +#define BRSR_ACKSHORTPMB BIT(23) /* ----------------------------------------------------- */ /* 8192C Rate Definition */ @@ -600,7 +610,11 @@ #define EEPROM_SVID 0x2C /* SE Vendor ID.E-F */ #define EEPROM_SMID 0x2E /* SE PCI Subsystem ID. 10-11 */ +#define EEPROM_VID_USB 0xC +#define EEPROM_PID_USB 0xE +#define EEPROM_ENDPOINT_SETTING 0x10 #define EEPROM_MAC_ADDR 0x16 /* SEMAC Address. 12-17 */ +#define EEPROM_MAC_ADDR_MAC0_92DU 0x19 #define EEPROM_MAC_ADDR_MAC0_92D 0x55 #define EEPROM_MAC_ADDR_MAC1_92D 0x5B @@ -915,6 +929,42 @@ #define BD_HCI_SEL BIT(26) #define TYPE_ID BIT(27) +#define HCI_TXDMA_EN BIT(0) +#define HCI_RXDMA_EN BIT(1) +#define TXDMA_EN BIT(2) +#define RXDMA_EN BIT(3) +#define PROTOCOL_EN BIT(4) +#define SCHEDULE_EN BIT(5) +#define MACTXEN BIT(6) +#define MACRXEN BIT(7) +#define ENSWBCN BIT(8) +#define ENSEC BIT(9) + +#define HQSEL_VOQ BIT(0) +#define HQSEL_VIQ BIT(1) +#define HQSEL_BEQ BIT(2) +#define HQSEL_BKQ BIT(3) +#define HQSEL_MGTQ BIT(4) +#define HQSEL_HIQ BIT(5) + +#define TXDMA_HIQ_MAP GENMASK(15, 14) +#define TXDMA_MGQ_MAP GENMASK(13, 12) +#define TXDMA_BKQ_MAP GENMASK(11, 10) +#define TXDMA_BEQ_MAP GENMASK(9, 8) +#define TXDMA_VIQ_MAP GENMASK(7, 6) +#define TXDMA_VOQ_MAP GENMASK(5, 4) + +#define QUEUE_LOW 1 +#define QUEUE_NORMAL 2 +#define QUEUE_HIGH 3 + +#define HPQ_MASK GENMASK(7, 0) +#define LPQ_MASK GENMASK(15, 8) +#define PUBQ_MASK GENMASK(23, 16) +#define LD_RQPN BIT(31) + +#define DROP_DATA_EN BIT(9) + /* LLT_INIT */ #define _LLT_NO_ACTIVE 0x0 #define _LLT_WRITE_ACCESS 0x1 @@ -929,6 +979,10 @@ /* ----------------------------------------------------- */ /* 0x0400h ~ 0x047Fh Protocol Configuration */ /* ----------------------------------------------------- */ +/* FWHW_TXQ_CTRL */ +#define EN_AMPDU_RTY_NEW BIT(7) +#define EN_BCNQ_DL BIT(22) + #define RETRY_LIMIT_SHORT_SHIFT 8 #define RETRY_LIMIT_LONG_SHIFT 0 @@ -942,6 +996,13 @@ #define AC_PARAM_ECW_MIN_OFFSET 8 #define AC_PARAM_AIFS_OFFSET 0 +/* REG_RD_CTRL */ +#define DIS_EDCA_CNT_DWN BIT(11) + +/* REG_BCN_CTRL */ +#define EN_BCN_FUNCTION BIT(3) +#define DIS_TSF_UDT BIT(4) + /* ACMHWCTRL */ #define ACMHW_HWEN BIT(0) #define ACMHW_BEQEN BIT(1) @@ -1073,6 +1134,11 @@ #define RCCK0_FACOUNTERLOWER 0xa5c #define RCCK0_FACOUNTERUPPER 0xa58 +#define RPDP_ANTA 0xb00 +#define RCONFIG_ANTA 0xb68 +#define RCONFIG_ANTB 0xb6c +#define RPDP_ANTB 0xb70 + /* 6. 
PageC(0xC00) */ #define ROFDM0_LSTF 0xc00 @@ -1126,6 +1192,7 @@ #define ROFDM0_TXPSEUDONOISEWGT 0xce4 #define ROFDM0_FRAMESYNC 0xcf0 #define ROFDM0_DFSREPORT 0xcf4 +#define ROFDM0_RXIQEXTANTA 0xca0 #define ROFDM0_TXCOEFF1 0xca4 #define ROFDM0_TXCOEFF2 0xca8 #define ROFDM0_TXCOEFF3 0xcac @@ -1184,17 +1251,70 @@ #define RTXAGC_B_MCS15_MCS12 0x868 #define RTXAGC_B_CCK11_A_CCK2_11 0x86c +#define RFPGA0_IQK 0xe28 +#define RTX_IQK_TONE_A 0xe30 +#define RRX_IQK_TONE_A 0xe34 +#define RTX_IQK_PI_A 0xe38 +#define RRX_IQK_PI_A 0xe3c + +#define RTX_IQK 0xe40 +#define RRX_IQK 0xe44 +#define RIQK_AGC_PTS 0xe48 +#define RIQK_AGC_RSP 0xe4c +#define RTX_IQK_TONE_B 0xe50 +#define RRX_IQK_TONE_B 0xe54 +#define RTX_IQK_PI_B 0xe58 +#define RRX_IQK_PI_B 0xe5c +#define RIQK_AGC_CONT 0xe60 + +#define RBLUE_TOOTH 0xe6c +#define RRX_WAIT_CCA 0xe70 +#define RTX_CCK_RFON 0xe74 +#define RTX_CCK_BBON 0xe78 +#define RTX_OFDM_RFON 0xe7c +#define RTX_OFDM_BBON 0xe80 +#define RTX_TO_RX 0xe84 +#define RTX_TO_TX 0xe88 +#define RRX_CCK 0xe8c + +#define RTX_POWER_BEFORE_IQK_A 0xe94 +#define RTX_POWER_AFTER_IQK_A 0xe9c + +#define RRX_POWER_BEFORE_IQK_A 0xea0 +#define RRX_POWER_BEFORE_IQK_A_2 0xea4 +#define RRX_POWER_AFTER_IQK_A 0xea8 +#define RRX_POWER_AFTER_IQK_A_2 0xeac + +#define RTX_POWER_BEFORE_IQK_B 0xeb4 +#define RTX_POWER_AFTER_IQK_B 0xebc + +#define RRX_POWER_BEFORE_IQK_B 0xec0 +#define RRX_POWER_BEFORE_IQK_B_2 0xec4 +#define RRX_POWER_AFTER_IQK_B 0xec8 +#define RRX_POWER_AFTER_IQK_B_2 0xecc + +#define MASK_IQK_RESULT 0x03ff0000 + +#define RRX_OFDM 0xed0 +#define RRX_WAIT_RIFS 0xed4 +#define RRX_TO_RX 0xed8 +#define RSTANDBY 0xedc +#define RSLEEP 0xee0 +#define RPMPD_ANAEN 0xeec + /* RL6052 Register definition */ #define RF_AC 0x00 #define RF_IQADJ_G1 0x01 #define RF_IQADJ_G2 0x02 +#define RF_BS_PA_APSET_G1_G4 0x03 #define RF_POW_TRSW 0x05 #define RF_GAIN_RX 0x06 #define RF_GAIN_TX 0x07 #define RF_TXM_IDAC 0x08 +#define RF_TXPA_AG 0x0B #define RF_BS_IQGEN 0x0F #define RF_MODE1 0x10 diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/rf_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/rf_common.c new file mode 100644 index 000000000000..427d1877f431 --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/rf_common.c @@ -0,0 +1,359 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ + +#include "../wifi.h" +#include "def.h" +#include "reg.h" +#include "phy_common.h" +#include "rf_common.h" + +void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 rfpath; + + switch (bandwidth) { + case HT_CHANNEL_WIDTH_20: + for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) { + rtlphy->rfreg_chnlval[rfpath] &= 0xfffff3ff; + rtlphy->rfreg_chnlval[rfpath] |= 0x0400; + + rtl_set_rfreg(hw, rfpath, RF_CHNLBW, + BIT(10) | BIT(11), 0x01); + + rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, + "20M RF 0x18 = 0x%x\n", + rtlphy->rfreg_chnlval[rfpath]); + } + + break; + case HT_CHANNEL_WIDTH_20_40: + for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) { + rtlphy->rfreg_chnlval[rfpath] &= 0xfffff3ff; + + rtl_set_rfreg(hw, rfpath, RF_CHNLBW, + BIT(10) | BIT(11), 0x00); + + rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, + "40M RF 0x18 = 0x%x\n", + rtlphy->rfreg_chnlval[rfpath]); + } + break; + default: + pr_err("unknown bandwidth: %#X\n", bandwidth); + break; + } +} +EXPORT_SYMBOL_GPL(rtl92d_phy_rf6052_set_bandwidth); + +void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw 
*hw, + u8 *ppowerlevel) +{ + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u32 tx_agc[2] = {0, 0}, tmpval; + bool turbo_scanoff = false; + u8 idx1, idx2; + u8 *ptr; + + if (rtlefuse->eeprom_regulatory != 0) + turbo_scanoff = true; + if (mac->act_scanning) { + tx_agc[RF90_PATH_A] = 0x3f3f3f3f; + tx_agc[RF90_PATH_B] = 0x3f3f3f3f; + if (turbo_scanoff) { + for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { + tx_agc[idx1] = ppowerlevel[idx1] | + (ppowerlevel[idx1] << 8) | + (ppowerlevel[idx1] << 16) | + (ppowerlevel[idx1] << 24); + } + } + } else { + for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { + tx_agc[idx1] = ppowerlevel[idx1] | + (ppowerlevel[idx1] << 8) | + (ppowerlevel[idx1] << 16) | + (ppowerlevel[idx1] << 24); + } + if (rtlefuse->eeprom_regulatory == 0) { + tmpval = (rtlphy->mcs_offset[0][6]) + + (rtlphy->mcs_offset[0][7] << 8); + tx_agc[RF90_PATH_A] += tmpval; + tmpval = (rtlphy->mcs_offset[0][14]) + + (rtlphy->mcs_offset[0][15] << 24); + tx_agc[RF90_PATH_B] += tmpval; + } + } + + for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { + ptr = (u8 *)(&tx_agc[idx1]); + for (idx2 = 0; idx2 < 4; idx2++) { + if (*ptr > RF6052_MAX_TX_PWR) + *ptr = RF6052_MAX_TX_PWR; + ptr++; + } + } + + tmpval = tx_agc[RF90_PATH_A] & 0xff; + rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval); + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", + tmpval, RTXAGC_A_CCK1_MCS32); + tmpval = tx_agc[RF90_PATH_A] >> 8; + rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval); + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n", + tmpval, RTXAGC_B_CCK11_A_CCK2_11); + tmpval = tx_agc[RF90_PATH_B] >> 24; + rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval); + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", + tmpval, RTXAGC_B_CCK11_A_CCK2_11); + tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff; + rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval); + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n", + tmpval, RTXAGC_B_CCK1_55_MCS32); +} +EXPORT_SYMBOL_GPL(rtl92d_phy_rf6052_set_cck_txpower); + +static void _rtl92d_phy_get_power_base(struct ieee80211_hw *hw, + u8 *ppowerlevel, u8 channel, + u32 *ofdmbase, u32 *mcsbase) +{ + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u32 powerbase0, powerbase1; + u8 legacy_pwrdiff, ht20_pwrdiff; + u8 i, powerlevel[2]; + + for (i = 0; i < 2; i++) { + powerlevel[i] = ppowerlevel[i]; + legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff[i][channel - 1]; + powerbase0 = powerlevel[i] + legacy_pwrdiff; + powerbase0 = (powerbase0 << 24) | (powerbase0 << 16) | + (powerbase0 << 8) | powerbase0; + *(ofdmbase + i) = powerbase0; + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + " [OFDM power base index rf(%c) = 0x%x]\n", + i == 0 ? 'A' : 'B', *(ofdmbase + i)); + } + + for (i = 0; i < 2; i++) { + if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) { + ht20_pwrdiff = rtlefuse->txpwr_ht20diff[i][channel - 1]; + powerlevel[i] += ht20_pwrdiff; + } + powerbase1 = powerlevel[i]; + powerbase1 = (powerbase1 << 24) | (powerbase1 << 16) | + (powerbase1 << 8) | powerbase1; + *(mcsbase + i) = powerbase1; + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + " [MCS power base index rf(%c) = 0x%x]\n", + i == 0 ? 
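The CCK path above replicates one per-path power index into all four bytes of a 32-bit TX AGC word, then clamps every byte to RF6052_MAX_TX_PWR before scattering the bytes into the RTXAGC registers. A compact user-space sketch of the pack-and-clamp step; the 0x3F limit is an assumption here, suggested by the 0x3f3f3f3f value used while scanning:

#include <stdio.h>

#define RF6052_MAX_TX_PWR	0x3F	/* assumed per-byte power limit */

/* Replicate one power index into all four bytes of the TX AGC word. */
static unsigned int pack_tx_agc(unsigned int pwr)
{
	return pwr | (pwr << 8) | (pwr << 16) | (pwr << 24);
}

/* Clamp each byte of the word to the RF6052 limit, as the driver does
 * by walking the word with a u8 pointer. */
static unsigned int clamp_tx_agc(unsigned int agc)
{
	unsigned int out = 0;
	int i;

	for (i = 0; i < 4; i++) {
		unsigned int byte = (agc >> (i * 8)) & 0xff;

		if (byte > RF6052_MAX_TX_PWR)
			byte = RF6052_MAX_TX_PWR;
		out |= byte << (i * 8);
	}
	return out;
}

int main(void)
{
	/* pretend a regulatory diff of 0x18 was added to the two low bytes */
	unsigned int agc = pack_tx_agc(0x2e) + 0x00001818;

	printf("packed  = 0x%08x\n", pack_tx_agc(0x2e));	/* 0x2e2e2e2e */
	printf("clamped = 0x%08x\n", clamp_tx_agc(agc));	/* 0x2e2e3f3f */
	return 0;
}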
'A' : 'B', *(mcsbase + i)); + } +} + +static void _rtl92d_get_pwr_diff_limit(struct ieee80211_hw *hw, u8 channel, + u8 index, u8 rf, u8 pwr_diff_limit[4]) +{ + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u32 mcs_offset; + u8 limit; + int i; + + mcs_offset = rtlphy->mcs_offset[0][index + (rf ? 8 : 0)]; + + for (i = 0; i < 4; i++) { + pwr_diff_limit[i] = (mcs_offset >> (i * 8)) & 0x7f; + + if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) + limit = rtlefuse->pwrgroup_ht40[rf][channel - 1]; + else + limit = rtlefuse->pwrgroup_ht20[rf][channel - 1]; + + if (pwr_diff_limit[i] > limit) + pwr_diff_limit[i] = limit; + } +} + +static void _rtl92d_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw, + u8 channel, u8 index, + u32 *powerbase0, + u32 *powerbase1, + u32 *p_outwriteval) +{ + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u32 writeval = 0, customer_limit, rf; + u8 chnlgroup = 0, pwr_diff_limit[4]; + + for (rf = 0; rf < 2; rf++) { + switch (rtlefuse->eeprom_regulatory) { + case 0: + writeval = rtlphy->mcs_offset[0][index + (rf ? 8 : 0)]; + + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "RTK better performance\n"); + break; + case 1: + if (rtlphy->pwrgroup_cnt == 1) + chnlgroup = 0; + + if (rtlphy->pwrgroup_cnt < MAX_PG_GROUP) + break; + + chnlgroup = rtl92d_phy_get_chnlgroup_bypg(channel - 1); + if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) + chnlgroup++; + else + chnlgroup += 4; + + writeval = rtlphy->mcs_offset + [chnlgroup][index + (rf ? 8 : 0)]; + + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "Realtek regulatory, 20MHz\n"); + break; + case 2: + writeval = 0; + + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Better regulatory\n"); + break; + case 3: + if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "customer's limit, 40MHz rf(%c) = 0x%x\n", + rf == 0 ? 'A' : 'B', + rtlefuse->pwrgroup_ht40[rf][channel - 1]); + } else { + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "customer's limit, 20MHz rf(%c) = 0x%x\n", + rf == 0 ? 'A' : 'B', + rtlefuse->pwrgroup_ht20[rf][channel - 1]); + } + + _rtl92d_get_pwr_diff_limit(hw, channel, index, rf, + pwr_diff_limit); + + customer_limit = (pwr_diff_limit[3] << 24) | + (pwr_diff_limit[2] << 16) | + (pwr_diff_limit[1] << 8) | + (pwr_diff_limit[0]); + + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "Customer's limit rf(%c) = 0x%x\n", + rf == 0 ? 'A' : 'B', customer_limit); + + writeval = customer_limit; + + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Customer\n"); + break; + default: + writeval = rtlphy->mcs_offset[0][index + (rf ? 8 : 0)]; + + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "RTK better performance\n"); + break; + } + + if (index < 2) + writeval += powerbase0[rf]; + else + writeval += powerbase1[rf]; + + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "writeval rf(%c)= 0x%x\n", + rf == 0 ? 
'A' : 'B', writeval); + + *(p_outwriteval + rf) = writeval; + } +} + +static void _rtl92d_write_ofdm_power_reg(struct ieee80211_hw *hw, + u8 index, u32 *pvalue) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + static const u16 regoffset_a[6] = { + RTXAGC_A_RATE18_06, RTXAGC_A_RATE54_24, + RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04, + RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12 + }; + static const u16 regoffset_b[6] = { + RTXAGC_B_RATE18_06, RTXAGC_B_RATE54_24, + RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04, + RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12 + }; + u8 i, rf, pwr_val[4]; + u32 writeval; + u16 regoffset; + + for (rf = 0; rf < 2; rf++) { + writeval = pvalue[rf]; + for (i = 0; i < 4; i++) { + pwr_val[i] = (u8)((writeval & (0x7f << + (i * 8))) >> (i * 8)); + if (pwr_val[i] > RF6052_MAX_TX_PWR) + pwr_val[i] = RF6052_MAX_TX_PWR; + } + writeval = (pwr_val[3] << 24) | (pwr_val[2] << 16) | + (pwr_val[1] << 8) | pwr_val[0]; + if (rf == 0) + regoffset = regoffset_a[index]; + else + regoffset = regoffset_b[index]; + rtl_set_bbreg(hw, regoffset, MASKDWORD, writeval); + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + "Set 0x%x = %08x\n", regoffset, writeval); + if (((get_rf_type(rtlphy) == RF_2T2R) && + (regoffset == RTXAGC_A_MCS15_MCS12 || + regoffset == RTXAGC_B_MCS15_MCS12)) || + ((get_rf_type(rtlphy) != RF_2T2R) && + (regoffset == RTXAGC_A_MCS07_MCS04 || + regoffset == RTXAGC_B_MCS07_MCS04))) { + writeval = pwr_val[3]; + if (regoffset == RTXAGC_A_MCS15_MCS12 || + regoffset == RTXAGC_A_MCS07_MCS04) + regoffset = 0xc90; + if (regoffset == RTXAGC_B_MCS15_MCS12 || + regoffset == RTXAGC_B_MCS07_MCS04) + regoffset = 0xc98; + for (i = 0; i < 3; i++) { + if (i != 2) + writeval = (writeval > 8) ? + (writeval - 8) : 0; + else + writeval = (writeval > 6) ? 
+ (writeval - 6) : 0; + rtl_write_byte(rtlpriv, (u32)(regoffset + i), + (u8)writeval); + } + } + } +} + +void rtl92d_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, + u8 *ppowerlevel, u8 channel) +{ + u32 writeval[2], powerbase0[2], powerbase1[2]; + u8 index; + + _rtl92d_phy_get_power_base(hw, ppowerlevel, channel, + &powerbase0[0], &powerbase1[0]); + for (index = 0; index < 6; index++) { + _rtl92d_get_txpower_writeval_by_regulatory(hw, channel, index, + &powerbase0[0], + &powerbase1[0], + &writeval[0]); + _rtl92d_write_ofdm_power_reg(hw, index, &writeval[0]); + } +} +EXPORT_SYMBOL_GPL(rtl92d_phy_rf6052_set_ofdm_txpower); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/rf_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/rf_common.h new file mode 100644 index 000000000000..c243ec08369b --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/rf_common.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ + +#ifndef __RTL92D_RF_COMMON_H__ +#define __RTL92D_RF_COMMON_H__ + +void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth); +void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, + u8 *ppowerlevel); +void rtl92d_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, + u8 *ppowerlevel, u8 channel); + +#endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.c new file mode 100644 index 000000000000..72d2b7426d82 --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.c @@ -0,0 +1,516 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ + +#include "../wifi.h" +#include "../base.h" +#include "../stats.h" +#include "def.h" +#include "trx_common.h" + +static long _rtl92de_translate_todbm(struct ieee80211_hw *hw, + u8 signal_strength_index) +{ + long signal_power; + + signal_power = (long)((signal_strength_index + 1) >> 1); + signal_power -= 95; + return signal_power; +} + +static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw, + struct rtl_stats *pstats, + __le32 *pdesc, + struct rx_fwinfo_92d *p_drvinfo, + bool packet_match_bssid, + bool packet_toself, + bool packet_beacon) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv); + struct phy_sts_cck_8192d *cck_buf; + s8 rx_pwr_all, rx_pwr[4]; + u8 rf_rx_num = 0, evm, pwdb_all; + u8 i, max_spatial_stream; + u32 rssi, total_rssi = 0; + bool is_cck_rate; + u8 rxmcs; + + rxmcs = get_rx_desc_rxmcs(pdesc); + is_cck_rate = rxmcs <= DESC_RATE11M; + pstats->packet_matchbssid = packet_match_bssid; + pstats->packet_toself = packet_toself; + pstats->packet_beacon = packet_beacon; + pstats->is_cck = is_cck_rate; + pstats->rx_mimo_sig_qual[0] = -1; + pstats->rx_mimo_sig_qual[1] = -1; + + if (is_cck_rate) { + u8 report, cck_highpwr; + + cck_buf = (struct phy_sts_cck_8192d *)p_drvinfo; + if (ppsc->rfpwr_state == ERFON) + cck_highpwr = rtlphy->cck_high_power; + else + cck_highpwr = false; + if (!cck_highpwr) { + u8 cck_agc_rpt = cck_buf->cck_agc_rpt; + + report = cck_buf->cck_agc_rpt & 0xc0; + report = report >> 6; + switch (report) { + case 0x3: + rx_pwr_all = -46 - (cck_agc_rpt & 0x3e); + break; + case 0x2: + rx_pwr_all = -26 - (cck_agc_rpt & 0x3e); + break; + case 0x1: + rx_pwr_all = -12 - (cck_agc_rpt & 0x3e); + break; + case 0x0: + rx_pwr_all = 16 - (cck_agc_rpt & 0x3e); + break; + } + } else { + u8 cck_agc_rpt = 
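The non-high-power CCK branch above recovers the receive power from the AGC report byte: the top two bits select one of four VGA ranges (base 16, -12, -26 or -46 dBm) and the masked fine-gain bits are subtracted from that base. A tiny stand-alone sketch of the same decode (the helper name and the lookup-table layout are just for illustration):

#include <stdio.h>

/* Decode one CCK AGC report byte the way the non-high-power branch does. */
static int cck_agc_to_dbm(unsigned char rpt)
{
	static const int base[4] = { 16, -12, -26, -46 };	/* report 0..3 */

	return base[(rpt & 0xc0) >> 6] - (rpt & 0x3e);
}

int main(void)
{
	/* 0xca: range 3 (base -46 dBm), fine gain 0x0a -> -56 dBm */
	printf("agc 0xca -> %d dBm\n", cck_agc_to_dbm(0xca));
	return 0;
}

rtl_query_rxpwrpercentage() then maps the resulting dBm value onto the 0-100 scale used by the rest of the statistics code.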
cck_buf->cck_agc_rpt; + + report = p_drvinfo->cfosho[0] & 0x60; + report = report >> 5; + switch (report) { + case 0x3: + rx_pwr_all = -46 - ((cck_agc_rpt & 0x1f) << 1); + break; + case 0x2: + rx_pwr_all = -26 - ((cck_agc_rpt & 0x1f) << 1); + break; + case 0x1: + rx_pwr_all = -12 - ((cck_agc_rpt & 0x1f) << 1); + break; + case 0x0: + rx_pwr_all = 16 - ((cck_agc_rpt & 0x1f) << 1); + break; + } + } + pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all); + /* CCK gain is smaller than OFDM/MCS gain, */ + /* so we add gain diff by experiences, the val is 6 */ + pwdb_all += 6; + if (pwdb_all > 100) + pwdb_all = 100; + /* modify the offset to make the same gain index with OFDM. */ + if (pwdb_all > 34 && pwdb_all <= 42) + pwdb_all -= 2; + else if (pwdb_all > 26 && pwdb_all <= 34) + pwdb_all -= 6; + else if (pwdb_all > 14 && pwdb_all <= 26) + pwdb_all -= 8; + else if (pwdb_all > 4 && pwdb_all <= 14) + pwdb_all -= 4; + pstats->rx_pwdb_all = pwdb_all; + pstats->recvsignalpower = rx_pwr_all; + if (packet_match_bssid) { + u8 sq; + + if (pstats->rx_pwdb_all > 40) { + sq = 100; + } else { + sq = cck_buf->sq_rpt; + if (sq > 64) + sq = 0; + else if (sq < 20) + sq = 100; + else + sq = ((64 - sq) * 100) / 44; + } + pstats->signalquality = sq; + pstats->rx_mimo_sig_qual[0] = sq; + pstats->rx_mimo_sig_qual[1] = -1; + } + } else { + rtlpriv->dm.rfpath_rxenable[0] = true; + rtlpriv->dm.rfpath_rxenable[1] = true; + for (i = RF90_PATH_A; i < RF6052_MAX_PATH; i++) { + if (rtlpriv->dm.rfpath_rxenable[i]) + rf_rx_num++; + rx_pwr[i] = ((p_drvinfo->gain_trsw[i] & 0x3f) * 2) + - 110; + rssi = rtl_query_rxpwrpercentage(rx_pwr[i]); + total_rssi += rssi; + rtlpriv->stats.rx_snr_db[i] = + (long)(p_drvinfo->rxsnr[i] / 2); + if (packet_match_bssid) + pstats->rx_mimo_signalstrength[i] = (u8)rssi; + } + rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 106; + pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all); + pstats->rx_pwdb_all = pwdb_all; + pstats->rxpower = rx_pwr_all; + pstats->recvsignalpower = rx_pwr_all; + if (get_rx_desc_rxht(pdesc) && rxmcs >= DESC_RATEMCS8 && + rxmcs <= DESC_RATEMCS15) + max_spatial_stream = 2; + else + max_spatial_stream = 1; + for (i = 0; i < max_spatial_stream; i++) { + evm = rtl_evm_db_to_percentage(p_drvinfo->rxevm[i]); + if (packet_match_bssid) { + if (i == 0) + pstats->signalquality = + (u8)(evm & 0xff); + pstats->rx_mimo_sig_qual[i] = + (u8)(evm & 0xff); + } + } + } + if (is_cck_rate) + pstats->signalstrength = (u8)(rtl_signal_scale_mapping(hw, + pwdb_all)); + else if (rf_rx_num != 0) + pstats->signalstrength = (u8)(rtl_signal_scale_mapping(hw, + total_rssi /= rf_rx_num)); +} + +static void rtl92d_loop_over_paths(struct ieee80211_hw *hw, + struct rtl_stats *pstats) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 rfpath; + + for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath; + rfpath++) { + if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) { + rtlpriv->stats.rx_rssi_percentage[rfpath] = + pstats->rx_mimo_signalstrength[rfpath]; + } + if (pstats->rx_mimo_signalstrength[rfpath] > + rtlpriv->stats.rx_rssi_percentage[rfpath]) { + rtlpriv->stats.rx_rssi_percentage[rfpath] = + ((rtlpriv->stats.rx_rssi_percentage[rfpath] * + (RX_SMOOTH_FACTOR - 1)) + + (pstats->rx_mimo_signalstrength[rfpath])) / + (RX_SMOOTH_FACTOR); + rtlpriv->stats.rx_rssi_percentage[rfpath] = + rtlpriv->stats.rx_rssi_percentage[rfpath] + 1; + } else { + rtlpriv->stats.rx_rssi_percentage[rfpath] = + ((rtlpriv->stats.rx_rssi_percentage[rfpath] * + (RX_SMOOTH_FACTOR - 1)) + + 
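rtl92d_loop_over_paths() above applies a simple exponential smoothing filter to the per-path RSSI percentage, with an extra +1 step when the new sample is higher so a rising signal is tracked slightly faster. A minimal sketch of one filter step; the value of RX_SMOOTH_FACTOR is an assumption here (20 in this sketch), only the structure of the filter is taken from the code above:

#include <stdio.h>

#define RX_SMOOTH_FACTOR	20	/* assumed value of the rtlwifi constant */

/* One step of the per-path RSSI smoothing used in rtl92d_loop_over_paths(). */
static unsigned int smooth_rssi(unsigned int old, unsigned int sample)
{
	unsigned int next = (old * (RX_SMOOTH_FACTOR - 1) + sample) /
			    RX_SMOOTH_FACTOR;

	if (sample > old)
		next += 1;	/* bias upward so rising RSSI is tracked faster */
	return next;
}

int main(void)
{
	/* old 50 %, new sample 70 % -> (50*19 + 70)/20 = 51, +1 = 52 */
	printf("smoothed = %u\n", smooth_rssi(50, 70));
	return 0;
}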
(pstats->rx_mimo_signalstrength[rfpath])) / + (RX_SMOOTH_FACTOR); + } + } +} + +static void _rtl92de_process_ui_rssi(struct ieee80211_hw *hw, + struct rtl_stats *pstats) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rt_smooth_data *ui_rssi; + u32 last_rssi, tmpval; + + if (!pstats->packet_toself && !pstats->packet_beacon) + return; + + ui_rssi = &rtlpriv->stats.ui_rssi; + + rtlpriv->stats.rssi_calculate_cnt++; + if (ui_rssi->total_num++ >= PHY_RSSI_SLID_WIN_MAX) { + ui_rssi->total_num = PHY_RSSI_SLID_WIN_MAX; + last_rssi = ui_rssi->elements[ui_rssi->index]; + ui_rssi->total_val -= last_rssi; + } + ui_rssi->total_val += pstats->signalstrength; + ui_rssi->elements[ui_rssi->index++] = pstats->signalstrength; + if (ui_rssi->index >= PHY_RSSI_SLID_WIN_MAX) + ui_rssi->index = 0; + tmpval = ui_rssi->total_val / ui_rssi->total_num; + rtlpriv->stats.signal_strength = _rtl92de_translate_todbm(hw, (u8)tmpval); + pstats->rssi = rtlpriv->stats.signal_strength; + + if (!pstats->is_cck && pstats->packet_toself) + rtl92d_loop_over_paths(hw, pstats); +} + +static void _rtl92de_update_rxsignalstatistics(struct ieee80211_hw *hw, + struct rtl_stats *pstats) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + int weighting = 0; + + if (rtlpriv->stats.recv_signal_power == 0) + rtlpriv->stats.recv_signal_power = pstats->recvsignalpower; + if (pstats->recvsignalpower > rtlpriv->stats.recv_signal_power) + weighting = 5; + else if (pstats->recvsignalpower < rtlpriv->stats.recv_signal_power) + weighting = (-5); + rtlpriv->stats.recv_signal_power = (rtlpriv->stats.recv_signal_power * + 5 + pstats->recvsignalpower + weighting) / 6; +} + +static void _rtl92de_process_pwdb(struct ieee80211_hw *hw, + struct rtl_stats *pstats) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + long undec_sm_pwdb; + + if (mac->opmode == NL80211_IFTYPE_ADHOC || + mac->opmode == NL80211_IFTYPE_AP) + return; + + undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb; + + if (pstats->packet_toself || pstats->packet_beacon) { + if (undec_sm_pwdb < 0) + undec_sm_pwdb = pstats->rx_pwdb_all; + if (pstats->rx_pwdb_all > (u32)undec_sm_pwdb) { + undec_sm_pwdb = (((undec_sm_pwdb) * + (RX_SMOOTH_FACTOR - 1)) + + (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR); + undec_sm_pwdb = undec_sm_pwdb + 1; + } else { + undec_sm_pwdb = (((undec_sm_pwdb) * + (RX_SMOOTH_FACTOR - 1)) + + (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR); + } + rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb; + _rtl92de_update_rxsignalstatistics(hw, pstats); + } +} + +static void rtl92d_loop_over_streams(struct ieee80211_hw *hw, + struct rtl_stats *pstats) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + int stream; + + for (stream = 0; stream < 2; stream++) { + if (pstats->rx_mimo_sig_qual[stream] != -1) { + if (rtlpriv->stats.rx_evm_percentage[stream] == 0) { + rtlpriv->stats.rx_evm_percentage[stream] = + pstats->rx_mimo_sig_qual[stream]; + } + rtlpriv->stats.rx_evm_percentage[stream] = + ((rtlpriv->stats.rx_evm_percentage[stream] + * (RX_SMOOTH_FACTOR - 1)) + + (pstats->rx_mimo_sig_qual[stream] * 1)) / + (RX_SMOOTH_FACTOR); + } + } +} + +static void _rtl92de_process_ui_link_quality(struct ieee80211_hw *hw, + struct rtl_stats *pstats) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rt_smooth_data *ui_link_quality; + u32 last_evm, tmpval; + + if (pstats->signalquality == 0) + return; + if (!pstats->packet_toself && !pstats->packet_beacon) + return; + + ui_link_quality = &rtlpriv->stats.ui_link_quality; + + if (ui_link_quality->total_num++ >= 
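_rtl92de_process_ui_rssi() above keeps a sliding window of signal-strength samples (circular buffer plus running total) and feeds the window average through _rtl92de_translate_todbm(), which maps the 0-100 index to dBm as ((index + 1) >> 1) - 95. A self-contained sketch of both steps; the window size is an assumption standing in for PHY_RSSI_SLID_WIN_MAX:

#include <stdio.h>

#define SLID_WIN_MAX	100	/* stand-in for PHY_RSSI_SLID_WIN_MAX (assumed) */

struct smooth_data {
	unsigned int elements[SLID_WIN_MAX];
	unsigned int index;
	unsigned int total_num;
	unsigned long total_val;
};

/* Push one signal-strength sample and return the running window average,
 * mirroring the sliding window in _rtl92de_process_ui_rssi(). */
static unsigned int push_sample(struct smooth_data *w, unsigned int sample)
{
	if (w->total_num >= SLID_WIN_MAX) {
		w->total_num = SLID_WIN_MAX;
		w->total_val -= w->elements[w->index];	/* drop oldest sample */
	} else {
		w->total_num++;
	}
	w->total_val += sample;
	w->elements[w->index++] = sample;
	if (w->index >= SLID_WIN_MAX)
		w->index = 0;
	return w->total_val / w->total_num;
}

/* Same mapping as _rtl92de_translate_todbm(). */
static long index_to_dbm(unsigned int idx)
{
	return (long)((idx + 1) >> 1) - 95;
}

int main(void)
{
	struct smooth_data w = { 0 };

	push_sample(&w, 58);
	push_sample(&w, 62);
	/* window average 60 -> (61 >> 1) - 95 = -65 dBm */
	printf("signal = %ld dBm\n", index_to_dbm(push_sample(&w, 60)));
	return 0;
}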
PHY_LINKQUALITY_SLID_WIN_MAX) { + ui_link_quality->total_num = PHY_LINKQUALITY_SLID_WIN_MAX; + last_evm = ui_link_quality->elements[ui_link_quality->index]; + ui_link_quality->total_val -= last_evm; + } + ui_link_quality->total_val += pstats->signalquality; + ui_link_quality->elements[ui_link_quality->index++] = pstats->signalquality; + if (ui_link_quality->index >= PHY_LINKQUALITY_SLID_WIN_MAX) + ui_link_quality->index = 0; + tmpval = ui_link_quality->total_val / ui_link_quality->total_num; + rtlpriv->stats.signal_quality = tmpval; + rtlpriv->stats.last_sigstrength_inpercent = tmpval; + rtl92d_loop_over_streams(hw, pstats); +} + +static void _rtl92de_process_phyinfo(struct ieee80211_hw *hw, + u8 *buffer, + struct rtl_stats *pcurrent_stats) +{ + if (!pcurrent_stats->packet_matchbssid && + !pcurrent_stats->packet_beacon) + return; + + _rtl92de_process_ui_rssi(hw, pcurrent_stats); + _rtl92de_process_pwdb(hw, pcurrent_stats); + _rtl92de_process_ui_link_quality(hw, pcurrent_stats); +} + +static void _rtl92de_translate_rx_signal_stuff(struct ieee80211_hw *hw, + struct sk_buff *skb, + struct rtl_stats *pstats, + __le32 *pdesc, + struct rx_fwinfo_92d *p_drvinfo) +{ + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct ieee80211_hdr *hdr; + bool packet_matchbssid; + bool packet_beacon; + bool packet_toself; + u16 type, cfc; + u8 *tmp_buf; + u8 *praddr; + __le16 fc; + + tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift; + hdr = (struct ieee80211_hdr *)tmp_buf; + fc = hdr->frame_control; + cfc = le16_to_cpu(fc); + type = WLAN_FC_GET_TYPE(fc); + praddr = hdr->addr1; + packet_matchbssid = ((type != IEEE80211_FTYPE_CTL) && + ether_addr_equal(mac->bssid, + (cfc & IEEE80211_FCTL_TODS) ? hdr->addr1 : + (cfc & IEEE80211_FCTL_FROMDS) ? 
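In the BSSID match above, which 802.11 address field carries the BSSID depends on the ToDS/FromDS flags: frames headed to the AP (ToDS set) carry it in addr1, frames from the AP (FromDS set) in addr2, and IBSS/management frames (neither set) in addr3. That is exactly what the nested conditional selects before comparing against mac->bssid; a tiny sketch of the selection (local FCTL_* constants mirror the IEEE80211_FCTL_* bit positions):

#include <stdio.h>
#include <stdint.h>

#define FCTL_TODS	0x0100
#define FCTL_FROMDS	0x0200	/* same bit positions as IEEE80211_FCTL_* */

/* Pick the header address that holds the BSSID, as the nested
 * conditional in _rtl92de_translate_rx_signal_stuff() does. */
static int bssid_addr_index(uint16_t fc)
{
	if (fc & FCTL_TODS)
		return 1;	/* addr1 */
	if (fc & FCTL_FROMDS)
		return 2;	/* addr2 */
	return 3;		/* addr3 (IBSS, management, ...) */
}

int main(void)
{
	printf("ToDS frame   -> addr%d\n", bssid_addr_index(FCTL_TODS));
	printf("FromDS frame -> addr%d\n", bssid_addr_index(FCTL_FROMDS));
	printf("IBSS frame   -> addr%d\n", bssid_addr_index(0));
	return 0;
}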
hdr->addr2 : + hdr->addr3) && + (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv)); + packet_toself = packet_matchbssid && + ether_addr_equal(praddr, rtlefuse->dev_addr); + packet_beacon = ieee80211_is_beacon(fc); + _rtl92de_query_rxphystatus(hw, pstats, pdesc, p_drvinfo, + packet_matchbssid, packet_toself, + packet_beacon); + _rtl92de_process_phyinfo(hw, tmp_buf, pstats); +} + +bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, + struct ieee80211_rx_status *rx_status, + u8 *pdesc8, struct sk_buff *skb) +{ + __le32 *pdesc = (__le32 *)pdesc8; + struct rx_fwinfo_92d *p_drvinfo; + u32 phystatus = get_rx_desc_physt(pdesc); + + stats->length = (u16)get_rx_desc_pkt_len(pdesc); + stats->rx_drvinfo_size = (u8)get_rx_desc_drv_info_size(pdesc) * + RX_DRV_INFO_SIZE_UNIT; + stats->rx_bufshift = (u8)(get_rx_desc_shift(pdesc) & 0x03); + stats->icv = (u16)get_rx_desc_icv(pdesc); + stats->crc = (u16)get_rx_desc_crc32(pdesc); + stats->hwerror = (stats->crc | stats->icv); + stats->decrypted = !get_rx_desc_swdec(pdesc) && + get_rx_desc_enc_type(pdesc) != RX_DESC_ENC_NONE; + stats->rate = (u8)get_rx_desc_rxmcs(pdesc); + stats->shortpreamble = (u16)get_rx_desc_splcp(pdesc); + stats->isampdu = (bool)(get_rx_desc_paggr(pdesc) == 1); + stats->isfirst_ampdu = (bool)((get_rx_desc_paggr(pdesc) == 1) && + (get_rx_desc_faggr(pdesc) == 1)); + stats->timestamp_low = get_rx_desc_tsfl(pdesc); + stats->rx_is40mhzpacket = (bool)get_rx_desc_bw(pdesc); + stats->is_ht = (bool)get_rx_desc_rxht(pdesc); + rx_status->freq = hw->conf.chandef.chan->center_freq; + rx_status->band = hw->conf.chandef.chan->band; + if (get_rx_desc_crc32(pdesc)) + rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; + if (get_rx_desc_bw(pdesc)) + rx_status->bw = RATE_INFO_BW_40; + if (get_rx_desc_rxht(pdesc)) + rx_status->encoding = RX_ENC_HT; + rx_status->flag |= RX_FLAG_MACTIME_START; + if (stats->decrypted) + rx_status->flag |= RX_FLAG_DECRYPTED; + rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats->is_ht, + false, stats->rate); + rx_status->mactime = get_rx_desc_tsfl(pdesc); + if (phystatus) { + p_drvinfo = (struct rx_fwinfo_92d *)(skb->data + + stats->rx_bufshift); + _rtl92de_translate_rx_signal_stuff(hw, skb, stats, pdesc, + p_drvinfo); + } + /*rx_status->qual = stats->signal; */ + rx_status->signal = stats->recvsignalpower + 10; + return true; +} +EXPORT_SYMBOL_GPL(rtl92de_rx_query_desc); + +void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc8, bool istx, + u8 desc_name, u8 *val) +{ + __le32 *pdesc = (__le32 *)pdesc8; + + if (istx) { + switch (desc_name) { + case HW_DESC_OWN: + wmb(); + set_tx_desc_own(pdesc, 1); + break; + case HW_DESC_TX_NEXTDESC_ADDR: + set_tx_desc_next_desc_address(pdesc, *(u32 *)val); + break; + default: + WARN_ONCE(true, "rtl8192de: ERR txdesc :%d not processed\n", + desc_name); + break; + } + } else { + switch (desc_name) { + case HW_DESC_RXOWN: + wmb(); + set_rx_desc_own(pdesc, 1); + break; + case HW_DESC_RXBUFF_ADDR: + set_rx_desc_buff_addr(pdesc, *(u32 *)val); + break; + case HW_DESC_RXPKT_LEN: + set_rx_desc_pkt_len(pdesc, *(u32 *)val); + break; + case HW_DESC_RXERO: + set_rx_desc_eor(pdesc, 1); + break; + default: + WARN_ONCE(true, "rtl8192de: ERR rxdesc :%d not processed\n", + desc_name); + break; + } + } +} +EXPORT_SYMBOL_GPL(rtl92de_set_desc); + +u64 rtl92de_get_desc(struct ieee80211_hw *hw, + u8 *p_desc8, bool istx, u8 desc_name) +{ + __le32 *p_desc = (__le32 *)p_desc8; + u32 ret = 0; + + if (istx) { + switch (desc_name) { + case HW_DESC_OWN: + ret = get_tx_desc_own(p_desc); + break; + 
case HW_DESC_TXBUFF_ADDR: + ret = get_tx_desc_tx_buffer_address(p_desc); + break; + default: + WARN_ONCE(true, "rtl8192de: ERR txdesc :%d not processed\n", + desc_name); + break; + } + } else { + switch (desc_name) { + case HW_DESC_OWN: + ret = get_rx_desc_own(p_desc); + break; + case HW_DESC_RXPKT_LEN: + ret = get_rx_desc_pkt_len(p_desc); + break; + case HW_DESC_RXBUFF_ADDR: + ret = get_rx_desc_buff_addr(p_desc); + break; + default: + WARN_ONCE(true, "rtl8192de: ERR rxdesc :%d not processed\n", + desc_name); + break; + } + } + return ret; +} +EXPORT_SYMBOL_GPL(rtl92de_get_desc); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.h new file mode 100644 index 000000000000..87d956d771eb --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.h @@ -0,0 +1,405 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ + +#ifndef __RTL92D_TRX_COMMON_H__ +#define __RTL92D_TRX_COMMON_H__ + +#define RX_DRV_INFO_SIZE_UNIT 8 + +enum rtl92d_rx_desc_enc { + RX_DESC_ENC_NONE = 0, + RX_DESC_ENC_WEP40 = 1, + RX_DESC_ENC_TKIP_WO_MIC = 2, + RX_DESC_ENC_TKIP_MIC = 3, + RX_DESC_ENC_AES = 4, + RX_DESC_ENC_WEP104 = 5, +}; + +/* macros to read/write various fields in RX or TX descriptors */ + +static inline void set_tx_desc_pkt_size(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, GENMASK(15, 0)); +} + +static inline void set_tx_desc_offset(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, GENMASK(23, 16)); +} + +static inline void set_tx_desc_htc(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(25)); +} + +static inline void set_tx_desc_last_seg(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(26)); +} + +static inline void set_tx_desc_first_seg(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(27)); +} + +static inline void set_tx_desc_linip(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(28)); +} + +static inline void set_tx_desc_own(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(31)); +} + +static inline u32 get_tx_desc_own(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, BIT(31)); +} + +static inline void set_tx_desc_macid(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 1), __val, GENMASK(4, 0)); +} + +static inline void set_tx_desc_agg_enable(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 1), __val, BIT(5)); +} + +static inline void set_tx_desc_rdg_enable(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 1), __val, BIT(7)); +} + +static inline void set_tx_desc_queue_sel(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 1), __val, GENMASK(12, 8)); +} + +static inline void set_tx_desc_rate_id(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 1), __val, GENMASK(19, 16)); +} + +static inline void set_tx_desc_sec_type(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 1), __val, GENMASK(23, 22)); +} + +static inline void set_tx_desc_pkt_offset(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 1), __val, GENMASK(30, 26)); +} + +static inline void set_tx_desc_more_frag(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 2), __val, BIT(17)); +} + +static inline void set_tx_desc_ampdu_density(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 2), __val, GENMASK(22, 20)); +} + +static inline void 
set_tx_desc_seq(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 3), __val, GENMASK(27, 16)); +} + +static inline void set_tx_desc_pkt_id(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 3), __val, GENMASK(31, 28)); +} + +static inline void set_tx_desc_rts_rate(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, GENMASK(4, 0)); +} + +static inline void set_tx_desc_qos(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, BIT(6)); +} + +static inline void set_tx_desc_hwseq_en(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, BIT(7)); +} + +static inline void set_tx_desc_use_rate(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, BIT(8)); +} + +static inline void set_tx_desc_disable_fb(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, BIT(10)); +} + +static inline void set_tx_desc_cts2self(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, BIT(11)); +} + +static inline void set_tx_desc_rts_enable(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, BIT(12)); +} + +static inline void set_tx_desc_hw_rts_enable(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, BIT(13)); +} + +static inline void set_tx_desc_tx_sub_carrier(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, GENMASK(21, 20)); +} + +static inline void set_tx_desc_data_bw(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, BIT(25)); +} + +static inline void set_tx_desc_rts_short(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, BIT(26)); +} + +static inline void set_tx_desc_rts_bw(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, BIT(27)); +} + +static inline void set_tx_desc_rts_sc(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, GENMASK(29, 28)); +} + +static inline void set_tx_desc_rts_stbc(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 4), __val, GENMASK(31, 30)); +} + +static inline void set_tx_desc_tx_rate(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 5), __val, GENMASK(5, 0)); +} + +static inline void set_tx_desc_data_shortgi(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 5), __val, BIT(6)); +} + +static inline void set_tx_desc_data_rate_fb_limit(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 5), __val, GENMASK(12, 8)); +} + +static inline void set_tx_desc_rts_rate_fb_limit(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 5), __val, GENMASK(16, 13)); +} + +static inline void set_tx_desc_max_agg_num(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 6), __val, GENMASK(15, 11)); +} + +static inline void set_tx_desc_tx_buffer_size(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits((__pdesc + 7), __val, GENMASK(15, 0)); +} + +static inline void set_tx_desc_tx_buffer_address(__le32 *__pdesc, u32 __val) +{ + *(__pdesc + 8) = cpu_to_le32(__val); +} + +static inline u32 get_tx_desc_tx_buffer_address(__le32 *__pdesc) +{ + return le32_to_cpu(*(__pdesc + 8)); +} + +static inline void set_tx_desc_next_desc_address(__le32 *__pdesc, u32 __val) +{ + *(__pdesc + 10) = cpu_to_le32(__val); +} + +static inline u32 get_rx_desc_pkt_len(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, GENMASK(13, 0)); +} + +static inline u32 get_rx_desc_crc32(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, BIT(14)); +} + +static inline u32 
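The set_tx_desc_*/get_rx_desc_* helpers above are thin wrappers around le32_get_bits()/le32p_replace_bits() with GENMASK() field masks, indexed per descriptor dword. A stand-alone model of the replace operation (ignoring the little-endian conversion, which is a no-op on LE hosts) makes the bit layout of TX descriptor dword 0 concrete; replace_bits() here is a local illustration, not a kernel API:

#include <stdio.h>
#include <stdint.h>

/* Plain-C model of le32p_replace_bits(desc, val, GENMASK(hi, lo)):
 * clear the field, then OR in the new value shifted to its position. */
static uint32_t replace_bits(uint32_t word, uint32_t val,
			     unsigned int hi, unsigned int lo)
{
	uint32_t mask = (hi - lo == 31) ? 0xffffffffu
					: (((1u << (hi - lo + 1)) - 1) << lo);

	return (word & ~mask) | ((val << lo) & mask);
}

int main(void)
{
	uint32_t desc0 = 0;

	/* set_tx_desc_pkt_size(): packet length lives in bits 15..0 */
	desc0 = replace_bits(desc0, 1500, 15, 0);
	/* set_tx_desc_offset(): descriptor/info size in bits 23..16 */
	desc0 = replace_bits(desc0, 32, 23, 16);
	/* set_tx_desc_own(): OWN flag is bit 31 */
	desc0 = replace_bits(desc0, 1, 31, 31);

	printf("dword0 = 0x%08x\n", (unsigned int)desc0);	/* 0x802005dc */
	return 0;
}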
get_rx_desc_icv(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, BIT(15)); +} + +static inline u32 get_rx_desc_drv_info_size(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, GENMASK(19, 16)); +} + +static inline u32 get_rx_desc_enc_type(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, GENMASK(22, 20)); +} + +static inline u32 get_rx_desc_shift(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, GENMASK(25, 24)); +} + +static inline u32 get_rx_desc_physt(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, BIT(26)); +} + +static inline u32 get_rx_desc_swdec(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, BIT(27)); +} + +static inline u32 get_rx_desc_own(__le32 *__pdesc) +{ + return le32_get_bits(*__pdesc, BIT(31)); +} + +static inline void set_rx_desc_pkt_len(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, GENMASK(13, 0)); +} + +static inline void set_rx_desc_eor(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(30)); +} + +static inline void set_rx_desc_own(__le32 *__pdesc, u32 __val) +{ + le32p_replace_bits(__pdesc, __val, BIT(31)); +} + +static inline u32 get_rx_desc_paggr(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 1), BIT(14)); +} + +static inline u32 get_rx_desc_faggr(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 1), BIT(15)); +} + +static inline u32 get_rx_desc_rxmcs(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 3), GENMASK(5, 0)); +} + +static inline u32 get_rx_desc_rxht(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 3), BIT(6)); +} + +static inline u32 get_rx_desc_splcp(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 3), BIT(8)); +} + +static inline u32 get_rx_desc_bw(__le32 *__pdesc) +{ + return le32_get_bits(*(__pdesc + 3), BIT(9)); +} + +static inline u32 get_rx_desc_tsfl(__le32 *__pdesc) +{ + return le32_to_cpu(*(__pdesc + 5)); +} + +static inline u32 get_rx_desc_buff_addr(__le32 *__pdesc) +{ + return le32_to_cpu(*(__pdesc + 6)); +} + +static inline void set_rx_desc_buff_addr(__le32 *__pdesc, u32 __val) +{ + *(__pdesc + 6) = cpu_to_le32(__val); +} + +/* For 92D early mode */ +static inline void set_earlymode_pktnum(__le32 *__paddr, u32 __value) +{ + le32p_replace_bits(__paddr, __value, GENMASK(2, 0)); +} + +static inline void set_earlymode_len0(__le32 *__paddr, u32 __value) +{ + le32p_replace_bits(__paddr, __value, GENMASK(15, 4)); +} + +static inline void set_earlymode_len1(__le32 *__paddr, u32 __value) +{ + le32p_replace_bits(__paddr, __value, GENMASK(27, 16)); +} + +static inline void set_earlymode_len2_1(__le32 *__paddr, u32 __value) +{ + le32p_replace_bits(__paddr, __value, GENMASK(31, 28)); +} + +static inline void set_earlymode_len2_2(__le32 *__paddr, u32 __value) +{ + le32p_replace_bits((__paddr + 1), __value, GENMASK(7, 0)); +} + +static inline void set_earlymode_len3(__le32 *__paddr, u32 __value) +{ + le32p_replace_bits((__paddr + 1), __value, GENMASK(19, 8)); +} + +static inline void set_earlymode_len4(__le32 *__paddr, u32 __value) +{ + le32p_replace_bits((__paddr + 1), __value, GENMASK(31, 20)); +} + +struct rx_fwinfo_92d { + u8 gain_trsw[4]; + u8 pwdb_all; + u8 cfosho[4]; + u8 cfotail[4]; + s8 rxevm[2]; + s8 rxsnr[4]; + u8 pdsnr[2]; + u8 csi_current[2]; + u8 csi_target[2]; + u8 sigevm; + u8 max_ex_pwr; +#ifdef __LITTLE_ENDIAN + u8 ex_intf_flag:1; + u8 sgi_en:1; + u8 rxsc:2; + u8 reserve:4; +#else + u8 reserve:4; + u8 rxsc:2; + u8 sgi_en:1; + u8 ex_intf_flag:1; +#endif +} __packed; + +bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, + struct rtl_stats *stats, + 
struct ieee80211_rx_status *rx_status, + u8 *pdesc, struct sk_buff *skb); +void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, + u8 desc_name, u8 *val); +u64 rtl92de_get_desc(struct ieee80211_hw *hw, + u8 *p_desc, bool istx, u8 desc_name); + +#endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c index cf4aca83bd05..c6a2e8b22fa0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c @@ -4,455 +4,16 @@ #include "../wifi.h" #include "../base.h" #include "../core.h" -#include "reg.h" -#include "def.h" +#include "../rtl8192d/reg.h" +#include "../rtl8192d/def.h" +#include "../rtl8192d/dm_common.h" +#include "../rtl8192d/phy_common.h" +#include "../rtl8192d/fw_common.h" #include "phy.h" #include "dm.h" -#include "fw.h" #define UNDEC_SM_PWDB entry_min_undec_sm_pwdb -static const u32 ofdmswing_table[OFDM_TABLE_SIZE_92D] = { - 0x7f8001fe, /* 0, +6.0dB */ - 0x788001e2, /* 1, +5.5dB */ - 0x71c001c7, /* 2, +5.0dB */ - 0x6b8001ae, /* 3, +4.5dB */ - 0x65400195, /* 4, +4.0dB */ - 0x5fc0017f, /* 5, +3.5dB */ - 0x5a400169, /* 6, +3.0dB */ - 0x55400155, /* 7, +2.5dB */ - 0x50800142, /* 8, +2.0dB */ - 0x4c000130, /* 9, +1.5dB */ - 0x47c0011f, /* 10, +1.0dB */ - 0x43c0010f, /* 11, +0.5dB */ - 0x40000100, /* 12, +0dB */ - 0x3c8000f2, /* 13, -0.5dB */ - 0x390000e4, /* 14, -1.0dB */ - 0x35c000d7, /* 15, -1.5dB */ - 0x32c000cb, /* 16, -2.0dB */ - 0x300000c0, /* 17, -2.5dB */ - 0x2d4000b5, /* 18, -3.0dB */ - 0x2ac000ab, /* 19, -3.5dB */ - 0x288000a2, /* 20, -4.0dB */ - 0x26000098, /* 21, -4.5dB */ - 0x24000090, /* 22, -5.0dB */ - 0x22000088, /* 23, -5.5dB */ - 0x20000080, /* 24, -6.0dB */ - 0x1e400079, /* 25, -6.5dB */ - 0x1c800072, /* 26, -7.0dB */ - 0x1b00006c, /* 27. 
-7.5dB */ - 0x19800066, /* 28, -8.0dB */ - 0x18000060, /* 29, -8.5dB */ - 0x16c0005b, /* 30, -9.0dB */ - 0x15800056, /* 31, -9.5dB */ - 0x14400051, /* 32, -10.0dB */ - 0x1300004c, /* 33, -10.5dB */ - 0x12000048, /* 34, -11.0dB */ - 0x11000044, /* 35, -11.5dB */ - 0x10000040, /* 36, -12.0dB */ - 0x0f00003c, /* 37, -12.5dB */ - 0x0e400039, /* 38, -13.0dB */ - 0x0d800036, /* 39, -13.5dB */ - 0x0cc00033, /* 40, -14.0dB */ - 0x0c000030, /* 41, -14.5dB */ - 0x0b40002d, /* 42, -15.0dB */ -}; - -static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = { - {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, /* 0, +0dB */ - {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 1, -0.5dB */ - {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 2, -1.0dB */ - {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 3, -1.5dB */ - {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 4, -2.0dB */ - {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 5, -2.5dB */ - {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 6, -3.0dB */ - {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 7, -3.5dB */ - {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 8, -4.0dB */ - {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 9, -4.5dB */ - {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 10, -5.0dB */ - {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 11, -5.5dB */ - {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 12, -6.0dB */ - {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 13, -6.5dB */ - {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 14, -7.0dB */ - {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 15, -7.5dB */ - {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */ - {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 17, -8.5dB */ - {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 18, -9.0dB */ - {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 19, -9.5dB */ - {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 20, -10.0dB */ - {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 21, -10.5dB */ - {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 22, -11.0dB */ - {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 23, -11.5dB */ - {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 24, -12.0dB */ - {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 25, -12.5dB */ - {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 26, -13.0dB */ - {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 27, -13.5dB */ - {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 28, -14.0dB */ - {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 29, -14.5dB */ - {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 30, -15.0dB */ - {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 31, -15.5dB */ - {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} /* 32, -16.0dB */ -}; - -static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = { - {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, /* 0, +0dB */ - {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 1, -0.5dB */ - {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 2, -1.0dB */ - {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 3, -1.5dB */ - {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 4, -2.0dB */ - {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 5, -2.5dB */ - {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 6, -3.0dB */ - {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 7, -3.5dB */ - {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 8, -4.0dB */ - {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 
0x00}, /* 9, -4.5dB */ - {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 10, -5.0dB */ - {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 11, -5.5dB */ - {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 12, -6.0dB */ - {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 13, -6.5dB */ - {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 14, -7.0dB */ - {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 15, -7.5dB */ - {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */ - {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 17, -8.5dB */ - {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 18, -9.0dB */ - {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 19, -9.5dB */ - {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 20, -10.0dB */ - {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 21, -10.5dB */ - {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 22, -11.0dB */ - {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 23, -11.5dB */ - {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 24, -12.0dB */ - {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 25, -12.5dB */ - {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 26, -13.0dB */ - {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 27, -13.5dB */ - {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 28, -14.0dB */ - {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 29, -14.5dB */ - {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 30, -15.0dB */ - {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 31, -15.5dB */ - {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} /* 32, -16.0dB */ -}; - -static void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw) -{ - u32 ret_value; - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt); - unsigned long flag = 0; - - /* hold ofdm counter */ - rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 1); /* hold page C counter */ - rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 1); /*hold page D counter */ - - ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD); - falsealm_cnt->cnt_fast_fsync_fail = (ret_value & 0xffff); - falsealm_cnt->cnt_sb_search_fail = ((ret_value & 0xffff0000) >> 16); - ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD); - falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16); - ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD); - falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff); - falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16); - ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD); - falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff); - falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail + - falsealm_cnt->cnt_rate_illegal + - falsealm_cnt->cnt_crc8_fail + - falsealm_cnt->cnt_mcs_fail + - falsealm_cnt->cnt_fast_fsync_fail + - falsealm_cnt->cnt_sb_search_fail; - - if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) { - /* hold cck counter */ - rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag); - ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0); - falsealm_cnt->cnt_cck_fail = ret_value; - ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3); - falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8; - rtl92d_release_cckandrw_pagea_ctl(hw, &flag); - } else { - falsealm_cnt->cnt_cck_fail = 0; - } - - /* reset false alarm counter registers */ - falsealm_cnt->cnt_all = falsealm_cnt->cnt_fast_fsync_fail + - falsealm_cnt->cnt_sb_search_fail + - falsealm_cnt->cnt_parity_fail + - 
falsealm_cnt->cnt_rate_illegal + - falsealm_cnt->cnt_crc8_fail + - falsealm_cnt->cnt_mcs_fail + - falsealm_cnt->cnt_cck_fail; - - rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1); - /* update ofdm counter */ - rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0); - /* update page C counter */ - rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 0); - /* update page D counter */ - rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 0); - if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) { - /* reset cck counter */ - rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag); - rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0); - /* enable cck counter */ - rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2); - rtl92d_release_cckandrw_pagea_ctl(hw, &flag); - } - rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, - "Cnt_Fast_Fsync_fail = %x, Cnt_SB_Search_fail = %x\n", - falsealm_cnt->cnt_fast_fsync_fail, - falsealm_cnt->cnt_sb_search_fail); - rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, - "Cnt_Parity_Fail = %x, Cnt_Rate_Illegal = %x, Cnt_Crc8_fail = %x, Cnt_Mcs_fail = %x\n", - falsealm_cnt->cnt_parity_fail, - falsealm_cnt->cnt_rate_illegal, - falsealm_cnt->cnt_crc8_fail, - falsealm_cnt->cnt_mcs_fail); - rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, - "Cnt_Ofdm_fail = %x, Cnt_Cck_fail = %x, Cnt_all = %x\n", - falsealm_cnt->cnt_ofdm_fail, - falsealm_cnt->cnt_cck_fail, - falsealm_cnt->cnt_all); -} - -static void rtl92d_dm_find_minimum_rssi(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct dig_t *de_digtable = &rtlpriv->dm_digtable; - struct rtl_mac *mac = rtl_mac(rtlpriv); - - /* Determine the minimum RSSI */ - if ((mac->link_state < MAC80211_LINKED) && - (rtlpriv->dm.UNDEC_SM_PWDB == 0)) { - de_digtable->min_undec_pwdb_for_dm = 0; - rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, - "Not connected to any\n"); - } - if (mac->link_state >= MAC80211_LINKED) { - if (mac->opmode == NL80211_IFTYPE_AP || - mac->opmode == NL80211_IFTYPE_ADHOC) { - de_digtable->min_undec_pwdb_for_dm = - rtlpriv->dm.UNDEC_SM_PWDB; - rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, - "AP Client PWDB = 0x%lx\n", - rtlpriv->dm.UNDEC_SM_PWDB); - } else { - de_digtable->min_undec_pwdb_for_dm = - rtlpriv->dm.undec_sm_pwdb; - rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, - "STA Default Port PWDB = 0x%x\n", - de_digtable->min_undec_pwdb_for_dm); - } - } else { - de_digtable->min_undec_pwdb_for_dm = rtlpriv->dm.UNDEC_SM_PWDB; - rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, - "AP Ext Port or disconnect PWDB = 0x%x\n", - de_digtable->min_undec_pwdb_for_dm); - } - - rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n", - de_digtable->min_undec_pwdb_for_dm); -} - -static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct dig_t *de_digtable = &rtlpriv->dm_digtable; - unsigned long flag = 0; - - if (de_digtable->cursta_cstate == DIG_STA_CONNECT) { - if (de_digtable->pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) { - if (de_digtable->min_undec_pwdb_for_dm <= 25) - de_digtable->cur_cck_pd_state = - CCK_PD_STAGE_LOWRSSI; - else - de_digtable->cur_cck_pd_state = - CCK_PD_STAGE_HIGHRSSI; - } else { - if (de_digtable->min_undec_pwdb_for_dm <= 20) - de_digtable->cur_cck_pd_state = - CCK_PD_STAGE_LOWRSSI; - else - de_digtable->cur_cck_pd_state = - CCK_PD_STAGE_HIGHRSSI; - } - } else { - de_digtable->cur_cck_pd_state = CCK_PD_STAGE_LOWRSSI; - } - if (de_digtable->pre_cck_pd_state != de_digtable->cur_cck_pd_state) { - if (de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI) { - 
rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag); - rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0x83); - rtl92d_release_cckandrw_pagea_ctl(hw, &flag); - } else { - rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag); - rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd); - rtl92d_release_cckandrw_pagea_ctl(hw, &flag); - } - de_digtable->pre_cck_pd_state = de_digtable->cur_cck_pd_state; - } - rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n", - de_digtable->cursta_cstate == DIG_STA_CONNECT ? - "DIG_STA_CONNECT " : "DIG_STA_DISCONNECT"); - rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n", - de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ? - "Low RSSI " : "High RSSI "); - rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "is92d single phy =%x\n", - IS_92D_SINGLEPHY(rtlpriv->rtlhal.version)); - -} - -void rtl92d_dm_write_dig(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct dig_t *de_digtable = &rtlpriv->dm_digtable; - - rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, - "cur_igvalue = 0x%x, pre_igvalue = 0x%x, back_val = %d\n", - de_digtable->cur_igvalue, de_digtable->pre_igvalue, - de_digtable->back_val); - if (de_digtable->dig_enable_flag == false) { - rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "DIG is disabled\n"); - de_digtable->pre_igvalue = 0x17; - return; - } - if (de_digtable->pre_igvalue != de_digtable->cur_igvalue) { - rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f, - de_digtable->cur_igvalue); - rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f, - de_digtable->cur_igvalue); - de_digtable->pre_igvalue = de_digtable->cur_igvalue; - } -} - -static void rtl92d_early_mode_enabled(struct rtl_priv *rtlpriv) -{ - struct dig_t *de_digtable = &rtlpriv->dm_digtable; - - if ((rtlpriv->mac80211.link_state >= MAC80211_LINKED) && - (rtlpriv->mac80211.vendor == PEER_CISCO)) { - rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "IOT_PEER = CISCO\n"); - if (de_digtable->last_min_undec_pwdb_for_dm >= 50 - && de_digtable->min_undec_pwdb_for_dm < 50) { - rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x00); - rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, - "Early Mode Off\n"); - } else if (de_digtable->last_min_undec_pwdb_for_dm <= 55 && - de_digtable->min_undec_pwdb_for_dm > 55) { - rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f); - rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, - "Early Mode On\n"); - } - } else if (!(rtl_read_byte(rtlpriv, REG_EARLY_MODE_CONTROL) & 0xf)) { - rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f); - rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "Early Mode On\n"); - } -} - -static void rtl92d_dm_dig(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct dig_t *de_digtable = &rtlpriv->dm_digtable; - u8 value_igi = de_digtable->cur_igvalue; - struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt); - - rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "==>\n"); - if (rtlpriv->rtlhal.earlymode_enable) { - rtl92d_early_mode_enabled(rtlpriv); - de_digtable->last_min_undec_pwdb_for_dm = - de_digtable->min_undec_pwdb_for_dm; - } - if (!rtlpriv->dm.dm_initialgain_enable) - return; - - /* because we will send data pkt when scanning - * this will cause some ap like gear-3700 wep TP - * lower if we return here, this is the diff of - * mac80211 driver vs ieee80211 driver */ - /* if (rtlpriv->mac80211.act_scanning) - * return; */ - - /* Not STA mode return tmp */ - if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION) - return; - rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n"); - /* Decide the current status and if modify initial gain or not */ - if (rtlpriv->mac80211.link_state >= 
MAC80211_LINKED) - de_digtable->cursta_cstate = DIG_STA_CONNECT; - else - de_digtable->cursta_cstate = DIG_STA_DISCONNECT; - - /* adjust initial gain according to false alarm counter */ - if (falsealm_cnt->cnt_all < DM_DIG_FA_TH0) - value_igi--; - else if (falsealm_cnt->cnt_all < DM_DIG_FA_TH1) - value_igi += 0; - else if (falsealm_cnt->cnt_all < DM_DIG_FA_TH2) - value_igi++; - else if (falsealm_cnt->cnt_all >= DM_DIG_FA_TH2) - value_igi += 2; - rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, - "dm_DIG() Before: large_fa_hit=%d, forbidden_igi=%x\n", - de_digtable->large_fa_hit, de_digtable->forbidden_igi); - rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, - "dm_DIG() Before: Recover_cnt=%d, rx_gain_min=%x\n", - de_digtable->recover_cnt, de_digtable->rx_gain_min); - - /* deal with abnormally large false alarm */ - if (falsealm_cnt->cnt_all > 10000) { - rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, - "dm_DIG(): Abnormally false alarm case\n"); - - de_digtable->large_fa_hit++; - if (de_digtable->forbidden_igi < de_digtable->cur_igvalue) { - de_digtable->forbidden_igi = de_digtable->cur_igvalue; - de_digtable->large_fa_hit = 1; - } - if (de_digtable->large_fa_hit >= 3) { - if ((de_digtable->forbidden_igi + 1) > DM_DIG_MAX) - de_digtable->rx_gain_min = DM_DIG_MAX; - else - de_digtable->rx_gain_min = - (de_digtable->forbidden_igi + 1); - de_digtable->recover_cnt = 3600; /* 3600=2hr */ - } - } else { - /* Recovery mechanism for IGI lower bound */ - if (de_digtable->recover_cnt != 0) { - de_digtable->recover_cnt--; - } else { - if (de_digtable->large_fa_hit == 0) { - if ((de_digtable->forbidden_igi - 1) < - DM_DIG_FA_LOWER) { - de_digtable->forbidden_igi = - DM_DIG_FA_LOWER; - de_digtable->rx_gain_min = - DM_DIG_FA_LOWER; - - } else { - de_digtable->forbidden_igi--; - de_digtable->rx_gain_min = - (de_digtable->forbidden_igi + 1); - } - } else if (de_digtable->large_fa_hit == 3) { - de_digtable->large_fa_hit = 0; - } - } - } - rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, - "dm_DIG() After: large_fa_hit=%d, forbidden_igi=%x\n", - de_digtable->large_fa_hit, de_digtable->forbidden_igi); - rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, - "dm_DIG() After: recover_cnt=%d, rx_gain_min=%x\n", - de_digtable->recover_cnt, de_digtable->rx_gain_min); - - if (value_igi > DM_DIG_MAX) - value_igi = DM_DIG_MAX; - else if (value_igi < de_digtable->rx_gain_min) - value_igi = de_digtable->rx_gain_min; - de_digtable->cur_igvalue = value_igi; - rtl92d_dm_write_dig(hw); - if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) - rtl92d_dm_cck_packet_detection_thresh(hw); - rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "<<==\n"); -} - static void rtl92d_dm_init_dynamic_txpower(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -579,626 +140,7 @@ static void rtl92d_dm_pwdb_monitor(struct ieee80211_hw *hw) } } -void rtl92d_dm_init_edca_turbo(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - - rtlpriv->dm.current_turbo_edca = false; - rtlpriv->dm.is_any_nonbepkts = false; - rtlpriv->dm.is_cur_rdlstate = false; -} - -static void rtl92d_dm_check_edca_turbo(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - const u32 edca_be_ul = 0x5ea42b; - const u32 edca_be_dl = 0x5ea42b; - static u64 last_txok_cnt; - static u64 last_rxok_cnt; - u64 cur_txok_cnt; - u64 cur_rxok_cnt; - - if (mac->link_state != MAC80211_LINKED) { - rtlpriv->dm.current_turbo_edca = false; - goto exit; - } - - if ((!rtlpriv->dm.is_any_nonbepkts) && - (!rtlpriv->dm.disable_framebursting)) { - cur_txok_cnt = 
rtlpriv->stats.txbytesunicast - last_txok_cnt; - cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt; - if (cur_rxok_cnt > 4 * cur_txok_cnt) { - if (!rtlpriv->dm.is_cur_rdlstate || - !rtlpriv->dm.current_turbo_edca) { - rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, - edca_be_dl); - rtlpriv->dm.is_cur_rdlstate = true; - } - } else { - if (rtlpriv->dm.is_cur_rdlstate || - !rtlpriv->dm.current_turbo_edca) { - rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, - edca_be_ul); - rtlpriv->dm.is_cur_rdlstate = false; - } - } - rtlpriv->dm.current_turbo_edca = true; - } else { - if (rtlpriv->dm.current_turbo_edca) { - u8 tmp = AC0_BE; - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, - &tmp); - rtlpriv->dm.current_turbo_edca = false; - } - } - -exit: - rtlpriv->dm.is_any_nonbepkts = false; - last_txok_cnt = rtlpriv->stats.txbytesunicast; - last_rxok_cnt = rtlpriv->stats.rxbytesunicast; -} - -static void rtl92d_dm_rxgain_tracking_thermalmeter(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 index_mapping[RX_INDEX_MAPPING_NUM] = { - 0x0f, 0x0f, 0x0d, 0x0c, 0x0b, - 0x0a, 0x09, 0x08, 0x07, 0x06, - 0x05, 0x04, 0x04, 0x03, 0x02 - }; - int i; - u32 u4tmp; - - u4tmp = (index_mapping[(rtlpriv->efuse.eeprom_thermalmeter - - rtlpriv->dm.thermalvalue_rxgain)]) << 12; - rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "===> Rx Gain %x\n", u4tmp); - for (i = RF90_PATH_A; i < rtlpriv->phy.num_total_rfpath; i++) - rtl_set_rfreg(hw, i, 0x3C, RFREG_OFFSET_MASK, - (rtlpriv->phy.reg_rf3c[i] & (~(0xF000))) | u4tmp); -} - -static void rtl92d_bandtype_2_4G(struct ieee80211_hw *hw, long *temp_cckg, - u8 *cck_index_old) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - int i; - unsigned long flag = 0; - long temp_cck; - const u8 *cckswing; - - /* Query CCK default setting From 0xa24 */ - rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag); - temp_cck = rtl_get_bbreg(hw, RCCK0_TXFILTER2, - MASKDWORD) & MASKCCK; - rtl92d_release_cckandrw_pagea_ctl(hw, &flag); - for (i = 0; i < CCK_TABLE_LENGTH; i++) { - if (rtlpriv->dm.cck_inch14) - cckswing = &cckswing_table_ch14[i][2]; - else - cckswing = &cckswing_table_ch1ch13[i][2]; - - if (temp_cck == le32_to_cpu(*((__le32 *)cckswing))) { - *cck_index_old = (u8)i; - rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch14 %d\n", - RCCK0_TXFILTER2, temp_cck, - *cck_index_old, - rtlpriv->dm.cck_inch14); - break; - } - } - *temp_cckg = temp_cck; -} - -static void rtl92d_bandtype_5G(struct rtl_hal *rtlhal, u8 *ofdm_index, - bool *internal_pa, u8 thermalvalue, u8 delta, - u8 rf, struct rtl_efuse *rtlefuse, - struct rtl_priv *rtlpriv, struct rtl_phy *rtlphy, - const u8 index_mapping[5][INDEX_MAPPING_NUM], - const u8 index_mapping_pa[8][INDEX_MAPPING_NUM]) -{ - int i; - u8 index; - u8 offset = 0; - - for (i = 0; i < rf; i++) { - if (rtlhal->macphymode == DUALMAC_DUALPHY && - rtlhal->interfaceindex == 1) /* MAC 1 5G */ - *internal_pa = rtlefuse->internal_pa_5g[1]; - else - *internal_pa = rtlefuse->internal_pa_5g[i]; - if (*internal_pa) { - if (rtlhal->interfaceindex == 1 || i == rf) - offset = 4; - else - offset = 0; - if (rtlphy->current_channel >= 100 && - rtlphy->current_channel <= 165) - offset += 2; - } else { - if (rtlhal->interfaceindex == 1 || i == rf) - offset = 2; - else - offset = 0; - } - if (thermalvalue > rtlefuse->eeprom_thermalmeter) - offset++; - if (*internal_pa) { - if (delta > INDEX_MAPPING_NUM - 1) - index = index_mapping_pa[offset] - [INDEX_MAPPING_NUM - 1]; - else - index = - index_mapping_pa[offset][delta]; 
- } else { - if (delta > INDEX_MAPPING_NUM - 1) - index = - index_mapping[offset][INDEX_MAPPING_NUM - 1]; - else - index = index_mapping[offset][delta]; - } - if (thermalvalue > rtlefuse->eeprom_thermalmeter) { - if (*internal_pa && thermalvalue > 0x12) { - ofdm_index[i] = rtlpriv->dm.ofdm_index[i] - - ((delta / 2) * 3 + (delta % 2)); - } else { - ofdm_index[i] -= index; - } - } else { - ofdm_index[i] += index; - } - } -} - -static void rtl92d_dm_txpower_tracking_callback_thermalmeter( - struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - u8 thermalvalue, delta, delta_lck, delta_iqk, delta_rxgain; - u8 offset, thermalvalue_avg_count = 0; - u32 thermalvalue_avg = 0; - bool internal_pa = false; - long ele_a = 0, ele_d, temp_cck, val_x, value32; - long val_y, ele_c = 0; - u8 ofdm_index[2]; - s8 cck_index = 0; - u8 ofdm_index_old[2] = {0, 0}; - s8 cck_index_old = 0; - u8 index; - int i; - bool is2t = IS_92D_SINGLEPHY(rtlhal->version); - u8 ofdm_min_index = 6, ofdm_min_index_internal_pa = 3, rf; - u8 indexforchannel = - rtl92d_get_rightchnlplace_for_iqk(rtlphy->current_channel); - static const u8 index_mapping[5][INDEX_MAPPING_NUM] = { - /* 5G, path A/MAC 0, decrease power */ - {0, 1, 3, 6, 8, 9, 11, 13, 14, 16, 17, 18, 18}, - /* 5G, path A/MAC 0, increase power */ - {0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18}, - /* 5G, path B/MAC 1, decrease power */ - {0, 2, 3, 6, 8, 9, 11, 13, 14, 16, 17, 18, 18}, - /* 5G, path B/MAC 1, increase power */ - {0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18}, - /* 2.4G, for decreas power */ - {0, 1, 2, 3, 4, 5, 6, 7, 7, 8, 9, 10, 10}, - }; - static const u8 index_mapping_internal_pa[8][INDEX_MAPPING_NUM] = { - /* 5G, path A/MAC 0, ch36-64, decrease power */ - {0, 1, 2, 4, 6, 7, 9, 11, 12, 14, 15, 16, 16}, - /* 5G, path A/MAC 0, ch36-64, increase power */ - {0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18}, - /* 5G, path A/MAC 0, ch100-165, decrease power */ - {0, 1, 2, 3, 5, 6, 8, 10, 11, 13, 14, 15, 15}, - /* 5G, path A/MAC 0, ch100-165, increase power */ - {0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18}, - /* 5G, path B/MAC 1, ch36-64, decrease power */ - {0, 1, 2, 4, 6, 7, 9, 11, 12, 14, 15, 16, 16}, - /* 5G, path B/MAC 1, ch36-64, increase power */ - {0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18}, - /* 5G, path B/MAC 1, ch100-165, decrease power */ - {0, 1, 2, 3, 5, 6, 8, 9, 10, 12, 13, 14, 14}, - /* 5G, path B/MAC 1, ch100-165, increase power */ - {0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18}, - }; - - rtlpriv->dm.txpower_trackinginit = true; - rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "\n"); - thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0xf800); - rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n", - thermalvalue, - rtlpriv->dm.thermalvalue, rtlefuse->eeprom_thermalmeter); - rtl92d_phy_ap_calibrate(hw, (thermalvalue - - rtlefuse->eeprom_thermalmeter)); - - if (!thermalvalue) - goto exit; - - if (is2t) - rf = 2; - else - rf = 1; - - if (rtlpriv->dm.thermalvalue && !rtlhal->reloadtxpowerindex) - goto old_index_done; - - ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD) & MASKOFDM_D; - for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) { - if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) { - ofdm_index_old[0] = (u8)i; - - rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "Initial 
pathA ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n", - ROFDM0_XATXIQIMBALANCE, - ele_d, ofdm_index_old[0]); - break; - } - } - if (is2t) { - ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, - MASKDWORD) & MASKOFDM_D; - for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) { - if (ele_d == - (ofdmswing_table[i] & MASKOFDM_D)) { - ofdm_index_old[1] = (u8)i; - rtl_dbg(rtlpriv, COMP_POWER_TRACKING, - DBG_LOUD, - "Initial pathB ele_d reg 0x%x = 0x%lx, ofdm_index = 0x%x\n", - ROFDM0_XBTXIQIMBALANCE, ele_d, - ofdm_index_old[1]); - break; - } - } - } - if (rtlhal->current_bandtype == BAND_ON_2_4G) { - rtl92d_bandtype_2_4G(hw, &temp_cck, &cck_index_old); - } else { - temp_cck = 0x090e1317; - cck_index_old = 12; - } - - if (!rtlpriv->dm.thermalvalue) { - rtlpriv->dm.thermalvalue = rtlefuse->eeprom_thermalmeter; - rtlpriv->dm.thermalvalue_lck = thermalvalue; - rtlpriv->dm.thermalvalue_iqk = thermalvalue; - rtlpriv->dm.thermalvalue_rxgain = rtlefuse->eeprom_thermalmeter; - for (i = 0; i < rf; i++) - rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i]; - rtlpriv->dm.cck_index = cck_index_old; - } - if (rtlhal->reloadtxpowerindex) { - for (i = 0; i < rf; i++) - rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i]; - rtlpriv->dm.cck_index = cck_index_old; - rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "reload ofdm index for band switch\n"); - } -old_index_done: - for (i = 0; i < rf; i++) - ofdm_index[i] = rtlpriv->dm.ofdm_index[i]; - - rtlpriv->dm.thermalvalue_avg - [rtlpriv->dm.thermalvalue_avg_index] = thermalvalue; - rtlpriv->dm.thermalvalue_avg_index++; - if (rtlpriv->dm.thermalvalue_avg_index == AVG_THERMAL_NUM) - rtlpriv->dm.thermalvalue_avg_index = 0; - for (i = 0; i < AVG_THERMAL_NUM; i++) { - if (rtlpriv->dm.thermalvalue_avg[i]) { - thermalvalue_avg += rtlpriv->dm.thermalvalue_avg[i]; - thermalvalue_avg_count++; - } - } - if (thermalvalue_avg_count) - thermalvalue = (u8)(thermalvalue_avg / thermalvalue_avg_count); - if (rtlhal->reloadtxpowerindex) { - delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ? - (thermalvalue - rtlefuse->eeprom_thermalmeter) : - (rtlefuse->eeprom_thermalmeter - thermalvalue); - rtlhal->reloadtxpowerindex = false; - rtlpriv->dm.done_txpower = false; - } else if (rtlpriv->dm.done_txpower) { - delta = (thermalvalue > rtlpriv->dm.thermalvalue) ? - (thermalvalue - rtlpriv->dm.thermalvalue) : - (rtlpriv->dm.thermalvalue - thermalvalue); - } else { - delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ? - (thermalvalue - rtlefuse->eeprom_thermalmeter) : - (rtlefuse->eeprom_thermalmeter - thermalvalue); - } - delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ? - (thermalvalue - rtlpriv->dm.thermalvalue_lck) : - (rtlpriv->dm.thermalvalue_lck - thermalvalue); - delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ? - (thermalvalue - rtlpriv->dm.thermalvalue_iqk) : - (rtlpriv->dm.thermalvalue_iqk - thermalvalue); - delta_rxgain = - (thermalvalue > rtlpriv->dm.thermalvalue_rxgain) ? 
- (thermalvalue - rtlpriv->dm.thermalvalue_rxgain) : - (rtlpriv->dm.thermalvalue_rxgain - thermalvalue); - rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n", - thermalvalue, rtlpriv->dm.thermalvalue, - rtlefuse->eeprom_thermalmeter, delta, delta_lck, - delta_iqk); - if (delta_lck > rtlefuse->delta_lck && rtlefuse->delta_lck != 0) { - rtlpriv->dm.thermalvalue_lck = thermalvalue; - rtl92d_phy_lc_calibrate(hw); - } - - if (delta == 0 || !rtlpriv->dm.txpower_track_control) - goto check_delta; - - rtlpriv->dm.done_txpower = true; - delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ? - (thermalvalue - rtlefuse->eeprom_thermalmeter) : - (rtlefuse->eeprom_thermalmeter - thermalvalue); - if (rtlhal->current_bandtype == BAND_ON_2_4G) { - offset = 4; - if (delta > INDEX_MAPPING_NUM - 1) - index = index_mapping[offset][INDEX_MAPPING_NUM - 1]; - else - index = index_mapping[offset][delta]; - if (thermalvalue > rtlpriv->dm.thermalvalue) { - for (i = 0; i < rf; i++) - ofdm_index[i] -= delta; - cck_index -= delta; - } else { - for (i = 0; i < rf; i++) - ofdm_index[i] += index; - cck_index += index; - } - } else if (rtlhal->current_bandtype == BAND_ON_5G) { - rtl92d_bandtype_5G(rtlhal, ofdm_index, - &internal_pa, thermalvalue, - delta, rf, rtlefuse, rtlpriv, - rtlphy, index_mapping, - index_mapping_internal_pa); - } - if (is2t) { - rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "temp OFDM_A_index=0x%x, OFDM_B_index = 0x%x,cck_index=0x%x\n", - rtlpriv->dm.ofdm_index[0], - rtlpriv->dm.ofdm_index[1], - rtlpriv->dm.cck_index); - } else { - rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "temp OFDM_A_index=0x%x,cck_index = 0x%x\n", - rtlpriv->dm.ofdm_index[0], - rtlpriv->dm.cck_index); - } - for (i = 0; i < rf; i++) { - if (ofdm_index[i] > OFDM_TABLE_SIZE_92D - 1) { - ofdm_index[i] = OFDM_TABLE_SIZE_92D - 1; - } else if (internal_pa || - rtlhal->current_bandtype == BAND_ON_2_4G) { - if (ofdm_index[i] < ofdm_min_index_internal_pa) - ofdm_index[i] = ofdm_min_index_internal_pa; - } else if (ofdm_index[i] < ofdm_min_index) { - ofdm_index[i] = ofdm_min_index; - } - } - if (rtlhal->current_bandtype == BAND_ON_2_4G) { - if (cck_index > CCK_TABLE_SIZE - 1) { - cck_index = CCK_TABLE_SIZE - 1; - } else if (cck_index < 0) { - cck_index = 0; - } - } - if (is2t) { - rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "new OFDM_A_index=0x%x, OFDM_B_index = 0x%x, cck_index=0x%x\n", - ofdm_index[0], ofdm_index[1], - cck_index); - } else { - rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "new OFDM_A_index=0x%x,cck_index = 0x%x\n", - ofdm_index[0], cck_index); - } - ele_d = (ofdmswing_table[ofdm_index[0]] & 0xFFC00000) >> 22; - val_x = rtlphy->iqk_matrix[indexforchannel].value[0][0]; - val_y = rtlphy->iqk_matrix[indexforchannel].value[0][1]; - if (val_x != 0) { - if ((val_x & 0x00000200) != 0) - val_x = val_x | 0xFFFFFC00; - ele_a = ((val_x * ele_d) >> 8) & 0x000003FF; - - /* new element C = element D x Y */ - if ((val_y & 0x00000200) != 0) - val_y = val_y | 0xFFFFFC00; - ele_c = ((val_y * ele_d) >> 8) & 0x000003FF; - - /* write new elements A, C, D to regC80 and - * regC94, element B is always 0 - */ - value32 = (ele_d << 22) | ((ele_c & 0x3F) << 16) | ele_a; - rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, - MASKDWORD, value32); - - value32 = (ele_c & 0x000003C0) >> 6; - rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, - value32); - - value32 = ((val_x * ele_d) >> 7) & 0x01; - rtl_set_bbreg(hw, 
ROFDM0_ECCATHRESHOLD, BIT(24), - value32); - - } else { - rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, - MASKDWORD, - ofdmswing_table[(u8)ofdm_index[0]]); - rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, - 0x00); - rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, - BIT(24), 0x00); - } - - rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "TxPwrTracking for interface %d path A: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xe94 = 0x%lx 0xe9c = 0x%lx\n", - rtlhal->interfaceindex, - val_x, val_y, ele_a, ele_c, ele_d, - val_x, val_y); - - if (cck_index >= CCK_TABLE_SIZE) - cck_index = CCK_TABLE_SIZE - 1; - if (cck_index < 0) - cck_index = 0; - if (rtlhal->current_bandtype == BAND_ON_2_4G) { - /* Adjust CCK according to IQK result */ - if (!rtlpriv->dm.cck_inch14) { - rtl_write_byte(rtlpriv, 0xa22, - cckswing_table_ch1ch13[cck_index][0]); - rtl_write_byte(rtlpriv, 0xa23, - cckswing_table_ch1ch13[cck_index][1]); - rtl_write_byte(rtlpriv, 0xa24, - cckswing_table_ch1ch13[cck_index][2]); - rtl_write_byte(rtlpriv, 0xa25, - cckswing_table_ch1ch13[cck_index][3]); - rtl_write_byte(rtlpriv, 0xa26, - cckswing_table_ch1ch13[cck_index][4]); - rtl_write_byte(rtlpriv, 0xa27, - cckswing_table_ch1ch13[cck_index][5]); - rtl_write_byte(rtlpriv, 0xa28, - cckswing_table_ch1ch13[cck_index][6]); - rtl_write_byte(rtlpriv, 0xa29, - cckswing_table_ch1ch13[cck_index][7]); - } else { - rtl_write_byte(rtlpriv, 0xa22, - cckswing_table_ch14[cck_index][0]); - rtl_write_byte(rtlpriv, 0xa23, - cckswing_table_ch14[cck_index][1]); - rtl_write_byte(rtlpriv, 0xa24, - cckswing_table_ch14[cck_index][2]); - rtl_write_byte(rtlpriv, 0xa25, - cckswing_table_ch14[cck_index][3]); - rtl_write_byte(rtlpriv, 0xa26, - cckswing_table_ch14[cck_index][4]); - rtl_write_byte(rtlpriv, 0xa27, - cckswing_table_ch14[cck_index][5]); - rtl_write_byte(rtlpriv, 0xa28, - cckswing_table_ch14[cck_index][6]); - rtl_write_byte(rtlpriv, 0xa29, - cckswing_table_ch14[cck_index][7]); - } - } - if (is2t) { - ele_d = (ofdmswing_table[ofdm_index[1]] & 0xFFC00000) >> 22; - val_x = rtlphy->iqk_matrix[indexforchannel].value[0][4]; - val_y = rtlphy->iqk_matrix[indexforchannel].value[0][5]; - if (val_x != 0) { - if ((val_x & 0x00000200) != 0) - /* consider minus */ - val_x = val_x | 0xFFFFFC00; - ele_a = ((val_x * ele_d) >> 8) & 0x000003FF; - /* new element C = element D x Y */ - if ((val_y & 0x00000200) != 0) - val_y = val_y | 0xFFFFFC00; - ele_c = ((val_y * ele_d) >> 8) & 0x00003FF; - /* write new elements A, C, D to regC88 - * and regC9C, element B is always 0 - */ - value32 = (ele_d << 22) | ((ele_c & 0x3F) << 16) | ele_a; - rtl_set_bbreg(hw, - ROFDM0_XBTXIQIMBALANCE, - MASKDWORD, value32); - value32 = (ele_c & 0x000003C0) >> 6; - rtl_set_bbreg(hw, ROFDM0_XDTXAFE, - MASKH4BITS, value32); - value32 = ((val_x * ele_d) >> 7) & 0x01; - rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, - BIT(28), value32); - } else { - rtl_set_bbreg(hw, - ROFDM0_XBTXIQIMBALANCE, - MASKDWORD, - ofdmswing_table[ofdm_index[1]]); - rtl_set_bbreg(hw, ROFDM0_XDTXAFE, - MASKH4BITS, 0x00); - rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, - BIT(28), 0x00); - } - rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "TxPwrTracking path B: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xeb4 = 0x%lx 0xebc = 0x%lx\n", - val_x, val_y, ele_a, ele_c, - ele_d, val_x, val_y); - } - rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "TxPwrTracking 0xc80 = 0x%x, 0xc94 = 0x%x RF 0x24 = 0x%x\n", - rtl_get_bbreg(hw, 0xc80, MASKDWORD), - rtl_get_bbreg(hw, 0xc94, MASKDWORD), - rtl_get_rfreg(hw, RF90_PATH_A, 0x24, 
- RFREG_OFFSET_MASK)); - -check_delta: - if (delta_iqk > rtlefuse->delta_iqk && rtlefuse->delta_iqk != 0) { - rtl92d_phy_reset_iqk_result(hw); - rtlpriv->dm.thermalvalue_iqk = thermalvalue; - rtl92d_phy_iq_calibrate(hw); - } - if (delta_rxgain > 0 && rtlhal->current_bandtype == BAND_ON_5G && - thermalvalue <= rtlefuse->eeprom_thermalmeter) { - rtlpriv->dm.thermalvalue_rxgain = thermalvalue; - rtl92d_dm_rxgain_tracking_thermalmeter(hw); - } - if (rtlpriv->dm.txpower_track_control) - rtlpriv->dm.thermalvalue = thermalvalue; - -exit: - rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===\n"); -} - -static void rtl92d_dm_initialize_txpower_tracking(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - - rtlpriv->dm.txpower_tracking = true; - rtlpriv->dm.txpower_trackinginit = false; - rtlpriv->dm.txpower_track_control = true; - rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "pMgntInfo->txpower_tracking = %d\n", - rtlpriv->dm.txpower_tracking); -} - -void rtl92d_dm_check_txpower_tracking_thermal_meter(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - - if (!rtlpriv->dm.txpower_tracking) - return; - - if (!rtlpriv->dm.tm_trigger) { - rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, BIT(17) | - BIT(16), 0x03); - rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "Trigger 92S Thermal Meter!!\n"); - rtlpriv->dm.tm_trigger = 1; - return; - } else { - rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - "Schedule TxPowerTracking direct call!!\n"); - rtl92d_dm_txpower_tracking_callback_thermalmeter(hw); - rtlpriv->dm.tm_trigger = 0; - } -} - -void rtl92d_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rate_adaptive *ra = &(rtlpriv->ra); - - ra->ratr_state = DM_RATR_STA_INIT; - ra->pre_ratr_state = DM_RATR_STA_INIT; - if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER) - rtlpriv->dm.useramask = true; - else - rtlpriv->dm.useramask = false; -} - -void rtl92d_dm_init(struct ieee80211_hw *hw) +void rtl92de_dm_init(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -1212,7 +154,7 @@ void rtl92d_dm_init(struct ieee80211_hw *hw) rtl92d_dm_initialize_txpower_tracking(hw); } -void rtl92d_dm_watchdog(struct ieee80211_hw *hw) +void rtl92de_dm_watchdog(struct ieee80211_hw *hw) { struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); bool fw_current_inpsmode = false; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h index 939cc45bfebd..beade227b442 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h @@ -4,94 +4,7 @@ #ifndef __RTL92C_DM_H__ #define __RTL92C_DM_H__ -#define HAL_DM_DIG_DISABLE BIT(0) -#define HAL_DM_HIPWR_DISABLE BIT(1) - -#define OFDM_TABLE_LENGTH 37 -#define OFDM_TABLE_SIZE_92D 43 -#define CCK_TABLE_LENGTH 33 - -#define CCK_TABLE_SIZE 33 - -#define BW_AUTO_SWITCH_HIGH_LOW 25 -#define BW_AUTO_SWITCH_LOW_HIGH 30 - -#define DM_DIG_FA_UPPER 0x32 -#define DM_DIG_FA_LOWER 0x20 -#define DM_DIG_FA_TH0 0x100 -#define DM_DIG_FA_TH1 0x400 -#define DM_DIG_FA_TH2 0x600 - -#define RXPATHSELECTION_SS_TH_LOW 30 -#define RXPATHSELECTION_DIFF_TH 18 - -#define DM_RATR_STA_INIT 0 -#define DM_RATR_STA_HIGH 1 -#define DM_RATR_STA_MIDDLE 2 -#define DM_RATR_STA_LOW 3 - -#define CTS2SELF_THVAL 30 -#define REGC38_TH 20 - -#define WAIOTTHVAL 25 - -#define TXHIGHPWRLEVEL_NORMAL 0 -#define TXHIGHPWRLEVEL_LEVEL1 1 -#define TXHIGHPWRLEVEL_LEVEL2 2 -#define TXHIGHPWRLEVEL_BT1 3 -#define 
TXHIGHPWRLEVEL_BT2 4 - -#define DM_TYPE_BYFW 0 -#define DM_TYPE_BYDRIVER 1 - -#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74 -#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67 -#define INDEX_MAPPING_NUM 13 - -struct swat { - u8 failure_cnt; - u8 try_flag; - u8 stop_trying; - long pre_rssi; - long trying_threshold; - u8 cur_antenna; - u8 pre_antenna; -}; - -enum tag_dynamic_init_gain_operation_type_definition { - DIG_TYPE_THRESH_HIGH = 0, - DIG_TYPE_THRESH_LOW = 1, - DIG_TYPE_BACKOFF = 2, - DIG_TYPE_RX_GAIN_MIN = 3, - DIG_TYPE_RX_GAIN_MAX = 4, - DIG_TYPE_ENABLE = 5, - DIG_TYPE_DISABLE = 6, - DIG_OP_TYPE_MAX -}; - -enum dm_1r_cca { - CCA_1R = 0, - CCA_2R = 1, - CCA_MAX = 2, -}; - -enum dm_rf { - RF_SAVE = 0, - RF_NORMAL = 1, - RF_MAX = 2, -}; - -enum dm_sw_ant_switch { - ANS_ANTENNA_B = 1, - ANS_ANTENNA_A = 2, - ANS_ANTENNA_MAX = 3, -}; - -void rtl92d_dm_init(struct ieee80211_hw *hw); -void rtl92d_dm_watchdog(struct ieee80211_hw *hw); -void rtl92d_dm_init_edca_turbo(struct ieee80211_hw *hw); -void rtl92d_dm_write_dig(struct ieee80211_hw *hw); -void rtl92d_dm_check_txpower_tracking_thermal_meter(struct ieee80211_hw *hw); -void rtl92d_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw); +void rtl92de_dm_init(struct ieee80211_hw *hw); +void rtl92de_dm_watchdog(struct ieee80211_hw *hw); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c index e1fb29962801..c8444a72ff69 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c @@ -5,157 +5,12 @@ #include "../pci.h" #include "../base.h" #include "../efuse.h" -#include "reg.h" -#include "def.h" +#include "../rtl8192d/reg.h" +#include "../rtl8192d/def.h" +#include "../rtl8192d/fw_common.h" #include "fw.h" #include "sw.h" -static bool _rtl92d_is_fw_downloaded(struct rtl_priv *rtlpriv) -{ - return (rtl_read_dword(rtlpriv, REG_MCUFWDL) & MCUFWDL_RDY) ? - true : false; -} - -static void _rtl92d_enable_fw_download(struct ieee80211_hw *hw, bool enable) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 tmp; - - if (enable) { - tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); - rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp | 0x04); - tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL); - rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp | 0x01); - tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2); - rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7); - } else { - tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL); - rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe); - /* Reserved for fw extension. 
- * 0x81[7] is used for mac0 status , - * so don't write this reg here - * rtl_write_byte(rtlpriv, REG_MCUFWDL + 1, 0x00);*/ - } -} - -static void _rtl92d_write_fw(struct ieee80211_hw *hw, - enum version_8192d version, u8 *buffer, u32 size) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - u8 *bufferptr = buffer; - u32 pagenums, remainsize; - u32 page, offset; - - rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size); - if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) - rtl_fill_dummy(bufferptr, &size); - pagenums = size / FW_8192D_PAGE_SIZE; - remainsize = size % FW_8192D_PAGE_SIZE; - if (pagenums > 8) - pr_err("Page numbers should not greater then 8\n"); - for (page = 0; page < pagenums; page++) { - offset = page * FW_8192D_PAGE_SIZE; - rtl_fw_page_write(hw, page, (bufferptr + offset), - FW_8192D_PAGE_SIZE); - } - if (remainsize) { - offset = pagenums * FW_8192D_PAGE_SIZE; - page = pagenums; - rtl_fw_page_write(hw, page, (bufferptr + offset), remainsize); - } -} - -static int _rtl92d_fw_free_to_go(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 counter = 0; - u32 value32; - - do { - value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); - } while ((counter++ < FW_8192D_POLLING_TIMEOUT_COUNT) && - (!(value32 & FWDL_CHKSUM_RPT))); - if (counter >= FW_8192D_POLLING_TIMEOUT_COUNT) { - pr_err("chksum report fail! REG_MCUFWDL:0x%08x\n", - value32); - return -EIO; - } - value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); - value32 |= MCUFWDL_RDY; - rtl_write_dword(rtlpriv, REG_MCUFWDL, value32); - return 0; -} - -void rtl92d_firmware_selfreset(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 u1b_tmp; - u8 delay = 100; - - /* Set (REG_HMETFR + 3) to 0x20 is reset 8051 */ - rtl_write_byte(rtlpriv, REG_HMETFR + 3, 0x20); - u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); - while (u1b_tmp & BIT(2)) { - delay--; - if (delay == 0) - break; - udelay(50); - u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); - } - WARN_ONCE((delay <= 0), "rtl8192de: 8051 reset failed!\n"); - rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG, - "=====> 8051 reset success (%d)\n", delay); -} - -static int _rtl92d_fw_init(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - u32 counter; - - rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG, "FW already have download\n"); - /* polling for FW ready */ - counter = 0; - do { - if (rtlhal->interfaceindex == 0) { - if (rtl_read_byte(rtlpriv, FW_MAC0_READY) & - MAC0_READY) { - rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG, - "Polling FW ready success!! REG_MCUFWDL: 0x%x\n", - rtl_read_byte(rtlpriv, - FW_MAC0_READY)); - return 0; - } - udelay(5); - } else { - if (rtl_read_byte(rtlpriv, FW_MAC1_READY) & - MAC1_READY) { - rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG, - "Polling FW ready success!! REG_MCUFWDL: 0x%x\n", - rtl_read_byte(rtlpriv, - FW_MAC1_READY)); - return 0; - } - udelay(5); - } - } while (counter++ < POLLING_READY_TIMEOUT_COUNT); - - if (rtlhal->interfaceindex == 0) { - rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG, - "Polling FW ready fail!! MAC0 FW init not ready: 0x%x\n", - rtl_read_byte(rtlpriv, FW_MAC0_READY)); - } else { - rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG, - "Polling FW ready fail!! MAC1 FW init not ready: 0x%x\n", - rtl_read_byte(rtlpriv, FW_MAC1_READY)); - } - rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG, - "Polling FW ready fail!! 
REG_MCUFWDL:0x%08x\n", - rtl_read_dword(rtlpriv, REG_MCUFWDL)); - return -1; -} - int rtl92d_download_fw(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -189,7 +44,7 @@ int rtl92d_download_fw(struct ieee80211_hw *hw) } spin_lock_irqsave(&globalmutex_for_fwdownload, flags); - fw_downloaded = _rtl92d_is_fw_downloaded(rtlpriv); + fw_downloaded = rtl92d_is_fw_downloaded(rtlpriv); if ((rtl_read_byte(rtlpriv, 0x1f) & BIT(5)) == BIT(5)) fwdl_in_process = true; else @@ -202,7 +57,7 @@ int rtl92d_download_fw(struct ieee80211_hw *hw) for (count = 0; count < 5000; count++) { udelay(500); spin_lock_irqsave(&globalmutex_for_fwdownload, flags); - fw_downloaded = _rtl92d_is_fw_downloaded(rtlpriv); + fw_downloaded = rtl92d_is_fw_downloaded(rtlpriv); if ((rtl_read_byte(rtlpriv, 0x1f) & BIT(5)) == BIT(5)) fwdl_in_process = true; else @@ -237,11 +92,11 @@ int rtl92d_download_fw(struct ieee80211_hw *hw) rtl92d_firmware_selfreset(hw); rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00); } - _rtl92d_enable_fw_download(hw, true); - _rtl92d_write_fw(hw, version, pfwdata, fwsize); - _rtl92d_enable_fw_download(hw, false); + rtl92d_enable_fw_download(hw, true); + rtl92d_write_fw(hw, version, pfwdata, fwsize); + rtl92d_enable_fw_download(hw, false); spin_lock_irqsave(&globalmutex_for_fwdownload, flags); - err = _rtl92d_fw_free_to_go(hw); + err = rtl92d_fw_free_to_go(hw); /* download fw over,clear 0x1f[5] */ value = rtl_read_byte(rtlpriv, 0x1f); value &= (~BIT(5)); @@ -250,207 +105,10 @@ int rtl92d_download_fw(struct ieee80211_hw *hw) if (err) pr_err("fw is not ready to run!\n"); exit: - err = _rtl92d_fw_init(hw); + err = rtl92d_fw_init(hw); return err; } -static bool _rtl92d_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 val_hmetfr; - bool result = false; - - val_hmetfr = rtl_read_byte(rtlpriv, REG_HMETFR); - if (((val_hmetfr >> boxnum) & BIT(0)) == 0) - result = true; - return result; -} - -static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw, - u8 element_id, u32 cmd_len, u8 *cmdbuffer) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - u8 boxnum; - u16 box_reg = 0, box_extreg = 0; - u8 u1b_tmp; - bool isfw_read = false; - u8 buf_index = 0; - bool bwrite_success = false; - u8 wait_h2c_limmit = 100; - u8 wait_writeh2c_limmit = 100; - u8 boxcontent[4], boxextcontent[2]; - u32 h2c_waitcounter = 0; - unsigned long flag; - u8 idx; - - if (ppsc->rfpwr_state == ERFOFF || ppsc->inactive_pwrstate == ERFOFF) { - rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, - "Return as RF is off!!!\n"); - return; - } - rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n"); - while (true) { - spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag); - if (rtlhal->h2c_setinprogress) { - rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, - "H2C set in progress! 
Wait to set..element_id(%d)\n", - element_id); - - while (rtlhal->h2c_setinprogress) { - spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, - flag); - h2c_waitcounter++; - rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, - "Wait 100 us (%d times)...\n", - h2c_waitcounter); - udelay(100); - - if (h2c_waitcounter > 1000) - return; - - spin_lock_irqsave(&rtlpriv->locks.h2c_lock, - flag); - } - spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); - } else { - rtlhal->h2c_setinprogress = true; - spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); - break; - } - } - while (!bwrite_success) { - wait_writeh2c_limmit--; - if (wait_writeh2c_limmit == 0) { - pr_err("Write H2C fail because no trigger for FW INT!\n"); - break; - } - boxnum = rtlhal->last_hmeboxnum; - switch (boxnum) { - case 0: - box_reg = REG_HMEBOX_0; - box_extreg = REG_HMEBOX_EXT_0; - break; - case 1: - box_reg = REG_HMEBOX_1; - box_extreg = REG_HMEBOX_EXT_1; - break; - case 2: - box_reg = REG_HMEBOX_2; - box_extreg = REG_HMEBOX_EXT_2; - break; - case 3: - box_reg = REG_HMEBOX_3; - box_extreg = REG_HMEBOX_EXT_3; - break; - default: - pr_err("switch case %#x not processed\n", - boxnum); - break; - } - isfw_read = _rtl92d_check_fw_read_last_h2c(hw, boxnum); - while (!isfw_read) { - wait_h2c_limmit--; - if (wait_h2c_limmit == 0) { - rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, - "Waiting too long for FW read clear HMEBox(%d)!\n", - boxnum); - break; - } - udelay(10); - isfw_read = _rtl92d_check_fw_read_last_h2c(hw, boxnum); - u1b_tmp = rtl_read_byte(rtlpriv, 0x1BF); - rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, - "Waiting for FW read clear HMEBox(%d)!!! 0x1BF = %2x\n", - boxnum, u1b_tmp); - } - if (!isfw_read) { - rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, - "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n", - boxnum); - break; - } - memset(boxcontent, 0, sizeof(boxcontent)); - memset(boxextcontent, 0, sizeof(boxextcontent)); - boxcontent[0] = element_id; - rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, - "Write element_id box_reg(%4x) = %2x\n", - box_reg, element_id); - switch (cmd_len) { - case 1: - boxcontent[0] &= ~(BIT(7)); - memcpy(boxcontent + 1, cmdbuffer + buf_index, 1); - for (idx = 0; idx < 4; idx++) - rtl_write_byte(rtlpriv, box_reg + idx, - boxcontent[idx]); - break; - case 2: - boxcontent[0] &= ~(BIT(7)); - memcpy(boxcontent + 1, cmdbuffer + buf_index, 2); - for (idx = 0; idx < 4; idx++) - rtl_write_byte(rtlpriv, box_reg + idx, - boxcontent[idx]); - break; - case 3: - boxcontent[0] &= ~(BIT(7)); - memcpy(boxcontent + 1, cmdbuffer + buf_index, 3); - for (idx = 0; idx < 4; idx++) - rtl_write_byte(rtlpriv, box_reg + idx, - boxcontent[idx]); - break; - case 4: - boxcontent[0] |= (BIT(7)); - memcpy(boxextcontent, cmdbuffer + buf_index, 2); - memcpy(boxcontent + 1, cmdbuffer + buf_index + 2, 2); - for (idx = 0; idx < 2; idx++) - rtl_write_byte(rtlpriv, box_extreg + idx, - boxextcontent[idx]); - for (idx = 0; idx < 4; idx++) - rtl_write_byte(rtlpriv, box_reg + idx, - boxcontent[idx]); - break; - case 5: - boxcontent[0] |= (BIT(7)); - memcpy(boxextcontent, cmdbuffer + buf_index, 2); - memcpy(boxcontent + 1, cmdbuffer + buf_index + 2, 3); - for (idx = 0; idx < 2; idx++) - rtl_write_byte(rtlpriv, box_extreg + idx, - boxextcontent[idx]); - for (idx = 0; idx < 4; idx++) - rtl_write_byte(rtlpriv, box_reg + idx, - boxcontent[idx]); - break; - default: - pr_err("switch case %#x not processed\n", - cmd_len); - break; - } - bwrite_success = true; - rtlhal->last_hmeboxnum = boxnum + 1; - if (rtlhal->last_hmeboxnum == 4) - rtlhal->last_hmeboxnum = 0; - rtl_dbg(rtlpriv, 
COMP_CMD, DBG_LOUD, - "pHalData->last_hmeboxnum = %d\n", - rtlhal->last_hmeboxnum); - } - spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag); - rtlhal->h2c_setinprogress = false; - spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); - rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n"); -} - -void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw, - u8 element_id, u32 cmd_len, u8 *cmdbuffer) -{ - u32 tmp_cmdbuf[2]; - - memset(tmp_cmdbuf, 0, 8); - memcpy(tmp_cmdbuf, cmdbuffer, cmd_len); - _rtl92d_fill_h2c_command(hw, element_id, cmd_len, (u8 *)&tmp_cmdbuf); - return; -} - static bool _rtl92d_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb) { @@ -599,7 +257,7 @@ void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished) struct sk_buff *skb = NULL; u32 totalpacketlen; bool rtstatus; - u8 u1rsvdpageloc[3] = { 0 }; + u8 u1rsvdpageloc[3] = { PROBERSP_PG, PSPOLL_PG, NULL_PG }; bool dlok = false; u8 *beacon; u8 *p_pspoll; @@ -618,7 +276,6 @@ void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished) SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000)); SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid); SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr); - SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1rsvdpageloc, PSPOLL_PG); /*-------------------------------------------------------- (3) null data ---------------------------------------------------------*/ @@ -626,7 +283,6 @@ void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished) SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid); SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr); SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid); - SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1rsvdpageloc, NULL_PG); /*--------------------------------------------------------- (4) probe response ----------------------------------------------------------*/ @@ -634,7 +290,6 @@ void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished) SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid); SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr); SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid); - SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1rsvdpageloc, PROBERSP_PG); totalpacketlen = TOTAL_RESERVED_PKT_LEN; RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, "rtl92d_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL", @@ -663,11 +318,3 @@ void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished) rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING, "Set RSVD page location to Fw FAIL!!!!!!\n"); } - -void rtl92d_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus) -{ - u8 u1_joinbssrpt_parm[1] = {0}; - - SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(u1_joinbssrpt_parm, mstatus); - rtl92d_fill_h2c_cmd(hw, H2C_JOINBSSRPT, 1, u1_joinbssrpt_parm); -} diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h index 7f0a17c1a9ea..9e1385ac17b1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h @@ -4,44 +4,7 @@ #ifndef __RTL92D__FW__H__ #define __RTL92D__FW__H__ -#define FW_8192D_START_ADDRESS 0x1000 -#define FW_8192D_PAGE_SIZE 4096 -#define FW_8192D_POLLING_TIMEOUT_COUNT 1000 - -#define IS_FW_HEADER_EXIST(_pfwhdr) \ - ((GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFF0) == 0x92C0 || \ - (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFF0) == 0x88C0 || \ - (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D0 || \ - (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D1 || \ - (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D2 || \ - 
(GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D3) - -/* Firmware Header(8-byte alinment required) */ -/* --- LONG WORD 0 ---- */ -#define GET_FIRMWARE_HDR_SIGNATURE(__fwhdr) \ - le32_get_bits(*(__le32 *)__fwhdr, GENMASK(15, 0)) -#define GET_FIRMWARE_HDR_VERSION(__fwhdr) \ - le32_get_bits(*(__le32 *)(__fwhdr + 4), GENMASK(15, 0)) -#define GET_FIRMWARE_HDR_SUB_VER(__fwhdr) \ - le32_get_bits(*(__le32 *)(__fwhdr + 4), GENMASK(23, 16)) - -#define pagenum_128(_len) \ - (u32)(((_len) >> 7) + ((_len) & 0x7F ? 1 : 0)) - -#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \ - *(u8 *)__ph2ccmd = __val; -#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \ - *(u8 *)__ph2ccmd = __val; -#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \ - *(u8 *)(__ph2ccmd + 1) = __val; -#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \ - *(u8 *)(__ph2ccmd + 2) = __val; - int rtl92d_download_fw(struct ieee80211_hw *hw); -void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id, - u32 cmd_len, u8 *p_cmdbuffer); -void rtl92d_firmware_selfreset(struct ieee80211_hw *hw); void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished); -void rtl92d_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c index 4ba42f6be3f2..73b81e60cfa9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c @@ -8,8 +8,12 @@ #include "../cam.h" #include "../ps.h" #include "../pci.h" -#include "reg.h" -#include "def.h" +#include "../rtl8192d/reg.h" +#include "../rtl8192d/def.h" +#include "../rtl8192d/dm_common.h" +#include "../rtl8192d/fw_common.h" +#include "../rtl8192d/hw_common.h" +#include "../rtl8192d/phy_common.h" #include "phy.h" #include "dm.h" #include "fw.h" @@ -50,34 +54,6 @@ static void _rtl92de_set_bcn_ctrl_reg(struct ieee80211_hw *hw, rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlpci->reg_bcn_ctrl_val); } -static void _rtl92de_stop_tx_beacon(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 tmp1byte; - - tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2); - rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte & (~BIT(6))); - rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xff); - rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64); - tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2); - tmp1byte &= ~(BIT(0)); - rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte); -} - -static void _rtl92de_resume_tx_beacon(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 tmp1byte; - - tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2); - rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte | BIT(6)); - rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0x0a); - rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff); - tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2); - tmp1byte |= BIT(0); - rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte); -} - static void _rtl92de_enable_bcn_sub_func(struct ieee80211_hw *hw) { _rtl92de_set_bcn_ctrl_reg(hw, 0, BIT(1)); @@ -90,58 +66,14 @@ static void _rtl92de_disable_bcn_sub_func(struct ieee80211_hw *hw) void rtl92de_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) { - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); switch (variable) { case HW_VAR_RCR: *((u32 *) 
(val)) = rtlpci->receive_config; break; - case HW_VAR_RF_STATE: - *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state; - break; - case HW_VAR_FWLPS_RF_ON:{ - enum rf_pwrstate rfstate; - u32 val_rcr; - - rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE, - (u8 *)(&rfstate)); - if (rfstate == ERFOFF) { - *((bool *) (val)) = true; - } else { - val_rcr = rtl_read_dword(rtlpriv, REG_RCR); - val_rcr &= 0x00070000; - if (val_rcr) - *((bool *) (val)) = false; - else - *((bool *) (val)) = true; - } - break; - } - case HW_VAR_FW_PSMODE_STATUS: - *((bool *) (val)) = ppsc->fw_current_inpsmode; - break; - case HW_VAR_CORRECT_TSF:{ - u64 tsf; - u32 *ptsf_low = (u32 *)&tsf; - u32 *ptsf_high = ((u32 *)&tsf) + 1; - - *ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4)); - *ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR); - *((u64 *) (val)) = tsf; - break; - } - case HW_VAR_INT_MIGRATION: - *((bool *)(val)) = rtlpriv->dm.interrupt_migration; - break; - case HW_VAR_INT_AC: - *((bool *)(val)) = rtlpriv->dm.disable_tx_int; - break; - case HAL_DEF_WOWLAN: - break; default: - pr_err("switch case %#x not processed\n", variable); + rtl92d_get_hw_reg(hw, variable, val); break; } } @@ -151,141 +83,8 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - u8 idx; switch (variable) { - case HW_VAR_ETHER_ADDR: - for (idx = 0; idx < ETH_ALEN; idx++) { - rtl_write_byte(rtlpriv, (REG_MACID + idx), - val[idx]); - } - break; - case HW_VAR_BASIC_RATE: { - u16 rate_cfg = ((u16 *) val)[0]; - u8 rate_index = 0; - - rate_cfg = rate_cfg & 0x15f; - if (mac->vendor == PEER_CISCO && - ((rate_cfg & 0x150) == 0)) - rate_cfg |= 0x01; - rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff); - rtl_write_byte(rtlpriv, REG_RRSR + 1, - (rate_cfg >> 8) & 0xff); - while (rate_cfg > 0x1) { - rate_cfg = (rate_cfg >> 1); - rate_index++; - } - if (rtlhal->fw_version > 0xe) - rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, - rate_index); - break; - } - case HW_VAR_BSSID: - for (idx = 0; idx < ETH_ALEN; idx++) { - rtl_write_byte(rtlpriv, (REG_BSSID + idx), - val[idx]); - } - break; - case HW_VAR_SIFS: - rtl_write_byte(rtlpriv, REG_SIFS_CTX + 1, val[0]); - rtl_write_byte(rtlpriv, REG_SIFS_TRX + 1, val[1]); - rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]); - rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]); - if (!mac->ht_enable) - rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM, - 0x0e0e); - else - rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM, - *((u16 *) val)); - break; - case HW_VAR_SLOT_TIME: { - u8 e_aci; - - rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD, - "HW_VAR_SLOT_TIME %x\n", val[0]); - rtl_write_byte(rtlpriv, REG_SLOT, val[0]); - for (e_aci = 0; e_aci < AC_MAX; e_aci++) - rtlpriv->cfg->ops->set_hw_reg(hw, - HW_VAR_AC_PARAM, - (&e_aci)); - break; - } - case HW_VAR_ACK_PREAMBLE: { - u8 reg_tmp; - u8 short_preamble = (bool) (*val); - - reg_tmp = (mac->cur_40_prime_sc) << 5; - if (short_preamble) - reg_tmp |= 0x80; - rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_tmp); - break; - } - case HW_VAR_AMPDU_MIN_SPACE: { - u8 min_spacing_to_set; - - min_spacing_to_set = *val; - if (min_spacing_to_set <= 7) { - mac->min_space_cfg = ((mac->min_space_cfg & 0xf8) | - min_spacing_to_set); - *val = min_spacing_to_set; - rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD, - "Set 
HW_VAR_AMPDU_MIN_SPACE: %#x\n", - mac->min_space_cfg); - rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, - mac->min_space_cfg); - } - break; - } - case HW_VAR_SHORTGI_DENSITY: { - u8 density_to_set; - - density_to_set = *val; - mac->min_space_cfg = rtlpriv->rtlhal.minspace_cfg; - mac->min_space_cfg |= (density_to_set << 3); - rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD, - "Set HW_VAR_SHORTGI_DENSITY: %#x\n", - mac->min_space_cfg); - rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, - mac->min_space_cfg); - break; - } - case HW_VAR_AMPDU_FACTOR: { - u8 factor_toset; - u32 regtoset; - u8 *ptmp_byte = NULL; - u8 index; - - if (rtlhal->macphymode == DUALMAC_DUALPHY) - regtoset = 0xb9726641; - else if (rtlhal->macphymode == DUALMAC_SINGLEPHY) - regtoset = 0x66626641; - else - regtoset = 0xb972a841; - factor_toset = *val; - if (factor_toset <= 3) { - factor_toset = (1 << (factor_toset + 2)); - if (factor_toset > 0xf) - factor_toset = 0xf; - for (index = 0; index < 4; index++) { - ptmp_byte = (u8 *)(®toset) + index; - if ((*ptmp_byte & 0xf0) > - (factor_toset << 4)) - *ptmp_byte = (*ptmp_byte & 0x0f) - | (factor_toset << 4); - if ((*ptmp_byte & 0x0f) > factor_toset) - *ptmp_byte = (*ptmp_byte & 0xf0) - | (factor_toset); - } - rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, regtoset); - rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD, - "Set HW_VAR_AMPDU_FACTOR: %#x\n", - factor_toset); - } - break; - } case HW_VAR_AC_PARAM: { u8 e_aci = *val; rtl92d_dm_init_edca_turbo(hw); @@ -346,37 +145,6 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) rtl_write_dword(rtlpriv, REG_RCR, ((u32 *) (val))[0]); rtlpci->receive_config = ((u32 *) (val))[0]; break; - case HW_VAR_RETRY_LIMIT: { - u8 retry_limit = val[0]; - - rtl_write_word(rtlpriv, REG_RL, - retry_limit << RETRY_LIMIT_SHORT_SHIFT | - retry_limit << RETRY_LIMIT_LONG_SHIFT); - break; - } - case HW_VAR_DUAL_TSF_RST: - rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1))); - break; - case HW_VAR_EFUSE_BYTES: - rtlefuse->efuse_usedbytes = *((u16 *) val); - break; - case HW_VAR_EFUSE_USAGE: - rtlefuse->efuse_usedpercentage = *val; - break; - case HW_VAR_IO_CMD: - rtl92d_phy_set_io_cmd(hw, (*(enum io_type *)val)); - break; - case HW_VAR_WPA_CONFIG: - rtl_write_byte(rtlpriv, REG_SECCFG, *val); - break; - case HW_VAR_SET_RPWM: - rtl92d_fill_h2c_cmd(hw, H2C_PWRM, 1, (val)); - break; - case HW_VAR_H2C_FW_PWRMODE: - break; - case HW_VAR_FW_PSMODE_STATUS: - ppsc->fw_current_inpsmode = *((bool *) val); - break; case HW_VAR_H2C_FW_JOINBSSRPT: { u8 mstatus = (*val); u8 tmp_regcr, tmp_reg422; @@ -409,19 +177,11 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) rtl92d_set_fw_joinbss_report_cmd(hw, (*val)); break; } - case HW_VAR_AID: { - u16 u2btmp; - u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT); - u2btmp &= 0xC000; - rtl_write_word(rtlpriv, REG_BCN_PSR_RPT, (u2btmp | - mac->assoc_id)); - break; - } case HW_VAR_CORRECT_TSF: { u8 btype_ibss = val[0]; if (btype_ibss) - _rtl92de_stop_tx_beacon(hw); + rtl92de_stop_tx_beacon(hw); _rtl92de_set_bcn_ctrl_reg(hw, 0, BIT(3)); rtl_write_dword(rtlpriv, REG_TSFTR, (u32) (mac->tsf & 0xffffffff)); @@ -429,7 +189,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) (u32) ((mac->tsf >> 32) & 0xffffffff)); _rtl92de_set_bcn_ctrl_reg(hw, BIT(3), 0); if (btype_ibss) - _rtl92de_resume_tx_beacon(hw); + rtl92de_resume_tx_beacon(hw); break; } @@ -472,34 +232,11 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) break; } default: - pr_err("switch case %#x not processed\n", 
variable); + rtl92d_set_hw_reg(hw, variable, val); break; } } -static bool _rtl92de_llt_write(struct ieee80211_hw *hw, u32 address, u32 data) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - bool status = true; - long count = 0; - u32 value = _LLT_INIT_ADDR(address) | - _LLT_INIT_DATA(data) | _LLT_OP(_LLT_WRITE_ACCESS); - - rtl_write_dword(rtlpriv, REG_LLT_INIT, value); - do { - value = rtl_read_dword(rtlpriv, REG_LLT_INIT); - if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value)) - break; - if (count > POLLING_LLT_THRESHOLD) { - pr_err("Failed to polling write LLT done at address %d!\n", - address); - status = false; - break; - } - } while (++count); - return status; -} - static bool _rtl92de_llt_table_init(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -558,13 +295,13 @@ static bool _rtl92de_llt_table_init(struct ieee80211_hw *hw) /* 18. LLT_table_init(Adapter); */ for (i = 0; i < (txpktbuf_bndy - 1); i++) { - status = _rtl92de_llt_write(hw, i, i + 1); + status = rtl92de_llt_write(hw, i, i + 1); if (!status) return status; } /* end of list */ - status = _rtl92de_llt_write(hw, (txpktbuf_bndy - 1), 0xFF); + status = rtl92de_llt_write(hw, (txpktbuf_bndy - 1), 0xFF); if (!status) return status; @@ -573,13 +310,13 @@ static bool _rtl92de_llt_table_init(struct ieee80211_hw *hw) /* config this MAC as two MAC transfer. */ /* Otherwise used as local loopback buffer. */ for (i = txpktbuf_bndy; i < maxpage; i++) { - status = _rtl92de_llt_write(hw, i, (i + 1)); + status = rtl92de_llt_write(hw, i, (i + 1)); if (!status) return status; } /* Let last entry point to the start entry of ring buffer */ - status = _rtl92de_llt_write(hw, maxpage, txpktbuf_bndy); + status = rtl92de_llt_write(hw, maxpage, txpktbuf_bndy); if (!status) return status; @@ -842,32 +579,6 @@ static void _rtl92de_enable_aspm_back_door(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, 0x352, 0x1); } -void rtl92de_enable_hw_security_config(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 sec_reg_value; - - rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, - "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n", - rtlpriv->sec.pairwise_enc_algorithm, - rtlpriv->sec.group_enc_algorithm); - if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) { - rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, - "not open hw encryption\n"); - return; - } - sec_reg_value = SCR_TXENCENABLE | SCR_RXENCENABLE; - if (rtlpriv->sec.use_defaultkey) { - sec_reg_value |= SCR_TXUSEDK; - sec_reg_value |= SCR_RXUSEDK; - } - sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK); - rtl_write_byte(rtlpriv, REG_CR + 1, 0x02); - rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD, - "The SECR-value %x\n", sec_reg_value); - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value); -} - int rtl92de_hw_init(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -991,11 +702,11 @@ int rtl92de_hw_init(struct ieee80211_hw *hw) _rtl92de_enable_aspm_back_door(hw); /* rtlpriv->intf_ops->enable_aspm(hw); */ - rtl92d_dm_init(hw); + rtl92de_dm_init(hw); rtlpci->being_init_adapter = false; if (ppsc->rfpwr_state == ERFON) { - rtl92d_phy_lc_calibrate(hw); + rtl92d_phy_lc_calibrate(hw, IS_92D_SINGLEPHY(rtlhal->version)); /* 5G and 2.4G must wait sometime to let RF LO ready */ if (rtlhal->macphymode == DUALMAC_DUALPHY) { u32 tmp_rega; @@ -1020,23 +731,6 @@ int rtl92de_hw_init(struct ieee80211_hw *hw) return err; } -static enum version_8192d _rtl92de_read_chip_version(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - enum 
version_8192d version = VERSION_NORMAL_CHIP_92D_SINGLEPHY; - u32 value32; - - value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG); - if (!(value32 & 0x000f0000)) { - version = VERSION_TEST_CHIP_92D_SINGLEPHY; - rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "TEST CHIP!!!\n"); - } else { - version = VERSION_NORMAL_CHIP_92D_SINGLEPHY; - rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Normal CHIP!!!\n"); - } - return version; -} - static int _rtl92de_set_media_status(struct ieee80211_hw *hw, enum nl80211_iftype type) { @@ -1048,11 +742,11 @@ static int _rtl92de_set_media_status(struct ieee80211_hw *hw, if (type == NL80211_IFTYPE_UNSPECIFIED || type == NL80211_IFTYPE_STATION) { - _rtl92de_stop_tx_beacon(hw); + rtl92de_stop_tx_beacon(hw); _rtl92de_enable_bcn_sub_func(hw); } else if (type == NL80211_IFTYPE_ADHOC || type == NL80211_IFTYPE_AP) { - _rtl92de_resume_tx_beacon(hw); + rtl92de_resume_tx_beacon(hw); _rtl92de_disable_bcn_sub_func(hw); } else { rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING, @@ -1152,13 +846,6 @@ void rtl92d_linked_set_reg(struct ieee80211_hw *hw) } } -/* don't set REG_EDCA_BE_PARAM here because - * mac80211 will send pkt when scan */ -void rtl92de_set_qos(struct ieee80211_hw *hw, int aci) -{ - rtl92d_dm_init_edca_turbo(hw); -} - void rtl92de_enable_interrupt(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -1383,825 +1070,6 @@ void rtl92de_update_interrupt_mask(struct ieee80211_hw *hw, rtl92de_enable_interrupt(hw); } -static void _rtl92de_readpowervalue_fromprom(struct txpower_info *pwrinfo, - u8 *rom_content, bool autoloadfail) -{ - u32 rfpath, eeaddr, group, offset1, offset2; - u8 i; - - memset(pwrinfo, 0, sizeof(struct txpower_info)); - if (autoloadfail) { - for (group = 0; group < CHANNEL_GROUP_MAX; group++) { - for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) { - if (group < CHANNEL_GROUP_MAX_2G) { - pwrinfo->cck_index[rfpath][group] = - EEPROM_DEFAULT_TXPOWERLEVEL_2G; - pwrinfo->ht40_1sindex[rfpath][group] = - EEPROM_DEFAULT_TXPOWERLEVEL_2G; - } else { - pwrinfo->ht40_1sindex[rfpath][group] = - EEPROM_DEFAULT_TXPOWERLEVEL_5G; - } - pwrinfo->ht40_2sindexdiff[rfpath][group] = - EEPROM_DEFAULT_HT40_2SDIFF; - pwrinfo->ht20indexdiff[rfpath][group] = - EEPROM_DEFAULT_HT20_DIFF; - pwrinfo->ofdmindexdiff[rfpath][group] = - EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF; - pwrinfo->ht40maxoffset[rfpath][group] = - EEPROM_DEFAULT_HT40_PWRMAXOFFSET; - pwrinfo->ht20maxoffset[rfpath][group] = - EEPROM_DEFAULT_HT20_PWRMAXOFFSET; - } - } - for (i = 0; i < 3; i++) { - pwrinfo->tssi_a[i] = EEPROM_DEFAULT_TSSI; - pwrinfo->tssi_b[i] = EEPROM_DEFAULT_TSSI; - } - return; - } - - /* Maybe autoload OK,buf the tx power index value is not filled. - * If we find it, we set it to default value. */ - for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) { - for (group = 0; group < CHANNEL_GROUP_MAX_2G; group++) { - eeaddr = EEPROM_CCK_TX_PWR_INX_2G + (rfpath * 3) - + group; - pwrinfo->cck_index[rfpath][group] = - (rom_content[eeaddr] == 0xFF) ? - (eeaddr > 0x7B ? - EEPROM_DEFAULT_TXPOWERLEVEL_5G : - EEPROM_DEFAULT_TXPOWERLEVEL_2G) : - rom_content[eeaddr]; - } - } - for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) { - for (group = 0; group < CHANNEL_GROUP_MAX; group++) { - offset1 = group / 3; - offset2 = group % 3; - eeaddr = EEPROM_HT40_1S_TX_PWR_INX_2G + (rfpath * 3) + - offset2 + offset1 * 21; - pwrinfo->ht40_1sindex[rfpath][group] = - (rom_content[eeaddr] == 0xFF) ? (eeaddr > 0x7B ? 
- EEPROM_DEFAULT_TXPOWERLEVEL_5G : - EEPROM_DEFAULT_TXPOWERLEVEL_2G) : - rom_content[eeaddr]; - } - } - /* These just for 92D efuse offset. */ - for (group = 0; group < CHANNEL_GROUP_MAX; group++) { - for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) { - int base1 = EEPROM_HT40_2S_TX_PWR_INX_DIFF_2G; - - offset1 = group / 3; - offset2 = group % 3; - - if (rom_content[base1 + offset2 + offset1 * 21] != 0xFF) - pwrinfo->ht40_2sindexdiff[rfpath][group] = - (rom_content[base1 + - offset2 + offset1 * 21] >> (rfpath * 4)) - & 0xF; - else - pwrinfo->ht40_2sindexdiff[rfpath][group] = - EEPROM_DEFAULT_HT40_2SDIFF; - if (rom_content[EEPROM_HT20_TX_PWR_INX_DIFF_2G + offset2 - + offset1 * 21] != 0xFF) - pwrinfo->ht20indexdiff[rfpath][group] = - (rom_content[EEPROM_HT20_TX_PWR_INX_DIFF_2G - + offset2 + offset1 * 21] >> (rfpath * 4)) - & 0xF; - else - pwrinfo->ht20indexdiff[rfpath][group] = - EEPROM_DEFAULT_HT20_DIFF; - if (rom_content[EEPROM_OFDM_TX_PWR_INX_DIFF_2G + offset2 - + offset1 * 21] != 0xFF) - pwrinfo->ofdmindexdiff[rfpath][group] = - (rom_content[EEPROM_OFDM_TX_PWR_INX_DIFF_2G - + offset2 + offset1 * 21] >> (rfpath * 4)) - & 0xF; - else - pwrinfo->ofdmindexdiff[rfpath][group] = - EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF; - if (rom_content[EEPROM_HT40_MAX_PWR_OFFSET_2G + offset2 - + offset1 * 21] != 0xFF) - pwrinfo->ht40maxoffset[rfpath][group] = - (rom_content[EEPROM_HT40_MAX_PWR_OFFSET_2G - + offset2 + offset1 * 21] >> (rfpath * 4)) - & 0xF; - else - pwrinfo->ht40maxoffset[rfpath][group] = - EEPROM_DEFAULT_HT40_PWRMAXOFFSET; - if (rom_content[EEPROM_HT20_MAX_PWR_OFFSET_2G + offset2 - + offset1 * 21] != 0xFF) - pwrinfo->ht20maxoffset[rfpath][group] = - (rom_content[EEPROM_HT20_MAX_PWR_OFFSET_2G + - offset2 + offset1 * 21] >> (rfpath * 4)) & - 0xF; - else - pwrinfo->ht20maxoffset[rfpath][group] = - EEPROM_DEFAULT_HT20_PWRMAXOFFSET; - } - } - if (rom_content[EEPROM_TSSI_A_5G] != 0xFF) { - /* 5GL */ - pwrinfo->tssi_a[0] = rom_content[EEPROM_TSSI_A_5G] & 0x3F; - pwrinfo->tssi_b[0] = rom_content[EEPROM_TSSI_B_5G] & 0x3F; - /* 5GM */ - pwrinfo->tssi_a[1] = rom_content[EEPROM_TSSI_AB_5G] & 0x3F; - pwrinfo->tssi_b[1] = - (rom_content[EEPROM_TSSI_AB_5G] & 0xC0) >> 6 | - (rom_content[EEPROM_TSSI_AB_5G + 1] & 0x0F) << 2; - /* 5GH */ - pwrinfo->tssi_a[2] = (rom_content[EEPROM_TSSI_AB_5G + 1] & - 0xF0) >> 4 | - (rom_content[EEPROM_TSSI_AB_5G + 2] & 0x03) << 4; - pwrinfo->tssi_b[2] = (rom_content[EEPROM_TSSI_AB_5G + 2] & - 0xFC) >> 2; - } else { - for (i = 0; i < 3; i++) { - pwrinfo->tssi_a[i] = EEPROM_DEFAULT_TSSI; - pwrinfo->tssi_b[i] = EEPROM_DEFAULT_TSSI; - } - } -} - -static void _rtl92de_read_txpower_info(struct ieee80211_hw *hw, - bool autoload_fail, u8 *hwinfo) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - struct txpower_info pwrinfo; - u8 tempval[2], i, pwr, diff; - u32 ch, rfpath, group; - - _rtl92de_readpowervalue_fromprom(&pwrinfo, hwinfo, autoload_fail); - if (!autoload_fail) { - /* bit0~2 */ - rtlefuse->eeprom_regulatory = (hwinfo[EEPROM_RF_OPT1] & 0x7); - rtlefuse->eeprom_thermalmeter = - hwinfo[EEPROM_THERMAL_METER] & 0x1f; - rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_K]; - tempval[0] = hwinfo[EEPROM_IQK_DELTA] & 0x03; - tempval[1] = (hwinfo[EEPROM_LCK_DELTA] & 0x0C) >> 2; - rtlefuse->txpwr_fromeprom = true; - if (IS_92D_D_CUT(rtlpriv->rtlhal.version) || - IS_92D_E_CUT(rtlpriv->rtlhal.version)) { - rtlefuse->internal_pa_5g[0] = - !((hwinfo[EEPROM_TSSI_A_5G] & BIT(6)) >> 6); - rtlefuse->internal_pa_5g[1] = - !((hwinfo[EEPROM_TSSI_B_5G] & 
BIT(6)) >> 6); - rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, - "Is D cut,Internal PA0 %d Internal PA1 %d\n", - rtlefuse->internal_pa_5g[0], - rtlefuse->internal_pa_5g[1]); - } - rtlefuse->eeprom_c9 = hwinfo[EEPROM_RF_OPT6]; - rtlefuse->eeprom_cc = hwinfo[EEPROM_RF_OPT7]; - } else { - rtlefuse->eeprom_regulatory = 0; - rtlefuse->eeprom_thermalmeter = EEPROM_DEFAULT_THERMALMETER; - rtlefuse->crystalcap = EEPROM_DEFAULT_CRYSTALCAP; - tempval[0] = tempval[1] = 3; - } - - /* Use default value to fill parameters if - * efuse is not filled on some place. */ - - /* ThermalMeter from EEPROM */ - if (rtlefuse->eeprom_thermalmeter < 0x06 || - rtlefuse->eeprom_thermalmeter > 0x1c) - rtlefuse->eeprom_thermalmeter = 0x12; - rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter; - - /* check XTAL_K */ - if (rtlefuse->crystalcap == 0xFF) - rtlefuse->crystalcap = 0; - if (rtlefuse->eeprom_regulatory > 3) - rtlefuse->eeprom_regulatory = 0; - - for (i = 0; i < 2; i++) { - switch (tempval[i]) { - case 0: - tempval[i] = 5; - break; - case 1: - tempval[i] = 4; - break; - case 2: - tempval[i] = 3; - break; - case 3: - default: - tempval[i] = 0; - break; - } - } - - rtlefuse->delta_iqk = tempval[0]; - if (tempval[1] > 0) - rtlefuse->delta_lck = tempval[1] - 1; - if (rtlefuse->eeprom_c9 == 0xFF) - rtlefuse->eeprom_c9 = 0x00; - rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD, - "EEPROMRegulatory = 0x%x\n", rtlefuse->eeprom_regulatory); - rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD, - "ThermalMeter = 0x%x\n", rtlefuse->eeprom_thermalmeter); - rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD, - "CrystalCap = 0x%x\n", rtlefuse->crystalcap); - rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD, - "Delta_IQK = 0x%x Delta_LCK = 0x%x\n", - rtlefuse->delta_iqk, rtlefuse->delta_lck); - - for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) { - for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) { - group = rtl92d_get_chnlgroup_fromarray((u8) ch); - if (ch < CHANNEL_MAX_NUMBER_2G) - rtlefuse->txpwrlevel_cck[rfpath][ch] = - pwrinfo.cck_index[rfpath][group]; - rtlefuse->txpwrlevel_ht40_1s[rfpath][ch] = - pwrinfo.ht40_1sindex[rfpath][group]; - rtlefuse->txpwr_ht20diff[rfpath][ch] = - pwrinfo.ht20indexdiff[rfpath][group]; - rtlefuse->txpwr_legacyhtdiff[rfpath][ch] = - pwrinfo.ofdmindexdiff[rfpath][group]; - rtlefuse->pwrgroup_ht20[rfpath][ch] = - pwrinfo.ht20maxoffset[rfpath][group]; - rtlefuse->pwrgroup_ht40[rfpath][ch] = - pwrinfo.ht40maxoffset[rfpath][group]; - pwr = pwrinfo.ht40_1sindex[rfpath][group]; - diff = pwrinfo.ht40_2sindexdiff[rfpath][group]; - rtlefuse->txpwrlevel_ht40_2s[rfpath][ch] = - (pwr > diff) ? 
(pwr - diff) : 0; - } - } -} - -static void _rtl92de_read_macphymode_from_prom(struct ieee80211_hw *hw, - u8 *content) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - u8 macphy_crvalue = content[EEPROM_MAC_FUNCTION]; - - if (macphy_crvalue & BIT(3)) { - rtlhal->macphymode = SINGLEMAC_SINGLEPHY; - rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, - "MacPhyMode SINGLEMAC_SINGLEPHY\n"); - } else { - rtlhal->macphymode = DUALMAC_DUALPHY; - rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, - "MacPhyMode DUALMAC_DUALPHY\n"); - } -} - -static void _rtl92de_read_macphymode_and_bandtype(struct ieee80211_hw *hw, - u8 *content) -{ - _rtl92de_read_macphymode_from_prom(hw, content); - rtl92d_phy_config_macphymode(hw); - rtl92d_phy_config_macphymode_info(hw); -} - -static void _rtl92de_efuse_update_chip_version(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - enum version_8192d chipver = rtlpriv->rtlhal.version; - u8 cutvalue[2]; - u16 chipvalue; - - read_efuse_byte(hw, EEPROME_CHIP_VERSION_H, &cutvalue[1]); - read_efuse_byte(hw, EEPROME_CHIP_VERSION_L, &cutvalue[0]); - chipvalue = (cutvalue[1] << 8) | cutvalue[0]; - switch (chipvalue) { - case 0xAA55: - chipver |= CHIP_92D_C_CUT; - rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "C-CUT!!!\n"); - break; - case 0x9966: - chipver |= CHIP_92D_D_CUT; - rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "D-CUT!!!\n"); - break; - case 0xCC33: - chipver |= CHIP_92D_E_CUT; - rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "E-CUT!!!\n"); - break; - default: - chipver |= CHIP_92D_D_CUT; - pr_err("Unknown CUT!\n"); - break; - } - rtlpriv->rtlhal.version = chipver; -} - -static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - int params[] = {RTL8190_EEPROM_ID, EEPROM_VID, EEPROM_DID, - EEPROM_SVID, EEPROM_SMID, EEPROM_MAC_ADDR_MAC0_92D, - EEPROM_CHANNEL_PLAN, EEPROM_VERSION, EEPROM_CUSTOMER_ID, - COUNTRY_CODE_WORLD_WIDE_13}; - int i; - u16 usvalue; - u8 *hwinfo; - - hwinfo = kzalloc(HWSET_MAX_SIZE, GFP_KERNEL); - if (!hwinfo) - return; - - if (rtl_get_hwinfo(hw, rtlpriv, HWSET_MAX_SIZE, hwinfo, params)) - goto exit; - - _rtl92de_efuse_update_chip_version(hw); - _rtl92de_read_macphymode_and_bandtype(hw, hwinfo); - - /* Read Permanent MAC address for 2nd interface */ - if (rtlhal->interfaceindex != 0) { - for (i = 0; i < 6; i += 2) { - usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR_MAC1_92D + i]; - *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue; - } - } - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, - rtlefuse->dev_addr); - rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr); - _rtl92de_read_txpower_info(hw, rtlefuse->autoload_failflag, hwinfo); - - /* Read Channel Plan */ - switch (rtlhal->bandset) { - case BAND_ON_2_4G: - rtlefuse->channel_plan = COUNTRY_CODE_TELEC; - break; - case BAND_ON_5G: - rtlefuse->channel_plan = COUNTRY_CODE_FCC; - break; - case BAND_ON_BOTH: - rtlefuse->channel_plan = COUNTRY_CODE_FCC; - break; - default: - rtlefuse->channel_plan = COUNTRY_CODE_FCC; - break; - } - rtlefuse->txpwr_fromeprom = true; -exit: - kfree(hwinfo); -} - -void rtl92de_read_eeprom_info(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - u8 tmp_u1b; - - rtlhal->version = _rtl92de_read_chip_version(hw); - tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR); - 
rtlefuse->autoload_status = tmp_u1b; - if (tmp_u1b & BIT(4)) { - rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n"); - rtlefuse->epromtype = EEPROM_93C46; - } else { - rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n"); - rtlefuse->epromtype = EEPROM_BOOT_EFUSE; - } - if (tmp_u1b & BIT(5)) { - rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n"); - - rtlefuse->autoload_failflag = false; - _rtl92de_read_adapter_info(hw); - } else { - pr_err("Autoload ERR!!\n"); - } - return; -} - -static void rtl92de_update_hal_rate_table(struct ieee80211_hw *hw, - struct ieee80211_sta *sta) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - u32 ratr_value; - u8 ratr_index = 0; - u8 nmode = mac->ht_enable; - u8 mimo_ps = IEEE80211_SMPS_OFF; - u16 shortgi_rate; - u32 tmp_ratr_value; - u8 curtxbw_40mhz = mac->bw_40; - u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? - 1 : 0; - u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? - 1 : 0; - enum wireless_mode wirelessmode = mac->mode; - - if (rtlhal->current_bandtype == BAND_ON_5G) - ratr_value = sta->deflink.supp_rates[1] << 4; - else - ratr_value = sta->deflink.supp_rates[0]; - ratr_value |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 | - sta->deflink.ht_cap.mcs.rx_mask[0] << 12); - switch (wirelessmode) { - case WIRELESS_MODE_A: - ratr_value &= 0x00000FF0; - break; - case WIRELESS_MODE_B: - if (ratr_value & 0x0000000c) - ratr_value &= 0x0000000d; - else - ratr_value &= 0x0000000f; - break; - case WIRELESS_MODE_G: - ratr_value &= 0x00000FF5; - break; - case WIRELESS_MODE_N_24G: - case WIRELESS_MODE_N_5G: - nmode = 1; - if (mimo_ps == IEEE80211_SMPS_STATIC) { - ratr_value &= 0x0007F005; - } else { - u32 ratr_mask; - - if (get_rf_type(rtlphy) == RF_1T2R || - get_rf_type(rtlphy) == RF_1T1R) { - ratr_mask = 0x000ff005; - } else { - ratr_mask = 0x0f0ff005; - } - - ratr_value &= ratr_mask; - } - break; - default: - if (rtlphy->rf_type == RF_1T2R) - ratr_value &= 0x000ff0ff; - else - ratr_value &= 0x0f0ff0ff; - - break; - } - ratr_value &= 0x0FFFFFFF; - if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) || - (!curtxbw_40mhz && curshortgi_20mhz))) { - ratr_value |= 0x10000000; - tmp_ratr_value = (ratr_value >> 12); - for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) { - if ((1 << shortgi_rate) & tmp_ratr_value) - break; - } - shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) | - (shortgi_rate << 4) | (shortgi_rate); - } - rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value); - rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n", - rtl_read_dword(rtlpriv, REG_ARFR0)); -} - -static void rtl92de_update_hal_rate_mask(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, u8 rssi_level, bool update_bw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl_sta_info *sta_entry = NULL; - u32 ratr_bitmap; - u8 ratr_index; - u8 curtxbw_40mhz = (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0; - u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? - 1 : 0; - u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 
- 1 : 0; - enum wireless_mode wirelessmode = 0; - bool shortgi = false; - u32 value[2]; - u8 macid = 0; - u8 mimo_ps = IEEE80211_SMPS_OFF; - - sta_entry = (struct rtl_sta_info *) sta->drv_priv; - mimo_ps = sta_entry->mimo_ps; - wirelessmode = sta_entry->wireless_mode; - if (mac->opmode == NL80211_IFTYPE_STATION) - curtxbw_40mhz = mac->bw_40; - else if (mac->opmode == NL80211_IFTYPE_AP || - mac->opmode == NL80211_IFTYPE_ADHOC) - macid = sta->aid + 1; - - if (rtlhal->current_bandtype == BAND_ON_5G) - ratr_bitmap = sta->deflink.supp_rates[1] << 4; - else - ratr_bitmap = sta->deflink.supp_rates[0]; - ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 | - sta->deflink.ht_cap.mcs.rx_mask[0] << 12); - switch (wirelessmode) { - case WIRELESS_MODE_B: - ratr_index = RATR_INX_WIRELESS_B; - if (ratr_bitmap & 0x0000000c) - ratr_bitmap &= 0x0000000d; - else - ratr_bitmap &= 0x0000000f; - break; - case WIRELESS_MODE_G: - ratr_index = RATR_INX_WIRELESS_GB; - - if (rssi_level == 1) - ratr_bitmap &= 0x00000f00; - else if (rssi_level == 2) - ratr_bitmap &= 0x00000ff0; - else - ratr_bitmap &= 0x00000ff5; - break; - case WIRELESS_MODE_A: - ratr_index = RATR_INX_WIRELESS_G; - ratr_bitmap &= 0x00000ff0; - break; - case WIRELESS_MODE_N_24G: - case WIRELESS_MODE_N_5G: - if (wirelessmode == WIRELESS_MODE_N_24G) - ratr_index = RATR_INX_WIRELESS_NGB; - else - ratr_index = RATR_INX_WIRELESS_NG; - if (mimo_ps == IEEE80211_SMPS_STATIC) { - if (rssi_level == 1) - ratr_bitmap &= 0x00070000; - else if (rssi_level == 2) - ratr_bitmap &= 0x0007f000; - else - ratr_bitmap &= 0x0007f005; - } else { - if (rtlphy->rf_type == RF_1T2R || - rtlphy->rf_type == RF_1T1R) { - if (curtxbw_40mhz) { - if (rssi_level == 1) - ratr_bitmap &= 0x000f0000; - else if (rssi_level == 2) - ratr_bitmap &= 0x000ff000; - else - ratr_bitmap &= 0x000ff015; - } else { - if (rssi_level == 1) - ratr_bitmap &= 0x000f0000; - else if (rssi_level == 2) - ratr_bitmap &= 0x000ff000; - else - ratr_bitmap &= 0x000ff005; - } - } else { - if (curtxbw_40mhz) { - if (rssi_level == 1) - ratr_bitmap &= 0x0f0f0000; - else if (rssi_level == 2) - ratr_bitmap &= 0x0f0ff000; - else - ratr_bitmap &= 0x0f0ff015; - } else { - if (rssi_level == 1) - ratr_bitmap &= 0x0f0f0000; - else if (rssi_level == 2) - ratr_bitmap &= 0x0f0ff000; - else - ratr_bitmap &= 0x0f0ff005; - } - } - } - if ((curtxbw_40mhz && curshortgi_40mhz) || - (!curtxbw_40mhz && curshortgi_20mhz)) { - - if (macid == 0) - shortgi = true; - else if (macid == 1) - shortgi = false; - } - break; - default: - ratr_index = RATR_INX_WIRELESS_NGB; - - if (rtlphy->rf_type == RF_1T2R) - ratr_bitmap &= 0x000ff0ff; - else - ratr_bitmap &= 0x0f0ff0ff; - break; - } - - value[0] = (ratr_bitmap & 0x0fffffff) | (ratr_index << 28); - value[1] = macid | (shortgi ? 
0x20 : 0x00) | 0x80; - rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG, - "ratr_bitmap :%x value0:%x value1:%x\n", - ratr_bitmap, value[0], value[1]); - rtl92d_fill_h2c_cmd(hw, H2C_RA_MASK, 5, (u8 *) value); - if (macid != 0) - sta_entry->ratr_index = ratr_index; -} - -void rtl92de_update_hal_rate_tbl(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, u8 rssi_level, bool update_bw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - - if (rtlpriv->dm.useramask) - rtl92de_update_hal_rate_mask(hw, sta, rssi_level, update_bw); - else - rtl92de_update_hal_rate_table(hw, sta); -} - -void rtl92de_update_channel_access_setting(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - u16 sifs_timer; - - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, - &mac->slot_time); - if (!mac->ht_enable) - sifs_timer = 0x0a0a; - else - sifs_timer = 0x1010; - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer); -} - -bool rtl92de_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - enum rf_pwrstate e_rfpowerstate_toset; - u8 u1tmp; - bool actuallyset = false; - unsigned long flag; - - if (rtlpci->being_init_adapter) - return false; - if (ppsc->swrf_processing) - return false; - spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag); - if (ppsc->rfchange_inprogress) { - spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag); - return false; - } else { - ppsc->rfchange_inprogress = true; - spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag); - } - rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, rtl_read_byte(rtlpriv, - REG_MAC_PINMUX_CFG) & ~(BIT(3))); - u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL); - e_rfpowerstate_toset = (u1tmp & BIT(3)) ? 
ERFON : ERFOFF; - if (ppsc->hwradiooff && (e_rfpowerstate_toset == ERFON)) { - rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG, - "GPIOChangeRF - HW Radio ON, RF ON\n"); - e_rfpowerstate_toset = ERFON; - ppsc->hwradiooff = false; - actuallyset = true; - } else if (!ppsc->hwradiooff && (e_rfpowerstate_toset == ERFOFF)) { - rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG, - "GPIOChangeRF - HW Radio OFF, RF OFF\n"); - e_rfpowerstate_toset = ERFOFF; - ppsc->hwradiooff = true; - actuallyset = true; - } - if (actuallyset) { - spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag); - ppsc->rfchange_inprogress = false; - spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag); - } else { - if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) - RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); - spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag); - ppsc->rfchange_inprogress = false; - spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag); - } - *valid = 1; - return !ppsc->hwradiooff; -} - -void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index, - u8 *p_macaddr, bool is_group, u8 enc_algo, - bool is_wepkey, bool clear_all) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - u8 *macaddr = p_macaddr; - u32 entry_id; - bool is_pairwise = false; - static u8 cam_const_addr[4][6] = { - {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - {0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, - {0x00, 0x00, 0x00, 0x00, 0x00, 0x02}, - {0x00, 0x00, 0x00, 0x00, 0x00, 0x03} - }; - static u8 cam_const_broad[] = { - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff - }; - - if (clear_all) { - u8 idx; - u8 cam_offset = 0; - u8 clear_number = 5; - rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n"); - for (idx = 0; idx < clear_number; idx++) { - rtl_cam_mark_invalid(hw, cam_offset + idx); - rtl_cam_empty_entry(hw, cam_offset + idx); - - if (idx < 5) { - memset(rtlpriv->sec.key_buf[idx], 0, - MAX_KEY_LEN); - rtlpriv->sec.key_len[idx] = 0; - } - } - } else { - switch (enc_algo) { - case WEP40_ENCRYPTION: - enc_algo = CAM_WEP40; - break; - case WEP104_ENCRYPTION: - enc_algo = CAM_WEP104; - break; - case TKIP_ENCRYPTION: - enc_algo = CAM_TKIP; - break; - case AESCCMP_ENCRYPTION: - enc_algo = CAM_AES; - break; - default: - pr_err("switch case %#x not processed\n", - enc_algo); - enc_algo = CAM_TKIP; - break; - } - if (is_wepkey || rtlpriv->sec.use_defaultkey) { - macaddr = cam_const_addr[key_index]; - entry_id = key_index; - } else { - if (is_group) { - macaddr = cam_const_broad; - entry_id = key_index; - } else { - if (mac->opmode == NL80211_IFTYPE_AP) { - entry_id = rtl_cam_get_free_entry(hw, - p_macaddr); - if (entry_id >= TOTAL_CAM_ENTRY) { - pr_err("Can not find free hw security cam entry\n"); - return; - } - } else { - entry_id = CAM_PAIRWISE_KEY_POSITION; - } - key_index = PAIRWISE_KEYIDX; - is_pairwise = true; - } - } - if (rtlpriv->sec.key_len[key_index] == 0) { - rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, - "delete one entry, entry_id is %d\n", - entry_id); - if (mac->opmode == NL80211_IFTYPE_AP) - rtl_cam_del_entry(hw, p_macaddr); - rtl_cam_delete_one_entry(hw, p_macaddr, entry_id); - } else { - rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD, - "The insert KEY length is %d\n", - rtlpriv->sec.key_len[PAIRWISE_KEYIDX]); - rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD, - "The insert KEY is %x %x\n", - rtlpriv->sec.key_buf[0][0], - rtlpriv->sec.key_buf[0][1]); - rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, - "add one entry\n"); - if (is_pairwise) { - RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD, - "Pairwise Key 
content", - rtlpriv->sec.pairwise_key, - rtlpriv-> - sec.key_len[PAIRWISE_KEYIDX]); - rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, - "set Pairwise key\n"); - rtl_cam_add_one_entry(hw, macaddr, key_index, - entry_id, enc_algo, - CAM_CONFIG_NO_USEDK, - rtlpriv-> - sec.key_buf[key_index]); - } else { - rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, - "set group key\n"); - if (mac->opmode == NL80211_IFTYPE_ADHOC) { - rtl_cam_add_one_entry(hw, - rtlefuse->dev_addr, - PAIRWISE_KEYIDX, - CAM_PAIRWISE_KEY_POSITION, - enc_algo, CAM_CONFIG_NO_USEDK, - rtlpriv->sec.key_buf[entry_id]); - } - rtl_cam_add_one_entry(hw, macaddr, key_index, - entry_id, enc_algo, - CAM_CONFIG_NO_USEDK, - rtlpriv->sec.key_buf - [entry_id]); - } - } - } -} - void rtl92de_suspend(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h index ea495216d394..bda4a1a7c91d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h @@ -5,7 +5,6 @@ #define __RTL92DE_HW_H__ void rtl92de_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); -void rtl92de_read_eeprom_info(struct ieee80211_hw *hw); void rtl92de_interrupt_recognized(struct ieee80211_hw *hw, struct rtl_int *int_vec); int rtl92de_hw_init(struct ieee80211_hw *hw); @@ -14,21 +13,11 @@ void rtl92de_enable_interrupt(struct ieee80211_hw *hw); void rtl92de_disable_interrupt(struct ieee80211_hw *hw); int rtl92de_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type); void rtl92de_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid); -void rtl92de_set_qos(struct ieee80211_hw *hw, int aci); void rtl92de_set_beacon_related_registers(struct ieee80211_hw *hw); void rtl92de_set_beacon_interval(struct ieee80211_hw *hw); void rtl92de_update_interrupt_mask(struct ieee80211_hw *hw, u32 add_msr, u32 rm_msr); void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); -void rtl92de_update_hal_rate_tbl(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, u8 rssi_level, - bool update_bw); -void rtl92de_update_channel_access_setting(struct ieee80211_hw *hw); -bool rtl92de_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid); -void rtl92de_enable_hw_security_config(struct ieee80211_hw *hw); -void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index, - u8 *p_macaddr, bool is_group, u8 enc_algo, - bool is_wepkey, bool clear_all); void rtl92de_write_dword_dbi(struct ieee80211_hw *hw, u16 offset, u32 value, u8 direct); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c index 4bd708570992..33aede56c81b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c @@ -3,7 +3,7 @@ #include "../wifi.h" #include "../pci.h" -#include "reg.h" +#include "../rtl8192d/reg.h" #include "led.h" void rtl92de_sw_led_on(struct ieee80211_hw *hw, enum rtl_led_pin pin) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c index d835a27429f0..d429560009bb 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c @@ -5,8 +5,11 @@ #include "../pci.h" #include "../ps.h" #include "../core.h" -#include "reg.h" -#include "def.h" +#include "../rtl8192d/reg.h" +#include "../rtl8192d/def.h" +#include "../rtl8192d/dm_common.h" +#include 
"../rtl8192d/phy_common.h" +#include "../rtl8192d/rf_common.h" #include "phy.h" #include "rf.h" #include "dm.h" @@ -21,9 +24,6 @@ #define RF_REG_NUM_FOR_C_CUT_2G 5 #define RF_CHNL_NUM_5G 19 #define RF_CHNL_NUM_5G_40M 17 -#define TARGET_CHNL_NUM_5G 221 -#define TARGET_CHNL_NUM_2G 14 -#define CV_CURVE_CNT 64 static u32 rf_reg_for_5g_swchnl_normal[MAX_RF_IMR_INDEX_NORMAL] = { 0, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x0 @@ -160,15 +160,6 @@ static u32 targetchnl_2g[TARGET_CHNL_NUM_2G] = { 25711, 25658, 25606, 25554, 25502, 25451, 25328 }; -static const u8 channel_all[59] = { - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, - 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, - 60, 62, 64, 100, 102, 104, 106, 108, 110, 112, - 114, 116, 118, 120, 122, 124, 126, 128, 130, - 132, 134, 136, 138, 140, 149, 151, 153, 155, - 157, 159, 161, 163, 165 -}; - u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -235,119 +226,6 @@ void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw, regaddr, bitmask, data); } -static u32 _rtl92d_phy_rf_serial_read(struct ieee80211_hw *hw, - enum radio_path rfpath, u32 offset) -{ - - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath]; - u32 newoffset; - u32 tmplong, tmplong2; - u8 rfpi_enable = 0; - u32 retvalue; - - newoffset = offset; - tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD); - if (rfpath == RF90_PATH_A) - tmplong2 = tmplong; - else - tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD); - tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) | - (newoffset << 23) | BLSSIREADEDGE; - rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD, - tmplong & (~BLSSIREADEDGE)); - udelay(10); - rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2); - udelay(50); - udelay(50); - rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD, - tmplong | BLSSIREADEDGE); - udelay(10); - if (rfpath == RF90_PATH_A) - rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1, - BIT(8)); - else if (rfpath == RF90_PATH_B) - rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1, - BIT(8)); - if (rfpi_enable) - retvalue = rtl_get_bbreg(hw, pphyreg->rf_rbpi, - BLSSIREADBACKDATA); - else - retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb, - BLSSIREADBACKDATA); - rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x] = 0x%x\n", - rfpath, pphyreg->rf_rb, retvalue); - return retvalue; -} - -static void _rtl92d_phy_rf_serial_write(struct ieee80211_hw *hw, - enum radio_path rfpath, - u32 offset, u32 data) -{ - u32 data_and_addr; - u32 newoffset; - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath]; - - newoffset = offset; - /* T65 RF */ - data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff; - rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr); - rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n", - rfpath, pphyreg->rf3wire_offset, data_and_addr); -} - -u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw, - enum radio_path rfpath, u32 regaddr, u32 bitmask) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 original_value, readback_value, bitshift; - - rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, - "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n", - regaddr, rfpath, bitmask); - spin_lock(&rtlpriv->locks.rf_lock); - original_value = 
_rtl92d_phy_rf_serial_read(hw, rfpath, regaddr); - bitshift = calculate_bit_shift(bitmask); - readback_value = (original_value & bitmask) >> bitshift; - spin_unlock(&rtlpriv->locks.rf_lock); - rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, - "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n", - regaddr, rfpath, bitmask, original_value); - return readback_value; -} - -void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath, - u32 regaddr, u32 bitmask, u32 data) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - u32 original_value, bitshift; - - rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, - "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n", - regaddr, bitmask, data, rfpath); - if (bitmask == 0) - return; - spin_lock(&rtlpriv->locks.rf_lock); - if (rtlphy->rf_mode != RF_OP_BY_FW) { - if (bitmask != RFREG_OFFSET_MASK) { - original_value = _rtl92d_phy_rf_serial_read(hw, - rfpath, regaddr); - bitshift = calculate_bit_shift(bitmask); - data = ((original_value & (~bitmask)) | - (data << bitshift)); - } - _rtl92d_phy_rf_serial_write(hw, rfpath, regaddr, data); - } - spin_unlock(&rtlpriv->locks.rf_lock); - rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, - "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n", - regaddr, bitmask, data, rfpath); -} - bool rtl92d_phy_mac_config(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -374,133 +252,6 @@ bool rtl92d_phy_mac_config(struct ieee80211_hw *hw) return true; } -static void _rtl92d_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - - /* RF Interface Sowrtware Control */ - /* 16 LSBs if read 32-bit from 0x870 */ - rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW; - /* 16 MSBs if read 32-bit from 0x870 (16-bit for 0x872) */ - rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW; - /* 16 LSBs if read 32-bit from 0x874 */ - rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW; - /* 16 MSBs if read 32-bit from 0x874 (16-bit for 0x876) */ - - rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW; - /* RF Interface Readback Value */ - /* 16 LSBs if read 32-bit from 0x8E0 */ - rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB; - /* 16 MSBs if read 32-bit from 0x8E0 (16-bit for 0x8E2) */ - rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB; - /* 16 LSBs if read 32-bit from 0x8E4 */ - rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB; - /* 16 MSBs if read 32-bit from 0x8E4 (16-bit for 0x8E6) */ - rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB; - - /* RF Interface Output (and Enable) */ - /* 16 LSBs if read 32-bit from 0x860 */ - rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE; - /* 16 LSBs if read 32-bit from 0x864 */ - rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE; - - /* RF Interface (Output and) Enable */ - /* 16 MSBs if read 32-bit from 0x860 (16-bit for 0x862) */ - rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE; - /* 16 MSBs if read 32-bit from 0x864 (16-bit for 0x866) */ - rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE; - - /* Addr of LSSI. 
Wirte RF register by driver */ - /* LSSI Parameter */ - rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset = - RFPGA0_XA_LSSIPARAMETER; - rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset = - RFPGA0_XB_LSSIPARAMETER; - - /* RF parameter */ - /* BB Band Select */ - rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = RFPGA0_XAB_RFPARAMETER; - rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = RFPGA0_XAB_RFPARAMETER; - rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = RFPGA0_XCD_RFPARAMETER; - rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = RFPGA0_XCD_RFPARAMETER; - - /* Tx AGC Gain Stage (same for all path. Should we remove this?) */ - /* Tx gain stage */ - rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE; - /* Tx gain stage */ - rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE; - /* Tx gain stage */ - rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE; - /* Tx gain stage */ - rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE; - - /* Tranceiver A~D HSSI Parameter-1 */ - /* wire control parameter1 */ - rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1; - /* wire control parameter1 */ - rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1; - - /* Tranceiver A~D HSSI Parameter-2 */ - /* wire control parameter2 */ - rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2; - /* wire control parameter2 */ - rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2; - - /* RF switch Control */ - /* TR/Ant switch control */ - rtlphy->phyreg_def[RF90_PATH_A].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL; - rtlphy->phyreg_def[RF90_PATH_B].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL; - rtlphy->phyreg_def[RF90_PATH_C].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL; - rtlphy->phyreg_def[RF90_PATH_D].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL; - - /* AGC control 1 */ - rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1; - rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1; - rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1; - rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1; - - /* AGC control 2 */ - rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2; - rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2; - rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2; - rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2; - - /* RX AFE control 1 */ - rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbal = ROFDM0_XARXIQIMBALANCE; - rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbal = ROFDM0_XBRXIQIMBALANCE; - rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbal = ROFDM0_XCRXIQIMBALANCE; - rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbal = ROFDM0_XDRXIQIMBALANCE; - - /*RX AFE control 1 */ - rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE; - rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE; - rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE; - rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE; - - /* Tx AFE control 1 */ - rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATXIQIMBALANCE; - rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTXIQIMBALANCE; - rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTXIQIMBALANCE; - rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTXIQIMBALANCE; - - /* Tx AFE control 2 */ - rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE; - rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE; - 
rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE; - rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE; - - /* Tranceiver LSSI Readback SI mode */ - rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK; - rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RFPGA0_XB_LSSIREADBACK; - rtlphy->phyreg_def[RF90_PATH_C].rf_rb = RFPGA0_XC_LSSIREADBACK; - rtlphy->phyreg_def[RF90_PATH_D].rf_rb = RFPGA0_XD_LSSIREADBACK; - - /* Tranceiver LSSI Readback PI mode */ - rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = TRANSCEIVERA_HSPI_READBACK; - rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = TRANSCEIVERB_HSPI_READBACK; -} - static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, u8 configtype) { @@ -601,58 +352,6 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, return true; } -static void _rtl92d_store_pwrindex_diffrate_offset(struct ieee80211_hw *hw, - u32 regaddr, u32 bitmask, - u32 data) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - int index; - - if (regaddr == RTXAGC_A_RATE18_06) - index = 0; - else if (regaddr == RTXAGC_A_RATE54_24) - index = 1; - else if (regaddr == RTXAGC_A_CCK1_MCS32) - index = 6; - else if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00) - index = 7; - else if (regaddr == RTXAGC_A_MCS03_MCS00) - index = 2; - else if (regaddr == RTXAGC_A_MCS07_MCS04) - index = 3; - else if (regaddr == RTXAGC_A_MCS11_MCS08) - index = 4; - else if (regaddr == RTXAGC_A_MCS15_MCS12) - index = 5; - else if (regaddr == RTXAGC_B_RATE18_06) - index = 8; - else if (regaddr == RTXAGC_B_RATE54_24) - index = 9; - else if (regaddr == RTXAGC_B_CCK1_55_MCS32) - index = 14; - else if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) - index = 15; - else if (regaddr == RTXAGC_B_MCS03_MCS00) - index = 10; - else if (regaddr == RTXAGC_B_MCS07_MCS04) - index = 11; - else if (regaddr == RTXAGC_B_MCS11_MCS08) - index = 12; - else if (regaddr == RTXAGC_B_MCS15_MCS12) - index = 13; - else - return; - - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index] = data; - rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, - "MCSTxPowerLevelOriginalOffset[%d][%d] = 0x%x\n", - rtlphy->pwrgroup_cnt, index, - rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index]); - if (index == 13) - rtlphy->pwrgroup_cnt++; -} - static bool _rtl92d_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, u8 configtype) { @@ -666,7 +365,7 @@ static bool _rtl92d_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, if (configtype == BASEBAND_CONFIG_PHY_REG) { for (i = 0; i < phy_regarray_pg_len; i = i + 3) { rtl_addr_delay(phy_regarray_table_pg[i]); - _rtl92d_store_pwrindex_diffrate_offset(hw, + rtl92d_store_pwrindex_diffrate_offset(hw, phy_regarray_table_pg[i], phy_regarray_table_pg[i + 1], phy_regarray_table_pg[i + 2]); @@ -726,7 +425,7 @@ bool rtl92d_phy_bb_config(struct ieee80211_hw *hw) u32 regvaldw; u8 value; - _rtl92d_phy_init_bb_rf_register_definition(hw); + rtl92d_phy_init_bb_rf_register_definition(hw); regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN); rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, regval | BIT(13) | BIT(0) | BIT(1)); @@ -812,115 +511,6 @@ bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, return true; } -void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - - rtlphy->default_initialgain[0] = - (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0); - rtlphy->default_initialgain[1] = - (u8) rtl_get_bbreg(hw, 
ROFDM0_XBAGCCORE1, MASKBYTE0); - rtlphy->default_initialgain[2] = - (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0); - rtlphy->default_initialgain[3] = - (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0); - rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, - "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n", - rtlphy->default_initialgain[0], - rtlphy->default_initialgain[1], - rtlphy->default_initialgain[2], - rtlphy->default_initialgain[3]); - rtlphy->framesync = (u8)rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3, - MASKBYTE0); - rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2, - MASKDWORD); - rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, - "Default framesync (0x%x) = 0x%x\n", - ROFDM0_RXDETECTOR3, rtlphy->framesync); -} - -static void _rtl92d_get_txpower_index(struct ieee80211_hw *hw, u8 channel, - u8 *cckpowerlevel, u8 *ofdmpowerlevel) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct rtl_hal *rtlhal = &(rtlpriv->rtlhal); - struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - u8 index = (channel - 1); - - /* 1. CCK */ - if (rtlhal->current_bandtype == BAND_ON_2_4G) { - /* RF-A */ - cckpowerlevel[RF90_PATH_A] = - rtlefuse->txpwrlevel_cck[RF90_PATH_A][index]; - /* RF-B */ - cckpowerlevel[RF90_PATH_B] = - rtlefuse->txpwrlevel_cck[RF90_PATH_B][index]; - } else { - cckpowerlevel[RF90_PATH_A] = 0; - cckpowerlevel[RF90_PATH_B] = 0; - } - /* 2. OFDM for 1S or 2S */ - if (rtlphy->rf_type == RF_1T2R || rtlphy->rf_type == RF_1T1R) { - /* Read HT 40 OFDM TX power */ - ofdmpowerlevel[RF90_PATH_A] = - rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_A][index]; - ofdmpowerlevel[RF90_PATH_B] = - rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_B][index]; - } else if (rtlphy->rf_type == RF_2T2R) { - /* Read HT 40 OFDM TX power */ - ofdmpowerlevel[RF90_PATH_A] = - rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_A][index]; - ofdmpowerlevel[RF90_PATH_B] = - rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_B][index]; - } -} - -static void _rtl92d_ccxpower_index_check(struct ieee80211_hw *hw, - u8 channel, u8 *cckpowerlevel, u8 *ofdmpowerlevel) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - - rtlphy->cur_cck_txpwridx = cckpowerlevel[0]; - rtlphy->cur_ofdm24g_txpwridx = ofdmpowerlevel[0]; -} - -static u8 _rtl92c_phy_get_rightchnlplace(u8 chnl) -{ - u8 place = chnl; - - if (chnl > 14) { - for (place = 14; place < ARRAY_SIZE(channel5g); place++) { - if (channel5g[place] == chnl) { - place++; - break; - } - } - } - return place; -} - -void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel) -{ - struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 cckpowerlevel[2], ofdmpowerlevel[2]; - - if (!rtlefuse->txpwr_fromeprom) - return; - channel = _rtl92c_phy_get_rightchnlplace(channel); - _rtl92d_get_txpower_index(hw, channel, &cckpowerlevel[0], - &ofdmpowerlevel[0]); - if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G) - _rtl92d_ccxpower_index_check(hw, channel, &cckpowerlevel[0], - &ofdmpowerlevel[0]); - if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G) - rtl92d_phy_rf6052_set_cck_txpower(hw, &cckpowerlevel[0]); - rtl92d_phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0], channel); -} - void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw, enum nl80211_channel_type ch_type) { @@ -1122,65 +712,6 @@ static void _rtl92d_phy_reload_imr_setting(struct ieee80211_hw *hw, rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n"); } -static void _rtl92d_phy_enable_rf_env(struct ieee80211_hw *hw, - u8 
rfpath, u32 *pu4_regval) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath]; - - rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "====>\n"); - /*----Store original RFENV control type----*/ - switch (rfpath) { - case RF90_PATH_A: - case RF90_PATH_C: - *pu4_regval = rtl_get_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV); - break; - case RF90_PATH_B: - case RF90_PATH_D: - *pu4_regval = - rtl_get_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV << 16); - break; - } - /*----Set RF_ENV enable----*/ - rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1); - udelay(1); - /*----Set RF_ENV output high----*/ - rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1); - udelay(1); - /* Set bit number of Address and Data for RF register */ - /* Set 1 to 4 bits for 8255 */ - rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREADDRESSLENGTH, 0x0); - udelay(1); - /*Set 0 to 12 bits for 8255 */ - rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0); - udelay(1); - rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "<====\n"); -} - -static void _rtl92d_phy_restore_rf_env(struct ieee80211_hw *hw, u8 rfpath, - u32 *pu4_regval) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath]; - - rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "=====>\n"); - /*----Restore RFENV control type----*/ - switch (rfpath) { - case RF90_PATH_A: - case RF90_PATH_C: - rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV, *pu4_regval); - break; - case RF90_PATH_B: - case RF90_PATH_D: - rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV << 16, - *pu4_regval); - break; - } - rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "<=====\n"); -} - static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -1221,8 +752,8 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel) rtlhal->during_mac1init_radioa = true; /* asume no this case */ if (need_pwr_down) - _rtl92d_phy_enable_rf_env(hw, path, - &u4regvalue); + rtl92d_phy_enable_rf_env(hw, path, + &u4regvalue); } for (i = 0; i < RF_REG_NUM_FOR_C_CUT_5G; i++) { if (i == 0 && (rtlhal->macphymode == DUALMAC_DUALPHY)) { @@ -1253,7 +784,7 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel) RFREG_OFFSET_MASK)); } if (need_pwr_down) - _rtl92d_phy_restore_rf_env(hw, path, &u4regvalue); + rtl92d_phy_restore_rf_env(hw, path, &u4regvalue); if (rtlhal->during_mac1init_radioa) rtl92d_phy_powerdown_anotherphy(hw, false); if (channel < 149) @@ -1313,8 +844,8 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel) rtlhal->during_mac0init_radiob = true; if (need_pwr_down) - _rtl92d_phy_enable_rf_env(hw, path, - &u4regvalue); + rtl92d_phy_enable_rf_env(hw, path, + &u4regvalue); } } for (i = 0; i < RF_REG_NUM_FOR_C_CUT_2G; i++) { @@ -1347,31 +878,13 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel) RFREG_OFFSET_MASK, rf_syn_g4_for_c_cut_2g | (u4tmp << 11)); if (need_pwr_down) - _rtl92d_phy_restore_rf_env(hw, path, &u4regvalue); + rtl92d_phy_restore_rf_env(hw, path, &u4regvalue); if (rtlhal->during_mac0init_radiob) rtl92d_phy_powerdown_anotherphy(hw, true); } rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n"); } -u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl) -{ - u8 place; - - if (chnl > 14) { - for (place = 14; place < ARRAY_SIZE(channel_all); place++) { - if (channel_all[place] == chnl) - return place - 13; - } 
- } - - return 0; -} - -#define MAX_TOLERANCE 5 -#define IQK_DELAY_TIME 1 /* ms */ -#define MAX_TOLERANCE_92D 3 - /* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */ static u8 _rtl92d_phy_patha_iqk(struct ieee80211_hw *hw, bool configpathb) { @@ -1636,30 +1149,6 @@ static u8 _rtl92d_phy_pathb_iqk_5g_normal(struct ieee80211_hw *hw) return result; } -static void _rtl92d_phy_save_adda_registers(struct ieee80211_hw *hw, - u32 *adda_reg, u32 *adda_backup, - u32 regnum) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 i; - - RTPRINT(rtlpriv, FINIT, INIT_IQK, "Save ADDA parameters.\n"); - for (i = 0; i < regnum; i++) - adda_backup[i] = rtl_get_bbreg(hw, adda_reg[i], MASKDWORD); -} - -static void _rtl92d_phy_save_mac_registers(struct ieee80211_hw *hw, - u32 *macreg, u32 *macbackup) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 i; - - RTPRINT(rtlpriv, FINIT, INIT_IQK, "Save MAC parameters.\n"); - for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) - macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]); - macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]); -} - static void _rtl92d_phy_reload_adda_registers(struct ieee80211_hw *hw, u32 *adda_reg, u32 *adda_backup, u32 regnum) @@ -1685,37 +1174,6 @@ static void _rtl92d_phy_reload_mac_registers(struct ieee80211_hw *hw, rtl_write_byte(rtlpriv, macreg[i], macbackup[i]); } -static void _rtl92d_phy_path_adda_on(struct ieee80211_hw *hw, - u32 *adda_reg, bool patha_on, bool is2t) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 pathon; - u32 i; - - RTPRINT(rtlpriv, FINIT, INIT_IQK, "ADDA ON.\n"); - pathon = patha_on ? 0x04db25a4 : 0x0b1b25a4; - if (patha_on) - pathon = rtlpriv->rtlhal.interfaceindex == 0 ? - 0x04db25a4 : 0x0b1b25a4; - for (i = 0; i < IQK_ADDA_REG_NUM; i++) - rtl_set_bbreg(hw, adda_reg[i], MASKDWORD, pathon); -} - -static void _rtl92d_phy_mac_setting_calibration(struct ieee80211_hw *hw, - u32 *macreg, u32 *macbackup) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 i; - - RTPRINT(rtlpriv, FINIT, INIT_IQK, "MAC settings for Calibration.\n"); - rtl_write_byte(rtlpriv, macreg[0], 0x3F); - - for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++) - rtl_write_byte(rtlpriv, macreg[i], (u8)(macbackup[i] & - (~BIT(3)))); - rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5)))); -} - static void _rtl92d_phy_patha_standby(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -1772,14 +1230,16 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8], is2t ? 
"2T2R" : "1T1R"); /* Save ADDA parameters, turn Path A ADDA on */ - _rtl92d_phy_save_adda_registers(hw, adda_reg, - rtlphy->adda_backup, IQK_ADDA_REG_NUM); - _rtl92d_phy_save_mac_registers(hw, iqk_mac_reg, - rtlphy->iqk_mac_backup); - _rtl92d_phy_save_adda_registers(hw, iqk_bb_reg, - rtlphy->iqk_bb_backup, IQK_BB_REG_NUM); - } - _rtl92d_phy_path_adda_on(hw, adda_reg, true, is2t); + rtl92d_phy_save_adda_registers(hw, adda_reg, + rtlphy->adda_backup, + IQK_ADDA_REG_NUM); + rtl92d_phy_save_mac_registers(hw, iqk_mac_reg, + rtlphy->iqk_mac_backup); + rtl92d_phy_save_adda_registers(hw, iqk_bb_reg, + rtlphy->iqk_bb_backup, + IQK_BB_REG_NUM); + } + rtl92d_phy_path_adda_on(hw, adda_reg, true, is2t); if (t == 0) rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1, BIT(8)); @@ -1800,8 +1260,8 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8], 0x00010000); } /* MAC settings */ - _rtl92d_phy_mac_setting_calibration(hw, iqk_mac_reg, - rtlphy->iqk_mac_backup); + rtl92d_phy_mac_setting_calibration(hw, iqk_mac_reg, + rtlphy->iqk_mac_backup); /* Page B init */ rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x0f600000); if (is2t) @@ -1841,7 +1301,7 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8], if (is2t) { _rtl92d_phy_patha_standby(hw); /* Turn Path B ADDA on */ - _rtl92d_phy_path_adda_on(hw, adda_reg, false, is2t); + rtl92d_phy_path_adda_on(hw, adda_reg, false, is2t); for (i = 0; i < retrycount; i++) { pathb_ok = _rtl92d_phy_pathb_iqk(hw); if (pathb_ok == 0x03) { @@ -1938,24 +1398,24 @@ static void _rtl92d_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw, RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQ Calibration for %s\n", is2t ? "2T2R" : "1T1R"); /* Save ADDA parameters, turn Path A ADDA on */ - _rtl92d_phy_save_adda_registers(hw, adda_reg, - rtlphy->adda_backup, - IQK_ADDA_REG_NUM); - _rtl92d_phy_save_mac_registers(hw, iqk_mac_reg, - rtlphy->iqk_mac_backup); + rtl92d_phy_save_adda_registers(hw, adda_reg, + rtlphy->adda_backup, + IQK_ADDA_REG_NUM); + rtl92d_phy_save_mac_registers(hw, iqk_mac_reg, + rtlphy->iqk_mac_backup); if (is2t) - _rtl92d_phy_save_adda_registers(hw, iqk_bb_reg, - rtlphy->iqk_bb_backup, - IQK_BB_REG_NUM); + rtl92d_phy_save_adda_registers(hw, iqk_bb_reg, + rtlphy->iqk_bb_backup, + IQK_BB_REG_NUM); else - _rtl92d_phy_save_adda_registers(hw, iqk_bb_reg, - rtlphy->iqk_bb_backup, - IQK_BB_REG_NUM - 1); + rtl92d_phy_save_adda_registers(hw, iqk_bb_reg, + rtlphy->iqk_bb_backup, + IQK_BB_REG_NUM - 1); } - _rtl92d_phy_path_adda_on(hw, adda_reg, true, is2t); + rtl92d_phy_path_adda_on(hw, adda_reg, true, is2t); /* MAC settings */ - _rtl92d_phy_mac_setting_calibration(hw, iqk_mac_reg, - rtlphy->iqk_mac_backup); + rtl92d_phy_mac_setting_calibration(hw, iqk_mac_reg, + rtlphy->iqk_mac_backup); if (t == 0) rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1, BIT(8)); @@ -2002,7 +1462,7 @@ static void _rtl92d_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw, if (is2t) { /* _rtl92d_phy_patha_standby(hw); */ /* Turn Path B ADDA on */ - _rtl92d_phy_path_adda_on(hw, adda_reg, false, is2t); + rtl92d_phy_path_adda_on(hw, adda_reg, false, is2t); pathb_ok = _rtl92d_phy_pathb_iqk_5g_normal(hw); if (pathb_ok == 0x03) { RTPRINT(rtlpriv, FINIT, INIT_IQK, @@ -2401,56 +1861,6 @@ void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel) rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n"); } -static u32 _rtl92d_phy_get_abs(u32 val1, u32 val2) -{ - u32 ret; - - if (val1 >= val2) - ret = val1 - val2; - else - ret = val2 - 
val1; - return ret; -} - -static bool _rtl92d_is_legal_5g_channel(struct ieee80211_hw *hw, u8 channel) -{ - - int i; - - for (i = 0; i < ARRAY_SIZE(channel5g); i++) - if (channel == channel5g[i]) - return true; - return false; -} - -static void _rtl92d_phy_calc_curvindex(struct ieee80211_hw *hw, - u32 *targetchnl, u32 * curvecount_val, - bool is5g, u32 *curveindex) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 smallest_abs_val = 0xffffffff, u4tmp; - u8 i, j; - u8 chnl_num = is5g ? TARGET_CHNL_NUM_5G : TARGET_CHNL_NUM_2G; - - for (i = 0; i < chnl_num; i++) { - if (is5g && !_rtl92d_is_legal_5g_channel(hw, i + 1)) - continue; - curveindex[i] = 0; - for (j = 0; j < (CV_CURVE_CNT * 2); j++) { - u4tmp = _rtl92d_phy_get_abs(targetchnl[i], - curvecount_val[j]); - - if (u4tmp < smallest_abs_val) { - curveindex[i] = j; - smallest_abs_val = u4tmp; - } - } - smallest_abs_val = 0xffffffff; - RTPRINT(rtlpriv, FINIT, INIT_IQK, "curveindex[%d] = %x\n", - i, curveindex[i]); - } -} - static void _rtl92d_phy_reload_lck_setting(struct ieee80211_hw *hw, u8 channel) { @@ -2477,12 +1887,12 @@ static void _rtl92d_phy_reload_lck_setting(struct ieee80211_hw *hw, rtlpriv->rtlhal.during_mac1init_radioa = true; /* asume no this case */ if (bneed_powerdown_radio) - _rtl92d_phy_enable_rf_env(hw, erfpath, - &u4regvalue); + rtl92d_phy_enable_rf_env(hw, erfpath, + &u4regvalue); } rtl_set_rfreg(hw, erfpath, RF_SYN_G4, 0x3f800, u4tmp); if (bneed_powerdown_radio) - _rtl92d_phy_restore_rf_env(hw, erfpath, &u4regvalue); + rtl92d_phy_restore_rf_env(hw, erfpath, &u4regvalue); if (rtlpriv->rtlhal.during_mac1init_radioa) rtl92d_phy_powerdown_anotherphy(hw, false); } else if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G) { @@ -2495,15 +1905,15 @@ static void _rtl92d_phy_reload_lck_setting(struct ieee80211_hw *hw, rtl92d_phy_enable_anotherphy(hw, true); rtlpriv->rtlhal.during_mac0init_radiob = true; if (bneed_powerdown_radio) - _rtl92d_phy_enable_rf_env(hw, erfpath, - &u4regvalue); + rtl92d_phy_enable_rf_env(hw, erfpath, + &u4regvalue); } rtl_set_rfreg(hw, erfpath, RF_SYN_G4, 0x3f800, u4tmp); RTPRINT(rtlpriv, FINIT, INIT_IQK, "ver 3 set RF-B, 2G, 0x28 = 0x%x !!\n", rtl_get_rfreg(hw, erfpath, RF_SYN_G4, 0x3f800)); if (bneed_powerdown_radio) - _rtl92d_phy_restore_rf_env(hw, erfpath, &u4regvalue); + rtl92d_phy_restore_rf_env(hw, erfpath, &u4regvalue); if (rtlpriv->rtlhal.during_mac0init_radiob) rtl92d_phy_powerdown_anotherphy(hw, true); } @@ -2588,13 +1998,13 @@ static void _rtl92d_phy_lc_calibrate_sw(struct ieee80211_hw *hw, bool is2t) readval2); } if (index == 0 && rtlhal->interfaceindex == 0) - _rtl92d_phy_calc_curvindex(hw, targetchnl_5g, - curvecount_val, - true, curveindex_5g); + rtl92d_phy_calc_curvindex(hw, targetchnl_5g, + curvecount_val, + true, curveindex_5g); else - _rtl92d_phy_calc_curvindex(hw, targetchnl_2g, - curvecount_val, - false, curveindex_2g); + rtl92d_phy_calc_curvindex(hw, targetchnl_2g, + curvecount_val, + false, curveindex_2g); /* switch CV-curve control mode */ rtl_set_rfreg(hw, (enum radio_path)index, RF_SYN_G7, BIT(17), 0x1); @@ -2622,7 +2032,7 @@ static void _rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t) _rtl92d_phy_lc_calibrate_sw(hw, is2t); } -void rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw) +void rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); @@ -2638,12 +2048,9 @@ void rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw) RTPRINT(rtlpriv, FINIT, INIT_IQK, "LCK:Start!!! 
currentband %x delay %d ms\n", rtlhal->current_bandtype, timecount); - if (IS_92D_SINGLEPHY(rtlhal->version)) { - _rtl92d_phy_lc_calibrate(hw, true); - } else { - /* For 1T1R */ - _rtl92d_phy_lc_calibrate(hw, false); - } + + _rtl92d_phy_lc_calibrate(hw, is2t); + rtlphy->lck_inprogress = false; RTPRINT(rtlpriv, FINIT, INIT_IQK, "LCK:Finish!!!\n"); } @@ -2674,30 +2081,6 @@ static bool _rtl92d_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, return true; } -void rtl92d_phy_reset_iqk_result(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - u8 i; - - rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, - "settings regs %zu default regs %d\n", - ARRAY_SIZE(rtlphy->iqk_matrix), - IQK_MATRIX_REG_NUM); - /* 0xe94, 0xe9c, 0xea4, 0xeac, 0xeb4, 0xebc, 0xec4, 0xecc */ - for (i = 0; i < IQK_MATRIX_SETTINGS_NUM; i++) { - rtlphy->iqk_matrix[i].value[0][0] = 0x100; - rtlphy->iqk_matrix[i].value[0][2] = 0x100; - rtlphy->iqk_matrix[i].value[0][4] = 0x100; - rtlphy->iqk_matrix[i].value[0][6] = 0x100; - rtlphy->iqk_matrix[i].value[0][1] = 0x0; - rtlphy->iqk_matrix[i].value[0][3] = 0x0; - rtlphy->iqk_matrix[i].value[0][5] = 0x0; - rtlphy->iqk_matrix[i].value[0][7] = 0x0; - rtlphy->iqk_matrix[i].iqk_done = false; - } -} - static bool _rtl92d_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, u8 channel, u8 *stage, u8 *step, u32 *delay) @@ -2891,74 +2274,6 @@ u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw) return 1; } -static void rtl92d_phy_set_io(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct dig_t *de_digtable = &rtlpriv->dm_digtable; - struct rtl_phy *rtlphy = &(rtlpriv->phy); - - rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, - "--->Cmd(%#x), set_io_inprogress(%d)\n", - rtlphy->current_io_type, rtlphy->set_io_inprogress); - switch (rtlphy->current_io_type) { - case IO_CMD_RESUME_DM_BY_SCAN: - de_digtable->cur_igvalue = rtlphy->initgain_backup.xaagccore1; - rtl92d_dm_write_dig(hw); - rtl92d_phy_set_txpower_level(hw, rtlphy->current_channel); - break; - case IO_CMD_PAUSE_DM_BY_SCAN: - rtlphy->initgain_backup.xaagccore1 = de_digtable->cur_igvalue; - de_digtable->cur_igvalue = 0x37; - rtl92d_dm_write_dig(hw); - break; - default: - pr_err("switch case %#x not processed\n", - rtlphy->current_io_type); - break; - } - rtlphy->set_io_inprogress = false; - rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, "<---(%#x)\n", - rtlphy->current_io_type); -} - -bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - bool postprocessing = false; - - rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, - "-->IO Cmd(%#x), set_io_inprogress(%d)\n", - iotype, rtlphy->set_io_inprogress); - do { - switch (iotype) { - case IO_CMD_RESUME_DM_BY_SCAN: - rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, - "[IO CMD] Resume DM after scan\n"); - postprocessing = true; - break; - case IO_CMD_PAUSE_DM_BY_SCAN: - rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, - "[IO CMD] Pause DM before scan\n"); - postprocessing = true; - break; - default: - pr_err("switch case %#x not processed\n", - iotype); - break; - } - } while (false); - if (postprocessing && !rtlphy->set_io_inprogress) { - rtlphy->set_io_inprogress = true; - rtlphy->current_io_type = iotype; - } else { - return false; - } - rtl92d_phy_set_io(hw); - rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, "<--IO Type(%#x)\n", iotype); - return true; -} - static void _rtl92d_phy_set_rfon(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -3141,100 
+2456,6 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw, return bresult; } -void rtl92d_phy_config_macphymode(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - u8 offset = REG_MAC_PHY_CTRL_NORMAL; - - switch (rtlhal->macphymode) { - case DUALMAC_DUALPHY: - rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, - "MacPhyMode: DUALMAC_DUALPHY\n"); - rtl_write_byte(rtlpriv, offset, 0xF3); - break; - case SINGLEMAC_SINGLEPHY: - rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, - "MacPhyMode: SINGLEMAC_SINGLEPHY\n"); - rtl_write_byte(rtlpriv, offset, 0xF4); - break; - case DUALMAC_SINGLEPHY: - rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, - "MacPhyMode: DUALMAC_SINGLEPHY\n"); - rtl_write_byte(rtlpriv, offset, 0xF1); - break; - } -} - -void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - - switch (rtlhal->macphymode) { - case DUALMAC_SINGLEPHY: - rtlphy->rf_type = RF_2T2R; - rtlhal->version |= RF_TYPE_2T2R; - rtlhal->bandset = BAND_ON_BOTH; - rtlhal->current_bandtype = BAND_ON_2_4G; - break; - - case SINGLEMAC_SINGLEPHY: - rtlphy->rf_type = RF_2T2R; - rtlhal->version |= RF_TYPE_2T2R; - rtlhal->bandset = BAND_ON_BOTH; - rtlhal->current_bandtype = BAND_ON_2_4G; - break; - - case DUALMAC_DUALPHY: - rtlphy->rf_type = RF_1T1R; - rtlhal->version &= RF_TYPE_1T1R; - /* Now we let MAC0 run on 5G band. */ - if (rtlhal->interfaceindex == 0) { - rtlhal->bandset = BAND_ON_5G; - rtlhal->current_bandtype = BAND_ON_5G; - } else { - rtlhal->bandset = BAND_ON_2_4G; - rtlhal->current_bandtype = BAND_ON_2_4G; - } - break; - default: - break; - } -} - -u8 rtl92d_get_chnlgroup_fromarray(u8 chnl) -{ - u8 group; - - if (channel_all[chnl] <= 3) - group = 0; - else if (channel_all[chnl] <= 9) - group = 1; - else if (channel_all[chnl] <= 14) - group = 2; - else if (channel_all[chnl] <= 44) - group = 3; - else if (channel_all[chnl] <= 54) - group = 4; - else if (channel_all[chnl] <= 64) - group = 5; - else if (channel_all[chnl] <= 112) - group = 6; - else if (channel_all[chnl] <= 126) - group = 7; - else if (channel_all[chnl] <= 140) - group = 8; - else if (channel_all[chnl] <= 153) - group = 9; - else if (channel_all[chnl] <= 159) - group = 10; - else - group = 11; - return group; -} - void rtl92d_phy_set_poweron(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -3286,31 +2507,6 @@ void rtl92d_phy_set_poweron(struct ieee80211_hw *hw) } } -void rtl92d_phy_config_maccoexist_rfpage(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - - switch (rtlpriv->rtlhal.macphymode) { - case DUALMAC_DUALPHY: - rtl_write_byte(rtlpriv, REG_DMC, 0x0); - rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x08); - rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, 0x13ff); - break; - case DUALMAC_SINGLEPHY: - rtl_write_byte(rtlpriv, REG_DMC, 0xf8); - rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x08); - rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, 0x13ff); - break; - case SINGLEMAC_SINGLEPHY: - rtl_write_byte(rtlpriv, REG_DMC, 0x0); - rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x10); - rtl_write_word(rtlpriv, (REG_TRXFF_BNDY + 2), 0x27FF); - break; - default: - break; - } -} - void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h index 
8d07c783a023..bbe9ef77225e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h @@ -10,11 +10,8 @@ #define MAX_DOZE_WAITING_TIMES_9x 64 -#define RT_CANNOT_IO(hw) false #define HIGHPOWER_RADIOA_ARRAYLEN 22 -#define MAX_TOLERANCE 5 - #define APK_BB_REG_NUM 5 #define APK_AFE_REG_NUM 16 #define APK_CURVE_REG_NUM 4 @@ -27,12 +24,8 @@ #define RESET_CNT_LIMIT 3 #define IQK_ADDA_REG_NUM 16 -#define IQK_BB_REG_NUM 10 #define IQK_BB_REG_NUM_test 6 #define IQK_MAC_REG_NUM 4 -#define RX_INDEX_MAPPING_NUM 15 - -#define IQK_DELAY_TIME 1 #define CT_OFFSET_MAC_ADDR 0X16 @@ -68,80 +61,30 @@ struct swchnlcmd { u32 msdelay; }; -enum baseband_config_type { - BASEBAND_CONFIG_PHY_REG = 0, - BASEBAND_CONFIG_AGC_TAB = 1, -}; - -enum rf_content { - radioa_txt = 0, - radiob_txt = 1, - radioc_txt = 2, - radiod_txt = 3 -}; - -static inline void rtl92d_acquire_cckandrw_pagea_ctl(struct ieee80211_hw *hw, - unsigned long *flag) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - - if (rtlpriv->rtlhal.interfaceindex == 1) - spin_lock_irqsave(&rtlpriv->locks.cck_and_rw_pagea_lock, *flag); -} - -static inline void rtl92d_release_cckandrw_pagea_ctl(struct ieee80211_hw *hw, - unsigned long *flag) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - - if (rtlpriv->rtlhal.interfaceindex == 1) - spin_unlock_irqrestore(&rtlpriv->locks.cck_and_rw_pagea_lock, - *flag); -} - u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask); void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask, u32 data); -u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw, - enum radio_path rfpath, u32 regaddr, - u32 bitmask); -void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, - enum radio_path rfpath, u32 regaddr, - u32 bitmask, u32 data); bool rtl92d_phy_mac_config(struct ieee80211_hw *hw); bool rtl92d_phy_bb_config(struct ieee80211_hw *hw); bool rtl92d_phy_rf_config(struct ieee80211_hw *hw); bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw, enum radio_path rfpath); -void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw); -void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel); void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw, enum nl80211_channel_type ch_type); u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw); bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, enum rf_content content, enum radio_path rfpath); -bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype); bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw, enum rf_pwrstate rfpwr_state); -void rtl92d_phy_config_macphymode(struct ieee80211_hw *hw); -void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw); -u8 rtl92d_get_chnlgroup_fromarray(u8 chnl); void rtl92d_phy_set_poweron(struct ieee80211_hw *hw); -void rtl92d_phy_config_maccoexist_rfpage(struct ieee80211_hw *hw); bool rtl92d_phy_check_poweroff(struct ieee80211_hw *hw); -void rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw); +void rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t); void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw); void rtl92d_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta); void rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw); -void rtl92d_phy_reset_iqk_result(struct ieee80211_hw *hw); -void rtl92d_release_cckandrw_pagea_ctl(struct ieee80211_hw *hw, - unsigned long *flag); -void rtl92d_acquire_cckandrw_pagea_ctl(struct ieee80211_hw *hw, - unsigned long *flag); -u8 
rtl92d_get_rightchnlplace_for_iqk(u8 chnl); void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c index 83787fd293de..eb7d8b070cc7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c @@ -2,383 +2,14 @@ /* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" -#include "reg.h" -#include "def.h" +#include "../rtl8192d/reg.h" +#include "../rtl8192d/def.h" +#include "../rtl8192d/phy_common.h" #include "phy.h" #include "rf.h" #include "dm.h" #include "hw.h" -void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - u8 rfpath; - - switch (bandwidth) { - case HT_CHANNEL_WIDTH_20: - for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) { - rtlphy->rfreg_chnlval[rfpath] = ((rtlphy->rfreg_chnlval - [rfpath] & 0xfffff3ff) | 0x0400); - rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(10) | - BIT(11), 0x01); - - rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, - "20M RF 0x18 = 0x%x\n", - rtlphy->rfreg_chnlval[rfpath]); - } - - break; - case HT_CHANNEL_WIDTH_20_40: - for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) { - rtlphy->rfreg_chnlval[rfpath] = - ((rtlphy->rfreg_chnlval[rfpath] & 0xfffff3ff)); - rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(10) | BIT(11), - 0x00); - rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, - "40M RF 0x18 = 0x%x\n", - rtlphy->rfreg_chnlval[rfpath]); - } - break; - default: - pr_err("unknown bandwidth: %#X\n", bandwidth); - break; - } -} - -void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, - u8 *ppowerlevel) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - u32 tx_agc[2] = {0, 0}, tmpval; - bool turbo_scanoff = false; - u8 idx1, idx2; - u8 *ptr; - - if (rtlefuse->eeprom_regulatory != 0) - turbo_scanoff = true; - if (mac->act_scanning) { - tx_agc[RF90_PATH_A] = 0x3f3f3f3f; - tx_agc[RF90_PATH_B] = 0x3f3f3f3f; - if (turbo_scanoff) { - for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { - tx_agc[idx1] = ppowerlevel[idx1] | - (ppowerlevel[idx1] << 8) | - (ppowerlevel[idx1] << 16) | - (ppowerlevel[idx1] << 24); - } - } - } else { - for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { - tx_agc[idx1] = ppowerlevel[idx1] | - (ppowerlevel[idx1] << 8) | - (ppowerlevel[idx1] << 16) | - (ppowerlevel[idx1] << 24); - } - if (rtlefuse->eeprom_regulatory == 0) { - tmpval = (rtlphy->mcs_offset[0][6]) + - (rtlphy->mcs_offset[0][7] << 8); - tx_agc[RF90_PATH_A] += tmpval; - tmpval = (rtlphy->mcs_offset[0][14]) + - (rtlphy->mcs_offset[0][15] << 24); - tx_agc[RF90_PATH_B] += tmpval; - } - } - - for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { - ptr = (u8 *) (&(tx_agc[idx1])); - for (idx2 = 0; idx2 < 4; idx2++) { - if (*ptr > RF6052_MAX_TX_PWR) - *ptr = RF6052_MAX_TX_PWR; - ptr++; - } - } - - tmpval = tx_agc[RF90_PATH_A] & 0xff; - rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval); - RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", - tmpval, RTXAGC_A_CCK1_MCS32); - tmpval = tx_agc[RF90_PATH_A] >> 8; - rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval); - RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n", - tmpval, 
RTXAGC_B_CCK11_A_CCK2_11); - tmpval = tx_agc[RF90_PATH_B] >> 24; - rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval); - RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", - tmpval, RTXAGC_B_CCK11_A_CCK2_11); - tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff; - rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval); - RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n", - tmpval, RTXAGC_B_CCK1_55_MCS32); -} - -static void _rtl92d_phy_get_power_base(struct ieee80211_hw *hw, - u8 *ppowerlevel, u8 channel, - u32 *ofdmbase, u32 *mcsbase) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - u32 powerbase0, powerbase1; - u8 legacy_pwrdiff, ht20_pwrdiff; - u8 i, powerlevel[2]; - - for (i = 0; i < 2; i++) { - powerlevel[i] = ppowerlevel[i]; - legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff[i][channel - 1]; - powerbase0 = powerlevel[i] + legacy_pwrdiff; - powerbase0 = (powerbase0 << 24) | (powerbase0 << 16) | - (powerbase0 << 8) | powerbase0; - *(ofdmbase + i) = powerbase0; - RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - " [OFDM power base index rf(%c) = 0x%x]\n", - i == 0 ? 'A' : 'B', *(ofdmbase + i)); - } - - for (i = 0; i < 2; i++) { - if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) { - ht20_pwrdiff = rtlefuse->txpwr_ht20diff[i][channel - 1]; - powerlevel[i] += ht20_pwrdiff; - } - powerbase1 = powerlevel[i]; - powerbase1 = (powerbase1 << 24) | (powerbase1 << 16) | - (powerbase1 << 8) | powerbase1; - *(mcsbase + i) = powerbase1; - RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - " [MCS power base index rf(%c) = 0x%x]\n", - i == 0 ? 'A' : 'B', *(mcsbase + i)); - } -} - -static u8 _rtl92d_phy_get_chnlgroup_bypg(u8 chnlindex) -{ - u8 group; - u8 channel_info[59] = { - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, - 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, - 60, 62, 64, 100, 102, 104, 106, 108, 110, 112, - 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, - 134, 136, 138, 140, 149, 151, 153, 155, 157, 159, - 161, 163, 165 - }; - - if (channel_info[chnlindex] <= 3) /* Chanel 1-3 */ - group = 0; - else if (channel_info[chnlindex] <= 9) /* Channel 4-9 */ - group = 1; - else if (channel_info[chnlindex] <= 14) /* Channel 10-14 */ - group = 2; - else if (channel_info[chnlindex] <= 64) - group = 6; - else if (channel_info[chnlindex] <= 140) - group = 7; - else - group = 8; - return group; -} - -static void _rtl92d_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw, - u8 channel, u8 index, - u32 *powerbase0, - u32 *powerbase1, - u32 *p_outwriteval) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - u8 i, chnlgroup = 0, pwr_diff_limit[4]; - u32 writeval = 0, customer_limit, rf; - - for (rf = 0; rf < 2; rf++) { - switch (rtlefuse->eeprom_regulatory) { - case 0: - chnlgroup = 0; - writeval = rtlphy->mcs_offset - [chnlgroup][index + - (rf ? 8 : 0)] + ((index < 2) ? - powerbase0[rf] : - powerbase1[rf]); - RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "RTK better performance, writeval(%c) = 0x%x\n", - rf == 0 ? 'A' : 'B', writeval); - break; - case 1: - if (rtlphy->pwrgroup_cnt == 1) - chnlgroup = 0; - if (rtlphy->pwrgroup_cnt >= MAX_PG_GROUP) { - chnlgroup = _rtl92d_phy_get_chnlgroup_bypg( - channel - 1); - if (rtlphy->current_chan_bw == - HT_CHANNEL_WIDTH_20) - chnlgroup++; - else - chnlgroup += 4; - writeval = rtlphy->mcs_offset - [chnlgroup][index + - (rf ? 
8 : 0)] + ((index < 2) ? - powerbase0[rf] : - powerbase1[rf]); - RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "Realtek regulatory, 20MHz, writeval(%c) = 0x%x\n", - rf == 0 ? 'A' : 'B', writeval); - } - break; - case 2: - writeval = ((index < 2) ? powerbase0[rf] : - powerbase1[rf]); - RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "Better regulatory, writeval(%c) = 0x%x\n", - rf == 0 ? 'A' : 'B', writeval); - break; - case 3: - chnlgroup = 0; - if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { - RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "customer's limit, 40MHz rf(%c) = 0x%x\n", - rf == 0 ? 'A' : 'B', - rtlefuse->pwrgroup_ht40[rf] - [channel - 1]); - } else { - RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "customer's limit, 20MHz rf(%c) = 0x%x\n", - rf == 0 ? 'A' : 'B', - rtlefuse->pwrgroup_ht20[rf] - [channel - 1]); - } - for (i = 0; i < 4; i++) { - pwr_diff_limit[i] = (u8)((rtlphy->mcs_offset - [chnlgroup][index + (rf ? 8 : 0)] & - (0x7f << (i * 8))) >> (i * 8)); - if (rtlphy->current_chan_bw == - HT_CHANNEL_WIDTH_20_40) { - if (pwr_diff_limit[i] > - rtlefuse->pwrgroup_ht40[rf] - [channel - 1]) - pwr_diff_limit[i] = - rtlefuse->pwrgroup_ht40 - [rf][channel - 1]; - } else { - if (pwr_diff_limit[i] > - rtlefuse->pwrgroup_ht20[rf][ - channel - 1]) - pwr_diff_limit[i] = - rtlefuse->pwrgroup_ht20[rf] - [channel - 1]; - } - } - customer_limit = (pwr_diff_limit[3] << 24) | - (pwr_diff_limit[2] << 16) | - (pwr_diff_limit[1] << 8) | - (pwr_diff_limit[0]); - RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "Customer's limit rf(%c) = 0x%x\n", - rf == 0 ? 'A' : 'B', customer_limit); - writeval = customer_limit + ((index < 2) ? - powerbase0[rf] : powerbase1[rf]); - RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "Customer, writeval rf(%c)= 0x%x\n", - rf == 0 ? 'A' : 'B', writeval); - break; - default: - chnlgroup = 0; - writeval = rtlphy->mcs_offset[chnlgroup][index + - (rf ? 8 : 0)] + ((index < 2) ? - powerbase0[rf] : powerbase1[rf]); - RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "RTK better performance, writeval rf(%c) = 0x%x\n", - rf == 0 ? 
'A' : 'B', writeval); - break; - } - *(p_outwriteval + rf) = writeval; - } -} - -static void _rtl92d_write_ofdm_power_reg(struct ieee80211_hw *hw, - u8 index, u32 *pvalue) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - static u16 regoffset_a[6] = { - RTXAGC_A_RATE18_06, RTXAGC_A_RATE54_24, - RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04, - RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12 - }; - static u16 regoffset_b[6] = { - RTXAGC_B_RATE18_06, RTXAGC_B_RATE54_24, - RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04, - RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12 - }; - u8 i, rf, pwr_val[4]; - u32 writeval; - u16 regoffset; - - for (rf = 0; rf < 2; rf++) { - writeval = pvalue[rf]; - for (i = 0; i < 4; i++) { - pwr_val[i] = (u8) ((writeval & (0x7f << - (i * 8))) >> (i * 8)); - if (pwr_val[i] > RF6052_MAX_TX_PWR) - pwr_val[i] = RF6052_MAX_TX_PWR; - } - writeval = (pwr_val[3] << 24) | (pwr_val[2] << 16) | - (pwr_val[1] << 8) | pwr_val[0]; - if (rf == 0) - regoffset = regoffset_a[index]; - else - regoffset = regoffset_b[index]; - rtl_set_bbreg(hw, regoffset, MASKDWORD, writeval); - RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "Set 0x%x = %08x\n", regoffset, writeval); - if (((get_rf_type(rtlphy) == RF_2T2R) && - (regoffset == RTXAGC_A_MCS15_MCS12 || - regoffset == RTXAGC_B_MCS15_MCS12)) || - ((get_rf_type(rtlphy) != RF_2T2R) && - (regoffset == RTXAGC_A_MCS07_MCS04 || - regoffset == RTXAGC_B_MCS07_MCS04))) { - writeval = pwr_val[3]; - if (regoffset == RTXAGC_A_MCS15_MCS12 || - regoffset == RTXAGC_A_MCS07_MCS04) - regoffset = 0xc90; - if (regoffset == RTXAGC_B_MCS15_MCS12 || - regoffset == RTXAGC_B_MCS07_MCS04) - regoffset = 0xc98; - for (i = 0; i < 3; i++) { - if (i != 2) - writeval = (writeval > 8) ? - (writeval - 8) : 0; - else - writeval = (writeval > 6) ? 
- (writeval - 6) : 0; - rtl_write_byte(rtlpriv, (u32) (regoffset + i), - (u8) writeval); - } - } - } -} - -void rtl92d_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, - u8 *ppowerlevel, u8 channel) -{ - u32 writeval[2], powerbase0[2], powerbase1[2]; - u8 index; - - _rtl92d_phy_get_power_base(hw, ppowerlevel, channel, - &powerbase0[0], &powerbase1[0]); - for (index = 0; index < 6; index++) { - _rtl92d_get_txpower_writeval_by_regulatory(hw, - channel, index, &powerbase0[0], - &powerbase1[0], &writeval[0]); - _rtl92d_write_ofdm_power_reg(hw, index, &writeval[0]); - } -} - bool rtl92d_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0) { struct rtl_priv *rtlpriv = rtl_priv(hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h index 4e646cc9ebc0..c097d90cc99c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h @@ -4,11 +4,6 @@ #ifndef __RTL92D_RF_H__ #define __RTL92D_RF_H__ -void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth); -void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, - u8 *ppowerlevel); -void rtl92d_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, - u8 *ppowerlevel, u8 channel); bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw); bool rtl92d_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0); void rtl92d_phy_powerdown_anotherphy(struct ieee80211_hw *hw, bool bmac0); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c index afd685ed460a..5f6311c2aac4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c @@ -5,8 +5,12 @@ #include "../core.h" #include "../pci.h" #include "../base.h" -#include "reg.h" -#include "def.h" +#include "../rtl8192d/reg.h" +#include "../rtl8192d/def.h" +#include "../rtl8192d/dm_common.h" +#include "../rtl8192d/hw_common.h" +#include "../rtl8192d/phy_common.h" +#include "../rtl8192d/trx_common.h" #include "phy.h" #include "dm.h" #include "hw.h" @@ -207,7 +211,7 @@ static struct rtl_hal_ops rtl8192de_hal_ops = { .radio_onoff_checking = rtl92de_gpio_radio_on_off_checking, .set_bw_mode = rtl92d_phy_set_bw_mode, .switch_channel = rtl92d_phy_sw_chnl, - .dm_watchdog = rtl92d_dm_watchdog, + .dm_watchdog = rtl92de_dm_watchdog, .scan_operation_backup = rtl_phy_scan_operation_backup, .set_rf_power_state = rtl92d_phy_set_rf_power_state, .led_control = rtl92de_led_control, @@ -223,6 +227,8 @@ static struct rtl_hal_ops rtl8192de_hal_ops = { .set_rfreg = rtl92d_phy_set_rf_reg, .linked_set_reg = rtl92d_linked_set_reg, .get_btc_status = rtl_btc_status_false, + .phy_iq_calibrate = rtl92d_phy_iq_calibrate, + .phy_lc_calibrate = rtl92d_phy_lc_calibrate, }; static struct rtl_mod_params rtl92de_mod_params = { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c index 192982ec8152..2b9b352f7783 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c @@ -5,8 +5,10 @@ #include "../pci.h" #include "../base.h" #include "../stats.h" -#include "reg.h" -#include "def.h" +#include "../rtl8192d/reg.h" +#include "../rtl8192d/def.h" +#include "../rtl8192d/phy_common.h" +#include "../rtl8192d/trx_common.h" #include "phy.h" #include "trx.h" #include "led.h" @@ -23,434 +25,6 @@ static u8 _rtl92de_map_hwqueue_to_fwqueue(struct 
sk_buff *skb, u8 hw_queue) return skb->priority; } -static long _rtl92de_translate_todbm(struct ieee80211_hw *hw, - u8 signal_strength_index) -{ - long signal_power; - - signal_power = (long)((signal_strength_index + 1) >> 1); - signal_power -= 95; - return signal_power; -} - -static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw, - struct rtl_stats *pstats, - struct rx_desc_92d *pdesc, - struct rx_fwinfo_92d *p_drvinfo, - bool packet_match_bssid, - bool packet_toself, - bool packet_beacon) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv); - struct phy_sts_cck_8192d *cck_buf; - s8 rx_pwr_all, rx_pwr[4]; - u8 rf_rx_num = 0, evm, pwdb_all; - u8 i, max_spatial_stream; - u32 rssi, total_rssi = 0; - bool is_cck_rate; - - is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc->rxmcs); - pstats->packet_matchbssid = packet_match_bssid; - pstats->packet_toself = packet_toself; - pstats->packet_beacon = packet_beacon; - pstats->is_cck = is_cck_rate; - pstats->rx_mimo_sig_qual[0] = -1; - pstats->rx_mimo_sig_qual[1] = -1; - - if (is_cck_rate) { - u8 report, cck_highpwr; - cck_buf = (struct phy_sts_cck_8192d *)p_drvinfo; - if (ppsc->rfpwr_state == ERFON) - cck_highpwr = rtlphy->cck_high_power; - else - cck_highpwr = false; - if (!cck_highpwr) { - u8 cck_agc_rpt = cck_buf->cck_agc_rpt; - report = cck_buf->cck_agc_rpt & 0xc0; - report = report >> 6; - switch (report) { - case 0x3: - rx_pwr_all = -46 - (cck_agc_rpt & 0x3e); - break; - case 0x2: - rx_pwr_all = -26 - (cck_agc_rpt & 0x3e); - break; - case 0x1: - rx_pwr_all = -12 - (cck_agc_rpt & 0x3e); - break; - case 0x0: - rx_pwr_all = 16 - (cck_agc_rpt & 0x3e); - break; - } - } else { - u8 cck_agc_rpt = cck_buf->cck_agc_rpt; - report = p_drvinfo->cfosho[0] & 0x60; - report = report >> 5; - switch (report) { - case 0x3: - rx_pwr_all = -46 - ((cck_agc_rpt & 0x1f) << 1); - break; - case 0x2: - rx_pwr_all = -26 - ((cck_agc_rpt & 0x1f) << 1); - break; - case 0x1: - rx_pwr_all = -12 - ((cck_agc_rpt & 0x1f) << 1); - break; - case 0x0: - rx_pwr_all = 16 - ((cck_agc_rpt & 0x1f) << 1); - break; - } - } - pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all); - /* CCK gain is smaller than OFDM/MCS gain, */ - /* so we add gain diff by experiences, the val is 6 */ - pwdb_all += 6; - if (pwdb_all > 100) - pwdb_all = 100; - /* modify the offset to make the same gain index with OFDM. 
*/ - if (pwdb_all > 34 && pwdb_all <= 42) - pwdb_all -= 2; - else if (pwdb_all > 26 && pwdb_all <= 34) - pwdb_all -= 6; - else if (pwdb_all > 14 && pwdb_all <= 26) - pwdb_all -= 8; - else if (pwdb_all > 4 && pwdb_all <= 14) - pwdb_all -= 4; - pstats->rx_pwdb_all = pwdb_all; - pstats->recvsignalpower = rx_pwr_all; - if (packet_match_bssid) { - u8 sq; - if (pstats->rx_pwdb_all > 40) { - sq = 100; - } else { - sq = cck_buf->sq_rpt; - if (sq > 64) - sq = 0; - else if (sq < 20) - sq = 100; - else - sq = ((64 - sq) * 100) / 44; - } - pstats->signalquality = sq; - pstats->rx_mimo_sig_qual[0] = sq; - pstats->rx_mimo_sig_qual[1] = -1; - } - } else { - rtlpriv->dm.rfpath_rxenable[0] = true; - rtlpriv->dm.rfpath_rxenable[1] = true; - for (i = RF90_PATH_A; i < RF6052_MAX_PATH; i++) { - if (rtlpriv->dm.rfpath_rxenable[i]) - rf_rx_num++; - rx_pwr[i] = ((p_drvinfo->gain_trsw[i] & 0x3f) * 2) - - 110; - rssi = rtl_query_rxpwrpercentage(rx_pwr[i]); - total_rssi += rssi; - rtlpriv->stats.rx_snr_db[i] = - (long)(p_drvinfo->rxsnr[i] / 2); - if (packet_match_bssid) - pstats->rx_mimo_signalstrength[i] = (u8) rssi; - } - rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 106; - pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all); - pstats->rx_pwdb_all = pwdb_all; - pstats->rxpower = rx_pwr_all; - pstats->recvsignalpower = rx_pwr_all; - if (pdesc->rxht && pdesc->rxmcs >= DESC_RATEMCS8 && - pdesc->rxmcs <= DESC_RATEMCS15) - max_spatial_stream = 2; - else - max_spatial_stream = 1; - for (i = 0; i < max_spatial_stream; i++) { - evm = rtl_evm_db_to_percentage(p_drvinfo->rxevm[i]); - if (packet_match_bssid) { - if (i == 0) - pstats->signalquality = - (u8)(evm & 0xff); - pstats->rx_mimo_sig_qual[i] = - (u8)(evm & 0xff); - } - } - } - if (is_cck_rate) - pstats->signalstrength = (u8)(rtl_signal_scale_mapping(hw, - pwdb_all)); - else if (rf_rx_num != 0) - pstats->signalstrength = (u8)(rtl_signal_scale_mapping(hw, - total_rssi /= rf_rx_num)); -} - -static void rtl92d_loop_over_paths(struct ieee80211_hw *hw, - struct rtl_stats *pstats) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - u8 rfpath; - - for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath; - rfpath++) { - if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) { - rtlpriv->stats.rx_rssi_percentage[rfpath] = - pstats->rx_mimo_signalstrength[rfpath]; - - } - if (pstats->rx_mimo_signalstrength[rfpath] > - rtlpriv->stats.rx_rssi_percentage[rfpath]) { - rtlpriv->stats.rx_rssi_percentage[rfpath] = - ((rtlpriv->stats.rx_rssi_percentage[rfpath] * - (RX_SMOOTH_FACTOR - 1)) + - (pstats->rx_mimo_signalstrength[rfpath])) / - (RX_SMOOTH_FACTOR); - rtlpriv->stats.rx_rssi_percentage[rfpath] = - rtlpriv->stats.rx_rssi_percentage[rfpath] + 1; - } else { - rtlpriv->stats.rx_rssi_percentage[rfpath] = - ((rtlpriv->stats.rx_rssi_percentage[rfpath] * - (RX_SMOOTH_FACTOR - 1)) + - (pstats->rx_mimo_signalstrength[rfpath])) / - (RX_SMOOTH_FACTOR); - } - } -} - -static void _rtl92de_process_ui_rssi(struct ieee80211_hw *hw, - struct rtl_stats *pstats) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 last_rssi, tmpval; - - if (pstats->packet_toself || pstats->packet_beacon) { - rtlpriv->stats.rssi_calculate_cnt++; - if (rtlpriv->stats.ui_rssi.total_num++ >= - PHY_RSSI_SLID_WIN_MAX) { - rtlpriv->stats.ui_rssi.total_num = - PHY_RSSI_SLID_WIN_MAX; - last_rssi = rtlpriv->stats.ui_rssi.elements[ - rtlpriv->stats.ui_rssi.index]; - rtlpriv->stats.ui_rssi.total_val -= last_rssi; - } - rtlpriv->stats.ui_rssi.total_val += pstats->signalstrength; - 
rtlpriv->stats.ui_rssi.elements - [rtlpriv->stats.ui_rssi.index++] = - pstats->signalstrength; - if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX) - rtlpriv->stats.ui_rssi.index = 0; - tmpval = rtlpriv->stats.ui_rssi.total_val / - rtlpriv->stats.ui_rssi.total_num; - rtlpriv->stats.signal_strength = _rtl92de_translate_todbm(hw, - (u8) tmpval); - pstats->rssi = rtlpriv->stats.signal_strength; - } - if (!pstats->is_cck && pstats->packet_toself) - rtl92d_loop_over_paths(hw, pstats); -} - -static void _rtl92de_update_rxsignalstatistics(struct ieee80211_hw *hw, - struct rtl_stats *pstats) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - int weighting = 0; - - if (rtlpriv->stats.recv_signal_power == 0) - rtlpriv->stats.recv_signal_power = pstats->recvsignalpower; - if (pstats->recvsignalpower > rtlpriv->stats.recv_signal_power) - weighting = 5; - else if (pstats->recvsignalpower < rtlpriv->stats.recv_signal_power) - weighting = (-5); - rtlpriv->stats.recv_signal_power = (rtlpriv->stats.recv_signal_power * - 5 + pstats->recvsignalpower + weighting) / 6; -} - -static void _rtl92de_process_pwdb(struct ieee80211_hw *hw, - struct rtl_stats *pstats) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - long undec_sm_pwdb; - - if (mac->opmode == NL80211_IFTYPE_ADHOC || - mac->opmode == NL80211_IFTYPE_AP) - return; - else - undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb; - - if (pstats->packet_toself || pstats->packet_beacon) { - if (undec_sm_pwdb < 0) - undec_sm_pwdb = pstats->rx_pwdb_all; - if (pstats->rx_pwdb_all > (u32) undec_sm_pwdb) { - undec_sm_pwdb = (((undec_sm_pwdb) * - (RX_SMOOTH_FACTOR - 1)) + - (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR); - undec_sm_pwdb = undec_sm_pwdb + 1; - } else { - undec_sm_pwdb = (((undec_sm_pwdb) * - (RX_SMOOTH_FACTOR - 1)) + - (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR); - } - rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb; - _rtl92de_update_rxsignalstatistics(hw, pstats); - } -} - -static void rtl92d_loop_over_streams(struct ieee80211_hw *hw, - struct rtl_stats *pstats) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - int stream; - - for (stream = 0; stream < 2; stream++) { - if (pstats->rx_mimo_sig_qual[stream] != -1) { - if (rtlpriv->stats.rx_evm_percentage[stream] == 0) { - rtlpriv->stats.rx_evm_percentage[stream] = - pstats->rx_mimo_sig_qual[stream]; - } - rtlpriv->stats.rx_evm_percentage[stream] = - ((rtlpriv->stats.rx_evm_percentage[stream] - * (RX_SMOOTH_FACTOR - 1)) + - (pstats->rx_mimo_sig_qual[stream] * 1)) / - (RX_SMOOTH_FACTOR); - } - } -} - -static void _rtl92de_process_ui_link_quality(struct ieee80211_hw *hw, - struct rtl_stats *pstats) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 last_evm, tmpval; - - if (pstats->signalquality == 0) - return; - if (pstats->packet_toself || pstats->packet_beacon) { - if (rtlpriv->stats.ui_link_quality.total_num++ >= - PHY_LINKQUALITY_SLID_WIN_MAX) { - rtlpriv->stats.ui_link_quality.total_num = - PHY_LINKQUALITY_SLID_WIN_MAX; - last_evm = rtlpriv->stats.ui_link_quality.elements[ - rtlpriv->stats.ui_link_quality.index]; - rtlpriv->stats.ui_link_quality.total_val -= last_evm; - } - rtlpriv->stats.ui_link_quality.total_val += - pstats->signalquality; - rtlpriv->stats.ui_link_quality.elements[ - rtlpriv->stats.ui_link_quality.index++] = - pstats->signalquality; - if (rtlpriv->stats.ui_link_quality.index >= - PHY_LINKQUALITY_SLID_WIN_MAX) - rtlpriv->stats.ui_link_quality.index = 0; - tmpval = rtlpriv->stats.ui_link_quality.total_val / - rtlpriv->stats.ui_link_quality.total_num; - 
rtlpriv->stats.signal_quality = tmpval; - rtlpriv->stats.last_sigstrength_inpercent = tmpval; - rtl92d_loop_over_streams(hw, pstats); - } -} - -static void _rtl92de_process_phyinfo(struct ieee80211_hw *hw, - u8 *buffer, - struct rtl_stats *pcurrent_stats) -{ - - if (!pcurrent_stats->packet_matchbssid && - !pcurrent_stats->packet_beacon) - return; - - _rtl92de_process_ui_rssi(hw, pcurrent_stats); - _rtl92de_process_pwdb(hw, pcurrent_stats); - _rtl92de_process_ui_link_quality(hw, pcurrent_stats); -} - -static void _rtl92de_translate_rx_signal_stuff(struct ieee80211_hw *hw, - struct sk_buff *skb, - struct rtl_stats *pstats, - struct rx_desc_92d *pdesc, - struct rx_fwinfo_92d *p_drvinfo) -{ - struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - struct ieee80211_hdr *hdr; - u8 *tmp_buf; - u8 *praddr; - u16 type, cfc; - __le16 fc; - bool packet_matchbssid, packet_toself, packet_beacon = false; - - tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift; - hdr = (struct ieee80211_hdr *)tmp_buf; - fc = hdr->frame_control; - cfc = le16_to_cpu(fc); - type = WLAN_FC_GET_TYPE(fc); - praddr = hdr->addr1; - packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) && - ether_addr_equal(mac->bssid, - (cfc & IEEE80211_FCTL_TODS) ? hdr->addr1 : - (cfc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 : - hdr->addr3) && - (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv)); - packet_toself = packet_matchbssid && - ether_addr_equal(praddr, rtlefuse->dev_addr); - if (ieee80211_is_beacon(fc)) - packet_beacon = true; - _rtl92de_query_rxphystatus(hw, pstats, pdesc, p_drvinfo, - packet_matchbssid, packet_toself, - packet_beacon); - _rtl92de_process_phyinfo(hw, tmp_buf, pstats); -} - -bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, - struct ieee80211_rx_status *rx_status, - u8 *pdesc8, struct sk_buff *skb) -{ - __le32 *pdesc = (__le32 *)pdesc8; - struct rx_fwinfo_92d *p_drvinfo; - u32 phystatus = get_rx_desc_physt(pdesc); - - stats->length = (u16)get_rx_desc_pkt_len(pdesc); - stats->rx_drvinfo_size = (u8)get_rx_desc_drv_info_size(pdesc) * - RX_DRV_INFO_SIZE_UNIT; - stats->rx_bufshift = (u8)(get_rx_desc_shift(pdesc) & 0x03); - stats->icv = (u16)get_rx_desc_icv(pdesc); - stats->crc = (u16)get_rx_desc_crc32(pdesc); - stats->hwerror = (stats->crc | stats->icv); - stats->decrypted = !get_rx_desc_swdec(pdesc); - stats->rate = (u8)get_rx_desc_rxmcs(pdesc); - stats->shortpreamble = (u16)get_rx_desc_splcp(pdesc); - stats->isampdu = (bool)(get_rx_desc_paggr(pdesc) == 1); - stats->isfirst_ampdu = (bool)((get_rx_desc_paggr(pdesc) == 1) && - (get_rx_desc_faggr(pdesc) == 1)); - stats->timestamp_low = get_rx_desc_tsfl(pdesc); - stats->rx_is40mhzpacket = (bool)get_rx_desc_bw(pdesc); - stats->is_ht = (bool)get_rx_desc_rxht(pdesc); - rx_status->freq = hw->conf.chandef.chan->center_freq; - rx_status->band = hw->conf.chandef.chan->band; - if (get_rx_desc_crc32(pdesc)) - rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; - if (!get_rx_desc_swdec(pdesc)) - rx_status->flag |= RX_FLAG_DECRYPTED; - if (get_rx_desc_bw(pdesc)) - rx_status->bw = RATE_INFO_BW_40; - if (get_rx_desc_rxht(pdesc)) - rx_status->encoding = RX_ENC_HT; - rx_status->flag |= RX_FLAG_MACTIME_START; - if (stats->decrypted) - rx_status->flag |= RX_FLAG_DECRYPTED; - rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats->is_ht, - false, stats->rate); - rx_status->mactime = get_rx_desc_tsfl(pdesc); - if (phystatus) { - p_drvinfo = (struct rx_fwinfo_92d *)(skb->data + - stats->rx_bufshift); - 
_rtl92de_translate_rx_signal_stuff(hw, - skb, stats, - (struct rx_desc_92d *)pdesc, - p_drvinfo); - } - /*rx_status->qual = stats->signal; */ - rx_status->signal = stats->recvsignalpower + 10; - return true; -} - static void _rtl92de_insert_emcontent(struct rtl_tcb_desc *ptcb_desc, u8 *virtualaddress8) { @@ -712,87 +286,6 @@ void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8, set_tx_desc_own(pdesc, 1); } -void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc8, bool istx, - u8 desc_name, u8 *val) -{ - __le32 *pdesc = (__le32 *)pdesc8; - - if (istx) { - switch (desc_name) { - case HW_DESC_OWN: - wmb(); - set_tx_desc_own(pdesc, 1); - break; - case HW_DESC_TX_NEXTDESC_ADDR: - set_tx_desc_next_desc_address(pdesc, *(u32 *)val); - break; - default: - WARN_ONCE(true, "rtl8192de: ERR txdesc :%d not processed\n", - desc_name); - break; - } - } else { - switch (desc_name) { - case HW_DESC_RXOWN: - wmb(); - set_rx_desc_own(pdesc, 1); - break; - case HW_DESC_RXBUFF_ADDR: - set_rx_desc_buff_addr(pdesc, *(u32 *)val); - break; - case HW_DESC_RXPKT_LEN: - set_rx_desc_pkt_len(pdesc, *(u32 *)val); - break; - case HW_DESC_RXERO: - set_rx_desc_eor(pdesc, 1); - break; - default: - WARN_ONCE(true, "rtl8192de: ERR rxdesc :%d not processed\n", - desc_name); - break; - } - } -} - -u64 rtl92de_get_desc(struct ieee80211_hw *hw, - u8 *p_desc8, bool istx, u8 desc_name) -{ - __le32 *p_desc = (__le32 *)p_desc8; - u32 ret = 0; - - if (istx) { - switch (desc_name) { - case HW_DESC_OWN: - ret = get_tx_desc_own(p_desc); - break; - case HW_DESC_TXBUFF_ADDR: - ret = get_tx_desc_tx_buffer_address(p_desc); - break; - default: - WARN_ONCE(true, "rtl8192de: ERR txdesc :%d not processed\n", - desc_name); - break; - } - } else { - switch (desc_name) { - case HW_DESC_OWN: - ret = get_rx_desc_own(p_desc); - break; - case HW_DESC_RXPKT_LEN: - ret = get_rx_desc_pkt_len(p_desc); - break; - case HW_DESC_RXBUFF_ADDR: - ret = get_rx_desc_buff_addr(p_desc); - break; - default: - WARN_ONCE(true, "rtl8192de: ERR rxdesc :%d not processed\n", - desc_name); - break; - } - } - return ret; -} - bool rtl92de_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index) { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h index 2992668c156c..d3c480c75678 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h @@ -8,384 +8,17 @@ #define TX_DESC_AGGR_SUBFRAME_SIZE 32 #define RX_DESC_SIZE 32 -#define RX_DRV_INFO_SIZE_UNIT 8 #define TX_DESC_NEXT_DESC_OFFSET 40 #define USB_HWDESC_HEADER_LEN 32 #define CRCLENGTH 4 -/* macros to read/write various fields in RX or TX descriptors */ - -static inline void set_tx_desc_pkt_size(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits(__pdesc, __val, GENMASK(15, 0)); -} - -static inline void set_tx_desc_offset(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits(__pdesc, __val, GENMASK(23, 16)); -} - -static inline void set_tx_desc_htc(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits(__pdesc, __val, BIT(25)); -} - -static inline void set_tx_desc_last_seg(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits(__pdesc, __val, BIT(26)); -} - -static inline void set_tx_desc_first_seg(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits(__pdesc, __val, BIT(27)); -} - -static inline void set_tx_desc_linip(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits(__pdesc, __val, BIT(28)); -} - -static inline void set_tx_desc_own(__le32 *__pdesc, u32 __val) -{ - 
le32p_replace_bits(__pdesc, __val, BIT(31)); -} - -static inline u32 get_tx_desc_own(__le32 *__pdesc) -{ - return le32_get_bits(*__pdesc, BIT(31)); -} - -static inline void set_tx_desc_macid(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits((__pdesc + 1), __val, GENMASK(4, 0)); -} - -static inline void set_tx_desc_agg_enable(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits((__pdesc + 1), __val, BIT(5)); -} - -static inline void set_tx_desc_rdg_enable(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits((__pdesc + 1), __val, BIT(7)); -} - -static inline void set_tx_desc_queue_sel(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits((__pdesc + 1), __val, GENMASK(12, 8)); -} - -static inline void set_tx_desc_rate_id(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits((__pdesc + 1), __val, GENMASK(19, 16)); -} - -static inline void set_tx_desc_sec_type(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits((__pdesc + 1), __val, GENMASK(23, 22)); -} - -static inline void set_tx_desc_pkt_offset(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits((__pdesc + 1), __val, GENMASK(30, 26)); -} - -static inline void set_tx_desc_more_frag(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits((__pdesc + 2), __val, BIT(17)); -} - -static inline void set_tx_desc_ampdu_density(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits((__pdesc + 2), __val, GENMASK(22, 20)); -} - -static inline void set_tx_desc_seq(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits((__pdesc + 3), __val, GENMASK(27, 16)); -} - -static inline void set_tx_desc_pkt_id(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits((__pdesc + 3), __val, GENMASK(31, 28)); -} - -static inline void set_tx_desc_rts_rate(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits((__pdesc + 4), __val, GENMASK(4, 0)); -} - -static inline void set_tx_desc_qos(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits((__pdesc + 4), __val, BIT(6)); -} - -static inline void set_tx_desc_hwseq_en(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits((__pdesc + 4), __val, BIT(7)); -} - -static inline void set_tx_desc_use_rate(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits((__pdesc + 4), __val, BIT(8)); -} - -static inline void set_tx_desc_disable_fb(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits((__pdesc + 4), __val, BIT(10)); -} - -static inline void set_tx_desc_cts2self(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits((__pdesc + 4), __val, BIT(11)); -} - -static inline void set_tx_desc_rts_enable(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits((__pdesc + 4), __val, BIT(12)); -} - -static inline void set_tx_desc_hw_rts_enable(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits((__pdesc + 4), __val, BIT(13)); -} - -static inline void set_tx_desc_tx_sub_carrier(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits((__pdesc + 4), __val, GENMASK(21, 20)); -} - -static inline void set_tx_desc_data_bw(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits((__pdesc + 4), __val, BIT(25)); -} - -static inline void set_tx_desc_rts_short(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits((__pdesc + 4), __val, BIT(26)); -} - -static inline void set_tx_desc_rts_bw(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits((__pdesc + 4), __val, BIT(27)); -} - -static inline void set_tx_desc_rts_sc(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits((__pdesc + 4), __val, GENMASK(29, 28)); -} - -static inline void set_tx_desc_rts_stbc(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits((__pdesc + 4), __val, GENMASK(31, 30)); -} - -static inline void set_tx_desc_tx_rate(__le32 *__pdesc, 
u32 __val) -{ - le32p_replace_bits((__pdesc + 5), __val, GENMASK(5, 0)); -} - -static inline void set_tx_desc_data_shortgi(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits((__pdesc + 5), __val, BIT(6)); -} - -static inline void set_tx_desc_data_rate_fb_limit(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits((__pdesc + 5), __val, GENMASK(12, 8)); -} - -static inline void set_tx_desc_rts_rate_fb_limit(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits((__pdesc + 5), __val, GENMASK(16, 13)); -} - -static inline void set_tx_desc_max_agg_num(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits((__pdesc + 6), __val, GENMASK(15, 11)); -} - -static inline void set_tx_desc_tx_buffer_size(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits((__pdesc + 7), __val, GENMASK(15, 0)); -} - -static inline void set_tx_desc_tx_buffer_address(__le32 *__pdesc, u32 __val) -{ - *(__pdesc + 8) = cpu_to_le32(__val); -} - -static inline u32 get_tx_desc_tx_buffer_address(__le32 *__pdesc) -{ - return le32_to_cpu(*(__pdesc + 8)); -} - -static inline void set_tx_desc_next_desc_address(__le32 *__pdesc, u32 __val) -{ - *(__pdesc + 10) = cpu_to_le32(__val); -} - -static inline u32 get_rx_desc_pkt_len(__le32 *__pdesc) -{ - return le32_get_bits(*__pdesc, GENMASK(13, 0)); -} - -static inline u32 get_rx_desc_crc32(__le32 *__pdesc) -{ - return le32_get_bits(*__pdesc, BIT(14)); -} - -static inline u32 get_rx_desc_icv(__le32 *__pdesc) -{ - return le32_get_bits(*__pdesc, BIT(15)); -} - -static inline u32 get_rx_desc_drv_info_size(__le32 *__pdesc) -{ - return le32_get_bits(*__pdesc, GENMASK(19, 16)); -} - -static inline u32 get_rx_desc_shift(__le32 *__pdesc) -{ - return le32_get_bits(*__pdesc, GENMASK(25, 24)); -} - -static inline u32 get_rx_desc_physt(__le32 *__pdesc) -{ - return le32_get_bits(*__pdesc, BIT(26)); -} - -static inline u32 get_rx_desc_swdec(__le32 *__pdesc) -{ - return le32_get_bits(*__pdesc, BIT(27)); -} - -static inline u32 get_rx_desc_own(__le32 *__pdesc) -{ - return le32_get_bits(*__pdesc, BIT(31)); -} - -static inline void set_rx_desc_pkt_len(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits(__pdesc, __val, GENMASK(13, 0)); -} - -static inline void set_rx_desc_eor(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits(__pdesc, __val, BIT(30)); -} - -static inline void set_rx_desc_own(__le32 *__pdesc, u32 __val) -{ - le32p_replace_bits(__pdesc, __val, BIT(31)); -} - -static inline u32 get_rx_desc_paggr(__le32 *__pdesc) -{ - return le32_get_bits(*(__pdesc + 1), BIT(14)); -} - -static inline u32 get_rx_desc_faggr(__le32 *__pdesc) -{ - return le32_get_bits(*(__pdesc + 1), BIT(15)); -} - -static inline u32 get_rx_desc_rxmcs(__le32 *__pdesc) -{ - return le32_get_bits(*(__pdesc + 3), GENMASK(5, 0)); -} - -static inline u32 get_rx_desc_rxht(__le32 *__pdesc) -{ - return le32_get_bits(*(__pdesc + 3), BIT(6)); -} - -static inline u32 get_rx_desc_splcp(__le32 *__pdesc) -{ - return le32_get_bits(*(__pdesc + 3), BIT(8)); -} - -static inline u32 get_rx_desc_bw(__le32 *__pdesc) -{ - return le32_get_bits(*(__pdesc + 3), BIT(9)); -} - -static inline u32 get_rx_desc_tsfl(__le32 *__pdesc) -{ - return le32_to_cpu(*(__pdesc + 5)); -} - -static inline u32 get_rx_desc_buff_addr(__le32 *__pdesc) -{ - return le32_to_cpu(*(__pdesc + 6)); -} - -static inline void set_rx_desc_buff_addr(__le32 *__pdesc, u32 __val) -{ - *(__pdesc + 6) = cpu_to_le32(__val); -} - static inline void clear_pci_tx_desc_content(__le32 *__pdesc, u32 _size) { memset((void *)__pdesc, 0, min_t(size_t, _size, TX_DESC_NEXT_DESC_OFFSET)); } -/* For 92D early mode */ 
-static inline void set_earlymode_pktnum(__le32 *__paddr, u32 __value) -{ - le32p_replace_bits(__paddr, __value, GENMASK(2, 0)); -} - -static inline void set_earlymode_len0(__le32 *__paddr, u32 __value) -{ - le32p_replace_bits(__paddr, __value, GENMASK(15, 4)); -} - -static inline void set_earlymode_len1(__le32 *__paddr, u32 __value) -{ - le32p_replace_bits(__paddr, __value, GENMASK(27, 16)); -} - -static inline void set_earlymode_len2_1(__le32 *__paddr, u32 __value) -{ - le32p_replace_bits(__paddr, __value, GENMASK(31, 28)); -} - -static inline void set_earlymode_len2_2(__le32 *__paddr, u32 __value) -{ - le32p_replace_bits((__paddr + 1), __value, GENMASK(7, 0)); -} - -static inline void set_earlymode_len3(__le32 *__paddr, u32 __value) -{ - le32p_replace_bits((__paddr + 1), __value, GENMASK(19, 8)); -} - -static inline void set_earlymode_len4(__le32 *__paddr, u32 __value) -{ - le32p_replace_bits((__paddr + 1), __value, GENMASK(31, 20)); -} - -struct rx_fwinfo_92d { - u8 gain_trsw[4]; - u8 pwdb_all; - u8 cfosho[4]; - u8 cfotail[4]; - s8 rxevm[2]; - s8 rxsnr[4]; - u8 pdsnr[2]; - u8 csi_current[2]; - u8 csi_target[2]; - u8 sigevm; - u8 max_ex_pwr; - u8 ex_intf_flag:1; - u8 sgi_en:1; - u8 rxsc:2; - u8 reserve:4; -} __packed; - struct tx_desc_92d { u32 pktsize:16; u32 offset:8; @@ -488,78 +121,12 @@ struct tx_desc_92d { u32 reserve_pass_pcie_mm_limit[4]; } __packed; -struct rx_desc_92d { - u32 length:14; - u32 crc32:1; - u32 icverror:1; - u32 drv_infosize:4; - u32 security:3; - u32 qos:1; - u32 shift:2; - u32 phystatus:1; - u32 swdec:1; - u32 lastseg:1; - u32 firstseg:1; - u32 eor:1; - u32 own:1; - - u32 macid:5; - u32 tid:4; - u32 hwrsvd:5; - u32 paggr:1; - u32 faggr:1; - u32 a1_fit:4; - u32 a2_fit:4; - u32 pam:1; - u32 pwr:1; - u32 moredata:1; - u32 morefrag:1; - u32 type:2; - u32 mc:1; - u32 bc:1; - - u32 seq:12; - u32 frag:4; - u32 nextpktlen:14; - u32 nextind:1; - u32 rsvd:1; - - u32 rxmcs:6; - u32 rxht:1; - u32 amsdu:1; - u32 splcp:1; - u32 bandwidth:1; - u32 htc:1; - u32 tcpchk_rpt:1; - u32 ipcchk_rpt:1; - u32 tcpchk_valid:1; - u32 hwpcerr:1; - u32 hwpcind:1; - u32 iv0:16; - - u32 iv1; - - u32 tsfl; - - u32 bufferaddress; - u32 bufferaddress64; - -} __packed; - void rtl92de_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr, u8 *pdesc, u8 *pbd_desc_tx, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, struct sk_buff *skb, u8 hw_queue, struct rtl_tcb_desc *ptcb_desc); -bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, - struct rtl_stats *stats, - struct ieee80211_rx_status *rx_status, - u8 *pdesc, struct sk_buff *skb); -void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, - u8 desc_name, u8 *val); -u64 rtl92de_get_desc(struct ieee80211_hw *hw, - u8 *p_desc, bool istx, u8 desc_name); bool rtl92de_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index); void rtl92de_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c index 094cb36153f5..13e689037acc 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c @@ -1110,16 +1110,22 @@ static void _rtl8723be_phy_set_txpower_index(struct ieee80211_hw *hw, void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel) { struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - u8 cck_rates[] = {DESC92C_RATE1M, DESC92C_RATE2M, - DESC92C_RATE5_5M, DESC92C_RATE11M}; - u8 ofdm_rates[] = {DESC92C_RATE6M, 
DESC92C_RATE9M, - DESC92C_RATE12M, DESC92C_RATE18M, - DESC92C_RATE24M, DESC92C_RATE36M, - DESC92C_RATE48M, DESC92C_RATE54M}; - u8 ht_rates_1t[] = {DESC92C_RATEMCS0, DESC92C_RATEMCS1, - DESC92C_RATEMCS2, DESC92C_RATEMCS3, - DESC92C_RATEMCS4, DESC92C_RATEMCS5, - DESC92C_RATEMCS6, DESC92C_RATEMCS7}; + static const u8 cck_rates[] = { + DESC92C_RATE1M, DESC92C_RATE2M, + DESC92C_RATE5_5M, DESC92C_RATE11M + }; + static const u8 ofdm_rates[] = { + DESC92C_RATE6M, DESC92C_RATE9M, + DESC92C_RATE12M, DESC92C_RATE18M, + DESC92C_RATE24M, DESC92C_RATE36M, + DESC92C_RATE48M, DESC92C_RATE54M + }; + static const u8 ht_rates_1t[] = { + DESC92C_RATEMCS0, DESC92C_RATEMCS1, + DESC92C_RATEMCS2, DESC92C_RATEMCS3, + DESC92C_RATEMCS4, DESC92C_RATEMCS5, + DESC92C_RATEMCS6, DESC92C_RATEMCS7 + }; u8 i; u8 power_index; @@ -2155,15 +2161,16 @@ static void _rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw, static u8 _get_right_chnl_place_for_iqk(u8 chnl) { - u8 channel_all[TARGET_CHNL_NUM_2G_5G] = { - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, - 13, 14, 36, 38, 40, 42, 44, 46, - 48, 50, 52, 54, 56, 58, 60, 62, 64, - 100, 102, 104, 106, 108, 110, - 112, 114, 116, 118, 120, 122, - 124, 126, 128, 130, 132, 134, 136, - 138, 140, 149, 151, 153, 155, 157, - 159, 161, 163, 165}; + static const u8 channel_all[TARGET_CHNL_NUM_2G_5G] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 36, 38, 40, 42, 44, 46, + 48, 50, 52, 54, 56, 58, 60, 62, 64, + 100, 102, 104, 106, 108, 110, + 112, 114, 116, 118, 120, 122, + 124, 126, 128, 130, 132, 134, 136, + 138, 140, 149, 151, 153, 155, 157, + 159, 161, 163, 165 + }; u8 place = chnl; if (chnl > 14) { diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 6e8c87a2fae4..2ea72d9e3957 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -979,6 +979,9 @@ int rtl_usb_probe(struct usb_interface *intf, usb_priv->dev.intf = intf; usb_priv->dev.udev = udev; usb_set_intfdata(intf, hw); + /* For dual MAC RTL8192DU, which has two interfaces. 
*/ + rtlpriv->rtlhal.interfaceindex = + intf->altsetting[0].desc.bInterfaceNumber; /* init cfg & intf_ops */ rtlpriv->rtlhal.interface = INTF_USB; rtlpriv->cfg = rtl_hal_cfg; diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 9fabf597cfd6..442419568734 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -20,6 +20,7 @@ #define MASKBYTE1 0xff00 #define MASKBYTE2 0xff0000 #define MASKBYTE3 0xff000000 +#define MASKH3BYTES 0xffffff00 #define MASKHWORD 0xffff0000 #define MASKLWORD 0x0000ffff #define MASKDWORD 0xffffffff @@ -48,6 +49,10 @@ #define MASK20BITS 0xfffff #define RFREG_OFFSET_MASK 0xfffff +/* For dual MAC RTL8192DU */ +#define MAC0_ACCESS_PHY1 0x4000 +#define MAC1_ACCESS_PHY0 0x2000 + #define RF_CHANGE_BY_INIT 0 #define RF_CHANGE_BY_IPS BIT(28) #define RF_CHANGE_BY_PS BIT(29) @@ -1043,33 +1048,6 @@ struct octet_string { u16 length; }; -struct rtl_hdr_3addr { - __le16 frame_ctl; - __le16 duration_id; - u8 addr1[ETH_ALEN]; - u8 addr2[ETH_ALEN]; - u8 addr3[ETH_ALEN]; - __le16 seq_ctl; - u8 payload[]; -} __packed; - -struct rtl_info_element { - u8 id; - u8 len; - u8 data[]; -} __packed; - -struct rtl_probe_rsp { - struct rtl_hdr_3addr header; - u32 time_stamp[2]; - __le16 beacon_interval; - __le16 capability; - /*SSID, supported rates, FH params, DS params, - * CF params, IBSS params, TIM (if beacon), RSN - */ - struct rtl_info_element info_element[]; -} __packed; - struct rtl_led_ctl { bool led_opendrain; enum rtl_led_pin sw_led0; @@ -2268,6 +2246,7 @@ struct rtl_hal_ops { bool (*config_bb_with_pgheaderfile)(struct ieee80211_hw *hw, u8 configtype); void (*phy_lc_calibrate)(struct ieee80211_hw *hw, bool is2t); + void (*phy_iq_calibrate)(struct ieee80211_hw *hw); void (*phy_set_bw_mode_callback)(struct ieee80211_hw *hw); void (*dm_dynamic_txpower)(struct ieee80211_hw *hw); void (*c2h_command_handle)(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/realtek/rtw88/Kconfig b/drivers/net/wireless/realtek/rtw88/Kconfig index cffad1c01249..22838ede03cd 100644 --- a/drivers/net/wireless/realtek/rtw88/Kconfig +++ b/drivers/net/wireless/realtek/rtw88/Kconfig @@ -28,8 +28,16 @@ config RTW88_8822B config RTW88_8822C tristate +config RTW88_8723X + tristate + +config RTW88_8703B + tristate + select RTW88_8723X + config RTW88_8723D tristate + select RTW88_8723X config RTW88_8821C tristate @@ -122,6 +130,20 @@ config RTW88_8723DS 802.11n SDIO wireless network adapter +config RTW88_8723CS + tristate "Realtek 8723CS SDIO wireless network adapter" + depends on MMC + select RTW88_CORE + select RTW88_SDIO + select RTW88_8703B + help + Select this option to enable support for 8723CS chipset (EXPERIMENTAL) + + This module adds support for the 8723CS 802.11n SDIO + wireless network adapter. + + If you choose to build a module, it'll be called rtw88_8723cs. 
+ config RTW88_8723DU tristate "Realtek 8723DU USB wireless network adapter" depends on USB diff --git a/drivers/net/wireless/realtek/rtw88/Makefile b/drivers/net/wireless/realtek/rtw88/Makefile index fd212c09d88a..8f47359b4380 100644 --- a/drivers/net/wireless/realtek/rtw88/Makefile +++ b/drivers/net/wireless/realtek/rtw88/Makefile @@ -44,6 +44,15 @@ rtw88_8822cs-objs := rtw8822cs.o obj-$(CONFIG_RTW88_8822CU) += rtw88_8822cu.o rtw88_8822cu-objs := rtw8822cu.o +obj-$(CONFIG_RTW88_8723X) += rtw88_8723x.o +rtw88_8723x-objs := rtw8723x.o + +obj-$(CONFIG_RTW88_8703B) += rtw88_8703b.o +rtw88_8703b-objs := rtw8703b.o rtw8703b_tables.o + +obj-$(CONFIG_RTW88_8723CS) += rtw88_8723cs.o +rtw88_8723cs-objs := rtw8723cs.o + obj-$(CONFIG_RTW88_8723D) += rtw88_8723d.o rtw88_8723d-objs := rtw8723d.o rtw8723d_table.o diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c index 86467d2f8888..de3332eb7a22 100644 --- a/drivers/net/wireless/realtek/rtw88/coex.c +++ b/drivers/net/wireless/realtek/rtw88/coex.c @@ -3937,7 +3937,9 @@ void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m) lte_coex = rtw_coex_read_indirect_reg(rtwdev, 0x38); bt_coex = rtw_coex_read_indirect_reg(rtwdev, 0x54); - if (!coex_stat->bt_disabled && !coex_stat->bt_mailbox_reply) { + if (!coex_stat->wl_under_ips && + (!coex_stat->wl_under_lps || coex_stat->wl_force_lps_ctrl) && + !coex_stat->bt_disabled && !coex_stat->bt_mailbox_reply) { rtw_coex_get_bt_supported_version(rtwdev, &coex_stat->bt_supported_version); rtw_coex_get_bt_patch_version(rtwdev, &coex_stat->patch_ver); diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h index f20c0471c82a..eb69006c463e 100644 --- a/drivers/net/wireless/realtek/rtw88/debug.h +++ b/drivers/net/wireless/realtek/rtw88/debug.h @@ -26,6 +26,7 @@ enum rtw_debug_mask { RTW_DBG_STATE = 0x00020000, RTW_DBG_SDIO = 0x00040000, + RTW_DBG_UNEXP = 0x80000000, RTW_DBG_ALL = 0xffffffff }; diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c index 3f037ddcecf1..ab7d414d0ba6 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.c +++ b/drivers/net/wireless/realtek/rtw88/fw.c @@ -783,12 +783,18 @@ void rtw_fw_beacon_filter_config(struct rtw_dev *rtwdev, bool connect, static const u8 rssi_min = 0, rssi_max = 100, rssi_offset = 100; struct rtw_sta_info *si = sta ? 
(struct rtw_sta_info *)sta->drv_priv : NULL; - s32 threshold = bss_conf->cqm_rssi_thold + rssi_offset; + s32 thold = RTW_DEFAULT_CQM_THOLD; + u32 hyst = RTW_DEFAULT_CQM_HYST; u8 h2c_pkt[H2C_PKT_SIZE] = {0}; if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_BCN_FILTER)) return; + if (bss_conf->cqm_rssi_thold) + thold = bss_conf->cqm_rssi_thold; + if (bss_conf->cqm_rssi_hyst) + hyst = bss_conf->cqm_rssi_hyst; + if (!connect) { SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P1); SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, connect); @@ -805,15 +811,15 @@ void rtw_fw_beacon_filter_config(struct rtw_dev *rtwdev, bool connect, rtw_fw_send_h2c_command(rtwdev, h2c_pkt); memset(h2c_pkt, 0, sizeof(h2c_pkt)); - threshold = clamp_t(s32, threshold, rssi_min, rssi_max); + thold = clamp_t(s32, thold + rssi_offset, rssi_min, rssi_max); SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P1); SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, connect); SET_BCN_FILTER_OFFLOAD_P1_OFFLOAD_MODE(h2c_pkt, BCN_FILTER_OFFLOAD_MODE_DEFAULT); - SET_BCN_FILTER_OFFLOAD_P1_THRESHOLD(h2c_pkt, (u8)threshold); + SET_BCN_FILTER_OFFLOAD_P1_THRESHOLD(h2c_pkt, thold); SET_BCN_FILTER_OFFLOAD_P1_BCN_LOSS_CNT(h2c_pkt, BCN_LOSS_CNT); SET_BCN_FILTER_OFFLOAD_P1_MACID(h2c_pkt, si->mac_id); - SET_BCN_FILTER_OFFLOAD_P1_HYST(h2c_pkt, bss_conf->cqm_rssi_hyst); + SET_BCN_FILTER_OFFLOAD_P1_HYST(h2c_pkt, hyst); SET_BCN_FILTER_OFFLOAD_P1_BCN_INTERVAL(h2c_pkt, bss_conf->beacon_int); rtw_fw_send_h2c_command(rtwdev, h2c_pkt); } diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h index 84e47c71ea12..e999c24e4634 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.h +++ b/drivers/net/wireless/realtek/rtw88/fw.h @@ -29,6 +29,8 @@ #define BCN_FILTER_CONNECTION_LOSS 1 #define BCN_FILTER_CONNECTED 2 #define BCN_FILTER_NOTIFY_BEACON_LOSS 3 +#define RTW_DEFAULT_CQM_THOLD -70 +#define RTW_DEFAULT_CQM_HYST 4 #define SCAN_NOTIFY_TIMEOUT msecs_to_jiffies(10) diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c index 0c1c1ff31085..0dba8aae7716 100644 --- a/drivers/net/wireless/realtek/rtw88/mac.c +++ b/drivers/net/wireless/realtek/rtw88/mac.c @@ -943,6 +943,12 @@ static int __rtw_download_firmware_legacy(struct rtw_dev *rtwdev, { int ret = 0; + /* reset firmware if still present */ + if (rtwdev->chip->id == RTW_CHIP_TYPE_8703B && + rtw_read8_mask(rtwdev, REG_MCUFW_CTRL, BIT_RAM_DL_SEL)) { + rtw_write8(rtwdev, REG_MCUFW_CTRL, 0x00); + } + en_download_firmware_legacy(rtwdev, true); ret = download_firmware_legacy(rtwdev, fw->firmware->data, fw->firmware->size); en_download_firmware_legacy(rtwdev, false); @@ -1033,14 +1039,15 @@ static void __rtw_mac_flush_prio_queue(struct rtw_dev *rtwdev, msleep(20); } - /* priority queue is still not empty, throw a warning, + /* priority queue is still not empty, throw a debug message * * Note that if we want to flush the tx queue when having a lot of * traffic (ex, 100Mbps up), some of the packets could be dropped. * And it requires like ~2secs to flush the full priority queue. 
*/ if (!drop) - rtw_warn(rtwdev, "timed out to flush queue %d\n", prio_queue); + rtw_dbg(rtwdev, RTW_DBG_UNEXP, + "timed out to flush queue %d\n", prio_queue); } static void rtw_mac_flush_prio_queues(struct rtw_dev *rtwdev, diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c index 7af5bf7fe5b6..0acebbfa13c4 100644 --- a/drivers/net/wireless/realtek/rtw88/mac80211.c +++ b/drivers/net/wireless/realtek/rtw88/mac80211.c @@ -386,6 +386,8 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw, rtw_coex_media_status_notify(rtwdev, vif->cfg.assoc); if (rtw_bf_support) rtw_bf_assoc(rtwdev, vif, conf); + + rtw_fw_beacon_filter_config(rtwdev, true, vif); } else { rtw_leave_lps(rtwdev); rtw_bf_disassoc(rtwdev, vif, conf); diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index ffba6b88f392..7ab7a988b123 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -227,9 +227,6 @@ static void rtw_watch_dog_work(struct work_struct *work) else clear_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags); - rtw_coex_wl_status_check(rtwdev); - rtw_coex_query_bt_hid_list(rtwdev); - if (busy_traffic != test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags)) rtw_coex_wl_status_change_notify(rtwdev, 0); @@ -257,6 +254,8 @@ static void rtw_watch_dog_work(struct work_struct *work) /* make sure BB/RF is working for dynamic mech */ rtw_leave_lps(rtwdev); + rtw_coex_wl_status_check(rtwdev); + rtw_coex_query_bt_hid_list(rtwdev); rtw_phy_dynamic_mechanism(rtwdev); @@ -2204,6 +2203,7 @@ EXPORT_SYMBOL(rtw_core_deinit); int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw) { + bool sta_mode_only = rtwdev->hci.type == RTW_HCI_TYPE_SDIO; struct rtw_hal *hal = &rtwdev->hal; int max_tx_headroom = 0; int ret; @@ -2232,10 +2232,12 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw) ieee80211_hw_set(hw, TX_AMSDU); ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); - hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_AP) | - BIT(NL80211_IFTYPE_ADHOC) | - BIT(NL80211_IFTYPE_MESH_POINT); + if (sta_mode_only) + hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); + else + hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_ADHOC); hw->wiphy->available_antennas_tx = hal->antenna_tx; hw->wiphy->available_antennas_rx = hal->antenna_rx; @@ -2246,7 +2248,7 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw) hw->wiphy->max_scan_ssids = RTW_SCAN_MAX_SSIDS; hw->wiphy->max_scan_ie_len = rtw_get_max_scan_ie_len(rtwdev); - if (rtwdev->chip->id == RTW_CHIP_TYPE_8822C) { + if (!sta_mode_only && rtwdev->chip->id == RTW_CHIP_TYPE_8822C) { hw->wiphy->iface_combinations = rtw_iface_combs; hw->wiphy->n_iface_combinations = ARRAY_SIZE(rtw_iface_combs); } diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h index e14d1da43940..49894331f7b4 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -187,6 +187,7 @@ enum rtw_chip_type { RTW_CHIP_TYPE_8822C, RTW_CHIP_TYPE_8723D, RTW_CHIP_TYPE_8821C, + RTW_CHIP_TYPE_8703B, }; enum rtw_tx_queue_type { @@ -1700,11 +1701,13 @@ struct rtw_dm_info { s8 delta_power_index[RTW_RF_PATH_MAX]; s8 delta_power_index_last[RTW_RF_PATH_MAX]; u8 default_ofdm_index; + u8 default_cck_index; bool pwr_trk_triggered; bool pwr_trk_init_trigger; struct 
ewma_thermal avg_thermal[RTW_RF_PATH_MAX]; s8 txagc_remnant_cck; s8 txagc_remnant_ofdm; + u8 rx_cck_agc_report_type; /* backup dack results for each path and I/Q */ u32 dack_adck[RTW_RF_PATH_MAX]; diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c index 9986a4cb37eb..7a093f3d5f74 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.c +++ b/drivers/net/wireless/realtek/rtw88/pci.c @@ -729,7 +729,8 @@ static void __pci_flush_queue(struct rtw_dev *rtwdev, u8 pci_q, bool drop) } if (!drop) - rtw_warn(rtwdev, "timed out to flush pci tx ring[%d]\n", pci_q); + rtw_dbg(rtwdev, RTW_DBG_UNEXP, + "timed out to flush pci tx ring[%d]\n", pci_q); } static void __rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 pci_queues, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8703b.c b/drivers/net/wireless/realtek/rtw88/rtw8703b.c new file mode 100644 index 000000000000..8919f9e11f03 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8703b.c @@ -0,0 +1,2109 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright Fiona Klute <[email protected]> */ + +#include <linux/of_net.h> +#include "main.h" +#include "coex.h" +#include "debug.h" +#include "mac.h" +#include "phy.h" +#include "reg.h" +#include "rx.h" +#include "rtw8703b.h" +#include "rtw8703b_tables.h" +#include "rtw8723x.h" + +#define BIT_MASK_TXQ_INIT (BIT(7)) +#define WLAN_RL_VAL 0x3030 +/* disable BAR */ +#define WLAN_BAR_VAL 0x0201ffff +#define WLAN_PIFS_VAL 0 +#define WLAN_RX_PKT_LIMIT 0x18 +#define WLAN_SLOT_TIME 0x09 +#define WLAN_SPEC_SIFS 0x100a +#define WLAN_MAX_AGG_NR 0x1f +#define WLAN_AMPDU_MAX_TIME 0x70 + +/* unit is 32us */ +#define TBTT_PROHIBIT_SETUP_TIME 0x04 +#define TBTT_PROHIBIT_HOLD_TIME 0x80 +#define TBTT_PROHIBIT_HOLD_TIME_STOP_BCN 0x64 + +/* raw pkt_stat->drv_info_sz is in unit of 8-bytes */ +#define RX_DRV_INFO_SZ_UNIT_8703B 8 + +#define TRANS_SEQ_END \ + 0xFFFF, \ + RTW_PWR_CUT_ALL_MSK, \ + RTW_PWR_INTF_ALL_MSK, \ + 0, \ + RTW_PWR_CMD_END, 0, 0 + +/* rssi in percentage % (dbm = % - 100) */ +/* These are used to select simple signal quality levels, might need + * tweaking. Same for rf_para tables below. + */ +static const u8 wl_rssi_step_8703b[] = {60, 50, 44, 30}; +static const u8 bt_rssi_step_8703b[] = {30, 30, 30, 30}; +static const struct coex_5g_afh_map afh_5g_8703b[] = { {0, 0, 0} }; + +/* Actually decreasing wifi TX power/RX gain isn't implemented in + * rtw8703b, but hopefully adjusting the BT side helps. 
+ */ +static const struct coex_rf_para rf_para_tx_8703b[] = { + {0, 0, false, 7}, /* for normal */ + {0, 10, false, 7}, /* for WL-CPT */ + {1, 0, true, 4}, + {1, 2, true, 4}, + {1, 10, true, 4}, + {1, 15, true, 4} +}; + +static const struct coex_rf_para rf_para_rx_8703b[] = { + {0, 0, false, 7}, /* for normal */ + {0, 10, false, 7}, /* for WL-CPT */ + {1, 0, true, 5}, + {1, 2, true, 5}, + {1, 10, true, 5}, + {1, 15, true, 5} +}; + +static const u32 rtw8703b_ofdm_swing_table[] = { + 0x0b40002d, /* 0, -15.0dB */ + 0x0c000030, /* 1, -14.5dB */ + 0x0cc00033, /* 2, -14.0dB */ + 0x0d800036, /* 3, -13.5dB */ + 0x0e400039, /* 4, -13.0dB */ + 0x0f00003c, /* 5, -12.5dB */ + 0x10000040, /* 6, -12.0dB */ + 0x11000044, /* 7, -11.5dB */ + 0x12000048, /* 8, -11.0dB */ + 0x1300004c, /* 9, -10.5dB */ + 0x14400051, /* 10, -10.0dB */ + 0x15800056, /* 11, -9.5dB */ + 0x16c0005b, /* 12, -9.0dB */ + 0x18000060, /* 13, -8.5dB */ + 0x19800066, /* 14, -8.0dB */ + 0x1b00006c, /* 15, -7.5dB */ + 0x1c800072, /* 16, -7.0dB */ + 0x1e400079, /* 17, -6.5dB */ + 0x20000080, /* 18, -6.0dB */ + 0x22000088, /* 19, -5.5dB */ + 0x24000090, /* 20, -5.0dB */ + 0x26000098, /* 21, -4.5dB */ + 0x288000a2, /* 22, -4.0dB */ + 0x2ac000ab, /* 23, -3.5dB */ + 0x2d4000b5, /* 24, -3.0dB */ + 0x300000c0, /* 25, -2.5dB */ + 0x32c000cb, /* 26, -2.0dB */ + 0x35c000d7, /* 27, -1.5dB */ + 0x390000e4, /* 28, -1.0dB */ + 0x3c8000f2, /* 29, -0.5dB */ + 0x40000100, /* 30, +0dB */ + 0x43c0010f, /* 31, +0.5dB */ + 0x47c0011f, /* 32, +1.0dB */ + 0x4c000130, /* 33, +1.5dB */ + 0x50800142, /* 34, +2.0dB */ + 0x55400155, /* 35, +2.5dB */ + 0x5a400169, /* 36, +3.0dB */ + 0x5fc0017f, /* 37, +3.5dB */ + 0x65400195, /* 38, +4.0dB */ + 0x6b8001ae, /* 39, +4.5dB */ + 0x71c001c7, /* 40, +5.0dB */ + 0x788001e2, /* 41, +5.5dB */ + 0x7f8001fe /* 42, +6.0dB */ +}; + +static const u32 rtw8703b_cck_pwr_regs[] = { + 0x0a22, 0x0a23, 0x0a24, 0x0a25, 0x0a26, 0x0a27, 0x0a28, 0x0a29, + 0x0a9a, 0x0a9b, 0x0a9c, 0x0a9d, 0x0aa0, 0x0aa1, 0x0aa2, 0x0aa3, +}; + +static const u8 rtw8703b_cck_swing_table[][16] = { + {0x44, 0x42, 0x3C, 0x33, 0x28, 0x1C, 0x13, 0x0B, 0x05, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-16dB*/ + {0x48, 0x46, 0x3F, 0x36, 0x2A, 0x1E, 0x14, 0x0B, 0x05, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-15.5dB*/ + {0x4D, 0x4A, 0x43, 0x39, 0x2C, 0x20, 0x15, 0x0C, 0x06, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-15dB*/ + {0x51, 0x4F, 0x47, 0x3C, 0x2F, 0x22, 0x16, 0x0D, 0x06, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-14.5dB*/ + {0x56, 0x53, 0x4B, 0x40, 0x32, 0x24, 0x17, 0x0E, 0x06, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-14dB*/ + {0x5B, 0x58, 0x50, 0x43, 0x35, 0x26, 0x19, 0x0E, 0x07, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-13.5dB*/ + {0x60, 0x5D, 0x54, 0x47, 0x38, 0x28, 0x1A, 0x0F, 0x07, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-13dB*/ + {0x66, 0x63, 0x59, 0x4C, 0x3B, 0x2B, 0x1C, 0x10, 0x08, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-12.5dB*/ + {0x6C, 0x69, 0x5F, 0x50, 0x3F, 0x2D, 0x1E, 0x11, 0x08, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-12dB*/ + {0x73, 0x6F, 0x64, 0x55, 0x42, 0x30, 0x1F, 0x12, 0x08, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-11.5dB*/ + {0x79, 0x76, 0x6A, 0x5A, 0x46, 0x33, 0x21, 0x13, 0x09, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-11dB*/ + {0x81, 0x7C, 0x71, 0x5F, 0x4A, 0x36, 0x23, 0x14, 0x0A, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-10.5dB*/ + {0x88, 0x84, 0x77, 0x65, 0x4F, 0x39, 0x25, 0x15, 0x0A, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-10dB*/ + {0x90, 0x8C, 0x7E, 0x6B, 
0x54, 0x3C, 0x27, 0x17, 0x0B, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-9.5dB*/ + {0x99, 0x94, 0x86, 0x71, 0x58, 0x40, 0x2A, 0x18, 0x0B, 0x04, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-9dB*/ + {0xA2, 0x9D, 0x8E, 0x78, 0x5E, 0x43, 0x2C, 0x19, 0x0C, 0x04, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-8.5dB*/ + {0xAC, 0xA6, 0x96, 0x7F, 0x63, 0x47, 0x2F, 0x1B, 0x0D, 0x04, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-8dB*/ + {0xB6, 0xB0, 0x9F, 0x87, 0x69, 0x4C, 0x32, 0x1D, 0x0D, 0x04, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-7.5dB*/ + {0xC1, 0xBA, 0xA8, 0x8F, 0x6F, 0x50, 0x35, 0x1E, 0x0E, 0x04, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-7dB*/ + {0xCC, 0xC5, 0xB2, 0x97, 0x76, 0x55, 0x38, 0x20, 0x0F, 0x05, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-6.5dB*/ + {0xD8, 0xD1, 0xBD, 0xA0, 0x7D, 0x5A, 0x3B, 0x22, 0x10, 0x05, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} /*-6dB*/ +}; + +#define RTW_OFDM_SWING_TABLE_SIZE ARRAY_SIZE(rtw8703b_ofdm_swing_table) +#define RTW_CCK_SWING_TABLE_SIZE ARRAY_SIZE(rtw8703b_cck_swing_table) + +static const struct rtw_pwr_seq_cmd trans_pre_enable_8703b[] = { + /* set up external crystal (XTAL) */ + {REG_PAD_CTRL1 + 2, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(7), BIT(7)}, + /* set CLK_REQ to high active */ + {0x0069, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(5), BIT(5)}, + /* unlock ISO/CLK/power control register */ + {REG_RSV_CTRL, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xff, 0}, + {TRANS_SEQ_END}, +}; + +static const struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8703b[] = { + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(7), 0}, + {TRANS_SEQ_END}, +}; + +static const struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8703b[] = { + {0x0023, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(4), BIT(4)}, + {0x0007, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_SDIO_MSK | RTW_PWR_INTF_USB_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0x20}, + {0x0006, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), 0}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(7), BIT(7)}, + {TRANS_SEQ_END}, +}; + +static const struct rtw_pwr_seq_cmd trans_cardemu_to_act_8703b[] = { + {0x0020, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), BIT(0)}, + {0x0067, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(4), 0}, + {0x0001, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_DELAY, 1, RTW_PWR_DELAY_MS}, + {0x0000, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(5), 0}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3) | BIT(2)), 0}, + {0x0075, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_PCI_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), BIT(0)}, + {0x0004, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_PCI_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(3), BIT(3)}, + {0x0004, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_PCI_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(3), 0}, + /* wait for power ready */ + {0x0006, + RTW_PWR_CUT_ALL_MSK, + 
RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_POLLING, BIT(1), BIT(1)}, + {0x0075, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_PCI_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), 0}, + {0x0006, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), BIT(0)}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(7), 0}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3)), 0}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), BIT(0)}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_POLLING, BIT(0), 0}, + {0x0010, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(6), BIT(6)}, + {0x0049, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), BIT(1)}, + {0x0063, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), BIT(1)}, + {0x0062, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), 0}, + {0x0058, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), BIT(0)}, + {0x005A, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), BIT(1)}, + {0x0068, + RTW_PWR_CUT_TEST_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(3), BIT(3)}, + {0x0069, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(6), BIT(6)}, + {TRANS_SEQ_END}, +}; + +static const struct rtw_pwr_seq_cmd trans_act_to_cardemu_8703b[] = { + {0x001f, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xff, 0}, + {0x0049, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), 0}, + {0x0006, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), BIT(0)}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), BIT(1)}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_POLLING, BIT(1), 0}, + {0x0010, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(6), 0}, + {0x0000, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(5), BIT(5)}, + {0x0020, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), 0}, + {TRANS_SEQ_END}, +}; + +static const struct rtw_pwr_seq_cmd trans_act_to_reset_mcu_8703b[] = { + {REG_SYS_FUNC_EN + 1, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT_FEN_CPUEN, 0}, + /* reset MCU ready */ + {REG_MCUFW_CTRL, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xff, 0}, + /* reset MCU IO wrapper */ + {REG_RSV_CTRL + 1, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), 0}, + {REG_RSV_CTRL + 1, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), 1}, + {TRANS_SEQ_END}, +}; + +static const struct rtw_pwr_seq_cmd trans_act_to_lps_8703b[] = { + {0x0301, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, 
+ RTW_PWR_CMD_WRITE, 0xff, 0xff}, + {0x0522, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xff, 0xff}, + {0x05f8, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_POLLING, 0xff, 0}, + {0x05f9, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_POLLING, 0xff, 0}, + {0x05fa, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_POLLING, 0xff, 0}, + {0x05fb, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_POLLING, 0xff, 0}, + {0x0002, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), 0}, + {0x0002, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_DELAY, 0, RTW_PWR_DELAY_US}, + {0x0002, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), 0}, + {0x0100, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xff, 0x03}, + {0x0101, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), 0}, + {0x0093, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xff, 0}, + {0x0553, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(5), BIT(5)}, + {TRANS_SEQ_END}, +}; + +static const struct rtw_pwr_seq_cmd *card_enable_flow_8703b[] = { + trans_pre_enable_8703b, + trans_carddis_to_cardemu_8703b, + trans_cardemu_to_act_8703b, + NULL +}; + +static const struct rtw_pwr_seq_cmd *card_disable_flow_8703b[] = { + trans_act_to_lps_8703b, + trans_act_to_reset_mcu_8703b, + trans_act_to_cardemu_8703b, + trans_cardemu_to_carddis_8703b, + NULL +}; + +static const struct rtw_rfe_def rtw8703b_rfe_defs[] = { + [0] = { .phy_pg_tbl = &rtw8703b_bb_pg_tbl, + .txpwr_lmt_tbl = &rtw8703b_txpwr_lmt_tbl,}, +}; + +static const struct rtw_page_table page_table_8703b[] = { + {12, 2, 2, 0, 1}, + {12, 2, 2, 0, 1}, + {12, 2, 2, 0, 1}, + {12, 2, 2, 0, 1}, + {12, 2, 2, 0, 1}, +}; + +static const struct rtw_rqpn rqpn_table_8703b[] = { + {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL, + RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW, + RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH}, + {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL, + RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW, + RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH}, + {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL, + RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_HIGH, + RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH}, + {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL, + RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW, + RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH}, + {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL, + RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW, + RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH}, +}; + +/* Default power index table for RTL8703B, used if EFUSE does not + * contain valid data. Replaces EFUSE data from offset 0x10 (start of + * txpwr_idx_table). 
+ */ +static const u8 rtw8703b_txpwr_idx_table[] = { + 0x2D, 0x2D, 0x2D, 0x2D, 0x2D, 0x2D, + 0x2D, 0x2D, 0x2D, 0x2D, 0x2D, 0x02 +}; + +static void try_mac_from_devicetree(struct rtw_dev *rtwdev) +{ + struct device_node *node = rtwdev->dev->of_node; + struct rtw_efuse *efuse = &rtwdev->efuse; + int ret; + + if (node) { + ret = of_get_mac_address(node, efuse->addr); + if (ret == 0) { + rtw_dbg(rtwdev, RTW_DBG_EFUSE, + "got wifi mac address from DT: %pM\n", + efuse->addr); + } + } +} + +#define DBG_EFUSE_FIX(rtwdev, name) \ + rtw_dbg(rtwdev, RTW_DBG_EFUSE, "Fixed invalid EFUSE value: " \ + # name "=0x%x\n", rtwdev->efuse.name) + +static int rtw8703b_read_efuse(struct rtw_dev *rtwdev, u8 *log_map) +{ + struct rtw_efuse *efuse = &rtwdev->efuse; + u8 *pwr = (u8 *)efuse->txpwr_idx_table; + bool valid = false; + int ret; + + ret = rtw8723x_read_efuse(rtwdev, log_map); + if (ret != 0) + return ret; + + if (!is_valid_ether_addr(efuse->addr)) + try_mac_from_devicetree(rtwdev); + + /* If TX power index table in EFUSE is invalid, fall back to + * built-in table. + */ + for (int i = 0; i < ARRAY_SIZE(rtw8703b_txpwr_idx_table); i++) + if (pwr[i] != 0xff) { + valid = true; + break; + } + if (!valid) { + for (int i = 0; i < ARRAY_SIZE(rtw8703b_txpwr_idx_table); i++) + pwr[i] = rtw8703b_txpwr_idx_table[i]; + rtw_dbg(rtwdev, RTW_DBG_EFUSE, + "Replaced invalid EFUSE TX power index table."); + rtw8723x_debug_txpwr_limit(rtwdev, + efuse->txpwr_idx_table, 2); + } + + /* Override invalid antenna settings. */ + if (efuse->bt_setting == 0xff) { + /* shared antenna */ + efuse->bt_setting |= BIT(0); + /* RF path A */ + efuse->bt_setting &= ~BIT(6); + DBG_EFUSE_FIX(rtwdev, bt_setting); + } + + /* Override invalid board options: The coex code incorrectly + * assumes that if bits 6 & 7 are set the board doesn't + * support coex. Regd is also derived from rf_board_option and + * should be 0 if there's no valid data. + */ + if (efuse->rf_board_option == 0xff) { + efuse->regd = 0; + efuse->rf_board_option &= GENMASK(5, 0); + DBG_EFUSE_FIX(rtwdev, rf_board_option); + } + + /* Override invalid crystal cap setting, default comes from + * vendor driver. Chip specific. + */ + if (efuse->crystal_cap == 0xff) { + efuse->crystal_cap = 0x20; + DBG_EFUSE_FIX(rtwdev, crystal_cap); + } + + return 0; +} + +static void rtw8703b_pwrtrack_init(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u8 path; + + /* TODO: The vendor driver selects these using tables in + * halrf_powertracking_ce.c, functions are called + * get_swing_index and get_cck_swing_index. There the current + * fixed values are only the defaults in case no match is + * found. 
+ */ + dm_info->default_ofdm_index = 30; + dm_info->default_cck_index = 20; + + for (path = RF_PATH_A; path < rtwdev->hal.rf_path_num; path++) { + ewma_thermal_init(&dm_info->avg_thermal[path]); + dm_info->delta_power_index[path] = 0; + } + dm_info->pwr_trk_triggered = false; + dm_info->pwr_trk_init_trigger = true; + dm_info->thermal_meter_k = rtwdev->efuse.thermal_meter_k; + dm_info->txagc_remnant_cck = 0; + dm_info->txagc_remnant_ofdm = 0; +} + +static void rtw8703b_phy_set_param(struct rtw_dev *rtwdev) +{ + u8 xtal_cap = rtwdev->efuse.crystal_cap & 0x3F; + + /* power on BB/RF domain */ + rtw_write16_set(rtwdev, REG_SYS_FUNC_EN, + BIT_FEN_EN_25_1 | BIT_FEN_BB_GLB_RST | BIT_FEN_BB_RSTB); + rtw_write8_set(rtwdev, REG_RF_CTRL, + BIT_RF_EN | BIT_RF_RSTB | BIT_RF_SDM_RSTB); + rtw_write_rf(rtwdev, RF_PATH_A, RF_WLINT, RFREG_MASK, 0x0780); + rtw_write8(rtwdev, REG_AFE_CTRL1 + 1, 0x80); + + rtw_phy_load_tables(rtwdev); + + rtw_write32_clr(rtwdev, REG_RCR, BIT_RCR_ADF); + /* 0xff is from vendor driver, rtw8723d uses + * BIT_HIQ_NO_LMT_EN_ROOT. Comment in vendor driver: "Packet + * in Hi Queue Tx immediately". I wonder if setting all bits + * is really necessary. + */ + rtw_write8_set(rtwdev, REG_HIQ_NO_LMT_EN, 0xff); + rtw_write16_set(rtwdev, REG_AFE_CTRL_4, BIT_CK320M_AFE_EN | BIT_EN_SYN); + + rtw_write32_mask(rtwdev, REG_AFE_CTRL3, BIT_MASK_XTAL, + xtal_cap | (xtal_cap << 6)); + rtw_write32_set(rtwdev, REG_FPGA0_RFMOD, BIT_CCKEN | BIT_OFDMEN); + + /* Init EDCA */ + rtw_write16(rtwdev, REG_SPEC_SIFS, WLAN_SPEC_SIFS); + rtw_write16(rtwdev, REG_MAC_SPEC_SIFS, WLAN_SPEC_SIFS); + rtw_write16(rtwdev, REG_SIFS, WLAN_SPEC_SIFS); /* CCK */ + rtw_write16(rtwdev, REG_SIFS + 2, WLAN_SPEC_SIFS); /* OFDM */ + /* TXOP */ + rtw_write32(rtwdev, REG_EDCA_VO_PARAM, 0x002FA226); + rtw_write32(rtwdev, REG_EDCA_VI_PARAM, 0x005EA324); + rtw_write32(rtwdev, REG_EDCA_BE_PARAM, 0x005EA42B); + rtw_write32(rtwdev, REG_EDCA_BK_PARAM, 0x0000A44F); + + /* Init retry */ + rtw_write8(rtwdev, REG_ACKTO, 0x40); + + /* Set up RX aggregation. sdio.c also sets DMA mode, but not + * the burst parameters. 
+ */ + rtw_write8(rtwdev, REG_RXDMA_MODE, + BIT_DMA_MODE | + FIELD_PREP_CONST(BIT_MASK_AGG_BURST_NUM, AGG_BURST_NUM) | + FIELD_PREP_CONST(BIT_MASK_AGG_BURST_SIZE, AGG_BURST_SIZE)); + + /* Init beacon parameters */ + rtw_write8(rtwdev, REG_BCN_CTRL, + BIT_DIS_TSF_UDT | BIT_EN_BCN_FUNCTION | BIT_EN_TXBCN_RPT); + rtw_write8(rtwdev, REG_TBTT_PROHIBIT, TBTT_PROHIBIT_SETUP_TIME); + rtw_write8(rtwdev, REG_TBTT_PROHIBIT + 1, + TBTT_PROHIBIT_HOLD_TIME_STOP_BCN & 0xFF); + rtw_write8(rtwdev, REG_TBTT_PROHIBIT + 2, + (rtw_read8(rtwdev, REG_TBTT_PROHIBIT + 2) & 0xF0) + | (TBTT_PROHIBIT_HOLD_TIME_STOP_BCN >> 8)); + + /* configure packet burst */ + rtw_write8_set(rtwdev, REG_SINGLE_AMPDU_CTRL, BIT_EN_SINGLE_APMDU); + rtw_write8(rtwdev, REG_RX_PKT_LIMIT, WLAN_RX_PKT_LIMIT); + rtw_write8(rtwdev, REG_MAX_AGGR_NUM, WLAN_MAX_AGG_NR); + rtw_write8(rtwdev, REG_PIFS, WLAN_PIFS_VAL); + rtw_write8_clr(rtwdev, REG_FWHW_TXQ_CTRL, BIT_MASK_TXQ_INIT); + rtw_write8(rtwdev, REG_AMPDU_MAX_TIME, WLAN_AMPDU_MAX_TIME); + + rtw_write8(rtwdev, REG_SLOT, WLAN_SLOT_TIME); + rtw_write16(rtwdev, REG_RETRY_LIMIT, WLAN_RL_VAL); + rtw_write32(rtwdev, REG_BAR_MODE_CTRL, WLAN_BAR_VAL); + rtw_write16(rtwdev, REG_ATIMWND, 0x2); + + rtw_phy_init(rtwdev); + + if (rtw_read32_mask(rtwdev, REG_BB_AMP, BIT_MASK_RX_LNA) != 0) { + rtwdev->dm_info.rx_cck_agc_report_type = 1; + } else { + rtwdev->dm_info.rx_cck_agc_report_type = 0; + rtw_warn(rtwdev, "unexpected cck agc report type"); + } + + rtw8723x_lck(rtwdev); + + rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, 0x50); + rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, 0x20); + + rtw8703b_pwrtrack_init(rtwdev); +} + +static bool rtw8703b_check_spur_ov_thres(struct rtw_dev *rtwdev, + u32 freq, u32 thres) +{ + bool ret = false; + + rtw_write32(rtwdev, REG_ANALOG_P4, DIS_3WIRE); + rtw_write32(rtwdev, REG_PSDFN, freq); + rtw_write32(rtwdev, REG_PSDFN, START_PSD | freq); + + msleep(30); + if (rtw_read32(rtwdev, REG_PSDRPT) >= thres) + ret = true; + + rtw_write32(rtwdev, REG_PSDFN, freq); + rtw_write32(rtwdev, REG_ANALOG_P4, EN_3WIRE); + + return ret; +} + +static void rtw8703b_cfg_notch(struct rtw_dev *rtwdev, u8 channel, bool notch) +{ + if (!notch) { + rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_MASK_RXDSP, 0x1f); + rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_EN_RXDSP, 0x0); + rtw_write32(rtwdev, REG_OFDM1_CSI1, 0x00000000); + rtw_write32(rtwdev, REG_OFDM1_CSI2, 0x00000000); + rtw_write32(rtwdev, REG_OFDM1_CSI3, 0x00000000); + rtw_write32(rtwdev, REG_OFDM1_CSI4, 0x00000000); + rtw_write32_mask(rtwdev, REG_OFDM1_CFOTRK, BIT_EN_CFOTRK, 0x0); + return; + } + + switch (channel) { + case 5: + fallthrough; + case 13: + rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_MASK_RXDSP, 0xb); + rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_EN_RXDSP, 0x1); + rtw_write32(rtwdev, REG_OFDM1_CSI1, 0x06000000); + rtw_write32(rtwdev, REG_OFDM1_CSI2, 0x00000000); + rtw_write32(rtwdev, REG_OFDM1_CSI3, 0x00000000); + rtw_write32(rtwdev, REG_OFDM1_CSI4, 0x00000000); + rtw_write32_mask(rtwdev, REG_OFDM1_CFOTRK, BIT_EN_CFOTRK, 0x1); + break; + case 6: + rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_MASK_RXDSP, 0x4); + rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_EN_RXDSP, 0x1); + rtw_write32(rtwdev, REG_OFDM1_CSI1, 0x00000600); + rtw_write32(rtwdev, REG_OFDM1_CSI2, 0x00000000); + rtw_write32(rtwdev, REG_OFDM1_CSI3, 0x00000000); + rtw_write32(rtwdev, REG_OFDM1_CSI4, 0x00000000); + rtw_write32_mask(rtwdev, REG_OFDM1_CFOTRK, BIT_EN_CFOTRK, 0x1); + break; + case 7: + rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, 
BIT_MASK_RXDSP, 0x3); + rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_EN_RXDSP, 0x1); + rtw_write32(rtwdev, REG_OFDM1_CSI1, 0x00000000); + rtw_write32(rtwdev, REG_OFDM1_CSI2, 0x00000000); + rtw_write32(rtwdev, REG_OFDM1_CSI3, 0x00000000); + rtw_write32(rtwdev, REG_OFDM1_CSI4, 0x06000000); + rtw_write32_mask(rtwdev, REG_OFDM1_CFOTRK, BIT_EN_CFOTRK, 0x1); + break; + case 8: + rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_MASK_RXDSP, 0xa); + rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_EN_RXDSP, 0x1); + rtw_write32(rtwdev, REG_OFDM1_CSI1, 0x00000000); + rtw_write32(rtwdev, REG_OFDM1_CSI2, 0x00000000); + rtw_write32(rtwdev, REG_OFDM1_CSI3, 0x00000000); + rtw_write32(rtwdev, REG_OFDM1_CSI4, 0x00000380); + rtw_write32_mask(rtwdev, REG_OFDM1_CFOTRK, BIT_EN_CFOTRK, 0x1); + break; + case 14: + rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_MASK_RXDSP, 0x5); + rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_EN_RXDSP, 0x1); + rtw_write32(rtwdev, REG_OFDM1_CSI1, 0x00000000); + rtw_write32(rtwdev, REG_OFDM1_CSI2, 0x00000000); + rtw_write32(rtwdev, REG_OFDM1_CSI3, 0x00000000); + rtw_write32(rtwdev, REG_OFDM1_CSI4, 0x00180000); + rtw_write32_mask(rtwdev, REG_OFDM1_CFOTRK, BIT_EN_CFOTRK, 0x1); + break; + default: + rtw_warn(rtwdev, + "Bug: Notch filter enable called for channel %u!", + channel); + rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_EN_RXDSP, 0x0); + rtw_write32_mask(rtwdev, REG_OFDM1_CFOTRK, BIT_EN_CFOTRK, 0x0); + break; + } +} + +static void rtw8703b_spur_cal(struct rtw_dev *rtwdev, u8 channel) +{ + bool notch; + u32 freq; + + if (channel == 5) { + freq = FREQ_CH5; + } else if (channel == 6) { + freq = FREQ_CH6; + } else if (channel == 7) { + freq = FREQ_CH7; + } else if (channel == 8) { + freq = FREQ_CH8; + } else if (channel == 13) { + freq = FREQ_CH13; + } else if (channel == 14) { + freq = FREQ_CH14; + } else { + rtw8703b_cfg_notch(rtwdev, channel, false); + return; + } + + notch = rtw8703b_check_spur_ov_thres(rtwdev, freq, SPUR_THRES); + rtw8703b_cfg_notch(rtwdev, channel, notch); +} + +static void rtw8703b_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw) +{ + u32 rf_cfgch_a; + u32 rf_cfgch_b; + /* default value for 20M */ + u32 rf_rck = 0x00000C08; + + rf_cfgch_a = rtw_read_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK); + rf_cfgch_b = rtw_read_rf(rtwdev, RF_PATH_B, RF_CFGCH, RFREG_MASK); + + rf_cfgch_a &= ~RFCFGCH_CHANNEL_MASK; + rf_cfgch_b &= ~RFCFGCH_CHANNEL_MASK; + rf_cfgch_a |= (channel & RFCFGCH_CHANNEL_MASK); + rf_cfgch_b |= (channel & RFCFGCH_CHANNEL_MASK); + + rf_cfgch_a &= ~RFCFGCH_BW_MASK; + switch (bw) { + case RTW_CHANNEL_WIDTH_20: + rf_cfgch_a |= RFCFGCH_BW_20M; + break; + case RTW_CHANNEL_WIDTH_40: + rf_cfgch_a |= RFCFGCH_BW_40M; + rf_rck = 0x00000C4C; + break; + default: + break; + } + + rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK, rf_cfgch_a); + rtw_write_rf(rtwdev, RF_PATH_B, RF_CFGCH, RFREG_MASK, rf_cfgch_b); + + rtw_write_rf(rtwdev, RF_PATH_A, RF_RCK1, RFREG_MASK, rf_rck); + rtw8703b_spur_cal(rtwdev, channel); +} + +#define CCK_DFIR_NR_8703B 2 +static const struct rtw_backup_info cck_dfir_cfg[][CCK_DFIR_NR_8703B] = { + [0] = { + { .len = 4, .reg = REG_CCK_TXSF2, .val = 0x5A7DA0BD }, + { .len = 4, .reg = REG_CCK_DBG, .val = 0x0000223B }, + }, + [1] = { + { .len = 4, .reg = REG_CCK_TXSF2, .val = 0x00000000 }, + { .len = 4, .reg = REG_CCK_DBG, .val = 0x00000000 }, + }, +}; + +static void rtw8703b_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw, + u8 primary_ch_idx) +{ + const struct rtw_backup_info *cck_dfir; + int i; + + cck_dfir = channel <= 13 ? 
cck_dfir_cfg[0] : cck_dfir_cfg[1]; + + for (i = 0; i < CCK_DFIR_NR_8703B; i++, cck_dfir++) + rtw_write32(rtwdev, cck_dfir->reg, cck_dfir->val); + + switch (bw) { + case RTW_CHANNEL_WIDTH_20: + rtw_write32_mask(rtwdev, REG_FPGA0_RFMOD, BIT_MASK_RFMOD, 0x0); + rtw_write32_mask(rtwdev, REG_FPGA1_RFMOD, BIT_MASK_RFMOD, 0x0); + rtw_write32_mask(rtwdev, REG_OFDM0_TX_PSD_NOISE, + GENMASK(31, 20), 0x0); + rtw_write32(rtwdev, REG_BBRX_DFIR, 0x4A880000); + rtw_write32(rtwdev, REG_OFDM0_A_TX_AFE, 0x19F60000); + break; + case RTW_CHANNEL_WIDTH_40: + rtw_write32_mask(rtwdev, REG_FPGA0_RFMOD, BIT_MASK_RFMOD, 0x1); + rtw_write32_mask(rtwdev, REG_FPGA1_RFMOD, BIT_MASK_RFMOD, 0x1); + rtw_write32(rtwdev, REG_BBRX_DFIR, 0x40100000); + rtw_write32(rtwdev, REG_OFDM0_A_TX_AFE, 0x51F60000); + rtw_write32_mask(rtwdev, REG_CCK0_SYS, BIT_CCK_SIDE_BAND, + primary_ch_idx == RTW_SC_20_UPPER ? 1 : 0); + rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, 0xC00, + primary_ch_idx == RTW_SC_20_UPPER ? 2 : 1); + + rtw_write32_mask(rtwdev, REG_BB_PWR_SAV5_11N, GENMASK(27, 26), + primary_ch_idx == RTW_SC_20_UPPER ? 1 : 2); + break; + default: + break; + } +} + +static void rtw8703b_set_channel(struct rtw_dev *rtwdev, u8 channel, + u8 bw, u8 primary_chan_idx) +{ + rtw8703b_set_channel_rf(rtwdev, channel, bw); + rtw_set_channel_mac(rtwdev, channel, bw, primary_chan_idx); + rtw8703b_set_channel_bb(rtwdev, channel, bw, primary_chan_idx); +} + +/* Not all indices are valid, based on available data. None of the + * known valid values are positive, so use 0x7f as "invalid". + */ +#define LNA_IDX_INVALID 0x7f +static const s8 lna_gain_table[16] = { + -2, LNA_IDX_INVALID, LNA_IDX_INVALID, LNA_IDX_INVALID, + -6, LNA_IDX_INVALID, LNA_IDX_INVALID, -19, + -32, LNA_IDX_INVALID, -36, -42, + LNA_IDX_INVALID, LNA_IDX_INVALID, LNA_IDX_INVALID, -48, +}; + +static s8 get_cck_rx_pwr(struct rtw_dev *rtwdev, u8 lna_idx, u8 vga_idx) +{ + s8 lna_gain = 0; + + if (lna_idx < ARRAY_SIZE(lna_gain_table)) + lna_gain = lna_gain_table[lna_idx]; + + if (lna_gain >= 0) { + rtw_warn(rtwdev, "incorrect lna index (%d)\n", lna_idx); + return -120; + } + + return lna_gain - 2 * vga_idx; +} + +static void query_phy_status_cck(struct rtw_dev *rtwdev, u8 *phy_raw, + struct rtw_rx_pkt_stat *pkt_stat) +{ + struct phy_status_8703b *phy_status = (struct phy_status_8703b *)phy_raw; + u8 vga_idx = phy_status->cck_agc_rpt_ofdm_cfosho_a & VGA_BITS; + u8 lna_idx = phy_status->cck_agc_rpt_ofdm_cfosho_a & LNA_L_BITS; + s8 rx_power; + + if (rtwdev->dm_info.rx_cck_agc_report_type == 1) + lna_idx = FIELD_PREP(BIT_LNA_H_MASK, + phy_status->cck_rpt_b_ofdm_cfosho_b & LNA_H_BIT) + | FIELD_PREP(BIT_LNA_L_MASK, lna_idx); + else + lna_idx = FIELD_PREP(BIT_LNA_L_MASK, lna_idx); + rx_power = get_cck_rx_pwr(rtwdev, lna_idx, vga_idx); + + pkt_stat->rx_power[RF_PATH_A] = rx_power; + pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1); + rtwdev->dm_info.rssi[RF_PATH_A] = pkt_stat->rssi; + pkt_stat->signal_power = rx_power; +} + +static void query_phy_status_ofdm(struct rtw_dev *rtwdev, u8 *phy_raw, + struct rtw_rx_pkt_stat *pkt_stat) +{ + struct phy_status_8703b *phy_status = (struct phy_status_8703b *)phy_raw; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + s8 val_s8; + + val_s8 = phy_status->path_agc[RF_PATH_A].gain & 0x3F; + pkt_stat->rx_power[RF_PATH_A] = (val_s8 * 2) - 110; + pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1); + pkt_stat->rx_snr[RF_PATH_A] = (s8)(phy_status->path_rxsnr[RF_PATH_A] / 2); + + /* signal power reported by HW */ + val_s8 = 
phy_status->cck_sig_qual_ofdm_pwdb_all >> 1; + pkt_stat->signal_power = (val_s8 & 0x7f) - 110; + + pkt_stat->rx_evm[RF_PATH_A] = phy_status->stream_rxevm[RF_PATH_A]; + pkt_stat->cfo_tail[RF_PATH_A] = phy_status->path_cfotail[RF_PATH_A]; + + dm_info->curr_rx_rate = pkt_stat->rate; + dm_info->rssi[RF_PATH_A] = pkt_stat->rssi; + dm_info->rx_snr[RF_PATH_A] = pkt_stat->rx_snr[RF_PATH_A] >> 1; + /* convert to KHz (used only for debugfs) */ + dm_info->cfo_tail[RF_PATH_A] = (pkt_stat->cfo_tail[RF_PATH_A] * 5) >> 1; + + /* (EVM value as s8 / 2) is dbm, should usually be in -33 to 0 + * range. rx_evm_dbm needs the absolute (positive) value. + */ + val_s8 = (s8)pkt_stat->rx_evm[RF_PATH_A]; + val_s8 = clamp_t(s8, -val_s8 >> 1, 0, 64); + val_s8 &= 0x3F; /* 64->0: second path of 1SS rate is 64 */ + dm_info->rx_evm_dbm[RF_PATH_A] = val_s8; +} + +static void query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status, + struct rtw_rx_pkt_stat *pkt_stat) +{ + if (pkt_stat->rate <= DESC_RATE11M) + query_phy_status_cck(rtwdev, phy_status, pkt_stat); + else + query_phy_status_ofdm(rtwdev, phy_status, pkt_stat); +} + +static void rtw8703b_query_rx_desc(struct rtw_dev *rtwdev, u8 *rx_desc, + struct rtw_rx_pkt_stat *pkt_stat, + struct ieee80211_rx_status *rx_status) +{ + struct ieee80211_hdr *hdr; + u32 desc_sz = rtwdev->chip->rx_pkt_desc_sz; + u8 *phy_status = NULL; + + memset(pkt_stat, 0, sizeof(*pkt_stat)); + + pkt_stat->phy_status = GET_RX_DESC_PHYST(rx_desc); + pkt_stat->icv_err = GET_RX_DESC_ICV_ERR(rx_desc); + pkt_stat->crc_err = GET_RX_DESC_CRC32(rx_desc); + pkt_stat->decrypted = !GET_RX_DESC_SWDEC(rx_desc) && + GET_RX_DESC_ENC_TYPE(rx_desc) != RX_DESC_ENC_NONE; + pkt_stat->is_c2h = GET_RX_DESC_C2H(rx_desc); + pkt_stat->pkt_len = GET_RX_DESC_PKT_LEN(rx_desc); + pkt_stat->drv_info_sz = GET_RX_DESC_DRV_INFO_SIZE(rx_desc); + pkt_stat->shift = GET_RX_DESC_SHIFT(rx_desc); + pkt_stat->rate = GET_RX_DESC_RX_RATE(rx_desc); + pkt_stat->cam_id = GET_RX_DESC_MACID(rx_desc); + pkt_stat->ppdu_cnt = 0; + pkt_stat->tsf_low = GET_RX_DESC_TSFL(rx_desc); + + pkt_stat->drv_info_sz *= RX_DRV_INFO_SZ_UNIT_8703B; + + if (pkt_stat->is_c2h) + return; + + hdr = (struct ieee80211_hdr *)(rx_desc + desc_sz + pkt_stat->shift + + pkt_stat->drv_info_sz); + + pkt_stat->bw = GET_RX_DESC_BW(rx_desc); + + if (pkt_stat->phy_status) { + phy_status = rx_desc + desc_sz + pkt_stat->shift; + query_phy_status(rtwdev, phy_status, pkt_stat); + } + + rtw_rx_fill_rx_status(rtwdev, pkt_stat, hdr, rx_status, phy_status); + + /* Rtl8723cs driver checks for size < 14 or size > 8192 and + * simply drops the packet. Maybe this should go into + * rtw_rx_fill_rx_status()? 
+ */ + if (pkt_stat->pkt_len == 0) { + rx_status->flag |= RX_FLAG_NO_PSDU; + rtw_dbg(rtwdev, RTW_DBG_RX, "zero length packet"); + } +} + +#define ADDA_ON_VAL_8703B 0x03c00014 + +static +void rtw8703b_iqk_config_mac(struct rtw_dev *rtwdev, + const struct rtw8723x_iqk_backup_regs *backup) +{ + rtw_write8(rtwdev, rtw8723x_common.iqk_mac8_regs[0], 0x3F); + for (int i = 1; i < RTW8723X_IQK_MAC8_REG_NUM; i++) + rtw_write8(rtwdev, rtw8723x_common.iqk_mac8_regs[i], + backup->mac8[i] & (~BIT(3))); +} + +#define IQK_LTE_WRITE_VAL_8703B 0x00007700 +#define IQK_DELAY_TIME_8703B 4 + +static void rtw8703b_iqk_one_shot(struct rtw_dev *rtwdev, bool tx) +{ + u32 regval; + ktime_t t; + s64 dur; + int ret; + + /* enter IQK mode */ + rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, EN_IQK); + rtw8723x_iqk_config_lte_path_gnt(rtwdev, IQK_LTE_WRITE_VAL_8703B); + + /* One shot, LOK & IQK */ + rtw_write32(rtwdev, REG_IQK_AGC_PTS_11N, 0xf9000000); + rtw_write32(rtwdev, REG_IQK_AGC_PTS_11N, 0xf8000000); + + t = ktime_get(); + msleep(IQK_DELAY_TIME_8703B); + ret = read_poll_timeout(rtw_read32, regval, regval != 0, 1000, + 100000, false, rtwdev, + REG_IQK_RDY); + dur = ktime_us_delta(ktime_get(), t); + + if (ret) + rtw_warn(rtwdev, "[IQK] %s timed out after %lldus!\n", + tx ? "TX" : "RX", dur); + else + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[IQK] %s done after %lldus\n", + tx ? "TX" : "RX", dur); +} + +static void rtw8703b_iqk_txrx_path_post(struct rtw_dev *rtwdev, + const struct rtw8723x_iqk_backup_regs *backup) +{ + rtw8723x_iqk_restore_lte_path_gnt(rtwdev, backup); + rtw_write32(rtwdev, REG_BB_SEL_BTG, backup->bb_sel_btg); + + /* leave IQK mode */ + rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, RST_IQK); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTDBG, 0x800, 0x0); +} + +static u8 rtw8703b_iqk_check_tx_failed(struct rtw_dev *rtwdev) +{ + s32 tx_x, tx_y; + u32 tx_fail; + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0xeac = 0x%x\n", + rtw_read32(rtwdev, REG_IQK_RES_RY)); + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0xe94 = 0x%x, 0xe9c = 0x%x\n", + rtw_read32(rtwdev, REG_IQK_RES_TX), + rtw_read32(rtwdev, REG_IQK_RES_TY)); + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[IQK] 0xe90(before IQK) = 0x%x, 0xe98(after IQK) = 0x%x\n", + rtw_read32(rtwdev, REG_IQK_RDY), + rtw_read32(rtwdev, 0xe98)); + + tx_fail = rtw_read32_mask(rtwdev, REG_IQK_RES_RY, BIT_IQK_TX_FAIL); + tx_x = rtw_read32_mask(rtwdev, REG_IQK_RES_TX, BIT_MASK_RES_TX); + tx_y = rtw_read32_mask(rtwdev, REG_IQK_RES_TY, BIT_MASK_RES_TY); + + if (!tx_fail && tx_x != IQK_TX_X_ERR && tx_y != IQK_TX_Y_ERR) + return IQK_TX_OK; + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] A TX IQK failed\n"); + + return 0; +} + +static u8 rtw8703b_iqk_check_rx_failed(struct rtw_dev *rtwdev) +{ + s32 rx_x, rx_y; + u32 rx_fail; + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0xea4 = 0x%x, 0xeac = 0x%x\n", + rtw_read32(rtwdev, REG_IQK_RES_RX), + rtw_read32(rtwdev, REG_IQK_RES_RY)); + + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[IQK] 0xea0(before IQK) = 0x%x, 0xea8(after IQK) = 0x%x\n", + rtw_read32(rtwdev, 0xea0), + rtw_read32(rtwdev, 0xea8)); + + rx_fail = rtw_read32_mask(rtwdev, REG_IQK_RES_RY, BIT_IQK_RX_FAIL); + rx_x = rtw_read32_mask(rtwdev, REG_IQK_RES_RX, BIT_MASK_RES_RX); + rx_y = rtw_read32_mask(rtwdev, REG_IQK_RES_RY, BIT_MASK_RES_RY); + rx_y = abs(iqkxy_to_s32(rx_y)); + + if (!rx_fail && rx_x != IQK_RX_X_ERR && rx_y != IQK_RX_Y_ERR && + rx_x < IQK_RX_X_UPPER && rx_x > IQK_RX_X_LOWER && + rx_y < IQK_RX_Y_LMT) + return IQK_RX_OK; + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] A RX IQK failed\n"); + + return 0; +} + 
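Before the IQK helpers continue below, it may help to make the phy-status arithmetic from a little earlier in this file concrete: the CCK path looks up an LNA gain and subtracts 2 dB per VGA step, the OFDM path scales a 6-bit AGC gain as (gain * 2) - 110 dBm, and the comment at the top of the file gives the driver-wide convention "rssi in percentage % (dbm = % - 100)". The following is a minimal standalone sketch of that arithmetic only — it is not driver code, the helper names are invented here, and the gain table is copied from the hunk above (0x7f marking the unused indexes).

#include <stdio.h>

/* LNA gain table as in rtw8703b.c; 0x7f marks indexes with no known gain. */
static const signed char lna_gain_table[16] = {
	-2,   0x7f, 0x7f, 0x7f,
	-6,   0x7f, 0x7f, -19,
	-32,  0x7f, -36,  -42,
	0x7f, 0x7f, 0x7f, -48,
};

/* CCK: RX power in dBm is the LNA gain minus 2 dB per VGA step. */
static int cck_rx_power_dbm(unsigned int lna_idx, unsigned int vga_idx)
{
	return lna_gain_table[lna_idx & 0xf] - 2 * (int)vga_idx;
}

/* OFDM: 6-bit AGC gain, scaled as (gain * 2) - 110 dBm. */
static int ofdm_rx_power_dbm(unsigned char agc_gain)
{
	return ((agc_gain & 0x3f) * 2) - 110;
}

/* "rssi in percentage % (dbm = % - 100)" per the file header comment. */
static int dbm_to_rssi_percent(int dbm)
{
	return dbm + 100;
}

int main(void)
{
	int cck = cck_rx_power_dbm(8, 10);   /* -32 dB LNA, 10 VGA steps */
	int ofdm = ofdm_rx_power_dbm(0x20);  /* AGC gain 32 */

	printf("CCK:  %d dBm -> %d%%\n", cck, dbm_to_rssi_percent(cck));
	printf("OFDM: %d dBm -> %d%%\n", ofdm, dbm_to_rssi_percent(ofdm));
	return 0;
}

With lna_idx 8 and vga_idx 10 this yields -52 dBm, i.e. about 48% RSSI, which is the value get_cck_rx_pwr() above would return for the same inputs; the OFDM example (gain 0x20) works out to -46 dBm, about 54%.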
+static u8 rtw8703b_iqk_tx_path(struct rtw_dev *rtwdev, + const struct rtw8723x_iqk_backup_regs *backup) +{ + u8 status; + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path A TX IQK!\n"); + + /* IQK setting */ + rtw_write32(rtwdev, REG_TXIQK_11N, 0x01007c00); + rtw_write32(rtwdev, REG_RXIQK_11N, 0x01004800); + rtw_write32(rtwdev, REG_TXIQK_TONE_A_11N, 0x18008c1c); + rtw_write32(rtwdev, REG_RXIQK_TONE_A_11N, 0x38008c1c); + rtw_write32(rtwdev, REG_TX_IQK_TONE_B, 0x38008c1c); + rtw_write32(rtwdev, REG_RX_IQK_TONE_B, 0x38008c1c); + rtw_write32(rtwdev, REG_TXIQK_PI_A_11N, 0x8214030f); + rtw_write32(rtwdev, REG_RXIQK_PI_A_11N, 0x28110000); + rtw_write32(rtwdev, REG_TXIQK_PI_B, 0x82110000); + rtw_write32(rtwdev, REG_RXIQK_PI_B, 0x28110000); + + /* LO calibration setting */ + rtw_write32(rtwdev, REG_IQK_AGC_RSP_11N, 0x00462911); + + /* leave IQK mode */ + rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, 0xffffff00, 0x000000); + + /* PA, PAD setting */ + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTDBG, 0x800, 0x1); + rtw_write_rf(rtwdev, RF_PATH_A, 0x55, 0x7f, 0x7); + rtw_write_rf(rtwdev, RF_PATH_A, 0x7f, RFREG_MASK, 0xd400); + + rtw8703b_iqk_one_shot(rtwdev, true); + status = rtw8703b_iqk_check_tx_failed(rtwdev); + + rtw8703b_iqk_txrx_path_post(rtwdev, backup); + + return status; +} + +static u8 rtw8703b_iqk_rx_path(struct rtw_dev *rtwdev, + const struct rtw8723x_iqk_backup_regs *backup) +{ + u8 status; + u32 tx_x, tx_y; + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path A RX IQK step 1!\n"); + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0x67 @A RX IQK1 = 0x%x\n", + rtw_read32_mask(rtwdev, REG_PAD_CTRL1, MASKBYTE3)); + rtw_write32(rtwdev, REG_BB_SEL_BTG, 0x99000000); + + /* disable IQC mode */ + rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, RST_IQK); + + /* IQK setting */ + rtw_write32(rtwdev, REG_TXIQK_11N, 0x01007c00); + rtw_write32(rtwdev, REG_RXIQK_11N, 0x01004800); + + /* path IQK setting */ + rtw_write32(rtwdev, REG_TXIQK_TONE_A_11N, 0x18008c1c); + rtw_write32(rtwdev, REG_RXIQK_TONE_A_11N, 0x38008c1c); + rtw_write32(rtwdev, REG_TX_IQK_TONE_B, 0x38008c1c); + rtw_write32(rtwdev, REG_RX_IQK_TONE_B, 0x38008c1c); + rtw_write32(rtwdev, REG_TXIQK_PI_A_11N, 0x8216000f); + rtw_write32(rtwdev, REG_RXIQK_PI_A_11N, 0x28110000); + rtw_write32(rtwdev, REG_TXIQK_PI_B, 0x28110000); + rtw_write32(rtwdev, REG_RXIQK_PI_B, 0x28110000); + + /* LOK setting */ + rtw_write32(rtwdev, REG_IQK_AGC_RSP_11N, 0x0046a911); + + /* RX IQK mode */ + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE, 0x80000, 0x1); + rtw_write_rf(rtwdev, RF_PATH_A, 0x30, RFREG_MASK, 0x30000); + rtw_write_rf(rtwdev, RF_PATH_A, 0x31, RFREG_MASK, 0x00007); + rtw_write_rf(rtwdev, RF_PATH_A, 0x32, RFREG_MASK, 0x57db7); + + rtw8703b_iqk_one_shot(rtwdev, true); + /* leave IQK mode */ + rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, 0xffffff00, 0x000000); + status = rtw8703b_iqk_check_tx_failed(rtwdev); + + if (!status) + goto restore; + + /* second round */ + tx_x = rtw_read32_mask(rtwdev, REG_IQK_RES_TX, BIT_MASK_RES_TX); + tx_y = rtw_read32_mask(rtwdev, REG_IQK_RES_TY, BIT_MASK_RES_TY); + + rtw_write32(rtwdev, REG_TXIQK_11N, BIT_SET_TXIQK_11N(tx_x, tx_y)); + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0xe40 = 0x%x u4tmp = 0x%x\n", + rtw_read32(rtwdev, REG_TXIQK_11N), + BIT_SET_TXIQK_11N(tx_x, tx_y)); + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path A RX IQK step 2!\n"); + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0x67 @A RX IQK 2 = 0x%x\n", + rtw_read32_mask(rtwdev, REG_PAD_CTRL1, MASKBYTE3)); + + /* IQK setting */ + rtw_write32(rtwdev, REG_RXIQK_11N, 0x01004800); + rtw_write32(rtwdev, 
REG_TXIQK_TONE_A_11N, 0x38008c1c); + rtw_write32(rtwdev, REG_RXIQK_TONE_A_11N, 0x18008c1c); + rtw_write32(rtwdev, REG_TX_IQK_TONE_B, 0x38008c1c); + rtw_write32(rtwdev, REG_RX_IQK_TONE_B, 0x38008c1c); + rtw_write32(rtwdev, REG_TXIQK_PI_A_11N, 0x82110000); + rtw_write32(rtwdev, REG_RXIQK_PI_A_11N, 0x28160c1f); + rtw_write32(rtwdev, REG_TXIQK_PI_B, 0x82110000); + rtw_write32(rtwdev, REG_RXIQK_PI_B, 0x28110000); + + /* LO calibration setting */ + rtw_write32(rtwdev, REG_IQK_AGC_RSP_11N, 0x0046a8d1); + + /* leave IQK mode */ + rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, 0xffffff00, 0x000000); + /* modify RX IQK mode table */ + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE, 0x80000, 0x1); + /* RF_RCK_OS, RF_TXPA_G1, RF_TXPA_G2 */ + rtw_write_rf(rtwdev, RF_PATH_A, 0x30, RFREG_MASK, 0x30000); + rtw_write_rf(rtwdev, RF_PATH_A, 0x31, RFREG_MASK, 0x00007); + rtw_write_rf(rtwdev, RF_PATH_A, 0x32, RFREG_MASK, 0xf7d77); + + /* PA, PAD setting */ + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTDBG, 0x800, 0x1); + rtw_write_rf(rtwdev, RF_PATH_A, 0x55, 0x7f, 0x5); + + rtw8703b_iqk_one_shot(rtwdev, false); + status |= rtw8703b_iqk_check_rx_failed(rtwdev); + +restore: + rtw8703b_iqk_txrx_path_post(rtwdev, backup); + + return status; +} + +static +void rtw8703b_iqk_one_round(struct rtw_dev *rtwdev, s32 result[][IQK_NR], u8 t, + const struct rtw8723x_iqk_backup_regs *backup) +{ + u32 i; + u8 a_ok; + + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[IQK] IQ Calibration for 1T1R_S0/S1 for %d times\n", t); + + rtw8723x_iqk_path_adda_on(rtwdev, ADDA_ON_VAL_8703B); + rtw8703b_iqk_config_mac(rtwdev, backup); + rtw_write32_mask(rtwdev, REG_CCK_ANT_SEL_11N, 0x0f000000, 0xf); + rtw_write32(rtwdev, REG_BB_RX_PATH_11N, 0x03a05600); + rtw_write32(rtwdev, REG_TRMUX_11N, 0x000800e4); + rtw_write32(rtwdev, REG_BB_PWR_SAV1_11N, 0x25204000); + + for (i = 0; i < PATH_IQK_RETRY; i++) { + a_ok = rtw8703b_iqk_tx_path(rtwdev, backup); + if (a_ok == IQK_TX_OK) { + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[IQK] path A TX IQK success!\n"); + result[t][IQK_S1_TX_X] = + rtw_read32_mask(rtwdev, REG_IQK_RES_TX, + BIT_MASK_RES_TX); + result[t][IQK_S1_TX_Y] = + rtw_read32_mask(rtwdev, REG_IQK_RES_TY, + BIT_MASK_RES_TY); + break; + } + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path A TX IQK fail!\n"); + result[t][IQK_S1_TX_X] = 0x100; + result[t][IQK_S1_TX_Y] = 0x0; + } + + for (i = 0; i < PATH_IQK_RETRY; i++) { + a_ok = rtw8703b_iqk_rx_path(rtwdev, backup); + if (a_ok == (IQK_TX_OK | IQK_RX_OK)) { + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[IQK] path A RX IQK success!\n"); + result[t][IQK_S1_RX_X] = + rtw_read32_mask(rtwdev, REG_IQK_RES_RX, + BIT_MASK_RES_RX); + result[t][IQK_S1_RX_Y] = + rtw_read32_mask(rtwdev, REG_IQK_RES_RY, + BIT_MASK_RES_RY); + break; + } + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path A RX IQK fail!\n"); + result[t][IQK_S1_RX_X] = 0x100; + result[t][IQK_S1_RX_Y] = 0x0; + } + + if (a_ok == 0x0) + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path A IQK fail!\n"); + + rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, RST_IQK); + mdelay(1); +} + +static +void rtw8703b_iqk_fill_a_matrix(struct rtw_dev *rtwdev, const s32 result[]) +{ + u32 tmp_rx_iqi = 0x40000100 & GENMASK(31, 16); + s32 tx1_a, tx1_a_ext; + s32 tx1_c, tx1_c_ext; + s32 oldval_1; + s32 x, y; + + if (result[IQK_S1_TX_X] == 0) + return; + + oldval_1 = rtw_read32_mask(rtwdev, REG_OFDM_0_XA_TX_IQ_IMBALANCE, + BIT_MASK_TXIQ_ELM_D); + + x = iqkxy_to_s32(result[IQK_S1_TX_X]); + tx1_a = iqk_mult(x, oldval_1, &tx1_a_ext); + rtw_write32_mask(rtwdev, REG_OFDM_0_XA_TX_IQ_IMBALANCE, + BIT_MASK_TXIQ_ELM_A, tx1_a); + 
rtw_write32_mask(rtwdev, REG_OFDM_0_ECCA_THRESHOLD, + BIT_MASK_OFDM0_EXT_A, tx1_a_ext); + + y = iqkxy_to_s32(result[IQK_S1_TX_Y]); + tx1_c = iqk_mult(y, oldval_1, &tx1_c_ext); + rtw_write32_mask(rtwdev, REG_TXIQK_MATRIXA_LSB2_11N, MASKH4BITS, + BIT_SET_TXIQ_ELM_C1(tx1_c)); + rtw_write32_mask(rtwdev, REG_OFDM_0_XA_TX_IQ_IMBALANCE, + BIT_MASK_TXIQ_ELM_C, BIT_SET_TXIQ_ELM_C2(tx1_c)); + rtw_write32_mask(rtwdev, REG_OFDM_0_ECCA_THRESHOLD, + BIT_MASK_OFDM0_EXT_C, tx1_c_ext); + + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[IQK] X = 0x%x, TX1_A = 0x%x, oldval_1 0x%x\n", + x, tx1_a, oldval_1); + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[IQK] Y = 0x%x, TX1_C = 0x%x\n", y, tx1_c); + + if (result[IQK_S1_RX_X] == 0) + return; + + tmp_rx_iqi |= FIELD_PREP(BIT_MASK_RXIQ_S1_X, result[IQK_S1_RX_X]); + tmp_rx_iqi |= FIELD_PREP(BIT_MASK_RXIQ_S1_Y1, result[IQK_S1_RX_X]); + rtw_write32(rtwdev, REG_A_RXIQI, tmp_rx_iqi); + rtw_write32_mask(rtwdev, REG_RXIQK_MATRIX_LSB_11N, BIT_MASK_RXIQ_S1_Y2, + BIT_SET_RXIQ_S1_Y2(result[IQK_S1_RX_Y])); +} + +static void rtw8703b_phy_calibration(struct rtw_dev *rtwdev) +{ + /* For some reason path A is called S1 and B S0 in shared + * rtw88 calibration data. + */ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw8723x_iqk_backup_regs backup; + u8 final_candidate = IQK_ROUND_INVALID; + s32 result[IQK_ROUND_SIZE][IQK_NR]; + bool good; + u8 i, j; + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] Start!\n"); + + memset(result, 0, sizeof(result)); + + rtw8723x_iqk_backup_path_ctrl(rtwdev, &backup); + rtw8723x_iqk_backup_lte_path_gnt(rtwdev, &backup); + rtw8723x_iqk_backup_regs(rtwdev, &backup); + + for (i = IQK_ROUND_0; i <= IQK_ROUND_2; i++) { + rtw8723x_iqk_config_path_ctrl(rtwdev); + rtw8723x_iqk_config_lte_path_gnt(rtwdev, IQK_LTE_WRITE_VAL_8703B); + + rtw8703b_iqk_one_round(rtwdev, result, i, &backup); + + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[IQK] back to BB mode, load original values!\n"); + if (i > IQK_ROUND_0) + rtw8723x_iqk_restore_regs(rtwdev, &backup); + rtw8723x_iqk_restore_lte_path_gnt(rtwdev, &backup); + rtw8723x_iqk_restore_path_ctrl(rtwdev, &backup); + + for (j = IQK_ROUND_0; j < i; j++) { + good = rtw8723x_iqk_similarity_cmp(rtwdev, result, j, i); + + if (good) { + final_candidate = j; + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[IQK] cmp %d:%d final_candidate is %x\n", + j, i, final_candidate); + goto iqk_done; + } + } + } + + if (final_candidate == IQK_ROUND_INVALID) { + s32 reg_tmp = 0; + + for (i = 0; i < IQK_NR; i++) + reg_tmp += result[IQK_ROUND_HYBRID][i]; + + if (reg_tmp != 0) { + final_candidate = IQK_ROUND_HYBRID; + } else { + WARN(1, "IQK failed\n"); + goto out; + } + } + +iqk_done: + /* only path A is calibrated in rtl8703b */ + rtw8703b_iqk_fill_a_matrix(rtwdev, result[final_candidate]); + + dm_info->iqk.result.s1_x = result[final_candidate][IQK_S1_TX_X]; + dm_info->iqk.result.s1_y = result[final_candidate][IQK_S1_TX_Y]; + dm_info->iqk.result.s0_x = result[final_candidate][IQK_S0_TX_X]; + dm_info->iqk.result.s0_y = result[final_candidate][IQK_S0_TX_Y]; + dm_info->iqk.done = true; + +out: + rtw_write32(rtwdev, REG_BB_SEL_BTG, backup.bb_sel_btg); + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] final_candidate is %x\n", + final_candidate); + + for (i = IQK_ROUND_0; i < IQK_ROUND_SIZE; i++) + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[IQK] Result %u: rege94_s1=%x rege9c_s1=%x regea4_s1=%x regeac_s1=%x rege94_s0=%x rege9c_s0=%x regea4_s0=%x regeac_s0=%x %s\n", + i, + result[i][0], result[i][1], result[i][2], result[i][3], + result[i][4], result[i][5], result[i][6], result[i][7], + final_candidate == i ? 
"(final candidate)" : ""); + + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[IQK] 0xc80 = 0x%x 0xc94 = 0x%x 0xc14 = 0x%x 0xca0 = 0x%x\n", + rtw_read32(rtwdev, REG_OFDM_0_XA_TX_IQ_IMBALANCE), + rtw_read32(rtwdev, REG_TXIQK_MATRIXA_LSB2_11N), + rtw_read32(rtwdev, REG_A_RXIQI), + rtw_read32(rtwdev, REG_RXIQK_MATRIX_LSB_11N)); + rtw_dbg(rtwdev, RTW_DBG_RFK, + "[IQK] 0xcd0 = 0x%x 0xcd4 = 0x%x 0xcd8 = 0x%x\n", + rtw_read32(rtwdev, REG_TXIQ_AB_S0), + rtw_read32(rtwdev, REG_TXIQ_CD_S0), + rtw_read32(rtwdev, REG_RXIQ_AB_S0)); + + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] Finished.\n"); +} + +static void rtw8703b_set_iqk_matrix_by_result(struct rtw_dev *rtwdev, + u32 ofdm_swing, u8 rf_path) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + s32 ele_A, ele_D, ele_C; + s32 ele_A_ext, ele_C_ext, ele_D_ext; + s32 iqk_result_x; + s32 iqk_result_y; + s32 value32; + + switch (rf_path) { + default: + case RF_PATH_A: + iqk_result_x = dm_info->iqk.result.s1_x; + iqk_result_y = dm_info->iqk.result.s1_y; + break; + case RF_PATH_B: + iqk_result_x = dm_info->iqk.result.s0_x; + iqk_result_y = dm_info->iqk.result.s0_y; + break; + } + + /* new element D */ + ele_D = OFDM_SWING_D(ofdm_swing); + iqk_mult(iqk_result_x, ele_D, &ele_D_ext); + /* new element A */ + iqk_result_x = iqkxy_to_s32(iqk_result_x); + ele_A = iqk_mult(iqk_result_x, ele_D, &ele_A_ext); + /* new element C */ + iqk_result_y = iqkxy_to_s32(iqk_result_y); + ele_C = iqk_mult(iqk_result_y, ele_D, &ele_C_ext); + + switch (rf_path) { + case RF_PATH_A: + default: + /* write new elements A, C, D, and element B is always 0 */ + value32 = BIT_SET_TXIQ_ELM_ACD(ele_A, ele_C, ele_D); + rtw_write32(rtwdev, REG_OFDM_0_XA_TX_IQ_IMBALANCE, value32); + value32 = BIT_SET_TXIQ_ELM_C1(ele_C); + rtw_write32_mask(rtwdev, REG_TXIQK_MATRIXA_LSB2_11N, MASKH4BITS, + value32); + value32 = rtw_read32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD); + value32 &= ~BIT_MASK_OFDM0_EXTS; + value32 |= BIT_SET_OFDM0_EXTS(ele_A_ext, ele_C_ext, ele_D_ext); + rtw_write32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD, value32); + break; + + case RF_PATH_B: + /* write new elements A, C, D, and element B is always 0 */ + value32 = BIT_SET_TXIQ_ELM_ACD(ele_A, ele_C, ele_D); + rtw_write32(rtwdev, REG_OFDM_0_XB_TX_IQ_IMBALANCE, value32); + value32 = BIT_SET_TXIQ_ELM_C1(ele_C); + rtw_write32_mask(rtwdev, REG_TXIQK_MATRIXB_LSB2_11N, MASKH4BITS, + value32); + value32 = rtw_read32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD); + value32 &= ~BIT_MASK_OFDM0_EXTS_B; + value32 |= BIT_SET_OFDM0_EXTS_B(ele_A_ext, ele_C_ext, ele_D_ext); + rtw_write32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD, value32); + break; + } +} + +static void rtw8703b_set_iqk_matrix(struct rtw_dev *rtwdev, s8 ofdm_index, + u8 rf_path) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + s32 value32; + u32 ofdm_swing; + + ofdm_index = clamp_t(s8, ofdm_index, 0, RTW_OFDM_SWING_TABLE_SIZE - 1); + + ofdm_swing = rtw8703b_ofdm_swing_table[ofdm_index]; + + if (dm_info->iqk.done) { + rtw8703b_set_iqk_matrix_by_result(rtwdev, ofdm_swing, rf_path); + return; + } + + switch (rf_path) { + case RF_PATH_A: + default: + rtw_write32(rtwdev, REG_OFDM_0_XA_TX_IQ_IMBALANCE, ofdm_swing); + rtw_write32_mask(rtwdev, REG_TXIQK_MATRIXA_LSB2_11N, MASKH4BITS, + 0x00); + + value32 = rtw_read32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD); + value32 &= ~BIT_MASK_OFDM0_EXTS; + rtw_write32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD, value32); + break; + + case RF_PATH_B: + rtw_write32(rtwdev, REG_OFDM_0_XB_TX_IQ_IMBALANCE, ofdm_swing); + rtw_write32_mask(rtwdev, REG_TXIQK_MATRIXB_LSB2_11N, MASKH4BITS, + 0x00); + + value32 = 
rtw_read32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD); + value32 &= ~BIT_MASK_OFDM0_EXTS_B; + rtw_write32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD, value32); + break; + } +} + +static void rtw8703b_pwrtrack_set_ofdm_pwr(struct rtw_dev *rtwdev, s8 swing_idx, + s8 txagc_idx) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + + dm_info->txagc_remnant_ofdm = txagc_idx; + + /* Only path A is calibrated for rtl8703b */ + rtw8703b_set_iqk_matrix(rtwdev, swing_idx, RF_PATH_A); +} + +static void rtw8703b_pwrtrack_set_cck_pwr(struct rtw_dev *rtwdev, s8 swing_idx, + s8 txagc_idx) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + + dm_info->txagc_remnant_cck = txagc_idx; + + swing_idx = clamp_t(s8, swing_idx, 0, RTW_CCK_SWING_TABLE_SIZE - 1); + + BUILD_BUG_ON(ARRAY_SIZE(rtw8703b_cck_pwr_regs) + != ARRAY_SIZE(rtw8703b_cck_swing_table[0])); + + for (int i = 0; i < ARRAY_SIZE(rtw8703b_cck_pwr_regs); i++) + rtw_write8(rtwdev, rtw8703b_cck_pwr_regs[i], + rtw8703b_cck_swing_table[swing_idx][i]); +} + +static void rtw8703b_pwrtrack_set(struct rtw_dev *rtwdev, u8 path) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_hal *hal = &rtwdev->hal; + u8 limit_ofdm; + u8 limit_cck = 21; + s8 final_ofdm_swing_index; + s8 final_cck_swing_index; + + limit_ofdm = rtw8723x_pwrtrack_get_limit_ofdm(rtwdev); + + final_ofdm_swing_index = dm_info->default_ofdm_index + + dm_info->delta_power_index[path]; + final_cck_swing_index = dm_info->default_cck_index + + dm_info->delta_power_index[path]; + + if (final_ofdm_swing_index > limit_ofdm) + rtw8703b_pwrtrack_set_ofdm_pwr(rtwdev, limit_ofdm, + final_ofdm_swing_index - limit_ofdm); + else if (final_ofdm_swing_index < 0) + rtw8703b_pwrtrack_set_ofdm_pwr(rtwdev, 0, + final_ofdm_swing_index); + else + rtw8703b_pwrtrack_set_ofdm_pwr(rtwdev, final_ofdm_swing_index, 0); + + if (final_cck_swing_index > limit_cck) + rtw8703b_pwrtrack_set_cck_pwr(rtwdev, limit_cck, + final_cck_swing_index - limit_cck); + else if (final_cck_swing_index < 0) + rtw8703b_pwrtrack_set_cck_pwr(rtwdev, 0, + final_cck_swing_index); + else + rtw8703b_pwrtrack_set_cck_pwr(rtwdev, final_cck_swing_index, 0); + + rtw_phy_set_tx_power_level(rtwdev, hal->current_channel); +} + +static void rtw8703b_phy_pwrtrack(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_swing_table swing_table; + u8 thermal_value, delta, path; + bool do_iqk = false; + + rtw_phy_config_swing_table(rtwdev, &swing_table); + + if (rtwdev->efuse.thermal_meter[0] == 0xff) + return; + + thermal_value = rtw_read_rf(rtwdev, RF_PATH_A, RF_T_METER, 0xfc00); + + rtw_phy_pwrtrack_avg(rtwdev, thermal_value, RF_PATH_A); + + do_iqk = rtw_phy_pwrtrack_need_iqk(rtwdev); + + if (do_iqk) + rtw8723x_lck(rtwdev); + + if (dm_info->pwr_trk_init_trigger) + dm_info->pwr_trk_init_trigger = false; + else if (!rtw_phy_pwrtrack_thermal_changed(rtwdev, thermal_value, + RF_PATH_A)) + goto iqk; + + delta = rtw_phy_pwrtrack_get_delta(rtwdev, RF_PATH_A); + + delta = min_t(u8, delta, RTW_PWR_TRK_TBL_SZ - 1); + + for (path = 0; path < rtwdev->hal.rf_path_num; path++) { + s8 delta_cur, delta_last; + + delta_last = dm_info->delta_power_index[path]; + delta_cur = rtw_phy_pwrtrack_get_pwridx(rtwdev, &swing_table, + path, RF_PATH_A, delta); + if (delta_last == delta_cur) + continue; + + dm_info->delta_power_index[path] = delta_cur; + rtw8703b_pwrtrack_set(rtwdev, path); + } + + rtw8723x_pwrtrack_set_xtal(rtwdev, RF_PATH_A, delta); + +iqk: + if (do_iqk) + rtw8703b_phy_calibration(rtwdev); +} + +static void rtw8703b_pwr_track(struct rtw_dev 
*rtwdev) +{ + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + + if (efuse->power_track_type != 0) { + rtw_warn(rtwdev, "unsupported power track type"); + return; + } + + if (!dm_info->pwr_trk_triggered) { + rtw_write_rf(rtwdev, RF_PATH_A, RF_T_METER, + GENMASK(17, 16), 0x03); + dm_info->pwr_trk_triggered = true; + return; + } + + rtw8703b_phy_pwrtrack(rtwdev); + dm_info->pwr_trk_triggered = false; +} + +static void rtw8703b_coex_set_gnt_fix(struct rtw_dev *rtwdev) +{ +} + +static void rtw8703b_coex_set_gnt_debug(struct rtw_dev *rtwdev) +{ +} + +static void rtw8703b_coex_set_rfe_type(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_rfe *coex_rfe = &coex->rfe; + + coex_rfe->rfe_module_type = rtwdev->efuse.rfe_option; + coex_rfe->ant_switch_polarity = 0; + coex_rfe->ant_switch_exist = false; + coex_rfe->ant_switch_with_bt = false; + coex_rfe->ant_switch_diversity = false; + coex_rfe->wlg_at_btg = true; + + /* disable LTE coex on wifi side */ + rtw_coex_write_indirect_reg(rtwdev, LTE_COEX_CTRL, BIT_LTE_COEX_EN, 0x0); + rtw_coex_write_indirect_reg(rtwdev, LTE_WL_TRX_CTRL, MASKLWORD, 0xffff); + rtw_coex_write_indirect_reg(rtwdev, LTE_BT_TRX_CTRL, MASKLWORD, 0xffff); +} + +static void rtw8703b_coex_set_wl_tx_power(struct rtw_dev *rtwdev, u8 wl_pwr) +{ +} + +static void rtw8703b_coex_set_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain) +{ +} + +static const u8 rtw8703b_pwrtrk_2gb_n[] = { + 0, 0, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6, + 7, 7, 7, 7, 8, 8, 9, 9, 10, 10, 10, 11, 11, 11, 11 +}; + +static const u8 rtw8703b_pwrtrk_2gb_p[] = { + 0, 1, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 7, 7, 7, + 8, 8, 9, 9, 10, 10, 11, 11, 12, 13, 13, 14, 14, 15, 15 +}; + +static const u8 rtw8703b_pwrtrk_2ga_n[] = { + 0, 0, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6, + 7, 7, 7, 7, 8, 8, 9, 9, 10, 10, 10, 11, 11, 11, 11 +}; + +static const u8 rtw8703b_pwrtrk_2ga_p[] = { + 0, 1, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 7, 7, 7, + 8, 8, 9, 9, 10, 10, 11, 11, 12, 13, 13, 14, 14, 15, 15 +}; + +static const u8 rtw8703b_pwrtrk_2g_cck_b_n[] = { + 0, 0, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6, + 7, 7, 7, 7, 8, 8, 9, 9, 10, 10, 10, 11, 11, 11, 11 +}; + +static const u8 rtw8703b_pwrtrk_2g_cck_b_p[] = { + 0, 0, 1, 1, 2, 3, 3, 3, 4, 4, 4, 5, 6, 6, 6, + 7, 7, 8, 8, 8, 9, 10, 10, 10, 11, 11, 12, 12, 13, 13 +}; + +static const u8 rtw8703b_pwrtrk_2g_cck_a_n[] = { + 0, 0, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6, + 7, 7, 7, 7, 8, 8, 9, 9, 10, 10, 10, 11, 11, 11, 11 +}; + +static const u8 rtw8703b_pwrtrk_2g_cck_a_p[] = { + 0, 0, 1, 1, 2, 3, 3, 3, 4, 4, 4, 5, 6, 6, 6, + 7, 7, 8, 8, 8, 9, 10, 10, 10, 11, 11, 12, 12, 13, 13 +}; + +static const s8 rtw8703b_pwrtrk_xtal_n[] = { + 0, 0, 0, -1, -1, -1, -1, -2, -2, -2, -3, -3, -3, -3, -3, + -4, -2, -2, -1, -1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1 +}; + +static const s8 rtw8703b_pwrtrk_xtal_p[] = { + 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 1, 0, -1, -1, -1, + -2, -3, -7, -9, -10, -11, -14, -16, -18, -20, -22, -24, -26, -28, -30 +}; + +static const struct rtw_pwr_track_tbl rtw8703b_rtw_pwr_track_tbl = { + .pwrtrk_2gb_n = rtw8703b_pwrtrk_2gb_n, + .pwrtrk_2gb_p = rtw8703b_pwrtrk_2gb_p, + .pwrtrk_2ga_n = rtw8703b_pwrtrk_2ga_n, + .pwrtrk_2ga_p = rtw8703b_pwrtrk_2ga_p, + .pwrtrk_2g_cckb_n = rtw8703b_pwrtrk_2g_cck_b_n, + .pwrtrk_2g_cckb_p = rtw8703b_pwrtrk_2g_cck_b_p, + .pwrtrk_2g_ccka_n = rtw8703b_pwrtrk_2g_cck_a_n, + .pwrtrk_2g_ccka_p = rtw8703b_pwrtrk_2g_cck_a_p, + .pwrtrk_xtal_n = rtw8703b_pwrtrk_xtal_n, + .pwrtrk_xtal_p = rtw8703b_pwrtrk_xtal_p, +}; + +/* 
Shared-Antenna Coex Table */ +static const struct coex_table_para table_sant_8703b[] = { + {0xffffffff, 0xffffffff}, /* case-0 */ + {0x55555555, 0x55555555}, + {0x66555555, 0x66555555}, + {0xaaaaaaaa, 0xaaaaaaaa}, + {0x5a5a5a5a, 0x5a5a5a5a}, + {0xfafafafa, 0xfafafafa}, /* case-5 */ + {0x6a5a5555, 0xaaaaaaaa}, + {0x6a5a56aa, 0x6a5a56aa}, + {0x6a5a5a5a, 0x6a5a5a5a}, + {0x66555555, 0x5a5a5a5a}, + {0x66555555, 0x6a5a5a5a}, /* case-10 */ + {0x66555555, 0x6a5a5aaa}, + {0x66555555, 0x5a5a5aaa}, + {0x66555555, 0x6aaa5aaa}, + {0x66555555, 0xaaaa5aaa}, + {0x66555555, 0xaaaaaaaa}, /* case-15 */ + {0xffff55ff, 0xfafafafa}, + {0xffff55ff, 0x6afa5afa}, + {0xaaffffaa, 0xfafafafa}, + {0xaa5555aa, 0x5a5a5a5a}, + {0xaa5555aa, 0x6a5a5a5a}, /* case-20 */ + {0xaa5555aa, 0xaaaaaaaa}, + {0xffffffff, 0x5a5a5a5a}, + {0xffffffff, 0x5a5a5a5a}, + {0xffffffff, 0x55555555}, + {0xffffffff, 0x5a5a5aaa}, /* case-25 */ + {0x55555555, 0x5a5a5a5a}, + {0x55555555, 0xaaaaaaaa}, + {0x55555555, 0x6a5a6a5a}, + {0x66556655, 0x66556655}, + {0x66556aaa, 0x6a5a6aaa}, /* case-30 */ + {0xffffffff, 0x5aaa5aaa}, + {0x56555555, 0x5a5a5aaa}, +}; + +/* Shared-Antenna TDMA */ +static const struct coex_tdma_para tdma_sant_8703b[] = { + { {0x00, 0x00, 0x00, 0x00, 0x00} }, /* case-0 */ + { {0x61, 0x45, 0x03, 0x11, 0x11} }, /* case-1 */ + { {0x61, 0x3a, 0x03, 0x11, 0x11} }, + { {0x61, 0x30, 0x03, 0x11, 0x11} }, + { {0x61, 0x20, 0x03, 0x11, 0x11} }, + { {0x61, 0x10, 0x03, 0x11, 0x11} }, /* case-5 */ + { {0x61, 0x45, 0x03, 0x11, 0x10} }, + { {0x61, 0x3a, 0x03, 0x11, 0x10} }, + { {0x61, 0x30, 0x03, 0x11, 0x10} }, + { {0x61, 0x20, 0x03, 0x11, 0x10} }, + { {0x61, 0x10, 0x03, 0x11, 0x10} }, /* case-10 */ + { {0x61, 0x08, 0x03, 0x11, 0x14} }, + { {0x61, 0x08, 0x03, 0x10, 0x14} }, + { {0x51, 0x08, 0x03, 0x10, 0x54} }, + { {0x51, 0x08, 0x03, 0x10, 0x55} }, + { {0x51, 0x08, 0x07, 0x10, 0x54} }, /* case-15 */ + { {0x51, 0x45, 0x03, 0x10, 0x50} }, + { {0x51, 0x3a, 0x03, 0x10, 0x50} }, + { {0x51, 0x30, 0x03, 0x10, 0x50} }, + { {0x51, 0x20, 0x03, 0x10, 0x50} }, + { {0x51, 0x10, 0x03, 0x10, 0x50} }, /* case-20 */ + { {0x51, 0x4a, 0x03, 0x10, 0x50} }, + { {0x51, 0x0c, 0x03, 0x10, 0x54} }, + { {0x55, 0x08, 0x03, 0x10, 0x54} }, + { {0x65, 0x10, 0x03, 0x11, 0x10} }, + { {0x51, 0x10, 0x03, 0x10, 0x51} }, /* case-25 */ + { {0x51, 0x08, 0x03, 0x10, 0x50} }, + { {0x61, 0x08, 0x03, 0x11, 0x11} }, +}; + +static struct rtw_chip_ops rtw8703b_ops = { + .mac_init = rtw8723x_mac_init, + .dump_fw_crash = NULL, + .shutdown = NULL, + .read_efuse = rtw8703b_read_efuse, + .phy_set_param = rtw8703b_phy_set_param, + .set_channel = rtw8703b_set_channel, + .query_rx_desc = rtw8703b_query_rx_desc, + .read_rf = rtw_phy_read_rf_sipi, + .write_rf = rtw_phy_write_rf_reg_sipi, + .set_tx_power_index = rtw8723x_set_tx_power_index, + .set_antenna = NULL, + .cfg_ldo25 = rtw8723x_cfg_ldo25, + .efuse_grant = rtw8723x_efuse_grant, + .false_alarm_statistics = rtw8723x_false_alarm_statistics, + .phy_calibration = rtw8703b_phy_calibration, + .dpk_track = NULL, + /* 8723d uses REG_CSRATIO to set dm_info.cck_pd_default, which + * is used in its cck_pd_set function. According to comments + * in the vendor driver code it doesn't exist in this chip + * generation, only 0xa0a ("ODM_CCK_PD_THRESH", which is only + * *written* to). 
+ */ + .cck_pd_set = NULL, + .pwr_track = rtw8703b_pwr_track, + .config_bfee = NULL, + .set_gid_table = NULL, + .cfg_csi_rate = NULL, + .adaptivity_init = NULL, + .adaptivity = NULL, + .cfo_init = NULL, + .cfo_track = NULL, + .config_tx_path = NULL, + .config_txrx_mode = NULL, + .fill_txdesc_checksum = rtw8723x_fill_txdesc_checksum, + + /* for coex */ + .coex_set_init = rtw8723x_coex_cfg_init, + .coex_set_ant_switch = NULL, + .coex_set_gnt_fix = rtw8703b_coex_set_gnt_fix, + .coex_set_gnt_debug = rtw8703b_coex_set_gnt_debug, + .coex_set_rfe_type = rtw8703b_coex_set_rfe_type, + .coex_set_wl_tx_power = rtw8703b_coex_set_wl_tx_power, + .coex_set_wl_rx_gain = rtw8703b_coex_set_wl_rx_gain, +}; + +const struct rtw_chip_info rtw8703b_hw_spec = { + .ops = &rtw8703b_ops, + .id = RTW_CHIP_TYPE_8703B, + + .fw_name = "rtw88/rtw8703b_fw.bin", + .wlan_cpu = RTW_WCPU_11N, + .tx_pkt_desc_sz = 40, + .tx_buf_desc_sz = 16, + .rx_pkt_desc_sz = 24, + .rx_buf_desc_sz = 8, + .phy_efuse_size = 256, + .log_efuse_size = 512, + .ptct_efuse_size = 15, + .txff_size = 32768, + .rxff_size = 16384, + .rsvd_drv_pg_num = 8, + .band = RTW_BAND_2G, + .page_size = TX_PAGE_SIZE, + .csi_buf_pg_num = 0, + .dig_min = 0x20, + .txgi_factor = 1, + .is_pwr_by_rate_dec = true, + .rx_ldpc = false, + .tx_stbc = false, + .max_power_index = 0x3f, + .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16, + + .path_div_supported = false, + .ht_supported = true, + .vht_supported = false, + .lps_deep_mode_supported = 0, + + .sys_func_en = 0xFD, + .pwr_on_seq = card_enable_flow_8703b, + .pwr_off_seq = card_disable_flow_8703b, + .rqpn_table = rqpn_table_8703b, + .prioq_addrs = &rtw8723x_common.prioq_addrs, + .page_table = page_table_8703b, + /* used only in pci.c, not needed for SDIO devices */ + .intf_table = NULL, + + .dig = rtw8723x_common.dig, + .dig_cck = rtw8723x_common.dig_cck, + + .rf_sipi_addr = {0x840, 0x844}, + .rf_sipi_read_addr = rtw8723x_common.rf_sipi_addr, + .fix_rf_phy_num = 2, + .ltecoex_addr = &rtw8723x_common.ltecoex_addr, + + .mac_tbl = &rtw8703b_mac_tbl, + .agc_tbl = &rtw8703b_agc_tbl, + .bb_tbl = &rtw8703b_bb_tbl, + .rf_tbl = {&rtw8703b_rf_a_tbl}, + + .rfe_defs = rtw8703b_rfe_defs, + .rfe_defs_size = ARRAY_SIZE(rtw8703b_rfe_defs), + + .iqk_threshold = 8, + .pwr_track_tbl = &rtw8703b_rtw_pwr_track_tbl, + + /* WOWLAN firmware exists, but not implemented yet */ + .wow_fw_name = "rtw88/rtw8703b_wow_fw.bin", + .wowlan_stub = NULL, + .max_scan_ie_len = IEEE80211_MAX_DATA_LEN, + + /* Vendor driver has a time-based format, converted from + * 20180330 + */ + .coex_para_ver = 0x0133ed6a, + .bt_desired_ver = 0x1c, + .scbd_support = true, + .new_scbd10_def = true, + .ble_hid_profile_support = false, + .wl_mimo_ps_support = false, + .pstdma_type = COEX_PSTDMA_FORCE_LPSOFF, + .bt_rssi_type = COEX_BTRSSI_RATIO, + .ant_isolation = 15, + .rssi_tolerance = 2, + .bt_rssi_step = bt_rssi_step_8703b, + .wl_rssi_step = wl_rssi_step_8703b, + /* sant -> shared antenna, nsant -> non-shared antenna + * Not sure if 8703b versions with non-shared antennas even exist. 
+ */ + .table_sant_num = ARRAY_SIZE(table_sant_8703b), + .table_sant = table_sant_8703b, + .table_nsant_num = 0, + .table_nsant = NULL, + .tdma_sant_num = ARRAY_SIZE(tdma_sant_8703b), + .tdma_sant = tdma_sant_8703b, + .tdma_nsant_num = 0, + .tdma_nsant = NULL, + .wl_rf_para_num = ARRAY_SIZE(rf_para_tx_8703b), + .wl_rf_para_tx = rf_para_tx_8703b, + .wl_rf_para_rx = rf_para_rx_8703b, + .bt_afh_span_bw20 = 0x20, + .bt_afh_span_bw40 = 0x30, + .afh_5g_num = ARRAY_SIZE(afh_5g_8703b), + .afh_5g = afh_5g_8703b, + /* REG_BTG_SEL doesn't seem to have a counterpart in the + * vendor driver. Mathematically it's REG_PAD_CTRL1 + 3. + * + * It is used in the cardemu_to_act power sequence though + * (by address, 0x0067), with the comment: "0x67[0] = 0 to disable + * BT_GPS_SEL pins". That seems to fit. + */ + .btg_reg = NULL, + /* These registers are read (and printed) if + * CONFIG_RTW88_DEBUGFS is enabled. + */ + .coex_info_hw_regs_num = 0, + .coex_info_hw_regs = NULL, +}; +EXPORT_SYMBOL(rtw8703b_hw_spec); + +MODULE_FIRMWARE("rtw88/rtw8703b_fw.bin"); +MODULE_FIRMWARE("rtw88/rtw8703b_wow_fw.bin"); + +MODULE_AUTHOR("Fiona Klute <[email protected]>"); +MODULE_DESCRIPTION("Realtek 802.11n wireless 8703b driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8703b.h b/drivers/net/wireless/realtek/rtw88/rtw8703b.h new file mode 100644 index 000000000000..3e2da2e6739d --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8703b.h @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright Fiona Klute <[email protected]> */ + +#ifndef __RTW8703B_H__ +#define __RTW8703B_H__ + +#include "rtw8723x.h" + +extern const struct rtw_chip_info rtw8703b_hw_spec; + +/* phy status parsing */ +#define VGA_BITS GENMASK(4, 0) +#define LNA_L_BITS GENMASK(7, 5) +#define LNA_H_BIT BIT(7) +/* masks for assembling LNA index from high and low bits */ +#define BIT_LNA_H_MASK BIT(3) +#define BIT_LNA_L_MASK GENMASK(2, 0) + +struct phy_rx_agc_info { +#ifdef __LITTLE_ENDIAN + u8 gain: 7; + u8 trsw: 1; +#else + u8 trsw: 1; + u8 gain: 7; +#endif +} __packed; + +/* This struct is called phy_status_rpt_8192cd in the vendor driver; + * there might be potential to share it with drivers for other chips + * of the same generation. + */ +struct phy_status_8703b { + struct phy_rx_agc_info path_agc[2]; + u8 ch_corr[2]; + u8 cck_sig_qual_ofdm_pwdb_all; + /* for CCK: bits 0:4: VGA index, bits 5:7: LNA index (low) */ + u8 cck_agc_rpt_ofdm_cfosho_a; + /* for CCK: bit 7 is high bit of LNA index if long report type */ + u8 cck_rpt_b_ofdm_cfosho_b; + u8 reserved_1; + u8 noise_power_db_msb; + s8 path_cfotail[2]; + u8 pcts_mask[2]; + s8 stream_rxevm[2]; + u8 path_rxsnr[2]; + u8 noise_power_db_lsb; + u8 reserved_2[3]; + u8 stream_csi[2]; + u8 stream_target_csi[2]; + s8 sig_evm; + u8 reserved_3; + +#ifdef __LITTLE_ENDIAN + u8 antsel_rx_keep_2: 1; + u8 sgi_en: 1; + u8 rxsc: 2; + u8 idle_long: 1; + u8 r_ant_train_en: 1; + u8 ant_sel_b: 1; + u8 ant_sel: 1; +#else /* __BIG_ENDIAN */ + u8 ant_sel: 1; + u8 ant_sel_b: 1; + u8 r_ant_train_en: 1; + u8 idle_long: 1; + u8 rxsc: 2; + u8 sgi_en: 1; + u8 antsel_rx_keep_2: 1; +#endif +} __packed; + +/* Baseband registers */ +#define REG_BB_PWR_SAV5_11N 0x0818 +/* BIT(11) should be 1 for 8703B *and* 8723D, which means the LNA uses 4 + * bits for CCK rates in the report, not 3. The vendor driver logs a + * warning if it's 0, but handles the case. 
+ * + * Purpose of other parts of this register is unknown, 8723cs driver + * code indicates some other chips use certain bits for antenna + * diversity. + */ +#define REG_BB_AMP 0x0950 +#define BIT_MASK_RX_LNA (BIT(11)) + +/* 0xaXX: 40MHz channel settings */ +#define REG_CCK_TXSF2 0x0a24 /* CCK TX filter 2 */ +#define REG_CCK_DBG 0x0a28 /* debug port */ +#define REG_OFDM0_A_TX_AFE 0x0c84 +#define REG_TXIQK_MATRIXB_LSB2_11N 0x0c9c +#define REG_OFDM0_TX_PSD_NOISE 0x0ce4 /* TX pseudo noise weighting */ +#define REG_IQK_RDY 0x0e90 /* is != 0 when IQK is done */ + +/* RF registers */ +#define RF_RCK1 0x1E + +#define AGG_BURST_NUM 3 +#define AGG_BURST_SIZE 0 /* 1K */ +#define BIT_MASK_AGG_BURST_NUM (GENMASK(3, 2)) +#define BIT_MASK_AGG_BURST_SIZE (GENMASK(5, 4)) + +#endif /* __RTW8703B_H__ */ diff --git a/drivers/net/wireless/realtek/rtw88/rtw8703b_tables.c b/drivers/net/wireless/realtek/rtw88/rtw8703b_tables.c new file mode 100644 index 000000000000..81020fd907aa --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8703b_tables.c @@ -0,0 +1,902 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright Fiona Klute <[email protected]> */ + +#include "main.h" +#include "phy.h" +#include "rtw8703b_tables.h" + +static const struct rtw_phy_pg_cfg_pair rtw8703b_bb_pg[] = { + { 0, 0, 0, 0x00000e08, 0x0000ff00, 0x00003200, }, + { 0, 0, 0, 0x0000086c, 0xffffff00, 0x32323200, }, + { 0, 0, 0, 0x00000e00, 0xffffffff, 0x34363636, }, + { 0, 0, 0, 0x00000e04, 0xffffffff, 0x28303234, }, + { 0, 0, 0, 0x00000e10, 0xffffffff, 0x30343434, }, + { 0, 0, 0, 0x00000e14, 0xffffffff, 0x26262830, }, +}; + +RTW_DECL_TABLE_BB_PG(rtw8703b_bb_pg); + +/* Regd: FCC -> 0, ETSI -> 2, MKK -> 1 + * Band: 2.4G -> 0, 5G -> 1 + * Bandwidth (bw): 20M -> 0, 40M -> 1, 80M -> 2, 160M -> 3 + * Rate Section (rs): CCK -> 0, OFDM -> 1, HT -> 2, VHT -> 3 + */ +static const struct rtw_txpwr_lmt_cfg_pair rtw8703b_txpwr_lmt[] = { + {0, 0, 0, 0, 1, 30}, + {2, 0, 0, 0, 1, 26}, + {1, 0, 0, 0, 1, 32}, + {0, 0, 0, 0, 2, 30}, + {2, 0, 0, 0, 2, 26}, + {1, 0, 0, 0, 2, 32}, + {0, 0, 0, 0, 3, 30}, + {2, 0, 0, 0, 3, 26}, + {1, 0, 0, 0, 3, 32}, + {0, 0, 0, 0, 4, 30}, + {2, 0, 0, 0, 4, 26}, + {1, 0, 0, 0, 4, 32}, + {0, 0, 0, 0, 5, 30}, + {2, 0, 0, 0, 5, 26}, + {1, 0, 0, 0, 5, 32}, + {0, 0, 0, 0, 6, 30}, + {2, 0, 0, 0, 6, 26}, + {1, 0, 0, 0, 6, 32}, + {0, 0, 0, 0, 7, 30}, + {2, 0, 0, 0, 7, 26}, + {1, 0, 0, 0, 7, 32}, + {0, 0, 0, 0, 8, 30}, + {2, 0, 0, 0, 8, 26}, + {1, 0, 0, 0, 8, 32}, + {0, 0, 0, 0, 9, 30}, + {2, 0, 0, 0, 9, 26}, + {1, 0, 0, 0, 9, 32}, + {0, 0, 0, 0, 10, 30}, + {2, 0, 0, 0, 10, 26}, + {1, 0, 0, 0, 10, 32}, + {0, 0, 0, 0, 11, 30}, + {2, 0, 0, 0, 11, 26}, + {1, 0, 0, 0, 11, 32}, + {0, 0, 0, 0, 12, 63}, + {2, 0, 0, 0, 12, 26}, + {1, 0, 0, 0, 12, 32}, + {0, 0, 0, 0, 13, 63}, + {2, 0, 0, 0, 13, 26}, + {1, 0, 0, 0, 13, 32}, + {0, 0, 0, 0, 14, 63}, + {2, 0, 0, 0, 14, 63}, + {1, 0, 0, 0, 14, 32}, + {0, 0, 0, 1, 1, 28}, + {2, 0, 0, 1, 1, 28}, + {1, 0, 0, 1, 1, 28}, + {0, 0, 0, 1, 2, 28}, + {2, 0, 0, 1, 2, 32}, + {1, 0, 0, 1, 2, 32}, + {0, 0, 0, 1, 3, 32}, + {2, 0, 0, 1, 3, 32}, + {1, 0, 0, 1, 3, 32}, + {0, 0, 0, 1, 4, 32}, + {2, 0, 0, 1, 4, 32}, + {1, 0, 0, 1, 4, 32}, + {0, 0, 0, 1, 5, 32}, + {2, 0, 0, 1, 5, 32}, + {1, 0, 0, 1, 5, 32}, + {0, 0, 0, 1, 6, 32}, + {2, 0, 0, 1, 6, 32}, + {1, 0, 0, 1, 6, 32}, + {0, 0, 0, 1, 7, 32}, + {2, 0, 0, 1, 7, 32}, + {1, 0, 0, 1, 7, 32}, + {0, 0, 0, 1, 8, 32}, + {2, 0, 0, 1, 8, 32}, + {1, 0, 0, 1, 8, 32}, + {0, 0, 0, 1, 9, 32}, + {2, 0, 0, 1, 9, 32}, + {1, 0, 0, 1, 9, 32}, + {0, 0, 0, 1, 10, 28}, + 
{2, 0, 0, 1, 10, 32}, + {1, 0, 0, 1, 10, 32}, + {0, 0, 0, 1, 11, 28}, + {2, 0, 0, 1, 11, 32}, + {1, 0, 0, 1, 11, 32}, + {0, 0, 0, 1, 12, 63}, + {2, 0, 0, 1, 12, 32}, + {1, 0, 0, 1, 12, 32}, + {0, 0, 0, 1, 13, 63}, + {2, 0, 0, 1, 13, 28}, + {1, 0, 0, 1, 13, 28}, + {0, 0, 0, 1, 14, 63}, + {2, 0, 0, 1, 14, 63}, + {1, 0, 0, 1, 14, 63}, + {0, 0, 0, 2, 1, 26}, + {2, 0, 0, 2, 1, 26}, + {1, 0, 0, 2, 1, 28}, + {0, 0, 0, 2, 2, 26}, + {2, 0, 0, 2, 2, 32}, + {1, 0, 0, 2, 2, 32}, + {0, 0, 0, 2, 3, 32}, + {2, 0, 0, 2, 3, 32}, + {1, 0, 0, 2, 3, 32}, + {0, 0, 0, 2, 4, 32}, + {2, 0, 0, 2, 4, 32}, + {1, 0, 0, 2, 4, 32}, + {0, 0, 0, 2, 5, 32}, + {2, 0, 0, 2, 5, 32}, + {1, 0, 0, 2, 5, 32}, + {0, 0, 0, 2, 6, 32}, + {2, 0, 0, 2, 6, 32}, + {1, 0, 0, 2, 6, 32}, + {0, 0, 0, 2, 7, 32}, + {2, 0, 0, 2, 7, 32}, + {1, 0, 0, 2, 7, 32}, + {0, 0, 0, 2, 8, 32}, + {2, 0, 0, 2, 8, 32}, + {1, 0, 0, 2, 8, 32}, + {0, 0, 0, 2, 9, 32}, + {2, 0, 0, 2, 9, 32}, + {1, 0, 0, 2, 9, 32}, + {0, 0, 0, 2, 10, 26}, + {2, 0, 0, 2, 10, 32}, + {1, 0, 0, 2, 10, 32}, + {0, 0, 0, 2, 11, 26}, + {2, 0, 0, 2, 11, 32}, + {1, 0, 0, 2, 11, 32}, + {0, 0, 0, 2, 12, 63}, + {2, 0, 0, 2, 12, 32}, + {1, 0, 0, 2, 12, 32}, + {0, 0, 0, 2, 13, 63}, + {2, 0, 0, 2, 13, 26}, + {1, 0, 0, 2, 13, 28}, + {0, 0, 0, 2, 14, 63}, + {2, 0, 0, 2, 14, 63}, + {1, 0, 0, 2, 14, 63}, + {0, 0, 1, 2, 1, 63}, + {2, 0, 1, 2, 1, 63}, + {1, 0, 1, 2, 1, 63}, + {0, 0, 1, 2, 2, 63}, + {2, 0, 1, 2, 2, 63}, + {1, 0, 1, 2, 2, 63}, + {0, 0, 1, 2, 3, 26}, + {2, 0, 1, 2, 3, 26}, + {1, 0, 1, 2, 3, 26}, + {0, 0, 1, 2, 4, 26}, + {2, 0, 1, 2, 4, 28}, + {1, 0, 1, 2, 4, 26}, + {0, 0, 1, 2, 5, 28}, + {2, 0, 1, 2, 5, 28}, + {1, 0, 1, 2, 5, 26}, + {0, 0, 1, 2, 6, 28}, + {2, 0, 1, 2, 6, 28}, + {1, 0, 1, 2, 6, 26}, + {0, 0, 1, 2, 7, 28}, + {2, 0, 1, 2, 7, 28}, + {1, 0, 1, 2, 7, 26}, + {0, 0, 1, 2, 8, 26}, + {2, 0, 1, 2, 8, 28}, + {1, 0, 1, 2, 8, 26}, + {0, 0, 1, 2, 9, 26}, + {2, 0, 1, 2, 9, 28}, + {1, 0, 1, 2, 9, 26}, + {0, 0, 1, 2, 10, 26}, + {2, 0, 1, 2, 10, 28}, + {1, 0, 1, 2, 10, 26}, + {0, 0, 1, 2, 11, 26}, + {2, 0, 1, 2, 11, 26}, + {1, 0, 1, 2, 11, 26}, + {0, 0, 1, 2, 12, 63}, + {2, 0, 1, 2, 12, 26}, + {1, 0, 1, 2, 12, 26}, + {0, 0, 1, 2, 13, 63}, + {2, 0, 1, 2, 13, 26}, + {1, 0, 1, 2, 13, 26}, + {0, 0, 1, 2, 14, 63}, + {2, 0, 1, 2, 14, 63}, + {1, 0, 1, 2, 14, 63}, +}; + +RTW_DECL_TABLE_TXPWR_LMT(rtw8703b_txpwr_lmt); + +static const u32 rtw8703b_mac[] = { + 0x02F, 0x00000030, + 0x035, 0x00000000, + 0x067, 0x00000002, + 0x092, 0x00000080, + 0x421, 0x0000000F, + 0x428, 0x0000000A, + 0x429, 0x00000010, + 0x430, 0x00000000, + 0x431, 0x00000000, + 0x432, 0x00000000, + 0x433, 0x00000001, + 0x434, 0x00000002, + 0x435, 0x00000003, + 0x436, 0x00000005, + 0x437, 0x00000007, + 0x438, 0x00000000, + 0x439, 0x00000000, + 0x43A, 0x00000000, + 0x43B, 0x00000001, + 0x43C, 0x00000002, + 0x43D, 0x00000003, + 0x43E, 0x00000005, + 0x43F, 0x00000007, + 0x440, 0x0000005D, + 0x441, 0x00000001, + 0x442, 0x00000000, + 0x444, 0x00000010, + 0x445, 0x00000000, + 0x446, 0x00000000, + 0x447, 0x00000000, + 0x448, 0x00000000, + 0x449, 0x000000F0, + 0x44A, 0x0000000F, + 0x44B, 0x0000003E, + 0x44C, 0x00000010, + 0x44D, 0x00000000, + 0x44E, 0x00000000, + 0x44F, 0x00000000, + 0x450, 0x00000000, + 0x451, 0x000000F0, + 0x452, 0x0000000F, + 0x453, 0x00000000, + 0x456, 0x0000005E, + 0x460, 0x00000066, + 0x461, 0x00000066, + 0x4C8, 0x000000FF, + 0x4C9, 0x00000008, + 0x4CC, 0x000000FF, + 0x4CD, 0x000000FF, + 0x4CE, 0x00000001, + 0x500, 0x00000026, + 0x501, 0x000000A2, + 0x502, 0x0000002F, + 0x503, 0x00000000, + 0x504, 0x00000028, + 0x505, 
0x000000A3, + 0x506, 0x0000005E, + 0x507, 0x00000000, + 0x508, 0x0000002B, + 0x509, 0x000000A4, + 0x50A, 0x0000005E, + 0x50B, 0x00000000, + 0x50C, 0x0000004F, + 0x50D, 0x000000A4, + 0x50E, 0x00000000, + 0x50F, 0x00000000, + 0x512, 0x0000001C, + 0x514, 0x0000000A, + 0x516, 0x0000000A, + 0x525, 0x0000004F, + 0x550, 0x00000010, + 0x551, 0x00000010, + 0x559, 0x00000002, + 0x55C, 0x00000028, + 0x55D, 0x000000FF, + 0x605, 0x00000030, + 0x608, 0x0000000E, + 0x609, 0x0000002A, + 0x620, 0x000000FF, + 0x621, 0x000000FF, + 0x622, 0x000000FF, + 0x623, 0x000000FF, + 0x624, 0x000000FF, + 0x625, 0x000000FF, + 0x626, 0x000000FF, + 0x627, 0x000000FF, + 0x638, 0x00000028, + 0x63C, 0x0000000A, + 0x63D, 0x0000000A, + 0x63E, 0x0000000C, + 0x63F, 0x0000000C, + 0x640, 0x00000040, + 0x642, 0x00000040, + 0x643, 0x00000000, + 0x652, 0x000000C8, + 0x66A, 0x000000B0, + 0x66E, 0x00000005, + 0x700, 0x00000021, + 0x701, 0x00000043, + 0x702, 0x00000065, + 0x703, 0x00000087, + 0x708, 0x00000021, + 0x709, 0x00000043, + 0x70A, 0x00000065, + 0x70B, 0x00000087, + 0x765, 0x00000018, + 0x76E, 0x00000004, +}; + +RTW_DECL_TABLE_PHY_COND(rtw8703b_mac, rtw_phy_cfg_mac); + +static const u32 rtw8703b_agc[] = { + 0xC78, 0xFC000101, + 0xC78, 0xFB010101, + 0xC78, 0xFA020101, + 0xC78, 0xF9030101, + 0xC78, 0xF8040101, + 0xC78, 0xF7050101, + 0xC78, 0xF6060101, + 0xC78, 0xF5070101, + 0xC78, 0xF4080101, + 0xC78, 0xF3090101, + 0xC78, 0xF20A0101, + 0xC78, 0xF10B0101, + 0xC78, 0xF00C0101, + 0xC78, 0xEF0D0101, + 0xC78, 0xEE0E0101, + 0xC78, 0xED0F0101, + 0xC78, 0xEC100101, + 0xC78, 0xEB110101, + 0xC78, 0xEA120101, + 0xC78, 0xE9130101, + 0xC78, 0xE8140101, + 0xC78, 0xE7150101, + 0xC78, 0xE6160101, + 0xC78, 0xE5170101, + 0xC78, 0xE4180101, + 0xC78, 0xE3190101, + 0xC78, 0x661A0101, + 0xC78, 0x651B0101, + 0xC78, 0x641C0101, + 0xC78, 0x631D0101, + 0xC78, 0x071E0101, + 0xC78, 0x061F0101, + 0xC78, 0x05200101, + 0xC78, 0x04210101, + 0xC78, 0x03220101, + 0xC78, 0xE8230001, + 0xC78, 0xE7240001, + 0xC78, 0xE6250001, + 0xC78, 0xE5260001, + 0xC78, 0xE4270001, + 0xC78, 0x89280001, + 0xC78, 0x88290001, + 0xC78, 0x872A0001, + 0xC78, 0x862B0001, + 0xC78, 0x852C0001, + 0xC78, 0x482D0001, + 0xC78, 0x472E0001, + 0xC78, 0x462F0001, + 0xC78, 0x45300001, + 0xC78, 0x44310001, + 0xC78, 0x07320001, + 0xC78, 0x06330001, + 0xC78, 0x05340001, + 0xC78, 0x04350001, + 0xC78, 0x03360001, + 0xC78, 0x02370001, + 0xC78, 0x01380001, + 0xC78, 0x00390001, + 0xC78, 0x003A0001, + 0xC78, 0x003B0001, + 0xC78, 0x003C0001, + 0xC78, 0x003D0001, + 0xC78, 0x003E0001, + 0xC78, 0x003F0001, + 0xC78, 0x7F002001, + 0xC78, 0x7F012001, + 0xC78, 0x7F022001, + 0xC78, 0x7F032001, + 0xC78, 0x7F042001, + 0xC78, 0x7F052001, + 0xC78, 0x7F062001, + 0xC78, 0x7F072001, + 0xC78, 0x7F082001, + 0xC78, 0x7F092001, + 0xC78, 0x7F0A2001, + 0xC78, 0x7F0B2001, + 0xC78, 0x7F0C2001, + 0xC78, 0x7F0D2001, + 0xC78, 0x7F0E2001, + 0xC78, 0x7F0F2001, + 0xC78, 0x7F102001, + 0xC78, 0x7F112001, + 0xC78, 0x7E122001, + 0xC78, 0x7D132001, + 0xC78, 0x7C142001, + 0xC78, 0x7B152001, + 0xC78, 0x7A162001, + 0xC78, 0x79172001, + 0xC78, 0x78182001, + 0xC78, 0x77192001, + 0xC78, 0x761A2001, + 0xC78, 0x751B2001, + 0xC78, 0x741C2001, + 0xC78, 0x731D2001, + 0xC78, 0x721E2001, + 0xC78, 0x711F2001, + 0xC78, 0x70202001, + 0xC78, 0x6F212001, + 0xC78, 0x6E222001, + 0xC78, 0x6D232001, + 0xC78, 0x6C242001, + 0xC78, 0x6B252001, + 0xC78, 0x6A262001, + 0xC78, 0x69272001, + 0xC78, 0x68282001, + 0xC78, 0x67292001, + 0xC78, 0x662A2001, + 0xC78, 0x652B2001, + 0xC78, 0x642C2001, + 0xC78, 0x632D2001, + 0xC78, 0x622E2001, + 0xC78, 0x612F2001, + 0xC78, 
0x60302001, + 0xC78, 0x42312001, + 0xC78, 0x41322001, + 0xC78, 0x40332001, + 0xC78, 0x23342001, + 0xC78, 0x22352001, + 0xC78, 0x21362001, + 0xC78, 0x20372001, + 0xC78, 0x00382001, + 0xC78, 0x02392001, + 0xC78, 0x013A2001, + 0xC78, 0x003B2001, + 0xC78, 0x003C2001, + 0xC78, 0x003D2001, + 0xC78, 0x003E2001, + 0xC78, 0x003F2001, + 0xC78, 0x7F003101, + 0xC78, 0x7F013101, + 0xC78, 0x7F023101, + 0xC78, 0x7F033101, + 0xC78, 0x7F043101, + 0xC78, 0x7F053101, + 0xC78, 0x7F063101, + 0xC78, 0x7E073101, + 0xC78, 0x7D083101, + 0xC78, 0x7C093101, + 0xC78, 0x7B0A3101, + 0xC78, 0x7A0B3101, + 0xC78, 0x790C3101, + 0xC78, 0x780D3101, + 0xC78, 0x770E3101, + 0xC78, 0x760F3101, + 0xC78, 0x75103101, + 0xC78, 0x74113101, + 0xC78, 0x73123101, + 0xC78, 0x72133101, + 0xC78, 0x71143101, + 0xC78, 0x70153101, + 0xC78, 0x6F163101, + 0xC78, 0x69173101, + 0xC78, 0x68183101, + 0xC78, 0x67193101, + 0xC78, 0x661A3101, + 0xC78, 0x651B3101, + 0xC78, 0x641C3101, + 0xC78, 0x631D3101, + 0xC78, 0x621E3101, + 0xC78, 0x611F3101, + 0xC78, 0x60203101, + 0xC78, 0x42213101, + 0xC78, 0x41223101, + 0xC78, 0x40233101, + 0xC78, 0x22243101, + 0xC78, 0x21253101, + 0xC78, 0x20263101, + 0xC78, 0x00273101, + 0xC78, 0x00283101, + 0xC78, 0x00293101, + 0xC78, 0x002A3101, + 0xC78, 0x002B3101, + 0xC78, 0x002C3101, + 0xC78, 0x002D3101, + 0xC78, 0x002E3101, + 0xC78, 0x002F3101, + 0xC78, 0x00303101, + 0xC78, 0x00313101, + 0xC78, 0x00323101, + 0xC78, 0x00333101, + 0xC78, 0x00343101, + 0xC78, 0x00353101, + 0xC78, 0x00363101, + 0xC78, 0x00373101, + 0xC78, 0x00383101, + 0xC78, 0x00393101, + 0xC78, 0x003A3101, + 0xC78, 0x003B3101, + 0xC78, 0x003C3101, + 0xC78, 0x003D3101, + 0xC78, 0x003E3101, + 0xC78, 0x003F3101, + 0xC78, 0xFA403101, + 0xC78, 0xF9413101, + 0xC78, 0xF8423101, + 0xC78, 0xF7433101, + 0xC78, 0xF6443101, + 0xC78, 0xF5453101, + 0xC78, 0xF4463101, + 0xC78, 0xF3473101, + 0xC78, 0xF2483101, + 0xC78, 0xE1493101, + 0xC78, 0xE04A3101, + 0xC78, 0xEF4B3101, + 0xC78, 0xEE4C3101, + 0xC78, 0xED4D3101, + 0xC78, 0xEC4E3101, + 0xC78, 0xEB4F3101, + 0xC78, 0xEA503101, + 0xC78, 0xE9513101, + 0xC78, 0xE8523101, + 0xC78, 0xE7533101, + 0xC78, 0xE6543101, + 0xC78, 0xE5553101, + 0xC78, 0xE4563101, + 0xC78, 0xE3573101, + 0xC78, 0xE2583101, + 0xC78, 0xE1593101, + 0xC78, 0xE05A3101, + 0xC78, 0xC25B3101, + 0xC78, 0xC15C3101, + 0xC78, 0xC05D3101, + 0xC78, 0x825E3101, + 0xC78, 0x815F3101, + 0xC78, 0x80603101, + 0xC78, 0x80613101, + 0xC78, 0x80623101, + 0xC78, 0x80633101, + 0xC78, 0x80643101, + 0xC78, 0x80653101, + 0xC78, 0x80663101, + 0xC78, 0x80673101, + 0xC78, 0x80683101, + 0xC78, 0x80693101, + 0xC78, 0x806A3101, + 0xC78, 0x806B3101, + 0xC78, 0x806C3101, + 0xC78, 0x806D3101, + 0xC78, 0x806E3101, + 0xC78, 0x806F3101, + 0xC78, 0x80703101, + 0xC78, 0x80713101, + 0xC78, 0x80723101, + 0xC78, 0x80733101, + 0xC78, 0x80743101, + 0xC78, 0x80753101, + 0xC78, 0x80763101, + 0xC78, 0x80773101, + 0xC78, 0x80783101, + 0xC78, 0x80793101, + 0xC78, 0x807A3101, + 0xC78, 0x807B3101, + 0xC78, 0x807C3101, + 0xC78, 0x807D3101, + 0xC78, 0x807E3101, + 0xC78, 0x807F3101, + 0xC78, 0xFF402001, + 0xC78, 0xFF412001, + 0xC78, 0xFF422001, + 0xC78, 0xFF432001, + 0xC78, 0xFF442001, + 0xC78, 0xFF452001, + 0xC78, 0xFF462001, + 0xC78, 0xFF472001, + 0xC78, 0xFF482001, + 0xC78, 0xFF492001, + 0xC78, 0xFF4A2001, + 0xC78, 0xFF4B2001, + 0xC78, 0xFF4C2001, + 0xC78, 0xFE4D2001, + 0xC78, 0xFD4E2001, + 0xC78, 0xFC4F2001, + 0xC78, 0xFB502001, + 0xC78, 0xFA512001, + 0xC78, 0xF9522001, + 0xC78, 0xF8532001, + 0xC78, 0xF7542001, + 0xC78, 0xF6552001, + 0xC78, 0xF5562001, + 0xC78, 0xF4572001, + 0xC78, 0xF3582001, + 0xC78, 
0xF2592001, + 0xC78, 0xF15A2001, + 0xC78, 0xF05B2001, + 0xC78, 0xEF5C2001, + 0xC78, 0xEE5D2001, + 0xC78, 0xED5E2001, + 0xC78, 0xEC5F2001, + 0xC78, 0xEB602001, + 0xC78, 0xEA612001, + 0xC78, 0xE9622001, + 0xC78, 0xE8632001, + 0xC78, 0xE7642001, + 0xC78, 0xE6652001, + 0xC78, 0xE5662001, + 0xC78, 0xE4672001, + 0xC78, 0xE3682001, + 0xC78, 0xC5692001, + 0xC78, 0xC46A2001, + 0xC78, 0xC36B2001, + 0xC78, 0xA46C2001, + 0xC78, 0x846D2001, + 0xC78, 0x836E2001, + 0xC78, 0x826F2001, + 0xC78, 0x81702001, + 0xC78, 0x80712001, + 0xC78, 0x80722001, + 0xC78, 0x80732001, + 0xC78, 0x80742001, + 0xC78, 0x80752001, + 0xC78, 0x80762001, + 0xC78, 0x80772001, + 0xC78, 0x80782001, + 0xC78, 0x80792001, + 0xC78, 0x807A2001, + 0xC78, 0x807B2001, + 0xC78, 0x807C2001, + 0xC78, 0x807D2001, + 0xC78, 0x807E2001, + 0xC78, 0x807F2001, + 0xC50, 0x69553422, + 0xC50, 0x69553420, +}; + +RTW_DECL_TABLE_PHY_COND(rtw8703b_agc, rtw_phy_cfg_agc); + +/* init values for BB registers */ +static const u32 rtw8703b_bb[] = { + 0x800, 0x83045700, + 0x804, 0x00000003, + 0x808, 0x0000FC00, + 0x80C, 0x0000000A, + 0x810, 0x10001331, + 0x814, 0x020C3D10, + 0x818, 0x02200385, + 0x81C, 0x00000000, + 0x820, 0x01000100, + 0x824, 0x00390204, + 0x828, 0x00000000, + 0x82C, 0x00000000, + 0x830, 0x00000000, + 0x834, 0x00000000, + 0x838, 0x00000000, + 0x83C, 0x00000000, + 0x840, 0x00010000, + 0x844, 0x00000000, + 0x848, 0x00000000, + 0x84C, 0x00000000, + 0x850, 0x00000000, + 0x854, 0x00000000, + 0x858, 0x569A11A9, + 0x85C, 0x01000014, + 0x860, 0x66F60110, + 0x864, 0x061F0649, + 0x868, 0x00000000, + 0x86C, 0x27272700, + 0x870, 0x07000760, + 0x874, 0x25004000, + 0x878, 0x00000808, + 0x87C, 0x004F0201, + 0x880, 0xB0000B1E, + 0x884, 0x00000001, + 0x888, 0x00000000, + 0x88C, 0xCCC000C0, + 0x890, 0x00000800, + 0x894, 0xFFFFFFFE, + 0x898, 0x40302010, + 0x89C, 0x00706050, + 0x900, 0x00000000, + 0x904, 0x00000023, + 0x908, 0x00000000, + 0x90C, 0x81121111, + 0x910, 0x00000002, + 0x914, 0x00000201, + 0x948, 0x99000000, + 0x94C, 0x00000010, + 0x950, 0x20003800, + 0x954, 0x4A880000, + 0x958, 0x4BC5D87A, + 0x95C, 0x04EB9B79, + 0xA00, 0x00D047C8, + 0xA04, 0x80FF800C, + 0xA08, 0x8C838300, + 0xA0C, 0x2E7F120F, + 0xA10, 0x9500BB78, + 0xA14, 0x1114D028, + 0xA18, 0x00881117, + 0xA1C, 0x89140F00, + 0xA20, 0xD1D80000, + 0xA24, 0x5A7DA0BD, + 0xA28, 0x0000223B, + 0xA2C, 0x00D30000, + 0xA70, 0x101FBF00, + 0xA74, 0x00000007, + 0xA78, 0x00008900, + 0xA7C, 0x225B0606, + 0xA80, 0x2180FA74, + 0xA84, 0x00120000, + 0xA88, 0x040C0000, + 0xA8C, 0x12345678, + 0xA90, 0xABCDEF00, + 0xA94, 0x001B1B89, + 0xA98, 0x05100000, + 0xA9C, 0x3F000000, + 0xAA0, 0x00000000, + 0xB2C, 0x00000000, + 0xC00, 0x48071D40, + 0xC04, 0x03A05611, + 0xC08, 0x000000E4, + 0xC0C, 0x6C6C6C6C, + 0xC10, 0x18800000, + 0xC14, 0x40000100, + 0xC18, 0x08800000, + 0xC1C, 0x40000100, + 0xC20, 0x00000000, + 0xC24, 0x00000000, + 0xC28, 0x00000000, + 0xC2C, 0x00000000, + 0xC30, 0x69E9AC4B, + 0xC34, 0x31000040, + 0xC38, 0x21688080, + 0xC3C, 0x000016CC, + 0xC40, 0x1F78403F, + 0xC44, 0x00010036, + 0xC48, 0xEC020107, + 0xC4C, 0x007F037F, + 0xC50, 0x69553420, + 0xC54, 0x43BC0094, + 0xC58, 0x00015967, + 0xC5C, 0x18250492, + 0xC60, 0x00000000, + 0xC64, 0x7112848B, + 0xC68, 0x47C07BFF, + 0xC6C, 0x00000036, + 0xC70, 0x2C7F000D, + 0xC74, 0x020600DB, + 0xC78, 0x0000001F, + 0xC7C, 0x00B91612, + 0xC80, 0x390000E4, + 0xC84, 0x19F60000, + 0xC88, 0x40000100, + 0xC8C, 0x20200000, + 0xC90, 0x00091521, + 0xC94, 0x00000000, + 0xC98, 0x00121820, + 0xC9C, 0x00007F7F, + 0xCA0, 0x00000000, + 0xCA4, 0x000300A0, + 0xCA8, 0x00000000, + 0xCAC, 0x00000000, + 
0xCB0, 0x00000000, + 0xCB4, 0x00000000, + 0xCB8, 0x00000000, + 0xCBC, 0x28000000, + 0xCC0, 0x00000000, + 0xCC4, 0x00000000, + 0xCC8, 0x00000000, + 0xCCC, 0x00000000, + 0xCD0, 0x00000000, + 0xCD4, 0x00000000, + 0xCD8, 0x64B22427, + 0xCDC, 0x00766932, + 0xCE0, 0x00222222, + 0xCE4, 0x10000000, + 0xCE8, 0x37644302, + 0xCEC, 0x2F97D40C, + 0xD00, 0x00030740, + 0xD04, 0x40020401, + 0xD08, 0x0000907F, + 0xD0C, 0x20010201, + 0xD10, 0xA0633333, + 0xD14, 0x3333BC53, + 0xD18, 0x7A8F5B6F, + 0xD2C, 0xCB979975, + 0xD30, 0x00000000, + 0xD34, 0x80608000, + 0xD38, 0x98000000, + 0xD3C, 0x40127353, + 0xD40, 0x00000000, + 0xD44, 0x00000000, + 0xD48, 0x00000000, + 0xD4C, 0x00000000, + 0xD50, 0x6437140A, + 0xD54, 0x00000000, + 0xD58, 0x00000282, + 0xD5C, 0x30032064, + 0xD60, 0x4653DE68, + 0xD64, 0x04518A3C, + 0xD68, 0x00002101, + 0xE00, 0x2D2D2D2D, + 0xE04, 0x2D2D2D2D, + 0xE08, 0x0390272D, + 0xE10, 0x2D2D2D2D, + 0xE14, 0x2D2D2D2D, + 0xE18, 0x2D2D2D2D, + 0xE1C, 0x2D2D2D2D, + 0xE28, 0x00000000, + 0xE30, 0x1000DC1F, + 0xE34, 0x10008C1F, + 0xE38, 0x02140102, + 0xE3C, 0x681604C2, + 0xE40, 0x01007C00, + 0xE44, 0x01004800, + 0xE48, 0xFB000000, + 0xE4C, 0x000028D1, + 0xE50, 0x1000DC1F, + 0xE54, 0x10008C1F, + 0xE58, 0x02140102, + 0xE5C, 0x28160D05, + 0xE60, 0x00000048, + 0xE68, 0x001B25A4, + 0xE6C, 0x01C00014, + 0xE70, 0x01C00014, + 0xE74, 0x02000014, + 0xE78, 0x02000014, + 0xE7C, 0x02000014, + 0xE80, 0x02000014, + 0xE84, 0x01C00014, + 0xE88, 0x02000014, + 0xE8C, 0x01C00014, + 0xED0, 0x01C00014, + 0xED4, 0x01C00014, + 0xED8, 0x01C00014, + 0xEDC, 0x00000014, + 0xEE0, 0x00000014, + 0xEE8, 0x21555448, + 0xEEC, 0x03C00014, + 0xF14, 0x00000003, + 0xF4C, 0x00000000, + 0xF00, 0x00000300, +}; + +RTW_DECL_TABLE_PHY_COND(rtw8703b_bb, rtw_phy_cfg_bb); + +static const u32 rtw8703b_rf_a[] = { + 0x018, 0x00008C01, + 0x0B5, 0x0008C050, + 0x0B1, 0x00054258, + 0x0B2, 0x00054C00, + 0x030, 0x00018000, + 0x031, 0x00000027, + 0x032, 0x000A7F07, + 0x030, 0x00020000, + 0x031, 0x00000027, + 0x032, 0x000E7D87, + 0x01C, 0x000F8635, + 0x0EF, 0x00080000, + 0x030, 0x00008000, + 0x031, 0x00000004, + 0x032, 0x00006105, + 0x0EF, 0x00000000, + 0x0EF, 0x00000400, + 0x041, 0x0000BD54, + 0x041, 0x00003DD4, + 0x041, 0x0000FDD4, + 0x0EF, 0x00000000, + 0x0DF, 0x00000600, + 0x050, 0x0000C6DB, + 0x051, 0x00004505, + 0x052, 0x0000E31D, + 0x053, 0x00040579, + 0x054, 0x00000000, + 0x055, 0x0008206E, + 0x056, 0x00040000, + 0x0EF, 0x00000100, + 0x034, 0x0000ADD7, + 0x034, 0x00009DD4, + 0x034, 0x00008DD1, + 0x034, 0x00007DCE, + 0x034, 0x00006DCB, + 0x034, 0x00005CCE, + 0x034, 0x000048CD, + 0x034, 0x000034CC, + 0x034, 0x0000244F, + 0x034, 0x0000144C, + 0x034, 0x0000004E, + 0x0EF, 0x00000000, + 0x0EF, 0x00002000, + 0x03B, 0x0003801F, + 0x03B, 0x00030002, + 0x03B, 0x00028001, + 0x03B, 0x00020000, + 0x03B, 0x00018003, + 0x03B, 0x00010002, + 0x03B, 0x00008001, + 0x03B, 0x00000000, + 0x0EF, 0x00000000, + 0x082, 0x000C0000, + 0x083, 0x000AF025, + 0x01E, 0x00000C08, +}; + +RTW_DECL_TABLE_RF_RADIO(rtw8703b_rf_a, A); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8703b_tables.h b/drivers/net/wireless/realtek/rtw88/rtw8703b_tables.h new file mode 100644 index 000000000000..98bd399bddbf --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8703b_tables.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright Fiona Klute <[email protected]> */ + +#ifndef __RTW8703B_TABLES_H__ +#define __RTW8703B_TABLES_H__ + +extern const struct rtw_table rtw8703b_bb_pg_tbl; +extern const struct rtw_table rtw8703b_txpwr_lmt_tbl; +extern const struct 
rtw_table rtw8703b_mac_tbl; +extern const struct rtw_table rtw8703b_agc_tbl; +extern const struct rtw_table rtw8703b_bb_tbl; +extern const struct rtw_table rtw8703b_rf_a_tbl; + +#endif diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723cs.c b/drivers/net/wireless/realtek/rtw88/rtw8723cs.c new file mode 100644 index 000000000000..8d38d36be8c0 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8723cs.c @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright Fiona Klute <[email protected]> */ + +#include <linux/mmc/sdio_func.h> +#include <linux/mmc/sdio_ids.h> +#include <linux/module.h> +#include "main.h" +#include "rtw8703b.h" +#include "sdio.h" + +static const struct sdio_device_id rtw_8723cs_id_table[] = { + { + SDIO_DEVICE(SDIO_VENDOR_ID_REALTEK, + SDIO_DEVICE_ID_REALTEK_RTW8723CS), + .driver_data = (kernel_ulong_t)&rtw8703b_hw_spec, + }, + {} +}; +MODULE_DEVICE_TABLE(sdio, rtw_8723cs_id_table); + +static struct sdio_driver rtw_8723cs_driver = { + .name = "rtw8723cs", + .id_table = rtw_8723cs_id_table, + .probe = rtw_sdio_probe, + .remove = rtw_sdio_remove, + .drv = { + .pm = &rtw_sdio_pm_ops, + .shutdown = rtw_sdio_shutdown + }}; +module_sdio_driver(rtw_8723cs_driver); + +MODULE_AUTHOR("Fiona Klute <[email protected]>"); +MODULE_DESCRIPTION("Realtek 802.11n wireless 8723cs driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.c b/drivers/net/wireless/realtek/rtw88/rtw8723d.c index c575476a0020..f8df4c84d39f 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8723d.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.c @@ -9,36 +9,13 @@ #include "tx.h" #include "rx.h" #include "phy.h" +#include "rtw8723x.h" #include "rtw8723d.h" #include "rtw8723d_table.h" #include "mac.h" #include "reg.h" #include "debug.h" -static const struct rtw_hw_reg rtw8723d_txagc[] = { - [DESC_RATE1M] = { .addr = 0xe08, .mask = 0x0000ff00 }, - [DESC_RATE2M] = { .addr = 0x86c, .mask = 0x0000ff00 }, - [DESC_RATE5_5M] = { .addr = 0x86c, .mask = 0x00ff0000 }, - [DESC_RATE11M] = { .addr = 0x86c, .mask = 0xff000000 }, - [DESC_RATE6M] = { .addr = 0xe00, .mask = 0x000000ff }, - [DESC_RATE9M] = { .addr = 0xe00, .mask = 0x0000ff00 }, - [DESC_RATE12M] = { .addr = 0xe00, .mask = 0x00ff0000 }, - [DESC_RATE18M] = { .addr = 0xe00, .mask = 0xff000000 }, - [DESC_RATE24M] = { .addr = 0xe04, .mask = 0x000000ff }, - [DESC_RATE36M] = { .addr = 0xe04, .mask = 0x0000ff00 }, - [DESC_RATE48M] = { .addr = 0xe04, .mask = 0x00ff0000 }, - [DESC_RATE54M] = { .addr = 0xe04, .mask = 0xff000000 }, - [DESC_RATEMCS0] = { .addr = 0xe10, .mask = 0x000000ff }, - [DESC_RATEMCS1] = { .addr = 0xe10, .mask = 0x0000ff00 }, - [DESC_RATEMCS2] = { .addr = 0xe10, .mask = 0x00ff0000 }, - [DESC_RATEMCS3] = { .addr = 0xe10, .mask = 0xff000000 }, - [DESC_RATEMCS4] = { .addr = 0xe14, .mask = 0x000000ff }, - [DESC_RATEMCS5] = { .addr = 0xe14, .mask = 0x0000ff00 }, - [DESC_RATEMCS6] = { .addr = 0xe14, .mask = 0x00ff0000 }, - [DESC_RATEMCS7] = { .addr = 0xe14, .mask = 0xff000000 }, -}; - -#define WLAN_TXQ_RPT_EN 0x1F #define WLAN_SLOT_TIME 0x09 #define WLAN_RL_VAL 0x3030 #define WLAN_BAR_VAL 0x0201ffff @@ -65,34 +42,6 @@ static const struct rtw_hw_reg rtw8723d_txagc[] = { #define WLAN_LTR_CTRL1 0xCB004010 #define WLAN_LTR_CTRL2 0x01233425 -static void rtw8723d_lck(struct rtw_dev *rtwdev) -{ - u32 lc_cal; - u8 val_ctx, rf_val; - int ret; - - val_ctx = rtw_read8(rtwdev, REG_CTX); - if ((val_ctx & BIT_MASK_CTX_TYPE) != 0) - rtw_write8(rtwdev, REG_CTX, val_ctx & ~BIT_MASK_CTX_TYPE); - else 
- rtw_write8(rtwdev, REG_TXPAUSE, 0xFF); - lc_cal = rtw_read_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK); - - rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK, lc_cal | BIT_LCK); - - ret = read_poll_timeout(rtw_read_rf, rf_val, rf_val != 0x1, - 10000, 1000000, false, - rtwdev, RF_PATH_A, RF_CFGCH, BIT_LCK); - if (ret) - rtw_warn(rtwdev, "failed to poll LCK status bit\n"); - - rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK, lc_cal); - if ((val_ctx & BIT_MASK_CTX_TYPE) != 0) - rtw_write8(rtwdev, REG_CTX, val_ctx); - else - rtw_write8(rtwdev, REG_TXPAUSE, 0x00); -} - static const u32 rtw8723d_ofdm_swing_table[] = { 0x0b40002d, 0x0c000030, 0x0cc00033, 0x0d800036, 0x0e400039, 0x0f00003c, 0x10000040, 0x11000044, 0x12000048, 0x1300004c, 0x14400051, 0x15800056, @@ -196,7 +145,7 @@ static void rtw8723d_phy_set_param(struct rtw_dev *rtwdev) rtw_write16_set(rtwdev, REG_TXDMA_OFFSET_CHK, BIT_DROP_DATA_EN); - rtw8723d_lck(rtwdev); + rtw8723x_lck(rtwdev); rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, 0x50); rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, 0x20); @@ -204,67 +153,6 @@ static void rtw8723d_phy_set_param(struct rtw_dev *rtwdev) rtw8723d_pwrtrack_init(rtwdev); } -static void rtw8723de_efuse_parsing(struct rtw_efuse *efuse, - struct rtw8723d_efuse *map) -{ - ether_addr_copy(efuse->addr, map->e.mac_addr); -} - -static void rtw8723du_efuse_parsing(struct rtw_efuse *efuse, - struct rtw8723d_efuse *map) -{ - ether_addr_copy(efuse->addr, map->u.mac_addr); -} - -static void rtw8723ds_efuse_parsing(struct rtw_efuse *efuse, - struct rtw8723d_efuse *map) -{ - ether_addr_copy(efuse->addr, map->s.mac_addr); -} - -static int rtw8723d_read_efuse(struct rtw_dev *rtwdev, u8 *log_map) -{ - struct rtw_efuse *efuse = &rtwdev->efuse; - struct rtw8723d_efuse *map; - int i; - - map = (struct rtw8723d_efuse *)log_map; - - efuse->rfe_option = 0; - efuse->rf_board_option = map->rf_board_option; - efuse->crystal_cap = map->xtal_k; - efuse->pa_type_2g = map->pa_type; - efuse->lna_type_2g = map->lna_type_2g[0]; - efuse->channel_plan = map->channel_plan; - efuse->country_code[0] = map->country_code[0]; - efuse->country_code[1] = map->country_code[1]; - efuse->bt_setting = map->rf_bt_setting; - efuse->regd = map->rf_board_option & 0x7; - efuse->thermal_meter[0] = map->thermal_meter; - efuse->thermal_meter_k = map->thermal_meter; - efuse->afe = map->afe; - - for (i = 0; i < 4; i++) - efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i]; - - switch (rtw_hci_type(rtwdev)) { - case RTW_HCI_TYPE_PCIE: - rtw8723de_efuse_parsing(efuse, map); - break; - case RTW_HCI_TYPE_USB: - rtw8723du_efuse_parsing(efuse, map); - break; - case RTW_HCI_TYPE_SDIO: - rtw8723ds_efuse_parsing(efuse, map); - break; - default: - /* unsupported now */ - return -ENOTSUPP; - } - - return 0; -} - static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status, struct rtw_rx_pkt_stat *pkt_stat) { @@ -540,297 +428,11 @@ static void rtw8723d_set_channel(struct rtw_dev *rtwdev, u8 channel, u8 bw, rtw8723d_set_channel_bb(rtwdev, channel, bw, primary_chan_idx); } -#define BIT_CFENDFORM BIT(9) -#define BIT_WMAC_TCR_ERR0 BIT(12) -#define BIT_WMAC_TCR_ERR1 BIT(13) -#define BIT_TCR_CFG (BIT_CFENDFORM | BIT_WMAC_TCR_ERR0 | \ - BIT_WMAC_TCR_ERR1) -#define WLAN_RX_FILTER0 0xFFFF -#define WLAN_RX_FILTER1 0x400 -#define WLAN_RX_FILTER2 0xFFFF -#define WLAN_RCR_CFG 0x700060CE - -static int rtw8723d_mac_init(struct rtw_dev *rtwdev) -{ - rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 1, WLAN_TXQ_RPT_EN); - rtw_write32(rtwdev, REG_TCR, 
BIT_TCR_CFG); - - rtw_write16(rtwdev, REG_RXFLTMAP0, WLAN_RX_FILTER0); - rtw_write16(rtwdev, REG_RXFLTMAP1, WLAN_RX_FILTER1); - rtw_write16(rtwdev, REG_RXFLTMAP2, WLAN_RX_FILTER2); - rtw_write32(rtwdev, REG_RCR, WLAN_RCR_CFG); - - rtw_write32(rtwdev, REG_INT_MIG, 0); - rtw_write32(rtwdev, REG_MCUTST_1, 0x0); - - rtw_write8(rtwdev, REG_MISC_CTRL, BIT_DIS_SECOND_CCA); - rtw_write8(rtwdev, REG_2ND_CCA_CTRL, 0); - - return 0; -} - static void rtw8723d_shutdown(struct rtw_dev *rtwdev) { rtw_write16_set(rtwdev, REG_HCI_OPT_CTRL, BIT_USB_SUS_DIS); } -static void rtw8723d_cfg_ldo25(struct rtw_dev *rtwdev, bool enable) -{ - u8 ldo_pwr; - - ldo_pwr = rtw_read8(rtwdev, REG_LDO_EFUSE_CTRL + 3); - if (enable) { - ldo_pwr &= ~BIT_MASK_LDO25_VOLTAGE; - ldo_pwr |= (BIT_LDO25_VOLTAGE_V25 << 4) | BIT_LDO25_EN; - } else { - ldo_pwr &= ~BIT_LDO25_EN; - } - rtw_write8(rtwdev, REG_LDO_EFUSE_CTRL + 3, ldo_pwr); -} - -static void -rtw8723d_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs) -{ - struct rtw_hal *hal = &rtwdev->hal; - const struct rtw_hw_reg *txagc; - u8 rate, pwr_index; - int j; - - for (j = 0; j < rtw_rate_size[rs]; j++) { - rate = rtw_rate_section[rs][j]; - pwr_index = hal->tx_pwr_tbl[path][rate]; - - if (rate >= ARRAY_SIZE(rtw8723d_txagc)) { - rtw_warn(rtwdev, "rate 0x%x isn't supported\n", rate); - continue; - } - txagc = &rtw8723d_txagc[rate]; - if (!txagc->addr) { - rtw_warn(rtwdev, "rate 0x%x isn't defined\n", rate); - continue; - } - - rtw_write32_mask(rtwdev, txagc->addr, txagc->mask, pwr_index); - } -} - -static void rtw8723d_set_tx_power_index(struct rtw_dev *rtwdev) -{ - struct rtw_hal *hal = &rtwdev->hal; - int rs, path; - - for (path = 0; path < hal->rf_path_num; path++) { - for (rs = 0; rs <= RTW_RATE_SECTION_HT_1S; rs++) - rtw8723d_set_tx_power_index_by_rate(rtwdev, path, rs); - } -} - -static void rtw8723d_efuse_grant(struct rtw_dev *rtwdev, bool on) -{ - if (on) { - rtw_write8(rtwdev, REG_EFUSE_ACCESS, EFUSE_ACCESS_ON); - - rtw_write16_set(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_ELDR); - rtw_write16_set(rtwdev, REG_SYS_CLKR, BIT_LOADER_CLK_EN | BIT_ANA8M); - } else { - rtw_write8(rtwdev, REG_EFUSE_ACCESS, EFUSE_ACCESS_OFF); - } -} - -static void rtw8723d_false_alarm_statistics(struct rtw_dev *rtwdev) -{ - struct rtw_dm_info *dm_info = &rtwdev->dm_info; - u32 cck_fa_cnt; - u32 ofdm_fa_cnt; - u32 crc32_cnt; - u32 val32; - - /* hold counter */ - rtw_write32_mask(rtwdev, REG_OFDM_FA_HOLDC_11N, BIT_MASK_OFDM_FA_KEEP, 1); - rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_KEEP1, 1); - rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_CNT_KEEP, 1); - rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_FA_KEEP, 1); - - cck_fa_cnt = rtw_read32_mask(rtwdev, REG_CCK_FA_LSB_11N, MASKBYTE0); - cck_fa_cnt += rtw_read32_mask(rtwdev, REG_CCK_FA_MSB_11N, MASKBYTE3) << 8; - - val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE1_11N); - ofdm_fa_cnt = u32_get_bits(val32, BIT_MASK_OFDM_FF_CNT); - ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_SF_CNT); - val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE2_11N); - dm_info->ofdm_cca_cnt = u32_get_bits(val32, BIT_MASK_OFDM_CCA_CNT); - ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_PF_CNT); - val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE3_11N); - ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_RI_CNT); - ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_CRC_CNT); - val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE4_11N); - ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_MNS_CNT); - - dm_info->cck_fa_cnt = cck_fa_cnt; - 
dm_info->ofdm_fa_cnt = ofdm_fa_cnt; - dm_info->total_fa_cnt = cck_fa_cnt + ofdm_fa_cnt; - - dm_info->cck_err_cnt = rtw_read32(rtwdev, REG_IGI_C_11N); - dm_info->cck_ok_cnt = rtw_read32(rtwdev, REG_IGI_D_11N); - crc32_cnt = rtw_read32(rtwdev, REG_OFDM_CRC32_CNT_11N); - dm_info->ofdm_err_cnt = u32_get_bits(crc32_cnt, BIT_MASK_OFDM_LCRC_ERR); - dm_info->ofdm_ok_cnt = u32_get_bits(crc32_cnt, BIT_MASK_OFDM_LCRC_OK); - crc32_cnt = rtw_read32(rtwdev, REG_HT_CRC32_CNT_11N); - dm_info->ht_err_cnt = u32_get_bits(crc32_cnt, BIT_MASK_HT_CRC_ERR); - dm_info->ht_ok_cnt = u32_get_bits(crc32_cnt, BIT_MASK_HT_CRC_OK); - dm_info->vht_err_cnt = 0; - dm_info->vht_ok_cnt = 0; - - val32 = rtw_read32(rtwdev, REG_CCK_CCA_CNT_11N); - dm_info->cck_cca_cnt = (u32_get_bits(val32, BIT_MASK_CCK_FA_MSB) << 8) | - u32_get_bits(val32, BIT_MASK_CCK_FA_LSB); - dm_info->total_cca_cnt = dm_info->cck_cca_cnt + dm_info->ofdm_cca_cnt; - - /* reset counter */ - rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTC_11N, BIT_MASK_OFDM_FA_RST, 1); - rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTC_11N, BIT_MASK_OFDM_FA_RST, 0); - rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_RST1, 1); - rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_RST1, 0); - rtw_write32_mask(rtwdev, REG_OFDM_FA_HOLDC_11N, BIT_MASK_OFDM_FA_KEEP, 0); - rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_KEEP1, 0); - rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_CNT_KPEN, 0); - rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_CNT_KPEN, 2); - rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_FA_KPEN, 0); - rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_FA_KPEN, 2); - rtw_write32_mask(rtwdev, REG_PAGE_F_RST_11N, BIT_MASK_F_RST_ALL, 1); - rtw_write32_mask(rtwdev, REG_PAGE_F_RST_11N, BIT_MASK_F_RST_ALL, 0); -} - -static const u32 iqk_adda_regs[] = { - 0x85c, 0xe6c, 0xe70, 0xe74, 0xe78, 0xe7c, 0xe80, 0xe84, 0xe88, 0xe8c, - 0xed0, 0xed4, 0xed8, 0xedc, 0xee0, 0xeec -}; - -static const u32 iqk_mac8_regs[] = {0x522, 0x550, 0x551}; -static const u32 iqk_mac32_regs[] = {0x40}; - -static const u32 iqk_bb_regs[] = { - 0xc04, 0xc08, 0x874, 0xb68, 0xb6c, 0x870, 0x860, 0x864, 0xa04 -}; - -#define IQK_ADDA_REG_NUM ARRAY_SIZE(iqk_adda_regs) -#define IQK_MAC8_REG_NUM ARRAY_SIZE(iqk_mac8_regs) -#define IQK_MAC32_REG_NUM ARRAY_SIZE(iqk_mac32_regs) -#define IQK_BB_REG_NUM ARRAY_SIZE(iqk_bb_regs) - -struct iqk_backup_regs { - u32 adda[IQK_ADDA_REG_NUM]; - u8 mac8[IQK_MAC8_REG_NUM]; - u32 mac32[IQK_MAC32_REG_NUM]; - u32 bb[IQK_BB_REG_NUM]; - - u32 lte_path; - u32 lte_gnt; - - u32 bb_sel_btg; - u8 btg_sel; - - u8 igia; - u8 igib; -}; - -static void rtw8723d_iqk_backup_regs(struct rtw_dev *rtwdev, - struct iqk_backup_regs *backup) -{ - int i; - - for (i = 0; i < IQK_ADDA_REG_NUM; i++) - backup->adda[i] = rtw_read32(rtwdev, iqk_adda_regs[i]); - - for (i = 0; i < IQK_MAC8_REG_NUM; i++) - backup->mac8[i] = rtw_read8(rtwdev, iqk_mac8_regs[i]); - for (i = 0; i < IQK_MAC32_REG_NUM; i++) - backup->mac32[i] = rtw_read32(rtwdev, iqk_mac32_regs[i]); - - for (i = 0; i < IQK_BB_REG_NUM; i++) - backup->bb[i] = rtw_read32(rtwdev, iqk_bb_regs[i]); - - backup->igia = rtw_read32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0); - backup->igib = rtw_read32_mask(rtwdev, REG_OFDM0_XBAGC1, MASKBYTE0); - - backup->bb_sel_btg = rtw_read32(rtwdev, REG_BB_SEL_BTG); -} - -static void rtw8723d_iqk_restore_regs(struct rtw_dev *rtwdev, - const struct iqk_backup_regs *backup) -{ - int i; - - for (i = 0; i < IQK_ADDA_REG_NUM; i++) - rtw_write32(rtwdev, 
iqk_adda_regs[i], backup->adda[i]); - - for (i = 0; i < IQK_MAC8_REG_NUM; i++) - rtw_write8(rtwdev, iqk_mac8_regs[i], backup->mac8[i]); - for (i = 0; i < IQK_MAC32_REG_NUM; i++) - rtw_write32(rtwdev, iqk_mac32_regs[i], backup->mac32[i]); - - for (i = 0; i < IQK_BB_REG_NUM; i++) - rtw_write32(rtwdev, iqk_bb_regs[i], backup->bb[i]); - - rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, 0x50); - rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, backup->igia); - - rtw_write32_mask(rtwdev, REG_OFDM0_XBAGC1, MASKBYTE0, 0x50); - rtw_write32_mask(rtwdev, REG_OFDM0_XBAGC1, MASKBYTE0, backup->igib); - - rtw_write32(rtwdev, REG_TXIQK_TONE_A_11N, 0x01008c00); - rtw_write32(rtwdev, REG_RXIQK_TONE_A_11N, 0x01008c00); -} - -static void rtw8723d_iqk_backup_path_ctrl(struct rtw_dev *rtwdev, - struct iqk_backup_regs *backup) -{ - backup->btg_sel = rtw_read8(rtwdev, REG_BTG_SEL); - rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] original 0x67 = 0x%x\n", - backup->btg_sel); -} - -static void rtw8723d_iqk_config_path_ctrl(struct rtw_dev *rtwdev) -{ - rtw_write32_mask(rtwdev, REG_PAD_CTRL1, BIT_BT_BTG_SEL, 0x1); - rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] set 0x67 = 0x%x\n", - rtw_read32_mask(rtwdev, REG_PAD_CTRL1, MASKBYTE3)); -} - -static void rtw8723d_iqk_restore_path_ctrl(struct rtw_dev *rtwdev, - const struct iqk_backup_regs *backup) -{ - rtw_write8(rtwdev, REG_BTG_SEL, backup->btg_sel); - rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] restore 0x67 = 0x%x\n", - rtw_read32_mask(rtwdev, REG_PAD_CTRL1, MASKBYTE3)); -} - -static void rtw8723d_iqk_backup_lte_path_gnt(struct rtw_dev *rtwdev, - struct iqk_backup_regs *backup) -{ - backup->lte_path = rtw_read32(rtwdev, REG_LTECOEX_PATH_CONTROL); - rtw_write32(rtwdev, REG_LTECOEX_CTRL, 0x800f0038); - mdelay(1); - backup->lte_gnt = rtw_read32(rtwdev, REG_LTECOEX_READ_DATA); - rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] OriginalGNT = 0x%x\n", - backup->lte_gnt); -} - -static void rtw8723d_iqk_config_lte_path_gnt(struct rtw_dev *rtwdev) -{ - rtw_write32(rtwdev, REG_LTECOEX_WRITE_DATA, 0x0000ff00); - rtw_write32(rtwdev, REG_LTECOEX_CTRL, 0xc0020038); - rtw_write32_mask(rtwdev, REG_LTECOEX_PATH_CONTROL, BIT_LTE_MUX_CTRL_PATH, 0x1); -} - -static void rtw8723d_iqk_restore_lte_path_gnt(struct rtw_dev *rtwdev, - const struct iqk_backup_regs *bak) -{ - rtw_write32(rtwdev, REG_LTECOEX_WRITE_DATA, bak->lte_gnt); - rtw_write32(rtwdev, REG_LTECOEX_CTRL, 0xc00f0038); - rtw_write32(rtwdev, REG_LTECOEX_PATH_CONTROL, bak->lte_path); -} - struct rtw_8723d_iqk_cfg { const char *name; u32 val_bb_sel_btg; @@ -930,6 +532,8 @@ static u8 rtw8723d_iqk_check_rx_failed(struct rtw_dev *rtwdev, return 0; } +#define IQK_LTE_WRITE_VAL_8723D 0x0000ff00 + static void rtw8723d_iqk_one_shot(struct rtw_dev *rtwdev, bool tx, const struct rtw_8723d_iqk_cfg *iqk_cfg) { @@ -937,7 +541,7 @@ static void rtw8723d_iqk_one_shot(struct rtw_dev *rtwdev, bool tx, /* enter IQK mode */ rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, EN_IQK); - rtw8723d_iqk_config_lte_path_gnt(rtwdev); + rtw8723x_iqk_config_lte_path_gnt(rtwdev, IQK_LTE_WRITE_VAL_8723D); rtw_write32(rtwdev, REG_LTECOEX_CTRL, 0x800f0054); mdelay(1); @@ -959,9 +563,9 @@ static void rtw8723d_iqk_one_shot(struct rtw_dev *rtwdev, bool tx, static void rtw8723d_iqk_txrx_path_post(struct rtw_dev *rtwdev, const struct rtw_8723d_iqk_cfg *iqk_cfg, - const struct iqk_backup_regs *backup) + const struct rtw8723x_iqk_backup_regs *backup) { - rtw8723d_iqk_restore_lte_path_gnt(rtwdev, backup); + rtw8723x_iqk_restore_lte_path_gnt(rtwdev, backup); rtw_write32(rtwdev, REG_BB_SEL_BTG, 
backup->bb_sel_btg); /* leave IQK mode */ @@ -974,7 +578,7 @@ static void rtw8723d_iqk_txrx_path_post(struct rtw_dev *rtwdev, static u8 rtw8723d_iqk_tx_path(struct rtw_dev *rtwdev, const struct rtw_8723d_iqk_cfg *iqk_cfg, - const struct iqk_backup_regs *backup) + const struct rtw8723x_iqk_backup_regs *backup) { u8 status; @@ -1033,7 +637,7 @@ static u8 rtw8723d_iqk_tx_path(struct rtw_dev *rtwdev, static u8 rtw8723d_iqk_rx_path(struct rtw_dev *rtwdev, const struct rtw_8723d_iqk_cfg *iqk_cfg, - const struct iqk_backup_regs *backup) + const struct rtw8723x_iqk_backup_regs *backup) { u32 tx_x, tx_y; u8 status; @@ -1220,14 +824,6 @@ void rtw8723d_iqk_fill_s0_matrix(struct rtw_dev *rtwdev, const s32 result[]) result[IQK_S0_RX_Y]); } -static void rtw8723d_iqk_path_adda_on(struct rtw_dev *rtwdev) -{ - int i; - - for (i = 0; i < IQK_ADDA_REG_NUM; i++) - rtw_write32(rtwdev, iqk_adda_regs[i], 0x03c00016); -} - static void rtw8723d_iqk_config_mac(struct rtw_dev *rtwdev) { rtw_write8(rtwdev, REG_TXPAUSE, 0xff); @@ -1245,70 +841,14 @@ void rtw8723d_iqk_rf_standby(struct rtw_dev *rtwdev, enum rtw_rf_path path) rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, EN_IQK); } -static -bool rtw8723d_iqk_similarity_cmp(struct rtw_dev *rtwdev, s32 result[][IQK_NR], - u8 c1, u8 c2) -{ - u32 i, j, diff; - u32 bitmap = 0; - u8 candidate[PATH_NR] = {IQK_ROUND_INVALID, IQK_ROUND_INVALID}; - bool ret = true; - - s32 tmp1, tmp2; - - for (i = 0; i < IQK_NR; i++) { - tmp1 = iqkxy_to_s32(result[c1][i]); - tmp2 = iqkxy_to_s32(result[c2][i]); - - diff = abs(tmp1 - tmp2); - - if (diff <= MAX_TOLERANCE) - continue; - - if ((i == IQK_S1_RX_X || i == IQK_S0_RX_X) && !bitmap) { - if (result[c1][i] + result[c1][i + 1] == 0) - candidate[i / IQK_SX_NR] = c2; - else if (result[c2][i] + result[c2][i + 1] == 0) - candidate[i / IQK_SX_NR] = c1; - else - bitmap |= BIT(i); - } else { - bitmap |= BIT(i); - } - } - - if (bitmap != 0) - goto check_sim; - - for (i = 0; i < PATH_NR; i++) { - if (candidate[i] == IQK_ROUND_INVALID) - continue; - - for (j = i * IQK_SX_NR; j < i * IQK_SX_NR + 2; j++) - result[IQK_ROUND_HYBRID][j] = result[candidate[i]][j]; - ret = false; - } - - return ret; - -check_sim: - for (i = 0; i < IQK_NR; i++) { - j = i & ~1; /* 2 bits are a pair for IQ[X, Y] */ - if (bitmap & GENMASK(j + 1, j)) - continue; - - result[IQK_ROUND_HYBRID][i] = result[c1][i]; - } - - return false; -} +#define ADDA_ON_VAL_8723D 0x03c00016 static -void rtw8723d_iqk_precfg_path(struct rtw_dev *rtwdev, enum rtw8723d_path path) +void rtw8723d_iqk_precfg_path(struct rtw_dev *rtwdev, enum rtw8723x_path path) { if (path == PATH_S0) { rtw8723d_iqk_rf_standby(rtwdev, RF_PATH_A); - rtw8723d_iqk_path_adda_on(rtwdev); + rtw8723x_iqk_path_adda_on(rtwdev, ADDA_ON_VAL_8723D); } rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, EN_IQK); @@ -1317,13 +857,13 @@ void rtw8723d_iqk_precfg_path(struct rtw_dev *rtwdev, enum rtw8723d_path path) if (path == PATH_S1) { rtw8723d_iqk_rf_standby(rtwdev, RF_PATH_B); - rtw8723d_iqk_path_adda_on(rtwdev); + rtw8723x_iqk_path_adda_on(rtwdev, ADDA_ON_VAL_8723D); } } static void rtw8723d_iqk_one_round(struct rtw_dev *rtwdev, s32 result[][IQK_NR], u8 t, - const struct iqk_backup_regs *backup) + const struct rtw8723x_iqk_backup_regs *backup) { u32 i; u8 s1_ok, s0_ok; @@ -1331,7 +871,7 @@ void rtw8723d_iqk_one_round(struct rtw_dev *rtwdev, s32 result[][IQK_NR], u8 t, rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] IQ Calibration for 1T1R_S0/S1 for %d times\n", t); - rtw8723d_iqk_path_adda_on(rtwdev); + 
rtw8723x_iqk_path_adda_on(rtwdev, ADDA_ON_VAL_8723D); rtw8723d_iqk_config_mac(rtwdev); rtw_write32_mask(rtwdev, REG_CCK_ANT_SEL_11N, 0x0f000000, 0xf); rtw_write32(rtwdev, REG_BB_RX_PATH_11N, 0x03a05611); @@ -1427,7 +967,7 @@ static void rtw8723d_phy_calibration(struct rtw_dev *rtwdev) { struct rtw_dm_info *dm_info = &rtwdev->dm_info; s32 result[IQK_ROUND_SIZE][IQK_NR]; - struct iqk_backup_regs backup; + struct rtw8723x_iqk_backup_regs backup; u8 i, j; u8 final_candidate = IQK_ROUND_INVALID; bool good; @@ -1436,23 +976,23 @@ static void rtw8723d_phy_calibration(struct rtw_dev *rtwdev) memset(result, 0, sizeof(result)); - rtw8723d_iqk_backup_path_ctrl(rtwdev, &backup); - rtw8723d_iqk_backup_lte_path_gnt(rtwdev, &backup); - rtw8723d_iqk_backup_regs(rtwdev, &backup); + rtw8723x_iqk_backup_path_ctrl(rtwdev, &backup); + rtw8723x_iqk_backup_lte_path_gnt(rtwdev, &backup); + rtw8723x_iqk_backup_regs(rtwdev, &backup); for (i = IQK_ROUND_0; i <= IQK_ROUND_2; i++) { - rtw8723d_iqk_config_path_ctrl(rtwdev); - rtw8723d_iqk_config_lte_path_gnt(rtwdev); + rtw8723x_iqk_config_path_ctrl(rtwdev); + rtw8723x_iqk_config_lte_path_gnt(rtwdev, IQK_LTE_WRITE_VAL_8723D); rtw8723d_iqk_one_round(rtwdev, result, i, &backup); if (i > IQK_ROUND_0) - rtw8723d_iqk_restore_regs(rtwdev, &backup); - rtw8723d_iqk_restore_lte_path_gnt(rtwdev, &backup); - rtw8723d_iqk_restore_path_ctrl(rtwdev, &backup); + rtw8723x_iqk_restore_regs(rtwdev, &backup); + rtw8723x_iqk_restore_lte_path_gnt(rtwdev, &backup); + rtw8723x_iqk_restore_path_ctrl(rtwdev, &backup); for (j = IQK_ROUND_0; j < i; j++) { - good = rtw8723d_iqk_similarity_cmp(rtwdev, result, j, i); + good = rtw8723x_iqk_similarity_cmp(rtwdev, result, j, i); if (good) { final_candidate = j; @@ -1546,26 +1086,6 @@ static void rtw8723d_phy_cck_pd_set(struct rtw_dev *rtwdev, u8 new_lvl) } /* for coex */ -static void rtw8723d_coex_cfg_init(struct rtw_dev *rtwdev) -{ - /* enable TBTT nterrupt */ - rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION); - - /* BT report packet sample rate */ - /* 0x790[5:0]=0x5 */ - rtw_write8_mask(rtwdev, REG_BT_TDMA_TIME, BIT_MASK_SAMPLE_RATE, 0x5); - - /* enable BT counter statistics */ - rtw_write8(rtwdev, REG_BT_STAT_CTRL, 0x1); - - /* enable PTA (3-wire function form BT side) */ - rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_PTA_EN); - rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_PO_BT_PTA_PINS); - - /* enable PTA (tx/rx signal form WiFi side) */ - rtw_write8_set(rtwdev, REG_QUEUE_CTRL, BIT_PTA_WL_TX_EN); -} - static void rtw8723d_coex_cfg_gnt_fix(struct rtw_dev *rtwdev) { } @@ -1671,39 +1191,6 @@ static void rtw8723d_coex_cfg_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain) } } -static u8 rtw8723d_pwrtrack_get_limit_ofdm(struct rtw_dev *rtwdev) -{ - struct rtw_dm_info *dm_info = &rtwdev->dm_info; - u8 tx_rate = dm_info->tx_rate; - u8 limit_ofdm = 30; - - switch (tx_rate) { - case DESC_RATE1M...DESC_RATE5_5M: - case DESC_RATE11M: - break; - case DESC_RATE6M...DESC_RATE48M: - limit_ofdm = 36; - break; - case DESC_RATE54M: - limit_ofdm = 34; - break; - case DESC_RATEMCS0...DESC_RATEMCS2: - limit_ofdm = 38; - break; - case DESC_RATEMCS3...DESC_RATEMCS4: - limit_ofdm = 36; - break; - case DESC_RATEMCS5...DESC_RATEMCS7: - limit_ofdm = 34; - break; - default: - rtw_warn(rtwdev, "pwrtrack unhandled tx_rate 0x%x\n", tx_rate); - break; - } - - return limit_ofdm; -} - static void rtw8723d_set_iqk_matrix_by_result(struct rtw_dev *rtwdev, u32 ofdm_swing, u8 rf_path) { @@ -1845,7 +1332,7 @@ static void rtw8723d_pwrtrack_set(struct rtw_dev *rtwdev, u8 
path) s8 final_ofdm_swing_index; s8 final_cck_swing_index; - limit_ofdm = rtw8723d_pwrtrack_get_limit_ofdm(rtwdev); + limit_ofdm = rtw8723x_pwrtrack_get_limit_ofdm(rtwdev); final_ofdm_swing_index = RTW_DEF_OFDM_SWING_INDEX + dm_info->delta_power_index[path]; @@ -1873,26 +1360,6 @@ static void rtw8723d_pwrtrack_set(struct rtw_dev *rtwdev, u8 path) rtw_phy_set_tx_power_level(rtwdev, hal->current_channel); } -static void rtw8723d_pwrtrack_set_xtal(struct rtw_dev *rtwdev, u8 therm_path, - u8 delta) -{ - struct rtw_dm_info *dm_info = &rtwdev->dm_info; - const struct rtw_pwr_track_tbl *tbl = rtwdev->chip->pwr_track_tbl; - const s8 *pwrtrk_xtal; - s8 xtal_cap; - - if (dm_info->thermal_avg[therm_path] > - rtwdev->efuse.thermal_meter[therm_path]) - pwrtrk_xtal = tbl->pwrtrk_xtal_p; - else - pwrtrk_xtal = tbl->pwrtrk_xtal_n; - - xtal_cap = rtwdev->efuse.crystal_cap & 0x3F; - xtal_cap = clamp_t(s8, xtal_cap + pwrtrk_xtal[delta], 0, 0x3F); - rtw_write32_mask(rtwdev, REG_AFE_CTRL3, BIT_MASK_XTAL, - xtal_cap | (xtal_cap << 6)); -} - static void rtw8723d_phy_pwrtrack(struct rtw_dev *rtwdev) { struct rtw_dm_info *dm_info = &rtwdev->dm_info; @@ -1912,7 +1379,7 @@ static void rtw8723d_phy_pwrtrack(struct rtw_dev *rtwdev) do_iqk = rtw_phy_pwrtrack_need_iqk(rtwdev); if (do_iqk) - rtw8723d_lck(rtwdev); + rtw8723x_lck(rtwdev); if (dm_info->pwr_trk_init_trigger) dm_info->pwr_trk_init_trigger = false; @@ -1937,7 +1404,7 @@ static void rtw8723d_phy_pwrtrack(struct rtw_dev *rtwdev) rtw8723d_pwrtrack_set(rtwdev, path); } - rtw8723d_pwrtrack_set_xtal(rtwdev, RF_PATH_A, delta); + rtw8723x_pwrtrack_set_xtal(rtwdev, RF_PATH_A, delta); iqk: if (do_iqk) @@ -1963,49 +1430,29 @@ static void rtw8723d_pwr_track(struct rtw_dev *rtwdev) dm_info->pwr_trk_triggered = false; } -static void rtw8723d_fill_txdesc_checksum(struct rtw_dev *rtwdev, - struct rtw_tx_pkt_info *pkt_info, - u8 *txdesc) -{ - size_t words = 32 / 2; /* calculate the first 32 bytes (16 words) */ - __le16 chksum = 0; - __le16 *data = (__le16 *)(txdesc); - struct rtw_tx_desc *tx_desc = (struct rtw_tx_desc *)txdesc; - - le32p_replace_bits(&tx_desc->w7, 0, RTW_TX_DESC_W7_TXDESC_CHECKSUM); - - while (words--) - chksum ^= *data++; - - chksum = ~chksum; - - le32p_replace_bits(&tx_desc->w7, __le16_to_cpu(chksum), - RTW_TX_DESC_W7_TXDESC_CHECKSUM); -} - static struct rtw_chip_ops rtw8723d_ops = { .phy_set_param = rtw8723d_phy_set_param, - .read_efuse = rtw8723d_read_efuse, + .read_efuse = rtw8723x_read_efuse, .query_rx_desc = rtw8723d_query_rx_desc, .set_channel = rtw8723d_set_channel, - .mac_init = rtw8723d_mac_init, + .mac_init = rtw8723x_mac_init, .shutdown = rtw8723d_shutdown, .read_rf = rtw_phy_read_rf_sipi, .write_rf = rtw_phy_write_rf_reg_sipi, - .set_tx_power_index = rtw8723d_set_tx_power_index, + .set_tx_power_index = rtw8723x_set_tx_power_index, .set_antenna = NULL, - .cfg_ldo25 = rtw8723d_cfg_ldo25, - .efuse_grant = rtw8723d_efuse_grant, - .false_alarm_statistics = rtw8723d_false_alarm_statistics, + .cfg_ldo25 = rtw8723x_cfg_ldo25, + .efuse_grant = rtw8723x_efuse_grant, + .false_alarm_statistics = rtw8723x_false_alarm_statistics, .phy_calibration = rtw8723d_phy_calibration, .cck_pd_set = rtw8723d_phy_cck_pd_set, .pwr_track = rtw8723d_pwr_track, .config_bfee = NULL, .set_gid_table = NULL, .cfg_csi_rate = NULL, - .fill_txdesc_checksum = rtw8723d_fill_txdesc_checksum, + .fill_txdesc_checksum = rtw8723x_fill_txdesc_checksum, - .coex_set_init = rtw8723d_coex_cfg_init, + .coex_set_init = rtw8723x_coex_cfg_init, .coex_set_ant_switch = NULL, .coex_set_gnt_fix = 
rtw8723d_coex_cfg_gnt_fix, .coex_set_gnt_debug = rtw8723d_coex_cfg_gnt_debug, @@ -2592,22 +2039,6 @@ static const struct rtw_rqpn rqpn_table_8723d[] = { RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH}, }; -static const struct rtw_prioq_addrs prioq_addrs_8723d = { - .prio[RTW_DMA_MAPPING_EXTRA] = { - .rsvd = REG_RQPN_NPQ + 2, .avail = REG_RQPN_NPQ + 3, - }, - .prio[RTW_DMA_MAPPING_LOW] = { - .rsvd = REG_RQPN + 1, .avail = REG_FIFOPAGE_CTRL_2 + 1, - }, - .prio[RTW_DMA_MAPPING_NORMAL] = { - .rsvd = REG_RQPN_NPQ, .avail = REG_RQPN_NPQ + 1, - }, - .prio[RTW_DMA_MAPPING_HIGH] = { - .rsvd = REG_RQPN, .avail = REG_FIFOPAGE_CTRL_2, - }, - .wsize = false, -}; - static const struct rtw_intf_phy_para pcie_gen1_param_8723d[] = { {0x0008, 0x4a22, RTW_IP_SEL_PHY, @@ -2628,28 +2059,6 @@ static const struct rtw_intf_phy_para_table phy_para_table_8723d = { .n_gen1_para = ARRAY_SIZE(pcie_gen1_param_8723d), }; -static const struct rtw_hw_reg rtw8723d_dig[] = { - [0] = { .addr = 0xc50, .mask = 0x7f }, - [1] = { .addr = 0xc50, .mask = 0x7f }, -}; - -static const struct rtw_hw_reg rtw8723d_dig_cck[] = { - [0] = { .addr = 0xa0c, .mask = 0x3f00 }, -}; - -static const struct rtw_rf_sipi_addr rtw8723d_rf_sipi_addr[] = { - [RF_PATH_A] = { .hssi_1 = 0x820, .lssi_read = 0x8a0, - .hssi_2 = 0x824, .lssi_read_pi = 0x8b8}, - [RF_PATH_B] = { .hssi_1 = 0x828, .lssi_read = 0x8a4, - .hssi_2 = 0x82c, .lssi_read_pi = 0x8bc}, -}; - -static const struct rtw_ltecoex_addr rtw8723d_ltecoex_addr = { - .ctrl = REG_LTECOEX_CTRL, - .wdata = REG_LTECOEX_WRITE_DATA, - .rdata = REG_LTECOEX_READ_DATA, -}; - static const struct rtw_rfe_def rtw8723d_rfe_defs[] = { [0] = { .phy_pg_tbl = &rtw8723d_bb_pg_tbl, .txpwr_lmt_tbl = &rtw8723d_txpwr_lmt_tbl,}, @@ -2770,14 +2179,14 @@ const struct rtw_chip_info rtw8723d_hw_spec = { .pwr_off_seq = card_disable_flow_8723d, .page_table = page_table_8723d, .rqpn_table = rqpn_table_8723d, - .prioq_addrs = &prioq_addrs_8723d, + .prioq_addrs = &rtw8723x_common.prioq_addrs, .intf_table = &phy_para_table_8723d, - .dig = rtw8723d_dig, - .dig_cck = rtw8723d_dig_cck, + .dig = rtw8723x_common.dig, + .dig_cck = rtw8723x_common.dig_cck, .rf_sipi_addr = {0x840, 0x844}, - .rf_sipi_read_addr = rtw8723d_rf_sipi_addr, + .rf_sipi_read_addr = rtw8723x_common.rf_sipi_addr, .fix_rf_phy_num = 2, - .ltecoex_addr = &rtw8723d_ltecoex_addr, + .ltecoex_addr = &rtw8723x_common.ltecoex_addr, .mac_tbl = &rtw8723d_mac_tbl, .agc_tbl = &rtw8723d_agc_tbl, .bb_tbl = &rtw8723d_bb_tbl, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.h b/drivers/net/wireless/realtek/rtw88/rtw8723d.h index 2434e2480cbe..fba06c9f480e 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8723d.h +++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.h @@ -5,90 +5,7 @@ #ifndef __RTW8723D_H__ #define __RTW8723D_H__ -enum rtw8723d_path { - PATH_S1, - PATH_S0, - PATH_NR, -}; - -enum rtw8723d_iqk_round { - IQK_ROUND_0, - IQK_ROUND_1, - IQK_ROUND_2, - IQK_ROUND_HYBRID, - IQK_ROUND_SIZE, - IQK_ROUND_INVALID = 0xff, -}; - -enum rtw8723d_iqk_result { - IQK_S1_TX_X, - IQK_S1_TX_Y, - IQK_S1_RX_X, - IQK_S1_RX_Y, - IQK_S0_TX_X, - IQK_S0_TX_Y, - IQK_S0_RX_X, - IQK_S0_RX_Y, - IQK_NR, - IQK_SX_NR = IQK_NR / PATH_NR, -}; - -struct rtw8723de_efuse { - u8 mac_addr[ETH_ALEN]; /* 0xd0 */ - u8 vender_id[2]; - u8 device_id[2]; - u8 sub_vender_id[2]; - u8 sub_device_id[2]; -}; - -struct rtw8723du_efuse { - u8 res4[48]; /* 0xd0 */ - u8 vender_id[2]; /* 0x100 */ - u8 product_id[2]; /* 0x102 */ - u8 usb_option; /* 0x104 */ - u8 res5[2]; /* 0x105 */ - u8 mac_addr[ETH_ALEN]; /* 0x107 */ -}; - 
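(Editorial note: the bus-specific efuse structs removed here, and re-added under rtw8723x_* names later in this patch, differ only in where the MAC address sits in the logical efuse map: 0xd0 for PCIe, 0x107 for USB, and 0x11a for SDIO (the SDIO variant follows just below), all overlaid on a union at offset 0xd0 of the common map. As a sanity check of those offset comments, the following build-time sketch could sit next to the new rtw8723x_efuse layout; it is illustrative only, not part of the commit, and assumes the kernel's static_assert() and offsetof() from <linux/build_bug.h> and <linux/stddef.h>.)

/* Hypothetical sanity checks against the offsets documented in the
 * efuse struct comments; illustrative, not part of the commit.
 */
static_assert(offsetof(struct rtw8723x_efuse, e) == 0xd0);		/* union base  */
static_assert(offsetof(struct rtw8723x_efuse, e.mac_addr) == 0xd0);	/* PCIe MAC    */
static_assert(offsetof(struct rtw8723x_efuse, u.mac_addr) == 0x107);	/* USB MAC     */
static_assert(offsetof(struct rtw8723x_efuse, s.mac_addr) == 0x11a);	/* SDIO MAC    */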
-struct rtw8723ds_efuse { - u8 res4[0x4a]; /* 0xd0 */ - u8 mac_addr[ETH_ALEN]; /* 0x11a */ -}; - -struct rtw8723d_efuse { - __le16 rtl_id; - u8 rsvd[2]; - u8 afe; - u8 rsvd1[11]; - - /* power index for four RF paths */ - struct rtw_txpwr_idx txpwr_idx_table[4]; - - u8 channel_plan; /* 0xb8 */ - u8 xtal_k; - u8 thermal_meter; - u8 iqk_lck; - u8 pa_type; /* 0xbc */ - u8 lna_type_2g[2]; /* 0xbd */ - u8 lna_type_5g[2]; - u8 rf_board_option; - u8 rf_feature_option; - u8 rf_bt_setting; - u8 eeprom_version; - u8 eeprom_customer_id; - u8 tx_bb_swing_setting_2g; - u8 res_c7; - u8 tx_pwr_calibrate_rate; - u8 rf_antenna_option; /* 0xc9 */ - u8 rfe_option; - u8 country_code[2]; - u8 res[3]; - union { - struct rtw8723de_efuse e; - struct rtw8723du_efuse u; - struct rtw8723ds_efuse s; - }; -}; +#include "rtw8723x.h" extern const struct rtw_chip_info rtw8723d_hw_spec; @@ -114,193 +31,9 @@ extern const struct rtw_chip_info rtw8723d_hw_spec; #define GET_PHY_STAT_P1_RXSNR_A(phy_stat) \ le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(7, 0)) -static inline s32 iqkxy_to_s32(s32 val) -{ - /* val is Q10.8 */ - return sign_extend32(val, 9); -} - -static inline s32 iqk_mult(s32 x, s32 y, s32 *ext) -{ - /* x, y and return value are Q10.8 */ - s32 t; - - t = x * y; - if (ext) - *ext = (t >> 7) & 0x1; /* Q.16 --> Q.9; get LSB of Q.9 */ - - return (t >> 8); /* Q.16 --> Q.8 */ -} - -#define OFDM_SWING_A(swing) FIELD_GET(GENMASK(9, 0), swing) -#define OFDM_SWING_B(swing) FIELD_GET(GENMASK(15, 10), swing) -#define OFDM_SWING_C(swing) FIELD_GET(GENMASK(21, 16), swing) -#define OFDM_SWING_D(swing) FIELD_GET(GENMASK(31, 22), swing) #define RTW_DEF_OFDM_SWING_INDEX 28 #define RTW_DEF_CCK_SWING_INDEX 28 -#define MAX_TOLERANCE 5 -#define IQK_TX_X_ERR 0x142 -#define IQK_TX_Y_ERR 0x42 -#define IQK_RX_X_UPPER 0x11a -#define IQK_RX_X_LOWER 0xe6 -#define IQK_RX_Y_LMT 0x1a -#define IQK_TX_OK BIT(0) -#define IQK_RX_OK BIT(1) -#define PATH_IQK_RETRY 2 - -#define SPUR_THRES 0x16 #define CCK_DFIR_NR 3 -#define DIS_3WIRE 0xccf000c0 -#define EN_3WIRE 0xccc000c0 -#define START_PSD 0x400000 -#define FREQ_CH13 0xfccd -#define FREQ_CH14 0xff9a -#define RFCFGCH_CHANNEL_MASK GENMASK(7, 0) -#define RFCFGCH_BW_MASK (BIT(11) | BIT(10)) -#define RFCFGCH_BW_20M (BIT(11) | BIT(10)) -#define RFCFGCH_BW_40M BIT(10) -#define BIT_MASK_RFMOD BIT(0) -#define BIT_LCK BIT(15) - -#define REG_GPIO_INTM 0x0048 -#define REG_BTG_SEL 0x0067 -#define BIT_MASK_BTG_WL BIT(7) -#define REG_LTECOEX_PATH_CONTROL 0x0070 -#define REG_LTECOEX_CTRL 0x07c0 -#define REG_LTECOEX_WRITE_DATA 0x07c4 -#define REG_LTECOEX_READ_DATA 0x07c8 -#define REG_PSDFN 0x0808 -#define REG_BB_PWR_SAV1_11N 0x0874 -#define REG_ANA_PARAM1 0x0880 -#define REG_ANALOG_P4 0x088c -#define REG_PSDRPT 0x08b4 -#define REG_FPGA1_RFMOD 0x0900 -#define REG_BB_SEL_BTG 0x0948 -#define REG_BBRX_DFIR 0x0954 -#define BIT_MASK_RXBB_DFIR GENMASK(27, 24) -#define BIT_RXBB_DFIR_EN BIT(19) -#define REG_CCK0_SYS 0x0a00 -#define BIT_CCK_SIDE_BAND BIT(4) -#define REG_CCK_ANT_SEL_11N 0x0a04 -#define REG_PWRTH 0x0a08 -#define REG_CCK_FA_RST_11N 0x0a2c -#define BIT_MASK_CCK_CNT_KEEP BIT(12) -#define BIT_MASK_CCK_CNT_EN BIT(13) -#define BIT_MASK_CCK_CNT_KPEN (BIT_MASK_CCK_CNT_KEEP | BIT_MASK_CCK_CNT_EN) -#define BIT_MASK_CCK_FA_KEEP BIT(14) -#define BIT_MASK_CCK_FA_EN BIT(15) -#define BIT_MASK_CCK_FA_KPEN (BIT_MASK_CCK_FA_KEEP | BIT_MASK_CCK_FA_EN) -#define REG_CCK_FA_LSB_11N 0x0a5c -#define REG_CCK_FA_MSB_11N 0x0a58 -#define REG_CCK_CCA_CNT_11N 0x0a60 -#define BIT_MASK_CCK_FA_MSB GENMASK(7, 0) -#define 
BIT_MASK_CCK_FA_LSB GENMASK(15, 8) -#define REG_PWRTH2 0x0aa8 -#define REG_CSRATIO 0x0aaa -#define REG_OFDM_FA_HOLDC_11N 0x0c00 -#define BIT_MASK_OFDM_FA_KEEP BIT(31) -#define REG_BB_RX_PATH_11N 0x0c04 -#define REG_TRMUX_11N 0x0c08 -#define REG_OFDM_FA_RSTC_11N 0x0c0c -#define BIT_MASK_OFDM_FA_RST BIT(31) -#define REG_A_RXIQI 0x0c14 -#define BIT_MASK_RXIQ_S1_X 0x000003FF -#define BIT_MASK_RXIQ_S1_Y1 0x0000FC00 -#define BIT_SET_RXIQ_S1_Y1(y) ((y) & 0x3F) -#define REG_OFDM0_RXDSP 0x0c40 -#define BIT_MASK_RXDSP GENMASK(28, 24) -#define BIT_EN_RXDSP BIT(9) -#define REG_OFDM_0_ECCA_THRESHOLD 0x0c4c -#define BIT_MASK_OFDM0_EXT_A BIT(31) -#define BIT_MASK_OFDM0_EXT_C BIT(29) -#define BIT_MASK_OFDM0_EXTS (BIT(31) | BIT(29) | BIT(28)) -#define BIT_SET_OFDM0_EXTS(a, c, d) (((a) << 31) | ((c) << 29) | ((d) << 28)) -#define REG_OFDM0_XAAGC1 0x0c50 -#define REG_OFDM0_XBAGC1 0x0c58 -#define REG_AGCRSSI 0x0c78 -#define REG_OFDM_0_XA_TX_IQ_IMBALANCE 0x0c80 -#define BIT_MASK_TXIQ_ELM_A 0x03ff -#define BIT_SET_TXIQ_ELM_ACD(a, c, d) (((d) << 22) | (((c) & 0x3F) << 16) | \ - ((a) & 0x03ff)) -#define BIT_MASK_TXIQ_ELM_C GENMASK(21, 16) -#define BIT_SET_TXIQ_ELM_C2(c) ((c) & 0x3F) -#define BIT_MASK_TXIQ_ELM_D GENMASK(31, 22) -#define REG_TXIQK_MATRIXA_LSB2_11N 0x0c94 -#define BIT_SET_TXIQ_ELM_C1(c) (((c) & 0x000003C0) >> 6) -#define REG_RXIQK_MATRIX_LSB_11N 0x0ca0 -#define BIT_MASK_RXIQ_S1_Y2 0xF0000000 -#define BIT_SET_RXIQ_S1_Y2(y) (((y) >> 6) & 0xF) -#define REG_TXIQ_AB_S0 0x0cd0 -#define BIT_MASK_TXIQ_A_S0 0x000007FE -#define BIT_MASK_TXIQ_A_EXT_S0 BIT(0) -#define BIT_MASK_TXIQ_B_S0 0x0007E000 -#define REG_TXIQ_CD_S0 0x0cd4 -#define BIT_MASK_TXIQ_C_S0 0x000007FE -#define BIT_MASK_TXIQ_C_EXT_S0 BIT(0) -#define BIT_MASK_TXIQ_D_S0 GENMASK(22, 13) -#define BIT_MASK_TXIQ_D_EXT_S0 BIT(12) -#define REG_RXIQ_AB_S0 0x0cd8 -#define BIT_MASK_RXIQ_X_S0 0x000003FF -#define BIT_MASK_RXIQ_Y_S0 0x003FF000 -#define REG_OFDM_FA_TYPE1_11N 0x0cf0 -#define BIT_MASK_OFDM_FF_CNT GENMASK(15, 0) -#define BIT_MASK_OFDM_SF_CNT GENMASK(31, 16) -#define REG_OFDM_FA_RSTD_11N 0x0d00 -#define BIT_MASK_OFDM_FA_RST1 BIT(27) -#define BIT_MASK_OFDM_FA_KEEP1 BIT(31) -#define REG_CTX 0x0d03 -#define BIT_MASK_CTX_TYPE GENMASK(6, 4) -#define REG_OFDM1_CFOTRK 0x0d2c -#define BIT_EN_CFOTRK BIT(28) -#define REG_OFDM1_CSI1 0x0d40 -#define REG_OFDM1_CSI2 0x0d44 -#define REG_OFDM1_CSI3 0x0d48 -#define REG_OFDM1_CSI4 0x0d4c -#define REG_OFDM_FA_TYPE2_11N 0x0da0 -#define BIT_MASK_OFDM_CCA_CNT GENMASK(15, 0) -#define BIT_MASK_OFDM_PF_CNT GENMASK(31, 16) -#define REG_OFDM_FA_TYPE3_11N 0x0da4 -#define BIT_MASK_OFDM_RI_CNT GENMASK(15, 0) -#define BIT_MASK_OFDM_CRC_CNT GENMASK(31, 16) -#define REG_OFDM_FA_TYPE4_11N 0x0da8 -#define BIT_MASK_OFDM_MNS_CNT GENMASK(15, 0) -#define REG_FPGA0_IQK_11N 0x0e28 -#define BIT_MASK_IQK_MOD 0xffffff00 -#define EN_IQK 0x808000 -#define RST_IQK 0x000000 -#define REG_TXIQK_TONE_A_11N 0x0e30 -#define REG_RXIQK_TONE_A_11N 0x0e34 -#define REG_TXIQK_PI_A_11N 0x0e38 -#define REG_RXIQK_PI_A_11N 0x0e3c -#define REG_TXIQK_11N 0x0e40 -#define BIT_SET_TXIQK_11N(x, y) (0x80007C00 | ((x) << 16) | (y)) -#define REG_RXIQK_11N 0x0e44 -#define REG_IQK_AGC_PTS_11N 0x0e48 -#define REG_IQK_AGC_RSP_11N 0x0e4c -#define REG_TX_IQK_TONE_B 0x0e50 -#define REG_RX_IQK_TONE_B 0x0e54 -#define REG_IQK_RES_TX 0x0e94 -#define BIT_MASK_RES_TX GENMASK(25, 16) -#define REG_IQK_RES_TY 0x0e9c -#define BIT_MASK_RES_TY GENMASK(25, 16) -#define REG_IQK_RES_RX 0x0ea4 -#define BIT_MASK_RES_RX GENMASK(25, 16) -#define REG_IQK_RES_RY 0x0eac -#define BIT_IQK_TX_FAIL 
BIT(28) -#define BIT_IQK_RX_FAIL BIT(27) -#define BIT_IQK_DONE BIT(26) -#define BIT_MASK_RES_RY GENMASK(25, 16) -#define REG_PAGE_F_RST_11N 0x0f14 -#define BIT_MASK_F_RST_ALL BIT(16) -#define REG_IGI_C_11N 0x0f84 -#define REG_IGI_D_11N 0x0f88 -#define REG_HT_CRC32_CNT_11N 0x0f90 -#define BIT_MASK_HT_CRC_OK GENMASK(15, 0) -#define BIT_MASK_HT_CRC_ERR GENMASK(31, 16) -#define REG_OFDM_CRC32_CNT_11N 0x0f94 -#define BIT_MASK_OFDM_LCRC_OK GENMASK(15, 0) -#define BIT_MASK_OFDM_LCRC_ERR GENMASK(31, 16) -#define REG_HT_CRC32_CNT_11N_AGG 0x0fb8 #endif diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723x.c b/drivers/net/wireless/realtek/rtw88/rtw8723x.c new file mode 100644 index 000000000000..0d0b6c2cb9aa --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8723x.c @@ -0,0 +1,721 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright 2024 Fiona Klute + * + * Based on code originally in rtw8723d.[ch], + * Copyright(c) 2018-2019 Realtek Corporation + */ + +#include "main.h" +#include "debug.h" +#include "phy.h" +#include "reg.h" +#include "tx.h" +#include "rtw8723x.h" + +static const struct rtw_hw_reg rtw8723x_txagc[] = { + [DESC_RATE1M] = { .addr = 0xe08, .mask = 0x0000ff00 }, + [DESC_RATE2M] = { .addr = 0x86c, .mask = 0x0000ff00 }, + [DESC_RATE5_5M] = { .addr = 0x86c, .mask = 0x00ff0000 }, + [DESC_RATE11M] = { .addr = 0x86c, .mask = 0xff000000 }, + [DESC_RATE6M] = { .addr = 0xe00, .mask = 0x000000ff }, + [DESC_RATE9M] = { .addr = 0xe00, .mask = 0x0000ff00 }, + [DESC_RATE12M] = { .addr = 0xe00, .mask = 0x00ff0000 }, + [DESC_RATE18M] = { .addr = 0xe00, .mask = 0xff000000 }, + [DESC_RATE24M] = { .addr = 0xe04, .mask = 0x000000ff }, + [DESC_RATE36M] = { .addr = 0xe04, .mask = 0x0000ff00 }, + [DESC_RATE48M] = { .addr = 0xe04, .mask = 0x00ff0000 }, + [DESC_RATE54M] = { .addr = 0xe04, .mask = 0xff000000 }, + [DESC_RATEMCS0] = { .addr = 0xe10, .mask = 0x000000ff }, + [DESC_RATEMCS1] = { .addr = 0xe10, .mask = 0x0000ff00 }, + [DESC_RATEMCS2] = { .addr = 0xe10, .mask = 0x00ff0000 }, + [DESC_RATEMCS3] = { .addr = 0xe10, .mask = 0xff000000 }, + [DESC_RATEMCS4] = { .addr = 0xe14, .mask = 0x000000ff }, + [DESC_RATEMCS5] = { .addr = 0xe14, .mask = 0x0000ff00 }, + [DESC_RATEMCS6] = { .addr = 0xe14, .mask = 0x00ff0000 }, + [DESC_RATEMCS7] = { .addr = 0xe14, .mask = 0xff000000 }, +}; + +static void __rtw8723x_lck(struct rtw_dev *rtwdev) +{ + u32 lc_cal; + u8 val_ctx, rf_val; + int ret; + + val_ctx = rtw_read8(rtwdev, REG_CTX); + if ((val_ctx & BIT_MASK_CTX_TYPE) != 0) + rtw_write8(rtwdev, REG_CTX, val_ctx & ~BIT_MASK_CTX_TYPE); + else + rtw_write8(rtwdev, REG_TXPAUSE, 0xFF); + lc_cal = rtw_read_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK); + + rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK, lc_cal | BIT_LCK); + + ret = read_poll_timeout(rtw_read_rf, rf_val, rf_val != 0x1, + 10000, 1000000, false, + rtwdev, RF_PATH_A, RF_CFGCH, BIT_LCK); + if (ret) + rtw_warn(rtwdev, "failed to poll LCK status bit\n"); + + rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK, lc_cal); + if ((val_ctx & BIT_MASK_CTX_TYPE) != 0) + rtw_write8(rtwdev, REG_CTX, val_ctx); + else + rtw_write8(rtwdev, REG_TXPAUSE, 0x00); +} + +#define DBG_EFUSE_VAL(rtwdev, map, name) \ + rtw_dbg(rtwdev, RTW_DBG_EFUSE, # name "=0x%02x\n", \ + (map)->name) +#define DBG_EFUSE_2BYTE(rtwdev, map, name) \ + rtw_dbg(rtwdev, RTW_DBG_EFUSE, # name "=0x%02x%02x\n", \ + (map)->name[0], (map)->name[1]) + +static void rtw8723xe_efuse_debug(struct rtw_dev *rtwdev, + struct rtw8723x_efuse *map) +{ + rtw_dbg(rtwdev, RTW_DBG_EFUSE, 
"mac_addr=%pM\n", map->e.mac_addr); + DBG_EFUSE_2BYTE(rtwdev, map, e.vendor_id); + DBG_EFUSE_2BYTE(rtwdev, map, e.device_id); + DBG_EFUSE_2BYTE(rtwdev, map, e.sub_vendor_id); + DBG_EFUSE_2BYTE(rtwdev, map, e.sub_device_id); +} + +static void rtw8723xu_efuse_debug(struct rtw_dev *rtwdev, + struct rtw8723x_efuse *map) +{ + DBG_EFUSE_2BYTE(rtwdev, map, u.vendor_id); + DBG_EFUSE_2BYTE(rtwdev, map, u.product_id); + DBG_EFUSE_VAL(rtwdev, map, u.usb_option); + rtw_dbg(rtwdev, RTW_DBG_EFUSE, "mac_addr=%pM\n", map->u.mac_addr); +} + +static void rtw8723xs_efuse_debug(struct rtw_dev *rtwdev, + struct rtw8723x_efuse *map) +{ + rtw_dbg(rtwdev, RTW_DBG_EFUSE, "mac_addr=%pM\n", map->s.mac_addr); +} + +static void __rtw8723x_debug_txpwr_limit(struct rtw_dev *rtwdev, + struct rtw_txpwr_idx *table, + int tx_path_count) +{ + if (!rtw_dbg_is_enabled(rtwdev, RTW_DBG_EFUSE)) + return; + + rtw_dbg(rtwdev, RTW_DBG_EFUSE, + "Power index table (2.4G):\n"); + /* CCK base */ + rtw_dbg(rtwdev, RTW_DBG_EFUSE, "CCK base\n"); + rtw_dbg(rtwdev, RTW_DBG_EFUSE, "RF G0 G1 G2 G3 G4 G5\n"); + for (int i = 0; i < tx_path_count; i++) + rtw_dbg(rtwdev, RTW_DBG_EFUSE, + "[%c]: %3u %3u %3u %3u %3u %3u\n", + 'A' + i, + table[i].pwr_idx_2g.cck_base[0], + table[i].pwr_idx_2g.cck_base[1], + table[i].pwr_idx_2g.cck_base[2], + table[i].pwr_idx_2g.cck_base[3], + table[i].pwr_idx_2g.cck_base[4], + table[i].pwr_idx_2g.cck_base[5]); + /* CCK diff */ + rtw_dbg(rtwdev, RTW_DBG_EFUSE, "CCK diff\n"); + rtw_dbg(rtwdev, RTW_DBG_EFUSE, "RF 1S 2S 3S 4S\n"); + for (int i = 0; i < tx_path_count; i++) + rtw_dbg(rtwdev, RTW_DBG_EFUSE, + "[%c]: %2d %2d %2d %2d\n", + 'A' + i, 0 /* no diff for 1S */, + table[i].pwr_idx_2g.ht_2s_diff.cck, + table[i].pwr_idx_2g.ht_3s_diff.cck, + table[i].pwr_idx_2g.ht_4s_diff.cck); + /* BW40-1S base */ + rtw_dbg(rtwdev, RTW_DBG_EFUSE, "BW40-1S base\n"); + rtw_dbg(rtwdev, RTW_DBG_EFUSE, "RF G0 G1 G2 G3 G4\n"); + for (int i = 0; i < tx_path_count; i++) + rtw_dbg(rtwdev, RTW_DBG_EFUSE, + "[%c]: %3u %3u %3u %3u %3u\n", + 'A' + i, + table[i].pwr_idx_2g.bw40_base[0], + table[i].pwr_idx_2g.bw40_base[1], + table[i].pwr_idx_2g.bw40_base[2], + table[i].pwr_idx_2g.bw40_base[3], + table[i].pwr_idx_2g.bw40_base[4]); + /* OFDM diff */ + rtw_dbg(rtwdev, RTW_DBG_EFUSE, "OFDM diff\n"); + rtw_dbg(rtwdev, RTW_DBG_EFUSE, "RF 1S 2S 3S 4S\n"); + for (int i = 0; i < tx_path_count; i++) + rtw_dbg(rtwdev, RTW_DBG_EFUSE, + "[%c]: %2d %2d %2d %2d\n", + 'A' + i, + table[i].pwr_idx_2g.ht_1s_diff.ofdm, + table[i].pwr_idx_2g.ht_2s_diff.ofdm, + table[i].pwr_idx_2g.ht_3s_diff.ofdm, + table[i].pwr_idx_2g.ht_4s_diff.ofdm); + /* BW20 diff */ + rtw_dbg(rtwdev, RTW_DBG_EFUSE, "BW20 diff\n"); + rtw_dbg(rtwdev, RTW_DBG_EFUSE, "RF 1S 2S 3S 4S\n"); + for (int i = 0; i < tx_path_count; i++) + rtw_dbg(rtwdev, RTW_DBG_EFUSE, + "[%c]: %2d %2d %2d %2d\n", + 'A' + i, + table[i].pwr_idx_2g.ht_1s_diff.bw20, + table[i].pwr_idx_2g.ht_2s_diff.bw20, + table[i].pwr_idx_2g.ht_3s_diff.bw20, + table[i].pwr_idx_2g.ht_4s_diff.bw20); + /* BW40 diff */ + rtw_dbg(rtwdev, RTW_DBG_EFUSE, "BW40 diff\n"); + rtw_dbg(rtwdev, RTW_DBG_EFUSE, "RF 1S 2S 3S 4S\n"); + for (int i = 0; i < tx_path_count; i++) + rtw_dbg(rtwdev, RTW_DBG_EFUSE, + "[%c]: %2d %2d %2d %2d\n", + 'A' + i, 0 /* no diff for 1S */, + table[i].pwr_idx_2g.ht_2s_diff.bw40, + table[i].pwr_idx_2g.ht_3s_diff.bw40, + table[i].pwr_idx_2g.ht_4s_diff.bw40); +} + +static void efuse_debug_dump(struct rtw_dev *rtwdev, + struct rtw8723x_efuse *map) +{ + if (!rtw_dbg_is_enabled(rtwdev, RTW_DBG_EFUSE)) + return; + + rtw_dbg(rtwdev, 
RTW_DBG_EFUSE, "EFUSE raw logical map:\n"); + print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1, + (u8 *)map, sizeof(struct rtw8723x_efuse), false); + rtw_dbg(rtwdev, RTW_DBG_EFUSE, "Parsed rtw8723x EFUSE data:\n"); + DBG_EFUSE_VAL(rtwdev, map, rtl_id); + DBG_EFUSE_VAL(rtwdev, map, afe); + rtw8723x_debug_txpwr_limit(rtwdev, map->txpwr_idx_table, 4); + DBG_EFUSE_VAL(rtwdev, map, channel_plan); + DBG_EFUSE_VAL(rtwdev, map, xtal_k); + DBG_EFUSE_VAL(rtwdev, map, thermal_meter); + DBG_EFUSE_VAL(rtwdev, map, iqk_lck); + DBG_EFUSE_VAL(rtwdev, map, pa_type); + DBG_EFUSE_2BYTE(rtwdev, map, lna_type_2g); + DBG_EFUSE_2BYTE(rtwdev, map, lna_type_5g); + DBG_EFUSE_VAL(rtwdev, map, rf_board_option); + DBG_EFUSE_VAL(rtwdev, map, rf_feature_option); + DBG_EFUSE_VAL(rtwdev, map, rf_bt_setting); + DBG_EFUSE_VAL(rtwdev, map, eeprom_version); + DBG_EFUSE_VAL(rtwdev, map, eeprom_customer_id); + DBG_EFUSE_VAL(rtwdev, map, tx_bb_swing_setting_2g); + DBG_EFUSE_VAL(rtwdev, map, tx_pwr_calibrate_rate); + DBG_EFUSE_VAL(rtwdev, map, rf_antenna_option); + DBG_EFUSE_VAL(rtwdev, map, rfe_option); + DBG_EFUSE_2BYTE(rtwdev, map, country_code); + + switch (rtw_hci_type(rtwdev)) { + case RTW_HCI_TYPE_PCIE: + rtw8723xe_efuse_debug(rtwdev, map); + break; + case RTW_HCI_TYPE_USB: + rtw8723xu_efuse_debug(rtwdev, map); + break; + case RTW_HCI_TYPE_SDIO: + rtw8723xs_efuse_debug(rtwdev, map); + break; + default: + /* unsupported now */ + break; + } +} + +static void rtw8723xe_efuse_parsing(struct rtw_efuse *efuse, + struct rtw8723x_efuse *map) +{ + ether_addr_copy(efuse->addr, map->e.mac_addr); +} + +static void rtw8723xu_efuse_parsing(struct rtw_efuse *efuse, + struct rtw8723x_efuse *map) +{ + ether_addr_copy(efuse->addr, map->u.mac_addr); +} + +static void rtw8723xs_efuse_parsing(struct rtw_efuse *efuse, + struct rtw8723x_efuse *map) +{ + ether_addr_copy(efuse->addr, map->s.mac_addr); +} + +static int __rtw8723x_read_efuse(struct rtw_dev *rtwdev, u8 *log_map) +{ + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw8723x_efuse *map; + int i; + + map = (struct rtw8723x_efuse *)log_map; + efuse_debug_dump(rtwdev, map); + + efuse->rfe_option = 0; + efuse->rf_board_option = map->rf_board_option; + efuse->crystal_cap = map->xtal_k; + efuse->pa_type_2g = map->pa_type; + efuse->lna_type_2g = map->lna_type_2g[0]; + efuse->channel_plan = map->channel_plan; + efuse->country_code[0] = map->country_code[0]; + efuse->country_code[1] = map->country_code[1]; + efuse->bt_setting = map->rf_bt_setting; + efuse->regd = map->rf_board_option & 0x7; + efuse->thermal_meter[0] = map->thermal_meter; + efuse->thermal_meter_k = map->thermal_meter; + efuse->afe = map->afe; + + for (i = 0; i < 4; i++) + efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i]; + + switch (rtw_hci_type(rtwdev)) { + case RTW_HCI_TYPE_PCIE: + rtw8723xe_efuse_parsing(efuse, map); + break; + case RTW_HCI_TYPE_USB: + rtw8723xu_efuse_parsing(efuse, map); + break; + case RTW_HCI_TYPE_SDIO: + rtw8723xs_efuse_parsing(efuse, map); + break; + default: + /* unsupported now */ + return -EOPNOTSUPP; + } + + return 0; +} + +#define BIT_CFENDFORM BIT(9) +#define BIT_WMAC_TCR_ERR0 BIT(12) +#define BIT_WMAC_TCR_ERR1 BIT(13) +#define BIT_TCR_CFG (BIT_CFENDFORM | BIT_WMAC_TCR_ERR0 | \ + BIT_WMAC_TCR_ERR1) +#define WLAN_RX_FILTER0 0xFFFF +#define WLAN_RX_FILTER1 0x400 +#define WLAN_RX_FILTER2 0xFFFF +#define WLAN_RCR_CFG 0x700060CE + +static int __rtw8723x_mac_init(struct rtw_dev *rtwdev) +{ + rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 1, WLAN_TXQ_RPT_EN); + rtw_write32(rtwdev, REG_TCR, 
BIT_TCR_CFG); + + rtw_write16(rtwdev, REG_RXFLTMAP0, WLAN_RX_FILTER0); + rtw_write16(rtwdev, REG_RXFLTMAP1, WLAN_RX_FILTER1); + rtw_write16(rtwdev, REG_RXFLTMAP2, WLAN_RX_FILTER2); + rtw_write32(rtwdev, REG_RCR, WLAN_RCR_CFG); + + rtw_write32(rtwdev, REG_INT_MIG, 0); + rtw_write32(rtwdev, REG_MCUTST_1, 0x0); + + rtw_write8(rtwdev, REG_MISC_CTRL, BIT_DIS_SECOND_CCA); + rtw_write8(rtwdev, REG_2ND_CCA_CTRL, 0); + + return 0; +} + +static void __rtw8723x_cfg_ldo25(struct rtw_dev *rtwdev, bool enable) +{ + u8 ldo_pwr; + + ldo_pwr = rtw_read8(rtwdev, REG_LDO_EFUSE_CTRL + 3); + if (enable) { + ldo_pwr &= ~BIT_MASK_LDO25_VOLTAGE; + ldo_pwr |= (BIT_LDO25_VOLTAGE_V25 << 4) | BIT_LDO25_EN; + } else { + ldo_pwr &= ~BIT_LDO25_EN; + } + rtw_write8(rtwdev, REG_LDO_EFUSE_CTRL + 3, ldo_pwr); +} + +static void +rtw8723x_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs) +{ + struct rtw_hal *hal = &rtwdev->hal; + const struct rtw_hw_reg *txagc; + u8 rate, pwr_index; + int j; + + for (j = 0; j < rtw_rate_size[rs]; j++) { + rate = rtw_rate_section[rs][j]; + pwr_index = hal->tx_pwr_tbl[path][rate]; + + if (rate >= ARRAY_SIZE(rtw8723x_txagc)) { + rtw_warn(rtwdev, "rate 0x%x isn't supported\n", rate); + continue; + } + txagc = &rtw8723x_txagc[rate]; + if (!txagc->addr) { + rtw_warn(rtwdev, "rate 0x%x isn't defined\n", rate); + continue; + } + + rtw_write32_mask(rtwdev, txagc->addr, txagc->mask, pwr_index); + } +} + +static void __rtw8723x_set_tx_power_index(struct rtw_dev *rtwdev) +{ + struct rtw_hal *hal = &rtwdev->hal; + int rs, path; + + for (path = 0; path < hal->rf_path_num; path++) { + for (rs = 0; rs <= RTW_RATE_SECTION_HT_1S; rs++) + rtw8723x_set_tx_power_index_by_rate(rtwdev, path, rs); + } +} + +static void __rtw8723x_efuse_grant(struct rtw_dev *rtwdev, bool on) +{ + if (on) { + rtw_write8(rtwdev, REG_EFUSE_ACCESS, EFUSE_ACCESS_ON); + + rtw_write16_set(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_ELDR); + rtw_write16_set(rtwdev, REG_SYS_CLKR, BIT_LOADER_CLK_EN | BIT_ANA8M); + } else { + rtw_write8(rtwdev, REG_EFUSE_ACCESS, EFUSE_ACCESS_OFF); + } +} + +static void __rtw8723x_false_alarm_statistics(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u32 cck_fa_cnt; + u32 ofdm_fa_cnt; + u32 crc32_cnt; + u32 val32; + + /* hold counter */ + rtw_write32_mask(rtwdev, REG_OFDM_FA_HOLDC_11N, BIT_MASK_OFDM_FA_KEEP, 1); + rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_KEEP1, 1); + rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_CNT_KEEP, 1); + rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_FA_KEEP, 1); + + cck_fa_cnt = rtw_read32_mask(rtwdev, REG_CCK_FA_LSB_11N, MASKBYTE0); + cck_fa_cnt += rtw_read32_mask(rtwdev, REG_CCK_FA_MSB_11N, MASKBYTE3) << 8; + + val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE1_11N); + ofdm_fa_cnt = u32_get_bits(val32, BIT_MASK_OFDM_FF_CNT); + ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_SF_CNT); + val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE2_11N); + dm_info->ofdm_cca_cnt = u32_get_bits(val32, BIT_MASK_OFDM_CCA_CNT); + ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_PF_CNT); + val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE3_11N); + ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_RI_CNT); + ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_CRC_CNT); + val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE4_11N); + ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_MNS_CNT); + + dm_info->cck_fa_cnt = cck_fa_cnt; + dm_info->ofdm_fa_cnt = ofdm_fa_cnt; + dm_info->total_fa_cnt = cck_fa_cnt + ofdm_fa_cnt; + + dm_info->cck_err_cnt = 
rtw_read32(rtwdev, REG_IGI_C_11N); + dm_info->cck_ok_cnt = rtw_read32(rtwdev, REG_IGI_D_11N); + crc32_cnt = rtw_read32(rtwdev, REG_OFDM_CRC32_CNT_11N); + dm_info->ofdm_err_cnt = u32_get_bits(crc32_cnt, BIT_MASK_OFDM_LCRC_ERR); + dm_info->ofdm_ok_cnt = u32_get_bits(crc32_cnt, BIT_MASK_OFDM_LCRC_OK); + crc32_cnt = rtw_read32(rtwdev, REG_HT_CRC32_CNT_11N); + dm_info->ht_err_cnt = u32_get_bits(crc32_cnt, BIT_MASK_HT_CRC_ERR); + dm_info->ht_ok_cnt = u32_get_bits(crc32_cnt, BIT_MASK_HT_CRC_OK); + dm_info->vht_err_cnt = 0; + dm_info->vht_ok_cnt = 0; + + val32 = rtw_read32(rtwdev, REG_CCK_CCA_CNT_11N); + dm_info->cck_cca_cnt = (u32_get_bits(val32, BIT_MASK_CCK_FA_MSB) << 8) | + u32_get_bits(val32, BIT_MASK_CCK_FA_LSB); + dm_info->total_cca_cnt = dm_info->cck_cca_cnt + dm_info->ofdm_cca_cnt; + + /* reset counter */ + rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTC_11N, BIT_MASK_OFDM_FA_RST, 1); + rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTC_11N, BIT_MASK_OFDM_FA_RST, 0); + rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_RST1, 1); + rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_RST1, 0); + rtw_write32_mask(rtwdev, REG_OFDM_FA_HOLDC_11N, BIT_MASK_OFDM_FA_KEEP, 0); + rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_KEEP1, 0); + rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_CNT_KPEN, 0); + rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_CNT_KPEN, 2); + rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_FA_KPEN, 0); + rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_FA_KPEN, 2); + rtw_write32_mask(rtwdev, REG_PAGE_F_RST_11N, BIT_MASK_F_RST_ALL, 1); + rtw_write32_mask(rtwdev, REG_PAGE_F_RST_11N, BIT_MASK_F_RST_ALL, 0); +} + +/* IQK (IQ calibration) */ + +static +void __rtw8723x_iqk_backup_regs(struct rtw_dev *rtwdev, + struct rtw8723x_iqk_backup_regs *backup) +{ + int i; + + for (i = 0; i < RTW8723X_IQK_ADDA_REG_NUM; i++) + backup->adda[i] = rtw_read32(rtwdev, + rtw8723x_common.iqk_adda_regs[i]); + + for (i = 0; i < RTW8723X_IQK_MAC8_REG_NUM; i++) + backup->mac8[i] = rtw_read8(rtwdev, + rtw8723x_common.iqk_mac8_regs[i]); + for (i = 0; i < RTW8723X_IQK_MAC32_REG_NUM; i++) + backup->mac32[i] = rtw_read32(rtwdev, + rtw8723x_common.iqk_mac32_regs[i]); + + for (i = 0; i < RTW8723X_IQK_BB_REG_NUM; i++) + backup->bb[i] = rtw_read32(rtwdev, + rtw8723x_common.iqk_bb_regs[i]); + + backup->igia = rtw_read32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0); + backup->igib = rtw_read32_mask(rtwdev, REG_OFDM0_XBAGC1, MASKBYTE0); + + backup->bb_sel_btg = rtw_read32(rtwdev, REG_BB_SEL_BTG); +} + +static +void __rtw8723x_iqk_restore_regs(struct rtw_dev *rtwdev, + const struct rtw8723x_iqk_backup_regs *backup) +{ + int i; + + for (i = 0; i < RTW8723X_IQK_ADDA_REG_NUM; i++) + rtw_write32(rtwdev, rtw8723x_common.iqk_adda_regs[i], + backup->adda[i]); + + for (i = 0; i < RTW8723X_IQK_MAC8_REG_NUM; i++) + rtw_write8(rtwdev, rtw8723x_common.iqk_mac8_regs[i], + backup->mac8[i]); + for (i = 0; i < RTW8723X_IQK_MAC32_REG_NUM; i++) + rtw_write32(rtwdev, rtw8723x_common.iqk_mac32_regs[i], + backup->mac32[i]); + + for (i = 0; i < RTW8723X_IQK_BB_REG_NUM; i++) + rtw_write32(rtwdev, rtw8723x_common.iqk_bb_regs[i], + backup->bb[i]); + + rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, 0x50); + rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, backup->igia); + + rtw_write32_mask(rtwdev, REG_OFDM0_XBAGC1, MASKBYTE0, 0x50); + rtw_write32_mask(rtwdev, REG_OFDM0_XBAGC1, MASKBYTE0, backup->igib); + + rtw_write32(rtwdev, REG_TXIQK_TONE_A_11N, 0x01008c00); + 
rtw_write32(rtwdev, REG_RXIQK_TONE_A_11N, 0x01008c00); +} + +static +bool __rtw8723x_iqk_similarity_cmp(struct rtw_dev *rtwdev, + s32 result[][IQK_NR], + u8 c1, u8 c2) +{ + u32 i, j, diff; + u32 bitmap = 0; + u8 candidate[PATH_NR] = {IQK_ROUND_INVALID, IQK_ROUND_INVALID}; + bool ret = true; + + s32 tmp1, tmp2; + + for (i = 0; i < IQK_NR; i++) { + tmp1 = iqkxy_to_s32(result[c1][i]); + tmp2 = iqkxy_to_s32(result[c2][i]); + + diff = abs(tmp1 - tmp2); + + if (diff <= MAX_TOLERANCE) + continue; + + if ((i == IQK_S1_RX_X || i == IQK_S0_RX_X) && !bitmap) { + if (result[c1][i] + result[c1][i + 1] == 0) + candidate[i / IQK_SX_NR] = c2; + else if (result[c2][i] + result[c2][i + 1] == 0) + candidate[i / IQK_SX_NR] = c1; + else + bitmap |= BIT(i); + } else { + bitmap |= BIT(i); + } + } + + if (bitmap != 0) + goto check_sim; + + for (i = 0; i < PATH_NR; i++) { + if (candidate[i] == IQK_ROUND_INVALID) + continue; + + for (j = i * IQK_SX_NR; j < i * IQK_SX_NR + 2; j++) + result[IQK_ROUND_HYBRID][j] = result[candidate[i]][j]; + ret = false; + } + + return ret; + +check_sim: + for (i = 0; i < IQK_NR; i++) { + j = i & ~1; /* 2 bits are a pair for IQ[X, Y] */ + if (bitmap & GENMASK(j + 1, j)) + continue; + + result[IQK_ROUND_HYBRID][i] = result[c1][i]; + } + + return false; +} + +static u8 __rtw8723x_pwrtrack_get_limit_ofdm(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u8 tx_rate = dm_info->tx_rate; + u8 limit_ofdm = 30; + + switch (tx_rate) { + case DESC_RATE1M...DESC_RATE5_5M: + case DESC_RATE11M: + break; + case DESC_RATE6M...DESC_RATE48M: + limit_ofdm = 36; + break; + case DESC_RATE54M: + limit_ofdm = 34; + break; + case DESC_RATEMCS0...DESC_RATEMCS2: + limit_ofdm = 38; + break; + case DESC_RATEMCS3...DESC_RATEMCS4: + limit_ofdm = 36; + break; + case DESC_RATEMCS5...DESC_RATEMCS7: + limit_ofdm = 34; + break; + default: + rtw_warn(rtwdev, "pwrtrack unhandled tx_rate 0x%x\n", tx_rate); + break; + } + + return limit_ofdm; +} + +static +void __rtw8723x_pwrtrack_set_xtal(struct rtw_dev *rtwdev, u8 therm_path, + u8 delta) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + const struct rtw_pwr_track_tbl *tbl = rtwdev->chip->pwr_track_tbl; + const s8 *pwrtrk_xtal; + s8 xtal_cap; + + if (dm_info->thermal_avg[therm_path] > + rtwdev->efuse.thermal_meter[therm_path]) + pwrtrk_xtal = tbl->pwrtrk_xtal_p; + else + pwrtrk_xtal = tbl->pwrtrk_xtal_n; + + xtal_cap = rtwdev->efuse.crystal_cap & 0x3F; + xtal_cap = clamp_t(s8, xtal_cap + pwrtrk_xtal[delta], 0, 0x3F); + rtw_write32_mask(rtwdev, REG_AFE_CTRL3, BIT_MASK_XTAL, + xtal_cap | (xtal_cap << 6)); +} + +static +void __rtw8723x_fill_txdesc_checksum(struct rtw_dev *rtwdev, + struct rtw_tx_pkt_info *pkt_info, + u8 *txdesc) +{ + size_t words = 32 / 2; /* calculate the first 32 bytes (16 words) */ + __le16 chksum = 0; + __le16 *data = (__le16 *)(txdesc); + struct rtw_tx_desc *tx_desc = (struct rtw_tx_desc *)txdesc; + + le32p_replace_bits(&tx_desc->w7, 0, RTW_TX_DESC_W7_TXDESC_CHECKSUM); + + while (words--) + chksum ^= *data++; + + chksum = ~chksum; + + le32p_replace_bits(&tx_desc->w7, __le16_to_cpu(chksum), + RTW_TX_DESC_W7_TXDESC_CHECKSUM); +} + +static void __rtw8723x_coex_cfg_init(struct rtw_dev *rtwdev) +{ + /* enable TBTT nterrupt */ + rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION); + + /* BT report packet sample rate */ + /* 0x790[5:0]=0x5 */ + rtw_write8_mask(rtwdev, REG_BT_TDMA_TIME, BIT_MASK_SAMPLE_RATE, 0x5); + + /* enable BT counter statistics */ + rtw_write8(rtwdev, REG_BT_STAT_CTRL, 0x1); + + /* enable PTA (3-wire 
function form BT side) */ + rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_PTA_EN); + rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_PO_BT_PTA_PINS); + + /* enable PTA (tx/rx signal form WiFi side) */ + rtw_write8_set(rtwdev, REG_QUEUE_CTRL, BIT_PTA_WL_TX_EN); +} + +const struct rtw8723x_common rtw8723x_common = { + .iqk_adda_regs = { + 0x85c, 0xe6c, 0xe70, 0xe74, 0xe78, 0xe7c, 0xe80, 0xe84, + 0xe88, 0xe8c, 0xed0, 0xed4, 0xed8, 0xedc, 0xee0, 0xeec + }, + .iqk_mac8_regs = {0x522, 0x550, 0x551}, + .iqk_mac32_regs = {0x40}, + .iqk_bb_regs = { + 0xc04, 0xc08, 0x874, 0xb68, 0xb6c, 0x870, 0x860, 0x864, 0xa04 + }, + + .ltecoex_addr = { + .ctrl = REG_LTECOEX_CTRL, + .wdata = REG_LTECOEX_WRITE_DATA, + .rdata = REG_LTECOEX_READ_DATA, + }, + .rf_sipi_addr = { + [RF_PATH_A] = { .hssi_1 = 0x820, .lssi_read = 0x8a0, + .hssi_2 = 0x824, .lssi_read_pi = 0x8b8}, + [RF_PATH_B] = { .hssi_1 = 0x828, .lssi_read = 0x8a4, + .hssi_2 = 0x82c, .lssi_read_pi = 0x8bc}, + }, + .dig = { + [0] = { .addr = 0xc50, .mask = 0x7f }, + [1] = { .addr = 0xc50, .mask = 0x7f }, + }, + .dig_cck = { + [0] = { .addr = 0xa0c, .mask = 0x3f00 }, + }, + .prioq_addrs = { + .prio[RTW_DMA_MAPPING_EXTRA] = { + .rsvd = REG_RQPN_NPQ + 2, .avail = REG_RQPN_NPQ + 3, + }, + .prio[RTW_DMA_MAPPING_LOW] = { + .rsvd = REG_RQPN + 1, .avail = REG_FIFOPAGE_CTRL_2 + 1, + }, + .prio[RTW_DMA_MAPPING_NORMAL] = { + .rsvd = REG_RQPN_NPQ, .avail = REG_RQPN_NPQ + 1, + }, + .prio[RTW_DMA_MAPPING_HIGH] = { + .rsvd = REG_RQPN, .avail = REG_FIFOPAGE_CTRL_2, + }, + .wsize = false, + }, + + .lck = __rtw8723x_lck, + .read_efuse = __rtw8723x_read_efuse, + .mac_init = __rtw8723x_mac_init, + .cfg_ldo25 = __rtw8723x_cfg_ldo25, + .set_tx_power_index = __rtw8723x_set_tx_power_index, + .efuse_grant = __rtw8723x_efuse_grant, + .false_alarm_statistics = __rtw8723x_false_alarm_statistics, + .iqk_backup_regs = __rtw8723x_iqk_backup_regs, + .iqk_restore_regs = __rtw8723x_iqk_restore_regs, + .iqk_similarity_cmp = __rtw8723x_iqk_similarity_cmp, + .pwrtrack_get_limit_ofdm = __rtw8723x_pwrtrack_get_limit_ofdm, + .pwrtrack_set_xtal = __rtw8723x_pwrtrack_set_xtal, + .coex_cfg_init = __rtw8723x_coex_cfg_init, + .fill_txdesc_checksum = __rtw8723x_fill_txdesc_checksum, + .debug_txpwr_limit = __rtw8723x_debug_txpwr_limit, +}; +EXPORT_SYMBOL(rtw8723x_common); + +MODULE_AUTHOR("Realtek Corporation"); +MODULE_AUTHOR("Fiona Klute <[email protected]>"); +MODULE_DESCRIPTION("Common functions for Realtek 802.11n wireless 8723x drivers"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723x.h b/drivers/net/wireless/realtek/rtw88/rtw8723x.h new file mode 100644 index 000000000000..e93bfce994bf --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8723x.h @@ -0,0 +1,518 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright 2024 Fiona Klute + * + * Based on code originally in rtw8723d.[ch], + * Copyright(c) 2018-2019 Realtek Corporation + */ + +#ifndef __RTW8723X_H__ +#define __RTW8723X_H__ + +#include "main.h" +#include "debug.h" +#include "phy.h" +#include "reg.h" + +enum rtw8723x_path { + PATH_S1, + PATH_S0, + PATH_NR, +}; + +enum rtw8723x_iqk_round { + IQK_ROUND_0, + IQK_ROUND_1, + IQK_ROUND_2, + IQK_ROUND_HYBRID, + IQK_ROUND_SIZE, + IQK_ROUND_INVALID = 0xff, +}; + +enum rtw8723x_iqk_result { + IQK_S1_TX_X, + IQK_S1_TX_Y, + IQK_S1_RX_X, + IQK_S1_RX_Y, + IQK_S0_TX_X, + IQK_S0_TX_Y, + IQK_S0_RX_X, + IQK_S0_RX_Y, + IQK_NR, + IQK_SX_NR = IQK_NR / PATH_NR, +}; + +struct rtw8723xe_efuse { + u8 mac_addr[ETH_ALEN]; /* 0xd0 */ + u8 vendor_id[2]; + 
u8 device_id[2]; + u8 sub_vendor_id[2]; + u8 sub_device_id[2]; +}; + +struct rtw8723xu_efuse { + u8 res4[48]; /* 0xd0 */ + u8 vendor_id[2]; /* 0x100 */ + u8 product_id[2]; /* 0x102 */ + u8 usb_option; /* 0x104 */ + u8 res5[2]; /* 0x105 */ + u8 mac_addr[ETH_ALEN]; /* 0x107 */ +}; + +struct rtw8723xs_efuse { + u8 res4[0x4a]; /* 0xd0 */ + u8 mac_addr[ETH_ALEN]; /* 0x11a */ +}; + +struct rtw8723x_efuse { + __le16 rtl_id; + u8 rsvd[2]; + u8 afe; + u8 rsvd1[11]; + + /* power index for four RF paths */ + struct rtw_txpwr_idx txpwr_idx_table[4]; + + u8 channel_plan; /* 0xb8 */ + u8 xtal_k; + u8 thermal_meter; + u8 iqk_lck; + u8 pa_type; /* 0xbc */ + u8 lna_type_2g[2]; /* 0xbd */ + u8 lna_type_5g[2]; + u8 rf_board_option; + u8 rf_feature_option; + u8 rf_bt_setting; + u8 eeprom_version; + u8 eeprom_customer_id; + u8 tx_bb_swing_setting_2g; + u8 res_c7; + u8 tx_pwr_calibrate_rate; + u8 rf_antenna_option; /* 0xc9 */ + u8 rfe_option; + u8 country_code[2]; + u8 res[3]; + union { + struct rtw8723xe_efuse e; + struct rtw8723xu_efuse u; + struct rtw8723xs_efuse s; + }; +}; + +#define RTW8723X_IQK_ADDA_REG_NUM 16 +#define RTW8723X_IQK_MAC8_REG_NUM 3 +#define RTW8723X_IQK_MAC32_REG_NUM 1 +#define RTW8723X_IQK_BB_REG_NUM 9 + +struct rtw8723x_iqk_backup_regs { + u32 adda[RTW8723X_IQK_ADDA_REG_NUM]; + u8 mac8[RTW8723X_IQK_MAC8_REG_NUM]; + u32 mac32[RTW8723X_IQK_MAC32_REG_NUM]; + u32 bb[RTW8723X_IQK_BB_REG_NUM]; + + u32 lte_path; + u32 lte_gnt; + + u32 bb_sel_btg; + u8 btg_sel; + + u8 igia; + u8 igib; +}; + +struct rtw8723x_common { + /* registers that must be backed up before IQK and restored after */ + u32 iqk_adda_regs[RTW8723X_IQK_ADDA_REG_NUM]; + u32 iqk_mac8_regs[RTW8723X_IQK_MAC8_REG_NUM]; + u32 iqk_mac32_regs[RTW8723X_IQK_MAC32_REG_NUM]; + u32 iqk_bb_regs[RTW8723X_IQK_BB_REG_NUM]; + + /* chip register definitions */ + struct rtw_ltecoex_addr ltecoex_addr; + struct rtw_rf_sipi_addr rf_sipi_addr[2]; + struct rtw_hw_reg dig[2]; + struct rtw_hw_reg dig_cck[1]; + struct rtw_prioq_addrs prioq_addrs; + + /* common functions */ + void (*lck)(struct rtw_dev *rtwdev); + int (*read_efuse)(struct rtw_dev *rtwdev, u8 *log_map); + int (*mac_init)(struct rtw_dev *rtwdev); + void (*cfg_ldo25)(struct rtw_dev *rtwdev, bool enable); + void (*set_tx_power_index)(struct rtw_dev *rtwdev); + void (*efuse_grant)(struct rtw_dev *rtwdev, bool on); + void (*false_alarm_statistics)(struct rtw_dev *rtwdev); + void (*iqk_backup_regs)(struct rtw_dev *rtwdev, + struct rtw8723x_iqk_backup_regs *backup); + void (*iqk_restore_regs)(struct rtw_dev *rtwdev, + const struct rtw8723x_iqk_backup_regs *backup); + bool (*iqk_similarity_cmp)(struct rtw_dev *rtwdev, s32 result[][IQK_NR], + u8 c1, u8 c2); + u8 (*pwrtrack_get_limit_ofdm)(struct rtw_dev *rtwdev); + void (*pwrtrack_set_xtal)(struct rtw_dev *rtwdev, u8 therm_path, + u8 delta); + void (*coex_cfg_init)(struct rtw_dev *rtwdev); + void (*fill_txdesc_checksum)(struct rtw_dev *rtwdev, + struct rtw_tx_pkt_info *pkt_info, + u8 *txdesc); + void (*debug_txpwr_limit)(struct rtw_dev *rtwdev, + struct rtw_txpwr_idx *table, + int tx_path_count); +}; + +extern const struct rtw8723x_common rtw8723x_common; + +#define PATH_IQK_RETRY 2 +#define MAX_TOLERANCE 5 +#define IQK_TX_X_ERR 0x142 +#define IQK_TX_Y_ERR 0x42 +#define IQK_RX_X_ERR 0x132 +#define IQK_RX_Y_ERR 0x36 +#define IQK_RX_X_UPPER 0x11a +#define IQK_RX_X_LOWER 0xe6 +#define IQK_RX_Y_LMT 0x1a +#define IQK_TX_OK BIT(0) +#define IQK_RX_OK BIT(1) + +#define WLAN_TXQ_RPT_EN 0x1F + +#define SPUR_THRES 0x16 +#define DIS_3WIRE 0xccf000c0 +#define 
EN_3WIRE 0xccc000c0 +#define START_PSD 0x400000 +#define FREQ_CH5 0xfccd +#define FREQ_CH6 0xfc4d +#define FREQ_CH7 0xffcd +#define FREQ_CH8 0xff4d +#define FREQ_CH13 0xfccd +#define FREQ_CH14 0xff9a +#define RFCFGCH_CHANNEL_MASK GENMASK(7, 0) +#define RFCFGCH_BW_MASK (BIT(11) | BIT(10)) +#define RFCFGCH_BW_20M (BIT(11) | BIT(10)) +#define RFCFGCH_BW_40M BIT(10) +#define BIT_MASK_RFMOD BIT(0) +#define BIT_LCK BIT(15) + +#define REG_GPIO_INTM 0x0048 +#define REG_BTG_SEL 0x0067 +#define BIT_MASK_BTG_WL BIT(7) +#define REG_LTECOEX_PATH_CONTROL 0x0070 +#define REG_LTECOEX_CTRL 0x07c0 +#define REG_LTECOEX_WRITE_DATA 0x07c4 +#define REG_LTECOEX_READ_DATA 0x07c8 +#define REG_PSDFN 0x0808 +#define REG_BB_PWR_SAV1_11N 0x0874 +#define REG_ANA_PARAM1 0x0880 +#define REG_ANALOG_P4 0x088c +#define REG_PSDRPT 0x08b4 +#define REG_FPGA1_RFMOD 0x0900 +#define REG_BB_SEL_BTG 0x0948 +#define REG_BBRX_DFIR 0x0954 +#define BIT_MASK_RXBB_DFIR GENMASK(27, 24) +#define BIT_RXBB_DFIR_EN BIT(19) +#define REG_CCK0_SYS 0x0a00 +#define BIT_CCK_SIDE_BAND BIT(4) +#define REG_CCK_ANT_SEL_11N 0x0a04 +#define REG_PWRTH 0x0a08 +#define REG_CCK_FA_RST_11N 0x0a2c +#define BIT_MASK_CCK_CNT_KEEP BIT(12) +#define BIT_MASK_CCK_CNT_EN BIT(13) +#define BIT_MASK_CCK_CNT_KPEN (BIT_MASK_CCK_CNT_KEEP | BIT_MASK_CCK_CNT_EN) +#define BIT_MASK_CCK_FA_KEEP BIT(14) +#define BIT_MASK_CCK_FA_EN BIT(15) +#define BIT_MASK_CCK_FA_KPEN (BIT_MASK_CCK_FA_KEEP | BIT_MASK_CCK_FA_EN) +#define REG_CCK_FA_LSB_11N 0x0a5c +#define REG_CCK_FA_MSB_11N 0x0a58 +#define REG_CCK_CCA_CNT_11N 0x0a60 +#define BIT_MASK_CCK_FA_MSB GENMASK(7, 0) +#define BIT_MASK_CCK_FA_LSB GENMASK(15, 8) +#define REG_PWRTH2 0x0aa8 +#define REG_CSRATIO 0x0aaa +#define REG_OFDM_FA_HOLDC_11N 0x0c00 +#define BIT_MASK_OFDM_FA_KEEP BIT(31) +#define REG_BB_RX_PATH_11N 0x0c04 +#define REG_TRMUX_11N 0x0c08 +#define REG_OFDM_FA_RSTC_11N 0x0c0c +#define BIT_MASK_OFDM_FA_RST BIT(31) +#define REG_A_RXIQI 0x0c14 +#define BIT_MASK_RXIQ_S1_X 0x000003FF +#define BIT_MASK_RXIQ_S1_Y1 0x0000FC00 +#define BIT_SET_RXIQ_S1_Y1(y) ((y) & 0x3F) +#define REG_OFDM0_RXDSP 0x0c40 +#define BIT_MASK_RXDSP GENMASK(28, 24) +#define BIT_EN_RXDSP BIT(9) +#define REG_OFDM_0_ECCA_THRESHOLD 0x0c4c +#define BIT_MASK_OFDM0_EXT_A BIT(31) +#define BIT_MASK_OFDM0_EXT_C BIT(29) +#define BIT_MASK_OFDM0_EXTS (BIT(31) | BIT(29) | BIT(28)) +#define BIT_SET_OFDM0_EXTS(a, c, d) (((a) << 31) | ((c) << 29) | ((d) << 28)) +#define BIT_MASK_OFDM0_EXTS_B (BIT(27) | BIT(25) | BIT(24)) +#define BIT_SET_OFDM0_EXTS_B(a, c, d) (((a) << 27) | ((c) << 25) | ((d) << 24)) +#define REG_OFDM0_XAAGC1 0x0c50 +#define REG_OFDM0_XBAGC1 0x0c58 +#define REG_AGCRSSI 0x0c78 +#define REG_OFDM_0_XA_TX_IQ_IMBALANCE 0x0c80 +#define REG_OFDM_0_XB_TX_IQ_IMBALANCE 0x0c88 +#define BIT_MASK_TXIQ_ELM_A 0x03ff +#define BIT_SET_TXIQ_ELM_ACD(a, c, d) (((d) << 22) | (((c) & 0x3F) << 16) | \ + ((a) & 0x03ff)) +#define BIT_MASK_TXIQ_ELM_C GENMASK(21, 16) +#define BIT_SET_TXIQ_ELM_C2(c) ((c) & 0x3F) +#define BIT_MASK_TXIQ_ELM_D GENMASK(31, 22) +#define REG_TXIQK_MATRIXA_LSB2_11N 0x0c94 +#define BIT_SET_TXIQ_ELM_C1(c) (((c) & 0x000003C0) >> 6) +#define REG_RXIQK_MATRIX_LSB_11N 0x0ca0 +#define BIT_MASK_RXIQ_S1_Y2 0xF0000000 +#define BIT_SET_RXIQ_S1_Y2(y) (((y) >> 6) & 0xF) +#define REG_TXIQ_AB_S0 0x0cd0 +#define BIT_MASK_TXIQ_A_S0 0x000007FE +#define BIT_MASK_TXIQ_A_EXT_S0 BIT(0) +#define BIT_MASK_TXIQ_B_S0 0x0007E000 +#define REG_TXIQ_CD_S0 0x0cd4 +#define BIT_MASK_TXIQ_C_S0 0x000007FE +#define BIT_MASK_TXIQ_C_EXT_S0 BIT(0) +#define BIT_MASK_TXIQ_D_S0 GENMASK(22, 13) 
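(Editorial note: the S1 RX IQ correction coefficients are split across two registers: X and the low six bits of Y live in REG_A_RXIQI, while the top four bits of Y go into bits 31:28 of REG_RXIQK_MATRIX_LSB_11N. Below is a minimal sketch of how the BIT_SET_RXIQ_S1_Y1/Y2 helpers defined above split a 10-bit Y value; the function name is hypothetical and the real matrix-fill code stays in the per-chip rtw8723d.c, outside this hunk.)

/* Illustrative only, not part of the commit. */
static void example_fill_s1_rx_y(struct rtw_dev *rtwdev, s32 rx_y)
{
	/* low 6 bits of Y -> REG_A_RXIQI bits 15:10 */
	rtw_write32_mask(rtwdev, REG_A_RXIQI, BIT_MASK_RXIQ_S1_Y1,
			 BIT_SET_RXIQ_S1_Y1(rx_y));
	/* high 4 bits of Y -> REG_RXIQK_MATRIX_LSB_11N bits 31:28 */
	rtw_write32_mask(rtwdev, REG_RXIQK_MATRIX_LSB_11N, BIT_MASK_RXIQ_S1_Y2,
			 BIT_SET_RXIQ_S1_Y2(rx_y));
}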
+#define BIT_MASK_TXIQ_D_EXT_S0 BIT(12) +#define REG_RXIQ_AB_S0 0x0cd8 +#define BIT_MASK_RXIQ_X_S0 0x000003FF +#define BIT_MASK_RXIQ_Y_S0 0x003FF000 +#define REG_OFDM_FA_TYPE1_11N 0x0cf0 +#define BIT_MASK_OFDM_FF_CNT GENMASK(15, 0) +#define BIT_MASK_OFDM_SF_CNT GENMASK(31, 16) +#define REG_OFDM_FA_RSTD_11N 0x0d00 +#define BIT_MASK_OFDM_FA_RST1 BIT(27) +#define BIT_MASK_OFDM_FA_KEEP1 BIT(31) +#define REG_CTX 0x0d03 +#define BIT_MASK_CTX_TYPE GENMASK(6, 4) +#define REG_OFDM1_CFOTRK 0x0d2c +#define BIT_EN_CFOTRK BIT(28) +#define REG_OFDM1_CSI1 0x0d40 +#define REG_OFDM1_CSI2 0x0d44 +#define REG_OFDM1_CSI3 0x0d48 +#define REG_OFDM1_CSI4 0x0d4c +#define REG_OFDM_FA_TYPE2_11N 0x0da0 +#define BIT_MASK_OFDM_CCA_CNT GENMASK(15, 0) +#define BIT_MASK_OFDM_PF_CNT GENMASK(31, 16) +#define REG_OFDM_FA_TYPE3_11N 0x0da4 +#define BIT_MASK_OFDM_RI_CNT GENMASK(15, 0) +#define BIT_MASK_OFDM_CRC_CNT GENMASK(31, 16) +#define REG_OFDM_FA_TYPE4_11N 0x0da8 +#define BIT_MASK_OFDM_MNS_CNT GENMASK(15, 0) +#define REG_FPGA0_IQK_11N 0x0e28 +#define BIT_MASK_IQK_MOD 0xffffff00 +#define EN_IQK 0x808000 +#define RST_IQK 0x000000 +#define REG_TXIQK_TONE_A_11N 0x0e30 +#define REG_RXIQK_TONE_A_11N 0x0e34 +#define REG_TXIQK_PI_A_11N 0x0e38 +#define REG_RXIQK_PI_A_11N 0x0e3c +#define REG_TXIQK_11N 0x0e40 +#define BIT_SET_TXIQK_11N(x, y) (0x80007C00 | ((x) << 16) | (y)) +#define REG_RXIQK_11N 0x0e44 +#define REG_IQK_AGC_PTS_11N 0x0e48 +#define REG_IQK_AGC_RSP_11N 0x0e4c +#define REG_TX_IQK_TONE_B 0x0e50 +#define REG_RX_IQK_TONE_B 0x0e54 +#define REG_TXIQK_PI_B 0x0e58 +#define REG_RXIQK_PI_B 0x0e5c +#define REG_IQK_RES_TX 0x0e94 +#define BIT_MASK_RES_TX GENMASK(25, 16) +#define REG_IQK_RES_TY 0x0e9c +#define BIT_MASK_RES_TY GENMASK(25, 16) +#define REG_IQK_RES_RX 0x0ea4 +#define BIT_MASK_RES_RX GENMASK(25, 16) +#define REG_IQK_RES_RY 0x0eac +#define BIT_IQK_TX_FAIL BIT(28) +#define BIT_IQK_RX_FAIL BIT(27) +#define BIT_IQK_DONE BIT(26) +#define BIT_MASK_RES_RY GENMASK(25, 16) +#define REG_PAGE_F_RST_11N 0x0f14 +#define BIT_MASK_F_RST_ALL BIT(16) +#define REG_IGI_C_11N 0x0f84 +#define REG_IGI_D_11N 0x0f88 +#define REG_HT_CRC32_CNT_11N 0x0f90 +#define BIT_MASK_HT_CRC_OK GENMASK(15, 0) +#define BIT_MASK_HT_CRC_ERR GENMASK(31, 16) +#define REG_OFDM_CRC32_CNT_11N 0x0f94 +#define BIT_MASK_OFDM_LCRC_OK GENMASK(15, 0) +#define BIT_MASK_OFDM_LCRC_ERR GENMASK(31, 16) +#define REG_HT_CRC32_CNT_11N_AGG 0x0fb8 + +#define OFDM_SWING_A(swing) FIELD_GET(GENMASK(9, 0), swing) +#define OFDM_SWING_B(swing) FIELD_GET(GENMASK(15, 10), swing) +#define OFDM_SWING_C(swing) FIELD_GET(GENMASK(21, 16), swing) +#define OFDM_SWING_D(swing) FIELD_GET(GENMASK(31, 22), swing) + +static inline s32 iqkxy_to_s32(s32 val) +{ + /* val is Q10.8 */ + return sign_extend32(val, 9); +} + +static inline s32 iqk_mult(s32 x, s32 y, s32 *ext) +{ + /* x, y and return value are Q10.8 */ + s32 t; + + t = x * y; + if (ext) + *ext = (t >> 7) & 0x1; /* Q.16 --> Q.9; get LSB of Q.9 */ + + return (t >> 8); /* Q.16 --> Q.8 */ +} + +static inline +void rtw8723x_debug_txpwr_limit(struct rtw_dev *rtwdev, + struct rtw_txpwr_idx *table, + int tx_path_count) +{ + rtw8723x_common.debug_txpwr_limit(rtwdev, table, tx_path_count); +} + +static inline void rtw8723x_lck(struct rtw_dev *rtwdev) +{ + rtw8723x_common.lck(rtwdev); +} + +static inline int rtw8723x_read_efuse(struct rtw_dev *rtwdev, u8 *log_map) +{ + return rtw8723x_common.read_efuse(rtwdev, log_map); +} + +static inline int rtw8723x_mac_init(struct rtw_dev *rtwdev) +{ + return rtw8723x_common.mac_init(rtwdev); +} + +static inline void 
rtw8723x_cfg_ldo25(struct rtw_dev *rtwdev, bool enable) +{ + rtw8723x_common.cfg_ldo25(rtwdev, enable); +} + +static inline void rtw8723x_set_tx_power_index(struct rtw_dev *rtwdev) +{ + rtw8723x_common.set_tx_power_index(rtwdev); +} + +static inline void rtw8723x_efuse_grant(struct rtw_dev *rtwdev, bool on) +{ + rtw8723x_common.efuse_grant(rtwdev, on); +} + +static inline void rtw8723x_false_alarm_statistics(struct rtw_dev *rtwdev) +{ + rtw8723x_common.false_alarm_statistics(rtwdev); +} + +static inline +void rtw8723x_iqk_backup_regs(struct rtw_dev *rtwdev, + struct rtw8723x_iqk_backup_regs *backup) +{ + rtw8723x_common.iqk_backup_regs(rtwdev, backup); +} + +static inline +void rtw8723x_iqk_restore_regs(struct rtw_dev *rtwdev, + const struct rtw8723x_iqk_backup_regs *backup) +{ + rtw8723x_common.iqk_restore_regs(rtwdev, backup); +} + +static inline +bool rtw8723x_iqk_similarity_cmp(struct rtw_dev *rtwdev, s32 result[][IQK_NR], + u8 c1, u8 c2) +{ + return rtw8723x_common.iqk_similarity_cmp(rtwdev, result, c1, c2); +} + +static inline u8 rtw8723x_pwrtrack_get_limit_ofdm(struct rtw_dev *rtwdev) +{ + return rtw8723x_common.pwrtrack_get_limit_ofdm(rtwdev); +} + +static inline +void rtw8723x_pwrtrack_set_xtal(struct rtw_dev *rtwdev, u8 therm_path, + u8 delta) +{ + rtw8723x_common.pwrtrack_set_xtal(rtwdev, therm_path, delta); +} + +static inline void rtw8723x_coex_cfg_init(struct rtw_dev *rtwdev) +{ + rtw8723x_common.coex_cfg_init(rtwdev); +} + +static inline +void rtw8723x_fill_txdesc_checksum(struct rtw_dev *rtwdev, + struct rtw_tx_pkt_info *pkt_info, + u8 *txdesc) +{ + rtw8723x_common.fill_txdesc_checksum(rtwdev, pkt_info, txdesc); +} + +/* IQK helper functions, defined as inline so they can be shared + * without needing an EXPORT_SYMBOL each. + */ +static inline void +rtw8723x_iqk_backup_path_ctrl(struct rtw_dev *rtwdev, + struct rtw8723x_iqk_backup_regs *backup) +{ + backup->btg_sel = rtw_read8(rtwdev, REG_BTG_SEL); + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] original 0x67 = 0x%x\n", + backup->btg_sel); +} + +static inline void rtw8723x_iqk_config_path_ctrl(struct rtw_dev *rtwdev) +{ + rtw_write32_mask(rtwdev, REG_PAD_CTRL1, BIT_BT_BTG_SEL, 0x1); + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] set 0x67 = 0x%x\n", + rtw_read32_mask(rtwdev, REG_PAD_CTRL1, MASKBYTE3)); +} + +static inline void +rtw8723x_iqk_restore_path_ctrl(struct rtw_dev *rtwdev, + const struct rtw8723x_iqk_backup_regs *backup) +{ + rtw_write8(rtwdev, REG_BTG_SEL, backup->btg_sel); + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] restore 0x67 = 0x%x\n", + rtw_read32_mask(rtwdev, REG_PAD_CTRL1, MASKBYTE3)); +} + +static inline void +rtw8723x_iqk_backup_lte_path_gnt(struct rtw_dev *rtwdev, + struct rtw8723x_iqk_backup_regs *backup) +{ + backup->lte_path = rtw_read32(rtwdev, REG_LTECOEX_PATH_CONTROL); + rtw_write32(rtwdev, REG_LTECOEX_CTRL, 0x800f0038); + mdelay(1); + backup->lte_gnt = rtw_read32(rtwdev, REG_LTECOEX_READ_DATA); + rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] OriginalGNT = 0x%x\n", + backup->lte_gnt); +} + +static inline void +rtw8723x_iqk_config_lte_path_gnt(struct rtw_dev *rtwdev, + u32 write_data) +{ + rtw_write32(rtwdev, REG_LTECOEX_WRITE_DATA, write_data); + rtw_write32(rtwdev, REG_LTECOEX_CTRL, 0xc0020038); + rtw_write32_mask(rtwdev, REG_LTECOEX_PATH_CONTROL, + BIT_LTE_MUX_CTRL_PATH, 0x1); +} + +static inline void +rtw8723x_iqk_restore_lte_path_gnt(struct rtw_dev *rtwdev, + const struct rtw8723x_iqk_backup_regs *bak) +{ + rtw_write32(rtwdev, REG_LTECOEX_WRITE_DATA, bak->lte_gnt); + rtw_write32(rtwdev, REG_LTECOEX_CTRL, 0xc00f0038); + 
rtw_write32(rtwdev, REG_LTECOEX_PATH_CONTROL, bak->lte_path); +} + +/* set all ADDA registers to the given value */ +static inline void rtw8723x_iqk_path_adda_on(struct rtw_dev *rtwdev, u32 value) +{ + for (int i = 0; i < RTW8723X_IQK_ADDA_REG_NUM; i++) + rtw_write32(rtwdev, rtw8723x_common.iqk_adda_regs[i], value); +} + +#endif /* __RTW8723X_H__ */ diff --git a/drivers/net/wireless/realtek/rtw88/rx.h b/drivers/net/wireless/realtek/rtw88/rx.h index 3342e3761281..d3668c4efc24 100644 --- a/drivers/net/wireless/realtek/rtw88/rx.h +++ b/drivers/net/wireless/realtek/rtw88/rx.h @@ -40,6 +40,8 @@ enum rtw_rx_desc_enc { le32_get_bits(*((__le32 *)(rxdesc) + 0x02), GENMASK(30, 29)) #define GET_RX_DESC_TSFL(rxdesc) \ le32_get_bits(*((__le32 *)(rxdesc) + 0x05), GENMASK(31, 0)) +#define GET_RX_DESC_BW(rxdesc) \ + (le32_get_bits(*((__le32 *)(rxdesc) + 0x04), GENMASK(31, 24))) void rtw_rx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, struct sk_buff *skb); diff --git a/drivers/net/wireless/realtek/rtw89/Kconfig b/drivers/net/wireless/realtek/rtw89/Kconfig index 90ffbab7cc4c..eaea4eaeb361 100644 --- a/drivers/net/wireless/realtek/rtw89/Kconfig +++ b/drivers/net/wireless/realtek/rtw89/Kconfig @@ -28,6 +28,9 @@ config RTW89_8852B config RTW89_8852C tristate +config RTW89_8922A + tristate + config RTW89_8851BE tristate "Realtek 8851BE PCI wireless network (Wi-Fi 6) adapter" depends on PCI @@ -72,6 +75,18 @@ config RTW89_8852CE 802.11ax PCIe wireless network (Wi-Fi 6E) adapter +config RTW89_8922AE + tristate "Realtek 8922AE PCI wireless network (Wi-Fi 7) adapter" + depends on PCI + select RTW89_CORE + select RTW89_PCI + select RTW89_8922A + help + Select this option will enable support for 8922AE chipset + + 802.11be PCIe wireless network (Wi-Fi 7) adapter + supporting 2x2 2GHz/5GHz/6GHz 4096-QAM 160MHz channels. 
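The rtw8723x.h hunk above introduces two Q10.8 fixed-point helpers used by the IQK matrix code: iqkxy_to_s32() sign-extends a 10-bit register field (bit 9 is the sign bit), and iqk_mult() multiplies two Q.8 values, returning a Q.8 product and, through *ext, the dropped LSB of the intermediate Q.9 value (the *_EXT_S0 bit masks defined nearby suggest where that spare bit is written back). The following is a minimal, self-contained userspace sketch of the same arithmetic, not driver code: sign_extend32() is a local stand-in for the kernel helper of the same name, and the sample register values 0x3a6 and 0x115 are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* local stand-in for the kernel's sign_extend32(value, index):
 * treat bit 'index' as the sign bit and extend it upwards.
 */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

/* same logic as the rtw8723x helpers quoted in the hunk above */
static int32_t iqkxy_to_s32(int32_t val)
{
	/* val is a 10-bit Q10.8 register field */
	return sign_extend32(val, 9);
}

static int32_t iqk_mult(int32_t x, int32_t y, int32_t *ext)
{
	int32_t t = x * y;		/* Q.8 * Q.8 = Q.16 */

	if (ext)
		*ext = (t >> 7) & 0x1;	/* LSB of the Q.9 value */

	return t >> 8;			/* back to Q.8 */
}

int main(void)
{
	/* hypothetical IQK results as read from the matrix registers */
	int32_t x = iqkxy_to_s32(0x3a6);	/* bit 9 set: negative once extended */
	int32_t y = iqkxy_to_s32(0x115);	/* bit 9 clear: stays positive */
	int32_t ext;
	int32_t p = iqk_mult(x, y, &ext);

	printf("x=%d y=%d product(Q.8)=%d ext_bit=%d\n", x, y, p, ext);
	return 0;
}

Only main() and the local sign_extend32() are added for the sake of a standalone example; the two helpers mirror the code in the patch, and the sample inputs carry no meaning beyond showing the sign extension and the extra-bit extraction.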
+ config RTW89_DEBUG bool diff --git a/drivers/net/wireless/realtek/rtw89/Makefile b/drivers/net/wireless/realtek/rtw89/Makefile index 41940099af1b..86a553fb0136 100644 --- a/drivers/net/wireless/realtek/rtw89/Makefile +++ b/drivers/net/wireless/realtek/rtw89/Makefile @@ -4,10 +4,13 @@ obj-$(CONFIG_RTW89_CORE) += rtw89_core.o rtw89_core-y += core.o \ mac80211.o \ mac.o \ + mac_be.o \ phy.o \ + phy_be.o \ fw.o \ cam.o \ efuse.o \ + efuse_be.o \ regd.o \ sar.o \ coex.o \ @@ -54,8 +57,15 @@ rtw89_8852c-objs := rtw8852c.o \ obj-$(CONFIG_RTW89_8852CE) += rtw89_8852ce.o rtw89_8852ce-objs := rtw8852ce.o +obj-$(CONFIG_RTW89_8922A) += rtw89_8922a.o +rtw89_8922a-objs := rtw8922a.o \ + rtw8922a_rfk.o + +obj-$(CONFIG_RTW89_8922AE) += rtw89_8922ae.o +rtw89_8922ae-objs := rtw8922ae.o + rtw89_core-$(CONFIG_RTW89_DEBUG) += debug.o obj-$(CONFIG_RTW89_PCI) += rtw89_pci.o -rtw89_pci-y := pci.o +rtw89_pci-y := pci.o pci_be.o diff --git a/drivers/net/wireless/realtek/rtw89/acpi.c b/drivers/net/wireless/realtek/rtw89/acpi.c index 2e7326a8e3e4..908e980a4b72 100644 --- a/drivers/net/wireless/realtek/rtw89/acpi.c +++ b/drivers/net/wireless/realtek/rtw89/acpi.c @@ -77,6 +77,50 @@ int rtw89_acpi_dsm_get_policy_6ghz(struct rtw89_dev *rtwdev, return 0; } +static bool chk_acpi_policy_6ghz_sp_sig(const struct rtw89_acpi_policy_6ghz_sp *p) +{ + return p->signature[0] == 0x52 && + p->signature[1] == 0x54 && + p->signature[2] == 0x4B && + p->signature[3] == 0x07; +} + +static +int rtw89_acpi_dsm_get_policy_6ghz_sp(struct rtw89_dev *rtwdev, + union acpi_object *obj, + struct rtw89_acpi_policy_6ghz_sp **policy) +{ + const struct rtw89_acpi_policy_6ghz_sp *ptr; + u32 buf_len; + + if (obj->type != ACPI_TYPE_BUFFER) { + rtw89_debug(rtwdev, RTW89_DBG_ACPI, + "acpi: expect buffer but type: %d\n", obj->type); + return -EINVAL; + } + + buf_len = obj->buffer.length; + if (buf_len < sizeof(*ptr)) { + rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: invalid buffer length: %u\n", + __func__, buf_len); + return -EINVAL; + } + + ptr = (typeof(ptr))obj->buffer.pointer; + if (!chk_acpi_policy_6ghz_sp_sig(ptr)) { + rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: bad signature\n", __func__); + return -EINVAL; + } + + *policy = kmemdup(ptr, sizeof(*ptr), GFP_KERNEL); + if (!*policy) + return -ENOMEM; + + rtw89_hex_dump(rtwdev, RTW89_DBG_ACPI, "policy_6ghz_sp: ", *policy, + sizeof(*ptr)); + return 0; +} + int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev, enum rtw89_acpi_dsm_func func, struct rtw89_acpi_dsm_result *res) @@ -95,6 +139,9 @@ int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev, if (func == RTW89_ACPI_DSM_FUNC_6G_BP) ret = rtw89_acpi_dsm_get_policy_6ghz(rtwdev, obj, &res->u.policy_6ghz); + else if (func == RTW89_ACPI_DSM_FUNC_6GHZ_SP_SUP) + ret = rtw89_acpi_dsm_get_policy_6ghz_sp(rtwdev, obj, + &res->u.policy_6ghz_sp); else ret = rtw89_acpi_dsm_get_value(rtwdev, obj, &res->u.value); diff --git a/drivers/net/wireless/realtek/rtw89/acpi.h b/drivers/net/wireless/realtek/rtw89/acpi.h index fe85b40cf076..d274be1775bf 100644 --- a/drivers/net/wireless/realtek/rtw89/acpi.h +++ b/drivers/net/wireless/realtek/rtw89/acpi.h @@ -12,7 +12,13 @@ enum rtw89_acpi_dsm_func { RTW89_ACPI_DSM_FUNC_6G_DIS = 3, RTW89_ACPI_DSM_FUNC_6G_BP = 4, RTW89_ACPI_DSM_FUNC_TAS_EN = 5, - RTW89_ACPI_DSM_FUNC_59G_EN = 6, + RTW89_ACPI_DSM_FUNC_UNII4_SUP = 6, + RTW89_ACPI_DSM_FUNC_6GHZ_SP_SUP = 7, +}; + +enum rtw89_acpi_conf_unii4 { + RTW89_ACPI_CONF_UNII4_FCC = BIT(0), + RTW89_ACPI_CONF_UNII4_IC = BIT(1), }; enum rtw89_acpi_policy_mode { @@ -36,11 +42,24 @@ struct 
rtw89_acpi_policy_6ghz { struct rtw89_acpi_country_code country_list[] __counted_by(country_count); } __packed; +enum rtw89_acpi_conf_6ghz_sp { + RTW89_ACPI_CONF_6GHZ_SP_US = BIT(0), +}; + +struct rtw89_acpi_policy_6ghz_sp { + u8 signature[4]; + u8 revision; + u8 override; + u8 conf; + u8 rsvd; +} __packed; + struct rtw89_acpi_dsm_result { union { u8 value; /* caller needs to free it after using */ struct rtw89_acpi_policy_6ghz *policy_6ghz; + struct rtw89_acpi_policy_6ghz_sp *policy_6ghz_sp; } u; }; diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c index 11fbdd142162..1864f543a6c6 100644 --- a/drivers/net/wireless/realtek/rtw89/cam.c +++ b/drivers/net/wireless/realtek/rtw89/cam.c @@ -150,8 +150,6 @@ static int rtw89_cam_get_addr_cam_key_idx(struct rtw89_addr_cam_entry *addr_cam, case RTW89_ADDR_CAM_SEC_NONE: return -EINVAL; case RTW89_ADDR_CAM_SEC_ALL_UNI: - if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) - return -EINVAL; idx = find_first_zero_bit(addr_cam->sec_cam_map, RTW89_SEC_CAM_IN_ADDR_CAM); if (idx >= RTW89_SEC_CAM_IN_ADDR_CAM) @@ -232,6 +230,11 @@ static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev, rtwvif = (struct rtw89_vif *)vif->drv_priv; addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta); + + if (key->cipher == WLAN_CIPHER_SUITE_WEP40 || + key->cipher == WLAN_CIPHER_SUITE_WEP104) + addr_cam->sec_ent_mode = RTW89_ADDR_CAM_SEC_ALL_UNI; + ret = rtw89_cam_get_addr_cam_key_idx(addr_cam, sec_cam, key, &key_idx); if (ret) { rtw89_err(rtwdev, "failed to get addr cam key idx %d, %d\n", @@ -356,6 +359,9 @@ int rtw89_cam_sec_key_add(struct rtw89_dev *rtwdev, key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; ext_key = true; break; + case WLAN_CIPHER_SUITE_AES_CMAC: + hw_key_type = RTW89_SEC_KEY_TYPE_BIP_CCMP128; + break; default: return -EOPNOTSUPP; } @@ -753,29 +759,80 @@ void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev, void rtw89_cam_fill_dctl_sec_cam_info_v1(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, struct rtw89_sta *rtwsta, - u8 *cmd) + struct rtw89_h2c_dctlinfo_ud_v1 *h2c) { struct rtw89_addr_cam_entry *addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta); + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + u8 *ptk_tx_iv = rtw_wow->key_info.ptk_tx_iv; + + h2c->c0 = le32_encode_bits(rtwsta ? rtwsta->mac_id : rtwvif->mac_id, + DCTLINFO_V1_C0_MACID) | + le32_encode_bits(1, DCTLINFO_V1_C0_OP); - SET_DCTL_MACID_V1(cmd, rtwsta ? 
rtwsta->mac_id : rtwvif->mac_id); - SET_DCTL_OPERATION_V1(cmd, 1); - - SET_DCTL_SEC_ENT0_KEYID_V1(cmd, addr_cam->sec_ent_keyid[0]); - SET_DCTL_SEC_ENT1_KEYID_V1(cmd, addr_cam->sec_ent_keyid[1]); - SET_DCTL_SEC_ENT2_KEYID_V1(cmd, addr_cam->sec_ent_keyid[2]); - SET_DCTL_SEC_ENT3_KEYID_V1(cmd, addr_cam->sec_ent_keyid[3]); - SET_DCTL_SEC_ENT4_KEYID_V1(cmd, addr_cam->sec_ent_keyid[4]); - SET_DCTL_SEC_ENT5_KEYID_V1(cmd, addr_cam->sec_ent_keyid[5]); - SET_DCTL_SEC_ENT6_KEYID_V1(cmd, addr_cam->sec_ent_keyid[6]); - - SET_DCTL_SEC_ENT_VALID_V1(cmd, addr_cam->sec_cam_map[0] & 0xff); - SET_DCTL_SEC_ENT0_V1(cmd, addr_cam->sec_ent[0]); - SET_DCTL_SEC_ENT1_V1(cmd, addr_cam->sec_ent[1]); - SET_DCTL_SEC_ENT2_V1(cmd, addr_cam->sec_ent[2]); - SET_DCTL_SEC_ENT3_V1(cmd, addr_cam->sec_ent[3]); - SET_DCTL_SEC_ENT4_V1(cmd, addr_cam->sec_ent[4]); - SET_DCTL_SEC_ENT5_V1(cmd, addr_cam->sec_ent[5]); - SET_DCTL_SEC_ENT6_V1(cmd, addr_cam->sec_ent[6]); + h2c->w4 = le32_encode_bits(addr_cam->sec_ent_keyid[0], + DCTLINFO_V1_W4_SEC_ENT0_KEYID) | + le32_encode_bits(addr_cam->sec_ent_keyid[1], + DCTLINFO_V1_W4_SEC_ENT1_KEYID) | + le32_encode_bits(addr_cam->sec_ent_keyid[2], + DCTLINFO_V1_W4_SEC_ENT2_KEYID) | + le32_encode_bits(addr_cam->sec_ent_keyid[3], + DCTLINFO_V1_W4_SEC_ENT3_KEYID) | + le32_encode_bits(addr_cam->sec_ent_keyid[4], + DCTLINFO_V1_W4_SEC_ENT4_KEYID) | + le32_encode_bits(addr_cam->sec_ent_keyid[5], + DCTLINFO_V1_W4_SEC_ENT5_KEYID) | + le32_encode_bits(addr_cam->sec_ent_keyid[6], + DCTLINFO_V1_W4_SEC_ENT6_KEYID); + h2c->m4 = cpu_to_le32(DCTLINFO_V1_W4_SEC_ENT0_KEYID | + DCTLINFO_V1_W4_SEC_ENT1_KEYID | + DCTLINFO_V1_W4_SEC_ENT2_KEYID | + DCTLINFO_V1_W4_SEC_ENT3_KEYID | + DCTLINFO_V1_W4_SEC_ENT4_KEYID | + DCTLINFO_V1_W4_SEC_ENT5_KEYID | + DCTLINFO_V1_W4_SEC_ENT6_KEYID); + + h2c->w5 = le32_encode_bits(addr_cam->sec_cam_map[0] & 0xff, + DCTLINFO_V1_W5_SEC_ENT_VALID) | + le32_encode_bits(addr_cam->sec_ent[0], + DCTLINFO_V1_W5_SEC_ENT0) | + le32_encode_bits(addr_cam->sec_ent[1], + DCTLINFO_V1_W5_SEC_ENT1) | + le32_encode_bits(addr_cam->sec_ent[2], + DCTLINFO_V1_W5_SEC_ENT2); + h2c->m5 = cpu_to_le32(DCTLINFO_V1_W5_SEC_ENT_VALID | + DCTLINFO_V1_W5_SEC_ENT0 | + DCTLINFO_V1_W5_SEC_ENT1 | + DCTLINFO_V1_W5_SEC_ENT2); + + h2c->w6 = le32_encode_bits(addr_cam->sec_ent[3], + DCTLINFO_V1_W6_SEC_ENT3) | + le32_encode_bits(addr_cam->sec_ent[4], + DCTLINFO_V1_W6_SEC_ENT4) | + le32_encode_bits(addr_cam->sec_ent[5], + DCTLINFO_V1_W6_SEC_ENT5) | + le32_encode_bits(addr_cam->sec_ent[6], + DCTLINFO_V1_W6_SEC_ENT6); + h2c->m6 = cpu_to_le32(DCTLINFO_V1_W6_SEC_ENT3 | + DCTLINFO_V1_W6_SEC_ENT4 | + DCTLINFO_V1_W6_SEC_ENT5 | + DCTLINFO_V1_W6_SEC_ENT6); + + if (rtw_wow->ptk_alg) { + h2c->w0 = le32_encode_bits(ptk_tx_iv[0] | ptk_tx_iv[1] << 8, + DCTLINFO_V1_W0_AES_IV_L); + h2c->m0 = cpu_to_le32(DCTLINFO_V1_W0_AES_IV_L); + + h2c->w1 = le32_encode_bits(ptk_tx_iv[4] | + ptk_tx_iv[5] << 8 | + ptk_tx_iv[6] << 16 | + ptk_tx_iv[7] << 24, + DCTLINFO_V1_W1_AES_IV_H); + h2c->m1 = cpu_to_le32(DCTLINFO_V1_W1_AES_IV_H); + + h2c->w4 |= le32_encode_bits(rtw_wow->ptk_keyidx, + DCTLINFO_V1_W4_SEC_KEY_ID); + h2c->m4 |= cpu_to_le32(DCTLINFO_V1_W4_SEC_KEY_ID); + } } void rtw89_cam_fill_dctl_sec_cam_info_v2(struct rtw89_dev *rtwdev, @@ -784,6 +841,8 @@ void rtw89_cam_fill_dctl_sec_cam_info_v2(struct rtw89_dev *rtwdev, struct rtw89_h2c_dctlinfo_ud_v2 *h2c) { struct rtw89_addr_cam_entry *addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta); + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + u8 *ptk_tx_iv = rtw_wow->key_info.ptk_tx_iv; h2c->c0 = 
le32_encode_bits(rtwsta ? rtwsta->mac_id : rtwvif->mac_id, DCTLINFO_V2_C0_MACID) | @@ -837,4 +896,21 @@ void rtw89_cam_fill_dctl_sec_cam_info_v2(struct rtw89_dev *rtwdev, DCTLINFO_V2_W7_SEC_ENT6_V1); h2c->m7 = cpu_to_le32(DCTLINFO_V2_W7_SEC_ENT5_V1 | DCTLINFO_V2_W7_SEC_ENT6_V1); + + if (rtw_wow->ptk_alg) { + h2c->w0 = le32_encode_bits(ptk_tx_iv[0] | ptk_tx_iv[1] << 8, + DCTLINFO_V2_W0_AES_IV_L); + h2c->m0 = cpu_to_le32(DCTLINFO_V2_W0_AES_IV_L); + + h2c->w1 = le32_encode_bits(ptk_tx_iv[4] | + ptk_tx_iv[5] << 8 | + ptk_tx_iv[6] << 16 | + ptk_tx_iv[7] << 24, + DCTLINFO_V2_W1_AES_IV_H); + h2c->m1 = cpu_to_le32(DCTLINFO_V2_W1_AES_IV_H); + + h2c->w4 |= le32_encode_bits(rtw_wow->ptk_keyidx, + DCTLINFO_V2_W4_SEC_KEY_ID); + h2c->m4 |= cpu_to_le32(DCTLINFO_V2_W4_SEC_KEY_ID); + } } diff --git a/drivers/net/wireless/realtek/rtw89/cam.h b/drivers/net/wireless/realtek/rtw89/cam.h index fa09d11c345c..5d7b624c2dd4 100644 --- a/drivers/net/wireless/realtek/rtw89/cam.h +++ b/drivers/net/wireless/realtek/rtw89/cam.h @@ -352,6 +352,75 @@ static inline void FWCMD_SET_ADDR_BSSID_BSSID5(void *cmd, u32 value) le32p_replace_bits((__le32 *)(cmd) + 14, value, GENMASK(31, 24)); } +struct rtw89_h2c_dctlinfo_ud_v1 { + __le32 c0; + __le32 w0; + __le32 w1; + __le32 w2; + __le32 w3; + __le32 w4; + __le32 w5; + __le32 w6; + __le32 w7; + __le32 m0; + __le32 m1; + __le32 m2; + __le32 m3; + __le32 m4; + __le32 m5; + __le32 m6; + __le32 m7; +} __packed; + +#define DCTLINFO_V1_C0_MACID GENMASK(6, 0) +#define DCTLINFO_V1_C0_OP BIT(7) + +#define DCTLINFO_V1_W0_QOS_FIELD_H GENMASK(7, 0) +#define DCTLINFO_V1_W0_HW_EXSEQ_MACID GENMASK(14, 8) +#define DCTLINFO_V1_W0_QOS_DATA BIT(15) +#define DCTLINFO_V1_W0_AES_IV_L GENMASK(31, 16) +#define DCTLINFO_V1_W0_ALL GENMASK(31, 0) +#define DCTLINFO_V1_W1_AES_IV_H GENMASK(31, 0) +#define DCTLINFO_V1_W1_ALL GENMASK(31, 0) +#define DCTLINFO_V1_W2_SEQ0 GENMASK(11, 0) +#define DCTLINFO_V1_W2_SEQ1 GENMASK(23, 12) +#define DCTLINFO_V1_W2_AMSDU_MAX_LEN GENMASK(26, 24) +#define DCTLINFO_V1_W2_STA_AMSDU_EN BIT(27) +#define DCTLINFO_V1_W2_CHKSUM_OFLD_EN BIT(28) +#define DCTLINFO_V1_W2_WITH_LLC BIT(29) +#define DCTLINFO_V1_W2_ALL GENMASK(29, 0) +#define DCTLINFO_V1_W3_SEQ2 GENMASK(11, 0) +#define DCTLINFO_V1_W3_SEQ3 GENMASK(23, 12) +#define DCTLINFO_V1_W3_TGT_IND GENMASK(27, 24) +#define DCTLINFO_V1_W3_TGT_IND_EN BIT(28) +#define DCTLINFO_V1_W3_HTC_LB GENMASK(31, 29) +#define DCTLINFO_V1_W3_ALL GENMASK(31, 0) +#define DCTLINFO_V1_W4_MHDR_LEN GENMASK(4, 0) +#define DCTLINFO_V1_W4_VLAN_TAG_VALID BIT(5) +#define DCTLINFO_V1_W4_VLAN_TAG_SEL GENMASK(7, 6) +#define DCTLINFO_V1_W4_HTC_ORDER BIT(8) +#define DCTLINFO_V1_W4_SEC_KEY_ID GENMASK(10, 9) +#define DCTLINFO_V1_W4_WAPI BIT(15) +#define DCTLINFO_V1_W4_SEC_ENT_MODE GENMASK(17, 16) +#define DCTLINFO_V1_W4_SEC_ENT0_KEYID GENMASK(19, 18) +#define DCTLINFO_V1_W4_SEC_ENT1_KEYID GENMASK(21, 20) +#define DCTLINFO_V1_W4_SEC_ENT2_KEYID GENMASK(23, 22) +#define DCTLINFO_V1_W4_SEC_ENT3_KEYID GENMASK(25, 24) +#define DCTLINFO_V1_W4_SEC_ENT4_KEYID GENMASK(27, 26) +#define DCTLINFO_V1_W4_SEC_ENT5_KEYID GENMASK(29, 28) +#define DCTLINFO_V1_W4_SEC_ENT6_KEYID GENMASK(31, 30) +#define DCTLINFO_V1_W4_ALL (GENMASK(31, 15) | GENMASK(10, 0)) +#define DCTLINFO_V1_W5_SEC_ENT_VALID GENMASK(7, 0) +#define DCTLINFO_V1_W5_SEC_ENT0 GENMASK(15, 8) +#define DCTLINFO_V1_W5_SEC_ENT1 GENMASK(23, 16) +#define DCTLINFO_V1_W5_SEC_ENT2 GENMASK(31, 24) +#define DCTLINFO_V1_W5_ALL GENMASK(31, 0) +#define DCTLINFO_V1_W6_SEC_ENT3 GENMASK(7, 0) +#define DCTLINFO_V1_W6_SEC_ENT4 GENMASK(15, 8) 
+#define DCTLINFO_V1_W6_SEC_ENT5 GENMASK(23, 16) +#define DCTLINFO_V1_W6_SEC_ENT6 GENMASK(31, 24) +#define DCTLINFO_V1_W6_ALL GENMASK(31, 0) + struct rtw89_h2c_dctlinfo_ud_v2 { __le32 c0; __le32 w0; @@ -477,7 +546,7 @@ void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev, void rtw89_cam_fill_dctl_sec_cam_info_v1(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, struct rtw89_sta *rtwsta, - u8 *cmd); + struct rtw89_h2c_dctlinfo_ud_v1 *h2c); void rtw89_cam_fill_dctl_sec_cam_info_v2(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, struct rtw89_sta *rtwsta, diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c index d9b66d43f32e..c443b39ab3c6 100644 --- a/drivers/net/wireless/realtek/rtw89/coex.c +++ b/drivers/net/wireless/realtek/rtw89/coex.c @@ -12,6 +12,7 @@ #define RTW89_COEX_VERSION 0x07000113 #define FCXDEF_STEP 50 /* MUST <= FCXMAX_STEP and match with wl fw*/ +#define BTC_E2G_LIMIT_DEF 80 enum btc_fbtc_tdma_template { CXTD_OFF = 0x0, @@ -54,7 +55,6 @@ enum btc_mlme_state { MLME_LINKED, }; -#define FCXONESLOT_VER 1 struct btc_fbtc_1slot { u8 fver; u8 sid; /* slot id */ @@ -133,71 +133,71 @@ static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = { .fcxbtcrpt = 8, .fcxtdma = 7, .fcxslots = 7, .fcxcysta = 7, .fcxstep = 7, .fcxnullsta = 7, .fcxmreg = 7, .fcxgpiodbg = 7, .fcxbtver = 7, .fcxbtscan = 7, .fcxbtafh = 7, .fcxbtdevinfo = 7, - .fwlrole = 2, .frptmap = 7, .fcxctrl = 7, .fcxinit = 7, - .drvinfo_type = 1, .info_buf = 1800, .max_role_num = 6, + .fwlrole = 8, .frptmap = 3, .fcxctrl = 7, .fcxinit = 7, + .fwevntrptl = 1, .drvinfo_type = 1, .info_buf = 1800, .max_role_num = 6, }, {RTL8851B, RTW89_FW_VER_CODE(0, 29, 29, 0), .fcxbtcrpt = 105, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 5, .fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 2, .fcxgpiodbg = 1, .fcxbtver = 1, .fcxbtscan = 2, .fcxbtafh = 2, .fcxbtdevinfo = 1, .fwlrole = 2, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0, - .drvinfo_type = 0, .info_buf = 1800, .max_role_num = 6, + .fwevntrptl = 0, .drvinfo_type = 0, .info_buf = 1800, .max_role_num = 6, }, {RTL8852C, RTW89_FW_VER_CODE(0, 27, 57, 0), .fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3, .fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1, .fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1, .fwlrole = 1, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0, - .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5, + .fwevntrptl = 0, .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5, }, {RTL8852C, RTW89_FW_VER_CODE(0, 27, 42, 0), .fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3, .fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1, .fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1, .fwlrole = 1, .frptmap = 2, .fcxctrl = 1, .fcxinit = 0, - .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5, + .fwevntrptl = 0, .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5, }, {RTL8852C, RTW89_FW_VER_CODE(0, 27, 0, 0), .fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3, .fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1, .fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1, .fwlrole = 1, .frptmap = 2, .fcxctrl = 1, .fcxinit = 0, - .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5, + .fwevntrptl = 0, .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5, }, {RTL8852B, RTW89_FW_VER_CODE(0, 29, 29, 0), .fcxbtcrpt = 105, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 5, .fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 2, .fcxgpiodbg = 1, .fcxbtver = 1, .fcxbtscan = 2, 
.fcxbtafh = 2, .fcxbtdevinfo = 1, .fwlrole = 2, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0, - .drvinfo_type = 0, .info_buf = 1800, .max_role_num = 6, + .fwevntrptl = 0, .drvinfo_type = 0, .info_buf = 1800, .max_role_num = 6, }, {RTL8852B, RTW89_FW_VER_CODE(0, 29, 14, 0), .fcxbtcrpt = 5, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 4, .fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1, .fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1, .fwlrole = 1, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0, - .drvinfo_type = 0, .info_buf = 1800, .max_role_num = 6, + .fwevntrptl = 0, .drvinfo_type = 0, .info_buf = 1800, .max_role_num = 6, }, {RTL8852B, RTW89_FW_VER_CODE(0, 27, 0, 0), .fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3, .fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1, .fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1, .fwlrole = 1, .frptmap = 1, .fcxctrl = 1, .fcxinit = 0, - .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5, + .fwevntrptl = 0, .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5, }, {RTL8852A, RTW89_FW_VER_CODE(0, 13, 37, 0), .fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3, .fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1, .fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1, .fwlrole = 1, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0, - .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5, + .fwevntrptl = 0, .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5, }, {RTL8852A, RTW89_FW_VER_CODE(0, 13, 0, 0), .fcxbtcrpt = 1, .fcxtdma = 1, .fcxslots = 1, .fcxcysta = 2, .fcxstep = 2, .fcxnullsta = 1, .fcxmreg = 1, .fcxgpiodbg = 1, .fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1, .fwlrole = 0, .frptmap = 0, .fcxctrl = 0, .fcxinit = 0, - .drvinfo_type = 0, .info_buf = 1024, .max_role_num = 5, + .fwevntrptl = 0, .drvinfo_type = 0, .info_buf = 1024, .max_role_num = 5, }, /* keep it to be the last as default entry */ @@ -206,18 +206,51 @@ static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = { .fcxstep = 2, .fcxnullsta = 1, .fcxmreg = 1, .fcxgpiodbg = 1, .fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1, .fwlrole = 0, .frptmap = 0, .fcxctrl = 0, .fcxinit = 0, - .drvinfo_type = 0, .info_buf = 1024, .max_role_num = 5, + .fwevntrptl = 0, .drvinfo_type = 0, .info_buf = 1024, .max_role_num = 5, }, }; #define RTW89_DEFAULT_BTC_VER_IDX (ARRAY_SIZE(rtw89_btc_ver_defs) - 1) +static const union rtw89_btc_wl_state_map btc_scanning_map = { + .map = { + .scan = 1, + .connecting = 1, + .roaming = 1, + .transacting = 1, + ._4way = 1, + }, +}; + +static u32 chip_id_to_bt_rom_code_id(u32 id) +{ + switch (id) { + case RTL8852A: + case RTL8852B: + case RTL8852C: + return 0x8852; + case RTL8851B: + return 0x8851; + case RTL8922A: + return 0x8922; + default: + return 0; + } +} + struct rtw89_btc_btf_tlv { u8 type; u8 len; u8 val[]; } __packed; +struct rtw89_btc_btf_tlv_v7 { + u8 type; + u8 ver; + u8 len; + u8 val[]; +} __packed; + enum btc_btf_set_report_en { RPT_EN_TDMA, RPT_EN_CYCLE, @@ -235,13 +268,24 @@ enum btc_btf_set_report_en { RPT_EN_MONITER, }; -#define BTF_SET_REPORT_VER 1 -struct rtw89_btc_btf_set_report { +struct rtw89_btc_btf_set_report_v1 { u8 fver; __le32 enable; __le32 para; } __packed; +struct rtw89_btc_btf_set_report_v8 { + u8 type; + u8 fver; + u8 len; + __le32 map; +} __packed; + +union rtw89_fbtc_rtp_ctrl { + struct rtw89_btc_btf_set_report_v1 v1; + struct rtw89_btc_btf_set_report_v8 v8; +}; + #define BTF_SET_SLOT_TABLE_VER 1 struct rtw89_btc_btf_set_slot_table 
{ u8 fver; @@ -249,12 +293,31 @@ struct rtw89_btc_btf_set_slot_table { struct rtw89_btc_fbtc_slot tbls[] __counted_by(tbl_num); } __packed; -struct rtw89_btc_btf_set_mon_reg { +struct rtw89_btc_btf_set_slot_table_v7 { + u8 type; + u8 ver; + u8 len; + struct rtw89_btc_fbtc_slot_v7 v7[CXST_MAX]; +} __packed; + +struct rtw89_btc_btf_set_mon_reg_v1 { u8 fver; u8 reg_num; struct rtw89_btc_fbtc_mreg regs[] __counted_by(reg_num); } __packed; +struct rtw89_btc_btf_set_mon_reg_v7 { + u8 type; + u8 fver; + u8 len; + struct rtw89_btc_fbtc_mreg regs[] __counted_by(len); +} __packed; + +union rtw89_fbtc_set_mon_reg { + struct rtw89_btc_btf_set_mon_reg_v1 v1; + struct rtw89_btc_btf_set_mon_reg_v7 v7; +} __packed; + struct _wl_rinfo_now { u8 link_mode; u32 dbcc_2g_phy: 2; @@ -310,6 +373,7 @@ enum btc_ant_phase { BTC_ANT_W25G, BTC_ANT_FREERUN, BTC_ANT_WRFK, + BTC_ANT_WRFK2, BTC_ANT_BRFK, BTC_ANT_MAX }; @@ -614,6 +678,13 @@ enum btc_ctr_path { BTC_CTRL_BY_WL }; +enum btc_wlact_state { + BTC_WLACT_HW = 0, + BTC_WLACT_SW_LO, + BTC_WLACT_SW_HI, + BTC_WLACT_MAX, +}; + enum btc_wl_max_tx_time { BTC_MAX_TX_TIME_L1 = 500, BTC_MAX_TX_TIME_L2 = 1000, @@ -739,7 +810,7 @@ static void _reset_btc_var(struct rtw89_dev *rtwdev, u8 type) struct rtw89_btc_wl_info *wl = &btc->cx.wl; struct rtw89_btc_bt_info *bt = &btc->cx.bt; struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info; - struct rtw89_btc_wl_link_info *wl_linfo = wl->link_info; + struct rtw89_btc_wl_link_info *wl_linfo; u8 i; rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s\n", __func__); @@ -761,16 +832,31 @@ static void _reset_btc_var(struct rtw89_dev *rtwdev, u8 type) if (type & BTC_RESET_DM) { memset(&btc->dm, 0, sizeof(btc->dm)); memset(bt_linfo->rssi_state, 0, sizeof(bt_linfo->rssi_state)); - - for (i = 0; i < RTW89_PORT_NUM; i++) - memset(wl_linfo[i].rssi_state, 0, - sizeof(wl_linfo[i].rssi_state)); + for (i = 0; i < RTW89_PORT_NUM; i++) { + if (btc->ver->fwlrole == 8) + wl_linfo = &wl->rlink_info[i][0]; + else + wl_linfo = &wl->link_info[i]; + memset(wl_linfo->rssi_state, 0, sizeof(wl_linfo->rssi_state)); + } /* set the slot_now table to original */ btc->dm.tdma_now = t_def[CXTD_OFF]; btc->dm.tdma = t_def[CXTD_OFF]; - memcpy(&btc->dm.slot_now, s_def, sizeof(btc->dm.slot_now)); - memcpy(&btc->dm.slot, s_def, sizeof(btc->dm.slot)); + if (ver->fcxslots >= 7) { + for (i = 0; i < ARRAY_SIZE(s_def); i++) { + btc->dm.slot.v7[i].dur = s_def[i].dur; + btc->dm.slot.v7[i].cxtype = s_def[i].cxtype; + btc->dm.slot.v7[i].cxtbl = s_def[i].cxtbl; + } + memcpy(&btc->dm.slot_now.v7, &btc->dm.slot.v7, + sizeof(btc->dm.slot_now.v7)); + } else { + memcpy(&btc->dm.slot_now.v1, s_def, + sizeof(btc->dm.slot_now.v1)); + memcpy(&btc->dm.slot.v1, s_def, + sizeof(btc->dm.slot.v1)); + } btc->policy_len = 0; btc->bt_req_len = 0; @@ -901,14 +987,25 @@ static void _chk_btc_err(struct rtw89_dev *rtwdev, u8 type, u32 cnt) { struct rtw89_btc *btc = &rtwdev->btc; struct rtw89_btc_cx *cx = &btc->cx; - struct rtw89_btc_dm *dm = &btc->dm; struct rtw89_btc_bt_info *bt = &cx->bt; + struct rtw89_btc_wl_info *wl = &cx->wl; + struct rtw89_btc_dm *dm = &btc->dm; rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): type:%d cnt:%d\n", __func__, type, cnt); switch (type) { + case BTC_DCNT_WL_FW_VER_MATCH: + if ((wl->ver_info.fw_coex & 0xffff0000) != + rtwdev->chip->wlcx_desired) { + wl->fw_ver_mismatch = true; + dm->error.map.wl_ver_mismatch = true; + } else { + wl->fw_ver_mismatch = false; + dm->error.map.wl_ver_mismatch = false; + } + break; case BTC_DCNT_RPT_HANG: if (dm->cnt_dm[BTC_DCNT_RPT] == cnt && 
btc->fwinfo.rpt_en_map) dm->cnt_dm[BTC_DCNT_RPT_HANG]++; @@ -1001,6 +1098,19 @@ static void _chk_btc_err(struct rtw89_dev *rtwdev, u8 type, u32 cnt) else dm->error.map.slot_no_sync = false; break; + case BTC_DCNT_BTTX_HANG: + cnt = cx->cnt_bt[BTC_BCNT_LOPRI_TX]; + + if (cnt == 0 && bt->link_info.slave_role) + dm->cnt_dm[BTC_DCNT_BTTX_HANG]++; + else + dm->cnt_dm[BTC_DCNT_BTTX_HANG] = 0; + + if (dm->cnt_dm[BTC_DCNT_BTTX_HANG] >= BTC_CHK_HANG_MAX) + dm->error.map.bt_tx_hang = true; + else + dm->error.map.bt_tx_hang = false; + break; case BTC_DCNT_BTCNT_HANG: cnt = cx->cnt_bt[BTC_BCNT_HIPRI_RX] + cx->cnt_bt[BTC_BCNT_HIPRI_TX] + @@ -1050,27 +1160,36 @@ static void _update_bt_report(struct rtw89_dev *rtwdev, u8 rpt_type, u8 *pfinfo) struct rtw89_btc_bt_info *bt = &btc->cx.bt; struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info; struct rtw89_btc_bt_a2dp_desc *a2dp = &bt_linfo->a2dp_desc; - struct rtw89_btc_fbtc_btver *pver = NULL; - struct rtw89_btc_fbtc_btscan_v1 *pscan_v1; - struct rtw89_btc_fbtc_btscan_v2 *pscan_v2; - struct rtw89_btc_fbtc_btafh *pafh_v1 = NULL; + union rtw89_btc_fbtc_btver *pver = &btc->fwinfo.rpt_fbtc_btver.finfo; struct rtw89_btc_fbtc_btafh_v2 *pafh_v2 = NULL; + struct rtw89_btc_fbtc_btafh_v7 *pafh_v7 = NULL; struct rtw89_btc_fbtc_btdevinfo *pdev = NULL; + struct rtw89_btc_fbtc_btafh *pafh_v1 = NULL; + struct rtw89_btc_fbtc_btscan_v1 *pscan_v1; + struct rtw89_btc_fbtc_btscan_v2 *pscan_v2; + struct rtw89_btc_fbtc_btscan_v7 *pscan_v7; bool scan_update = true; int i; - pver = (struct rtw89_btc_fbtc_btver *)pfinfo; - pdev = (struct rtw89_btc_fbtc_btdevinfo *)pfinfo; - rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): rpt_type:%d\n", __func__, rpt_type); switch (rpt_type) { case BTC_RPT_TYPE_BT_VER: - bt->ver_info.fw = le32_to_cpu(pver->fw_ver); - bt->ver_info.fw_coex = le32_get_bits(pver->coex_ver, GENMASK(7, 0)); - bt->feature = le32_to_cpu(pver->feature); + if (ver->fcxbtver == 7) { + pver->v7 = *(struct rtw89_btc_fbtc_btver_v7 *)pfinfo; + bt->ver_info.fw = le32_to_cpu(pver->v7.fw_ver); + bt->ver_info.fw_coex = le32_get_bits(pver->v7.coex_ver, + GENMASK(7, 0)); + bt->feature = le32_to_cpu(pver->v7.feature); + } else { + pver->v1 = *(struct rtw89_btc_fbtc_btver_v1 *)pfinfo; + bt->ver_info.fw = le32_to_cpu(pver->v1.fw_ver); + bt->ver_info.fw_coex = le32_get_bits(pver->v1.coex_ver, + GENMASK(7, 0)); + bt->feature = le32_to_cpu(pver->v1.feature); + } break; case BTC_RPT_TYPE_BT_SCAN: if (ver->fcxbtscan == 1) { @@ -1090,6 +1209,15 @@ static void _update_bt_report(struct rtw89_dev *rtwdev, u8 rpt_type, u8 *pfinfo) pscan_v2->para[i].intvl == 0) scan_update = false; } + } else if (ver->fcxbtscan == 7) { + pscan_v7 = (struct rtw89_btc_fbtc_btscan_v7 *)pfinfo; + for (i = 0; i < CXSCAN_MAX; i++) { + bt->scan_info_v2[i] = pscan_v7->para[i]; + if ((pscan_v7->type & BIT(i)) && + pscan_v7->para[i].win == 0 && + pscan_v7->para[i].intvl == 0) + scan_update = false; + } } if (scan_update) bt->scan_info_update = 1; @@ -1106,6 +1234,17 @@ static void _update_bt_report(struct rtw89_dev *rtwdev, u8 rpt_type, u8 *pfinfo) memcpy(&bt_linfo->afh_map_le[0], pafh_v2->afh_le_a, 4); memcpy(&bt_linfo->afh_map_le[4], pafh_v2->afh_le_b, 1); } + } else if (ver->fcxbtafh == 7) { + pafh_v7 = (struct rtw89_btc_fbtc_btafh_v7 *)pfinfo; + if (pafh_v7->map_type & RPT_BT_AFH_SEQ_LEGACY) { + memcpy(&bt_linfo->afh_map[0], pafh_v7->afh_l, 4); + memcpy(&bt_linfo->afh_map[4], pafh_v7->afh_m, 4); + memcpy(&bt_linfo->afh_map[8], pafh_v7->afh_h, 2); + } + if (pafh_v7->map_type & RPT_BT_AFH_SEQ_LE) { + 
memcpy(&bt_linfo->afh_map_le[0], pafh_v7->afh_le_a, 4); + memcpy(&bt_linfo->afh_map_le[4], pafh_v7->afh_le_b, 1); + } } else if (ver->fcxbtafh == 1) { pafh_v1 = (struct rtw89_btc_fbtc_btafh *)pfinfo; memcpy(&bt_linfo->afh_map[0], pafh_v1->afh_l, 4); @@ -1114,6 +1253,7 @@ static void _update_bt_report(struct rtw89_dev *rtwdev, u8 rpt_type, u8 *pfinfo) } break; case BTC_RPT_TYPE_BT_DEVICE: + pdev = (struct rtw89_btc_fbtc_btdevinfo *)pfinfo; a2dp->device_name = le32_to_cpu(pdev->dev_name); a2dp->vendor_id = le16_to_cpu(pdev->vendor_id); a2dp->flush_time = le32_to_cpu(pdev->flush_time); @@ -1123,6 +1263,22 @@ static void _update_bt_report(struct rtw89_dev *rtwdev, u8 rpt_type, u8 *pfinfo) } } +static void rtw89_btc_fw_rpt_evnt_ver(struct rtw89_dev *rtwdev, u8 *index) +{ + struct rtw89_btc *btc = &rtwdev->btc; + const struct rtw89_btc_ver *ver = btc->ver; + + if (ver->fwevntrptl == 1) + return; + + if (*index <= __BTC_RPT_TYPE_V0_SAME) + return; + else if (*index <= __BTC_RPT_TYPE_V0_MAX) + (*index)++; + else + *index = BTC_RPT_TYPE_MAX; +} + #define BTC_LEAK_AP_TH 10 #define BTC_CYSTA_CHK_PERIOD 100 @@ -1147,10 +1303,10 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev, struct rtw89_btc_prpt *btc_prpt = NULL; void *rpt_content = NULL, *pfinfo = NULL; u8 rpt_type = 0; - u16 wl_slot_set = 0, wl_slot_real = 0; + u16 wl_slot_set = 0, wl_slot_real = 0, val16; u32 trace_step = 0, rpt_len = 0, diff_t = 0; u32 cnt_leak_slot, bt_slot_real, bt_slot_set, cnt_rx_imr; - u8 i, val = 0; + u8 i, val = 0, val1, val2; rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): index:%d\n", @@ -1170,6 +1326,8 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev, "[BTC], %s(): rpt_type:%d\n", __func__, rpt_type); + rtw89_btc_fw_rpt_evnt_ver(rtwdev, &rpt_type); + switch (rpt_type) { case BTC_RPT_TYPE_CTRL: pcinfo = &pfwinfo->rpt_ctrl.cinfo; @@ -1188,6 +1346,10 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev, pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v105); pcinfo->req_fver = 5; break; + } else if (ver->fcxbtcrpt == 8) { + pfinfo = &pfwinfo->rpt_ctrl.finfo.v8; + pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v8); + break; } else { goto err; } @@ -1198,7 +1360,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev, if (ver->fcxtdma == 1) { pfinfo = &pfwinfo->rpt_fbtc_tdma.finfo.v1; pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_tdma.finfo.v1); - } else if (ver->fcxtdma == 3) { + } else if (ver->fcxtdma == 3 || ver->fcxtdma == 7) { pfinfo = &pfwinfo->rpt_fbtc_tdma.finfo.v3; pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_tdma.finfo.v3); } else { @@ -1208,8 +1370,15 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev, break; case BTC_RPT_TYPE_SLOT: pcinfo = &pfwinfo->rpt_fbtc_slots.cinfo; - pfinfo = &pfwinfo->rpt_fbtc_slots.finfo; - pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_slots.finfo); + if (ver->fcxslots == 1) { + pfinfo = &pfwinfo->rpt_fbtc_slots.finfo.v1; + pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_slots.finfo.v1); + } else if (ver->fcxslots == 7) { + pfinfo = &pfwinfo->rpt_fbtc_slots.finfo.v7; + pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_slots.finfo.v7); + } else { + goto err; + } pcinfo->req_fver = ver->fcxslots; break; case BTC_RPT_TYPE_CYSTA: @@ -1231,6 +1400,10 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev, pfinfo = &pfwinfo->rpt_fbtc_cysta.finfo.v5; pcysta->v5 = pfwinfo->rpt_fbtc_cysta.finfo.v5; pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo.v5); + } else if (ver->fcxcysta == 7) { + pfinfo = &pfwinfo->rpt_fbtc_cysta.finfo.v7; + pcysta->v7 = pfwinfo->rpt_fbtc_cysta.finfo.v7; + 
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo.v7); } else { goto err; } @@ -1264,6 +1437,9 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev, } else if (ver->fcxnullsta == 2) { pfinfo = &pfwinfo->rpt_fbtc_nullsta.finfo.v2; pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_nullsta.finfo.v2); + } else if (ver->fcxnullsta == 7) { + pfinfo = &pfwinfo->rpt_fbtc_nullsta.finfo.v7; + pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_nullsta.finfo.v7); } else { goto err; } @@ -1277,6 +1453,9 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev, } else if (ver->fcxmreg == 2) { pfinfo = &pfwinfo->rpt_fbtc_mregval.finfo.v2; pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_mregval.finfo.v2); + } else if (ver->fcxmreg == 7) { + pfinfo = &pfwinfo->rpt_fbtc_mregval.finfo.v7; + pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_mregval.finfo.v7); } else { goto err; } @@ -1284,14 +1463,24 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev, break; case BTC_RPT_TYPE_GPIO_DBG: pcinfo = &pfwinfo->rpt_fbtc_gpio_dbg.cinfo; - pfinfo = &pfwinfo->rpt_fbtc_gpio_dbg.finfo; - pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_gpio_dbg.finfo); + if (ver->fcxgpiodbg == 7) { + pfinfo = &pfwinfo->rpt_fbtc_gpio_dbg.finfo.v7; + pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_gpio_dbg.finfo.v7); + } else { + pfinfo = &pfwinfo->rpt_fbtc_gpio_dbg.finfo.v1; + pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_gpio_dbg.finfo.v1); + } pcinfo->req_fver = ver->fcxgpiodbg; break; case BTC_RPT_TYPE_BT_VER: pcinfo = &pfwinfo->rpt_fbtc_btver.cinfo; - pfinfo = &pfwinfo->rpt_fbtc_btver.finfo; - pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btver.finfo); + if (ver->fcxbtver == 1) { + pfinfo = &pfwinfo->rpt_fbtc_btver.finfo.v1; + pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btver.finfo.v1); + } else if (ver->fcxbtver == 7) { + pfinfo = &pfwinfo->rpt_fbtc_btver.finfo.v7; + pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btver.finfo.v7); + } pcinfo->req_fver = ver->fcxbtver; break; case BTC_RPT_TYPE_BT_SCAN: @@ -1302,6 +1491,11 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev, } else if (ver->fcxbtscan == 2) { pfinfo = &pfwinfo->rpt_fbtc_btscan.finfo.v2; pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btscan.finfo.v2); + } else if (ver->fcxbtscan == 7) { + pfinfo = &pfwinfo->rpt_fbtc_btscan.finfo.v7; + pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btscan.finfo.v7); + } else { + goto err; } pcinfo->req_fver = ver->fcxbtscan; break; @@ -1460,6 +1654,44 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev, pfwinfo->event[BTF_EVNT_RPT]); dm->error.map.bt_rfk_timeout = bt->rfk_info.map.timeout; + } else if (ver->fcxbtcrpt == 8) { + prpt->v8 = pfwinfo->rpt_ctrl.finfo.v8; + pfwinfo->rpt_en_map = le32_to_cpu(prpt->v8.rpt_info.en); + wl->ver_info.fw_coex = le32_to_cpu(prpt->v8.rpt_info.cx_ver); + wl->ver_info.fw = le32_to_cpu(prpt->v8.rpt_info.fw_ver); + + for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++) + memcpy(&dm->gnt.band[i], &prpt->v8.gnt_val[i][0], + sizeof(dm->gnt.band[i])); + + btc->cx.cnt_bt[BTC_BCNT_HIPRI_TX] = + le16_to_cpu(prpt->v8.bt_cnt[BTC_BCNT_HI_TX_V105]); + btc->cx.cnt_bt[BTC_BCNT_HIPRI_RX] = + le16_to_cpu(prpt->v8.bt_cnt[BTC_BCNT_HI_RX_V105]); + btc->cx.cnt_bt[BTC_BCNT_LOPRI_TX] = + le16_to_cpu(prpt->v8.bt_cnt[BTC_BCNT_LO_TX_V105]); + btc->cx.cnt_bt[BTC_BCNT_LOPRI_RX] = + le16_to_cpu(prpt->v8.bt_cnt[BTC_BCNT_LO_RX_V105]); + + val1 = le16_to_cpu(prpt->v8.bt_cnt[BTC_BCNT_POLLUTED_V105]); + if (val1 > btc->cx.cnt_bt[BTC_BCNT_POLUT_NOW]) + val1 -= btc->cx.cnt_bt[BTC_BCNT_POLUT_NOW]; /* diff */ + + btc->cx.cnt_bt[BTC_BCNT_POLUT_DIFF] = val1; + btc->cx.cnt_bt[BTC_BCNT_POLUT_NOW] 
= + le16_to_cpu(prpt->v8.bt_cnt[BTC_BCNT_POLLUTED_V105]); + + val1 = pfwinfo->event[BTF_EVNT_RPT]; + if (((prpt->v8.rpt_len_max_h << 8) + + prpt->v8.rpt_len_max_l) != ver->info_buf) + dm->error.map.h2c_c2h_buffer_mismatch = true; + else + dm->error.map.h2c_c2h_buffer_mismatch = false; + + _chk_btc_err(rtwdev, BTC_DCNT_BTCNT_HANG, 0); + _chk_btc_err(rtwdev, BTC_DCNT_RPT_HANG, val1); + _chk_btc_err(rtwdev, BTC_DCNT_WL_FW_VER_MATCH, 0); + _chk_btc_err(rtwdev, BTC_DCNT_BTTX_HANG, 0); } else { goto err; } @@ -1474,7 +1706,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev, memcmp(&dm->tdma_now, &pfwinfo->rpt_fbtc_tdma.finfo.v1, sizeof(dm->tdma_now))); - else if (ver->fcxtdma == 3) + else if (ver->fcxtdma == 3 || ver->fcxtdma == 7) _chk_btc_err(rtwdev, BTC_DCNT_TDMA_NONSYNC, memcmp(&dm->tdma_now, &pfwinfo->rpt_fbtc_tdma.finfo.v3.tdma, @@ -1483,14 +1715,25 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev, goto err; break; case BTC_RPT_TYPE_SLOT: - rtw89_debug(rtwdev, RTW89_DBG_BTC, - "[BTC], %s(): check %d %zu\n", - __func__, BTC_DCNT_SLOT_NONSYNC, - sizeof(dm->slot_now)); - _chk_btc_err(rtwdev, BTC_DCNT_SLOT_NONSYNC, - memcmp(dm->slot_now, - pfwinfo->rpt_fbtc_slots.finfo.slot, - sizeof(dm->slot_now))); + if (ver->fcxslots == 7) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): check %d %zu\n", + __func__, BTC_DCNT_SLOT_NONSYNC, + sizeof(dm->slot_now.v7)); + _chk_btc_err(rtwdev, BTC_DCNT_SLOT_NONSYNC, + memcmp(dm->slot_now.v7, + pfwinfo->rpt_fbtc_slots.finfo.v7.slot, + sizeof(dm->slot_now.v7))); + } else if (ver->fcxslots == 1) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): check %d %zu\n", + __func__, BTC_DCNT_SLOT_NONSYNC, + sizeof(dm->slot_now.v1)); + _chk_btc_err(rtwdev, BTC_DCNT_SLOT_NONSYNC, + memcmp(dm->slot_now.v1, + pfwinfo->rpt_fbtc_slots.finfo.v1.slot, + sizeof(dm->slot_now.v1))); + } break; case BTC_RPT_TYPE_CYSTA: if (ver->fcxcysta == 2) { @@ -1506,10 +1749,17 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev, /* Check diff time between WL slot and W1/E2G slot */ if (dm->tdma_now.type == CXTDMA_OFF && - dm->tdma_now.ext_ctrl == CXECTL_EXT) - wl_slot_set = le16_to_cpu(dm->slot_now[CXST_E2G].dur); - else - wl_slot_set = le16_to_cpu(dm->slot_now[CXST_W1].dur); + dm->tdma_now.ext_ctrl == CXECTL_EXT) { + if (ver->fcxslots == 1) + wl_slot_set = le16_to_cpu(dm->slot_now.v1[CXST_E2G].dur); + else if (ver->fcxslots == 7) + wl_slot_set = le16_to_cpu(dm->slot_now.v7[CXST_E2G].dur); + } else { + if (ver->fcxslots == 1) + wl_slot_set = le16_to_cpu(dm->slot_now.v1[CXST_W1].dur); + else if (ver->fcxslots == 7) + wl_slot_set = le16_to_cpu(dm->slot_now.v7[CXST_W1].dur); + } if (le16_to_cpu(pcysta->v2.tavg_cycle[CXT_WL]) > wl_slot_set) { diff_t = le16_to_cpu(pcysta->v2.tavg_cycle[CXT_WL]) - wl_slot_set; @@ -1539,7 +1789,10 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev, /* Check diff time between real WL slot and W1 slot */ if (dm->tdma_now.type == CXTDMA_OFF) { - wl_slot_set = le16_to_cpu(dm->slot_now[CXST_W1].dur); + if (ver->fcxslots == 1) + wl_slot_set = le16_to_cpu(dm->slot_now.v1[CXST_W1].dur); + else if (ver->fcxslots == 7) + wl_slot_set = le16_to_cpu(dm->slot_now.v7[CXST_W1].dur); wl_slot_real = le16_to_cpu(pcysta->v3.cycle_time.tavg[CXT_WL]); if (wl_slot_real > wl_slot_set) { diff_t = wl_slot_real - wl_slot_set; @@ -1580,7 +1833,10 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev, /* Check diff time between real WL slot and W1 slot */ if (dm->tdma_now.type == CXTDMA_OFF) { - wl_slot_set = le16_to_cpu(dm->slot_now[CXST_W1].dur); + if (ver->fcxslots 
== 1) + wl_slot_set = le16_to_cpu(dm->slot_now.v1[CXST_W1].dur); + else if (ver->fcxslots == 7) + wl_slot_set = le16_to_cpu(dm->slot_now.v7[CXST_W1].dur); wl_slot_real = le16_to_cpu(pcysta->v4.cycle_time.tavg[CXT_WL]); if (wl_slot_real > wl_slot_set) { diff_t = wl_slot_real - wl_slot_set; @@ -1622,7 +1878,10 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev, /* Check diff time between real WL slot and W1 slot */ if (dm->tdma_now.type == CXTDMA_OFF) { - wl_slot_set = le16_to_cpu(dm->slot_now[CXST_W1].dur); + if (ver->fcxslots == 1) + wl_slot_set = le16_to_cpu(dm->slot_now.v1[CXST_W1].dur); + else if (ver->fcxslots == 7) + wl_slot_set = le16_to_cpu(dm->slot_now.v7[CXST_W1].dur); wl_slot_real = le16_to_cpu(pcysta->v5.cycle_time.tavg[CXT_WL]); if (wl_slot_real > wl_slot_set) @@ -1654,11 +1913,62 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev, le16_to_cpu(pcysta->v5.slot_cnt[CXST_B1])); _chk_btc_err(rtwdev, BTC_DCNT_CYCLE_HANG, le16_to_cpu(pcysta->v5.cycles)); + } else if (ver->fcxcysta == 7) { + if (dm->fddt_train == BTC_FDDT_ENABLE) + break; + + pcysta = &pfwinfo->rpt_fbtc_cysta.finfo; + + if (dm->tdma_now.type != CXTDMA_OFF) { + /* Check diff time between real WL slot and W1 slot */ + val16 = le16_to_cpu(pcysta->v7.cycle_time.tavg[CXT_WL]); + _chk_btc_err(rtwdev, BTC_DCNT_WL_SLOT_DRIFT, val16); + + /* Check Leak-AP */ + val1 = le32_to_cpu(pcysta->v7.leak_slot.cnt_rximr) * + BTC_LEAK_AP_TH; + val2 = le16_to_cpu(pcysta->v7.slot_cnt[CXST_LK]); + + val16 = le16_to_cpu(pcysta->v7.cycles); + if (dm->tdma_now.rxflctrl && + val16 >= BTC_CYSTA_CHK_PERIOD && val1 > val2) + dm->leak_ap = 1; + } else if (dm->tdma_now.ext_ctrl == CXECTL_EXT) { + val16 = le16_to_cpu(pcysta->v7.cycle_time.tavg[CXT_BT]); + /* Check diff between real BT slot and EBT/E5G slot */ + _chk_btc_err(rtwdev, BTC_DCNT_BT_SLOT_DRIFT, val16); + + /* Check bt slot length for P2P mode*/ + val1 = le16_to_cpu(pcysta->v7.a2dp_ept.cnt_timeout) * + BTC_SLOT_REQ_TH; + val2 = le16_to_cpu(pcysta->v7.a2dp_ept.cnt); + + val16 = le16_to_cpu(pcysta->v7.cycles); + if (val16 >= BTC_CYSTA_CHK_PERIOD && val1 > val2) + dm->slot_req_more = 1; + else if (bt->link_info.status.map.connect == 0) + dm->slot_req_more = 0; + } + + _chk_btc_err(rtwdev, BTC_DCNT_E2G_HANG, + le16_to_cpu(pcysta->v7.slot_cnt[CXST_E2G])); + _chk_btc_err(rtwdev, BTC_DCNT_W1_HANG, + le16_to_cpu(pcysta->v7.slot_cnt[CXST_W1])); + _chk_btc_err(rtwdev, BTC_DCNT_B1_HANG, + le16_to_cpu(pcysta->v7.slot_cnt[CXST_B1])); + + /* "BT_SLOT_FLOOD" error-check MUST before "CYCLE_HANG" */ + _chk_btc_err(rtwdev, BTC_DCNT_BT_SLOT_FLOOD, + le16_to_cpu(pcysta->v7.cycles)); + _chk_btc_err(rtwdev, BTC_DCNT_CYCLE_HANG, + le16_to_cpu(pcysta->v7.cycles)); } else { goto err; } break; case BTC_RPT_TYPE_MREG: + if (ver->fcxmreg == 7) + break; _get_reg_status(rtwdev, BTC_CSTATUS_BB_GNT_MUX_MON, &val); if (dm->wl_btg_rx == BTC_BTGCTRL_BB_GNT_FWCTRL) dm->wl_btg_rx_rb = BTC_BTGCTRL_BB_GNT_FWCTRL; @@ -1715,6 +2025,7 @@ static void _parse_btc_report(struct rtw89_dev *rtwdev, } #define BTC_TLV_HDR_LEN 2 +#define BTC_TLV_HDR_LEN_V7 3 static void _append_tdma(struct rtw89_dev *rtwdev) { @@ -1722,6 +2033,7 @@ static void _append_tdma(struct rtw89_dev *rtwdev) const struct rtw89_btc_ver *ver = btc->ver; struct rtw89_btc_dm *dm = &btc->dm; struct rtw89_btc_btf_tlv *tlv; + struct rtw89_btc_btf_tlv_v7 *tlv_v7; struct rtw89_btc_fbtc_tdma *v; struct rtw89_btc_fbtc_tdma_v3 *v3; u16 len = btc->policy_len; @@ -1741,6 +2053,13 @@ static void _append_tdma(struct rtw89_dev *rtwdev) tlv->len = sizeof(*v); *v = dm->tdma; 
btc->policy_len += BTC_TLV_HDR_LEN + sizeof(*v); + } else if (ver->fcxtdma == 7) { + tlv_v7 = (struct rtw89_btc_btf_tlv_v7 *)&btc->policy[len]; + tlv_v7->len = sizeof(dm->tdma); + tlv_v7->ver = ver->fcxtdma; + tlv_v7->type = CXPOLICY_TDMA; + memcpy(tlv_v7->val, &dm->tdma, tlv_v7->len); + btc->policy_len += BTC_TLV_HDR_LEN_V7 + tlv_v7->len; } else { tlv->len = sizeof(*v3); v3 = (struct rtw89_btc_fbtc_tdma_v3 *)&tlv->val[0]; @@ -1756,7 +2075,7 @@ static void _append_tdma(struct rtw89_dev *rtwdev) dm->tdma.ext_ctrl); } -static void _append_slot(struct rtw89_dev *rtwdev) +static void _append_slot_v1(struct rtw89_dev *rtwdev) { struct rtw89_btc *btc = &rtwdev->btc; struct rtw89_btc_dm *dm = &btc->dm; @@ -1771,8 +2090,8 @@ static void _append_slot(struct rtw89_dev *rtwdev) for (i = 0; i < CXST_MAX; i++) { if (!btc->update_policy_force && - !memcmp(&dm->slot[i], &dm->slot_now[i], - sizeof(dm->slot[i]))) + !memcmp(&dm->slot.v1[i], &dm->slot_now.v1[i], + sizeof(dm->slot.v1[i]))) continue; len = btc->policy_len; @@ -1782,14 +2101,14 @@ static void _append_slot(struct rtw89_dev *rtwdev) tlv->type = CXPOLICY_SLOT; tlv->len = sizeof(*v); - v->fver = FCXONESLOT_VER; + v->fver = btc->ver->fcxslots; v->sid = i; - v->slot = dm->slot[i]; + v->slot = dm->slot.v1[i]; rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): slot-%d: dur=%d, table=0x%08x, type=%d\n", - __func__, i, dm->slot[i].dur, dm->slot[i].cxtbl, - dm->slot[i].cxtype); + __func__, i, dm->slot.v1[i].dur, dm->slot.v1[i].cxtbl, + dm->slot.v1[i].cxtype); cnt++; btc->policy_len += BTC_TLV_HDR_LEN + sizeof(*v); @@ -1801,6 +2120,71 @@ static void _append_slot(struct rtw89_dev *rtwdev) __func__, cnt); } +static void _append_slot_v7(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc_btf_tlv_v7 *tlv = NULL; + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_dm *dm = &btc->dm; + u8 i, cnt = 0; + u16 len; + + for (i = 0; i < CXST_MAX; i++) { + if (!btc->update_policy_force && + !memcmp(&dm->slot.v7[i], &dm->slot_now.v7[i], + sizeof(dm->slot.v7[i]))) + continue; + + len = btc->policy_len; + + if (!tlv) { + if ((len + BTC_TLV_HDR_LEN_V7) > RTW89_BTC_POLICY_MAXLEN) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): buff overflow!\n", __func__); + break; + } + + tlv = (struct rtw89_btc_btf_tlv_v7 *)&btc->policy[len]; + tlv->type = CXPOLICY_SLOT; + tlv->ver = btc->ver->fcxslots; + tlv->len = sizeof(dm->slot.v7[0]) + BTC_TLV_SLOT_ID_LEN_V7; + len += BTC_TLV_HDR_LEN_V7; + } + + if ((len + (u16)tlv->len) > RTW89_BTC_POLICY_MAXLEN) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): buff overflow!\n", __func__); + break; + } + + btc->policy[len] = i; /* slot-id */ + memcpy(&btc->policy[len + 1], &dm->slot.v7[i], + sizeof(dm->slot.v7[0])); + len += tlv->len; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s: policy_len=%d, slot-%d: dur=%d, type=%d, table=0x%08x\n", + __func__, btc->policy_len, i, dm->slot.v7[i].dur, + dm->slot.v7[i].cxtype, dm->slot.v7[i].cxtbl); + cnt++; + btc->policy_len = len; /* update total length */ + } + + if (cnt > 0) + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s: slot update (cnt=%d, len=%d)!!\n", + __func__, cnt, btc->policy_len); +} + +static void _append_slot(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + + if (btc->ver->fcxslots == 7) + _append_slot_v7(rtwdev); + else + _append_slot_v1(rtwdev); +} + static u32 rtw89_btc_fw_rpt_ver(struct rtw89_dev *rtwdev, u32 rpt_map) { struct rtw89_btc *btc = &rtwdev->btc; @@ -1946,13 +2330,52 @@ static u32 rtw89_btc_fw_rpt_ver(struct rtw89_dev *rtwdev, u32 
rpt_map) return bit_map; } +static void rtw89_btc_fw_set_slots(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + const struct rtw89_btc_ver *ver = btc->ver; + struct rtw89_btc_btf_tlv_v7 *tlv_v7 = NULL; + struct rtw89_btc_btf_set_slot_table *tbl; + struct rtw89_btc_dm *dm = &btc->dm; + u16 n, len; + + if (ver->fcxslots == 7) { + len = sizeof(*tlv_v7) + sizeof(dm->slot.v7); + tlv_v7 = kmalloc(len, GFP_KERNEL); + if (!tlv_v7) + return; + + tlv_v7->type = SET_SLOT_TABLE; + tlv_v7->ver = ver->fcxslots; + tlv_v7->len = ARRAY_SIZE(dm->slot.v7); + memcpy(tlv_v7->val, dm->slot.v7, sizeof(dm->slot.v7)); + + _send_fw_cmd(rtwdev, BTFC_SET, SET_SLOT_TABLE, (u8 *)tlv_v7, len); + + kfree(tlv_v7); + } else { + n = struct_size(tbl, tbls, CXST_MAX); + tbl = kmalloc(n, GFP_KERNEL); + if (!tbl) + return; + + tbl->fver = BTF_SET_SLOT_TABLE_VER; + tbl->tbl_num = CXST_MAX; + memcpy(tbl->tbls, dm->slot.v1, flex_array_size(tbl, tbls, CXST_MAX)); + + _send_fw_cmd(rtwdev, BTFC_SET, SET_SLOT_TABLE, tbl, n); + + kfree(tbl); + } +} + static void rtw89_btc_fw_en_rpt(struct rtw89_dev *rtwdev, u32 rpt_map, bool rpt_state) { struct rtw89_btc *btc = &rtwdev->btc; struct rtw89_btc_wl_smap *wl_smap = &btc->cx.wl.status.map; struct rtw89_btc_btf_fwinfo *fwinfo = &btc->fwinfo; - struct rtw89_btc_btf_set_report r = {0}; + union rtw89_fbtc_rtp_ctrl r; u32 val, bit_map; int ret; @@ -1973,41 +2396,35 @@ static void rtw89_btc_fw_en_rpt(struct rtw89_dev *rtwdev, if (val == fwinfo->rpt_en_map) return; - r.fver = BTF_SET_REPORT_VER; - r.enable = cpu_to_le32(val); - r.para = cpu_to_le32(rpt_state); + if (btc->ver->fcxbtcrpt == 8) { + r.v8.type = SET_REPORT_EN; + r.v8.fver = btc->ver->fcxbtcrpt; + r.v8.len = sizeof(r.v8.map); + r.v8.map = cpu_to_le32(val); + ret = _send_fw_cmd(rtwdev, BTFC_SET, SET_REPORT_EN, &r.v8, + sizeof(r.v8)); + } else { + if (btc->ver->fcxbtcrpt == 105) + r.v1.fver = 5; + else + r.v1.fver = btc->ver->fcxbtcrpt; + r.v1.enable = cpu_to_le32(val); + r.v1.para = cpu_to_le32(rpt_state); + ret = _send_fw_cmd(rtwdev, BTFC_SET, SET_REPORT_EN, &r.v1, + sizeof(r.v1)); + } - ret = _send_fw_cmd(rtwdev, BTFC_SET, SET_REPORT_EN, &r, sizeof(r)); if (!ret) fwinfo->rpt_en_map = val; } -static void rtw89_btc_fw_set_slots(struct rtw89_dev *rtwdev, u8 num, - struct rtw89_btc_fbtc_slot *s) -{ - struct rtw89_btc_btf_set_slot_table *tbl; - u16 n; - - n = struct_size(tbl, tbls, num); - tbl = kmalloc(n, GFP_KERNEL); - if (!tbl) - return; - - tbl->fver = BTF_SET_SLOT_TABLE_VER; - tbl->tbl_num = num; - memcpy(tbl->tbls, s, flex_array_size(tbl, tbls, num)); - - _send_fw_cmd(rtwdev, BTFC_SET, SET_SLOT_TABLE, tbl, n); - - kfree(tbl); -} - static void btc_fw_set_monreg(struct rtw89_dev *rtwdev) { const struct rtw89_chip_info *chip = rtwdev->chip; const struct rtw89_btc_ver *ver = rtwdev->btc.ver; - struct rtw89_btc_btf_set_mon_reg *monreg = NULL; - u8 n, ulen, cxmreg_max; + struct rtw89_btc_btf_set_mon_reg_v1 *v1 = NULL; + struct rtw89_btc_btf_set_mon_reg_v7 *v7 = NULL; + u8 i, n, ulen, cxmreg_max; u16 sz = 0; n = chip->mon_reg_num; @@ -2016,10 +2433,8 @@ static void btc_fw_set_monreg(struct rtw89_dev *rtwdev) if (ver->fcxmreg == 1) cxmreg_max = CXMREG_MAX; - else if (ver->fcxmreg == 2) - cxmreg_max = CXMREG_MAX_V2; else - return; + cxmreg_max = CXMREG_MAX_V2; if (n > cxmreg_max) { rtw89_debug(rtwdev, RTW89_DBG_BTC, @@ -2028,21 +2443,37 @@ static void btc_fw_set_monreg(struct rtw89_dev *rtwdev) return; } - ulen = sizeof(monreg->regs[0]); - sz = struct_size(monreg, regs, n); - monreg = kmalloc(sz, GFP_KERNEL); - if 
(!monreg) - return; + ulen = sizeof(struct rtw89_btc_fbtc_mreg); + + if (ver->fcxmreg == 7) { + sz = struct_size(v7, regs, n); + v7 = kmalloc(sz, GFP_KERNEL); + v7->type = RPT_EN_MREG; + v7->fver = ver->fcxmreg; + v7->len = n; + for (i = 0; i < n; i++) { + v7->regs[i].type = chip->mon_reg[i].type; + v7->regs[i].bytes = chip->mon_reg[i].bytes; + v7->regs[i].offset = chip->mon_reg[i].offset; + } + + _send_fw_cmd(rtwdev, BTFC_SET, SET_MREG_TABLE, v7, sz); + kfree(v7); + } else { + sz = struct_size(v1, regs, n); + v1 = kmalloc(sz, GFP_KERNEL); + v1->fver = ver->fcxmreg; + v1->reg_num = n; + memcpy(v1->regs, chip->mon_reg, flex_array_size(v1, regs, n)); + + _send_fw_cmd(rtwdev, BTFC_SET, SET_MREG_TABLE, v1, sz); + kfree(v1); + } - monreg->fver = ver->fcxmreg; - monreg->reg_num = n; - memcpy(monreg->regs, chip->mon_reg, flex_array_size(monreg, regs, n)); rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): sz=%d ulen=%d n=%d\n", __func__, sz, ulen, n); - _send_fw_cmd(rtwdev, BTFC_SET, SET_MREG_TABLE, (u8 *)monreg, sz); - kfree(monreg); rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_MREG, 1); } @@ -2100,7 +2531,10 @@ static void _fw_set_policy(struct rtw89_dev *rtwdev, u16 policy_type, btc->policy, btc->policy_len); if (!ret) { memcpy(&dm->tdma_now, &dm->tdma, sizeof(dm->tdma_now)); - memcpy(&dm->slot_now, &dm->slot, sizeof(dm->slot_now)); + if (btc->ver->fcxslots == 7) + memcpy(&dm->slot_now.v7, &dm->slot.v7, sizeof(dm->slot_now.v7)); + else + memcpy(&dm->slot_now.v1, &dm->slot.v1, sizeof(dm->slot_now.v1)); } if (btc->update_policy_force) @@ -2243,6 +2677,76 @@ static void _set_gnt(struct rtw89_dev *rtwdev, u8 phy_map, u8 wl_state, u8 bt_st rtw89_chip_mac_cfg_gnt(rtwdev, &dm->gnt); } +static void _set_gnt_v1(struct rtw89_dev *rtwdev, u8 phy_map, + u8 wl_state, u8 bt_state, u8 wlact_state) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_dm *dm = &btc->dm; + struct rtw89_mac_ax_gnt *g = dm->gnt.band; + u8 i, bt_idx = dm->bt_select + 1; + + if (phy_map > BTC_PHY_ALL) + return; + + for (i = 0; i < RTW89_PHY_MAX; i++) { + if (!(phy_map & BIT(i))) + continue; + + switch (wl_state) { + case BTC_GNT_HW: + g[i].gnt_wl_sw_en = 0; + g[i].gnt_wl = 0; + break; + case BTC_GNT_SW_LO: + g[i].gnt_wl_sw_en = 1; + g[i].gnt_wl = 0; + break; + case BTC_GNT_SW_HI: + g[i].gnt_wl_sw_en = 1; + g[i].gnt_wl = 1; + break; + } + + switch (bt_state) { + case BTC_GNT_HW: + g[i].gnt_bt_sw_en = 0; + g[i].gnt_bt = 0; + break; + case BTC_GNT_SW_LO: + g[i].gnt_bt_sw_en = 1; + g[i].gnt_bt = 0; + break; + case BTC_GNT_SW_HI: + g[i].gnt_bt_sw_en = 1; + g[i].gnt_bt = 1; + break; + } + } + + if (rtwdev->chip->para_ver & BTC_FEAT_WLAN_ACT_MUX) { + for (i = 0; i < 2; i++) { + if (!(bt_idx & BIT(i))) + continue; + + switch (wlact_state) { + case BTC_WLACT_HW: + dm->gnt.bt[i].wlan_act_en = 0; + dm->gnt.bt[i].wlan_act = 0; + break; + case BTC_WLACT_SW_LO: + dm->gnt.bt[i].wlan_act_en = 1; + dm->gnt.bt[i].wlan_act = 0; + break; + case BTC_WLACT_SW_HI: + dm->gnt.bt[i].wlan_act_en = 1; + dm->gnt.bt[i].wlan_act = 1; + break; + } + } + } + rtw89_mac_cfg_gnt_v2(rtwdev, &dm->gnt); +} + #define BTC_TDMA_WLROLE_MAX 2 static void _set_bt_ignore_wlan_act(struct rtw89_dev *rtwdev, u8 enable) @@ -2493,9 +2997,11 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev) struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info; struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1; struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2; + struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8; struct rtw89_btc_wl_active_role 
*r; struct rtw89_btc_wl_active_role_v1 *r1; struct rtw89_btc_wl_active_role_v2 *r2; + struct rtw89_btc_wl_rlink *rlink; u8 en = 0, i, ch = 0, bw = 0; u8 mode, connect_cnt; @@ -2511,6 +3017,9 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev) } else if (ver->fwlrole == 2) { mode = wl_rinfo_v2->link_mode; connect_cnt = wl_rinfo_v2->connect_cnt; + } else if (ver->fwlrole == 8) { + mode = wl_rinfo_v8->link_mode; + connect_cnt = wl_rinfo_v8->connect_cnt; } else { return; } @@ -2526,6 +3035,7 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev) r = &wl_rinfo->active_role[i]; r1 = &wl_rinfo_v1->active_role_v1[i]; r2 = &wl_rinfo_v2->active_role_v2[i]; + rlink = &wl_rinfo_v8->rlink[i][0]; if (ver->fwlrole == 0 && (r->role == RTW89_WIFI_ROLE_P2P_GO || @@ -2545,6 +3055,12 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev) ch = r2->ch; bw = r2->bw; break; + } else if (ver->fwlrole == 8 && + (rlink->role == RTW89_WIFI_ROLE_P2P_GO || + rlink->role == RTW89_WIFI_ROLE_P2P_CLIENT)) { + ch = rlink->ch; + bw = rlink->bw; + break; } } } else { @@ -2554,6 +3070,7 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev) r = &wl_rinfo->active_role[i]; r1 = &wl_rinfo_v1->active_role_v1[i]; r2 = &wl_rinfo_v2->active_role_v2[i]; + rlink = &wl_rinfo_v8->rlink[i][0]; if (ver->fwlrole == 0 && r->connected && r->band == RTW89_BAND_2G) { @@ -2570,6 +3087,11 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev) ch = r2->ch; bw = r2->bw; break; + } else if (ver->fwlrole == 8 && + rlink->connected && rlink->rf_band == RTW89_BAND_2G) { + ch = rlink->ch; + bw = rlink->bw; + break; } } } @@ -2622,25 +3144,35 @@ static bool _check_freerun(struct rtw89_dev *rtwdev) struct rtw89_btc_bt_info *bt = &btc->cx.bt; struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info; struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1; + struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2; + struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8; struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info; struct rtw89_btc_bt_hid_desc *hid = &bt_linfo->hid_desc; union rtw89_btc_module_info *md = &btc->mdinfo; const struct rtw89_btc_ver *ver = btc->ver; - u8 isolation; + u8 isolation, connect_cnt = 0; if (ver->fcxinit == 7) isolation = md->md_v7.ant.isolation; else isolation = md->md.ant.isolation; + if (ver->fwlrole == 0) + connect_cnt = wl_rinfo->connect_cnt; + else if (ver->fwlrole == 1) + connect_cnt = wl_rinfo_v1->connect_cnt; + else if (ver->fwlrole == 2) + connect_cnt = wl_rinfo_v2->connect_cnt; + else if (ver->fwlrole == 8) + connect_cnt = wl_rinfo_v8->connect_cnt; + if (btc->ant_type == BTC_ANT_SHARED) { btc->dm.trx_para_level = 0; return false; } /* The below is dedicated antenna case */ - if (wl_rinfo->connect_cnt > BTC_TDMA_WLROLE_MAX || - wl_rinfo_v1->connect_cnt > BTC_TDMA_WLROLE_MAX) { + if (connect_cnt > BTC_TDMA_WLROLE_MAX) { btc->dm.trx_para_level = 5; return true; } @@ -2693,19 +3225,6 @@ static bool _check_freerun(struct rtw89_dev *rtwdev) #define _tdma_set_tog(btc, wtg) ({(btc)->dm.tdma.wtgle_n = wtg; }) #define _tdma_set_lek(btc, lek) ({(btc)->dm.tdma.leak_n = lek; }) -#define _slot_set(btc, sid, dura, tbl, type) \ - do { \ - typeof(sid) _sid = (sid); \ - typeof(btc) _btc = (btc); \ - _btc->dm.slot[_sid].dur = cpu_to_le16(dura);\ - _btc->dm.slot[_sid].cxtbl = cpu_to_le32(tbl); \ - _btc->dm.slot[_sid].cxtype = cpu_to_le16(type); \ - } while (0) - -#define _slot_set_dur(btc, sid, dura) (btc)->dm.slot[sid].dur = cpu_to_le16(dura) -#define _slot_set_tbl(btc, sid, tbl) 
(btc)->dm.slot[sid].cxtbl = cpu_to_le32(tbl) -#define _slot_set_type(btc, sid, type) (btc)->dm.slot[sid].cxtype = cpu_to_le16(type) - struct btc_btinfo_lb2 { u8 connect: 1; u8 sco_busy: 1; @@ -2780,7 +3299,7 @@ void rtw89_btc_set_policy(struct rtw89_dev *rtwdev, u16 policy_type) struct rtw89_btc *btc = &rtwdev->btc; struct rtw89_btc_dm *dm = &btc->dm; struct rtw89_btc_fbtc_tdma *t = &dm->tdma; - struct rtw89_btc_fbtc_slot *s = dm->slot; + struct rtw89_btc_fbtc_slot *s = dm->slot.v1; u8 type; u32 tbl_w1, tbl_b1, tbl_b4; @@ -3091,7 +3610,6 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type) struct rtw89_btc *btc = &rtwdev->btc; struct rtw89_btc_dm *dm = &btc->dm; struct rtw89_btc_fbtc_tdma *t = &dm->tdma; - struct rtw89_btc_fbtc_slot *s = dm->slot; struct rtw89_btc_wl_role_info_v1 *wl_rinfo = &btc->cx.wl.role_info_v1; struct rtw89_btc_bt_hid_desc *hid = &btc->cx.bt.link_info.hid_desc; struct rtw89_btc_bt_hfp_desc *hfp = &btc->cx.bt.link_info.hfp_desc; @@ -3139,13 +3657,15 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type) case BTC_CXP_USERDEF0: btc->update_policy_force = true; *t = t_def[CXTD_OFF]; - s[CXST_OFF] = s_def[CXST_OFF]; + _slot_set_le(btc, CXST_OFF, s_def[CXST_OFF].dur, + s_def[CXST_OFF].cxtbl, s_def[CXST_OFF].cxtype); _slot_set_tbl(btc, CXST_OFF, cxtbl[2]); break; case BTC_CXP_OFF: /* TDMA off */ _write_scbd(rtwdev, BTC_WSCB_TDMA, false); *t = t_def[CXTD_OFF]; - s[CXST_OFF] = s_def[CXST_OFF]; + _slot_set_le(btc, CXST_OFF, s_def[CXST_OFF].dur, + s_def[CXST_OFF].cxtbl, s_def[CXST_OFF].cxtype); switch (policy_type) { case BTC_CXP_OFF_BT: @@ -3186,7 +3706,8 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type) case BTC_CXP_OFFB: /* TDMA off + beacon protect */ _write_scbd(rtwdev, BTC_WSCB_TDMA, false); *t = t_def[CXTD_OFF_B2]; - s[CXST_OFF] = s_def[CXST_OFF]; + _slot_set_le(btc, CXST_OFF, s_def[CXST_OFF].dur, + s_def[CXST_OFF].cxtbl, s_def[CXST_OFF].cxtype); switch (policy_type) { case BTC_CXP_OFFB_BWB0: @@ -3207,21 +3728,29 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type) switch (policy_type) { case BTC_CXP_OFFE_DEF: - s[CXST_E2G] = s_def[CXST_E2G]; - s[CXST_E5G] = s_def[CXST_E5G]; - s[CXST_EBT] = s_def[CXST_EBT]; - s[CXST_ENULL] = s_def[CXST_ENULL]; + _slot_set_le(btc, CXST_E2G, s_def[CXST_E2G].dur, + s_def[CXST_E2G].cxtbl, s_def[CXST_E2G].cxtype); + _slot_set_le(btc, CXST_E5G, s_def[CXST_E5G].dur, + s_def[CXST_E5G].cxtbl, s_def[CXST_E5G].cxtype); + _slot_set_le(btc, CXST_EBT, s_def[CXST_EBT].dur, + s_def[CXST_EBT].cxtbl, s_def[CXST_EBT].cxtype); + _slot_set_le(btc, CXST_ENULL, s_def[CXST_ENULL].dur, + s_def[CXST_ENULL].cxtbl, s_def[CXST_ENULL].cxtype); break; case BTC_CXP_OFFE_DEF2: _slot_set(btc, CXST_E2G, 20, cxtbl[1], SLOT_ISO); - s[CXST_E5G] = s_def[CXST_E5G]; - s[CXST_EBT] = s_def[CXST_EBT]; - s[CXST_ENULL] = s_def[CXST_ENULL]; + _slot_set_le(btc, CXST_E5G, s_def[CXST_E5G].dur, + s_def[CXST_E5G].cxtbl, s_def[CXST_E5G].cxtype); + _slot_set_le(btc, CXST_EBT, s_def[CXST_EBT].dur, + s_def[CXST_EBT].cxtbl, s_def[CXST_EBT].cxtype); + _slot_set_le(btc, CXST_ENULL, s_def[CXST_ENULL].dur, + s_def[CXST_ENULL].cxtbl, s_def[CXST_ENULL].cxtype); break; default: break; } - s[CXST_OFF] = s_def[CXST_OFF]; + _slot_set_le(btc, CXST_OFF, s_def[CXST_OFF].dur, + s_def[CXST_OFF].cxtbl, s_def[CXST_OFF].cxtype); break; case BTC_CXP_FIX: /* TDMA Fix-Slot */ _write_scbd(rtwdev, BTC_WSCB_TDMA, true); @@ -3481,25 +4010,36 @@ EXPORT_SYMBOL(rtw89_btc_set_policy_v1); static void _set_bt_plut(struct rtw89_dev 
*rtwdev, u8 phy_map, u8 tx_val, u8 rx_val) { + struct rtw89_btc_wl_info *wl = &rtwdev->btc.cx.wl; struct rtw89_mac_ax_plt plt; - plt.band = RTW89_MAC_0; plt.tx = tx_val; plt.rx = rx_val; - if (phy_map & BTC_PHY_0) + if (rtwdev->btc.ver->fwlrole == 8) { + plt.band = wl->pta_req_mac; + if (wl->bt_polut_type[plt.band] == tx_val) + return; + + wl->bt_polut_type[plt.band] = tx_val; rtw89_mac_cfg_plt(rtwdev, &plt); + } else { + plt.band = RTW89_MAC_0; - if (!rtwdev->dbcc_en) - return; + if (phy_map & BTC_PHY_0) + rtw89_mac_cfg_plt(rtwdev, &plt); - plt.band = RTW89_MAC_1; - if (phy_map & BTC_PHY_1) - rtw89_mac_cfg_plt(rtwdev, &plt); + if (!rtwdev->dbcc_en) + return; + + plt.band = RTW89_MAC_1; + if (phy_map & BTC_PHY_1) + rtw89_mac_cfg_plt(rtwdev, &plt); + } } -static void _set_ant(struct rtw89_dev *rtwdev, bool force_exec, - u8 phy_map, u8 type) +static void _set_ant_v0(struct rtw89_dev *rtwdev, bool force_exec, + u8 phy_map, u8 type) { struct rtw89_btc *btc = &rtwdev->btc; struct rtw89_btc_dm *dm = &btc->dm; @@ -3508,13 +4048,21 @@ static void _set_ant(struct rtw89_dev *rtwdev, bool force_exec, struct rtw89_btc_bt_info *bt = &cx->bt; struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info; u8 gnt_wl_ctrl, gnt_bt_ctrl, plt_ctrl, i, b2g = 0; + bool dbcc_chg = false; u32 ant_path_type; ant_path_type = ((phy_map << 8) + type); + if (btc->ver->fwlrole == 1) + dbcc_chg = wl->role_info_v1.dbcc_chg; + else if (btc->ver->fwlrole == 2) + dbcc_chg = wl->role_info_v2.dbcc_chg; + else if (btc->ver->fwlrole == 8) + dbcc_chg = wl->role_info_v8.dbcc_chg; + if (btc->dm.run_reason == BTC_RSN_NTFY_POWEROFF || btc->dm.run_reason == BTC_RSN_NTFY_RADIO_STATE || - btc->dm.run_reason == BTC_RSN_CMD_SET_COEX) + btc->dm.run_reason == BTC_RSN_CMD_SET_COEX || dbcc_chg) force_exec = FC_EXEC; if (!force_exec && ant_path_type == dm->set_ant_path) { @@ -3617,6 +4165,117 @@ static void _set_ant(struct rtw89_dev *rtwdev, bool force_exec, } } +static void _set_ant_v1(struct rtw89_dev *rtwdev, bool force_exec, + u8 phy_map, u8 type) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + struct rtw89_btc_bt_info *bt = &btc->cx.bt; + struct rtw89_btc_wl_role_info_v8 *wl_rinfo = &wl->role_info_v8; + u32 ant_path_type = rtw89_get_antpath_type(phy_map, type); + struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info; + struct rtw89_btc_dm *dm = &btc->dm; + u8 gwl = BTC_GNT_HW; + + if (btc->dm.run_reason == BTC_RSN_NTFY_POWEROFF || + btc->dm.run_reason == BTC_RSN_NTFY_RADIO_STATE || + btc->dm.run_reason == BTC_RSN_CMD_SET_COEX || wl_rinfo->dbcc_chg) + force_exec = FC_EXEC; + + if (wl_rinfo->link_mode != BTC_WLINK_25G_MCC && + btc->dm.wl_btg_rx == 2) + force_exec = FC_EXEC; + + if (!force_exec && ant_path_type == dm->set_ant_path) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): return by no change!!\n", + __func__); + return; + } else if (bt->rfk_info.map.run) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): return by bt rfk!!\n", __func__); + return; + } else if (btc->dm.run_reason != BTC_RSN_NTFY_WL_RFK && + wl->rfk_info.state != BTC_WRFK_STOP) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): return by wl rfk!!\n", __func__); + return; + } + + dm->set_ant_path = ant_path_type; + + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): path=0x%x, set_type=0x%x\n", + __func__, phy_map, dm->set_ant_path & 0xff); + + switch (type) { + case BTC_ANT_WINIT: + /* To avoid BT MP driver case (bt_enable but no mailbox) */ + if (bt->enable.now && bt->run_patch_code) + _set_gnt_v1(rtwdev, 
phy_map, BTC_GNT_SW_LO, BTC_GNT_SW_HI, + BTC_WLACT_SW_LO); + else + _set_gnt_v1(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_SW_LO, + BTC_WLACT_SW_HI); + break; + case BTC_ANT_WONLY: + _set_gnt_v1(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_SW_LO, + BTC_WLACT_SW_HI); + break; + case BTC_ANT_WOFF: + _set_gnt_v1(rtwdev, phy_map, BTC_GNT_SW_LO, BTC_GNT_SW_HI, + BTC_WLACT_SW_LO); + break; + case BTC_ANT_W2G: + case BTC_ANT_W25G: + if (wl_rinfo->dbcc_en) { + if (wl_dinfo->real_band[RTW89_PHY_0] == RTW89_BAND_2G) + gwl = BTC_GNT_HW; + else + gwl = BTC_GNT_SW_HI; + _set_gnt_v1(rtwdev, BTC_PHY_0, gwl, BTC_GNT_HW, BTC_WLACT_HW); + + if (wl_dinfo->real_band[RTW89_PHY_1] == RTW89_BAND_2G) + gwl = BTC_GNT_HW; + else + gwl = BTC_GNT_SW_HI; + _set_gnt_v1(rtwdev, BTC_PHY_1, gwl, BTC_GNT_HW, BTC_WLACT_HW); + } else { + gwl = BTC_GNT_HW; + _set_gnt_v1(rtwdev, phy_map, gwl, BTC_GNT_HW, BTC_WLACT_HW); + } + break; + case BTC_ANT_W5G: + _set_gnt_v1(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_HW, BTC_WLACT_HW); + break; + case BTC_ANT_FREERUN: + _set_gnt_v1(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_SW_HI, + BTC_WLACT_SW_LO); + break; + case BTC_ANT_WRFK: + _set_gnt_v1(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_SW_LO, + BTC_WLACT_HW); + break; + case BTC_ANT_WRFK2: + _set_gnt_v1(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_SW_LO, + BTC_WLACT_SW_HI); /* no BT-Tx */ + break; + default: + return; + } + + _set_bt_plut(rtwdev, phy_map, BTC_PLT_GNT_WL, BTC_PLT_GNT_WL); +} + +static void _set_ant(struct rtw89_dev *rtwdev, bool force_exec, + u8 phy_map, u8 type) +{ + if (rtwdev->chip->chip_id == RTL8922A) + _set_ant_v1(rtwdev, force_exec, phy_map, type); + else + _set_ant_v0(rtwdev, force_exec, phy_map, type); +} + static void _action_wl_only(struct rtw89_dev *rtwdev) { _set_ant(rtwdev, FC_EXEC, BTC_PHY_ALL, BTC_ANT_WONLY); @@ -3641,7 +4300,7 @@ static void _action_wl_off(struct rtw89_dev *rtwdev, u8 mode) if (wl->status.map.rf_off || btc->dm.bt_only) { _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_WOFF); } else if (wl->status.map.lps == BTC_LPS_RF_ON) { - if (wl->role_info.link_mode == BTC_WLINK_5G) + if (mode == BTC_WLINK_5G) _set_ant(rtwdev, FC_EXEC, BTC_PHY_ALL, BTC_ANT_W5G); else _set_ant(rtwdev, FC_EXEC, BTC_PHY_ALL, BTC_ANT_W2G); @@ -4070,6 +4729,7 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev) struct rtw89_btc_wl_info *wl = &btc->cx.wl; struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1; struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2; + struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8; struct rtw89_btc_wl_role_info *wl_rinfo_v0 = &wl->role_info; struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info; const struct rtw89_chip_info *chip = rtwdev->chip; @@ -4090,6 +4750,8 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev) wl_rinfo.link_mode = wl_rinfo_v1->link_mode; else if (ver->fwlrole == 2) wl_rinfo.link_mode = wl_rinfo_v2->link_mode; + else if (ver->fwlrole == 8) + wl_rinfo.link_mode = wl_rinfo_v8->link_mode; else return; @@ -4103,6 +4765,8 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev) wl_rinfo.dbcc_2g_phy = wl_rinfo_v1->dbcc_2g_phy; } else if (ver->fwlrole == 2) { wl_rinfo.dbcc_2g_phy = wl_rinfo_v2->dbcc_2g_phy; + } else if (ver->fwlrole == 8) { + wl_rinfo.dbcc_2g_phy = wl_rinfo_v8->dbcc_2g_phy; } else { return; } @@ -4223,7 +4887,10 @@ static void rtw89_tx_time_iter(void *data, struct ieee80211_sta *sta) u16 enable = iter_data->enable; bool reenable = iter_data->reenable; - plink = &wl->link_info[port]; + if (btc->ver->fwlrole == 8) + plink = 
&wl->rlink_info[port][0]; + else + plink = &wl->link_info[port]; rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): port = %d\n", __func__, port); @@ -4276,6 +4943,7 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev) struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info; struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1; struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2; + struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8; struct rtw89_txtime_data data = {.rtwdev = rtwdev}; u8 mode, igno_bt, tx_retry; u32 tx_time; @@ -4291,6 +4959,8 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev) mode = wl_rinfo_v1->link_mode; else if (ver->fwlrole == 2) mode = wl_rinfo_v2->link_mode; + else if (ver->fwlrole == 8) + mode = wl_rinfo_v8->link_mode; else return; @@ -4348,6 +5018,7 @@ static void _set_bt_rx_agc(struct rtw89_dev *rtwdev) struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info; struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1; struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2; + struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8; struct rtw89_btc_bt_info *bt = &btc->cx.bt; bool bt_hi_lna_rx = false; u8 mode; @@ -4358,6 +5029,8 @@ static void _set_bt_rx_agc(struct rtw89_dev *rtwdev) mode = wl_rinfo_v1->link_mode; else if (ver->fwlrole == 2) mode = wl_rinfo_v2->link_mode; + else if (ver->fwlrole == 8) + mode = wl_rinfo_v8->link_mode; else return; @@ -4378,11 +5051,14 @@ static void _set_bt_rx_scan_pri(struct rtw89_dev *rtwdev) _write_scbd(rtwdev, BTC_WSCB_RXSCAN_PRI, (bool)(!!bt->scan_rx_low_pri)); } -/* TODO add these functions */ static void _action_common(struct rtw89_dev *rtwdev) { struct rtw89_btc *btc = &rtwdev->btc; struct rtw89_btc_wl_info *wl = &btc->cx.wl; + struct rtw89_btc_wl_smap *wl_smap = &wl->status.map; + struct rtw89_btc_bt_info *bt = &btc->cx.bt; + struct rtw89_btc_dm *dm = &btc->dm; + u32 bt_rom_code_id, bt_fw_ver; _set_btg_ctrl(rtwdev); _set_wl_preagc_ctrl(rtwdev); @@ -4392,6 +5068,26 @@ static void _action_common(struct rtw89_dev *rtwdev) _set_rf_trx_para(rtwdev); _set_bt_rx_scan_pri(rtwdev); + bt_rom_code_id = chip_id_to_bt_rom_code_id(rtwdev->btc.ver->chip_id); + bt_fw_ver = bt->ver_info.fw & 0xffff; + if (bt->enable.now && + (bt_fw_ver == 0 || + (bt_fw_ver == bt_rom_code_id && bt->run_patch_code && rtwdev->chip->scbd))) + rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_VER_INFO, 1); + else + rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_VER_INFO, 0); + + if (dm->run_reason == BTC_RSN_NTFY_INIT || + dm->run_reason == BTC_RSN_NTFY_RADIO_STATE || + dm->run_reason == BTC_RSN_NTFY_POWEROFF) { + _fw_set_drv_info(rtwdev, CXDRVINFO_ROLE); + + if (wl_smap->rf_off == 1 || wl_smap->lps != BTC_LPS_OFF) + rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_ALL, 0); + else + rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_MREG, 1); + } + if (wl->scbd_change) { rtw89_mac_cfg_sb(rtwdev, wl->scbd); rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], write scbd: 0x%08x\n", @@ -4488,6 +5184,30 @@ static void _action_wl_2g_sta(struct rtw89_dev *rtwdev) _action_by_bt(rtwdev); } +static void _action_wl_25g_mcc(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + u16 policy_type = BTC_CXP_OFF_BT; + + if (btc->ant_type == BTC_ANT_SHARED) { + if (btc->cx.wl.status.map._4way) + policy_type = BTC_CXP_OFFE_WL; + else if (btc->cx.wl.status.val & btc_scanning_map.val) + policy_type = BTC_CXP_OFFE_2GBWMIXB; + else if (btc->cx.bt.link_info.profile_cnt.now == 0) + policy_type = BTC_CXP_OFFE_2GISOB; + else + policy_type = 
BTC_CXP_OFFE_2GBWISOB; + } else { /* dedicated-antenna */ + policy_type = BTC_CXP_OFF_EQ0; + } + + btc->dm.e2g_slot_limit = BTC_E2G_LIMIT_DEF; + + _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W25G); + _set_policy(rtwdev, policy_type, BTC_ACT_WL_25G_MCC); +} + static void _action_wl_scan(struct rtw89_dev *rtwdev) { struct rtw89_btc *btc = &rtwdev->btc; @@ -4495,14 +5215,7 @@ static void _action_wl_scan(struct rtw89_dev *rtwdev) struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info; if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) { - _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W25G); - if (btc->ant_type == BTC_ANT_SHARED) - _set_policy(rtwdev, BTC_CXP_OFFE_DEF, - BTC_RSN_NTFY_SCAN_START); - else - _set_policy(rtwdev, BTC_CXP_OFF_EQ0, - BTC_RSN_NTFY_SCAN_START); - + _action_wl_25g_mcc(rtwdev); rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], Scan offload!\n"); } else if (rtwdev->dbcc_en) { if (wl_dinfo->real_band[RTW89_PHY_0] != RTW89_BAND_2G && @@ -4518,24 +5231,6 @@ static void _action_wl_scan(struct rtw89_dev *rtwdev) } } -static void _action_wl_25g_mcc(struct rtw89_dev *rtwdev) -{ - struct rtw89_btc *btc = &rtwdev->btc; - - _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W25G); - - if (btc->ant_type == BTC_ANT_SHARED) { - if (btc->cx.bt.link_info.profile_cnt.now == 0) - _set_policy(rtwdev, BTC_CXP_OFFE_DEF2, - BTC_ACT_WL_25G_MCC); - else - _set_policy(rtwdev, BTC_CXP_OFFE_DEF, - BTC_ACT_WL_25G_MCC); - } else { /* dedicated-antenna */ - _set_policy(rtwdev, BTC_CXP_OFF_EQ0, BTC_ACT_WL_25G_MCC); - } -} - static void _action_wl_2g_mcc(struct rtw89_dev *rtwdev) { struct rtw89_btc *btc = &rtwdev->btc; @@ -4695,6 +5390,31 @@ static void _action_wl_2g_scc_v2(struct rtw89_dev *rtwdev) _set_policy(rtwdev, policy_type, BTC_ACT_WL_2G_SCC); } +static void _action_wl_2g_scc_v8(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + struct rtw89_btc_bt_info *bt = &btc->cx.bt; + struct rtw89_btc_dm *dm = &btc->dm; + u16 policy_type = BTC_CXP_OFF_BT; + + if (btc->ant_type == BTC_ANT_SHARED) { + if (wl->status.map._4way) + policy_type = BTC_CXP_OFFE_WL; + else if (bt->link_info.status.map.connect == 0) + policy_type = BTC_CXP_OFFE_2GISOB; + else + policy_type = BTC_CXP_OFFE_2GBWISOB; + } else { + policy_type = BTC_CXP_OFF_EQ0; + } + + dm->e2g_slot_limit = BTC_E2G_LIMIT_DEF; + + _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G); + _set_policy(rtwdev, policy_type, BTC_ACT_WL_2G_SCC); +} + static void _action_wl_2g_ap(struct rtw89_dev *rtwdev) { struct rtw89_btc *btc = &rtwdev->btc; @@ -5287,6 +6007,312 @@ static void _update_wl_info_v2(struct rtw89_dev *rtwdev) #define BTC_CHK_HANG_MAX 3 #define BTC_SCB_INV_VALUE GENMASK(31, 0) +static u8 _get_role_link_mode(u8 role) +{ + switch (role) { + case RTW89_WIFI_ROLE_STATION: + return BTC_WLINK_2G_STA; + case RTW89_WIFI_ROLE_P2P_GO: + return BTC_WLINK_2G_GO; + case RTW89_WIFI_ROLE_P2P_CLIENT: + return BTC_WLINK_2G_GC; + case RTW89_WIFI_ROLE_AP: + return BTC_WLINK_2G_AP; + default: + return BTC_WLINK_OTHER; + } +} + +static bool _chk_role_ch_group(const struct rtw89_btc_chdef *r1, + const struct rtw89_btc_chdef *r2) +{ + if (r1->chan != r2->chan) { /* primary ch is different */ + return false; + } else if (r1->bw == RTW89_CHANNEL_WIDTH_40 && + r2->bw == RTW89_CHANNEL_WIDTH_40) { + if (r1->offset != r2->offset) + return false; + } + return true; +} + +static u8 _chk_dbcc(struct rtw89_dev *rtwdev, struct rtw89_btc_chdef *ch, + u8 *phy, u8 *role, u8 *dbcc_2g_phy) +{ + struct rtw89_btc_wl_info *wl = 
&rtwdev->btc.cx.wl; + struct rtw89_btc_wl_role_info_v8 *wl_rinfo = &wl->role_info_v8; + bool is_2g_ch_exist = false, is_multi_role_in_2g_phy = false; + u8 j, k, dbcc_2g_cid, dbcc_2g_cid2; + + /* find out the 2G-PHY by connect-id ->ch */ + for (j = 0; j < wl_rinfo->connect_cnt; j++) { + if (ch[j].center_ch <= 14) { + is_2g_ch_exist = true; + break; + } + } + + /* If no any 2G-port exist, it's impossible because 5G-exclude */ + if (!is_2g_ch_exist) + return BTC_WLINK_OTHER; + + dbcc_2g_cid = j; + *dbcc_2g_phy = phy[dbcc_2g_cid]; + + /* connect_cnt <= 2 */ + if (wl_rinfo->connect_cnt < BTC_TDMA_WLROLE_MAX) + return (_get_role_link_mode((role[dbcc_2g_cid]))); + + /* find the other-port in the 2G-PHY, ex: PHY-0:6G, PHY1: mcc/scc */ + for (k = 0; k < wl_rinfo->connect_cnt; k++) { + if (k == dbcc_2g_cid) + continue; + + if (phy[k] == *dbcc_2g_phy) { + is_multi_role_in_2g_phy = true; + dbcc_2g_cid2 = k; + break; + } + } + + /* Single-role in 2G-PHY */ + if (!is_multi_role_in_2g_phy) + return (_get_role_link_mode(role[dbcc_2g_cid])); + + /* 2-role in 2G-PHY */ + if (ch[dbcc_2g_cid2].center_ch > 14) + return BTC_WLINK_25G_MCC; + else if (_chk_role_ch_group(&ch[dbcc_2g_cid], &ch[dbcc_2g_cid2])) + return BTC_WLINK_2G_SCC; + else + return BTC_WLINK_2G_MCC; +} + +static void _update_role_link_mode(struct rtw89_dev *rtwdev, + bool client_joined, u32 noa) +{ + struct rtw89_btc_wl_role_info_v8 *wl_rinfo = &rtwdev->btc.cx.wl.role_info_v8; + u32 type = BTC_WLMROLE_NONE, dur = 0; + u32 wl_role = wl_rinfo->role_map; + + /* if no client_joined, don't care P2P-GO/AP role */ + if (((wl_role & BIT(RTW89_WIFI_ROLE_P2P_GO)) || + (wl_role & BIT(RTW89_WIFI_ROLE_AP))) && !client_joined) { + if (wl_rinfo->link_mode == BTC_WLINK_2G_SCC) { + wl_rinfo->link_mode = BTC_WLINK_2G_STA; + wl_rinfo->connect_cnt--; + } else if (wl_rinfo->link_mode == BTC_WLINK_2G_GO || + wl_rinfo->link_mode == BTC_WLINK_2G_AP) { + wl_rinfo->link_mode = BTC_WLINK_NOLINK; + wl_rinfo->connect_cnt--; + } + } + + /* Identify 2-Role type */ + if (wl_rinfo->connect_cnt >= 2 && + (wl_rinfo->link_mode == BTC_WLINK_2G_SCC || + wl_rinfo->link_mode == BTC_WLINK_2G_MCC || + wl_rinfo->link_mode == BTC_WLINK_25G_MCC || + wl_rinfo->link_mode == BTC_WLINK_5G)) { + if ((wl_role & BIT(RTW89_WIFI_ROLE_P2P_GO)) || + (wl_role & BIT(RTW89_WIFI_ROLE_AP))) + type = noa ? BTC_WLMROLE_STA_GO_NOA : BTC_WLMROLE_STA_GO; + else if (wl_role & BIT(RTW89_WIFI_ROLE_P2P_CLIENT)) + type = noa ? 
BTC_WLMROLE_STA_GC_NOA : BTC_WLMROLE_STA_GC; + else + type = BTC_WLMROLE_STA_STA; + + dur = noa; + } + + wl_rinfo->mrole_type = type; + wl_rinfo->mrole_noa_duration = dur; +} + +static void _update_wl_info_v8(struct rtw89_dev *rtwdev, u8 role_id, u8 rlink_id, + enum btc_role_state state) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_wl_info *wl = &btc->cx.wl; + struct rtw89_btc_chdef cid_ch[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER]; + struct rtw89_btc_wl_role_info_v8 *wl_rinfo = &wl->role_info_v8; + struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info; + bool client_joined = false, b2g = false, b5g = false; + u8 cid_role[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {}; + u8 cid_phy[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {}; + u8 dbcc_en = 0, pta_req_band = RTW89_MAC_0; + u8 i, j, cnt = 0, cnt_2g = 0, cnt_5g = 0; + struct rtw89_btc_wl_link_info *wl_linfo; + struct rtw89_btc_wl_rlink *rlink = NULL; + u8 dbcc_2g_phy = RTW89_PHY_0; + u8 mode = BTC_WLINK_NOLINK; + u32 noa_dur = 0; + + if (role_id >= RTW89_BE_BTC_WL_MAX_ROLE_NUMBER || rlink_id > RTW89_MAC_1) + return; + + /* Extract wl->link_info[role_id][rlink_id] to wl->role_info + * role_id: role index + * rlink_id: rlink index (= HW-band index) + * pid: port_index + */ + + wl_linfo = &wl->rlink_info[role_id][rlink_id]; + if (wl_linfo->connected == MLME_LINKING) + return; + + rlink = &wl_rinfo->rlink[role_id][rlink_id]; + rlink->role = wl_linfo->role; + rlink->active = wl_linfo->active; /* Doze or not */ + rlink->pid = wl_linfo->pid; + rlink->phy = wl_linfo->phy; + rlink->rf_band = wl_linfo->band; + rlink->ch = wl_linfo->ch; + rlink->bw = wl_linfo->bw; + rlink->noa = wl_linfo->noa; + rlink->noa_dur = wl_linfo->noa_duration / 1000; + rlink->client_cnt = wl_linfo->client_cnt; + rlink->mode = wl_linfo->mode; + + switch (wl_linfo->connected) { + case MLME_NO_LINK: + rlink->connected = 0; + if (rlink->role == RTW89_WIFI_ROLE_STATION) + btc->dm.leak_ap = 0; + break; + case MLME_LINKED: + rlink->connected = 1; + break; + default: + return; + } + + wl->is_5g_hi_channel = false; + wl->bg_mode = false; + wl_rinfo->role_map = 0; + wl_rinfo->p2p_2g = 0; + memset(cid_ch, 0, sizeof(cid_ch)); + + for (i = 0; i < RTW89_BE_BTC_WL_MAX_ROLE_NUMBER; i++) { + for (j = RTW89_MAC_0; j <= RTW89_MAC_1; j++) { + rlink = &wl_rinfo->rlink[i][j]; + + if (!rlink->active || !rlink->connected) + continue; + + cnt++; + wl_rinfo->role_map |= BIT(rlink->role); + + /* only if client connect for p2p-Go/AP */ + if ((rlink->role == RTW89_WIFI_ROLE_P2P_GO || + rlink->role == RTW89_WIFI_ROLE_AP) && + rlink->client_cnt > 1) + client_joined = true; + + /* Identufy if P2P-Go (GO/GC/AP) exist at 2G band*/ + if (rlink->rf_band == RTW89_BAND_2G && + (client_joined || rlink->role == RTW89_WIFI_ROLE_P2P_CLIENT)) + wl_rinfo->p2p_2g = 1; + + /* only one noa-role exist */ + if (rlink->noa && rlink->noa_dur > 0) + noa_dur = rlink->noa_dur; + + /* for WL 5G-Rx interfered with BT issue */ + if (rlink->rf_band == RTW89_BAND_5G && rlink->ch >= 100) + wl->is_5g_hi_channel = 1; + + if ((rlink->mode & BIT(BTC_WL_MODE_11B)) || + (rlink->mode & BIT(BTC_WL_MODE_11G))) + wl->bg_mode = 1; + + if (rtwdev->chip->para_ver & BTC_FEAT_MLO_SUPPORT) + continue; + + cid_ch[cnt - 1] = wl_linfo->chdef; + cid_phy[cnt - 1] = rlink->phy; + cid_role[cnt - 1] = rlink->role; + + if (rlink->rf_band != RTW89_BAND_2G) { + cnt_5g++; + b5g = true; + } else { + cnt_2g++; + b2g = true; + } + } + } + + if (rtwdev->chip->para_ver & BTC_FEAT_MLO_SUPPORT) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC] rlink cnt_2g=%d cnt_5g=%d\n", 
cnt_2g, cnt_5g); + rtw89_warn(rtwdev, "not support MLO feature yet"); + } else { + dbcc_en = rtwdev->dbcc_en; + + /* Be careful to change the following sequence!! */ + if (cnt == 0) { + mode = BTC_WLINK_NOLINK; + } else if (!b2g && b5g) { + mode = BTC_WLINK_5G; + } else if (wl_rinfo->role_map & BIT(RTW89_WIFI_ROLE_NAN)) { + mode = BTC_WLINK_2G_NAN; + } else if (cnt > BTC_TDMA_WLROLE_MAX) { + mode = BTC_WLINK_OTHER; + } else if (dbcc_en) { + mode = _chk_dbcc(rtwdev, cid_ch, cid_phy, cid_role, + &dbcc_2g_phy); + } else if (b2g && b5g && cnt == 2) { + mode = BTC_WLINK_25G_MCC; + } else if (!b5g && cnt == 2) { /* cnt_connect = 2 */ + if (_chk_role_ch_group(&cid_ch[0], &cid_ch[cnt - 1])) + mode = BTC_WLINK_2G_SCC; + else + mode = BTC_WLINK_2G_MCC; + } else if (!b5g && cnt == 1) { /* cnt_connect = 1 */ + mode = _get_role_link_mode(cid_role[0]); + } + } + + wl_rinfo->link_mode = mode; + wl_rinfo->connect_cnt = cnt; + if (wl_rinfo->connect_cnt == 0) + wl_rinfo->role_map = BIT(RTW89_WIFI_ROLE_NONE); + _update_role_link_mode(rtwdev, client_joined, noa_dur); + + wl_rinfo->dbcc_2g_phy = dbcc_2g_phy; + if (wl_rinfo->dbcc_en != dbcc_en) { + wl_rinfo->dbcc_en = dbcc_en; + wl_rinfo->dbcc_chg = 1; + btc->cx.cnt_wl[BTC_WCNT_DBCC_CHG]++; + } else { + wl_rinfo->dbcc_chg = 0; + } + + if (wl_rinfo->dbcc_en) { + memset(wl_dinfo, 0, sizeof(struct rtw89_btc_wl_dbcc_info)); + + if (mode == BTC_WLINK_5G) { + pta_req_band = RTW89_PHY_0; + wl_dinfo->op_band[RTW89_PHY_0] = RTW89_BAND_5G; + wl_dinfo->op_band[RTW89_PHY_1] = RTW89_BAND_2G; + } else if (wl_rinfo->dbcc_2g_phy == RTW89_PHY_1) { + pta_req_band = RTW89_PHY_1; + wl_dinfo->op_band[RTW89_PHY_0] = RTW89_BAND_5G; + wl_dinfo->op_band[RTW89_PHY_1] = RTW89_BAND_2G; + } else { + pta_req_band = RTW89_PHY_0; + wl_dinfo->op_band[RTW89_PHY_0] = RTW89_BAND_2G; + wl_dinfo->op_band[RTW89_PHY_1] = RTW89_BAND_5G; + } + _update_dbcc_band(rtwdev, RTW89_PHY_0); + _update_dbcc_band(rtwdev, RTW89_PHY_1); + } + + wl_rinfo->pta_req_band = pta_req_band; + _fw_set_drv_info(rtwdev, CXDRVINFO_ROLE); +} + void rtw89_coex_act1_work(struct work_struct *work) { struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, @@ -5445,6 +6471,7 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason) struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info; struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1; struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2; + struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8; u8 mode, igno_bt, always_freerun; lockdep_assert_held(&rtwdev->mutex); @@ -5459,6 +6486,8 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason) mode = wl_rinfo_v1->link_mode; else if (ver->fwlrole == 2) mode = wl_rinfo_v2->link_mode; + else if (ver->fwlrole == 8) + mode = wl_rinfo_v8->link_mode; else return; @@ -5605,6 +6634,8 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason) _action_wl_2g_scc_v1(rtwdev); else if (ver->fwlrole == 2) _action_wl_2g_scc_v2(rtwdev); + else if (ver->fwlrole == 8) + _action_wl_2g_scc_v8(rtwdev); break; case BTC_WLINK_2G_MCC: bt->scan_rx_low_pri = true; @@ -5736,8 +6767,8 @@ void rtw89_btc_ntfy_init(struct rtw89_dev *rtwdev, u8 mode) _set_init_info(rtwdev); _set_wl_tx_power(rtwdev, RTW89_BTC_WL_DEF_TX_PWR); - rtw89_btc_fw_set_slots(rtwdev, CXST_MAX, dm->slot); btc_fw_set_monreg(rtwdev); + rtw89_btc_fw_set_slots(rtwdev); _fw_set_drv_info(rtwdev, CXDRVINFO_INIT); _fw_set_drv_info(rtwdev, CXDRVINFO_CTRL); @@ -5956,6 +6987,22 @@ static u8 
_update_bt_rssi_level(struct rtw89_dev *rtwdev, u8 rssi) return rssi_level; } +static void _update_zb_coex_tbl(struct rtw89_dev *rtwdev) +{ + u8 mode = rtwdev->btc.cx.wl.role_info.link_mode; + u32 zb_tbl0 = 0xda5a5a5a, zb_tbl1 = 0xda5a5a5a; + + if (mode == BTC_WLINK_5G || rtwdev->btc.dm.freerun) { + zb_tbl0 = 0xffffffff; + zb_tbl1 = 0xffffffff; + } else if (mode == BTC_WLINK_25G_MCC) { + zb_tbl0 = 0xffffffff; /* for E5G slot */ + zb_tbl1 = 0xda5a5a5a; /* for E2G slot */ + } + rtw89_write32(rtwdev, R_BTC_ZB_COEX_TBL_0, zb_tbl0); + rtw89_write32(rtwdev, R_BTC_ZB_COEX_TBL_1, zb_tbl1); +} + #define BT_PROFILE_PROTOCOL_MASK GENMASK(7, 4) static void _update_bt_info(struct rtw89_dev *rtwdev, u8 *buf, u32 len) @@ -6093,13 +7140,6 @@ static void _update_bt_info(struct rtw89_dev *rtwdev, u8 *buf, u32 len) _run_coex(rtwdev, BTC_RSN_UPDATE_BT_INFO); } -enum btc_wl_mode { - BTC_WL_MODE_HT = 0, - BTC_WL_MODE_VHT = 1, - BTC_WL_MODE_HE = 2, - BTC_WL_MODE_NUM, -}; - void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, struct rtw89_sta *rtwsta, enum btc_role_state state) { @@ -6112,7 +7152,7 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif struct rtw89_btc_wl_info *wl = &btc->cx.wl; struct rtw89_btc_wl_link_info r = {0}; struct rtw89_btc_wl_link_info *wlinfo = NULL; - u8 mode = 0; + u8 mode = 0, rlink_id, link_mode_ori, pta_req_mac_ori, wa_type; rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], state=%d\n", state); rtw89_debug(rtwdev, RTW89_DBG_BTC, @@ -6162,6 +7202,10 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif r.band = chan->band_type; r.ch = chan->channel; r.bw = chan->band_width; + r.chdef.band = chan->band_type; + r.chdef.center_ch = chan->channel; + r.chdef.bw = chan->band_width; + r.chdef.chan = chan->primary_channel; ether_addr_copy(r.mac_addr, rtwvif->mac_addr); if (rtwsta && vif->type == NL80211_IFTYPE_STATION) @@ -6171,13 +7215,37 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif wlinfo = &wl->link_info[r.pid]; - memcpy(wlinfo, &r, sizeof(*wlinfo)); - if (ver->fwlrole == 0) + rlink_id = 0; /* to do */ + if (ver->fwlrole == 0) { + *wlinfo = r; _update_wl_info(rtwdev); - else if (ver->fwlrole == 1) + } else if (ver->fwlrole == 1) { + *wlinfo = r; _update_wl_info_v1(rtwdev); - else if (ver->fwlrole == 2) + } else if (ver->fwlrole == 2) { + *wlinfo = r; _update_wl_info_v2(rtwdev); + } else if (ver->fwlrole == 8) { + wlinfo = &wl->rlink_info[r.pid][rlink_id]; + *wlinfo = r; + link_mode_ori = wl->role_info_v8.link_mode; + pta_req_mac_ori = wl->pta_req_mac; + _update_wl_info_v8(rtwdev, r.pid, rlink_id, state); + + if (wl->role_info_v8.link_mode != link_mode_ori) { + wl->role_info_v8.link_mode_chg = 1; + if (ver->fcxinit == 7) + wa_type = btc->mdinfo.md_v7.wa_type; + else + wa_type = btc->mdinfo.md.wa_type; + + if (wa_type & BTC_WA_HFP_ZB) + _update_zb_coex_tbl(rtwdev); + } + + if (wl->pta_req_mac != pta_req_mac_ori) + wl->pta_reg_mac_chg = 1; + } if (wlinfo->role == RTW89_WIFI_ROLE_STATION && wlinfo->connected == MLME_NO_LINK) @@ -6715,7 +7783,10 @@ static void _show_wl_role_info(struct rtw89_dev *rtwdev, struct seq_file *m) } for (i = 0; i < RTW89_PORT_NUM; i++) { - plink = &btc->cx.wl.link_info[i]; + if (btc->ver->fwlrole == 8) + plink = &btc->cx.wl.rlink_info[i][0]; + else + plink = &btc->cx.wl.link_info[i]; if (!plink->active) continue; @@ -6760,6 +7831,7 @@ static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m) struct rtw89_btc_wl_role_info *wl_rinfo = 
&wl->role_info; struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1; struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2; + struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8; u8 mode; if (!(btc->dm.coex_info_map & BTC_COEX_INFO_WL)) @@ -6773,6 +7845,8 @@ static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m) mode = wl_rinfo_v1->link_mode; else if (ver->fwlrole == 2) mode = wl_rinfo_v2->link_mode; + else if (ver->fwlrole == 8) + mode = wl_rinfo_v8->link_mode; else return; @@ -6985,11 +8059,6 @@ static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m) seq_puts(m, "\n"); } - if (bt->enable.now && bt->ver_info.fw == 0) - rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_VER_INFO, true); - else - rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_VER_INFO, false); - if (bt_linfo->profile_cnt.now || bt_linfo->status.map.ble_connect) rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_AFH_MAP, true); else @@ -7017,6 +8086,79 @@ static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m) #define CASE_BTC_EVT_STR(e) case CXEVNT_## e: return #e #define CASE_BTC_INIT(e) case BTC_MODE_## e: return #e #define CASE_BTC_ANTPATH_STR(e) case BTC_ANT_##e: return #e +#define CASE_BTC_POLUT_STR(e) case BTC_PLT_## e: return #e +#define CASE_BTC_REGTYPE_STR(e) case REG_## e: return #e +#define CASE_BTC_GDBG_STR(e) case BTC_DBG_## e: return #e + +static const char *id_to_polut(u32 id) +{ + switch (id) { + CASE_BTC_POLUT_STR(NONE); + CASE_BTC_POLUT_STR(GNT_BT_TX); + CASE_BTC_POLUT_STR(GNT_BT_RX); + CASE_BTC_POLUT_STR(GNT_WL); + CASE_BTC_POLUT_STR(BT); + CASE_BTC_POLUT_STR(ALL); + default: + return "unknown"; + } +} + +static const char *id_to_regtype(u32 id) +{ + switch (id) { + CASE_BTC_REGTYPE_STR(MAC); + CASE_BTC_REGTYPE_STR(BB); + CASE_BTC_REGTYPE_STR(RF); + CASE_BTC_REGTYPE_STR(BT_RF); + CASE_BTC_REGTYPE_STR(BT_MODEM); + CASE_BTC_REGTYPE_STR(BT_BLUEWIZE); + CASE_BTC_REGTYPE_STR(BT_VENDOR); + CASE_BTC_REGTYPE_STR(BT_LE); + default: + return "unknown"; + } +} + +static const char *id_to_gdbg(u32 id) +{ + switch (id) { + CASE_BTC_GDBG_STR(GNT_BT); + CASE_BTC_GDBG_STR(GNT_WL); + CASE_BTC_GDBG_STR(BCN_EARLY); + CASE_BTC_GDBG_STR(WL_NULL0); + CASE_BTC_GDBG_STR(WL_NULL1); + CASE_BTC_GDBG_STR(WL_RXISR); + CASE_BTC_GDBG_STR(TDMA_ENTRY); + CASE_BTC_GDBG_STR(A2DP_EMPTY); + CASE_BTC_GDBG_STR(BT_RETRY); + CASE_BTC_GDBG_STR(BT_RELINK); + CASE_BTC_GDBG_STR(SLOT_WL); + CASE_BTC_GDBG_STR(SLOT_BT); + CASE_BTC_GDBG_STR(WL_ERR); + CASE_BTC_GDBG_STR(WL_OK); + CASE_BTC_GDBG_STR(SLOT_B2W); + CASE_BTC_GDBG_STR(SLOT_W1); + CASE_BTC_GDBG_STR(SLOT_W2); + CASE_BTC_GDBG_STR(SLOT_W2B); + CASE_BTC_GDBG_STR(SLOT_B1); + CASE_BTC_GDBG_STR(SLOT_B2); + CASE_BTC_GDBG_STR(SLOT_B3); + CASE_BTC_GDBG_STR(SLOT_B4); + CASE_BTC_GDBG_STR(SLOT_LK); + CASE_BTC_GDBG_STR(SLOT_E2G); + CASE_BTC_GDBG_STR(SLOT_E5G); + CASE_BTC_GDBG_STR(SLOT_EBT); + CASE_BTC_GDBG_STR(SLOT_WLK); + CASE_BTC_GDBG_STR(SLOT_B1FDD); + CASE_BTC_GDBG_STR(BT_CHANGE); + CASE_BTC_GDBG_STR(WL_CCA); + CASE_BTC_GDBG_STR(BT_LEAUDIO); + CASE_BTC_GDBG_STR(USER_DEF); + default: + return "unknown"; + } +} static const char *steps_to_str(u16 step) { @@ -7354,6 +8496,10 @@ static void _show_error(struct rtw89_dev *rtwdev, struct seq_file *m) pcysta->v5 = pfwinfo->rpt_fbtc_cysta.finfo.v5; except_cnt = pcysta->v5.except_cnt; exception_map = le32_to_cpu(pcysta->v5.except_map); + } else if (ver->fcxcysta == 7) { + pcysta->v7 = pfwinfo->rpt_fbtc_cysta.finfo.v7; + except_cnt = pcysta->v7.except_cnt; + exception_map = le32_to_cpu(pcysta->v7.except_map); } 
else { return; } @@ -7430,22 +8576,35 @@ static void _show_fbtc_slots(struct rtw89_dev *rtwdev, struct seq_file *m) { struct rtw89_btc *btc = &rtwdev->btc; struct rtw89_btc_dm *dm = &btc->dm; - struct rtw89_btc_fbtc_slot *s; + u16 dur, cxtype; + u32 tbl; u8 i = 0; for (i = 0; i < CXST_MAX; i++) { - s = &dm->slot_now[i]; + if (btc->ver->fcxslots == 1) { + dur = le16_to_cpu(dm->slot_now.v1[i].dur); + tbl = le32_to_cpu(dm->slot_now.v1[i].cxtbl); + cxtype = le16_to_cpu(dm->slot_now.v1[i].cxtype); + } else if (btc->ver->fcxslots == 7) { + dur = le16_to_cpu(dm->slot_now.v7[i].dur); + tbl = le32_to_cpu(dm->slot_now.v7[i].cxtbl); + cxtype = le16_to_cpu(dm->slot_now.v7[i].cxtype); + } else { + return; + } + if (i % 5 == 0) seq_printf(m, " %-15s : %5s[%03d/0x%x/%d]", "[slot_list]", id_to_slot((u32)i), - s->dur, s->cxtbl, s->cxtype); + dur, tbl, cxtype); else seq_printf(m, ", %5s[%03d/0x%x/%d]", id_to_slot((u32)i), - s->dur, s->cxtbl, s->cxtype); + dur, tbl, cxtype); + if (i % 5 == 4) seq_puts(m, "\n"); } @@ -7973,6 +9132,136 @@ static void _show_fbtc_cysta_v5(struct rtw89_dev *rtwdev, struct seq_file *m) } } +static void _show_fbtc_cysta_v7(struct rtw89_dev *rtwdev, struct seq_file *m) +{ + struct rtw89_btc_bt_info *bt = &rtwdev->btc.cx.bt; + struct rtw89_btc_bt_a2dp_desc *a2dp = &bt->link_info.a2dp_desc; + struct rtw89_btc_btf_fwinfo *pfwinfo = &rtwdev->btc.fwinfo; + struct rtw89_btc_fbtc_cysta_v7 *pcysta = NULL; + struct rtw89_btc_dm *dm = &rtwdev->btc.dm; + struct rtw89_btc_rpt_cmn_info *pcinfo; + u16 cycle, c_begin, c_end, s_id; + u8 i, cnt = 0, divide_cnt; + u8 slot_pair; + + pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo; + if (!pcinfo->valid) + return; + + pcysta = &pfwinfo->rpt_fbtc_cysta.finfo.v7; + seq_printf(m, "\n\r %-15s : cycle:%d", "[slot_stat]", + le16_to_cpu(pcysta->cycles)); + + for (i = 0; i < CXST_MAX; i++) { + if (!le16_to_cpu(pcysta->slot_cnt[i])) + continue; + seq_printf(m, ", %s:%d", + id_to_slot(i), le16_to_cpu(pcysta->slot_cnt[i])); + } + + if (dm->tdma_now.rxflctrl) + seq_printf(m, ", leak_rx:%d", + le32_to_cpu(pcysta->leak_slot.cnt_rximr)); + + if (pcysta->collision_cnt) + seq_printf(m, ", collision:%d", pcysta->collision_cnt); + + if (pcysta->skip_cnt) + seq_printf(m, ", skip:%d", le16_to_cpu(pcysta->skip_cnt)); + + seq_printf(m, "\n\r %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]", + "[cycle_stat]", + le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]), + le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]), + le16_to_cpu(pcysta->leak_slot.tavg) / 1000, + le16_to_cpu(pcysta->leak_slot.tavg) % 1000); + seq_printf(m, ", max_t[wl:%d/bt:%d(>%dms:%d)/lk:%d.%03d]", + le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]), + le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]), + dm->bt_slot_flood, dm->cnt_dm[BTC_DCNT_BT_SLOT_FLOOD], + le16_to_cpu(pcysta->leak_slot.tamx) / 1000, + le16_to_cpu(pcysta->leak_slot.tamx) % 1000); + seq_printf(m, ", bcn[all:%d/ok:%d/in_bt:%d/in_bt_ok:%d]", + le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]), + le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]), + le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]), + le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK])); + + if (a2dp->exist) { + seq_printf(m, + "\n\r %-15s : a2dp_ept:%d, a2dp_late:%d(streak 2S:%d/max:%d)", + "[a2dp_stat]", + le16_to_cpu(pcysta->a2dp_ept.cnt), + le16_to_cpu(pcysta->a2dp_ept.cnt_timeout), + a2dp->no_empty_streak_2s, a2dp->no_empty_streak_max); + + seq_printf(m, ", avg_t:%d, max_t:%d", + le16_to_cpu(pcysta->a2dp_ept.tavg), + le16_to_cpu(pcysta->a2dp_ept.tmax)); + } + + if (le16_to_cpu(pcysta->cycles) <= 1) + return; + + /* 1 cycle = 1 wl-slot + 1 
bt-slot */ + slot_pair = BTC_CYCLE_SLOT_MAX / 2; + + if (le16_to_cpu(pcysta->cycles) <= slot_pair) + c_begin = 1; + else + c_begin = le16_to_cpu(pcysta->cycles) - slot_pair + 1; + + c_end = le16_to_cpu(pcysta->cycles); + + if (a2dp->exist) + divide_cnt = 2; + else + divide_cnt = 6; + + if (c_begin > c_end) + return; + + for (cycle = c_begin; cycle <= c_end; cycle++) { + cnt++; + s_id = ((cycle - 1) % slot_pair) * 2; + + if (cnt % divide_cnt == 1) { + if (a2dp->exist) + seq_printf(m, "\n\r %-15s : ", "[slotT_wermtan]"); + else + seq_printf(m, "\n\r %-15s : ", "[slotT_rxerr]"); + } + + seq_printf(m, "->b%d", le16_to_cpu(pcysta->slot_step_time[s_id])); + + if (a2dp->exist) + seq_printf(m, "(%d/%d/%d/%dM/%d/%d/%d)", + pcysta->wl_rx_err_ratio[s_id], + pcysta->a2dp_trx[s_id].empty_cnt, + pcysta->a2dp_trx[s_id].retry_cnt, + (pcysta->a2dp_trx[s_id].tx_rate ? 3 : 2), + pcysta->a2dp_trx[s_id].tx_cnt, + pcysta->a2dp_trx[s_id].ack_cnt, + pcysta->a2dp_trx[s_id].nack_cnt); + else + seq_printf(m, "(%d)", pcysta->wl_rx_err_ratio[s_id]); + + seq_printf(m, "->w%d", le16_to_cpu(pcysta->slot_step_time[s_id + 1])); + + if (a2dp->exist) + seq_printf(m, "(%d/%d/%d/%dM/%d/%d/%d)", + pcysta->wl_rx_err_ratio[s_id + 1], + pcysta->a2dp_trx[s_id + 1].empty_cnt, + pcysta->a2dp_trx[s_id + 1].retry_cnt, + (pcysta->a2dp_trx[s_id + 1].tx_rate ? 3 : 2), + pcysta->a2dp_trx[s_id + 1].tx_cnt, + pcysta->a2dp_trx[s_id + 1].ack_cnt, + pcysta->a2dp_trx[s_id + 1].nack_cnt); + else + seq_printf(m, "(%d)", pcysta->wl_rx_err_ratio[s_id + 1]); + } +} + static void _show_fbtc_nullsta(struct rtw89_dev *rtwdev, struct seq_file *m) { struct rtw89_btc *btc = &rtwdev->btc; @@ -8009,6 +9298,27 @@ static void _show_fbtc_nullsta(struct rtw89_dev *rtwdev, struct seq_file *m) le32_to_cpu(ns->v1.max_t[i]) / 1000, le32_to_cpu(ns->v1.max_t[i]) % 1000); } + } else if (ver->fcxnullsta == 7) { + for (i = 0; i < 2; i++) { + seq_printf(m, " %-15s : ", "[NULL-STA]"); + seq_printf(m, "null-%d", i); + seq_printf(m, "[Tx:%d/", + le32_to_cpu(ns->v7.result[i][4])); + seq_printf(m, "[ok:%d/", + le32_to_cpu(ns->v7.result[i][1])); + seq_printf(m, "fail:%d/", + le32_to_cpu(ns->v7.result[i][0])); + seq_printf(m, "on_time:%d/", + le32_to_cpu(ns->v7.result[i][2])); + seq_printf(m, "retry:%d/", + le32_to_cpu(ns->v7.result[i][3])); + seq_printf(m, "avg_t:%d.%03d/", + le32_to_cpu(ns->v7.tavg[i]) / 1000, + le32_to_cpu(ns->v7.tavg[i]) % 1000); + seq_printf(m, "max_t:%d.%03d]\n", + le32_to_cpu(ns->v7.tmax[i]) / 1000, + le32_to_cpu(ns->v7.tmax[i]) % 1000); + } } else { for (i = 0; i < 2; i++) { seq_printf(m, " %-15s : ", "[NULL-STA]"); @@ -8185,6 +9495,8 @@ static void _show_fw_dm_msg(struct rtw89_dev *rtwdev, struct seq_file *m) _show_fbtc_cysta_v4(rtwdev, m); else if (ver->fcxcysta == 5) _show_fbtc_cysta_v5(rtwdev, m); + else if (ver->fcxcysta == 7) + _show_fbtc_cysta_v7(rtwdev, m); _show_fbtc_nullsta(rtwdev, m); @@ -8237,6 +9549,47 @@ static void _get_gnt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_coex_gnt *gnt } } +static void _show_gpio_dbg(struct rtw89_dev *rtwdev, struct seq_file *m) +{ + struct rtw89_btc_btf_fwinfo *pfwinfo = &rtwdev->btc.fwinfo; + const struct rtw89_btc_ver *ver = rtwdev->btc.ver; + struct rtw89_btc_rpt_cmn_info *pcinfo = NULL; + union rtw89_btc_fbtc_gpio_dbg *gdbg = NULL; + u8 *gpio_map, i; + u32 en_map; + + pcinfo = &pfwinfo->rpt_fbtc_gpio_dbg.cinfo; + gdbg = &rtwdev->btc.fwinfo.rpt_fbtc_gpio_dbg.finfo; + if (!pcinfo->valid) { + rtw89_debug(rtwdev, RTW89_DBG_BTC, + "[BTC], %s(): stop due rpt_fbtc_gpio_dbg.cinfo\n", + __func__); + seq_puts(m, 
"\n"); + return; + } + + if (ver->fcxgpiodbg == 7) { + en_map = le32_to_cpu(gdbg->v7.en_map); + gpio_map = gdbg->v7.gpio_map; + } else { + en_map = le32_to_cpu(gdbg->v1.en_map); + gpio_map = gdbg->v1.gpio_map; + } + + if (!en_map) + return; + + seq_printf(m, " %-15s : enable_map:0x%08x", + "[gpio_dbg]", en_map); + + for (i = 0; i < BTC_DBG_MAX1; i++) { + if (!(en_map & BIT(i))) + continue; + seq_printf(m, ", %s->GPIO%d", id_to_gdbg(i), gpio_map[i]); + } + seq_puts(m, "\n"); +} + static void _show_mreg_v1(struct rtw89_dev *rtwdev, struct seq_file *m) { const struct rtw89_chip_info *chip = rtwdev->chip; @@ -8244,7 +9597,6 @@ static void _show_mreg_v1(struct rtw89_dev *rtwdev, struct seq_file *m) struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo; struct rtw89_btc_rpt_cmn_info *pcinfo = NULL; struct rtw89_btc_fbtc_mreg_val_v1 *pmreg = NULL; - struct rtw89_btc_fbtc_gpio_dbg *gdbg = NULL; struct rtw89_btc_cx *cx = &btc->cx; struct rtw89_btc_wl_info *wl = &btc->cx.wl; struct rtw89_btc_bt_info *bt = &btc->cx.bt; @@ -8314,29 +9666,6 @@ static void _show_mreg_v1(struct rtw89_dev *rtwdev, struct seq_file *m) if (i >= pmreg->reg_num) seq_puts(m, "\n"); } - - pcinfo = &pfwinfo->rpt_fbtc_gpio_dbg.cinfo; - if (!pcinfo->valid) { - rtw89_debug(rtwdev, RTW89_DBG_BTC, - "[BTC], %s(): stop due rpt_fbtc_gpio_dbg.cinfo\n", - __func__); - seq_puts(m, "\n"); - return; - } - - gdbg = &pfwinfo->rpt_fbtc_gpio_dbg.finfo; - if (!gdbg->en_map) - return; - - seq_printf(m, " %-15s : enable_map:0x%08x", - "[gpio_dbg]", gdbg->en_map); - - for (i = 0; i < BTC_DBG_MAX1; i++) { - if (!(gdbg->en_map & BIT(i))) - continue; - seq_printf(m, ", %d->GPIO%d", (u32)i, gdbg->gpio_map[i]); - } - seq_puts(m, "\n"); } static void _show_mreg_v2(struct rtw89_dev *rtwdev, struct seq_file *m) @@ -8346,7 +9675,6 @@ static void _show_mreg_v2(struct rtw89_dev *rtwdev, struct seq_file *m) struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo; struct rtw89_btc_rpt_cmn_info *pcinfo = NULL; struct rtw89_btc_fbtc_mreg_val_v2 *pmreg = NULL; - struct rtw89_btc_fbtc_gpio_dbg *gdbg = NULL; struct rtw89_btc_cx *cx = &btc->cx; struct rtw89_btc_wl_info *wl = &btc->cx.wl; struct rtw89_btc_bt_info *bt = &btc->cx.bt; @@ -8371,12 +9699,13 @@ static void _show_mreg_v2(struct rtw89_dev *rtwdev, struct seq_file *m) gnt = gnt_cfg.band[0]; seq_printf(m, - " %-15s : pta_owner:%s, phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d], ", + " %-15s : pta_owner:%s, phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d], polut_type:%s", "[gnt_status]", chip->chip_id == RTL8852C ? "HW" : btc->dm.pta_owner == BTC_CTRL_BY_WL ? "WL" : "BT", gnt.gnt_wl_sw_en ? "SW" : "HW", gnt.gnt_wl, - gnt.gnt_bt_sw_en ? "SW" : "HW", gnt.gnt_bt); + gnt.gnt_bt_sw_en ? 
"SW" : "HW", gnt.gnt_bt, + id_to_polut(wl->bt_polut_type[wl->pta_req_mac])); gnt = gnt_cfg.band[1]; seq_printf(m, "phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]\n", @@ -8416,27 +9745,74 @@ static void _show_mreg_v2(struct rtw89_dev *rtwdev, struct seq_file *m) if (i >= pmreg->reg_num) seq_puts(m, "\n"); } +} - pcinfo = &pfwinfo->rpt_fbtc_gpio_dbg.cinfo; - if (!pcinfo->valid) { - rtw89_debug(rtwdev, RTW89_DBG_BTC, - "[BTC], %s(): stop due rpt_fbtc_gpio_dbg.cinfo\n", - __func__); - seq_puts(m, "\n"); +static void _show_mreg_v7(struct rtw89_dev *rtwdev, struct seq_file *m) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo; + struct rtw89_btc_fbtc_mreg_val_v7 *pmreg = NULL; + struct rtw89_btc_rpt_cmn_info *pcinfo = NULL; + struct rtw89_btc_cx *cx = &btc->cx; + struct rtw89_btc_wl_info *wl = &cx->wl; + struct rtw89_btc_bt_info *bt = &cx->bt; + struct rtw89_mac_ax_gnt *gnt = NULL; + struct rtw89_btc_dm *dm = &btc->dm; + u8 i, type, cnt = 0; + u32 val, offset; + + if (!(dm->coex_info_map & BTC_COEX_INFO_MREG)) return; + + seq_puts(m, "\n\r========== [HW Status] =========="); + + seq_printf(m, + "\n\r %-15s : WL->BT:0x%08x(cnt:%d), BT->WL:0x%08x(total:%d, bt_update:%d)", + "[scoreboard]", wl->scbd, cx->cnt_wl[BTC_WCNT_SCBDUPDATE], + bt->scbd, cx->cnt_bt[BTC_BCNT_SCBDREAD], + cx->cnt_bt[BTC_BCNT_SCBDUPDATE]); + + /* To avoid I/O if WL LPS or power-off */ + dm->pta_owner = rtw89_mac_get_ctrl_path(rtwdev); + + seq_printf(m, + "\n\r %-15s : pta_owner:%s, pta_req_mac:MAC%d, rf_gnt_source: polut_type:%s", + "[gnt_status]", + rtwdev->chip->para_ver & BTC_FEAT_PTA_ONOFF_CTRL ? "HW" : + dm->pta_owner == BTC_CTRL_BY_WL ? "WL" : "BT", + wl->pta_req_mac, id_to_polut(wl->bt_polut_type[wl->pta_req_mac])); + + gnt = &dm->gnt.band[RTW89_PHY_0]; + + seq_printf(m, ", phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d]", + gnt->gnt_wl_sw_en ? "SW" : "HW", gnt->gnt_wl, + gnt->gnt_bt_sw_en ? "SW" : "HW", gnt->gnt_bt); + + if (rtwdev->dbcc_en) { + gnt = &dm->gnt.band[RTW89_PHY_1]; + seq_printf(m, ", phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]", + gnt->gnt_wl_sw_en ? "SW" : "HW", gnt->gnt_wl, + gnt->gnt_bt_sw_en ? 
"SW" : "HW", gnt->gnt_bt); } - gdbg = &pfwinfo->rpt_fbtc_gpio_dbg.finfo; - if (!gdbg->en_map) + pcinfo = &pfwinfo->rpt_fbtc_mregval.cinfo; + if (!pcinfo->valid) return; - seq_printf(m, " %-15s : enable_map:0x%08x", - "[gpio_dbg]", gdbg->en_map); + pmreg = &pfwinfo->rpt_fbtc_mregval.finfo.v7; - for (i = 0; i < BTC_DBG_MAX1; i++) { - if (!(gdbg->en_map & BIT(i))) - continue; - seq_printf(m, ", %d->GPIO%d", (u32)i, gdbg->gpio_map[i]); + for (i = 0; i < pmreg->reg_num; i++) { + type = (u8)le16_to_cpu(rtwdev->chip->mon_reg[i].type); + offset = le32_to_cpu(rtwdev->chip->mon_reg[i].offset); + val = le32_to_cpu(pmreg->mreg_val[i]); + + if (cnt % 6 == 0) + seq_printf(m, "\n\r %-15s : %s_0x%x=0x%x", "[reg]", + id_to_regtype(type), offset, val); + else + seq_printf(m, ", %s_0x%x=0x%x", + id_to_regtype(type), offset, val); + cnt++; } seq_puts(m, "\n"); } @@ -8887,6 +10263,108 @@ static void _show_summary_v105(struct rtw89_dev *rtwdev, struct seq_file *m) cnt[BTC_NCNT_CUSTOMERIZE]); } +static void _show_summary_v8(struct rtw89_dev *rtwdev, struct seq_file *m) +{ + struct rtw89_btc_btf_fwinfo *pfwinfo = &rtwdev->btc.fwinfo; + struct rtw89_btc_rpt_cmn_info *pcinfo = NULL; + struct rtw89_btc_fbtc_rpt_ctrl_v8 *prptctrl = NULL; + struct rtw89_btc_cx *cx = &rtwdev->btc.cx; + struct rtw89_btc_dm *dm = &rtwdev->btc.dm; + struct rtw89_btc_wl_info *wl = &cx->wl; + u32 *cnt = rtwdev->btc.dm.cnt_notify; + u32 cnt_sum = 0; + u8 i; + + if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY)) + return; + + seq_printf(m, "%s", "\n\r========== [Statistics] =========="); + + pcinfo = &pfwinfo->rpt_ctrl.cinfo; + if (pcinfo->valid && wl->status.map.lps != BTC_LPS_RF_OFF && + !wl->status.map.rf_off) { + prptctrl = &pfwinfo->rpt_ctrl.finfo.v8; + + seq_printf(m, + "\n\r %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d, len:%d, max:fw-%d/drv-%d), ", + "[summary]", pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail, + le16_to_cpu(prptctrl->rpt_info.cnt_h2c), pfwinfo->cnt_c2h, + le16_to_cpu(prptctrl->rpt_info.cnt_c2h), + le16_to_cpu(prptctrl->rpt_info.len_c2h), + (prptctrl->rpt_len_max_h << 8) + prptctrl->rpt_len_max_l, + rtwdev->btc.ver->info_buf); + + seq_printf(m, "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x", + pfwinfo->event[BTF_EVNT_RPT], + le16_to_cpu(prptctrl->rpt_info.cnt), + le32_to_cpu(prptctrl->rpt_info.en)); + + if (dm->error.map.wl_fw_hang) + seq_puts(m, " (WL FW Hang!!)"); + + seq_printf(m, "\n\r %-15s : send_ok:%d, send_fail:%d, recv:%d, ", + "[mailbox]", le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok), + le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail), + le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv)); + + seq_printf(m, "A2DP_empty:%d(stop:%d/tx:%d/ack:%d/nack:%d)", + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty), + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl), + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx), + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack), + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack)); + + seq_printf(m, + "\n\r %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d/time:%dms]", + "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ], + cx->cnt_wl[BTC_WCNT_RFK_GO], + cx->cnt_wl[BTC_WCNT_RFK_REJECT], + cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT], + wl->rfk_info.proc_time); + + seq_printf(m, ", bt_rfk[req:%d]", + le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ])); + + seq_printf(m, ", AOAC[RF_on:%d/RF_off:%d]", + le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on), + le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off)); + } else { + seq_printf(m, + "\n\r %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d (lps=%d/rf_off=%d)", + "[summary]", + 
pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail, + pfwinfo->cnt_c2h, + wl->status.map.lps, wl->status.map.rf_off); + } + + for (i = 0; i < BTC_NCNT_NUM; i++) + cnt_sum += dm->cnt_notify[i]; + + seq_printf(m, + "\n\r %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ", + "[notify_cnt]", + cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO], + cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]); + + seq_printf(m, + "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d", + cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE], + cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK], + cnt[BTC_NCNT_WL_STA]); + + seq_printf(m, + "\n\r %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, switch_chbw=%d, special_pkt=%d, ", + "[notify_cnt]", + cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH], + cnt[BTC_NCNT_SWITCH_BAND], cnt[BTC_NCNT_SWITCH_CHBW], + cnt[BTC_NCNT_SPECIAL_PACKET]); + + seq_printf(m, "timer=%d, customerize=%d, hub_msg=%d, chg_fw=%d, send_cc=%d", + cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CUSTOMERIZE], + rtwdev->btc.hubmsg_cnt, cnt[BTC_NCNT_RESUME_DL_FW], + cnt[BTC_NCNT_COUNTRYCODE]); +} + void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m) { struct rtw89_fw_suit *fw_suit = &rtwdev->fw.normal; @@ -8924,6 +10402,10 @@ void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m) _show_mreg_v1(rtwdev, m); else if (ver->fcxmreg == 2) _show_mreg_v2(rtwdev, m); + else if (ver->fcxmreg == 7) + _show_mreg_v7(rtwdev, m); + + _show_gpio_dbg(rtwdev, m); if (ver->fcxbtcrpt == 1) _show_summary_v1(rtwdev, m); @@ -8933,6 +10415,8 @@ void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m) _show_summary_v5(rtwdev, m); else if (ver->fcxbtcrpt == 105) _show_summary_v105(rtwdev, m); + else if (ver->fcxbtcrpt == 8) + _show_summary_v8(rtwdev, m); } void rtw89_coex_recognize_ver(struct rtw89_dev *rtwdev) diff --git a/drivers/net/wireless/realtek/rtw89/coex.h b/drivers/net/wireless/realtek/rtw89/coex.h index 13303830684e..0e5f268616f7 100644 --- a/drivers/net/wireless/realtek/rtw89/coex.h +++ b/drivers/net/wireless/realtek/rtw89/coex.h @@ -8,6 +8,8 @@ #include "core.h" #define BTC_H2C_MAXLEN 2020 +#define BTC_TLV_SLOT_ID_LEN_V7 1 +#define BTC_SLOT_REQ_TH 2 enum btc_mode { BTC_MODE_NORMAL, @@ -201,6 +203,60 @@ enum btc_3cx_type { BTC_3CX_MAX, }; +enum btc_chip_feature { + BTC_FEAT_PTA_ONOFF_CTRL = BIT(0), /* on/off ctrl by HW (not 0x73[2]) */ + BTC_FEAT_NONBTG_GWL_THRU = BIT(1), /* non-BTG GNT_WL!=0 if GNT_BT = 1 */ + BTC_FEAT_WLAN_ACT_MUX = BIT(2), /* separate wlan_act/gnt mux */ + BTC_FEAT_NEW_BBAPI_FLOW = BIT(3), /* new btg_ctrl/pre_agc_ctrl */ + BTC_FEAT_MLO_SUPPORT = BIT(4), + BTC_FEAT_H2C_MACRO = BIT(5), +}; + +enum btc_wl_mode { + BTC_WL_MODE_11B = 0, + BTC_WL_MODE_11A = 1, + BTC_WL_MODE_11G = 2, + BTC_WL_MODE_HT = 3, + BTC_WL_MODE_VHT = 4, + BTC_WL_MODE_HE = 5, + BTC_WL_MODE_NUM, +}; + +enum btc_wl_gpio_debug { + BTC_DBG_GNT_BT = 0, + BTC_DBG_GNT_WL = 1, + BTC_DBG_BCN_EARLY = 2, + BTC_DBG_WL_NULL0 = 3, + BTC_DBG_WL_NULL1 = 4, + BTC_DBG_WL_RXISR = 5, + BTC_DBG_TDMA_ENTRY = 6, + BTC_DBG_A2DP_EMPTY = 7, + BTC_DBG_BT_RETRY = 8, + BTC_DBG_BT_RELINK = 9, + BTC_DBG_SLOT_WL = 10, + BTC_DBG_SLOT_BT = 11, + BTC_DBG_WL_ERR = 12, + BTC_DBG_WL_OK = 13, + BTC_DBG_SLOT_B2W = 14, + BTC_DBG_SLOT_W1 = 15, + BTC_DBG_SLOT_W2 = 16, + BTC_DBG_SLOT_W2B = 17, + BTC_DBG_SLOT_B1 = 18, + BTC_DBG_SLOT_B2 = 19, + BTC_DBG_SLOT_B3 = 20, + BTC_DBG_SLOT_B4 = 21, + BTC_DBG_SLOT_LK = 22, + BTC_DBG_SLOT_E2G = 23, + BTC_DBG_SLOT_E5G = 24, + BTC_DBG_SLOT_EBT = 25, + BTC_DBG_SLOT_WLK = 26, + 
BTC_DBG_SLOT_B1FDD = 27, + BTC_DBG_BT_CHANGE = 28, + BTC_DBG_WL_CCA = 29, + BTC_DBG_BT_LEAUDIO = 30, + BTC_DBG_USER_DEF = 31, +}; + void rtw89_btc_ntfy_poweron(struct rtw89_dev *rtwdev); void rtw89_btc_ntfy_poweroff(struct rtw89_dev *rtwdev); void rtw89_btc_ntfy_init(struct rtw89_dev *rtwdev, u8 mode); @@ -261,4 +317,56 @@ static inline u16 rtw89_coex_query_bt_req_len(struct rtw89_dev *rtwdev, return btc->bt_req_len; } +static inline u32 rtw89_get_antpath_type(u8 phy_map, u8 type) +{ + return ((phy_map << 8) + type); +} + +static inline +void _slot_set_le(struct rtw89_btc *btc, u8 sid, __le16 dura, __le32 tbl, __le16 type) +{ + if (btc->ver->fcxslots == 1) { + btc->dm.slot.v1[sid].dur = dura; + btc->dm.slot.v1[sid].cxtbl = tbl; + btc->dm.slot.v1[sid].cxtype = type; + } else if (btc->ver->fcxslots == 7) { + btc->dm.slot.v7[sid].dur = dura; + btc->dm.slot.v7[sid].cxtype = type; + btc->dm.slot.v7[sid].cxtbl = tbl; + } +} + +static inline +void _slot_set(struct rtw89_btc *btc, u8 sid, u16 dura, u32 tbl, u16 type) +{ + _slot_set_le(btc, sid, cpu_to_le16(dura), cpu_to_le32(tbl), cpu_to_le16(type)); +} + +static inline +void _slot_set_dur(struct rtw89_btc *btc, u8 sid, u16 dura) +{ + if (btc->ver->fcxslots == 1) + btc->dm.slot.v1[sid].dur = cpu_to_le16(dura); + else if (btc->ver->fcxslots == 7) + btc->dm.slot.v7[sid].dur = cpu_to_le16(dura); +} + +static inline +void _slot_set_type(struct rtw89_btc *btc, u8 sid, u16 type) +{ + if (btc->ver->fcxslots == 1) + btc->dm.slot.v1[sid].cxtype = cpu_to_le16(type); + else if (btc->ver->fcxslots == 7) + btc->dm.slot.v7[sid].cxtype = cpu_to_le16(type); +} + +static inline +void _slot_set_tbl(struct rtw89_btc *btc, u8 sid, u32 tbl) +{ + if (btc->ver->fcxslots == 1) + btc->dm.slot.v1[sid].cxtbl = cpu_to_le32(tbl); + else if (btc->ver->fcxslots == 7) + btc->dm.slot.v7[sid].cxtbl = cpu_to_le32(tbl); +} + #endif diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c index d474b8d5df3d..ddc390d24ec1 100644 --- a/drivers/net/wireless/realtek/rtw89/core.c +++ b/drivers/net/wireless/realtek/rtw89/core.c @@ -18,6 +18,7 @@ #include "ser.h" #include "txrx.h" #include "util.h" +#include "wow.h" static bool rtw89_disable_ps_mode; module_param_named(disable_ps_mode, rtw89_disable_ps_mode, bool, 0644); @@ -82,6 +83,9 @@ static struct ieee80211_channel rtw89_channels_5ghz[] = { RTW89_DEF_CHAN_5G(5885, 177), }; +static_assert(RTW89_5GHZ_UNII4_START_INDEX + RTW89_5GHZ_UNII4_CHANNEL_NUM == + ARRAY_SIZE(rtw89_channels_5ghz)); + static struct ieee80211_channel rtw89_channels_6ghz[] = { RTW89_DEF_CHAN_6G(5955, 1), RTW89_DEF_CHAN_6G(5975, 5), @@ -252,6 +256,9 @@ static void rtw89_traffic_stats_accu(struct rtw89_dev *rtwdev, { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + if (tx && ieee80211_is_assoc_req(hdr->frame_control)) + rtw89_wow_parse_akm(rtwdev, skb); + if (!ieee80211_is_data(hdr->frame_control)) return; @@ -4069,6 +4076,24 @@ void rtw89_core_ntfy_btc_event(struct rtw89_dev *rtwdev, enum rtw89_btc_hmsg eve } } +void rtw89_check_quirks(struct rtw89_dev *rtwdev, const struct dmi_system_id *quirks) +{ + const struct dmi_system_id *match; + enum rtw89_quirks quirk; + + if (!quirks) + return; + + for (match = dmi_first_match(quirks); match; match = dmi_first_match(match + 1)) { + quirk = (uintptr_t)match->driver_data; + if (quirk >= NUM_OF_RTW89_QUIRKS) + continue; + + set_bit(quirk, rtwdev->quirks); + } +} +EXPORT_SYMBOL(rtw89_check_quirks); + int rtw89_core_start(struct rtw89_dev *rtwdev) { int ret; @@ -4486,7 
+4511,15 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev) hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS | WIPHY_FLAG_TDLS_EXTERNAL_SETUP | - WIPHY_FLAG_AP_UAPSD | WIPHY_FLAG_SPLIT_SCAN_6GHZ; + WIPHY_FLAG_AP_UAPSD | + WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK; + + if (!chip->support_rnr) + hw->wiphy->flags |= WIPHY_FLAG_SPLIT_SCAN_6GHZ; + + if (chip->chip_gen == RTW89_CHIP_BE) + hw->wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT; + hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; hw->wiphy->max_scan_ssids = RTW89_SCANOFLD_MAX_SSID; diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index 2e854c9af709..112bdd95fc6e 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -7,6 +7,7 @@ #include <linux/average.h> #include <linux/bitfield.h> +#include <linux/dmi.h> #include <linux/firmware.h> #include <linux/iopoll.h> #include <linux/workqueue.h> @@ -799,6 +800,7 @@ struct rtw89_rx_phy_ppdu { enum rtw89_mac_idx { RTW89_MAC_0 = 0, RTW89_MAC_1 = 1, + RTW89_MAC_NUM, }; enum rtw89_phy_idx { @@ -1178,9 +1180,13 @@ enum rtw89_btc_ncnt { BTC_NCNT_CUSTOMERIZE, BTC_NCNT_WL_RFK, BTC_NCNT_WL_STA, + BTC_NCNT_WL_STA_LAST, BTC_NCNT_FWINFO, BTC_NCNT_TIMER, - BTC_NCNT_NUM + BTC_NCNT_SWITCH_CHBW, + BTC_NCNT_RESUME_DL_FW, + BTC_NCNT_COUNTRYCODE, + BTC_NCNT_NUM, }; enum rtw89_btc_btinfo { @@ -1209,6 +1215,7 @@ enum rtw89_btc_dcnt { BTC_DCNT_TDMA_NONSYNC, BTC_DCNT_SLOT_NONSYNC, BTC_DCNT_BTCNT_HANG, + BTC_DCNT_BTTX_HANG, BTC_DCNT_WL_SLOT_DRIFT, BTC_DCNT_WL_STA_LAST, BTC_DCNT_BT_SLOT_DRIFT, @@ -1216,7 +1223,10 @@ enum rtw89_btc_dcnt { BTC_DCNT_FDDT_TRIG, BTC_DCNT_E2G, BTC_DCNT_E2G_HANG, - BTC_DCNT_NUM + BTC_DCNT_WL_FW_VER_MATCH, + BTC_DCNT_NULL_TX_FAIL, + BTC_DCNT_WL_STA_NTFY, + BTC_DCNT_NUM, }; enum rtw89_btc_wl_state_cnt { @@ -1230,6 +1240,13 @@ enum rtw89_btc_wl_state_cnt { BTC_WCNT_RFK_REJECT, BTC_WCNT_RFK_TIMEOUT, BTC_WCNT_CH_UPDATE, + BTC_WCNT_DBCC_ALL_2G, + BTC_WCNT_DBCC_CHG, + BTC_WCNT_RX_OK_LAST, + BTC_WCNT_RX_OK_LAST2S, + BTC_WCNT_RX_ERR_LAST, + BTC_WCNT_RX_ERR_LAST2S, + BTC_WCNT_RX_LAST, BTC_WCNT_NUM }; @@ -1253,8 +1270,10 @@ enum rtw89_btc_bt_state_cnt { BTC_BCNT_LOPRI_TX, BTC_BCNT_LOPRI_RX, BTC_BCNT_POLUT, + BTC_BCNT_POLUT_NOW, + BTC_BCNT_POLUT_DIFF, BTC_BCNT_RATECHG, - BTC_BCNT_NUM + BTC_BCNT_NUM, }; enum rtw89_btc_bt_profile { @@ -1299,6 +1318,7 @@ struct rtw89_btc_wl_smap { u32 scan: 1; u32 connecting: 1; u32 roaming: 1; + u32 transacting: 1; u32 _4way: 1; u32 rf_off: 1; u32 lps: 2; @@ -1307,6 +1327,8 @@ struct rtw89_btc_wl_smap { u32 traffic_dir : 2; u32 rf_off_pre: 1; u32 lps_pre: 2; + u32 lps_exiting: 1; + u32 emlsr: 1; }; enum rtw89_tfc_lv { @@ -1349,6 +1371,14 @@ struct rtw89_traffic_stats { u16 rx_rate; }; +struct rtw89_btc_chdef { + u8 center_ch; + u8 band; + u8 chan; + enum rtw89_sc_offset offset; + enum rtw89_bandwidth bw; +}; + struct rtw89_btc_statistic { u8 rssi; /* 0%~110% (dBm = rssi -110) */ struct rtw89_traffic_stats traffic; @@ -1357,6 +1387,7 @@ struct rtw89_btc_statistic { #define BTC_WL_RSSI_THMAX 4 struct rtw89_btc_wl_link_info { + struct rtw89_btc_chdef chdef; struct rtw89_btc_statistic stat; enum rtw89_tfc_dir dir; u8 rssi_state[BTC_WL_RSSI_THMAX]; @@ -1370,6 +1401,7 @@ struct rtw89_btc_wl_link_info { u8 phy; u8 dtim_period; u8 mode; + u8 tx_1ss_limit; u8 mac_id; u8 tx_retry; @@ -1379,6 +1411,7 @@ struct rtw89_btc_wl_link_info { u32 tx_time; u32 client_cnt; u32 rx_rate_drop_cnt; + u32 noa_duration; u32 active: 1; u32 noa: 1; @@ -1412,6 +1445,11 @@ struct 
rtw89_btc_bt_a2dp_desc { u8 type: 3; u8 active: 1; u8 sink: 1; + u32 handle_update: 1; + u32 devinfo_query: 1; + u32 no_empty_streak_2s: 8; + u32 no_empty_streak_max: 8; + u32 rsvd: 6; u8 bitpool; u16 vendor_id; @@ -1589,6 +1627,42 @@ struct rtw89_btc_wl_role_info_v2 { /* struct size must be n*4 bytes */ u32 rsvd: 27; }; +struct rtw89_btc_wl_rlink { /* H2C info, struct size must be n*4 bytes */ + u8 connected; + u8 pid; + u8 phy; + u8 noa; + + u8 rf_band; /* enum band_type RF band: 2.4G/5G/6G */ + u8 active; /* 0:rlink is under doze */ + u8 bw; /* enum channel_width */ + u8 role; /*enum role_type */ + + u8 ch; + u8 noa_dur; /* ms */ + u8 client_cnt; /* for Role = P2P-Go/AP */ + u8 mode; /* wifi protocol */ +} __packed; + +#define RTW89_BE_BTC_WL_MAX_ROLE_NUMBER 6 +struct rtw89_btc_wl_role_info_v8 { /* H2C info, struct size must be n*4 bytes */ + u8 connect_cnt; + u8 link_mode; + u8 link_mode_chg; + u8 p2p_2g; + + u8 pta_req_band; + u8 dbcc_en; /* 1+1 and 2.4G-included */ + u8 dbcc_chg; + u8 dbcc_2g_phy; /* which phy operate in 2G, HW_PHY_0 or HW_PHY_1 */ + + struct rtw89_btc_wl_rlink rlink[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER][RTW89_MAC_NUM]; + + u32 role_map; + u32 mrole_type; /* btc_wl_mrole_type */ + u32 mrole_noa_duration; /* ms */ +} __packed; + struct rtw89_btc_wl_ver_info { u32 fw_coex; /* match with which coex_ver */ u32 fw; @@ -1611,6 +1685,9 @@ struct rtw89_btc_wl_rfk_info { u32 band: 2; u32 type: 8; u32 rsvd: 14; + + u32 start_time; + u32 proc_time; }; struct rtw89_btc_bt_smap { @@ -1724,12 +1801,14 @@ struct rtw89_btc_wl_nhm { struct rtw89_btc_wl_info { struct rtw89_btc_wl_link_info link_info[RTW89_PORT_NUM]; + struct rtw89_btc_wl_link_info rlink_info[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER][RTW89_MAC_NUM]; struct rtw89_btc_wl_rfk_info rfk_info; struct rtw89_btc_wl_ver_info ver_info; struct rtw89_btc_wl_afh_info afh_info; struct rtw89_btc_wl_role_info role_info; struct rtw89_btc_wl_role_info_v1 role_info_v1; struct rtw89_btc_wl_role_info_v2 role_info_v2; + struct rtw89_btc_wl_role_info_v8 role_info_v8; struct rtw89_btc_wl_scan_info scan_info; struct rtw89_btc_wl_dbcc_info dbcc_info; struct rtw89_btc_rf_para rf_para; @@ -1740,9 +1819,14 @@ struct rtw89_btc_wl_info { u8 rssi_level; u8 cn_report; u8 coex_mode; + u8 pta_req_mac; + u8 bt_polut_type[RTW89_PHY_MAX]; /* BT polluted WL-Tx type for phy0/1 */ + bool is_5g_hi_channel; + bool pta_reg_mac_chg; bool bg_mode; bool scbd_change; + bool fw_ver_mismatch; u32 scbd; }; @@ -1870,9 +1954,18 @@ struct rtw89_btc_fbtc_btscan_v2 { struct rtw89_btc_bt_scan_info_v2 para[CXSCAN_MAX]; } __packed; +struct rtw89_btc_fbtc_btscan_v7 { + u8 fver; /* btc_ver::fcxbtscan */ + u8 type; + u8 rsvd0; + u8 rsvd1; + struct rtw89_btc_bt_scan_info_v2 para[CXSCAN_MAX]; +} __packed; + union rtw89_btc_fbtc_btscan { struct rtw89_btc_fbtc_btscan_v1 v1; struct rtw89_btc_fbtc_btscan_v2 v2; + struct rtw89_btc_fbtc_btscan_v7 v7; }; struct rtw89_btc_bt_info { @@ -2014,6 +2107,20 @@ struct rtw89_btc_fbtc_rpt_ctrl_info_v5 { __le16 cnt_aoac_rf_off; /* rf-off counter for aoac switch notify */ } __packed; +struct rtw89_btc_fbtc_rpt_ctrl_info_v8 { + __le16 cnt; /* fw report counter */ + __le16 cnt_c2h; /* fw send c2h counter */ + __le16 cnt_h2c; /* fw recv h2c counter */ + __le16 len_c2h; /* The total length of the last C2H */ + + __le16 cnt_aoac_rf_on; /* rf-on counter for aoac switch notify */ + __le16 cnt_aoac_rf_off; /* rf-off counter for aoac switch notify */ + + __le32 cx_ver; /* match which driver's coex version */ + __le32 fw_ver; + __le32 en; /* report map */ +} __packed; + 
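/*
 * Illustrative sketch (not part of this patch): the rtw89_btc_wl_rlink and
 * rtw89_btc_wl_role_info_v8 structures added above carry the comment
 * "struct size must be n*4 bytes" because the coexistence firmware consumes
 * the H2C payload as 32-bit words.  The stand-alone mock below (the mock_*
 * types and the MAX_ROLE_NUM / MAC_NUM constants are stand-ins for the
 * driver's RTW89_BE_BTC_WL_MAX_ROLE_NUMBER and RTW89_MAC_NUM) shows one way
 * such a size constraint can be checked at compile time with a C11
 * static_assert, which is the kind of invariant the in-kernel layout relies on.
 */
#include <assert.h>
#include <stdint.h>

#define MAX_ROLE_NUM 6	/* stand-in for RTW89_BE_BTC_WL_MAX_ROLE_NUMBER */
#define MAC_NUM      2	/* stand-in for RTW89_MAC_NUM */

struct mock_rlink {		/* mirrors rtw89_btc_wl_rlink: 12 bytes */
	uint8_t connected, pid, phy, noa;
	uint8_t rf_band, active, bw, role;
	uint8_t ch, noa_dur, client_cnt, mode;
} __attribute__((packed));

struct mock_role_info_v8 {	/* mirrors rtw89_btc_wl_role_info_v8 */
	uint8_t connect_cnt, link_mode, link_mode_chg, p2p_2g;
	uint8_t pta_req_band, dbcc_en, dbcc_chg, dbcc_2g_phy;
	struct mock_rlink rlink[MAX_ROLE_NUM][MAC_NUM];
	uint32_t role_map, mrole_type, mrole_noa_duration;
} __attribute__((packed));

/* 8 + (6 * 2 * 12) + 12 = 164 bytes, a multiple of 4 as the comment requires */
static_assert(sizeof(struct mock_role_info_v8) % 4 == 0,
	      "H2C role info must be a multiple of 4 bytes");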
struct rtw89_btc_fbtc_rpt_ctrl_wl_fw_info { __le32 cx_ver; /* match which driver's coex version */ __le32 cx_offload; @@ -2070,11 +2177,25 @@ struct rtw89_btc_fbtc_rpt_ctrl_v105 { struct rtw89_btc_fbtc_rpt_ctrl_bt_mailbox bt_mbx_info; } __packed; +struct rtw89_btc_fbtc_rpt_ctrl_v8 { + u8 fver; + u8 rsvd0; + u8 rpt_len_max_l; /* BTC_RPT_MAX bit0~7 */ + u8 rpt_len_max_h; /* BTC_RPT_MAX bit8~15 */ + + u8 gnt_val[RTW89_PHY_MAX][4]; + __le16 bt_cnt[BTC_BCNT_STA_MAX_V105]; + + struct rtw89_btc_fbtc_rpt_ctrl_info_v8 rpt_info; + struct rtw89_btc_fbtc_rpt_ctrl_bt_mailbox bt_mbx_info; +} __packed; + union rtw89_btc_fbtc_rpt_ctrl_ver_info { struct rtw89_btc_fbtc_rpt_ctrl_v1 v1; struct rtw89_btc_fbtc_rpt_ctrl_v4 v4; struct rtw89_btc_fbtc_rpt_ctrl_v5 v5; struct rtw89_btc_fbtc_rpt_ctrl_v105 v105; + struct rtw89_btc_fbtc_rpt_ctrl_v8 v8; }; enum rtw89_fbtc_ext_ctrl_type { @@ -2181,15 +2302,32 @@ enum rtw89_btc_afh_map_type { /*AFH MAP TYPE */ }; #define BTC_DBG_MAX1 32 -struct rtw89_btc_fbtc_gpio_dbg { +struct rtw89_btc_fbtc_gpio_dbg_v1 { u8 fver; /* btc_ver::fcxgpiodbg */ u8 rsvd; - u16 rsvd2; - u32 en_map; /* which debug signal (see btc_wl_gpio_debug) is enable */ - u32 pre_state; /* the debug signal is 1 or 0 */ + __le16 rsvd2; + __le32 en_map; /* which debug signal (see btc_wl_gpio_debug) is enable */ + __le32 pre_state; /* the debug signal is 1 or 0 */ u8 gpio_map[BTC_DBG_MAX1]; /*the debug signals to GPIO-Position */ } __packed; +struct rtw89_btc_fbtc_gpio_dbg_v7 { + u8 fver; + u8 rsvd0; + u8 rsvd1; + u8 rsvd2; + + u8 gpio_map[BTC_DBG_MAX1]; + + __le32 en_map; + __le32 pre_state; +} __packed; + +union rtw89_btc_fbtc_gpio_dbg { + struct rtw89_btc_fbtc_gpio_dbg_v1 v1; + struct rtw89_btc_fbtc_gpio_dbg_v7 v7; +}; + struct rtw89_btc_fbtc_mreg_val_v1 { u8 fver; /* btc_ver::fcxmreg */ u8 reg_num; @@ -2204,9 +2342,18 @@ struct rtw89_btc_fbtc_mreg_val_v2 { __le32 mreg_val[CXMREG_MAX_V2]; } __packed; +struct rtw89_btc_fbtc_mreg_val_v7 { + u8 fver; + u8 reg_num; + u8 rsvd0; + u8 rsvd1; + __le32 mreg_val[CXMREG_MAX_V2]; +} __packed; + union rtw89_btc_fbtc_mreg_val { struct rtw89_btc_fbtc_mreg_val_v1 v1; struct rtw89_btc_fbtc_mreg_val_v2 v2; + struct rtw89_btc_fbtc_mreg_val_v7 v7; }; #define RTW89_DEF_FBTC_MREG(__type, __bytes, __offset) \ @@ -2233,6 +2380,40 @@ struct rtw89_btc_fbtc_slots { struct rtw89_btc_fbtc_slot slot[CXST_MAX]; } __packed; +struct rtw89_btc_fbtc_slot_v7 { + __le16 dur; /* slot duration */ + __le16 cxtype; + __le32 cxtbl; +} __packed; + +struct rtw89_btc_fbtc_slot_u16 { + __le16 dur; /* slot duration */ + __le16 cxtype; + __le16 cxtbl_l16; /* coex table [15:0] */ + __le16 cxtbl_h16; /* coex table [31:16] */ +} __packed; + +struct rtw89_btc_fbtc_1slot_v7 { + u8 fver; + u8 sid; /* slot id */ + __le16 rsvd; + struct rtw89_btc_fbtc_slot_v7 slot; +} __packed; + +struct rtw89_btc_fbtc_slots_v7 { + u8 fver; + u8 slot_cnt; + u8 rsvd0; + u8 rsvd1; + struct rtw89_btc_fbtc_slot_u16 slot[CXST_MAX]; + __le32 update_map; +} __packed; + +union rtw89_btc_fbtc_slots_info { + struct rtw89_btc_fbtc_slots v1; + struct rtw89_btc_fbtc_slots_v7 v7; +} __packed; + struct rtw89_btc_fbtc_step { u8 type; u8 val; @@ -2339,6 +2520,12 @@ struct rtw89_btc_fbtc_cycle_leak_info { __le16 tmax; /* max leak-slot time */ } __packed; +struct rtw89_btc_fbtc_cycle_leak_info_v7 { + __le16 tavg; + __le16 tamx; + __le32 cnt_rximr; +} __packed; + #define RTW89_BTC_FDDT_PHASE_CYCLE GENMASK(9, 0) #define RTW89_BTC_FDDT_TRAIN_STEP GENMASK(15, 10) @@ -2451,11 +2638,36 @@ struct rtw89_btc_fbtc_cysta_v5 { /* statistics for cycles */ __le32 
except_map; } __packed; +struct rtw89_btc_fbtc_cysta_v7 { /* statistics for cycles */ + u8 fver; + u8 rsvd; + u8 collision_cnt; /* counter for event/timer occur at the same time */ + u8 except_cnt; + + u8 wl_rx_err_ratio[BTC_CYCLE_SLOT_MAX]; + + struct rtw89_btc_fbtc_a2dp_trx_stat_v4 a2dp_trx[BTC_CYCLE_SLOT_MAX]; + + __le16 skip_cnt; + __le16 cycles; /* total cycle number */ + + __le16 slot_step_time[BTC_CYCLE_SLOT_MAX]; /* record the wl/bt slot time */ + __le16 slot_cnt[CXST_MAX]; /* slot count */ + __le16 bcn_cnt[CXBCN_MAX]; + + struct rtw89_btc_fbtc_cycle_time_info_v5 cycle_time; + struct rtw89_btc_fbtc_cycle_a2dp_empty_info a2dp_ept; + struct rtw89_btc_fbtc_cycle_leak_info_v7 leak_slot; + + __le32 except_map; +} __packed; + union rtw89_btc_fbtc_cysta_info { struct rtw89_btc_fbtc_cysta_v2 v2; struct rtw89_btc_fbtc_cysta_v3 v3; struct rtw89_btc_fbtc_cysta_v4 v4; struct rtw89_btc_fbtc_cysta_v5 v5; + struct rtw89_btc_fbtc_cysta_v7 v7; }; struct rtw89_btc_fbtc_cynullsta_v1 { /* cycle null statistics */ @@ -2476,12 +2688,24 @@ struct rtw89_btc_fbtc_cynullsta_v2 { /* cycle null statistics */ __le32 result[2][5]; /* 0:fail, 1:ok, 2:on_time, 3:retry, 4:tx */ } __packed; +struct rtw89_btc_fbtc_cynullsta_v7 { /* cycle null statistics */ + u8 fver; + u8 rsvd0; + u8 rsvd1; + u8 rsvd2; + + __le32 tmax[2]; + __le32 tavg[2]; + __le32 result[2][5]; +} __packed; + union rtw89_btc_fbtc_cynullsta_info { struct rtw89_btc_fbtc_cynullsta_v1 v1; /* info from fw */ struct rtw89_btc_fbtc_cynullsta_v2 v2; + struct rtw89_btc_fbtc_cynullsta_v7 v7; }; -struct rtw89_btc_fbtc_btver { +struct rtw89_btc_fbtc_btver_v1 { u8 fver; /* btc_ver::fcxbtver */ u8 rsvd; __le16 rsvd2; @@ -2490,6 +2714,22 @@ struct rtw89_btc_fbtc_btver { __le32 feature; } __packed; +struct rtw89_btc_fbtc_btver_v7 { + u8 fver; + u8 rsvd0; + u8 rsvd1; + u8 rsvd2; + + __le32 coex_ver; /*bit[15:8]->shared, bit[7:0]->non-shared */ + __le32 fw_ver; + __le32 feature; +} __packed; + +union rtw89_btc_fbtc_btver { + struct rtw89_btc_fbtc_btver_v1 v1; + struct rtw89_btc_fbtc_btver_v7 v7; +} __packed; + struct rtw89_btc_fbtc_btafh { u8 fver; /* btc_ver::fcxbtafh */ u8 rsvd; @@ -2511,6 +2751,18 @@ struct rtw89_btc_fbtc_btafh_v2 { u8 afh_le_b[4]; } __packed; +struct rtw89_btc_fbtc_btafh_v7 { + u8 fver; + u8 map_type; + u8 rsvd0; + u8 rsvd1; + u8 afh_l[4]; /*bit0:2402, bit1:2403.... bit31:2433 */ + u8 afh_m[4]; /*bit0:2434, bit1:2435.... 
bit31:2465 */ + u8 afh_h[4]; /*bit0:2466, bit1:2467.....bit14:2480 */ + u8 afh_le_a[4]; + u8 afh_le_b[4]; +} __packed; + struct rtw89_btc_fbtc_btdevinfo { u8 fver; /* btc_ver::fcxbtdevinfo */ u8 rsvd; @@ -2551,9 +2803,14 @@ struct rtw89_btc_trx_info { u32 rx_err_ratio; }; +union rtw89_btc_fbtc_slot_u { + struct rtw89_btc_fbtc_slot v1[CXST_MAX]; + struct rtw89_btc_fbtc_slot_v7 v7[CXST_MAX]; +}; + struct rtw89_btc_dm { - struct rtw89_btc_fbtc_slot slot[CXST_MAX]; - struct rtw89_btc_fbtc_slot slot_now[CXST_MAX]; + union rtw89_btc_fbtc_slot_u slot; + union rtw89_btc_fbtc_slot_u slot_now; struct rtw89_btc_fbtc_tdma tdma; struct rtw89_btc_fbtc_tdma tdma_now; struct rtw89_mac_ax_coex_gnt gnt; @@ -2569,6 +2826,8 @@ struct rtw89_btc_dm { u32 update_slot_map; u32 set_ant_path; + u32 e2g_slot_limit; + u32 e2g_slot_nulltx_time; u32 wl_only: 1; u32 wl_fw_cx_offload: 1; @@ -2589,6 +2848,7 @@ struct rtw89_btc_dm { u32 wl_btg_rx_rb: 2; u16 slot_dur[CXST_MAX]; + u16 bt_slot_flood; u8 run_reason; u8 run_action; @@ -2596,6 +2856,8 @@ struct rtw89_btc_dm { u8 wl_pre_agc: 2; u8 wl_lna2: 1; u8 wl_pre_agc_rb: 2; + u8 bt_select: 2; /* 0:s0, 1:s1, 2:s0 & s1, refer to enum btc_bt_index */ + u8 slot_req_more: 1; }; struct rtw89_btc_ctrl { @@ -2643,6 +2905,7 @@ enum btf_fw_event_report { BTC_RPT_TYPE_CYSTA, BTC_RPT_TYPE_STEP, BTC_RPT_TYPE_NULLSTA, + BTC_RPT_TYPE_FDDT, /* added by ver->fwevntrptl == 1 */ BTC_RPT_TYPE_MREG, BTC_RPT_TYPE_GPIO_DBG, BTC_RPT_TYPE_BT_VER, @@ -2650,7 +2913,10 @@ enum btf_fw_event_report { BTC_RPT_TYPE_BT_AFH, BTC_RPT_TYPE_BT_DEVICE, BTC_RPT_TYPE_TEST, - BTC_RPT_TYPE_MAX = 31 + BTC_RPT_TYPE_MAX = 31, + + __BTC_RPT_TYPE_V0_SAME = BTC_RPT_TYPE_NULLSTA, + __BTC_RPT_TYPE_V0_MAX = 12, }; enum rtw_btc_btf_reg_type { @@ -2691,7 +2957,7 @@ struct rtw89_btc_rpt_fbtc_tdma { struct rtw89_btc_rpt_fbtc_slots { struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */ - struct rtw89_btc_fbtc_slots finfo; /* info from fw */ + union rtw89_btc_fbtc_slots_info finfo; /* info from fw */ }; struct rtw89_btc_rpt_fbtc_cysta { @@ -2716,12 +2982,12 @@ struct rtw89_btc_rpt_fbtc_mreg { struct rtw89_btc_rpt_fbtc_gpio_dbg { struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */ - struct rtw89_btc_fbtc_gpio_dbg finfo; /* info from fw */ + union rtw89_btc_fbtc_gpio_dbg finfo; /* info from fw */ }; struct rtw89_btc_rpt_fbtc_btver { struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */ - struct rtw89_btc_fbtc_btver finfo; /* info from fw */ + union rtw89_btc_fbtc_btver finfo; /* info from fw */ }; struct rtw89_btc_rpt_fbtc_btscan { @@ -2792,6 +3058,7 @@ struct rtw89_btc_ver { u8 fcxctrl; u8 fcxinit; + u8 fwevntrptl; u8 drvinfo_type; u16 info_buf; u8 max_role_num; @@ -2821,6 +3088,7 @@ struct rtw89_btc { u8 btg_pos; u16 policy_len; u16 policy_type; + u32 hubmsg_cnt; bool bt_req_en; bool update_policy_force; bool lps; @@ -3122,6 +3390,7 @@ struct rtw89_vif { u8 port; u8 mac_addr[ETH_ALEN]; u8 bssid[ETH_ALEN]; + __be32 ip_addr; u8 phy_idx; u8 mac_idx; u8 net_type; @@ -3879,6 +4148,7 @@ struct rtw89_chip_info { u8 support_bands; u16 support_bandwidths; bool support_unii4; + bool support_rnr; bool ul_tb_waveform_ctrl; bool ul_tb_pwr_diff; bool hw_sec_hdr; @@ -3977,6 +4247,7 @@ union rtw89_bus_info { struct rtw89_driver_info { const struct rtw89_chip_info *chip; + const struct dmi_system_id *quirks; union rtw89_bus_info bus; }; @@ -4324,6 +4595,12 @@ enum rtw89_flags { NUM_OF_RTW89_FLAGS, }; +enum rtw89_quirks { + RTW89_QUIRK_PCI_BER, + + NUM_OF_RTW89_QUIRKS, +}; + enum rtw89_pkt_drop_sel { 
RTW89_PKT_DROP_SEL_MACID_BE_ONCE, RTW89_PKT_DROP_SEL_MACID_BK_ONCE, @@ -4641,11 +4918,15 @@ struct rtw89_regd { }; #define RTW89_REGD_MAX_COUNTRY_NUM U8_MAX +#define RTW89_5GHZ_UNII4_CHANNEL_NUM 3 +#define RTW89_5GHZ_UNII4_START_INDEX 25 struct rtw89_regulatory_info { const struct rtw89_regd *regd; enum rtw89_reg_6ghz_power reg_6ghz_power; + DECLARE_BITMAP(block_unii4, RTW89_REGD_MAX_COUNTRY_NUM); DECLARE_BITMAP(block_6ghz, RTW89_REGD_MAX_COUNTRY_NUM); + DECLARE_BITMAP(block_6ghz_sp, RTW89_REGD_MAX_COUNTRY_NUM); }; enum rtw89_ifs_clm_application { @@ -4925,11 +5206,61 @@ struct rtw89_wow_cam_info { bool valid; }; +struct rtw89_wow_key_info { + u8 ptk_tx_iv[8]; + u8 valid_check; + u8 symbol_check_en; + u8 gtk_keyidx; + u8 rsvd[5]; + u8 ptk_rx_iv[8]; + u8 gtk_rx_iv[4][8]; +} __packed; + +struct rtw89_wow_gtk_info { + u8 kck[32]; + u8 kek[32]; + u8 tk1[16]; + u8 txmickey[8]; + u8 rxmickey[8]; + __le32 igtk_keyid; + __le64 ipn; + u8 igtk[2][32]; + u8 psk[32]; +} __packed; + +struct rtw89_wow_aoac_report { + u8 rpt_ver; + u8 sec_type; + u8 key_idx; + u8 pattern_idx; + u8 rekey_ok; + u8 ptk_tx_iv[8]; + u8 eapol_key_replay_count[8]; + u8 gtk[32]; + u8 ptk_rx_iv[8]; + u8 gtk_rx_iv[4][8]; + u64 igtk_key_id; + u64 igtk_ipn; + u8 igtk[32]; + u8 csa_pri_ch; + u8 csa_bw; + u8 csa_ch_offset; + u8 csa_chsw_failed; + u8 csa_ch_band; +}; + struct rtw89_wow_param { struct ieee80211_vif *wow_vif; DECLARE_BITMAP(flags, RTW89_WOW_FLAG_NUM); struct rtw89_wow_cam_info patterns[RTW89_MAX_PATTERN_NUM]; + struct rtw89_wow_key_info key_info; + struct rtw89_wow_gtk_info gtk_info; + struct rtw89_wow_aoac_report aoac_rpt; u8 pattern_cnt; + u8 ptk_alg; + u8 gtk_alg; + u8 ptk_keyidx; + u8 akm; }; struct rtw89_mcc_limit { @@ -5084,6 +5415,7 @@ struct rtw89_dev { DECLARE_BITMAP(mac_id_map, RTW89_MAX_MAC_ID_NUM); DECLARE_BITMAP(flags, NUM_OF_RTW89_FLAGS); DECLARE_BITMAP(pkt_offload, RTW89_MAX_PKT_OFLD_NUM); + DECLARE_BITMAP(quirks, NUM_OF_RTW89_QUIRKS); struct rtw89_phy_stat phystat; struct rtw89_rfk_wait_info rfk_wait; @@ -6129,6 +6461,7 @@ int rtw89_core_sta_remove(struct rtw89_dev *rtwdev, void rtw89_core_set_tid_config(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta, struct cfg80211_tid_config *tid_config); +void rtw89_check_quirks(struct rtw89_dev *rtwdev, const struct dmi_system_id *quirks); int rtw89_core_init(struct rtw89_dev *rtwdev); void rtw89_core_deinit(struct rtw89_dev *rtwdev); int rtw89_core_register(struct rtw89_dev *rtwdev); diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c index 185cd339c085..044a5b90c7f4 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.c +++ b/drivers/net/wireless/realtek/rtw89/fw.c @@ -2,6 +2,7 @@ /* Copyright(c) 2019-2020 Realtek Corporation */ +#include <linux/if_arp.h> #include "cam.h" #include "chan.h" #include "coex.h" @@ -13,6 +14,30 @@ #include "reg.h" #include "util.h" +struct rtw89_eapol_2_of_2 { + struct ieee80211_hdr_3addr hdr; + u8 gtkbody[14]; + u8 key_des_ver; + u8 rsvd[92]; +} __packed __aligned(2); + +struct rtw89_sa_query { + struct ieee80211_hdr_3addr hdr; + u8 category; + u8 action; +} __packed __aligned(2); + +struct rtw89_arp_rsp { + struct ieee80211_hdr_3addr addr; + u8 llc_hdr[sizeof(rfc1042_header)]; + __be16 llc_type; + struct arphdr arp_hdr; + u8 sender_hw[ETH_ALEN]; + __be32 sender_ip; + u8 target_hw[ETH_ALEN]; + __be32 target_ip; +} __packed __aligned(2); + static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C}; union rtw89_fw_element_arg { @@ -637,6 +662,7 @@ static const struct 
__fw_feat_cfg fw_feat_tbl[] = { __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER), __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP), __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD), + __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER), }; static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw, @@ -1349,13 +1375,12 @@ dump: static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev) { u32 val32; - u16 val16; val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL); rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32); - val16 = rtw89_read16(rtwdev, R_AX_BOOT_DBG + 2); - rtw89_err(rtwdev, "[ERR]fwdl 0x83F2 = 0x%x\n", val16); + val32 = rtw89_read32(rtwdev, R_AX_BOOT_DBG); + rtw89_err(rtwdev, "[ERR]fwdl 0x83F0 = 0x%x\n", val32); rtw89_fw_prog_cnt_dump(rtwdev); } @@ -1394,8 +1419,9 @@ static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev, return 0; } -int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, - bool include_bb) +static +int __rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, + bool include_bb) { const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; struct rtw89_fw_info *fw_info = &rtwdev->fw; @@ -1433,7 +1459,7 @@ int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE); if (ret) { rtw89_warn(rtwdev, "download firmware fail\n"); - return ret; + goto fwdl_err; } return ret; @@ -1443,6 +1469,21 @@ fwdl_err: return ret; } +int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, + bool include_bb) +{ + int retry; + int ret; + + for (retry = 0; retry < 5; retry++) { + ret = __rtw89_fw_download(rtwdev, type, include_bb); + if (!ret) + return 0; + } + + return ret; +} + int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev) { struct rtw89_fw_info *fw = &rtwdev->fw; @@ -1710,28 +1751,30 @@ fail: return ret; } -#define H2C_DCTL_SEC_CAM_LEN 68 int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, struct rtw89_sta *rtwsta) { + struct rtw89_h2c_dctlinfo_ud_v1 *h2c; + u32 len = sizeof(*h2c); struct sk_buff *skb; int ret; - skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DCTL_SEC_CAM_LEN); + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n"); return -ENOMEM; } - skb_put(skb, H2C_DCTL_SEC_CAM_LEN); + skb_put(skb, len); + h2c = (struct rtw89_h2c_dctlinfo_ud_v1 *)skb->data; - rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, skb->data); + rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, h2c); rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0, - H2C_DCTL_SEC_CAM_LEN); + len); ret = rtw89_h2c_tx(rtwdev, skb, false); if (ret) { @@ -2129,6 +2172,111 @@ fail: return ret; } +static struct sk_buff *rtw89_eapol_get(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif) +{ + static const u8 gtkbody[] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x88, + 0x8E, 0x01, 0x03, 0x00, 0x5F, 0x02, 0x03}; + struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); + struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + struct rtw89_eapol_2_of_2 *eapol_pkt; + struct sk_buff *skb; + u8 key_des_ver; + + if (rtw_wow->ptk_alg == 3) + key_des_ver = 1; + else if (rtw_wow->akm == 1 || rtw_wow->akm == 2) + key_des_ver = 2; + else if (rtw_wow->akm > 2 && rtw_wow->akm < 7) + key_des_ver = 3; + else + key_des_ver = 
0; + + skb = dev_alloc_skb(sizeof(*eapol_pkt)); + if (!skb) + return NULL; + + eapol_pkt = skb_put_zero(skb, sizeof(*eapol_pkt)); + eapol_pkt->hdr.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | + IEEE80211_FCTL_TODS | + IEEE80211_FCTL_PROTECTED); + ether_addr_copy(eapol_pkt->hdr.addr1, bss_conf->bssid); + ether_addr_copy(eapol_pkt->hdr.addr2, vif->addr); + ether_addr_copy(eapol_pkt->hdr.addr3, bss_conf->bssid); + memcpy(eapol_pkt->gtkbody, gtkbody, sizeof(gtkbody)); + eapol_pkt->key_des_ver = key_des_ver; + + return skb; +} + +static struct sk_buff *rtw89_sa_query_get(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif) +{ + struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); + struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; + struct rtw89_sa_query *sa_query; + struct sk_buff *skb; + + skb = dev_alloc_skb(sizeof(*sa_query)); + if (!skb) + return NULL; + + sa_query = skb_put_zero(skb, sizeof(*sa_query)); + sa_query->hdr.frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION | + IEEE80211_FCTL_PROTECTED); + ether_addr_copy(sa_query->hdr.addr1, bss_conf->bssid); + ether_addr_copy(sa_query->hdr.addr2, vif->addr); + ether_addr_copy(sa_query->hdr.addr3, bss_conf->bssid); + sa_query->category = WLAN_CATEGORY_SA_QUERY; + sa_query->action = WLAN_ACTION_SA_QUERY_RESPONSE; + + return skb; +} + +static struct sk_buff *rtw89_arp_response_get(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif) +{ + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + struct rtw89_arp_rsp *arp_skb; + struct arphdr *arp_hdr; + struct sk_buff *skb; + __le16 fc; + + skb = dev_alloc_skb(sizeof(struct rtw89_arp_rsp)); + if (!skb) + return NULL; + + arp_skb = skb_put_zero(skb, sizeof(*arp_skb)); + + if (rtw_wow->ptk_alg) + fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS | + IEEE80211_FCTL_PROTECTED); + else + fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS); + + arp_skb->addr.frame_control = fc; + ether_addr_copy(arp_skb->addr.addr1, rtwvif->bssid); + ether_addr_copy(arp_skb->addr.addr2, rtwvif->mac_addr); + ether_addr_copy(arp_skb->addr.addr3, rtwvif->bssid); + + memcpy(arp_skb->llc_hdr, rfc1042_header, sizeof(rfc1042_header)); + arp_skb->llc_type = htons(ETH_P_ARP); + + arp_hdr = &arp_skb->arp_hdr; + arp_hdr->ar_hrd = htons(ARPHRD_ETHER); + arp_hdr->ar_pro = htons(ETH_P_IP); + arp_hdr->ar_hln = ETH_ALEN; + arp_hdr->ar_pln = 4; + arp_hdr->ar_op = htons(ARPOP_REPLY); + + ether_addr_copy(arp_skb->sender_hw, rtwvif->mac_addr); + arp_skb->sender_ip = rtwvif->ip_addr; + + return skb; +} + static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, enum rtw89_fw_pkt_ofld_type type, @@ -2156,6 +2304,15 @@ static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev, case RTW89_PKT_OFLD_TYPE_QOS_NULL: skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, true); break; + case RTW89_PKT_OFLD_TYPE_EAPOL_KEY: + skb = rtw89_eapol_get(rtwdev, rtwvif); + break; + case RTW89_PKT_OFLD_TYPE_SA_QUERY: + skb = rtw89_sa_query_get(rtwdev, rtwvif); + break; + case RTW89_PKT_OFLD_TYPE_ARP_RSP: + skb = rtw89_arp_response_get(rtwdev, rtwvif); + break; default: goto err; } @@ -4120,6 +4277,48 @@ fail: return ret; } +int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type) +{ + struct rtw89_btc *btc = &rtwdev->btc; + struct rtw89_btc_wl_role_info_v8 *role = &btc->cx.wl.role_info_v8; + struct rtw89_h2c_cxrole_v8 *h2c; + u32 len = sizeof(*h2c); + struct sk_buff *skb; + int ret; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); + if (!skb) { + 
rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); + return -ENOMEM; + } + skb_put(skb, len); + h2c = (struct rtw89_h2c_cxrole_v8 *)skb->data; + + h2c->hdr.type = type; + h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; + memcpy(&h2c->_u8, role, sizeof(h2c->_u8)); + h2c->_u32.role_map = cpu_to_le32(role->role_map); + h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type); + h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration); + + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_OUTSRC, BTFC_SET, + SET_DRV_INFO, 0, 0, + len); + + ret = rtw89_h2c_tx(rtwdev, skb, false); + if (ret) { + rtw89_err(rtwdev, "failed to send h2c\n"); + goto fail; + } + + return 0; +fail: + dev_kfree_skb_any(skb); + + return ret; +} + #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR) int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type) { @@ -4631,6 +4830,10 @@ static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev, u8 i, idx; sband = rtwdev->hw->wiphy->bands[NL80211_BAND_6GHZ]; + if (!sband) { + option->prohib_chan = U64_MAX; + return; + } for (i = 0; i < sband->n_channels; i++) { chan = &sband->channels[i]; @@ -4650,6 +4853,7 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev, struct rtw89_h2c_scanofld_be_macc_role *macc_role; struct rtw89_chan *op = &scan_info->op_chan; struct rtw89_h2c_scanofld_be_opch *opch; + struct rtw89_pktofld_info *pkt_info; struct rtw89_h2c_scanofld_be *h2c; struct sk_buff *skb; u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role; @@ -4674,6 +4878,16 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev, h2c = (struct rtw89_h2c_scanofld_be *)skb->data; ptr = skb->data; + memset(probe_id, RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id)); + + list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) { + if (pkt_info->wildcard_6ghz) { + /* Provide wildcard as template */ + probe_id[NL80211_BAND_6GHZ] = pkt_info->id; + break; + } + } + h2c->w0 = le32_encode_bits(option->operation, RTW89_H2C_SCANOFLD_BE_W0_OP) | le32_encode_bits(option->scan_mode, RTW89_H2C_SCANOFLD_BE_W0_SCAN_MODE) | @@ -5511,6 +5725,7 @@ static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev, info->ssid_len = req->ssids[ssid_idx].ssid_len; return false; } else { + info->wildcard_6ghz = true; return true; } } @@ -5545,12 +5760,8 @@ static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev, goto out; } - if (rtw89_is_6ghz_wildcard_probe_req(rtwdev, rtwvif, info, band, - ssid_idx)) { - kfree_skb(new); - kfree(info); - goto out; - } + rtw89_is_6ghz_wildcard_probe_req(rtwdev, rtwvif, info, band, + ssid_idx); ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new); if (ret) { @@ -5708,6 +5919,10 @@ static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type, continue; else if (info->channel_6ghz && probe_count != 0) ch_info->period += RTW89_CHANNEL_TIME_6G; + + if (info->wildcard_6ghz) + continue; + ch_info->pkt_id[probe_count++] = info->id; if (probe_count >= RTW89_SCANOFLD_MAX_SSID) break; @@ -5762,6 +5977,10 @@ static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type, if (info->channel_6ghz && ch_info->pri_ch != info->channel_6ghz) continue; + + if (info->wildcard_6ghz) + continue; + ch_info->pkt_id[probe_count++] = info->id; if (probe_count >= RTW89_SCANOFLD_MAX_SSID) break; @@ -6228,6 +6447,57 @@ fail: return ret; } +int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, + bool enable) +{ + struct rtw89_h2c_arp_offload *h2c; + u32 len = 
sizeof(*h2c); + struct sk_buff *skb; + u8 pkt_id = 0; + int ret; + + if (enable) { + ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, + RTW89_PKT_OFLD_TYPE_ARP_RSP, + &pkt_id); + if (ret) + return ret; + } + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for arp offload\n"); + return -ENOMEM; + } + + skb_put(skb, len); + h2c = (struct rtw89_h2c_arp_offload *)skb->data; + + h2c->w0 = le32_encode_bits(enable, RTW89_H2C_ARP_OFFLOAD_W0_ENABLE) | + le32_encode_bits(0, RTW89_H2C_ARP_OFFLOAD_W0_ACTION) | + le32_encode_bits(rtwvif->mac_id, RTW89_H2C_ARP_OFFLOAD_W0_MACID) | + le32_encode_bits(pkt_id, RTW89_H2C_ARP_OFFLOAD_W0_PKT_ID); + + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_MAC, + H2C_CL_MAC_WOW, + H2C_FUNC_ARP_OFLD, 0, 1, + len); + + ret = rtw89_h2c_tx(rtwdev, skb, false); + if (ret) { + rtw89_err(rtwdev, "failed to send h2c\n"); + goto fail; + } + + return 0; + +fail: + dev_kfree_skb_any(skb); + + return ret; +} + #define H2C_DISCONNECT_DETECT_LEN 8 int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, bool enable) @@ -6273,30 +6543,38 @@ fail: return ret; } -#define H2C_WOW_GLOBAL_LEN 8 int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, bool enable) { - struct sk_buff *skb; + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + struct rtw89_h2c_wow_global *h2c; u8 macid = rtwvif->mac_id; + u32 len = sizeof(*h2c); + struct sk_buff *skb; int ret; - skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_GLOBAL_LEN); + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); if (!skb) { - rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); + rtw89_err(rtwdev, "failed to alloc skb for wow global\n"); return -ENOMEM; } - skb_put(skb, H2C_WOW_GLOBAL_LEN); + skb_put(skb, len); + h2c = (struct rtw89_h2c_wow_global *)skb->data; - RTW89_SET_WOW_GLOBAL_ENABLE(skb->data, enable); - RTW89_SET_WOW_GLOBAL_MAC_ID(skb->data, macid); + h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GLOBAL_W0_ENABLE) | + le32_encode_bits(macid, RTW89_H2C_WOW_GLOBAL_W0_MAC_ID) | + le32_encode_bits(rtw_wow->ptk_alg, + RTW89_H2C_WOW_GLOBAL_W0_PAIRWISE_SEC_ALGO) | + le32_encode_bits(rtw_wow->gtk_alg, + RTW89_H2C_WOW_GLOBAL_W0_GROUP_SEC_ALGO); + h2c->key_info = rtw_wow->key_info; rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, H2C_CAT_MAC, H2C_CL_MAC_WOW, H2C_FUNC_WOW_GLOBAL, 0, 1, - H2C_WOW_GLOBAL_LEN); + len); ret = rtw89_h2c_tx(rtwdev, skb, false); if (ret) { @@ -6324,7 +6602,7 @@ int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev, skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN); if (!skb) { - rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); + rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n"); return -ENOMEM; } @@ -6411,6 +6689,112 @@ fail: return ret; } +int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif, + bool enable) +{ + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + struct rtw89_wow_gtk_info *gtk_info = &rtw_wow->gtk_info; + struct rtw89_h2c_wow_gtk_ofld *h2c; + u8 macid = rtwvif->mac_id; + u32 len = sizeof(*h2c); + u8 pkt_id_sa_query = 0; + struct sk_buff *skb; + u8 pkt_id_eapol = 0; + int ret; + + if (!rtw_wow->gtk_alg) + return 0; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for gtk ofld\n"); + return -ENOMEM; + } + + skb_put(skb, len); + h2c = (struct rtw89_h2c_wow_gtk_ofld *)skb->data; + + if (!enable) { + skb_put_zero(skb, 
sizeof(*gtk_info)); + goto hdr; + } + + ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, + RTW89_PKT_OFLD_TYPE_EAPOL_KEY, + &pkt_id_eapol); + if (ret) + goto fail; + + if (gtk_info->igtk_keyid) { + ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, + RTW89_PKT_OFLD_TYPE_SA_QUERY, + &pkt_id_sa_query); + if (ret) + goto fail; + } + + /* not support TKIP yet */ + h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GTK_OFLD_W0_EN) | + le32_encode_bits(0, RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN) | + le32_encode_bits(gtk_info->igtk_keyid ? 1 : 0, + RTW89_H2C_WOW_GTK_OFLD_W0_IEEE80211W_EN) | + le32_encode_bits(macid, RTW89_H2C_WOW_GTK_OFLD_W0_MAC_ID) | + le32_encode_bits(pkt_id_eapol, RTW89_H2C_WOW_GTK_OFLD_W0_GTK_RSP_ID); + h2c->w1 = le32_encode_bits(gtk_info->igtk_keyid ? pkt_id_sa_query : 0, + RTW89_H2C_WOW_GTK_OFLD_W1_PMF_SA_QUERY_ID) | + le32_encode_bits(rtw_wow->akm, RTW89_H2C_WOW_GTK_OFLD_W1_ALGO_AKM_SUIT); + h2c->gtk_info = rtw_wow->gtk_info; + +hdr: + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_MAC, + H2C_CL_MAC_WOW, + H2C_FUNC_GTK_OFLD, 0, 1, + len); + + ret = rtw89_h2c_tx(rtwdev, skb, false); + if (ret) { + rtw89_err(rtwdev, "failed to send h2c\n"); + goto fail; + } + return 0; + +fail: + dev_kfree_skb_any(skb); + + return ret; +} + +int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev) +{ + struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; + struct rtw89_h2c_wow_aoac *h2c; + u32 len = sizeof(*h2c); + struct sk_buff *skb; + unsigned int cond; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for aoac\n"); + return -ENOMEM; + } + + skb_put(skb, len); + + /* This H2C only nofity firmware to generate AOAC report C2H, + * no need any parameter. + */ + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_MAC, + H2C_CL_MAC_WOW, + H2C_FUNC_AOAC_REPORT_REQ, 1, 0, + len); + + cond = RTW89_WOW_WAIT_COND(H2C_FUNC_AOAC_REPORT_REQ); + return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); +} + /* Return < 0, if failures happen during waiting for the condition. * Return 0, when waiting for the condition succeeds. 
* Return > 0, if the wait is considered unreachable due to driver/FW design, diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h index 44311f65b4fa..4151c9d566bd 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.h +++ b/drivers/net/wireless/realtek/rtw89/fw.h @@ -48,6 +48,32 @@ struct rtw89_c2hreg_phycap { #define RTW89_C2HREG_PHYCAP_W3_ANT_TX_NUM GENMASK(15, 8) #define RTW89_C2HREG_PHYCAP_W3_ANT_RX_NUM GENMASK(23, 16) +#define RTW89_C2HREG_AOAC_RPT_1_W0_KEY_IDX GENMASK(23, 16) +#define RTW89_C2HREG_AOAC_RPT_1_W1_IV_0 GENMASK(7, 0) +#define RTW89_C2HREG_AOAC_RPT_1_W1_IV_1 GENMASK(15, 8) +#define RTW89_C2HREG_AOAC_RPT_1_W1_IV_2 GENMASK(23, 16) +#define RTW89_C2HREG_AOAC_RPT_1_W1_IV_3 GENMASK(31, 24) +#define RTW89_C2HREG_AOAC_RPT_1_W2_IV_4 GENMASK(7, 0) +#define RTW89_C2HREG_AOAC_RPT_1_W2_IV_5 GENMASK(15, 8) +#define RTW89_C2HREG_AOAC_RPT_1_W2_IV_6 GENMASK(23, 16) +#define RTW89_C2HREG_AOAC_RPT_1_W2_IV_7 GENMASK(31, 24) +#define RTW89_C2HREG_AOAC_RPT_1_W3_PTK_IV_0 GENMASK(7, 0) +#define RTW89_C2HREG_AOAC_RPT_1_W3_PTK_IV_1 GENMASK(15, 8) +#define RTW89_C2HREG_AOAC_RPT_1_W3_PTK_IV_2 GENMASK(23, 16) +#define RTW89_C2HREG_AOAC_RPT_1_W3_PTK_IV_3 GENMASK(31, 24) +#define RTW89_C2HREG_AOAC_RPT_2_W0_PTK_IV_4 GENMASK(23, 16) +#define RTW89_C2HREG_AOAC_RPT_2_W0_PTK_IV_5 GENMASK(31, 24) +#define RTW89_C2HREG_AOAC_RPT_2_W1_PTK_IV_6 GENMASK(7, 0) +#define RTW89_C2HREG_AOAC_RPT_2_W1_PTK_IV_7 GENMASK(15, 8) +#define RTW89_C2HREG_AOAC_RPT_2_W1_IGTK_IPN_IV_0 GENMASK(23, 16) +#define RTW89_C2HREG_AOAC_RPT_2_W1_IGTK_IPN_IV_1 GENMASK(31, 24) +#define RTW89_C2HREG_AOAC_RPT_2_W2_IGTK_IPN_IV_2 GENMASK(7, 0) +#define RTW89_C2HREG_AOAC_RPT_2_W2_IGTK_IPN_IV_3 GENMASK(15, 8) +#define RTW89_C2HREG_AOAC_RPT_2_W2_IGTK_IPN_IV_4 GENMASK(23, 16) +#define RTW89_C2HREG_AOAC_RPT_2_W2_IGTK_IPN_IV_5 GENMASK(31, 24) +#define RTW89_C2HREG_AOAC_RPT_2_W3_IGTK_IPN_IV_6 GENMASK(7, 0) +#define RTW89_C2HREG_AOAC_RPT_2_W3_IGTK_IPN_IV_7 GENMASK(15, 8) + struct rtw89_h2creg_hdr { u32 w0; }; @@ -98,8 +124,11 @@ enum rtw89_mac_h2c_type { RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE, RTW89_FWCMD_H2CREG_FUNC_GETPKT_INFORM, RTW89_FWCMD_H2CREG_FUNC_SCH_TX_EN, - RTW89_FWCMD_H2CREG_FUNC_WOW_TRX_STOP = 0x6, - RTW89_FWCMD_H2CREG_FUNC_WOW_CPUIO_RX_CTRL = 0xA, + RTW89_FWCMD_H2CREG_FUNC_WOW_TRX_STOP, + RTW89_FWCMD_H2CREG_FUNC_AOAC_RPT_1, + RTW89_FWCMD_H2CREG_FUNC_AOAC_RPT_2, + RTW89_FWCMD_H2CREG_FUNC_AOAC_RPT_3_REQ, + RTW89_FWCMD_H2CREG_FUNC_WOW_CPUIO_RX_CTRL, }; enum rtw89_mac_c2h_type { @@ -340,8 +369,9 @@ struct rtw89_mac_chinfo_be { struct rtw89_pktofld_info { struct list_head list; u8 id; + bool wildcard_6ghz; - /* Below fields are for 6 GHz RNR use only */ + /* Below fields are for WiFi 6 chips 6 GHz RNR use only */ u8 ssid[IEEE80211_MAX_SSID_LEN]; u8 ssid_len; u8 bssid[ETH_ALEN]; @@ -1432,308 +1462,6 @@ struct rtw89_h2c_cctlinfo_ud_g7 { #define CCTLINFO_G7_W15_MGNT_CURR_RATE GENMASK(27, 16) #define CCTLINFO_G7_W15_ALL GENMASK(27, 0) -static inline void SET_DCTL_MACID_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 0, val, GENMASK(6, 0)); -} - -static inline void SET_DCTL_OPERATION_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 0, val, BIT(7)); -} - -#define SET_DCTL_MASK_QOS_FIELD_V1 GENMASK(7, 0) -static inline void SET_DCTL_QOS_FIELD_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(7, 0)); - le32p_replace_bits((__le32 *)(table) + 9, SET_DCTL_MASK_QOS_FIELD_V1, - GENMASK(7, 0)); -} - -#define SET_DCTL_MASK_SET_DCTL_HW_EXSEQ_MACID 
GENMASK(6, 0) -static inline void SET_DCTL_HW_EXSEQ_MACID_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(14, 8)); - le32p_replace_bits((__le32 *)(table) + 9, SET_DCTL_MASK_SET_DCTL_HW_EXSEQ_MACID, - GENMASK(14, 8)); -} - -#define SET_DCTL_MASK_QOS_DATA BIT(0) -static inline void SET_DCTL_QOS_DATA_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 1, val, BIT(15)); - le32p_replace_bits((__le32 *)(table) + 9, SET_DCTL_MASK_QOS_DATA, - BIT(15)); -} - -#define SET_DCTL_MASK_AES_IV_L GENMASK(15, 0) -static inline void SET_DCTL_AES_IV_L_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(31, 16)); - le32p_replace_bits((__le32 *)(table) + 9, SET_DCTL_MASK_AES_IV_L, - GENMASK(31, 16)); -} - -#define SET_DCTL_MASK_AES_IV_H GENMASK(31, 0) -static inline void SET_DCTL_AES_IV_H_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(31, 0)); - le32p_replace_bits((__le32 *)(table) + 10, SET_DCTL_MASK_AES_IV_H, - GENMASK(31, 0)); -} - -#define SET_DCTL_MASK_SEQ0 GENMASK(11, 0) -static inline void SET_DCTL_SEQ0_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(11, 0)); - le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_SEQ0, - GENMASK(11, 0)); -} - -#define SET_DCTL_MASK_SEQ1 GENMASK(11, 0) -static inline void SET_DCTL_SEQ1_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(23, 12)); - le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_SEQ1, - GENMASK(23, 12)); -} - -#define SET_DCTL_MASK_AMSDU_MAX_LEN GENMASK(2, 0) -static inline void SET_DCTL_AMSDU_MAX_LEN_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(26, 24)); - le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_AMSDU_MAX_LEN, - GENMASK(26, 24)); -} - -#define SET_DCTL_MASK_STA_AMSDU_EN BIT(0) -static inline void SET_DCTL_STA_AMSDU_EN_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 3, val, BIT(27)); - le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_STA_AMSDU_EN, - BIT(27)); -} - -#define SET_DCTL_MASK_CHKSUM_OFLD_EN BIT(0) -static inline void SET_DCTL_CHKSUM_OFLD_EN_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 3, val, BIT(28)); - le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_CHKSUM_OFLD_EN, - BIT(28)); -} - -#define SET_DCTL_MASK_WITH_LLC BIT(0) -static inline void SET_DCTL_WITH_LLC_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 3, val, BIT(29)); - le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_WITH_LLC, - BIT(29)); -} - -#define SET_DCTL_MASK_SEQ2 GENMASK(11, 0) -static inline void SET_DCTL_SEQ2_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(11, 0)); - le32p_replace_bits((__le32 *)(table) + 12, SET_DCTL_MASK_SEQ2, - GENMASK(11, 0)); -} - -#define SET_DCTL_MASK_SEQ3 GENMASK(11, 0) -static inline void SET_DCTL_SEQ3_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(23, 12)); - le32p_replace_bits((__le32 *)(table) + 12, SET_DCTL_MASK_SEQ3, - GENMASK(23, 12)); -} - -#define SET_DCTL_MASK_TGT_IND GENMASK(3, 0) -static inline void SET_DCTL_TGT_IND_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(27, 24)); - le32p_replace_bits((__le32 *)(table) + 12, SET_DCTL_MASK_TGT_IND, - GENMASK(27, 24)); -} - -#define SET_DCTL_MASK_TGT_IND_EN BIT(0) -static inline void SET_DCTL_TGT_IND_EN_V1(void *table, u32 val) -{ - 
le32p_replace_bits((__le32 *)(table) + 4, val, BIT(28)); - le32p_replace_bits((__le32 *)(table) + 12, SET_DCTL_MASK_TGT_IND_EN, - BIT(28)); -} - -#define SET_DCTL_MASK_HTC_LB GENMASK(2, 0) -static inline void SET_DCTL_HTC_LB_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(31, 29)); - le32p_replace_bits((__le32 *)(table) + 12, SET_DCTL_MASK_HTC_LB, - GENMASK(31, 29)); -} - -#define SET_DCTL_MASK_MHDR_LEN GENMASK(4, 0) -static inline void SET_DCTL_MHDR_LEN_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(4, 0)); - le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_MHDR_LEN, - GENMASK(4, 0)); -} - -#define SET_DCTL_MASK_VLAN_TAG_VALID BIT(0) -static inline void SET_DCTL_VLAN_TAG_VALID_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 5, val, BIT(5)); - le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_VLAN_TAG_VALID, - BIT(5)); -} - -#define SET_DCTL_MASK_VLAN_TAG_SEL GENMASK(1, 0) -static inline void SET_DCTL_VLAN_TAG_SEL_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(7, 6)); - le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_VLAN_TAG_SEL, - GENMASK(7, 6)); -} - -#define SET_DCTL_MASK_HTC_ORDER BIT(0) -static inline void SET_DCTL_HTC_ORDER_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 5, val, BIT(8)); - le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_HTC_ORDER, - BIT(8)); -} - -#define SET_DCTL_MASK_SEC_KEY_ID GENMASK(1, 0) -static inline void SET_DCTL_SEC_KEY_ID_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(10, 9)); - le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_KEY_ID, - GENMASK(10, 9)); -} - -#define SET_DCTL_MASK_WAPI BIT(0) -static inline void SET_DCTL_WAPI_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 5, val, BIT(15)); - le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_WAPI, - BIT(15)); -} - -#define SET_DCTL_MASK_SEC_ENT_MODE GENMASK(1, 0) -static inline void SET_DCTL_SEC_ENT_MODE_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(17, 16)); - le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENT_MODE, - GENMASK(17, 16)); -} - -#define SET_DCTL_MASK_SEC_ENTX_KEYID GENMASK(1, 0) -static inline void SET_DCTL_SEC_ENT0_KEYID_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(19, 18)); - le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID, - GENMASK(19, 18)); -} - -static inline void SET_DCTL_SEC_ENT1_KEYID_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(21, 20)); - le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID, - GENMASK(21, 20)); -} - -static inline void SET_DCTL_SEC_ENT2_KEYID_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(23, 22)); - le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID, - GENMASK(23, 22)); -} - -static inline void SET_DCTL_SEC_ENT3_KEYID_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(25, 24)); - le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID, - GENMASK(25, 24)); -} - -static inline void SET_DCTL_SEC_ENT4_KEYID_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(27, 26)); - le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID, - GENMASK(27, 26)); -} - -static inline void 
SET_DCTL_SEC_ENT5_KEYID_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(29, 28)); - le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID, - GENMASK(29, 28)); -} - -static inline void SET_DCTL_SEC_ENT6_KEYID_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(31, 30)); - le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID, - GENMASK(31, 30)); -} - -#define SET_DCTL_MASK_SEC_ENT_VALID GENMASK(7, 0) -static inline void SET_DCTL_SEC_ENT_VALID_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(7, 0)); - le32p_replace_bits((__le32 *)(table) + 14, SET_DCTL_MASK_SEC_ENT_VALID, - GENMASK(7, 0)); -} - -#define SET_DCTL_MASK_SEC_ENTX GENMASK(7, 0) -static inline void SET_DCTL_SEC_ENT0_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(15, 8)); - le32p_replace_bits((__le32 *)(table) + 14, SET_DCTL_MASK_SEC_ENTX, - GENMASK(15, 8)); -} - -static inline void SET_DCTL_SEC_ENT1_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(23, 16)); - le32p_replace_bits((__le32 *)(table) + 14, SET_DCTL_MASK_SEC_ENTX, - GENMASK(23, 16)); -} - -static inline void SET_DCTL_SEC_ENT2_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(31, 24)); - le32p_replace_bits((__le32 *)(table) + 14, SET_DCTL_MASK_SEC_ENTX, - GENMASK(31, 24)); -} - -static inline void SET_DCTL_SEC_ENT3_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(7, 0)); - le32p_replace_bits((__le32 *)(table) + 15, SET_DCTL_MASK_SEC_ENTX, - GENMASK(7, 0)); -} - -static inline void SET_DCTL_SEC_ENT4_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(15, 8)); - le32p_replace_bits((__le32 *)(table) + 15, SET_DCTL_MASK_SEC_ENTX, - GENMASK(15, 8)); -} - -static inline void SET_DCTL_SEC_ENT5_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(23, 16)); - le32p_replace_bits((__le32 *)(table) + 15, SET_DCTL_MASK_SEC_ENTX, - GENMASK(23, 16)); -} - -static inline void SET_DCTL_SEC_ENT6_V1(void *table, u32 val) -{ - le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(31, 24)); - le32p_replace_bits((__le32 *)(table) + 15, SET_DCTL_MASK_SEC_ENTX, - GENMASK(31, 24)); -} - struct rtw89_h2c_bcn_upd { __le32 w0; __le32 w1; @@ -2157,45 +1885,18 @@ static inline void RTW89_SET_DISCONNECT_DETECT_TRYOK_BCNFAIL_COUNT_LIMIT(void *h le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(7, 0)); } -static inline void RTW89_SET_WOW_GLOBAL_ENABLE(void *h2c, u32 val) -{ - le32p_replace_bits((__le32 *)h2c, val, BIT(0)); -} - -static inline void RTW89_SET_WOW_GLOBAL_DROP_ALL_PKT(void *h2c, u32 val) -{ - le32p_replace_bits((__le32 *)h2c, val, BIT(1)); -} - -static inline void RTW89_SET_WOW_GLOBAL_RX_PARSE_AFTER_WAKE(void *h2c, u32 val) -{ - le32p_replace_bits((__le32 *)h2c, val, BIT(2)); -} - -static inline void RTW89_SET_WOW_GLOBAL_WAKE_BAR_PULLED(void *h2c, u32 val) -{ - le32p_replace_bits((__le32 *)h2c, val, BIT(3)); -} - -static inline void RTW89_SET_WOW_GLOBAL_MAC_ID(void *h2c, u32 val) -{ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8)); -} - -static inline void RTW89_SET_WOW_GLOBAL_PAIRWISE_SEC_ALGO(void *h2c, u32 val) -{ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 16)); -} - -static inline void RTW89_SET_WOW_GLOBAL_GROUP_SEC_ALGO(void *h2c, u32 val) -{ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 24)); -} +struct 
rtw89_h2c_wow_global { + __le32 w0; + struct rtw89_wow_key_info key_info; +} __packed; -static inline void RTW89_SET_WOW_GLOBAL_REMOTECTRL_INFO_CONTENT(void *h2c, u32 val) -{ - le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(31, 0)); -} +#define RTW89_H2C_WOW_GLOBAL_W0_ENABLE BIT(0) +#define RTW89_H2C_WOW_GLOBAL_W0_DROP_ALL_PKT BIT(1) +#define RTW89_H2C_WOW_GLOBAL_W0_RX_PARSE_AFTER_WAKE BIT(2) +#define RTW89_H2C_WOW_GLOBAL_W0_WAKE_BAR_PULLED BIT(3) +#define RTW89_H2C_WOW_GLOBAL_W0_MAC_ID GENMASK(15, 8) +#define RTW89_H2C_WOW_GLOBAL_W0_PAIRWISE_SEC_ALGO GENMASK(23, 16) +#define RTW89_H2C_WOW_GLOBAL_W0_GROUP_SEC_ALGO GENMASK(31, 24) static inline void RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(void *h2c, u32 val) { @@ -2307,6 +2008,34 @@ static inline void RTW89_SET_WOW_CAM_UPD_VALID(void *h2c, u32 val) le32p_replace_bits((__le32 *)h2c + 5, val, BIT(31)); } +struct rtw89_h2c_wow_gtk_ofld { + __le32 w0; + __le32 w1; + struct rtw89_wow_gtk_info gtk_info; +} __packed; + +#define RTW89_H2C_WOW_GTK_OFLD_W0_EN BIT(0) +#define RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN BIT(1) +#define RTW89_H2C_WOW_GTK_OFLD_W0_IEEE80211W_EN BIT(2) +#define RTW89_H2C_WOW_GTK_OFLD_W0_PAIRWISE_WAKEUP BIT(3) +#define RTW89_H2C_WOW_GTK_OFLD_W0_NOREKEY_WAKEUP BIT(4) +#define RTW89_H2C_WOW_GTK_OFLD_W0_MAC_ID GENMASK(23, 16) +#define RTW89_H2C_WOW_GTK_OFLD_W0_GTK_RSP_ID GENMASK(31, 24) +#define RTW89_H2C_WOW_GTK_OFLD_W1_PMF_SA_QUERY_ID GENMASK(7, 0) +#define RTW89_H2C_WOW_GTK_OFLD_W1_PMF_BIP_SEC_ALGO GENMASK(9, 8) +#define RTW89_H2C_WOW_GTK_OFLD_W1_ALGO_AKM_SUIT GENMASK(17, 10) + +struct rtw89_h2c_arp_offload { + __le32 w0; + __le32 w1; +} __packed; + +#define RTW89_H2C_ARP_OFFLOAD_W0_ENABLE BIT(0) +#define RTW89_H2C_ARP_OFFLOAD_W0_ACTION BIT(1) +#define RTW89_H2C_ARP_OFFLOAD_W0_MACID GENMASK(23, 16) +#define RTW89_H2C_ARP_OFFLOAD_W0_PKT_ID GENMASK(31, 24) +#define RTW89_H2C_ARP_OFFLOAD_W1_CONTENT GENMASK(31, 0) + enum rtw89_btc_btf_h2c_class { BTFC_SET = 0x10, BTFC_GET = 0x11, @@ -2395,6 +2124,32 @@ struct rtw89_h2c_cxctrl_v7 { #define H2C_LEN_CXDRVHDR sizeof(struct rtw89_h2c_cxhdr) #define H2C_LEN_CXDRVHDR_V7 sizeof(struct rtw89_h2c_cxhdr_v7) +struct rtw89_btc_wl_role_info_v8_u8 { + u8 connect_cnt; + u8 link_mode; + u8 link_mode_chg; + u8 p2p_2g; + + u8 pta_req_band; + u8 dbcc_en; + u8 dbcc_chg; + u8 dbcc_2g_phy; + + struct rtw89_btc_wl_rlink rlink[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER][RTW89_MAC_NUM]; +} __packed; + +struct rtw89_btc_wl_role_info_v8_u32 { + __le32 role_map; + __le32 mrole_type; + __le32 mrole_noa_duration; +} __packed; + +struct rtw89_h2c_cxrole_v8 { + struct rtw89_h2c_cxhdr hdr; + struct rtw89_btc_wl_role_info_v8_u8 _u8; + struct rtw89_btc_wl_role_info_v8_u32 _u32; +} __packed; + struct rtw89_h2c_cxinit { struct rtw89_h2c_cxhdr hdr; u8 ant_type; @@ -2955,6 +2710,7 @@ struct rtw89_h2c_scanofld_be { __le32 w5; __le32 w6; __le32 w7; + __le32 w8; struct rtw89_h2c_scanofld_be_macc_role role[]; } __packed; @@ -3636,6 +3392,10 @@ struct rtw89_h2c_mrc_upd_duration { #define RTW89_H2C_MRC_UPD_DURATION_SLOT_SLOT_IDX GENMASK(7, 0) #define RTW89_H2C_MRC_UPD_DURATION_SLOT_DURATION GENMASK(31, 16) +struct rtw89_h2c_wow_aoac { + __le32 w0; +} __packed; + #define RTW89_C2H_HEADER_LEN 8 struct rtw89_c2h_hdr { @@ -3864,6 +3624,30 @@ struct rtw89_c2h_pkt_ofld_rsp { #define RTW89_C2H_PKT_OFLD_RSP_W2_PTK_OP GENMASK(10, 8) #define RTW89_C2H_PKT_OFLD_RSP_W2_PTK_LEN GENMASK(31, 16) +struct rtw89_c2h_wow_aoac_report { + struct rtw89_c2h_hdr c2h_hdr; + u8 rpt_ver; + u8 sec_type; + u8 key_idx; + u8 pattern_idx; + u8 rekey_ok; + u8 
rsvd1[3]; + u8 ptk_tx_iv[8]; + u8 eapol_key_replay_count[8]; + u8 gtk[32]; + u8 ptk_rx_iv[8]; + u8 gtk_rx_iv[4][8]; + __le64 igtk_key_id; + __le64 igtk_ipn; + u8 igtk[32]; + u8 csa_pri_ch; + u8 csa_bw_ch_offset; + u8 csa_ch_band_chsw_failed; + u8 csa_rsvd1; +} __packed; + +#define RTW89_C2H_WOW_AOAC_RPT_REKEY_IDX BIT(0) + struct rtw89_h2c_bcnfltr { __le32 w0; } __packed; @@ -4141,11 +3925,21 @@ struct rtw89_fw_h2c_rf_reg_info { /* CLASS 1 - WOW */ #define H2C_CL_MAC_WOW 0x1 -#define H2C_FUNC_KEEP_ALIVE 0x0 -#define H2C_FUNC_DISCONNECT_DETECT 0x1 -#define H2C_FUNC_WOW_GLOBAL 0x2 -#define H2C_FUNC_WAKEUP_CTRL 0x8 -#define H2C_FUNC_WOW_CAM_UPD 0xC +enum rtw89_wow_h2c_func { + H2C_FUNC_KEEP_ALIVE = 0x0, + H2C_FUNC_DISCONNECT_DETECT = 0x1, + H2C_FUNC_WOW_GLOBAL = 0x2, + H2C_FUNC_GTK_OFLD = 0x3, + H2C_FUNC_ARP_OFLD = 0x4, + H2C_FUNC_WAKEUP_CTRL = 0x8, + H2C_FUNC_WOW_CAM_UPD = 0xC, + H2C_FUNC_AOAC_REPORT_REQ = 0xD, + + NUM_OF_RTW89_WOW_H2C_FUNC, +}; + +#define RTW89_WOW_WAIT_COND(func) \ + (NUM_OF_RTW89_WOW_H2C_FUNC + (func)) /* CLASS 2 - PS */ #define H2C_CL_MAC_PS 0x2 @@ -4568,6 +4362,7 @@ int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type); int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type); int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type); int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type); +int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type); int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type); int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type); int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev, u8 type); @@ -4653,6 +4448,8 @@ int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, bool enable); int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, bool enable); +int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif, bool enable); int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, bool enable); int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, @@ -4661,6 +4458,10 @@ int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, bool enable); int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev, struct rtw89_wow_cam_info *cam_info); +int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif, + bool enable); +int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev); int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev, const struct rtw89_fw_mcc_add_req *p); int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev, diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index aa5b396b5d2b..3fe0046f6eaa 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -3644,6 +3644,7 @@ static int set_host_rpr_ax(struct rtw89_dev *rtwdev) static int trx_init_ax(struct rtw89_dev *rtwdev) { + enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; enum rtw89_qta_mode qta_mode = rtwdev->mac.qta_mode; int ret; @@ -3687,6 +3688,10 @@ static int trx_init_ax(struct rtw89_dev *rtwdev) return ret; } + if (chip_id == RTL8852C) + rtw89_write32_clr(rtwdev, R_AX_RSP_CHK_SIG, + B_AX_RSP_STATIC_RTS_CHK_SERV_BW_EN); + return 0; } @@ -5132,6 +5137,37 @@ rtw89_mac_c2h_mrc_tsf_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len } static void +rtw89_mac_c2h_wow_aoac_rpt(struct rtw89_dev *rtwdev, struct sk_buff *skb, u32 len) +{ + struct 
rtw89_wow_param *rtw_wow = &rtwdev->wow; + struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt; + struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; + const struct rtw89_c2h_wow_aoac_report *c2h = + (const struct rtw89_c2h_wow_aoac_report *)skb->data; + struct rtw89_completion_data data = {}; + unsigned int cond; + + aoac_rpt->rpt_ver = c2h->rpt_ver; + aoac_rpt->sec_type = c2h->sec_type; + aoac_rpt->key_idx = c2h->key_idx; + aoac_rpt->pattern_idx = c2h->pattern_idx; + aoac_rpt->rekey_ok = u8_get_bits(c2h->rekey_ok, + RTW89_C2H_WOW_AOAC_RPT_REKEY_IDX); + memcpy(aoac_rpt->ptk_tx_iv, c2h->ptk_tx_iv, sizeof(aoac_rpt->ptk_tx_iv)); + memcpy(aoac_rpt->eapol_key_replay_count, c2h->eapol_key_replay_count, + sizeof(aoac_rpt->eapol_key_replay_count)); + memcpy(aoac_rpt->gtk, c2h->gtk, sizeof(aoac_rpt->gtk)); + memcpy(aoac_rpt->ptk_rx_iv, c2h->ptk_rx_iv, sizeof(aoac_rpt->ptk_rx_iv)); + memcpy(aoac_rpt->gtk_rx_iv, c2h->gtk_rx_iv, sizeof(aoac_rpt->gtk_rx_iv)); + aoac_rpt->igtk_key_id = le64_to_cpu(c2h->igtk_key_id); + aoac_rpt->igtk_ipn = le64_to_cpu(c2h->igtk_ipn); + memcpy(aoac_rpt->igtk, c2h->igtk, sizeof(aoac_rpt->igtk)); + + cond = RTW89_WOW_WAIT_COND(H2C_FUNC_AOAC_REPORT_REQ); + rtw89_complete_cond(wait, cond, &data); +} + +static void rtw89_mac_c2h_mrc_status_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) { struct rtw89_wait_info *wait = &rtwdev->mcc.wait; @@ -5218,6 +5254,12 @@ void (* const rtw89_mac_c2h_mrc_handler[])(struct rtw89_dev *rtwdev, [RTW89_MAC_C2H_FUNC_MRC_STATUS_RPT] = rtw89_mac_c2h_mrc_status_rpt, }; +static +void (* const rtw89_mac_c2h_wow_handler[])(struct rtw89_dev *rtwdev, + struct sk_buff *c2h, u32 len) = { + [RTW89_MAC_C2H_FUNC_AOAC_REPORT] = rtw89_mac_c2h_wow_aoac_rpt, +}; + static void rtw89_mac_c2h_scanofld_rsp_atomic(struct rtw89_dev *rtwdev, struct sk_buff *skb) { @@ -5270,6 +5312,8 @@ bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, struct sk_buff *c2h, return true; case RTW89_MAC_C2H_CLASS_MRC: return true; + case RTW89_MAC_C2H_CLASS_WOW: + return true; } } @@ -5296,6 +5340,10 @@ void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb, if (func < NUM_OF_RTW89_MAC_C2H_FUNC_MRC) handler = rtw89_mac_c2h_mrc_handler[func]; break; + case RTW89_MAC_C2H_CLASS_WOW: + if (func < NUM_OF_RTW89_MAC_C2H_FUNC_WOW) + handler = rtw89_mac_c2h_wow_handler[func]; + break; case RTW89_MAC_C2H_CLASS_FWDBG: return; default: @@ -5705,7 +5753,7 @@ bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev) const struct rtw89_chip_info *chip = rtwdev->chip; u8 val = 0; - if (chip->chip_id == RTL8852C) + if (chip->chip_id == RTL8852C || chip->chip_id == RTL8922A) return false; else if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B || chip->chip_id == RTL8851B) diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h index 6fb457153a11..a580cb719233 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.h +++ b/drivers/net/wireless/realtek/rtw89/mac.h @@ -419,6 +419,13 @@ enum rtw89_mac_c2h_mrc_func { NUM_OF_RTW89_MAC_C2H_FUNC_MRC, }; +enum rtw89_mac_c2h_wow_func { + RTW89_MAC_C2H_FUNC_AOAC_REPORT, + RTW89_MAC_C2H_FUNC_READ_WOW_CAM, + + NUM_OF_RTW89_MAC_C2H_FUNC_WOW, +}; + enum rtw89_mac_c2h_class { RTW89_MAC_C2H_CLASS_INFO = 0x0, RTW89_MAC_C2H_CLASS_OFLD = 0x1, diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c index 31d1ffb16e83..1ec97250e88e 100644 --- a/drivers/net/wireless/realtek/rtw89/mac80211.c +++ 
b/drivers/net/wireless/realtek/rtw89/mac80211.c @@ -318,7 +318,7 @@ static u8 rtw89_aifsn_to_aifs(struct rtw89_dev *rtwdev, u8 sifs; slot_time = vif->bss_conf.use_short_slot ? 9 : 20; - sifs = chan->band_type == RTW89_BAND_5G ? 16 : 10; + sifs = chan->band_type == RTW89_BAND_2G ? 10 : 16; return aifsn * slot_time + sifs; } @@ -473,6 +473,9 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_PS) rtw89_recalc_lps(rtwdev); + if (changed & BSS_CHANGED_ARP_FILTER) + rtwvif->ip_addr = vif->cfg.arp_addr_list[0]; + mutex_unlock(&rtwdev->mutex); } @@ -1106,6 +1109,28 @@ static void rtw89_ops_set_wakeup(struct ieee80211_hw *hw, bool enabled) device_set_wakeup_enable(rtwdev->dev, enabled); } + +static void rtw89_set_rekey_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *data) +{ + struct rtw89_dev *rtwdev = hw->priv; + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + struct rtw89_wow_gtk_info *gtk_info = &rtw_wow->gtk_info; + + if (data->kek_len > sizeof(gtk_info->kek) || + data->kck_len > sizeof(gtk_info->kck)) { + rtw89_warn(rtwdev, "kek or kck length over fw limit\n"); + return; + } + + mutex_lock(&rtwdev->mutex); + + memcpy(gtk_info->kek, data->kek, data->kek_len); + memcpy(gtk_info->kck, data->kck, data->kck_len); + + mutex_unlock(&rtwdev->mutex); +} #endif const struct ieee80211_ops rtw89_ops = { @@ -1151,6 +1176,7 @@ const struct ieee80211_ops rtw89_ops = { .suspend = rtw89_ops_suspend, .resume = rtw89_ops_resume, .set_wakeup = rtw89_ops_set_wakeup, + .set_rekey_data = rtw89_set_rekey_data, #endif }; EXPORT_SYMBOL(rtw89_ops); diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c index f16467377eab..934bdf3b398f 100644 --- a/drivers/net/wireless/realtek/rtw89/mac_be.c +++ b/drivers/net/wireless/realtek/rtw89/mac_be.c @@ -1751,6 +1751,7 @@ static int set_host_rpr_be(struct rtw89_dev *rtwdev) static int trx_init_be(struct rtw89_dev *rtwdev) { + enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; enum rtw89_qta_mode qta_mode = rtwdev->mac.qta_mode; int ret; @@ -1794,6 +1795,10 @@ static int trx_init_be(struct rtw89_dev *rtwdev) return ret; } + if (chip_id == RTL8922A) + rtw89_write32_clr(rtwdev, R_BE_RSP_CHK_SIG, + B_BE_RSP_STATIC_RTS_CHK_SERV_BW_EN); + return 0; } diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index 19001130ad94..7b00476a5dee 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -19,6 +19,31 @@ MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support"); MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support"); MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support"); +static int rtw89_pci_get_phy_offset_by_link_speed(struct rtw89_dev *rtwdev, + u32 *phy_offset) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + struct pci_dev *pdev = rtwpci->pdev; + u32 val; + int ret; + + ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val); + if (ret) + return ret; + + val = u32_get_bits(val, RTW89_BCFG_LINK_SPEED_MASK); + if (val == RTW89_PCIE_GEN1_SPEED) { + *phy_offset = R_RAC_DIRECT_OFFSET_G1; + } else if (val == RTW89_PCIE_GEN2_SPEED) { + *phy_offset = R_RAC_DIRECT_OFFSET_G2; + } else { + rtw89_warn(rtwdev, "Unknown PCI link speed %d\n", val); + return -EFAULT; + } + + return 0; +} + static int rtw89_pci_rst_bdram_ax(struct rtw89_dev *rtwdev) { u32 val; @@ -1089,7 +1114,8 @@ u32 
__rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev, spin_lock_bh(&rtwpci->trx_lock); cnt = rtw89_pci_get_avail_txbd_num(tx_ring); - cnt = min(cnt, wd_ring->curr_num); + if (txch != RTW89_TXCH_CH12) + cnt = min(cnt, wd_ring->curr_num); spin_unlock_bh(&rtwpci->trx_lock); return cnt; @@ -2298,6 +2324,68 @@ static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev) return 0; } +static void rtw89_pci_disable_eq(struct rtw89_dev *rtwdev) +{ + u16 g1_oobs, g2_oobs; + u32 backup_aspm; + u32 phy_offset; + u16 oobs_val; + u16 val16; + int ret; + + if (rtwdev->chip->chip_id != RTL8852C) + return; + + backup_aspm = rtw89_read32(rtwdev, R_AX_PCIE_MIX_CFG_V1); + rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK); + + g1_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 + + RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL); + g2_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 + + RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL); + if (g1_oobs && g2_oobs) + goto out; + + ret = rtw89_pci_get_phy_offset_by_link_speed(rtwdev, &phy_offset); + if (ret) + goto out; + + rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0D * RAC_MULT, BAC_RX_TEST_EN); + rtw89_write16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT, ADDR_SEL_PINOUT_DIS_VAL); + rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, B_PCIE_BIT_RD_SEL); + + val16 = rtw89_read16_mask(rtwdev, phy_offset + RAC_ANA1F * RAC_MULT, + OOBS_LEVEL_MASK); + oobs_val = u16_encode_bits(val16, OOBS_SEN_MASK); + + rtw89_write16(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA03 * RAC_MULT, oobs_val); + rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA09 * RAC_MULT, + BAC_OOBS_SEL); + + rtw89_write16(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA03 * RAC_MULT, oobs_val); + rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA09 * RAC_MULT, + BAC_OOBS_SEL); + +out: + rtw89_write32(rtwdev, R_AX_PCIE_MIX_CFG_V1, backup_aspm); +} + +static void rtw89_pci_ber(struct rtw89_dev *rtwdev) +{ + u32 phy_offset; + + if (!test_bit(RTW89_QUIRK_PCI_BER, rtwdev->quirks)) + return; + + phy_offset = R_RAC_DIRECT_OFFSET_G1; + rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G1_VAL); + rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL); + + phy_offset = R_RAC_DIRECT_OFFSET_G2; + rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G2_VAL); + rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL); +} + static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev) { if (rtwdev->chip->chip_id != RTL8852A) @@ -2695,6 +2783,8 @@ static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev) const struct rtw89_pci_info *info = rtwdev->pci_info; int ret; + rtw89_pci_disable_eq(rtwdev); + rtw89_pci_ber(rtwdev); rtw89_pci_rxdma_prefth(rtwdev); rtw89_pci_l1off_pwroff(rtwdev); rtw89_pci_deglitch_setting(rtwdev); @@ -4171,6 +4261,8 @@ int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) rtwdev->hci.rpwm_addr = pci_info->rpwm_addr; rtwdev->hci.cpwm_addr = pci_info->cpwm_addr; + rtw89_check_quirks(rtwdev, info->quirks); + SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev); ret = rtw89_core_init(rtwdev); diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h index a63b6b7c9bfa..7666753ae983 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.h +++ b/drivers/net/wireless/realtek/rtw89/pci.h @@ -12,11 +12,18 @@ #define MDIO_PG0_G2 2 #define MDIO_PG1_G2 3 #define RAC_CTRL_PPR 0x00 +#define RAC_ANA03 0x03 +#define OOBS_SEN_MASK GENMASK(5, 1) +#define 
RAC_ANA09 0x09 +#define BAC_OOBS_SEL BIT(4) #define RAC_ANA0A 0x0A #define B_BAC_EQ_SEL BIT(5) #define RAC_ANA0C 0x0C #define B_PCIE_BIT_PSAVE BIT(15) +#define RAC_ANA0D 0x0D +#define BAC_RX_TEST_EN BIT(6) #define RAC_ANA10 0x10 +#define ADDR_SEL_PINOUT_DIS_VAL 0x3C4 #define B_PCIE_BIT_PINOUT_DIS BIT(3) #define RAC_REG_REV2 0x1B #define BAC_CMU_EN_DLY_MASK GENMASK(15, 12) @@ -26,11 +33,17 @@ #define RAC_REG_FLD_0 0x1D #define BAC_AUTOK_N_MASK GENMASK(3, 2) #define PCIE_AUTOK_4 0x3 +#define RAC_ANA1E 0x1E +#define RAC_ANA1E_G1_VAL 0x66EA +#define RAC_ANA1E_G2_VAL 0x6EEA #define RAC_ANA1F 0x1F +#define OOBS_LEVEL_MASK GENMASK(12, 8) #define RAC_ANA24 0x24 #define B_AX_DEGLITCH GENMASK(11, 8) #define RAC_ANA26 0x26 #define B_AX_RXEN GENMASK(15, 14) +#define RAC_ANA2E 0x2E +#define RAC_ANA2E_VAL 0xFFFE #define RAC_CTRL_PPR_V1 0x30 #define B_AX_CLK_CALIB_EN BIT(12) #define B_AX_CALIB_EN BIT(13) diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c index 12da63d64307..a82b4c56a6f4 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.c +++ b/drivers/net/wireless/realtek/rtw89/phy.c @@ -122,6 +122,7 @@ static u64 get_eht_ra_mask(struct ieee80211_sta *sta) struct ieee80211_sta_eht_cap *eht_cap = &sta->deflink.eht_cap; struct ieee80211_eht_mcs_nss_supp_20mhz_only *mcs_nss_20mhz; struct ieee80211_eht_mcs_nss_supp_bw *mcs_nss; + u8 *he_phy_cap = sta->deflink.he_cap.he_cap_elem.phy_cap_info; switch (sta->deflink.bandwidth) { case IEEE80211_STA_RX_BW_320: @@ -132,15 +133,19 @@ static u64 get_eht_ra_mask(struct ieee80211_sta *sta) mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._160; /* MCS 9, 11, 13 */ return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3); + case IEEE80211_STA_RX_BW_20: + if (!(he_phy_cap[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) { + mcs_nss_20mhz = &eht_cap->eht_mcs_nss_supp.only_20mhz; + /* MCS 7, 9, 11, 13 */ + return get_eht_mcs_ra_mask(mcs_nss_20mhz->rx_tx_max_nss, 7, 4); + } + fallthrough; case IEEE80211_STA_RX_BW_80: default: mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._80; /* MCS 9, 11, 13 */ return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3); - case IEEE80211_STA_RX_BW_20: - mcs_nss_20mhz = &eht_cap->eht_mcs_nss_supp.only_20mhz; - /* MCS 7, 9, 11, 13 */ - return get_eht_mcs_ra_mask(mcs_nss_20mhz->rx_tx_max_nss, 7, 4); } } @@ -6398,10 +6403,8 @@ enum rtw89_rf_path_bit rtw89_phy_get_kpath(struct rtw89_dev *rtwdev, return RF_D; case MLO_0_PLUS_2_1RF: case MLO_2_PLUS_0_1RF: - if (phy_idx == RTW89_PHY_0) - return RF_AB; - else - return RF_AB; + /* for both PHY 0/1 */ + return RF_AB; case MLO_0_PLUS_2_2RF: case MLO_2_PLUS_0_2RF: case MLO_2_PLUS_2_2RF: diff --git a/drivers/net/wireless/realtek/rtw89/phy_be.c b/drivers/net/wireless/realtek/rtw89/phy_be.c index be0148f2b96f..72eda9bbd3ae 100644 --- a/drivers/net/wireless/realtek/rtw89/phy_be.c +++ b/drivers/net/wireless/realtek/rtw89/phy_be.c @@ -381,6 +381,23 @@ static void rtw89_phy_bb_wrap_ftm_init(struct rtw89_dev *rtwdev, rtw89_write32_mask(rtwdev, addr, 0x7, 0); } +static void rtw89_phy_bb_wrap_ul_pwr(struct rtw89_dev *rtwdev) +{ + enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; + u8 mac_idx; + u32 addr; + + if (chip_id != RTL8922A) + return; + + for (mac_idx = 0; mac_idx < RTW89_MAC_NUM; mac_idx++) { + addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_RSSI_TARGET_LMT, mac_idx); + rtw89_write32(rtwdev, addr, 0x0201FE00); + addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_TH, mac_idx); + rtw89_write32(rtwdev, addr, 0x00FFEC7E); + } +} + static void 
rtw89_phy_bb_wrap_init_be(struct rtw89_dev *rtwdev) { enum rtw89_mac_idx mac_idx = RTW89_MAC_0; @@ -391,6 +408,7 @@ static void rtw89_phy_bb_wrap_init_be(struct rtw89_dev *rtwdev) rtw89_phy_bb_wrap_force_cr_init(rtwdev, mac_idx); rtw89_phy_bb_wrap_ftm_init(rtwdev, mac_idx); rtw89_phy_bb_wrap_tpu_set_all(rtwdev, mac_idx); + rtw89_phy_bb_wrap_ul_pwr(rtwdev); } static void rtw89_phy_ch_info_init_be(struct rtw89_dev *rtwdev) diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c index 31290d8cb7f7..92074b73ebeb 100644 --- a/drivers/net/wireless/realtek/rtw89/ps.c +++ b/drivers/net/wireless/realtek/rtw89/ps.c @@ -55,7 +55,8 @@ static void rtw89_ps_power_mode_change_with_hci(struct rtw89_dev *rtwdev, static void rtw89_ps_power_mode_change(struct rtw89_dev *rtwdev, bool enter) { - if (rtwdev->chip->low_power_hci_modes & BIT(rtwdev->ps_mode)) + if (rtwdev->chip->low_power_hci_modes & BIT(rtwdev->ps_mode) && + !test_bit(RTW89_FLAG_WOWLAN, rtwdev->flags)) rtw89_ps_power_mode_change_with_hci(rtwdev, enter); else rtw89_mac_power_mode_change(rtwdev, enter); diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index 72e448e91b6f..01cbd0312102 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -235,6 +235,9 @@ #define R_AX_SPSANA_ON_CTRL1 0x0224 +#define R_AX_SPS_ANA_ON_CTRL2 0x0228 +#define RTL8852B_RFE_05_SPS_ANA 0x4A82 + #define R_AX_WLAN_XTAL_SI_CTRL 0x0270 #define B_AX_WL_XTAL_SI_CMD_POLL BIT(31) #define B_AX_BT_XTAL_SI_ERR_FLAG BIT(30) @@ -1891,7 +1894,6 @@ B_AX_B0_IMR_ERR_USRCTL_NOINIT | \ B_AX_B0_IMR_ERR_CMDPSR_1STCMDERR | \ B_AX_B0_IMR_ERR_CMDPSR_CMDTYPE | \ - B_AX_B0_IMR_ERR_CMDPSR_FRZTO | \ B_AX_B0_IMR_ERR_CMDPSR_TBLSZ | \ B_AX_B0_IMR_ERR_MPDUINFO_RECFG | \ B_AX_B0_IMR_ERR_MPDUIF_DATAERR | \ @@ -7497,6 +7499,9 @@ #define B_BE_PWR_BT_VAL GENMASK(8, 0) #define B_BE_PWR_FORCE_COEX_ON GENMASK(29, 27) +#define R_BE_PWR_TH 0x11A78 +#define R_BE_PWR_RSSI_TARGET_LMT 0x11A84 + #define R_BE_PWR_OFST_SW 0x11AE8 #define B_BE_PWR_OFST_SW_DB GENMASK(27, 24) diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c index d0857ef60ea6..1a133914f673 100644 --- a/drivers/net/wireless/realtek/rtw89/regd.c +++ b/drivers/net/wireless/realtek/rtw89/regd.c @@ -341,51 +341,60 @@ do { \ static void rtw89_regd_setup_unii4(struct rtw89_dev *rtwdev, struct wiphy *wiphy) { + struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; const struct rtw89_chip_info *chip = rtwdev->chip; - bool regd_allow_unii_4 = chip->support_unii4; struct ieee80211_supported_band *sband; struct rtw89_acpi_dsm_result res = {}; + bool enable_by_fcc; + bool enable_by_ic; int ret; u8 val; + int i; - if (!chip->support_unii4) - goto bottom; + sband = wiphy->bands[NL80211_BAND_5GHZ]; + if (!sband) + return; + + if (!chip->support_unii4) { + sband->n_channels -= RTW89_5GHZ_UNII4_CHANNEL_NUM; + return; + } + + bitmap_fill(regulatory->block_unii4, RTW89_REGD_MAX_COUNTRY_NUM); - ret = rtw89_acpi_evaluate_dsm(rtwdev, RTW89_ACPI_DSM_FUNC_59G_EN, &res); + ret = rtw89_acpi_evaluate_dsm(rtwdev, RTW89_ACPI_DSM_FUNC_UNII4_SUP, &res); if (ret) { rtw89_debug(rtwdev, RTW89_DBG_REGD, "acpi: cannot eval unii 4: %d\n", ret); + enable_by_fcc = true; + enable_by_ic = false; goto bottom; } val = res.u.value; + enable_by_fcc = u8_get_bits(val, RTW89_ACPI_CONF_UNII4_FCC); + enable_by_ic = u8_get_bits(val, RTW89_ACPI_CONF_UNII4_IC); rtw89_debug(rtwdev, RTW89_DBG_REGD, - "acpi: eval if 
allow unii 4: %d\n", val); - - switch (val) { - case 0: - regd_allow_unii_4 = false; - break; - case 1: - regd_allow_unii_4 = true; - break; - default: - break; - } + "acpi: eval if allow unii-4: 0x%x\n", val); bottom: - rtw89_debug(rtwdev, RTW89_DBG_REGD, "regd: allow unii 4: %d\n", - regd_allow_unii_4); - - if (regd_allow_unii_4) - return; - - sband = wiphy->bands[NL80211_BAND_5GHZ]; - if (!sband) - return; + for (i = 0; i < ARRAY_SIZE(rtw89_regd_map); i++) { + const struct rtw89_regd *regd = &rtw89_regd_map[i]; - sband->n_channels -= 3; + switch (regd->txpwr_regd[RTW89_BAND_5G]) { + case RTW89_FCC: + if (enable_by_fcc) + clear_bit(i, regulatory->block_unii4); + break; + case RTW89_IC: + if (enable_by_ic) + clear_bit(i, regulatory->block_unii4); + break; + default: + break; + } + } } static void __rtw89_regd_setup_policy_6ghz(struct rtw89_dev *rtwdev, bool block, @@ -459,6 +468,51 @@ out: kfree(ptr); } +static void rtw89_regd_setup_policy_6ghz_sp(struct rtw89_dev *rtwdev) +{ + struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; + const struct rtw89_acpi_policy_6ghz_sp *ptr; + struct rtw89_acpi_dsm_result res = {}; + bool enable_by_us; + int ret; + int i; + + ret = rtw89_acpi_evaluate_dsm(rtwdev, RTW89_ACPI_DSM_FUNC_6GHZ_SP_SUP, &res); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_REGD, + "acpi: cannot eval policy 6ghz-sp: %d\n", ret); + return; + } + + ptr = res.u.policy_6ghz_sp; + + switch (ptr->override) { + default: + rtw89_debug(rtwdev, RTW89_DBG_REGD, + "%s: unknown override case: %d\n", __func__, + ptr->override); + fallthrough; + case 0: + goto out; + case 1: + break; + } + + bitmap_fill(regulatory->block_6ghz_sp, RTW89_REGD_MAX_COUNTRY_NUM); + + enable_by_us = u8_get_bits(ptr->conf, RTW89_ACPI_CONF_6GHZ_SP_US); + + for (i = 0; i < ARRAY_SIZE(rtw89_regd_map); i++) { + const struct rtw89_regd *tmp = &rtw89_regd_map[i]; + + if (enable_by_us && memcmp(tmp->alpha2, "US", 2) == 0) + clear_bit(i, regulatory->block_6ghz_sp); + } + +out: + kfree(ptr); +} + static void rtw89_regd_setup_6ghz(struct rtw89_dev *rtwdev, struct wiphy *wiphy) { const struct rtw89_chip_info *chip = rtwdev->chip; @@ -501,6 +555,7 @@ bottom: if (regd_allow_6ghz) { rtw89_regd_setup_policy_6ghz(rtwdev); + rtw89_regd_setup_policy_6ghz_sp(rtwdev); return; } @@ -562,20 +617,47 @@ int rtw89_regd_init(struct rtw89_dev *rtwdev, return 0; } -static void rtw89_regd_apply_policy_6ghz(struct rtw89_dev *rtwdev, - struct wiphy *wiphy) +static void rtw89_regd_apply_policy_unii4(struct rtw89_dev *rtwdev, + struct wiphy *wiphy) { struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; + const struct rtw89_chip_info *chip = rtwdev->chip; const struct rtw89_regd *regd = regulatory->regd; struct ieee80211_supported_band *sband; u8 index; int i; + sband = wiphy->bands[NL80211_BAND_5GHZ]; + if (!sband) + return; + + if (!chip->support_unii4) + return; + index = rtw89_regd_get_index(regd); - if (index == RTW89_REGD_MAX_COUNTRY_NUM) + if (index != RTW89_REGD_MAX_COUNTRY_NUM && + !test_bit(index, regulatory->block_unii4)) return; - if (!test_bit(index, regulatory->block_6ghz)) + rtw89_debug(rtwdev, RTW89_DBG_REGD, "%c%c unii-4 is blocked by policy\n", + regd->alpha2[0], regd->alpha2[1]); + + for (i = RTW89_5GHZ_UNII4_START_INDEX; i < sband->n_channels; i++) + sband->channels[i].flags |= IEEE80211_CHAN_DISABLED; +} + +static void rtw89_regd_apply_policy_6ghz(struct rtw89_dev *rtwdev, + struct wiphy *wiphy) +{ + struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; + const struct rtw89_regd *regd = 
regulatory->regd; + struct ieee80211_supported_band *sband; + u8 index; + int i; + + index = rtw89_regd_get_index(regd); + if (index != RTW89_REGD_MAX_COUNTRY_NUM && + !test_bit(index, regulatory->block_6ghz)) return; rtw89_debug(rtwdev, RTW89_DBG_REGD, "%c%c 6 GHz is blocked by policy\n", @@ -604,6 +686,7 @@ static void rtw89_regd_notifier_apply(struct rtw89_dev *rtwdev, else wiphy->regulatory_flags &= ~REGULATORY_COUNTRY_IE_IGNORE; + rtw89_regd_apply_policy_unii4(rtwdev, wiphy); rtw89_regd_apply_policy_6ghz(rtwdev, wiphy); } @@ -634,10 +717,12 @@ exit: static void __rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev) { struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; + const struct rtw89_regd *regd = regulatory->regd; enum rtw89_reg_6ghz_power sel; const struct rtw89_chan *chan; struct rtw89_vif *rtwvif; int count = 0; + u8 index; rtw89_for_each_rtwvif(rtwdev, rtwvif) { chan = rtw89_chan_get(rtwdev, rtwvif->sub_entity_idx); @@ -654,6 +739,17 @@ static void __rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev) if (count != 1) sel = RTW89_REG_6GHZ_POWER_DFLT; + if (sel == RTW89_REG_6GHZ_POWER_STD) { + index = rtw89_regd_get_index(regd); + if (index == RTW89_REGD_MAX_COUNTRY_NUM || + test_bit(index, regulatory->block_6ghz_sp)) { + rtw89_debug(rtwdev, RTW89_DBG_REGD, + "%c%c 6 GHz SP is blocked by policy\n", + regd->alpha2[0], regd->alpha2[1]); + sel = RTW89_REG_6GHZ_POWER_DFLT; + } + } + if (regulatory->reg_6ghz_power == sel) return; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c index 51d3e61eaa1d..87b51823244d 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c @@ -2320,6 +2320,7 @@ static int rtw8851b_mac_disable_bb_rf(struct rtw89_dev *rtwdev) u8 wl_rfc_s1; int ret; + rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG); rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN, B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN); @@ -2447,6 +2448,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = { .dig_regs = &rtw8851b_dig_regs, .tssi_dbw_table = NULL, .support_chanctx_num = 0, + .support_rnr = false, .support_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), .support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) | diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851be.c b/drivers/net/wireless/realtek/rtw89/rtw8851be.c index ca1374a71727..ec3629d95fda 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8851be.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8851be.c @@ -63,6 +63,7 @@ static const struct rtw89_pci_info rtw8851b_pci_info = { static const struct rtw89_driver_info rtw89_8851be_info = { .chip = &rtw8851b_chip_info, + .quirks = NULL, .bus = { .pci = &rtw8851b_pci_info, }, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c index 2deadec715cf..e93cee1456bd 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c @@ -2163,6 +2163,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = { .dig_regs = &rtw8852a_dig_regs, .tssi_dbw_table = NULL, .support_chanctx_num = 1, + .support_rnr = false, .support_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), .support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) | diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c index 7c6ffedb77e2..fdee5dd4ba14 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c +++ 
b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c @@ -61,6 +61,7 @@ static const struct rtw89_pci_info rtw8852a_pci_info = { static const struct rtw89_driver_info rtw89_8852ae_info = { .chip = &rtw8852a_chip_info, + .quirks = NULL, .bus = { .pci = &rtw8852a_pci_info, }, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c index d025c4135e1c..d351096fa4b4 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c @@ -390,6 +390,14 @@ static const struct rtw89_btc_fbtc_mreg rtw89_btc_8852b_mon_reg[] = { static const u8 rtw89_btc_8852b_wl_rssi_thres[BTC_WL_RSSI_THMAX] = {70, 60, 50, 40}; static const u8 rtw89_btc_8852b_bt_rssi_thres[BTC_BT_RSSI_THMAX] = {50, 40, 30, 20}; +static void rtw8852b_pwr_sps_ana(struct rtw89_dev *rtwdev) +{ + struct rtw89_efuse *efuse = &rtwdev->efuse; + + if (efuse->rfe_type == 0x5) + rtw89_write16(rtwdev, R_AX_SPS_ANA_ON_CTRL2, RTL8852B_RFE_05_SPS_ANA); +} + static int rtw8852b_pwr_on_func(struct rtw89_dev *rtwdev) { u32 val32; @@ -522,6 +530,10 @@ static int rtw8852b_pwr_off_func(struct rtw89_dev *rtwdev) u32 val32; u32 ret; + /* Only do once during probe stage after reading efuse */ + if (!test_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags)) + rtw8852b_pwr_sps_ana(rtwdev); + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_RFC2RF, XTAL_SI_RFC2RF); if (ret) @@ -550,6 +562,7 @@ static int rtw8852b_pwr_off_func(struct rtw89_dev *rtwdev) return ret; rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_EN_WLON); + rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG); rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN, B_AX_FEN_BB_GLB_RSTN | B_AX_FEN_BBRSTB); rtw89_write32_clr(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_RFC_1P3); @@ -2469,6 +2482,7 @@ static int rtw8852b_mac_disable_bb_rf(struct rtw89_dev *rtwdev) u8 wl_rfc_s1; int ret; + rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG); rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN, B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN); @@ -2597,6 +2611,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = { .dig_regs = &rtw8852b_dig_regs, .tssi_dbw_table = NULL, .support_chanctx_num = 0, + .support_rnr = false, .support_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), .support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) | diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852be.c b/drivers/net/wireless/realtek/rtw89/rtw8852be.c index ed71364e6437..5f941122655c 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852be.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852be.c @@ -63,6 +63,7 @@ static const struct rtw89_pci_info rtw8852b_pci_info = { static const struct rtw89_driver_info rtw89_8852be_info = { .chip = &rtw8852b_chip_info, + .quirks = NULL, .bus = { .pci = &rtw8852b_pci_info, }, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 17e6164855fa..3571b41786d7 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -203,6 +203,9 @@ static int rtw8852c_pwr_on_func(struct rtw89_dev *rtwdev) rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APDM_HPDN); rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_SWLPS); + rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0, + B_AX_OCP_L1_MASK, 0x7); + ret = read_poll_timeout(rtw89_read32, val32, val32 & B_AX_RDY_SYSPWR, 1000, 20000, false, rtwdev, R_AX_SYS_PW_CTRL); if (ret) @@ -266,7 +269,7 @@ static int rtw8852c_pwr_on_func(struct rtw89_dev 
*rtwdev) ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_SRAM2RFC); if (ret) return ret; - ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_XMD_2, 0, XTAL_SI_LDO_LPS); + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_XMD_2, 0x10, XTAL_SI_LDO_LPS); if (ret) return ret; ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_XMD_4, 0, XTAL_SI_LPS_CAP); @@ -338,6 +341,7 @@ static int rtw8852c_pwr_off_func(struct rtw89_dev *rtwdev) return ret; rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_EN_WLON); + rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG); rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN, B_AX_FEN_BB_GLB_RSTN | B_AX_FEN_BBRSTB); rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND, B_AX_R_SYM_FEN_WLBBGLB_1 | B_AX_R_SYM_FEN_WLBBFUN_1); @@ -360,8 +364,11 @@ static int rtw8852c_pwr_off_func(struct rtw89_dev *rtwdev) if (ret) return ret; - rtw89_write32(rtwdev, R_AX_WLLPS_CTRL, 0x0001A0B0); + rtw89_write32(rtwdev, R_AX_WLLPS_CTRL, SW_LPS_OPTION); rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_XTAL_OFF_A_DIE); + rtw89_write32_set(rtwdev, R_AX_SYS_SWR_CTRL1, B_AX_SYM_CTRL_SPS_PWMFREQ); + rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0, + B_AX_REG_ZCDC_H_MASK, 0x3); rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_SWLPS); return 0; @@ -2816,6 +2823,7 @@ static int rtw8852c_mac_enable_bb_rf(struct rtw89_dev *rtwdev) static int rtw8852c_mac_disable_bb_rf(struct rtw89_dev *rtwdev) { + rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG); rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN, B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN); @@ -2934,6 +2942,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = { .dig_regs = &rtw8852c_dig_regs, .tssi_dbw_table = &rtw89_8852c_tssi_dbw_table, .support_chanctx_num = 2, + .support_rnr = false, .support_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ) | BIT(NL80211_BAND_6GHZ), diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c index ab1a0aadc869..24c390b6f3d3 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c @@ -34521,7 +34521,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_FCC][48] = 72, [0][0][1][0][RTW89_ETSI][48] = 127, [0][0][1][0][RTW89_MKK][48] = 127, - [0][0][1][0][RTW89_IC][48] = 127, + [0][0][1][0][RTW89_IC][48] = 72, [0][0][1][0][RTW89_KCC][48] = 127, [0][0][1][0][RTW89_ACMA][48] = 127, [0][0][1][0][RTW89_CN][48] = 127, @@ -34534,7 +34534,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_FCC][50] = 72, [0][0][1][0][RTW89_ETSI][50] = 127, [0][0][1][0][RTW89_MKK][50] = 127, - [0][0][1][0][RTW89_IC][50] = 127, + [0][0][1][0][RTW89_IC][50] = 72, [0][0][1][0][RTW89_KCC][50] = 127, [0][0][1][0][RTW89_ACMA][50] = 127, [0][0][1][0][RTW89_CN][50] = 127, @@ -34547,7 +34547,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_FCC][52] = 72, [0][0][1][0][RTW89_ETSI][52] = 127, [0][0][1][0][RTW89_MKK][52] = 127, - [0][0][1][0][RTW89_IC][52] = 127, + [0][0][1][0][RTW89_IC][52] = 72, [0][0][1][0][RTW89_KCC][52] = 127, [0][0][1][0][RTW89_ACMA][52] = 127, [0][0][1][0][RTW89_CN][52] = 127, @@ -34885,7 +34885,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_FCC][48] = 48, [0][1][1][0][RTW89_ETSI][48] = 127, [0][1][1][0][RTW89_MKK][48] = 127, - [0][1][1][0][RTW89_IC][48] = 127, + [0][1][1][0][RTW89_IC][48] = 48, 
[0][1][1][0][RTW89_KCC][48] = 127, [0][1][1][0][RTW89_ACMA][48] = 127, [0][1][1][0][RTW89_CN][48] = 127, @@ -34898,7 +34898,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_FCC][50] = 48, [0][1][1][0][RTW89_ETSI][50] = 127, [0][1][1][0][RTW89_MKK][50] = 127, - [0][1][1][0][RTW89_IC][50] = 127, + [0][1][1][0][RTW89_IC][50] = 48, [0][1][1][0][RTW89_KCC][50] = 127, [0][1][1][0][RTW89_ACMA][50] = 127, [0][1][1][0][RTW89_CN][50] = 127, @@ -34911,7 +34911,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_FCC][52] = 48, [0][1][1][0][RTW89_ETSI][52] = 127, [0][1][1][0][RTW89_MKK][52] = 127, - [0][1][1][0][RTW89_IC][52] = 127, + [0][1][1][0][RTW89_IC][52] = 48, [0][1][1][0][RTW89_KCC][52] = 127, [0][1][1][0][RTW89_ACMA][52] = 127, [0][1][1][0][RTW89_CN][52] = 127, @@ -35249,7 +35249,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_FCC][48] = 72, [0][0][2][0][RTW89_ETSI][48] = 127, [0][0][2][0][RTW89_MKK][48] = 127, - [0][0][2][0][RTW89_IC][48] = 127, + [0][0][2][0][RTW89_IC][48] = 72, [0][0][2][0][RTW89_KCC][48] = 127, [0][0][2][0][RTW89_ACMA][48] = 127, [0][0][2][0][RTW89_CN][48] = 127, @@ -35262,7 +35262,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_FCC][50] = 72, [0][0][2][0][RTW89_ETSI][50] = 127, [0][0][2][0][RTW89_MKK][50] = 127, - [0][0][2][0][RTW89_IC][50] = 127, + [0][0][2][0][RTW89_IC][50] = 72, [0][0][2][0][RTW89_KCC][50] = 127, [0][0][2][0][RTW89_ACMA][50] = 127, [0][0][2][0][RTW89_CN][50] = 127, @@ -35275,7 +35275,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_FCC][52] = 72, [0][0][2][0][RTW89_ETSI][52] = 127, [0][0][2][0][RTW89_MKK][52] = 127, - [0][0][2][0][RTW89_IC][52] = 127, + [0][0][2][0][RTW89_IC][52] = 72, [0][0][2][0][RTW89_KCC][52] = 127, [0][0][2][0][RTW89_ACMA][52] = 127, [0][0][2][0][RTW89_CN][52] = 127, @@ -35613,7 +35613,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_FCC][48] = 48, [0][1][2][0][RTW89_ETSI][48] = 127, [0][1][2][0][RTW89_MKK][48] = 127, - [0][1][2][0][RTW89_IC][48] = 127, + [0][1][2][0][RTW89_IC][48] = 48, [0][1][2][0][RTW89_KCC][48] = 127, [0][1][2][0][RTW89_ACMA][48] = 127, [0][1][2][0][RTW89_CN][48] = 127, @@ -35626,7 +35626,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_FCC][50] = 50, [0][1][2][0][RTW89_ETSI][50] = 127, [0][1][2][0][RTW89_MKK][50] = 127, - [0][1][2][0][RTW89_IC][50] = 127, + [0][1][2][0][RTW89_IC][50] = 50, [0][1][2][0][RTW89_KCC][50] = 127, [0][1][2][0][RTW89_ACMA][50] = 127, [0][1][2][0][RTW89_CN][50] = 127, @@ -35639,7 +35639,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_FCC][52] = 48, [0][1][2][0][RTW89_ETSI][52] = 127, [0][1][2][0][RTW89_MKK][52] = 127, - [0][1][2][0][RTW89_IC][52] = 127, + [0][1][2][0][RTW89_IC][52] = 48, [0][1][2][0][RTW89_KCC][52] = 127, [0][1][2][0][RTW89_ACMA][52] = 127, [0][1][2][0][RTW89_CN][52] = 127, @@ -35977,7 +35977,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_FCC][48] = 48, [0][1][2][1][RTW89_ETSI][48] = 127, [0][1][2][1][RTW89_MKK][48] = 127, - [0][1][2][1][RTW89_IC][48] = 127, + [0][1][2][1][RTW89_IC][48] = 48, [0][1][2][1][RTW89_KCC][48] = 127, [0][1][2][1][RTW89_ACMA][48] = 127, [0][1][2][1][RTW89_CN][48] = 127, @@ -35990,7 +35990,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] 
[0][1][2][1][RTW89_FCC][50] = 50, [0][1][2][1][RTW89_ETSI][50] = 127, [0][1][2][1][RTW89_MKK][50] = 127, - [0][1][2][1][RTW89_IC][50] = 127, + [0][1][2][1][RTW89_IC][50] = 50, [0][1][2][1][RTW89_KCC][50] = 127, [0][1][2][1][RTW89_ACMA][50] = 127, [0][1][2][1][RTW89_CN][50] = 127, @@ -36003,7 +36003,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_FCC][52] = 48, [0][1][2][1][RTW89_ETSI][52] = 127, [0][1][2][1][RTW89_MKK][52] = 127, - [0][1][2][1][RTW89_IC][52] = 127, + [0][1][2][1][RTW89_IC][52] = 48, [0][1][2][1][RTW89_KCC][52] = 127, [0][1][2][1][RTW89_ACMA][52] = 127, [0][1][2][1][RTW89_CN][52] = 127, @@ -36172,7 +36172,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_FCC][47] = 68, [1][0][2][0][RTW89_ETSI][47] = 127, [1][0][2][0][RTW89_MKK][47] = 127, - [1][0][2][0][RTW89_IC][47] = 127, + [1][0][2][0][RTW89_IC][47] = 68, [1][0][2][0][RTW89_KCC][47] = 127, [1][0][2][0][RTW89_ACMA][47] = 127, [1][0][2][0][RTW89_CN][47] = 127, @@ -36185,7 +36185,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_FCC][51] = 68, [1][0][2][0][RTW89_ETSI][51] = 127, [1][0][2][0][RTW89_MKK][51] = 127, - [1][0][2][0][RTW89_IC][51] = 127, + [1][0][2][0][RTW89_IC][51] = 68, [1][0][2][0][RTW89_KCC][51] = 127, [1][0][2][0][RTW89_ACMA][51] = 127, [1][0][2][0][RTW89_CN][51] = 127, @@ -36354,7 +36354,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_FCC][47] = 62, [1][1][2][0][RTW89_ETSI][47] = 127, [1][1][2][0][RTW89_MKK][47] = 127, - [1][1][2][0][RTW89_IC][47] = 127, + [1][1][2][0][RTW89_IC][47] = 62, [1][1][2][0][RTW89_KCC][47] = 127, [1][1][2][0][RTW89_ACMA][47] = 127, [1][1][2][0][RTW89_CN][47] = 127, @@ -36367,7 +36367,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_FCC][51] = 60, [1][1][2][0][RTW89_ETSI][51] = 127, [1][1][2][0][RTW89_MKK][51] = 127, - [1][1][2][0][RTW89_IC][51] = 127, + [1][1][2][0][RTW89_IC][51] = 60, [1][1][2][0][RTW89_KCC][51] = 127, [1][1][2][0][RTW89_ACMA][51] = 127, [1][1][2][0][RTW89_CN][51] = 127, @@ -36536,7 +36536,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_FCC][47] = 62, [1][1][2][1][RTW89_ETSI][47] = 127, [1][1][2][1][RTW89_MKK][47] = 127, - [1][1][2][1][RTW89_IC][47] = 127, + [1][1][2][1][RTW89_IC][47] = 62, [1][1][2][1][RTW89_KCC][47] = 127, [1][1][2][1][RTW89_ACMA][47] = 127, [1][1][2][1][RTW89_CN][47] = 127, @@ -36549,7 +36549,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_FCC][51] = 60, [1][1][2][1][RTW89_ETSI][51] = 127, [1][1][2][1][RTW89_MKK][51] = 127, - [1][1][2][1][RTW89_IC][51] = 127, + [1][1][2][1][RTW89_IC][51] = 60, [1][1][2][1][RTW89_KCC][51] = 127, [1][1][2][1][RTW89_ACMA][51] = 127, [1][1][2][1][RTW89_CN][51] = 127, @@ -36640,7 +36640,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_FCC][49] = 62, [2][0][2][0][RTW89_ETSI][49] = 127, [2][0][2][0][RTW89_MKK][49] = 127, - [2][0][2][0][RTW89_IC][49] = 127, + [2][0][2][0][RTW89_IC][49] = 62, [2][0][2][0][RTW89_KCC][49] = 127, [2][0][2][0][RTW89_ACMA][49] = 127, [2][0][2][0][RTW89_CN][49] = 127, @@ -36731,7 +36731,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_FCC][49] = 62, [2][1][2][0][RTW89_ETSI][49] = 127, [2][1][2][0][RTW89_MKK][49] = 127, - [2][1][2][0][RTW89_IC][49] = 127, + [2][1][2][0][RTW89_IC][49] = 62, [2][1][2][0][RTW89_KCC][49] = 127, 
[2][1][2][0][RTW89_ACMA][49] = 127, [2][1][2][0][RTW89_CN][49] = 127, @@ -36822,7 +36822,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_FCC][49] = 62, [2][1][2][1][RTW89_ETSI][49] = 127, [2][1][2][1][RTW89_MKK][49] = 127, - [2][1][2][1][RTW89_IC][49] = 127, + [2][1][2][1][RTW89_IC][49] = 62, [2][1][2][1][RTW89_KCC][49] = 127, [2][1][2][1][RTW89_ACMA][49] = 127, [2][1][2][1][RTW89_CN][49] = 127, @@ -36861,7 +36861,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_FCC][45] = 52, [3][0][2][0][RTW89_ETSI][45] = 127, [3][0][2][0][RTW89_MKK][45] = 127, - [3][0][2][0][RTW89_IC][45] = 127, + [3][0][2][0][RTW89_IC][45] = 52, [3][0][2][0][RTW89_KCC][45] = 127, [3][0][2][0][RTW89_ACMA][45] = 127, [3][0][2][0][RTW89_CN][45] = 127, @@ -36900,7 +36900,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_FCC][45] = 46, [3][1][2][0][RTW89_ETSI][45] = 127, [3][1][2][0][RTW89_MKK][45] = 127, - [3][1][2][0][RTW89_IC][45] = 127, + [3][1][2][0][RTW89_IC][45] = 46, [3][1][2][0][RTW89_KCC][45] = 127, [3][1][2][0][RTW89_ACMA][45] = 127, [3][1][2][0][RTW89_CN][45] = 127, @@ -36939,7 +36939,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_FCC][45] = 46, [3][1][2][1][RTW89_ETSI][45] = 127, [3][1][2][1][RTW89_MKK][45] = 127, - [3][1][2][1][RTW89_IC][45] = 127, + [3][1][2][1][RTW89_IC][45] = 46, [3][1][2][1][RTW89_KCC][45] = 127, [3][1][2][1][RTW89_ACMA][45] = 127, [3][1][2][1][RTW89_CN][45] = 127, @@ -36958,1476 +36958,986 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [RTW89_6G_CH_NUM] = { [0][0][1][0][RTW89_WW][0][0] = 24, [0][0][1][0][RTW89_WW][1][0] = 24, - [0][0][1][0][RTW89_WW][2][0] = 56, [0][0][1][0][RTW89_WW][0][2] = 22, [0][0][1][0][RTW89_WW][1][2] = 22, - [0][0][1][0][RTW89_WW][2][2] = 56, [0][0][1][0][RTW89_WW][0][4] = 22, [0][0][1][0][RTW89_WW][1][4] = 22, - [0][0][1][0][RTW89_WW][2][4] = 56, [0][0][1][0][RTW89_WW][0][6] = 22, [0][0][1][0][RTW89_WW][1][6] = 22, - [0][0][1][0][RTW89_WW][2][6] = 56, [0][0][1][0][RTW89_WW][0][8] = 22, [0][0][1][0][RTW89_WW][1][8] = 22, - [0][0][1][0][RTW89_WW][2][8] = 56, [0][0][1][0][RTW89_WW][0][10] = 22, [0][0][1][0][RTW89_WW][1][10] = 22, - [0][0][1][0][RTW89_WW][2][10] = 56, [0][0][1][0][RTW89_WW][0][12] = 22, [0][0][1][0][RTW89_WW][1][12] = 22, - [0][0][1][0][RTW89_WW][2][12] = 56, [0][0][1][0][RTW89_WW][0][14] = 22, [0][0][1][0][RTW89_WW][1][14] = 22, - [0][0][1][0][RTW89_WW][2][14] = 56, [0][0][1][0][RTW89_WW][0][15] = 22, [0][0][1][0][RTW89_WW][1][15] = 22, - [0][0][1][0][RTW89_WW][2][15] = 56, [0][0][1][0][RTW89_WW][0][17] = 22, [0][0][1][0][RTW89_WW][1][17] = 22, - [0][0][1][0][RTW89_WW][2][17] = 56, [0][0][1][0][RTW89_WW][0][19] = 22, [0][0][1][0][RTW89_WW][1][19] = 22, - [0][0][1][0][RTW89_WW][2][19] = 56, [0][0][1][0][RTW89_WW][0][21] = 22, [0][0][1][0][RTW89_WW][1][21] = 22, - [0][0][1][0][RTW89_WW][2][21] = 56, [0][0][1][0][RTW89_WW][0][23] = 22, [0][0][1][0][RTW89_WW][1][23] = 22, - [0][0][1][0][RTW89_WW][2][23] = 70, [0][0][1][0][RTW89_WW][0][25] = 22, [0][0][1][0][RTW89_WW][1][25] = 22, - [0][0][1][0][RTW89_WW][2][25] = 70, [0][0][1][0][RTW89_WW][0][27] = 22, [0][0][1][0][RTW89_WW][1][27] = 22, - [0][0][1][0][RTW89_WW][2][27] = 70, [0][0][1][0][RTW89_WW][0][29] = 22, [0][0][1][0][RTW89_WW][1][29] = 22, - [0][0][1][0][RTW89_WW][2][29] = 70, [0][0][1][0][RTW89_WW][0][30] = 22, [0][0][1][0][RTW89_WW][1][30] = 22, - [0][0][1][0][RTW89_WW][2][30] = 70, 
[0][0][1][0][RTW89_WW][0][32] = 22, [0][0][1][0][RTW89_WW][1][32] = 22, - [0][0][1][0][RTW89_WW][2][32] = 70, [0][0][1][0][RTW89_WW][0][34] = 22, [0][0][1][0][RTW89_WW][1][34] = 22, - [0][0][1][0][RTW89_WW][2][34] = 70, [0][0][1][0][RTW89_WW][0][36] = 22, [0][0][1][0][RTW89_WW][1][36] = 22, - [0][0][1][0][RTW89_WW][2][36] = 70, [0][0][1][0][RTW89_WW][0][38] = 22, [0][0][1][0][RTW89_WW][1][38] = 22, - [0][0][1][0][RTW89_WW][2][38] = 70, [0][0][1][0][RTW89_WW][0][40] = 22, [0][0][1][0][RTW89_WW][1][40] = 22, - [0][0][1][0][RTW89_WW][2][40] = 70, [0][0][1][0][RTW89_WW][0][42] = 22, [0][0][1][0][RTW89_WW][1][42] = 22, - [0][0][1][0][RTW89_WW][2][42] = 70, [0][0][1][0][RTW89_WW][0][44] = 22, [0][0][1][0][RTW89_WW][1][44] = 22, - [0][0][1][0][RTW89_WW][2][44] = 70, [0][0][1][0][RTW89_WW][0][45] = 22, [0][0][1][0][RTW89_WW][1][45] = 22, - [0][0][1][0][RTW89_WW][2][45] = 70, [0][0][1][0][RTW89_WW][0][47] = 22, [0][0][1][0][RTW89_WW][1][47] = 22, - [0][0][1][0][RTW89_WW][2][47] = 70, [0][0][1][0][RTW89_WW][0][49] = 24, [0][0][1][0][RTW89_WW][1][49] = 24, - [0][0][1][0][RTW89_WW][2][49] = 70, [0][0][1][0][RTW89_WW][0][51] = 22, [0][0][1][0][RTW89_WW][1][51] = 22, - [0][0][1][0][RTW89_WW][2][51] = 70, [0][0][1][0][RTW89_WW][0][53] = 22, [0][0][1][0][RTW89_WW][1][53] = 22, - [0][0][1][0][RTW89_WW][2][53] = 70, [0][0][1][0][RTW89_WW][0][55] = 22, [0][0][1][0][RTW89_WW][1][55] = 22, - [0][0][1][0][RTW89_WW][2][55] = 68, [0][0][1][0][RTW89_WW][0][57] = 22, [0][0][1][0][RTW89_WW][1][57] = 22, - [0][0][1][0][RTW89_WW][2][57] = 68, [0][0][1][0][RTW89_WW][0][59] = 22, [0][0][1][0][RTW89_WW][1][59] = 22, - [0][0][1][0][RTW89_WW][2][59] = 68, [0][0][1][0][RTW89_WW][0][60] = 22, [0][0][1][0][RTW89_WW][1][60] = 22, - [0][0][1][0][RTW89_WW][2][60] = 68, [0][0][1][0][RTW89_WW][0][62] = 22, [0][0][1][0][RTW89_WW][1][62] = 22, - [0][0][1][0][RTW89_WW][2][62] = 68, [0][0][1][0][RTW89_WW][0][64] = 22, [0][0][1][0][RTW89_WW][1][64] = 22, - [0][0][1][0][RTW89_WW][2][64] = 68, [0][0][1][0][RTW89_WW][0][66] = 22, [0][0][1][0][RTW89_WW][1][66] = 22, - [0][0][1][0][RTW89_WW][2][66] = 68, [0][0][1][0][RTW89_WW][0][68] = 22, [0][0][1][0][RTW89_WW][1][68] = 22, - [0][0][1][0][RTW89_WW][2][68] = 68, [0][0][1][0][RTW89_WW][0][70] = 24, [0][0][1][0][RTW89_WW][1][70] = 24, - [0][0][1][0][RTW89_WW][2][70] = 68, [0][0][1][0][RTW89_WW][0][72] = 22, [0][0][1][0][RTW89_WW][1][72] = 22, - [0][0][1][0][RTW89_WW][2][72] = 68, [0][0][1][0][RTW89_WW][0][74] = 22, [0][0][1][0][RTW89_WW][1][74] = 22, - [0][0][1][0][RTW89_WW][2][74] = 68, [0][0][1][0][RTW89_WW][0][75] = 22, [0][0][1][0][RTW89_WW][1][75] = 22, - [0][0][1][0][RTW89_WW][2][75] = 68, [0][0][1][0][RTW89_WW][0][77] = 22, [0][0][1][0][RTW89_WW][1][77] = 22, - [0][0][1][0][RTW89_WW][2][77] = 68, [0][0][1][0][RTW89_WW][0][79] = 22, [0][0][1][0][RTW89_WW][1][79] = 22, - [0][0][1][0][RTW89_WW][2][79] = 68, [0][0][1][0][RTW89_WW][0][81] = 22, [0][0][1][0][RTW89_WW][1][81] = 22, - [0][0][1][0][RTW89_WW][2][81] = 68, [0][0][1][0][RTW89_WW][0][83] = 22, [0][0][1][0][RTW89_WW][1][83] = 22, - [0][0][1][0][RTW89_WW][2][83] = 68, [0][0][1][0][RTW89_WW][0][85] = 22, [0][0][1][0][RTW89_WW][1][85] = 22, - [0][0][1][0][RTW89_WW][2][85] = 68, [0][0][1][0][RTW89_WW][0][87] = 22, [0][0][1][0][RTW89_WW][1][87] = 22, - [0][0][1][0][RTW89_WW][2][87] = 0, [0][0][1][0][RTW89_WW][0][89] = 22, [0][0][1][0][RTW89_WW][1][89] = 22, - [0][0][1][0][RTW89_WW][2][89] = 0, [0][0][1][0][RTW89_WW][0][90] = 22, [0][0][1][0][RTW89_WW][1][90] = 22, - [0][0][1][0][RTW89_WW][2][90] = 0, [0][0][1][0][RTW89_WW][0][92] = 22, 
[0][0][1][0][RTW89_WW][1][92] = 22, - [0][0][1][0][RTW89_WW][2][92] = 0, [0][0][1][0][RTW89_WW][0][94] = 22, [0][0][1][0][RTW89_WW][1][94] = 22, - [0][0][1][0][RTW89_WW][2][94] = 0, [0][0][1][0][RTW89_WW][0][96] = 22, [0][0][1][0][RTW89_WW][1][96] = 22, - [0][0][1][0][RTW89_WW][2][96] = 0, [0][0][1][0][RTW89_WW][0][98] = 22, [0][0][1][0][RTW89_WW][1][98] = 22, - [0][0][1][0][RTW89_WW][2][98] = 0, [0][0][1][0][RTW89_WW][0][100] = 22, [0][0][1][0][RTW89_WW][1][100] = 22, - [0][0][1][0][RTW89_WW][2][100] = 0, [0][0][1][0][RTW89_WW][0][102] = 22, [0][0][1][0][RTW89_WW][1][102] = 22, - [0][0][1][0][RTW89_WW][2][102] = 0, [0][0][1][0][RTW89_WW][0][104] = 22, [0][0][1][0][RTW89_WW][1][104] = 22, - [0][0][1][0][RTW89_WW][2][104] = 0, [0][0][1][0][RTW89_WW][0][105] = 22, [0][0][1][0][RTW89_WW][1][105] = 22, - [0][0][1][0][RTW89_WW][2][105] = 0, [0][0][1][0][RTW89_WW][0][107] = 24, [0][0][1][0][RTW89_WW][1][107] = 24, - [0][0][1][0][RTW89_WW][2][107] = 0, [0][0][1][0][RTW89_WW][0][109] = 24, [0][0][1][0][RTW89_WW][1][109] = 24, - [0][0][1][0][RTW89_WW][2][109] = 0, [0][0][1][0][RTW89_WW][0][111] = 0, [0][0][1][0][RTW89_WW][1][111] = 0, - [0][0][1][0][RTW89_WW][2][111] = 0, [0][0][1][0][RTW89_WW][0][113] = 0, [0][0][1][0][RTW89_WW][1][113] = 0, - [0][0][1][0][RTW89_WW][2][113] = 0, [0][0][1][0][RTW89_WW][0][115] = 0, [0][0][1][0][RTW89_WW][1][115] = 0, - [0][0][1][0][RTW89_WW][2][115] = 0, [0][0][1][0][RTW89_WW][0][117] = 0, [0][0][1][0][RTW89_WW][1][117] = 0, - [0][0][1][0][RTW89_WW][2][117] = 0, [0][0][1][0][RTW89_WW][0][119] = 0, [0][0][1][0][RTW89_WW][1][119] = 0, - [0][0][1][0][RTW89_WW][2][119] = 0, [0][1][1][0][RTW89_WW][0][0] = -2, [0][1][1][0][RTW89_WW][1][0] = -2, - [0][1][1][0][RTW89_WW][2][0] = 54, [0][1][1][0][RTW89_WW][0][2] = -4, [0][1][1][0][RTW89_WW][1][2] = -4, - [0][1][1][0][RTW89_WW][2][2] = 54, [0][1][1][0][RTW89_WW][0][4] = -4, [0][1][1][0][RTW89_WW][1][4] = -4, - [0][1][1][0][RTW89_WW][2][4] = 54, [0][1][1][0][RTW89_WW][0][6] = -4, [0][1][1][0][RTW89_WW][1][6] = -4, - [0][1][1][0][RTW89_WW][2][6] = 54, [0][1][1][0][RTW89_WW][0][8] = -4, [0][1][1][0][RTW89_WW][1][8] = -4, - [0][1][1][0][RTW89_WW][2][8] = 54, [0][1][1][0][RTW89_WW][0][10] = -4, [0][1][1][0][RTW89_WW][1][10] = -4, - [0][1][1][0][RTW89_WW][2][10] = 54, [0][1][1][0][RTW89_WW][0][12] = -4, [0][1][1][0][RTW89_WW][1][12] = -4, - [0][1][1][0][RTW89_WW][2][12] = 54, [0][1][1][0][RTW89_WW][0][14] = -4, [0][1][1][0][RTW89_WW][1][14] = -4, - [0][1][1][0][RTW89_WW][2][14] = 54, [0][1][1][0][RTW89_WW][0][15] = -4, [0][1][1][0][RTW89_WW][1][15] = -4, - [0][1][1][0][RTW89_WW][2][15] = 54, [0][1][1][0][RTW89_WW][0][17] = -4, [0][1][1][0][RTW89_WW][1][17] = -4, - [0][1][1][0][RTW89_WW][2][17] = 54, [0][1][1][0][RTW89_WW][0][19] = -4, [0][1][1][0][RTW89_WW][1][19] = -4, - [0][1][1][0][RTW89_WW][2][19] = 54, [0][1][1][0][RTW89_WW][0][21] = -4, [0][1][1][0][RTW89_WW][1][21] = -4, - [0][1][1][0][RTW89_WW][2][21] = 54, [0][1][1][0][RTW89_WW][0][23] = -4, [0][1][1][0][RTW89_WW][1][23] = -4, - [0][1][1][0][RTW89_WW][2][23] = 68, [0][1][1][0][RTW89_WW][0][25] = -4, [0][1][1][0][RTW89_WW][1][25] = -4, - [0][1][1][0][RTW89_WW][2][25] = 68, [0][1][1][0][RTW89_WW][0][27] = -4, [0][1][1][0][RTW89_WW][1][27] = -4, - [0][1][1][0][RTW89_WW][2][27] = 68, [0][1][1][0][RTW89_WW][0][29] = -4, [0][1][1][0][RTW89_WW][1][29] = -4, - [0][1][1][0][RTW89_WW][2][29] = 68, [0][1][1][0][RTW89_WW][0][30] = -4, [0][1][1][0][RTW89_WW][1][30] = -4, - [0][1][1][0][RTW89_WW][2][30] = 68, [0][1][1][0][RTW89_WW][0][32] = -4, [0][1][1][0][RTW89_WW][1][32] = -4, - 
[0][1][1][0][RTW89_WW][2][32] = 68, [0][1][1][0][RTW89_WW][0][34] = -4, [0][1][1][0][RTW89_WW][1][34] = -4, - [0][1][1][0][RTW89_WW][2][34] = 68, [0][1][1][0][RTW89_WW][0][36] = -4, [0][1][1][0][RTW89_WW][1][36] = -4, - [0][1][1][0][RTW89_WW][2][36] = 68, [0][1][1][0][RTW89_WW][0][38] = -4, [0][1][1][0][RTW89_WW][1][38] = -4, - [0][1][1][0][RTW89_WW][2][38] = 68, [0][1][1][0][RTW89_WW][0][40] = -4, [0][1][1][0][RTW89_WW][1][40] = -4, - [0][1][1][0][RTW89_WW][2][40] = 68, [0][1][1][0][RTW89_WW][0][42] = -4, [0][1][1][0][RTW89_WW][1][42] = -4, - [0][1][1][0][RTW89_WW][2][42] = 68, [0][1][1][0][RTW89_WW][0][44] = -2, [0][1][1][0][RTW89_WW][1][44] = -2, - [0][1][1][0][RTW89_WW][2][44] = 68, [0][1][1][0][RTW89_WW][0][45] = -2, [0][1][1][0][RTW89_WW][1][45] = -2, - [0][1][1][0][RTW89_WW][2][45] = 70, [0][1][1][0][RTW89_WW][0][47] = -2, [0][1][1][0][RTW89_WW][1][47] = -2, - [0][1][1][0][RTW89_WW][2][47] = 68, [0][1][1][0][RTW89_WW][0][49] = -2, [0][1][1][0][RTW89_WW][1][49] = -2, - [0][1][1][0][RTW89_WW][2][49] = 68, [0][1][1][0][RTW89_WW][0][51] = -2, [0][1][1][0][RTW89_WW][1][51] = -2, - [0][1][1][0][RTW89_WW][2][51] = 68, [0][1][1][0][RTW89_WW][0][53] = -2, [0][1][1][0][RTW89_WW][1][53] = -2, - [0][1][1][0][RTW89_WW][2][53] = 68, [0][1][1][0][RTW89_WW][0][55] = -2, [0][1][1][0][RTW89_WW][1][55] = -2, - [0][1][1][0][RTW89_WW][2][55] = 68, [0][1][1][0][RTW89_WW][0][57] = -2, [0][1][1][0][RTW89_WW][1][57] = -2, - [0][1][1][0][RTW89_WW][2][57] = 68, [0][1][1][0][RTW89_WW][0][59] = -2, [0][1][1][0][RTW89_WW][1][59] = -2, - [0][1][1][0][RTW89_WW][2][59] = 68, [0][1][1][0][RTW89_WW][0][60] = -2, [0][1][1][0][RTW89_WW][1][60] = -2, - [0][1][1][0][RTW89_WW][2][60] = 68, [0][1][1][0][RTW89_WW][0][62] = -2, [0][1][1][0][RTW89_WW][1][62] = -2, - [0][1][1][0][RTW89_WW][2][62] = 68, [0][1][1][0][RTW89_WW][0][64] = -2, [0][1][1][0][RTW89_WW][1][64] = -2, - [0][1][1][0][RTW89_WW][2][64] = 68, [0][1][1][0][RTW89_WW][0][66] = -2, [0][1][1][0][RTW89_WW][1][66] = -2, - [0][1][1][0][RTW89_WW][2][66] = 68, [0][1][1][0][RTW89_WW][0][68] = -2, [0][1][1][0][RTW89_WW][1][68] = -2, - [0][1][1][0][RTW89_WW][2][68] = 68, [0][1][1][0][RTW89_WW][0][70] = -2, [0][1][1][0][RTW89_WW][1][70] = -2, - [0][1][1][0][RTW89_WW][2][70] = 68, [0][1][1][0][RTW89_WW][0][72] = -2, [0][1][1][0][RTW89_WW][1][72] = -2, - [0][1][1][0][RTW89_WW][2][72] = 68, [0][1][1][0][RTW89_WW][0][74] = -2, [0][1][1][0][RTW89_WW][1][74] = -2, - [0][1][1][0][RTW89_WW][2][74] = 68, [0][1][1][0][RTW89_WW][0][75] = -2, [0][1][1][0][RTW89_WW][1][75] = -2, - [0][1][1][0][RTW89_WW][2][75] = 68, [0][1][1][0][RTW89_WW][0][77] = -2, [0][1][1][0][RTW89_WW][1][77] = -2, - [0][1][1][0][RTW89_WW][2][77] = 68, [0][1][1][0][RTW89_WW][0][79] = -2, [0][1][1][0][RTW89_WW][1][79] = -2, - [0][1][1][0][RTW89_WW][2][79] = 68, [0][1][1][0][RTW89_WW][0][81] = -2, [0][1][1][0][RTW89_WW][1][81] = -2, - [0][1][1][0][RTW89_WW][2][81] = 68, [0][1][1][0][RTW89_WW][0][83] = -2, [0][1][1][0][RTW89_WW][1][83] = -2, - [0][1][1][0][RTW89_WW][2][83] = 68, [0][1][1][0][RTW89_WW][0][85] = -2, [0][1][1][0][RTW89_WW][1][85] = -2, - [0][1][1][0][RTW89_WW][2][85] = 68, [0][1][1][0][RTW89_WW][0][87] = -2, [0][1][1][0][RTW89_WW][1][87] = -2, - [0][1][1][0][RTW89_WW][2][87] = 0, [0][1][1][0][RTW89_WW][0][89] = -2, [0][1][1][0][RTW89_WW][1][89] = -2, - [0][1][1][0][RTW89_WW][2][89] = 0, [0][1][1][0][RTW89_WW][0][90] = -2, [0][1][1][0][RTW89_WW][1][90] = -2, - [0][1][1][0][RTW89_WW][2][90] = 0, [0][1][1][0][RTW89_WW][0][92] = -2, [0][1][1][0][RTW89_WW][1][92] = -2, - [0][1][1][0][RTW89_WW][2][92] = 0, 
[0][1][1][0][RTW89_WW][0][94] = -2, [0][1][1][0][RTW89_WW][1][94] = -2, - [0][1][1][0][RTW89_WW][2][94] = 0, [0][1][1][0][RTW89_WW][0][96] = -2, [0][1][1][0][RTW89_WW][1][96] = -2, - [0][1][1][0][RTW89_WW][2][96] = 0, [0][1][1][0][RTW89_WW][0][98] = -2, [0][1][1][0][RTW89_WW][1][98] = -2, - [0][1][1][0][RTW89_WW][2][98] = 0, [0][1][1][0][RTW89_WW][0][100] = -2, [0][1][1][0][RTW89_WW][1][100] = -2, - [0][1][1][0][RTW89_WW][2][100] = 0, [0][1][1][0][RTW89_WW][0][102] = -2, [0][1][1][0][RTW89_WW][1][102] = -2, - [0][1][1][0][RTW89_WW][2][102] = 0, [0][1][1][0][RTW89_WW][0][104] = -2, [0][1][1][0][RTW89_WW][1][104] = -2, - [0][1][1][0][RTW89_WW][2][104] = 0, [0][1][1][0][RTW89_WW][0][105] = -2, [0][1][1][0][RTW89_WW][1][105] = -2, - [0][1][1][0][RTW89_WW][2][105] = 0, [0][1][1][0][RTW89_WW][0][107] = 1, [0][1][1][0][RTW89_WW][1][107] = 1, - [0][1][1][0][RTW89_WW][2][107] = 0, [0][1][1][0][RTW89_WW][0][109] = 1, [0][1][1][0][RTW89_WW][1][109] = 1, - [0][1][1][0][RTW89_WW][2][109] = 0, [0][1][1][0][RTW89_WW][0][111] = 0, [0][1][1][0][RTW89_WW][1][111] = 0, - [0][1][1][0][RTW89_WW][2][111] = 0, [0][1][1][0][RTW89_WW][0][113] = 0, [0][1][1][0][RTW89_WW][1][113] = 0, - [0][1][1][0][RTW89_WW][2][113] = 0, [0][1][1][0][RTW89_WW][0][115] = 0, [0][1][1][0][RTW89_WW][1][115] = 0, - [0][1][1][0][RTW89_WW][2][115] = 0, [0][1][1][0][RTW89_WW][0][117] = 0, [0][1][1][0][RTW89_WW][1][117] = 0, - [0][1][1][0][RTW89_WW][2][117] = 0, [0][1][1][0][RTW89_WW][0][119] = 0, [0][1][1][0][RTW89_WW][1][119] = 0, - [0][1][1][0][RTW89_WW][2][119] = 0, [0][0][2][0][RTW89_WW][0][0] = 24, [0][0][2][0][RTW89_WW][1][0] = 24, - [0][0][2][0][RTW89_WW][2][0] = 56, [0][0][2][0][RTW89_WW][0][2] = 22, [0][0][2][0][RTW89_WW][1][2] = 22, - [0][0][2][0][RTW89_WW][2][2] = 56, [0][0][2][0][RTW89_WW][0][4] = 22, [0][0][2][0][RTW89_WW][1][4] = 22, - [0][0][2][0][RTW89_WW][2][4] = 56, [0][0][2][0][RTW89_WW][0][6] = 22, [0][0][2][0][RTW89_WW][1][6] = 22, - [0][0][2][0][RTW89_WW][2][6] = 56, [0][0][2][0][RTW89_WW][0][8] = 22, [0][0][2][0][RTW89_WW][1][8] = 22, - [0][0][2][0][RTW89_WW][2][8] = 56, [0][0][2][0][RTW89_WW][0][10] = 22, [0][0][2][0][RTW89_WW][1][10] = 22, - [0][0][2][0][RTW89_WW][2][10] = 56, [0][0][2][0][RTW89_WW][0][12] = 22, [0][0][2][0][RTW89_WW][1][12] = 22, - [0][0][2][0][RTW89_WW][2][12] = 56, [0][0][2][0][RTW89_WW][0][14] = 22, [0][0][2][0][RTW89_WW][1][14] = 22, - [0][0][2][0][RTW89_WW][2][14] = 56, [0][0][2][0][RTW89_WW][0][15] = 22, [0][0][2][0][RTW89_WW][1][15] = 22, - [0][0][2][0][RTW89_WW][2][15] = 56, [0][0][2][0][RTW89_WW][0][17] = 22, [0][0][2][0][RTW89_WW][1][17] = 22, - [0][0][2][0][RTW89_WW][2][17] = 56, [0][0][2][0][RTW89_WW][0][19] = 22, [0][0][2][0][RTW89_WW][1][19] = 22, - [0][0][2][0][RTW89_WW][2][19] = 56, [0][0][2][0][RTW89_WW][0][21] = 22, [0][0][2][0][RTW89_WW][1][21] = 22, - [0][0][2][0][RTW89_WW][2][21] = 56, [0][0][2][0][RTW89_WW][0][23] = 22, [0][0][2][0][RTW89_WW][1][23] = 22, - [0][0][2][0][RTW89_WW][2][23] = 70, [0][0][2][0][RTW89_WW][0][25] = 22, [0][0][2][0][RTW89_WW][1][25] = 22, - [0][0][2][0][RTW89_WW][2][25] = 70, [0][0][2][0][RTW89_WW][0][27] = 22, [0][0][2][0][RTW89_WW][1][27] = 22, - [0][0][2][0][RTW89_WW][2][27] = 70, [0][0][2][0][RTW89_WW][0][29] = 22, [0][0][2][0][RTW89_WW][1][29] = 22, - [0][0][2][0][RTW89_WW][2][29] = 70, [0][0][2][0][RTW89_WW][0][30] = 22, [0][0][2][0][RTW89_WW][1][30] = 22, - [0][0][2][0][RTW89_WW][2][30] = 70, [0][0][2][0][RTW89_WW][0][32] = 22, [0][0][2][0][RTW89_WW][1][32] = 22, - [0][0][2][0][RTW89_WW][2][32] = 70, [0][0][2][0][RTW89_WW][0][34] = 22, 
[0][0][2][0][RTW89_WW][1][34] = 22, - [0][0][2][0][RTW89_WW][2][34] = 70, [0][0][2][0][RTW89_WW][0][36] = 22, [0][0][2][0][RTW89_WW][1][36] = 22, - [0][0][2][0][RTW89_WW][2][36] = 70, [0][0][2][0][RTW89_WW][0][38] = 22, [0][0][2][0][RTW89_WW][1][38] = 22, - [0][0][2][0][RTW89_WW][2][38] = 70, [0][0][2][0][RTW89_WW][0][40] = 22, [0][0][2][0][RTW89_WW][1][40] = 22, - [0][0][2][0][RTW89_WW][2][40] = 70, [0][0][2][0][RTW89_WW][0][42] = 22, [0][0][2][0][RTW89_WW][1][42] = 22, - [0][0][2][0][RTW89_WW][2][42] = 70, [0][0][2][0][RTW89_WW][0][44] = 22, [0][0][2][0][RTW89_WW][1][44] = 22, - [0][0][2][0][RTW89_WW][2][44] = 70, [0][0][2][0][RTW89_WW][0][45] = 22, [0][0][2][0][RTW89_WW][1][45] = 22, - [0][0][2][0][RTW89_WW][2][45] = 70, [0][0][2][0][RTW89_WW][0][47] = 22, [0][0][2][0][RTW89_WW][1][47] = 22, - [0][0][2][0][RTW89_WW][2][47] = 70, [0][0][2][0][RTW89_WW][0][49] = 24, [0][0][2][0][RTW89_WW][1][49] = 24, - [0][0][2][0][RTW89_WW][2][49] = 70, [0][0][2][0][RTW89_WW][0][51] = 22, [0][0][2][0][RTW89_WW][1][51] = 22, - [0][0][2][0][RTW89_WW][2][51] = 70, [0][0][2][0][RTW89_WW][0][53] = 22, [0][0][2][0][RTW89_WW][1][53] = 22, - [0][0][2][0][RTW89_WW][2][53] = 70, [0][0][2][0][RTW89_WW][0][55] = 22, [0][0][2][0][RTW89_WW][1][55] = 22, - [0][0][2][0][RTW89_WW][2][55] = 68, [0][0][2][0][RTW89_WW][0][57] = 22, [0][0][2][0][RTW89_WW][1][57] = 22, - [0][0][2][0][RTW89_WW][2][57] = 68, [0][0][2][0][RTW89_WW][0][59] = 22, [0][0][2][0][RTW89_WW][1][59] = 22, - [0][0][2][0][RTW89_WW][2][59] = 68, [0][0][2][0][RTW89_WW][0][60] = 22, [0][0][2][0][RTW89_WW][1][60] = 22, - [0][0][2][0][RTW89_WW][2][60] = 68, [0][0][2][0][RTW89_WW][0][62] = 22, [0][0][2][0][RTW89_WW][1][62] = 22, - [0][0][2][0][RTW89_WW][2][62] = 68, [0][0][2][0][RTW89_WW][0][64] = 22, [0][0][2][0][RTW89_WW][1][64] = 22, - [0][0][2][0][RTW89_WW][2][64] = 68, [0][0][2][0][RTW89_WW][0][66] = 22, [0][0][2][0][RTW89_WW][1][66] = 22, - [0][0][2][0][RTW89_WW][2][66] = 68, [0][0][2][0][RTW89_WW][0][68] = 22, [0][0][2][0][RTW89_WW][1][68] = 22, - [0][0][2][0][RTW89_WW][2][68] = 68, [0][0][2][0][RTW89_WW][0][70] = 24, [0][0][2][0][RTW89_WW][1][70] = 24, - [0][0][2][0][RTW89_WW][2][70] = 68, [0][0][2][0][RTW89_WW][0][72] = 22, [0][0][2][0][RTW89_WW][1][72] = 22, - [0][0][2][0][RTW89_WW][2][72] = 68, [0][0][2][0][RTW89_WW][0][74] = 22, [0][0][2][0][RTW89_WW][1][74] = 22, - [0][0][2][0][RTW89_WW][2][74] = 68, [0][0][2][0][RTW89_WW][0][75] = 22, [0][0][2][0][RTW89_WW][1][75] = 22, - [0][0][2][0][RTW89_WW][2][75] = 68, [0][0][2][0][RTW89_WW][0][77] = 22, [0][0][2][0][RTW89_WW][1][77] = 22, - [0][0][2][0][RTW89_WW][2][77] = 68, [0][0][2][0][RTW89_WW][0][79] = 22, [0][0][2][0][RTW89_WW][1][79] = 22, - [0][0][2][0][RTW89_WW][2][79] = 68, [0][0][2][0][RTW89_WW][0][81] = 22, [0][0][2][0][RTW89_WW][1][81] = 22, - [0][0][2][0][RTW89_WW][2][81] = 68, [0][0][2][0][RTW89_WW][0][83] = 22, [0][0][2][0][RTW89_WW][1][83] = 22, - [0][0][2][0][RTW89_WW][2][83] = 68, [0][0][2][0][RTW89_WW][0][85] = 22, [0][0][2][0][RTW89_WW][1][85] = 22, - [0][0][2][0][RTW89_WW][2][85] = 68, [0][0][2][0][RTW89_WW][0][87] = 22, [0][0][2][0][RTW89_WW][1][87] = 22, - [0][0][2][0][RTW89_WW][2][87] = 0, [0][0][2][0][RTW89_WW][0][89] = 22, [0][0][2][0][RTW89_WW][1][89] = 22, - [0][0][2][0][RTW89_WW][2][89] = 0, [0][0][2][0][RTW89_WW][0][90] = 22, [0][0][2][0][RTW89_WW][1][90] = 22, - [0][0][2][0][RTW89_WW][2][90] = 0, [0][0][2][0][RTW89_WW][0][92] = 22, [0][0][2][0][RTW89_WW][1][92] = 22, - [0][0][2][0][RTW89_WW][2][92] = 0, [0][0][2][0][RTW89_WW][0][94] = 22, [0][0][2][0][RTW89_WW][1][94] = 22, - 
[0][0][2][0][RTW89_WW][2][94] = 0, [0][0][2][0][RTW89_WW][0][96] = 22, [0][0][2][0][RTW89_WW][1][96] = 22, - [0][0][2][0][RTW89_WW][2][96] = 0, [0][0][2][0][RTW89_WW][0][98] = 22, [0][0][2][0][RTW89_WW][1][98] = 22, - [0][0][2][0][RTW89_WW][2][98] = 0, [0][0][2][0][RTW89_WW][0][100] = 22, [0][0][2][0][RTW89_WW][1][100] = 22, - [0][0][2][0][RTW89_WW][2][100] = 0, [0][0][2][0][RTW89_WW][0][102] = 22, [0][0][2][0][RTW89_WW][1][102] = 22, - [0][0][2][0][RTW89_WW][2][102] = 0, [0][0][2][0][RTW89_WW][0][104] = 22, [0][0][2][0][RTW89_WW][1][104] = 22, - [0][0][2][0][RTW89_WW][2][104] = 0, [0][0][2][0][RTW89_WW][0][105] = 22, [0][0][2][0][RTW89_WW][1][105] = 22, - [0][0][2][0][RTW89_WW][2][105] = 0, [0][0][2][0][RTW89_WW][0][107] = 24, [0][0][2][0][RTW89_WW][1][107] = 24, - [0][0][2][0][RTW89_WW][2][107] = 0, [0][0][2][0][RTW89_WW][0][109] = 24, [0][0][2][0][RTW89_WW][1][109] = 24, - [0][0][2][0][RTW89_WW][2][109] = 0, [0][0][2][0][RTW89_WW][0][111] = 0, [0][0][2][0][RTW89_WW][1][111] = 0, - [0][0][2][0][RTW89_WW][2][111] = 0, [0][0][2][0][RTW89_WW][0][113] = 0, [0][0][2][0][RTW89_WW][1][113] = 0, - [0][0][2][0][RTW89_WW][2][113] = 0, [0][0][2][0][RTW89_WW][0][115] = 0, [0][0][2][0][RTW89_WW][1][115] = 0, - [0][0][2][0][RTW89_WW][2][115] = 0, [0][0][2][0][RTW89_WW][0][117] = 0, [0][0][2][0][RTW89_WW][1][117] = 0, - [0][0][2][0][RTW89_WW][2][117] = 0, [0][0][2][0][RTW89_WW][0][119] = 0, [0][0][2][0][RTW89_WW][1][119] = 0, - [0][0][2][0][RTW89_WW][2][119] = 0, [0][1][2][0][RTW89_WW][0][0] = -2, [0][1][2][0][RTW89_WW][1][0] = -2, - [0][1][2][0][RTW89_WW][2][0] = 54, [0][1][2][0][RTW89_WW][0][2] = -4, [0][1][2][0][RTW89_WW][1][2] = -4, - [0][1][2][0][RTW89_WW][2][2] = 54, [0][1][2][0][RTW89_WW][0][4] = -4, [0][1][2][0][RTW89_WW][1][4] = -4, - [0][1][2][0][RTW89_WW][2][4] = 54, [0][1][2][0][RTW89_WW][0][6] = -4, [0][1][2][0][RTW89_WW][1][6] = -4, - [0][1][2][0][RTW89_WW][2][6] = 54, [0][1][2][0][RTW89_WW][0][8] = -4, [0][1][2][0][RTW89_WW][1][8] = -4, - [0][1][2][0][RTW89_WW][2][8] = 54, [0][1][2][0][RTW89_WW][0][10] = -4, [0][1][2][0][RTW89_WW][1][10] = -4, - [0][1][2][0][RTW89_WW][2][10] = 54, [0][1][2][0][RTW89_WW][0][12] = -4, [0][1][2][0][RTW89_WW][1][12] = -4, - [0][1][2][0][RTW89_WW][2][12] = 54, [0][1][2][0][RTW89_WW][0][14] = -4, [0][1][2][0][RTW89_WW][1][14] = -4, - [0][1][2][0][RTW89_WW][2][14] = 54, [0][1][2][0][RTW89_WW][0][15] = -4, [0][1][2][0][RTW89_WW][1][15] = -4, - [0][1][2][0][RTW89_WW][2][15] = 54, [0][1][2][0][RTW89_WW][0][17] = -4, [0][1][2][0][RTW89_WW][1][17] = -4, - [0][1][2][0][RTW89_WW][2][17] = 54, [0][1][2][0][RTW89_WW][0][19] = -4, [0][1][2][0][RTW89_WW][1][19] = -4, - [0][1][2][0][RTW89_WW][2][19] = 54, [0][1][2][0][RTW89_WW][0][21] = -4, [0][1][2][0][RTW89_WW][1][21] = -4, - [0][1][2][0][RTW89_WW][2][21] = 54, [0][1][2][0][RTW89_WW][0][23] = -4, [0][1][2][0][RTW89_WW][1][23] = -4, - [0][1][2][0][RTW89_WW][2][23] = 68, [0][1][2][0][RTW89_WW][0][25] = -4, [0][1][2][0][RTW89_WW][1][25] = -4, - [0][1][2][0][RTW89_WW][2][25] = 68, [0][1][2][0][RTW89_WW][0][27] = -4, [0][1][2][0][RTW89_WW][1][27] = -4, - [0][1][2][0][RTW89_WW][2][27] = 68, [0][1][2][0][RTW89_WW][0][29] = -4, [0][1][2][0][RTW89_WW][1][29] = -4, - [0][1][2][0][RTW89_WW][2][29] = 68, [0][1][2][0][RTW89_WW][0][30] = -4, [0][1][2][0][RTW89_WW][1][30] = -4, - [0][1][2][0][RTW89_WW][2][30] = 68, [0][1][2][0][RTW89_WW][0][32] = -4, [0][1][2][0][RTW89_WW][1][32] = -4, - [0][1][2][0][RTW89_WW][2][32] = 68, [0][1][2][0][RTW89_WW][0][34] = -4, [0][1][2][0][RTW89_WW][1][34] = -4, - [0][1][2][0][RTW89_WW][2][34] = 68, 
[0][1][2][0][RTW89_WW][0][36] = -4, [0][1][2][0][RTW89_WW][1][36] = -4, - [0][1][2][0][RTW89_WW][2][36] = 68, [0][1][2][0][RTW89_WW][0][38] = -4, [0][1][2][0][RTW89_WW][1][38] = -4, - [0][1][2][0][RTW89_WW][2][38] = 68, [0][1][2][0][RTW89_WW][0][40] = -4, [0][1][2][0][RTW89_WW][1][40] = -4, - [0][1][2][0][RTW89_WW][2][40] = 68, [0][1][2][0][RTW89_WW][0][42] = -4, [0][1][2][0][RTW89_WW][1][42] = -4, - [0][1][2][0][RTW89_WW][2][42] = 68, [0][1][2][0][RTW89_WW][0][44] = -2, [0][1][2][0][RTW89_WW][1][44] = -2, - [0][1][2][0][RTW89_WW][2][44] = 68, [0][1][2][0][RTW89_WW][0][45] = -2, [0][1][2][0][RTW89_WW][1][45] = -2, - [0][1][2][0][RTW89_WW][2][45] = 70, [0][1][2][0][RTW89_WW][0][47] = -2, [0][1][2][0][RTW89_WW][1][47] = -2, - [0][1][2][0][RTW89_WW][2][47] = 68, [0][1][2][0][RTW89_WW][0][49] = -2, [0][1][2][0][RTW89_WW][1][49] = -2, - [0][1][2][0][RTW89_WW][2][49] = 68, [0][1][2][0][RTW89_WW][0][51] = -2, [0][1][2][0][RTW89_WW][1][51] = -2, - [0][1][2][0][RTW89_WW][2][51] = 68, [0][1][2][0][RTW89_WW][0][53] = -2, [0][1][2][0][RTW89_WW][1][53] = -2, - [0][1][2][0][RTW89_WW][2][53] = 68, [0][1][2][0][RTW89_WW][0][55] = -2, [0][1][2][0][RTW89_WW][1][55] = -2, - [0][1][2][0][RTW89_WW][2][55] = 68, [0][1][2][0][RTW89_WW][0][57] = -2, [0][1][2][0][RTW89_WW][1][57] = -2, - [0][1][2][0][RTW89_WW][2][57] = 68, [0][1][2][0][RTW89_WW][0][59] = -2, [0][1][2][0][RTW89_WW][1][59] = -2, - [0][1][2][0][RTW89_WW][2][59] = 68, [0][1][2][0][RTW89_WW][0][60] = -2, [0][1][2][0][RTW89_WW][1][60] = -2, - [0][1][2][0][RTW89_WW][2][60] = 68, [0][1][2][0][RTW89_WW][0][62] = -2, [0][1][2][0][RTW89_WW][1][62] = -2, - [0][1][2][0][RTW89_WW][2][62] = 68, [0][1][2][0][RTW89_WW][0][64] = -2, [0][1][2][0][RTW89_WW][1][64] = -2, - [0][1][2][0][RTW89_WW][2][64] = 68, [0][1][2][0][RTW89_WW][0][66] = -2, [0][1][2][0][RTW89_WW][1][66] = -2, - [0][1][2][0][RTW89_WW][2][66] = 68, [0][1][2][0][RTW89_WW][0][68] = -2, [0][1][2][0][RTW89_WW][1][68] = -2, - [0][1][2][0][RTW89_WW][2][68] = 68, [0][1][2][0][RTW89_WW][0][70] = -2, [0][1][2][0][RTW89_WW][1][70] = -2, - [0][1][2][0][RTW89_WW][2][70] = 68, [0][1][2][0][RTW89_WW][0][72] = -2, [0][1][2][0][RTW89_WW][1][72] = -2, - [0][1][2][0][RTW89_WW][2][72] = 68, [0][1][2][0][RTW89_WW][0][74] = -2, [0][1][2][0][RTW89_WW][1][74] = -2, - [0][1][2][0][RTW89_WW][2][74] = 68, [0][1][2][0][RTW89_WW][0][75] = -2, [0][1][2][0][RTW89_WW][1][75] = -2, - [0][1][2][0][RTW89_WW][2][75] = 68, [0][1][2][0][RTW89_WW][0][77] = -2, [0][1][2][0][RTW89_WW][1][77] = -2, - [0][1][2][0][RTW89_WW][2][77] = 68, [0][1][2][0][RTW89_WW][0][79] = -2, [0][1][2][0][RTW89_WW][1][79] = -2, - [0][1][2][0][RTW89_WW][2][79] = 68, [0][1][2][0][RTW89_WW][0][81] = -2, [0][1][2][0][RTW89_WW][1][81] = -2, - [0][1][2][0][RTW89_WW][2][81] = 68, [0][1][2][0][RTW89_WW][0][83] = -2, [0][1][2][0][RTW89_WW][1][83] = -2, - [0][1][2][0][RTW89_WW][2][83] = 68, [0][1][2][0][RTW89_WW][0][85] = -2, [0][1][2][0][RTW89_WW][1][85] = -2, - [0][1][2][0][RTW89_WW][2][85] = 68, [0][1][2][0][RTW89_WW][0][87] = -2, [0][1][2][0][RTW89_WW][1][87] = -2, - [0][1][2][0][RTW89_WW][2][87] = 0, [0][1][2][0][RTW89_WW][0][89] = -2, [0][1][2][0][RTW89_WW][1][89] = -2, - [0][1][2][0][RTW89_WW][2][89] = 0, [0][1][2][0][RTW89_WW][0][90] = -2, [0][1][2][0][RTW89_WW][1][90] = -2, - [0][1][2][0][RTW89_WW][2][90] = 0, [0][1][2][0][RTW89_WW][0][92] = -2, [0][1][2][0][RTW89_WW][1][92] = -2, - [0][1][2][0][RTW89_WW][2][92] = 0, [0][1][2][0][RTW89_WW][0][94] = -2, [0][1][2][0][RTW89_WW][1][94] = -2, - [0][1][2][0][RTW89_WW][2][94] = 0, [0][1][2][0][RTW89_WW][0][96] = -2, 
[0][1][2][0][RTW89_WW][1][96] = -2, - [0][1][2][0][RTW89_WW][2][96] = 0, [0][1][2][0][RTW89_WW][0][98] = -2, [0][1][2][0][RTW89_WW][1][98] = -2, - [0][1][2][0][RTW89_WW][2][98] = 0, [0][1][2][0][RTW89_WW][0][100] = -2, [0][1][2][0][RTW89_WW][1][100] = -2, - [0][1][2][0][RTW89_WW][2][100] = 0, [0][1][2][0][RTW89_WW][0][102] = -2, [0][1][2][0][RTW89_WW][1][102] = -2, - [0][1][2][0][RTW89_WW][2][102] = 0, [0][1][2][0][RTW89_WW][0][104] = -2, [0][1][2][0][RTW89_WW][1][104] = -2, - [0][1][2][0][RTW89_WW][2][104] = 0, [0][1][2][0][RTW89_WW][0][105] = -2, [0][1][2][0][RTW89_WW][1][105] = -2, - [0][1][2][0][RTW89_WW][2][105] = 0, [0][1][2][0][RTW89_WW][0][107] = 1, [0][1][2][0][RTW89_WW][1][107] = 1, - [0][1][2][0][RTW89_WW][2][107] = 0, [0][1][2][0][RTW89_WW][0][109] = 1, [0][1][2][0][RTW89_WW][1][109] = 1, - [0][1][2][0][RTW89_WW][2][109] = 0, [0][1][2][0][RTW89_WW][0][111] = 0, [0][1][2][0][RTW89_WW][1][111] = 0, - [0][1][2][0][RTW89_WW][2][111] = 0, [0][1][2][0][RTW89_WW][0][113] = 0, [0][1][2][0][RTW89_WW][1][113] = 0, - [0][1][2][0][RTW89_WW][2][113] = 0, [0][1][2][0][RTW89_WW][0][115] = 0, [0][1][2][0][RTW89_WW][1][115] = 0, - [0][1][2][0][RTW89_WW][2][115] = 0, [0][1][2][0][RTW89_WW][0][117] = 0, [0][1][2][0][RTW89_WW][1][117] = 0, - [0][1][2][0][RTW89_WW][2][117] = 0, [0][1][2][0][RTW89_WW][0][119] = 0, [0][1][2][0][RTW89_WW][1][119] = 0, - [0][1][2][0][RTW89_WW][2][119] = 0, [0][1][2][1][RTW89_WW][0][0] = -2, [0][1][2][1][RTW89_WW][1][0] = -2, - [0][1][2][1][RTW89_WW][2][0] = 54, [0][1][2][1][RTW89_WW][0][2] = -4, [0][1][2][1][RTW89_WW][1][2] = -4, - [0][1][2][1][RTW89_WW][2][2] = 54, [0][1][2][1][RTW89_WW][0][4] = -4, [0][1][2][1][RTW89_WW][1][4] = -4, - [0][1][2][1][RTW89_WW][2][4] = 54, [0][1][2][1][RTW89_WW][0][6] = -4, [0][1][2][1][RTW89_WW][1][6] = -4, - [0][1][2][1][RTW89_WW][2][6] = 54, [0][1][2][1][RTW89_WW][0][8] = -4, [0][1][2][1][RTW89_WW][1][8] = -4, - [0][1][2][1][RTW89_WW][2][8] = 54, [0][1][2][1][RTW89_WW][0][10] = -4, [0][1][2][1][RTW89_WW][1][10] = -4, - [0][1][2][1][RTW89_WW][2][10] = 54, [0][1][2][1][RTW89_WW][0][12] = -4, [0][1][2][1][RTW89_WW][1][12] = -4, - [0][1][2][1][RTW89_WW][2][12] = 54, [0][1][2][1][RTW89_WW][0][14] = -4, [0][1][2][1][RTW89_WW][1][14] = -4, - [0][1][2][1][RTW89_WW][2][14] = 54, [0][1][2][1][RTW89_WW][0][15] = -4, [0][1][2][1][RTW89_WW][1][15] = -4, - [0][1][2][1][RTW89_WW][2][15] = 54, [0][1][2][1][RTW89_WW][0][17] = -4, [0][1][2][1][RTW89_WW][1][17] = -4, - [0][1][2][1][RTW89_WW][2][17] = 54, [0][1][2][1][RTW89_WW][0][19] = -4, [0][1][2][1][RTW89_WW][1][19] = -4, - [0][1][2][1][RTW89_WW][2][19] = 54, [0][1][2][1][RTW89_WW][0][21] = -4, [0][1][2][1][RTW89_WW][1][21] = -4, - [0][1][2][1][RTW89_WW][2][21] = 54, [0][1][2][1][RTW89_WW][0][23] = -4, [0][1][2][1][RTW89_WW][1][23] = -4, - [0][1][2][1][RTW89_WW][2][23] = 68, [0][1][2][1][RTW89_WW][0][25] = -4, [0][1][2][1][RTW89_WW][1][25] = -4, - [0][1][2][1][RTW89_WW][2][25] = 68, [0][1][2][1][RTW89_WW][0][27] = -4, [0][1][2][1][RTW89_WW][1][27] = -4, - [0][1][2][1][RTW89_WW][2][27] = 68, [0][1][2][1][RTW89_WW][0][29] = -4, [0][1][2][1][RTW89_WW][1][29] = -4, - [0][1][2][1][RTW89_WW][2][29] = 68, [0][1][2][1][RTW89_WW][0][30] = -4, [0][1][2][1][RTW89_WW][1][30] = -4, - [0][1][2][1][RTW89_WW][2][30] = 68, [0][1][2][1][RTW89_WW][0][32] = -4, [0][1][2][1][RTW89_WW][1][32] = -4, - [0][1][2][1][RTW89_WW][2][32] = 68, [0][1][2][1][RTW89_WW][0][34] = -4, [0][1][2][1][RTW89_WW][1][34] = -4, - [0][1][2][1][RTW89_WW][2][34] = 68, [0][1][2][1][RTW89_WW][0][36] = -4, [0][1][2][1][RTW89_WW][1][36] = -4, - 
[0][1][2][1][RTW89_WW][2][36] = 68, [0][1][2][1][RTW89_WW][0][38] = -4, [0][1][2][1][RTW89_WW][1][38] = -4, - [0][1][2][1][RTW89_WW][2][38] = 68, [0][1][2][1][RTW89_WW][0][40] = -4, [0][1][2][1][RTW89_WW][1][40] = -4, - [0][1][2][1][RTW89_WW][2][40] = 68, [0][1][2][1][RTW89_WW][0][42] = -4, [0][1][2][1][RTW89_WW][1][42] = -4, - [0][1][2][1][RTW89_WW][2][42] = 68, [0][1][2][1][RTW89_WW][0][44] = -2, [0][1][2][1][RTW89_WW][1][44] = -2, - [0][1][2][1][RTW89_WW][2][44] = 68, [0][1][2][1][RTW89_WW][0][45] = -2, [0][1][2][1][RTW89_WW][1][45] = -2, - [0][1][2][1][RTW89_WW][2][45] = 70, [0][1][2][1][RTW89_WW][0][47] = -2, [0][1][2][1][RTW89_WW][1][47] = -2, - [0][1][2][1][RTW89_WW][2][47] = 68, [0][1][2][1][RTW89_WW][0][49] = -2, [0][1][2][1][RTW89_WW][1][49] = -2, - [0][1][2][1][RTW89_WW][2][49] = 68, [0][1][2][1][RTW89_WW][0][51] = -2, [0][1][2][1][RTW89_WW][1][51] = -2, - [0][1][2][1][RTW89_WW][2][51] = 68, [0][1][2][1][RTW89_WW][0][53] = -2, [0][1][2][1][RTW89_WW][1][53] = -2, - [0][1][2][1][RTW89_WW][2][53] = 68, [0][1][2][1][RTW89_WW][0][55] = -2, [0][1][2][1][RTW89_WW][1][55] = -2, - [0][1][2][1][RTW89_WW][2][55] = 68, [0][1][2][1][RTW89_WW][0][57] = -2, [0][1][2][1][RTW89_WW][1][57] = -2, - [0][1][2][1][RTW89_WW][2][57] = 68, [0][1][2][1][RTW89_WW][0][59] = -2, [0][1][2][1][RTW89_WW][1][59] = -2, - [0][1][2][1][RTW89_WW][2][59] = 68, [0][1][2][1][RTW89_WW][0][60] = -2, [0][1][2][1][RTW89_WW][1][60] = -2, - [0][1][2][1][RTW89_WW][2][60] = 68, [0][1][2][1][RTW89_WW][0][62] = -2, [0][1][2][1][RTW89_WW][1][62] = -2, - [0][1][2][1][RTW89_WW][2][62] = 68, [0][1][2][1][RTW89_WW][0][64] = -2, [0][1][2][1][RTW89_WW][1][64] = -2, - [0][1][2][1][RTW89_WW][2][64] = 68, [0][1][2][1][RTW89_WW][0][66] = -2, [0][1][2][1][RTW89_WW][1][66] = -2, - [0][1][2][1][RTW89_WW][2][66] = 68, [0][1][2][1][RTW89_WW][0][68] = -2, [0][1][2][1][RTW89_WW][1][68] = -2, - [0][1][2][1][RTW89_WW][2][68] = 68, [0][1][2][1][RTW89_WW][0][70] = -2, [0][1][2][1][RTW89_WW][1][70] = -2, - [0][1][2][1][RTW89_WW][2][70] = 68, [0][1][2][1][RTW89_WW][0][72] = -2, [0][1][2][1][RTW89_WW][1][72] = -2, - [0][1][2][1][RTW89_WW][2][72] = 68, [0][1][2][1][RTW89_WW][0][74] = -2, [0][1][2][1][RTW89_WW][1][74] = -2, - [0][1][2][1][RTW89_WW][2][74] = 68, [0][1][2][1][RTW89_WW][0][75] = -2, [0][1][2][1][RTW89_WW][1][75] = -2, - [0][1][2][1][RTW89_WW][2][75] = 68, [0][1][2][1][RTW89_WW][0][77] = -2, [0][1][2][1][RTW89_WW][1][77] = -2, - [0][1][2][1][RTW89_WW][2][77] = 68, [0][1][2][1][RTW89_WW][0][79] = -2, [0][1][2][1][RTW89_WW][1][79] = -2, - [0][1][2][1][RTW89_WW][2][79] = 68, [0][1][2][1][RTW89_WW][0][81] = -2, [0][1][2][1][RTW89_WW][1][81] = -2, - [0][1][2][1][RTW89_WW][2][81] = 68, [0][1][2][1][RTW89_WW][0][83] = -2, [0][1][2][1][RTW89_WW][1][83] = -2, - [0][1][2][1][RTW89_WW][2][83] = 68, [0][1][2][1][RTW89_WW][0][85] = -2, [0][1][2][1][RTW89_WW][1][85] = -2, - [0][1][2][1][RTW89_WW][2][85] = 68, [0][1][2][1][RTW89_WW][0][87] = -2, [0][1][2][1][RTW89_WW][1][87] = -2, - [0][1][2][1][RTW89_WW][2][87] = 0, [0][1][2][1][RTW89_WW][0][89] = -2, [0][1][2][1][RTW89_WW][1][89] = -2, - [0][1][2][1][RTW89_WW][2][89] = 0, [0][1][2][1][RTW89_WW][0][90] = -2, [0][1][2][1][RTW89_WW][1][90] = -2, - [0][1][2][1][RTW89_WW][2][90] = 0, [0][1][2][1][RTW89_WW][0][92] = -2, [0][1][2][1][RTW89_WW][1][92] = -2, - [0][1][2][1][RTW89_WW][2][92] = 0, [0][1][2][1][RTW89_WW][0][94] = -2, [0][1][2][1][RTW89_WW][1][94] = -2, - [0][1][2][1][RTW89_WW][2][94] = 0, [0][1][2][1][RTW89_WW][0][96] = -2, [0][1][2][1][RTW89_WW][1][96] = -2, - [0][1][2][1][RTW89_WW][2][96] = 0, 
[0][1][2][1][RTW89_WW][0][98] = -2, [0][1][2][1][RTW89_WW][1][98] = -2, - [0][1][2][1][RTW89_WW][2][98] = 0, [0][1][2][1][RTW89_WW][0][100] = -2, [0][1][2][1][RTW89_WW][1][100] = -2, - [0][1][2][1][RTW89_WW][2][100] = 0, [0][1][2][1][RTW89_WW][0][102] = -2, [0][1][2][1][RTW89_WW][1][102] = -2, - [0][1][2][1][RTW89_WW][2][102] = 0, [0][1][2][1][RTW89_WW][0][104] = -2, [0][1][2][1][RTW89_WW][1][104] = -2, - [0][1][2][1][RTW89_WW][2][104] = 0, [0][1][2][1][RTW89_WW][0][105] = -2, [0][1][2][1][RTW89_WW][1][105] = -2, - [0][1][2][1][RTW89_WW][2][105] = 0, [0][1][2][1][RTW89_WW][0][107] = 1, [0][1][2][1][RTW89_WW][1][107] = 1, - [0][1][2][1][RTW89_WW][2][107] = 0, [0][1][2][1][RTW89_WW][0][109] = 1, [0][1][2][1][RTW89_WW][1][109] = 1, - [0][1][2][1][RTW89_WW][2][109] = 0, [0][1][2][1][RTW89_WW][0][111] = 0, [0][1][2][1][RTW89_WW][1][111] = 0, - [0][1][2][1][RTW89_WW][2][111] = 0, [0][1][2][1][RTW89_WW][0][113] = 0, [0][1][2][1][RTW89_WW][1][113] = 0, - [0][1][2][1][RTW89_WW][2][113] = 0, [0][1][2][1][RTW89_WW][0][115] = 0, [0][1][2][1][RTW89_WW][1][115] = 0, - [0][1][2][1][RTW89_WW][2][115] = 0, [0][1][2][1][RTW89_WW][0][117] = 0, [0][1][2][1][RTW89_WW][1][117] = 0, - [0][1][2][1][RTW89_WW][2][117] = 0, [0][1][2][1][RTW89_WW][0][119] = 0, [0][1][2][1][RTW89_WW][1][119] = 0, - [0][1][2][1][RTW89_WW][2][119] = 0, [1][0][2][0][RTW89_WW][0][1] = 24, [1][0][2][0][RTW89_WW][1][1] = 34, - [1][0][2][0][RTW89_WW][2][1] = 70, [1][0][2][0][RTW89_WW][0][5] = 24, [1][0][2][0][RTW89_WW][1][5] = 34, - [1][0][2][0][RTW89_WW][2][5] = 70, [1][0][2][0][RTW89_WW][0][9] = 24, [1][0][2][0][RTW89_WW][1][9] = 34, - [1][0][2][0][RTW89_WW][2][9] = 70, [1][0][2][0][RTW89_WW][0][13] = 24, [1][0][2][0][RTW89_WW][1][13] = 34, - [1][0][2][0][RTW89_WW][2][13] = 70, [1][0][2][0][RTW89_WW][0][16] = 24, [1][0][2][0][RTW89_WW][1][16] = 34, - [1][0][2][0][RTW89_WW][2][16] = 70, [1][0][2][0][RTW89_WW][0][20] = 24, [1][0][2][0][RTW89_WW][1][20] = 34, - [1][0][2][0][RTW89_WW][2][20] = 70, [1][0][2][0][RTW89_WW][0][24] = 26, [1][0][2][0][RTW89_WW][1][24] = 36, - [1][0][2][0][RTW89_WW][2][24] = 70, [1][0][2][0][RTW89_WW][0][28] = 26, [1][0][2][0][RTW89_WW][1][28] = 34, - [1][0][2][0][RTW89_WW][2][28] = 70, [1][0][2][0][RTW89_WW][0][31] = 26, [1][0][2][0][RTW89_WW][1][31] = 34, - [1][0][2][0][RTW89_WW][2][31] = 70, [1][0][2][0][RTW89_WW][0][35] = 26, [1][0][2][0][RTW89_WW][1][35] = 34, - [1][0][2][0][RTW89_WW][2][35] = 70, [1][0][2][0][RTW89_WW][0][39] = 26, [1][0][2][0][RTW89_WW][1][39] = 34, - [1][0][2][0][RTW89_WW][2][39] = 70, [1][0][2][0][RTW89_WW][0][43] = 26, [1][0][2][0][RTW89_WW][1][43] = 34, - [1][0][2][0][RTW89_WW][2][43] = 70, [1][0][2][0][RTW89_WW][0][46] = 34, [1][0][2][0][RTW89_WW][1][46] = 34, - [1][0][2][0][RTW89_WW][2][46] = 68, [1][0][2][0][RTW89_WW][0][50] = 34, [1][0][2][0][RTW89_WW][1][50] = 34, - [1][0][2][0][RTW89_WW][2][50] = 68, [1][0][2][0][RTW89_WW][0][54] = 36, [1][0][2][0][RTW89_WW][1][54] = 36, - [1][0][2][0][RTW89_WW][2][54] = 0, [1][0][2][0][RTW89_WW][0][58] = 36, [1][0][2][0][RTW89_WW][1][58] = 36, - [1][0][2][0][RTW89_WW][2][58] = 66, [1][0][2][0][RTW89_WW][0][61] = 34, [1][0][2][0][RTW89_WW][1][61] = 34, - [1][0][2][0][RTW89_WW][2][61] = 66, [1][0][2][0][RTW89_WW][0][65] = 34, [1][0][2][0][RTW89_WW][1][65] = 34, - [1][0][2][0][RTW89_WW][2][65] = 66, [1][0][2][0][RTW89_WW][0][69] = 34, [1][0][2][0][RTW89_WW][1][69] = 34, - [1][0][2][0][RTW89_WW][2][69] = 66, [1][0][2][0][RTW89_WW][0][73] = 34, [1][0][2][0][RTW89_WW][1][73] = 34, - [1][0][2][0][RTW89_WW][2][73] = 66, [1][0][2][0][RTW89_WW][0][76] = 34, 
[1][0][2][0][RTW89_WW][1][76] = 34, - [1][0][2][0][RTW89_WW][2][76] = 66, [1][0][2][0][RTW89_WW][0][80] = 34, [1][0][2][0][RTW89_WW][1][80] = 34, - [1][0][2][0][RTW89_WW][2][80] = 66, [1][0][2][0][RTW89_WW][0][84] = 34, [1][0][2][0][RTW89_WW][1][84] = 34, - [1][0][2][0][RTW89_WW][2][84] = 66, [1][0][2][0][RTW89_WW][0][88] = 34, [1][0][2][0][RTW89_WW][1][88] = 34, - [1][0][2][0][RTW89_WW][2][88] = 0, [1][0][2][0][RTW89_WW][0][91] = 36, [1][0][2][0][RTW89_WW][1][91] = 36, - [1][0][2][0][RTW89_WW][2][91] = 0, [1][0][2][0][RTW89_WW][0][95] = 34, [1][0][2][0][RTW89_WW][1][95] = 34, - [1][0][2][0][RTW89_WW][2][95] = 0, [1][0][2][0][RTW89_WW][0][99] = 34, [1][0][2][0][RTW89_WW][1][99] = 34, - [1][0][2][0][RTW89_WW][2][99] = 0, [1][0][2][0][RTW89_WW][0][103] = 34, [1][0][2][0][RTW89_WW][1][103] = 34, - [1][0][2][0][RTW89_WW][2][103] = 0, [1][0][2][0][RTW89_WW][0][106] = 36, [1][0][2][0][RTW89_WW][1][106] = 36, - [1][0][2][0][RTW89_WW][2][106] = 0, [1][0][2][0][RTW89_WW][0][110] = 0, [1][0][2][0][RTW89_WW][1][110] = 0, - [1][0][2][0][RTW89_WW][2][110] = 0, [1][0][2][0][RTW89_WW][0][114] = 0, [1][0][2][0][RTW89_WW][1][114] = 0, - [1][0][2][0][RTW89_WW][2][114] = 0, [1][0][2][0][RTW89_WW][0][118] = 0, [1][0][2][0][RTW89_WW][1][118] = 0, - [1][0][2][0][RTW89_WW][2][118] = 0, [1][1][2][0][RTW89_WW][0][1] = 10, [1][1][2][0][RTW89_WW][1][1] = 10, - [1][1][2][0][RTW89_WW][2][1] = 58, [1][1][2][0][RTW89_WW][0][5] = 10, [1][1][2][0][RTW89_WW][1][5] = 10, - [1][1][2][0][RTW89_WW][2][5] = 58, [1][1][2][0][RTW89_WW][0][9] = 10, [1][1][2][0][RTW89_WW][1][9] = 10, - [1][1][2][0][RTW89_WW][2][9] = 58, [1][1][2][0][RTW89_WW][0][13] = 10, [1][1][2][0][RTW89_WW][1][13] = 10, - [1][1][2][0][RTW89_WW][2][13] = 58, [1][1][2][0][RTW89_WW][0][16] = 10, [1][1][2][0][RTW89_WW][1][16] = 10, - [1][1][2][0][RTW89_WW][2][16] = 58, [1][1][2][0][RTW89_WW][0][20] = 10, [1][1][2][0][RTW89_WW][1][20] = 10, - [1][1][2][0][RTW89_WW][2][20] = 58, [1][1][2][0][RTW89_WW][0][24] = 10, [1][1][2][0][RTW89_WW][1][24] = 10, - [1][1][2][0][RTW89_WW][2][24] = 70, [1][1][2][0][RTW89_WW][0][28] = 10, [1][1][2][0][RTW89_WW][1][28] = 10, - [1][1][2][0][RTW89_WW][2][28] = 70, [1][1][2][0][RTW89_WW][0][31] = 10, [1][1][2][0][RTW89_WW][1][31] = 10, - [1][1][2][0][RTW89_WW][2][31] = 70, [1][1][2][0][RTW89_WW][0][35] = 10, [1][1][2][0][RTW89_WW][1][35] = 10, - [1][1][2][0][RTW89_WW][2][35] = 70, [1][1][2][0][RTW89_WW][0][39] = 10, [1][1][2][0][RTW89_WW][1][39] = 10, - [1][1][2][0][RTW89_WW][2][39] = 70, [1][1][2][0][RTW89_WW][0][43] = 10, [1][1][2][0][RTW89_WW][1][43] = 10, - [1][1][2][0][RTW89_WW][2][43] = 70, [1][1][2][0][RTW89_WW][0][46] = 12, [1][1][2][0][RTW89_WW][1][46] = 12, - [1][1][2][0][RTW89_WW][2][46] = 68, [1][1][2][0][RTW89_WW][0][50] = 12, [1][1][2][0][RTW89_WW][1][50] = 12, - [1][1][2][0][RTW89_WW][2][50] = 68, [1][1][2][0][RTW89_WW][0][54] = 10, [1][1][2][0][RTW89_WW][1][54] = 10, - [1][1][2][0][RTW89_WW][2][54] = 0, [1][1][2][0][RTW89_WW][0][58] = 10, [1][1][2][0][RTW89_WW][1][58] = 10, - [1][1][2][0][RTW89_WW][2][58] = 66, [1][1][2][0][RTW89_WW][0][61] = 10, [1][1][2][0][RTW89_WW][1][61] = 10, - [1][1][2][0][RTW89_WW][2][61] = 66, [1][1][2][0][RTW89_WW][0][65] = 10, [1][1][2][0][RTW89_WW][1][65] = 10, - [1][1][2][0][RTW89_WW][2][65] = 66, [1][1][2][0][RTW89_WW][0][69] = 10, [1][1][2][0][RTW89_WW][1][69] = 10, - [1][1][2][0][RTW89_WW][2][69] = 66, [1][1][2][0][RTW89_WW][0][73] = 10, [1][1][2][0][RTW89_WW][1][73] = 10, - [1][1][2][0][RTW89_WW][2][73] = 66, [1][1][2][0][RTW89_WW][0][76] = 10, [1][1][2][0][RTW89_WW][1][76] = 10, - 
[1][1][2][0][RTW89_WW][2][76] = 66, [1][1][2][0][RTW89_WW][0][80] = 10, [1][1][2][0][RTW89_WW][1][80] = 10, - [1][1][2][0][RTW89_WW][2][80] = 66, [1][1][2][0][RTW89_WW][0][84] = 10, [1][1][2][0][RTW89_WW][1][84] = 10, - [1][1][2][0][RTW89_WW][2][84] = 66, [1][1][2][0][RTW89_WW][0][88] = 10, [1][1][2][0][RTW89_WW][1][88] = 10, - [1][1][2][0][RTW89_WW][2][88] = 0, [1][1][2][0][RTW89_WW][0][91] = 12, [1][1][2][0][RTW89_WW][1][91] = 12, - [1][1][2][0][RTW89_WW][2][91] = 0, [1][1][2][0][RTW89_WW][0][95] = 10, [1][1][2][0][RTW89_WW][1][95] = 10, - [1][1][2][0][RTW89_WW][2][95] = 0, [1][1][2][0][RTW89_WW][0][99] = 10, [1][1][2][0][RTW89_WW][1][99] = 10, - [1][1][2][0][RTW89_WW][2][99] = 0, [1][1][2][0][RTW89_WW][0][103] = 10, [1][1][2][0][RTW89_WW][1][103] = 10, - [1][1][2][0][RTW89_WW][2][103] = 0, [1][1][2][0][RTW89_WW][0][106] = 12, [1][1][2][0][RTW89_WW][1][106] = 12, - [1][1][2][0][RTW89_WW][2][106] = 0, [1][1][2][0][RTW89_WW][0][110] = 0, [1][1][2][0][RTW89_WW][1][110] = 0, - [1][1][2][0][RTW89_WW][2][110] = 0, [1][1][2][0][RTW89_WW][0][114] = 0, [1][1][2][0][RTW89_WW][1][114] = 0, - [1][1][2][0][RTW89_WW][2][114] = 0, [1][1][2][0][RTW89_WW][0][118] = 0, [1][1][2][0][RTW89_WW][1][118] = 0, - [1][1][2][0][RTW89_WW][2][118] = 0, [1][1][2][1][RTW89_WW][0][1] = 6, [1][1][2][1][RTW89_WW][1][1] = 10, - [1][1][2][1][RTW89_WW][2][1] = 58, [1][1][2][1][RTW89_WW][0][5] = 6, [1][1][2][1][RTW89_WW][1][5] = 10, - [1][1][2][1][RTW89_WW][2][5] = 58, [1][1][2][1][RTW89_WW][0][9] = 6, [1][1][2][1][RTW89_WW][1][9] = 10, - [1][1][2][1][RTW89_WW][2][9] = 58, [1][1][2][1][RTW89_WW][0][13] = 6, [1][1][2][1][RTW89_WW][1][13] = 10, - [1][1][2][1][RTW89_WW][2][13] = 58, [1][1][2][1][RTW89_WW][0][16] = 6, [1][1][2][1][RTW89_WW][1][16] = 10, - [1][1][2][1][RTW89_WW][2][16] = 58, [1][1][2][1][RTW89_WW][0][20] = 6, [1][1][2][1][RTW89_WW][1][20] = 10, - [1][1][2][1][RTW89_WW][2][20] = 58, [1][1][2][1][RTW89_WW][0][24] = 6, [1][1][2][1][RTW89_WW][1][24] = 10, - [1][1][2][1][RTW89_WW][2][24] = 70, [1][1][2][1][RTW89_WW][0][28] = 6, [1][1][2][1][RTW89_WW][1][28] = 10, - [1][1][2][1][RTW89_WW][2][28] = 70, [1][1][2][1][RTW89_WW][0][31] = 6, [1][1][2][1][RTW89_WW][1][31] = 10, - [1][1][2][1][RTW89_WW][2][31] = 70, [1][1][2][1][RTW89_WW][0][35] = 6, [1][1][2][1][RTW89_WW][1][35] = 10, - [1][1][2][1][RTW89_WW][2][35] = 70, [1][1][2][1][RTW89_WW][0][39] = 6, [1][1][2][1][RTW89_WW][1][39] = 10, - [1][1][2][1][RTW89_WW][2][39] = 70, [1][1][2][1][RTW89_WW][0][43] = 6, [1][1][2][1][RTW89_WW][1][43] = 10, - [1][1][2][1][RTW89_WW][2][43] = 70, [1][1][2][1][RTW89_WW][0][46] = 12, [1][1][2][1][RTW89_WW][1][46] = 12, - [1][1][2][1][RTW89_WW][2][46] = 68, [1][1][2][1][RTW89_WW][0][50] = 12, [1][1][2][1][RTW89_WW][1][50] = 12, - [1][1][2][1][RTW89_WW][2][50] = 68, [1][1][2][1][RTW89_WW][0][54] = 10, [1][1][2][1][RTW89_WW][1][54] = 10, - [1][1][2][1][RTW89_WW][2][54] = 0, [1][1][2][1][RTW89_WW][0][58] = 10, [1][1][2][1][RTW89_WW][1][58] = 10, - [1][1][2][1][RTW89_WW][2][58] = 66, [1][1][2][1][RTW89_WW][0][61] = 10, [1][1][2][1][RTW89_WW][1][61] = 10, - [1][1][2][1][RTW89_WW][2][61] = 66, [1][1][2][1][RTW89_WW][0][65] = 10, [1][1][2][1][RTW89_WW][1][65] = 10, - [1][1][2][1][RTW89_WW][2][65] = 66, [1][1][2][1][RTW89_WW][0][69] = 10, [1][1][2][1][RTW89_WW][1][69] = 10, - [1][1][2][1][RTW89_WW][2][69] = 66, [1][1][2][1][RTW89_WW][0][73] = 10, [1][1][2][1][RTW89_WW][1][73] = 10, - [1][1][2][1][RTW89_WW][2][73] = 66, [1][1][2][1][RTW89_WW][0][76] = 10, [1][1][2][1][RTW89_WW][1][76] = 10, - [1][1][2][1][RTW89_WW][2][76] = 66, 
[1][1][2][1][RTW89_WW][0][80] = 10, [1][1][2][1][RTW89_WW][1][80] = 10, - [1][1][2][1][RTW89_WW][2][80] = 66, [1][1][2][1][RTW89_WW][0][84] = 10, [1][1][2][1][RTW89_WW][1][84] = 10, - [1][1][2][1][RTW89_WW][2][84] = 66, [1][1][2][1][RTW89_WW][0][88] = 10, [1][1][2][1][RTW89_WW][1][88] = 10, - [1][1][2][1][RTW89_WW][2][88] = 0, [1][1][2][1][RTW89_WW][0][91] = 12, [1][1][2][1][RTW89_WW][1][91] = 12, - [1][1][2][1][RTW89_WW][2][91] = 0, [1][1][2][1][RTW89_WW][0][95] = 10, [1][1][2][1][RTW89_WW][1][95] = 10, - [1][1][2][1][RTW89_WW][2][95] = 0, [1][1][2][1][RTW89_WW][0][99] = 10, [1][1][2][1][RTW89_WW][1][99] = 10, - [1][1][2][1][RTW89_WW][2][99] = 0, [1][1][2][1][RTW89_WW][0][103] = 10, [1][1][2][1][RTW89_WW][1][103] = 10, - [1][1][2][1][RTW89_WW][2][103] = 0, [1][1][2][1][RTW89_WW][0][106] = 12, [1][1][2][1][RTW89_WW][1][106] = 12, - [1][1][2][1][RTW89_WW][2][106] = 0, [1][1][2][1][RTW89_WW][0][110] = 0, [1][1][2][1][RTW89_WW][1][110] = 0, - [1][1][2][1][RTW89_WW][2][110] = 0, [1][1][2][1][RTW89_WW][0][114] = 0, [1][1][2][1][RTW89_WW][1][114] = 0, - [1][1][2][1][RTW89_WW][2][114] = 0, [1][1][2][1][RTW89_WW][0][118] = 0, [1][1][2][1][RTW89_WW][1][118] = 0, - [1][1][2][1][RTW89_WW][2][118] = 0, [2][0][2][0][RTW89_WW][0][3] = 24, [2][0][2][0][RTW89_WW][1][3] = 46, - [2][0][2][0][RTW89_WW][2][3] = 60, [2][0][2][0][RTW89_WW][0][11] = 24, [2][0][2][0][RTW89_WW][1][11] = 46, - [2][0][2][0][RTW89_WW][2][11] = 60, [2][0][2][0][RTW89_WW][0][18] = 24, [2][0][2][0][RTW89_WW][1][18] = 46, - [2][0][2][0][RTW89_WW][2][18] = 60, [2][0][2][0][RTW89_WW][0][26] = 24, [2][0][2][0][RTW89_WW][1][26] = 46, - [2][0][2][0][RTW89_WW][2][26] = 60, [2][0][2][0][RTW89_WW][0][33] = 24, [2][0][2][0][RTW89_WW][1][33] = 46, - [2][0][2][0][RTW89_WW][2][33] = 60, [2][0][2][0][RTW89_WW][0][41] = 24, [2][0][2][0][RTW89_WW][1][41] = 46, - [2][0][2][0][RTW89_WW][2][41] = 60, [2][0][2][0][RTW89_WW][0][48] = 46, [2][0][2][0][RTW89_WW][1][48] = 46, - [2][0][2][0][RTW89_WW][2][48] = 60, [2][0][2][0][RTW89_WW][0][56] = 46, [2][0][2][0][RTW89_WW][1][56] = 46, - [2][0][2][0][RTW89_WW][2][56] = 58, [2][0][2][0][RTW89_WW][0][63] = 46, [2][0][2][0][RTW89_WW][1][63] = 46, - [2][0][2][0][RTW89_WW][2][63] = 58, [2][0][2][0][RTW89_WW][0][71] = 46, [2][0][2][0][RTW89_WW][1][71] = 46, - [2][0][2][0][RTW89_WW][2][71] = 58, [2][0][2][0][RTW89_WW][0][78] = 46, [2][0][2][0][RTW89_WW][1][78] = 46, - [2][0][2][0][RTW89_WW][2][78] = 58, [2][0][2][0][RTW89_WW][0][86] = 46, [2][0][2][0][RTW89_WW][1][86] = 46, - [2][0][2][0][RTW89_WW][2][86] = 0, [2][0][2][0][RTW89_WW][0][93] = 46, [2][0][2][0][RTW89_WW][1][93] = 46, - [2][0][2][0][RTW89_WW][2][93] = 0, [2][0][2][0][RTW89_WW][0][101] = 44, [2][0][2][0][RTW89_WW][1][101] = 44, - [2][0][2][0][RTW89_WW][2][101] = 0, [2][0][2][0][RTW89_WW][0][108] = 0, [2][0][2][0][RTW89_WW][1][108] = 0, - [2][0][2][0][RTW89_WW][2][108] = 0, [2][0][2][0][RTW89_WW][0][116] = 0, [2][0][2][0][RTW89_WW][1][116] = 0, - [2][0][2][0][RTW89_WW][2][116] = 0, [2][1][2][0][RTW89_WW][0][3] = 12, [2][1][2][0][RTW89_WW][1][3] = 22, - [2][1][2][0][RTW89_WW][2][3] = 50, [2][1][2][0][RTW89_WW][0][11] = 12, [2][1][2][0][RTW89_WW][1][11] = 20, - [2][1][2][0][RTW89_WW][2][11] = 50, [2][1][2][0][RTW89_WW][0][18] = 12, [2][1][2][0][RTW89_WW][1][18] = 20, - [2][1][2][0][RTW89_WW][2][18] = 50, [2][1][2][0][RTW89_WW][0][26] = 12, [2][1][2][0][RTW89_WW][1][26] = 20, - [2][1][2][0][RTW89_WW][2][26] = 60, [2][1][2][0][RTW89_WW][0][33] = 12, [2][1][2][0][RTW89_WW][1][33] = 20, - [2][1][2][0][RTW89_WW][2][33] = 60, [2][1][2][0][RTW89_WW][0][41] = 12, 
[2][1][2][0][RTW89_WW][1][41] = 22, - [2][1][2][0][RTW89_WW][2][41] = 60, [2][1][2][0][RTW89_WW][0][48] = 22, [2][1][2][0][RTW89_WW][1][48] = 22, - [2][1][2][0][RTW89_WW][2][48] = 60, [2][1][2][0][RTW89_WW][0][56] = 20, [2][1][2][0][RTW89_WW][1][56] = 20, - [2][1][2][0][RTW89_WW][2][56] = 56, [2][1][2][0][RTW89_WW][0][63] = 22, [2][1][2][0][RTW89_WW][1][63] = 22, - [2][1][2][0][RTW89_WW][2][63] = 58, [2][1][2][0][RTW89_WW][0][71] = 20, [2][1][2][0][RTW89_WW][1][71] = 20, - [2][1][2][0][RTW89_WW][2][71] = 58, [2][1][2][0][RTW89_WW][0][78] = 20, [2][1][2][0][RTW89_WW][1][78] = 20, - [2][1][2][0][RTW89_WW][2][78] = 58, [2][1][2][0][RTW89_WW][0][86] = 20, [2][1][2][0][RTW89_WW][1][86] = 20, - [2][1][2][0][RTW89_WW][2][86] = 0, [2][1][2][0][RTW89_WW][0][93] = 22, [2][1][2][0][RTW89_WW][1][93] = 22, - [2][1][2][0][RTW89_WW][2][93] = 0, [2][1][2][0][RTW89_WW][0][101] = 22, [2][1][2][0][RTW89_WW][1][101] = 22, - [2][1][2][0][RTW89_WW][2][101] = 0, [2][1][2][0][RTW89_WW][0][108] = 0, [2][1][2][0][RTW89_WW][1][108] = 0, - [2][1][2][0][RTW89_WW][2][108] = 0, [2][1][2][0][RTW89_WW][0][116] = 0, [2][1][2][0][RTW89_WW][1][116] = 0, - [2][1][2][0][RTW89_WW][2][116] = 0, [2][1][2][1][RTW89_WW][0][3] = 6, [2][1][2][1][RTW89_WW][1][3] = 22, - [2][1][2][1][RTW89_WW][2][3] = 50, [2][1][2][1][RTW89_WW][0][11] = 6, [2][1][2][1][RTW89_WW][1][11] = 20, - [2][1][2][1][RTW89_WW][2][11] = 50, [2][1][2][1][RTW89_WW][0][18] = 6, [2][1][2][1][RTW89_WW][1][18] = 20, - [2][1][2][1][RTW89_WW][2][18] = 50, [2][1][2][1][RTW89_WW][0][26] = 6, [2][1][2][1][RTW89_WW][1][26] = 20, - [2][1][2][1][RTW89_WW][2][26] = 60, [2][1][2][1][RTW89_WW][0][33] = 6, [2][1][2][1][RTW89_WW][1][33] = 20, - [2][1][2][1][RTW89_WW][2][33] = 60, [2][1][2][1][RTW89_WW][0][41] = 6, [2][1][2][1][RTW89_WW][1][41] = 22, - [2][1][2][1][RTW89_WW][2][41] = 60, [2][1][2][1][RTW89_WW][0][48] = 22, [2][1][2][1][RTW89_WW][1][48] = 22, - [2][1][2][1][RTW89_WW][2][48] = 60, [2][1][2][1][RTW89_WW][0][56] = 20, [2][1][2][1][RTW89_WW][1][56] = 20, - [2][1][2][1][RTW89_WW][2][56] = 56, [2][1][2][1][RTW89_WW][0][63] = 22, [2][1][2][1][RTW89_WW][1][63] = 22, - [2][1][2][1][RTW89_WW][2][63] = 58, [2][1][2][1][RTW89_WW][0][71] = 20, [2][1][2][1][RTW89_WW][1][71] = 20, - [2][1][2][1][RTW89_WW][2][71] = 58, [2][1][2][1][RTW89_WW][0][78] = 20, [2][1][2][1][RTW89_WW][1][78] = 20, - [2][1][2][1][RTW89_WW][2][78] = 58, [2][1][2][1][RTW89_WW][0][86] = 20, [2][1][2][1][RTW89_WW][1][86] = 20, - [2][1][2][1][RTW89_WW][2][86] = 0, [2][1][2][1][RTW89_WW][0][93] = 22, [2][1][2][1][RTW89_WW][1][93] = 22, - [2][1][2][1][RTW89_WW][2][93] = 0, [2][1][2][1][RTW89_WW][0][101] = 22, [2][1][2][1][RTW89_WW][1][101] = 22, - [2][1][2][1][RTW89_WW][2][101] = 0, [2][1][2][1][RTW89_WW][0][108] = 0, [2][1][2][1][RTW89_WW][1][108] = 0, - [2][1][2][1][RTW89_WW][2][108] = 0, [2][1][2][1][RTW89_WW][0][116] = 0, [2][1][2][1][RTW89_WW][1][116] = 0, - [2][1][2][1][RTW89_WW][2][116] = 0, [3][0][2][0][RTW89_WW][0][7] = 22, [3][0][2][0][RTW89_WW][1][7] = 42, - [3][0][2][0][RTW89_WW][2][7] = 52, [3][0][2][0][RTW89_WW][0][22] = 20, [3][0][2][0][RTW89_WW][1][22] = 42, - [3][0][2][0][RTW89_WW][2][22] = 52, [3][0][2][0][RTW89_WW][0][37] = 20, [3][0][2][0][RTW89_WW][1][37] = 42, - [3][0][2][0][RTW89_WW][2][37] = 52, [3][0][2][0][RTW89_WW][0][52] = 54, [3][0][2][0][RTW89_WW][1][52] = 54, - [3][0][2][0][RTW89_WW][2][52] = 56, [3][0][2][0][RTW89_WW][0][67] = 54, [3][0][2][0][RTW89_WW][1][67] = 54, - [3][0][2][0][RTW89_WW][2][67] = 54, [3][0][2][0][RTW89_WW][0][82] = 26, [3][0][2][0][RTW89_WW][1][82] = 26, - 
[3][0][2][0][RTW89_WW][2][82] = 0, [3][0][2][0][RTW89_WW][0][97] = 26, [3][0][2][0][RTW89_WW][1][97] = 26, - [3][0][2][0][RTW89_WW][2][97] = 0, [3][0][2][0][RTW89_WW][0][112] = 0, [3][0][2][0][RTW89_WW][1][112] = 0, - [3][0][2][0][RTW89_WW][2][112] = 0, [3][1][2][0][RTW89_WW][0][7] = 10, [3][1][2][0][RTW89_WW][1][7] = 32, - [3][1][2][0][RTW89_WW][2][7] = 46, [3][1][2][0][RTW89_WW][0][22] = 8, [3][1][2][0][RTW89_WW][1][22] = 30, - [3][1][2][0][RTW89_WW][2][22] = 52, [3][1][2][0][RTW89_WW][0][37] = 8, [3][1][2][0][RTW89_WW][1][37] = 30, - [3][1][2][0][RTW89_WW][2][37] = 52, [3][1][2][0][RTW89_WW][0][52] = 30, [3][1][2][0][RTW89_WW][1][52] = 30, - [3][1][2][0][RTW89_WW][2][52] = 56, [3][1][2][0][RTW89_WW][0][67] = 32, [3][1][2][0][RTW89_WW][1][67] = 32, - [3][1][2][0][RTW89_WW][2][67] = 54, [3][1][2][0][RTW89_WW][0][82] = 24, [3][1][2][0][RTW89_WW][1][82] = 24, - [3][1][2][0][RTW89_WW][2][82] = 0, [3][1][2][0][RTW89_WW][0][97] = 24, [3][1][2][0][RTW89_WW][1][97] = 24, - [3][1][2][0][RTW89_WW][2][97] = 0, [3][1][2][0][RTW89_WW][0][112] = 0, [3][1][2][0][RTW89_WW][1][112] = 0, - [3][1][2][0][RTW89_WW][2][112] = 0, [3][1][2][1][RTW89_WW][0][7] = 6, [3][1][2][1][RTW89_WW][1][7] = 32, - [3][1][2][1][RTW89_WW][2][7] = 46, [3][1][2][1][RTW89_WW][0][22] = 6, [3][1][2][1][RTW89_WW][1][22] = 30, - [3][1][2][1][RTW89_WW][2][22] = 52, [3][1][2][1][RTW89_WW][0][37] = 6, [3][1][2][1][RTW89_WW][1][37] = 30, - [3][1][2][1][RTW89_WW][2][37] = 52, [3][1][2][1][RTW89_WW][0][52] = 30, [3][1][2][1][RTW89_WW][1][52] = 30, - [3][1][2][1][RTW89_WW][2][52] = 56, [3][1][2][1][RTW89_WW][0][67] = 32, [3][1][2][1][RTW89_WW][1][67] = 32, - [3][1][2][1][RTW89_WW][2][67] = 54, [3][1][2][1][RTW89_WW][0][82] = 24, [3][1][2][1][RTW89_WW][1][82] = 24, - [3][1][2][1][RTW89_WW][2][82] = 0, [3][1][2][1][RTW89_WW][0][97] = 24, [3][1][2][1][RTW89_WW][1][97] = 24, - [3][1][2][1][RTW89_WW][2][97] = 0, [3][1][2][1][RTW89_WW][0][112] = 0, [3][1][2][1][RTW89_WW][1][112] = 0, - [3][1][2][1][RTW89_WW][2][112] = 0, [0][0][1][0][RTW89_FCC][1][0] = 24, - [0][0][1][0][RTW89_FCC][2][0] = 56, [0][0][1][0][RTW89_ETSI][1][0] = 66, [0][0][1][0][RTW89_ETSI][0][0] = 28, [0][0][1][0][RTW89_MKK][1][0] = 66, [0][0][1][0][RTW89_MKK][0][0] = 26, [0][0][1][0][RTW89_IC][1][0] = 24, - [0][0][1][0][RTW89_IC][2][0] = 56, [0][0][1][0][RTW89_KCC][1][0] = 24, [0][0][1][0][RTW89_KCC][0][0] = 24, [0][0][1][0][RTW89_ACMA][1][0] = 66, @@ -38440,13 +37950,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][0] = 56, [0][0][1][0][RTW89_THAILAND][0][0] = 24, [0][0][1][0][RTW89_FCC][1][2] = 22, - [0][0][1][0][RTW89_FCC][2][2] = 56, [0][0][1][0][RTW89_ETSI][1][2] = 66, [0][0][1][0][RTW89_ETSI][0][2] = 28, [0][0][1][0][RTW89_MKK][1][2] = 66, [0][0][1][0][RTW89_MKK][0][2] = 26, [0][0][1][0][RTW89_IC][1][2] = 22, - [0][0][1][0][RTW89_IC][2][2] = 56, [0][0][1][0][RTW89_KCC][1][2] = 24, [0][0][1][0][RTW89_KCC][0][2] = 24, [0][0][1][0][RTW89_ACMA][1][2] = 66, @@ -38459,13 +37967,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][2] = 56, [0][0][1][0][RTW89_THAILAND][0][2] = 22, [0][0][1][0][RTW89_FCC][1][4] = 22, - [0][0][1][0][RTW89_FCC][2][4] = 56, [0][0][1][0][RTW89_ETSI][1][4] = 66, [0][0][1][0][RTW89_ETSI][0][4] = 28, [0][0][1][0][RTW89_MKK][1][4] = 66, [0][0][1][0][RTW89_MKK][0][4] = 26, [0][0][1][0][RTW89_IC][1][4] = 22, - [0][0][1][0][RTW89_IC][2][4] = 56, [0][0][1][0][RTW89_KCC][1][4] = 24, [0][0][1][0][RTW89_KCC][0][4] = 24, [0][0][1][0][RTW89_ACMA][1][4] = 66, @@ 
-38478,13 +37984,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][4] = 56, [0][0][1][0][RTW89_THAILAND][0][4] = 22, [0][0][1][0][RTW89_FCC][1][6] = 22, - [0][0][1][0][RTW89_FCC][2][6] = 56, [0][0][1][0][RTW89_ETSI][1][6] = 66, [0][0][1][0][RTW89_ETSI][0][6] = 28, [0][0][1][0][RTW89_MKK][1][6] = 66, [0][0][1][0][RTW89_MKK][0][6] = 26, [0][0][1][0][RTW89_IC][1][6] = 22, - [0][0][1][0][RTW89_IC][2][6] = 56, [0][0][1][0][RTW89_KCC][1][6] = 24, [0][0][1][0][RTW89_KCC][0][6] = 24, [0][0][1][0][RTW89_ACMA][1][6] = 66, @@ -38497,13 +38001,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][6] = 56, [0][0][1][0][RTW89_THAILAND][0][6] = 22, [0][0][1][0][RTW89_FCC][1][8] = 22, - [0][0][1][0][RTW89_FCC][2][8] = 56, [0][0][1][0][RTW89_ETSI][1][8] = 66, [0][0][1][0][RTW89_ETSI][0][8] = 28, [0][0][1][0][RTW89_MKK][1][8] = 66, [0][0][1][0][RTW89_MKK][0][8] = 26, [0][0][1][0][RTW89_IC][1][8] = 22, - [0][0][1][0][RTW89_IC][2][8] = 56, [0][0][1][0][RTW89_KCC][1][8] = 24, [0][0][1][0][RTW89_KCC][0][8] = 24, [0][0][1][0][RTW89_ACMA][1][8] = 66, @@ -38516,13 +38018,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][8] = 56, [0][0][1][0][RTW89_THAILAND][0][8] = 22, [0][0][1][0][RTW89_FCC][1][10] = 22, - [0][0][1][0][RTW89_FCC][2][10] = 56, [0][0][1][0][RTW89_ETSI][1][10] = 66, [0][0][1][0][RTW89_ETSI][0][10] = 28, [0][0][1][0][RTW89_MKK][1][10] = 66, [0][0][1][0][RTW89_MKK][0][10] = 26, [0][0][1][0][RTW89_IC][1][10] = 22, - [0][0][1][0][RTW89_IC][2][10] = 56, [0][0][1][0][RTW89_KCC][1][10] = 24, [0][0][1][0][RTW89_KCC][0][10] = 24, [0][0][1][0][RTW89_ACMA][1][10] = 66, @@ -38535,13 +38035,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][10] = 56, [0][0][1][0][RTW89_THAILAND][0][10] = 22, [0][0][1][0][RTW89_FCC][1][12] = 22, - [0][0][1][0][RTW89_FCC][2][12] = 56, [0][0][1][0][RTW89_ETSI][1][12] = 66, [0][0][1][0][RTW89_ETSI][0][12] = 28, [0][0][1][0][RTW89_MKK][1][12] = 66, [0][0][1][0][RTW89_MKK][0][12] = 26, [0][0][1][0][RTW89_IC][1][12] = 22, - [0][0][1][0][RTW89_IC][2][12] = 56, [0][0][1][0][RTW89_KCC][1][12] = 24, [0][0][1][0][RTW89_KCC][0][12] = 24, [0][0][1][0][RTW89_ACMA][1][12] = 66, @@ -38554,13 +38052,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][12] = 56, [0][0][1][0][RTW89_THAILAND][0][12] = 22, [0][0][1][0][RTW89_FCC][1][14] = 22, - [0][0][1][0][RTW89_FCC][2][14] = 56, [0][0][1][0][RTW89_ETSI][1][14] = 66, [0][0][1][0][RTW89_ETSI][0][14] = 28, [0][0][1][0][RTW89_MKK][1][14] = 66, [0][0][1][0][RTW89_MKK][0][14] = 26, [0][0][1][0][RTW89_IC][1][14] = 22, - [0][0][1][0][RTW89_IC][2][14] = 56, [0][0][1][0][RTW89_KCC][1][14] = 24, [0][0][1][0][RTW89_KCC][0][14] = 24, [0][0][1][0][RTW89_ACMA][1][14] = 66, @@ -38573,13 +38069,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][14] = 56, [0][0][1][0][RTW89_THAILAND][0][14] = 22, [0][0][1][0][RTW89_FCC][1][15] = 22, - [0][0][1][0][RTW89_FCC][2][15] = 56, [0][0][1][0][RTW89_ETSI][1][15] = 66, [0][0][1][0][RTW89_ETSI][0][15] = 28, [0][0][1][0][RTW89_MKK][1][15] = 66, [0][0][1][0][RTW89_MKK][0][15] = 26, [0][0][1][0][RTW89_IC][1][15] = 22, - [0][0][1][0][RTW89_IC][2][15] = 56, [0][0][1][0][RTW89_KCC][1][15] = 24, [0][0][1][0][RTW89_KCC][0][15] = 24, [0][0][1][0][RTW89_ACMA][1][15] = 66, @@ -38592,13 +38086,11 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][15] = 56, [0][0][1][0][RTW89_THAILAND][0][15] = 22, [0][0][1][0][RTW89_FCC][1][17] = 22, - [0][0][1][0][RTW89_FCC][2][17] = 56, [0][0][1][0][RTW89_ETSI][1][17] = 66, [0][0][1][0][RTW89_ETSI][0][17] = 28, [0][0][1][0][RTW89_MKK][1][17] = 66, [0][0][1][0][RTW89_MKK][0][17] = 26, [0][0][1][0][RTW89_IC][1][17] = 22, - [0][0][1][0][RTW89_IC][2][17] = 56, [0][0][1][0][RTW89_KCC][1][17] = 24, [0][0][1][0][RTW89_KCC][0][17] = 24, [0][0][1][0][RTW89_ACMA][1][17] = 66, @@ -38611,13 +38103,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][17] = 56, [0][0][1][0][RTW89_THAILAND][0][17] = 22, [0][0][1][0][RTW89_FCC][1][19] = 22, - [0][0][1][0][RTW89_FCC][2][19] = 56, [0][0][1][0][RTW89_ETSI][1][19] = 66, [0][0][1][0][RTW89_ETSI][0][19] = 28, [0][0][1][0][RTW89_MKK][1][19] = 66, [0][0][1][0][RTW89_MKK][0][19] = 26, [0][0][1][0][RTW89_IC][1][19] = 22, - [0][0][1][0][RTW89_IC][2][19] = 56, [0][0][1][0][RTW89_KCC][1][19] = 24, [0][0][1][0][RTW89_KCC][0][19] = 24, [0][0][1][0][RTW89_ACMA][1][19] = 66, @@ -38630,13 +38120,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][19] = 56, [0][0][1][0][RTW89_THAILAND][0][19] = 22, [0][0][1][0][RTW89_FCC][1][21] = 22, - [0][0][1][0][RTW89_FCC][2][21] = 56, [0][0][1][0][RTW89_ETSI][1][21] = 66, [0][0][1][0][RTW89_ETSI][0][21] = 28, [0][0][1][0][RTW89_MKK][1][21] = 66, [0][0][1][0][RTW89_MKK][0][21] = 26, [0][0][1][0][RTW89_IC][1][21] = 22, - [0][0][1][0][RTW89_IC][2][21] = 56, [0][0][1][0][RTW89_KCC][1][21] = 24, [0][0][1][0][RTW89_KCC][0][21] = 24, [0][0][1][0][RTW89_ACMA][1][21] = 66, @@ -38649,13 +38137,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][21] = 56, [0][0][1][0][RTW89_THAILAND][0][21] = 22, [0][0][1][0][RTW89_FCC][1][23] = 22, - [0][0][1][0][RTW89_FCC][2][23] = 70, [0][0][1][0][RTW89_ETSI][1][23] = 66, [0][0][1][0][RTW89_ETSI][0][23] = 28, [0][0][1][0][RTW89_MKK][1][23] = 66, [0][0][1][0][RTW89_MKK][0][23] = 26, [0][0][1][0][RTW89_IC][1][23] = 22, - [0][0][1][0][RTW89_IC][2][23] = 70, [0][0][1][0][RTW89_KCC][1][23] = 24, [0][0][1][0][RTW89_KCC][0][23] = 26, [0][0][1][0][RTW89_ACMA][1][23] = 66, @@ -38668,13 +38154,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][23] = 66, [0][0][1][0][RTW89_THAILAND][0][23] = 22, [0][0][1][0][RTW89_FCC][1][25] = 22, - [0][0][1][0][RTW89_FCC][2][25] = 70, [0][0][1][0][RTW89_ETSI][1][25] = 66, [0][0][1][0][RTW89_ETSI][0][25] = 28, [0][0][1][0][RTW89_MKK][1][25] = 66, [0][0][1][0][RTW89_MKK][0][25] = 26, [0][0][1][0][RTW89_IC][1][25] = 22, - [0][0][1][0][RTW89_IC][2][25] = 70, [0][0][1][0][RTW89_KCC][1][25] = 24, [0][0][1][0][RTW89_KCC][0][25] = 26, [0][0][1][0][RTW89_ACMA][1][25] = 66, @@ -38687,13 +38171,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][25] = 66, [0][0][1][0][RTW89_THAILAND][0][25] = 22, [0][0][1][0][RTW89_FCC][1][27] = 22, - [0][0][1][0][RTW89_FCC][2][27] = 70, [0][0][1][0][RTW89_ETSI][1][27] = 66, [0][0][1][0][RTW89_ETSI][0][27] = 28, [0][0][1][0][RTW89_MKK][1][27] = 66, [0][0][1][0][RTW89_MKK][0][27] = 26, [0][0][1][0][RTW89_IC][1][27] = 22, - [0][0][1][0][RTW89_IC][2][27] = 70, [0][0][1][0][RTW89_KCC][1][27] = 24, [0][0][1][0][RTW89_KCC][0][27] = 26, [0][0][1][0][RTW89_ACMA][1][27] = 66, @@ -38706,13 +38188,11 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][27] = 66, [0][0][1][0][RTW89_THAILAND][0][27] = 22, [0][0][1][0][RTW89_FCC][1][29] = 22, - [0][0][1][0][RTW89_FCC][2][29] = 70, [0][0][1][0][RTW89_ETSI][1][29] = 66, [0][0][1][0][RTW89_ETSI][0][29] = 28, [0][0][1][0][RTW89_MKK][1][29] = 66, [0][0][1][0][RTW89_MKK][0][29] = 26, [0][0][1][0][RTW89_IC][1][29] = 22, - [0][0][1][0][RTW89_IC][2][29] = 70, [0][0][1][0][RTW89_KCC][1][29] = 24, [0][0][1][0][RTW89_KCC][0][29] = 26, [0][0][1][0][RTW89_ACMA][1][29] = 66, @@ -38725,13 +38205,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][29] = 66, [0][0][1][0][RTW89_THAILAND][0][29] = 22, [0][0][1][0][RTW89_FCC][1][30] = 22, - [0][0][1][0][RTW89_FCC][2][30] = 70, [0][0][1][0][RTW89_ETSI][1][30] = 66, [0][0][1][0][RTW89_ETSI][0][30] = 28, [0][0][1][0][RTW89_MKK][1][30] = 66, [0][0][1][0][RTW89_MKK][0][30] = 26, [0][0][1][0][RTW89_IC][1][30] = 22, - [0][0][1][0][RTW89_IC][2][30] = 70, [0][0][1][0][RTW89_KCC][1][30] = 24, [0][0][1][0][RTW89_KCC][0][30] = 26, [0][0][1][0][RTW89_ACMA][1][30] = 66, @@ -38744,13 +38222,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][30] = 66, [0][0][1][0][RTW89_THAILAND][0][30] = 22, [0][0][1][0][RTW89_FCC][1][32] = 22, - [0][0][1][0][RTW89_FCC][2][32] = 70, [0][0][1][0][RTW89_ETSI][1][32] = 66, [0][0][1][0][RTW89_ETSI][0][32] = 28, [0][0][1][0][RTW89_MKK][1][32] = 66, [0][0][1][0][RTW89_MKK][0][32] = 26, [0][0][1][0][RTW89_IC][1][32] = 22, - [0][0][1][0][RTW89_IC][2][32] = 70, [0][0][1][0][RTW89_KCC][1][32] = 24, [0][0][1][0][RTW89_KCC][0][32] = 26, [0][0][1][0][RTW89_ACMA][1][32] = 66, @@ -38763,13 +38239,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][32] = 66, [0][0][1][0][RTW89_THAILAND][0][32] = 22, [0][0][1][0][RTW89_FCC][1][34] = 22, - [0][0][1][0][RTW89_FCC][2][34] = 70, [0][0][1][0][RTW89_ETSI][1][34] = 66, [0][0][1][0][RTW89_ETSI][0][34] = 28, [0][0][1][0][RTW89_MKK][1][34] = 66, [0][0][1][0][RTW89_MKK][0][34] = 26, [0][0][1][0][RTW89_IC][1][34] = 22, - [0][0][1][0][RTW89_IC][2][34] = 70, [0][0][1][0][RTW89_KCC][1][34] = 24, [0][0][1][0][RTW89_KCC][0][34] = 26, [0][0][1][0][RTW89_ACMA][1][34] = 66, @@ -38782,13 +38256,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][34] = 66, [0][0][1][0][RTW89_THAILAND][0][34] = 22, [0][0][1][0][RTW89_FCC][1][36] = 22, - [0][0][1][0][RTW89_FCC][2][36] = 70, [0][0][1][0][RTW89_ETSI][1][36] = 66, [0][0][1][0][RTW89_ETSI][0][36] = 28, [0][0][1][0][RTW89_MKK][1][36] = 66, [0][0][1][0][RTW89_MKK][0][36] = 26, [0][0][1][0][RTW89_IC][1][36] = 22, - [0][0][1][0][RTW89_IC][2][36] = 70, [0][0][1][0][RTW89_KCC][1][36] = 24, [0][0][1][0][RTW89_KCC][0][36] = 26, [0][0][1][0][RTW89_ACMA][1][36] = 66, @@ -38801,13 +38273,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][36] = 66, [0][0][1][0][RTW89_THAILAND][0][36] = 22, [0][0][1][0][RTW89_FCC][1][38] = 22, - [0][0][1][0][RTW89_FCC][2][38] = 70, [0][0][1][0][RTW89_ETSI][1][38] = 66, [0][0][1][0][RTW89_ETSI][0][38] = 28, [0][0][1][0][RTW89_MKK][1][38] = 66, [0][0][1][0][RTW89_MKK][0][38] = 26, [0][0][1][0][RTW89_IC][1][38] = 22, - [0][0][1][0][RTW89_IC][2][38] = 70, [0][0][1][0][RTW89_KCC][1][38] = 24, [0][0][1][0][RTW89_KCC][0][38] = 26, [0][0][1][0][RTW89_ACMA][1][38] = 66, @@ -38820,13 +38290,11 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][38] = 66, [0][0][1][0][RTW89_THAILAND][0][38] = 22, [0][0][1][0][RTW89_FCC][1][40] = 22, - [0][0][1][0][RTW89_FCC][2][40] = 70, [0][0][1][0][RTW89_ETSI][1][40] = 66, [0][0][1][0][RTW89_ETSI][0][40] = 28, [0][0][1][0][RTW89_MKK][1][40] = 66, [0][0][1][0][RTW89_MKK][0][40] = 26, [0][0][1][0][RTW89_IC][1][40] = 22, - [0][0][1][0][RTW89_IC][2][40] = 70, [0][0][1][0][RTW89_KCC][1][40] = 24, [0][0][1][0][RTW89_KCC][0][40] = 26, [0][0][1][0][RTW89_ACMA][1][40] = 66, @@ -38839,13 +38307,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][40] = 66, [0][0][1][0][RTW89_THAILAND][0][40] = 22, [0][0][1][0][RTW89_FCC][1][42] = 22, - [0][0][1][0][RTW89_FCC][2][42] = 70, [0][0][1][0][RTW89_ETSI][1][42] = 66, [0][0][1][0][RTW89_ETSI][0][42] = 28, [0][0][1][0][RTW89_MKK][1][42] = 66, [0][0][1][0][RTW89_MKK][0][42] = 26, [0][0][1][0][RTW89_IC][1][42] = 22, - [0][0][1][0][RTW89_IC][2][42] = 70, [0][0][1][0][RTW89_KCC][1][42] = 24, [0][0][1][0][RTW89_KCC][0][42] = 26, [0][0][1][0][RTW89_ACMA][1][42] = 66, @@ -38858,13 +38324,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][42] = 66, [0][0][1][0][RTW89_THAILAND][0][42] = 22, [0][0][1][0][RTW89_FCC][1][44] = 22, - [0][0][1][0][RTW89_FCC][2][44] = 70, [0][0][1][0][RTW89_ETSI][1][44] = 66, [0][0][1][0][RTW89_ETSI][0][44] = 30, [0][0][1][0][RTW89_MKK][1][44] = 44, [0][0][1][0][RTW89_MKK][0][44] = 28, [0][0][1][0][RTW89_IC][1][44] = 22, - [0][0][1][0][RTW89_IC][2][44] = 70, [0][0][1][0][RTW89_KCC][1][44] = 24, [0][0][1][0][RTW89_KCC][0][44] = 26, [0][0][1][0][RTW89_ACMA][1][44] = 66, @@ -38877,13 +38341,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][44] = 68, [0][0][1][0][RTW89_THAILAND][0][44] = 22, [0][0][1][0][RTW89_FCC][1][45] = 22, - [0][0][1][0][RTW89_FCC][2][45] = 127, [0][0][1][0][RTW89_ETSI][1][45] = 127, [0][0][1][0][RTW89_ETSI][0][45] = 127, [0][0][1][0][RTW89_MKK][1][45] = 127, [0][0][1][0][RTW89_MKK][0][45] = 127, [0][0][1][0][RTW89_IC][1][45] = 22, - [0][0][1][0][RTW89_IC][2][45] = 70, [0][0][1][0][RTW89_KCC][1][45] = 24, [0][0][1][0][RTW89_KCC][0][45] = 127, [0][0][1][0][RTW89_ACMA][1][45] = 127, @@ -38896,13 +38358,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][45] = 127, [0][0][1][0][RTW89_THAILAND][0][45] = 127, [0][0][1][0][RTW89_FCC][1][47] = 22, - [0][0][1][0][RTW89_FCC][2][47] = 127, [0][0][1][0][RTW89_ETSI][1][47] = 127, [0][0][1][0][RTW89_ETSI][0][47] = 127, [0][0][1][0][RTW89_MKK][1][47] = 127, [0][0][1][0][RTW89_MKK][0][47] = 127, [0][0][1][0][RTW89_IC][1][47] = 22, - [0][0][1][0][RTW89_IC][2][47] = 70, [0][0][1][0][RTW89_KCC][1][47] = 24, [0][0][1][0][RTW89_KCC][0][47] = 127, [0][0][1][0][RTW89_ACMA][1][47] = 127, @@ -38915,13 +38375,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][47] = 127, [0][0][1][0][RTW89_THAILAND][0][47] = 127, [0][0][1][0][RTW89_FCC][1][49] = 24, - [0][0][1][0][RTW89_FCC][2][49] = 127, [0][0][1][0][RTW89_ETSI][1][49] = 127, [0][0][1][0][RTW89_ETSI][0][49] = 127, [0][0][1][0][RTW89_MKK][1][49] = 127, [0][0][1][0][RTW89_MKK][0][49] = 127, [0][0][1][0][RTW89_IC][1][49] = 24, - [0][0][1][0][RTW89_IC][2][49] = 70, [0][0][1][0][RTW89_KCC][1][49] = 24, [0][0][1][0][RTW89_KCC][0][49] = 127, [0][0][1][0][RTW89_ACMA][1][49] = 127, @@ -38934,13 +38392,11 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][49] = 127, [0][0][1][0][RTW89_THAILAND][0][49] = 127, [0][0][1][0][RTW89_FCC][1][51] = 22, - [0][0][1][0][RTW89_FCC][2][51] = 127, [0][0][1][0][RTW89_ETSI][1][51] = 127, [0][0][1][0][RTW89_ETSI][0][51] = 127, [0][0][1][0][RTW89_MKK][1][51] = 127, [0][0][1][0][RTW89_MKK][0][51] = 127, [0][0][1][0][RTW89_IC][1][51] = 22, - [0][0][1][0][RTW89_IC][2][51] = 70, [0][0][1][0][RTW89_KCC][1][51] = 24, [0][0][1][0][RTW89_KCC][0][51] = 127, [0][0][1][0][RTW89_ACMA][1][51] = 127, @@ -38953,13 +38409,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][51] = 127, [0][0][1][0][RTW89_THAILAND][0][51] = 127, [0][0][1][0][RTW89_FCC][1][53] = 22, - [0][0][1][0][RTW89_FCC][2][53] = 127, [0][0][1][0][RTW89_ETSI][1][53] = 127, [0][0][1][0][RTW89_ETSI][0][53] = 127, [0][0][1][0][RTW89_MKK][1][53] = 127, [0][0][1][0][RTW89_MKK][0][53] = 127, [0][0][1][0][RTW89_IC][1][53] = 22, - [0][0][1][0][RTW89_IC][2][53] = 70, [0][0][1][0][RTW89_KCC][1][53] = 24, [0][0][1][0][RTW89_KCC][0][53] = 127, [0][0][1][0][RTW89_ACMA][1][53] = 127, @@ -38972,13 +38426,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][53] = 127, [0][0][1][0][RTW89_THAILAND][0][53] = 127, [0][0][1][0][RTW89_FCC][1][55] = 22, - [0][0][1][0][RTW89_FCC][2][55] = 68, [0][0][1][0][RTW89_ETSI][1][55] = 127, [0][0][1][0][RTW89_ETSI][0][55] = 127, [0][0][1][0][RTW89_MKK][1][55] = 127, [0][0][1][0][RTW89_MKK][0][55] = 127, [0][0][1][0][RTW89_IC][1][55] = 22, - [0][0][1][0][RTW89_IC][2][55] = 68, [0][0][1][0][RTW89_KCC][1][55] = 26, [0][0][1][0][RTW89_KCC][0][55] = 127, [0][0][1][0][RTW89_ACMA][1][55] = 127, @@ -38991,13 +38443,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][55] = 127, [0][0][1][0][RTW89_THAILAND][0][55] = 127, [0][0][1][0][RTW89_FCC][1][57] = 22, - [0][0][1][0][RTW89_FCC][2][57] = 68, [0][0][1][0][RTW89_ETSI][1][57] = 127, [0][0][1][0][RTW89_ETSI][0][57] = 127, [0][0][1][0][RTW89_MKK][1][57] = 127, [0][0][1][0][RTW89_MKK][0][57] = 127, [0][0][1][0][RTW89_IC][1][57] = 22, - [0][0][1][0][RTW89_IC][2][57] = 68, [0][0][1][0][RTW89_KCC][1][57] = 26, [0][0][1][0][RTW89_KCC][0][57] = 127, [0][0][1][0][RTW89_ACMA][1][57] = 127, @@ -39010,13 +38460,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][57] = 127, [0][0][1][0][RTW89_THAILAND][0][57] = 127, [0][0][1][0][RTW89_FCC][1][59] = 22, - [0][0][1][0][RTW89_FCC][2][59] = 68, [0][0][1][0][RTW89_ETSI][1][59] = 127, [0][0][1][0][RTW89_ETSI][0][59] = 127, [0][0][1][0][RTW89_MKK][1][59] = 127, [0][0][1][0][RTW89_MKK][0][59] = 127, [0][0][1][0][RTW89_IC][1][59] = 22, - [0][0][1][0][RTW89_IC][2][59] = 68, [0][0][1][0][RTW89_KCC][1][59] = 26, [0][0][1][0][RTW89_KCC][0][59] = 127, [0][0][1][0][RTW89_ACMA][1][59] = 127, @@ -39029,13 +38477,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][59] = 127, [0][0][1][0][RTW89_THAILAND][0][59] = 127, [0][0][1][0][RTW89_FCC][1][60] = 22, - [0][0][1][0][RTW89_FCC][2][60] = 68, [0][0][1][0][RTW89_ETSI][1][60] = 127, [0][0][1][0][RTW89_ETSI][0][60] = 127, [0][0][1][0][RTW89_MKK][1][60] = 127, [0][0][1][0][RTW89_MKK][0][60] = 127, [0][0][1][0][RTW89_IC][1][60] = 22, - [0][0][1][0][RTW89_IC][2][60] = 68, [0][0][1][0][RTW89_KCC][1][60] = 26, [0][0][1][0][RTW89_KCC][0][60] = 127, [0][0][1][0][RTW89_ACMA][1][60] = 127, @@ 
-39048,13 +38494,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][60] = 127, [0][0][1][0][RTW89_THAILAND][0][60] = 127, [0][0][1][0][RTW89_FCC][1][62] = 22, - [0][0][1][0][RTW89_FCC][2][62] = 68, [0][0][1][0][RTW89_ETSI][1][62] = 127, [0][0][1][0][RTW89_ETSI][0][62] = 127, [0][0][1][0][RTW89_MKK][1][62] = 127, [0][0][1][0][RTW89_MKK][0][62] = 127, [0][0][1][0][RTW89_IC][1][62] = 22, - [0][0][1][0][RTW89_IC][2][62] = 68, [0][0][1][0][RTW89_KCC][1][62] = 26, [0][0][1][0][RTW89_KCC][0][62] = 127, [0][0][1][0][RTW89_ACMA][1][62] = 127, @@ -39067,13 +38511,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][62] = 127, [0][0][1][0][RTW89_THAILAND][0][62] = 127, [0][0][1][0][RTW89_FCC][1][64] = 22, - [0][0][1][0][RTW89_FCC][2][64] = 68, [0][0][1][0][RTW89_ETSI][1][64] = 127, [0][0][1][0][RTW89_ETSI][0][64] = 127, [0][0][1][0][RTW89_MKK][1][64] = 127, [0][0][1][0][RTW89_MKK][0][64] = 127, [0][0][1][0][RTW89_IC][1][64] = 22, - [0][0][1][0][RTW89_IC][2][64] = 68, [0][0][1][0][RTW89_KCC][1][64] = 26, [0][0][1][0][RTW89_KCC][0][64] = 127, [0][0][1][0][RTW89_ACMA][1][64] = 127, @@ -39086,13 +38528,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][64] = 127, [0][0][1][0][RTW89_THAILAND][0][64] = 127, [0][0][1][0][RTW89_FCC][1][66] = 22, - [0][0][1][0][RTW89_FCC][2][66] = 68, [0][0][1][0][RTW89_ETSI][1][66] = 127, [0][0][1][0][RTW89_ETSI][0][66] = 127, [0][0][1][0][RTW89_MKK][1][66] = 127, [0][0][1][0][RTW89_MKK][0][66] = 127, [0][0][1][0][RTW89_IC][1][66] = 22, - [0][0][1][0][RTW89_IC][2][66] = 68, [0][0][1][0][RTW89_KCC][1][66] = 26, [0][0][1][0][RTW89_KCC][0][66] = 127, [0][0][1][0][RTW89_ACMA][1][66] = 127, @@ -39105,13 +38545,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][66] = 127, [0][0][1][0][RTW89_THAILAND][0][66] = 127, [0][0][1][0][RTW89_FCC][1][68] = 22, - [0][0][1][0][RTW89_FCC][2][68] = 68, [0][0][1][0][RTW89_ETSI][1][68] = 127, [0][0][1][0][RTW89_ETSI][0][68] = 127, [0][0][1][0][RTW89_MKK][1][68] = 127, [0][0][1][0][RTW89_MKK][0][68] = 127, [0][0][1][0][RTW89_IC][1][68] = 22, - [0][0][1][0][RTW89_IC][2][68] = 68, [0][0][1][0][RTW89_KCC][1][68] = 26, [0][0][1][0][RTW89_KCC][0][68] = 127, [0][0][1][0][RTW89_ACMA][1][68] = 127, @@ -39124,13 +38562,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][68] = 127, [0][0][1][0][RTW89_THAILAND][0][68] = 127, [0][0][1][0][RTW89_FCC][1][70] = 24, - [0][0][1][0][RTW89_FCC][2][70] = 68, [0][0][1][0][RTW89_ETSI][1][70] = 127, [0][0][1][0][RTW89_ETSI][0][70] = 127, [0][0][1][0][RTW89_MKK][1][70] = 127, [0][0][1][0][RTW89_MKK][0][70] = 127, [0][0][1][0][RTW89_IC][1][70] = 24, - [0][0][1][0][RTW89_IC][2][70] = 68, [0][0][1][0][RTW89_KCC][1][70] = 26, [0][0][1][0][RTW89_KCC][0][70] = 127, [0][0][1][0][RTW89_ACMA][1][70] = 127, @@ -39143,13 +38579,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][70] = 127, [0][0][1][0][RTW89_THAILAND][0][70] = 127, [0][0][1][0][RTW89_FCC][1][72] = 22, - [0][0][1][0][RTW89_FCC][2][72] = 68, [0][0][1][0][RTW89_ETSI][1][72] = 127, [0][0][1][0][RTW89_ETSI][0][72] = 127, [0][0][1][0][RTW89_MKK][1][72] = 127, [0][0][1][0][RTW89_MKK][0][72] = 127, [0][0][1][0][RTW89_IC][1][72] = 22, - [0][0][1][0][RTW89_IC][2][72] = 68, [0][0][1][0][RTW89_KCC][1][72] = 26, [0][0][1][0][RTW89_KCC][0][72] = 127, 
[0][0][1][0][RTW89_ACMA][1][72] = 127, @@ -39162,13 +38596,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][72] = 127, [0][0][1][0][RTW89_THAILAND][0][72] = 127, [0][0][1][0][RTW89_FCC][1][74] = 22, - [0][0][1][0][RTW89_FCC][2][74] = 68, [0][0][1][0][RTW89_ETSI][1][74] = 127, [0][0][1][0][RTW89_ETSI][0][74] = 127, [0][0][1][0][RTW89_MKK][1][74] = 127, [0][0][1][0][RTW89_MKK][0][74] = 127, [0][0][1][0][RTW89_IC][1][74] = 22, - [0][0][1][0][RTW89_IC][2][74] = 68, [0][0][1][0][RTW89_KCC][1][74] = 26, [0][0][1][0][RTW89_KCC][0][74] = 127, [0][0][1][0][RTW89_ACMA][1][74] = 127, @@ -39181,13 +38613,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][74] = 127, [0][0][1][0][RTW89_THAILAND][0][74] = 127, [0][0][1][0][RTW89_FCC][1][75] = 22, - [0][0][1][0][RTW89_FCC][2][75] = 68, [0][0][1][0][RTW89_ETSI][1][75] = 127, [0][0][1][0][RTW89_ETSI][0][75] = 127, [0][0][1][0][RTW89_MKK][1][75] = 127, [0][0][1][0][RTW89_MKK][0][75] = 127, [0][0][1][0][RTW89_IC][1][75] = 22, - [0][0][1][0][RTW89_IC][2][75] = 68, [0][0][1][0][RTW89_KCC][1][75] = 26, [0][0][1][0][RTW89_KCC][0][75] = 127, [0][0][1][0][RTW89_ACMA][1][75] = 127, @@ -39200,13 +38630,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][75] = 127, [0][0][1][0][RTW89_THAILAND][0][75] = 127, [0][0][1][0][RTW89_FCC][1][77] = 22, - [0][0][1][0][RTW89_FCC][2][77] = 68, [0][0][1][0][RTW89_ETSI][1][77] = 127, [0][0][1][0][RTW89_ETSI][0][77] = 127, [0][0][1][0][RTW89_MKK][1][77] = 127, [0][0][1][0][RTW89_MKK][0][77] = 127, [0][0][1][0][RTW89_IC][1][77] = 22, - [0][0][1][0][RTW89_IC][2][77] = 68, [0][0][1][0][RTW89_KCC][1][77] = 26, [0][0][1][0][RTW89_KCC][0][77] = 127, [0][0][1][0][RTW89_ACMA][1][77] = 127, @@ -39219,13 +38647,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][77] = 127, [0][0][1][0][RTW89_THAILAND][0][77] = 127, [0][0][1][0][RTW89_FCC][1][79] = 22, - [0][0][1][0][RTW89_FCC][2][79] = 68, [0][0][1][0][RTW89_ETSI][1][79] = 127, [0][0][1][0][RTW89_ETSI][0][79] = 127, [0][0][1][0][RTW89_MKK][1][79] = 127, [0][0][1][0][RTW89_MKK][0][79] = 127, [0][0][1][0][RTW89_IC][1][79] = 22, - [0][0][1][0][RTW89_IC][2][79] = 68, [0][0][1][0][RTW89_KCC][1][79] = 26, [0][0][1][0][RTW89_KCC][0][79] = 127, [0][0][1][0][RTW89_ACMA][1][79] = 127, @@ -39238,13 +38664,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][79] = 127, [0][0][1][0][RTW89_THAILAND][0][79] = 127, [0][0][1][0][RTW89_FCC][1][81] = 22, - [0][0][1][0][RTW89_FCC][2][81] = 68, [0][0][1][0][RTW89_ETSI][1][81] = 127, [0][0][1][0][RTW89_ETSI][0][81] = 127, [0][0][1][0][RTW89_MKK][1][81] = 127, [0][0][1][0][RTW89_MKK][0][81] = 127, [0][0][1][0][RTW89_IC][1][81] = 22, - [0][0][1][0][RTW89_IC][2][81] = 68, [0][0][1][0][RTW89_KCC][1][81] = 26, [0][0][1][0][RTW89_KCC][0][81] = 127, [0][0][1][0][RTW89_ACMA][1][81] = 127, @@ -39257,13 +38681,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][81] = 127, [0][0][1][0][RTW89_THAILAND][0][81] = 127, [0][0][1][0][RTW89_FCC][1][83] = 22, - [0][0][1][0][RTW89_FCC][2][83] = 68, [0][0][1][0][RTW89_ETSI][1][83] = 127, [0][0][1][0][RTW89_ETSI][0][83] = 127, [0][0][1][0][RTW89_MKK][1][83] = 127, [0][0][1][0][RTW89_MKK][0][83] = 127, [0][0][1][0][RTW89_IC][1][83] = 22, - [0][0][1][0][RTW89_IC][2][83] = 68, [0][0][1][0][RTW89_KCC][1][83] = 32, 
[0][0][1][0][RTW89_KCC][0][83] = 127, [0][0][1][0][RTW89_ACMA][1][83] = 127, @@ -39276,13 +38698,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][83] = 127, [0][0][1][0][RTW89_THAILAND][0][83] = 127, [0][0][1][0][RTW89_FCC][1][85] = 22, - [0][0][1][0][RTW89_FCC][2][85] = 68, [0][0][1][0][RTW89_ETSI][1][85] = 127, [0][0][1][0][RTW89_ETSI][0][85] = 127, [0][0][1][0][RTW89_MKK][1][85] = 127, [0][0][1][0][RTW89_MKK][0][85] = 127, [0][0][1][0][RTW89_IC][1][85] = 22, - [0][0][1][0][RTW89_IC][2][85] = 68, [0][0][1][0][RTW89_KCC][1][85] = 32, [0][0][1][0][RTW89_KCC][0][85] = 127, [0][0][1][0][RTW89_ACMA][1][85] = 127, @@ -39295,13 +38715,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][85] = 127, [0][0][1][0][RTW89_THAILAND][0][85] = 127, [0][0][1][0][RTW89_FCC][1][87] = 22, - [0][0][1][0][RTW89_FCC][2][87] = 127, [0][0][1][0][RTW89_ETSI][1][87] = 127, [0][0][1][0][RTW89_ETSI][0][87] = 127, [0][0][1][0][RTW89_MKK][1][87] = 127, [0][0][1][0][RTW89_MKK][0][87] = 127, [0][0][1][0][RTW89_IC][1][87] = 22, - [0][0][1][0][RTW89_IC][2][87] = 127, [0][0][1][0][RTW89_KCC][1][87] = 32, [0][0][1][0][RTW89_KCC][0][87] = 127, [0][0][1][0][RTW89_ACMA][1][87] = 127, @@ -39314,13 +38732,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][87] = 127, [0][0][1][0][RTW89_THAILAND][0][87] = 127, [0][0][1][0][RTW89_FCC][1][89] = 22, - [0][0][1][0][RTW89_FCC][2][89] = 127, [0][0][1][0][RTW89_ETSI][1][89] = 127, [0][0][1][0][RTW89_ETSI][0][89] = 127, [0][0][1][0][RTW89_MKK][1][89] = 127, [0][0][1][0][RTW89_MKK][0][89] = 127, [0][0][1][0][RTW89_IC][1][89] = 22, - [0][0][1][0][RTW89_IC][2][89] = 127, [0][0][1][0][RTW89_KCC][1][89] = 32, [0][0][1][0][RTW89_KCC][0][89] = 127, [0][0][1][0][RTW89_ACMA][1][89] = 127, @@ -39333,13 +38749,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][89] = 127, [0][0][1][0][RTW89_THAILAND][0][89] = 127, [0][0][1][0][RTW89_FCC][1][90] = 22, - [0][0][1][0][RTW89_FCC][2][90] = 127, [0][0][1][0][RTW89_ETSI][1][90] = 127, [0][0][1][0][RTW89_ETSI][0][90] = 127, [0][0][1][0][RTW89_MKK][1][90] = 127, [0][0][1][0][RTW89_MKK][0][90] = 127, [0][0][1][0][RTW89_IC][1][90] = 22, - [0][0][1][0][RTW89_IC][2][90] = 127, [0][0][1][0][RTW89_KCC][1][90] = 32, [0][0][1][0][RTW89_KCC][0][90] = 127, [0][0][1][0][RTW89_ACMA][1][90] = 127, @@ -39352,13 +38766,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][90] = 127, [0][0][1][0][RTW89_THAILAND][0][90] = 127, [0][0][1][0][RTW89_FCC][1][92] = 22, - [0][0][1][0][RTW89_FCC][2][92] = 127, [0][0][1][0][RTW89_ETSI][1][92] = 127, [0][0][1][0][RTW89_ETSI][0][92] = 127, [0][0][1][0][RTW89_MKK][1][92] = 127, [0][0][1][0][RTW89_MKK][0][92] = 127, [0][0][1][0][RTW89_IC][1][92] = 22, - [0][0][1][0][RTW89_IC][2][92] = 127, [0][0][1][0][RTW89_KCC][1][92] = 32, [0][0][1][0][RTW89_KCC][0][92] = 127, [0][0][1][0][RTW89_ACMA][1][92] = 127, @@ -39371,13 +38783,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][92] = 127, [0][0][1][0][RTW89_THAILAND][0][92] = 127, [0][0][1][0][RTW89_FCC][1][94] = 22, - [0][0][1][0][RTW89_FCC][2][94] = 127, [0][0][1][0][RTW89_ETSI][1][94] = 127, [0][0][1][0][RTW89_ETSI][0][94] = 127, [0][0][1][0][RTW89_MKK][1][94] = 127, [0][0][1][0][RTW89_MKK][0][94] = 127, [0][0][1][0][RTW89_IC][1][94] = 22, - [0][0][1][0][RTW89_IC][2][94] = 127, 
[0][0][1][0][RTW89_KCC][1][94] = 32, [0][0][1][0][RTW89_KCC][0][94] = 127, [0][0][1][0][RTW89_ACMA][1][94] = 127, @@ -39390,13 +38800,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][94] = 127, [0][0][1][0][RTW89_THAILAND][0][94] = 127, [0][0][1][0][RTW89_FCC][1][96] = 22, - [0][0][1][0][RTW89_FCC][2][96] = 127, [0][0][1][0][RTW89_ETSI][1][96] = 127, [0][0][1][0][RTW89_ETSI][0][96] = 127, [0][0][1][0][RTW89_MKK][1][96] = 127, [0][0][1][0][RTW89_MKK][0][96] = 127, [0][0][1][0][RTW89_IC][1][96] = 22, - [0][0][1][0][RTW89_IC][2][96] = 127, [0][0][1][0][RTW89_KCC][1][96] = 32, [0][0][1][0][RTW89_KCC][0][96] = 127, [0][0][1][0][RTW89_ACMA][1][96] = 127, @@ -39409,13 +38817,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][96] = 127, [0][0][1][0][RTW89_THAILAND][0][96] = 127, [0][0][1][0][RTW89_FCC][1][98] = 22, - [0][0][1][0][RTW89_FCC][2][98] = 127, [0][0][1][0][RTW89_ETSI][1][98] = 127, [0][0][1][0][RTW89_ETSI][0][98] = 127, [0][0][1][0][RTW89_MKK][1][98] = 127, [0][0][1][0][RTW89_MKK][0][98] = 127, [0][0][1][0][RTW89_IC][1][98] = 22, - [0][0][1][0][RTW89_IC][2][98] = 127, [0][0][1][0][RTW89_KCC][1][98] = 32, [0][0][1][0][RTW89_KCC][0][98] = 127, [0][0][1][0][RTW89_ACMA][1][98] = 127, @@ -39428,13 +38834,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][98] = 127, [0][0][1][0][RTW89_THAILAND][0][98] = 127, [0][0][1][0][RTW89_FCC][1][100] = 22, - [0][0][1][0][RTW89_FCC][2][100] = 127, [0][0][1][0][RTW89_ETSI][1][100] = 127, [0][0][1][0][RTW89_ETSI][0][100] = 127, [0][0][1][0][RTW89_MKK][1][100] = 127, [0][0][1][0][RTW89_MKK][0][100] = 127, [0][0][1][0][RTW89_IC][1][100] = 22, - [0][0][1][0][RTW89_IC][2][100] = 127, [0][0][1][0][RTW89_KCC][1][100] = 32, [0][0][1][0][RTW89_KCC][0][100] = 127, [0][0][1][0][RTW89_ACMA][1][100] = 127, @@ -39447,13 +38851,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][100] = 127, [0][0][1][0][RTW89_THAILAND][0][100] = 127, [0][0][1][0][RTW89_FCC][1][102] = 22, - [0][0][1][0][RTW89_FCC][2][102] = 127, [0][0][1][0][RTW89_ETSI][1][102] = 127, [0][0][1][0][RTW89_ETSI][0][102] = 127, [0][0][1][0][RTW89_MKK][1][102] = 127, [0][0][1][0][RTW89_MKK][0][102] = 127, [0][0][1][0][RTW89_IC][1][102] = 22, - [0][0][1][0][RTW89_IC][2][102] = 127, [0][0][1][0][RTW89_KCC][1][102] = 32, [0][0][1][0][RTW89_KCC][0][102] = 127, [0][0][1][0][RTW89_ACMA][1][102] = 127, @@ -39466,13 +38868,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][102] = 127, [0][0][1][0][RTW89_THAILAND][0][102] = 127, [0][0][1][0][RTW89_FCC][1][104] = 22, - [0][0][1][0][RTW89_FCC][2][104] = 127, [0][0][1][0][RTW89_ETSI][1][104] = 127, [0][0][1][0][RTW89_ETSI][0][104] = 127, [0][0][1][0][RTW89_MKK][1][104] = 127, [0][0][1][0][RTW89_MKK][0][104] = 127, [0][0][1][0][RTW89_IC][1][104] = 22, - [0][0][1][0][RTW89_IC][2][104] = 127, [0][0][1][0][RTW89_KCC][1][104] = 32, [0][0][1][0][RTW89_KCC][0][104] = 127, [0][0][1][0][RTW89_ACMA][1][104] = 127, @@ -39485,13 +38885,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][104] = 127, [0][0][1][0][RTW89_THAILAND][0][104] = 127, [0][0][1][0][RTW89_FCC][1][105] = 22, - [0][0][1][0][RTW89_FCC][2][105] = 127, [0][0][1][0][RTW89_ETSI][1][105] = 127, [0][0][1][0][RTW89_ETSI][0][105] = 127, [0][0][1][0][RTW89_MKK][1][105] = 127, [0][0][1][0][RTW89_MKK][0][105] = 
127, [0][0][1][0][RTW89_IC][1][105] = 22, - [0][0][1][0][RTW89_IC][2][105] = 127, [0][0][1][0][RTW89_KCC][1][105] = 32, [0][0][1][0][RTW89_KCC][0][105] = 127, [0][0][1][0][RTW89_ACMA][1][105] = 127, @@ -39504,13 +38902,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][105] = 127, [0][0][1][0][RTW89_THAILAND][0][105] = 127, [0][0][1][0][RTW89_FCC][1][107] = 24, - [0][0][1][0][RTW89_FCC][2][107] = 127, [0][0][1][0][RTW89_ETSI][1][107] = 127, [0][0][1][0][RTW89_ETSI][0][107] = 127, [0][0][1][0][RTW89_MKK][1][107] = 127, [0][0][1][0][RTW89_MKK][0][107] = 127, [0][0][1][0][RTW89_IC][1][107] = 24, - [0][0][1][0][RTW89_IC][2][107] = 127, [0][0][1][0][RTW89_KCC][1][107] = 32, [0][0][1][0][RTW89_KCC][0][107] = 127, [0][0][1][0][RTW89_ACMA][1][107] = 127, @@ -39523,13 +38919,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][107] = 127, [0][0][1][0][RTW89_THAILAND][0][107] = 127, [0][0][1][0][RTW89_FCC][1][109] = 24, - [0][0][1][0][RTW89_FCC][2][109] = 127, [0][0][1][0][RTW89_ETSI][1][109] = 127, [0][0][1][0][RTW89_ETSI][0][109] = 127, [0][0][1][0][RTW89_MKK][1][109] = 127, [0][0][1][0][RTW89_MKK][0][109] = 127, [0][0][1][0][RTW89_IC][1][109] = 24, - [0][0][1][0][RTW89_IC][2][109] = 127, [0][0][1][0][RTW89_KCC][1][109] = 32, [0][0][1][0][RTW89_KCC][0][109] = 127, [0][0][1][0][RTW89_ACMA][1][109] = 127, @@ -39542,13 +38936,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][109] = 127, [0][0][1][0][RTW89_THAILAND][0][109] = 127, [0][0][1][0][RTW89_FCC][1][111] = 127, - [0][0][1][0][RTW89_FCC][2][111] = 127, [0][0][1][0][RTW89_ETSI][1][111] = 127, [0][0][1][0][RTW89_ETSI][0][111] = 127, [0][0][1][0][RTW89_MKK][1][111] = 127, [0][0][1][0][RTW89_MKK][0][111] = 127, [0][0][1][0][RTW89_IC][1][111] = 127, - [0][0][1][0][RTW89_IC][2][111] = 127, [0][0][1][0][RTW89_KCC][1][111] = 127, [0][0][1][0][RTW89_KCC][0][111] = 127, [0][0][1][0][RTW89_ACMA][1][111] = 127, @@ -39561,13 +38953,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][111] = 127, [0][0][1][0][RTW89_THAILAND][0][111] = 127, [0][0][1][0][RTW89_FCC][1][113] = 127, - [0][0][1][0][RTW89_FCC][2][113] = 127, [0][0][1][0][RTW89_ETSI][1][113] = 127, [0][0][1][0][RTW89_ETSI][0][113] = 127, [0][0][1][0][RTW89_MKK][1][113] = 127, [0][0][1][0][RTW89_MKK][0][113] = 127, [0][0][1][0][RTW89_IC][1][113] = 127, - [0][0][1][0][RTW89_IC][2][113] = 127, [0][0][1][0][RTW89_KCC][1][113] = 127, [0][0][1][0][RTW89_KCC][0][113] = 127, [0][0][1][0][RTW89_ACMA][1][113] = 127, @@ -39580,13 +38970,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][113] = 127, [0][0][1][0][RTW89_THAILAND][0][113] = 127, [0][0][1][0][RTW89_FCC][1][115] = 127, - [0][0][1][0][RTW89_FCC][2][115] = 127, [0][0][1][0][RTW89_ETSI][1][115] = 127, [0][0][1][0][RTW89_ETSI][0][115] = 127, [0][0][1][0][RTW89_MKK][1][115] = 127, [0][0][1][0][RTW89_MKK][0][115] = 127, [0][0][1][0][RTW89_IC][1][115] = 127, - [0][0][1][0][RTW89_IC][2][115] = 127, [0][0][1][0][RTW89_KCC][1][115] = 127, [0][0][1][0][RTW89_KCC][0][115] = 127, [0][0][1][0][RTW89_ACMA][1][115] = 127, @@ -39599,13 +38987,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][115] = 127, [0][0][1][0][RTW89_THAILAND][0][115] = 127, [0][0][1][0][RTW89_FCC][1][117] = 127, - [0][0][1][0][RTW89_FCC][2][117] = 127, 
[0][0][1][0][RTW89_ETSI][1][117] = 127, [0][0][1][0][RTW89_ETSI][0][117] = 127, [0][0][1][0][RTW89_MKK][1][117] = 127, [0][0][1][0][RTW89_MKK][0][117] = 127, [0][0][1][0][RTW89_IC][1][117] = 127, - [0][0][1][0][RTW89_IC][2][117] = 127, [0][0][1][0][RTW89_KCC][1][117] = 127, [0][0][1][0][RTW89_KCC][0][117] = 127, [0][0][1][0][RTW89_ACMA][1][117] = 127, @@ -39618,13 +39004,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][117] = 127, [0][0][1][0][RTW89_THAILAND][0][117] = 127, [0][0][1][0][RTW89_FCC][1][119] = 127, - [0][0][1][0][RTW89_FCC][2][119] = 127, [0][0][1][0][RTW89_ETSI][1][119] = 127, [0][0][1][0][RTW89_ETSI][0][119] = 127, [0][0][1][0][RTW89_MKK][1][119] = 127, [0][0][1][0][RTW89_MKK][0][119] = 127, [0][0][1][0][RTW89_IC][1][119] = 127, - [0][0][1][0][RTW89_IC][2][119] = 127, [0][0][1][0][RTW89_KCC][1][119] = 127, [0][0][1][0][RTW89_KCC][0][119] = 127, [0][0][1][0][RTW89_ACMA][1][119] = 127, @@ -39637,13 +39021,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][1][0][RTW89_THAILAND][1][119] = 127, [0][0][1][0][RTW89_THAILAND][0][119] = 127, [0][1][1][0][RTW89_FCC][1][0] = -2, - [0][1][1][0][RTW89_FCC][2][0] = 54, [0][1][1][0][RTW89_ETSI][1][0] = 54, [0][1][1][0][RTW89_ETSI][0][0] = 18, [0][1][1][0][RTW89_MKK][1][0] = 56, [0][1][1][0][RTW89_MKK][0][0] = 16, [0][1][1][0][RTW89_IC][1][0] = -2, - [0][1][1][0][RTW89_IC][2][0] = 54, [0][1][1][0][RTW89_KCC][1][0] = 12, [0][1][1][0][RTW89_KCC][0][0] = 10, [0][1][1][0][RTW89_ACMA][1][0] = 54, @@ -39656,13 +39038,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][0] = 44, [0][1][1][0][RTW89_THAILAND][0][0] = -2, [0][1][1][0][RTW89_FCC][1][2] = -4, - [0][1][1][0][RTW89_FCC][2][2] = 54, [0][1][1][0][RTW89_ETSI][1][2] = 54, [0][1][1][0][RTW89_ETSI][0][2] = 18, [0][1][1][0][RTW89_MKK][1][2] = 54, [0][1][1][0][RTW89_MKK][0][2] = 16, [0][1][1][0][RTW89_IC][1][2] = -4, - [0][1][1][0][RTW89_IC][2][2] = 54, [0][1][1][0][RTW89_KCC][1][2] = 12, [0][1][1][0][RTW89_KCC][0][2] = 12, [0][1][1][0][RTW89_ACMA][1][2] = 54, @@ -39675,13 +39055,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][2] = 44, [0][1][1][0][RTW89_THAILAND][0][2] = -4, [0][1][1][0][RTW89_FCC][1][4] = -4, - [0][1][1][0][RTW89_FCC][2][4] = 54, [0][1][1][0][RTW89_ETSI][1][4] = 54, [0][1][1][0][RTW89_ETSI][0][4] = 18, [0][1][1][0][RTW89_MKK][1][4] = 54, [0][1][1][0][RTW89_MKK][0][4] = 16, [0][1][1][0][RTW89_IC][1][4] = -4, - [0][1][1][0][RTW89_IC][2][4] = 54, [0][1][1][0][RTW89_KCC][1][4] = 12, [0][1][1][0][RTW89_KCC][0][4] = 12, [0][1][1][0][RTW89_ACMA][1][4] = 54, @@ -39694,13 +39072,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][4] = 44, [0][1][1][0][RTW89_THAILAND][0][4] = -4, [0][1][1][0][RTW89_FCC][1][6] = -4, - [0][1][1][0][RTW89_FCC][2][6] = 54, [0][1][1][0][RTW89_ETSI][1][6] = 54, [0][1][1][0][RTW89_ETSI][0][6] = 18, [0][1][1][0][RTW89_MKK][1][6] = 54, [0][1][1][0][RTW89_MKK][0][6] = 16, [0][1][1][0][RTW89_IC][1][6] = -4, - [0][1][1][0][RTW89_IC][2][6] = 54, [0][1][1][0][RTW89_KCC][1][6] = 12, [0][1][1][0][RTW89_KCC][0][6] = 12, [0][1][1][0][RTW89_ACMA][1][6] = 54, @@ -39713,13 +39089,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][6] = 44, [0][1][1][0][RTW89_THAILAND][0][6] = -4, [0][1][1][0][RTW89_FCC][1][8] = -4, - [0][1][1][0][RTW89_FCC][2][8] = 54, [0][1][1][0][RTW89_ETSI][1][8] = 
54, [0][1][1][0][RTW89_ETSI][0][8] = 18, [0][1][1][0][RTW89_MKK][1][8] = 54, [0][1][1][0][RTW89_MKK][0][8] = 16, [0][1][1][0][RTW89_IC][1][8] = -4, - [0][1][1][0][RTW89_IC][2][8] = 54, [0][1][1][0][RTW89_KCC][1][8] = 12, [0][1][1][0][RTW89_KCC][0][8] = 12, [0][1][1][0][RTW89_ACMA][1][8] = 54, @@ -39732,13 +39106,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][8] = 44, [0][1][1][0][RTW89_THAILAND][0][8] = -4, [0][1][1][0][RTW89_FCC][1][10] = -4, - [0][1][1][0][RTW89_FCC][2][10] = 54, [0][1][1][0][RTW89_ETSI][1][10] = 54, [0][1][1][0][RTW89_ETSI][0][10] = 18, [0][1][1][0][RTW89_MKK][1][10] = 54, [0][1][1][0][RTW89_MKK][0][10] = 16, [0][1][1][0][RTW89_IC][1][10] = -4, - [0][1][1][0][RTW89_IC][2][10] = 54, [0][1][1][0][RTW89_KCC][1][10] = 12, [0][1][1][0][RTW89_KCC][0][10] = 12, [0][1][1][0][RTW89_ACMA][1][10] = 54, @@ -39751,13 +39123,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][10] = 44, [0][1][1][0][RTW89_THAILAND][0][10] = -4, [0][1][1][0][RTW89_FCC][1][12] = -4, - [0][1][1][0][RTW89_FCC][2][12] = 54, [0][1][1][0][RTW89_ETSI][1][12] = 54, [0][1][1][0][RTW89_ETSI][0][12] = 18, [0][1][1][0][RTW89_MKK][1][12] = 54, [0][1][1][0][RTW89_MKK][0][12] = 16, [0][1][1][0][RTW89_IC][1][12] = -4, - [0][1][1][0][RTW89_IC][2][12] = 54, [0][1][1][0][RTW89_KCC][1][12] = 12, [0][1][1][0][RTW89_KCC][0][12] = 12, [0][1][1][0][RTW89_ACMA][1][12] = 54, @@ -39770,13 +39140,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][12] = 44, [0][1][1][0][RTW89_THAILAND][0][12] = -4, [0][1][1][0][RTW89_FCC][1][14] = -4, - [0][1][1][0][RTW89_FCC][2][14] = 54, [0][1][1][0][RTW89_ETSI][1][14] = 54, [0][1][1][0][RTW89_ETSI][0][14] = 18, [0][1][1][0][RTW89_MKK][1][14] = 54, [0][1][1][0][RTW89_MKK][0][14] = 16, [0][1][1][0][RTW89_IC][1][14] = -4, - [0][1][1][0][RTW89_IC][2][14] = 54, [0][1][1][0][RTW89_KCC][1][14] = 12, [0][1][1][0][RTW89_KCC][0][14] = 12, [0][1][1][0][RTW89_ACMA][1][14] = 54, @@ -39789,13 +39157,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][14] = 44, [0][1][1][0][RTW89_THAILAND][0][14] = -4, [0][1][1][0][RTW89_FCC][1][15] = -4, - [0][1][1][0][RTW89_FCC][2][15] = 54, [0][1][1][0][RTW89_ETSI][1][15] = 54, [0][1][1][0][RTW89_ETSI][0][15] = 18, [0][1][1][0][RTW89_MKK][1][15] = 54, [0][1][1][0][RTW89_MKK][0][15] = 16, [0][1][1][0][RTW89_IC][1][15] = -4, - [0][1][1][0][RTW89_IC][2][15] = 54, [0][1][1][0][RTW89_KCC][1][15] = 12, [0][1][1][0][RTW89_KCC][0][15] = 12, [0][1][1][0][RTW89_ACMA][1][15] = 54, @@ -39808,13 +39174,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][15] = 44, [0][1][1][0][RTW89_THAILAND][0][15] = -4, [0][1][1][0][RTW89_FCC][1][17] = -4, - [0][1][1][0][RTW89_FCC][2][17] = 54, [0][1][1][0][RTW89_ETSI][1][17] = 54, [0][1][1][0][RTW89_ETSI][0][17] = 18, [0][1][1][0][RTW89_MKK][1][17] = 54, [0][1][1][0][RTW89_MKK][0][17] = 16, [0][1][1][0][RTW89_IC][1][17] = -4, - [0][1][1][0][RTW89_IC][2][17] = 54, [0][1][1][0][RTW89_KCC][1][17] = 12, [0][1][1][0][RTW89_KCC][0][17] = 12, [0][1][1][0][RTW89_ACMA][1][17] = 54, @@ -39827,13 +39191,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][17] = 44, [0][1][1][0][RTW89_THAILAND][0][17] = -4, [0][1][1][0][RTW89_FCC][1][19] = -4, - [0][1][1][0][RTW89_FCC][2][19] = 54, [0][1][1][0][RTW89_ETSI][1][19] = 54, [0][1][1][0][RTW89_ETSI][0][19] 
= 18, [0][1][1][0][RTW89_MKK][1][19] = 54, [0][1][1][0][RTW89_MKK][0][19] = 16, [0][1][1][0][RTW89_IC][1][19] = -4, - [0][1][1][0][RTW89_IC][2][19] = 54, [0][1][1][0][RTW89_KCC][1][19] = 12, [0][1][1][0][RTW89_KCC][0][19] = 12, [0][1][1][0][RTW89_ACMA][1][19] = 54, @@ -39846,13 +39208,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][19] = 44, [0][1][1][0][RTW89_THAILAND][0][19] = -4, [0][1][1][0][RTW89_FCC][1][21] = -4, - [0][1][1][0][RTW89_FCC][2][21] = 54, [0][1][1][0][RTW89_ETSI][1][21] = 54, [0][1][1][0][RTW89_ETSI][0][21] = 18, [0][1][1][0][RTW89_MKK][1][21] = 54, [0][1][1][0][RTW89_MKK][0][21] = 16, [0][1][1][0][RTW89_IC][1][21] = -4, - [0][1][1][0][RTW89_IC][2][21] = 54, [0][1][1][0][RTW89_KCC][1][21] = 12, [0][1][1][0][RTW89_KCC][0][21] = 12, [0][1][1][0][RTW89_ACMA][1][21] = 54, @@ -39865,13 +39225,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][21] = 44, [0][1][1][0][RTW89_THAILAND][0][21] = -4, [0][1][1][0][RTW89_FCC][1][23] = -4, - [0][1][1][0][RTW89_FCC][2][23] = 68, [0][1][1][0][RTW89_ETSI][1][23] = 54, [0][1][1][0][RTW89_ETSI][0][23] = 18, [0][1][1][0][RTW89_MKK][1][23] = 54, [0][1][1][0][RTW89_MKK][0][23] = 16, [0][1][1][0][RTW89_IC][1][23] = -4, - [0][1][1][0][RTW89_IC][2][23] = 68, [0][1][1][0][RTW89_KCC][1][23] = 12, [0][1][1][0][RTW89_KCC][0][23] = 10, [0][1][1][0][RTW89_ACMA][1][23] = 54, @@ -39884,13 +39242,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][23] = 44, [0][1][1][0][RTW89_THAILAND][0][23] = -4, [0][1][1][0][RTW89_FCC][1][25] = -4, - [0][1][1][0][RTW89_FCC][2][25] = 68, [0][1][1][0][RTW89_ETSI][1][25] = 54, [0][1][1][0][RTW89_ETSI][0][25] = 18, [0][1][1][0][RTW89_MKK][1][25] = 54, [0][1][1][0][RTW89_MKK][0][25] = 16, [0][1][1][0][RTW89_IC][1][25] = -4, - [0][1][1][0][RTW89_IC][2][25] = 68, [0][1][1][0][RTW89_KCC][1][25] = 12, [0][1][1][0][RTW89_KCC][0][25] = 14, [0][1][1][0][RTW89_ACMA][1][25] = 54, @@ -39903,13 +39259,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][25] = 42, [0][1][1][0][RTW89_THAILAND][0][25] = -4, [0][1][1][0][RTW89_FCC][1][27] = -4, - [0][1][1][0][RTW89_FCC][2][27] = 68, [0][1][1][0][RTW89_ETSI][1][27] = 54, [0][1][1][0][RTW89_ETSI][0][27] = 18, [0][1][1][0][RTW89_MKK][1][27] = 54, [0][1][1][0][RTW89_MKK][0][27] = 16, [0][1][1][0][RTW89_IC][1][27] = -4, - [0][1][1][0][RTW89_IC][2][27] = 68, [0][1][1][0][RTW89_KCC][1][27] = 12, [0][1][1][0][RTW89_KCC][0][27] = 14, [0][1][1][0][RTW89_ACMA][1][27] = 54, @@ -39922,13 +39276,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][27] = 42, [0][1][1][0][RTW89_THAILAND][0][27] = -4, [0][1][1][0][RTW89_FCC][1][29] = -4, - [0][1][1][0][RTW89_FCC][2][29] = 68, [0][1][1][0][RTW89_ETSI][1][29] = 54, [0][1][1][0][RTW89_ETSI][0][29] = 18, [0][1][1][0][RTW89_MKK][1][29] = 54, [0][1][1][0][RTW89_MKK][0][29] = 16, [0][1][1][0][RTW89_IC][1][29] = -4, - [0][1][1][0][RTW89_IC][2][29] = 68, [0][1][1][0][RTW89_KCC][1][29] = 12, [0][1][1][0][RTW89_KCC][0][29] = 14, [0][1][1][0][RTW89_ACMA][1][29] = 54, @@ -39941,13 +39293,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][29] = 42, [0][1][1][0][RTW89_THAILAND][0][29] = -4, [0][1][1][0][RTW89_FCC][1][30] = -4, - [0][1][1][0][RTW89_FCC][2][30] = 68, [0][1][1][0][RTW89_ETSI][1][30] = 54, [0][1][1][0][RTW89_ETSI][0][30] = 18, 
[0][1][1][0][RTW89_MKK][1][30] = 54, [0][1][1][0][RTW89_MKK][0][30] = 16, [0][1][1][0][RTW89_IC][1][30] = -4, - [0][1][1][0][RTW89_IC][2][30] = 68, [0][1][1][0][RTW89_KCC][1][30] = 12, [0][1][1][0][RTW89_KCC][0][30] = 14, [0][1][1][0][RTW89_ACMA][1][30] = 54, @@ -39960,13 +39310,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][30] = 42, [0][1][1][0][RTW89_THAILAND][0][30] = -4, [0][1][1][0][RTW89_FCC][1][32] = -4, - [0][1][1][0][RTW89_FCC][2][32] = 68, [0][1][1][0][RTW89_ETSI][1][32] = 54, [0][1][1][0][RTW89_ETSI][0][32] = 18, [0][1][1][0][RTW89_MKK][1][32] = 54, [0][1][1][0][RTW89_MKK][0][32] = 16, [0][1][1][0][RTW89_IC][1][32] = -4, - [0][1][1][0][RTW89_IC][2][32] = 68, [0][1][1][0][RTW89_KCC][1][32] = 12, [0][1][1][0][RTW89_KCC][0][32] = 14, [0][1][1][0][RTW89_ACMA][1][32] = 54, @@ -39979,13 +39327,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][32] = 42, [0][1][1][0][RTW89_THAILAND][0][32] = -4, [0][1][1][0][RTW89_FCC][1][34] = -4, - [0][1][1][0][RTW89_FCC][2][34] = 68, [0][1][1][0][RTW89_ETSI][1][34] = 54, [0][1][1][0][RTW89_ETSI][0][34] = 18, [0][1][1][0][RTW89_MKK][1][34] = 54, [0][1][1][0][RTW89_MKK][0][34] = 16, [0][1][1][0][RTW89_IC][1][34] = -4, - [0][1][1][0][RTW89_IC][2][34] = 68, [0][1][1][0][RTW89_KCC][1][34] = 12, [0][1][1][0][RTW89_KCC][0][34] = 14, [0][1][1][0][RTW89_ACMA][1][34] = 54, @@ -39998,13 +39344,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][34] = 42, [0][1][1][0][RTW89_THAILAND][0][34] = -4, [0][1][1][0][RTW89_FCC][1][36] = -4, - [0][1][1][0][RTW89_FCC][2][36] = 68, [0][1][1][0][RTW89_ETSI][1][36] = 54, [0][1][1][0][RTW89_ETSI][0][36] = 18, [0][1][1][0][RTW89_MKK][1][36] = 54, [0][1][1][0][RTW89_MKK][0][36] = 16, [0][1][1][0][RTW89_IC][1][36] = -4, - [0][1][1][0][RTW89_IC][2][36] = 68, [0][1][1][0][RTW89_KCC][1][36] = 12, [0][1][1][0][RTW89_KCC][0][36] = 14, [0][1][1][0][RTW89_ACMA][1][36] = 54, @@ -40017,13 +39361,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][36] = 42, [0][1][1][0][RTW89_THAILAND][0][36] = -4, [0][1][1][0][RTW89_FCC][1][38] = -4, - [0][1][1][0][RTW89_FCC][2][38] = 68, [0][1][1][0][RTW89_ETSI][1][38] = 54, [0][1][1][0][RTW89_ETSI][0][38] = 18, [0][1][1][0][RTW89_MKK][1][38] = 54, [0][1][1][0][RTW89_MKK][0][38] = 16, [0][1][1][0][RTW89_IC][1][38] = -4, - [0][1][1][0][RTW89_IC][2][38] = 68, [0][1][1][0][RTW89_KCC][1][38] = 12, [0][1][1][0][RTW89_KCC][0][38] = 14, [0][1][1][0][RTW89_ACMA][1][38] = 54, @@ -40036,13 +39378,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][38] = 42, [0][1][1][0][RTW89_THAILAND][0][38] = -4, [0][1][1][0][RTW89_FCC][1][40] = -4, - [0][1][1][0][RTW89_FCC][2][40] = 68, [0][1][1][0][RTW89_ETSI][1][40] = 54, [0][1][1][0][RTW89_ETSI][0][40] = 18, [0][1][1][0][RTW89_MKK][1][40] = 54, [0][1][1][0][RTW89_MKK][0][40] = 16, [0][1][1][0][RTW89_IC][1][40] = -4, - [0][1][1][0][RTW89_IC][2][40] = 68, [0][1][1][0][RTW89_KCC][1][40] = 12, [0][1][1][0][RTW89_KCC][0][40] = 14, [0][1][1][0][RTW89_ACMA][1][40] = 54, @@ -40055,13 +39395,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][40] = 42, [0][1][1][0][RTW89_THAILAND][0][40] = -4, [0][1][1][0][RTW89_FCC][1][42] = -4, - [0][1][1][0][RTW89_FCC][2][42] = 68, [0][1][1][0][RTW89_ETSI][1][42] = 54, [0][1][1][0][RTW89_ETSI][0][42] = 18, 
[0][1][1][0][RTW89_MKK][1][42] = 54, [0][1][1][0][RTW89_MKK][0][42] = 16, [0][1][1][0][RTW89_IC][1][42] = -4, - [0][1][1][0][RTW89_IC][2][42] = 68, [0][1][1][0][RTW89_KCC][1][42] = 12, [0][1][1][0][RTW89_KCC][0][42] = 14, [0][1][1][0][RTW89_ACMA][1][42] = 54, @@ -40074,13 +39412,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][42] = 42, [0][1][1][0][RTW89_THAILAND][0][42] = -4, [0][1][1][0][RTW89_FCC][1][44] = -2, - [0][1][1][0][RTW89_FCC][2][44] = 68, [0][1][1][0][RTW89_ETSI][1][44] = 54, [0][1][1][0][RTW89_ETSI][0][44] = 18, [0][1][1][0][RTW89_MKK][1][44] = 34, [0][1][1][0][RTW89_MKK][0][44] = 16, [0][1][1][0][RTW89_IC][1][44] = -2, - [0][1][1][0][RTW89_IC][2][44] = 68, [0][1][1][0][RTW89_KCC][1][44] = 12, [0][1][1][0][RTW89_KCC][0][44] = 12, [0][1][1][0][RTW89_ACMA][1][44] = 54, @@ -40093,13 +39429,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][44] = 42, [0][1][1][0][RTW89_THAILAND][0][44] = -2, [0][1][1][0][RTW89_FCC][1][45] = -2, - [0][1][1][0][RTW89_FCC][2][45] = 127, [0][1][1][0][RTW89_ETSI][1][45] = 127, [0][1][1][0][RTW89_ETSI][0][45] = 127, [0][1][1][0][RTW89_MKK][1][45] = 127, [0][1][1][0][RTW89_MKK][0][45] = 127, [0][1][1][0][RTW89_IC][1][45] = -2, - [0][1][1][0][RTW89_IC][2][45] = 70, [0][1][1][0][RTW89_KCC][1][45] = 12, [0][1][1][0][RTW89_KCC][0][45] = 127, [0][1][1][0][RTW89_ACMA][1][45] = 127, @@ -40112,13 +39446,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][45] = 127, [0][1][1][0][RTW89_THAILAND][0][45] = 127, [0][1][1][0][RTW89_FCC][1][47] = -2, - [0][1][1][0][RTW89_FCC][2][47] = 127, [0][1][1][0][RTW89_ETSI][1][47] = 127, [0][1][1][0][RTW89_ETSI][0][47] = 127, [0][1][1][0][RTW89_MKK][1][47] = 127, [0][1][1][0][RTW89_MKK][0][47] = 127, [0][1][1][0][RTW89_IC][1][47] = -2, - [0][1][1][0][RTW89_IC][2][47] = 68, [0][1][1][0][RTW89_KCC][1][47] = 12, [0][1][1][0][RTW89_KCC][0][47] = 127, [0][1][1][0][RTW89_ACMA][1][47] = 127, @@ -40131,13 +39463,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][47] = 127, [0][1][1][0][RTW89_THAILAND][0][47] = 127, [0][1][1][0][RTW89_FCC][1][49] = -2, - [0][1][1][0][RTW89_FCC][2][49] = 127, [0][1][1][0][RTW89_ETSI][1][49] = 127, [0][1][1][0][RTW89_ETSI][0][49] = 127, [0][1][1][0][RTW89_MKK][1][49] = 127, [0][1][1][0][RTW89_MKK][0][49] = 127, [0][1][1][0][RTW89_IC][1][49] = -2, - [0][1][1][0][RTW89_IC][2][49] = 68, [0][1][1][0][RTW89_KCC][1][49] = 12, [0][1][1][0][RTW89_KCC][0][49] = 127, [0][1][1][0][RTW89_ACMA][1][49] = 127, @@ -40150,13 +39480,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][49] = 127, [0][1][1][0][RTW89_THAILAND][0][49] = 127, [0][1][1][0][RTW89_FCC][1][51] = -2, - [0][1][1][0][RTW89_FCC][2][51] = 127, [0][1][1][0][RTW89_ETSI][1][51] = 127, [0][1][1][0][RTW89_ETSI][0][51] = 127, [0][1][1][0][RTW89_MKK][1][51] = 127, [0][1][1][0][RTW89_MKK][0][51] = 127, [0][1][1][0][RTW89_IC][1][51] = -2, - [0][1][1][0][RTW89_IC][2][51] = 68, [0][1][1][0][RTW89_KCC][1][51] = 12, [0][1][1][0][RTW89_KCC][0][51] = 127, [0][1][1][0][RTW89_ACMA][1][51] = 127, @@ -40169,13 +39497,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][51] = 127, [0][1][1][0][RTW89_THAILAND][0][51] = 127, [0][1][1][0][RTW89_FCC][1][53] = -2, - [0][1][1][0][RTW89_FCC][2][53] = 127, [0][1][1][0][RTW89_ETSI][1][53] = 127, 
[0][1][1][0][RTW89_ETSI][0][53] = 127, [0][1][1][0][RTW89_MKK][1][53] = 127, [0][1][1][0][RTW89_MKK][0][53] = 127, [0][1][1][0][RTW89_IC][1][53] = -2, - [0][1][1][0][RTW89_IC][2][53] = 68, [0][1][1][0][RTW89_KCC][1][53] = 12, [0][1][1][0][RTW89_KCC][0][53] = 127, [0][1][1][0][RTW89_ACMA][1][53] = 127, @@ -40188,13 +39514,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][53] = 127, [0][1][1][0][RTW89_THAILAND][0][53] = 127, [0][1][1][0][RTW89_FCC][1][55] = -2, - [0][1][1][0][RTW89_FCC][2][55] = 68, [0][1][1][0][RTW89_ETSI][1][55] = 127, [0][1][1][0][RTW89_ETSI][0][55] = 127, [0][1][1][0][RTW89_MKK][1][55] = 127, [0][1][1][0][RTW89_MKK][0][55] = 127, [0][1][1][0][RTW89_IC][1][55] = -2, - [0][1][1][0][RTW89_IC][2][55] = 68, [0][1][1][0][RTW89_KCC][1][55] = 12, [0][1][1][0][RTW89_KCC][0][55] = 127, [0][1][1][0][RTW89_ACMA][1][55] = 127, @@ -40207,13 +39531,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][55] = 127, [0][1][1][0][RTW89_THAILAND][0][55] = 127, [0][1][1][0][RTW89_FCC][1][57] = -2, - [0][1][1][0][RTW89_FCC][2][57] = 68, [0][1][1][0][RTW89_ETSI][1][57] = 127, [0][1][1][0][RTW89_ETSI][0][57] = 127, [0][1][1][0][RTW89_MKK][1][57] = 127, [0][1][1][0][RTW89_MKK][0][57] = 127, [0][1][1][0][RTW89_IC][1][57] = -2, - [0][1][1][0][RTW89_IC][2][57] = 68, [0][1][1][0][RTW89_KCC][1][57] = 12, [0][1][1][0][RTW89_KCC][0][57] = 127, [0][1][1][0][RTW89_ACMA][1][57] = 127, @@ -40226,13 +39548,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][57] = 127, [0][1][1][0][RTW89_THAILAND][0][57] = 127, [0][1][1][0][RTW89_FCC][1][59] = -2, - [0][1][1][0][RTW89_FCC][2][59] = 68, [0][1][1][0][RTW89_ETSI][1][59] = 127, [0][1][1][0][RTW89_ETSI][0][59] = 127, [0][1][1][0][RTW89_MKK][1][59] = 127, [0][1][1][0][RTW89_MKK][0][59] = 127, [0][1][1][0][RTW89_IC][1][59] = -2, - [0][1][1][0][RTW89_IC][2][59] = 68, [0][1][1][0][RTW89_KCC][1][59] = 12, [0][1][1][0][RTW89_KCC][0][59] = 127, [0][1][1][0][RTW89_ACMA][1][59] = 127, @@ -40245,13 +39565,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][59] = 127, [0][1][1][0][RTW89_THAILAND][0][59] = 127, [0][1][1][0][RTW89_FCC][1][60] = -2, - [0][1][1][0][RTW89_FCC][2][60] = 68, [0][1][1][0][RTW89_ETSI][1][60] = 127, [0][1][1][0][RTW89_ETSI][0][60] = 127, [0][1][1][0][RTW89_MKK][1][60] = 127, [0][1][1][0][RTW89_MKK][0][60] = 127, [0][1][1][0][RTW89_IC][1][60] = -2, - [0][1][1][0][RTW89_IC][2][60] = 68, [0][1][1][0][RTW89_KCC][1][60] = 12, [0][1][1][0][RTW89_KCC][0][60] = 127, [0][1][1][0][RTW89_ACMA][1][60] = 127, @@ -40264,13 +39582,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][60] = 127, [0][1][1][0][RTW89_THAILAND][0][60] = 127, [0][1][1][0][RTW89_FCC][1][62] = -2, - [0][1][1][0][RTW89_FCC][2][62] = 68, [0][1][1][0][RTW89_ETSI][1][62] = 127, [0][1][1][0][RTW89_ETSI][0][62] = 127, [0][1][1][0][RTW89_MKK][1][62] = 127, [0][1][1][0][RTW89_MKK][0][62] = 127, [0][1][1][0][RTW89_IC][1][62] = -2, - [0][1][1][0][RTW89_IC][2][62] = 68, [0][1][1][0][RTW89_KCC][1][62] = 12, [0][1][1][0][RTW89_KCC][0][62] = 127, [0][1][1][0][RTW89_ACMA][1][62] = 127, @@ -40283,13 +39599,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][62] = 127, [0][1][1][0][RTW89_THAILAND][0][62] = 127, [0][1][1][0][RTW89_FCC][1][64] = -2, - [0][1][1][0][RTW89_FCC][2][64] = 68, 
[0][1][1][0][RTW89_ETSI][1][64] = 127, [0][1][1][0][RTW89_ETSI][0][64] = 127, [0][1][1][0][RTW89_MKK][1][64] = 127, [0][1][1][0][RTW89_MKK][0][64] = 127, [0][1][1][0][RTW89_IC][1][64] = -2, - [0][1][1][0][RTW89_IC][2][64] = 68, [0][1][1][0][RTW89_KCC][1][64] = 12, [0][1][1][0][RTW89_KCC][0][64] = 127, [0][1][1][0][RTW89_ACMA][1][64] = 127, @@ -40302,13 +39616,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][64] = 127, [0][1][1][0][RTW89_THAILAND][0][64] = 127, [0][1][1][0][RTW89_FCC][1][66] = -2, - [0][1][1][0][RTW89_FCC][2][66] = 68, [0][1][1][0][RTW89_ETSI][1][66] = 127, [0][1][1][0][RTW89_ETSI][0][66] = 127, [0][1][1][0][RTW89_MKK][1][66] = 127, [0][1][1][0][RTW89_MKK][0][66] = 127, [0][1][1][0][RTW89_IC][1][66] = -2, - [0][1][1][0][RTW89_IC][2][66] = 68, [0][1][1][0][RTW89_KCC][1][66] = 12, [0][1][1][0][RTW89_KCC][0][66] = 127, [0][1][1][0][RTW89_ACMA][1][66] = 127, @@ -40321,13 +39633,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][66] = 127, [0][1][1][0][RTW89_THAILAND][0][66] = 127, [0][1][1][0][RTW89_FCC][1][68] = -2, - [0][1][1][0][RTW89_FCC][2][68] = 68, [0][1][1][0][RTW89_ETSI][1][68] = 127, [0][1][1][0][RTW89_ETSI][0][68] = 127, [0][1][1][0][RTW89_MKK][1][68] = 127, [0][1][1][0][RTW89_MKK][0][68] = 127, [0][1][1][0][RTW89_IC][1][68] = -2, - [0][1][1][0][RTW89_IC][2][68] = 68, [0][1][1][0][RTW89_KCC][1][68] = 12, [0][1][1][0][RTW89_KCC][0][68] = 127, [0][1][1][0][RTW89_ACMA][1][68] = 127, @@ -40340,13 +39650,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][68] = 127, [0][1][1][0][RTW89_THAILAND][0][68] = 127, [0][1][1][0][RTW89_FCC][1][70] = -2, - [0][1][1][0][RTW89_FCC][2][70] = 68, [0][1][1][0][RTW89_ETSI][1][70] = 127, [0][1][1][0][RTW89_ETSI][0][70] = 127, [0][1][1][0][RTW89_MKK][1][70] = 127, [0][1][1][0][RTW89_MKK][0][70] = 127, [0][1][1][0][RTW89_IC][1][70] = -2, - [0][1][1][0][RTW89_IC][2][70] = 68, [0][1][1][0][RTW89_KCC][1][70] = 12, [0][1][1][0][RTW89_KCC][0][70] = 127, [0][1][1][0][RTW89_ACMA][1][70] = 127, @@ -40359,13 +39667,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][70] = 127, [0][1][1][0][RTW89_THAILAND][0][70] = 127, [0][1][1][0][RTW89_FCC][1][72] = -2, - [0][1][1][0][RTW89_FCC][2][72] = 68, [0][1][1][0][RTW89_ETSI][1][72] = 127, [0][1][1][0][RTW89_ETSI][0][72] = 127, [0][1][1][0][RTW89_MKK][1][72] = 127, [0][1][1][0][RTW89_MKK][0][72] = 127, [0][1][1][0][RTW89_IC][1][72] = -2, - [0][1][1][0][RTW89_IC][2][72] = 68, [0][1][1][0][RTW89_KCC][1][72] = 12, [0][1][1][0][RTW89_KCC][0][72] = 127, [0][1][1][0][RTW89_ACMA][1][72] = 127, @@ -40378,13 +39684,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][72] = 127, [0][1][1][0][RTW89_THAILAND][0][72] = 127, [0][1][1][0][RTW89_FCC][1][74] = -2, - [0][1][1][0][RTW89_FCC][2][74] = 68, [0][1][1][0][RTW89_ETSI][1][74] = 127, [0][1][1][0][RTW89_ETSI][0][74] = 127, [0][1][1][0][RTW89_MKK][1][74] = 127, [0][1][1][0][RTW89_MKK][0][74] = 127, [0][1][1][0][RTW89_IC][1][74] = -2, - [0][1][1][0][RTW89_IC][2][74] = 68, [0][1][1][0][RTW89_KCC][1][74] = 12, [0][1][1][0][RTW89_KCC][0][74] = 127, [0][1][1][0][RTW89_ACMA][1][74] = 127, @@ -40397,13 +39701,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][74] = 127, [0][1][1][0][RTW89_THAILAND][0][74] = 127, [0][1][1][0][RTW89_FCC][1][75] = -2, - 
[0][1][1][0][RTW89_FCC][2][75] = 68, [0][1][1][0][RTW89_ETSI][1][75] = 127, [0][1][1][0][RTW89_ETSI][0][75] = 127, [0][1][1][0][RTW89_MKK][1][75] = 127, [0][1][1][0][RTW89_MKK][0][75] = 127, [0][1][1][0][RTW89_IC][1][75] = -2, - [0][1][1][0][RTW89_IC][2][75] = 68, [0][1][1][0][RTW89_KCC][1][75] = 12, [0][1][1][0][RTW89_KCC][0][75] = 127, [0][1][1][0][RTW89_ACMA][1][75] = 127, @@ -40416,13 +39718,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][75] = 127, [0][1][1][0][RTW89_THAILAND][0][75] = 127, [0][1][1][0][RTW89_FCC][1][77] = -2, - [0][1][1][0][RTW89_FCC][2][77] = 68, [0][1][1][0][RTW89_ETSI][1][77] = 127, [0][1][1][0][RTW89_ETSI][0][77] = 127, [0][1][1][0][RTW89_MKK][1][77] = 127, [0][1][1][0][RTW89_MKK][0][77] = 127, [0][1][1][0][RTW89_IC][1][77] = -2, - [0][1][1][0][RTW89_IC][2][77] = 68, [0][1][1][0][RTW89_KCC][1][77] = 12, [0][1][1][0][RTW89_KCC][0][77] = 127, [0][1][1][0][RTW89_ACMA][1][77] = 127, @@ -40435,13 +39735,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][77] = 127, [0][1][1][0][RTW89_THAILAND][0][77] = 127, [0][1][1][0][RTW89_FCC][1][79] = -2, - [0][1][1][0][RTW89_FCC][2][79] = 68, [0][1][1][0][RTW89_ETSI][1][79] = 127, [0][1][1][0][RTW89_ETSI][0][79] = 127, [0][1][1][0][RTW89_MKK][1][79] = 127, [0][1][1][0][RTW89_MKK][0][79] = 127, [0][1][1][0][RTW89_IC][1][79] = -2, - [0][1][1][0][RTW89_IC][2][79] = 68, [0][1][1][0][RTW89_KCC][1][79] = 12, [0][1][1][0][RTW89_KCC][0][79] = 127, [0][1][1][0][RTW89_ACMA][1][79] = 127, @@ -40454,13 +39752,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][79] = 127, [0][1][1][0][RTW89_THAILAND][0][79] = 127, [0][1][1][0][RTW89_FCC][1][81] = -2, - [0][1][1][0][RTW89_FCC][2][81] = 68, [0][1][1][0][RTW89_ETSI][1][81] = 127, [0][1][1][0][RTW89_ETSI][0][81] = 127, [0][1][1][0][RTW89_MKK][1][81] = 127, [0][1][1][0][RTW89_MKK][0][81] = 127, [0][1][1][0][RTW89_IC][1][81] = -2, - [0][1][1][0][RTW89_IC][2][81] = 68, [0][1][1][0][RTW89_KCC][1][81] = 12, [0][1][1][0][RTW89_KCC][0][81] = 127, [0][1][1][0][RTW89_ACMA][1][81] = 127, @@ -40473,13 +39769,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][81] = 127, [0][1][1][0][RTW89_THAILAND][0][81] = 127, [0][1][1][0][RTW89_FCC][1][83] = -2, - [0][1][1][0][RTW89_FCC][2][83] = 68, [0][1][1][0][RTW89_ETSI][1][83] = 127, [0][1][1][0][RTW89_ETSI][0][83] = 127, [0][1][1][0][RTW89_MKK][1][83] = 127, [0][1][1][0][RTW89_MKK][0][83] = 127, [0][1][1][0][RTW89_IC][1][83] = -2, - [0][1][1][0][RTW89_IC][2][83] = 68, [0][1][1][0][RTW89_KCC][1][83] = 20, [0][1][1][0][RTW89_KCC][0][83] = 127, [0][1][1][0][RTW89_ACMA][1][83] = 127, @@ -40492,13 +39786,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][83] = 127, [0][1][1][0][RTW89_THAILAND][0][83] = 127, [0][1][1][0][RTW89_FCC][1][85] = -2, - [0][1][1][0][RTW89_FCC][2][85] = 68, [0][1][1][0][RTW89_ETSI][1][85] = 127, [0][1][1][0][RTW89_ETSI][0][85] = 127, [0][1][1][0][RTW89_MKK][1][85] = 127, [0][1][1][0][RTW89_MKK][0][85] = 127, [0][1][1][0][RTW89_IC][1][85] = -2, - [0][1][1][0][RTW89_IC][2][85] = 68, [0][1][1][0][RTW89_KCC][1][85] = 20, [0][1][1][0][RTW89_KCC][0][85] = 127, [0][1][1][0][RTW89_ACMA][1][85] = 127, @@ -40511,13 +39803,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][85] = 127, [0][1][1][0][RTW89_THAILAND][0][85] = 127, 
[0][1][1][0][RTW89_FCC][1][87] = -2, - [0][1][1][0][RTW89_FCC][2][87] = 127, [0][1][1][0][RTW89_ETSI][1][87] = 127, [0][1][1][0][RTW89_ETSI][0][87] = 127, [0][1][1][0][RTW89_MKK][1][87] = 127, [0][1][1][0][RTW89_MKK][0][87] = 127, [0][1][1][0][RTW89_IC][1][87] = -2, - [0][1][1][0][RTW89_IC][2][87] = 127, [0][1][1][0][RTW89_KCC][1][87] = 20, [0][1][1][0][RTW89_KCC][0][87] = 127, [0][1][1][0][RTW89_ACMA][1][87] = 127, @@ -40530,13 +39820,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][87] = 127, [0][1][1][0][RTW89_THAILAND][0][87] = 127, [0][1][1][0][RTW89_FCC][1][89] = -2, - [0][1][1][0][RTW89_FCC][2][89] = 127, [0][1][1][0][RTW89_ETSI][1][89] = 127, [0][1][1][0][RTW89_ETSI][0][89] = 127, [0][1][1][0][RTW89_MKK][1][89] = 127, [0][1][1][0][RTW89_MKK][0][89] = 127, [0][1][1][0][RTW89_IC][1][89] = -2, - [0][1][1][0][RTW89_IC][2][89] = 127, [0][1][1][0][RTW89_KCC][1][89] = 20, [0][1][1][0][RTW89_KCC][0][89] = 127, [0][1][1][0][RTW89_ACMA][1][89] = 127, @@ -40549,13 +39837,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][89] = 127, [0][1][1][0][RTW89_THAILAND][0][89] = 127, [0][1][1][0][RTW89_FCC][1][90] = -2, - [0][1][1][0][RTW89_FCC][2][90] = 127, [0][1][1][0][RTW89_ETSI][1][90] = 127, [0][1][1][0][RTW89_ETSI][0][90] = 127, [0][1][1][0][RTW89_MKK][1][90] = 127, [0][1][1][0][RTW89_MKK][0][90] = 127, [0][1][1][0][RTW89_IC][1][90] = -2, - [0][1][1][0][RTW89_IC][2][90] = 127, [0][1][1][0][RTW89_KCC][1][90] = 20, [0][1][1][0][RTW89_KCC][0][90] = 127, [0][1][1][0][RTW89_ACMA][1][90] = 127, @@ -40568,13 +39854,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][90] = 127, [0][1][1][0][RTW89_THAILAND][0][90] = 127, [0][1][1][0][RTW89_FCC][1][92] = -2, - [0][1][1][0][RTW89_FCC][2][92] = 127, [0][1][1][0][RTW89_ETSI][1][92] = 127, [0][1][1][0][RTW89_ETSI][0][92] = 127, [0][1][1][0][RTW89_MKK][1][92] = 127, [0][1][1][0][RTW89_MKK][0][92] = 127, [0][1][1][0][RTW89_IC][1][92] = -2, - [0][1][1][0][RTW89_IC][2][92] = 127, [0][1][1][0][RTW89_KCC][1][92] = 20, [0][1][1][0][RTW89_KCC][0][92] = 127, [0][1][1][0][RTW89_ACMA][1][92] = 127, @@ -40587,13 +39871,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][92] = 127, [0][1][1][0][RTW89_THAILAND][0][92] = 127, [0][1][1][0][RTW89_FCC][1][94] = -2, - [0][1][1][0][RTW89_FCC][2][94] = 127, [0][1][1][0][RTW89_ETSI][1][94] = 127, [0][1][1][0][RTW89_ETSI][0][94] = 127, [0][1][1][0][RTW89_MKK][1][94] = 127, [0][1][1][0][RTW89_MKK][0][94] = 127, [0][1][1][0][RTW89_IC][1][94] = -2, - [0][1][1][0][RTW89_IC][2][94] = 127, [0][1][1][0][RTW89_KCC][1][94] = 20, [0][1][1][0][RTW89_KCC][0][94] = 127, [0][1][1][0][RTW89_ACMA][1][94] = 127, @@ -40606,13 +39888,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][94] = 127, [0][1][1][0][RTW89_THAILAND][0][94] = 127, [0][1][1][0][RTW89_FCC][1][96] = -2, - [0][1][1][0][RTW89_FCC][2][96] = 127, [0][1][1][0][RTW89_ETSI][1][96] = 127, [0][1][1][0][RTW89_ETSI][0][96] = 127, [0][1][1][0][RTW89_MKK][1][96] = 127, [0][1][1][0][RTW89_MKK][0][96] = 127, [0][1][1][0][RTW89_IC][1][96] = -2, - [0][1][1][0][RTW89_IC][2][96] = 127, [0][1][1][0][RTW89_KCC][1][96] = 20, [0][1][1][0][RTW89_KCC][0][96] = 127, [0][1][1][0][RTW89_ACMA][1][96] = 127, @@ -40625,13 +39905,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][96] = 127, 
[0][1][1][0][RTW89_THAILAND][0][96] = 127, [0][1][1][0][RTW89_FCC][1][98] = -2, - [0][1][1][0][RTW89_FCC][2][98] = 127, [0][1][1][0][RTW89_ETSI][1][98] = 127, [0][1][1][0][RTW89_ETSI][0][98] = 127, [0][1][1][0][RTW89_MKK][1][98] = 127, [0][1][1][0][RTW89_MKK][0][98] = 127, [0][1][1][0][RTW89_IC][1][98] = -2, - [0][1][1][0][RTW89_IC][2][98] = 127, [0][1][1][0][RTW89_KCC][1][98] = 20, [0][1][1][0][RTW89_KCC][0][98] = 127, [0][1][1][0][RTW89_ACMA][1][98] = 127, @@ -40644,13 +39922,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][98] = 127, [0][1][1][0][RTW89_THAILAND][0][98] = 127, [0][1][1][0][RTW89_FCC][1][100] = -2, - [0][1][1][0][RTW89_FCC][2][100] = 127, [0][1][1][0][RTW89_ETSI][1][100] = 127, [0][1][1][0][RTW89_ETSI][0][100] = 127, [0][1][1][0][RTW89_MKK][1][100] = 127, [0][1][1][0][RTW89_MKK][0][100] = 127, [0][1][1][0][RTW89_IC][1][100] = -2, - [0][1][1][0][RTW89_IC][2][100] = 127, [0][1][1][0][RTW89_KCC][1][100] = 20, [0][1][1][0][RTW89_KCC][0][100] = 127, [0][1][1][0][RTW89_ACMA][1][100] = 127, @@ -40663,13 +39939,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][100] = 127, [0][1][1][0][RTW89_THAILAND][0][100] = 127, [0][1][1][0][RTW89_FCC][1][102] = -2, - [0][1][1][0][RTW89_FCC][2][102] = 127, [0][1][1][0][RTW89_ETSI][1][102] = 127, [0][1][1][0][RTW89_ETSI][0][102] = 127, [0][1][1][0][RTW89_MKK][1][102] = 127, [0][1][1][0][RTW89_MKK][0][102] = 127, [0][1][1][0][RTW89_IC][1][102] = -2, - [0][1][1][0][RTW89_IC][2][102] = 127, [0][1][1][0][RTW89_KCC][1][102] = 20, [0][1][1][0][RTW89_KCC][0][102] = 127, [0][1][1][0][RTW89_ACMA][1][102] = 127, @@ -40682,13 +39956,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][102] = 127, [0][1][1][0][RTW89_THAILAND][0][102] = 127, [0][1][1][0][RTW89_FCC][1][104] = -2, - [0][1][1][0][RTW89_FCC][2][104] = 127, [0][1][1][0][RTW89_ETSI][1][104] = 127, [0][1][1][0][RTW89_ETSI][0][104] = 127, [0][1][1][0][RTW89_MKK][1][104] = 127, [0][1][1][0][RTW89_MKK][0][104] = 127, [0][1][1][0][RTW89_IC][1][104] = -2, - [0][1][1][0][RTW89_IC][2][104] = 127, [0][1][1][0][RTW89_KCC][1][104] = 20, [0][1][1][0][RTW89_KCC][0][104] = 127, [0][1][1][0][RTW89_ACMA][1][104] = 127, @@ -40701,13 +39973,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][104] = 127, [0][1][1][0][RTW89_THAILAND][0][104] = 127, [0][1][1][0][RTW89_FCC][1][105] = -2, - [0][1][1][0][RTW89_FCC][2][105] = 127, [0][1][1][0][RTW89_ETSI][1][105] = 127, [0][1][1][0][RTW89_ETSI][0][105] = 127, [0][1][1][0][RTW89_MKK][1][105] = 127, [0][1][1][0][RTW89_MKK][0][105] = 127, [0][1][1][0][RTW89_IC][1][105] = -2, - [0][1][1][0][RTW89_IC][2][105] = 127, [0][1][1][0][RTW89_KCC][1][105] = 20, [0][1][1][0][RTW89_KCC][0][105] = 127, [0][1][1][0][RTW89_ACMA][1][105] = 127, @@ -40720,13 +39990,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][105] = 127, [0][1][1][0][RTW89_THAILAND][0][105] = 127, [0][1][1][0][RTW89_FCC][1][107] = 1, - [0][1][1][0][RTW89_FCC][2][107] = 127, [0][1][1][0][RTW89_ETSI][1][107] = 127, [0][1][1][0][RTW89_ETSI][0][107] = 127, [0][1][1][0][RTW89_MKK][1][107] = 127, [0][1][1][0][RTW89_MKK][0][107] = 127, [0][1][1][0][RTW89_IC][1][107] = 1, - [0][1][1][0][RTW89_IC][2][107] = 127, [0][1][1][0][RTW89_KCC][1][107] = 20, [0][1][1][0][RTW89_KCC][0][107] = 127, [0][1][1][0][RTW89_ACMA][1][107] = 127, @@ -40739,13 +40007,11 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][107] = 127, [0][1][1][0][RTW89_THAILAND][0][107] = 127, [0][1][1][0][RTW89_FCC][1][109] = 1, - [0][1][1][0][RTW89_FCC][2][109] = 127, [0][1][1][0][RTW89_ETSI][1][109] = 127, [0][1][1][0][RTW89_ETSI][0][109] = 127, [0][1][1][0][RTW89_MKK][1][109] = 127, [0][1][1][0][RTW89_MKK][0][109] = 127, [0][1][1][0][RTW89_IC][1][109] = 1, - [0][1][1][0][RTW89_IC][2][109] = 127, [0][1][1][0][RTW89_KCC][1][109] = 20, [0][1][1][0][RTW89_KCC][0][109] = 127, [0][1][1][0][RTW89_ACMA][1][109] = 127, @@ -40758,13 +40024,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][109] = 127, [0][1][1][0][RTW89_THAILAND][0][109] = 127, [0][1][1][0][RTW89_FCC][1][111] = 127, - [0][1][1][0][RTW89_FCC][2][111] = 127, [0][1][1][0][RTW89_ETSI][1][111] = 127, [0][1][1][0][RTW89_ETSI][0][111] = 127, [0][1][1][0][RTW89_MKK][1][111] = 127, [0][1][1][0][RTW89_MKK][0][111] = 127, [0][1][1][0][RTW89_IC][1][111] = 127, - [0][1][1][0][RTW89_IC][2][111] = 127, [0][1][1][0][RTW89_KCC][1][111] = 127, [0][1][1][0][RTW89_KCC][0][111] = 127, [0][1][1][0][RTW89_ACMA][1][111] = 127, @@ -40777,13 +40041,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][111] = 127, [0][1][1][0][RTW89_THAILAND][0][111] = 127, [0][1][1][0][RTW89_FCC][1][113] = 127, - [0][1][1][0][RTW89_FCC][2][113] = 127, [0][1][1][0][RTW89_ETSI][1][113] = 127, [0][1][1][0][RTW89_ETSI][0][113] = 127, [0][1][1][0][RTW89_MKK][1][113] = 127, [0][1][1][0][RTW89_MKK][0][113] = 127, [0][1][1][0][RTW89_IC][1][113] = 127, - [0][1][1][0][RTW89_IC][2][113] = 127, [0][1][1][0][RTW89_KCC][1][113] = 127, [0][1][1][0][RTW89_KCC][0][113] = 127, [0][1][1][0][RTW89_ACMA][1][113] = 127, @@ -40796,13 +40058,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][113] = 127, [0][1][1][0][RTW89_THAILAND][0][113] = 127, [0][1][1][0][RTW89_FCC][1][115] = 127, - [0][1][1][0][RTW89_FCC][2][115] = 127, [0][1][1][0][RTW89_ETSI][1][115] = 127, [0][1][1][0][RTW89_ETSI][0][115] = 127, [0][1][1][0][RTW89_MKK][1][115] = 127, [0][1][1][0][RTW89_MKK][0][115] = 127, [0][1][1][0][RTW89_IC][1][115] = 127, - [0][1][1][0][RTW89_IC][2][115] = 127, [0][1][1][0][RTW89_KCC][1][115] = 127, [0][1][1][0][RTW89_KCC][0][115] = 127, [0][1][1][0][RTW89_ACMA][1][115] = 127, @@ -40815,13 +40075,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][115] = 127, [0][1][1][0][RTW89_THAILAND][0][115] = 127, [0][1][1][0][RTW89_FCC][1][117] = 127, - [0][1][1][0][RTW89_FCC][2][117] = 127, [0][1][1][0][RTW89_ETSI][1][117] = 127, [0][1][1][0][RTW89_ETSI][0][117] = 127, [0][1][1][0][RTW89_MKK][1][117] = 127, [0][1][1][0][RTW89_MKK][0][117] = 127, [0][1][1][0][RTW89_IC][1][117] = 127, - [0][1][1][0][RTW89_IC][2][117] = 127, [0][1][1][0][RTW89_KCC][1][117] = 127, [0][1][1][0][RTW89_KCC][0][117] = 127, [0][1][1][0][RTW89_ACMA][1][117] = 127, @@ -40834,13 +40092,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][117] = 127, [0][1][1][0][RTW89_THAILAND][0][117] = 127, [0][1][1][0][RTW89_FCC][1][119] = 127, - [0][1][1][0][RTW89_FCC][2][119] = 127, [0][1][1][0][RTW89_ETSI][1][119] = 127, [0][1][1][0][RTW89_ETSI][0][119] = 127, [0][1][1][0][RTW89_MKK][1][119] = 127, [0][1][1][0][RTW89_MKK][0][119] = 127, [0][1][1][0][RTW89_IC][1][119] = 127, - [0][1][1][0][RTW89_IC][2][119] = 127, 
[0][1][1][0][RTW89_KCC][1][119] = 127, [0][1][1][0][RTW89_KCC][0][119] = 127, [0][1][1][0][RTW89_ACMA][1][119] = 127, @@ -40853,13 +40109,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][1][0][RTW89_THAILAND][1][119] = 127, [0][1][1][0][RTW89_THAILAND][0][119] = 127, [0][0][2][0][RTW89_FCC][1][0] = 24, - [0][0][2][0][RTW89_FCC][2][0] = 56, [0][0][2][0][RTW89_ETSI][1][0] = 66, [0][0][2][0][RTW89_ETSI][0][0] = 28, [0][0][2][0][RTW89_MKK][1][0] = 66, [0][0][2][0][RTW89_MKK][0][0] = 26, [0][0][2][0][RTW89_IC][1][0] = 24, - [0][0][2][0][RTW89_IC][2][0] = 56, [0][0][2][0][RTW89_KCC][1][0] = 24, [0][0][2][0][RTW89_KCC][0][0] = 24, [0][0][2][0][RTW89_ACMA][1][0] = 66, @@ -40872,13 +40126,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][0] = 56, [0][0][2][0][RTW89_THAILAND][0][0] = 24, [0][0][2][0][RTW89_FCC][1][2] = 22, - [0][0][2][0][RTW89_FCC][2][2] = 56, [0][0][2][0][RTW89_ETSI][1][2] = 66, [0][0][2][0][RTW89_ETSI][0][2] = 28, [0][0][2][0][RTW89_MKK][1][2] = 66, [0][0][2][0][RTW89_MKK][0][2] = 26, [0][0][2][0][RTW89_IC][1][2] = 22, - [0][0][2][0][RTW89_IC][2][2] = 56, [0][0][2][0][RTW89_KCC][1][2] = 24, [0][0][2][0][RTW89_KCC][0][2] = 24, [0][0][2][0][RTW89_ACMA][1][2] = 66, @@ -40891,13 +40143,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][2] = 56, [0][0][2][0][RTW89_THAILAND][0][2] = 22, [0][0][2][0][RTW89_FCC][1][4] = 22, - [0][0][2][0][RTW89_FCC][2][4] = 56, [0][0][2][0][RTW89_ETSI][1][4] = 66, [0][0][2][0][RTW89_ETSI][0][4] = 28, [0][0][2][0][RTW89_MKK][1][4] = 66, [0][0][2][0][RTW89_MKK][0][4] = 26, [0][0][2][0][RTW89_IC][1][4] = 22, - [0][0][2][0][RTW89_IC][2][4] = 56, [0][0][2][0][RTW89_KCC][1][4] = 24, [0][0][2][0][RTW89_KCC][0][4] = 24, [0][0][2][0][RTW89_ACMA][1][4] = 66, @@ -40910,13 +40160,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][4] = 56, [0][0][2][0][RTW89_THAILAND][0][4] = 22, [0][0][2][0][RTW89_FCC][1][6] = 22, - [0][0][2][0][RTW89_FCC][2][6] = 56, [0][0][2][0][RTW89_ETSI][1][6] = 66, [0][0][2][0][RTW89_ETSI][0][6] = 28, [0][0][2][0][RTW89_MKK][1][6] = 66, [0][0][2][0][RTW89_MKK][0][6] = 26, [0][0][2][0][RTW89_IC][1][6] = 22, - [0][0][2][0][RTW89_IC][2][6] = 56, [0][0][2][0][RTW89_KCC][1][6] = 24, [0][0][2][0][RTW89_KCC][0][6] = 24, [0][0][2][0][RTW89_ACMA][1][6] = 66, @@ -40929,13 +40177,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][6] = 56, [0][0][2][0][RTW89_THAILAND][0][6] = 22, [0][0][2][0][RTW89_FCC][1][8] = 22, - [0][0][2][0][RTW89_FCC][2][8] = 56, [0][0][2][0][RTW89_ETSI][1][8] = 66, [0][0][2][0][RTW89_ETSI][0][8] = 28, [0][0][2][0][RTW89_MKK][1][8] = 66, [0][0][2][0][RTW89_MKK][0][8] = 26, [0][0][2][0][RTW89_IC][1][8] = 22, - [0][0][2][0][RTW89_IC][2][8] = 56, [0][0][2][0][RTW89_KCC][1][8] = 24, [0][0][2][0][RTW89_KCC][0][8] = 24, [0][0][2][0][RTW89_ACMA][1][8] = 66, @@ -40948,13 +40194,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][8] = 56, [0][0][2][0][RTW89_THAILAND][0][8] = 22, [0][0][2][0][RTW89_FCC][1][10] = 22, - [0][0][2][0][RTW89_FCC][2][10] = 56, [0][0][2][0][RTW89_ETSI][1][10] = 66, [0][0][2][0][RTW89_ETSI][0][10] = 28, [0][0][2][0][RTW89_MKK][1][10] = 66, [0][0][2][0][RTW89_MKK][0][10] = 26, [0][0][2][0][RTW89_IC][1][10] = 22, - [0][0][2][0][RTW89_IC][2][10] = 56, [0][0][2][0][RTW89_KCC][1][10] = 24, [0][0][2][0][RTW89_KCC][0][10] = 24, 
[0][0][2][0][RTW89_ACMA][1][10] = 66, @@ -40967,13 +40211,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][10] = 56, [0][0][2][0][RTW89_THAILAND][0][10] = 22, [0][0][2][0][RTW89_FCC][1][12] = 22, - [0][0][2][0][RTW89_FCC][2][12] = 56, [0][0][2][0][RTW89_ETSI][1][12] = 66, [0][0][2][0][RTW89_ETSI][0][12] = 28, [0][0][2][0][RTW89_MKK][1][12] = 66, [0][0][2][0][RTW89_MKK][0][12] = 26, [0][0][2][0][RTW89_IC][1][12] = 22, - [0][0][2][0][RTW89_IC][2][12] = 56, [0][0][2][0][RTW89_KCC][1][12] = 24, [0][0][2][0][RTW89_KCC][0][12] = 24, [0][0][2][0][RTW89_ACMA][1][12] = 66, @@ -40986,13 +40228,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][12] = 56, [0][0][2][0][RTW89_THAILAND][0][12] = 22, [0][0][2][0][RTW89_FCC][1][14] = 22, - [0][0][2][0][RTW89_FCC][2][14] = 56, [0][0][2][0][RTW89_ETSI][1][14] = 66, [0][0][2][0][RTW89_ETSI][0][14] = 28, [0][0][2][0][RTW89_MKK][1][14] = 66, [0][0][2][0][RTW89_MKK][0][14] = 26, [0][0][2][0][RTW89_IC][1][14] = 22, - [0][0][2][0][RTW89_IC][2][14] = 56, [0][0][2][0][RTW89_KCC][1][14] = 24, [0][0][2][0][RTW89_KCC][0][14] = 24, [0][0][2][0][RTW89_ACMA][1][14] = 66, @@ -41005,13 +40245,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][14] = 56, [0][0][2][0][RTW89_THAILAND][0][14] = 22, [0][0][2][0][RTW89_FCC][1][15] = 22, - [0][0][2][0][RTW89_FCC][2][15] = 56, [0][0][2][0][RTW89_ETSI][1][15] = 66, [0][0][2][0][RTW89_ETSI][0][15] = 28, [0][0][2][0][RTW89_MKK][1][15] = 66, [0][0][2][0][RTW89_MKK][0][15] = 26, [0][0][2][0][RTW89_IC][1][15] = 22, - [0][0][2][0][RTW89_IC][2][15] = 56, [0][0][2][0][RTW89_KCC][1][15] = 24, [0][0][2][0][RTW89_KCC][0][15] = 24, [0][0][2][0][RTW89_ACMA][1][15] = 66, @@ -41024,13 +40262,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][15] = 56, [0][0][2][0][RTW89_THAILAND][0][15] = 22, [0][0][2][0][RTW89_FCC][1][17] = 22, - [0][0][2][0][RTW89_FCC][2][17] = 56, [0][0][2][0][RTW89_ETSI][1][17] = 66, [0][0][2][0][RTW89_ETSI][0][17] = 28, [0][0][2][0][RTW89_MKK][1][17] = 66, [0][0][2][0][RTW89_MKK][0][17] = 26, [0][0][2][0][RTW89_IC][1][17] = 22, - [0][0][2][0][RTW89_IC][2][17] = 56, [0][0][2][0][RTW89_KCC][1][17] = 24, [0][0][2][0][RTW89_KCC][0][17] = 24, [0][0][2][0][RTW89_ACMA][1][17] = 66, @@ -41043,13 +40279,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][17] = 56, [0][0][2][0][RTW89_THAILAND][0][17] = 22, [0][0][2][0][RTW89_FCC][1][19] = 22, - [0][0][2][0][RTW89_FCC][2][19] = 56, [0][0][2][0][RTW89_ETSI][1][19] = 66, [0][0][2][0][RTW89_ETSI][0][19] = 28, [0][0][2][0][RTW89_MKK][1][19] = 66, [0][0][2][0][RTW89_MKK][0][19] = 26, [0][0][2][0][RTW89_IC][1][19] = 22, - [0][0][2][0][RTW89_IC][2][19] = 56, [0][0][2][0][RTW89_KCC][1][19] = 24, [0][0][2][0][RTW89_KCC][0][19] = 24, [0][0][2][0][RTW89_ACMA][1][19] = 66, @@ -41062,13 +40296,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][19] = 56, [0][0][2][0][RTW89_THAILAND][0][19] = 22, [0][0][2][0][RTW89_FCC][1][21] = 22, - [0][0][2][0][RTW89_FCC][2][21] = 56, [0][0][2][0][RTW89_ETSI][1][21] = 66, [0][0][2][0][RTW89_ETSI][0][21] = 28, [0][0][2][0][RTW89_MKK][1][21] = 66, [0][0][2][0][RTW89_MKK][0][21] = 26, [0][0][2][0][RTW89_IC][1][21] = 22, - [0][0][2][0][RTW89_IC][2][21] = 56, [0][0][2][0][RTW89_KCC][1][21] = 24, [0][0][2][0][RTW89_KCC][0][21] = 24, 
[0][0][2][0][RTW89_ACMA][1][21] = 66, @@ -41081,13 +40313,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][21] = 56, [0][0][2][0][RTW89_THAILAND][0][21] = 22, [0][0][2][0][RTW89_FCC][1][23] = 22, - [0][0][2][0][RTW89_FCC][2][23] = 70, [0][0][2][0][RTW89_ETSI][1][23] = 66, [0][0][2][0][RTW89_ETSI][0][23] = 28, [0][0][2][0][RTW89_MKK][1][23] = 66, [0][0][2][0][RTW89_MKK][0][23] = 26, [0][0][2][0][RTW89_IC][1][23] = 22, - [0][0][2][0][RTW89_IC][2][23] = 70, [0][0][2][0][RTW89_KCC][1][23] = 24, [0][0][2][0][RTW89_KCC][0][23] = 26, [0][0][2][0][RTW89_ACMA][1][23] = 66, @@ -41100,13 +40330,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][23] = 66, [0][0][2][0][RTW89_THAILAND][0][23] = 22, [0][0][2][0][RTW89_FCC][1][25] = 22, - [0][0][2][0][RTW89_FCC][2][25] = 70, [0][0][2][0][RTW89_ETSI][1][25] = 66, [0][0][2][0][RTW89_ETSI][0][25] = 28, [0][0][2][0][RTW89_MKK][1][25] = 66, [0][0][2][0][RTW89_MKK][0][25] = 26, [0][0][2][0][RTW89_IC][1][25] = 22, - [0][0][2][0][RTW89_IC][2][25] = 70, [0][0][2][0][RTW89_KCC][1][25] = 24, [0][0][2][0][RTW89_KCC][0][25] = 26, [0][0][2][0][RTW89_ACMA][1][25] = 66, @@ -41119,13 +40347,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][25] = 66, [0][0][2][0][RTW89_THAILAND][0][25] = 22, [0][0][2][0][RTW89_FCC][1][27] = 22, - [0][0][2][0][RTW89_FCC][2][27] = 70, [0][0][2][0][RTW89_ETSI][1][27] = 66, [0][0][2][0][RTW89_ETSI][0][27] = 28, [0][0][2][0][RTW89_MKK][1][27] = 66, [0][0][2][0][RTW89_MKK][0][27] = 26, [0][0][2][0][RTW89_IC][1][27] = 22, - [0][0][2][0][RTW89_IC][2][27] = 70, [0][0][2][0][RTW89_KCC][1][27] = 24, [0][0][2][0][RTW89_KCC][0][27] = 26, [0][0][2][0][RTW89_ACMA][1][27] = 66, @@ -41138,13 +40364,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][27] = 66, [0][0][2][0][RTW89_THAILAND][0][27] = 22, [0][0][2][0][RTW89_FCC][1][29] = 22, - [0][0][2][0][RTW89_FCC][2][29] = 70, [0][0][2][0][RTW89_ETSI][1][29] = 66, [0][0][2][0][RTW89_ETSI][0][29] = 28, [0][0][2][0][RTW89_MKK][1][29] = 66, [0][0][2][0][RTW89_MKK][0][29] = 26, [0][0][2][0][RTW89_IC][1][29] = 22, - [0][0][2][0][RTW89_IC][2][29] = 70, [0][0][2][0][RTW89_KCC][1][29] = 24, [0][0][2][0][RTW89_KCC][0][29] = 26, [0][0][2][0][RTW89_ACMA][1][29] = 66, @@ -41157,13 +40381,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][29] = 66, [0][0][2][0][RTW89_THAILAND][0][29] = 22, [0][0][2][0][RTW89_FCC][1][30] = 22, - [0][0][2][0][RTW89_FCC][2][30] = 70, [0][0][2][0][RTW89_ETSI][1][30] = 66, [0][0][2][0][RTW89_ETSI][0][30] = 28, [0][0][2][0][RTW89_MKK][1][30] = 66, [0][0][2][0][RTW89_MKK][0][30] = 26, [0][0][2][0][RTW89_IC][1][30] = 22, - [0][0][2][0][RTW89_IC][2][30] = 70, [0][0][2][0][RTW89_KCC][1][30] = 24, [0][0][2][0][RTW89_KCC][0][30] = 26, [0][0][2][0][RTW89_ACMA][1][30] = 66, @@ -41176,13 +40398,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][30] = 66, [0][0][2][0][RTW89_THAILAND][0][30] = 22, [0][0][2][0][RTW89_FCC][1][32] = 22, - [0][0][2][0][RTW89_FCC][2][32] = 70, [0][0][2][0][RTW89_ETSI][1][32] = 66, [0][0][2][0][RTW89_ETSI][0][32] = 28, [0][0][2][0][RTW89_MKK][1][32] = 66, [0][0][2][0][RTW89_MKK][0][32] = 26, [0][0][2][0][RTW89_IC][1][32] = 22, - [0][0][2][0][RTW89_IC][2][32] = 70, [0][0][2][0][RTW89_KCC][1][32] = 24, [0][0][2][0][RTW89_KCC][0][32] = 26, 
[0][0][2][0][RTW89_ACMA][1][32] = 66, @@ -41195,13 +40415,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][32] = 66, [0][0][2][0][RTW89_THAILAND][0][32] = 22, [0][0][2][0][RTW89_FCC][1][34] = 22, - [0][0][2][0][RTW89_FCC][2][34] = 70, [0][0][2][0][RTW89_ETSI][1][34] = 66, [0][0][2][0][RTW89_ETSI][0][34] = 28, [0][0][2][0][RTW89_MKK][1][34] = 66, [0][0][2][0][RTW89_MKK][0][34] = 26, [0][0][2][0][RTW89_IC][1][34] = 22, - [0][0][2][0][RTW89_IC][2][34] = 70, [0][0][2][0][RTW89_KCC][1][34] = 24, [0][0][2][0][RTW89_KCC][0][34] = 26, [0][0][2][0][RTW89_ACMA][1][34] = 66, @@ -41214,13 +40432,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][34] = 66, [0][0][2][0][RTW89_THAILAND][0][34] = 22, [0][0][2][0][RTW89_FCC][1][36] = 22, - [0][0][2][0][RTW89_FCC][2][36] = 70, [0][0][2][0][RTW89_ETSI][1][36] = 66, [0][0][2][0][RTW89_ETSI][0][36] = 28, [0][0][2][0][RTW89_MKK][1][36] = 66, [0][0][2][0][RTW89_MKK][0][36] = 26, [0][0][2][0][RTW89_IC][1][36] = 22, - [0][0][2][0][RTW89_IC][2][36] = 70, [0][0][2][0][RTW89_KCC][1][36] = 24, [0][0][2][0][RTW89_KCC][0][36] = 26, [0][0][2][0][RTW89_ACMA][1][36] = 66, @@ -41233,13 +40449,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][36] = 66, [0][0][2][0][RTW89_THAILAND][0][36] = 22, [0][0][2][0][RTW89_FCC][1][38] = 22, - [0][0][2][0][RTW89_FCC][2][38] = 70, [0][0][2][0][RTW89_ETSI][1][38] = 66, [0][0][2][0][RTW89_ETSI][0][38] = 28, [0][0][2][0][RTW89_MKK][1][38] = 66, [0][0][2][0][RTW89_MKK][0][38] = 26, [0][0][2][0][RTW89_IC][1][38] = 22, - [0][0][2][0][RTW89_IC][2][38] = 70, [0][0][2][0][RTW89_KCC][1][38] = 24, [0][0][2][0][RTW89_KCC][0][38] = 26, [0][0][2][0][RTW89_ACMA][1][38] = 66, @@ -41252,13 +40466,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][38] = 66, [0][0][2][0][RTW89_THAILAND][0][38] = 22, [0][0][2][0][RTW89_FCC][1][40] = 22, - [0][0][2][0][RTW89_FCC][2][40] = 70, [0][0][2][0][RTW89_ETSI][1][40] = 66, [0][0][2][0][RTW89_ETSI][0][40] = 28, [0][0][2][0][RTW89_MKK][1][40] = 66, [0][0][2][0][RTW89_MKK][0][40] = 26, [0][0][2][0][RTW89_IC][1][40] = 22, - [0][0][2][0][RTW89_IC][2][40] = 70, [0][0][2][0][RTW89_KCC][1][40] = 24, [0][0][2][0][RTW89_KCC][0][40] = 26, [0][0][2][0][RTW89_ACMA][1][40] = 66, @@ -41271,13 +40483,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][40] = 66, [0][0][2][0][RTW89_THAILAND][0][40] = 22, [0][0][2][0][RTW89_FCC][1][42] = 22, - [0][0][2][0][RTW89_FCC][2][42] = 70, [0][0][2][0][RTW89_ETSI][1][42] = 66, [0][0][2][0][RTW89_ETSI][0][42] = 28, [0][0][2][0][RTW89_MKK][1][42] = 66, [0][0][2][0][RTW89_MKK][0][42] = 26, [0][0][2][0][RTW89_IC][1][42] = 22, - [0][0][2][0][RTW89_IC][2][42] = 70, [0][0][2][0][RTW89_KCC][1][42] = 24, [0][0][2][0][RTW89_KCC][0][42] = 26, [0][0][2][0][RTW89_ACMA][1][42] = 66, @@ -41290,13 +40500,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][42] = 66, [0][0][2][0][RTW89_THAILAND][0][42] = 22, [0][0][2][0][RTW89_FCC][1][44] = 22, - [0][0][2][0][RTW89_FCC][2][44] = 70, [0][0][2][0][RTW89_ETSI][1][44] = 66, [0][0][2][0][RTW89_ETSI][0][44] = 30, [0][0][2][0][RTW89_MKK][1][44] = 44, [0][0][2][0][RTW89_MKK][0][44] = 28, [0][0][2][0][RTW89_IC][1][44] = 22, - [0][0][2][0][RTW89_IC][2][44] = 70, [0][0][2][0][RTW89_KCC][1][44] = 24, [0][0][2][0][RTW89_KCC][0][44] = 26, 
[0][0][2][0][RTW89_ACMA][1][44] = 66, @@ -41309,13 +40517,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][44] = 68, [0][0][2][0][RTW89_THAILAND][0][44] = 22, [0][0][2][0][RTW89_FCC][1][45] = 22, - [0][0][2][0][RTW89_FCC][2][45] = 127, [0][0][2][0][RTW89_ETSI][1][45] = 127, [0][0][2][0][RTW89_ETSI][0][45] = 127, [0][0][2][0][RTW89_MKK][1][45] = 127, [0][0][2][0][RTW89_MKK][0][45] = 127, [0][0][2][0][RTW89_IC][1][45] = 22, - [0][0][2][0][RTW89_IC][2][45] = 70, [0][0][2][0][RTW89_KCC][1][45] = 24, [0][0][2][0][RTW89_KCC][0][45] = 127, [0][0][2][0][RTW89_ACMA][1][45] = 127, @@ -41328,13 +40534,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][45] = 127, [0][0][2][0][RTW89_THAILAND][0][45] = 127, [0][0][2][0][RTW89_FCC][1][47] = 22, - [0][0][2][0][RTW89_FCC][2][47] = 127, [0][0][2][0][RTW89_ETSI][1][47] = 127, [0][0][2][0][RTW89_ETSI][0][47] = 127, [0][0][2][0][RTW89_MKK][1][47] = 127, [0][0][2][0][RTW89_MKK][0][47] = 127, [0][0][2][0][RTW89_IC][1][47] = 22, - [0][0][2][0][RTW89_IC][2][47] = 70, [0][0][2][0][RTW89_KCC][1][47] = 24, [0][0][2][0][RTW89_KCC][0][47] = 127, [0][0][2][0][RTW89_ACMA][1][47] = 127, @@ -41347,13 +40551,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][47] = 127, [0][0][2][0][RTW89_THAILAND][0][47] = 127, [0][0][2][0][RTW89_FCC][1][49] = 24, - [0][0][2][0][RTW89_FCC][2][49] = 127, [0][0][2][0][RTW89_ETSI][1][49] = 127, [0][0][2][0][RTW89_ETSI][0][49] = 127, [0][0][2][0][RTW89_MKK][1][49] = 127, [0][0][2][0][RTW89_MKK][0][49] = 127, [0][0][2][0][RTW89_IC][1][49] = 24, - [0][0][2][0][RTW89_IC][2][49] = 70, [0][0][2][0][RTW89_KCC][1][49] = 24, [0][0][2][0][RTW89_KCC][0][49] = 127, [0][0][2][0][RTW89_ACMA][1][49] = 127, @@ -41366,13 +40568,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][49] = 127, [0][0][2][0][RTW89_THAILAND][0][49] = 127, [0][0][2][0][RTW89_FCC][1][51] = 22, - [0][0][2][0][RTW89_FCC][2][51] = 127, [0][0][2][0][RTW89_ETSI][1][51] = 127, [0][0][2][0][RTW89_ETSI][0][51] = 127, [0][0][2][0][RTW89_MKK][1][51] = 127, [0][0][2][0][RTW89_MKK][0][51] = 127, [0][0][2][0][RTW89_IC][1][51] = 22, - [0][0][2][0][RTW89_IC][2][51] = 70, [0][0][2][0][RTW89_KCC][1][51] = 24, [0][0][2][0][RTW89_KCC][0][51] = 127, [0][0][2][0][RTW89_ACMA][1][51] = 127, @@ -41385,13 +40585,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][51] = 127, [0][0][2][0][RTW89_THAILAND][0][51] = 127, [0][0][2][0][RTW89_FCC][1][53] = 22, - [0][0][2][0][RTW89_FCC][2][53] = 127, [0][0][2][0][RTW89_ETSI][1][53] = 127, [0][0][2][0][RTW89_ETSI][0][53] = 127, [0][0][2][0][RTW89_MKK][1][53] = 127, [0][0][2][0][RTW89_MKK][0][53] = 127, [0][0][2][0][RTW89_IC][1][53] = 22, - [0][0][2][0][RTW89_IC][2][53] = 70, [0][0][2][0][RTW89_KCC][1][53] = 24, [0][0][2][0][RTW89_KCC][0][53] = 127, [0][0][2][0][RTW89_ACMA][1][53] = 127, @@ -41404,13 +40602,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][53] = 127, [0][0][2][0][RTW89_THAILAND][0][53] = 127, [0][0][2][0][RTW89_FCC][1][55] = 22, - [0][0][2][0][RTW89_FCC][2][55] = 68, [0][0][2][0][RTW89_ETSI][1][55] = 127, [0][0][2][0][RTW89_ETSI][0][55] = 127, [0][0][2][0][RTW89_MKK][1][55] = 127, [0][0][2][0][RTW89_MKK][0][55] = 127, [0][0][2][0][RTW89_IC][1][55] = 22, - [0][0][2][0][RTW89_IC][2][55] = 68, [0][0][2][0][RTW89_KCC][1][55] = 26, 
[0][0][2][0][RTW89_KCC][0][55] = 127, [0][0][2][0][RTW89_ACMA][1][55] = 127, @@ -41423,13 +40619,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][55] = 127, [0][0][2][0][RTW89_THAILAND][0][55] = 127, [0][0][2][0][RTW89_FCC][1][57] = 22, - [0][0][2][0][RTW89_FCC][2][57] = 68, [0][0][2][0][RTW89_ETSI][1][57] = 127, [0][0][2][0][RTW89_ETSI][0][57] = 127, [0][0][2][0][RTW89_MKK][1][57] = 127, [0][0][2][0][RTW89_MKK][0][57] = 127, [0][0][2][0][RTW89_IC][1][57] = 22, - [0][0][2][0][RTW89_IC][2][57] = 68, [0][0][2][0][RTW89_KCC][1][57] = 26, [0][0][2][0][RTW89_KCC][0][57] = 127, [0][0][2][0][RTW89_ACMA][1][57] = 127, @@ -41442,13 +40636,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][57] = 127, [0][0][2][0][RTW89_THAILAND][0][57] = 127, [0][0][2][0][RTW89_FCC][1][59] = 22, - [0][0][2][0][RTW89_FCC][2][59] = 68, [0][0][2][0][RTW89_ETSI][1][59] = 127, [0][0][2][0][RTW89_ETSI][0][59] = 127, [0][0][2][0][RTW89_MKK][1][59] = 127, [0][0][2][0][RTW89_MKK][0][59] = 127, [0][0][2][0][RTW89_IC][1][59] = 22, - [0][0][2][0][RTW89_IC][2][59] = 68, [0][0][2][0][RTW89_KCC][1][59] = 26, [0][0][2][0][RTW89_KCC][0][59] = 127, [0][0][2][0][RTW89_ACMA][1][59] = 127, @@ -41461,13 +40653,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][59] = 127, [0][0][2][0][RTW89_THAILAND][0][59] = 127, [0][0][2][0][RTW89_FCC][1][60] = 22, - [0][0][2][0][RTW89_FCC][2][60] = 68, [0][0][2][0][RTW89_ETSI][1][60] = 127, [0][0][2][0][RTW89_ETSI][0][60] = 127, [0][0][2][0][RTW89_MKK][1][60] = 127, [0][0][2][0][RTW89_MKK][0][60] = 127, [0][0][2][0][RTW89_IC][1][60] = 22, - [0][0][2][0][RTW89_IC][2][60] = 68, [0][0][2][0][RTW89_KCC][1][60] = 26, [0][0][2][0][RTW89_KCC][0][60] = 127, [0][0][2][0][RTW89_ACMA][1][60] = 127, @@ -41480,13 +40670,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][60] = 127, [0][0][2][0][RTW89_THAILAND][0][60] = 127, [0][0][2][0][RTW89_FCC][1][62] = 22, - [0][0][2][0][RTW89_FCC][2][62] = 68, [0][0][2][0][RTW89_ETSI][1][62] = 127, [0][0][2][0][RTW89_ETSI][0][62] = 127, [0][0][2][0][RTW89_MKK][1][62] = 127, [0][0][2][0][RTW89_MKK][0][62] = 127, [0][0][2][0][RTW89_IC][1][62] = 22, - [0][0][2][0][RTW89_IC][2][62] = 68, [0][0][2][0][RTW89_KCC][1][62] = 26, [0][0][2][0][RTW89_KCC][0][62] = 127, [0][0][2][0][RTW89_ACMA][1][62] = 127, @@ -41499,13 +40687,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][62] = 127, [0][0][2][0][RTW89_THAILAND][0][62] = 127, [0][0][2][0][RTW89_FCC][1][64] = 22, - [0][0][2][0][RTW89_FCC][2][64] = 68, [0][0][2][0][RTW89_ETSI][1][64] = 127, [0][0][2][0][RTW89_ETSI][0][64] = 127, [0][0][2][0][RTW89_MKK][1][64] = 127, [0][0][2][0][RTW89_MKK][0][64] = 127, [0][0][2][0][RTW89_IC][1][64] = 22, - [0][0][2][0][RTW89_IC][2][64] = 68, [0][0][2][0][RTW89_KCC][1][64] = 26, [0][0][2][0][RTW89_KCC][0][64] = 127, [0][0][2][0][RTW89_ACMA][1][64] = 127, @@ -41518,13 +40704,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][64] = 127, [0][0][2][0][RTW89_THAILAND][0][64] = 127, [0][0][2][0][RTW89_FCC][1][66] = 22, - [0][0][2][0][RTW89_FCC][2][66] = 68, [0][0][2][0][RTW89_ETSI][1][66] = 127, [0][0][2][0][RTW89_ETSI][0][66] = 127, [0][0][2][0][RTW89_MKK][1][66] = 127, [0][0][2][0][RTW89_MKK][0][66] = 127, [0][0][2][0][RTW89_IC][1][66] = 22, - [0][0][2][0][RTW89_IC][2][66] = 68, 
[0][0][2][0][RTW89_KCC][1][66] = 26, [0][0][2][0][RTW89_KCC][0][66] = 127, [0][0][2][0][RTW89_ACMA][1][66] = 127, @@ -41537,13 +40721,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][66] = 127, [0][0][2][0][RTW89_THAILAND][0][66] = 127, [0][0][2][0][RTW89_FCC][1][68] = 22, - [0][0][2][0][RTW89_FCC][2][68] = 68, [0][0][2][0][RTW89_ETSI][1][68] = 127, [0][0][2][0][RTW89_ETSI][0][68] = 127, [0][0][2][0][RTW89_MKK][1][68] = 127, [0][0][2][0][RTW89_MKK][0][68] = 127, [0][0][2][0][RTW89_IC][1][68] = 22, - [0][0][2][0][RTW89_IC][2][68] = 68, [0][0][2][0][RTW89_KCC][1][68] = 26, [0][0][2][0][RTW89_KCC][0][68] = 127, [0][0][2][0][RTW89_ACMA][1][68] = 127, @@ -41556,13 +40738,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][68] = 127, [0][0][2][0][RTW89_THAILAND][0][68] = 127, [0][0][2][0][RTW89_FCC][1][70] = 24, - [0][0][2][0][RTW89_FCC][2][70] = 68, [0][0][2][0][RTW89_ETSI][1][70] = 127, [0][0][2][0][RTW89_ETSI][0][70] = 127, [0][0][2][0][RTW89_MKK][1][70] = 127, [0][0][2][0][RTW89_MKK][0][70] = 127, [0][0][2][0][RTW89_IC][1][70] = 24, - [0][0][2][0][RTW89_IC][2][70] = 68, [0][0][2][0][RTW89_KCC][1][70] = 26, [0][0][2][0][RTW89_KCC][0][70] = 127, [0][0][2][0][RTW89_ACMA][1][70] = 127, @@ -41575,13 +40755,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][70] = 127, [0][0][2][0][RTW89_THAILAND][0][70] = 127, [0][0][2][0][RTW89_FCC][1][72] = 22, - [0][0][2][0][RTW89_FCC][2][72] = 68, [0][0][2][0][RTW89_ETSI][1][72] = 127, [0][0][2][0][RTW89_ETSI][0][72] = 127, [0][0][2][0][RTW89_MKK][1][72] = 127, [0][0][2][0][RTW89_MKK][0][72] = 127, [0][0][2][0][RTW89_IC][1][72] = 22, - [0][0][2][0][RTW89_IC][2][72] = 68, [0][0][2][0][RTW89_KCC][1][72] = 26, [0][0][2][0][RTW89_KCC][0][72] = 127, [0][0][2][0][RTW89_ACMA][1][72] = 127, @@ -41594,13 +40772,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][72] = 127, [0][0][2][0][RTW89_THAILAND][0][72] = 127, [0][0][2][0][RTW89_FCC][1][74] = 22, - [0][0][2][0][RTW89_FCC][2][74] = 68, [0][0][2][0][RTW89_ETSI][1][74] = 127, [0][0][2][0][RTW89_ETSI][0][74] = 127, [0][0][2][0][RTW89_MKK][1][74] = 127, [0][0][2][0][RTW89_MKK][0][74] = 127, [0][0][2][0][RTW89_IC][1][74] = 22, - [0][0][2][0][RTW89_IC][2][74] = 68, [0][0][2][0][RTW89_KCC][1][74] = 26, [0][0][2][0][RTW89_KCC][0][74] = 127, [0][0][2][0][RTW89_ACMA][1][74] = 127, @@ -41613,13 +40789,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][74] = 127, [0][0][2][0][RTW89_THAILAND][0][74] = 127, [0][0][2][0][RTW89_FCC][1][75] = 22, - [0][0][2][0][RTW89_FCC][2][75] = 68, [0][0][2][0][RTW89_ETSI][1][75] = 127, [0][0][2][0][RTW89_ETSI][0][75] = 127, [0][0][2][0][RTW89_MKK][1][75] = 127, [0][0][2][0][RTW89_MKK][0][75] = 127, [0][0][2][0][RTW89_IC][1][75] = 22, - [0][0][2][0][RTW89_IC][2][75] = 68, [0][0][2][0][RTW89_KCC][1][75] = 26, [0][0][2][0][RTW89_KCC][0][75] = 127, [0][0][2][0][RTW89_ACMA][1][75] = 127, @@ -41632,13 +40806,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][75] = 127, [0][0][2][0][RTW89_THAILAND][0][75] = 127, [0][0][2][0][RTW89_FCC][1][77] = 22, - [0][0][2][0][RTW89_FCC][2][77] = 68, [0][0][2][0][RTW89_ETSI][1][77] = 127, [0][0][2][0][RTW89_ETSI][0][77] = 127, [0][0][2][0][RTW89_MKK][1][77] = 127, [0][0][2][0][RTW89_MKK][0][77] = 127, [0][0][2][0][RTW89_IC][1][77] = 22, - 
[0][0][2][0][RTW89_IC][2][77] = 68, [0][0][2][0][RTW89_KCC][1][77] = 26, [0][0][2][0][RTW89_KCC][0][77] = 127, [0][0][2][0][RTW89_ACMA][1][77] = 127, @@ -41651,13 +40823,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][77] = 127, [0][0][2][0][RTW89_THAILAND][0][77] = 127, [0][0][2][0][RTW89_FCC][1][79] = 22, - [0][0][2][0][RTW89_FCC][2][79] = 68, [0][0][2][0][RTW89_ETSI][1][79] = 127, [0][0][2][0][RTW89_ETSI][0][79] = 127, [0][0][2][0][RTW89_MKK][1][79] = 127, [0][0][2][0][RTW89_MKK][0][79] = 127, [0][0][2][0][RTW89_IC][1][79] = 22, - [0][0][2][0][RTW89_IC][2][79] = 68, [0][0][2][0][RTW89_KCC][1][79] = 26, [0][0][2][0][RTW89_KCC][0][79] = 127, [0][0][2][0][RTW89_ACMA][1][79] = 127, @@ -41670,13 +40840,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][79] = 127, [0][0][2][0][RTW89_THAILAND][0][79] = 127, [0][0][2][0][RTW89_FCC][1][81] = 22, - [0][0][2][0][RTW89_FCC][2][81] = 68, [0][0][2][0][RTW89_ETSI][1][81] = 127, [0][0][2][0][RTW89_ETSI][0][81] = 127, [0][0][2][0][RTW89_MKK][1][81] = 127, [0][0][2][0][RTW89_MKK][0][81] = 127, [0][0][2][0][RTW89_IC][1][81] = 22, - [0][0][2][0][RTW89_IC][2][81] = 68, [0][0][2][0][RTW89_KCC][1][81] = 26, [0][0][2][0][RTW89_KCC][0][81] = 127, [0][0][2][0][RTW89_ACMA][1][81] = 127, @@ -41689,13 +40857,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][81] = 127, [0][0][2][0][RTW89_THAILAND][0][81] = 127, [0][0][2][0][RTW89_FCC][1][83] = 22, - [0][0][2][0][RTW89_FCC][2][83] = 68, [0][0][2][0][RTW89_ETSI][1][83] = 127, [0][0][2][0][RTW89_ETSI][0][83] = 127, [0][0][2][0][RTW89_MKK][1][83] = 127, [0][0][2][0][RTW89_MKK][0][83] = 127, [0][0][2][0][RTW89_IC][1][83] = 22, - [0][0][2][0][RTW89_IC][2][83] = 68, [0][0][2][0][RTW89_KCC][1][83] = 32, [0][0][2][0][RTW89_KCC][0][83] = 127, [0][0][2][0][RTW89_ACMA][1][83] = 127, @@ -41708,13 +40874,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][83] = 127, [0][0][2][0][RTW89_THAILAND][0][83] = 127, [0][0][2][0][RTW89_FCC][1][85] = 22, - [0][0][2][0][RTW89_FCC][2][85] = 68, [0][0][2][0][RTW89_ETSI][1][85] = 127, [0][0][2][0][RTW89_ETSI][0][85] = 127, [0][0][2][0][RTW89_MKK][1][85] = 127, [0][0][2][0][RTW89_MKK][0][85] = 127, [0][0][2][0][RTW89_IC][1][85] = 22, - [0][0][2][0][RTW89_IC][2][85] = 68, [0][0][2][0][RTW89_KCC][1][85] = 32, [0][0][2][0][RTW89_KCC][0][85] = 127, [0][0][2][0][RTW89_ACMA][1][85] = 127, @@ -41727,13 +40891,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][85] = 127, [0][0][2][0][RTW89_THAILAND][0][85] = 127, [0][0][2][0][RTW89_FCC][1][87] = 22, - [0][0][2][0][RTW89_FCC][2][87] = 127, [0][0][2][0][RTW89_ETSI][1][87] = 127, [0][0][2][0][RTW89_ETSI][0][87] = 127, [0][0][2][0][RTW89_MKK][1][87] = 127, [0][0][2][0][RTW89_MKK][0][87] = 127, [0][0][2][0][RTW89_IC][1][87] = 22, - [0][0][2][0][RTW89_IC][2][87] = 127, [0][0][2][0][RTW89_KCC][1][87] = 32, [0][0][2][0][RTW89_KCC][0][87] = 127, [0][0][2][0][RTW89_ACMA][1][87] = 127, @@ -41746,13 +40908,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][87] = 127, [0][0][2][0][RTW89_THAILAND][0][87] = 127, [0][0][2][0][RTW89_FCC][1][89] = 22, - [0][0][2][0][RTW89_FCC][2][89] = 127, [0][0][2][0][RTW89_ETSI][1][89] = 127, [0][0][2][0][RTW89_ETSI][0][89] = 127, [0][0][2][0][RTW89_MKK][1][89] = 127, [0][0][2][0][RTW89_MKK][0][89] = 127, 
[0][0][2][0][RTW89_IC][1][89] = 22, - [0][0][2][0][RTW89_IC][2][89] = 127, [0][0][2][0][RTW89_KCC][1][89] = 32, [0][0][2][0][RTW89_KCC][0][89] = 127, [0][0][2][0][RTW89_ACMA][1][89] = 127, @@ -41765,13 +40925,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][89] = 127, [0][0][2][0][RTW89_THAILAND][0][89] = 127, [0][0][2][0][RTW89_FCC][1][90] = 22, - [0][0][2][0][RTW89_FCC][2][90] = 127, [0][0][2][0][RTW89_ETSI][1][90] = 127, [0][0][2][0][RTW89_ETSI][0][90] = 127, [0][0][2][0][RTW89_MKK][1][90] = 127, [0][0][2][0][RTW89_MKK][0][90] = 127, [0][0][2][0][RTW89_IC][1][90] = 22, - [0][0][2][0][RTW89_IC][2][90] = 127, [0][0][2][0][RTW89_KCC][1][90] = 32, [0][0][2][0][RTW89_KCC][0][90] = 127, [0][0][2][0][RTW89_ACMA][1][90] = 127, @@ -41784,13 +40942,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][90] = 127, [0][0][2][0][RTW89_THAILAND][0][90] = 127, [0][0][2][0][RTW89_FCC][1][92] = 22, - [0][0][2][0][RTW89_FCC][2][92] = 127, [0][0][2][0][RTW89_ETSI][1][92] = 127, [0][0][2][0][RTW89_ETSI][0][92] = 127, [0][0][2][0][RTW89_MKK][1][92] = 127, [0][0][2][0][RTW89_MKK][0][92] = 127, [0][0][2][0][RTW89_IC][1][92] = 22, - [0][0][2][0][RTW89_IC][2][92] = 127, [0][0][2][0][RTW89_KCC][1][92] = 32, [0][0][2][0][RTW89_KCC][0][92] = 127, [0][0][2][0][RTW89_ACMA][1][92] = 127, @@ -41803,13 +40959,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][92] = 127, [0][0][2][0][RTW89_THAILAND][0][92] = 127, [0][0][2][0][RTW89_FCC][1][94] = 22, - [0][0][2][0][RTW89_FCC][2][94] = 127, [0][0][2][0][RTW89_ETSI][1][94] = 127, [0][0][2][0][RTW89_ETSI][0][94] = 127, [0][0][2][0][RTW89_MKK][1][94] = 127, [0][0][2][0][RTW89_MKK][0][94] = 127, [0][0][2][0][RTW89_IC][1][94] = 22, - [0][0][2][0][RTW89_IC][2][94] = 127, [0][0][2][0][RTW89_KCC][1][94] = 32, [0][0][2][0][RTW89_KCC][0][94] = 127, [0][0][2][0][RTW89_ACMA][1][94] = 127, @@ -41822,13 +40976,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][94] = 127, [0][0][2][0][RTW89_THAILAND][0][94] = 127, [0][0][2][0][RTW89_FCC][1][96] = 22, - [0][0][2][0][RTW89_FCC][2][96] = 127, [0][0][2][0][RTW89_ETSI][1][96] = 127, [0][0][2][0][RTW89_ETSI][0][96] = 127, [0][0][2][0][RTW89_MKK][1][96] = 127, [0][0][2][0][RTW89_MKK][0][96] = 127, [0][0][2][0][RTW89_IC][1][96] = 22, - [0][0][2][0][RTW89_IC][2][96] = 127, [0][0][2][0][RTW89_KCC][1][96] = 32, [0][0][2][0][RTW89_KCC][0][96] = 127, [0][0][2][0][RTW89_ACMA][1][96] = 127, @@ -41841,13 +40993,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][96] = 127, [0][0][2][0][RTW89_THAILAND][0][96] = 127, [0][0][2][0][RTW89_FCC][1][98] = 22, - [0][0][2][0][RTW89_FCC][2][98] = 127, [0][0][2][0][RTW89_ETSI][1][98] = 127, [0][0][2][0][RTW89_ETSI][0][98] = 127, [0][0][2][0][RTW89_MKK][1][98] = 127, [0][0][2][0][RTW89_MKK][0][98] = 127, [0][0][2][0][RTW89_IC][1][98] = 22, - [0][0][2][0][RTW89_IC][2][98] = 127, [0][0][2][0][RTW89_KCC][1][98] = 32, [0][0][2][0][RTW89_KCC][0][98] = 127, [0][0][2][0][RTW89_ACMA][1][98] = 127, @@ -41860,13 +41010,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][98] = 127, [0][0][2][0][RTW89_THAILAND][0][98] = 127, [0][0][2][0][RTW89_FCC][1][100] = 22, - [0][0][2][0][RTW89_FCC][2][100] = 127, [0][0][2][0][RTW89_ETSI][1][100] = 127, [0][0][2][0][RTW89_ETSI][0][100] = 127, [0][0][2][0][RTW89_MKK][1][100] = 127, 
[0][0][2][0][RTW89_MKK][0][100] = 127, [0][0][2][0][RTW89_IC][1][100] = 22, - [0][0][2][0][RTW89_IC][2][100] = 127, [0][0][2][0][RTW89_KCC][1][100] = 32, [0][0][2][0][RTW89_KCC][0][100] = 127, [0][0][2][0][RTW89_ACMA][1][100] = 127, @@ -41879,13 +41027,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][100] = 127, [0][0][2][0][RTW89_THAILAND][0][100] = 127, [0][0][2][0][RTW89_FCC][1][102] = 22, - [0][0][2][0][RTW89_FCC][2][102] = 127, [0][0][2][0][RTW89_ETSI][1][102] = 127, [0][0][2][0][RTW89_ETSI][0][102] = 127, [0][0][2][0][RTW89_MKK][1][102] = 127, [0][0][2][0][RTW89_MKK][0][102] = 127, [0][0][2][0][RTW89_IC][1][102] = 22, - [0][0][2][0][RTW89_IC][2][102] = 127, [0][0][2][0][RTW89_KCC][1][102] = 32, [0][0][2][0][RTW89_KCC][0][102] = 127, [0][0][2][0][RTW89_ACMA][1][102] = 127, @@ -41898,13 +41044,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][102] = 127, [0][0][2][0][RTW89_THAILAND][0][102] = 127, [0][0][2][0][RTW89_FCC][1][104] = 22, - [0][0][2][0][RTW89_FCC][2][104] = 127, [0][0][2][0][RTW89_ETSI][1][104] = 127, [0][0][2][0][RTW89_ETSI][0][104] = 127, [0][0][2][0][RTW89_MKK][1][104] = 127, [0][0][2][0][RTW89_MKK][0][104] = 127, [0][0][2][0][RTW89_IC][1][104] = 22, - [0][0][2][0][RTW89_IC][2][104] = 127, [0][0][2][0][RTW89_KCC][1][104] = 32, [0][0][2][0][RTW89_KCC][0][104] = 127, [0][0][2][0][RTW89_ACMA][1][104] = 127, @@ -41917,13 +41061,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][104] = 127, [0][0][2][0][RTW89_THAILAND][0][104] = 127, [0][0][2][0][RTW89_FCC][1][105] = 22, - [0][0][2][0][RTW89_FCC][2][105] = 127, [0][0][2][0][RTW89_ETSI][1][105] = 127, [0][0][2][0][RTW89_ETSI][0][105] = 127, [0][0][2][0][RTW89_MKK][1][105] = 127, [0][0][2][0][RTW89_MKK][0][105] = 127, [0][0][2][0][RTW89_IC][1][105] = 22, - [0][0][2][0][RTW89_IC][2][105] = 127, [0][0][2][0][RTW89_KCC][1][105] = 32, [0][0][2][0][RTW89_KCC][0][105] = 127, [0][0][2][0][RTW89_ACMA][1][105] = 127, @@ -41936,13 +41078,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][105] = 127, [0][0][2][0][RTW89_THAILAND][0][105] = 127, [0][0][2][0][RTW89_FCC][1][107] = 24, - [0][0][2][0][RTW89_FCC][2][107] = 127, [0][0][2][0][RTW89_ETSI][1][107] = 127, [0][0][2][0][RTW89_ETSI][0][107] = 127, [0][0][2][0][RTW89_MKK][1][107] = 127, [0][0][2][0][RTW89_MKK][0][107] = 127, [0][0][2][0][RTW89_IC][1][107] = 24, - [0][0][2][0][RTW89_IC][2][107] = 127, [0][0][2][0][RTW89_KCC][1][107] = 32, [0][0][2][0][RTW89_KCC][0][107] = 127, [0][0][2][0][RTW89_ACMA][1][107] = 127, @@ -41955,13 +41095,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][107] = 127, [0][0][2][0][RTW89_THAILAND][0][107] = 127, [0][0][2][0][RTW89_FCC][1][109] = 24, - [0][0][2][0][RTW89_FCC][2][109] = 127, [0][0][2][0][RTW89_ETSI][1][109] = 127, [0][0][2][0][RTW89_ETSI][0][109] = 127, [0][0][2][0][RTW89_MKK][1][109] = 127, [0][0][2][0][RTW89_MKK][0][109] = 127, [0][0][2][0][RTW89_IC][1][109] = 24, - [0][0][2][0][RTW89_IC][2][109] = 127, [0][0][2][0][RTW89_KCC][1][109] = 32, [0][0][2][0][RTW89_KCC][0][109] = 127, [0][0][2][0][RTW89_ACMA][1][109] = 127, @@ -41974,13 +41112,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][109] = 127, [0][0][2][0][RTW89_THAILAND][0][109] = 127, [0][0][2][0][RTW89_FCC][1][111] = 127, - [0][0][2][0][RTW89_FCC][2][111] = 127, 
[0][0][2][0][RTW89_ETSI][1][111] = 127, [0][0][2][0][RTW89_ETSI][0][111] = 127, [0][0][2][0][RTW89_MKK][1][111] = 127, [0][0][2][0][RTW89_MKK][0][111] = 127, [0][0][2][0][RTW89_IC][1][111] = 127, - [0][0][2][0][RTW89_IC][2][111] = 127, [0][0][2][0][RTW89_KCC][1][111] = 127, [0][0][2][0][RTW89_KCC][0][111] = 127, [0][0][2][0][RTW89_ACMA][1][111] = 127, @@ -41993,13 +41129,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][111] = 127, [0][0][2][0][RTW89_THAILAND][0][111] = 127, [0][0][2][0][RTW89_FCC][1][113] = 127, - [0][0][2][0][RTW89_FCC][2][113] = 127, [0][0][2][0][RTW89_ETSI][1][113] = 127, [0][0][2][0][RTW89_ETSI][0][113] = 127, [0][0][2][0][RTW89_MKK][1][113] = 127, [0][0][2][0][RTW89_MKK][0][113] = 127, [0][0][2][0][RTW89_IC][1][113] = 127, - [0][0][2][0][RTW89_IC][2][113] = 127, [0][0][2][0][RTW89_KCC][1][113] = 127, [0][0][2][0][RTW89_KCC][0][113] = 127, [0][0][2][0][RTW89_ACMA][1][113] = 127, @@ -42012,13 +41146,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][113] = 127, [0][0][2][0][RTW89_THAILAND][0][113] = 127, [0][0][2][0][RTW89_FCC][1][115] = 127, - [0][0][2][0][RTW89_FCC][2][115] = 127, [0][0][2][0][RTW89_ETSI][1][115] = 127, [0][0][2][0][RTW89_ETSI][0][115] = 127, [0][0][2][0][RTW89_MKK][1][115] = 127, [0][0][2][0][RTW89_MKK][0][115] = 127, [0][0][2][0][RTW89_IC][1][115] = 127, - [0][0][2][0][RTW89_IC][2][115] = 127, [0][0][2][0][RTW89_KCC][1][115] = 127, [0][0][2][0][RTW89_KCC][0][115] = 127, [0][0][2][0][RTW89_ACMA][1][115] = 127, @@ -42031,13 +41163,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][115] = 127, [0][0][2][0][RTW89_THAILAND][0][115] = 127, [0][0][2][0][RTW89_FCC][1][117] = 127, - [0][0][2][0][RTW89_FCC][2][117] = 127, [0][0][2][0][RTW89_ETSI][1][117] = 127, [0][0][2][0][RTW89_ETSI][0][117] = 127, [0][0][2][0][RTW89_MKK][1][117] = 127, [0][0][2][0][RTW89_MKK][0][117] = 127, [0][0][2][0][RTW89_IC][1][117] = 127, - [0][0][2][0][RTW89_IC][2][117] = 127, [0][0][2][0][RTW89_KCC][1][117] = 127, [0][0][2][0][RTW89_KCC][0][117] = 127, [0][0][2][0][RTW89_ACMA][1][117] = 127, @@ -42050,13 +41180,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][117] = 127, [0][0][2][0][RTW89_THAILAND][0][117] = 127, [0][0][2][0][RTW89_FCC][1][119] = 127, - [0][0][2][0][RTW89_FCC][2][119] = 127, [0][0][2][0][RTW89_ETSI][1][119] = 127, [0][0][2][0][RTW89_ETSI][0][119] = 127, [0][0][2][0][RTW89_MKK][1][119] = 127, [0][0][2][0][RTW89_MKK][0][119] = 127, [0][0][2][0][RTW89_IC][1][119] = 127, - [0][0][2][0][RTW89_IC][2][119] = 127, [0][0][2][0][RTW89_KCC][1][119] = 127, [0][0][2][0][RTW89_KCC][0][119] = 127, [0][0][2][0][RTW89_ACMA][1][119] = 127, @@ -42069,13 +41197,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][0][2][0][RTW89_THAILAND][1][119] = 127, [0][0][2][0][RTW89_THAILAND][0][119] = 127, [0][1][2][0][RTW89_FCC][1][0] = -2, - [0][1][2][0][RTW89_FCC][2][0] = 54, [0][1][2][0][RTW89_ETSI][1][0] = 54, [0][1][2][0][RTW89_ETSI][0][0] = 18, [0][1][2][0][RTW89_MKK][1][0] = 56, [0][1][2][0][RTW89_MKK][0][0] = 16, [0][1][2][0][RTW89_IC][1][0] = -2, - [0][1][2][0][RTW89_IC][2][0] = 54, [0][1][2][0][RTW89_KCC][1][0] = 12, [0][1][2][0][RTW89_KCC][0][0] = 10, [0][1][2][0][RTW89_ACMA][1][0] = 54, @@ -42088,13 +41214,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][0] = 44, 
[0][1][2][0][RTW89_THAILAND][0][0] = -2, [0][1][2][0][RTW89_FCC][1][2] = -4, - [0][1][2][0][RTW89_FCC][2][2] = 54, [0][1][2][0][RTW89_ETSI][1][2] = 54, [0][1][2][0][RTW89_ETSI][0][2] = 18, [0][1][2][0][RTW89_MKK][1][2] = 54, [0][1][2][0][RTW89_MKK][0][2] = 16, [0][1][2][0][RTW89_IC][1][2] = -4, - [0][1][2][0][RTW89_IC][2][2] = 54, [0][1][2][0][RTW89_KCC][1][2] = 12, [0][1][2][0][RTW89_KCC][0][2] = 12, [0][1][2][0][RTW89_ACMA][1][2] = 54, @@ -42107,13 +41231,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][2] = 44, [0][1][2][0][RTW89_THAILAND][0][2] = -4, [0][1][2][0][RTW89_FCC][1][4] = -4, - [0][1][2][0][RTW89_FCC][2][4] = 54, [0][1][2][0][RTW89_ETSI][1][4] = 54, [0][1][2][0][RTW89_ETSI][0][4] = 18, [0][1][2][0][RTW89_MKK][1][4] = 54, [0][1][2][0][RTW89_MKK][0][4] = 16, [0][1][2][0][RTW89_IC][1][4] = -4, - [0][1][2][0][RTW89_IC][2][4] = 54, [0][1][2][0][RTW89_KCC][1][4] = 12, [0][1][2][0][RTW89_KCC][0][4] = 12, [0][1][2][0][RTW89_ACMA][1][4] = 54, @@ -42126,13 +41248,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][4] = 44, [0][1][2][0][RTW89_THAILAND][0][4] = -4, [0][1][2][0][RTW89_FCC][1][6] = -4, - [0][1][2][0][RTW89_FCC][2][6] = 54, [0][1][2][0][RTW89_ETSI][1][6] = 54, [0][1][2][0][RTW89_ETSI][0][6] = 18, [0][1][2][0][RTW89_MKK][1][6] = 54, [0][1][2][0][RTW89_MKK][0][6] = 16, [0][1][2][0][RTW89_IC][1][6] = -4, - [0][1][2][0][RTW89_IC][2][6] = 54, [0][1][2][0][RTW89_KCC][1][6] = 12, [0][1][2][0][RTW89_KCC][0][6] = 12, [0][1][2][0][RTW89_ACMA][1][6] = 54, @@ -42145,13 +41265,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][6] = 44, [0][1][2][0][RTW89_THAILAND][0][6] = -4, [0][1][2][0][RTW89_FCC][1][8] = -4, - [0][1][2][0][RTW89_FCC][2][8] = 54, [0][1][2][0][RTW89_ETSI][1][8] = 54, [0][1][2][0][RTW89_ETSI][0][8] = 18, [0][1][2][0][RTW89_MKK][1][8] = 54, [0][1][2][0][RTW89_MKK][0][8] = 16, [0][1][2][0][RTW89_IC][1][8] = -4, - [0][1][2][0][RTW89_IC][2][8] = 54, [0][1][2][0][RTW89_KCC][1][8] = 12, [0][1][2][0][RTW89_KCC][0][8] = 12, [0][1][2][0][RTW89_ACMA][1][8] = 54, @@ -42164,13 +41282,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][8] = 44, [0][1][2][0][RTW89_THAILAND][0][8] = -4, [0][1][2][0][RTW89_FCC][1][10] = -4, - [0][1][2][0][RTW89_FCC][2][10] = 54, [0][1][2][0][RTW89_ETSI][1][10] = 54, [0][1][2][0][RTW89_ETSI][0][10] = 18, [0][1][2][0][RTW89_MKK][1][10] = 54, [0][1][2][0][RTW89_MKK][0][10] = 16, [0][1][2][0][RTW89_IC][1][10] = -4, - [0][1][2][0][RTW89_IC][2][10] = 54, [0][1][2][0][RTW89_KCC][1][10] = 12, [0][1][2][0][RTW89_KCC][0][10] = 12, [0][1][2][0][RTW89_ACMA][1][10] = 54, @@ -42183,13 +41299,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][10] = 44, [0][1][2][0][RTW89_THAILAND][0][10] = -4, [0][1][2][0][RTW89_FCC][1][12] = -4, - [0][1][2][0][RTW89_FCC][2][12] = 54, [0][1][2][0][RTW89_ETSI][1][12] = 54, [0][1][2][0][RTW89_ETSI][0][12] = 18, [0][1][2][0][RTW89_MKK][1][12] = 54, [0][1][2][0][RTW89_MKK][0][12] = 16, [0][1][2][0][RTW89_IC][1][12] = -4, - [0][1][2][0][RTW89_IC][2][12] = 54, [0][1][2][0][RTW89_KCC][1][12] = 12, [0][1][2][0][RTW89_KCC][0][12] = 12, [0][1][2][0][RTW89_ACMA][1][12] = 54, @@ -42202,13 +41316,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][12] = 44, [0][1][2][0][RTW89_THAILAND][0][12] = -4, [0][1][2][0][RTW89_FCC][1][14] = -4, 
- [0][1][2][0][RTW89_FCC][2][14] = 54, [0][1][2][0][RTW89_ETSI][1][14] = 54, [0][1][2][0][RTW89_ETSI][0][14] = 18, [0][1][2][0][RTW89_MKK][1][14] = 54, [0][1][2][0][RTW89_MKK][0][14] = 16, [0][1][2][0][RTW89_IC][1][14] = -4, - [0][1][2][0][RTW89_IC][2][14] = 54, [0][1][2][0][RTW89_KCC][1][14] = 12, [0][1][2][0][RTW89_KCC][0][14] = 12, [0][1][2][0][RTW89_ACMA][1][14] = 54, @@ -42221,13 +41333,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][14] = 44, [0][1][2][0][RTW89_THAILAND][0][14] = -4, [0][1][2][0][RTW89_FCC][1][15] = -4, - [0][1][2][0][RTW89_FCC][2][15] = 54, [0][1][2][0][RTW89_ETSI][1][15] = 54, [0][1][2][0][RTW89_ETSI][0][15] = 18, [0][1][2][0][RTW89_MKK][1][15] = 54, [0][1][2][0][RTW89_MKK][0][15] = 16, [0][1][2][0][RTW89_IC][1][15] = -4, - [0][1][2][0][RTW89_IC][2][15] = 54, [0][1][2][0][RTW89_KCC][1][15] = 12, [0][1][2][0][RTW89_KCC][0][15] = 12, [0][1][2][0][RTW89_ACMA][1][15] = 54, @@ -42240,13 +41350,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][15] = 44, [0][1][2][0][RTW89_THAILAND][0][15] = -4, [0][1][2][0][RTW89_FCC][1][17] = -4, - [0][1][2][0][RTW89_FCC][2][17] = 54, [0][1][2][0][RTW89_ETSI][1][17] = 54, [0][1][2][0][RTW89_ETSI][0][17] = 18, [0][1][2][0][RTW89_MKK][1][17] = 54, [0][1][2][0][RTW89_MKK][0][17] = 16, [0][1][2][0][RTW89_IC][1][17] = -4, - [0][1][2][0][RTW89_IC][2][17] = 54, [0][1][2][0][RTW89_KCC][1][17] = 12, [0][1][2][0][RTW89_KCC][0][17] = 12, [0][1][2][0][RTW89_ACMA][1][17] = 54, @@ -42259,13 +41367,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][17] = 44, [0][1][2][0][RTW89_THAILAND][0][17] = -4, [0][1][2][0][RTW89_FCC][1][19] = -4, - [0][1][2][0][RTW89_FCC][2][19] = 54, [0][1][2][0][RTW89_ETSI][1][19] = 54, [0][1][2][0][RTW89_ETSI][0][19] = 18, [0][1][2][0][RTW89_MKK][1][19] = 54, [0][1][2][0][RTW89_MKK][0][19] = 16, [0][1][2][0][RTW89_IC][1][19] = -4, - [0][1][2][0][RTW89_IC][2][19] = 54, [0][1][2][0][RTW89_KCC][1][19] = 12, [0][1][2][0][RTW89_KCC][0][19] = 12, [0][1][2][0][RTW89_ACMA][1][19] = 54, @@ -42278,13 +41384,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][19] = 44, [0][1][2][0][RTW89_THAILAND][0][19] = -4, [0][1][2][0][RTW89_FCC][1][21] = -4, - [0][1][2][0][RTW89_FCC][2][21] = 54, [0][1][2][0][RTW89_ETSI][1][21] = 54, [0][1][2][0][RTW89_ETSI][0][21] = 18, [0][1][2][0][RTW89_MKK][1][21] = 54, [0][1][2][0][RTW89_MKK][0][21] = 16, [0][1][2][0][RTW89_IC][1][21] = -4, - [0][1][2][0][RTW89_IC][2][21] = 54, [0][1][2][0][RTW89_KCC][1][21] = 12, [0][1][2][0][RTW89_KCC][0][21] = 12, [0][1][2][0][RTW89_ACMA][1][21] = 54, @@ -42297,13 +41401,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][21] = 44, [0][1][2][0][RTW89_THAILAND][0][21] = -4, [0][1][2][0][RTW89_FCC][1][23] = -4, - [0][1][2][0][RTW89_FCC][2][23] = 68, [0][1][2][0][RTW89_ETSI][1][23] = 54, [0][1][2][0][RTW89_ETSI][0][23] = 18, [0][1][2][0][RTW89_MKK][1][23] = 54, [0][1][2][0][RTW89_MKK][0][23] = 16, [0][1][2][0][RTW89_IC][1][23] = -4, - [0][1][2][0][RTW89_IC][2][23] = 68, [0][1][2][0][RTW89_KCC][1][23] = 12, [0][1][2][0][RTW89_KCC][0][23] = 10, [0][1][2][0][RTW89_ACMA][1][23] = 54, @@ -42316,13 +41418,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][23] = 44, [0][1][2][0][RTW89_THAILAND][0][23] = -4, [0][1][2][0][RTW89_FCC][1][25] = -4, - 
[0][1][2][0][RTW89_FCC][2][25] = 68, [0][1][2][0][RTW89_ETSI][1][25] = 54, [0][1][2][0][RTW89_ETSI][0][25] = 18, [0][1][2][0][RTW89_MKK][1][25] = 54, [0][1][2][0][RTW89_MKK][0][25] = 16, [0][1][2][0][RTW89_IC][1][25] = -4, - [0][1][2][0][RTW89_IC][2][25] = 68, [0][1][2][0][RTW89_KCC][1][25] = 12, [0][1][2][0][RTW89_KCC][0][25] = 14, [0][1][2][0][RTW89_ACMA][1][25] = 54, @@ -42335,13 +41435,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][25] = 42, [0][1][2][0][RTW89_THAILAND][0][25] = -4, [0][1][2][0][RTW89_FCC][1][27] = -4, - [0][1][2][0][RTW89_FCC][2][27] = 68, [0][1][2][0][RTW89_ETSI][1][27] = 54, [0][1][2][0][RTW89_ETSI][0][27] = 18, [0][1][2][0][RTW89_MKK][1][27] = 54, [0][1][2][0][RTW89_MKK][0][27] = 16, [0][1][2][0][RTW89_IC][1][27] = -4, - [0][1][2][0][RTW89_IC][2][27] = 68, [0][1][2][0][RTW89_KCC][1][27] = 12, [0][1][2][0][RTW89_KCC][0][27] = 14, [0][1][2][0][RTW89_ACMA][1][27] = 54, @@ -42354,13 +41452,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][27] = 42, [0][1][2][0][RTW89_THAILAND][0][27] = -4, [0][1][2][0][RTW89_FCC][1][29] = -4, - [0][1][2][0][RTW89_FCC][2][29] = 68, [0][1][2][0][RTW89_ETSI][1][29] = 54, [0][1][2][0][RTW89_ETSI][0][29] = 18, [0][1][2][0][RTW89_MKK][1][29] = 54, [0][1][2][0][RTW89_MKK][0][29] = 16, [0][1][2][0][RTW89_IC][1][29] = -4, - [0][1][2][0][RTW89_IC][2][29] = 68, [0][1][2][0][RTW89_KCC][1][29] = 12, [0][1][2][0][RTW89_KCC][0][29] = 14, [0][1][2][0][RTW89_ACMA][1][29] = 54, @@ -42373,13 +41469,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][29] = 42, [0][1][2][0][RTW89_THAILAND][0][29] = -4, [0][1][2][0][RTW89_FCC][1][30] = -4, - [0][1][2][0][RTW89_FCC][2][30] = 68, [0][1][2][0][RTW89_ETSI][1][30] = 54, [0][1][2][0][RTW89_ETSI][0][30] = 18, [0][1][2][0][RTW89_MKK][1][30] = 54, [0][1][2][0][RTW89_MKK][0][30] = 16, [0][1][2][0][RTW89_IC][1][30] = -4, - [0][1][2][0][RTW89_IC][2][30] = 68, [0][1][2][0][RTW89_KCC][1][30] = 12, [0][1][2][0][RTW89_KCC][0][30] = 14, [0][1][2][0][RTW89_ACMA][1][30] = 54, @@ -42392,13 +41486,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][30] = 42, [0][1][2][0][RTW89_THAILAND][0][30] = -4, [0][1][2][0][RTW89_FCC][1][32] = -4, - [0][1][2][0][RTW89_FCC][2][32] = 68, [0][1][2][0][RTW89_ETSI][1][32] = 54, [0][1][2][0][RTW89_ETSI][0][32] = 18, [0][1][2][0][RTW89_MKK][1][32] = 54, [0][1][2][0][RTW89_MKK][0][32] = 16, [0][1][2][0][RTW89_IC][1][32] = -4, - [0][1][2][0][RTW89_IC][2][32] = 68, [0][1][2][0][RTW89_KCC][1][32] = 12, [0][1][2][0][RTW89_KCC][0][32] = 14, [0][1][2][0][RTW89_ACMA][1][32] = 54, @@ -42411,13 +41503,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][32] = 42, [0][1][2][0][RTW89_THAILAND][0][32] = -4, [0][1][2][0][RTW89_FCC][1][34] = -4, - [0][1][2][0][RTW89_FCC][2][34] = 68, [0][1][2][0][RTW89_ETSI][1][34] = 54, [0][1][2][0][RTW89_ETSI][0][34] = 18, [0][1][2][0][RTW89_MKK][1][34] = 54, [0][1][2][0][RTW89_MKK][0][34] = 16, [0][1][2][0][RTW89_IC][1][34] = -4, - [0][1][2][0][RTW89_IC][2][34] = 68, [0][1][2][0][RTW89_KCC][1][34] = 12, [0][1][2][0][RTW89_KCC][0][34] = 14, [0][1][2][0][RTW89_ACMA][1][34] = 54, @@ -42430,13 +41520,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][34] = 42, [0][1][2][0][RTW89_THAILAND][0][34] = -4, [0][1][2][0][RTW89_FCC][1][36] = -4, - 
[0][1][2][0][RTW89_FCC][2][36] = 68, [0][1][2][0][RTW89_ETSI][1][36] = 54, [0][1][2][0][RTW89_ETSI][0][36] = 18, [0][1][2][0][RTW89_MKK][1][36] = 54, [0][1][2][0][RTW89_MKK][0][36] = 16, [0][1][2][0][RTW89_IC][1][36] = -4, - [0][1][2][0][RTW89_IC][2][36] = 68, [0][1][2][0][RTW89_KCC][1][36] = 12, [0][1][2][0][RTW89_KCC][0][36] = 14, [0][1][2][0][RTW89_ACMA][1][36] = 54, @@ -42449,13 +41537,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][36] = 42, [0][1][2][0][RTW89_THAILAND][0][36] = -4, [0][1][2][0][RTW89_FCC][1][38] = -4, - [0][1][2][0][RTW89_FCC][2][38] = 68, [0][1][2][0][RTW89_ETSI][1][38] = 54, [0][1][2][0][RTW89_ETSI][0][38] = 18, [0][1][2][0][RTW89_MKK][1][38] = 54, [0][1][2][0][RTW89_MKK][0][38] = 16, [0][1][2][0][RTW89_IC][1][38] = -4, - [0][1][2][0][RTW89_IC][2][38] = 68, [0][1][2][0][RTW89_KCC][1][38] = 12, [0][1][2][0][RTW89_KCC][0][38] = 14, [0][1][2][0][RTW89_ACMA][1][38] = 54, @@ -42468,13 +41554,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][38] = 42, [0][1][2][0][RTW89_THAILAND][0][38] = -4, [0][1][2][0][RTW89_FCC][1][40] = -4, - [0][1][2][0][RTW89_FCC][2][40] = 68, [0][1][2][0][RTW89_ETSI][1][40] = 54, [0][1][2][0][RTW89_ETSI][0][40] = 18, [0][1][2][0][RTW89_MKK][1][40] = 54, [0][1][2][0][RTW89_MKK][0][40] = 16, [0][1][2][0][RTW89_IC][1][40] = -4, - [0][1][2][0][RTW89_IC][2][40] = 68, [0][1][2][0][RTW89_KCC][1][40] = 12, [0][1][2][0][RTW89_KCC][0][40] = 14, [0][1][2][0][RTW89_ACMA][1][40] = 54, @@ -42487,13 +41571,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][40] = 42, [0][1][2][0][RTW89_THAILAND][0][40] = -4, [0][1][2][0][RTW89_FCC][1][42] = -4, - [0][1][2][0][RTW89_FCC][2][42] = 68, [0][1][2][0][RTW89_ETSI][1][42] = 54, [0][1][2][0][RTW89_ETSI][0][42] = 18, [0][1][2][0][RTW89_MKK][1][42] = 54, [0][1][2][0][RTW89_MKK][0][42] = 16, [0][1][2][0][RTW89_IC][1][42] = -4, - [0][1][2][0][RTW89_IC][2][42] = 68, [0][1][2][0][RTW89_KCC][1][42] = 12, [0][1][2][0][RTW89_KCC][0][42] = 14, [0][1][2][0][RTW89_ACMA][1][42] = 54, @@ -42506,13 +41588,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][42] = 42, [0][1][2][0][RTW89_THAILAND][0][42] = -4, [0][1][2][0][RTW89_FCC][1][44] = -2, - [0][1][2][0][RTW89_FCC][2][44] = 68, [0][1][2][0][RTW89_ETSI][1][44] = 54, [0][1][2][0][RTW89_ETSI][0][44] = 18, [0][1][2][0][RTW89_MKK][1][44] = 34, [0][1][2][0][RTW89_MKK][0][44] = 16, [0][1][2][0][RTW89_IC][1][44] = -2, - [0][1][2][0][RTW89_IC][2][44] = 68, [0][1][2][0][RTW89_KCC][1][44] = 12, [0][1][2][0][RTW89_KCC][0][44] = 12, [0][1][2][0][RTW89_ACMA][1][44] = 54, @@ -42525,13 +41605,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][44] = 42, [0][1][2][0][RTW89_THAILAND][0][44] = -2, [0][1][2][0][RTW89_FCC][1][45] = -2, - [0][1][2][0][RTW89_FCC][2][45] = 127, [0][1][2][0][RTW89_ETSI][1][45] = 127, [0][1][2][0][RTW89_ETSI][0][45] = 127, [0][1][2][0][RTW89_MKK][1][45] = 127, [0][1][2][0][RTW89_MKK][0][45] = 127, [0][1][2][0][RTW89_IC][1][45] = -2, - [0][1][2][0][RTW89_IC][2][45] = 70, [0][1][2][0][RTW89_KCC][1][45] = 12, [0][1][2][0][RTW89_KCC][0][45] = 127, [0][1][2][0][RTW89_ACMA][1][45] = 127, @@ -42544,13 +41622,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][45] = 127, [0][1][2][0][RTW89_THAILAND][0][45] = 127, [0][1][2][0][RTW89_FCC][1][47] = -2, - 
[0][1][2][0][RTW89_FCC][2][47] = 127, [0][1][2][0][RTW89_ETSI][1][47] = 127, [0][1][2][0][RTW89_ETSI][0][47] = 127, [0][1][2][0][RTW89_MKK][1][47] = 127, [0][1][2][0][RTW89_MKK][0][47] = 127, [0][1][2][0][RTW89_IC][1][47] = -2, - [0][1][2][0][RTW89_IC][2][47] = 68, [0][1][2][0][RTW89_KCC][1][47] = 12, [0][1][2][0][RTW89_KCC][0][47] = 127, [0][1][2][0][RTW89_ACMA][1][47] = 127, @@ -42563,13 +41639,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][47] = 127, [0][1][2][0][RTW89_THAILAND][0][47] = 127, [0][1][2][0][RTW89_FCC][1][49] = -2, - [0][1][2][0][RTW89_FCC][2][49] = 127, [0][1][2][0][RTW89_ETSI][1][49] = 127, [0][1][2][0][RTW89_ETSI][0][49] = 127, [0][1][2][0][RTW89_MKK][1][49] = 127, [0][1][2][0][RTW89_MKK][0][49] = 127, [0][1][2][0][RTW89_IC][1][49] = -2, - [0][1][2][0][RTW89_IC][2][49] = 68, [0][1][2][0][RTW89_KCC][1][49] = 12, [0][1][2][0][RTW89_KCC][0][49] = 127, [0][1][2][0][RTW89_ACMA][1][49] = 127, @@ -42582,13 +41656,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][49] = 127, [0][1][2][0][RTW89_THAILAND][0][49] = 127, [0][1][2][0][RTW89_FCC][1][51] = -2, - [0][1][2][0][RTW89_FCC][2][51] = 127, [0][1][2][0][RTW89_ETSI][1][51] = 127, [0][1][2][0][RTW89_ETSI][0][51] = 127, [0][1][2][0][RTW89_MKK][1][51] = 127, [0][1][2][0][RTW89_MKK][0][51] = 127, [0][1][2][0][RTW89_IC][1][51] = -2, - [0][1][2][0][RTW89_IC][2][51] = 68, [0][1][2][0][RTW89_KCC][1][51] = 12, [0][1][2][0][RTW89_KCC][0][51] = 127, [0][1][2][0][RTW89_ACMA][1][51] = 127, @@ -42601,13 +41673,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][51] = 127, [0][1][2][0][RTW89_THAILAND][0][51] = 127, [0][1][2][0][RTW89_FCC][1][53] = -2, - [0][1][2][0][RTW89_FCC][2][53] = 127, [0][1][2][0][RTW89_ETSI][1][53] = 127, [0][1][2][0][RTW89_ETSI][0][53] = 127, [0][1][2][0][RTW89_MKK][1][53] = 127, [0][1][2][0][RTW89_MKK][0][53] = 127, [0][1][2][0][RTW89_IC][1][53] = -2, - [0][1][2][0][RTW89_IC][2][53] = 68, [0][1][2][0][RTW89_KCC][1][53] = 12, [0][1][2][0][RTW89_KCC][0][53] = 127, [0][1][2][0][RTW89_ACMA][1][53] = 127, @@ -42620,13 +41690,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][53] = 127, [0][1][2][0][RTW89_THAILAND][0][53] = 127, [0][1][2][0][RTW89_FCC][1][55] = -2, - [0][1][2][0][RTW89_FCC][2][55] = 68, [0][1][2][0][RTW89_ETSI][1][55] = 127, [0][1][2][0][RTW89_ETSI][0][55] = 127, [0][1][2][0][RTW89_MKK][1][55] = 127, [0][1][2][0][RTW89_MKK][0][55] = 127, [0][1][2][0][RTW89_IC][1][55] = -2, - [0][1][2][0][RTW89_IC][2][55] = 68, [0][1][2][0][RTW89_KCC][1][55] = 12, [0][1][2][0][RTW89_KCC][0][55] = 127, [0][1][2][0][RTW89_ACMA][1][55] = 127, @@ -42639,13 +41707,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][55] = 127, [0][1][2][0][RTW89_THAILAND][0][55] = 127, [0][1][2][0][RTW89_FCC][1][57] = -2, - [0][1][2][0][RTW89_FCC][2][57] = 68, [0][1][2][0][RTW89_ETSI][1][57] = 127, [0][1][2][0][RTW89_ETSI][0][57] = 127, [0][1][2][0][RTW89_MKK][1][57] = 127, [0][1][2][0][RTW89_MKK][0][57] = 127, [0][1][2][0][RTW89_IC][1][57] = -2, - [0][1][2][0][RTW89_IC][2][57] = 68, [0][1][2][0][RTW89_KCC][1][57] = 12, [0][1][2][0][RTW89_KCC][0][57] = 127, [0][1][2][0][RTW89_ACMA][1][57] = 127, @@ -42658,13 +41724,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][57] = 127, [0][1][2][0][RTW89_THAILAND][0][57] = 127, 
[0][1][2][0][RTW89_FCC][1][59] = -2, - [0][1][2][0][RTW89_FCC][2][59] = 68, [0][1][2][0][RTW89_ETSI][1][59] = 127, [0][1][2][0][RTW89_ETSI][0][59] = 127, [0][1][2][0][RTW89_MKK][1][59] = 127, [0][1][2][0][RTW89_MKK][0][59] = 127, [0][1][2][0][RTW89_IC][1][59] = -2, - [0][1][2][0][RTW89_IC][2][59] = 68, [0][1][2][0][RTW89_KCC][1][59] = 12, [0][1][2][0][RTW89_KCC][0][59] = 127, [0][1][2][0][RTW89_ACMA][1][59] = 127, @@ -42677,13 +41741,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][59] = 127, [0][1][2][0][RTW89_THAILAND][0][59] = 127, [0][1][2][0][RTW89_FCC][1][60] = -2, - [0][1][2][0][RTW89_FCC][2][60] = 68, [0][1][2][0][RTW89_ETSI][1][60] = 127, [0][1][2][0][RTW89_ETSI][0][60] = 127, [0][1][2][0][RTW89_MKK][1][60] = 127, [0][1][2][0][RTW89_MKK][0][60] = 127, [0][1][2][0][RTW89_IC][1][60] = -2, - [0][1][2][0][RTW89_IC][2][60] = 68, [0][1][2][0][RTW89_KCC][1][60] = 12, [0][1][2][0][RTW89_KCC][0][60] = 127, [0][1][2][0][RTW89_ACMA][1][60] = 127, @@ -42696,13 +41758,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][60] = 127, [0][1][2][0][RTW89_THAILAND][0][60] = 127, [0][1][2][0][RTW89_FCC][1][62] = -2, - [0][1][2][0][RTW89_FCC][2][62] = 68, [0][1][2][0][RTW89_ETSI][1][62] = 127, [0][1][2][0][RTW89_ETSI][0][62] = 127, [0][1][2][0][RTW89_MKK][1][62] = 127, [0][1][2][0][RTW89_MKK][0][62] = 127, [0][1][2][0][RTW89_IC][1][62] = -2, - [0][1][2][0][RTW89_IC][2][62] = 68, [0][1][2][0][RTW89_KCC][1][62] = 12, [0][1][2][0][RTW89_KCC][0][62] = 127, [0][1][2][0][RTW89_ACMA][1][62] = 127, @@ -42715,13 +41775,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][62] = 127, [0][1][2][0][RTW89_THAILAND][0][62] = 127, [0][1][2][0][RTW89_FCC][1][64] = -2, - [0][1][2][0][RTW89_FCC][2][64] = 68, [0][1][2][0][RTW89_ETSI][1][64] = 127, [0][1][2][0][RTW89_ETSI][0][64] = 127, [0][1][2][0][RTW89_MKK][1][64] = 127, [0][1][2][0][RTW89_MKK][0][64] = 127, [0][1][2][0][RTW89_IC][1][64] = -2, - [0][1][2][0][RTW89_IC][2][64] = 68, [0][1][2][0][RTW89_KCC][1][64] = 12, [0][1][2][0][RTW89_KCC][0][64] = 127, [0][1][2][0][RTW89_ACMA][1][64] = 127, @@ -42734,13 +41792,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][64] = 127, [0][1][2][0][RTW89_THAILAND][0][64] = 127, [0][1][2][0][RTW89_FCC][1][66] = -2, - [0][1][2][0][RTW89_FCC][2][66] = 68, [0][1][2][0][RTW89_ETSI][1][66] = 127, [0][1][2][0][RTW89_ETSI][0][66] = 127, [0][1][2][0][RTW89_MKK][1][66] = 127, [0][1][2][0][RTW89_MKK][0][66] = 127, [0][1][2][0][RTW89_IC][1][66] = -2, - [0][1][2][0][RTW89_IC][2][66] = 68, [0][1][2][0][RTW89_KCC][1][66] = 12, [0][1][2][0][RTW89_KCC][0][66] = 127, [0][1][2][0][RTW89_ACMA][1][66] = 127, @@ -42753,13 +41809,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][66] = 127, [0][1][2][0][RTW89_THAILAND][0][66] = 127, [0][1][2][0][RTW89_FCC][1][68] = -2, - [0][1][2][0][RTW89_FCC][2][68] = 68, [0][1][2][0][RTW89_ETSI][1][68] = 127, [0][1][2][0][RTW89_ETSI][0][68] = 127, [0][1][2][0][RTW89_MKK][1][68] = 127, [0][1][2][0][RTW89_MKK][0][68] = 127, [0][1][2][0][RTW89_IC][1][68] = -2, - [0][1][2][0][RTW89_IC][2][68] = 68, [0][1][2][0][RTW89_KCC][1][68] = 12, [0][1][2][0][RTW89_KCC][0][68] = 127, [0][1][2][0][RTW89_ACMA][1][68] = 127, @@ -42772,13 +41826,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][68] = 127, 
[0][1][2][0][RTW89_THAILAND][0][68] = 127, [0][1][2][0][RTW89_FCC][1][70] = -2, - [0][1][2][0][RTW89_FCC][2][70] = 68, [0][1][2][0][RTW89_ETSI][1][70] = 127, [0][1][2][0][RTW89_ETSI][0][70] = 127, [0][1][2][0][RTW89_MKK][1][70] = 127, [0][1][2][0][RTW89_MKK][0][70] = 127, [0][1][2][0][RTW89_IC][1][70] = -2, - [0][1][2][0][RTW89_IC][2][70] = 68, [0][1][2][0][RTW89_KCC][1][70] = 12, [0][1][2][0][RTW89_KCC][0][70] = 127, [0][1][2][0][RTW89_ACMA][1][70] = 127, @@ -42791,13 +41843,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][70] = 127, [0][1][2][0][RTW89_THAILAND][0][70] = 127, [0][1][2][0][RTW89_FCC][1][72] = -2, - [0][1][2][0][RTW89_FCC][2][72] = 68, [0][1][2][0][RTW89_ETSI][1][72] = 127, [0][1][2][0][RTW89_ETSI][0][72] = 127, [0][1][2][0][RTW89_MKK][1][72] = 127, [0][1][2][0][RTW89_MKK][0][72] = 127, [0][1][2][0][RTW89_IC][1][72] = -2, - [0][1][2][0][RTW89_IC][2][72] = 68, [0][1][2][0][RTW89_KCC][1][72] = 12, [0][1][2][0][RTW89_KCC][0][72] = 127, [0][1][2][0][RTW89_ACMA][1][72] = 127, @@ -42810,13 +41860,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][72] = 127, [0][1][2][0][RTW89_THAILAND][0][72] = 127, [0][1][2][0][RTW89_FCC][1][74] = -2, - [0][1][2][0][RTW89_FCC][2][74] = 68, [0][1][2][0][RTW89_ETSI][1][74] = 127, [0][1][2][0][RTW89_ETSI][0][74] = 127, [0][1][2][0][RTW89_MKK][1][74] = 127, [0][1][2][0][RTW89_MKK][0][74] = 127, [0][1][2][0][RTW89_IC][1][74] = -2, - [0][1][2][0][RTW89_IC][2][74] = 68, [0][1][2][0][RTW89_KCC][1][74] = 12, [0][1][2][0][RTW89_KCC][0][74] = 127, [0][1][2][0][RTW89_ACMA][1][74] = 127, @@ -42829,13 +41877,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][74] = 127, [0][1][2][0][RTW89_THAILAND][0][74] = 127, [0][1][2][0][RTW89_FCC][1][75] = -2, - [0][1][2][0][RTW89_FCC][2][75] = 68, [0][1][2][0][RTW89_ETSI][1][75] = 127, [0][1][2][0][RTW89_ETSI][0][75] = 127, [0][1][2][0][RTW89_MKK][1][75] = 127, [0][1][2][0][RTW89_MKK][0][75] = 127, [0][1][2][0][RTW89_IC][1][75] = -2, - [0][1][2][0][RTW89_IC][2][75] = 68, [0][1][2][0][RTW89_KCC][1][75] = 12, [0][1][2][0][RTW89_KCC][0][75] = 127, [0][1][2][0][RTW89_ACMA][1][75] = 127, @@ -42848,13 +41894,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][75] = 127, [0][1][2][0][RTW89_THAILAND][0][75] = 127, [0][1][2][0][RTW89_FCC][1][77] = -2, - [0][1][2][0][RTW89_FCC][2][77] = 68, [0][1][2][0][RTW89_ETSI][1][77] = 127, [0][1][2][0][RTW89_ETSI][0][77] = 127, [0][1][2][0][RTW89_MKK][1][77] = 127, [0][1][2][0][RTW89_MKK][0][77] = 127, [0][1][2][0][RTW89_IC][1][77] = -2, - [0][1][2][0][RTW89_IC][2][77] = 68, [0][1][2][0][RTW89_KCC][1][77] = 12, [0][1][2][0][RTW89_KCC][0][77] = 127, [0][1][2][0][RTW89_ACMA][1][77] = 127, @@ -42867,13 +41911,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][77] = 127, [0][1][2][0][RTW89_THAILAND][0][77] = 127, [0][1][2][0][RTW89_FCC][1][79] = -2, - [0][1][2][0][RTW89_FCC][2][79] = 68, [0][1][2][0][RTW89_ETSI][1][79] = 127, [0][1][2][0][RTW89_ETSI][0][79] = 127, [0][1][2][0][RTW89_MKK][1][79] = 127, [0][1][2][0][RTW89_MKK][0][79] = 127, [0][1][2][0][RTW89_IC][1][79] = -2, - [0][1][2][0][RTW89_IC][2][79] = 68, [0][1][2][0][RTW89_KCC][1][79] = 12, [0][1][2][0][RTW89_KCC][0][79] = 127, [0][1][2][0][RTW89_ACMA][1][79] = 127, @@ -42886,13 +41928,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] 
[0][1][2][0][RTW89_THAILAND][1][79] = 127, [0][1][2][0][RTW89_THAILAND][0][79] = 127, [0][1][2][0][RTW89_FCC][1][81] = -2, - [0][1][2][0][RTW89_FCC][2][81] = 68, [0][1][2][0][RTW89_ETSI][1][81] = 127, [0][1][2][0][RTW89_ETSI][0][81] = 127, [0][1][2][0][RTW89_MKK][1][81] = 127, [0][1][2][0][RTW89_MKK][0][81] = 127, [0][1][2][0][RTW89_IC][1][81] = -2, - [0][1][2][0][RTW89_IC][2][81] = 68, [0][1][2][0][RTW89_KCC][1][81] = 12, [0][1][2][0][RTW89_KCC][0][81] = 127, [0][1][2][0][RTW89_ACMA][1][81] = 127, @@ -42905,13 +41945,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][81] = 127, [0][1][2][0][RTW89_THAILAND][0][81] = 127, [0][1][2][0][RTW89_FCC][1][83] = -2, - [0][1][2][0][RTW89_FCC][2][83] = 68, [0][1][2][0][RTW89_ETSI][1][83] = 127, [0][1][2][0][RTW89_ETSI][0][83] = 127, [0][1][2][0][RTW89_MKK][1][83] = 127, [0][1][2][0][RTW89_MKK][0][83] = 127, [0][1][2][0][RTW89_IC][1][83] = -2, - [0][1][2][0][RTW89_IC][2][83] = 68, [0][1][2][0][RTW89_KCC][1][83] = 20, [0][1][2][0][RTW89_KCC][0][83] = 127, [0][1][2][0][RTW89_ACMA][1][83] = 127, @@ -42924,13 +41962,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][83] = 127, [0][1][2][0][RTW89_THAILAND][0][83] = 127, [0][1][2][0][RTW89_FCC][1][85] = -2, - [0][1][2][0][RTW89_FCC][2][85] = 68, [0][1][2][0][RTW89_ETSI][1][85] = 127, [0][1][2][0][RTW89_ETSI][0][85] = 127, [0][1][2][0][RTW89_MKK][1][85] = 127, [0][1][2][0][RTW89_MKK][0][85] = 127, [0][1][2][0][RTW89_IC][1][85] = -2, - [0][1][2][0][RTW89_IC][2][85] = 68, [0][1][2][0][RTW89_KCC][1][85] = 20, [0][1][2][0][RTW89_KCC][0][85] = 127, [0][1][2][0][RTW89_ACMA][1][85] = 127, @@ -42943,13 +41979,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][85] = 127, [0][1][2][0][RTW89_THAILAND][0][85] = 127, [0][1][2][0][RTW89_FCC][1][87] = -2, - [0][1][2][0][RTW89_FCC][2][87] = 127, [0][1][2][0][RTW89_ETSI][1][87] = 127, [0][1][2][0][RTW89_ETSI][0][87] = 127, [0][1][2][0][RTW89_MKK][1][87] = 127, [0][1][2][0][RTW89_MKK][0][87] = 127, [0][1][2][0][RTW89_IC][1][87] = -2, - [0][1][2][0][RTW89_IC][2][87] = 127, [0][1][2][0][RTW89_KCC][1][87] = 20, [0][1][2][0][RTW89_KCC][0][87] = 127, [0][1][2][0][RTW89_ACMA][1][87] = 127, @@ -42962,13 +41996,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][87] = 127, [0][1][2][0][RTW89_THAILAND][0][87] = 127, [0][1][2][0][RTW89_FCC][1][89] = -2, - [0][1][2][0][RTW89_FCC][2][89] = 127, [0][1][2][0][RTW89_ETSI][1][89] = 127, [0][1][2][0][RTW89_ETSI][0][89] = 127, [0][1][2][0][RTW89_MKK][1][89] = 127, [0][1][2][0][RTW89_MKK][0][89] = 127, [0][1][2][0][RTW89_IC][1][89] = -2, - [0][1][2][0][RTW89_IC][2][89] = 127, [0][1][2][0][RTW89_KCC][1][89] = 20, [0][1][2][0][RTW89_KCC][0][89] = 127, [0][1][2][0][RTW89_ACMA][1][89] = 127, @@ -42981,13 +42013,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][89] = 127, [0][1][2][0][RTW89_THAILAND][0][89] = 127, [0][1][2][0][RTW89_FCC][1][90] = -2, - [0][1][2][0][RTW89_FCC][2][90] = 127, [0][1][2][0][RTW89_ETSI][1][90] = 127, [0][1][2][0][RTW89_ETSI][0][90] = 127, [0][1][2][0][RTW89_MKK][1][90] = 127, [0][1][2][0][RTW89_MKK][0][90] = 127, [0][1][2][0][RTW89_IC][1][90] = -2, - [0][1][2][0][RTW89_IC][2][90] = 127, [0][1][2][0][RTW89_KCC][1][90] = 20, [0][1][2][0][RTW89_KCC][0][90] = 127, [0][1][2][0][RTW89_ACMA][1][90] = 127, @@ -43000,13 +42030,11 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][90] = 127, [0][1][2][0][RTW89_THAILAND][0][90] = 127, [0][1][2][0][RTW89_FCC][1][92] = -2, - [0][1][2][0][RTW89_FCC][2][92] = 127, [0][1][2][0][RTW89_ETSI][1][92] = 127, [0][1][2][0][RTW89_ETSI][0][92] = 127, [0][1][2][0][RTW89_MKK][1][92] = 127, [0][1][2][0][RTW89_MKK][0][92] = 127, [0][1][2][0][RTW89_IC][1][92] = -2, - [0][1][2][0][RTW89_IC][2][92] = 127, [0][1][2][0][RTW89_KCC][1][92] = 20, [0][1][2][0][RTW89_KCC][0][92] = 127, [0][1][2][0][RTW89_ACMA][1][92] = 127, @@ -43019,13 +42047,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][92] = 127, [0][1][2][0][RTW89_THAILAND][0][92] = 127, [0][1][2][0][RTW89_FCC][1][94] = -2, - [0][1][2][0][RTW89_FCC][2][94] = 127, [0][1][2][0][RTW89_ETSI][1][94] = 127, [0][1][2][0][RTW89_ETSI][0][94] = 127, [0][1][2][0][RTW89_MKK][1][94] = 127, [0][1][2][0][RTW89_MKK][0][94] = 127, [0][1][2][0][RTW89_IC][1][94] = -2, - [0][1][2][0][RTW89_IC][2][94] = 127, [0][1][2][0][RTW89_KCC][1][94] = 20, [0][1][2][0][RTW89_KCC][0][94] = 127, [0][1][2][0][RTW89_ACMA][1][94] = 127, @@ -43038,13 +42064,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][94] = 127, [0][1][2][0][RTW89_THAILAND][0][94] = 127, [0][1][2][0][RTW89_FCC][1][96] = -2, - [0][1][2][0][RTW89_FCC][2][96] = 127, [0][1][2][0][RTW89_ETSI][1][96] = 127, [0][1][2][0][RTW89_ETSI][0][96] = 127, [0][1][2][0][RTW89_MKK][1][96] = 127, [0][1][2][0][RTW89_MKK][0][96] = 127, [0][1][2][0][RTW89_IC][1][96] = -2, - [0][1][2][0][RTW89_IC][2][96] = 127, [0][1][2][0][RTW89_KCC][1][96] = 20, [0][1][2][0][RTW89_KCC][0][96] = 127, [0][1][2][0][RTW89_ACMA][1][96] = 127, @@ -43057,13 +42081,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][96] = 127, [0][1][2][0][RTW89_THAILAND][0][96] = 127, [0][1][2][0][RTW89_FCC][1][98] = -2, - [0][1][2][0][RTW89_FCC][2][98] = 127, [0][1][2][0][RTW89_ETSI][1][98] = 127, [0][1][2][0][RTW89_ETSI][0][98] = 127, [0][1][2][0][RTW89_MKK][1][98] = 127, [0][1][2][0][RTW89_MKK][0][98] = 127, [0][1][2][0][RTW89_IC][1][98] = -2, - [0][1][2][0][RTW89_IC][2][98] = 127, [0][1][2][0][RTW89_KCC][1][98] = 20, [0][1][2][0][RTW89_KCC][0][98] = 127, [0][1][2][0][RTW89_ACMA][1][98] = 127, @@ -43076,13 +42098,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][98] = 127, [0][1][2][0][RTW89_THAILAND][0][98] = 127, [0][1][2][0][RTW89_FCC][1][100] = -2, - [0][1][2][0][RTW89_FCC][2][100] = 127, [0][1][2][0][RTW89_ETSI][1][100] = 127, [0][1][2][0][RTW89_ETSI][0][100] = 127, [0][1][2][0][RTW89_MKK][1][100] = 127, [0][1][2][0][RTW89_MKK][0][100] = 127, [0][1][2][0][RTW89_IC][1][100] = -2, - [0][1][2][0][RTW89_IC][2][100] = 127, [0][1][2][0][RTW89_KCC][1][100] = 20, [0][1][2][0][RTW89_KCC][0][100] = 127, [0][1][2][0][RTW89_ACMA][1][100] = 127, @@ -43095,13 +42115,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][100] = 127, [0][1][2][0][RTW89_THAILAND][0][100] = 127, [0][1][2][0][RTW89_FCC][1][102] = -2, - [0][1][2][0][RTW89_FCC][2][102] = 127, [0][1][2][0][RTW89_ETSI][1][102] = 127, [0][1][2][0][RTW89_ETSI][0][102] = 127, [0][1][2][0][RTW89_MKK][1][102] = 127, [0][1][2][0][RTW89_MKK][0][102] = 127, [0][1][2][0][RTW89_IC][1][102] = -2, - [0][1][2][0][RTW89_IC][2][102] = 127, [0][1][2][0][RTW89_KCC][1][102] = 20, [0][1][2][0][RTW89_KCC][0][102] = 127, 
[0][1][2][0][RTW89_ACMA][1][102] = 127, @@ -43114,13 +42132,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][102] = 127, [0][1][2][0][RTW89_THAILAND][0][102] = 127, [0][1][2][0][RTW89_FCC][1][104] = -2, - [0][1][2][0][RTW89_FCC][2][104] = 127, [0][1][2][0][RTW89_ETSI][1][104] = 127, [0][1][2][0][RTW89_ETSI][0][104] = 127, [0][1][2][0][RTW89_MKK][1][104] = 127, [0][1][2][0][RTW89_MKK][0][104] = 127, [0][1][2][0][RTW89_IC][1][104] = -2, - [0][1][2][0][RTW89_IC][2][104] = 127, [0][1][2][0][RTW89_KCC][1][104] = 20, [0][1][2][0][RTW89_KCC][0][104] = 127, [0][1][2][0][RTW89_ACMA][1][104] = 127, @@ -43133,13 +42149,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][104] = 127, [0][1][2][0][RTW89_THAILAND][0][104] = 127, [0][1][2][0][RTW89_FCC][1][105] = -2, - [0][1][2][0][RTW89_FCC][2][105] = 127, [0][1][2][0][RTW89_ETSI][1][105] = 127, [0][1][2][0][RTW89_ETSI][0][105] = 127, [0][1][2][0][RTW89_MKK][1][105] = 127, [0][1][2][0][RTW89_MKK][0][105] = 127, [0][1][2][0][RTW89_IC][1][105] = -2, - [0][1][2][0][RTW89_IC][2][105] = 127, [0][1][2][0][RTW89_KCC][1][105] = 20, [0][1][2][0][RTW89_KCC][0][105] = 127, [0][1][2][0][RTW89_ACMA][1][105] = 127, @@ -43152,13 +42166,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][105] = 127, [0][1][2][0][RTW89_THAILAND][0][105] = 127, [0][1][2][0][RTW89_FCC][1][107] = 1, - [0][1][2][0][RTW89_FCC][2][107] = 127, [0][1][2][0][RTW89_ETSI][1][107] = 127, [0][1][2][0][RTW89_ETSI][0][107] = 127, [0][1][2][0][RTW89_MKK][1][107] = 127, [0][1][2][0][RTW89_MKK][0][107] = 127, [0][1][2][0][RTW89_IC][1][107] = 1, - [0][1][2][0][RTW89_IC][2][107] = 127, [0][1][2][0][RTW89_KCC][1][107] = 20, [0][1][2][0][RTW89_KCC][0][107] = 127, [0][1][2][0][RTW89_ACMA][1][107] = 127, @@ -43171,13 +42183,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][107] = 127, [0][1][2][0][RTW89_THAILAND][0][107] = 127, [0][1][2][0][RTW89_FCC][1][109] = 1, - [0][1][2][0][RTW89_FCC][2][109] = 127, [0][1][2][0][RTW89_ETSI][1][109] = 127, [0][1][2][0][RTW89_ETSI][0][109] = 127, [0][1][2][0][RTW89_MKK][1][109] = 127, [0][1][2][0][RTW89_MKK][0][109] = 127, [0][1][2][0][RTW89_IC][1][109] = 1, - [0][1][2][0][RTW89_IC][2][109] = 127, [0][1][2][0][RTW89_KCC][1][109] = 20, [0][1][2][0][RTW89_KCC][0][109] = 127, [0][1][2][0][RTW89_ACMA][1][109] = 127, @@ -43190,13 +42200,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][109] = 127, [0][1][2][0][RTW89_THAILAND][0][109] = 127, [0][1][2][0][RTW89_FCC][1][111] = 127, - [0][1][2][0][RTW89_FCC][2][111] = 127, [0][1][2][0][RTW89_ETSI][1][111] = 127, [0][1][2][0][RTW89_ETSI][0][111] = 127, [0][1][2][0][RTW89_MKK][1][111] = 127, [0][1][2][0][RTW89_MKK][0][111] = 127, [0][1][2][0][RTW89_IC][1][111] = 127, - [0][1][2][0][RTW89_IC][2][111] = 127, [0][1][2][0][RTW89_KCC][1][111] = 127, [0][1][2][0][RTW89_KCC][0][111] = 127, [0][1][2][0][RTW89_ACMA][1][111] = 127, @@ -43209,13 +42217,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][111] = 127, [0][1][2][0][RTW89_THAILAND][0][111] = 127, [0][1][2][0][RTW89_FCC][1][113] = 127, - [0][1][2][0][RTW89_FCC][2][113] = 127, [0][1][2][0][RTW89_ETSI][1][113] = 127, [0][1][2][0][RTW89_ETSI][0][113] = 127, [0][1][2][0][RTW89_MKK][1][113] = 127, [0][1][2][0][RTW89_MKK][0][113] = 127, [0][1][2][0][RTW89_IC][1][113] = 127, - 
[0][1][2][0][RTW89_IC][2][113] = 127, [0][1][2][0][RTW89_KCC][1][113] = 127, [0][1][2][0][RTW89_KCC][0][113] = 127, [0][1][2][0][RTW89_ACMA][1][113] = 127, @@ -43228,13 +42234,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][113] = 127, [0][1][2][0][RTW89_THAILAND][0][113] = 127, [0][1][2][0][RTW89_FCC][1][115] = 127, - [0][1][2][0][RTW89_FCC][2][115] = 127, [0][1][2][0][RTW89_ETSI][1][115] = 127, [0][1][2][0][RTW89_ETSI][0][115] = 127, [0][1][2][0][RTW89_MKK][1][115] = 127, [0][1][2][0][RTW89_MKK][0][115] = 127, [0][1][2][0][RTW89_IC][1][115] = 127, - [0][1][2][0][RTW89_IC][2][115] = 127, [0][1][2][0][RTW89_KCC][1][115] = 127, [0][1][2][0][RTW89_KCC][0][115] = 127, [0][1][2][0][RTW89_ACMA][1][115] = 127, @@ -43247,13 +42251,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][115] = 127, [0][1][2][0][RTW89_THAILAND][0][115] = 127, [0][1][2][0][RTW89_FCC][1][117] = 127, - [0][1][2][0][RTW89_FCC][2][117] = 127, [0][1][2][0][RTW89_ETSI][1][117] = 127, [0][1][2][0][RTW89_ETSI][0][117] = 127, [0][1][2][0][RTW89_MKK][1][117] = 127, [0][1][2][0][RTW89_MKK][0][117] = 127, [0][1][2][0][RTW89_IC][1][117] = 127, - [0][1][2][0][RTW89_IC][2][117] = 127, [0][1][2][0][RTW89_KCC][1][117] = 127, [0][1][2][0][RTW89_KCC][0][117] = 127, [0][1][2][0][RTW89_ACMA][1][117] = 127, @@ -43266,13 +42268,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][117] = 127, [0][1][2][0][RTW89_THAILAND][0][117] = 127, [0][1][2][0][RTW89_FCC][1][119] = 127, - [0][1][2][0][RTW89_FCC][2][119] = 127, [0][1][2][0][RTW89_ETSI][1][119] = 127, [0][1][2][0][RTW89_ETSI][0][119] = 127, [0][1][2][0][RTW89_MKK][1][119] = 127, [0][1][2][0][RTW89_MKK][0][119] = 127, [0][1][2][0][RTW89_IC][1][119] = 127, - [0][1][2][0][RTW89_IC][2][119] = 127, [0][1][2][0][RTW89_KCC][1][119] = 127, [0][1][2][0][RTW89_KCC][0][119] = 127, [0][1][2][0][RTW89_ACMA][1][119] = 127, @@ -43285,13 +42285,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][0][RTW89_THAILAND][1][119] = 127, [0][1][2][0][RTW89_THAILAND][0][119] = 127, [0][1][2][1][RTW89_FCC][1][0] = -2, - [0][1][2][1][RTW89_FCC][2][0] = 54, [0][1][2][1][RTW89_ETSI][1][0] = 42, [0][1][2][1][RTW89_ETSI][0][0] = 6, [0][1][2][1][RTW89_MKK][1][0] = 56, [0][1][2][1][RTW89_MKK][0][0] = 16, [0][1][2][1][RTW89_IC][1][0] = -2, - [0][1][2][1][RTW89_IC][2][0] = 54, [0][1][2][1][RTW89_KCC][1][0] = 12, [0][1][2][1][RTW89_KCC][0][0] = 10, [0][1][2][1][RTW89_ACMA][1][0] = 42, @@ -43304,13 +42302,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][0] = 44, [0][1][2][1][RTW89_THAILAND][0][0] = -2, [0][1][2][1][RTW89_FCC][1][2] = -4, - [0][1][2][1][RTW89_FCC][2][2] = 54, [0][1][2][1][RTW89_ETSI][1][2] = 42, [0][1][2][1][RTW89_ETSI][0][2] = 6, [0][1][2][1][RTW89_MKK][1][2] = 54, [0][1][2][1][RTW89_MKK][0][2] = 16, [0][1][2][1][RTW89_IC][1][2] = -4, - [0][1][2][1][RTW89_IC][2][2] = 54, [0][1][2][1][RTW89_KCC][1][2] = 12, [0][1][2][1][RTW89_KCC][0][2] = 12, [0][1][2][1][RTW89_ACMA][1][2] = 42, @@ -43323,13 +42319,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][2] = 44, [0][1][2][1][RTW89_THAILAND][0][2] = -4, [0][1][2][1][RTW89_FCC][1][4] = -4, - [0][1][2][1][RTW89_FCC][2][4] = 54, [0][1][2][1][RTW89_ETSI][1][4] = 42, [0][1][2][1][RTW89_ETSI][0][4] = 6, [0][1][2][1][RTW89_MKK][1][4] = 54, [0][1][2][1][RTW89_MKK][0][4] = 16, 
[0][1][2][1][RTW89_IC][1][4] = -4, - [0][1][2][1][RTW89_IC][2][4] = 54, [0][1][2][1][RTW89_KCC][1][4] = 12, [0][1][2][1][RTW89_KCC][0][4] = 12, [0][1][2][1][RTW89_ACMA][1][4] = 42, @@ -43342,13 +42336,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][4] = 44, [0][1][2][1][RTW89_THAILAND][0][4] = -4, [0][1][2][1][RTW89_FCC][1][6] = -4, - [0][1][2][1][RTW89_FCC][2][6] = 54, [0][1][2][1][RTW89_ETSI][1][6] = 42, [0][1][2][1][RTW89_ETSI][0][6] = 6, [0][1][2][1][RTW89_MKK][1][6] = 54, [0][1][2][1][RTW89_MKK][0][6] = 16, [0][1][2][1][RTW89_IC][1][6] = -4, - [0][1][2][1][RTW89_IC][2][6] = 54, [0][1][2][1][RTW89_KCC][1][6] = 12, [0][1][2][1][RTW89_KCC][0][6] = 12, [0][1][2][1][RTW89_ACMA][1][6] = 42, @@ -43361,13 +42353,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][6] = 44, [0][1][2][1][RTW89_THAILAND][0][6] = -4, [0][1][2][1][RTW89_FCC][1][8] = -4, - [0][1][2][1][RTW89_FCC][2][8] = 54, [0][1][2][1][RTW89_ETSI][1][8] = 42, [0][1][2][1][RTW89_ETSI][0][8] = 6, [0][1][2][1][RTW89_MKK][1][8] = 54, [0][1][2][1][RTW89_MKK][0][8] = 16, [0][1][2][1][RTW89_IC][1][8] = -4, - [0][1][2][1][RTW89_IC][2][8] = 54, [0][1][2][1][RTW89_KCC][1][8] = 12, [0][1][2][1][RTW89_KCC][0][8] = 12, [0][1][2][1][RTW89_ACMA][1][8] = 42, @@ -43380,13 +42370,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][8] = 44, [0][1][2][1][RTW89_THAILAND][0][8] = -4, [0][1][2][1][RTW89_FCC][1][10] = -4, - [0][1][2][1][RTW89_FCC][2][10] = 54, [0][1][2][1][RTW89_ETSI][1][10] = 42, [0][1][2][1][RTW89_ETSI][0][10] = 6, [0][1][2][1][RTW89_MKK][1][10] = 54, [0][1][2][1][RTW89_MKK][0][10] = 16, [0][1][2][1][RTW89_IC][1][10] = -4, - [0][1][2][1][RTW89_IC][2][10] = 54, [0][1][2][1][RTW89_KCC][1][10] = 12, [0][1][2][1][RTW89_KCC][0][10] = 12, [0][1][2][1][RTW89_ACMA][1][10] = 42, @@ -43399,13 +42387,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][10] = 44, [0][1][2][1][RTW89_THAILAND][0][10] = -4, [0][1][2][1][RTW89_FCC][1][12] = -4, - [0][1][2][1][RTW89_FCC][2][12] = 54, [0][1][2][1][RTW89_ETSI][1][12] = 42, [0][1][2][1][RTW89_ETSI][0][12] = 6, [0][1][2][1][RTW89_MKK][1][12] = 54, [0][1][2][1][RTW89_MKK][0][12] = 16, [0][1][2][1][RTW89_IC][1][12] = -4, - [0][1][2][1][RTW89_IC][2][12] = 54, [0][1][2][1][RTW89_KCC][1][12] = 12, [0][1][2][1][RTW89_KCC][0][12] = 12, [0][1][2][1][RTW89_ACMA][1][12] = 42, @@ -43418,13 +42404,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][12] = 44, [0][1][2][1][RTW89_THAILAND][0][12] = -4, [0][1][2][1][RTW89_FCC][1][14] = -4, - [0][1][2][1][RTW89_FCC][2][14] = 54, [0][1][2][1][RTW89_ETSI][1][14] = 42, [0][1][2][1][RTW89_ETSI][0][14] = 6, [0][1][2][1][RTW89_MKK][1][14] = 54, [0][1][2][1][RTW89_MKK][0][14] = 16, [0][1][2][1][RTW89_IC][1][14] = -4, - [0][1][2][1][RTW89_IC][2][14] = 54, [0][1][2][1][RTW89_KCC][1][14] = 12, [0][1][2][1][RTW89_KCC][0][14] = 12, [0][1][2][1][RTW89_ACMA][1][14] = 42, @@ -43437,13 +42421,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][14] = 44, [0][1][2][1][RTW89_THAILAND][0][14] = -4, [0][1][2][1][RTW89_FCC][1][15] = -4, - [0][1][2][1][RTW89_FCC][2][15] = 54, [0][1][2][1][RTW89_ETSI][1][15] = 42, [0][1][2][1][RTW89_ETSI][0][15] = 6, [0][1][2][1][RTW89_MKK][1][15] = 54, [0][1][2][1][RTW89_MKK][0][15] = 16, [0][1][2][1][RTW89_IC][1][15] = -4, - 
[0][1][2][1][RTW89_IC][2][15] = 54, [0][1][2][1][RTW89_KCC][1][15] = 12, [0][1][2][1][RTW89_KCC][0][15] = 12, [0][1][2][1][RTW89_ACMA][1][15] = 42, @@ -43456,13 +42438,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][15] = 44, [0][1][2][1][RTW89_THAILAND][0][15] = -4, [0][1][2][1][RTW89_FCC][1][17] = -4, - [0][1][2][1][RTW89_FCC][2][17] = 54, [0][1][2][1][RTW89_ETSI][1][17] = 42, [0][1][2][1][RTW89_ETSI][0][17] = 6, [0][1][2][1][RTW89_MKK][1][17] = 54, [0][1][2][1][RTW89_MKK][0][17] = 16, [0][1][2][1][RTW89_IC][1][17] = -4, - [0][1][2][1][RTW89_IC][2][17] = 54, [0][1][2][1][RTW89_KCC][1][17] = 12, [0][1][2][1][RTW89_KCC][0][17] = 12, [0][1][2][1][RTW89_ACMA][1][17] = 42, @@ -43475,13 +42455,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][17] = 44, [0][1][2][1][RTW89_THAILAND][0][17] = -4, [0][1][2][1][RTW89_FCC][1][19] = -4, - [0][1][2][1][RTW89_FCC][2][19] = 54, [0][1][2][1][RTW89_ETSI][1][19] = 42, [0][1][2][1][RTW89_ETSI][0][19] = 6, [0][1][2][1][RTW89_MKK][1][19] = 54, [0][1][2][1][RTW89_MKK][0][19] = 16, [0][1][2][1][RTW89_IC][1][19] = -4, - [0][1][2][1][RTW89_IC][2][19] = 54, [0][1][2][1][RTW89_KCC][1][19] = 12, [0][1][2][1][RTW89_KCC][0][19] = 12, [0][1][2][1][RTW89_ACMA][1][19] = 42, @@ -43494,13 +42472,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][19] = 44, [0][1][2][1][RTW89_THAILAND][0][19] = -4, [0][1][2][1][RTW89_FCC][1][21] = -4, - [0][1][2][1][RTW89_FCC][2][21] = 54, [0][1][2][1][RTW89_ETSI][1][21] = 42, [0][1][2][1][RTW89_ETSI][0][21] = 6, [0][1][2][1][RTW89_MKK][1][21] = 54, [0][1][2][1][RTW89_MKK][0][21] = 16, [0][1][2][1][RTW89_IC][1][21] = -4, - [0][1][2][1][RTW89_IC][2][21] = 54, [0][1][2][1][RTW89_KCC][1][21] = 12, [0][1][2][1][RTW89_KCC][0][21] = 12, [0][1][2][1][RTW89_ACMA][1][21] = 42, @@ -43513,13 +42489,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][21] = 44, [0][1][2][1][RTW89_THAILAND][0][21] = -4, [0][1][2][1][RTW89_FCC][1][23] = -4, - [0][1][2][1][RTW89_FCC][2][23] = 68, [0][1][2][1][RTW89_ETSI][1][23] = 42, [0][1][2][1][RTW89_ETSI][0][23] = 6, [0][1][2][1][RTW89_MKK][1][23] = 54, [0][1][2][1][RTW89_MKK][0][23] = 16, [0][1][2][1][RTW89_IC][1][23] = -4, - [0][1][2][1][RTW89_IC][2][23] = 68, [0][1][2][1][RTW89_KCC][1][23] = 12, [0][1][2][1][RTW89_KCC][0][23] = 10, [0][1][2][1][RTW89_ACMA][1][23] = 42, @@ -43532,13 +42506,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][23] = 44, [0][1][2][1][RTW89_THAILAND][0][23] = -4, [0][1][2][1][RTW89_FCC][1][25] = -4, - [0][1][2][1][RTW89_FCC][2][25] = 68, [0][1][2][1][RTW89_ETSI][1][25] = 42, [0][1][2][1][RTW89_ETSI][0][25] = 6, [0][1][2][1][RTW89_MKK][1][25] = 54, [0][1][2][1][RTW89_MKK][0][25] = 16, [0][1][2][1][RTW89_IC][1][25] = -4, - [0][1][2][1][RTW89_IC][2][25] = 68, [0][1][2][1][RTW89_KCC][1][25] = 12, [0][1][2][1][RTW89_KCC][0][25] = 14, [0][1][2][1][RTW89_ACMA][1][25] = 42, @@ -43551,13 +42523,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][25] = 42, [0][1][2][1][RTW89_THAILAND][0][25] = -4, [0][1][2][1][RTW89_FCC][1][27] = -4, - [0][1][2][1][RTW89_FCC][2][27] = 68, [0][1][2][1][RTW89_ETSI][1][27] = 42, [0][1][2][1][RTW89_ETSI][0][27] = 6, [0][1][2][1][RTW89_MKK][1][27] = 54, [0][1][2][1][RTW89_MKK][0][27] = 16, [0][1][2][1][RTW89_IC][1][27] = -4, - [0][1][2][1][RTW89_IC][2][27] = 
68, [0][1][2][1][RTW89_KCC][1][27] = 12, [0][1][2][1][RTW89_KCC][0][27] = 14, [0][1][2][1][RTW89_ACMA][1][27] = 42, @@ -43570,13 +42540,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][27] = 42, [0][1][2][1][RTW89_THAILAND][0][27] = -4, [0][1][2][1][RTW89_FCC][1][29] = -4, - [0][1][2][1][RTW89_FCC][2][29] = 68, [0][1][2][1][RTW89_ETSI][1][29] = 42, [0][1][2][1][RTW89_ETSI][0][29] = 6, [0][1][2][1][RTW89_MKK][1][29] = 54, [0][1][2][1][RTW89_MKK][0][29] = 16, [0][1][2][1][RTW89_IC][1][29] = -4, - [0][1][2][1][RTW89_IC][2][29] = 68, [0][1][2][1][RTW89_KCC][1][29] = 12, [0][1][2][1][RTW89_KCC][0][29] = 14, [0][1][2][1][RTW89_ACMA][1][29] = 42, @@ -43589,13 +42557,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][29] = 42, [0][1][2][1][RTW89_THAILAND][0][29] = -4, [0][1][2][1][RTW89_FCC][1][30] = -4, - [0][1][2][1][RTW89_FCC][2][30] = 68, [0][1][2][1][RTW89_ETSI][1][30] = 42, [0][1][2][1][RTW89_ETSI][0][30] = 6, [0][1][2][1][RTW89_MKK][1][30] = 54, [0][1][2][1][RTW89_MKK][0][30] = 16, [0][1][2][1][RTW89_IC][1][30] = -4, - [0][1][2][1][RTW89_IC][2][30] = 68, [0][1][2][1][RTW89_KCC][1][30] = 12, [0][1][2][1][RTW89_KCC][0][30] = 14, [0][1][2][1][RTW89_ACMA][1][30] = 42, @@ -43608,13 +42574,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][30] = 42, [0][1][2][1][RTW89_THAILAND][0][30] = -4, [0][1][2][1][RTW89_FCC][1][32] = -4, - [0][1][2][1][RTW89_FCC][2][32] = 68, [0][1][2][1][RTW89_ETSI][1][32] = 42, [0][1][2][1][RTW89_ETSI][0][32] = 6, [0][1][2][1][RTW89_MKK][1][32] = 54, [0][1][2][1][RTW89_MKK][0][32] = 16, [0][1][2][1][RTW89_IC][1][32] = -4, - [0][1][2][1][RTW89_IC][2][32] = 68, [0][1][2][1][RTW89_KCC][1][32] = 12, [0][1][2][1][RTW89_KCC][0][32] = 14, [0][1][2][1][RTW89_ACMA][1][32] = 42, @@ -43627,13 +42591,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][32] = 42, [0][1][2][1][RTW89_THAILAND][0][32] = -4, [0][1][2][1][RTW89_FCC][1][34] = -4, - [0][1][2][1][RTW89_FCC][2][34] = 68, [0][1][2][1][RTW89_ETSI][1][34] = 42, [0][1][2][1][RTW89_ETSI][0][34] = 6, [0][1][2][1][RTW89_MKK][1][34] = 54, [0][1][2][1][RTW89_MKK][0][34] = 16, [0][1][2][1][RTW89_IC][1][34] = -4, - [0][1][2][1][RTW89_IC][2][34] = 68, [0][1][2][1][RTW89_KCC][1][34] = 12, [0][1][2][1][RTW89_KCC][0][34] = 14, [0][1][2][1][RTW89_ACMA][1][34] = 42, @@ -43646,13 +42608,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][34] = 42, [0][1][2][1][RTW89_THAILAND][0][34] = -4, [0][1][2][1][RTW89_FCC][1][36] = -4, - [0][1][2][1][RTW89_FCC][2][36] = 68, [0][1][2][1][RTW89_ETSI][1][36] = 42, [0][1][2][1][RTW89_ETSI][0][36] = 6, [0][1][2][1][RTW89_MKK][1][36] = 54, [0][1][2][1][RTW89_MKK][0][36] = 16, [0][1][2][1][RTW89_IC][1][36] = -4, - [0][1][2][1][RTW89_IC][2][36] = 68, [0][1][2][1][RTW89_KCC][1][36] = 12, [0][1][2][1][RTW89_KCC][0][36] = 14, [0][1][2][1][RTW89_ACMA][1][36] = 42, @@ -43665,13 +42625,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][36] = 42, [0][1][2][1][RTW89_THAILAND][0][36] = -4, [0][1][2][1][RTW89_FCC][1][38] = -4, - [0][1][2][1][RTW89_FCC][2][38] = 68, [0][1][2][1][RTW89_ETSI][1][38] = 42, [0][1][2][1][RTW89_ETSI][0][38] = 6, [0][1][2][1][RTW89_MKK][1][38] = 54, [0][1][2][1][RTW89_MKK][0][38] = 16, [0][1][2][1][RTW89_IC][1][38] = -4, - [0][1][2][1][RTW89_IC][2][38] = 68, 
[0][1][2][1][RTW89_KCC][1][38] = 12, [0][1][2][1][RTW89_KCC][0][38] = 14, [0][1][2][1][RTW89_ACMA][1][38] = 42, @@ -43684,13 +42642,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][38] = 42, [0][1][2][1][RTW89_THAILAND][0][38] = -4, [0][1][2][1][RTW89_FCC][1][40] = -4, - [0][1][2][1][RTW89_FCC][2][40] = 68, [0][1][2][1][RTW89_ETSI][1][40] = 42, [0][1][2][1][RTW89_ETSI][0][40] = 6, [0][1][2][1][RTW89_MKK][1][40] = 54, [0][1][2][1][RTW89_MKK][0][40] = 16, [0][1][2][1][RTW89_IC][1][40] = -4, - [0][1][2][1][RTW89_IC][2][40] = 68, [0][1][2][1][RTW89_KCC][1][40] = 12, [0][1][2][1][RTW89_KCC][0][40] = 14, [0][1][2][1][RTW89_ACMA][1][40] = 42, @@ -43703,13 +42659,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][40] = 42, [0][1][2][1][RTW89_THAILAND][0][40] = -4, [0][1][2][1][RTW89_FCC][1][42] = -4, - [0][1][2][1][RTW89_FCC][2][42] = 68, [0][1][2][1][RTW89_ETSI][1][42] = 42, [0][1][2][1][RTW89_ETSI][0][42] = 6, [0][1][2][1][RTW89_MKK][1][42] = 54, [0][1][2][1][RTW89_MKK][0][42] = 16, [0][1][2][1][RTW89_IC][1][42] = -4, - [0][1][2][1][RTW89_IC][2][42] = 68, [0][1][2][1][RTW89_KCC][1][42] = 12, [0][1][2][1][RTW89_KCC][0][42] = 14, [0][1][2][1][RTW89_ACMA][1][42] = 42, @@ -43722,13 +42676,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][42] = 42, [0][1][2][1][RTW89_THAILAND][0][42] = -4, [0][1][2][1][RTW89_FCC][1][44] = -2, - [0][1][2][1][RTW89_FCC][2][44] = 68, [0][1][2][1][RTW89_ETSI][1][44] = 42, [0][1][2][1][RTW89_ETSI][0][44] = 6, [0][1][2][1][RTW89_MKK][1][44] = 34, [0][1][2][1][RTW89_MKK][0][44] = 16, [0][1][2][1][RTW89_IC][1][44] = -2, - [0][1][2][1][RTW89_IC][2][44] = 68, [0][1][2][1][RTW89_KCC][1][44] = 12, [0][1][2][1][RTW89_KCC][0][44] = 12, [0][1][2][1][RTW89_ACMA][1][44] = 42, @@ -43741,13 +42693,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][44] = 42, [0][1][2][1][RTW89_THAILAND][0][44] = -2, [0][1][2][1][RTW89_FCC][1][45] = -2, - [0][1][2][1][RTW89_FCC][2][45] = 127, [0][1][2][1][RTW89_ETSI][1][45] = 127, [0][1][2][1][RTW89_ETSI][0][45] = 127, [0][1][2][1][RTW89_MKK][1][45] = 127, [0][1][2][1][RTW89_MKK][0][45] = 127, [0][1][2][1][RTW89_IC][1][45] = -2, - [0][1][2][1][RTW89_IC][2][45] = 70, [0][1][2][1][RTW89_KCC][1][45] = 12, [0][1][2][1][RTW89_KCC][0][45] = 127, [0][1][2][1][RTW89_ACMA][1][45] = 127, @@ -43760,13 +42710,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][45] = 127, [0][1][2][1][RTW89_THAILAND][0][45] = 127, [0][1][2][1][RTW89_FCC][1][47] = -2, - [0][1][2][1][RTW89_FCC][2][47] = 127, [0][1][2][1][RTW89_ETSI][1][47] = 127, [0][1][2][1][RTW89_ETSI][0][47] = 127, [0][1][2][1][RTW89_MKK][1][47] = 127, [0][1][2][1][RTW89_MKK][0][47] = 127, [0][1][2][1][RTW89_IC][1][47] = -2, - [0][1][2][1][RTW89_IC][2][47] = 68, [0][1][2][1][RTW89_KCC][1][47] = 12, [0][1][2][1][RTW89_KCC][0][47] = 127, [0][1][2][1][RTW89_ACMA][1][47] = 127, @@ -43779,13 +42727,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][47] = 127, [0][1][2][1][RTW89_THAILAND][0][47] = 127, [0][1][2][1][RTW89_FCC][1][49] = -2, - [0][1][2][1][RTW89_FCC][2][49] = 127, [0][1][2][1][RTW89_ETSI][1][49] = 127, [0][1][2][1][RTW89_ETSI][0][49] = 127, [0][1][2][1][RTW89_MKK][1][49] = 127, [0][1][2][1][RTW89_MKK][0][49] = 127, [0][1][2][1][RTW89_IC][1][49] = -2, - [0][1][2][1][RTW89_IC][2][49] = 68, 
[0][1][2][1][RTW89_KCC][1][49] = 12, [0][1][2][1][RTW89_KCC][0][49] = 127, [0][1][2][1][RTW89_ACMA][1][49] = 127, @@ -43798,13 +42744,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][49] = 127, [0][1][2][1][RTW89_THAILAND][0][49] = 127, [0][1][2][1][RTW89_FCC][1][51] = -2, - [0][1][2][1][RTW89_FCC][2][51] = 127, [0][1][2][1][RTW89_ETSI][1][51] = 127, [0][1][2][1][RTW89_ETSI][0][51] = 127, [0][1][2][1][RTW89_MKK][1][51] = 127, [0][1][2][1][RTW89_MKK][0][51] = 127, [0][1][2][1][RTW89_IC][1][51] = -2, - [0][1][2][1][RTW89_IC][2][51] = 68, [0][1][2][1][RTW89_KCC][1][51] = 12, [0][1][2][1][RTW89_KCC][0][51] = 127, [0][1][2][1][RTW89_ACMA][1][51] = 127, @@ -43817,13 +42761,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][51] = 127, [0][1][2][1][RTW89_THAILAND][0][51] = 127, [0][1][2][1][RTW89_FCC][1][53] = -2, - [0][1][2][1][RTW89_FCC][2][53] = 127, [0][1][2][1][RTW89_ETSI][1][53] = 127, [0][1][2][1][RTW89_ETSI][0][53] = 127, [0][1][2][1][RTW89_MKK][1][53] = 127, [0][1][2][1][RTW89_MKK][0][53] = 127, [0][1][2][1][RTW89_IC][1][53] = -2, - [0][1][2][1][RTW89_IC][2][53] = 68, [0][1][2][1][RTW89_KCC][1][53] = 12, [0][1][2][1][RTW89_KCC][0][53] = 127, [0][1][2][1][RTW89_ACMA][1][53] = 127, @@ -43836,13 +42778,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][53] = 127, [0][1][2][1][RTW89_THAILAND][0][53] = 127, [0][1][2][1][RTW89_FCC][1][55] = -2, - [0][1][2][1][RTW89_FCC][2][55] = 68, [0][1][2][1][RTW89_ETSI][1][55] = 127, [0][1][2][1][RTW89_ETSI][0][55] = 127, [0][1][2][1][RTW89_MKK][1][55] = 127, [0][1][2][1][RTW89_MKK][0][55] = 127, [0][1][2][1][RTW89_IC][1][55] = -2, - [0][1][2][1][RTW89_IC][2][55] = 68, [0][1][2][1][RTW89_KCC][1][55] = 12, [0][1][2][1][RTW89_KCC][0][55] = 127, [0][1][2][1][RTW89_ACMA][1][55] = 127, @@ -43855,13 +42795,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][55] = 127, [0][1][2][1][RTW89_THAILAND][0][55] = 127, [0][1][2][1][RTW89_FCC][1][57] = -2, - [0][1][2][1][RTW89_FCC][2][57] = 68, [0][1][2][1][RTW89_ETSI][1][57] = 127, [0][1][2][1][RTW89_ETSI][0][57] = 127, [0][1][2][1][RTW89_MKK][1][57] = 127, [0][1][2][1][RTW89_MKK][0][57] = 127, [0][1][2][1][RTW89_IC][1][57] = -2, - [0][1][2][1][RTW89_IC][2][57] = 68, [0][1][2][1][RTW89_KCC][1][57] = 12, [0][1][2][1][RTW89_KCC][0][57] = 127, [0][1][2][1][RTW89_ACMA][1][57] = 127, @@ -43874,13 +42812,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][57] = 127, [0][1][2][1][RTW89_THAILAND][0][57] = 127, [0][1][2][1][RTW89_FCC][1][59] = -2, - [0][1][2][1][RTW89_FCC][2][59] = 68, [0][1][2][1][RTW89_ETSI][1][59] = 127, [0][1][2][1][RTW89_ETSI][0][59] = 127, [0][1][2][1][RTW89_MKK][1][59] = 127, [0][1][2][1][RTW89_MKK][0][59] = 127, [0][1][2][1][RTW89_IC][1][59] = -2, - [0][1][2][1][RTW89_IC][2][59] = 68, [0][1][2][1][RTW89_KCC][1][59] = 12, [0][1][2][1][RTW89_KCC][0][59] = 127, [0][1][2][1][RTW89_ACMA][1][59] = 127, @@ -43893,13 +42829,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][59] = 127, [0][1][2][1][RTW89_THAILAND][0][59] = 127, [0][1][2][1][RTW89_FCC][1][60] = -2, - [0][1][2][1][RTW89_FCC][2][60] = 68, [0][1][2][1][RTW89_ETSI][1][60] = 127, [0][1][2][1][RTW89_ETSI][0][60] = 127, [0][1][2][1][RTW89_MKK][1][60] = 127, [0][1][2][1][RTW89_MKK][0][60] = 127, [0][1][2][1][RTW89_IC][1][60] = -2, - 
[0][1][2][1][RTW89_IC][2][60] = 68, [0][1][2][1][RTW89_KCC][1][60] = 12, [0][1][2][1][RTW89_KCC][0][60] = 127, [0][1][2][1][RTW89_ACMA][1][60] = 127, @@ -43912,13 +42846,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][60] = 127, [0][1][2][1][RTW89_THAILAND][0][60] = 127, [0][1][2][1][RTW89_FCC][1][62] = -2, - [0][1][2][1][RTW89_FCC][2][62] = 68, [0][1][2][1][RTW89_ETSI][1][62] = 127, [0][1][2][1][RTW89_ETSI][0][62] = 127, [0][1][2][1][RTW89_MKK][1][62] = 127, [0][1][2][1][RTW89_MKK][0][62] = 127, [0][1][2][1][RTW89_IC][1][62] = -2, - [0][1][2][1][RTW89_IC][2][62] = 68, [0][1][2][1][RTW89_KCC][1][62] = 12, [0][1][2][1][RTW89_KCC][0][62] = 127, [0][1][2][1][RTW89_ACMA][1][62] = 127, @@ -43931,13 +42863,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][62] = 127, [0][1][2][1][RTW89_THAILAND][0][62] = 127, [0][1][2][1][RTW89_FCC][1][64] = -2, - [0][1][2][1][RTW89_FCC][2][64] = 68, [0][1][2][1][RTW89_ETSI][1][64] = 127, [0][1][2][1][RTW89_ETSI][0][64] = 127, [0][1][2][1][RTW89_MKK][1][64] = 127, [0][1][2][1][RTW89_MKK][0][64] = 127, [0][1][2][1][RTW89_IC][1][64] = -2, - [0][1][2][1][RTW89_IC][2][64] = 68, [0][1][2][1][RTW89_KCC][1][64] = 12, [0][1][2][1][RTW89_KCC][0][64] = 127, [0][1][2][1][RTW89_ACMA][1][64] = 127, @@ -43950,13 +42880,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][64] = 127, [0][1][2][1][RTW89_THAILAND][0][64] = 127, [0][1][2][1][RTW89_FCC][1][66] = -2, - [0][1][2][1][RTW89_FCC][2][66] = 68, [0][1][2][1][RTW89_ETSI][1][66] = 127, [0][1][2][1][RTW89_ETSI][0][66] = 127, [0][1][2][1][RTW89_MKK][1][66] = 127, [0][1][2][1][RTW89_MKK][0][66] = 127, [0][1][2][1][RTW89_IC][1][66] = -2, - [0][1][2][1][RTW89_IC][2][66] = 68, [0][1][2][1][RTW89_KCC][1][66] = 12, [0][1][2][1][RTW89_KCC][0][66] = 127, [0][1][2][1][RTW89_ACMA][1][66] = 127, @@ -43969,13 +42897,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][66] = 127, [0][1][2][1][RTW89_THAILAND][0][66] = 127, [0][1][2][1][RTW89_FCC][1][68] = -2, - [0][1][2][1][RTW89_FCC][2][68] = 68, [0][1][2][1][RTW89_ETSI][1][68] = 127, [0][1][2][1][RTW89_ETSI][0][68] = 127, [0][1][2][1][RTW89_MKK][1][68] = 127, [0][1][2][1][RTW89_MKK][0][68] = 127, [0][1][2][1][RTW89_IC][1][68] = -2, - [0][1][2][1][RTW89_IC][2][68] = 68, [0][1][2][1][RTW89_KCC][1][68] = 12, [0][1][2][1][RTW89_KCC][0][68] = 127, [0][1][2][1][RTW89_ACMA][1][68] = 127, @@ -43988,13 +42914,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][68] = 127, [0][1][2][1][RTW89_THAILAND][0][68] = 127, [0][1][2][1][RTW89_FCC][1][70] = -2, - [0][1][2][1][RTW89_FCC][2][70] = 68, [0][1][2][1][RTW89_ETSI][1][70] = 127, [0][1][2][1][RTW89_ETSI][0][70] = 127, [0][1][2][1][RTW89_MKK][1][70] = 127, [0][1][2][1][RTW89_MKK][0][70] = 127, [0][1][2][1][RTW89_IC][1][70] = -2, - [0][1][2][1][RTW89_IC][2][70] = 68, [0][1][2][1][RTW89_KCC][1][70] = 12, [0][1][2][1][RTW89_KCC][0][70] = 127, [0][1][2][1][RTW89_ACMA][1][70] = 127, @@ -44007,13 +42931,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][70] = 127, [0][1][2][1][RTW89_THAILAND][0][70] = 127, [0][1][2][1][RTW89_FCC][1][72] = -2, - [0][1][2][1][RTW89_FCC][2][72] = 68, [0][1][2][1][RTW89_ETSI][1][72] = 127, [0][1][2][1][RTW89_ETSI][0][72] = 127, [0][1][2][1][RTW89_MKK][1][72] = 127, [0][1][2][1][RTW89_MKK][0][72] = 127, 
[0][1][2][1][RTW89_IC][1][72] = -2, - [0][1][2][1][RTW89_IC][2][72] = 68, [0][1][2][1][RTW89_KCC][1][72] = 12, [0][1][2][1][RTW89_KCC][0][72] = 127, [0][1][2][1][RTW89_ACMA][1][72] = 127, @@ -44026,13 +42948,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][72] = 127, [0][1][2][1][RTW89_THAILAND][0][72] = 127, [0][1][2][1][RTW89_FCC][1][74] = -2, - [0][1][2][1][RTW89_FCC][2][74] = 68, [0][1][2][1][RTW89_ETSI][1][74] = 127, [0][1][2][1][RTW89_ETSI][0][74] = 127, [0][1][2][1][RTW89_MKK][1][74] = 127, [0][1][2][1][RTW89_MKK][0][74] = 127, [0][1][2][1][RTW89_IC][1][74] = -2, - [0][1][2][1][RTW89_IC][2][74] = 68, [0][1][2][1][RTW89_KCC][1][74] = 12, [0][1][2][1][RTW89_KCC][0][74] = 127, [0][1][2][1][RTW89_ACMA][1][74] = 127, @@ -44045,13 +42965,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][74] = 127, [0][1][2][1][RTW89_THAILAND][0][74] = 127, [0][1][2][1][RTW89_FCC][1][75] = -2, - [0][1][2][1][RTW89_FCC][2][75] = 68, [0][1][2][1][RTW89_ETSI][1][75] = 127, [0][1][2][1][RTW89_ETSI][0][75] = 127, [0][1][2][1][RTW89_MKK][1][75] = 127, [0][1][2][1][RTW89_MKK][0][75] = 127, [0][1][2][1][RTW89_IC][1][75] = -2, - [0][1][2][1][RTW89_IC][2][75] = 68, [0][1][2][1][RTW89_KCC][1][75] = 12, [0][1][2][1][RTW89_KCC][0][75] = 127, [0][1][2][1][RTW89_ACMA][1][75] = 127, @@ -44064,13 +42982,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][75] = 127, [0][1][2][1][RTW89_THAILAND][0][75] = 127, [0][1][2][1][RTW89_FCC][1][77] = -2, - [0][1][2][1][RTW89_FCC][2][77] = 68, [0][1][2][1][RTW89_ETSI][1][77] = 127, [0][1][2][1][RTW89_ETSI][0][77] = 127, [0][1][2][1][RTW89_MKK][1][77] = 127, [0][1][2][1][RTW89_MKK][0][77] = 127, [0][1][2][1][RTW89_IC][1][77] = -2, - [0][1][2][1][RTW89_IC][2][77] = 68, [0][1][2][1][RTW89_KCC][1][77] = 12, [0][1][2][1][RTW89_KCC][0][77] = 127, [0][1][2][1][RTW89_ACMA][1][77] = 127, @@ -44083,13 +42999,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][77] = 127, [0][1][2][1][RTW89_THAILAND][0][77] = 127, [0][1][2][1][RTW89_FCC][1][79] = -2, - [0][1][2][1][RTW89_FCC][2][79] = 68, [0][1][2][1][RTW89_ETSI][1][79] = 127, [0][1][2][1][RTW89_ETSI][0][79] = 127, [0][1][2][1][RTW89_MKK][1][79] = 127, [0][1][2][1][RTW89_MKK][0][79] = 127, [0][1][2][1][RTW89_IC][1][79] = -2, - [0][1][2][1][RTW89_IC][2][79] = 68, [0][1][2][1][RTW89_KCC][1][79] = 12, [0][1][2][1][RTW89_KCC][0][79] = 127, [0][1][2][1][RTW89_ACMA][1][79] = 127, @@ -44102,13 +43016,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][79] = 127, [0][1][2][1][RTW89_THAILAND][0][79] = 127, [0][1][2][1][RTW89_FCC][1][81] = -2, - [0][1][2][1][RTW89_FCC][2][81] = 68, [0][1][2][1][RTW89_ETSI][1][81] = 127, [0][1][2][1][RTW89_ETSI][0][81] = 127, [0][1][2][1][RTW89_MKK][1][81] = 127, [0][1][2][1][RTW89_MKK][0][81] = 127, [0][1][2][1][RTW89_IC][1][81] = -2, - [0][1][2][1][RTW89_IC][2][81] = 68, [0][1][2][1][RTW89_KCC][1][81] = 12, [0][1][2][1][RTW89_KCC][0][81] = 127, [0][1][2][1][RTW89_ACMA][1][81] = 127, @@ -44121,13 +43033,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][81] = 127, [0][1][2][1][RTW89_THAILAND][0][81] = 127, [0][1][2][1][RTW89_FCC][1][83] = -2, - [0][1][2][1][RTW89_FCC][2][83] = 68, [0][1][2][1][RTW89_ETSI][1][83] = 127, [0][1][2][1][RTW89_ETSI][0][83] = 127, [0][1][2][1][RTW89_MKK][1][83] = 127, 
[0][1][2][1][RTW89_MKK][0][83] = 127, [0][1][2][1][RTW89_IC][1][83] = -2, - [0][1][2][1][RTW89_IC][2][83] = 68, [0][1][2][1][RTW89_KCC][1][83] = 20, [0][1][2][1][RTW89_KCC][0][83] = 127, [0][1][2][1][RTW89_ACMA][1][83] = 127, @@ -44140,13 +43050,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][83] = 127, [0][1][2][1][RTW89_THAILAND][0][83] = 127, [0][1][2][1][RTW89_FCC][1][85] = -2, - [0][1][2][1][RTW89_FCC][2][85] = 68, [0][1][2][1][RTW89_ETSI][1][85] = 127, [0][1][2][1][RTW89_ETSI][0][85] = 127, [0][1][2][1][RTW89_MKK][1][85] = 127, [0][1][2][1][RTW89_MKK][0][85] = 127, [0][1][2][1][RTW89_IC][1][85] = -2, - [0][1][2][1][RTW89_IC][2][85] = 68, [0][1][2][1][RTW89_KCC][1][85] = 20, [0][1][2][1][RTW89_KCC][0][85] = 127, [0][1][2][1][RTW89_ACMA][1][85] = 127, @@ -44159,13 +43067,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][85] = 127, [0][1][2][1][RTW89_THAILAND][0][85] = 127, [0][1][2][1][RTW89_FCC][1][87] = -2, - [0][1][2][1][RTW89_FCC][2][87] = 127, [0][1][2][1][RTW89_ETSI][1][87] = 127, [0][1][2][1][RTW89_ETSI][0][87] = 127, [0][1][2][1][RTW89_MKK][1][87] = 127, [0][1][2][1][RTW89_MKK][0][87] = 127, [0][1][2][1][RTW89_IC][1][87] = -2, - [0][1][2][1][RTW89_IC][2][87] = 127, [0][1][2][1][RTW89_KCC][1][87] = 20, [0][1][2][1][RTW89_KCC][0][87] = 127, [0][1][2][1][RTW89_ACMA][1][87] = 127, @@ -44178,13 +43084,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][87] = 127, [0][1][2][1][RTW89_THAILAND][0][87] = 127, [0][1][2][1][RTW89_FCC][1][89] = -2, - [0][1][2][1][RTW89_FCC][2][89] = 127, [0][1][2][1][RTW89_ETSI][1][89] = 127, [0][1][2][1][RTW89_ETSI][0][89] = 127, [0][1][2][1][RTW89_MKK][1][89] = 127, [0][1][2][1][RTW89_MKK][0][89] = 127, [0][1][2][1][RTW89_IC][1][89] = -2, - [0][1][2][1][RTW89_IC][2][89] = 127, [0][1][2][1][RTW89_KCC][1][89] = 20, [0][1][2][1][RTW89_KCC][0][89] = 127, [0][1][2][1][RTW89_ACMA][1][89] = 127, @@ -44197,13 +43101,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][89] = 127, [0][1][2][1][RTW89_THAILAND][0][89] = 127, [0][1][2][1][RTW89_FCC][1][90] = -2, - [0][1][2][1][RTW89_FCC][2][90] = 127, [0][1][2][1][RTW89_ETSI][1][90] = 127, [0][1][2][1][RTW89_ETSI][0][90] = 127, [0][1][2][1][RTW89_MKK][1][90] = 127, [0][1][2][1][RTW89_MKK][0][90] = 127, [0][1][2][1][RTW89_IC][1][90] = -2, - [0][1][2][1][RTW89_IC][2][90] = 127, [0][1][2][1][RTW89_KCC][1][90] = 20, [0][1][2][1][RTW89_KCC][0][90] = 127, [0][1][2][1][RTW89_ACMA][1][90] = 127, @@ -44216,13 +43118,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][90] = 127, [0][1][2][1][RTW89_THAILAND][0][90] = 127, [0][1][2][1][RTW89_FCC][1][92] = -2, - [0][1][2][1][RTW89_FCC][2][92] = 127, [0][1][2][1][RTW89_ETSI][1][92] = 127, [0][1][2][1][RTW89_ETSI][0][92] = 127, [0][1][2][1][RTW89_MKK][1][92] = 127, [0][1][2][1][RTW89_MKK][0][92] = 127, [0][1][2][1][RTW89_IC][1][92] = -2, - [0][1][2][1][RTW89_IC][2][92] = 127, [0][1][2][1][RTW89_KCC][1][92] = 20, [0][1][2][1][RTW89_KCC][0][92] = 127, [0][1][2][1][RTW89_ACMA][1][92] = 127, @@ -44235,13 +43135,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][92] = 127, [0][1][2][1][RTW89_THAILAND][0][92] = 127, [0][1][2][1][RTW89_FCC][1][94] = -2, - [0][1][2][1][RTW89_FCC][2][94] = 127, [0][1][2][1][RTW89_ETSI][1][94] = 127, [0][1][2][1][RTW89_ETSI][0][94] = 127, 
[0][1][2][1][RTW89_MKK][1][94] = 127, [0][1][2][1][RTW89_MKK][0][94] = 127, [0][1][2][1][RTW89_IC][1][94] = -2, - [0][1][2][1][RTW89_IC][2][94] = 127, [0][1][2][1][RTW89_KCC][1][94] = 20, [0][1][2][1][RTW89_KCC][0][94] = 127, [0][1][2][1][RTW89_ACMA][1][94] = 127, @@ -44254,13 +43152,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][94] = 127, [0][1][2][1][RTW89_THAILAND][0][94] = 127, [0][1][2][1][RTW89_FCC][1][96] = -2, - [0][1][2][1][RTW89_FCC][2][96] = 127, [0][1][2][1][RTW89_ETSI][1][96] = 127, [0][1][2][1][RTW89_ETSI][0][96] = 127, [0][1][2][1][RTW89_MKK][1][96] = 127, [0][1][2][1][RTW89_MKK][0][96] = 127, [0][1][2][1][RTW89_IC][1][96] = -2, - [0][1][2][1][RTW89_IC][2][96] = 127, [0][1][2][1][RTW89_KCC][1][96] = 20, [0][1][2][1][RTW89_KCC][0][96] = 127, [0][1][2][1][RTW89_ACMA][1][96] = 127, @@ -44273,13 +43169,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][96] = 127, [0][1][2][1][RTW89_THAILAND][0][96] = 127, [0][1][2][1][RTW89_FCC][1][98] = -2, - [0][1][2][1][RTW89_FCC][2][98] = 127, [0][1][2][1][RTW89_ETSI][1][98] = 127, [0][1][2][1][RTW89_ETSI][0][98] = 127, [0][1][2][1][RTW89_MKK][1][98] = 127, [0][1][2][1][RTW89_MKK][0][98] = 127, [0][1][2][1][RTW89_IC][1][98] = -2, - [0][1][2][1][RTW89_IC][2][98] = 127, [0][1][2][1][RTW89_KCC][1][98] = 20, [0][1][2][1][RTW89_KCC][0][98] = 127, [0][1][2][1][RTW89_ACMA][1][98] = 127, @@ -44292,13 +43186,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][98] = 127, [0][1][2][1][RTW89_THAILAND][0][98] = 127, [0][1][2][1][RTW89_FCC][1][100] = -2, - [0][1][2][1][RTW89_FCC][2][100] = 127, [0][1][2][1][RTW89_ETSI][1][100] = 127, [0][1][2][1][RTW89_ETSI][0][100] = 127, [0][1][2][1][RTW89_MKK][1][100] = 127, [0][1][2][1][RTW89_MKK][0][100] = 127, [0][1][2][1][RTW89_IC][1][100] = -2, - [0][1][2][1][RTW89_IC][2][100] = 127, [0][1][2][1][RTW89_KCC][1][100] = 20, [0][1][2][1][RTW89_KCC][0][100] = 127, [0][1][2][1][RTW89_ACMA][1][100] = 127, @@ -44311,13 +43203,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][100] = 127, [0][1][2][1][RTW89_THAILAND][0][100] = 127, [0][1][2][1][RTW89_FCC][1][102] = -2, - [0][1][2][1][RTW89_FCC][2][102] = 127, [0][1][2][1][RTW89_ETSI][1][102] = 127, [0][1][2][1][RTW89_ETSI][0][102] = 127, [0][1][2][1][RTW89_MKK][1][102] = 127, [0][1][2][1][RTW89_MKK][0][102] = 127, [0][1][2][1][RTW89_IC][1][102] = -2, - [0][1][2][1][RTW89_IC][2][102] = 127, [0][1][2][1][RTW89_KCC][1][102] = 20, [0][1][2][1][RTW89_KCC][0][102] = 127, [0][1][2][1][RTW89_ACMA][1][102] = 127, @@ -44330,13 +43220,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][102] = 127, [0][1][2][1][RTW89_THAILAND][0][102] = 127, [0][1][2][1][RTW89_FCC][1][104] = -2, - [0][1][2][1][RTW89_FCC][2][104] = 127, [0][1][2][1][RTW89_ETSI][1][104] = 127, [0][1][2][1][RTW89_ETSI][0][104] = 127, [0][1][2][1][RTW89_MKK][1][104] = 127, [0][1][2][1][RTW89_MKK][0][104] = 127, [0][1][2][1][RTW89_IC][1][104] = -2, - [0][1][2][1][RTW89_IC][2][104] = 127, [0][1][2][1][RTW89_KCC][1][104] = 20, [0][1][2][1][RTW89_KCC][0][104] = 127, [0][1][2][1][RTW89_ACMA][1][104] = 127, @@ -44349,13 +43237,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][104] = 127, [0][1][2][1][RTW89_THAILAND][0][104] = 127, [0][1][2][1][RTW89_FCC][1][105] = -2, - [0][1][2][1][RTW89_FCC][2][105] = 127, 
[0][1][2][1][RTW89_ETSI][1][105] = 127, [0][1][2][1][RTW89_ETSI][0][105] = 127, [0][1][2][1][RTW89_MKK][1][105] = 127, [0][1][2][1][RTW89_MKK][0][105] = 127, [0][1][2][1][RTW89_IC][1][105] = -2, - [0][1][2][1][RTW89_IC][2][105] = 127, [0][1][2][1][RTW89_KCC][1][105] = 20, [0][1][2][1][RTW89_KCC][0][105] = 127, [0][1][2][1][RTW89_ACMA][1][105] = 127, @@ -44368,13 +43254,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][105] = 127, [0][1][2][1][RTW89_THAILAND][0][105] = 127, [0][1][2][1][RTW89_FCC][1][107] = 1, - [0][1][2][1][RTW89_FCC][2][107] = 127, [0][1][2][1][RTW89_ETSI][1][107] = 127, [0][1][2][1][RTW89_ETSI][0][107] = 127, [0][1][2][1][RTW89_MKK][1][107] = 127, [0][1][2][1][RTW89_MKK][0][107] = 127, [0][1][2][1][RTW89_IC][1][107] = 1, - [0][1][2][1][RTW89_IC][2][107] = 127, [0][1][2][1][RTW89_KCC][1][107] = 20, [0][1][2][1][RTW89_KCC][0][107] = 127, [0][1][2][1][RTW89_ACMA][1][107] = 127, @@ -44387,13 +43271,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][107] = 127, [0][1][2][1][RTW89_THAILAND][0][107] = 127, [0][1][2][1][RTW89_FCC][1][109] = 1, - [0][1][2][1][RTW89_FCC][2][109] = 127, [0][1][2][1][RTW89_ETSI][1][109] = 127, [0][1][2][1][RTW89_ETSI][0][109] = 127, [0][1][2][1][RTW89_MKK][1][109] = 127, [0][1][2][1][RTW89_MKK][0][109] = 127, [0][1][2][1][RTW89_IC][1][109] = 1, - [0][1][2][1][RTW89_IC][2][109] = 127, [0][1][2][1][RTW89_KCC][1][109] = 20, [0][1][2][1][RTW89_KCC][0][109] = 127, [0][1][2][1][RTW89_ACMA][1][109] = 127, @@ -44406,13 +43288,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][109] = 127, [0][1][2][1][RTW89_THAILAND][0][109] = 127, [0][1][2][1][RTW89_FCC][1][111] = 127, - [0][1][2][1][RTW89_FCC][2][111] = 127, [0][1][2][1][RTW89_ETSI][1][111] = 127, [0][1][2][1][RTW89_ETSI][0][111] = 127, [0][1][2][1][RTW89_MKK][1][111] = 127, [0][1][2][1][RTW89_MKK][0][111] = 127, [0][1][2][1][RTW89_IC][1][111] = 127, - [0][1][2][1][RTW89_IC][2][111] = 127, [0][1][2][1][RTW89_KCC][1][111] = 127, [0][1][2][1][RTW89_KCC][0][111] = 127, [0][1][2][1][RTW89_ACMA][1][111] = 127, @@ -44425,13 +43305,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][111] = 127, [0][1][2][1][RTW89_THAILAND][0][111] = 127, [0][1][2][1][RTW89_FCC][1][113] = 127, - [0][1][2][1][RTW89_FCC][2][113] = 127, [0][1][2][1][RTW89_ETSI][1][113] = 127, [0][1][2][1][RTW89_ETSI][0][113] = 127, [0][1][2][1][RTW89_MKK][1][113] = 127, [0][1][2][1][RTW89_MKK][0][113] = 127, [0][1][2][1][RTW89_IC][1][113] = 127, - [0][1][2][1][RTW89_IC][2][113] = 127, [0][1][2][1][RTW89_KCC][1][113] = 127, [0][1][2][1][RTW89_KCC][0][113] = 127, [0][1][2][1][RTW89_ACMA][1][113] = 127, @@ -44444,13 +43322,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][113] = 127, [0][1][2][1][RTW89_THAILAND][0][113] = 127, [0][1][2][1][RTW89_FCC][1][115] = 127, - [0][1][2][1][RTW89_FCC][2][115] = 127, [0][1][2][1][RTW89_ETSI][1][115] = 127, [0][1][2][1][RTW89_ETSI][0][115] = 127, [0][1][2][1][RTW89_MKK][1][115] = 127, [0][1][2][1][RTW89_MKK][0][115] = 127, [0][1][2][1][RTW89_IC][1][115] = 127, - [0][1][2][1][RTW89_IC][2][115] = 127, [0][1][2][1][RTW89_KCC][1][115] = 127, [0][1][2][1][RTW89_KCC][0][115] = 127, [0][1][2][1][RTW89_ACMA][1][115] = 127, @@ -44463,13 +43339,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][115] = 127, 
[0][1][2][1][RTW89_THAILAND][0][115] = 127, [0][1][2][1][RTW89_FCC][1][117] = 127, - [0][1][2][1][RTW89_FCC][2][117] = 127, [0][1][2][1][RTW89_ETSI][1][117] = 127, [0][1][2][1][RTW89_ETSI][0][117] = 127, [0][1][2][1][RTW89_MKK][1][117] = 127, [0][1][2][1][RTW89_MKK][0][117] = 127, [0][1][2][1][RTW89_IC][1][117] = 127, - [0][1][2][1][RTW89_IC][2][117] = 127, [0][1][2][1][RTW89_KCC][1][117] = 127, [0][1][2][1][RTW89_KCC][0][117] = 127, [0][1][2][1][RTW89_ACMA][1][117] = 127, @@ -44482,13 +43356,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][117] = 127, [0][1][2][1][RTW89_THAILAND][0][117] = 127, [0][1][2][1][RTW89_FCC][1][119] = 127, - [0][1][2][1][RTW89_FCC][2][119] = 127, [0][1][2][1][RTW89_ETSI][1][119] = 127, [0][1][2][1][RTW89_ETSI][0][119] = 127, [0][1][2][1][RTW89_MKK][1][119] = 127, [0][1][2][1][RTW89_MKK][0][119] = 127, [0][1][2][1][RTW89_IC][1][119] = 127, - [0][1][2][1][RTW89_IC][2][119] = 127, [0][1][2][1][RTW89_KCC][1][119] = 127, [0][1][2][1][RTW89_KCC][0][119] = 127, [0][1][2][1][RTW89_ACMA][1][119] = 127, @@ -44501,13 +43373,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [0][1][2][1][RTW89_THAILAND][1][119] = 127, [0][1][2][1][RTW89_THAILAND][0][119] = 127, [1][0][2][0][RTW89_FCC][1][1] = 34, - [1][0][2][0][RTW89_FCC][2][1] = 70, [1][0][2][0][RTW89_ETSI][1][1] = 66, [1][0][2][0][RTW89_ETSI][0][1] = 30, [1][0][2][0][RTW89_MKK][1][1] = 62, [1][0][2][0][RTW89_MKK][0][1] = 26, [1][0][2][0][RTW89_IC][1][1] = 34, - [1][0][2][0][RTW89_IC][2][1] = 70, [1][0][2][0][RTW89_KCC][1][1] = 40, [1][0][2][0][RTW89_KCC][0][1] = 24, [1][0][2][0][RTW89_ACMA][1][1] = 66, @@ -44520,13 +43390,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_THAILAND][1][1] = 68, [1][0][2][0][RTW89_THAILAND][0][1] = 30, [1][0][2][0][RTW89_FCC][1][5] = 34, - [1][0][2][0][RTW89_FCC][2][5] = 70, [1][0][2][0][RTW89_ETSI][1][5] = 66, [1][0][2][0][RTW89_ETSI][0][5] = 30, [1][0][2][0][RTW89_MKK][1][5] = 62, [1][0][2][0][RTW89_MKK][0][5] = 26, [1][0][2][0][RTW89_IC][1][5] = 34, - [1][0][2][0][RTW89_IC][2][5] = 70, [1][0][2][0][RTW89_KCC][1][5] = 40, [1][0][2][0][RTW89_KCC][0][5] = 24, [1][0][2][0][RTW89_ACMA][1][5] = 66, @@ -44539,13 +43407,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_THAILAND][1][5] = 68, [1][0][2][0][RTW89_THAILAND][0][5] = 30, [1][0][2][0][RTW89_FCC][1][9] = 34, - [1][0][2][0][RTW89_FCC][2][9] = 70, [1][0][2][0][RTW89_ETSI][1][9] = 66, [1][0][2][0][RTW89_ETSI][0][9] = 30, [1][0][2][0][RTW89_MKK][1][9] = 62, [1][0][2][0][RTW89_MKK][0][9] = 26, [1][0][2][0][RTW89_IC][1][9] = 34, - [1][0][2][0][RTW89_IC][2][9] = 70, [1][0][2][0][RTW89_KCC][1][9] = 40, [1][0][2][0][RTW89_KCC][0][9] = 24, [1][0][2][0][RTW89_ACMA][1][9] = 66, @@ -44558,13 +43424,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_THAILAND][1][9] = 68, [1][0][2][0][RTW89_THAILAND][0][9] = 30, [1][0][2][0][RTW89_FCC][1][13] = 34, - [1][0][2][0][RTW89_FCC][2][13] = 70, [1][0][2][0][RTW89_ETSI][1][13] = 66, [1][0][2][0][RTW89_ETSI][0][13] = 30, [1][0][2][0][RTW89_MKK][1][13] = 62, [1][0][2][0][RTW89_MKK][0][13] = 26, [1][0][2][0][RTW89_IC][1][13] = 34, - [1][0][2][0][RTW89_IC][2][13] = 70, [1][0][2][0][RTW89_KCC][1][13] = 40, [1][0][2][0][RTW89_KCC][0][13] = 24, [1][0][2][0][RTW89_ACMA][1][13] = 66, @@ -44577,13 +43441,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_THAILAND][1][13] = 68, 
[1][0][2][0][RTW89_THAILAND][0][13] = 30, [1][0][2][0][RTW89_FCC][1][16] = 34, - [1][0][2][0][RTW89_FCC][2][16] = 70, [1][0][2][0][RTW89_ETSI][1][16] = 66, [1][0][2][0][RTW89_ETSI][0][16] = 30, [1][0][2][0][RTW89_MKK][1][16] = 62, [1][0][2][0][RTW89_MKK][0][16] = 26, [1][0][2][0][RTW89_IC][1][16] = 34, - [1][0][2][0][RTW89_IC][2][16] = 70, [1][0][2][0][RTW89_KCC][1][16] = 40, [1][0][2][0][RTW89_KCC][0][16] = 24, [1][0][2][0][RTW89_ACMA][1][16] = 66, @@ -44596,13 +43458,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_THAILAND][1][16] = 68, [1][0][2][0][RTW89_THAILAND][0][16] = 30, [1][0][2][0][RTW89_FCC][1][20] = 34, - [1][0][2][0][RTW89_FCC][2][20] = 70, [1][0][2][0][RTW89_ETSI][1][20] = 66, [1][0][2][0][RTW89_ETSI][0][20] = 30, [1][0][2][0][RTW89_MKK][1][20] = 62, [1][0][2][0][RTW89_MKK][0][20] = 26, [1][0][2][0][RTW89_IC][1][20] = 34, - [1][0][2][0][RTW89_IC][2][20] = 70, [1][0][2][0][RTW89_KCC][1][20] = 40, [1][0][2][0][RTW89_KCC][0][20] = 24, [1][0][2][0][RTW89_ACMA][1][20] = 66, @@ -44615,13 +43475,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_THAILAND][1][20] = 68, [1][0][2][0][RTW89_THAILAND][0][20] = 30, [1][0][2][0][RTW89_FCC][1][24] = 36, - [1][0][2][0][RTW89_FCC][2][24] = 70, [1][0][2][0][RTW89_ETSI][1][24] = 66, [1][0][2][0][RTW89_ETSI][0][24] = 30, [1][0][2][0][RTW89_MKK][1][24] = 64, [1][0][2][0][RTW89_MKK][0][24] = 28, [1][0][2][0][RTW89_IC][1][24] = 36, - [1][0][2][0][RTW89_IC][2][24] = 70, [1][0][2][0][RTW89_KCC][1][24] = 40, [1][0][2][0][RTW89_KCC][0][24] = 26, [1][0][2][0][RTW89_ACMA][1][24] = 66, @@ -44634,13 +43492,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_THAILAND][1][24] = 68, [1][0][2][0][RTW89_THAILAND][0][24] = 30, [1][0][2][0][RTW89_FCC][1][28] = 34, - [1][0][2][0][RTW89_FCC][2][28] = 70, [1][0][2][0][RTW89_ETSI][1][28] = 66, [1][0][2][0][RTW89_ETSI][0][28] = 30, [1][0][2][0][RTW89_MKK][1][28] = 64, [1][0][2][0][RTW89_MKK][0][28] = 26, [1][0][2][0][RTW89_IC][1][28] = 34, - [1][0][2][0][RTW89_IC][2][28] = 70, [1][0][2][0][RTW89_KCC][1][28] = 40, [1][0][2][0][RTW89_KCC][0][28] = 26, [1][0][2][0][RTW89_ACMA][1][28] = 66, @@ -44653,13 +43509,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_THAILAND][1][28] = 68, [1][0][2][0][RTW89_THAILAND][0][28] = 30, [1][0][2][0][RTW89_FCC][1][31] = 34, - [1][0][2][0][RTW89_FCC][2][31] = 70, [1][0][2][0][RTW89_ETSI][1][31] = 66, [1][0][2][0][RTW89_ETSI][0][31] = 30, [1][0][2][0][RTW89_MKK][1][31] = 64, [1][0][2][0][RTW89_MKK][0][31] = 26, [1][0][2][0][RTW89_IC][1][31] = 34, - [1][0][2][0][RTW89_IC][2][31] = 70, [1][0][2][0][RTW89_KCC][1][31] = 40, [1][0][2][0][RTW89_KCC][0][31] = 26, [1][0][2][0][RTW89_ACMA][1][31] = 66, @@ -44672,13 +43526,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_THAILAND][1][31] = 68, [1][0][2][0][RTW89_THAILAND][0][31] = 30, [1][0][2][0][RTW89_FCC][1][35] = 34, - [1][0][2][0][RTW89_FCC][2][35] = 70, [1][0][2][0][RTW89_ETSI][1][35] = 66, [1][0][2][0][RTW89_ETSI][0][35] = 30, [1][0][2][0][RTW89_MKK][1][35] = 64, [1][0][2][0][RTW89_MKK][0][35] = 26, [1][0][2][0][RTW89_IC][1][35] = 34, - [1][0][2][0][RTW89_IC][2][35] = 70, [1][0][2][0][RTW89_KCC][1][35] = 40, [1][0][2][0][RTW89_KCC][0][35] = 26, [1][0][2][0][RTW89_ACMA][1][35] = 66, @@ -44691,13 +43543,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_THAILAND][1][35] = 68, 
[1][0][2][0][RTW89_THAILAND][0][35] = 30, [1][0][2][0][RTW89_FCC][1][39] = 34, - [1][0][2][0][RTW89_FCC][2][39] = 70, [1][0][2][0][RTW89_ETSI][1][39] = 66, [1][0][2][0][RTW89_ETSI][0][39] = 30, [1][0][2][0][RTW89_MKK][1][39] = 64, [1][0][2][0][RTW89_MKK][0][39] = 26, [1][0][2][0][RTW89_IC][1][39] = 34, - [1][0][2][0][RTW89_IC][2][39] = 70, [1][0][2][0][RTW89_KCC][1][39] = 40, [1][0][2][0][RTW89_KCC][0][39] = 26, [1][0][2][0][RTW89_ACMA][1][39] = 66, @@ -44710,13 +43560,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_THAILAND][1][39] = 68, [1][0][2][0][RTW89_THAILAND][0][39] = 30, [1][0][2][0][RTW89_FCC][1][43] = 34, - [1][0][2][0][RTW89_FCC][2][43] = 70, [1][0][2][0][RTW89_ETSI][1][43] = 66, [1][0][2][0][RTW89_ETSI][0][43] = 30, [1][0][2][0][RTW89_MKK][1][43] = 64, [1][0][2][0][RTW89_MKK][0][43] = 26, [1][0][2][0][RTW89_IC][1][43] = 34, - [1][0][2][0][RTW89_IC][2][43] = 70, [1][0][2][0][RTW89_KCC][1][43] = 40, [1][0][2][0][RTW89_KCC][0][43] = 26, [1][0][2][0][RTW89_ACMA][1][43] = 66, @@ -44729,13 +43577,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_THAILAND][1][43] = 68, [1][0][2][0][RTW89_THAILAND][0][43] = 30, [1][0][2][0][RTW89_FCC][1][46] = 34, - [1][0][2][0][RTW89_FCC][2][46] = 127, [1][0][2][0][RTW89_ETSI][1][46] = 127, [1][0][2][0][RTW89_ETSI][0][46] = 127, [1][0][2][0][RTW89_MKK][1][46] = 127, [1][0][2][0][RTW89_MKK][0][46] = 127, [1][0][2][0][RTW89_IC][1][46] = 34, - [1][0][2][0][RTW89_IC][2][46] = 68, [1][0][2][0][RTW89_KCC][1][46] = 40, [1][0][2][0][RTW89_KCC][0][46] = 127, [1][0][2][0][RTW89_ACMA][1][46] = 127, @@ -44748,13 +43594,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_THAILAND][1][46] = 127, [1][0][2][0][RTW89_THAILAND][0][46] = 127, [1][0][2][0][RTW89_FCC][1][50] = 34, - [1][0][2][0][RTW89_FCC][2][50] = 127, [1][0][2][0][RTW89_ETSI][1][50] = 127, [1][0][2][0][RTW89_ETSI][0][50] = 127, [1][0][2][0][RTW89_MKK][1][50] = 127, [1][0][2][0][RTW89_MKK][0][50] = 127, [1][0][2][0][RTW89_IC][1][50] = 34, - [1][0][2][0][RTW89_IC][2][50] = 68, [1][0][2][0][RTW89_KCC][1][50] = 40, [1][0][2][0][RTW89_KCC][0][50] = 127, [1][0][2][0][RTW89_ACMA][1][50] = 127, @@ -44767,13 +43611,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_THAILAND][1][50] = 127, [1][0][2][0][RTW89_THAILAND][0][50] = 127, [1][0][2][0][RTW89_FCC][1][54] = 36, - [1][0][2][0][RTW89_FCC][2][54] = 127, [1][0][2][0][RTW89_ETSI][1][54] = 127, [1][0][2][0][RTW89_ETSI][0][54] = 127, [1][0][2][0][RTW89_MKK][1][54] = 127, [1][0][2][0][RTW89_MKK][0][54] = 127, [1][0][2][0][RTW89_IC][1][54] = 36, - [1][0][2][0][RTW89_IC][2][54] = 127, [1][0][2][0][RTW89_KCC][1][54] = 40, [1][0][2][0][RTW89_KCC][0][54] = 127, [1][0][2][0][RTW89_ACMA][1][54] = 127, @@ -44786,13 +43628,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_THAILAND][1][54] = 127, [1][0][2][0][RTW89_THAILAND][0][54] = 127, [1][0][2][0][RTW89_FCC][1][58] = 36, - [1][0][2][0][RTW89_FCC][2][58] = 66, [1][0][2][0][RTW89_ETSI][1][58] = 127, [1][0][2][0][RTW89_ETSI][0][58] = 127, [1][0][2][0][RTW89_MKK][1][58] = 127, [1][0][2][0][RTW89_MKK][0][58] = 127, [1][0][2][0][RTW89_IC][1][58] = 36, - [1][0][2][0][RTW89_IC][2][58] = 66, [1][0][2][0][RTW89_KCC][1][58] = 40, [1][0][2][0][RTW89_KCC][0][58] = 127, [1][0][2][0][RTW89_ACMA][1][58] = 127, @@ -44805,13 +43645,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] 
[1][0][2][0][RTW89_THAILAND][1][58] = 127, [1][0][2][0][RTW89_THAILAND][0][58] = 127, [1][0][2][0][RTW89_FCC][1][61] = 34, - [1][0][2][0][RTW89_FCC][2][61] = 66, [1][0][2][0][RTW89_ETSI][1][61] = 127, [1][0][2][0][RTW89_ETSI][0][61] = 127, [1][0][2][0][RTW89_MKK][1][61] = 127, [1][0][2][0][RTW89_MKK][0][61] = 127, [1][0][2][0][RTW89_IC][1][61] = 34, - [1][0][2][0][RTW89_IC][2][61] = 66, [1][0][2][0][RTW89_KCC][1][61] = 40, [1][0][2][0][RTW89_KCC][0][61] = 127, [1][0][2][0][RTW89_ACMA][1][61] = 127, @@ -44824,13 +43662,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_THAILAND][1][61] = 127, [1][0][2][0][RTW89_THAILAND][0][61] = 127, [1][0][2][0][RTW89_FCC][1][65] = 34, - [1][0][2][0][RTW89_FCC][2][65] = 66, [1][0][2][0][RTW89_ETSI][1][65] = 127, [1][0][2][0][RTW89_ETSI][0][65] = 127, [1][0][2][0][RTW89_MKK][1][65] = 127, [1][0][2][0][RTW89_MKK][0][65] = 127, [1][0][2][0][RTW89_IC][1][65] = 34, - [1][0][2][0][RTW89_IC][2][65] = 66, [1][0][2][0][RTW89_KCC][1][65] = 40, [1][0][2][0][RTW89_KCC][0][65] = 127, [1][0][2][0][RTW89_ACMA][1][65] = 127, @@ -44843,13 +43679,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_THAILAND][1][65] = 127, [1][0][2][0][RTW89_THAILAND][0][65] = 127, [1][0][2][0][RTW89_FCC][1][69] = 34, - [1][0][2][0][RTW89_FCC][2][69] = 66, [1][0][2][0][RTW89_ETSI][1][69] = 127, [1][0][2][0][RTW89_ETSI][0][69] = 127, [1][0][2][0][RTW89_MKK][1][69] = 127, [1][0][2][0][RTW89_MKK][0][69] = 127, [1][0][2][0][RTW89_IC][1][69] = 34, - [1][0][2][0][RTW89_IC][2][69] = 66, [1][0][2][0][RTW89_KCC][1][69] = 40, [1][0][2][0][RTW89_KCC][0][69] = 127, [1][0][2][0][RTW89_ACMA][1][69] = 127, @@ -44862,13 +43696,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_THAILAND][1][69] = 127, [1][0][2][0][RTW89_THAILAND][0][69] = 127, [1][0][2][0][RTW89_FCC][1][73] = 34, - [1][0][2][0][RTW89_FCC][2][73] = 66, [1][0][2][0][RTW89_ETSI][1][73] = 127, [1][0][2][0][RTW89_ETSI][0][73] = 127, [1][0][2][0][RTW89_MKK][1][73] = 127, [1][0][2][0][RTW89_MKK][0][73] = 127, [1][0][2][0][RTW89_IC][1][73] = 34, - [1][0][2][0][RTW89_IC][2][73] = 66, [1][0][2][0][RTW89_KCC][1][73] = 40, [1][0][2][0][RTW89_KCC][0][73] = 127, [1][0][2][0][RTW89_ACMA][1][73] = 127, @@ -44881,13 +43713,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_THAILAND][1][73] = 127, [1][0][2][0][RTW89_THAILAND][0][73] = 127, [1][0][2][0][RTW89_FCC][1][76] = 34, - [1][0][2][0][RTW89_FCC][2][76] = 66, [1][0][2][0][RTW89_ETSI][1][76] = 127, [1][0][2][0][RTW89_ETSI][0][76] = 127, [1][0][2][0][RTW89_MKK][1][76] = 127, [1][0][2][0][RTW89_MKK][0][76] = 127, [1][0][2][0][RTW89_IC][1][76] = 34, - [1][0][2][0][RTW89_IC][2][76] = 66, [1][0][2][0][RTW89_KCC][1][76] = 40, [1][0][2][0][RTW89_KCC][0][76] = 127, [1][0][2][0][RTW89_ACMA][1][76] = 127, @@ -44900,13 +43730,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_THAILAND][1][76] = 127, [1][0][2][0][RTW89_THAILAND][0][76] = 127, [1][0][2][0][RTW89_FCC][1][80] = 34, - [1][0][2][0][RTW89_FCC][2][80] = 66, [1][0][2][0][RTW89_ETSI][1][80] = 127, [1][0][2][0][RTW89_ETSI][0][80] = 127, [1][0][2][0][RTW89_MKK][1][80] = 127, [1][0][2][0][RTW89_MKK][0][80] = 127, [1][0][2][0][RTW89_IC][1][80] = 34, - [1][0][2][0][RTW89_IC][2][80] = 66, [1][0][2][0][RTW89_KCC][1][80] = 42, [1][0][2][0][RTW89_KCC][0][80] = 127, [1][0][2][0][RTW89_ACMA][1][80] = 127, @@ -44919,13 +43747,11 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_THAILAND][1][80] = 127, [1][0][2][0][RTW89_THAILAND][0][80] = 127, [1][0][2][0][RTW89_FCC][1][84] = 34, - [1][0][2][0][RTW89_FCC][2][84] = 66, [1][0][2][0][RTW89_ETSI][1][84] = 127, [1][0][2][0][RTW89_ETSI][0][84] = 127, [1][0][2][0][RTW89_MKK][1][84] = 127, [1][0][2][0][RTW89_MKK][0][84] = 127, [1][0][2][0][RTW89_IC][1][84] = 34, - [1][0][2][0][RTW89_IC][2][84] = 66, [1][0][2][0][RTW89_KCC][1][84] = 42, [1][0][2][0][RTW89_KCC][0][84] = 127, [1][0][2][0][RTW89_ACMA][1][84] = 127, @@ -44938,13 +43764,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_THAILAND][1][84] = 127, [1][0][2][0][RTW89_THAILAND][0][84] = 127, [1][0][2][0][RTW89_FCC][1][88] = 34, - [1][0][2][0][RTW89_FCC][2][88] = 127, [1][0][2][0][RTW89_ETSI][1][88] = 127, [1][0][2][0][RTW89_ETSI][0][88] = 127, [1][0][2][0][RTW89_MKK][1][88] = 127, [1][0][2][0][RTW89_MKK][0][88] = 127, [1][0][2][0][RTW89_IC][1][88] = 34, - [1][0][2][0][RTW89_IC][2][88] = 127, [1][0][2][0][RTW89_KCC][1][88] = 42, [1][0][2][0][RTW89_KCC][0][88] = 127, [1][0][2][0][RTW89_ACMA][1][88] = 127, @@ -44957,13 +43781,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_THAILAND][1][88] = 127, [1][0][2][0][RTW89_THAILAND][0][88] = 127, [1][0][2][0][RTW89_FCC][1][91] = 36, - [1][0][2][0][RTW89_FCC][2][91] = 127, [1][0][2][0][RTW89_ETSI][1][91] = 127, [1][0][2][0][RTW89_ETSI][0][91] = 127, [1][0][2][0][RTW89_MKK][1][91] = 127, [1][0][2][0][RTW89_MKK][0][91] = 127, [1][0][2][0][RTW89_IC][1][91] = 36, - [1][0][2][0][RTW89_IC][2][91] = 127, [1][0][2][0][RTW89_KCC][1][91] = 42, [1][0][2][0][RTW89_KCC][0][91] = 127, [1][0][2][0][RTW89_ACMA][1][91] = 127, @@ -44976,13 +43798,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_THAILAND][1][91] = 127, [1][0][2][0][RTW89_THAILAND][0][91] = 127, [1][0][2][0][RTW89_FCC][1][95] = 34, - [1][0][2][0][RTW89_FCC][2][95] = 127, [1][0][2][0][RTW89_ETSI][1][95] = 127, [1][0][2][0][RTW89_ETSI][0][95] = 127, [1][0][2][0][RTW89_MKK][1][95] = 127, [1][0][2][0][RTW89_MKK][0][95] = 127, [1][0][2][0][RTW89_IC][1][95] = 34, - [1][0][2][0][RTW89_IC][2][95] = 127, [1][0][2][0][RTW89_KCC][1][95] = 42, [1][0][2][0][RTW89_KCC][0][95] = 127, [1][0][2][0][RTW89_ACMA][1][95] = 127, @@ -44995,13 +43815,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_THAILAND][1][95] = 127, [1][0][2][0][RTW89_THAILAND][0][95] = 127, [1][0][2][0][RTW89_FCC][1][99] = 34, - [1][0][2][0][RTW89_FCC][2][99] = 127, [1][0][2][0][RTW89_ETSI][1][99] = 127, [1][0][2][0][RTW89_ETSI][0][99] = 127, [1][0][2][0][RTW89_MKK][1][99] = 127, [1][0][2][0][RTW89_MKK][0][99] = 127, [1][0][2][0][RTW89_IC][1][99] = 34, - [1][0][2][0][RTW89_IC][2][99] = 127, [1][0][2][0][RTW89_KCC][1][99] = 42, [1][0][2][0][RTW89_KCC][0][99] = 127, [1][0][2][0][RTW89_ACMA][1][99] = 127, @@ -45014,13 +43832,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_THAILAND][1][99] = 127, [1][0][2][0][RTW89_THAILAND][0][99] = 127, [1][0][2][0][RTW89_FCC][1][103] = 34, - [1][0][2][0][RTW89_FCC][2][103] = 127, [1][0][2][0][RTW89_ETSI][1][103] = 127, [1][0][2][0][RTW89_ETSI][0][103] = 127, [1][0][2][0][RTW89_MKK][1][103] = 127, [1][0][2][0][RTW89_MKK][0][103] = 127, [1][0][2][0][RTW89_IC][1][103] = 34, - [1][0][2][0][RTW89_IC][2][103] = 127, [1][0][2][0][RTW89_KCC][1][103] = 42, [1][0][2][0][RTW89_KCC][0][103] = 127, [1][0][2][0][RTW89_ACMA][1][103] 
= 127, @@ -45033,13 +43849,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_THAILAND][1][103] = 127, [1][0][2][0][RTW89_THAILAND][0][103] = 127, [1][0][2][0][RTW89_FCC][1][106] = 36, - [1][0][2][0][RTW89_FCC][2][106] = 127, [1][0][2][0][RTW89_ETSI][1][106] = 127, [1][0][2][0][RTW89_ETSI][0][106] = 127, [1][0][2][0][RTW89_MKK][1][106] = 127, [1][0][2][0][RTW89_MKK][0][106] = 127, [1][0][2][0][RTW89_IC][1][106] = 36, - [1][0][2][0][RTW89_IC][2][106] = 127, [1][0][2][0][RTW89_KCC][1][106] = 42, [1][0][2][0][RTW89_KCC][0][106] = 127, [1][0][2][0][RTW89_ACMA][1][106] = 127, @@ -45052,13 +43866,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_THAILAND][1][106] = 127, [1][0][2][0][RTW89_THAILAND][0][106] = 127, [1][0][2][0][RTW89_FCC][1][110] = 127, - [1][0][2][0][RTW89_FCC][2][110] = 127, [1][0][2][0][RTW89_ETSI][1][110] = 127, [1][0][2][0][RTW89_ETSI][0][110] = 127, [1][0][2][0][RTW89_MKK][1][110] = 127, [1][0][2][0][RTW89_MKK][0][110] = 127, [1][0][2][0][RTW89_IC][1][110] = 127, - [1][0][2][0][RTW89_IC][2][110] = 127, [1][0][2][0][RTW89_KCC][1][110] = 127, [1][0][2][0][RTW89_KCC][0][110] = 127, [1][0][2][0][RTW89_ACMA][1][110] = 127, @@ -45071,13 +43883,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_THAILAND][1][110] = 127, [1][0][2][0][RTW89_THAILAND][0][110] = 127, [1][0][2][0][RTW89_FCC][1][114] = 127, - [1][0][2][0][RTW89_FCC][2][114] = 127, [1][0][2][0][RTW89_ETSI][1][114] = 127, [1][0][2][0][RTW89_ETSI][0][114] = 127, [1][0][2][0][RTW89_MKK][1][114] = 127, [1][0][2][0][RTW89_MKK][0][114] = 127, [1][0][2][0][RTW89_IC][1][114] = 127, - [1][0][2][0][RTW89_IC][2][114] = 127, [1][0][2][0][RTW89_KCC][1][114] = 127, [1][0][2][0][RTW89_KCC][0][114] = 127, [1][0][2][0][RTW89_ACMA][1][114] = 127, @@ -45090,13 +43900,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_THAILAND][1][114] = 127, [1][0][2][0][RTW89_THAILAND][0][114] = 127, [1][0][2][0][RTW89_FCC][1][118] = 127, - [1][0][2][0][RTW89_FCC][2][118] = 127, [1][0][2][0][RTW89_ETSI][1][118] = 127, [1][0][2][0][RTW89_ETSI][0][118] = 127, [1][0][2][0][RTW89_MKK][1][118] = 127, [1][0][2][0][RTW89_MKK][0][118] = 127, [1][0][2][0][RTW89_IC][1][118] = 127, - [1][0][2][0][RTW89_IC][2][118] = 127, [1][0][2][0][RTW89_KCC][1][118] = 127, [1][0][2][0][RTW89_KCC][0][118] = 127, [1][0][2][0][RTW89_ACMA][1][118] = 127, @@ -45109,13 +43917,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][0][2][0][RTW89_THAILAND][1][118] = 127, [1][0][2][0][RTW89_THAILAND][0][118] = 127, [1][1][2][0][RTW89_FCC][1][1] = 10, - [1][1][2][0][RTW89_FCC][2][1] = 58, [1][1][2][0][RTW89_ETSI][1][1] = 54, [1][1][2][0][RTW89_ETSI][0][1] = 18, [1][1][2][0][RTW89_MKK][1][1] = 52, [1][1][2][0][RTW89_MKK][0][1] = 12, [1][1][2][0][RTW89_IC][1][1] = 10, - [1][1][2][0][RTW89_IC][2][1] = 58, [1][1][2][0][RTW89_KCC][1][1] = 28, [1][1][2][0][RTW89_KCC][0][1] = 12, [1][1][2][0][RTW89_ACMA][1][1] = 54, @@ -45128,13 +43934,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][1] = 46, [1][1][2][0][RTW89_THAILAND][0][1] = 10, [1][1][2][0][RTW89_FCC][1][5] = 10, - [1][1][2][0][RTW89_FCC][2][5] = 58, [1][1][2][0][RTW89_ETSI][1][5] = 54, [1][1][2][0][RTW89_ETSI][0][5] = 16, [1][1][2][0][RTW89_MKK][1][5] = 52, [1][1][2][0][RTW89_MKK][0][5] = 12, [1][1][2][0][RTW89_IC][1][5] = 10, - [1][1][2][0][RTW89_IC][2][5] = 58, [1][1][2][0][RTW89_KCC][1][5] = 28, 
[1][1][2][0][RTW89_KCC][0][5] = 12, [1][1][2][0][RTW89_ACMA][1][5] = 54, @@ -45147,13 +43951,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][5] = 46, [1][1][2][0][RTW89_THAILAND][0][5] = 10, [1][1][2][0][RTW89_FCC][1][9] = 10, - [1][1][2][0][RTW89_FCC][2][9] = 58, [1][1][2][0][RTW89_ETSI][1][9] = 54, [1][1][2][0][RTW89_ETSI][0][9] = 16, [1][1][2][0][RTW89_MKK][1][9] = 52, [1][1][2][0][RTW89_MKK][0][9] = 12, [1][1][2][0][RTW89_IC][1][9] = 10, - [1][1][2][0][RTW89_IC][2][9] = 58, [1][1][2][0][RTW89_KCC][1][9] = 28, [1][1][2][0][RTW89_KCC][0][9] = 12, [1][1][2][0][RTW89_ACMA][1][9] = 54, @@ -45166,13 +43968,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][9] = 46, [1][1][2][0][RTW89_THAILAND][0][9] = 10, [1][1][2][0][RTW89_FCC][1][13] = 10, - [1][1][2][0][RTW89_FCC][2][13] = 58, [1][1][2][0][RTW89_ETSI][1][13] = 54, [1][1][2][0][RTW89_ETSI][0][13] = 16, [1][1][2][0][RTW89_MKK][1][13] = 52, [1][1][2][0][RTW89_MKK][0][13] = 12, [1][1][2][0][RTW89_IC][1][13] = 10, - [1][1][2][0][RTW89_IC][2][13] = 58, [1][1][2][0][RTW89_KCC][1][13] = 28, [1][1][2][0][RTW89_KCC][0][13] = 12, [1][1][2][0][RTW89_ACMA][1][13] = 54, @@ -45185,13 +43985,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][13] = 46, [1][1][2][0][RTW89_THAILAND][0][13] = 10, [1][1][2][0][RTW89_FCC][1][16] = 10, - [1][1][2][0][RTW89_FCC][2][16] = 58, [1][1][2][0][RTW89_ETSI][1][16] = 54, [1][1][2][0][RTW89_ETSI][0][16] = 16, [1][1][2][0][RTW89_MKK][1][16] = 52, [1][1][2][0][RTW89_MKK][0][16] = 12, [1][1][2][0][RTW89_IC][1][16] = 10, - [1][1][2][0][RTW89_IC][2][16] = 58, [1][1][2][0][RTW89_KCC][1][16] = 28, [1][1][2][0][RTW89_KCC][0][16] = 12, [1][1][2][0][RTW89_ACMA][1][16] = 54, @@ -45204,13 +44002,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][16] = 46, [1][1][2][0][RTW89_THAILAND][0][16] = 10, [1][1][2][0][RTW89_FCC][1][20] = 10, - [1][1][2][0][RTW89_FCC][2][20] = 58, [1][1][2][0][RTW89_ETSI][1][20] = 54, [1][1][2][0][RTW89_ETSI][0][20] = 16, [1][1][2][0][RTW89_MKK][1][20] = 52, [1][1][2][0][RTW89_MKK][0][20] = 12, [1][1][2][0][RTW89_IC][1][20] = 10, - [1][1][2][0][RTW89_IC][2][20] = 58, [1][1][2][0][RTW89_KCC][1][20] = 28, [1][1][2][0][RTW89_KCC][0][20] = 12, [1][1][2][0][RTW89_ACMA][1][20] = 54, @@ -45223,13 +44019,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][20] = 46, [1][1][2][0][RTW89_THAILAND][0][20] = 10, [1][1][2][0][RTW89_FCC][1][24] = 10, - [1][1][2][0][RTW89_FCC][2][24] = 70, [1][1][2][0][RTW89_ETSI][1][24] = 54, [1][1][2][0][RTW89_ETSI][0][24] = 16, [1][1][2][0][RTW89_MKK][1][24] = 54, [1][1][2][0][RTW89_MKK][0][24] = 14, [1][1][2][0][RTW89_IC][1][24] = 10, - [1][1][2][0][RTW89_IC][2][24] = 70, [1][1][2][0][RTW89_KCC][1][24] = 28, [1][1][2][0][RTW89_KCC][0][24] = 12, [1][1][2][0][RTW89_ACMA][1][24] = 54, @@ -45242,13 +44036,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][24] = 46, [1][1][2][0][RTW89_THAILAND][0][24] = 10, [1][1][2][0][RTW89_FCC][1][28] = 10, - [1][1][2][0][RTW89_FCC][2][28] = 70, [1][1][2][0][RTW89_ETSI][1][28] = 54, [1][1][2][0][RTW89_ETSI][0][28] = 16, [1][1][2][0][RTW89_MKK][1][28] = 52, [1][1][2][0][RTW89_MKK][0][28] = 14, [1][1][2][0][RTW89_IC][1][28] = 10, - [1][1][2][0][RTW89_IC][2][28] = 70, [1][1][2][0][RTW89_KCC][1][28] = 28, [1][1][2][0][RTW89_KCC][0][28] = 14, 
[1][1][2][0][RTW89_ACMA][1][28] = 54, @@ -45261,13 +44053,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][28] = 46, [1][1][2][0][RTW89_THAILAND][0][28] = 10, [1][1][2][0][RTW89_FCC][1][31] = 10, - [1][1][2][0][RTW89_FCC][2][31] = 70, [1][1][2][0][RTW89_ETSI][1][31] = 54, [1][1][2][0][RTW89_ETSI][0][31] = 16, [1][1][2][0][RTW89_MKK][1][31] = 52, [1][1][2][0][RTW89_MKK][0][31] = 14, [1][1][2][0][RTW89_IC][1][31] = 10, - [1][1][2][0][RTW89_IC][2][31] = 70, [1][1][2][0][RTW89_KCC][1][31] = 28, [1][1][2][0][RTW89_KCC][0][31] = 14, [1][1][2][0][RTW89_ACMA][1][31] = 54, @@ -45280,13 +44070,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][31] = 46, [1][1][2][0][RTW89_THAILAND][0][31] = 10, [1][1][2][0][RTW89_FCC][1][35] = 10, - [1][1][2][0][RTW89_FCC][2][35] = 70, [1][1][2][0][RTW89_ETSI][1][35] = 54, [1][1][2][0][RTW89_ETSI][0][35] = 16, [1][1][2][0][RTW89_MKK][1][35] = 52, [1][1][2][0][RTW89_MKK][0][35] = 14, [1][1][2][0][RTW89_IC][1][35] = 10, - [1][1][2][0][RTW89_IC][2][35] = 70, [1][1][2][0][RTW89_KCC][1][35] = 28, [1][1][2][0][RTW89_KCC][0][35] = 14, [1][1][2][0][RTW89_ACMA][1][35] = 54, @@ -45299,13 +44087,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][35] = 46, [1][1][2][0][RTW89_THAILAND][0][35] = 10, [1][1][2][0][RTW89_FCC][1][39] = 10, - [1][1][2][0][RTW89_FCC][2][39] = 70, [1][1][2][0][RTW89_ETSI][1][39] = 54, [1][1][2][0][RTW89_ETSI][0][39] = 16, [1][1][2][0][RTW89_MKK][1][39] = 52, [1][1][2][0][RTW89_MKK][0][39] = 14, [1][1][2][0][RTW89_IC][1][39] = 10, - [1][1][2][0][RTW89_IC][2][39] = 70, [1][1][2][0][RTW89_KCC][1][39] = 28, [1][1][2][0][RTW89_KCC][0][39] = 14, [1][1][2][0][RTW89_ACMA][1][39] = 54, @@ -45318,13 +44104,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][39] = 46, [1][1][2][0][RTW89_THAILAND][0][39] = 10, [1][1][2][0][RTW89_FCC][1][43] = 10, - [1][1][2][0][RTW89_FCC][2][43] = 70, [1][1][2][0][RTW89_ETSI][1][43] = 54, [1][1][2][0][RTW89_ETSI][0][43] = 16, [1][1][2][0][RTW89_MKK][1][43] = 52, [1][1][2][0][RTW89_MKK][0][43] = 14, [1][1][2][0][RTW89_IC][1][43] = 10, - [1][1][2][0][RTW89_IC][2][43] = 70, [1][1][2][0][RTW89_KCC][1][43] = 28, [1][1][2][0][RTW89_KCC][0][43] = 14, [1][1][2][0][RTW89_ACMA][1][43] = 54, @@ -45337,13 +44121,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][43] = 46, [1][1][2][0][RTW89_THAILAND][0][43] = 10, [1][1][2][0][RTW89_FCC][1][46] = 12, - [1][1][2][0][RTW89_FCC][2][46] = 127, [1][1][2][0][RTW89_ETSI][1][46] = 127, [1][1][2][0][RTW89_ETSI][0][46] = 127, [1][1][2][0][RTW89_MKK][1][46] = 127, [1][1][2][0][RTW89_MKK][0][46] = 127, [1][1][2][0][RTW89_IC][1][46] = 12, - [1][1][2][0][RTW89_IC][2][46] = 68, [1][1][2][0][RTW89_KCC][1][46] = 28, [1][1][2][0][RTW89_KCC][0][46] = 127, [1][1][2][0][RTW89_ACMA][1][46] = 127, @@ -45356,13 +44138,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][46] = 127, [1][1][2][0][RTW89_THAILAND][0][46] = 127, [1][1][2][0][RTW89_FCC][1][50] = 12, - [1][1][2][0][RTW89_FCC][2][50] = 127, [1][1][2][0][RTW89_ETSI][1][50] = 127, [1][1][2][0][RTW89_ETSI][0][50] = 127, [1][1][2][0][RTW89_MKK][1][50] = 127, [1][1][2][0][RTW89_MKK][0][50] = 127, [1][1][2][0][RTW89_IC][1][50] = 12, - [1][1][2][0][RTW89_IC][2][50] = 68, [1][1][2][0][RTW89_KCC][1][50] = 28, [1][1][2][0][RTW89_KCC][0][50] = 127, 
[1][1][2][0][RTW89_ACMA][1][50] = 127, @@ -45375,13 +44155,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][50] = 127, [1][1][2][0][RTW89_THAILAND][0][50] = 127, [1][1][2][0][RTW89_FCC][1][54] = 10, - [1][1][2][0][RTW89_FCC][2][54] = 127, [1][1][2][0][RTW89_ETSI][1][54] = 127, [1][1][2][0][RTW89_ETSI][0][54] = 127, [1][1][2][0][RTW89_MKK][1][54] = 127, [1][1][2][0][RTW89_MKK][0][54] = 127, [1][1][2][0][RTW89_IC][1][54] = 10, - [1][1][2][0][RTW89_IC][2][54] = 127, [1][1][2][0][RTW89_KCC][1][54] = 28, [1][1][2][0][RTW89_KCC][0][54] = 127, [1][1][2][0][RTW89_ACMA][1][54] = 127, @@ -45394,13 +44172,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][54] = 127, [1][1][2][0][RTW89_THAILAND][0][54] = 127, [1][1][2][0][RTW89_FCC][1][58] = 10, - [1][1][2][0][RTW89_FCC][2][58] = 66, [1][1][2][0][RTW89_ETSI][1][58] = 127, [1][1][2][0][RTW89_ETSI][0][58] = 127, [1][1][2][0][RTW89_MKK][1][58] = 127, [1][1][2][0][RTW89_MKK][0][58] = 127, [1][1][2][0][RTW89_IC][1][58] = 10, - [1][1][2][0][RTW89_IC][2][58] = 66, [1][1][2][0][RTW89_KCC][1][58] = 28, [1][1][2][0][RTW89_KCC][0][58] = 127, [1][1][2][0][RTW89_ACMA][1][58] = 127, @@ -45413,13 +44189,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][58] = 127, [1][1][2][0][RTW89_THAILAND][0][58] = 127, [1][1][2][0][RTW89_FCC][1][61] = 10, - [1][1][2][0][RTW89_FCC][2][61] = 66, [1][1][2][0][RTW89_ETSI][1][61] = 127, [1][1][2][0][RTW89_ETSI][0][61] = 127, [1][1][2][0][RTW89_MKK][1][61] = 127, [1][1][2][0][RTW89_MKK][0][61] = 127, [1][1][2][0][RTW89_IC][1][61] = 10, - [1][1][2][0][RTW89_IC][2][61] = 66, [1][1][2][0][RTW89_KCC][1][61] = 28, [1][1][2][0][RTW89_KCC][0][61] = 127, [1][1][2][0][RTW89_ACMA][1][61] = 127, @@ -45432,13 +44206,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][61] = 127, [1][1][2][0][RTW89_THAILAND][0][61] = 127, [1][1][2][0][RTW89_FCC][1][65] = 10, - [1][1][2][0][RTW89_FCC][2][65] = 66, [1][1][2][0][RTW89_ETSI][1][65] = 127, [1][1][2][0][RTW89_ETSI][0][65] = 127, [1][1][2][0][RTW89_MKK][1][65] = 127, [1][1][2][0][RTW89_MKK][0][65] = 127, [1][1][2][0][RTW89_IC][1][65] = 10, - [1][1][2][0][RTW89_IC][2][65] = 66, [1][1][2][0][RTW89_KCC][1][65] = 28, [1][1][2][0][RTW89_KCC][0][65] = 127, [1][1][2][0][RTW89_ACMA][1][65] = 127, @@ -45451,13 +44223,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][65] = 127, [1][1][2][0][RTW89_THAILAND][0][65] = 127, [1][1][2][0][RTW89_FCC][1][69] = 10, - [1][1][2][0][RTW89_FCC][2][69] = 66, [1][1][2][0][RTW89_ETSI][1][69] = 127, [1][1][2][0][RTW89_ETSI][0][69] = 127, [1][1][2][0][RTW89_MKK][1][69] = 127, [1][1][2][0][RTW89_MKK][0][69] = 127, [1][1][2][0][RTW89_IC][1][69] = 10, - [1][1][2][0][RTW89_IC][2][69] = 66, [1][1][2][0][RTW89_KCC][1][69] = 28, [1][1][2][0][RTW89_KCC][0][69] = 127, [1][1][2][0][RTW89_ACMA][1][69] = 127, @@ -45470,13 +44240,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][69] = 127, [1][1][2][0][RTW89_THAILAND][0][69] = 127, [1][1][2][0][RTW89_FCC][1][73] = 10, - [1][1][2][0][RTW89_FCC][2][73] = 66, [1][1][2][0][RTW89_ETSI][1][73] = 127, [1][1][2][0][RTW89_ETSI][0][73] = 127, [1][1][2][0][RTW89_MKK][1][73] = 127, [1][1][2][0][RTW89_MKK][0][73] = 127, [1][1][2][0][RTW89_IC][1][73] = 10, - [1][1][2][0][RTW89_IC][2][73] = 66, [1][1][2][0][RTW89_KCC][1][73] = 28, 
[1][1][2][0][RTW89_KCC][0][73] = 127, [1][1][2][0][RTW89_ACMA][1][73] = 127, @@ -45489,13 +44257,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][73] = 127, [1][1][2][0][RTW89_THAILAND][0][73] = 127, [1][1][2][0][RTW89_FCC][1][76] = 10, - [1][1][2][0][RTW89_FCC][2][76] = 66, [1][1][2][0][RTW89_ETSI][1][76] = 127, [1][1][2][0][RTW89_ETSI][0][76] = 127, [1][1][2][0][RTW89_MKK][1][76] = 127, [1][1][2][0][RTW89_MKK][0][76] = 127, [1][1][2][0][RTW89_IC][1][76] = 10, - [1][1][2][0][RTW89_IC][2][76] = 66, [1][1][2][0][RTW89_KCC][1][76] = 28, [1][1][2][0][RTW89_KCC][0][76] = 127, [1][1][2][0][RTW89_ACMA][1][76] = 127, @@ -45508,13 +44274,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][76] = 127, [1][1][2][0][RTW89_THAILAND][0][76] = 127, [1][1][2][0][RTW89_FCC][1][80] = 10, - [1][1][2][0][RTW89_FCC][2][80] = 66, [1][1][2][0][RTW89_ETSI][1][80] = 127, [1][1][2][0][RTW89_ETSI][0][80] = 127, [1][1][2][0][RTW89_MKK][1][80] = 127, [1][1][2][0][RTW89_MKK][0][80] = 127, [1][1][2][0][RTW89_IC][1][80] = 10, - [1][1][2][0][RTW89_IC][2][80] = 66, [1][1][2][0][RTW89_KCC][1][80] = 32, [1][1][2][0][RTW89_KCC][0][80] = 127, [1][1][2][0][RTW89_ACMA][1][80] = 127, @@ -45527,13 +44291,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][80] = 127, [1][1][2][0][RTW89_THAILAND][0][80] = 127, [1][1][2][0][RTW89_FCC][1][84] = 10, - [1][1][2][0][RTW89_FCC][2][84] = 66, [1][1][2][0][RTW89_ETSI][1][84] = 127, [1][1][2][0][RTW89_ETSI][0][84] = 127, [1][1][2][0][RTW89_MKK][1][84] = 127, [1][1][2][0][RTW89_MKK][0][84] = 127, [1][1][2][0][RTW89_IC][1][84] = 10, - [1][1][2][0][RTW89_IC][2][84] = 66, [1][1][2][0][RTW89_KCC][1][84] = 32, [1][1][2][0][RTW89_KCC][0][84] = 127, [1][1][2][0][RTW89_ACMA][1][84] = 127, @@ -45546,13 +44308,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][84] = 127, [1][1][2][0][RTW89_THAILAND][0][84] = 127, [1][1][2][0][RTW89_FCC][1][88] = 10, - [1][1][2][0][RTW89_FCC][2][88] = 127, [1][1][2][0][RTW89_ETSI][1][88] = 127, [1][1][2][0][RTW89_ETSI][0][88] = 127, [1][1][2][0][RTW89_MKK][1][88] = 127, [1][1][2][0][RTW89_MKK][0][88] = 127, [1][1][2][0][RTW89_IC][1][88] = 10, - [1][1][2][0][RTW89_IC][2][88] = 127, [1][1][2][0][RTW89_KCC][1][88] = 32, [1][1][2][0][RTW89_KCC][0][88] = 127, [1][1][2][0][RTW89_ACMA][1][88] = 127, @@ -45565,13 +44325,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][88] = 127, [1][1][2][0][RTW89_THAILAND][0][88] = 127, [1][1][2][0][RTW89_FCC][1][91] = 12, - [1][1][2][0][RTW89_FCC][2][91] = 127, [1][1][2][0][RTW89_ETSI][1][91] = 127, [1][1][2][0][RTW89_ETSI][0][91] = 127, [1][1][2][0][RTW89_MKK][1][91] = 127, [1][1][2][0][RTW89_MKK][0][91] = 127, [1][1][2][0][RTW89_IC][1][91] = 12, - [1][1][2][0][RTW89_IC][2][91] = 127, [1][1][2][0][RTW89_KCC][1][91] = 32, [1][1][2][0][RTW89_KCC][0][91] = 127, [1][1][2][0][RTW89_ACMA][1][91] = 127, @@ -45584,13 +44342,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][91] = 127, [1][1][2][0][RTW89_THAILAND][0][91] = 127, [1][1][2][0][RTW89_FCC][1][95] = 10, - [1][1][2][0][RTW89_FCC][2][95] = 127, [1][1][2][0][RTW89_ETSI][1][95] = 127, [1][1][2][0][RTW89_ETSI][0][95] = 127, [1][1][2][0][RTW89_MKK][1][95] = 127, [1][1][2][0][RTW89_MKK][0][95] = 127, [1][1][2][0][RTW89_IC][1][95] = 10, - [1][1][2][0][RTW89_IC][2][95] = 127, 
[1][1][2][0][RTW89_KCC][1][95] = 32, [1][1][2][0][RTW89_KCC][0][95] = 127, [1][1][2][0][RTW89_ACMA][1][95] = 127, @@ -45603,13 +44359,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][95] = 127, [1][1][2][0][RTW89_THAILAND][0][95] = 127, [1][1][2][0][RTW89_FCC][1][99] = 10, - [1][1][2][0][RTW89_FCC][2][99] = 127, [1][1][2][0][RTW89_ETSI][1][99] = 127, [1][1][2][0][RTW89_ETSI][0][99] = 127, [1][1][2][0][RTW89_MKK][1][99] = 127, [1][1][2][0][RTW89_MKK][0][99] = 127, [1][1][2][0][RTW89_IC][1][99] = 10, - [1][1][2][0][RTW89_IC][2][99] = 127, [1][1][2][0][RTW89_KCC][1][99] = 32, [1][1][2][0][RTW89_KCC][0][99] = 127, [1][1][2][0][RTW89_ACMA][1][99] = 127, @@ -45622,13 +44376,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][99] = 127, [1][1][2][0][RTW89_THAILAND][0][99] = 127, [1][1][2][0][RTW89_FCC][1][103] = 10, - [1][1][2][0][RTW89_FCC][2][103] = 127, [1][1][2][0][RTW89_ETSI][1][103] = 127, [1][1][2][0][RTW89_ETSI][0][103] = 127, [1][1][2][0][RTW89_MKK][1][103] = 127, [1][1][2][0][RTW89_MKK][0][103] = 127, [1][1][2][0][RTW89_IC][1][103] = 10, - [1][1][2][0][RTW89_IC][2][103] = 127, [1][1][2][0][RTW89_KCC][1][103] = 32, [1][1][2][0][RTW89_KCC][0][103] = 127, [1][1][2][0][RTW89_ACMA][1][103] = 127, @@ -45641,13 +44393,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][103] = 127, [1][1][2][0][RTW89_THAILAND][0][103] = 127, [1][1][2][0][RTW89_FCC][1][106] = 12, - [1][1][2][0][RTW89_FCC][2][106] = 127, [1][1][2][0][RTW89_ETSI][1][106] = 127, [1][1][2][0][RTW89_ETSI][0][106] = 127, [1][1][2][0][RTW89_MKK][1][106] = 127, [1][1][2][0][RTW89_MKK][0][106] = 127, [1][1][2][0][RTW89_IC][1][106] = 12, - [1][1][2][0][RTW89_IC][2][106] = 127, [1][1][2][0][RTW89_KCC][1][106] = 32, [1][1][2][0][RTW89_KCC][0][106] = 127, [1][1][2][0][RTW89_ACMA][1][106] = 127, @@ -45660,13 +44410,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][106] = 127, [1][1][2][0][RTW89_THAILAND][0][106] = 127, [1][1][2][0][RTW89_FCC][1][110] = 127, - [1][1][2][0][RTW89_FCC][2][110] = 127, [1][1][2][0][RTW89_ETSI][1][110] = 127, [1][1][2][0][RTW89_ETSI][0][110] = 127, [1][1][2][0][RTW89_MKK][1][110] = 127, [1][1][2][0][RTW89_MKK][0][110] = 127, [1][1][2][0][RTW89_IC][1][110] = 127, - [1][1][2][0][RTW89_IC][2][110] = 127, [1][1][2][0][RTW89_KCC][1][110] = 127, [1][1][2][0][RTW89_KCC][0][110] = 127, [1][1][2][0][RTW89_ACMA][1][110] = 127, @@ -45679,13 +44427,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][110] = 127, [1][1][2][0][RTW89_THAILAND][0][110] = 127, [1][1][2][0][RTW89_FCC][1][114] = 127, - [1][1][2][0][RTW89_FCC][2][114] = 127, [1][1][2][0][RTW89_ETSI][1][114] = 127, [1][1][2][0][RTW89_ETSI][0][114] = 127, [1][1][2][0][RTW89_MKK][1][114] = 127, [1][1][2][0][RTW89_MKK][0][114] = 127, [1][1][2][0][RTW89_IC][1][114] = 127, - [1][1][2][0][RTW89_IC][2][114] = 127, [1][1][2][0][RTW89_KCC][1][114] = 127, [1][1][2][0][RTW89_KCC][0][114] = 127, [1][1][2][0][RTW89_ACMA][1][114] = 127, @@ -45698,13 +44444,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][114] = 127, [1][1][2][0][RTW89_THAILAND][0][114] = 127, [1][1][2][0][RTW89_FCC][1][118] = 127, - [1][1][2][0][RTW89_FCC][2][118] = 127, [1][1][2][0][RTW89_ETSI][1][118] = 127, [1][1][2][0][RTW89_ETSI][0][118] = 127, [1][1][2][0][RTW89_MKK][1][118] = 127, 
[1][1][2][0][RTW89_MKK][0][118] = 127, [1][1][2][0][RTW89_IC][1][118] = 127, - [1][1][2][0][RTW89_IC][2][118] = 127, [1][1][2][0][RTW89_KCC][1][118] = 127, [1][1][2][0][RTW89_KCC][0][118] = 127, [1][1][2][0][RTW89_ACMA][1][118] = 127, @@ -45717,13 +44461,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][0][RTW89_THAILAND][1][118] = 127, [1][1][2][0][RTW89_THAILAND][0][118] = 127, [1][1][2][1][RTW89_FCC][1][1] = 10, - [1][1][2][1][RTW89_FCC][2][1] = 58, [1][1][2][1][RTW89_ETSI][1][1] = 42, [1][1][2][1][RTW89_ETSI][0][1] = 6, [1][1][2][1][RTW89_MKK][1][1] = 52, [1][1][2][1][RTW89_MKK][0][1] = 12, [1][1][2][1][RTW89_IC][1][1] = 10, - [1][1][2][1][RTW89_IC][2][1] = 58, [1][1][2][1][RTW89_KCC][1][1] = 28, [1][1][2][1][RTW89_KCC][0][1] = 12, [1][1][2][1][RTW89_ACMA][1][1] = 42, @@ -45736,13 +44478,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][1] = 46, [1][1][2][1][RTW89_THAILAND][0][1] = 6, [1][1][2][1][RTW89_FCC][1][5] = 10, - [1][1][2][1][RTW89_FCC][2][5] = 58, [1][1][2][1][RTW89_ETSI][1][5] = 42, [1][1][2][1][RTW89_ETSI][0][5] = 6, [1][1][2][1][RTW89_MKK][1][5] = 52, [1][1][2][1][RTW89_MKK][0][5] = 12, [1][1][2][1][RTW89_IC][1][5] = 10, - [1][1][2][1][RTW89_IC][2][5] = 58, [1][1][2][1][RTW89_KCC][1][5] = 28, [1][1][2][1][RTW89_KCC][0][5] = 12, [1][1][2][1][RTW89_ACMA][1][5] = 42, @@ -45755,13 +44495,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][5] = 46, [1][1][2][1][RTW89_THAILAND][0][5] = 6, [1][1][2][1][RTW89_FCC][1][9] = 10, - [1][1][2][1][RTW89_FCC][2][9] = 58, [1][1][2][1][RTW89_ETSI][1][9] = 42, [1][1][2][1][RTW89_ETSI][0][9] = 6, [1][1][2][1][RTW89_MKK][1][9] = 52, [1][1][2][1][RTW89_MKK][0][9] = 12, [1][1][2][1][RTW89_IC][1][9] = 10, - [1][1][2][1][RTW89_IC][2][9] = 58, [1][1][2][1][RTW89_KCC][1][9] = 28, [1][1][2][1][RTW89_KCC][0][9] = 12, [1][1][2][1][RTW89_ACMA][1][9] = 42, @@ -45774,13 +44512,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][9] = 46, [1][1][2][1][RTW89_THAILAND][0][9] = 6, [1][1][2][1][RTW89_FCC][1][13] = 10, - [1][1][2][1][RTW89_FCC][2][13] = 58, [1][1][2][1][RTW89_ETSI][1][13] = 42, [1][1][2][1][RTW89_ETSI][0][13] = 6, [1][1][2][1][RTW89_MKK][1][13] = 52, [1][1][2][1][RTW89_MKK][0][13] = 12, [1][1][2][1][RTW89_IC][1][13] = 10, - [1][1][2][1][RTW89_IC][2][13] = 58, [1][1][2][1][RTW89_KCC][1][13] = 28, [1][1][2][1][RTW89_KCC][0][13] = 12, [1][1][2][1][RTW89_ACMA][1][13] = 42, @@ -45793,13 +44529,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][13] = 46, [1][1][2][1][RTW89_THAILAND][0][13] = 6, [1][1][2][1][RTW89_FCC][1][16] = 10, - [1][1][2][1][RTW89_FCC][2][16] = 58, [1][1][2][1][RTW89_ETSI][1][16] = 42, [1][1][2][1][RTW89_ETSI][0][16] = 6, [1][1][2][1][RTW89_MKK][1][16] = 52, [1][1][2][1][RTW89_MKK][0][16] = 12, [1][1][2][1][RTW89_IC][1][16] = 10, - [1][1][2][1][RTW89_IC][2][16] = 58, [1][1][2][1][RTW89_KCC][1][16] = 28, [1][1][2][1][RTW89_KCC][0][16] = 12, [1][1][2][1][RTW89_ACMA][1][16] = 42, @@ -45812,13 +44546,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][16] = 46, [1][1][2][1][RTW89_THAILAND][0][16] = 6, [1][1][2][1][RTW89_FCC][1][20] = 10, - [1][1][2][1][RTW89_FCC][2][20] = 58, [1][1][2][1][RTW89_ETSI][1][20] = 42, [1][1][2][1][RTW89_ETSI][0][20] = 6, [1][1][2][1][RTW89_MKK][1][20] = 52, [1][1][2][1][RTW89_MKK][0][20] = 12, 
[1][1][2][1][RTW89_IC][1][20] = 10, - [1][1][2][1][RTW89_IC][2][20] = 58, [1][1][2][1][RTW89_KCC][1][20] = 28, [1][1][2][1][RTW89_KCC][0][20] = 12, [1][1][2][1][RTW89_ACMA][1][20] = 42, @@ -45831,13 +44563,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][20] = 46, [1][1][2][1][RTW89_THAILAND][0][20] = 6, [1][1][2][1][RTW89_FCC][1][24] = 10, - [1][1][2][1][RTW89_FCC][2][24] = 70, [1][1][2][1][RTW89_ETSI][1][24] = 42, [1][1][2][1][RTW89_ETSI][0][24] = 6, [1][1][2][1][RTW89_MKK][1][24] = 54, [1][1][2][1][RTW89_MKK][0][24] = 14, [1][1][2][1][RTW89_IC][1][24] = 10, - [1][1][2][1][RTW89_IC][2][24] = 70, [1][1][2][1][RTW89_KCC][1][24] = 28, [1][1][2][1][RTW89_KCC][0][24] = 12, [1][1][2][1][RTW89_ACMA][1][24] = 42, @@ -45850,13 +44580,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][24] = 46, [1][1][2][1][RTW89_THAILAND][0][24] = 6, [1][1][2][1][RTW89_FCC][1][28] = 10, - [1][1][2][1][RTW89_FCC][2][28] = 70, [1][1][2][1][RTW89_ETSI][1][28] = 42, [1][1][2][1][RTW89_ETSI][0][28] = 6, [1][1][2][1][RTW89_MKK][1][28] = 52, [1][1][2][1][RTW89_MKK][0][28] = 14, [1][1][2][1][RTW89_IC][1][28] = 10, - [1][1][2][1][RTW89_IC][2][28] = 70, [1][1][2][1][RTW89_KCC][1][28] = 28, [1][1][2][1][RTW89_KCC][0][28] = 14, [1][1][2][1][RTW89_ACMA][1][28] = 42, @@ -45869,13 +44597,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][28] = 46, [1][1][2][1][RTW89_THAILAND][0][28] = 6, [1][1][2][1][RTW89_FCC][1][31] = 10, - [1][1][2][1][RTW89_FCC][2][31] = 70, [1][1][2][1][RTW89_ETSI][1][31] = 42, [1][1][2][1][RTW89_ETSI][0][31] = 6, [1][1][2][1][RTW89_MKK][1][31] = 52, [1][1][2][1][RTW89_MKK][0][31] = 14, [1][1][2][1][RTW89_IC][1][31] = 10, - [1][1][2][1][RTW89_IC][2][31] = 70, [1][1][2][1][RTW89_KCC][1][31] = 28, [1][1][2][1][RTW89_KCC][0][31] = 14, [1][1][2][1][RTW89_ACMA][1][31] = 42, @@ -45888,13 +44614,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][31] = 46, [1][1][2][1][RTW89_THAILAND][0][31] = 6, [1][1][2][1][RTW89_FCC][1][35] = 10, - [1][1][2][1][RTW89_FCC][2][35] = 70, [1][1][2][1][RTW89_ETSI][1][35] = 42, [1][1][2][1][RTW89_ETSI][0][35] = 6, [1][1][2][1][RTW89_MKK][1][35] = 52, [1][1][2][1][RTW89_MKK][0][35] = 14, [1][1][2][1][RTW89_IC][1][35] = 10, - [1][1][2][1][RTW89_IC][2][35] = 70, [1][1][2][1][RTW89_KCC][1][35] = 28, [1][1][2][1][RTW89_KCC][0][35] = 14, [1][1][2][1][RTW89_ACMA][1][35] = 42, @@ -45907,13 +44631,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][35] = 46, [1][1][2][1][RTW89_THAILAND][0][35] = 6, [1][1][2][1][RTW89_FCC][1][39] = 10, - [1][1][2][1][RTW89_FCC][2][39] = 70, [1][1][2][1][RTW89_ETSI][1][39] = 42, [1][1][2][1][RTW89_ETSI][0][39] = 6, [1][1][2][1][RTW89_MKK][1][39] = 52, [1][1][2][1][RTW89_MKK][0][39] = 14, [1][1][2][1][RTW89_IC][1][39] = 10, - [1][1][2][1][RTW89_IC][2][39] = 70, [1][1][2][1][RTW89_KCC][1][39] = 28, [1][1][2][1][RTW89_KCC][0][39] = 14, [1][1][2][1][RTW89_ACMA][1][39] = 42, @@ -45926,13 +44648,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][39] = 46, [1][1][2][1][RTW89_THAILAND][0][39] = 6, [1][1][2][1][RTW89_FCC][1][43] = 10, - [1][1][2][1][RTW89_FCC][2][43] = 70, [1][1][2][1][RTW89_ETSI][1][43] = 42, [1][1][2][1][RTW89_ETSI][0][43] = 6, [1][1][2][1][RTW89_MKK][1][43] = 52, [1][1][2][1][RTW89_MKK][0][43] = 14, [1][1][2][1][RTW89_IC][1][43] = 10, - 
[1][1][2][1][RTW89_IC][2][43] = 70, [1][1][2][1][RTW89_KCC][1][43] = 28, [1][1][2][1][RTW89_KCC][0][43] = 14, [1][1][2][1][RTW89_ACMA][1][43] = 42, @@ -45945,13 +44665,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][43] = 46, [1][1][2][1][RTW89_THAILAND][0][43] = 6, [1][1][2][1][RTW89_FCC][1][46] = 12, - [1][1][2][1][RTW89_FCC][2][46] = 127, [1][1][2][1][RTW89_ETSI][1][46] = 127, [1][1][2][1][RTW89_ETSI][0][46] = 127, [1][1][2][1][RTW89_MKK][1][46] = 127, [1][1][2][1][RTW89_MKK][0][46] = 127, [1][1][2][1][RTW89_IC][1][46] = 12, - [1][1][2][1][RTW89_IC][2][46] = 68, [1][1][2][1][RTW89_KCC][1][46] = 28, [1][1][2][1][RTW89_KCC][0][46] = 127, [1][1][2][1][RTW89_ACMA][1][46] = 127, @@ -45964,13 +44682,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][46] = 127, [1][1][2][1][RTW89_THAILAND][0][46] = 127, [1][1][2][1][RTW89_FCC][1][50] = 12, - [1][1][2][1][RTW89_FCC][2][50] = 127, [1][1][2][1][RTW89_ETSI][1][50] = 127, [1][1][2][1][RTW89_ETSI][0][50] = 127, [1][1][2][1][RTW89_MKK][1][50] = 127, [1][1][2][1][RTW89_MKK][0][50] = 127, [1][1][2][1][RTW89_IC][1][50] = 12, - [1][1][2][1][RTW89_IC][2][50] = 68, [1][1][2][1][RTW89_KCC][1][50] = 28, [1][1][2][1][RTW89_KCC][0][50] = 127, [1][1][2][1][RTW89_ACMA][1][50] = 127, @@ -45983,13 +44699,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][50] = 127, [1][1][2][1][RTW89_THAILAND][0][50] = 127, [1][1][2][1][RTW89_FCC][1][54] = 10, - [1][1][2][1][RTW89_FCC][2][54] = 127, [1][1][2][1][RTW89_ETSI][1][54] = 127, [1][1][2][1][RTW89_ETSI][0][54] = 127, [1][1][2][1][RTW89_MKK][1][54] = 127, [1][1][2][1][RTW89_MKK][0][54] = 127, [1][1][2][1][RTW89_IC][1][54] = 10, - [1][1][2][1][RTW89_IC][2][54] = 127, [1][1][2][1][RTW89_KCC][1][54] = 28, [1][1][2][1][RTW89_KCC][0][54] = 127, [1][1][2][1][RTW89_ACMA][1][54] = 127, @@ -46002,13 +44716,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][54] = 127, [1][1][2][1][RTW89_THAILAND][0][54] = 127, [1][1][2][1][RTW89_FCC][1][58] = 10, - [1][1][2][1][RTW89_FCC][2][58] = 66, [1][1][2][1][RTW89_ETSI][1][58] = 127, [1][1][2][1][RTW89_ETSI][0][58] = 127, [1][1][2][1][RTW89_MKK][1][58] = 127, [1][1][2][1][RTW89_MKK][0][58] = 127, [1][1][2][1][RTW89_IC][1][58] = 10, - [1][1][2][1][RTW89_IC][2][58] = 66, [1][1][2][1][RTW89_KCC][1][58] = 28, [1][1][2][1][RTW89_KCC][0][58] = 127, [1][1][2][1][RTW89_ACMA][1][58] = 127, @@ -46021,13 +44733,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][58] = 127, [1][1][2][1][RTW89_THAILAND][0][58] = 127, [1][1][2][1][RTW89_FCC][1][61] = 10, - [1][1][2][1][RTW89_FCC][2][61] = 66, [1][1][2][1][RTW89_ETSI][1][61] = 127, [1][1][2][1][RTW89_ETSI][0][61] = 127, [1][1][2][1][RTW89_MKK][1][61] = 127, [1][1][2][1][RTW89_MKK][0][61] = 127, [1][1][2][1][RTW89_IC][1][61] = 10, - [1][1][2][1][RTW89_IC][2][61] = 66, [1][1][2][1][RTW89_KCC][1][61] = 28, [1][1][2][1][RTW89_KCC][0][61] = 127, [1][1][2][1][RTW89_ACMA][1][61] = 127, @@ -46040,13 +44750,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][61] = 127, [1][1][2][1][RTW89_THAILAND][0][61] = 127, [1][1][2][1][RTW89_FCC][1][65] = 10, - [1][1][2][1][RTW89_FCC][2][65] = 66, [1][1][2][1][RTW89_ETSI][1][65] = 127, [1][1][2][1][RTW89_ETSI][0][65] = 127, [1][1][2][1][RTW89_MKK][1][65] = 127, [1][1][2][1][RTW89_MKK][0][65] = 127, 
[1][1][2][1][RTW89_IC][1][65] = 10, - [1][1][2][1][RTW89_IC][2][65] = 66, [1][1][2][1][RTW89_KCC][1][65] = 28, [1][1][2][1][RTW89_KCC][0][65] = 127, [1][1][2][1][RTW89_ACMA][1][65] = 127, @@ -46059,13 +44767,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][65] = 127, [1][1][2][1][RTW89_THAILAND][0][65] = 127, [1][1][2][1][RTW89_FCC][1][69] = 10, - [1][1][2][1][RTW89_FCC][2][69] = 66, [1][1][2][1][RTW89_ETSI][1][69] = 127, [1][1][2][1][RTW89_ETSI][0][69] = 127, [1][1][2][1][RTW89_MKK][1][69] = 127, [1][1][2][1][RTW89_MKK][0][69] = 127, [1][1][2][1][RTW89_IC][1][69] = 10, - [1][1][2][1][RTW89_IC][2][69] = 66, [1][1][2][1][RTW89_KCC][1][69] = 28, [1][1][2][1][RTW89_KCC][0][69] = 127, [1][1][2][1][RTW89_ACMA][1][69] = 127, @@ -46078,13 +44784,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][69] = 127, [1][1][2][1][RTW89_THAILAND][0][69] = 127, [1][1][2][1][RTW89_FCC][1][73] = 10, - [1][1][2][1][RTW89_FCC][2][73] = 66, [1][1][2][1][RTW89_ETSI][1][73] = 127, [1][1][2][1][RTW89_ETSI][0][73] = 127, [1][1][2][1][RTW89_MKK][1][73] = 127, [1][1][2][1][RTW89_MKK][0][73] = 127, [1][1][2][1][RTW89_IC][1][73] = 10, - [1][1][2][1][RTW89_IC][2][73] = 66, [1][1][2][1][RTW89_KCC][1][73] = 28, [1][1][2][1][RTW89_KCC][0][73] = 127, [1][1][2][1][RTW89_ACMA][1][73] = 127, @@ -46097,13 +44801,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][73] = 127, [1][1][2][1][RTW89_THAILAND][0][73] = 127, [1][1][2][1][RTW89_FCC][1][76] = 10, - [1][1][2][1][RTW89_FCC][2][76] = 66, [1][1][2][1][RTW89_ETSI][1][76] = 127, [1][1][2][1][RTW89_ETSI][0][76] = 127, [1][1][2][1][RTW89_MKK][1][76] = 127, [1][1][2][1][RTW89_MKK][0][76] = 127, [1][1][2][1][RTW89_IC][1][76] = 10, - [1][1][2][1][RTW89_IC][2][76] = 66, [1][1][2][1][RTW89_KCC][1][76] = 28, [1][1][2][1][RTW89_KCC][0][76] = 127, [1][1][2][1][RTW89_ACMA][1][76] = 127, @@ -46116,13 +44818,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][76] = 127, [1][1][2][1][RTW89_THAILAND][0][76] = 127, [1][1][2][1][RTW89_FCC][1][80] = 10, - [1][1][2][1][RTW89_FCC][2][80] = 66, [1][1][2][1][RTW89_ETSI][1][80] = 127, [1][1][2][1][RTW89_ETSI][0][80] = 127, [1][1][2][1][RTW89_MKK][1][80] = 127, [1][1][2][1][RTW89_MKK][0][80] = 127, [1][1][2][1][RTW89_IC][1][80] = 10, - [1][1][2][1][RTW89_IC][2][80] = 66, [1][1][2][1][RTW89_KCC][1][80] = 32, [1][1][2][1][RTW89_KCC][0][80] = 127, [1][1][2][1][RTW89_ACMA][1][80] = 127, @@ -46135,13 +44835,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][80] = 127, [1][1][2][1][RTW89_THAILAND][0][80] = 127, [1][1][2][1][RTW89_FCC][1][84] = 10, - [1][1][2][1][RTW89_FCC][2][84] = 66, [1][1][2][1][RTW89_ETSI][1][84] = 127, [1][1][2][1][RTW89_ETSI][0][84] = 127, [1][1][2][1][RTW89_MKK][1][84] = 127, [1][1][2][1][RTW89_MKK][0][84] = 127, [1][1][2][1][RTW89_IC][1][84] = 10, - [1][1][2][1][RTW89_IC][2][84] = 66, [1][1][2][1][RTW89_KCC][1][84] = 32, [1][1][2][1][RTW89_KCC][0][84] = 127, [1][1][2][1][RTW89_ACMA][1][84] = 127, @@ -46154,13 +44852,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][84] = 127, [1][1][2][1][RTW89_THAILAND][0][84] = 127, [1][1][2][1][RTW89_FCC][1][88] = 10, - [1][1][2][1][RTW89_FCC][2][88] = 127, [1][1][2][1][RTW89_ETSI][1][88] = 127, [1][1][2][1][RTW89_ETSI][0][88] = 127, [1][1][2][1][RTW89_MKK][1][88] = 127, 
[1][1][2][1][RTW89_MKK][0][88] = 127, [1][1][2][1][RTW89_IC][1][88] = 10, - [1][1][2][1][RTW89_IC][2][88] = 127, [1][1][2][1][RTW89_KCC][1][88] = 32, [1][1][2][1][RTW89_KCC][0][88] = 127, [1][1][2][1][RTW89_ACMA][1][88] = 127, @@ -46173,13 +44869,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][88] = 127, [1][1][2][1][RTW89_THAILAND][0][88] = 127, [1][1][2][1][RTW89_FCC][1][91] = 12, - [1][1][2][1][RTW89_FCC][2][91] = 127, [1][1][2][1][RTW89_ETSI][1][91] = 127, [1][1][2][1][RTW89_ETSI][0][91] = 127, [1][1][2][1][RTW89_MKK][1][91] = 127, [1][1][2][1][RTW89_MKK][0][91] = 127, [1][1][2][1][RTW89_IC][1][91] = 12, - [1][1][2][1][RTW89_IC][2][91] = 127, [1][1][2][1][RTW89_KCC][1][91] = 32, [1][1][2][1][RTW89_KCC][0][91] = 127, [1][1][2][1][RTW89_ACMA][1][91] = 127, @@ -46192,13 +44886,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][91] = 127, [1][1][2][1][RTW89_THAILAND][0][91] = 127, [1][1][2][1][RTW89_FCC][1][95] = 10, - [1][1][2][1][RTW89_FCC][2][95] = 127, [1][1][2][1][RTW89_ETSI][1][95] = 127, [1][1][2][1][RTW89_ETSI][0][95] = 127, [1][1][2][1][RTW89_MKK][1][95] = 127, [1][1][2][1][RTW89_MKK][0][95] = 127, [1][1][2][1][RTW89_IC][1][95] = 10, - [1][1][2][1][RTW89_IC][2][95] = 127, [1][1][2][1][RTW89_KCC][1][95] = 32, [1][1][2][1][RTW89_KCC][0][95] = 127, [1][1][2][1][RTW89_ACMA][1][95] = 127, @@ -46211,13 +44903,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][95] = 127, [1][1][2][1][RTW89_THAILAND][0][95] = 127, [1][1][2][1][RTW89_FCC][1][99] = 10, - [1][1][2][1][RTW89_FCC][2][99] = 127, [1][1][2][1][RTW89_ETSI][1][99] = 127, [1][1][2][1][RTW89_ETSI][0][99] = 127, [1][1][2][1][RTW89_MKK][1][99] = 127, [1][1][2][1][RTW89_MKK][0][99] = 127, [1][1][2][1][RTW89_IC][1][99] = 10, - [1][1][2][1][RTW89_IC][2][99] = 127, [1][1][2][1][RTW89_KCC][1][99] = 32, [1][1][2][1][RTW89_KCC][0][99] = 127, [1][1][2][1][RTW89_ACMA][1][99] = 127, @@ -46230,13 +44920,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][99] = 127, [1][1][2][1][RTW89_THAILAND][0][99] = 127, [1][1][2][1][RTW89_FCC][1][103] = 10, - [1][1][2][1][RTW89_FCC][2][103] = 127, [1][1][2][1][RTW89_ETSI][1][103] = 127, [1][1][2][1][RTW89_ETSI][0][103] = 127, [1][1][2][1][RTW89_MKK][1][103] = 127, [1][1][2][1][RTW89_MKK][0][103] = 127, [1][1][2][1][RTW89_IC][1][103] = 10, - [1][1][2][1][RTW89_IC][2][103] = 127, [1][1][2][1][RTW89_KCC][1][103] = 32, [1][1][2][1][RTW89_KCC][0][103] = 127, [1][1][2][1][RTW89_ACMA][1][103] = 127, @@ -46249,13 +44937,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][103] = 127, [1][1][2][1][RTW89_THAILAND][0][103] = 127, [1][1][2][1][RTW89_FCC][1][106] = 12, - [1][1][2][1][RTW89_FCC][2][106] = 127, [1][1][2][1][RTW89_ETSI][1][106] = 127, [1][1][2][1][RTW89_ETSI][0][106] = 127, [1][1][2][1][RTW89_MKK][1][106] = 127, [1][1][2][1][RTW89_MKK][0][106] = 127, [1][1][2][1][RTW89_IC][1][106] = 12, - [1][1][2][1][RTW89_IC][2][106] = 127, [1][1][2][1][RTW89_KCC][1][106] = 32, [1][1][2][1][RTW89_KCC][0][106] = 127, [1][1][2][1][RTW89_ACMA][1][106] = 127, @@ -46268,13 +44954,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][106] = 127, [1][1][2][1][RTW89_THAILAND][0][106] = 127, [1][1][2][1][RTW89_FCC][1][110] = 127, - [1][1][2][1][RTW89_FCC][2][110] = 127, [1][1][2][1][RTW89_ETSI][1][110] = 127, 
[1][1][2][1][RTW89_ETSI][0][110] = 127, [1][1][2][1][RTW89_MKK][1][110] = 127, [1][1][2][1][RTW89_MKK][0][110] = 127, [1][1][2][1][RTW89_IC][1][110] = 127, - [1][1][2][1][RTW89_IC][2][110] = 127, [1][1][2][1][RTW89_KCC][1][110] = 127, [1][1][2][1][RTW89_KCC][0][110] = 127, [1][1][2][1][RTW89_ACMA][1][110] = 127, @@ -46287,13 +44971,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][110] = 127, [1][1][2][1][RTW89_THAILAND][0][110] = 127, [1][1][2][1][RTW89_FCC][1][114] = 127, - [1][1][2][1][RTW89_FCC][2][114] = 127, [1][1][2][1][RTW89_ETSI][1][114] = 127, [1][1][2][1][RTW89_ETSI][0][114] = 127, [1][1][2][1][RTW89_MKK][1][114] = 127, [1][1][2][1][RTW89_MKK][0][114] = 127, [1][1][2][1][RTW89_IC][1][114] = 127, - [1][1][2][1][RTW89_IC][2][114] = 127, [1][1][2][1][RTW89_KCC][1][114] = 127, [1][1][2][1][RTW89_KCC][0][114] = 127, [1][1][2][1][RTW89_ACMA][1][114] = 127, @@ -46306,13 +44988,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][114] = 127, [1][1][2][1][RTW89_THAILAND][0][114] = 127, [1][1][2][1][RTW89_FCC][1][118] = 127, - [1][1][2][1][RTW89_FCC][2][118] = 127, [1][1][2][1][RTW89_ETSI][1][118] = 127, [1][1][2][1][RTW89_ETSI][0][118] = 127, [1][1][2][1][RTW89_MKK][1][118] = 127, [1][1][2][1][RTW89_MKK][0][118] = 127, [1][1][2][1][RTW89_IC][1][118] = 127, - [1][1][2][1][RTW89_IC][2][118] = 127, [1][1][2][1][RTW89_KCC][1][118] = 127, [1][1][2][1][RTW89_KCC][0][118] = 127, [1][1][2][1][RTW89_ACMA][1][118] = 127, @@ -46325,13 +45005,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [1][1][2][1][RTW89_THAILAND][1][118] = 127, [1][1][2][1][RTW89_THAILAND][0][118] = 127, [2][0][2][0][RTW89_FCC][1][3] = 46, - [2][0][2][0][RTW89_FCC][2][3] = 60, [2][0][2][0][RTW89_ETSI][1][3] = 58, [2][0][2][0][RTW89_ETSI][0][3] = 30, [2][0][2][0][RTW89_MKK][1][3] = 58, [2][0][2][0][RTW89_MKK][0][3] = 26, [2][0][2][0][RTW89_IC][1][3] = 46, - [2][0][2][0][RTW89_IC][2][3] = 60, [2][0][2][0][RTW89_KCC][1][3] = 50, [2][0][2][0][RTW89_KCC][0][3] = 24, [2][0][2][0][RTW89_ACMA][1][3] = 58, @@ -46344,13 +45022,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_THAILAND][1][3] = 58, [2][0][2][0][RTW89_THAILAND][0][3] = 30, [2][0][2][0][RTW89_FCC][1][11] = 46, - [2][0][2][0][RTW89_FCC][2][11] = 60, [2][0][2][0][RTW89_ETSI][1][11] = 58, [2][0][2][0][RTW89_ETSI][0][11] = 30, [2][0][2][0][RTW89_MKK][1][11] = 58, [2][0][2][0][RTW89_MKK][0][11] = 24, [2][0][2][0][RTW89_IC][1][11] = 46, - [2][0][2][0][RTW89_IC][2][11] = 60, [2][0][2][0][RTW89_KCC][1][11] = 50, [2][0][2][0][RTW89_KCC][0][11] = 24, [2][0][2][0][RTW89_ACMA][1][11] = 58, @@ -46363,13 +45039,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_THAILAND][1][11] = 58, [2][0][2][0][RTW89_THAILAND][0][11] = 30, [2][0][2][0][RTW89_FCC][1][18] = 46, - [2][0][2][0][RTW89_FCC][2][18] = 60, [2][0][2][0][RTW89_ETSI][1][18] = 58, [2][0][2][0][RTW89_ETSI][0][18] = 30, [2][0][2][0][RTW89_MKK][1][18] = 58, [2][0][2][0][RTW89_MKK][0][18] = 24, [2][0][2][0][RTW89_IC][1][18] = 46, - [2][0][2][0][RTW89_IC][2][18] = 60, [2][0][2][0][RTW89_KCC][1][18] = 50, [2][0][2][0][RTW89_KCC][0][18] = 24, [2][0][2][0][RTW89_ACMA][1][18] = 58, @@ -46382,13 +45056,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_THAILAND][1][18] = 58, [2][0][2][0][RTW89_THAILAND][0][18] = 30, [2][0][2][0][RTW89_FCC][1][26] = 46, - [2][0][2][0][RTW89_FCC][2][26] = 60, 
[2][0][2][0][RTW89_ETSI][1][26] = 58, [2][0][2][0][RTW89_ETSI][0][26] = 30, [2][0][2][0][RTW89_MKK][1][26] = 58, [2][0][2][0][RTW89_MKK][0][26] = 24, [2][0][2][0][RTW89_IC][1][26] = 46, - [2][0][2][0][RTW89_IC][2][26] = 60, [2][0][2][0][RTW89_KCC][1][26] = 50, [2][0][2][0][RTW89_KCC][0][26] = 26, [2][0][2][0][RTW89_ACMA][1][26] = 58, @@ -46401,13 +45073,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_THAILAND][1][26] = 58, [2][0][2][0][RTW89_THAILAND][0][26] = 30, [2][0][2][0][RTW89_FCC][1][33] = 46, - [2][0][2][0][RTW89_FCC][2][33] = 60, [2][0][2][0][RTW89_ETSI][1][33] = 58, [2][0][2][0][RTW89_ETSI][0][33] = 30, [2][0][2][0][RTW89_MKK][1][33] = 58, [2][0][2][0][RTW89_MKK][0][33] = 24, [2][0][2][0][RTW89_IC][1][33] = 46, - [2][0][2][0][RTW89_IC][2][33] = 60, [2][0][2][0][RTW89_KCC][1][33] = 50, [2][0][2][0][RTW89_KCC][0][33] = 24, [2][0][2][0][RTW89_ACMA][1][33] = 58, @@ -46420,13 +45090,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_THAILAND][1][33] = 58, [2][0][2][0][RTW89_THAILAND][0][33] = 30, [2][0][2][0][RTW89_FCC][1][41] = 46, - [2][0][2][0][RTW89_FCC][2][41] = 60, [2][0][2][0][RTW89_ETSI][1][41] = 58, [2][0][2][0][RTW89_ETSI][0][41] = 30, [2][0][2][0][RTW89_MKK][1][41] = 58, [2][0][2][0][RTW89_MKK][0][41] = 24, [2][0][2][0][RTW89_IC][1][41] = 46, - [2][0][2][0][RTW89_IC][2][41] = 60, [2][0][2][0][RTW89_KCC][1][41] = 50, [2][0][2][0][RTW89_KCC][0][41] = 24, [2][0][2][0][RTW89_ACMA][1][41] = 58, @@ -46439,13 +45107,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_THAILAND][1][41] = 58, [2][0][2][0][RTW89_THAILAND][0][41] = 30, [2][0][2][0][RTW89_FCC][1][48] = 46, - [2][0][2][0][RTW89_FCC][2][48] = 127, [2][0][2][0][RTW89_ETSI][1][48] = 127, [2][0][2][0][RTW89_ETSI][0][48] = 127, [2][0][2][0][RTW89_MKK][1][48] = 127, [2][0][2][0][RTW89_MKK][0][48] = 127, [2][0][2][0][RTW89_IC][1][48] = 46, - [2][0][2][0][RTW89_IC][2][48] = 60, [2][0][2][0][RTW89_KCC][1][48] = 48, [2][0][2][0][RTW89_KCC][0][48] = 127, [2][0][2][0][RTW89_ACMA][1][48] = 127, @@ -46458,13 +45124,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_THAILAND][1][48] = 127, [2][0][2][0][RTW89_THAILAND][0][48] = 127, [2][0][2][0][RTW89_FCC][1][56] = 46, - [2][0][2][0][RTW89_FCC][2][56] = 127, [2][0][2][0][RTW89_ETSI][1][56] = 127, [2][0][2][0][RTW89_ETSI][0][56] = 127, [2][0][2][0][RTW89_MKK][1][56] = 127, [2][0][2][0][RTW89_MKK][0][56] = 127, [2][0][2][0][RTW89_IC][1][56] = 46, - [2][0][2][0][RTW89_IC][2][56] = 58, [2][0][2][0][RTW89_KCC][1][56] = 48, [2][0][2][0][RTW89_KCC][0][56] = 127, [2][0][2][0][RTW89_ACMA][1][56] = 127, @@ -46477,13 +45141,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_THAILAND][1][56] = 127, [2][0][2][0][RTW89_THAILAND][0][56] = 127, [2][0][2][0][RTW89_FCC][1][63] = 46, - [2][0][2][0][RTW89_FCC][2][63] = 58, [2][0][2][0][RTW89_ETSI][1][63] = 127, [2][0][2][0][RTW89_ETSI][0][63] = 127, [2][0][2][0][RTW89_MKK][1][63] = 127, [2][0][2][0][RTW89_MKK][0][63] = 127, [2][0][2][0][RTW89_IC][1][63] = 46, - [2][0][2][0][RTW89_IC][2][63] = 58, [2][0][2][0][RTW89_KCC][1][63] = 48, [2][0][2][0][RTW89_KCC][0][63] = 127, [2][0][2][0][RTW89_ACMA][1][63] = 127, @@ -46496,13 +45158,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_THAILAND][1][63] = 127, [2][0][2][0][RTW89_THAILAND][0][63] = 127, [2][0][2][0][RTW89_FCC][1][71] = 46, - [2][0][2][0][RTW89_FCC][2][71] = 58, 
[2][0][2][0][RTW89_ETSI][1][71] = 127, [2][0][2][0][RTW89_ETSI][0][71] = 127, [2][0][2][0][RTW89_MKK][1][71] = 127, [2][0][2][0][RTW89_MKK][0][71] = 127, [2][0][2][0][RTW89_IC][1][71] = 46, - [2][0][2][0][RTW89_IC][2][71] = 58, [2][0][2][0][RTW89_KCC][1][71] = 48, [2][0][2][0][RTW89_KCC][0][71] = 127, [2][0][2][0][RTW89_ACMA][1][71] = 127, @@ -46515,13 +45175,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_THAILAND][1][71] = 127, [2][0][2][0][RTW89_THAILAND][0][71] = 127, [2][0][2][0][RTW89_FCC][1][78] = 46, - [2][0][2][0][RTW89_FCC][2][78] = 58, [2][0][2][0][RTW89_ETSI][1][78] = 127, [2][0][2][0][RTW89_ETSI][0][78] = 127, [2][0][2][0][RTW89_MKK][1][78] = 127, [2][0][2][0][RTW89_MKK][0][78] = 127, [2][0][2][0][RTW89_IC][1][78] = 46, - [2][0][2][0][RTW89_IC][2][78] = 58, [2][0][2][0][RTW89_KCC][1][78] = 52, [2][0][2][0][RTW89_KCC][0][78] = 127, [2][0][2][0][RTW89_ACMA][1][78] = 127, @@ -46534,13 +45192,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_THAILAND][1][78] = 127, [2][0][2][0][RTW89_THAILAND][0][78] = 127, [2][0][2][0][RTW89_FCC][1][86] = 46, - [2][0][2][0][RTW89_FCC][2][86] = 127, [2][0][2][0][RTW89_ETSI][1][86] = 127, [2][0][2][0][RTW89_ETSI][0][86] = 127, [2][0][2][0][RTW89_MKK][1][86] = 127, [2][0][2][0][RTW89_MKK][0][86] = 127, [2][0][2][0][RTW89_IC][1][86] = 46, - [2][0][2][0][RTW89_IC][2][86] = 127, [2][0][2][0][RTW89_KCC][1][86] = 52, [2][0][2][0][RTW89_KCC][0][86] = 127, [2][0][2][0][RTW89_ACMA][1][86] = 127, @@ -46553,13 +45209,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_THAILAND][1][86] = 127, [2][0][2][0][RTW89_THAILAND][0][86] = 127, [2][0][2][0][RTW89_FCC][1][93] = 46, - [2][0][2][0][RTW89_FCC][2][93] = 127, [2][0][2][0][RTW89_ETSI][1][93] = 127, [2][0][2][0][RTW89_ETSI][0][93] = 127, [2][0][2][0][RTW89_MKK][1][93] = 127, [2][0][2][0][RTW89_MKK][0][93] = 127, [2][0][2][0][RTW89_IC][1][93] = 46, - [2][0][2][0][RTW89_IC][2][93] = 127, [2][0][2][0][RTW89_KCC][1][93] = 50, [2][0][2][0][RTW89_KCC][0][93] = 127, [2][0][2][0][RTW89_ACMA][1][93] = 127, @@ -46572,13 +45226,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_THAILAND][1][93] = 127, [2][0][2][0][RTW89_THAILAND][0][93] = 127, [2][0][2][0][RTW89_FCC][1][101] = 44, - [2][0][2][0][RTW89_FCC][2][101] = 127, [2][0][2][0][RTW89_ETSI][1][101] = 127, [2][0][2][0][RTW89_ETSI][0][101] = 127, [2][0][2][0][RTW89_MKK][1][101] = 127, [2][0][2][0][RTW89_MKK][0][101] = 127, [2][0][2][0][RTW89_IC][1][101] = 44, - [2][0][2][0][RTW89_IC][2][101] = 127, [2][0][2][0][RTW89_KCC][1][101] = 50, [2][0][2][0][RTW89_KCC][0][101] = 127, [2][0][2][0][RTW89_ACMA][1][101] = 127, @@ -46591,13 +45243,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_THAILAND][1][101] = 127, [2][0][2][0][RTW89_THAILAND][0][101] = 127, [2][0][2][0][RTW89_FCC][1][108] = 127, - [2][0][2][0][RTW89_FCC][2][108] = 127, [2][0][2][0][RTW89_ETSI][1][108] = 127, [2][0][2][0][RTW89_ETSI][0][108] = 127, [2][0][2][0][RTW89_MKK][1][108] = 127, [2][0][2][0][RTW89_MKK][0][108] = 127, [2][0][2][0][RTW89_IC][1][108] = 127, - [2][0][2][0][RTW89_IC][2][108] = 127, [2][0][2][0][RTW89_KCC][1][108] = 127, [2][0][2][0][RTW89_KCC][0][108] = 127, [2][0][2][0][RTW89_ACMA][1][108] = 127, @@ -46610,13 +45260,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_THAILAND][1][108] = 127, [2][0][2][0][RTW89_THAILAND][0][108] = 127, 
[2][0][2][0][RTW89_FCC][1][116] = 127, - [2][0][2][0][RTW89_FCC][2][116] = 127, [2][0][2][0][RTW89_ETSI][1][116] = 127, [2][0][2][0][RTW89_ETSI][0][116] = 127, [2][0][2][0][RTW89_MKK][1][116] = 127, [2][0][2][0][RTW89_MKK][0][116] = 127, [2][0][2][0][RTW89_IC][1][116] = 127, - [2][0][2][0][RTW89_IC][2][116] = 127, [2][0][2][0][RTW89_KCC][1][116] = 127, [2][0][2][0][RTW89_KCC][0][116] = 127, [2][0][2][0][RTW89_ACMA][1][116] = 127, @@ -46629,13 +45277,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][0][2][0][RTW89_THAILAND][1][116] = 127, [2][0][2][0][RTW89_THAILAND][0][116] = 127, [2][1][2][0][RTW89_FCC][1][3] = 22, - [2][1][2][0][RTW89_FCC][2][3] = 50, [2][1][2][0][RTW89_ETSI][1][3] = 54, [2][1][2][0][RTW89_ETSI][0][3] = 16, [2][1][2][0][RTW89_MKK][1][3] = 52, [2][1][2][0][RTW89_MKK][0][3] = 14, [2][1][2][0][RTW89_IC][1][3] = 22, - [2][1][2][0][RTW89_IC][2][3] = 50, [2][1][2][0][RTW89_KCC][1][3] = 38, [2][1][2][0][RTW89_KCC][0][3] = 12, [2][1][2][0][RTW89_ACMA][1][3] = 54, @@ -46648,13 +45294,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_THAILAND][1][3] = 46, [2][1][2][0][RTW89_THAILAND][0][3] = 18, [2][1][2][0][RTW89_FCC][1][11] = 20, - [2][1][2][0][RTW89_FCC][2][11] = 50, [2][1][2][0][RTW89_ETSI][1][11] = 54, [2][1][2][0][RTW89_ETSI][0][11] = 16, [2][1][2][0][RTW89_MKK][1][11] = 52, [2][1][2][0][RTW89_MKK][0][11] = 12, [2][1][2][0][RTW89_IC][1][11] = 20, - [2][1][2][0][RTW89_IC][2][11] = 50, [2][1][2][0][RTW89_KCC][1][11] = 38, [2][1][2][0][RTW89_KCC][0][11] = 12, [2][1][2][0][RTW89_ACMA][1][11] = 54, @@ -46667,13 +45311,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_THAILAND][1][11] = 46, [2][1][2][0][RTW89_THAILAND][0][11] = 18, [2][1][2][0][RTW89_FCC][1][18] = 20, - [2][1][2][0][RTW89_FCC][2][18] = 50, [2][1][2][0][RTW89_ETSI][1][18] = 54, [2][1][2][0][RTW89_ETSI][0][18] = 16, [2][1][2][0][RTW89_MKK][1][18] = 52, [2][1][2][0][RTW89_MKK][0][18] = 12, [2][1][2][0][RTW89_IC][1][18] = 20, - [2][1][2][0][RTW89_IC][2][18] = 50, [2][1][2][0][RTW89_KCC][1][18] = 38, [2][1][2][0][RTW89_KCC][0][18] = 12, [2][1][2][0][RTW89_ACMA][1][18] = 54, @@ -46686,13 +45328,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_THAILAND][1][18] = 46, [2][1][2][0][RTW89_THAILAND][0][18] = 18, [2][1][2][0][RTW89_FCC][1][26] = 20, - [2][1][2][0][RTW89_FCC][2][26] = 60, [2][1][2][0][RTW89_ETSI][1][26] = 54, [2][1][2][0][RTW89_ETSI][0][26] = 16, [2][1][2][0][RTW89_MKK][1][26] = 52, [2][1][2][0][RTW89_MKK][0][26] = 12, [2][1][2][0][RTW89_IC][1][26] = 20, - [2][1][2][0][RTW89_IC][2][26] = 60, [2][1][2][0][RTW89_KCC][1][26] = 38, [2][1][2][0][RTW89_KCC][0][26] = 12, [2][1][2][0][RTW89_ACMA][1][26] = 54, @@ -46705,13 +45345,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_THAILAND][1][26] = 46, [2][1][2][0][RTW89_THAILAND][0][26] = 18, [2][1][2][0][RTW89_FCC][1][33] = 20, - [2][1][2][0][RTW89_FCC][2][33] = 60, [2][1][2][0][RTW89_ETSI][1][33] = 54, [2][1][2][0][RTW89_ETSI][0][33] = 16, [2][1][2][0][RTW89_MKK][1][33] = 48, [2][1][2][0][RTW89_MKK][0][33] = 12, [2][1][2][0][RTW89_IC][1][33] = 20, - [2][1][2][0][RTW89_IC][2][33] = 60, [2][1][2][0][RTW89_KCC][1][33] = 38, [2][1][2][0][RTW89_KCC][0][33] = 12, [2][1][2][0][RTW89_ACMA][1][33] = 54, @@ -46724,13 +45362,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_THAILAND][1][33] = 46, [2][1][2][0][RTW89_THAILAND][0][33] = 18, 
[2][1][2][0][RTW89_FCC][1][41] = 22, - [2][1][2][0][RTW89_FCC][2][41] = 60, [2][1][2][0][RTW89_ETSI][1][41] = 54, [2][1][2][0][RTW89_ETSI][0][41] = 18, [2][1][2][0][RTW89_MKK][1][41] = 48, [2][1][2][0][RTW89_MKK][0][41] = 12, [2][1][2][0][RTW89_IC][1][41] = 22, - [2][1][2][0][RTW89_IC][2][41] = 60, [2][1][2][0][RTW89_KCC][1][41] = 38, [2][1][2][0][RTW89_KCC][0][41] = 12, [2][1][2][0][RTW89_ACMA][1][41] = 54, @@ -46743,13 +45379,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_THAILAND][1][41] = 46, [2][1][2][0][RTW89_THAILAND][0][41] = 18, [2][1][2][0][RTW89_FCC][1][48] = 22, - [2][1][2][0][RTW89_FCC][2][48] = 127, [2][1][2][0][RTW89_ETSI][1][48] = 127, [2][1][2][0][RTW89_ETSI][0][48] = 127, [2][1][2][0][RTW89_MKK][1][48] = 127, [2][1][2][0][RTW89_MKK][0][48] = 127, [2][1][2][0][RTW89_IC][1][48] = 22, - [2][1][2][0][RTW89_IC][2][48] = 60, [2][1][2][0][RTW89_KCC][1][48] = 38, [2][1][2][0][RTW89_KCC][0][48] = 127, [2][1][2][0][RTW89_ACMA][1][48] = 127, @@ -46762,13 +45396,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_THAILAND][1][48] = 127, [2][1][2][0][RTW89_THAILAND][0][48] = 127, [2][1][2][0][RTW89_FCC][1][56] = 20, - [2][1][2][0][RTW89_FCC][2][56] = 127, [2][1][2][0][RTW89_ETSI][1][56] = 127, [2][1][2][0][RTW89_ETSI][0][56] = 127, [2][1][2][0][RTW89_MKK][1][56] = 127, [2][1][2][0][RTW89_MKK][0][56] = 127, [2][1][2][0][RTW89_IC][1][56] = 20, - [2][1][2][0][RTW89_IC][2][56] = 56, [2][1][2][0][RTW89_KCC][1][56] = 38, [2][1][2][0][RTW89_KCC][0][56] = 127, [2][1][2][0][RTW89_ACMA][1][56] = 127, @@ -46781,13 +45413,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_THAILAND][1][56] = 127, [2][1][2][0][RTW89_THAILAND][0][56] = 127, [2][1][2][0][RTW89_FCC][1][63] = 22, - [2][1][2][0][RTW89_FCC][2][63] = 58, [2][1][2][0][RTW89_ETSI][1][63] = 127, [2][1][2][0][RTW89_ETSI][0][63] = 127, [2][1][2][0][RTW89_MKK][1][63] = 127, [2][1][2][0][RTW89_MKK][0][63] = 127, [2][1][2][0][RTW89_IC][1][63] = 22, - [2][1][2][0][RTW89_IC][2][63] = 58, [2][1][2][0][RTW89_KCC][1][63] = 38, [2][1][2][0][RTW89_KCC][0][63] = 127, [2][1][2][0][RTW89_ACMA][1][63] = 127, @@ -46800,13 +45430,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_THAILAND][1][63] = 127, [2][1][2][0][RTW89_THAILAND][0][63] = 127, [2][1][2][0][RTW89_FCC][1][71] = 20, - [2][1][2][0][RTW89_FCC][2][71] = 58, [2][1][2][0][RTW89_ETSI][1][71] = 127, [2][1][2][0][RTW89_ETSI][0][71] = 127, [2][1][2][0][RTW89_MKK][1][71] = 127, [2][1][2][0][RTW89_MKK][0][71] = 127, [2][1][2][0][RTW89_IC][1][71] = 20, - [2][1][2][0][RTW89_IC][2][71] = 58, [2][1][2][0][RTW89_KCC][1][71] = 38, [2][1][2][0][RTW89_KCC][0][71] = 127, [2][1][2][0][RTW89_ACMA][1][71] = 127, @@ -46819,13 +45447,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_THAILAND][1][71] = 127, [2][1][2][0][RTW89_THAILAND][0][71] = 127, [2][1][2][0][RTW89_FCC][1][78] = 20, - [2][1][2][0][RTW89_FCC][2][78] = 58, [2][1][2][0][RTW89_ETSI][1][78] = 127, [2][1][2][0][RTW89_ETSI][0][78] = 127, [2][1][2][0][RTW89_MKK][1][78] = 127, [2][1][2][0][RTW89_MKK][0][78] = 127, [2][1][2][0][RTW89_IC][1][78] = 20, - [2][1][2][0][RTW89_IC][2][78] = 58, [2][1][2][0][RTW89_KCC][1][78] = 38, [2][1][2][0][RTW89_KCC][0][78] = 127, [2][1][2][0][RTW89_ACMA][1][78] = 127, @@ -46838,13 +45464,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_THAILAND][1][78] = 127, 
[2][1][2][0][RTW89_THAILAND][0][78] = 127, [2][1][2][0][RTW89_FCC][1][86] = 20, - [2][1][2][0][RTW89_FCC][2][86] = 127, [2][1][2][0][RTW89_ETSI][1][86] = 127, [2][1][2][0][RTW89_ETSI][0][86] = 127, [2][1][2][0][RTW89_MKK][1][86] = 127, [2][1][2][0][RTW89_MKK][0][86] = 127, [2][1][2][0][RTW89_IC][1][86] = 20, - [2][1][2][0][RTW89_IC][2][86] = 127, [2][1][2][0][RTW89_KCC][1][86] = 38, [2][1][2][0][RTW89_KCC][0][86] = 127, [2][1][2][0][RTW89_ACMA][1][86] = 127, @@ -46857,13 +45481,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_THAILAND][1][86] = 127, [2][1][2][0][RTW89_THAILAND][0][86] = 127, [2][1][2][0][RTW89_FCC][1][93] = 22, - [2][1][2][0][RTW89_FCC][2][93] = 127, [2][1][2][0][RTW89_ETSI][1][93] = 127, [2][1][2][0][RTW89_ETSI][0][93] = 127, [2][1][2][0][RTW89_MKK][1][93] = 127, [2][1][2][0][RTW89_MKK][0][93] = 127, [2][1][2][0][RTW89_IC][1][93] = 22, - [2][1][2][0][RTW89_IC][2][93] = 127, [2][1][2][0][RTW89_KCC][1][93] = 38, [2][1][2][0][RTW89_KCC][0][93] = 127, [2][1][2][0][RTW89_ACMA][1][93] = 127, @@ -46876,13 +45498,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_THAILAND][1][93] = 127, [2][1][2][0][RTW89_THAILAND][0][93] = 127, [2][1][2][0][RTW89_FCC][1][101] = 22, - [2][1][2][0][RTW89_FCC][2][101] = 127, [2][1][2][0][RTW89_ETSI][1][101] = 127, [2][1][2][0][RTW89_ETSI][0][101] = 127, [2][1][2][0][RTW89_MKK][1][101] = 127, [2][1][2][0][RTW89_MKK][0][101] = 127, [2][1][2][0][RTW89_IC][1][101] = 22, - [2][1][2][0][RTW89_IC][2][101] = 127, [2][1][2][0][RTW89_KCC][1][101] = 38, [2][1][2][0][RTW89_KCC][0][101] = 127, [2][1][2][0][RTW89_ACMA][1][101] = 127, @@ -46895,13 +45515,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_THAILAND][1][101] = 127, [2][1][2][0][RTW89_THAILAND][0][101] = 127, [2][1][2][0][RTW89_FCC][1][108] = 127, - [2][1][2][0][RTW89_FCC][2][108] = 127, [2][1][2][0][RTW89_ETSI][1][108] = 127, [2][1][2][0][RTW89_ETSI][0][108] = 127, [2][1][2][0][RTW89_MKK][1][108] = 127, [2][1][2][0][RTW89_MKK][0][108] = 127, [2][1][2][0][RTW89_IC][1][108] = 127, - [2][1][2][0][RTW89_IC][2][108] = 127, [2][1][2][0][RTW89_KCC][1][108] = 127, [2][1][2][0][RTW89_KCC][0][108] = 127, [2][1][2][0][RTW89_ACMA][1][108] = 127, @@ -46914,13 +45532,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_THAILAND][1][108] = 127, [2][1][2][0][RTW89_THAILAND][0][108] = 127, [2][1][2][0][RTW89_FCC][1][116] = 127, - [2][1][2][0][RTW89_FCC][2][116] = 127, [2][1][2][0][RTW89_ETSI][1][116] = 127, [2][1][2][0][RTW89_ETSI][0][116] = 127, [2][1][2][0][RTW89_MKK][1][116] = 127, [2][1][2][0][RTW89_MKK][0][116] = 127, [2][1][2][0][RTW89_IC][1][116] = 127, - [2][1][2][0][RTW89_IC][2][116] = 127, [2][1][2][0][RTW89_KCC][1][116] = 127, [2][1][2][0][RTW89_KCC][0][116] = 127, [2][1][2][0][RTW89_ACMA][1][116] = 127, @@ -46933,13 +45549,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][0][RTW89_THAILAND][1][116] = 127, [2][1][2][0][RTW89_THAILAND][0][116] = 127, [2][1][2][1][RTW89_FCC][1][3] = 22, - [2][1][2][1][RTW89_FCC][2][3] = 50, [2][1][2][1][RTW89_ETSI][1][3] = 42, [2][1][2][1][RTW89_ETSI][0][3] = 6, [2][1][2][1][RTW89_MKK][1][3] = 52, [2][1][2][1][RTW89_MKK][0][3] = 14, [2][1][2][1][RTW89_IC][1][3] = 22, - [2][1][2][1][RTW89_IC][2][3] = 50, [2][1][2][1][RTW89_KCC][1][3] = 38, [2][1][2][1][RTW89_KCC][0][3] = 12, [2][1][2][1][RTW89_ACMA][1][3] = 42, @@ -46952,13 +45566,11 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_THAILAND][1][3] = 46, [2][1][2][1][RTW89_THAILAND][0][3] = 6, [2][1][2][1][RTW89_FCC][1][11] = 20, - [2][1][2][1][RTW89_FCC][2][11] = 50, [2][1][2][1][RTW89_ETSI][1][11] = 42, [2][1][2][1][RTW89_ETSI][0][11] = 6, [2][1][2][1][RTW89_MKK][1][11] = 52, [2][1][2][1][RTW89_MKK][0][11] = 12, [2][1][2][1][RTW89_IC][1][11] = 20, - [2][1][2][1][RTW89_IC][2][11] = 50, [2][1][2][1][RTW89_KCC][1][11] = 38, [2][1][2][1][RTW89_KCC][0][11] = 12, [2][1][2][1][RTW89_ACMA][1][11] = 42, @@ -46971,13 +45583,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_THAILAND][1][11] = 46, [2][1][2][1][RTW89_THAILAND][0][11] = 6, [2][1][2][1][RTW89_FCC][1][18] = 20, - [2][1][2][1][RTW89_FCC][2][18] = 50, [2][1][2][1][RTW89_ETSI][1][18] = 42, [2][1][2][1][RTW89_ETSI][0][18] = 6, [2][1][2][1][RTW89_MKK][1][18] = 52, [2][1][2][1][RTW89_MKK][0][18] = 12, [2][1][2][1][RTW89_IC][1][18] = 20, - [2][1][2][1][RTW89_IC][2][18] = 50, [2][1][2][1][RTW89_KCC][1][18] = 38, [2][1][2][1][RTW89_KCC][0][18] = 12, [2][1][2][1][RTW89_ACMA][1][18] = 42, @@ -46990,13 +45600,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_THAILAND][1][18] = 46, [2][1][2][1][RTW89_THAILAND][0][18] = 6, [2][1][2][1][RTW89_FCC][1][26] = 20, - [2][1][2][1][RTW89_FCC][2][26] = 60, [2][1][2][1][RTW89_ETSI][1][26] = 42, [2][1][2][1][RTW89_ETSI][0][26] = 6, [2][1][2][1][RTW89_MKK][1][26] = 52, [2][1][2][1][RTW89_MKK][0][26] = 12, [2][1][2][1][RTW89_IC][1][26] = 20, - [2][1][2][1][RTW89_IC][2][26] = 60, [2][1][2][1][RTW89_KCC][1][26] = 38, [2][1][2][1][RTW89_KCC][0][26] = 12, [2][1][2][1][RTW89_ACMA][1][26] = 42, @@ -47009,13 +45617,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_THAILAND][1][26] = 46, [2][1][2][1][RTW89_THAILAND][0][26] = 6, [2][1][2][1][RTW89_FCC][1][33] = 20, - [2][1][2][1][RTW89_FCC][2][33] = 60, [2][1][2][1][RTW89_ETSI][1][33] = 42, [2][1][2][1][RTW89_ETSI][0][33] = 6, [2][1][2][1][RTW89_MKK][1][33] = 48, [2][1][2][1][RTW89_MKK][0][33] = 12, [2][1][2][1][RTW89_IC][1][33] = 20, - [2][1][2][1][RTW89_IC][2][33] = 60, [2][1][2][1][RTW89_KCC][1][33] = 38, [2][1][2][1][RTW89_KCC][0][33] = 12, [2][1][2][1][RTW89_ACMA][1][33] = 42, @@ -47028,13 +45634,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_THAILAND][1][33] = 46, [2][1][2][1][RTW89_THAILAND][0][33] = 6, [2][1][2][1][RTW89_FCC][1][41] = 22, - [2][1][2][1][RTW89_FCC][2][41] = 60, [2][1][2][1][RTW89_ETSI][1][41] = 42, [2][1][2][1][RTW89_ETSI][0][41] = 6, [2][1][2][1][RTW89_MKK][1][41] = 48, [2][1][2][1][RTW89_MKK][0][41] = 12, [2][1][2][1][RTW89_IC][1][41] = 22, - [2][1][2][1][RTW89_IC][2][41] = 60, [2][1][2][1][RTW89_KCC][1][41] = 38, [2][1][2][1][RTW89_KCC][0][41] = 12, [2][1][2][1][RTW89_ACMA][1][41] = 42, @@ -47047,13 +45651,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_THAILAND][1][41] = 46, [2][1][2][1][RTW89_THAILAND][0][41] = 6, [2][1][2][1][RTW89_FCC][1][48] = 22, - [2][1][2][1][RTW89_FCC][2][48] = 127, [2][1][2][1][RTW89_ETSI][1][48] = 127, [2][1][2][1][RTW89_ETSI][0][48] = 127, [2][1][2][1][RTW89_MKK][1][48] = 127, [2][1][2][1][RTW89_MKK][0][48] = 127, [2][1][2][1][RTW89_IC][1][48] = 22, - [2][1][2][1][RTW89_IC][2][48] = 60, [2][1][2][1][RTW89_KCC][1][48] = 38, [2][1][2][1][RTW89_KCC][0][48] = 127, [2][1][2][1][RTW89_ACMA][1][48] = 127, @@ -47066,13 +45668,11 @@ const s8 
rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_THAILAND][1][48] = 127, [2][1][2][1][RTW89_THAILAND][0][48] = 127, [2][1][2][1][RTW89_FCC][1][56] = 20, - [2][1][2][1][RTW89_FCC][2][56] = 127, [2][1][2][1][RTW89_ETSI][1][56] = 127, [2][1][2][1][RTW89_ETSI][0][56] = 127, [2][1][2][1][RTW89_MKK][1][56] = 127, [2][1][2][1][RTW89_MKK][0][56] = 127, [2][1][2][1][RTW89_IC][1][56] = 20, - [2][1][2][1][RTW89_IC][2][56] = 56, [2][1][2][1][RTW89_KCC][1][56] = 38, [2][1][2][1][RTW89_KCC][0][56] = 127, [2][1][2][1][RTW89_ACMA][1][56] = 127, @@ -47085,13 +45685,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_THAILAND][1][56] = 127, [2][1][2][1][RTW89_THAILAND][0][56] = 127, [2][1][2][1][RTW89_FCC][1][63] = 22, - [2][1][2][1][RTW89_FCC][2][63] = 58, [2][1][2][1][RTW89_ETSI][1][63] = 127, [2][1][2][1][RTW89_ETSI][0][63] = 127, [2][1][2][1][RTW89_MKK][1][63] = 127, [2][1][2][1][RTW89_MKK][0][63] = 127, [2][1][2][1][RTW89_IC][1][63] = 22, - [2][1][2][1][RTW89_IC][2][63] = 58, [2][1][2][1][RTW89_KCC][1][63] = 38, [2][1][2][1][RTW89_KCC][0][63] = 127, [2][1][2][1][RTW89_ACMA][1][63] = 127, @@ -47104,13 +45702,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_THAILAND][1][63] = 127, [2][1][2][1][RTW89_THAILAND][0][63] = 127, [2][1][2][1][RTW89_FCC][1][71] = 20, - [2][1][2][1][RTW89_FCC][2][71] = 58, [2][1][2][1][RTW89_ETSI][1][71] = 127, [2][1][2][1][RTW89_ETSI][0][71] = 127, [2][1][2][1][RTW89_MKK][1][71] = 127, [2][1][2][1][RTW89_MKK][0][71] = 127, [2][1][2][1][RTW89_IC][1][71] = 20, - [2][1][2][1][RTW89_IC][2][71] = 58, [2][1][2][1][RTW89_KCC][1][71] = 38, [2][1][2][1][RTW89_KCC][0][71] = 127, [2][1][2][1][RTW89_ACMA][1][71] = 127, @@ -47123,13 +45719,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_THAILAND][1][71] = 127, [2][1][2][1][RTW89_THAILAND][0][71] = 127, [2][1][2][1][RTW89_FCC][1][78] = 20, - [2][1][2][1][RTW89_FCC][2][78] = 58, [2][1][2][1][RTW89_ETSI][1][78] = 127, [2][1][2][1][RTW89_ETSI][0][78] = 127, [2][1][2][1][RTW89_MKK][1][78] = 127, [2][1][2][1][RTW89_MKK][0][78] = 127, [2][1][2][1][RTW89_IC][1][78] = 20, - [2][1][2][1][RTW89_IC][2][78] = 58, [2][1][2][1][RTW89_KCC][1][78] = 38, [2][1][2][1][RTW89_KCC][0][78] = 127, [2][1][2][1][RTW89_ACMA][1][78] = 127, @@ -47142,13 +45736,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_THAILAND][1][78] = 127, [2][1][2][1][RTW89_THAILAND][0][78] = 127, [2][1][2][1][RTW89_FCC][1][86] = 20, - [2][1][2][1][RTW89_FCC][2][86] = 127, [2][1][2][1][RTW89_ETSI][1][86] = 127, [2][1][2][1][RTW89_ETSI][0][86] = 127, [2][1][2][1][RTW89_MKK][1][86] = 127, [2][1][2][1][RTW89_MKK][0][86] = 127, [2][1][2][1][RTW89_IC][1][86] = 20, - [2][1][2][1][RTW89_IC][2][86] = 127, [2][1][2][1][RTW89_KCC][1][86] = 38, [2][1][2][1][RTW89_KCC][0][86] = 127, [2][1][2][1][RTW89_ACMA][1][86] = 127, @@ -47161,13 +45753,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_THAILAND][1][86] = 127, [2][1][2][1][RTW89_THAILAND][0][86] = 127, [2][1][2][1][RTW89_FCC][1][93] = 22, - [2][1][2][1][RTW89_FCC][2][93] = 127, [2][1][2][1][RTW89_ETSI][1][93] = 127, [2][1][2][1][RTW89_ETSI][0][93] = 127, [2][1][2][1][RTW89_MKK][1][93] = 127, [2][1][2][1][RTW89_MKK][0][93] = 127, [2][1][2][1][RTW89_IC][1][93] = 22, - [2][1][2][1][RTW89_IC][2][93] = 127, [2][1][2][1][RTW89_KCC][1][93] = 38, [2][1][2][1][RTW89_KCC][0][93] = 127, [2][1][2][1][RTW89_ACMA][1][93] = 127, @@ 
-47180,13 +45770,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_THAILAND][1][93] = 127, [2][1][2][1][RTW89_THAILAND][0][93] = 127, [2][1][2][1][RTW89_FCC][1][101] = 22, - [2][1][2][1][RTW89_FCC][2][101] = 127, [2][1][2][1][RTW89_ETSI][1][101] = 127, [2][1][2][1][RTW89_ETSI][0][101] = 127, [2][1][2][1][RTW89_MKK][1][101] = 127, [2][1][2][1][RTW89_MKK][0][101] = 127, [2][1][2][1][RTW89_IC][1][101] = 22, - [2][1][2][1][RTW89_IC][2][101] = 127, [2][1][2][1][RTW89_KCC][1][101] = 38, [2][1][2][1][RTW89_KCC][0][101] = 127, [2][1][2][1][RTW89_ACMA][1][101] = 127, @@ -47199,13 +45787,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_THAILAND][1][101] = 127, [2][1][2][1][RTW89_THAILAND][0][101] = 127, [2][1][2][1][RTW89_FCC][1][108] = 127, - [2][1][2][1][RTW89_FCC][2][108] = 127, [2][1][2][1][RTW89_ETSI][1][108] = 127, [2][1][2][1][RTW89_ETSI][0][108] = 127, [2][1][2][1][RTW89_MKK][1][108] = 127, [2][1][2][1][RTW89_MKK][0][108] = 127, [2][1][2][1][RTW89_IC][1][108] = 127, - [2][1][2][1][RTW89_IC][2][108] = 127, [2][1][2][1][RTW89_KCC][1][108] = 127, [2][1][2][1][RTW89_KCC][0][108] = 127, [2][1][2][1][RTW89_ACMA][1][108] = 127, @@ -47218,13 +45804,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_THAILAND][1][108] = 127, [2][1][2][1][RTW89_THAILAND][0][108] = 127, [2][1][2][1][RTW89_FCC][1][116] = 127, - [2][1][2][1][RTW89_FCC][2][116] = 127, [2][1][2][1][RTW89_ETSI][1][116] = 127, [2][1][2][1][RTW89_ETSI][0][116] = 127, [2][1][2][1][RTW89_MKK][1][116] = 127, [2][1][2][1][RTW89_MKK][0][116] = 127, [2][1][2][1][RTW89_IC][1][116] = 127, - [2][1][2][1][RTW89_IC][2][116] = 127, [2][1][2][1][RTW89_KCC][1][116] = 127, [2][1][2][1][RTW89_KCC][0][116] = 127, [2][1][2][1][RTW89_ACMA][1][116] = 127, @@ -47237,13 +45821,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [2][1][2][1][RTW89_THAILAND][1][116] = 127, [2][1][2][1][RTW89_THAILAND][0][116] = 127, [3][0][2][0][RTW89_FCC][1][7] = 52, - [3][0][2][0][RTW89_FCC][2][7] = 52, [3][0][2][0][RTW89_ETSI][1][7] = 50, [3][0][2][0][RTW89_ETSI][0][7] = 30, [3][0][2][0][RTW89_MKK][1][7] = 50, [3][0][2][0][RTW89_MKK][0][7] = 22, [3][0][2][0][RTW89_IC][1][7] = 52, - [3][0][2][0][RTW89_IC][2][7] = 52, [3][0][2][0][RTW89_KCC][1][7] = 42, [3][0][2][0][RTW89_KCC][0][7] = 24, [3][0][2][0][RTW89_ACMA][1][7] = 50, @@ -47256,13 +45838,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_THAILAND][1][7] = 50, [3][0][2][0][RTW89_THAILAND][0][7] = 30, [3][0][2][0][RTW89_FCC][1][22] = 52, - [3][0][2][0][RTW89_FCC][2][22] = 52, [3][0][2][0][RTW89_ETSI][1][22] = 50, [3][0][2][0][RTW89_ETSI][0][22] = 30, [3][0][2][0][RTW89_MKK][1][22] = 50, [3][0][2][0][RTW89_MKK][0][22] = 20, [3][0][2][0][RTW89_IC][1][22] = 52, - [3][0][2][0][RTW89_IC][2][22] = 52, [3][0][2][0][RTW89_KCC][1][22] = 42, [3][0][2][0][RTW89_KCC][0][22] = 24, [3][0][2][0][RTW89_ACMA][1][22] = 50, @@ -47275,13 +45855,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_THAILAND][1][22] = 50, [3][0][2][0][RTW89_THAILAND][0][22] = 30, [3][0][2][0][RTW89_FCC][1][37] = 52, - [3][0][2][0][RTW89_FCC][2][37] = 52, [3][0][2][0][RTW89_ETSI][1][37] = 50, [3][0][2][0][RTW89_ETSI][0][37] = 30, [3][0][2][0][RTW89_MKK][1][37] = 50, [3][0][2][0][RTW89_MKK][0][37] = 20, [3][0][2][0][RTW89_IC][1][37] = 52, - [3][0][2][0][RTW89_IC][2][37] = 52, [3][0][2][0][RTW89_KCC][1][37] = 42, [3][0][2][0][RTW89_KCC][0][37] = 24, 
[3][0][2][0][RTW89_ACMA][1][37] = 50, @@ -47294,13 +45872,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_THAILAND][1][37] = 50, [3][0][2][0][RTW89_THAILAND][0][37] = 30, [3][0][2][0][RTW89_FCC][1][52] = 54, - [3][0][2][0][RTW89_FCC][2][52] = 127, [3][0][2][0][RTW89_ETSI][1][52] = 127, [3][0][2][0][RTW89_ETSI][0][52] = 127, [3][0][2][0][RTW89_MKK][1][52] = 127, [3][0][2][0][RTW89_MKK][0][52] = 127, [3][0][2][0][RTW89_IC][1][52] = 54, - [3][0][2][0][RTW89_IC][2][52] = 56, [3][0][2][0][RTW89_KCC][1][52] = 56, [3][0][2][0][RTW89_KCC][0][52] = 127, [3][0][2][0][RTW89_ACMA][1][52] = 127, @@ -47313,13 +45889,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_THAILAND][1][52] = 127, [3][0][2][0][RTW89_THAILAND][0][52] = 127, [3][0][2][0][RTW89_FCC][1][67] = 54, - [3][0][2][0][RTW89_FCC][2][67] = 54, [3][0][2][0][RTW89_ETSI][1][67] = 127, [3][0][2][0][RTW89_ETSI][0][67] = 127, [3][0][2][0][RTW89_MKK][1][67] = 127, [3][0][2][0][RTW89_MKK][0][67] = 127, [3][0][2][0][RTW89_IC][1][67] = 54, - [3][0][2][0][RTW89_IC][2][67] = 54, [3][0][2][0][RTW89_KCC][1][67] = 54, [3][0][2][0][RTW89_KCC][0][67] = 127, [3][0][2][0][RTW89_ACMA][1][67] = 127, @@ -47332,13 +45906,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_THAILAND][1][67] = 127, [3][0][2][0][RTW89_THAILAND][0][67] = 127, [3][0][2][0][RTW89_FCC][1][82] = 46, - [3][0][2][0][RTW89_FCC][2][82] = 127, [3][0][2][0][RTW89_ETSI][1][82] = 127, [3][0][2][0][RTW89_ETSI][0][82] = 127, [3][0][2][0][RTW89_MKK][1][82] = 127, [3][0][2][0][RTW89_MKK][0][82] = 127, [3][0][2][0][RTW89_IC][1][82] = 46, - [3][0][2][0][RTW89_IC][2][82] = 127, [3][0][2][0][RTW89_KCC][1][82] = 26, [3][0][2][0][RTW89_KCC][0][82] = 127, [3][0][2][0][RTW89_ACMA][1][82] = 127, @@ -47351,13 +45923,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_THAILAND][1][82] = 127, [3][0][2][0][RTW89_THAILAND][0][82] = 127, [3][0][2][0][RTW89_FCC][1][97] = 40, - [3][0][2][0][RTW89_FCC][2][97] = 127, [3][0][2][0][RTW89_ETSI][1][97] = 127, [3][0][2][0][RTW89_ETSI][0][97] = 127, [3][0][2][0][RTW89_MKK][1][97] = 127, [3][0][2][0][RTW89_MKK][0][97] = 127, [3][0][2][0][RTW89_IC][1][97] = 40, - [3][0][2][0][RTW89_IC][2][97] = 127, [3][0][2][0][RTW89_KCC][1][97] = 26, [3][0][2][0][RTW89_KCC][0][97] = 127, [3][0][2][0][RTW89_ACMA][1][97] = 127, @@ -47370,13 +45940,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_THAILAND][1][97] = 127, [3][0][2][0][RTW89_THAILAND][0][97] = 127, [3][0][2][0][RTW89_FCC][1][112] = 127, - [3][0][2][0][RTW89_FCC][2][112] = 127, [3][0][2][0][RTW89_ETSI][1][112] = 127, [3][0][2][0][RTW89_ETSI][0][112] = 127, [3][0][2][0][RTW89_MKK][1][112] = 127, [3][0][2][0][RTW89_MKK][0][112] = 127, [3][0][2][0][RTW89_IC][1][112] = 127, - [3][0][2][0][RTW89_IC][2][112] = 127, [3][0][2][0][RTW89_KCC][1][112] = 127, [3][0][2][0][RTW89_KCC][0][112] = 127, [3][0][2][0][RTW89_ACMA][1][112] = 127, @@ -47389,13 +45957,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][0][2][0][RTW89_THAILAND][1][112] = 127, [3][0][2][0][RTW89_THAILAND][0][112] = 127, [3][1][2][0][RTW89_FCC][1][7] = 32, - [3][1][2][0][RTW89_FCC][2][7] = 46, [3][1][2][0][RTW89_ETSI][1][7] = 50, [3][1][2][0][RTW89_ETSI][0][7] = 18, [3][1][2][0][RTW89_MKK][1][7] = 38, [3][1][2][0][RTW89_MKK][0][7] = 10, [3][1][2][0][RTW89_IC][1][7] = 32, - [3][1][2][0][RTW89_IC][2][7] = 46, [3][1][2][0][RTW89_KCC][1][7] = 40, 
[3][1][2][0][RTW89_KCC][0][7] = 12, [3][1][2][0][RTW89_ACMA][1][7] = 50, @@ -47408,13 +45974,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_THAILAND][1][7] = 46, [3][1][2][0][RTW89_THAILAND][0][7] = 18, [3][1][2][0][RTW89_FCC][1][22] = 30, - [3][1][2][0][RTW89_FCC][2][22] = 52, [3][1][2][0][RTW89_ETSI][1][22] = 46, [3][1][2][0][RTW89_ETSI][0][22] = 16, [3][1][2][0][RTW89_MKK][1][22] = 48, [3][1][2][0][RTW89_MKK][0][22] = 8, [3][1][2][0][RTW89_IC][1][22] = 30, - [3][1][2][0][RTW89_IC][2][22] = 52, [3][1][2][0][RTW89_KCC][1][22] = 40, [3][1][2][0][RTW89_KCC][0][22] = 12, [3][1][2][0][RTW89_ACMA][1][22] = 46, @@ -47427,13 +45991,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_THAILAND][1][22] = 46, [3][1][2][0][RTW89_THAILAND][0][22] = 18, [3][1][2][0][RTW89_FCC][1][37] = 30, - [3][1][2][0][RTW89_FCC][2][37] = 52, [3][1][2][0][RTW89_ETSI][1][37] = 46, [3][1][2][0][RTW89_ETSI][0][37] = 16, [3][1][2][0][RTW89_MKK][1][37] = 48, [3][1][2][0][RTW89_MKK][0][37] = 8, [3][1][2][0][RTW89_IC][1][37] = 30, - [3][1][2][0][RTW89_IC][2][37] = 52, [3][1][2][0][RTW89_KCC][1][37] = 40, [3][1][2][0][RTW89_KCC][0][37] = 12, [3][1][2][0][RTW89_ACMA][1][37] = 46, @@ -47446,13 +46008,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_THAILAND][1][37] = 46, [3][1][2][0][RTW89_THAILAND][0][37] = 18, [3][1][2][0][RTW89_FCC][1][52] = 30, - [3][1][2][0][RTW89_FCC][2][52] = 127, [3][1][2][0][RTW89_ETSI][1][52] = 127, [3][1][2][0][RTW89_ETSI][0][52] = 127, [3][1][2][0][RTW89_MKK][1][52] = 127, [3][1][2][0][RTW89_MKK][0][52] = 127, [3][1][2][0][RTW89_IC][1][52] = 30, - [3][1][2][0][RTW89_IC][2][52] = 56, [3][1][2][0][RTW89_KCC][1][52] = 48, [3][1][2][0][RTW89_KCC][0][52] = 127, [3][1][2][0][RTW89_ACMA][1][52] = 127, @@ -47465,13 +46025,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_THAILAND][1][52] = 127, [3][1][2][0][RTW89_THAILAND][0][52] = 127, [3][1][2][0][RTW89_FCC][1][67] = 32, - [3][1][2][0][RTW89_FCC][2][67] = 54, [3][1][2][0][RTW89_ETSI][1][67] = 127, [3][1][2][0][RTW89_ETSI][0][67] = 127, [3][1][2][0][RTW89_MKK][1][67] = 127, [3][1][2][0][RTW89_MKK][0][67] = 127, [3][1][2][0][RTW89_IC][1][67] = 32, - [3][1][2][0][RTW89_IC][2][67] = 54, [3][1][2][0][RTW89_KCC][1][67] = 48, [3][1][2][0][RTW89_KCC][0][67] = 127, [3][1][2][0][RTW89_ACMA][1][67] = 127, @@ -47484,13 +46042,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_THAILAND][1][67] = 127, [3][1][2][0][RTW89_THAILAND][0][67] = 127, [3][1][2][0][RTW89_FCC][1][82] = 32, - [3][1][2][0][RTW89_FCC][2][82] = 127, [3][1][2][0][RTW89_ETSI][1][82] = 127, [3][1][2][0][RTW89_ETSI][0][82] = 127, [3][1][2][0][RTW89_MKK][1][82] = 127, [3][1][2][0][RTW89_MKK][0][82] = 127, [3][1][2][0][RTW89_IC][1][82] = 32, - [3][1][2][0][RTW89_IC][2][82] = 127, [3][1][2][0][RTW89_KCC][1][82] = 24, [3][1][2][0][RTW89_KCC][0][82] = 127, [3][1][2][0][RTW89_ACMA][1][82] = 127, @@ -47503,13 +46059,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_THAILAND][1][82] = 127, [3][1][2][0][RTW89_THAILAND][0][82] = 127, [3][1][2][0][RTW89_FCC][1][97] = 32, - [3][1][2][0][RTW89_FCC][2][97] = 127, [3][1][2][0][RTW89_ETSI][1][97] = 127, [3][1][2][0][RTW89_ETSI][0][97] = 127, [3][1][2][0][RTW89_MKK][1][97] = 127, [3][1][2][0][RTW89_MKK][0][97] = 127, [3][1][2][0][RTW89_IC][1][97] = 32, - [3][1][2][0][RTW89_IC][2][97] = 127, [3][1][2][0][RTW89_KCC][1][97] = 24, 
[3][1][2][0][RTW89_KCC][0][97] = 127, [3][1][2][0][RTW89_ACMA][1][97] = 127, @@ -47522,13 +46076,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_THAILAND][1][97] = 127, [3][1][2][0][RTW89_THAILAND][0][97] = 127, [3][1][2][0][RTW89_FCC][1][112] = 127, - [3][1][2][0][RTW89_FCC][2][112] = 127, [3][1][2][0][RTW89_ETSI][1][112] = 127, [3][1][2][0][RTW89_ETSI][0][112] = 127, [3][1][2][0][RTW89_MKK][1][112] = 127, [3][1][2][0][RTW89_MKK][0][112] = 127, [3][1][2][0][RTW89_IC][1][112] = 127, - [3][1][2][0][RTW89_IC][2][112] = 127, [3][1][2][0][RTW89_KCC][1][112] = 127, [3][1][2][0][RTW89_KCC][0][112] = 127, [3][1][2][0][RTW89_ACMA][1][112] = 127, @@ -47541,13 +46093,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][0][RTW89_THAILAND][1][112] = 127, [3][1][2][0][RTW89_THAILAND][0][112] = 127, [3][1][2][1][RTW89_FCC][1][7] = 32, - [3][1][2][1][RTW89_FCC][2][7] = 46, [3][1][2][1][RTW89_ETSI][1][7] = 42, [3][1][2][1][RTW89_ETSI][0][7] = 6, [3][1][2][1][RTW89_MKK][1][7] = 38, [3][1][2][1][RTW89_MKK][0][7] = 10, [3][1][2][1][RTW89_IC][1][7] = 32, - [3][1][2][1][RTW89_IC][2][7] = 46, [3][1][2][1][RTW89_KCC][1][7] = 40, [3][1][2][1][RTW89_KCC][0][7] = 12, [3][1][2][1][RTW89_ACMA][1][7] = 42, @@ -47560,13 +46110,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_THAILAND][1][7] = 46, [3][1][2][1][RTW89_THAILAND][0][7] = 6, [3][1][2][1][RTW89_FCC][1][22] = 30, - [3][1][2][1][RTW89_FCC][2][22] = 52, [3][1][2][1][RTW89_ETSI][1][22] = 42, [3][1][2][1][RTW89_ETSI][0][22] = 6, [3][1][2][1][RTW89_MKK][1][22] = 48, [3][1][2][1][RTW89_MKK][0][22] = 8, [3][1][2][1][RTW89_IC][1][22] = 30, - [3][1][2][1][RTW89_IC][2][22] = 52, [3][1][2][1][RTW89_KCC][1][22] = 40, [3][1][2][1][RTW89_KCC][0][22] = 12, [3][1][2][1][RTW89_ACMA][1][22] = 42, @@ -47579,13 +46127,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_THAILAND][1][22] = 46, [3][1][2][1][RTW89_THAILAND][0][22] = 6, [3][1][2][1][RTW89_FCC][1][37] = 30, - [3][1][2][1][RTW89_FCC][2][37] = 52, [3][1][2][1][RTW89_ETSI][1][37] = 42, [3][1][2][1][RTW89_ETSI][0][37] = 6, [3][1][2][1][RTW89_MKK][1][37] = 48, [3][1][2][1][RTW89_MKK][0][37] = 8, [3][1][2][1][RTW89_IC][1][37] = 30, - [3][1][2][1][RTW89_IC][2][37] = 52, [3][1][2][1][RTW89_KCC][1][37] = 40, [3][1][2][1][RTW89_KCC][0][37] = 12, [3][1][2][1][RTW89_ACMA][1][37] = 42, @@ -47598,13 +46144,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_THAILAND][1][37] = 46, [3][1][2][1][RTW89_THAILAND][0][37] = 6, [3][1][2][1][RTW89_FCC][1][52] = 30, - [3][1][2][1][RTW89_FCC][2][52] = 127, [3][1][2][1][RTW89_ETSI][1][52] = 127, [3][1][2][1][RTW89_ETSI][0][52] = 127, [3][1][2][1][RTW89_MKK][1][52] = 127, [3][1][2][1][RTW89_MKK][0][52] = 127, [3][1][2][1][RTW89_IC][1][52] = 30, - [3][1][2][1][RTW89_IC][2][52] = 56, [3][1][2][1][RTW89_KCC][1][52] = 48, [3][1][2][1][RTW89_KCC][0][52] = 127, [3][1][2][1][RTW89_ACMA][1][52] = 127, @@ -47617,13 +46161,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_THAILAND][1][52] = 127, [3][1][2][1][RTW89_THAILAND][0][52] = 127, [3][1][2][1][RTW89_FCC][1][67] = 32, - [3][1][2][1][RTW89_FCC][2][67] = 54, [3][1][2][1][RTW89_ETSI][1][67] = 127, [3][1][2][1][RTW89_ETSI][0][67] = 127, [3][1][2][1][RTW89_MKK][1][67] = 127, [3][1][2][1][RTW89_MKK][0][67] = 127, [3][1][2][1][RTW89_IC][1][67] = 32, - [3][1][2][1][RTW89_IC][2][67] = 54, [3][1][2][1][RTW89_KCC][1][67] = 48, 
[3][1][2][1][RTW89_KCC][0][67] = 127, [3][1][2][1][RTW89_ACMA][1][67] = 127, @@ -47636,13 +46178,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_THAILAND][1][67] = 127, [3][1][2][1][RTW89_THAILAND][0][67] = 127, [3][1][2][1][RTW89_FCC][1][82] = 32, - [3][1][2][1][RTW89_FCC][2][82] = 127, [3][1][2][1][RTW89_ETSI][1][82] = 127, [3][1][2][1][RTW89_ETSI][0][82] = 127, [3][1][2][1][RTW89_MKK][1][82] = 127, [3][1][2][1][RTW89_MKK][0][82] = 127, [3][1][2][1][RTW89_IC][1][82] = 32, - [3][1][2][1][RTW89_IC][2][82] = 127, [3][1][2][1][RTW89_KCC][1][82] = 24, [3][1][2][1][RTW89_KCC][0][82] = 127, [3][1][2][1][RTW89_ACMA][1][82] = 127, @@ -47655,13 +46195,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_THAILAND][1][82] = 127, [3][1][2][1][RTW89_THAILAND][0][82] = 127, [3][1][2][1][RTW89_FCC][1][97] = 32, - [3][1][2][1][RTW89_FCC][2][97] = 127, [3][1][2][1][RTW89_ETSI][1][97] = 127, [3][1][2][1][RTW89_ETSI][0][97] = 127, [3][1][2][1][RTW89_MKK][1][97] = 127, [3][1][2][1][RTW89_MKK][0][97] = 127, [3][1][2][1][RTW89_IC][1][97] = 32, - [3][1][2][1][RTW89_IC][2][97] = 127, [3][1][2][1][RTW89_KCC][1][97] = 24, [3][1][2][1][RTW89_KCC][0][97] = 127, [3][1][2][1][RTW89_ACMA][1][97] = 127, @@ -47674,13 +46212,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM] [3][1][2][1][RTW89_THAILAND][1][97] = 127, [3][1][2][1][RTW89_THAILAND][0][97] = 127, [3][1][2][1][RTW89_FCC][1][112] = 127, - [3][1][2][1][RTW89_FCC][2][112] = 127, [3][1][2][1][RTW89_ETSI][1][112] = 127, [3][1][2][1][RTW89_ETSI][0][112] = 127, [3][1][2][1][RTW89_MKK][1][112] = 127, [3][1][2][1][RTW89_MKK][0][112] = 127, [3][1][2][1][RTW89_IC][1][112] = 127, - [3][1][2][1][RTW89_IC][2][112] = 127, [3][1][2][1][RTW89_KCC][1][112] = 127, [3][1][2][1][RTW89_KCC][0][112] = 127, [3][1][2][1][RTW89_ACMA][1][112] = 127, @@ -49374,7 +47910,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_FCC][48] = 46, [0][0][RTW89_ETSI][48] = 127, [0][0][RTW89_MKK][48] = 127, - [0][0][RTW89_IC][48] = 127, + [0][0][RTW89_IC][48] = 46, [0][0][RTW89_KCC][48] = 127, [0][0][RTW89_ACMA][48] = 127, [0][0][RTW89_CN][48] = 127, @@ -49387,7 +47923,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_FCC][50] = 44, [0][0][RTW89_ETSI][50] = 127, [0][0][RTW89_MKK][50] = 127, - [0][0][RTW89_IC][50] = 127, + [0][0][RTW89_IC][50] = 44, [0][0][RTW89_KCC][50] = 127, [0][0][RTW89_ACMA][50] = 127, [0][0][RTW89_CN][50] = 127, @@ -49400,7 +47936,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][0][RTW89_FCC][52] = 34, [0][0][RTW89_ETSI][52] = 127, [0][0][RTW89_MKK][52] = 127, - [0][0][RTW89_IC][52] = 127, + [0][0][RTW89_IC][52] = 34, [0][0][RTW89_KCC][52] = 127, [0][0][RTW89_ACMA][52] = 127, [0][0][RTW89_CN][52] = 127, @@ -49738,7 +48274,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_FCC][48] = 20, [0][1][RTW89_ETSI][48] = 127, [0][1][RTW89_MKK][48] = 127, - [0][1][RTW89_IC][48] = 127, + [0][1][RTW89_IC][48] = 20, [0][1][RTW89_KCC][48] = 127, [0][1][RTW89_ACMA][48] = 127, [0][1][RTW89_CN][48] = 127, @@ -49751,7 +48287,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_FCC][50] = 20, [0][1][RTW89_ETSI][50] = 127, [0][1][RTW89_MKK][50] = 127, - [0][1][RTW89_IC][50] = 127, + [0][1][RTW89_IC][50] = 20, [0][1][RTW89_KCC][50] = 127, [0][1][RTW89_ACMA][50] = 127, [0][1][RTW89_CN][50] = 127, @@ -49764,7 +48300,7 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_FCC][52] = 8, [0][1][RTW89_ETSI][52] = 127, [0][1][RTW89_MKK][52] = 127, - [0][1][RTW89_IC][52] = 127, + [0][1][RTW89_IC][52] = 8, [0][1][RTW89_KCC][52] = 127, [0][1][RTW89_ACMA][52] = 127, [0][1][RTW89_CN][52] = 127, @@ -50102,7 +48638,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_FCC][48] = 56, [1][0][RTW89_ETSI][48] = 127, [1][0][RTW89_MKK][48] = 127, - [1][0][RTW89_IC][48] = 127, + [1][0][RTW89_IC][48] = 56, [1][0][RTW89_KCC][48] = 127, [1][0][RTW89_ACMA][48] = 127, [1][0][RTW89_CN][48] = 127, @@ -50115,7 +48651,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_FCC][50] = 58, [1][0][RTW89_ETSI][50] = 127, [1][0][RTW89_MKK][50] = 127, - [1][0][RTW89_IC][50] = 127, + [1][0][RTW89_IC][50] = 58, [1][0][RTW89_KCC][50] = 127, [1][0][RTW89_ACMA][50] = 127, [1][0][RTW89_CN][50] = 127, @@ -50128,7 +48664,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_FCC][52] = 56, [1][0][RTW89_ETSI][52] = 127, [1][0][RTW89_MKK][52] = 127, - [1][0][RTW89_IC][52] = 127, + [1][0][RTW89_IC][52] = 56, [1][0][RTW89_KCC][52] = 127, [1][0][RTW89_ACMA][52] = 127, [1][0][RTW89_CN][52] = 127, @@ -50466,7 +49002,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_FCC][48] = 34, [1][1][RTW89_ETSI][48] = 127, [1][1][RTW89_MKK][48] = 127, - [1][1][RTW89_IC][48] = 127, + [1][1][RTW89_IC][48] = 34, [1][1][RTW89_KCC][48] = 127, [1][1][RTW89_ACMA][48] = 127, [1][1][RTW89_CN][48] = 127, @@ -50479,7 +49015,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_FCC][50] = 34, [1][1][RTW89_ETSI][50] = 127, [1][1][RTW89_MKK][50] = 127, - [1][1][RTW89_IC][50] = 127, + [1][1][RTW89_IC][50] = 34, [1][1][RTW89_KCC][50] = 127, [1][1][RTW89_ACMA][50] = 127, [1][1][RTW89_CN][50] = 127, @@ -50492,7 +49028,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_FCC][52] = 30, [1][1][RTW89_ETSI][52] = 127, [1][1][RTW89_MKK][52] = 127, - [1][1][RTW89_IC][52] = 127, + [1][1][RTW89_IC][52] = 30, [1][1][RTW89_KCC][52] = 127, [1][1][RTW89_ACMA][52] = 127, [1][1][RTW89_CN][52] = 127, @@ -50830,7 +49366,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_FCC][48] = 64, [2][0][RTW89_ETSI][48] = 127, [2][0][RTW89_MKK][48] = 127, - [2][0][RTW89_IC][48] = 127, + [2][0][RTW89_IC][48] = 64, [2][0][RTW89_KCC][48] = 127, [2][0][RTW89_ACMA][48] = 127, [2][0][RTW89_CN][48] = 127, @@ -50843,7 +49379,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_FCC][50] = 64, [2][0][RTW89_ETSI][50] = 127, [2][0][RTW89_MKK][50] = 127, - [2][0][RTW89_IC][50] = 127, + [2][0][RTW89_IC][50] = 64, [2][0][RTW89_KCC][50] = 127, [2][0][RTW89_ACMA][50] = 127, [2][0][RTW89_CN][50] = 127, @@ -50856,7 +49392,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][0][RTW89_FCC][52] = 64, [2][0][RTW89_ETSI][52] = 127, [2][0][RTW89_MKK][52] = 127, - [2][0][RTW89_IC][52] = 127, + [2][0][RTW89_IC][52] = 64, [2][0][RTW89_KCC][52] = 127, [2][0][RTW89_ACMA][52] = 127, [2][0][RTW89_CN][52] = 127, @@ -51194,7 +49730,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_FCC][48] = 40, [2][1][RTW89_ETSI][48] = 127, [2][1][RTW89_MKK][48] = 127, - [2][1][RTW89_IC][48] = 127, + [2][1][RTW89_IC][48] = 40, [2][1][RTW89_KCC][48] = 127, [2][1][RTW89_ACMA][48] = 127, [2][1][RTW89_CN][48] = 127, @@ -51207,7 +49743,7 @@ const s8 
rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_FCC][50] = 40, [2][1][RTW89_ETSI][50] = 127, [2][1][RTW89_MKK][50] = 127, - [2][1][RTW89_IC][50] = 127, + [2][1][RTW89_IC][50] = 40, [2][1][RTW89_KCC][50] = 127, [2][1][RTW89_ACMA][50] = 127, [2][1][RTW89_CN][50] = 127, @@ -51220,7 +49756,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_FCC][52] = 40, [2][1][RTW89_ETSI][52] = 127, [2][1][RTW89_MKK][52] = 127, - [2][1][RTW89_IC][52] = 127, + [2][1][RTW89_IC][52] = 40, [2][1][RTW89_KCC][52] = 127, [2][1][RTW89_ACMA][52] = 127, [2][1][RTW89_CN][52] = 127, @@ -51238,1164 +49774,778 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [RTW89_6G_CH_NUM] = { [0][0][RTW89_WW][0][0] = -16, [0][0][RTW89_WW][1][0] = -16, - [0][0][RTW89_WW][2][0] = 44, [0][0][RTW89_WW][0][2] = -18, [0][0][RTW89_WW][1][2] = -18, - [0][0][RTW89_WW][2][2] = 44, [0][0][RTW89_WW][0][4] = -18, [0][0][RTW89_WW][1][4] = -18, - [0][0][RTW89_WW][2][4] = 44, [0][0][RTW89_WW][0][6] = -18, [0][0][RTW89_WW][1][6] = -18, - [0][0][RTW89_WW][2][6] = 44, [0][0][RTW89_WW][0][8] = -18, [0][0][RTW89_WW][1][8] = -18, - [0][0][RTW89_WW][2][8] = 44, [0][0][RTW89_WW][0][10] = -18, [0][0][RTW89_WW][1][10] = -18, - [0][0][RTW89_WW][2][10] = 44, [0][0][RTW89_WW][0][12] = -18, [0][0][RTW89_WW][1][12] = -18, - [0][0][RTW89_WW][2][12] = 44, [0][0][RTW89_WW][0][14] = -18, [0][0][RTW89_WW][1][14] = -18, - [0][0][RTW89_WW][2][14] = 44, [0][0][RTW89_WW][0][15] = -18, [0][0][RTW89_WW][1][15] = -18, - [0][0][RTW89_WW][2][15] = 44, [0][0][RTW89_WW][0][17] = -18, [0][0][RTW89_WW][1][17] = -18, - [0][0][RTW89_WW][2][17] = 44, [0][0][RTW89_WW][0][19] = -18, [0][0][RTW89_WW][1][19] = -18, - [0][0][RTW89_WW][2][19] = 44, [0][0][RTW89_WW][0][21] = -18, [0][0][RTW89_WW][1][21] = -18, - [0][0][RTW89_WW][2][21] = 44, [0][0][RTW89_WW][0][23] = -18, [0][0][RTW89_WW][1][23] = -18, - [0][0][RTW89_WW][2][23] = 54, [0][0][RTW89_WW][0][25] = -18, [0][0][RTW89_WW][1][25] = -18, - [0][0][RTW89_WW][2][25] = 54, [0][0][RTW89_WW][0][27] = -18, [0][0][RTW89_WW][1][27] = -18, - [0][0][RTW89_WW][2][27] = 54, [0][0][RTW89_WW][0][29] = -18, [0][0][RTW89_WW][1][29] = -18, - [0][0][RTW89_WW][2][29] = 54, [0][0][RTW89_WW][0][30] = -18, [0][0][RTW89_WW][1][30] = -18, - [0][0][RTW89_WW][2][30] = 54, [0][0][RTW89_WW][0][32] = -18, [0][0][RTW89_WW][1][32] = -18, - [0][0][RTW89_WW][2][32] = 54, [0][0][RTW89_WW][0][34] = -18, [0][0][RTW89_WW][1][34] = -18, - [0][0][RTW89_WW][2][34] = 54, [0][0][RTW89_WW][0][36] = -18, [0][0][RTW89_WW][1][36] = -18, - [0][0][RTW89_WW][2][36] = 54, [0][0][RTW89_WW][0][38] = -18, [0][0][RTW89_WW][1][38] = -18, - [0][0][RTW89_WW][2][38] = 54, [0][0][RTW89_WW][0][40] = -18, [0][0][RTW89_WW][1][40] = -18, - [0][0][RTW89_WW][2][40] = 54, [0][0][RTW89_WW][0][42] = -18, [0][0][RTW89_WW][1][42] = -18, - [0][0][RTW89_WW][2][42] = 54, [0][0][RTW89_WW][0][44] = -16, [0][0][RTW89_WW][1][44] = -16, - [0][0][RTW89_WW][2][44] = 56, [0][0][RTW89_WW][0][45] = -16, [0][0][RTW89_WW][1][45] = -16, - [0][0][RTW89_WW][2][45] = 56, [0][0][RTW89_WW][0][47] = -18, [0][0][RTW89_WW][1][47] = -18, - [0][0][RTW89_WW][2][47] = 56, [0][0][RTW89_WW][0][49] = -18, [0][0][RTW89_WW][1][49] = -18, - [0][0][RTW89_WW][2][49] = 56, [0][0][RTW89_WW][0][51] = -18, [0][0][RTW89_WW][1][51] = -18, - [0][0][RTW89_WW][2][51] = 56, [0][0][RTW89_WW][0][53] = -16, [0][0][RTW89_WW][1][53] = -16, - [0][0][RTW89_WW][2][53] = 56, [0][0][RTW89_WW][0][55] = -18, [0][0][RTW89_WW][1][55] = -18, - [0][0][RTW89_WW][2][55] = 56, 
[0][0][RTW89_WW][0][57] = -18, [0][0][RTW89_WW][1][57] = -18, - [0][0][RTW89_WW][2][57] = 56, [0][0][RTW89_WW][0][59] = -18, [0][0][RTW89_WW][1][59] = -18, - [0][0][RTW89_WW][2][59] = 56, [0][0][RTW89_WW][0][60] = -18, [0][0][RTW89_WW][1][60] = -18, - [0][0][RTW89_WW][2][60] = 56, [0][0][RTW89_WW][0][62] = -18, [0][0][RTW89_WW][1][62] = -18, - [0][0][RTW89_WW][2][62] = 56, [0][0][RTW89_WW][0][64] = -18, [0][0][RTW89_WW][1][64] = -18, - [0][0][RTW89_WW][2][64] = 56, [0][0][RTW89_WW][0][66] = -18, [0][0][RTW89_WW][1][66] = -18, - [0][0][RTW89_WW][2][66] = 56, [0][0][RTW89_WW][0][68] = -18, [0][0][RTW89_WW][1][68] = -18, - [0][0][RTW89_WW][2][68] = 56, [0][0][RTW89_WW][0][70] = -16, [0][0][RTW89_WW][1][70] = -16, - [0][0][RTW89_WW][2][70] = 56, [0][0][RTW89_WW][0][72] = -18, [0][0][RTW89_WW][1][72] = -18, - [0][0][RTW89_WW][2][72] = 56, [0][0][RTW89_WW][0][74] = -18, [0][0][RTW89_WW][1][74] = -18, - [0][0][RTW89_WW][2][74] = 56, [0][0][RTW89_WW][0][75] = -18, [0][0][RTW89_WW][1][75] = -18, - [0][0][RTW89_WW][2][75] = 56, [0][0][RTW89_WW][0][77] = -18, [0][0][RTW89_WW][1][77] = -18, - [0][0][RTW89_WW][2][77] = 56, [0][0][RTW89_WW][0][79] = -18, [0][0][RTW89_WW][1][79] = -18, - [0][0][RTW89_WW][2][79] = 56, [0][0][RTW89_WW][0][81] = -18, [0][0][RTW89_WW][1][81] = -18, - [0][0][RTW89_WW][2][81] = 56, [0][0][RTW89_WW][0][83] = -18, [0][0][RTW89_WW][1][83] = -18, - [0][0][RTW89_WW][2][83] = 56, [0][0][RTW89_WW][0][85] = -18, [0][0][RTW89_WW][1][85] = -18, - [0][0][RTW89_WW][2][85] = 56, [0][0][RTW89_WW][0][87] = -16, [0][0][RTW89_WW][1][87] = -16, - [0][0][RTW89_WW][2][87] = 0, [0][0][RTW89_WW][0][89] = -16, [0][0][RTW89_WW][1][89] = -16, - [0][0][RTW89_WW][2][89] = 0, [0][0][RTW89_WW][0][90] = -16, [0][0][RTW89_WW][1][90] = -16, - [0][0][RTW89_WW][2][90] = 0, [0][0][RTW89_WW][0][92] = -16, [0][0][RTW89_WW][1][92] = -16, - [0][0][RTW89_WW][2][92] = 0, [0][0][RTW89_WW][0][94] = -16, [0][0][RTW89_WW][1][94] = -16, - [0][0][RTW89_WW][2][94] = 0, [0][0][RTW89_WW][0][96] = -16, [0][0][RTW89_WW][1][96] = -16, - [0][0][RTW89_WW][2][96] = 0, [0][0][RTW89_WW][0][98] = -16, [0][0][RTW89_WW][1][98] = -16, - [0][0][RTW89_WW][2][98] = 0, [0][0][RTW89_WW][0][100] = -16, [0][0][RTW89_WW][1][100] = -16, - [0][0][RTW89_WW][2][100] = 0, [0][0][RTW89_WW][0][102] = -16, [0][0][RTW89_WW][1][102] = -16, - [0][0][RTW89_WW][2][102] = 0, [0][0][RTW89_WW][0][104] = -16, [0][0][RTW89_WW][1][104] = -16, - [0][0][RTW89_WW][2][104] = 0, [0][0][RTW89_WW][0][105] = -16, [0][0][RTW89_WW][1][105] = -16, - [0][0][RTW89_WW][2][105] = 0, [0][0][RTW89_WW][0][107] = -12, [0][0][RTW89_WW][1][107] = -12, - [0][0][RTW89_WW][2][107] = 0, [0][0][RTW89_WW][0][109] = -12, [0][0][RTW89_WW][1][109] = -12, - [0][0][RTW89_WW][2][109] = 0, [0][0][RTW89_WW][0][111] = 0, [0][0][RTW89_WW][1][111] = 0, - [0][0][RTW89_WW][2][111] = 0, [0][0][RTW89_WW][0][113] = 0, [0][0][RTW89_WW][1][113] = 0, - [0][0][RTW89_WW][2][113] = 0, [0][0][RTW89_WW][0][115] = 0, [0][0][RTW89_WW][1][115] = 0, - [0][0][RTW89_WW][2][115] = 0, [0][0][RTW89_WW][0][117] = 0, [0][0][RTW89_WW][1][117] = 0, - [0][0][RTW89_WW][2][117] = 0, [0][0][RTW89_WW][0][119] = 0, [0][0][RTW89_WW][1][119] = 0, - [0][0][RTW89_WW][2][119] = 0, [0][1][RTW89_WW][0][0] = -40, [0][1][RTW89_WW][1][0] = -40, - [0][1][RTW89_WW][2][0] = 32, [0][1][RTW89_WW][0][2] = -40, [0][1][RTW89_WW][1][2] = -40, - [0][1][RTW89_WW][2][2] = 32, [0][1][RTW89_WW][0][4] = -40, [0][1][RTW89_WW][1][4] = -40, - [0][1][RTW89_WW][2][4] = 32, [0][1][RTW89_WW][0][6] = -40, [0][1][RTW89_WW][1][6] = -40, - [0][1][RTW89_WW][2][6] = 32, 
[0][1][RTW89_WW][0][8] = -40, [0][1][RTW89_WW][1][8] = -40, - [0][1][RTW89_WW][2][8] = 32, [0][1][RTW89_WW][0][10] = -40, [0][1][RTW89_WW][1][10] = -40, - [0][1][RTW89_WW][2][10] = 32, [0][1][RTW89_WW][0][12] = -40, [0][1][RTW89_WW][1][12] = -40, - [0][1][RTW89_WW][2][12] = 32, [0][1][RTW89_WW][0][14] = -40, [0][1][RTW89_WW][1][14] = -40, - [0][1][RTW89_WW][2][14] = 32, [0][1][RTW89_WW][0][15] = -40, [0][1][RTW89_WW][1][15] = -40, - [0][1][RTW89_WW][2][15] = 32, [0][1][RTW89_WW][0][17] = -40, [0][1][RTW89_WW][1][17] = -40, - [0][1][RTW89_WW][2][17] = 32, [0][1][RTW89_WW][0][19] = -40, [0][1][RTW89_WW][1][19] = -40, - [0][1][RTW89_WW][2][19] = 32, [0][1][RTW89_WW][0][21] = -40, [0][1][RTW89_WW][1][21] = -40, - [0][1][RTW89_WW][2][21] = 32, [0][1][RTW89_WW][0][23] = -40, [0][1][RTW89_WW][1][23] = -40, - [0][1][RTW89_WW][2][23] = 32, [0][1][RTW89_WW][0][25] = -40, [0][1][RTW89_WW][1][25] = -40, - [0][1][RTW89_WW][2][25] = 32, [0][1][RTW89_WW][0][27] = -40, [0][1][RTW89_WW][1][27] = -40, - [0][1][RTW89_WW][2][27] = 32, [0][1][RTW89_WW][0][29] = -40, [0][1][RTW89_WW][1][29] = -40, - [0][1][RTW89_WW][2][29] = 32, [0][1][RTW89_WW][0][30] = -40, [0][1][RTW89_WW][1][30] = -40, - [0][1][RTW89_WW][2][30] = 32, [0][1][RTW89_WW][0][32] = -40, [0][1][RTW89_WW][1][32] = -40, - [0][1][RTW89_WW][2][32] = 32, [0][1][RTW89_WW][0][34] = -40, [0][1][RTW89_WW][1][34] = -40, - [0][1][RTW89_WW][2][34] = 32, [0][1][RTW89_WW][0][36] = -40, [0][1][RTW89_WW][1][36] = -40, - [0][1][RTW89_WW][2][36] = 32, [0][1][RTW89_WW][0][38] = -40, [0][1][RTW89_WW][1][38] = -40, - [0][1][RTW89_WW][2][38] = 32, [0][1][RTW89_WW][0][40] = -40, [0][1][RTW89_WW][1][40] = -40, - [0][1][RTW89_WW][2][40] = 32, [0][1][RTW89_WW][0][42] = -40, [0][1][RTW89_WW][1][42] = -40, - [0][1][RTW89_WW][2][42] = 32, [0][1][RTW89_WW][0][44] = -40, [0][1][RTW89_WW][1][44] = -40, - [0][1][RTW89_WW][2][44] = 32, [0][1][RTW89_WW][0][45] = -40, [0][1][RTW89_WW][1][45] = -40, - [0][1][RTW89_WW][2][45] = 32, [0][1][RTW89_WW][0][47] = -40, [0][1][RTW89_WW][1][47] = -40, - [0][1][RTW89_WW][2][47] = 32, [0][1][RTW89_WW][0][49] = -40, [0][1][RTW89_WW][1][49] = -40, - [0][1][RTW89_WW][2][49] = 32, [0][1][RTW89_WW][0][51] = -40, [0][1][RTW89_WW][1][51] = -40, - [0][1][RTW89_WW][2][51] = 32, [0][1][RTW89_WW][0][53] = -40, [0][1][RTW89_WW][1][53] = -40, - [0][1][RTW89_WW][2][53] = 32, [0][1][RTW89_WW][0][55] = -40, [0][1][RTW89_WW][1][55] = -40, - [0][1][RTW89_WW][2][55] = 30, [0][1][RTW89_WW][0][57] = -40, [0][1][RTW89_WW][1][57] = -40, - [0][1][RTW89_WW][2][57] = 30, [0][1][RTW89_WW][0][59] = -40, [0][1][RTW89_WW][1][59] = -40, - [0][1][RTW89_WW][2][59] = 30, [0][1][RTW89_WW][0][60] = -40, [0][1][RTW89_WW][1][60] = -40, - [0][1][RTW89_WW][2][60] = 30, [0][1][RTW89_WW][0][62] = -40, [0][1][RTW89_WW][1][62] = -40, - [0][1][RTW89_WW][2][62] = 30, [0][1][RTW89_WW][0][64] = -40, [0][1][RTW89_WW][1][64] = -40, - [0][1][RTW89_WW][2][64] = 30, [0][1][RTW89_WW][0][66] = -40, [0][1][RTW89_WW][1][66] = -40, - [0][1][RTW89_WW][2][66] = 30, [0][1][RTW89_WW][0][68] = -40, [0][1][RTW89_WW][1][68] = -40, - [0][1][RTW89_WW][2][68] = 30, [0][1][RTW89_WW][0][70] = -38, [0][1][RTW89_WW][1][70] = -38, - [0][1][RTW89_WW][2][70] = 30, [0][1][RTW89_WW][0][72] = -38, [0][1][RTW89_WW][1][72] = -38, - [0][1][RTW89_WW][2][72] = 30, [0][1][RTW89_WW][0][74] = -38, [0][1][RTW89_WW][1][74] = -38, - [0][1][RTW89_WW][2][74] = 30, [0][1][RTW89_WW][0][75] = -38, [0][1][RTW89_WW][1][75] = -38, - [0][1][RTW89_WW][2][75] = 30, [0][1][RTW89_WW][0][77] = -38, [0][1][RTW89_WW][1][77] = -38, - 
[0][1][RTW89_WW][2][77] = 30, [0][1][RTW89_WW][0][79] = -38, [0][1][RTW89_WW][1][79] = -38, - [0][1][RTW89_WW][2][79] = 30, [0][1][RTW89_WW][0][81] = -38, [0][1][RTW89_WW][1][81] = -38, - [0][1][RTW89_WW][2][81] = 30, [0][1][RTW89_WW][0][83] = -38, [0][1][RTW89_WW][1][83] = -38, - [0][1][RTW89_WW][2][83] = 30, [0][1][RTW89_WW][0][85] = -38, [0][1][RTW89_WW][1][85] = -38, - [0][1][RTW89_WW][2][85] = 30, [0][1][RTW89_WW][0][87] = -40, [0][1][RTW89_WW][1][87] = -40, - [0][1][RTW89_WW][2][87] = 0, [0][1][RTW89_WW][0][89] = -38, [0][1][RTW89_WW][1][89] = -38, - [0][1][RTW89_WW][2][89] = 0, [0][1][RTW89_WW][0][90] = -38, [0][1][RTW89_WW][1][90] = -38, - [0][1][RTW89_WW][2][90] = 0, [0][1][RTW89_WW][0][92] = -38, [0][1][RTW89_WW][1][92] = -38, - [0][1][RTW89_WW][2][92] = 0, [0][1][RTW89_WW][0][94] = -38, [0][1][RTW89_WW][1][94] = -38, - [0][1][RTW89_WW][2][94] = 0, [0][1][RTW89_WW][0][96] = -38, [0][1][RTW89_WW][1][96] = -38, - [0][1][RTW89_WW][2][96] = 0, [0][1][RTW89_WW][0][98] = -38, [0][1][RTW89_WW][1][98] = -38, - [0][1][RTW89_WW][2][98] = 0, [0][1][RTW89_WW][0][100] = -38, [0][1][RTW89_WW][1][100] = -38, - [0][1][RTW89_WW][2][100] = 0, [0][1][RTW89_WW][0][102] = -38, [0][1][RTW89_WW][1][102] = -38, - [0][1][RTW89_WW][2][102] = 0, [0][1][RTW89_WW][0][104] = -38, [0][1][RTW89_WW][1][104] = -38, - [0][1][RTW89_WW][2][104] = 0, [0][1][RTW89_WW][0][105] = -38, [0][1][RTW89_WW][1][105] = -38, - [0][1][RTW89_WW][2][105] = 0, [0][1][RTW89_WW][0][107] = -34, [0][1][RTW89_WW][1][107] = -34, - [0][1][RTW89_WW][2][107] = 0, [0][1][RTW89_WW][0][109] = -34, [0][1][RTW89_WW][1][109] = -34, - [0][1][RTW89_WW][2][109] = 0, [0][1][RTW89_WW][0][111] = 0, [0][1][RTW89_WW][1][111] = 0, - [0][1][RTW89_WW][2][111] = 0, [0][1][RTW89_WW][0][113] = 0, [0][1][RTW89_WW][1][113] = 0, - [0][1][RTW89_WW][2][113] = 0, [0][1][RTW89_WW][0][115] = 0, [0][1][RTW89_WW][1][115] = 0, - [0][1][RTW89_WW][2][115] = 0, [0][1][RTW89_WW][0][117] = 0, [0][1][RTW89_WW][1][117] = 0, - [0][1][RTW89_WW][2][117] = 0, [0][1][RTW89_WW][0][119] = 0, [0][1][RTW89_WW][1][119] = 0, - [0][1][RTW89_WW][2][119] = 0, [1][0][RTW89_WW][0][0] = -4, [1][0][RTW89_WW][1][0] = -4, - [1][0][RTW89_WW][2][0] = 52, [1][0][RTW89_WW][0][2] = -4, [1][0][RTW89_WW][1][2] = -4, - [1][0][RTW89_WW][2][2] = 52, [1][0][RTW89_WW][0][4] = -4, [1][0][RTW89_WW][1][4] = -4, - [1][0][RTW89_WW][2][4] = 52, [1][0][RTW89_WW][0][6] = -4, [1][0][RTW89_WW][1][6] = -4, - [1][0][RTW89_WW][2][6] = 52, [1][0][RTW89_WW][0][8] = -4, [1][0][RTW89_WW][1][8] = -4, - [1][0][RTW89_WW][2][8] = 52, [1][0][RTW89_WW][0][10] = -4, [1][0][RTW89_WW][1][10] = -4, - [1][0][RTW89_WW][2][10] = 52, [1][0][RTW89_WW][0][12] = -4, [1][0][RTW89_WW][1][12] = -4, - [1][0][RTW89_WW][2][12] = 52, [1][0][RTW89_WW][0][14] = -4, [1][0][RTW89_WW][1][14] = -4, - [1][0][RTW89_WW][2][14] = 52, [1][0][RTW89_WW][0][15] = -4, [1][0][RTW89_WW][1][15] = -4, - [1][0][RTW89_WW][2][15] = 52, [1][0][RTW89_WW][0][17] = -4, [1][0][RTW89_WW][1][17] = -4, - [1][0][RTW89_WW][2][17] = 52, [1][0][RTW89_WW][0][19] = -4, [1][0][RTW89_WW][1][19] = -4, - [1][0][RTW89_WW][2][19] = 52, [1][0][RTW89_WW][0][21] = -4, [1][0][RTW89_WW][1][21] = -4, - [1][0][RTW89_WW][2][21] = 52, [1][0][RTW89_WW][0][23] = -4, [1][0][RTW89_WW][1][23] = -4, - [1][0][RTW89_WW][2][23] = 66, [1][0][RTW89_WW][0][25] = -4, [1][0][RTW89_WW][1][25] = -4, - [1][0][RTW89_WW][2][25] = 66, [1][0][RTW89_WW][0][27] = -4, [1][0][RTW89_WW][1][27] = -4, - [1][0][RTW89_WW][2][27] = 66, [1][0][RTW89_WW][0][29] = -4, [1][0][RTW89_WW][1][29] = -4, - [1][0][RTW89_WW][2][29] = 66, 
[1][0][RTW89_WW][0][30] = -4, [1][0][RTW89_WW][1][30] = -4, - [1][0][RTW89_WW][2][30] = 66, [1][0][RTW89_WW][0][32] = -4, [1][0][RTW89_WW][1][32] = -4, - [1][0][RTW89_WW][2][32] = 66, [1][0][RTW89_WW][0][34] = -4, [1][0][RTW89_WW][1][34] = -4, - [1][0][RTW89_WW][2][34] = 66, [1][0][RTW89_WW][0][36] = -4, [1][0][RTW89_WW][1][36] = -4, - [1][0][RTW89_WW][2][36] = 66, [1][0][RTW89_WW][0][38] = -4, [1][0][RTW89_WW][1][38] = -4, - [1][0][RTW89_WW][2][38] = 66, [1][0][RTW89_WW][0][40] = -4, [1][0][RTW89_WW][1][40] = -4, - [1][0][RTW89_WW][2][40] = 66, [1][0][RTW89_WW][0][42] = -4, [1][0][RTW89_WW][1][42] = -4, - [1][0][RTW89_WW][2][42] = 66, [1][0][RTW89_WW][0][44] = -4, [1][0][RTW89_WW][1][44] = -4, - [1][0][RTW89_WW][2][44] = 66, [1][0][RTW89_WW][0][45] = -4, [1][0][RTW89_WW][1][45] = -4, - [1][0][RTW89_WW][2][45] = 68, [1][0][RTW89_WW][0][47] = -4, [1][0][RTW89_WW][1][47] = -4, - [1][0][RTW89_WW][2][47] = 68, [1][0][RTW89_WW][0][49] = -4, [1][0][RTW89_WW][1][49] = -4, - [1][0][RTW89_WW][2][49] = 68, [1][0][RTW89_WW][0][51] = -4, [1][0][RTW89_WW][1][51] = -4, - [1][0][RTW89_WW][2][51] = 68, [1][0][RTW89_WW][0][53] = -4, [1][0][RTW89_WW][1][53] = -4, - [1][0][RTW89_WW][2][53] = 68, [1][0][RTW89_WW][0][55] = -4, [1][0][RTW89_WW][1][55] = -4, - [1][0][RTW89_WW][2][55] = 68, [1][0][RTW89_WW][0][57] = -4, [1][0][RTW89_WW][1][57] = -4, - [1][0][RTW89_WW][2][57] = 68, [1][0][RTW89_WW][0][59] = -4, [1][0][RTW89_WW][1][59] = -4, - [1][0][RTW89_WW][2][59] = 68, [1][0][RTW89_WW][0][60] = -4, [1][0][RTW89_WW][1][60] = -4, - [1][0][RTW89_WW][2][60] = 68, [1][0][RTW89_WW][0][62] = -4, [1][0][RTW89_WW][1][62] = -4, - [1][0][RTW89_WW][2][62] = 68, [1][0][RTW89_WW][0][64] = -4, [1][0][RTW89_WW][1][64] = -4, - [1][0][RTW89_WW][2][64] = 68, [1][0][RTW89_WW][0][66] = -4, [1][0][RTW89_WW][1][66] = -4, - [1][0][RTW89_WW][2][66] = 68, [1][0][RTW89_WW][0][68] = -4, [1][0][RTW89_WW][1][68] = -4, - [1][0][RTW89_WW][2][68] = 68, [1][0][RTW89_WW][0][70] = -4, [1][0][RTW89_WW][1][70] = -4, - [1][0][RTW89_WW][2][70] = 68, [1][0][RTW89_WW][0][72] = -4, [1][0][RTW89_WW][1][72] = -4, - [1][0][RTW89_WW][2][72] = 68, [1][0][RTW89_WW][0][74] = -4, [1][0][RTW89_WW][1][74] = -4, - [1][0][RTW89_WW][2][74] = 68, [1][0][RTW89_WW][0][75] = -4, [1][0][RTW89_WW][1][75] = -4, - [1][0][RTW89_WW][2][75] = 68, [1][0][RTW89_WW][0][77] = -4, [1][0][RTW89_WW][1][77] = -4, - [1][0][RTW89_WW][2][77] = 68, [1][0][RTW89_WW][0][79] = -4, [1][0][RTW89_WW][1][79] = -4, - [1][0][RTW89_WW][2][79] = 68, [1][0][RTW89_WW][0][81] = -4, [1][0][RTW89_WW][1][81] = -4, - [1][0][RTW89_WW][2][81] = 68, [1][0][RTW89_WW][0][83] = -4, [1][0][RTW89_WW][1][83] = -4, - [1][0][RTW89_WW][2][83] = 68, [1][0][RTW89_WW][0][85] = -4, [1][0][RTW89_WW][1][85] = -4, - [1][0][RTW89_WW][2][85] = 68, [1][0][RTW89_WW][0][87] = -4, [1][0][RTW89_WW][1][87] = -4, - [1][0][RTW89_WW][2][87] = 0, [1][0][RTW89_WW][0][89] = -4, [1][0][RTW89_WW][1][89] = -4, - [1][0][RTW89_WW][2][89] = 0, [1][0][RTW89_WW][0][90] = -4, [1][0][RTW89_WW][1][90] = -4, - [1][0][RTW89_WW][2][90] = 0, [1][0][RTW89_WW][0][92] = -4, [1][0][RTW89_WW][1][92] = -4, - [1][0][RTW89_WW][2][92] = 0, [1][0][RTW89_WW][0][94] = -4, [1][0][RTW89_WW][1][94] = -4, - [1][0][RTW89_WW][2][94] = 0, [1][0][RTW89_WW][0][96] = -4, [1][0][RTW89_WW][1][96] = -4, - [1][0][RTW89_WW][2][96] = 0, [1][0][RTW89_WW][0][98] = -4, [1][0][RTW89_WW][1][98] = -4, - [1][0][RTW89_WW][2][98] = 0, [1][0][RTW89_WW][0][100] = -4, [1][0][RTW89_WW][1][100] = -4, - [1][0][RTW89_WW][2][100] = 0, [1][0][RTW89_WW][0][102] = -4, [1][0][RTW89_WW][1][102] = -4, - 
[table diff elided: flattened hunks of the const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] power-limit table (the RTW89_WW worldwide rows plus hunks @@ -52408,13 +50558,11 @@ through @@ -54289,13 +52241,11 @@). The deleted lines are the per-channel "[2]" sub-entries, channels 0 through 119, of the RTW89_WW, RTW89_FCC and RTW89_IC rows; the surrounding [0]/[1] entries for RTW89_ETSI, RTW89_MKK, RTW89_KCC, RTW89_ACMA, RTW89_THAILAND and the other regulatory domains remain as unchanged context lines.]
@@ -54308,13 +52258,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_THAILAND][1][68] = 127, [0][1][RTW89_THAILAND][0][68] = 127, [0][1][RTW89_FCC][1][70] = -38, - [0][1][RTW89_FCC][2][70] = 30, [0][1][RTW89_ETSI][1][70] = 127, [0][1][RTW89_ETSI][0][70] = 127, [0][1][RTW89_MKK][1][70] = 127, [0][1][RTW89_MKK][0][70] = 127, [0][1][RTW89_IC][1][70] = -38, - [0][1][RTW89_IC][2][70] = 30, [0][1][RTW89_KCC][1][70] = -14, [0][1][RTW89_KCC][0][70] = 127, [0][1][RTW89_ACMA][1][70] = 127, @@ -54327,13 +52275,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_THAILAND][1][70] = 127, [0][1][RTW89_THAILAND][0][70] = 127, [0][1][RTW89_FCC][1][72] = -38, - [0][1][RTW89_FCC][2][72] = 30, [0][1][RTW89_ETSI][1][72] = 127, [0][1][RTW89_ETSI][0][72] = 127, [0][1][RTW89_MKK][1][72] = 127, [0][1][RTW89_MKK][0][72] = 127, [0][1][RTW89_IC][1][72] = -38, - [0][1][RTW89_IC][2][72] = 30, [0][1][RTW89_KCC][1][72] = -14, [0][1][RTW89_KCC][0][72] = 127, [0][1][RTW89_ACMA][1][72] = 127, @@ -54346,13 +52292,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_THAILAND][1][72] = 127, [0][1][RTW89_THAILAND][0][72] = 127, [0][1][RTW89_FCC][1][74] = -38, - [0][1][RTW89_FCC][2][74] = 30, [0][1][RTW89_ETSI][1][74] = 127, [0][1][RTW89_ETSI][0][74] = 127, [0][1][RTW89_MKK][1][74] = 127, [0][1][RTW89_MKK][0][74] = 127, [0][1][RTW89_IC][1][74] = -38, - [0][1][RTW89_IC][2][74] = 30, [0][1][RTW89_KCC][1][74] = -14, [0][1][RTW89_KCC][0][74] = 127, [0][1][RTW89_ACMA][1][74] = 127, @@ -54365,13 +52309,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_THAILAND][1][74] = 127, [0][1][RTW89_THAILAND][0][74] = 127, [0][1][RTW89_FCC][1][75] = -38, - [0][1][RTW89_FCC][2][75] = 30, [0][1][RTW89_ETSI][1][75] = 127, [0][1][RTW89_ETSI][0][75] = 127, [0][1][RTW89_MKK][1][75] = 127, [0][1][RTW89_MKK][0][75] = 127, [0][1][RTW89_IC][1][75] = -38, - [0][1][RTW89_IC][2][75] = 30, [0][1][RTW89_KCC][1][75] = -14, [0][1][RTW89_KCC][0][75] = 127, [0][1][RTW89_ACMA][1][75] = 127, @@ -54384,13 +52326,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_THAILAND][1][75] = 127, [0][1][RTW89_THAILAND][0][75] = 127, [0][1][RTW89_FCC][1][77] = -38, - [0][1][RTW89_FCC][2][77] = 30, [0][1][RTW89_ETSI][1][77] = 127, [0][1][RTW89_ETSI][0][77] = 127, [0][1][RTW89_MKK][1][77] = 127, [0][1][RTW89_MKK][0][77] = 127, [0][1][RTW89_IC][1][77] = -38, - [0][1][RTW89_IC][2][77] = 30, [0][1][RTW89_KCC][1][77] = -14, [0][1][RTW89_KCC][0][77] = 127, [0][1][RTW89_ACMA][1][77] = 127, @@ -54403,13 +52343,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_THAILAND][1][77] = 127, [0][1][RTW89_THAILAND][0][77] = 127, [0][1][RTW89_FCC][1][79] = -38, - [0][1][RTW89_FCC][2][79] = 30, [0][1][RTW89_ETSI][1][79] = 127, [0][1][RTW89_ETSI][0][79] = 127, [0][1][RTW89_MKK][1][79] = 127, [0][1][RTW89_MKK][0][79] = 127, [0][1][RTW89_IC][1][79] = -38, - [0][1][RTW89_IC][2][79] = 30, [0][1][RTW89_KCC][1][79] = -14, [0][1][RTW89_KCC][0][79] = 127, [0][1][RTW89_ACMA][1][79] = 127, @@ -54422,13 +52360,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_THAILAND][1][79] = 127, [0][1][RTW89_THAILAND][0][79] = 127, [0][1][RTW89_FCC][1][81] = -38, - [0][1][RTW89_FCC][2][81] = 30, [0][1][RTW89_ETSI][1][81] = 127, [0][1][RTW89_ETSI][0][81] = 127, [0][1][RTW89_MKK][1][81] = 127, [0][1][RTW89_MKK][0][81] = 127, [0][1][RTW89_IC][1][81] = -38, - [0][1][RTW89_IC][2][81] = 30, 
[0][1][RTW89_KCC][1][81] = -14, [0][1][RTW89_KCC][0][81] = 127, [0][1][RTW89_ACMA][1][81] = 127, @@ -54441,13 +52377,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_THAILAND][1][81] = 127, [0][1][RTW89_THAILAND][0][81] = 127, [0][1][RTW89_FCC][1][83] = -38, - [0][1][RTW89_FCC][2][83] = 30, [0][1][RTW89_ETSI][1][83] = 127, [0][1][RTW89_ETSI][0][83] = 127, [0][1][RTW89_MKK][1][83] = 127, [0][1][RTW89_MKK][0][83] = 127, [0][1][RTW89_IC][1][83] = -38, - [0][1][RTW89_IC][2][83] = 30, [0][1][RTW89_KCC][1][83] = -14, [0][1][RTW89_KCC][0][83] = 127, [0][1][RTW89_ACMA][1][83] = 127, @@ -54460,13 +52394,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_THAILAND][1][83] = 127, [0][1][RTW89_THAILAND][0][83] = 127, [0][1][RTW89_FCC][1][85] = -38, - [0][1][RTW89_FCC][2][85] = 30, [0][1][RTW89_ETSI][1][85] = 127, [0][1][RTW89_ETSI][0][85] = 127, [0][1][RTW89_MKK][1][85] = 127, [0][1][RTW89_MKK][0][85] = 127, [0][1][RTW89_IC][1][85] = -38, - [0][1][RTW89_IC][2][85] = 30, [0][1][RTW89_KCC][1][85] = -14, [0][1][RTW89_KCC][0][85] = 127, [0][1][RTW89_ACMA][1][85] = 127, @@ -54479,13 +52411,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_THAILAND][1][85] = 127, [0][1][RTW89_THAILAND][0][85] = 127, [0][1][RTW89_FCC][1][87] = -40, - [0][1][RTW89_FCC][2][87] = 127, [0][1][RTW89_ETSI][1][87] = 127, [0][1][RTW89_ETSI][0][87] = 127, [0][1][RTW89_MKK][1][87] = 127, [0][1][RTW89_MKK][0][87] = 127, [0][1][RTW89_IC][1][87] = -40, - [0][1][RTW89_IC][2][87] = 127, [0][1][RTW89_KCC][1][87] = -14, [0][1][RTW89_KCC][0][87] = 127, [0][1][RTW89_ACMA][1][87] = 127, @@ -54498,13 +52428,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_THAILAND][1][87] = 127, [0][1][RTW89_THAILAND][0][87] = 127, [0][1][RTW89_FCC][1][89] = -38, - [0][1][RTW89_FCC][2][89] = 127, [0][1][RTW89_ETSI][1][89] = 127, [0][1][RTW89_ETSI][0][89] = 127, [0][1][RTW89_MKK][1][89] = 127, [0][1][RTW89_MKK][0][89] = 127, [0][1][RTW89_IC][1][89] = -38, - [0][1][RTW89_IC][2][89] = 127, [0][1][RTW89_KCC][1][89] = -14, [0][1][RTW89_KCC][0][89] = 127, [0][1][RTW89_ACMA][1][89] = 127, @@ -54517,13 +52445,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_THAILAND][1][89] = 127, [0][1][RTW89_THAILAND][0][89] = 127, [0][1][RTW89_FCC][1][90] = -38, - [0][1][RTW89_FCC][2][90] = 127, [0][1][RTW89_ETSI][1][90] = 127, [0][1][RTW89_ETSI][0][90] = 127, [0][1][RTW89_MKK][1][90] = 127, [0][1][RTW89_MKK][0][90] = 127, [0][1][RTW89_IC][1][90] = -38, - [0][1][RTW89_IC][2][90] = 127, [0][1][RTW89_KCC][1][90] = -14, [0][1][RTW89_KCC][0][90] = 127, [0][1][RTW89_ACMA][1][90] = 127, @@ -54536,13 +52462,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_THAILAND][1][90] = 127, [0][1][RTW89_THAILAND][0][90] = 127, [0][1][RTW89_FCC][1][92] = -38, - [0][1][RTW89_FCC][2][92] = 127, [0][1][RTW89_ETSI][1][92] = 127, [0][1][RTW89_ETSI][0][92] = 127, [0][1][RTW89_MKK][1][92] = 127, [0][1][RTW89_MKK][0][92] = 127, [0][1][RTW89_IC][1][92] = -38, - [0][1][RTW89_IC][2][92] = 127, [0][1][RTW89_KCC][1][92] = -14, [0][1][RTW89_KCC][0][92] = 127, [0][1][RTW89_ACMA][1][92] = 127, @@ -54555,13 +52479,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_THAILAND][1][92] = 127, [0][1][RTW89_THAILAND][0][92] = 127, [0][1][RTW89_FCC][1][94] = -38, - [0][1][RTW89_FCC][2][94] = 127, [0][1][RTW89_ETSI][1][94] = 127, [0][1][RTW89_ETSI][0][94] = 127, [0][1][RTW89_MKK][1][94] 
= 127, [0][1][RTW89_MKK][0][94] = 127, [0][1][RTW89_IC][1][94] = -38, - [0][1][RTW89_IC][2][94] = 127, [0][1][RTW89_KCC][1][94] = -14, [0][1][RTW89_KCC][0][94] = 127, [0][1][RTW89_ACMA][1][94] = 127, @@ -54574,13 +52496,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_THAILAND][1][94] = 127, [0][1][RTW89_THAILAND][0][94] = 127, [0][1][RTW89_FCC][1][96] = -38, - [0][1][RTW89_FCC][2][96] = 127, [0][1][RTW89_ETSI][1][96] = 127, [0][1][RTW89_ETSI][0][96] = 127, [0][1][RTW89_MKK][1][96] = 127, [0][1][RTW89_MKK][0][96] = 127, [0][1][RTW89_IC][1][96] = -38, - [0][1][RTW89_IC][2][96] = 127, [0][1][RTW89_KCC][1][96] = -14, [0][1][RTW89_KCC][0][96] = 127, [0][1][RTW89_ACMA][1][96] = 127, @@ -54593,13 +52513,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_THAILAND][1][96] = 127, [0][1][RTW89_THAILAND][0][96] = 127, [0][1][RTW89_FCC][1][98] = -38, - [0][1][RTW89_FCC][2][98] = 127, [0][1][RTW89_ETSI][1][98] = 127, [0][1][RTW89_ETSI][0][98] = 127, [0][1][RTW89_MKK][1][98] = 127, [0][1][RTW89_MKK][0][98] = 127, [0][1][RTW89_IC][1][98] = -38, - [0][1][RTW89_IC][2][98] = 127, [0][1][RTW89_KCC][1][98] = -14, [0][1][RTW89_KCC][0][98] = 127, [0][1][RTW89_ACMA][1][98] = 127, @@ -54612,13 +52530,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_THAILAND][1][98] = 127, [0][1][RTW89_THAILAND][0][98] = 127, [0][1][RTW89_FCC][1][100] = -38, - [0][1][RTW89_FCC][2][100] = 127, [0][1][RTW89_ETSI][1][100] = 127, [0][1][RTW89_ETSI][0][100] = 127, [0][1][RTW89_MKK][1][100] = 127, [0][1][RTW89_MKK][0][100] = 127, [0][1][RTW89_IC][1][100] = -38, - [0][1][RTW89_IC][2][100] = 127, [0][1][RTW89_KCC][1][100] = -14, [0][1][RTW89_KCC][0][100] = 127, [0][1][RTW89_ACMA][1][100] = 127, @@ -54631,13 +52547,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_THAILAND][1][100] = 127, [0][1][RTW89_THAILAND][0][100] = 127, [0][1][RTW89_FCC][1][102] = -38, - [0][1][RTW89_FCC][2][102] = 127, [0][1][RTW89_ETSI][1][102] = 127, [0][1][RTW89_ETSI][0][102] = 127, [0][1][RTW89_MKK][1][102] = 127, [0][1][RTW89_MKK][0][102] = 127, [0][1][RTW89_IC][1][102] = -38, - [0][1][RTW89_IC][2][102] = 127, [0][1][RTW89_KCC][1][102] = -14, [0][1][RTW89_KCC][0][102] = 127, [0][1][RTW89_ACMA][1][102] = 127, @@ -54650,13 +52564,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_THAILAND][1][102] = 127, [0][1][RTW89_THAILAND][0][102] = 127, [0][1][RTW89_FCC][1][104] = -38, - [0][1][RTW89_FCC][2][104] = 127, [0][1][RTW89_ETSI][1][104] = 127, [0][1][RTW89_ETSI][0][104] = 127, [0][1][RTW89_MKK][1][104] = 127, [0][1][RTW89_MKK][0][104] = 127, [0][1][RTW89_IC][1][104] = -38, - [0][1][RTW89_IC][2][104] = 127, [0][1][RTW89_KCC][1][104] = -14, [0][1][RTW89_KCC][0][104] = 127, [0][1][RTW89_ACMA][1][104] = 127, @@ -54669,13 +52581,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_THAILAND][1][104] = 127, [0][1][RTW89_THAILAND][0][104] = 127, [0][1][RTW89_FCC][1][105] = -38, - [0][1][RTW89_FCC][2][105] = 127, [0][1][RTW89_ETSI][1][105] = 127, [0][1][RTW89_ETSI][0][105] = 127, [0][1][RTW89_MKK][1][105] = 127, [0][1][RTW89_MKK][0][105] = 127, [0][1][RTW89_IC][1][105] = -38, - [0][1][RTW89_IC][2][105] = 127, [0][1][RTW89_KCC][1][105] = -14, [0][1][RTW89_KCC][0][105] = 127, [0][1][RTW89_ACMA][1][105] = 127, @@ -54688,13 +52598,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_THAILAND][1][105] = 127, [0][1][RTW89_THAILAND][0][105] = 
127, [0][1][RTW89_FCC][1][107] = -34, - [0][1][RTW89_FCC][2][107] = 127, [0][1][RTW89_ETSI][1][107] = 127, [0][1][RTW89_ETSI][0][107] = 127, [0][1][RTW89_MKK][1][107] = 127, [0][1][RTW89_MKK][0][107] = 127, [0][1][RTW89_IC][1][107] = -34, - [0][1][RTW89_IC][2][107] = 127, [0][1][RTW89_KCC][1][107] = -14, [0][1][RTW89_KCC][0][107] = 127, [0][1][RTW89_ACMA][1][107] = 127, @@ -54707,13 +52615,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_THAILAND][1][107] = 127, [0][1][RTW89_THAILAND][0][107] = 127, [0][1][RTW89_FCC][1][109] = -34, - [0][1][RTW89_FCC][2][109] = 127, [0][1][RTW89_ETSI][1][109] = 127, [0][1][RTW89_ETSI][0][109] = 127, [0][1][RTW89_MKK][1][109] = 127, [0][1][RTW89_MKK][0][109] = 127, [0][1][RTW89_IC][1][109] = -34, - [0][1][RTW89_IC][2][109] = 127, [0][1][RTW89_KCC][1][109] = 127, [0][1][RTW89_KCC][0][109] = 127, [0][1][RTW89_ACMA][1][109] = 127, @@ -54726,13 +52632,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_THAILAND][1][109] = 127, [0][1][RTW89_THAILAND][0][109] = 127, [0][1][RTW89_FCC][1][111] = 127, - [0][1][RTW89_FCC][2][111] = 127, [0][1][RTW89_ETSI][1][111] = 127, [0][1][RTW89_ETSI][0][111] = 127, [0][1][RTW89_MKK][1][111] = 127, [0][1][RTW89_MKK][0][111] = 127, [0][1][RTW89_IC][1][111] = 127, - [0][1][RTW89_IC][2][111] = 127, [0][1][RTW89_KCC][1][111] = 127, [0][1][RTW89_KCC][0][111] = 127, [0][1][RTW89_ACMA][1][111] = 127, @@ -54745,13 +52649,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_THAILAND][1][111] = 127, [0][1][RTW89_THAILAND][0][111] = 127, [0][1][RTW89_FCC][1][113] = 127, - [0][1][RTW89_FCC][2][113] = 127, [0][1][RTW89_ETSI][1][113] = 127, [0][1][RTW89_ETSI][0][113] = 127, [0][1][RTW89_MKK][1][113] = 127, [0][1][RTW89_MKK][0][113] = 127, [0][1][RTW89_IC][1][113] = 127, - [0][1][RTW89_IC][2][113] = 127, [0][1][RTW89_KCC][1][113] = 127, [0][1][RTW89_KCC][0][113] = 127, [0][1][RTW89_ACMA][1][113] = 127, @@ -54764,13 +52666,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_THAILAND][1][113] = 127, [0][1][RTW89_THAILAND][0][113] = 127, [0][1][RTW89_FCC][1][115] = 127, - [0][1][RTW89_FCC][2][115] = 127, [0][1][RTW89_ETSI][1][115] = 127, [0][1][RTW89_ETSI][0][115] = 127, [0][1][RTW89_MKK][1][115] = 127, [0][1][RTW89_MKK][0][115] = 127, [0][1][RTW89_IC][1][115] = 127, - [0][1][RTW89_IC][2][115] = 127, [0][1][RTW89_KCC][1][115] = 127, [0][1][RTW89_KCC][0][115] = 127, [0][1][RTW89_ACMA][1][115] = 127, @@ -54783,13 +52683,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_THAILAND][1][115] = 127, [0][1][RTW89_THAILAND][0][115] = 127, [0][1][RTW89_FCC][1][117] = 127, - [0][1][RTW89_FCC][2][117] = 127, [0][1][RTW89_ETSI][1][117] = 127, [0][1][RTW89_ETSI][0][117] = 127, [0][1][RTW89_MKK][1][117] = 127, [0][1][RTW89_MKK][0][117] = 127, [0][1][RTW89_IC][1][117] = 127, - [0][1][RTW89_IC][2][117] = 127, [0][1][RTW89_KCC][1][117] = 127, [0][1][RTW89_KCC][0][117] = 127, [0][1][RTW89_ACMA][1][117] = 127, @@ -54802,13 +52700,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_THAILAND][1][117] = 127, [0][1][RTW89_THAILAND][0][117] = 127, [0][1][RTW89_FCC][1][119] = 127, - [0][1][RTW89_FCC][2][119] = 127, [0][1][RTW89_ETSI][1][119] = 127, [0][1][RTW89_ETSI][0][119] = 127, [0][1][RTW89_MKK][1][119] = 127, [0][1][RTW89_MKK][0][119] = 127, [0][1][RTW89_IC][1][119] = 127, - [0][1][RTW89_IC][2][119] = 127, [0][1][RTW89_KCC][1][119] = 127, [0][1][RTW89_KCC][0][119] = 127, 
[0][1][RTW89_ACMA][1][119] = 127, @@ -54821,13 +52717,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [0][1][RTW89_THAILAND][1][119] = 127, [0][1][RTW89_THAILAND][0][119] = 127, [1][0][RTW89_FCC][1][0] = -4, - [1][0][RTW89_FCC][2][0] = 52, [1][0][RTW89_ETSI][1][0] = 46, [1][0][RTW89_ETSI][0][0] = 6, [1][0][RTW89_MKK][1][0] = 42, [1][0][RTW89_MKK][0][0] = 2, [1][0][RTW89_IC][1][0] = -4, - [1][0][RTW89_IC][2][0] = 52, [1][0][RTW89_KCC][1][0] = -2, [1][0][RTW89_KCC][0][0] = -2, [1][0][RTW89_ACMA][1][0] = 46, @@ -54840,13 +52734,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][0] = 42, [1][0][RTW89_THAILAND][0][0] = -4, [1][0][RTW89_FCC][1][2] = -4, - [1][0][RTW89_FCC][2][2] = 52, [1][0][RTW89_ETSI][1][2] = 46, [1][0][RTW89_ETSI][0][2] = 6, [1][0][RTW89_MKK][1][2] = 42, [1][0][RTW89_MKK][0][2] = 2, [1][0][RTW89_IC][1][2] = -4, - [1][0][RTW89_IC][2][2] = 52, [1][0][RTW89_KCC][1][2] = -2, [1][0][RTW89_KCC][0][2] = -2, [1][0][RTW89_ACMA][1][2] = 46, @@ -54859,13 +52751,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][2] = 42, [1][0][RTW89_THAILAND][0][2] = -4, [1][0][RTW89_FCC][1][4] = -4, - [1][0][RTW89_FCC][2][4] = 52, [1][0][RTW89_ETSI][1][4] = 46, [1][0][RTW89_ETSI][0][4] = 6, [1][0][RTW89_MKK][1][4] = 42, [1][0][RTW89_MKK][0][4] = 2, [1][0][RTW89_IC][1][4] = -4, - [1][0][RTW89_IC][2][4] = 52, [1][0][RTW89_KCC][1][4] = -2, [1][0][RTW89_KCC][0][4] = -2, [1][0][RTW89_ACMA][1][4] = 46, @@ -54878,13 +52768,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][4] = 42, [1][0][RTW89_THAILAND][0][4] = -4, [1][0][RTW89_FCC][1][6] = -4, - [1][0][RTW89_FCC][2][6] = 52, [1][0][RTW89_ETSI][1][6] = 46, [1][0][RTW89_ETSI][0][6] = 6, [1][0][RTW89_MKK][1][6] = 42, [1][0][RTW89_MKK][0][6] = 2, [1][0][RTW89_IC][1][6] = -4, - [1][0][RTW89_IC][2][6] = 52, [1][0][RTW89_KCC][1][6] = -2, [1][0][RTW89_KCC][0][6] = -2, [1][0][RTW89_ACMA][1][6] = 46, @@ -54897,13 +52785,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][6] = 42, [1][0][RTW89_THAILAND][0][6] = -4, [1][0][RTW89_FCC][1][8] = -4, - [1][0][RTW89_FCC][2][8] = 52, [1][0][RTW89_ETSI][1][8] = 46, [1][0][RTW89_ETSI][0][8] = 6, [1][0][RTW89_MKK][1][8] = 42, [1][0][RTW89_MKK][0][8] = 2, [1][0][RTW89_IC][1][8] = -4, - [1][0][RTW89_IC][2][8] = 52, [1][0][RTW89_KCC][1][8] = -2, [1][0][RTW89_KCC][0][8] = -2, [1][0][RTW89_ACMA][1][8] = 46, @@ -54916,13 +52802,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][8] = 42, [1][0][RTW89_THAILAND][0][8] = -4, [1][0][RTW89_FCC][1][10] = -4, - [1][0][RTW89_FCC][2][10] = 52, [1][0][RTW89_ETSI][1][10] = 46, [1][0][RTW89_ETSI][0][10] = 6, [1][0][RTW89_MKK][1][10] = 42, [1][0][RTW89_MKK][0][10] = 2, [1][0][RTW89_IC][1][10] = -4, - [1][0][RTW89_IC][2][10] = 52, [1][0][RTW89_KCC][1][10] = -2, [1][0][RTW89_KCC][0][10] = -2, [1][0][RTW89_ACMA][1][10] = 46, @@ -54935,13 +52819,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][10] = 42, [1][0][RTW89_THAILAND][0][10] = -4, [1][0][RTW89_FCC][1][12] = -4, - [1][0][RTW89_FCC][2][12] = 52, [1][0][RTW89_ETSI][1][12] = 46, [1][0][RTW89_ETSI][0][12] = 6, [1][0][RTW89_MKK][1][12] = 42, [1][0][RTW89_MKK][0][12] = 2, [1][0][RTW89_IC][1][12] = -4, - [1][0][RTW89_IC][2][12] = 52, [1][0][RTW89_KCC][1][12] = -2, [1][0][RTW89_KCC][0][12] = -2, [1][0][RTW89_ACMA][1][12] = 46, @@ -54954,13 +52836,11 @@ 
const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][12] = 42, [1][0][RTW89_THAILAND][0][12] = -4, [1][0][RTW89_FCC][1][14] = -4, - [1][0][RTW89_FCC][2][14] = 52, [1][0][RTW89_ETSI][1][14] = 46, [1][0][RTW89_ETSI][0][14] = 6, [1][0][RTW89_MKK][1][14] = 42, [1][0][RTW89_MKK][0][14] = 2, [1][0][RTW89_IC][1][14] = -4, - [1][0][RTW89_IC][2][14] = 52, [1][0][RTW89_KCC][1][14] = -2, [1][0][RTW89_KCC][0][14] = -2, [1][0][RTW89_ACMA][1][14] = 46, @@ -54973,13 +52853,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][14] = 42, [1][0][RTW89_THAILAND][0][14] = -4, [1][0][RTW89_FCC][1][15] = -4, - [1][0][RTW89_FCC][2][15] = 52, [1][0][RTW89_ETSI][1][15] = 46, [1][0][RTW89_ETSI][0][15] = 6, [1][0][RTW89_MKK][1][15] = 42, [1][0][RTW89_MKK][0][15] = 2, [1][0][RTW89_IC][1][15] = -4, - [1][0][RTW89_IC][2][15] = 52, [1][0][RTW89_KCC][1][15] = -2, [1][0][RTW89_KCC][0][15] = -2, [1][0][RTW89_ACMA][1][15] = 46, @@ -54992,13 +52870,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][15] = 42, [1][0][RTW89_THAILAND][0][15] = -4, [1][0][RTW89_FCC][1][17] = -4, - [1][0][RTW89_FCC][2][17] = 52, [1][0][RTW89_ETSI][1][17] = 46, [1][0][RTW89_ETSI][0][17] = 6, [1][0][RTW89_MKK][1][17] = 42, [1][0][RTW89_MKK][0][17] = 2, [1][0][RTW89_IC][1][17] = -4, - [1][0][RTW89_IC][2][17] = 52, [1][0][RTW89_KCC][1][17] = -2, [1][0][RTW89_KCC][0][17] = -2, [1][0][RTW89_ACMA][1][17] = 46, @@ -55011,13 +52887,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][17] = 42, [1][0][RTW89_THAILAND][0][17] = -4, [1][0][RTW89_FCC][1][19] = -4, - [1][0][RTW89_FCC][2][19] = 52, [1][0][RTW89_ETSI][1][19] = 46, [1][0][RTW89_ETSI][0][19] = 6, [1][0][RTW89_MKK][1][19] = 42, [1][0][RTW89_MKK][0][19] = 2, [1][0][RTW89_IC][1][19] = -4, - [1][0][RTW89_IC][2][19] = 52, [1][0][RTW89_KCC][1][19] = -2, [1][0][RTW89_KCC][0][19] = -2, [1][0][RTW89_ACMA][1][19] = 46, @@ -55030,13 +52904,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][19] = 42, [1][0][RTW89_THAILAND][0][19] = -4, [1][0][RTW89_FCC][1][21] = -4, - [1][0][RTW89_FCC][2][21] = 52, [1][0][RTW89_ETSI][1][21] = 46, [1][0][RTW89_ETSI][0][21] = 6, [1][0][RTW89_MKK][1][21] = 42, [1][0][RTW89_MKK][0][21] = 2, [1][0][RTW89_IC][1][21] = -4, - [1][0][RTW89_IC][2][21] = 52, [1][0][RTW89_KCC][1][21] = -2, [1][0][RTW89_KCC][0][21] = -2, [1][0][RTW89_ACMA][1][21] = 46, @@ -55049,13 +52921,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][21] = 42, [1][0][RTW89_THAILAND][0][21] = -4, [1][0][RTW89_FCC][1][23] = -4, - [1][0][RTW89_FCC][2][23] = 66, [1][0][RTW89_ETSI][1][23] = 46, [1][0][RTW89_ETSI][0][23] = 6, [1][0][RTW89_MKK][1][23] = 42, [1][0][RTW89_MKK][0][23] = 2, [1][0][RTW89_IC][1][23] = -4, - [1][0][RTW89_IC][2][23] = 66, [1][0][RTW89_KCC][1][23] = -2, [1][0][RTW89_KCC][0][23] = -2, [1][0][RTW89_ACMA][1][23] = 46, @@ -55068,13 +52938,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][23] = 42, [1][0][RTW89_THAILAND][0][23] = -4, [1][0][RTW89_FCC][1][25] = -4, - [1][0][RTW89_FCC][2][25] = 66, [1][0][RTW89_ETSI][1][25] = 46, [1][0][RTW89_ETSI][0][25] = 6, [1][0][RTW89_MKK][1][25] = 42, [1][0][RTW89_MKK][0][25] = 2, [1][0][RTW89_IC][1][25] = -4, - [1][0][RTW89_IC][2][25] = 66, [1][0][RTW89_KCC][1][25] = -2, [1][0][RTW89_KCC][0][25] = -2, [1][0][RTW89_ACMA][1][25] = 46, @@ -55087,13 +52955,11 
@@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][25] = 42, [1][0][RTW89_THAILAND][0][25] = -4, [1][0][RTW89_FCC][1][27] = -4, - [1][0][RTW89_FCC][2][27] = 66, [1][0][RTW89_ETSI][1][27] = 46, [1][0][RTW89_ETSI][0][27] = 6, [1][0][RTW89_MKK][1][27] = 42, [1][0][RTW89_MKK][0][27] = 2, [1][0][RTW89_IC][1][27] = -4, - [1][0][RTW89_IC][2][27] = 66, [1][0][RTW89_KCC][1][27] = -2, [1][0][RTW89_KCC][0][27] = -2, [1][0][RTW89_ACMA][1][27] = 46, @@ -55106,13 +52972,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][27] = 42, [1][0][RTW89_THAILAND][0][27] = -4, [1][0][RTW89_FCC][1][29] = -4, - [1][0][RTW89_FCC][2][29] = 66, [1][0][RTW89_ETSI][1][29] = 46, [1][0][RTW89_ETSI][0][29] = 6, [1][0][RTW89_MKK][1][29] = 42, [1][0][RTW89_MKK][0][29] = 2, [1][0][RTW89_IC][1][29] = -4, - [1][0][RTW89_IC][2][29] = 66, [1][0][RTW89_KCC][1][29] = -2, [1][0][RTW89_KCC][0][29] = -2, [1][0][RTW89_ACMA][1][29] = 46, @@ -55125,13 +52989,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][29] = 42, [1][0][RTW89_THAILAND][0][29] = -4, [1][0][RTW89_FCC][1][30] = -4, - [1][0][RTW89_FCC][2][30] = 66, [1][0][RTW89_ETSI][1][30] = 46, [1][0][RTW89_ETSI][0][30] = 6, [1][0][RTW89_MKK][1][30] = 42, [1][0][RTW89_MKK][0][30] = 2, [1][0][RTW89_IC][1][30] = -4, - [1][0][RTW89_IC][2][30] = 66, [1][0][RTW89_KCC][1][30] = -2, [1][0][RTW89_KCC][0][30] = -2, [1][0][RTW89_ACMA][1][30] = 46, @@ -55144,13 +53006,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][30] = 42, [1][0][RTW89_THAILAND][0][30] = -4, [1][0][RTW89_FCC][1][32] = -4, - [1][0][RTW89_FCC][2][32] = 66, [1][0][RTW89_ETSI][1][32] = 46, [1][0][RTW89_ETSI][0][32] = 6, [1][0][RTW89_MKK][1][32] = 42, [1][0][RTW89_MKK][0][32] = 2, [1][0][RTW89_IC][1][32] = -4, - [1][0][RTW89_IC][2][32] = 66, [1][0][RTW89_KCC][1][32] = -2, [1][0][RTW89_KCC][0][32] = -2, [1][0][RTW89_ACMA][1][32] = 46, @@ -55163,13 +53023,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][32] = 42, [1][0][RTW89_THAILAND][0][32] = -4, [1][0][RTW89_FCC][1][34] = -4, - [1][0][RTW89_FCC][2][34] = 66, [1][0][RTW89_ETSI][1][34] = 46, [1][0][RTW89_ETSI][0][34] = 6, [1][0][RTW89_MKK][1][34] = 42, [1][0][RTW89_MKK][0][34] = 2, [1][0][RTW89_IC][1][34] = -4, - [1][0][RTW89_IC][2][34] = 66, [1][0][RTW89_KCC][1][34] = -2, [1][0][RTW89_KCC][0][34] = -2, [1][0][RTW89_ACMA][1][34] = 46, @@ -55182,13 +53040,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][34] = 42, [1][0][RTW89_THAILAND][0][34] = -4, [1][0][RTW89_FCC][1][36] = -4, - [1][0][RTW89_FCC][2][36] = 66, [1][0][RTW89_ETSI][1][36] = 46, [1][0][RTW89_ETSI][0][36] = 6, [1][0][RTW89_MKK][1][36] = 42, [1][0][RTW89_MKK][0][36] = 2, [1][0][RTW89_IC][1][36] = -4, - [1][0][RTW89_IC][2][36] = 66, [1][0][RTW89_KCC][1][36] = -2, [1][0][RTW89_KCC][0][36] = -2, [1][0][RTW89_ACMA][1][36] = 46, @@ -55201,13 +53057,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][36] = 42, [1][0][RTW89_THAILAND][0][36] = -4, [1][0][RTW89_FCC][1][38] = -4, - [1][0][RTW89_FCC][2][38] = 66, [1][0][RTW89_ETSI][1][38] = 46, [1][0][RTW89_ETSI][0][38] = 6, [1][0][RTW89_MKK][1][38] = 42, [1][0][RTW89_MKK][0][38] = 2, [1][0][RTW89_IC][1][38] = -4, - [1][0][RTW89_IC][2][38] = 66, [1][0][RTW89_KCC][1][38] = -2, [1][0][RTW89_KCC][0][38] = -2, [1][0][RTW89_ACMA][1][38] = 46, @@ -55220,13 
+53074,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][38] = 42, [1][0][RTW89_THAILAND][0][38] = -4, [1][0][RTW89_FCC][1][40] = -4, - [1][0][RTW89_FCC][2][40] = 66, [1][0][RTW89_ETSI][1][40] = 46, [1][0][RTW89_ETSI][0][40] = 6, [1][0][RTW89_MKK][1][40] = 42, [1][0][RTW89_MKK][0][40] = 2, [1][0][RTW89_IC][1][40] = -4, - [1][0][RTW89_IC][2][40] = 66, [1][0][RTW89_KCC][1][40] = -2, [1][0][RTW89_KCC][0][40] = -2, [1][0][RTW89_ACMA][1][40] = 46, @@ -55239,13 +53091,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][40] = 42, [1][0][RTW89_THAILAND][0][40] = -4, [1][0][RTW89_FCC][1][42] = -4, - [1][0][RTW89_FCC][2][42] = 66, [1][0][RTW89_ETSI][1][42] = 46, [1][0][RTW89_ETSI][0][42] = 6, [1][0][RTW89_MKK][1][42] = 42, [1][0][RTW89_MKK][0][42] = 2, [1][0][RTW89_IC][1][42] = -4, - [1][0][RTW89_IC][2][42] = 66, [1][0][RTW89_KCC][1][42] = -2, [1][0][RTW89_KCC][0][42] = -2, [1][0][RTW89_ACMA][1][42] = 46, @@ -55258,13 +53108,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][42] = 42, [1][0][RTW89_THAILAND][0][42] = -4, [1][0][RTW89_FCC][1][44] = -4, - [1][0][RTW89_FCC][2][44] = 66, [1][0][RTW89_ETSI][1][44] = 46, [1][0][RTW89_ETSI][0][44] = 8, [1][0][RTW89_MKK][1][44] = 22, [1][0][RTW89_MKK][0][44] = 4, [1][0][RTW89_IC][1][44] = -4, - [1][0][RTW89_IC][2][44] = 66, [1][0][RTW89_KCC][1][44] = -2, [1][0][RTW89_KCC][0][44] = -2, [1][0][RTW89_ACMA][1][44] = 46, @@ -55277,13 +53125,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][44] = 42, [1][0][RTW89_THAILAND][0][44] = -4, [1][0][RTW89_FCC][1][45] = -4, - [1][0][RTW89_FCC][2][45] = 127, [1][0][RTW89_ETSI][1][45] = 127, [1][0][RTW89_ETSI][0][45] = 127, [1][0][RTW89_MKK][1][45] = 127, [1][0][RTW89_MKK][0][45] = 127, [1][0][RTW89_IC][1][45] = -4, - [1][0][RTW89_IC][2][45] = 68, [1][0][RTW89_KCC][1][45] = -2, [1][0][RTW89_KCC][0][45] = 127, [1][0][RTW89_ACMA][1][45] = 127, @@ -55296,13 +53142,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][45] = 127, [1][0][RTW89_THAILAND][0][45] = 127, [1][0][RTW89_FCC][1][47] = -4, - [1][0][RTW89_FCC][2][47] = 127, [1][0][RTW89_ETSI][1][47] = 127, [1][0][RTW89_ETSI][0][47] = 127, [1][0][RTW89_MKK][1][47] = 127, [1][0][RTW89_MKK][0][47] = 127, [1][0][RTW89_IC][1][47] = -4, - [1][0][RTW89_IC][2][47] = 68, [1][0][RTW89_KCC][1][47] = -2, [1][0][RTW89_KCC][0][47] = 127, [1][0][RTW89_ACMA][1][47] = 127, @@ -55315,13 +53159,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][47] = 127, [1][0][RTW89_THAILAND][0][47] = 127, [1][0][RTW89_FCC][1][49] = -4, - [1][0][RTW89_FCC][2][49] = 127, [1][0][RTW89_ETSI][1][49] = 127, [1][0][RTW89_ETSI][0][49] = 127, [1][0][RTW89_MKK][1][49] = 127, [1][0][RTW89_MKK][0][49] = 127, [1][0][RTW89_IC][1][49] = -4, - [1][0][RTW89_IC][2][49] = 68, [1][0][RTW89_KCC][1][49] = -2, [1][0][RTW89_KCC][0][49] = 127, [1][0][RTW89_ACMA][1][49] = 127, @@ -55334,13 +53176,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][49] = 127, [1][0][RTW89_THAILAND][0][49] = 127, [1][0][RTW89_FCC][1][51] = -4, - [1][0][RTW89_FCC][2][51] = 127, [1][0][RTW89_ETSI][1][51] = 127, [1][0][RTW89_ETSI][0][51] = 127, [1][0][RTW89_MKK][1][51] = 127, [1][0][RTW89_MKK][0][51] = 127, [1][0][RTW89_IC][1][51] = -4, - [1][0][RTW89_IC][2][51] = 68, [1][0][RTW89_KCC][1][51] = -2, [1][0][RTW89_KCC][0][51] = 127, 
[1][0][RTW89_ACMA][1][51] = 127, @@ -55353,13 +53193,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][51] = 127, [1][0][RTW89_THAILAND][0][51] = 127, [1][0][RTW89_FCC][1][53] = -4, - [1][0][RTW89_FCC][2][53] = 127, [1][0][RTW89_ETSI][1][53] = 127, [1][0][RTW89_ETSI][0][53] = 127, [1][0][RTW89_MKK][1][53] = 127, [1][0][RTW89_MKK][0][53] = 127, [1][0][RTW89_IC][1][53] = -4, - [1][0][RTW89_IC][2][53] = 68, [1][0][RTW89_KCC][1][53] = -2, [1][0][RTW89_KCC][0][53] = 127, [1][0][RTW89_ACMA][1][53] = 127, @@ -55372,13 +53210,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][53] = 127, [1][0][RTW89_THAILAND][0][53] = 127, [1][0][RTW89_FCC][1][55] = -4, - [1][0][RTW89_FCC][2][55] = 68, [1][0][RTW89_ETSI][1][55] = 127, [1][0][RTW89_ETSI][0][55] = 127, [1][0][RTW89_MKK][1][55] = 127, [1][0][RTW89_MKK][0][55] = 127, [1][0][RTW89_IC][1][55] = -4, - [1][0][RTW89_IC][2][55] = 68, [1][0][RTW89_KCC][1][55] = -2, [1][0][RTW89_KCC][0][55] = 127, [1][0][RTW89_ACMA][1][55] = 127, @@ -55391,13 +53227,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][55] = 127, [1][0][RTW89_THAILAND][0][55] = 127, [1][0][RTW89_FCC][1][57] = -4, - [1][0][RTW89_FCC][2][57] = 68, [1][0][RTW89_ETSI][1][57] = 127, [1][0][RTW89_ETSI][0][57] = 127, [1][0][RTW89_MKK][1][57] = 127, [1][0][RTW89_MKK][0][57] = 127, [1][0][RTW89_IC][1][57] = -4, - [1][0][RTW89_IC][2][57] = 68, [1][0][RTW89_KCC][1][57] = -2, [1][0][RTW89_KCC][0][57] = 127, [1][0][RTW89_ACMA][1][57] = 127, @@ -55410,13 +53244,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][57] = 127, [1][0][RTW89_THAILAND][0][57] = 127, [1][0][RTW89_FCC][1][59] = -4, - [1][0][RTW89_FCC][2][59] = 68, [1][0][RTW89_ETSI][1][59] = 127, [1][0][RTW89_ETSI][0][59] = 127, [1][0][RTW89_MKK][1][59] = 127, [1][0][RTW89_MKK][0][59] = 127, [1][0][RTW89_IC][1][59] = -4, - [1][0][RTW89_IC][2][59] = 68, [1][0][RTW89_KCC][1][59] = -2, [1][0][RTW89_KCC][0][59] = 127, [1][0][RTW89_ACMA][1][59] = 127, @@ -55429,13 +53261,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][59] = 127, [1][0][RTW89_THAILAND][0][59] = 127, [1][0][RTW89_FCC][1][60] = -4, - [1][0][RTW89_FCC][2][60] = 68, [1][0][RTW89_ETSI][1][60] = 127, [1][0][RTW89_ETSI][0][60] = 127, [1][0][RTW89_MKK][1][60] = 127, [1][0][RTW89_MKK][0][60] = 127, [1][0][RTW89_IC][1][60] = -4, - [1][0][RTW89_IC][2][60] = 68, [1][0][RTW89_KCC][1][60] = -2, [1][0][RTW89_KCC][0][60] = 127, [1][0][RTW89_ACMA][1][60] = 127, @@ -55448,13 +53278,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][60] = 127, [1][0][RTW89_THAILAND][0][60] = 127, [1][0][RTW89_FCC][1][62] = -4, - [1][0][RTW89_FCC][2][62] = 68, [1][0][RTW89_ETSI][1][62] = 127, [1][0][RTW89_ETSI][0][62] = 127, [1][0][RTW89_MKK][1][62] = 127, [1][0][RTW89_MKK][0][62] = 127, [1][0][RTW89_IC][1][62] = -4, - [1][0][RTW89_IC][2][62] = 68, [1][0][RTW89_KCC][1][62] = -2, [1][0][RTW89_KCC][0][62] = 127, [1][0][RTW89_ACMA][1][62] = 127, @@ -55467,13 +53295,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][62] = 127, [1][0][RTW89_THAILAND][0][62] = 127, [1][0][RTW89_FCC][1][64] = -4, - [1][0][RTW89_FCC][2][64] = 68, [1][0][RTW89_ETSI][1][64] = 127, [1][0][RTW89_ETSI][0][64] = 127, [1][0][RTW89_MKK][1][64] = 127, [1][0][RTW89_MKK][0][64] = 127, [1][0][RTW89_IC][1][64] = -4, - 
[1][0][RTW89_IC][2][64] = 68, [1][0][RTW89_KCC][1][64] = -2, [1][0][RTW89_KCC][0][64] = 127, [1][0][RTW89_ACMA][1][64] = 127, @@ -55486,13 +53312,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][64] = 127, [1][0][RTW89_THAILAND][0][64] = 127, [1][0][RTW89_FCC][1][66] = -4, - [1][0][RTW89_FCC][2][66] = 68, [1][0][RTW89_ETSI][1][66] = 127, [1][0][RTW89_ETSI][0][66] = 127, [1][0][RTW89_MKK][1][66] = 127, [1][0][RTW89_MKK][0][66] = 127, [1][0][RTW89_IC][1][66] = -4, - [1][0][RTW89_IC][2][66] = 68, [1][0][RTW89_KCC][1][66] = -2, [1][0][RTW89_KCC][0][66] = 127, [1][0][RTW89_ACMA][1][66] = 127, @@ -55505,13 +53329,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][66] = 127, [1][0][RTW89_THAILAND][0][66] = 127, [1][0][RTW89_FCC][1][68] = -4, - [1][0][RTW89_FCC][2][68] = 68, [1][0][RTW89_ETSI][1][68] = 127, [1][0][RTW89_ETSI][0][68] = 127, [1][0][RTW89_MKK][1][68] = 127, [1][0][RTW89_MKK][0][68] = 127, [1][0][RTW89_IC][1][68] = -4, - [1][0][RTW89_IC][2][68] = 68, [1][0][RTW89_KCC][1][68] = -2, [1][0][RTW89_KCC][0][68] = 127, [1][0][RTW89_ACMA][1][68] = 127, @@ -55524,13 +53346,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][68] = 127, [1][0][RTW89_THAILAND][0][68] = 127, [1][0][RTW89_FCC][1][70] = -4, - [1][0][RTW89_FCC][2][70] = 68, [1][0][RTW89_ETSI][1][70] = 127, [1][0][RTW89_ETSI][0][70] = 127, [1][0][RTW89_MKK][1][70] = 127, [1][0][RTW89_MKK][0][70] = 127, [1][0][RTW89_IC][1][70] = -4, - [1][0][RTW89_IC][2][70] = 68, [1][0][RTW89_KCC][1][70] = -2, [1][0][RTW89_KCC][0][70] = 127, [1][0][RTW89_ACMA][1][70] = 127, @@ -55543,13 +53363,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][70] = 127, [1][0][RTW89_THAILAND][0][70] = 127, [1][0][RTW89_FCC][1][72] = -4, - [1][0][RTW89_FCC][2][72] = 68, [1][0][RTW89_ETSI][1][72] = 127, [1][0][RTW89_ETSI][0][72] = 127, [1][0][RTW89_MKK][1][72] = 127, [1][0][RTW89_MKK][0][72] = 127, [1][0][RTW89_IC][1][72] = -4, - [1][0][RTW89_IC][2][72] = 68, [1][0][RTW89_KCC][1][72] = -2, [1][0][RTW89_KCC][0][72] = 127, [1][0][RTW89_ACMA][1][72] = 127, @@ -55562,13 +53380,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][72] = 127, [1][0][RTW89_THAILAND][0][72] = 127, [1][0][RTW89_FCC][1][74] = -4, - [1][0][RTW89_FCC][2][74] = 68, [1][0][RTW89_ETSI][1][74] = 127, [1][0][RTW89_ETSI][0][74] = 127, [1][0][RTW89_MKK][1][74] = 127, [1][0][RTW89_MKK][0][74] = 127, [1][0][RTW89_IC][1][74] = -4, - [1][0][RTW89_IC][2][74] = 68, [1][0][RTW89_KCC][1][74] = -2, [1][0][RTW89_KCC][0][74] = 127, [1][0][RTW89_ACMA][1][74] = 127, @@ -55581,13 +53397,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][74] = 127, [1][0][RTW89_THAILAND][0][74] = 127, [1][0][RTW89_FCC][1][75] = -4, - [1][0][RTW89_FCC][2][75] = 68, [1][0][RTW89_ETSI][1][75] = 127, [1][0][RTW89_ETSI][0][75] = 127, [1][0][RTW89_MKK][1][75] = 127, [1][0][RTW89_MKK][0][75] = 127, [1][0][RTW89_IC][1][75] = -4, - [1][0][RTW89_IC][2][75] = 68, [1][0][RTW89_KCC][1][75] = -2, [1][0][RTW89_KCC][0][75] = 127, [1][0][RTW89_ACMA][1][75] = 127, @@ -55600,13 +53414,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][75] = 127, [1][0][RTW89_THAILAND][0][75] = 127, [1][0][RTW89_FCC][1][77] = -4, - [1][0][RTW89_FCC][2][77] = 68, [1][0][RTW89_ETSI][1][77] = 127, [1][0][RTW89_ETSI][0][77] = 127, [1][0][RTW89_MKK][1][77] 
= 127, [1][0][RTW89_MKK][0][77] = 127, [1][0][RTW89_IC][1][77] = -4, - [1][0][RTW89_IC][2][77] = 68, [1][0][RTW89_KCC][1][77] = -2, [1][0][RTW89_KCC][0][77] = 127, [1][0][RTW89_ACMA][1][77] = 127, @@ -55619,13 +53431,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][77] = 127, [1][0][RTW89_THAILAND][0][77] = 127, [1][0][RTW89_FCC][1][79] = -4, - [1][0][RTW89_FCC][2][79] = 68, [1][0][RTW89_ETSI][1][79] = 127, [1][0][RTW89_ETSI][0][79] = 127, [1][0][RTW89_MKK][1][79] = 127, [1][0][RTW89_MKK][0][79] = 127, [1][0][RTW89_IC][1][79] = -4, - [1][0][RTW89_IC][2][79] = 68, [1][0][RTW89_KCC][1][79] = -2, [1][0][RTW89_KCC][0][79] = 127, [1][0][RTW89_ACMA][1][79] = 127, @@ -55638,13 +53448,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][79] = 127, [1][0][RTW89_THAILAND][0][79] = 127, [1][0][RTW89_FCC][1][81] = -4, - [1][0][RTW89_FCC][2][81] = 68, [1][0][RTW89_ETSI][1][81] = 127, [1][0][RTW89_ETSI][0][81] = 127, [1][0][RTW89_MKK][1][81] = 127, [1][0][RTW89_MKK][0][81] = 127, [1][0][RTW89_IC][1][81] = -4, - [1][0][RTW89_IC][2][81] = 68, [1][0][RTW89_KCC][1][81] = -2, [1][0][RTW89_KCC][0][81] = 127, [1][0][RTW89_ACMA][1][81] = 127, @@ -55657,13 +53465,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][81] = 127, [1][0][RTW89_THAILAND][0][81] = 127, [1][0][RTW89_FCC][1][83] = -4, - [1][0][RTW89_FCC][2][83] = 68, [1][0][RTW89_ETSI][1][83] = 127, [1][0][RTW89_ETSI][0][83] = 127, [1][0][RTW89_MKK][1][83] = 127, [1][0][RTW89_MKK][0][83] = 127, [1][0][RTW89_IC][1][83] = -4, - [1][0][RTW89_IC][2][83] = 68, [1][0][RTW89_KCC][1][83] = -2, [1][0][RTW89_KCC][0][83] = 127, [1][0][RTW89_ACMA][1][83] = 127, @@ -55676,13 +53482,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][83] = 127, [1][0][RTW89_THAILAND][0][83] = 127, [1][0][RTW89_FCC][1][85] = -4, - [1][0][RTW89_FCC][2][85] = 68, [1][0][RTW89_ETSI][1][85] = 127, [1][0][RTW89_ETSI][0][85] = 127, [1][0][RTW89_MKK][1][85] = 127, [1][0][RTW89_MKK][0][85] = 127, [1][0][RTW89_IC][1][85] = -4, - [1][0][RTW89_IC][2][85] = 68, [1][0][RTW89_KCC][1][85] = -2, [1][0][RTW89_KCC][0][85] = 127, [1][0][RTW89_ACMA][1][85] = 127, @@ -55695,13 +53499,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][85] = 127, [1][0][RTW89_THAILAND][0][85] = 127, [1][0][RTW89_FCC][1][87] = -4, - [1][0][RTW89_FCC][2][87] = 127, [1][0][RTW89_ETSI][1][87] = 127, [1][0][RTW89_ETSI][0][87] = 127, [1][0][RTW89_MKK][1][87] = 127, [1][0][RTW89_MKK][0][87] = 127, [1][0][RTW89_IC][1][87] = -4, - [1][0][RTW89_IC][2][87] = 127, [1][0][RTW89_KCC][1][87] = -2, [1][0][RTW89_KCC][0][87] = 127, [1][0][RTW89_ACMA][1][87] = 127, @@ -55714,13 +53516,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][87] = 127, [1][0][RTW89_THAILAND][0][87] = 127, [1][0][RTW89_FCC][1][89] = -4, - [1][0][RTW89_FCC][2][89] = 127, [1][0][RTW89_ETSI][1][89] = 127, [1][0][RTW89_ETSI][0][89] = 127, [1][0][RTW89_MKK][1][89] = 127, [1][0][RTW89_MKK][0][89] = 127, [1][0][RTW89_IC][1][89] = -4, - [1][0][RTW89_IC][2][89] = 127, [1][0][RTW89_KCC][1][89] = -2, [1][0][RTW89_KCC][0][89] = 127, [1][0][RTW89_ACMA][1][89] = 127, @@ -55733,13 +53533,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][89] = 127, [1][0][RTW89_THAILAND][0][89] = 127, [1][0][RTW89_FCC][1][90] = -4, - [1][0][RTW89_FCC][2][90] = 127, 
[1][0][RTW89_ETSI][1][90] = 127, [1][0][RTW89_ETSI][0][90] = 127, [1][0][RTW89_MKK][1][90] = 127, [1][0][RTW89_MKK][0][90] = 127, [1][0][RTW89_IC][1][90] = -4, - [1][0][RTW89_IC][2][90] = 127, [1][0][RTW89_KCC][1][90] = -2, [1][0][RTW89_KCC][0][90] = 127, [1][0][RTW89_ACMA][1][90] = 127, @@ -55752,13 +53550,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][90] = 127, [1][0][RTW89_THAILAND][0][90] = 127, [1][0][RTW89_FCC][1][92] = -4, - [1][0][RTW89_FCC][2][92] = 127, [1][0][RTW89_ETSI][1][92] = 127, [1][0][RTW89_ETSI][0][92] = 127, [1][0][RTW89_MKK][1][92] = 127, [1][0][RTW89_MKK][0][92] = 127, [1][0][RTW89_IC][1][92] = -4, - [1][0][RTW89_IC][2][92] = 127, [1][0][RTW89_KCC][1][92] = -2, [1][0][RTW89_KCC][0][92] = 127, [1][0][RTW89_ACMA][1][92] = 127, @@ -55771,13 +53567,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][92] = 127, [1][0][RTW89_THAILAND][0][92] = 127, [1][0][RTW89_FCC][1][94] = -4, - [1][0][RTW89_FCC][2][94] = 127, [1][0][RTW89_ETSI][1][94] = 127, [1][0][RTW89_ETSI][0][94] = 127, [1][0][RTW89_MKK][1][94] = 127, [1][0][RTW89_MKK][0][94] = 127, [1][0][RTW89_IC][1][94] = -4, - [1][0][RTW89_IC][2][94] = 127, [1][0][RTW89_KCC][1][94] = -2, [1][0][RTW89_KCC][0][94] = 127, [1][0][RTW89_ACMA][1][94] = 127, @@ -55790,13 +53584,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][94] = 127, [1][0][RTW89_THAILAND][0][94] = 127, [1][0][RTW89_FCC][1][96] = -4, - [1][0][RTW89_FCC][2][96] = 127, [1][0][RTW89_ETSI][1][96] = 127, [1][0][RTW89_ETSI][0][96] = 127, [1][0][RTW89_MKK][1][96] = 127, [1][0][RTW89_MKK][0][96] = 127, [1][0][RTW89_IC][1][96] = -4, - [1][0][RTW89_IC][2][96] = 127, [1][0][RTW89_KCC][1][96] = -2, [1][0][RTW89_KCC][0][96] = 127, [1][0][RTW89_ACMA][1][96] = 127, @@ -55809,13 +53601,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][96] = 127, [1][0][RTW89_THAILAND][0][96] = 127, [1][0][RTW89_FCC][1][98] = -4, - [1][0][RTW89_FCC][2][98] = 127, [1][0][RTW89_ETSI][1][98] = 127, [1][0][RTW89_ETSI][0][98] = 127, [1][0][RTW89_MKK][1][98] = 127, [1][0][RTW89_MKK][0][98] = 127, [1][0][RTW89_IC][1][98] = -4, - [1][0][RTW89_IC][2][98] = 127, [1][0][RTW89_KCC][1][98] = -2, [1][0][RTW89_KCC][0][98] = 127, [1][0][RTW89_ACMA][1][98] = 127, @@ -55828,13 +53618,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][98] = 127, [1][0][RTW89_THAILAND][0][98] = 127, [1][0][RTW89_FCC][1][100] = -4, - [1][0][RTW89_FCC][2][100] = 127, [1][0][RTW89_ETSI][1][100] = 127, [1][0][RTW89_ETSI][0][100] = 127, [1][0][RTW89_MKK][1][100] = 127, [1][0][RTW89_MKK][0][100] = 127, [1][0][RTW89_IC][1][100] = -4, - [1][0][RTW89_IC][2][100] = 127, [1][0][RTW89_KCC][1][100] = -2, [1][0][RTW89_KCC][0][100] = 127, [1][0][RTW89_ACMA][1][100] = 127, @@ -55847,13 +53635,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][100] = 127, [1][0][RTW89_THAILAND][0][100] = 127, [1][0][RTW89_FCC][1][102] = -4, - [1][0][RTW89_FCC][2][102] = 127, [1][0][RTW89_ETSI][1][102] = 127, [1][0][RTW89_ETSI][0][102] = 127, [1][0][RTW89_MKK][1][102] = 127, [1][0][RTW89_MKK][0][102] = 127, [1][0][RTW89_IC][1][102] = -4, - [1][0][RTW89_IC][2][102] = 127, [1][0][RTW89_KCC][1][102] = -2, [1][0][RTW89_KCC][0][102] = 127, [1][0][RTW89_ACMA][1][102] = 127, @@ -55866,13 +53652,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] 
[1][0][RTW89_THAILAND][1][102] = 127, [1][0][RTW89_THAILAND][0][102] = 127, [1][0][RTW89_FCC][1][104] = -4, - [1][0][RTW89_FCC][2][104] = 127, [1][0][RTW89_ETSI][1][104] = 127, [1][0][RTW89_ETSI][0][104] = 127, [1][0][RTW89_MKK][1][104] = 127, [1][0][RTW89_MKK][0][104] = 127, [1][0][RTW89_IC][1][104] = -4, - [1][0][RTW89_IC][2][104] = 127, [1][0][RTW89_KCC][1][104] = -2, [1][0][RTW89_KCC][0][104] = 127, [1][0][RTW89_ACMA][1][104] = 127, @@ -55885,13 +53669,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][104] = 127, [1][0][RTW89_THAILAND][0][104] = 127, [1][0][RTW89_FCC][1][105] = -4, - [1][0][RTW89_FCC][2][105] = 127, [1][0][RTW89_ETSI][1][105] = 127, [1][0][RTW89_ETSI][0][105] = 127, [1][0][RTW89_MKK][1][105] = 127, [1][0][RTW89_MKK][0][105] = 127, [1][0][RTW89_IC][1][105] = -4, - [1][0][RTW89_IC][2][105] = 127, [1][0][RTW89_KCC][1][105] = -2, [1][0][RTW89_KCC][0][105] = 127, [1][0][RTW89_ACMA][1][105] = 127, @@ -55904,13 +53686,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][105] = 127, [1][0][RTW89_THAILAND][0][105] = 127, [1][0][RTW89_FCC][1][107] = 1, - [1][0][RTW89_FCC][2][107] = 127, [1][0][RTW89_ETSI][1][107] = 127, [1][0][RTW89_ETSI][0][107] = 127, [1][0][RTW89_MKK][1][107] = 127, [1][0][RTW89_MKK][0][107] = 127, [1][0][RTW89_IC][1][107] = 1, - [1][0][RTW89_IC][2][107] = 127, [1][0][RTW89_KCC][1][107] = -2, [1][0][RTW89_KCC][0][107] = 127, [1][0][RTW89_ACMA][1][107] = 127, @@ -55923,13 +53703,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][107] = 127, [1][0][RTW89_THAILAND][0][107] = 127, [1][0][RTW89_FCC][1][109] = 2, - [1][0][RTW89_FCC][2][109] = 127, [1][0][RTW89_ETSI][1][109] = 127, [1][0][RTW89_ETSI][0][109] = 127, [1][0][RTW89_MKK][1][109] = 127, [1][0][RTW89_MKK][0][109] = 127, [1][0][RTW89_IC][1][109] = 2, - [1][0][RTW89_IC][2][109] = 127, [1][0][RTW89_KCC][1][109] = 127, [1][0][RTW89_KCC][0][109] = 127, [1][0][RTW89_ACMA][1][109] = 127, @@ -55942,13 +53720,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][109] = 127, [1][0][RTW89_THAILAND][0][109] = 127, [1][0][RTW89_FCC][1][111] = 127, - [1][0][RTW89_FCC][2][111] = 127, [1][0][RTW89_ETSI][1][111] = 127, [1][0][RTW89_ETSI][0][111] = 127, [1][0][RTW89_MKK][1][111] = 127, [1][0][RTW89_MKK][0][111] = 127, [1][0][RTW89_IC][1][111] = 127, - [1][0][RTW89_IC][2][111] = 127, [1][0][RTW89_KCC][1][111] = 127, [1][0][RTW89_KCC][0][111] = 127, [1][0][RTW89_ACMA][1][111] = 127, @@ -55961,13 +53737,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][111] = 127, [1][0][RTW89_THAILAND][0][111] = 127, [1][0][RTW89_FCC][1][113] = 127, - [1][0][RTW89_FCC][2][113] = 127, [1][0][RTW89_ETSI][1][113] = 127, [1][0][RTW89_ETSI][0][113] = 127, [1][0][RTW89_MKK][1][113] = 127, [1][0][RTW89_MKK][0][113] = 127, [1][0][RTW89_IC][1][113] = 127, - [1][0][RTW89_IC][2][113] = 127, [1][0][RTW89_KCC][1][113] = 127, [1][0][RTW89_KCC][0][113] = 127, [1][0][RTW89_ACMA][1][113] = 127, @@ -55980,13 +53754,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][113] = 127, [1][0][RTW89_THAILAND][0][113] = 127, [1][0][RTW89_FCC][1][115] = 127, - [1][0][RTW89_FCC][2][115] = 127, [1][0][RTW89_ETSI][1][115] = 127, [1][0][RTW89_ETSI][0][115] = 127, [1][0][RTW89_MKK][1][115] = 127, [1][0][RTW89_MKK][0][115] = 127, [1][0][RTW89_IC][1][115] = 127, - [1][0][RTW89_IC][2][115] = 127, 
[1][0][RTW89_KCC][1][115] = 127, [1][0][RTW89_KCC][0][115] = 127, [1][0][RTW89_ACMA][1][115] = 127, @@ -55999,13 +53771,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][115] = 127, [1][0][RTW89_THAILAND][0][115] = 127, [1][0][RTW89_FCC][1][117] = 127, - [1][0][RTW89_FCC][2][117] = 127, [1][0][RTW89_ETSI][1][117] = 127, [1][0][RTW89_ETSI][0][117] = 127, [1][0][RTW89_MKK][1][117] = 127, [1][0][RTW89_MKK][0][117] = 127, [1][0][RTW89_IC][1][117] = 127, - [1][0][RTW89_IC][2][117] = 127, [1][0][RTW89_KCC][1][117] = 127, [1][0][RTW89_KCC][0][117] = 127, [1][0][RTW89_ACMA][1][117] = 127, @@ -56018,13 +53788,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][117] = 127, [1][0][RTW89_THAILAND][0][117] = 127, [1][0][RTW89_FCC][1][119] = 127, - [1][0][RTW89_FCC][2][119] = 127, [1][0][RTW89_ETSI][1][119] = 127, [1][0][RTW89_ETSI][0][119] = 127, [1][0][RTW89_MKK][1][119] = 127, [1][0][RTW89_MKK][0][119] = 127, [1][0][RTW89_IC][1][119] = 127, - [1][0][RTW89_IC][2][119] = 127, [1][0][RTW89_KCC][1][119] = 127, [1][0][RTW89_KCC][0][119] = 127, [1][0][RTW89_ACMA][1][119] = 127, @@ -56037,13 +53805,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][0][RTW89_THAILAND][1][119] = 127, [1][0][RTW89_THAILAND][0][119] = 127, [1][1][RTW89_FCC][1][0] = -26, - [1][1][RTW89_FCC][2][0] = 44, [1][1][RTW89_ETSI][1][0] = 32, [1][1][RTW89_ETSI][0][0] = -6, [1][1][RTW89_MKK][1][0] = 30, [1][1][RTW89_MKK][0][0] = -10, [1][1][RTW89_IC][1][0] = -26, - [1][1][RTW89_IC][2][0] = 44, [1][1][RTW89_KCC][1][0] = -14, [1][1][RTW89_KCC][0][0] = -14, [1][1][RTW89_ACMA][1][0] = 32, @@ -56056,13 +53822,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_THAILAND][1][0] = 18, [1][1][RTW89_THAILAND][0][0] = -26, [1][1][RTW89_FCC][1][2] = -28, - [1][1][RTW89_FCC][2][2] = 44, [1][1][RTW89_ETSI][1][2] = 32, [1][1][RTW89_ETSI][0][2] = -6, [1][1][RTW89_MKK][1][2] = 30, [1][1][RTW89_MKK][0][2] = -10, [1][1][RTW89_IC][1][2] = -28, - [1][1][RTW89_IC][2][2] = 44, [1][1][RTW89_KCC][1][2] = -14, [1][1][RTW89_KCC][0][2] = -14, [1][1][RTW89_ACMA][1][2] = 32, @@ -56075,13 +53839,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_THAILAND][1][2] = 18, [1][1][RTW89_THAILAND][0][2] = -28, [1][1][RTW89_FCC][1][4] = -28, - [1][1][RTW89_FCC][2][4] = 44, [1][1][RTW89_ETSI][1][4] = 32, [1][1][RTW89_ETSI][0][4] = -6, [1][1][RTW89_MKK][1][4] = 30, [1][1][RTW89_MKK][0][4] = -10, [1][1][RTW89_IC][1][4] = -28, - [1][1][RTW89_IC][2][4] = 44, [1][1][RTW89_KCC][1][4] = -14, [1][1][RTW89_KCC][0][4] = -14, [1][1][RTW89_ACMA][1][4] = 32, @@ -56094,13 +53856,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_THAILAND][1][4] = 18, [1][1][RTW89_THAILAND][0][4] = -28, [1][1][RTW89_FCC][1][6] = -28, - [1][1][RTW89_FCC][2][6] = 44, [1][1][RTW89_ETSI][1][6] = 32, [1][1][RTW89_ETSI][0][6] = -6, [1][1][RTW89_MKK][1][6] = 30, [1][1][RTW89_MKK][0][6] = -10, [1][1][RTW89_IC][1][6] = -28, - [1][1][RTW89_IC][2][6] = 44, [1][1][RTW89_KCC][1][6] = -14, [1][1][RTW89_KCC][0][6] = -14, [1][1][RTW89_ACMA][1][6] = 32, @@ -56113,13 +53873,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [1][1][RTW89_THAILAND][1][6] = 18, [1][1][RTW89_THAILAND][0][6] = -28, [1][1][RTW89_FCC][1][8] = -28, - [1][1][RTW89_FCC][2][8] = 44, [1][1][RTW89_ETSI][1][8] = 32, [1][1][RTW89_ETSI][0][8] = -6, [1][1][RTW89_MKK][1][8] = 30, [1][1][RTW89_MKK][0][8] = -10, 
 	[1][1][RTW89_IC][1][8] = -28,
-	[1][1][RTW89_IC][2][8] = 44,
 	[1][1][RTW89_KCC][1][8] = -14,
 	[1][1][RTW89_KCC][0][8] = -14,
 	[1][1][RTW89_ACMA][1][8] = 32,
@@ -56132,13 +53890,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
 	[1][1][RTW89_THAILAND][1][8] = 18,
 	[1][1][RTW89_THAILAND][0][8] = -28,
 	[1][1][RTW89_FCC][1][10] = -28,
-	[1][1][RTW89_FCC][2][10] = 44,
 	[1][1][RTW89_ETSI][1][10] = 32,
 	[1][1][RTW89_ETSI][0][10] = -6,
 	[1][1][RTW89_MKK][1][10] = 30,
 	[1][1][RTW89_MKK][0][10] = -10,
 	[1][1][RTW89_IC][1][10] = -28,
-	[1][1][RTW89_IC][2][10] = 44,
 	[1][1][RTW89_KCC][1][10] = -14,
 	[1][1][RTW89_KCC][0][10] = -14,
 	[1][1][RTW89_ACMA][1][10] = 32,
@@ -56151,13 +53907,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[The remaining hunks of this diff (through @@ -58716) repeat the same two-line removal for every further channel index of the [1][1], [2][0], and [2][1] sub-tables of rtw89_8852c_txpwr_lmt_ru_6g: each hunk drops the "-	[..][..][RTW89_FCC][2][ch] = ..." and "-	[..][..][RTW89_IC][2][ch] = ..." entries while the surrounding ETSI, MKK, IC, KCC, ACMA, and THAILAND entries are kept unchanged as context.]
= 40, [2][1][RTW89_MKK][0][25] = 2, [2][1][RTW89_IC][1][25] = -16, - [2][1][RTW89_IC][2][25] = 54, [2][1][RTW89_KCC][1][25] = -14, [2][1][RTW89_KCC][0][25] = -14, [2][1][RTW89_ACMA][1][25] = 44, @@ -58735,13 +56219,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][25] = 28, [2][1][RTW89_THAILAND][0][25] = -16, [2][1][RTW89_FCC][1][27] = -16, - [2][1][RTW89_FCC][2][27] = 54, [2][1][RTW89_ETSI][1][27] = 44, [2][1][RTW89_ETSI][0][27] = 6, [2][1][RTW89_MKK][1][27] = 40, [2][1][RTW89_MKK][0][27] = 2, [2][1][RTW89_IC][1][27] = -16, - [2][1][RTW89_IC][2][27] = 54, [2][1][RTW89_KCC][1][27] = -14, [2][1][RTW89_KCC][0][27] = -14, [2][1][RTW89_ACMA][1][27] = 44, @@ -58754,13 +56236,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][27] = 28, [2][1][RTW89_THAILAND][0][27] = -16, [2][1][RTW89_FCC][1][29] = -16, - [2][1][RTW89_FCC][2][29] = 54, [2][1][RTW89_ETSI][1][29] = 44, [2][1][RTW89_ETSI][0][29] = 6, [2][1][RTW89_MKK][1][29] = 40, [2][1][RTW89_MKK][0][29] = 2, [2][1][RTW89_IC][1][29] = -16, - [2][1][RTW89_IC][2][29] = 54, [2][1][RTW89_KCC][1][29] = -14, [2][1][RTW89_KCC][0][29] = -14, [2][1][RTW89_ACMA][1][29] = 44, @@ -58773,13 +56253,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][29] = 28, [2][1][RTW89_THAILAND][0][29] = -16, [2][1][RTW89_FCC][1][30] = -16, - [2][1][RTW89_FCC][2][30] = 54, [2][1][RTW89_ETSI][1][30] = 44, [2][1][RTW89_ETSI][0][30] = 6, [2][1][RTW89_MKK][1][30] = 40, [2][1][RTW89_MKK][0][30] = 2, [2][1][RTW89_IC][1][30] = -16, - [2][1][RTW89_IC][2][30] = 54, [2][1][RTW89_KCC][1][30] = -14, [2][1][RTW89_KCC][0][30] = -14, [2][1][RTW89_ACMA][1][30] = 44, @@ -58792,13 +56270,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][30] = 28, [2][1][RTW89_THAILAND][0][30] = -16, [2][1][RTW89_FCC][1][32] = -16, - [2][1][RTW89_FCC][2][32] = 54, [2][1][RTW89_ETSI][1][32] = 44, [2][1][RTW89_ETSI][0][32] = 6, [2][1][RTW89_MKK][1][32] = 40, [2][1][RTW89_MKK][0][32] = 2, [2][1][RTW89_IC][1][32] = -16, - [2][1][RTW89_IC][2][32] = 54, [2][1][RTW89_KCC][1][32] = -14, [2][1][RTW89_KCC][0][32] = -14, [2][1][RTW89_ACMA][1][32] = 44, @@ -58811,13 +56287,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][32] = 28, [2][1][RTW89_THAILAND][0][32] = -16, [2][1][RTW89_FCC][1][34] = -16, - [2][1][RTW89_FCC][2][34] = 54, [2][1][RTW89_ETSI][1][34] = 44, [2][1][RTW89_ETSI][0][34] = 6, [2][1][RTW89_MKK][1][34] = 40, [2][1][RTW89_MKK][0][34] = 2, [2][1][RTW89_IC][1][34] = -16, - [2][1][RTW89_IC][2][34] = 54, [2][1][RTW89_KCC][1][34] = -14, [2][1][RTW89_KCC][0][34] = -14, [2][1][RTW89_ACMA][1][34] = 44, @@ -58830,13 +56304,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][34] = 28, [2][1][RTW89_THAILAND][0][34] = -16, [2][1][RTW89_FCC][1][36] = -16, - [2][1][RTW89_FCC][2][36] = 54, [2][1][RTW89_ETSI][1][36] = 44, [2][1][RTW89_ETSI][0][36] = 6, [2][1][RTW89_MKK][1][36] = 40, [2][1][RTW89_MKK][0][36] = 2, [2][1][RTW89_IC][1][36] = -16, - [2][1][RTW89_IC][2][36] = 54, [2][1][RTW89_KCC][1][36] = -14, [2][1][RTW89_KCC][0][36] = -14, [2][1][RTW89_ACMA][1][36] = 44, @@ -58849,13 +56321,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][36] = 28, [2][1][RTW89_THAILAND][0][36] = -16, [2][1][RTW89_FCC][1][38] = -16, - [2][1][RTW89_FCC][2][38] = 54, [2][1][RTW89_ETSI][1][38] = 44, 
[2][1][RTW89_ETSI][0][38] = 6, [2][1][RTW89_MKK][1][38] = 40, [2][1][RTW89_MKK][0][38] = 2, [2][1][RTW89_IC][1][38] = -16, - [2][1][RTW89_IC][2][38] = 54, [2][1][RTW89_KCC][1][38] = -14, [2][1][RTW89_KCC][0][38] = -14, [2][1][RTW89_ACMA][1][38] = 44, @@ -58868,13 +56338,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][38] = 28, [2][1][RTW89_THAILAND][0][38] = -16, [2][1][RTW89_FCC][1][40] = -16, - [2][1][RTW89_FCC][2][40] = 54, [2][1][RTW89_ETSI][1][40] = 44, [2][1][RTW89_ETSI][0][40] = 6, [2][1][RTW89_MKK][1][40] = 40, [2][1][RTW89_MKK][0][40] = 2, [2][1][RTW89_IC][1][40] = -16, - [2][1][RTW89_IC][2][40] = 54, [2][1][RTW89_KCC][1][40] = -14, [2][1][RTW89_KCC][0][40] = -14, [2][1][RTW89_ACMA][1][40] = 44, @@ -58887,13 +56355,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][40] = 28, [2][1][RTW89_THAILAND][0][40] = -16, [2][1][RTW89_FCC][1][42] = -16, - [2][1][RTW89_FCC][2][42] = 54, [2][1][RTW89_ETSI][1][42] = 44, [2][1][RTW89_ETSI][0][42] = 6, [2][1][RTW89_MKK][1][42] = 40, [2][1][RTW89_MKK][0][42] = 2, [2][1][RTW89_IC][1][42] = -16, - [2][1][RTW89_IC][2][42] = 54, [2][1][RTW89_KCC][1][42] = -14, [2][1][RTW89_KCC][0][42] = -14, [2][1][RTW89_ACMA][1][42] = 44, @@ -58906,13 +56372,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][42] = 28, [2][1][RTW89_THAILAND][0][42] = -16, [2][1][RTW89_FCC][1][44] = -16, - [2][1][RTW89_FCC][2][44] = 54, [2][1][RTW89_ETSI][1][44] = 44, [2][1][RTW89_ETSI][0][44] = 6, [2][1][RTW89_MKK][1][44] = 16, [2][1][RTW89_MKK][0][44] = 2, [2][1][RTW89_IC][1][44] = -16, - [2][1][RTW89_IC][2][44] = 54, [2][1][RTW89_KCC][1][44] = -14, [2][1][RTW89_KCC][0][44] = -14, [2][1][RTW89_ACMA][1][44] = 44, @@ -58925,13 +56389,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][44] = 28, [2][1][RTW89_THAILAND][0][44] = -16, [2][1][RTW89_FCC][1][45] = -16, - [2][1][RTW89_FCC][2][45] = 127, [2][1][RTW89_ETSI][1][45] = 127, [2][1][RTW89_ETSI][0][45] = 127, [2][1][RTW89_MKK][1][45] = 127, [2][1][RTW89_MKK][0][45] = 127, [2][1][RTW89_IC][1][45] = -16, - [2][1][RTW89_IC][2][45] = 56, [2][1][RTW89_KCC][1][45] = -14, [2][1][RTW89_KCC][0][45] = 127, [2][1][RTW89_ACMA][1][45] = 127, @@ -58944,13 +56406,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][45] = 127, [2][1][RTW89_THAILAND][0][45] = 127, [2][1][RTW89_FCC][1][47] = -16, - [2][1][RTW89_FCC][2][47] = 127, [2][1][RTW89_ETSI][1][47] = 127, [2][1][RTW89_ETSI][0][47] = 127, [2][1][RTW89_MKK][1][47] = 127, [2][1][RTW89_MKK][0][47] = 127, [2][1][RTW89_IC][1][47] = -16, - [2][1][RTW89_IC][2][47] = 56, [2][1][RTW89_KCC][1][47] = -14, [2][1][RTW89_KCC][0][47] = 127, [2][1][RTW89_ACMA][1][47] = 127, @@ -58963,13 +56423,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][47] = 127, [2][1][RTW89_THAILAND][0][47] = 127, [2][1][RTW89_FCC][1][49] = -16, - [2][1][RTW89_FCC][2][49] = 127, [2][1][RTW89_ETSI][1][49] = 127, [2][1][RTW89_ETSI][0][49] = 127, [2][1][RTW89_MKK][1][49] = 127, [2][1][RTW89_MKK][0][49] = 127, [2][1][RTW89_IC][1][49] = -16, - [2][1][RTW89_IC][2][49] = 56, [2][1][RTW89_KCC][1][49] = -14, [2][1][RTW89_KCC][0][49] = 127, [2][1][RTW89_ACMA][1][49] = 127, @@ -58982,13 +56440,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][49] = 127, [2][1][RTW89_THAILAND][0][49] = 127, [2][1][RTW89_FCC][1][51] = -16, - 
[2][1][RTW89_FCC][2][51] = 127, [2][1][RTW89_ETSI][1][51] = 127, [2][1][RTW89_ETSI][0][51] = 127, [2][1][RTW89_MKK][1][51] = 127, [2][1][RTW89_MKK][0][51] = 127, [2][1][RTW89_IC][1][51] = -16, - [2][1][RTW89_IC][2][51] = 56, [2][1][RTW89_KCC][1][51] = -14, [2][1][RTW89_KCC][0][51] = 127, [2][1][RTW89_ACMA][1][51] = 127, @@ -59001,13 +56457,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][51] = 127, [2][1][RTW89_THAILAND][0][51] = 127, [2][1][RTW89_FCC][1][53] = -16, - [2][1][RTW89_FCC][2][53] = 127, [2][1][RTW89_ETSI][1][53] = 127, [2][1][RTW89_ETSI][0][53] = 127, [2][1][RTW89_MKK][1][53] = 127, [2][1][RTW89_MKK][0][53] = 127, [2][1][RTW89_IC][1][53] = -16, - [2][1][RTW89_IC][2][53] = 56, [2][1][RTW89_KCC][1][53] = -14, [2][1][RTW89_KCC][0][53] = 127, [2][1][RTW89_ACMA][1][53] = 127, @@ -59020,13 +56474,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][53] = 127, [2][1][RTW89_THAILAND][0][53] = 127, [2][1][RTW89_FCC][1][55] = -16, - [2][1][RTW89_FCC][2][55] = 54, [2][1][RTW89_ETSI][1][55] = 127, [2][1][RTW89_ETSI][0][55] = 127, [2][1][RTW89_MKK][1][55] = 127, [2][1][RTW89_MKK][0][55] = 127, [2][1][RTW89_IC][1][55] = -16, - [2][1][RTW89_IC][2][55] = 54, [2][1][RTW89_KCC][1][55] = -14, [2][1][RTW89_KCC][0][55] = 127, [2][1][RTW89_ACMA][1][55] = 127, @@ -59039,13 +56491,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][55] = 127, [2][1][RTW89_THAILAND][0][55] = 127, [2][1][RTW89_FCC][1][57] = -16, - [2][1][RTW89_FCC][2][57] = 54, [2][1][RTW89_ETSI][1][57] = 127, [2][1][RTW89_ETSI][0][57] = 127, [2][1][RTW89_MKK][1][57] = 127, [2][1][RTW89_MKK][0][57] = 127, [2][1][RTW89_IC][1][57] = -16, - [2][1][RTW89_IC][2][57] = 54, [2][1][RTW89_KCC][1][57] = -14, [2][1][RTW89_KCC][0][57] = 127, [2][1][RTW89_ACMA][1][57] = 127, @@ -59058,13 +56508,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][57] = 127, [2][1][RTW89_THAILAND][0][57] = 127, [2][1][RTW89_FCC][1][59] = -16, - [2][1][RTW89_FCC][2][59] = 54, [2][1][RTW89_ETSI][1][59] = 127, [2][1][RTW89_ETSI][0][59] = 127, [2][1][RTW89_MKK][1][59] = 127, [2][1][RTW89_MKK][0][59] = 127, [2][1][RTW89_IC][1][59] = -16, - [2][1][RTW89_IC][2][59] = 54, [2][1][RTW89_KCC][1][59] = -14, [2][1][RTW89_KCC][0][59] = 127, [2][1][RTW89_ACMA][1][59] = 127, @@ -59077,13 +56525,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][59] = 127, [2][1][RTW89_THAILAND][0][59] = 127, [2][1][RTW89_FCC][1][60] = -16, - [2][1][RTW89_FCC][2][60] = 54, [2][1][RTW89_ETSI][1][60] = 127, [2][1][RTW89_ETSI][0][60] = 127, [2][1][RTW89_MKK][1][60] = 127, [2][1][RTW89_MKK][0][60] = 127, [2][1][RTW89_IC][1][60] = -16, - [2][1][RTW89_IC][2][60] = 54, [2][1][RTW89_KCC][1][60] = -14, [2][1][RTW89_KCC][0][60] = 127, [2][1][RTW89_ACMA][1][60] = 127, @@ -59096,13 +56542,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][60] = 127, [2][1][RTW89_THAILAND][0][60] = 127, [2][1][RTW89_FCC][1][62] = -16, - [2][1][RTW89_FCC][2][62] = 54, [2][1][RTW89_ETSI][1][62] = 127, [2][1][RTW89_ETSI][0][62] = 127, [2][1][RTW89_MKK][1][62] = 127, [2][1][RTW89_MKK][0][62] = 127, [2][1][RTW89_IC][1][62] = -16, - [2][1][RTW89_IC][2][62] = 54, [2][1][RTW89_KCC][1][62] = -14, [2][1][RTW89_KCC][0][62] = 127, [2][1][RTW89_ACMA][1][62] = 127, @@ -59115,13 +56559,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] 
[2][1][RTW89_THAILAND][1][62] = 127, [2][1][RTW89_THAILAND][0][62] = 127, [2][1][RTW89_FCC][1][64] = -16, - [2][1][RTW89_FCC][2][64] = 54, [2][1][RTW89_ETSI][1][64] = 127, [2][1][RTW89_ETSI][0][64] = 127, [2][1][RTW89_MKK][1][64] = 127, [2][1][RTW89_MKK][0][64] = 127, [2][1][RTW89_IC][1][64] = -16, - [2][1][RTW89_IC][2][64] = 54, [2][1][RTW89_KCC][1][64] = -14, [2][1][RTW89_KCC][0][64] = 127, [2][1][RTW89_ACMA][1][64] = 127, @@ -59134,13 +56576,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][64] = 127, [2][1][RTW89_THAILAND][0][64] = 127, [2][1][RTW89_FCC][1][66] = -16, - [2][1][RTW89_FCC][2][66] = 54, [2][1][RTW89_ETSI][1][66] = 127, [2][1][RTW89_ETSI][0][66] = 127, [2][1][RTW89_MKK][1][66] = 127, [2][1][RTW89_MKK][0][66] = 127, [2][1][RTW89_IC][1][66] = -16, - [2][1][RTW89_IC][2][66] = 54, [2][1][RTW89_KCC][1][66] = -14, [2][1][RTW89_KCC][0][66] = 127, [2][1][RTW89_ACMA][1][66] = 127, @@ -59153,13 +56593,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][66] = 127, [2][1][RTW89_THAILAND][0][66] = 127, [2][1][RTW89_FCC][1][68] = -16, - [2][1][RTW89_FCC][2][68] = 54, [2][1][RTW89_ETSI][1][68] = 127, [2][1][RTW89_ETSI][0][68] = 127, [2][1][RTW89_MKK][1][68] = 127, [2][1][RTW89_MKK][0][68] = 127, [2][1][RTW89_IC][1][68] = -16, - [2][1][RTW89_IC][2][68] = 54, [2][1][RTW89_KCC][1][68] = -14, [2][1][RTW89_KCC][0][68] = 127, [2][1][RTW89_ACMA][1][68] = 127, @@ -59172,13 +56610,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][68] = 127, [2][1][RTW89_THAILAND][0][68] = 127, [2][1][RTW89_FCC][1][70] = -16, - [2][1][RTW89_FCC][2][70] = 56, [2][1][RTW89_ETSI][1][70] = 127, [2][1][RTW89_ETSI][0][70] = 127, [2][1][RTW89_MKK][1][70] = 127, [2][1][RTW89_MKK][0][70] = 127, [2][1][RTW89_IC][1][70] = -16, - [2][1][RTW89_IC][2][70] = 56, [2][1][RTW89_KCC][1][70] = -14, [2][1][RTW89_KCC][0][70] = 127, [2][1][RTW89_ACMA][1][70] = 127, @@ -59191,13 +56627,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][70] = 127, [2][1][RTW89_THAILAND][0][70] = 127, [2][1][RTW89_FCC][1][72] = -16, - [2][1][RTW89_FCC][2][72] = 56, [2][1][RTW89_ETSI][1][72] = 127, [2][1][RTW89_ETSI][0][72] = 127, [2][1][RTW89_MKK][1][72] = 127, [2][1][RTW89_MKK][0][72] = 127, [2][1][RTW89_IC][1][72] = -16, - [2][1][RTW89_IC][2][72] = 56, [2][1][RTW89_KCC][1][72] = -14, [2][1][RTW89_KCC][0][72] = 127, [2][1][RTW89_ACMA][1][72] = 127, @@ -59210,13 +56644,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][72] = 127, [2][1][RTW89_THAILAND][0][72] = 127, [2][1][RTW89_FCC][1][74] = -16, - [2][1][RTW89_FCC][2][74] = 56, [2][1][RTW89_ETSI][1][74] = 127, [2][1][RTW89_ETSI][0][74] = 127, [2][1][RTW89_MKK][1][74] = 127, [2][1][RTW89_MKK][0][74] = 127, [2][1][RTW89_IC][1][74] = -16, - [2][1][RTW89_IC][2][74] = 56, [2][1][RTW89_KCC][1][74] = -14, [2][1][RTW89_KCC][0][74] = 127, [2][1][RTW89_ACMA][1][74] = 127, @@ -59229,13 +56661,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][74] = 127, [2][1][RTW89_THAILAND][0][74] = 127, [2][1][RTW89_FCC][1][75] = -16, - [2][1][RTW89_FCC][2][75] = 56, [2][1][RTW89_ETSI][1][75] = 127, [2][1][RTW89_ETSI][0][75] = 127, [2][1][RTW89_MKK][1][75] = 127, [2][1][RTW89_MKK][0][75] = 127, [2][1][RTW89_IC][1][75] = -16, - [2][1][RTW89_IC][2][75] = 56, [2][1][RTW89_KCC][1][75] = -14, [2][1][RTW89_KCC][0][75] = 127, [2][1][RTW89_ACMA][1][75] = 127, 
@@ -59248,13 +56678,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][75] = 127, [2][1][RTW89_THAILAND][0][75] = 127, [2][1][RTW89_FCC][1][77] = -16, - [2][1][RTW89_FCC][2][77] = 56, [2][1][RTW89_ETSI][1][77] = 127, [2][1][RTW89_ETSI][0][77] = 127, [2][1][RTW89_MKK][1][77] = 127, [2][1][RTW89_MKK][0][77] = 127, [2][1][RTW89_IC][1][77] = -16, - [2][1][RTW89_IC][2][77] = 56, [2][1][RTW89_KCC][1][77] = -14, [2][1][RTW89_KCC][0][77] = 127, [2][1][RTW89_ACMA][1][77] = 127, @@ -59267,13 +56695,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][77] = 127, [2][1][RTW89_THAILAND][0][77] = 127, [2][1][RTW89_FCC][1][79] = -16, - [2][1][RTW89_FCC][2][79] = 56, [2][1][RTW89_ETSI][1][79] = 127, [2][1][RTW89_ETSI][0][79] = 127, [2][1][RTW89_MKK][1][79] = 127, [2][1][RTW89_MKK][0][79] = 127, [2][1][RTW89_IC][1][79] = -16, - [2][1][RTW89_IC][2][79] = 56, [2][1][RTW89_KCC][1][79] = -14, [2][1][RTW89_KCC][0][79] = 127, [2][1][RTW89_ACMA][1][79] = 127, @@ -59286,13 +56712,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][79] = 127, [2][1][RTW89_THAILAND][0][79] = 127, [2][1][RTW89_FCC][1][81] = -16, - [2][1][RTW89_FCC][2][81] = 56, [2][1][RTW89_ETSI][1][81] = 127, [2][1][RTW89_ETSI][0][81] = 127, [2][1][RTW89_MKK][1][81] = 127, [2][1][RTW89_MKK][0][81] = 127, [2][1][RTW89_IC][1][81] = -16, - [2][1][RTW89_IC][2][81] = 56, [2][1][RTW89_KCC][1][81] = -14, [2][1][RTW89_KCC][0][81] = 127, [2][1][RTW89_ACMA][1][81] = 127, @@ -59305,13 +56729,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][81] = 127, [2][1][RTW89_THAILAND][0][81] = 127, [2][1][RTW89_FCC][1][83] = -16, - [2][1][RTW89_FCC][2][83] = 56, [2][1][RTW89_ETSI][1][83] = 127, [2][1][RTW89_ETSI][0][83] = 127, [2][1][RTW89_MKK][1][83] = 127, [2][1][RTW89_MKK][0][83] = 127, [2][1][RTW89_IC][1][83] = -16, - [2][1][RTW89_IC][2][83] = 56, [2][1][RTW89_KCC][1][83] = -14, [2][1][RTW89_KCC][0][83] = 127, [2][1][RTW89_ACMA][1][83] = 127, @@ -59324,13 +56746,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][83] = 127, [2][1][RTW89_THAILAND][0][83] = 127, [2][1][RTW89_FCC][1][85] = -18, - [2][1][RTW89_FCC][2][85] = 56, [2][1][RTW89_ETSI][1][85] = 127, [2][1][RTW89_ETSI][0][85] = 127, [2][1][RTW89_MKK][1][85] = 127, [2][1][RTW89_MKK][0][85] = 127, [2][1][RTW89_IC][1][85] = -18, - [2][1][RTW89_IC][2][85] = 56, [2][1][RTW89_KCC][1][85] = -14, [2][1][RTW89_KCC][0][85] = 127, [2][1][RTW89_ACMA][1][85] = 127, @@ -59343,13 +56763,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][85] = 127, [2][1][RTW89_THAILAND][0][85] = 127, [2][1][RTW89_FCC][1][87] = -16, - [2][1][RTW89_FCC][2][87] = 127, [2][1][RTW89_ETSI][1][87] = 127, [2][1][RTW89_ETSI][0][87] = 127, [2][1][RTW89_MKK][1][87] = 127, [2][1][RTW89_MKK][0][87] = 127, [2][1][RTW89_IC][1][87] = -16, - [2][1][RTW89_IC][2][87] = 127, [2][1][RTW89_KCC][1][87] = -14, [2][1][RTW89_KCC][0][87] = 127, [2][1][RTW89_ACMA][1][87] = 127, @@ -59362,13 +56780,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][87] = 127, [2][1][RTW89_THAILAND][0][87] = 127, [2][1][RTW89_FCC][1][89] = -16, - [2][1][RTW89_FCC][2][89] = 127, [2][1][RTW89_ETSI][1][89] = 127, [2][1][RTW89_ETSI][0][89] = 127, [2][1][RTW89_MKK][1][89] = 127, [2][1][RTW89_MKK][0][89] = 127, [2][1][RTW89_IC][1][89] = -16, - [2][1][RTW89_IC][2][89] = 127, 
[2][1][RTW89_KCC][1][89] = -14, [2][1][RTW89_KCC][0][89] = 127, [2][1][RTW89_ACMA][1][89] = 127, @@ -59381,13 +56797,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][89] = 127, [2][1][RTW89_THAILAND][0][89] = 127, [2][1][RTW89_FCC][1][90] = -16, - [2][1][RTW89_FCC][2][90] = 127, [2][1][RTW89_ETSI][1][90] = 127, [2][1][RTW89_ETSI][0][90] = 127, [2][1][RTW89_MKK][1][90] = 127, [2][1][RTW89_MKK][0][90] = 127, [2][1][RTW89_IC][1][90] = -16, - [2][1][RTW89_IC][2][90] = 127, [2][1][RTW89_KCC][1][90] = -14, [2][1][RTW89_KCC][0][90] = 127, [2][1][RTW89_ACMA][1][90] = 127, @@ -59400,13 +56814,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][90] = 127, [2][1][RTW89_THAILAND][0][90] = 127, [2][1][RTW89_FCC][1][92] = -16, - [2][1][RTW89_FCC][2][92] = 127, [2][1][RTW89_ETSI][1][92] = 127, [2][1][RTW89_ETSI][0][92] = 127, [2][1][RTW89_MKK][1][92] = 127, [2][1][RTW89_MKK][0][92] = 127, [2][1][RTW89_IC][1][92] = -16, - [2][1][RTW89_IC][2][92] = 127, [2][1][RTW89_KCC][1][92] = -14, [2][1][RTW89_KCC][0][92] = 127, [2][1][RTW89_ACMA][1][92] = 127, @@ -59419,13 +56831,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][92] = 127, [2][1][RTW89_THAILAND][0][92] = 127, [2][1][RTW89_FCC][1][94] = -16, - [2][1][RTW89_FCC][2][94] = 127, [2][1][RTW89_ETSI][1][94] = 127, [2][1][RTW89_ETSI][0][94] = 127, [2][1][RTW89_MKK][1][94] = 127, [2][1][RTW89_MKK][0][94] = 127, [2][1][RTW89_IC][1][94] = -16, - [2][1][RTW89_IC][2][94] = 127, [2][1][RTW89_KCC][1][94] = -14, [2][1][RTW89_KCC][0][94] = 127, [2][1][RTW89_ACMA][1][94] = 127, @@ -59438,13 +56848,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][94] = 127, [2][1][RTW89_THAILAND][0][94] = 127, [2][1][RTW89_FCC][1][96] = -16, - [2][1][RTW89_FCC][2][96] = 127, [2][1][RTW89_ETSI][1][96] = 127, [2][1][RTW89_ETSI][0][96] = 127, [2][1][RTW89_MKK][1][96] = 127, [2][1][RTW89_MKK][0][96] = 127, [2][1][RTW89_IC][1][96] = -16, - [2][1][RTW89_IC][2][96] = 127, [2][1][RTW89_KCC][1][96] = -14, [2][1][RTW89_KCC][0][96] = 127, [2][1][RTW89_ACMA][1][96] = 127, @@ -59457,13 +56865,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][96] = 127, [2][1][RTW89_THAILAND][0][96] = 127, [2][1][RTW89_FCC][1][98] = -16, - [2][1][RTW89_FCC][2][98] = 127, [2][1][RTW89_ETSI][1][98] = 127, [2][1][RTW89_ETSI][0][98] = 127, [2][1][RTW89_MKK][1][98] = 127, [2][1][RTW89_MKK][0][98] = 127, [2][1][RTW89_IC][1][98] = -16, - [2][1][RTW89_IC][2][98] = 127, [2][1][RTW89_KCC][1][98] = -14, [2][1][RTW89_KCC][0][98] = 127, [2][1][RTW89_ACMA][1][98] = 127, @@ -59476,13 +56882,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][98] = 127, [2][1][RTW89_THAILAND][0][98] = 127, [2][1][RTW89_FCC][1][100] = -16, - [2][1][RTW89_FCC][2][100] = 127, [2][1][RTW89_ETSI][1][100] = 127, [2][1][RTW89_ETSI][0][100] = 127, [2][1][RTW89_MKK][1][100] = 127, [2][1][RTW89_MKK][0][100] = 127, [2][1][RTW89_IC][1][100] = -16, - [2][1][RTW89_IC][2][100] = 127, [2][1][RTW89_KCC][1][100] = -14, [2][1][RTW89_KCC][0][100] = 127, [2][1][RTW89_ACMA][1][100] = 127, @@ -59495,13 +56899,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][100] = 127, [2][1][RTW89_THAILAND][0][100] = 127, [2][1][RTW89_FCC][1][102] = -16, - [2][1][RTW89_FCC][2][102] = 127, [2][1][RTW89_ETSI][1][102] = 127, [2][1][RTW89_ETSI][0][102] = 127, 
[2][1][RTW89_MKK][1][102] = 127, [2][1][RTW89_MKK][0][102] = 127, [2][1][RTW89_IC][1][102] = -16, - [2][1][RTW89_IC][2][102] = 127, [2][1][RTW89_KCC][1][102] = -14, [2][1][RTW89_KCC][0][102] = 127, [2][1][RTW89_ACMA][1][102] = 127, @@ -59514,13 +56916,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][102] = 127, [2][1][RTW89_THAILAND][0][102] = 127, [2][1][RTW89_FCC][1][104] = -16, - [2][1][RTW89_FCC][2][104] = 127, [2][1][RTW89_ETSI][1][104] = 127, [2][1][RTW89_ETSI][0][104] = 127, [2][1][RTW89_MKK][1][104] = 127, [2][1][RTW89_MKK][0][104] = 127, [2][1][RTW89_IC][1][104] = -16, - [2][1][RTW89_IC][2][104] = 127, [2][1][RTW89_KCC][1][104] = -14, [2][1][RTW89_KCC][0][104] = 127, [2][1][RTW89_ACMA][1][104] = 127, @@ -59533,13 +56933,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][104] = 127, [2][1][RTW89_THAILAND][0][104] = 127, [2][1][RTW89_FCC][1][105] = -16, - [2][1][RTW89_FCC][2][105] = 127, [2][1][RTW89_ETSI][1][105] = 127, [2][1][RTW89_ETSI][0][105] = 127, [2][1][RTW89_MKK][1][105] = 127, [2][1][RTW89_MKK][0][105] = 127, [2][1][RTW89_IC][1][105] = -16, - [2][1][RTW89_IC][2][105] = 127, [2][1][RTW89_KCC][1][105] = -14, [2][1][RTW89_KCC][0][105] = 127, [2][1][RTW89_ACMA][1][105] = 127, @@ -59552,13 +56950,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][105] = 127, [2][1][RTW89_THAILAND][0][105] = 127, [2][1][RTW89_FCC][1][107] = -12, - [2][1][RTW89_FCC][2][107] = 127, [2][1][RTW89_ETSI][1][107] = 127, [2][1][RTW89_ETSI][0][107] = 127, [2][1][RTW89_MKK][1][107] = 127, [2][1][RTW89_MKK][0][107] = 127, [2][1][RTW89_IC][1][107] = -12, - [2][1][RTW89_IC][2][107] = 127, [2][1][RTW89_KCC][1][107] = -14, [2][1][RTW89_KCC][0][107] = 127, [2][1][RTW89_ACMA][1][107] = 127, @@ -59571,13 +56967,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][107] = 127, [2][1][RTW89_THAILAND][0][107] = 127, [2][1][RTW89_FCC][1][109] = -10, - [2][1][RTW89_FCC][2][109] = 127, [2][1][RTW89_ETSI][1][109] = 127, [2][1][RTW89_ETSI][0][109] = 127, [2][1][RTW89_MKK][1][109] = 127, [2][1][RTW89_MKK][0][109] = 127, [2][1][RTW89_IC][1][109] = -10, - [2][1][RTW89_IC][2][109] = 127, [2][1][RTW89_KCC][1][109] = 127, [2][1][RTW89_KCC][0][109] = 127, [2][1][RTW89_ACMA][1][109] = 127, @@ -59590,13 +56984,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][109] = 127, [2][1][RTW89_THAILAND][0][109] = 127, [2][1][RTW89_FCC][1][111] = 127, - [2][1][RTW89_FCC][2][111] = 127, [2][1][RTW89_ETSI][1][111] = 127, [2][1][RTW89_ETSI][0][111] = 127, [2][1][RTW89_MKK][1][111] = 127, [2][1][RTW89_MKK][0][111] = 127, [2][1][RTW89_IC][1][111] = 127, - [2][1][RTW89_IC][2][111] = 127, [2][1][RTW89_KCC][1][111] = 127, [2][1][RTW89_KCC][0][111] = 127, [2][1][RTW89_ACMA][1][111] = 127, @@ -59609,13 +57001,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][111] = 127, [2][1][RTW89_THAILAND][0][111] = 127, [2][1][RTW89_FCC][1][113] = 127, - [2][1][RTW89_FCC][2][113] = 127, [2][1][RTW89_ETSI][1][113] = 127, [2][1][RTW89_ETSI][0][113] = 127, [2][1][RTW89_MKK][1][113] = 127, [2][1][RTW89_MKK][0][113] = 127, [2][1][RTW89_IC][1][113] = 127, - [2][1][RTW89_IC][2][113] = 127, [2][1][RTW89_KCC][1][113] = 127, [2][1][RTW89_KCC][0][113] = 127, [2][1][RTW89_ACMA][1][113] = 127, @@ -59628,13 +57018,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] 
[2][1][RTW89_THAILAND][1][113] = 127, [2][1][RTW89_THAILAND][0][113] = 127, [2][1][RTW89_FCC][1][115] = 127, - [2][1][RTW89_FCC][2][115] = 127, [2][1][RTW89_ETSI][1][115] = 127, [2][1][RTW89_ETSI][0][115] = 127, [2][1][RTW89_MKK][1][115] = 127, [2][1][RTW89_MKK][0][115] = 127, [2][1][RTW89_IC][1][115] = 127, - [2][1][RTW89_IC][2][115] = 127, [2][1][RTW89_KCC][1][115] = 127, [2][1][RTW89_KCC][0][115] = 127, [2][1][RTW89_ACMA][1][115] = 127, @@ -59647,13 +57035,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][115] = 127, [2][1][RTW89_THAILAND][0][115] = 127, [2][1][RTW89_FCC][1][117] = 127, - [2][1][RTW89_FCC][2][117] = 127, [2][1][RTW89_ETSI][1][117] = 127, [2][1][RTW89_ETSI][0][117] = 127, [2][1][RTW89_MKK][1][117] = 127, [2][1][RTW89_MKK][0][117] = 127, [2][1][RTW89_IC][1][117] = 127, - [2][1][RTW89_IC][2][117] = 127, [2][1][RTW89_KCC][1][117] = 127, [2][1][RTW89_KCC][0][117] = 127, [2][1][RTW89_ACMA][1][117] = 127, @@ -59666,13 +57052,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM] [2][1][RTW89_THAILAND][1][117] = 127, [2][1][RTW89_THAILAND][0][117] = 127, [2][1][RTW89_FCC][1][119] = 127, - [2][1][RTW89_FCC][2][119] = 127, [2][1][RTW89_ETSI][1][119] = 127, [2][1][RTW89_ETSI][0][119] = 127, [2][1][RTW89_MKK][1][119] = 127, [2][1][RTW89_MKK][0][119] = 127, [2][1][RTW89_IC][1][119] = 127, - [2][1][RTW89_IC][2][119] = 127, [2][1][RTW89_KCC][1][119] = 127, [2][1][RTW89_KCC][0][119] = 127, [2][1][RTW89_ACMA][1][119] = 127, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c index 583ea673a4f5..e07c7f3ade41 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c @@ -68,8 +68,31 @@ static const struct rtw89_pci_info rtw8852c_pci_info = { .recognize_intrs = rtw89_pci_recognize_intrs_v1, }; +static const struct dmi_system_id rtw8852c_pci_quirks[] = { + { + .ident = "Dell Inc. Vostro 16 5640", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 16 5640"), + DMI_MATCH(DMI_PRODUCT_SKU, "0CA0"), + }, + .driver_data = (void *)RTW89_QUIRK_PCI_BER, + }, + { + .ident = "Dell Inc. 
Inspiron 16 5640", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 16 5640"), + DMI_MATCH(DMI_PRODUCT_SKU, "0C9F"), + }, + .driver_data = (void *)RTW89_QUIRK_PCI_BER, + }, + {}, +}; + static const struct rtw89_driver_info rtw89_8852ce_info = { .chip = &rtw8852c_chip_info, + .quirks = rtw8852c_pci_quirks, .bus = { .pci = &rtw8852c_pci_info, }, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c index 708132d5be2a..3b3ea3a7c19a 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c @@ -2126,7 +2126,7 @@ static void rtw8922a_bb_cfg_txrx_path(struct rtw89_dev *rtwdev) rtw8922a_hal_reset(rtwdev, RTW89_PHY_0, RTW89_MAC_0, band, &tx_en0, false); if (rtwdev->dbcc_en) rtw8922a_hal_reset(rtwdev, RTW89_PHY_1, RTW89_MAC_1, band, - &tx_en0, false); + &tx_en1, false); } static u8 rtw8922a_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path) @@ -2257,6 +2257,138 @@ static void rtw8922a_btc_init_cfg(struct rtw89_dev *rtwdev) btc->cx.wl.status.map.init_ok = true; } +static void +rtw8922a_btc_set_wl_txpwr_ctrl(struct rtw89_dev *rtwdev, u32 txpwr_val) +{ + u16 ctrl_all_time = u32_get_bits(txpwr_val, GENMASK(15, 0)); + u16 ctrl_gnt_bt = u32_get_bits(txpwr_val, GENMASK(31, 16)); + + switch (ctrl_all_time) { + case 0xffff: + rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_BE_PWR_RATE_CTRL, + B_BE_FORCE_PWR_BY_RATE_EN, 0x0); + rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_BE_PWR_RATE_CTRL, + B_BE_FORCE_PWR_BY_RATE_VAL, 0x0); + break; + default: + rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_BE_PWR_RATE_CTRL, + B_BE_FORCE_PWR_BY_RATE_VAL, ctrl_all_time); + rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_BE_PWR_RATE_CTRL, + B_BE_FORCE_PWR_BY_RATE_EN, 0x1); + break; + } + + switch (ctrl_gnt_bt) { + case 0xffff: + rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_BE_PWR_REG_CTRL, + B_BE_PWR_BT_EN, 0x0); + rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_BE_PWR_COEX_CTRL, + B_BE_PWR_BT_VAL, 0x0); + break; + default: + rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_BE_PWR_COEX_CTRL, + B_BE_PWR_BT_VAL, ctrl_gnt_bt); + rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_BE_PWR_REG_CTRL, + B_BE_PWR_BT_EN, 0x1); + break; + } +} + +static +s8 rtw8922a_btc_get_bt_rssi(struct rtw89_dev *rtwdev, s8 val) +{ + return clamp_t(s8, val, -100, 0) + 100; +} + +static const struct rtw89_btc_rf_trx_para rtw89_btc_8922a_rf_ul[] = { + {255, 0, 0, 7}, /* 0 -> original */ + {255, 2, 0, 7}, /* 1 -> for BT-connected ACI issue && BTG co-rx */ + {255, 0, 0, 7}, /* 2 ->reserved for shared-antenna */ + {255, 0, 0, 7}, /* 3- >reserved for shared-antenna */ + {255, 0, 0, 7}, /* 4 ->reserved for shared-antenna */ + {255, 1, 0, 7}, /* the below id is for non-shared-antenna free-run */ + {6, 1, 0, 7}, + {13, 1, 0, 7}, + {13, 1, 0, 7} +}; + +static const struct rtw89_btc_rf_trx_para rtw89_btc_8922a_rf_dl[] = { + {255, 0, 0, 7}, /* 0 -> original */ + {255, 2, 0, 7}, /* 1 -> reserved for shared-antenna */ + {255, 0, 0, 7}, /* 2 ->reserved for shared-antenna */ + {255, 0, 0, 7}, /* 3- >reserved for shared-antenna */ + {255, 0, 0, 7}, /* 4 ->reserved for shared-antenna */ + {255, 1, 0, 7}, /* the below id is for non-shared-antenna free-run */ + {255, 1, 0, 7}, + {255, 1, 0, 7}, + {255, 1, 0, 7} +}; + +static const u8 rtw89_btc_8922a_wl_rssi_thres[BTC_WL_RSSI_THMAX] = {60, 50, 40, 30}; +static const u8 rtw89_btc_8922a_bt_rssi_thres[BTC_BT_RSSI_THMAX] = 
{50, 40, 30, 20}; + +static const struct rtw89_btc_fbtc_mreg rtw89_btc_8922a_mon_reg[] = { + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe300), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe320), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe324), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe328), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe32c), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe330), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe334), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe338), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe344), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe348), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe34c), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe350), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0x11a2c), + RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0x11a50), + RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x980), + RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x660), + RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x1660), + RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x418c), + RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x518c), +}; + +static +void rtw8922a_btc_update_bt_cnt(struct rtw89_dev *rtwdev) +{ + /* Feature move to firmware */ +} + +static +void rtw8922a_btc_wl_s1_standby(struct rtw89_dev *rtwdev, bool state) +{ + if (!state) { + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x80000); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD1, RFREG_MASK, 0x0c110); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x01018); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x00000); + + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x80000); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x1); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD1, RFREG_MASK, 0x0c110); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x01018); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x00000); + } else { + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x80000); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD1, RFREG_MASK, 0x0c110); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x09018); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x00000); + + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x80000); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x1); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD1, RFREG_MASK, 0x0c110); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x09018); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x00000); + } +} + +static void rtw8922a_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level) +{ +} + static void rtw8922a_fill_freq_with_ppdu(struct rtw89_dev *rtwdev, struct rtw89_rx_phy_ppdu *phy_ppdu, struct ieee80211_rx_status *status) @@ -2367,6 +2499,13 @@ static const struct rtw89_chip_ops rtw8922a_chip_ops = { .btc_set_rfe = rtw8922a_btc_set_rfe, .btc_init_cfg = rtw8922a_btc_init_cfg, + .btc_set_wl_pri = NULL, + .btc_set_wl_txpwr_ctrl = rtw8922a_btc_set_wl_txpwr_ctrl, + .btc_get_bt_rssi = rtw8922a_btc_get_bt_rssi, + .btc_update_bt_cnt = rtw8922a_btc_update_bt_cnt, + .btc_wl_s1_standby = rtw8922a_btc_wl_s1_standby, + .btc_set_wl_rx_gain = rtw8922a_btc_set_wl_rx_gain, + .btc_set_policy = rtw89_btc_set_policy_v1, }; const struct rtw89_chip_info rtw8922a_chip_info = { @@ -2406,6 +2545,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = { .dig_regs = &rtw8922a_dig_regs, .tssi_dbw_table = NULL, .support_chanctx_num = 2, + .support_rnr = true, .support_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ) | BIT(NL80211_BAND_6GHZ), @@ -2436,7 +2576,22 @@ 
const struct rtw89_chip_info rtw8922a_chip_info = { .efuse_blocks = rtw8922a_efuse_blocks, .phycap_addr = 0x1700, .phycap_size = 0x38, - + .para_ver = 0xf, + .wlcx_desired = 0x07110000, + .btcx_desired = 0x7, + .scbd = 0x1, + .mailbox = 0x1, + + .afh_guard_ch = 6, + .wl_rssi_thres = rtw89_btc_8922a_wl_rssi_thres, + .bt_rssi_thres = rtw89_btc_8922a_bt_rssi_thres, + .rssi_tol = 2, + .mon_reg_num = ARRAY_SIZE(rtw89_btc_8922a_mon_reg), + .mon_reg = rtw89_btc_8922a_mon_reg, + .rf_para_ulink_num = ARRAY_SIZE(rtw89_btc_8922a_rf_ul), + .rf_para_ulink = rtw89_btc_8922a_rf_ul, + .rf_para_dlink_num = ARRAY_SIZE(rtw89_btc_8922a_rf_dl), + .rf_para_dlink = rtw89_btc_8922a_rf_dl, .ps_mode_supported = BIT(RTW89_PS_MODE_RFOFF) | BIT(RTW89_PS_MODE_CLK_GATED) | BIT(RTW89_PS_MODE_PWR_GATED), diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c index 4981b657bd7b..ce8aaa9501e1 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c @@ -61,6 +61,7 @@ static const struct rtw89_pci_info rtw8922a_pci_info = { static const struct rtw89_driver_info rtw89_8922ae_info = { .chip = &rtw8922a_chip_info, + .quirks = NULL, .bus = { .pci = &rtw8922a_pci_info, }, diff --git a/drivers/net/wireless/realtek/rtw89/sar.h b/drivers/net/wireless/realtek/rtw89/sar.h index bd7a657188d9..4ae081d2d3b4 100644 --- a/drivers/net/wireless/realtek/rtw89/sar.h +++ b/drivers/net/wireless/realtek/rtw89/sar.h @@ -7,8 +7,8 @@ #include "core.h" -#define RTW89_SAR_TXPWR_MAC_MAX S8_MAX -#define RTW89_SAR_TXPWR_MAC_MIN S8_MIN +#define RTW89_SAR_TXPWR_MAC_MAX 63 +#define RTW89_SAR_TXPWR_MAC_MIN -64 struct rtw89_sar_handler { const char *descr_sar_source; diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c index ccad026defb5..fa61484c3839 100644 --- a/drivers/net/wireless/realtek/rtw89/wow.c +++ b/drivers/net/wireless/realtek/rtw89/wow.c @@ -12,6 +12,662 @@ #include "util.h" #include "wow.h" +void rtw89_wow_parse_akm(struct rtw89_dev *rtwdev, struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + const u8 *rsn, *ies = mgmt->u.assoc_req.variable; + struct rtw89_rsn_ie *rsn_ie; + + rsn = cfg80211_find_ie(WLAN_EID_RSN, ies, skb->len); + if (!rsn) + return; + + rsn_ie = (struct rtw89_rsn_ie *)rsn; + rtw_wow->akm = rsn_ie->akm_cipher_suite.type; +} + +static const struct rtw89_cipher_info rtw89_cipher_info_defs[] = { + {WLAN_CIPHER_SUITE_WEP40, .fw_alg = 1, .len = WLAN_KEY_LEN_WEP40,}, + {WLAN_CIPHER_SUITE_WEP104, .fw_alg = 2, .len = WLAN_KEY_LEN_WEP104,}, + {WLAN_CIPHER_SUITE_TKIP, .fw_alg = 3, .len = WLAN_KEY_LEN_TKIP,}, + {WLAN_CIPHER_SUITE_CCMP, .fw_alg = 6, .len = WLAN_KEY_LEN_CCMP,}, + {WLAN_CIPHER_SUITE_GCMP, .fw_alg = 8, .len = WLAN_KEY_LEN_GCMP,}, + {WLAN_CIPHER_SUITE_CCMP_256, .fw_alg = 7, .len = WLAN_KEY_LEN_CCMP_256,}, + {WLAN_CIPHER_SUITE_GCMP_256, .fw_alg = 23, .len = WLAN_KEY_LEN_GCMP_256,}, + {WLAN_CIPHER_SUITE_AES_CMAC, .fw_alg = 32, .len = WLAN_KEY_LEN_AES_CMAC,}, +}; + +static const +struct rtw89_cipher_info *rtw89_cipher_alg_recognize(u32 cipher) +{ + const struct rtw89_cipher_info *cipher_info_defs; + int i; + + for (i = 0; i < ARRAY_SIZE(rtw89_cipher_info_defs); i++) { + cipher_info_defs = &rtw89_cipher_info_defs[i]; + if (cipher_info_defs->cipher == cipher) + return cipher_info_defs; + } + + return NULL; +} + +static int _pn_to_iv(struct rtw89_dev *rtwdev, struct ieee80211_key_conf *key, + 
u8 *iv, u64 pn, u8 key_idx) +{ + switch (key->cipher) { + case WLAN_CIPHER_SUITE_TKIP: + iv[0] = u64_get_bits(pn, RTW89_KEY_PN_1); + iv[1] = (u64_get_bits(pn, RTW89_KEY_PN_1) | 0x20) & 0x7f; + iv[2] = u64_get_bits(pn, RTW89_KEY_PN_0); + break; + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + case WLAN_CIPHER_SUITE_GCMP_256: + iv[0] = u64_get_bits(pn, RTW89_KEY_PN_0); + iv[1] = u64_get_bits(pn, RTW89_KEY_PN_1); + iv[2] = 0; + break; + default: + return -EINVAL; + } + + iv[3] = BIT(5) | ((key_idx & 0x3) << 6); + iv[4] = u64_get_bits(pn, RTW89_KEY_PN_2); + iv[5] = u64_get_bits(pn, RTW89_KEY_PN_3); + iv[6] = u64_get_bits(pn, RTW89_KEY_PN_4); + iv[7] = u64_get_bits(pn, RTW89_KEY_PN_5); + + return 0; +} + +static int rtw89_rx_pn_to_iv(struct rtw89_dev *rtwdev, + struct ieee80211_key_conf *key, + u8 *iv) +{ + struct ieee80211_key_seq seq; + int err; + u64 pn; + + ieee80211_get_key_rx_seq(key, 0, &seq); + + /* seq.ccmp.pn[] is BE order array */ + pn = u64_encode_bits(seq.ccmp.pn[0], RTW89_KEY_PN_5) | + u64_encode_bits(seq.ccmp.pn[1], RTW89_KEY_PN_4) | + u64_encode_bits(seq.ccmp.pn[2], RTW89_KEY_PN_3) | + u64_encode_bits(seq.ccmp.pn[3], RTW89_KEY_PN_2) | + u64_encode_bits(seq.ccmp.pn[4], RTW89_KEY_PN_1) | + u64_encode_bits(seq.ccmp.pn[5], RTW89_KEY_PN_0); + + err = _pn_to_iv(rtwdev, key, iv, pn, key->keyidx); + if (err) + return err; + + rtw89_debug(rtwdev, RTW89_DBG_WOW, "%s key %d pn-%llx to iv-%*ph\n", + __func__, key->keyidx, pn, 8, iv); + + return 0; +} + +static int rtw89_tx_pn_to_iv(struct rtw89_dev *rtwdev, + struct ieee80211_key_conf *key, + u8 *iv) +{ + int err; + u64 pn; + + pn = atomic64_inc_return(&key->tx_pn); + err = _pn_to_iv(rtwdev, key, iv, pn, key->keyidx); + if (err) + return err; + + rtw89_debug(rtwdev, RTW89_DBG_WOW, "%s key %d pn-%llx to iv-%*ph\n", + __func__, key->keyidx, pn, 8, iv); + + return 0; +} + +static int _iv_to_pn(struct rtw89_dev *rtwdev, u8 *iv, u64 *pn, u8 *key_id, + struct ieee80211_key_conf *key) +{ + switch (key->cipher) { + case WLAN_CIPHER_SUITE_TKIP: + *pn = u64_encode_bits(iv[2], RTW89_KEY_PN_0) | + u64_encode_bits(iv[0], RTW89_KEY_PN_1); + break; + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + case WLAN_CIPHER_SUITE_GCMP_256: + *pn = u64_encode_bits(iv[0], RTW89_KEY_PN_0) | + u64_encode_bits(iv[1], RTW89_KEY_PN_1); + break; + default: + return -EINVAL; + } + + *pn |= u64_encode_bits(iv[4], RTW89_KEY_PN_2) | + u64_encode_bits(iv[5], RTW89_KEY_PN_3) | + u64_encode_bits(iv[6], RTW89_KEY_PN_4) | + u64_encode_bits(iv[7], RTW89_KEY_PN_5); + + if (key_id) + *key_id = *(iv + 3) >> 6; + + return 0; +} + +static int rtw89_rx_iv_to_pn(struct rtw89_dev *rtwdev, + struct ieee80211_key_conf *key, + u8 *iv) +{ + struct ieee80211_key_seq seq; + int err; + u64 pn; + + err = _iv_to_pn(rtwdev, iv, &pn, NULL, key); + if (err) + return err; + + /* seq.ccmp.pn[] is BE order array */ + seq.ccmp.pn[0] = u64_get_bits(pn, RTW89_KEY_PN_5); + seq.ccmp.pn[1] = u64_get_bits(pn, RTW89_KEY_PN_4); + seq.ccmp.pn[2] = u64_get_bits(pn, RTW89_KEY_PN_3); + seq.ccmp.pn[3] = u64_get_bits(pn, RTW89_KEY_PN_2); + seq.ccmp.pn[4] = u64_get_bits(pn, RTW89_KEY_PN_1); + seq.ccmp.pn[5] = u64_get_bits(pn, RTW89_KEY_PN_0); + + ieee80211_set_key_rx_seq(key, 0, &seq); + rtw89_debug(rtwdev, RTW89_DBG_WOW, "%s key %d iv-%*ph to pn-%*ph\n", + __func__, key->keyidx, 8, iv, 6, seq.ccmp.pn); + + return 0; +} + +static int rtw89_tx_iv_to_pn(struct rtw89_dev *rtwdev, + struct ieee80211_key_conf *key, + u8 *iv) +{ 
+ int err; + u64 pn; + + err = _iv_to_pn(rtwdev, iv, &pn, NULL, key); + if (err) + return err; + + atomic64_set(&key->tx_pn, pn); + rtw89_debug(rtwdev, RTW89_DBG_WOW, "%s key %d iv-%*ph to pn-%llx\n", + __func__, key->keyidx, 8, iv, pn); + + return 0; +} + +static int rtw89_rx_pn_get_pmf(struct rtw89_dev *rtwdev, + struct ieee80211_key_conf *key, + struct rtw89_wow_gtk_info *gtk_info) +{ + struct ieee80211_key_seq seq; + u64 pn; + + if (key->keyidx == 4) + memcpy(gtk_info->igtk[0], key->key, key->keylen); + else if (key->keyidx == 5) + memcpy(gtk_info->igtk[1], key->key, key->keylen); + else + return -EINVAL; + + ieee80211_get_key_rx_seq(key, 0, &seq); + + /* seq.ccmp.pn[] is BE order array */ + pn = u64_encode_bits(seq.ccmp.pn[0], RTW89_KEY_PN_5) | + u64_encode_bits(seq.ccmp.pn[1], RTW89_KEY_PN_4) | + u64_encode_bits(seq.ccmp.pn[2], RTW89_KEY_PN_3) | + u64_encode_bits(seq.ccmp.pn[3], RTW89_KEY_PN_2) | + u64_encode_bits(seq.ccmp.pn[4], RTW89_KEY_PN_1) | + u64_encode_bits(seq.ccmp.pn[5], RTW89_KEY_PN_0); + gtk_info->ipn = cpu_to_le64(pn); + gtk_info->igtk_keyid = cpu_to_le32(key->keyidx); + rtw89_debug(rtwdev, RTW89_DBG_WOW, "%s key %d pn-%llx\n", + __func__, key->keyidx, pn); + + return 0; +} + +static int rtw89_rx_pn_set_pmf(struct rtw89_dev *rtwdev, + struct ieee80211_key_conf *key, + u64 pn) +{ + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt; + struct ieee80211_key_seq seq; + + if (key->keyidx != aoac_rpt->igtk_key_id) + return 0; + + /* seq.ccmp.pn[] is BE order array */ + seq.ccmp.pn[0] = u64_get_bits(pn, RTW89_KEY_PN_5); + seq.ccmp.pn[1] = u64_get_bits(pn, RTW89_KEY_PN_4); + seq.ccmp.pn[2] = u64_get_bits(pn, RTW89_KEY_PN_3); + seq.ccmp.pn[3] = u64_get_bits(pn, RTW89_KEY_PN_2); + seq.ccmp.pn[4] = u64_get_bits(pn, RTW89_KEY_PN_1); + seq.ccmp.pn[5] = u64_get_bits(pn, RTW89_KEY_PN_0); + + ieee80211_set_key_rx_seq(key, 0, &seq); + rtw89_debug(rtwdev, RTW89_DBG_WOW, "%s key %d pn-%*ph\n", + __func__, key->keyidx, 6, seq.ccmp.pn); + + return 0; +} + +static void rtw89_wow_get_key_info_iter(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *data) +{ + struct rtw89_dev *rtwdev = hw->priv; + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + struct rtw89_wow_key_info *key_info = &rtw_wow->key_info; + struct rtw89_wow_gtk_info *gtk_info = &rtw_wow->gtk_info; + const struct rtw89_cipher_info *cipher_info; + bool *err = data; + int ret; + + cipher_info = rtw89_cipher_alg_recognize(key->cipher); + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_TKIP: + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + case WLAN_CIPHER_SUITE_GCMP_256: + if (sta) { + ret = rtw89_tx_pn_to_iv(rtwdev, key, + key_info->ptk_tx_iv); + if (ret) + goto err; + ret = rtw89_rx_pn_to_iv(rtwdev, key, + key_info->ptk_rx_iv); + if (ret) + goto err; + + rtw_wow->ptk_alg = cipher_info->fw_alg; + rtw_wow->ptk_keyidx = key->keyidx; + } else { + ret = rtw89_rx_pn_to_iv(rtwdev, key, + key_info->gtk_rx_iv[key->keyidx]); + if (ret) + goto err; + + rtw_wow->gtk_alg = cipher_info->fw_alg; + key_info->gtk_keyidx = key->keyidx; + } + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + ret = rtw89_rx_pn_get_pmf(rtwdev, key, gtk_info); + if (ret) + goto err; + break; + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + /* WEP only set group key in mac80211, but fw need to set + * both of pairwise key and group key. 
+ */ + rtw_wow->ptk_alg = cipher_info->fw_alg; + rtw_wow->ptk_keyidx = key->keyidx; + rtw_wow->gtk_alg = cipher_info->fw_alg; + key_info->gtk_keyidx = key->keyidx; + break; + default: + rtw89_debug(rtwdev, RTW89_DBG_WOW, "unsupport cipher %x\n", + key->cipher); + goto err; + } + + return; +err: + *err = true; +} + +static void rtw89_wow_set_key_info_iter(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *data) +{ + struct rtw89_dev *rtwdev = hw->priv; + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt; + struct rtw89_set_key_info_iter_data *iter_data = data; + bool update_tx_key_info = iter_data->rx_ready; + int ret; + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_TKIP: + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + case WLAN_CIPHER_SUITE_GCMP_256: + if (sta && !update_tx_key_info) { + ret = rtw89_rx_iv_to_pn(rtwdev, key, + aoac_rpt->ptk_rx_iv); + if (ret) + goto err; + } + + if (sta && update_tx_key_info) { + ret = rtw89_tx_iv_to_pn(rtwdev, key, + aoac_rpt->ptk_tx_iv); + if (ret) + goto err; + } + + if (!sta && !update_tx_key_info) { + ret = rtw89_rx_iv_to_pn(rtwdev, key, + aoac_rpt->gtk_rx_iv[key->keyidx]); + if (ret) + goto err; + } + + if (!sta && update_tx_key_info && aoac_rpt->rekey_ok) + iter_data->gtk_cipher = key->cipher; + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + if (update_tx_key_info) { + if (aoac_rpt->rekey_ok) + iter_data->igtk_cipher = key->cipher; + } else { + ret = rtw89_rx_pn_set_pmf(rtwdev, key, + aoac_rpt->igtk_ipn); + if (ret) + goto err; + } + break; + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + break; + default: + rtw89_debug(rtwdev, RTW89_DBG_WOW, "unsupport cipher %x\n", + key->cipher); + goto err; + } + + return; + +err: + iter_data->error = true; +} + +static void rtw89_wow_key_clear(struct rtw89_dev *rtwdev) +{ + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + + memset(&rtw_wow->aoac_rpt, 0, sizeof(rtw_wow->aoac_rpt)); + memset(&rtw_wow->gtk_info, 0, sizeof(rtw_wow->gtk_info)); + memset(&rtw_wow->key_info, 0, sizeof(rtw_wow->key_info)); + rtw_wow->ptk_alg = 0; + rtw_wow->gtk_alg = 0; +} + +static void rtw89_wow_construct_key_info(struct rtw89_dev *rtwdev) +{ + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + struct rtw89_wow_key_info *key_info = &rtw_wow->key_info; + struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif; + bool err = false; + + rcu_read_lock(); + ieee80211_iter_keys_rcu(rtwdev->hw, wow_vif, + rtw89_wow_get_key_info_iter, &err); + rcu_read_unlock(); + + if (err) { + rtw89_wow_key_clear(rtwdev); + return; + } + + key_info->valid_check = RTW89_WOW_VALID_CHECK; + key_info->symbol_check_en = RTW89_WOW_SYMBOL_CHK_PTK | + RTW89_WOW_SYMBOL_CHK_GTK; +} + +static void rtw89_wow_debug_aoac_rpt(struct rtw89_dev *rtwdev) +{ + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt; + + if (!rtw89_debug_is_enabled(rtwdev, RTW89_DBG_WOW)) + return; + + rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] rpt_ver = %d\n", + aoac_rpt->rpt_ver); + rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] sec_type = %d\n", + aoac_rpt->sec_type); + rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] key_idx = %d\n", + aoac_rpt->key_idx); + rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] pattern_idx = %d\n", + aoac_rpt->pattern_idx); + rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] rekey_ok = %d\n", + aoac_rpt->rekey_ok); + 
rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] ptk_tx_iv = %*ph\n", + 8, aoac_rpt->ptk_tx_iv); + rtw89_debug(rtwdev, RTW89_DBG_WOW, + "[aoac_rpt] eapol_key_replay_count = %*ph\n", + 8, aoac_rpt->eapol_key_replay_count); + rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] ptk_rx_iv = %*ph\n", + 8, aoac_rpt->ptk_rx_iv); + rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] gtk_rx_iv[0] = %*ph\n", + 8, aoac_rpt->gtk_rx_iv[0]); + rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] gtk_rx_iv[1] = %*ph\n", + 8, aoac_rpt->gtk_rx_iv[1]); + rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] gtk_rx_iv[2] = %*ph\n", + 8, aoac_rpt->gtk_rx_iv[2]); + rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] gtk_rx_iv[3] = %*ph\n", + 8, aoac_rpt->gtk_rx_iv[3]); + rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] igtk_key_id = %llu\n", + aoac_rpt->igtk_key_id); + rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] igtk_ipn = %llu\n", + aoac_rpt->igtk_ipn); + rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] igtk = %*ph\n", + 32, aoac_rpt->igtk); +} + +static int rtw89_wow_get_aoac_rpt_reg(struct rtw89_dev *rtwdev) +{ + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt; + struct rtw89_mac_c2h_info c2h_info = {}; + struct rtw89_mac_h2c_info h2c_info = {}; + u8 igtk_ipn[8]; + u8 key_idx; + int ret; + + h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_AOAC_RPT_1; + h2c_info.content_len = 2; + ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, &c2h_info); + if (ret) + return ret; + + aoac_rpt->key_idx = + u32_get_bits(c2h_info.u.c2hreg[0], RTW89_C2HREG_AOAC_RPT_1_W0_KEY_IDX); + key_idx = aoac_rpt->key_idx; + aoac_rpt->gtk_rx_iv[key_idx][0] = + u32_get_bits(c2h_info.u.c2hreg[1], RTW89_C2HREG_AOAC_RPT_1_W1_IV_0); + aoac_rpt->gtk_rx_iv[key_idx][1] = + u32_get_bits(c2h_info.u.c2hreg[1], RTW89_C2HREG_AOAC_RPT_1_W1_IV_1); + aoac_rpt->gtk_rx_iv[key_idx][2] = + u32_get_bits(c2h_info.u.c2hreg[1], RTW89_C2HREG_AOAC_RPT_1_W1_IV_2); + aoac_rpt->gtk_rx_iv[key_idx][3] = + u32_get_bits(c2h_info.u.c2hreg[1], RTW89_C2HREG_AOAC_RPT_1_W1_IV_3); + aoac_rpt->gtk_rx_iv[key_idx][4] = + u32_get_bits(c2h_info.u.c2hreg[2], RTW89_C2HREG_AOAC_RPT_1_W2_IV_4); + aoac_rpt->gtk_rx_iv[key_idx][5] = + u32_get_bits(c2h_info.u.c2hreg[2], RTW89_C2HREG_AOAC_RPT_1_W2_IV_5); + aoac_rpt->gtk_rx_iv[key_idx][6] = + u32_get_bits(c2h_info.u.c2hreg[2], RTW89_C2HREG_AOAC_RPT_1_W2_IV_6); + aoac_rpt->gtk_rx_iv[key_idx][7] = + u32_get_bits(c2h_info.u.c2hreg[2], RTW89_C2HREG_AOAC_RPT_1_W2_IV_7); + aoac_rpt->ptk_rx_iv[0] = + u32_get_bits(c2h_info.u.c2hreg[3], RTW89_C2HREG_AOAC_RPT_1_W3_PTK_IV_0); + aoac_rpt->ptk_rx_iv[1] = + u32_get_bits(c2h_info.u.c2hreg[3], RTW89_C2HREG_AOAC_RPT_1_W3_PTK_IV_1); + aoac_rpt->ptk_rx_iv[2] = + u32_get_bits(c2h_info.u.c2hreg[3], RTW89_C2HREG_AOAC_RPT_1_W3_PTK_IV_2); + aoac_rpt->ptk_rx_iv[3] = + u32_get_bits(c2h_info.u.c2hreg[3], RTW89_C2HREG_AOAC_RPT_1_W3_PTK_IV_3); + + h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_AOAC_RPT_2; + h2c_info.content_len = 2; + ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, &c2h_info); + if (ret) + return ret; + + aoac_rpt->ptk_rx_iv[4] = + u32_get_bits(c2h_info.u.c2hreg[0], RTW89_C2HREG_AOAC_RPT_2_W0_PTK_IV_4); + aoac_rpt->ptk_rx_iv[5] = + u32_get_bits(c2h_info.u.c2hreg[0], RTW89_C2HREG_AOAC_RPT_2_W0_PTK_IV_5); + aoac_rpt->ptk_rx_iv[6] = + u32_get_bits(c2h_info.u.c2hreg[1], RTW89_C2HREG_AOAC_RPT_2_W1_PTK_IV_6); + aoac_rpt->ptk_rx_iv[7] = + u32_get_bits(c2h_info.u.c2hreg[1], RTW89_C2HREG_AOAC_RPT_2_W1_PTK_IV_7); + igtk_ipn[0] = + u32_get_bits(c2h_info.u.c2hreg[1], RTW89_C2HREG_AOAC_RPT_2_W1_IGTK_IPN_IV_0); + 
igtk_ipn[1] = + u32_get_bits(c2h_info.u.c2hreg[1], RTW89_C2HREG_AOAC_RPT_2_W1_IGTK_IPN_IV_1); + igtk_ipn[2] = + u32_get_bits(c2h_info.u.c2hreg[2], RTW89_C2HREG_AOAC_RPT_2_W2_IGTK_IPN_IV_2); + igtk_ipn[3] = + u32_get_bits(c2h_info.u.c2hreg[2], RTW89_C2HREG_AOAC_RPT_2_W2_IGTK_IPN_IV_3); + igtk_ipn[4] = + u32_get_bits(c2h_info.u.c2hreg[2], RTW89_C2HREG_AOAC_RPT_2_W2_IGTK_IPN_IV_4); + igtk_ipn[5] = + u32_get_bits(c2h_info.u.c2hreg[2], RTW89_C2HREG_AOAC_RPT_2_W2_IGTK_IPN_IV_5); + igtk_ipn[6] = + u32_get_bits(c2h_info.u.c2hreg[3], RTW89_C2HREG_AOAC_RPT_2_W3_IGTK_IPN_IV_6); + igtk_ipn[7] = + u32_get_bits(c2h_info.u.c2hreg[3], RTW89_C2HREG_AOAC_RPT_2_W3_IGTK_IPN_IV_7); + aoac_rpt->igtk_ipn = u64_encode_bits(igtk_ipn[0], RTW89_IGTK_IPN_0) | + u64_encode_bits(igtk_ipn[1], RTW89_IGTK_IPN_1) | + u64_encode_bits(igtk_ipn[2], RTW89_IGTK_IPN_2) | + u64_encode_bits(igtk_ipn[3], RTW89_IGTK_IPN_3) | + u64_encode_bits(igtk_ipn[4], RTW89_IGTK_IPN_4) | + u64_encode_bits(igtk_ipn[5], RTW89_IGTK_IPN_5) | + u64_encode_bits(igtk_ipn[6], RTW89_IGTK_IPN_6) | + u64_encode_bits(igtk_ipn[7], RTW89_IGTK_IPN_7); + + return 0; +} + +static int rtw89_wow_get_aoac_rpt(struct rtw89_dev *rtwdev, bool rx_ready) +{ + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + int ret; + + if (!rtw_wow->ptk_alg) + return -EPERM; + + if (!rx_ready) { + ret = rtw89_wow_get_aoac_rpt_reg(rtwdev); + if (ret) { + rtw89_err(rtwdev, "wow: failed to get aoac rpt by reg\n"); + return ret; + } + } else { + ret = rtw89_fw_h2c_wow_request_aoac(rtwdev); + if (ret) { + rtw89_err(rtwdev, "wow: failed to get aoac rpt by pkt\n"); + return ret; + } + } + + rtw89_wow_debug_aoac_rpt(rtwdev); + + return 0; +} + +static struct ieee80211_key_conf *rtw89_wow_gtk_rekey(struct rtw89_dev *rtwdev, + u32 cipher, u8 keyidx, u8 *gtk) +{ + struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif; + const struct rtw89_cipher_info *cipher_info; + struct ieee80211_key_conf *rekey_conf; + struct ieee80211_key_conf *key; + u8 sz; + + cipher_info = rtw89_cipher_alg_recognize(cipher); + sz = struct_size(rekey_conf, key, cipher_info->len); + rekey_conf = kmalloc(sz, GFP_KERNEL); + if (!rekey_conf) + return NULL; + + rekey_conf->cipher = cipher; + rekey_conf->keyidx = keyidx; + rekey_conf->keylen = cipher_info->len; + memcpy(rekey_conf->key, gtk, + flex_array_size(rekey_conf, key, cipher_info->len)); + + /* ieee80211_gtk_rekey_add() will call set_key(), therefore we + * need to unlock mutex + */ + mutex_unlock(&rtwdev->mutex); + key = ieee80211_gtk_rekey_add(wow_vif, rekey_conf, -1); + mutex_lock(&rtwdev->mutex); + + kfree(rekey_conf); + if (IS_ERR(key)) { + rtw89_err(rtwdev, "ieee80211_gtk_rekey_add failed\n"); + return NULL; + } + + return key; +} + +static void rtw89_wow_update_key_info(struct rtw89_dev *rtwdev, bool rx_ready) +{ + struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif; + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt; + struct rtw89_set_key_info_iter_data data = {.error = false, + .rx_ready = rx_ready}; + struct ieee80211_key_conf *key; + + rcu_read_lock(); + ieee80211_iter_keys_rcu(rtwdev->hw, wow_vif, + rtw89_wow_set_key_info_iter, &data); + rcu_read_unlock(); + + if (data.error) { + rtw89_debug(rtwdev, RTW89_DBG_WOW, "%s error\n", __func__); + return; + } + + if (!data.gtk_cipher) + return; + + key = rtw89_wow_gtk_rekey(rtwdev, data.gtk_cipher, aoac_rpt->key_idx, + aoac_rpt->gtk); + if (!key) + return; + + rtw89_rx_iv_to_pn(rtwdev, key, + aoac_rpt->gtk_rx_iv[key->keyidx]); + + if (!data.igtk_cipher) + 
return; + + key = rtw89_wow_gtk_rekey(rtwdev, data.igtk_cipher, aoac_rpt->igtk_key_id, + aoac_rpt->igtk); + if (!key) + return; + + rtw89_rx_pn_set_pmf(rtwdev, key, aoac_rpt->igtk_ipn); + ieee80211_gtk_rekey_notify(wow_vif, wow_vif->bss_conf.bssid, + aoac_rpt->eapol_key_replay_count, + GFP_KERNEL); +} + static void rtw89_wow_leave_deep_ps(struct rtw89_dev *rtwdev) { __rtw89_leave_ps_mode(rtwdev); @@ -59,6 +715,8 @@ static void rtw89_wow_set_rx_filter(struct rtw89_dev *rtwdev, bool enable) static void rtw89_wow_show_wakeup_reason(struct rtw89_dev *rtwdev) { + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt; u32 wow_reason_reg = rtwdev->chip->wow_reason_reg; struct cfg80211_wowlan_nd_info nd_info; struct cfg80211_wowlan_wakeup wakeup = { @@ -85,10 +743,7 @@ static void rtw89_wow_show_wakeup_reason(struct rtw89_dev *rtwdev) rtw89_debug(rtwdev, RTW89_DBG_WOW, "WOW: Rx gtk rekey\n"); break; case RTW89_WOW_RSN_RX_PATTERN_MATCH: - /* Current firmware and driver don't report pattern index - * Use pattern_idx to 0 defaultly. - */ - wakeup.pattern_idx = 0; + wakeup.pattern_idx = aoac_rpt->pattern_idx; rtw89_debug(rtwdev, RTW89_DBG_WOW, "WOW: Rx pattern match packet\n"); break; case RTW89_WOW_RSN_RX_NLO: @@ -454,17 +1109,21 @@ static int rtw89_wow_check_fw_status(struct rtw89_dev *rtwdev, bool wow_enable) static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow) { enum rtw89_fw_type fw_type = wow ? RTW89_FW_WOWLAN : RTW89_FW_NORMAL; + enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen; struct rtw89_wow_param *rtw_wow = &rtwdev->wow; struct ieee80211_vif *wow_vif = rtw_wow->wow_vif; struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv; + enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; const struct rtw89_chip_info *chip = rtwdev->chip; bool include_bb = !!chip->bbmcu_nr; + bool disable_intr_for_dlfw = false; struct ieee80211_sta *wow_sta; struct rtw89_sta *rtwsta = NULL; bool is_conn = true; int ret; - rtw89_hci_disable_intr(rtwdev); + if (chip_id == RTL8852C || chip_id == RTL8922A) + disable_intr_for_dlfw = true; wow_sta = ieee80211_find_sta(wow_vif, rtwvif->bssid); if (wow_sta) @@ -472,12 +1131,18 @@ static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow) else is_conn = false; + if (disable_intr_for_dlfw) + rtw89_hci_disable_intr(rtwdev); + ret = rtw89_fw_download(rtwdev, fw_type, include_bb); if (ret) { rtw89_warn(rtwdev, "download fw failed\n"); return ret; } + if (disable_intr_for_dlfw) + rtw89_hci_enable_intr(rtwdev); + rtw89_phy_init_rf_reg(rtwdev, true); ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta, @@ -519,8 +1184,10 @@ static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow) rtw89_chip_cfg_txpwr_ul_tb_offset(rtwdev, wow_vif); } + if (chip_gen == RTW89_CHIP_BE) + rtw89_phy_rfk_pre_ntfy_and_wait(rtwdev, RTW89_PHY_0, 5); + rtw89_mac_hw_mgnt_sec(rtwdev, wow); - rtw89_hci_enable_intr(rtwdev); return 0; } @@ -595,7 +1262,22 @@ static int rtw89_wow_disable_trx_pre(struct rtw89_dev *rtwdev) rtw89_err(rtwdev, "failed to config mac\n"); return ret; } + + /* Before enabling interrupt, we need to get AOAC report by reg due to RX + * not enabled yet. Also, we need to sync RX related IV from firmware to + * mac80211 before receiving RX packets from driver. + * After enabling interrupt, we can get AOAC report from h2c and c2h, and + * can get TX IV and complete rekey info. We need to update TX related IV + * and new GTK info if rekey happened. 
+ */ + ret = rtw89_wow_get_aoac_rpt(rtwdev, false); + if (!ret) + rtw89_wow_update_key_info(rtwdev, false); + rtw89_hci_enable_intr(rtwdev); + ret = rtw89_wow_get_aoac_rpt(rtwdev, true); + if (!ret) + rtw89_wow_update_key_info(rtwdev, true); return 0; } @@ -618,6 +1300,7 @@ static int rtw89_wow_fw_start(struct rtw89_dev *rtwdev) int ret; rtw89_wow_pattern_write(rtwdev); + rtw89_wow_construct_key_info(rtwdev); ret = rtw89_fw_h2c_keep_alive(rtwdev, rtwvif, true); if (ret) { @@ -631,6 +1314,16 @@ static int rtw89_wow_fw_start(struct rtw89_dev *rtwdev) goto out; } + ret = rtw89_fw_h2c_wow_gtk_ofld(rtwdev, rtwvif, true); + if (ret) { + rtw89_err(rtwdev, "wow: failed to enable GTK offload\n"); + goto out; + } + + ret = rtw89_fw_h2c_arp_offload(rtwdev, rtwvif, true); + if (ret) + rtw89_warn(rtwdev, "wow: failed to enable arp offload\n"); + ret = rtw89_wow_cfg_wake(rtwdev, true); if (ret) { rtw89_err(rtwdev, "wow: failed to config wake\n"); @@ -667,6 +1360,17 @@ static int rtw89_wow_fw_stop(struct rtw89_dev *rtwdev) goto out; } + ret = rtw89_fw_h2c_wow_gtk_ofld(rtwdev, rtwvif, false); + if (ret) { + rtw89_err(rtwdev, "wow: failed to disable GTK offload\n"); + goto out; + } + + ret = rtw89_fw_h2c_arp_offload(rtwdev, rtwvif, false); + if (ret) + rtw89_warn(rtwdev, "wow: failed to disable arp offload\n"); + + rtw89_wow_key_clear(rtwdev); rtw89_fw_release_general_pkt_list(rtwdev, true); ret = rtw89_wow_cfg_wake(rtwdev, false); diff --git a/drivers/net/wireless/realtek/rtw89/wow.h b/drivers/net/wireless/realtek/rtw89/wow.h index a2f7b2e3cdb4..e595aee0196d 100644 --- a/drivers/net/wireless/realtek/rtw89/wow.h +++ b/drivers/net/wireless/realtek/rtw89/wow.h @@ -5,6 +5,26 @@ #ifndef __RTW89_WOW_H__ #define __RTW89_WOW_H__ +#define RTW89_KEY_PN_0 GENMASK_ULL(7, 0) +#define RTW89_KEY_PN_1 GENMASK_ULL(15, 8) +#define RTW89_KEY_PN_2 GENMASK_ULL(23, 16) +#define RTW89_KEY_PN_3 GENMASK_ULL(31, 24) +#define RTW89_KEY_PN_4 GENMASK_ULL(39, 32) +#define RTW89_KEY_PN_5 GENMASK_ULL(47, 40) + +#define RTW89_IGTK_IPN_0 GENMASK_ULL(7, 0) +#define RTW89_IGTK_IPN_1 GENMASK_ULL(15, 8) +#define RTW89_IGTK_IPN_2 GENMASK_ULL(23, 16) +#define RTW89_IGTK_IPN_3 GENMASK_ULL(31, 24) +#define RTW89_IGTK_IPN_4 GENMASK_ULL(39, 32) +#define RTW89_IGTK_IPN_5 GENMASK_ULL(47, 40) +#define RTW89_IGTK_IPN_6 GENMASK_ULL(55, 48) +#define RTW89_IGTK_IPN_7 GENMASK_ULL(63, 56) + +#define RTW89_WOW_VALID_CHECK 0xDD +#define RTW89_WOW_SYMBOL_CHK_PTK BIT(0) +#define RTW89_WOW_SYMBOL_CHK_GTK BIT(1) + enum rtw89_wake_reason { RTW89_WOW_RSN_RX_PTK_REKEY = 0x1, RTW89_WOW_RSN_RX_GTK_REKEY = 0x2, @@ -15,7 +35,44 @@ enum rtw89_wake_reason { RTW89_WOW_RSN_RX_NLO = 0x55, }; +struct rtw89_cipher_suite { + u8 oui[3]; + u8 type; +} __packed; + +struct rtw89_rsn_ie { + u8 tag_number; + u8 tag_length; + __le16 rsn_version; + struct rtw89_cipher_suite group_cipher_suite; + __le16 pairwise_cipher_suite_cnt; + struct rtw89_cipher_suite pairwise_cipher_suite; + __le16 akm_cipher_suite_cnt; + struct rtw89_cipher_suite akm_cipher_suite; +} __packed; + +struct rtw89_cipher_info { + u32 cipher; + u8 fw_alg; + enum ieee80211_key_len len; +}; + +struct rtw89_set_key_info_iter_data { + u32 gtk_cipher; + u32 igtk_cipher; + bool rx_ready; + bool error; +}; + +#ifdef CONFIG_PM int rtw89_wow_suspend(struct rtw89_dev *rtwdev, struct cfg80211_wowlan *wowlan); int rtw89_wow_resume(struct rtw89_dev *rtwdev); +void rtw89_wow_parse_akm(struct rtw89_dev *rtwdev, struct sk_buff *skb); +#else +static inline +void rtw89_wow_parse_akm(struct rtw89_dev *rtwdev, struct sk_buff *skb) +{ +} 
+#endif #endif diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c index 8e7b757475d2..1e578533e473 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c @@ -1519,36 +1519,7 @@ static struct sdio_driver rsi_driver = { } #endif }; - -/** - * rsi_module_init() - This function registers the sdio module. - * @void: Void. - * - * Return: 0 on success. - */ -static int rsi_module_init(void) -{ - int ret; - - ret = sdio_register_driver(&rsi_driver); - rsi_dbg(INIT_ZONE, "%s: Registering driver\n", __func__); - return ret; -} - -/** - * rsi_module_exit() - This function unregisters the sdio module. - * @void: Void. - * - * Return: None. - */ -static void rsi_module_exit(void) -{ - sdio_unregister_driver(&rsi_driver); - rsi_dbg(INFO_ZONE, "%s: Unregistering driver\n", __func__); -} - -module_init(rsi_module_init); -module_exit(rsi_module_exit); +module_sdio_driver(rsi_driver); MODULE_AUTHOR("Redpine Signals Inc"); MODULE_DESCRIPTION("Common SDIO layer for RSI drivers"); diff --git a/drivers/net/wireless/ti/wl1251/cmd.h b/drivers/net/wireless/ti/wl1251/cmd.h index e5874186f9d7..39159201b97e 100644 --- a/drivers/net/wireless/ti/wl1251/cmd.h +++ b/drivers/net/wireless/ti/wl1251/cmd.h @@ -89,8 +89,6 @@ enum wl1251_commands { struct wl1251_cmd_header { u16 id; u16 status; - /* payload */ - u8 data[]; } __packed; struct wl1251_command { diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c index 4e5b351f80f0..c705081249d6 100644 --- a/drivers/net/wireless/ti/wl1251/sdio.c +++ b/drivers/net/wireless/ti/wl1251/sdio.c @@ -323,25 +323,7 @@ static struct sdio_driver wl1251_sdio_driver = { .remove = wl1251_sdio_remove, .drv.pm = &wl1251_sdio_pm_ops, }; - -static int __init wl1251_sdio_init(void) -{ - int err; - - err = sdio_register_driver(&wl1251_sdio_driver); - if (err) - wl1251_error("failed to register sdio driver: %d", err); - return err; -} - -static void __exit wl1251_sdio_exit(void) -{ - sdio_unregister_driver(&wl1251_sdio_driver); - wl1251_notice("unloaded"); -} - -module_init(wl1251_sdio_init); -module_exit(wl1251_sdio_exit); +module_sdio_driver(wl1251_sdio_driver); MODULE_DESCRIPTION("TI WL1251 SDIO helpers"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/ti/wl1251/wl12xx_80211.h b/drivers/net/wireless/ti/wl1251/wl12xx_80211.h index 7e28fe435b43..3d5b0df5b231 100644 --- a/drivers/net/wireless/ti/wl1251/wl12xx_80211.h +++ b/drivers/net/wireless/ti/wl1251/wl12xx_80211.h @@ -65,7 +65,6 @@ struct ieee80211_header { u8 sa[ETH_ALEN]; u8 bssid[ETH_ALEN]; __le16 seq_ctl; - u8 payload[]; } __packed; struct wl12xx_ie_header { diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h index f2609d5b6bf7..4c2f2608ef3b 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.h +++ b/drivers/net/wireless/ti/wlcore/cmd.h @@ -208,8 +208,6 @@ enum cmd_templ { struct wl1271_cmd_header { __le16 id; __le16 status; - /* payload */ - u8 data[]; } __packed; #define WL1271_CMD_MAX_PARAMS 572 diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c index f0c7e09b314d..c07acfcbbd9c 100644 --- a/drivers/net/wireless/ti/wlcore/sysfs.c +++ b/drivers/net/wireless/ti/wlcore/sysfs.c @@ -19,11 +19,8 @@ static ssize_t bt_coex_state_show(struct device *dev, struct wl1271 *wl = dev_get_drvdata(dev); ssize_t len; - len = PAGE_SIZE; - mutex_lock(&wl->mutex); - len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n", - wl->sg_enabled); + len = 
sysfs_emit(buf, "%d\n\n0 - off\n1 - on\n", wl->sg_enabled); mutex_unlock(&wl->mutex); return len; @@ -78,13 +75,11 @@ static ssize_t hw_pg_ver_show(struct device *dev, struct wl1271 *wl = dev_get_drvdata(dev); ssize_t len; - len = PAGE_SIZE; - mutex_lock(&wl->mutex); if (wl->hw_pg_ver >= 0) - len = snprintf(buf, len, "%d\n", wl->hw_pg_ver); + len = sysfs_emit(buf, "%d\n", wl->hw_pg_ver); else - len = snprintf(buf, len, "n/a\n"); + len = sysfs_emit(buf, "n/a\n"); mutex_unlock(&wl->mutex); return len; diff --git a/drivers/net/wireless/ti/wlcore/wl12xx_80211.h b/drivers/net/wireless/ti/wlcore/wl12xx_80211.h index 1dd7ecc11f86..602915c4da26 100644 --- a/drivers/net/wireless/ti/wlcore/wl12xx_80211.h +++ b/drivers/net/wireless/ti/wlcore/wl12xx_80211.h @@ -66,7 +66,6 @@ struct ieee80211_header { u8 sa[ETH_ALEN]; u8 bssid[ETH_ALEN]; __le16 seq_ctl; - u8 payload[]; } __packed; struct wl12xx_ie_header { diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c index 59e1fc0018df..b5afaec61827 100644 --- a/drivers/net/wireless/virtual/mac80211_hwsim.c +++ b/drivers/net/wireless/virtual/mac80211_hwsim.c @@ -216,7 +216,7 @@ static const struct ieee80211_regdomain *hwsim_world_regdom_custom[] = { struct hwsim_vif_priv { u32 magic; - u32 skip_beacons; + u32 skip_beacons[IEEE80211_MLD_MAX_NUM_LINKS]; u8 bssid[ETH_ALEN]; bool assoc; bool bcn_en; @@ -1721,6 +1721,9 @@ static void mac80211_hwsim_rx(struct mac80211_hwsim_data *data, sp->active_links_rx &= ~BIT(link_id); else sp->active_links_rx |= BIT(link_id); + + rx_status->link_valid = true; + rx_status->link_id = link_id; } rcu_read_unlock(); } @@ -2133,13 +2136,16 @@ static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw, } #ifdef CONFIG_MAC80211_DEBUGFS -static void mac80211_hwsim_vif_add_debugfs(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +static void +mac80211_hwsim_link_add_debugfs(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct dentry *dir) { struct hwsim_vif_priv *vp = (void *)vif->drv_priv; - debugfs_create_u32("skip_beacons", 0600, vif->debugfs_dir, - &vp->skip_beacons); + debugfs_create_u32("skip_beacons", 0600, dir, + &vp->skip_beacons[link_conf->link_id]); } #endif @@ -2214,8 +2220,8 @@ static void __mac80211_hwsim_beacon_tx(struct ieee80211_bss_conf *link_conf, /* TODO: get MCS */ int bitrate = 100; - if (vp->skip_beacons) { - vp->skip_beacons--; + if (vp->skip_beacons[link_conf->link_id]) { + vp->skip_beacons[link_conf->link_id]--; dev_kfree_skb(skb); return; } @@ -2307,6 +2313,10 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac, if (link_conf->csa_active && ieee80211_beacon_cntdwn_is_complete(vif, link_id)) ieee80211_csa_finish(vif, link_id); + + if (link_conf->color_change_active && + ieee80211_beacon_cntdwn_is_complete(vif, link_id)) + ieee80211_color_change_finish(vif, link_id); } static enum hrtimer_restart @@ -3922,7 +3932,7 @@ out: #ifdef CONFIG_MAC80211_DEBUGFS #define HWSIM_DEBUGFS_OPS \ - .vif_add_debugfs = mac80211_hwsim_vif_add_debugfs, + .link_add_debugfs = mac80211_hwsim_link_add_debugfs, #else #define HWSIM_DEBUGFS_OPS #endif @@ -4122,7 +4132,8 @@ out_err: static const struct ieee80211_sband_iftype_data sband_capa_2ghz[] = { { - .types_mask = BIT(NL80211_IFTYPE_STATION), + .types_mask = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_P2P_CLIENT), .he_cap = { .has_he = true, .he_cap_elem = { @@ -4229,7 +4240,8 @@ static const struct ieee80211_sband_iftype_data sband_capa_2ghz[] = { }, }, { - 
.types_mask = BIT(NL80211_IFTYPE_AP), + .types_mask = BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_GO), .he_cap = { .has_he = true, .he_cap_elem = { @@ -4380,8 +4392,8 @@ static const struct ieee80211_sband_iftype_data sband_capa_2ghz[] = { static const struct ieee80211_sband_iftype_data sband_capa_5ghz[] = { { - /* TODO: should we support other types, e.g., P2P? */ - .types_mask = BIT(NL80211_IFTYPE_STATION), + .types_mask = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_P2P_CLIENT), .he_cap = { .has_he = true, .he_cap_elem = { @@ -4505,7 +4517,8 @@ static const struct ieee80211_sband_iftype_data sband_capa_5ghz[] = { }, }, { - .types_mask = BIT(NL80211_IFTYPE_AP), + .types_mask = BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_GO), .he_cap = { .has_he = true, .he_cap_elem = { @@ -4676,8 +4689,8 @@ static const struct ieee80211_sband_iftype_data sband_capa_5ghz[] = { static const struct ieee80211_sband_iftype_data sband_capa_6ghz[] = { { - /* TODO: should we support other types, e.g., P2P? */ - .types_mask = BIT(NL80211_IFTYPE_STATION), + .types_mask = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_P2P_CLIENT), .he_6ghz_capa = { .capa = cpu_to_le16(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START | IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP | @@ -4822,7 +4835,8 @@ static const struct ieee80211_sband_iftype_data sband_capa_6ghz[] = { }, }, { - .types_mask = BIT(NL80211_IFTYPE_AP), + .types_mask = BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_GO), .he_6ghz_capa = { .capa = cpu_to_le16(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START | IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP | @@ -5313,6 +5327,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT); + wiphy_ext_feature_set(hw->wiphy, + NL80211_EXT_FEATURE_BSS_COLOR); hw->wiphy->interface_modes = param->iftypes; @@ -6751,11 +6767,11 @@ static int __init init_mac80211_hwsim(void) param.regd = &hwsim_world_regdom_custom_01; break; case HWSIM_REGTEST_CUSTOM_WORLD: - param.regd = &hwsim_world_regdom_custom_01; + param.regd = &hwsim_world_regdom_custom_03; break; case HWSIM_REGTEST_CUSTOM_WORLD_2: if (i == 0) - param.regd = &hwsim_world_regdom_custom_01; + param.regd = &hwsim_world_regdom_custom_03; else if (i == 1) param.regd = &hwsim_world_regdom_custom_02; break; diff --git a/drivers/net/wwan/iosm/iosm_ipc_devlink.c b/drivers/net/wwan/iosm/iosm_ipc_devlink.c index 2fe724d623c0..bef6819986e9 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_devlink.c +++ b/drivers/net/wwan/iosm/iosm_ipc_devlink.c @@ -33,7 +33,8 @@ static int ipc_devlink_get_param(struct devlink *dl, u32 id, /* Set the param values for the specific param ID's */ static int ipc_devlink_set_param(struct devlink *dl, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct iosm_devlink *ipc_devlink = devlink_priv(dl); diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c index 3f72ae943b29..f2aef84fc08d 100644 --- a/drivers/net/wwan/mhi_wwan_mbim.c +++ b/drivers/net/wwan/mhi_wwan_mbim.c @@ -648,7 +648,6 @@ static struct mhi_driver mhi_mbim_driver = { .id_table = mhi_mbim_id_table, .driver = { .name = "mhi_wwan_mbim", - .owner = THIS_MODULE, }, }; diff --git a/drivers/net/wwan/t7xx/t7xx_netdev.c b/drivers/net/wwan/t7xx/t7xx_netdev.c index 3ef4a8a4f8fd..91fa082e9cab 100644 --- a/drivers/net/wwan/t7xx/t7xx_netdev.c +++ b/drivers/net/wwan/t7xx/t7xx_netdev.c @@ -253,22 +253,27 @@ static void t7xx_ccmni_wwan_setup(struct 
net_device *dev) dev->netdev_ops = &ccmni_netdev_ops; } -static void t7xx_init_netdev_napi(struct t7xx_ccmni_ctrl *ctlb) +static int t7xx_init_netdev_napi(struct t7xx_ccmni_ctrl *ctlb) { int i; /* one HW, but shared with multiple net devices, * so add a dummy device for NAPI. */ - init_dummy_netdev(&ctlb->dummy_dev); + ctlb->dummy_dev = alloc_netdev_dummy(0); + if (!ctlb->dummy_dev) + return -ENOMEM; + atomic_set(&ctlb->napi_usr_refcnt, 0); ctlb->is_napi_en = false; for (i = 0; i < RXQ_NUM; i++) { ctlb->napi[i] = &ctlb->hif_ctrl->rxq[i].napi; - netif_napi_add_weight(&ctlb->dummy_dev, ctlb->napi[i], t7xx_dpmaif_napi_rx_poll, + netif_napi_add_weight(ctlb->dummy_dev, ctlb->napi[i], t7xx_dpmaif_napi_rx_poll, NIC_NAPI_POLL_BUDGET); } + + return 0; } static void t7xx_uninit_netdev_napi(struct t7xx_ccmni_ctrl *ctlb) @@ -279,6 +284,7 @@ static void t7xx_uninit_netdev_napi(struct t7xx_ccmni_ctrl *ctlb) netif_napi_del(ctlb->napi[i]); ctlb->napi[i] = NULL; } + free_netdev(ctlb->dummy_dev); } static int t7xx_ccmni_wwan_newlink(void *ctxt, struct net_device *dev, u32 if_id, @@ -480,6 +486,7 @@ int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev) { struct device *dev = &t7xx_dev->pdev->dev; struct t7xx_ccmni_ctrl *ctlb; + int ret; ctlb = devm_kzalloc(dev, sizeof(*ctlb), GFP_KERNEL); if (!ctlb) @@ -495,7 +502,12 @@ int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev) if (!ctlb->hif_ctrl) return -ENOMEM; - t7xx_init_netdev_napi(ctlb); + ret = t7xx_init_netdev_napi(ctlb); + if (ret) { + t7xx_dpmaif_hif_exit(ctlb->hif_ctrl); + return ret; + } + init_md_status_notifier(t7xx_dev); return 0; } diff --git a/drivers/net/wwan/t7xx/t7xx_netdev.h b/drivers/net/wwan/t7xx/t7xx_netdev.h index f5ed6f99a145..b18312f49844 100644 --- a/drivers/net/wwan/t7xx/t7xx_netdev.h +++ b/drivers/net/wwan/t7xx/t7xx_netdev.h @@ -48,7 +48,7 @@ struct t7xx_ccmni_ctrl { unsigned int md_sta; struct t7xx_fsm_notifier md_status_notify; bool wwan_is_registered; - struct net_device dummy_dev; + struct net_device *dummy_dev; struct napi_struct *napi[RXQ_NUM]; atomic_t napi_usr_refcnt; bool is_napi_en; diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 1fcbd83f7ff2..17421da139f2 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -390,9 +390,8 @@ bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb); void xenvif_carrier_on(struct xenvif *vif); -/* Callback from stack when TX packet can be released */ -void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf, - bool zerocopy_success); +/* Callbacks from stack when TX packet can be released */ +extern const struct ubuf_info_ops xenvif_ubuf_ops; static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue) { diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 7cff90aa8d24..325fcb3d1075 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -358,7 +358,7 @@ static int xenvif_change_mtu(struct net_device *dev, int mtu) if (mtu > max) return -EINVAL; - dev->mtu = mtu; + WRITE_ONCE(dev->mtu, mtu); return 0; } @@ -593,7 +593,7 @@ int xenvif_init_queue(struct xenvif_queue *queue) for (i = 0; i < MAX_PENDING_REQS; i++) { queue->pending_tx_info[i].callback_struct = (struct ubuf_info_msgzc) - { { .callback = xenvif_zerocopy_callback }, + { { .ops = &xenvif_ubuf_ops }, { { .ctx = NULL, .desc = i } } }; queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE; diff --git a/drivers/net/xen-netback/netback.c 
b/drivers/net/xen-netback/netback.c index ef76850d9bcd..5836995d6774 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -38,6 +38,7 @@ #include <linux/if_vlan.h> #include <linux/udp.h> #include <linux/highmem.h> +#include <linux/skbuff_ref.h> #include <net/tcp.h> @@ -1156,7 +1157,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s uarg = skb_shinfo(skb)->destructor_arg; /* increase inflight counter to offset decrement in callback */ atomic_inc(&queue->inflight_packets); - uarg->callback(NULL, uarg, true); + uarg->ops->complete(NULL, uarg, true); skb_shinfo(skb)->destructor_arg = NULL; /* Fill the skb with the new (local) frags. */ @@ -1278,8 +1279,9 @@ static int xenvif_tx_submit(struct xenvif_queue *queue) return work_done; } -void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf_base, - bool zerocopy_success) +static void xenvif_zerocopy_callback(struct sk_buff *skb, + struct ubuf_info *ubuf_base, + bool zerocopy_success) { unsigned long flags; pending_ring_idx_t index; @@ -1312,6 +1314,10 @@ void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf_base, xenvif_skb_zerocopy_complete(queue); } +const struct ubuf_info_ops xenvif_ubuf_ops = { + .complete = xenvif_zerocopy_callback, +}; + static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue) { struct gnttab_unmap_grant_ref *gop; diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 8d2aee88526c..4265c1cd0ff7 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -1376,7 +1376,7 @@ static int xennet_change_mtu(struct net_device *dev, int mtu) if (mtu > max) return -EINVAL; - dev->mtu = mtu; + WRITE_ONCE(dev->mtu, mtu); return 0; }
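Editor's note on the rsi and wl1251 hunks above: both drop hand-rolled module_init()/module_exit() pairs in favour of module_sdio_driver(), which generates the same sdio_register_driver()/sdio_unregister_driver() calls. The following is a minimal illustrative skeleton, not taken from either patch; the driver name and callbacks are hypothetical, and a real driver would also supply an id_table so probe() can match.

#include <linux/module.h>
#include <linux/mmc/sdio_func.h>

/* Hypothetical probe/remove callbacks; a real driver would bring up
 * and tear down the card here.
 */
static int example_sdio_probe(struct sdio_func *func,
			      const struct sdio_device_id *id)
{
	return 0;
}

static void example_sdio_remove(struct sdio_func *func)
{
}

static struct sdio_driver example_sdio_driver = {
	.name	= "example_sdio",
	.probe	= example_sdio_probe,
	.remove	= example_sdio_remove,
};

/* Expands to module_init()/module_exit() stubs that only call
 * sdio_register_driver()/sdio_unregister_driver(), i.e. what the removed
 * rsi_module_init()/rsi_module_exit() and wl1251_sdio_init()/exit()
 * functions did by hand (minus their log messages).
 */
module_sdio_driver(example_sdio_driver);

MODULE_DESCRIPTION("Illustrative sketch only, not part of the diff above");
MODULE_LICENSE("GPL");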
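Editor's note on the wlcore sysfs.c hunks: snprintf(buf, PAGE_SIZE, ...) in show() callbacks is replaced by sysfs_emit(), which already knows the buffer is a full sysfs page and validates it. A minimal sketch of the pattern, assuming a hypothetical driver private structure with an "enabled" field (names are not from the patch):

#include <linux/device.h>
#include <linux/sysfs.h>

struct example_priv {
	bool enabled;
};

static ssize_t enabled_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct example_priv *priv = dev_get_drvdata(dev);

	/* sysfs_emit() takes no length argument: it assumes a PAGE_SIZE
	 * sysfs buffer, so callers stop passing PAGE_SIZE by hand.
	 */
	return sysfs_emit(buf, "%d\n", priv->enabled);
}
static DEVICE_ATTR_RO(enabled);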
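Editor's note on the xen-netback/xen-netfront hunks: the plain dev->mtu store in the ndo_change_mtu handlers becomes WRITE_ONCE(), since the MTU may be read locklessly from the datapath. An illustrative handler under the same assumption follows; the MTU bounds here are hypothetical and stand in for the drivers' own limits.

#include <linux/netdevice.h>
#include <linux/if_ether.h>

static int example_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < ETH_MIN_MTU || new_mtu > ETH_DATA_LEN)
		return -EINVAL;

	/* dev->mtu can be read without the RTNL or any other lock, so the
	 * writer uses WRITE_ONCE() to pair with those lockless reads and
	 * avoid store tearing.
	 */
	WRITE_ONCE(dev->mtu, new_mtu);
	return 0;
}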