Diffstat (limited to 'drivers/net/ethernet/cavium')
20 files changed, 240 insertions(+), 205 deletions(-)
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index b00c3002360e..50384cede8be 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -296,12 +296,16 @@ lio_ethtool_get_channels(struct net_device *dev,
 		rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
 		tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
 	} else if (OCTEON_CN23XX_PF(oct)) {
-		struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf);
-		max_rx = CFG_GET_OQ_MAX_Q(conf23);
-		max_tx = CFG_GET_IQ_MAX_Q(conf23);
-		rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf23, lio->ifidx);
-		tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf23, lio->ifidx);
+		max_rx = oct->sriov_info.num_pf_rings;
+		max_tx = oct->sriov_info.num_pf_rings;
+		rx_count = lio->linfo.num_rxpciq;
+		tx_count = lio->linfo.num_txpciq;
+	} else if (OCTEON_CN23XX_VF(oct)) {
+		max_tx = oct->sriov_info.rings_per_vf;
+		max_rx = oct->sriov_info.rings_per_vf;
+		rx_count = lio->linfo.num_rxpciq;
+		tx_count = lio->linfo.num_txpciq;
 	}
 
 	channel->max_rx = max_rx;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 39a9665c9d00..be9c0e3f5ade 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -15,6 +15,7 @@
  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
  * NONINFRINGEMENT.  See the GNU General Public License for more details.
  ***********************************************************************/
+#include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/firmware.h>
 #include <net/vxlan.h>
@@ -2223,25 +2224,6 @@ static void if_cfg_callback(struct octeon_device *oct,
 	wake_up_interruptible(&ctx->wc);
 }
 
-/**
- * \brief Select queue based on hash
- * @param dev Net device
- * @param skb sk_buff structure
- * @returns selected queue number
- */
-static u16 select_q(struct net_device *dev, struct sk_buff *skb,
-		    void *accel_priv __attribute__((unused)),
-		    select_queue_fallback_t fallback __attribute__((unused)))
-{
-	u32 qindex = 0;
-	struct lio *lio;
-
-	lio = GET_LIO(dev);
-	qindex = skb_tx_hash(dev, skb);
-
-	return (u16)(qindex % (lio->linfo.num_txpciq));
-}
-
 /** Routine to push packets arriving on Octeon interface upto network layer.
  * @param oct_id   - octeon device id.
  * @param skbuff   - skbuff struct to be passed to network layer.
@@ -2263,6 +2245,7 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
 	struct skb_shared_hwtstamps *shhwtstamps;
 	u64 ns;
 	u16 vtag = 0;
+	u32 r_dh_off;
 	struct net_device *netdev = (struct net_device *)arg;
 	struct octeon_droq *droq = container_of(param, struct octeon_droq,
 						napi);
@@ -2308,6 +2291,8 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
 			put_page(pg_info->page);
 		}
 
+		r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;
+
 		if (((oct->chip_id == OCTEON_CN66XX) ||
 		     (oct->chip_id == OCTEON_CN68XX)) &&
 		    ptp_enable) {
@@ -2320,16 +2305,27 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
 				/* Nanoseconds are in the first 64-bits
 				 * of the packet.
 				 */
-				memcpy(&ns, (skb->data), sizeof(ns));
+				memcpy(&ns, (skb->data + r_dh_off),
+				       sizeof(ns));
+				r_dh_off -= BYTES_PER_DHLEN_UNIT;
 				shhwtstamps = skb_hwtstamps(skb);
 				shhwtstamps->hwtstamp =
 					ns_to_ktime(ns + lio->ptp_adjust);
 			}
-			skb_pull(skb, sizeof(ns));
 		}
 	}
 
+	if (rh->r_dh.has_hash) {
+		__be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
+		u32 hash = be32_to_cpu(*hash_be);
+
+		skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
+		r_dh_off -= BYTES_PER_DHLEN_UNIT;
+	}
+
+	skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
+
 	skb->protocol = eth_type_trans(skb, skb->dev);
 	if ((netdev->features & NETIF_F_RXCSUM) &&
 	    (((rh->r_dh.encap_on) &&
@@ -2365,7 +2361,6 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
 	if (packet_was_received) {
 		droq->stats.rx_bytes_received += len;
 		droq->stats.rx_pkts_received++;
-		netdev->last_rx = jiffies;
 	} else {
 		droq->stats.rx_dropped++;
 		netif_info(lio, rx_err, lio->netdev,
@@ -2441,7 +2436,7 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
 	iq = oct->instr_queue[iq_no];
 	if (iq) {
 		/* Process iq buffers with in the budget limits */
-		tx_done = octeon_flush_iq(oct, iq, 1, budget);
+		tx_done = octeon_flush_iq(oct, iq, budget);
 		/* Update iq read-index rather than waiting for next interrupt.
 		 * Return back if tx_done is false.
 		 */
@@ -2451,8 +2446,12 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
 			__func__, iq_no);
 	}
 
-	if ((work_done < budget) && (tx_done)) {
-		napi_complete(napi);
+	/* force enable interrupt if reg cnts are high to avoid wraparound */
+	if ((work_done < budget && tx_done) ||
+	    (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
+	    (droq->pkt_count >= MAX_REG_CNT)) {
+		tx_done = 1;
+		napi_complete_done(napi, work_done);
 		octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
 					     POLL_EVENT_ENABLE_INTR, 0);
 		return 0;
@@ -2629,7 +2628,9 @@ static int liquidio_open(struct net_device *netdev)
 		oct->droq[0]->ops.poll_mode = 1;
 	}
 
-	oct_ptp_open(netdev);
+	if ((oct->chip_id == OCTEON_CN66XX || oct->chip_id == OCTEON_CN68XX) &&
+	    ptp_enable)
+		oct_ptp_open(netdev);
 
 	ifstate_set(lio, LIO_IFSTATE_RUNNING);
 
@@ -2677,13 +2678,7 @@ static int liquidio_stop(struct net_device *netdev)
 	lio->linfo.link.s.link_up = 0;
 	lio->link_changes++;
 
-	/* Pause for a moment and wait for Octeon to flush out (to the wire) any
-	 * egress packets that are in-flight.
-	 */
-	set_current_state(TASK_INTERRUPTIBLE);
-	schedule_timeout(msecs_to_jiffies(100));
-
-	/* Now it should be safe to tell Octeon that nic interface is down. */
+	/* Tell Octeon that nic interface is down. */
 	send_rx_ctrl_cmd(lio, 0);
 
 	if (OCTEON_CN23XX_PF(oct)) {
@@ -2973,9 +2968,13 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
  */
static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 {
+	struct lio *lio = GET_LIO(netdev);
+
 	switch (cmd) {
 	case SIOCSHWTSTAMP:
-		return hwtstamp_ioctl(netdev, ifr);
+		if ((lio->oct_dev->chip_id == OCTEON_CN66XX ||
+		     lio->oct_dev->chip_id == OCTEON_CN68XX) && ptp_enable)
+			return hwtstamp_ioctl(netdev, ifr);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -3322,11 +3321,11 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	netif_trans_update(netdev);
 
-	if (skb_shinfo(skb)->gso_size)
-		stats->tx_done += skb_shinfo(skb)->gso_segs;
+	if (tx_info->s.gso_segs)
+		stats->tx_done += tx_info->s.gso_segs;
 	else
 		stats->tx_done++;
-	stats->tx_tot_bytes += skb->len;
+	stats->tx_tot_bytes += ndata.datasize;
 
 	return NETDEV_TX_OK;
 
@@ -3741,7 +3740,6 @@ static const struct net_device_ops lionetdevops = {
 	.ndo_set_vf_vlan	= liquidio_set_vf_vlan,
 	.ndo_get_vf_config	= liquidio_get_vf_config,
 	.ndo_set_vf_link_state	= liquidio_set_vf_link_state,
-	.ndo_select_queue	= select_q
 };
 
 /** \brief Entry point for the liquidio module
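The r_dh_off bookkeeping above walks the Octeon receive data header from its last 8-byte unit backwards: the PTP timestamp (when present) is read first, then the offset drops by one unit to reach the big-endian flow hash, and finally all rh->r_dh.len * 8 header bytes are pulled before eth_type_trans(). A minimal userspace sketch of that arithmetic follows; the mock header layout (hash in the unit below the timestamp) is inferred from the offsets in the hunks, not from hardware documentation:

    /* Sketch of the new receive data-header walk; hypothetical layout. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <arpa/inet.h>              /* ntohl() stands in for be32_to_cpu() */

    #define BYTES_PER_DHLEN_UNIT 8

    int main(void)
    {
        /* Mock 3-unit header: unit 1 holds a big-endian hash,
         * unit 2 (the last) holds a 64-bit nanosecond timestamp.
         */
        uint8_t data[3 * BYTES_PER_DHLEN_UNIT] = { 0 };
        uint32_t hash_be = htonl(0xabcd1234);
        uint64_t ns = 123456789ULL;
        unsigned int len = 3, r_dh_off;

        memcpy(data + 1 * BYTES_PER_DHLEN_UNIT, &hash_be, sizeof(hash_be));
        memcpy(data + 2 * BYTES_PER_DHLEN_UNIT, &ns, sizeof(ns));

        /* Start at the last unit, as liquidio_push_packet() now does. */
        r_dh_off = (len - 1) * BYTES_PER_DHLEN_UNIT;

        memcpy(&ns, data + r_dh_off, sizeof(ns));          /* has_hwtstamp */
        r_dh_off -= BYTES_PER_DHLEN_UNIT;

        memcpy(&hash_be, data + r_dh_off, sizeof(hash_be)); /* has_hash */
        printf("ns=%llu hash=0x%x\n",
               (unsigned long long)ns, (unsigned)ntohl(hash_be));

        /* The driver then skb_pull()s all len * 8 header bytes. */
        return 0;
    }

The VF driver below performs the same walk, except that it only skips past the timestamp word instead of consuming it.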
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 70d96c10c673..9d5e03502c76 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -15,6 +15,7 @@
  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
  * NONINFRINGEMENT.  See the GNU General Public License for more details.
  ***********************************************************************/
+#include <linux/module.h>
 #include <linux/pci.h>
 #include <net/vxlan.h>
 #include "liquidio_common.h"
@@ -1455,26 +1456,6 @@ static void if_cfg_callback(struct octeon_device *oct,
 	wake_up_interruptible(&ctx->wc);
 }
 
-/**
- * \brief Select queue based on hash
- * @param dev Net device
- * @param skb sk_buff structure
- * @returns selected queue number
- */
-static u16 select_q(struct net_device *dev, struct sk_buff *skb,
-		    void *accel_priv __attribute__((unused)),
-		    select_queue_fallback_t fallback __attribute__((unused)))
-{
-	struct lio *lio;
-	u32 qindex;
-
-	lio = GET_LIO(dev);
-
-	qindex = skb_tx_hash(dev, skb);
-
-	return (u16)(qindex % (lio->linfo.num_txpciq));
-}
-
 /** Routine to push packets arriving on Octeon interface upto network layer.
  * @param oct_id   - octeon device id.
  * @param skbuff   - skbuff struct to be passed to network layer.
@@ -1497,6 +1478,7 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
 	struct net_device *netdev = (struct net_device *)arg;
 	struct sk_buff *skb = (struct sk_buff *)skbuff;
 	u16 vtag = 0;
+	u32 r_dh_off;
 
 	if (netdev) {
 		struct lio *lio = GET_LIO(netdev);
@@ -1540,7 +1522,20 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
 			put_page(pg_info->page);
 		}
 
-		skb_pull(skb, rh->r_dh.len * 8);
+		r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;
+
+		if (rh->r_dh.has_hwtstamp)
+			r_dh_off -= BYTES_PER_DHLEN_UNIT;
+
+		if (rh->r_dh.has_hash) {
+			__be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
+			u32 hash = be32_to_cpu(*hash_be);
+
+			skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
+			r_dh_off -= BYTES_PER_DHLEN_UNIT;
+		}
+
+		skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
 
 		skb->protocol = eth_type_trans(skb, skb->dev);
 		if ((netdev->features & NETIF_F_RXCSUM) &&
@@ -1577,7 +1572,6 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
 	if (packet_was_received) {
 		droq->stats.rx_bytes_received += len;
 		droq->stats.rx_pkts_received++;
-		netdev->last_rx = jiffies;
 	} else {
 		droq->stats.rx_dropped++;
 		netif_info(lio, rx_err, lio->netdev,
@@ -1627,7 +1621,7 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
 	iq = oct->instr_queue[iq_no];
 	if (iq) {
 		/* Process iq buffers with in the budget limits */
-		tx_done = octeon_flush_iq(oct, iq, 1, budget);
+		tx_done = octeon_flush_iq(oct, iq, budget);
 		/* Update iq read-index rather than waiting for next interrupt.
 		 * Return back if tx_done is false.
 		 */
@@ -1637,8 +1631,12 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
 			__func__, iq_no);
 	}
 
-	if ((work_done < budget) && (tx_done)) {
-		napi_complete(napi);
+	/* force enable interrupt if reg cnts are high to avoid wraparound */
+	if ((work_done < budget && tx_done) ||
+	    (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
+	    (droq->pkt_count >= MAX_REG_CNT)) {
+		tx_done = 1;
+		napi_complete_done(napi, work_done);
 		octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
 					     POLL_EVENT_ENABLE_INTR, 0);
 		return 0;
@@ -2440,11 +2438,11 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	netif_trans_update(netdev);
 
-	if (skb_shinfo(skb)->gso_size)
-		stats->tx_done += skb_shinfo(skb)->gso_segs;
+	if (tx_info->s.gso_segs)
+		stats->tx_done += tx_info->s.gso_segs;
 	else
 		stats->tx_done++;
-	stats->tx_tot_bytes += skb->len;
+	stats->tx_tot_bytes += ndata.datasize;
 
 	return NETDEV_TX_OK;
 
@@ -2703,7 +2701,6 @@ static const struct net_device_ops lionetdevops = {
 	.ndo_set_features	= liquidio_set_features,
 	.ndo_udp_tunnel_add	= liquidio_add_vxlan_port,
 	.ndo_udp_tunnel_del	= liquidio_del_vxlan_port,
-	.ndo_select_queue	= select_q,
 };
 
 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index ba329f6ca779..294c6f3c6b48 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -98,6 +98,9 @@ enum octeon_tag_type {
 #define CVM_DRV_INVALID_APP	(CVM_DRV_APP_START + 0x2)
 #define CVM_DRV_APP_END		(CVM_DRV_INVALID_APP - 1)
 
+#define BYTES_PER_DHLEN_UNIT	8
+#define MAX_REG_CNT		2000000U
+
 static inline u32 incr_index(u32 index, u32 count, u32 max)
 {
 	if ((index + count) >= max)
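MAX_REG_CNT backs the new early-exit clause in both NAPI poll routines: the packet/instruction counter registers are only written back and cleared on the interrupt-enable path (lio_enable_irq()), so under sustained polling the driver now forces that path once either accumulated count crosses two million, per the "avoid wraparound" comment in the hunks above. A small simulation of just that decision; the traffic loop and its per-poll increment of 64 are invented for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_REG_CNT 2000000U

    /* Mirrors the exit condition added to liquidio_napi_poll(). */
    static bool must_exit_poll(int work_done, int budget, bool tx_done,
                               unsigned int pkt_in_done, unsigned int pkt_count)
    {
        return (work_done < budget && tx_done) ||
               pkt_in_done >= MAX_REG_CNT ||
               pkt_count >= MAX_REG_CNT;
    }

    int main(void)
    {
        /* Busy queue: budget always exhausted, tx never done, yet polling
         * still ends (letting lio_enable_irq() write back and clear the
         * hardware counters) once the accumulated counts get large.
         */
        unsigned int pkt_in_done = 0, pkt_count = 0;
        int polls = 0;

        while (!must_exit_poll(64, 64, false, pkt_in_done, pkt_count)) {
            pkt_in_done += 64;   /* instructions fetched since last clear */
            pkt_count += 64;     /* packets received since last clear */
            polls++;
        }
        printf("forced interrupt re-enable after %d polls\n", polls);
        return 0;
    }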
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
index 1cb3514fc949..b3dc2e9651a8 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
@@ -429,15 +429,11 @@ struct octeon_config {
 
 /* The following config values are fixed and should not be modified. */
 
-/* Maximum address space to be mapped for Octeon's BAR1 index-based access. */
-#define  MAX_BAR1_MAP_INDEX		2
+#define  BAR1_INDEX_DYNAMIC_MAP		2
+#define  BAR1_INDEX_STATIC_MAP		15
 #define  OCTEON_BAR1_ENTRY_SIZE		(4 * 1024 * 1024)
 
-/* BAR1 Index 0 to (MAX_BAR1_MAP_INDEX - 1) for normal mapped memory access.
- * Bar1 register at MAX_BAR1_MAP_INDEX used by driver for dynamic access.
- */
-#define  MAX_BAR1_IOREMAP_SIZE		((MAX_BAR1_MAP_INDEX + 1) * \
-					 OCTEON_BAR1_ENTRY_SIZE)
+#define  MAX_BAR1_IOREMAP_SIZE		(16 * OCTEON_BAR1_ENTRY_SIZE)
 
 /* Response lists - 1 ordered, 1 unordered-blocking, 1 unordered-nonblocking
 * NoResponse Lists are now maintained with each IQ. (Dec' 2007).
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
index 3265e0b7923e..53f38d05f7c2 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
@@ -18,6 +18,7 @@
 /**
  * @file octeon_console.c
  */
+#include <linux/moduleparam.h>
 #include <linux/pci.h>
 #include <linux/netdevice.h>
 #include <linux/crc32.h>
@@ -549,6 +550,16 @@ int octeon_init_consoles(struct octeon_device *oct)
 		return ret;
 	}
 
+	/* Dedicate one of Octeon's BAR1 index registers to create a static
+	 * mapping to a region of Octeon DRAM that contains the PCI console
+	 * named block.
+	 */
+	oct->console_nb_info.bar1_index = BAR1_INDEX_STATIC_MAP;
+	oct->fn_list.bar1_idx_setup(oct, addr, oct->console_nb_info.bar1_index,
+				    true);
+	oct->console_nb_info.dram_region_base = addr
+		& ~(OCTEON_BAR1_ENTRY_SIZE - 1ULL);
+
 	/* num_consoles > 0, is an indication that the consoles
 	 * are accessible
 	 */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index a8df493a5012..9675ffbf25e6 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -1361,6 +1361,8 @@ void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
 		spin_lock_bh(&droq->lock);
 		writel(droq->pkt_count, droq->pkts_sent_reg);
 		droq->pkt_count = 0;
+		/* this write needs to be flushed before we release the lock */
+		mmiowb();
 		spin_unlock_bh(&droq->lock);
 		oct = droq->oct_dev;
 	}
@@ -1368,6 +1370,8 @@ void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
 		spin_lock_bh(&iq->lock);
 		writel(iq->pkt_in_done, iq->inst_cnt_reg);
 		iq->pkt_in_done = 0;
+		/* this write needs to be flushed before we release the lock */
+		mmiowb();
 		spin_unlock_bh(&iq->lock);
 		oct = iq->oct_dev;
 	}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index 18f6836250a6..c301a3852482 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -477,6 +477,12 @@ struct octeon_device {
 	/* Console caches */
 	struct octeon_console console[MAX_OCTEON_MAPS];
 
+	/* Console named block info */
+	struct {
+		u64 dram_region_base;
+		int bar1_index;
+	} console_nb_info;
+
 	/* Coprocessor clock rate. */
 	u64 coproc_clock_rate;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index e04ca8f0b4a7..4608a5af35a3 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -369,5 +369,5 @@ int octeon_setup_iq(struct octeon_device *oct, int ifidx,
 		    void *app_ctx);
 int
 octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
-		u32 pending_thresh, u32 napi_budget);
+		u32 napi_budget);
 #endif				/* __OCTEON_IQ_H__ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
index 73696b427f06..201b9875f9bb 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
@@ -131,6 +131,7 @@ int octeon_mbox_write(struct octeon_device *oct,
 {
 	struct octeon_mbox *mbox = oct->mbox[mbox_cmd->q_no];
 	u32 count, i, ret = OCTEON_MBOX_STATUS_SUCCESS;
+	long timeout = LIO_MBOX_WRITE_WAIT_TIME;
 	unsigned long flags;
 
 	spin_lock_irqsave(&mbox->lock, flags);
@@ -158,7 +159,7 @@ int octeon_mbox_write(struct octeon_device *oct,
 		count = 0;
 		while (readq(mbox->mbox_write_reg) != OCTEON_PFVFSIG) {
-			schedule_timeout_uninterruptible(LIO_MBOX_WRITE_WAIT_TIME);
+			schedule_timeout_uninterruptible(timeout);
 			if (count++ == LIO_MBOX_WRITE_WAIT_CNT) {
 				ret = OCTEON_MBOX_STATUS_FAILED;
 				break;
@@ -171,7 +172,7 @@ int octeon_mbox_write(struct octeon_device *oct,
 			count = 0;
 			while (readq(mbox->mbox_write_reg) != OCTEON_PFVFACK) {
-				schedule_timeout_uninterruptible(10);
+				schedule_timeout_uninterruptible(timeout);
 				if (count++ == LIO_MBOX_WRITE_WAIT_CNT) {
 					ret = OCTEON_MBOX_STATUS_FAILED;
 					break;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
index fe60a3e6247b..c9376fe075bc 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
@@ -31,8 +31,8 @@
 #define OCTEON_PFVFSIG			0x1122334455667788
 #define OCTEON_PFVFERR			0xDEADDEADDEADDEAD
 
-#define LIO_MBOX_WRITE_WAIT_CNT		1000
-#define LIO_MBOX_WRITE_WAIT_TIME	10
+#define LIO_MBOX_WRITE_WAIT_CNT		1000
+#define LIO_MBOX_WRITE_WAIT_TIME	msecs_to_jiffies(1)
 
 enum octeon_mbox_cmd_status {
 	OCTEON_MBOX_STATUS_SUCCESS = 0,
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
index 13a18c9a7a51..5cd96e7d426c 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
@@ -23,7 +23,7 @@
 #include "response_manager.h"
 #include "octeon_device.h"
 
-#define MEMOPS_IDX   MAX_BAR1_MAP_INDEX
+#define MEMOPS_IDX   BAR1_INDEX_DYNAMIC_MAP
 
 #ifdef __BIG_ENDIAN_BITFIELD
 static inline void
@@ -96,6 +96,25 @@ __octeon_pci_rw_core_mem(struct octeon_device *oct, u64 addr,
 	u32 copy_len = 0, index_reg_val = 0;
 	unsigned long flags;
 	u8 __iomem *mapped_addr;
+	u64 static_mapping_base;
+
+	static_mapping_base = oct->console_nb_info.dram_region_base;
+
+	if (static_mapping_base &&
+	    static_mapping_base == (addr & ~(OCTEON_BAR1_ENTRY_SIZE - 1ULL))) {
+		int bar1_index = oct->console_nb_info.bar1_index;
+
+		mapped_addr = oct->mmio[1].hw_addr
+			      + (bar1_index << ilog2(OCTEON_BAR1_ENTRY_SIZE))
+			      + (addr & (OCTEON_BAR1_ENTRY_SIZE - 1ULL));
+
+		if (op)
+			octeon_pci_fastread(oct, mapped_addr, hostbuf, len);
+		else
+			octeon_pci_fastwrite(oct, mapped_addr, hostbuf, len);
+
+		return;
+	}
 
 	spin_lock_irqsave(&oct->mem_access_lock, flags);
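The console fast path above is pure BAR1 window arithmetic: each BAR1 index register maps one 4 MB window of Octeon DRAM, so a core address splits into a window-aligned region base (compared against console_nb_info.dram_region_base) and a window-relative offset added to index << ilog2(4 MB). A userspace sketch of just that math, with a made-up named-block address:

    #include <stdint.h>
    #include <stdio.h>

    #define OCTEON_BAR1_ENTRY_SIZE (4 * 1024 * 1024)
    #define BAR1_INDEX_STATIC_MAP  15

    /* Local stand-in for the kernel's ilog2(); ilog2(4 MB) == 22. */
    static unsigned int ilog2_u64(uint64_t v)
    {
        unsigned int r = 0;
        while (v >>= 1)
            r++;
        return r;
    }

    int main(void)
    {
        uint64_t addr = 0x418003a8ULL;   /* hypothetical console named block */
        uint64_t region_base = addr & ~(uint64_t)(OCTEON_BAR1_ENTRY_SIZE - 1);
        uint64_t bar1_offset =
            ((uint64_t)BAR1_INDEX_STATIC_MAP
                << ilog2_u64(OCTEON_BAR1_ENTRY_SIZE))
            + (addr & (OCTEON_BAR1_ENTRY_SIZE - 1));

        printf("region base 0x%llx, offset into BAR1 0x%llx\n",
               (unsigned long long)region_base,
               (unsigned long long)bar1_offset);
        return 0;
    }

Keeping index 15 permanently pointed at the console region is what lets __octeon_pci_rw_core_mem() skip the lock-and-reprogram dance it still does (via BAR1_INDEX_DYNAMIC_MAP) for all other addresses.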
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
index c3d6a8228362..0243be8dd56f 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
@@ -49,7 +49,7 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
 	/* Add in the response related fields. Opcode and Param are already
 	 * there.
 	 */
-	if (OCTEON_CN23XX_PF(oct)) {
+	if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
 		ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
 		rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
 		irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
@@ -70,7 +70,7 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
 
 	*sc->status_word = COMPLETION_WORD_INIT;
 
-	if (OCTEON_CN23XX_PF(oct))
+	if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct))
 		sc->cmd.cmd3.rptr = sc->dmarptr;
 	else
 		sc->cmd.cmd2.rptr = sc->dmarptr;
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 3ce66759e80a..707bc15adec6 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -455,7 +455,7 @@ lio_process_iq_request_list(struct octeon_device *oct,
 /* Can only be called from process context */
 int
 octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
-		u32 pending_thresh, u32 napi_budget)
+		u32 napi_budget)
 {
 	u32 inst_processed = 0;
 	u32 tot_inst_processed = 0;
@@ -468,33 +468,32 @@ octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
 
 	iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);
 
-	if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) {
-		do {
-			/* Process any outstanding IQ packets. */
-			if (iq->flush_index == iq->octeon_read_index)
-				break;
-
-			if (napi_budget)
-				inst_processed = lio_process_iq_request_list
-					(oct, iq,
-					 napi_budget - tot_inst_processed);
-			else
-				inst_processed =
-					lio_process_iq_request_list(oct, iq, 0);
+	do {
+		/* Process any outstanding IQ packets. */
+		if (iq->flush_index == iq->octeon_read_index)
+			break;
 
-			if (inst_processed) {
-				atomic_sub(inst_processed, &iq->instr_pending);
-				iq->stats.instr_processed += inst_processed;
-			}
+		if (napi_budget)
+			inst_processed =
+				lio_process_iq_request_list(oct, iq,
+							    napi_budget -
+							    tot_inst_processed);
+		else
+			inst_processed =
+				lio_process_iq_request_list(oct, iq, 0);
+
+		if (inst_processed) {
+			atomic_sub(inst_processed, &iq->instr_pending);
+			iq->stats.instr_processed += inst_processed;
+		}
 
-			tot_inst_processed += inst_processed;
-			inst_processed = 0;
+		tot_inst_processed += inst_processed;
+		inst_processed = 0;
 
-		} while (tot_inst_processed < napi_budget);
+	} while (tot_inst_processed < napi_budget);
 
-		if (napi_budget && (tot_inst_processed >= napi_budget))
-			tx_done = 0;
-	}
+	if (napi_budget && (tot_inst_processed >= napi_budget))
+		tx_done = 0;
 
 	iq->last_db_time = jiffies;
 
@@ -530,7 +529,7 @@ static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
 	iq->last_db_time = jiffies;
 
 	/* Flush the instruction queue */
-	octeon_flush_iq(oct, iq, 1, 0);
+	octeon_flush_iq(oct, iq, 0);
 
 	lio_enable_irq(NULL, iq);
 }
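With pending_thresh gone, octeon_flush_iq() always enters the drain loop and relies only on the queue indices and the NAPI budget to stop. A rough userspace model of the new control flow; the queue state is faked, and the napi_budget == 0 "flush everything" case is left out for brevity:

    #include <stdio.h>

    /* Stand-in for lio_process_iq_request_list(): completes up to 'room'
     * of the pending entries and reports how many it handled.
     */
    static unsigned int process_batch(unsigned int pending, unsigned int room)
    {
        return pending < room ? pending : room;
    }

    int main(void)
    {
        unsigned int pending = 100, budget = 64;
        unsigned int tot = 0, done;
        int tx_done = 1;

        do {
            if (!pending)               /* flush_index == octeon_read_index */
                break;
            done = process_batch(pending, budget - tot);
            pending -= done;
            tot += done;
        } while (tot < budget);

        if (budget && tot >= budget)    /* budget spent: poll again later */
            tx_done = 0;

        printf("processed %u, tx_done=%d\n", tot, tx_done);
        return 0;
    }

A tx_done of 0 is what keeps liquidio_napi_poll() (above) from calling napi_complete_done(), so the remaining completions are picked up on the next poll.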
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 21f80f5744ba..a2138686c605 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -501,7 +501,7 @@ static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
 
 	if (work_done < budget) {
 		/* We stopped because no more packets were available. */
-		napi_complete(napi);
+		napi_complete_done(napi, work_done);
 		octeon_mgmt_enable_rx_irq(p);
 	}
 	octeon_mgmt_update_rx_stats(netdev);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 2e74bbaa38e1..02a986cdbb39 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -471,12 +471,46 @@ static void nicvf_get_ringparam(struct net_device *netdev,
 	struct nicvf *nic = netdev_priv(netdev);
 	struct queue_set *qs = nic->qs;
 
-	ring->rx_max_pending = MAX_RCV_BUF_COUNT;
-	ring->rx_pending = qs->rbdr_len;
+	ring->rx_max_pending = MAX_CMP_QUEUE_LEN;
+	ring->rx_pending = qs->cq_len;
 	ring->tx_max_pending = MAX_SND_QUEUE_LEN;
 	ring->tx_pending = qs->sq_len;
 }
 
+static int nicvf_set_ringparam(struct net_device *netdev,
+			       struct ethtool_ringparam *ring)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+	struct queue_set *qs = nic->qs;
+	u32 rx_count, tx_count;
+
+	/* Due to HW errata this is not supported on T88 pass 1.x silicon */
+	if (pass1_silicon(nic->pdev))
+		return -EINVAL;
+
+	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+		return -EINVAL;
+
+	tx_count = clamp_t(u32, ring->tx_pending,
+			   MIN_SND_QUEUE_LEN, MAX_SND_QUEUE_LEN);
+	rx_count = clamp_t(u32, ring->rx_pending,
+			   MIN_CMP_QUEUE_LEN, MAX_CMP_QUEUE_LEN);
+
+	if ((tx_count == qs->sq_len) && (rx_count == qs->cq_len))
+		return 0;
+
+	/* Permitted lengths are 1K, 2K, 4K, 8K, 16K, 32K, 64K */
+	qs->sq_len = rounddown_pow_of_two(tx_count);
+	qs->cq_len = rounddown_pow_of_two(rx_count);
+
+	if (netif_running(netdev)) {
+		nicvf_stop(netdev);
+		nicvf_open(netdev);
+	}
+
+	return 0;
+}
+
 static int nicvf_get_rss_hash_opts(struct nicvf *nic,
 				   struct ethtool_rxnfc *info)
 {
@@ -635,7 +669,7 @@ static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey,
 }
 
 static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
-			  const u8 *hkey, u8 hfunc)
+			  const u8 *hkey, const u8 hfunc)
 {
 	struct nicvf *nic = netdev_priv(dev);
 	struct nicvf_rss_info *rss = &nic->rss_info;
@@ -787,6 +821,7 @@ static const struct ethtool_ops nicvf_ethtool_ops = {
 	.get_regs		= nicvf_get_regs,
 	.get_coalesce		= nicvf_get_coalesce,
 	.get_ringparam		= nicvf_get_ringparam,
+	.set_ringparam		= nicvf_set_ringparam,
 	.get_rxnfc		= nicvf_get_rxnfc,
 	.set_rxnfc		= nicvf_set_rxnfc,
 	.get_rxfh_key_size	= nicvf_get_rxfh_key_size,
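nicvf_set_ringparam() accepts any requested count and quietly snaps it to supported geometry: clamp into the 1K..64K range, then round down to a power of two, matching the "Permitted lengths" comment. A quick userspace check of that sanitizing, where clamp_u32() and rounddown_pow_of_two() are local stand-ins for the kernel helpers:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
    {
        return v < lo ? lo : (v > hi ? hi : v);
    }

    static uint32_t rounddown_pow_of_two(uint32_t v)
    {
        while (v & (v - 1))
            v &= v - 1;    /* clear lowest set bit until one bit remains */
        return v;
    }

    int main(void)
    {
        uint32_t min_len = 1024, max_len = 65536;
        uint32_t requests[] = { 100, 3000, 40000, 1000000 };

        for (unsigned int i = 0; i < 4; i++) {
            uint32_t q = rounddown_pow_of_two(
                    clamp_u32(requests[i], min_len, max_len));
            printf("request %7u -> queue len %u\n", requests[i], q);
        }
        return 0;
    }

So a request of 3000 yields 2048 descriptors and 1000000 yields 65536; the user sees the effective values on the next ethtool -g.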
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 2006f58b14b1..6feaa24bcfd4 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -749,7 +749,7 @@ static int nicvf_poll(struct napi_struct *napi, int budget)
 
 	if (work_done < budget) {
 		/* Slow packet rate, exit polling */
-		napi_complete(napi);
+		napi_complete_done(napi, work_done);
 		/* Re-enable interrupts */
 		cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
 					       cq->cq_idx);
@@ -1274,7 +1274,8 @@ int nicvf_open(struct net_device *netdev)
 	/* Configure receive side scaling and MTU */
 	if (!nic->sqs_mode) {
 		nicvf_rss_init(nic);
-		if (nicvf_update_hw_max_frs(nic, netdev->mtu))
+		err = nicvf_update_hw_max_frs(nic, netdev->mtu);
+		if (err)
 			goto cleanup;
 
 		/* Clear percpu stats */
@@ -1461,8 +1462,8 @@ void nicvf_update_stats(struct nicvf *nic)
 		nicvf_update_sq_stats(nic, qidx);
 }
 
-static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
-					    struct rtnl_link_stats64 *stats)
+static void nicvf_get_stats64(struct net_device *netdev,
+			      struct rtnl_link_stats64 *stats)
 {
 	struct nicvf *nic = netdev_priv(netdev);
 	struct nicvf_hw_stats *hw_stats = &nic->hw_stats;
@@ -1478,7 +1479,6 @@ static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
 	stats->tx_packets = hw_stats->tx_frames;
 	stats->tx_dropped = hw_stats->tx_drops;
 
-	return stats;
 }
 
 static void nicvf_tx_timeout(struct net_device *dev)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index d2ac133e36f1..ac0390be3b12 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -603,7 +603,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
 	cq_cfg.ena = 1;
 	cq_cfg.reset = 0;
 	cq_cfg.caching = 0;
-	cq_cfg.qsize = CMP_QSIZE;
+	cq_cfg.qsize = ilog2(qs->cq_len >> 10);
 	cq_cfg.avg_con = 0;
 	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);
 
@@ -652,9 +652,12 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
 	sq_cfg.ena = 1;
 	sq_cfg.reset = 0;
 	sq_cfg.ldwb = 0;
-	sq_cfg.qsize = SND_QSIZE;
+	sq_cfg.qsize = ilog2(qs->sq_len >> 10);
 	sq_cfg.tstmp_bgx_intf = 0;
-	sq_cfg.cq_limit = 0;
+	/* CQ's level at which HW will stop processing SQEs to avoid
+	 * transmitting a pkt with no space in CQ to post CQE_TX.
+	 */
+	sq_cfg.cq_limit = (CMP_QUEUE_PIPELINE_RSVD * 256) / qs->cq_len;
 	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);
 
 	/* Set threshold value for interrupt generation */
@@ -816,11 +819,21 @@ int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
 {
 	bool disable = false;
 	struct queue_set *qs = nic->qs;
+	struct queue_set *pqs = nic->pnicvf->qs;
 	int qidx;
 
 	if (!qs)
 		return 0;
 
+	/* Take primary VF's queue lengths.
+	 * This is needed to take queue lengths set from ethtool
+	 * into consideration.
+	 */
+	if (nic->sqs_mode && pqs) {
+		qs->cq_len = pqs->cq_len;
+		qs->sq_len = pqs->sq_len;
+	}
+
 	if (enable) {
 		if (nicvf_alloc_resources(nic))
 			return -ENOMEM;
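Two encodings drive the queue-config hunks above: the hardware qsize field stores ilog2(len >> 10) (so 1K maps to 0 and 64K to 6), and cq_limit expresses the CMP_QUEUE_PIPELINE_RSVD CQE headroom as a fraction of the CQ length in 1/256 units; that fractional interpretation is inferred from the formula, not from hardware documentation. Worked out for a few lengths:

    #include <stdio.h>

    #define CMP_QUEUE_PIPELINE_RSVD 544

    int main(void)
    {
        unsigned int lens[] = { 1024, 4096, 65536 };

        for (unsigned int i = 0; i < 3; i++) {
            unsigned int len = lens[i];
            unsigned int qsize = 0, v = len >> 10;

            while (v >>= 1)          /* ilog2(len >> 10) */
                qsize++;

            printf("len %6u -> qsize %u, cq_limit %u/256\n",
                   len, qsize,
                   (CMP_QUEUE_PIPELINE_RSVD * 256) / len);
        }
        return 0;
    }

A larger CQ therefore gets a proportionally smaller cq_limit (136/256 at 1K, 2/256 at 64K), keeping the same absolute 544-CQE reserve for in-flight CQE_TX postings.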
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 9e2104675bc9..5cb84da99a2d 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -59,8 +59,9 @@
 /* Default queue count per QS, its lengths and threshold values */
 #define DEFAULT_RBDR_CNT	1
 
-#define SND_QSIZE		SND_QUEUE_SIZE2
+#define SND_QSIZE		SND_QUEUE_SIZE0
 #define SND_QUEUE_LEN		(1ULL << (SND_QSIZE + 10))
+#define MIN_SND_QUEUE_LEN	(1ULL << (SND_QUEUE_SIZE0 + 10))
 #define MAX_SND_QUEUE_LEN	(1ULL << (SND_QUEUE_SIZE6 + 10))
 #define SND_QUEUE_THRESH	2ULL
 #define MIN_SQ_DESC_PER_PKT_XMIT	2
@@ -70,11 +71,18 @@
 /* Keep CQ and SQ sizes same, if timestamping
  * is enabled this equation will change.
  */
-#define CMP_QSIZE		CMP_QUEUE_SIZE2
+#define CMP_QSIZE		CMP_QUEUE_SIZE0
 #define CMP_QUEUE_LEN		(1ULL << (CMP_QSIZE + 10))
+#define MIN_CMP_QUEUE_LEN	(1ULL << (CMP_QUEUE_SIZE0 + 10))
+#define MAX_CMP_QUEUE_LEN	(1ULL << (CMP_QUEUE_SIZE6 + 10))
 #define CMP_QUEUE_CQE_THRESH	(NAPI_POLL_WEIGHT / 2)
 #define CMP_QUEUE_TIMER_THRESH	80 /* ~2usec */
 
+/* No of CQEs that might anyway gets used by HW due to pipelining
 * effects irrespective of PASS/DROP/LEVELS being configured
 */
+#define CMP_QUEUE_PIPELINE_RSVD 544
+
 #define RBDR_SIZE		RBDR_SIZE0
 #define RCV_BUF_COUNT		(1ULL << (RBDR_SIZE + 13))
 #define MAX_RCV_BUF_COUNT	(1ULL << (RBDR_SIZE6 + 13))
@@ -93,8 +101,8 @@
  * RED accepts pkt if unused CQE < 2304 & >= 2560
  * DROPs pkts if unused CQE < 2304
 */
-#define RQ_PASS_CQ_LVL		160ULL
-#define RQ_DROP_CQ_LVL		144ULL
+#define RQ_PASS_CQ_LVL		192ULL
+#define RQ_DROP_CQ_LVL		184ULL
 
 /* RED and Backpressure levels of RBDR for pkt reception
 * For RBDR, level is a measure of fullness i.e 0x0 means empty
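The retained context comment ("RED accepts pkt if unused CQE ... >= 2560, DROPs ... < 2304") describes the old 160/144 values, which implies the levels are programmed in units of 16 unused CQEs; by the same rule the new 192/184 thresholds land at 3072 and 2944. That unit is an inference from the comment, not from hardware documentation:

    #include <stdio.h>

    int main(void)
    {
        /* { RQ_PASS_CQ_LVL, RQ_DROP_CQ_LVL }: old then new values */
        unsigned int lvls[][2] = { { 160, 144 }, { 192, 184 } };

        for (int i = 0; i < 2; i++)
            printf("pass lvl %3u -> %u unused CQEs, drop lvl %3u -> %u\n",
                   lvls[i][0], lvls[i][0] * 16,
                   lvls[i][1], lvls[i][1] * 16);
        return 0;
    }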
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 1e4695270da6..4c8e8cf730bb 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -978,17 +978,15 @@ static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
 	struct device *dev = &bgx->pdev->dev;
 	struct lmac *lmac;
 	char str[20];
-	u8 dlm;
 
-	if (lmacid > bgx->max_lmac)
+	if (!bgx->is_dlm && lmacid)
 		return;
 
 	lmac = &bgx->lmac[lmacid];
-	dlm = (lmacid / 2) + (bgx->bgx_id * 2);
 	if (!bgx->is_dlm)
 		sprintf(str, "BGX%d QLM mode", bgx->bgx_id);
 	else
-		sprintf(str, "BGX%d DLM%d mode", bgx->bgx_id, dlm);
+		sprintf(str, "BGX%d LMAC%d mode", bgx->bgx_id, lmacid);
 
 	switch (lmac->lmac_type) {
 	case BGX_MODE_SGMII:
@@ -1074,7 +1072,6 @@ static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid)
 static void bgx_set_lmac_config(struct bgx *bgx, u8 idx)
 {
 	struct lmac *lmac;
-	struct lmac *olmac;
 	u64 cmr_cfg;
 	u8 lmac_type;
 	u8 lane_to_sds;
@@ -1094,62 +1091,26 @@ static void bgx_set_lmac_config(struct bgx *bgx, u8 idx)
 		return;
 	}
 
-	/* On 81xx BGX can be split across 2 DLMs
-	 * firmware programs lmac_type of LMAC0 and LMAC2
+	/* For DLMs or SLMs on 80/81/83xx so many lane configurations
+	 * are possible and vary across boards. Also Kernel doesn't have
+	 * any way to identify board type/info and since firmware does,
+	 * just take lmac type and serdes lane config as is.
 	 */
-	if ((idx == 0) || (idx == 2)) {
-		cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG);
-		lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
-		lane_to_sds = (u8)(cmr_cfg & 0xFF);
-		/* Check if config is not reset value */
-		if ((lmac_type == 0) && (lane_to_sds == 0xE4))
-			lmac->lmac_type = BGX_MODE_INVALID;
-		else
-			lmac->lmac_type = lmac_type;
-		lmac_set_training(bgx, lmac, lmac->lmacid);
-		lmac_set_lane2sds(bgx, lmac);
-
-		olmac = &bgx->lmac[idx + 1];
-		/* Check if other LMAC on the same DLM is already configured by
-		 * firmware, if so use the same config or else set as same, as
-		 * that of LMAC 0/2.
-		 * This check is needed as on 80xx only one lane of each of the
-		 * DLM of BGX0 is used, so have to rely on firmware for
-		 * distingushing 80xx from 81xx.
-		 */
-		cmr_cfg = bgx_reg_read(bgx, idx + 1, BGX_CMRX_CFG);
-		lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
-		lane_to_sds = (u8)(cmr_cfg & 0xFF);
-		if ((lmac_type == 0) && (lane_to_sds == 0xE4)) {
-			olmac->lmac_type = lmac->lmac_type;
-			lmac_set_lane2sds(bgx, olmac);
-		} else {
-			olmac->lmac_type = lmac_type;
-			olmac->lane_to_sds = lane_to_sds;
-		}
-		lmac_set_training(bgx, olmac, olmac->lmacid);
-	}
-}
-
-static bool is_dlm0_in_bgx_mode(struct bgx *bgx)
-{
-	struct lmac *lmac;
-
-	if (!bgx->is_dlm)
-		return true;
-
-	lmac = &bgx->lmac[0];
-	if (lmac->lmac_type == BGX_MODE_INVALID)
-		return false;
-
-	return true;
+	cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG);
+	lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
+	lane_to_sds = (u8)(cmr_cfg & 0xFF);
+	/* Check if config is reset value */
+	if ((lmac_type == 0) && (lane_to_sds == 0xE4))
+		lmac->lmac_type = BGX_MODE_INVALID;
+	else
+		lmac->lmac_type = lmac_type;
+	lmac->lane_to_sds = lane_to_sds;
+	lmac_set_training(bgx, lmac, lmac->lmacid);
 }
 
 static void bgx_get_qlm_mode(struct bgx *bgx)
 {
 	struct lmac *lmac;
-	struct lmac *lmac01;
-	struct lmac *lmac23;
 	u8 idx;
 
 	/* Init all LMAC's type to invalid */
@@ -1165,29 +1126,9 @@ static void bgx_get_qlm_mode(struct bgx *bgx)
 	if (bgx->lmac_count > bgx->max_lmac)
 		bgx->lmac_count = bgx->max_lmac;
 
-	for (idx = 0; idx < bgx->max_lmac; idx++)
-		bgx_set_lmac_config(bgx, idx);
-
-	if (!bgx->is_dlm || bgx->is_rgx) {
-		bgx_print_qlm_mode(bgx, 0);
-		return;
-	}
-
-	if (bgx->lmac_count) {
-		bgx_print_qlm_mode(bgx, 0);
-		bgx_print_qlm_mode(bgx, 2);
-	}
-
-	/* If DLM0 is not in BGX mode then LMAC0/1 have
-	 * to be configured with serdes lanes of DLM1
-	 */
-	if (is_dlm0_in_bgx_mode(bgx) || (bgx->lmac_count > 2))
-		return;
 	for (idx = 0; idx < bgx->lmac_count; idx++) {
-		lmac01 = &bgx->lmac[idx];
-		lmac23 = &bgx->lmac[idx + 2];
-		lmac01->lmac_type = lmac23->lmac_type;
-		lmac01->lane_to_sds = lmac23->lane_to_sds;
+		bgx_set_lmac_config(bgx, idx);
+		bgx_print_qlm_mode(bgx, idx);
 	}
 }
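bgx_set_lmac_config() now takes whatever the firmware programmed: per the masks in the hunk above, CMR_CFG bits 10:8 carry the LMAC type and bits 7:0 the lane-to-serdes map, with type 0 plus lane map 0xE4 treated as the reset (unconfigured) value. A decode sketch with one reset value and one made-up configured register value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Reset value, then a hypothetical firmware-programmed config. */
        uint64_t cmr_cfgs[] = { 0x0E4, 0x4D0 };

        for (int i = 0; i < 2; i++) {
            uint8_t lmac_type = (cmr_cfgs[i] >> 8) & 0x07;
            uint8_t lane_to_sds = cmr_cfgs[i] & 0xFF;

            if (lmac_type == 0 && lane_to_sds == 0xE4)
                printf("cfg 0x%03llx: LMAC not configured by firmware\n",
                       (unsigned long long)cmr_cfgs[i]);
            else
                printf("cfg 0x%03llx: type %u, lane_to_sds 0x%02x\n",
                       (unsigned long long)cmr_cfgs[i],
                       lmac_type, lane_to_sds);
        }
        return 0;
    }

Dropping the per-DLM pairing heuristics in favor of the firmware's own per-LMAC programming is what lets one code path serve the 80xx, 81xx, and 83xx board variants.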