Diffstat (limited to 'drivers/net/ethernet/cavium/thunder')
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic.h           |  11
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_main.c      |  33
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c |  16
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c    |  29
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c  | 213
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.h  |  14
-rw-r--r--  drivers/net/ethernet/cavium/thunder/q_struct.h      |  30
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c   |  28
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.h   |   2
9 files changed, 178 insertions(+), 198 deletions(-)
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index d3950b20feb9..688828865c48 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -120,10 +120,9 @@
* Calculated for SCLK of 700MHz
* value written should be a 1/16th of what is expected
*
- * 1 tick per 0.05usec = value of 2.2
- * This 10% would be covered in CQ timer thresh value
+ * 1 tick per 0.025usec
*/
-#define NICPF_CLK_PER_INT_TICK 2
+#define NICPF_CLK_PER_INT_TICK 1
/* Time to wait before we decide that a SQ is stuck.
*
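A rough sanity check of the new value, assuming the 700 MHz SCLK and the 1/16 scaling described in the comment: one register unit covers 16 SCLK cycles, so a value of 1 gives 16 / 700 MHz ≈ 22.9 ns ≈ 0.025 usec per tick. Working the old comment backwards, an exact 0.05 usec tick would need 0.05 * 700 / 16 ≈ 2.2 units, which is where the removed "value of 2.2" came from before being rounded down to 2.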
@@ -266,6 +265,7 @@ struct nicvf {
u8 tns_mode:1;
u8 sqs_mode:1;
u8 loopback_supported:1;
+ bool hw_tso;
u16 mtu;
struct queue_set *qs;
#define MAX_SQS_PER_VF_SINGLE_NODE 5
@@ -490,6 +490,11 @@ static inline int nic_get_node_id(struct pci_dev *pdev)
return ((addr >> NIC_NODE_ID_SHIFT) & NIC_NODE_ID_MASK);
}
+static inline bool pass1_silicon(struct pci_dev *pdev)
+{
+ return pdev->revision < 8;
+}
+
int nicvf_set_real_num_queues(struct net_device *netdev,
int tx_queues, int rx_queues);
int nicvf_open(struct net_device *netdev);
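With pass1_silicon() moved into nic.h, both the PF and VF drivers can key errata workarounds off the PCI revision ID. A minimal usage sketch (the caller is hypothetical; the revision threshold is the one encoded above):

	/* Hypothetical caller: pass 1.x ThunderX parts report PCI revision < 8. */
	static void example_apply_errata(struct nicvf *nic)
	{
		if (pass1_silicon(nic->pdev)) {
			/* pass 1.x: fall back to software TSO */
			nic->hw_tso = false;
		} else {
			/* pass 2.x onwards: hardware TSO works */
			nic->hw_tso = true;
		}
	}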
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index c561fdcb79a7..4dded90076c8 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -54,11 +54,6 @@ struct nicpf {
bool irq_allocated[NIC_PF_MSIX_VECTORS];
};
-static inline bool pass1_silicon(struct nicpf *nic)
-{
- return nic->pdev->revision < 8;
-}
-
/* Supported devices */
static const struct pci_device_id nic_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_PF) },
@@ -122,7 +117,7 @@ static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
* when PF writes to MBOX(1), in next revisions when
* PF writes to MBOX(0)
*/
- if (pass1_silicon(nic)) {
+ if (pass1_silicon(nic->pdev)) {
/* see the comment for nic_reg_write()/nic_reg_read()
* functions above
*/
@@ -397,7 +392,7 @@ static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
padd = cpi % 8; /* 3 bits CS out of 6bits DSCP */
/* Leave RSS_SIZE as '0' to disable RSS */
- if (pass1_silicon(nic)) {
+ if (pass1_silicon(nic->pdev)) {
nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
(vnic << 24) | (padd << 16) |
(rssi_base + rssi));
@@ -467,7 +462,7 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
}
cpi_base = nic->cpi_base[cfg->vf_id];
- if (pass1_silicon(nic))
+ if (pass1_silicon(nic->pdev))
idx_addr = NIC_PF_CPI_0_2047_CFG;
else
idx_addr = NIC_PF_MPI_0_2047_CFG;
@@ -615,6 +610,21 @@ static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
return 0;
}
+static void nic_enable_vf(struct nicpf *nic, int vf, bool enable)
+{
+ int bgx, lmac;
+
+ nic->vf_enabled[vf] = enable;
+
+ if (vf >= nic->num_vf_en)
+ return;
+
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+ bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, enable);
+}
+
/* Interrupt handler to handle mailbox messages from VFs */
static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
{
@@ -714,14 +724,14 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
break;
case NIC_MBOX_MSG_CFG_DONE:
/* Last message of VF config msg sequence */
- nic->vf_enabled[vf] = true;
+ nic_enable_vf(nic, vf, true);
goto unlock;
case NIC_MBOX_MSG_SHUTDOWN:
/* First msg in VF teardown sequence */
- nic->vf_enabled[vf] = false;
if (vf >= nic->num_vf_en)
nic->sqs_used[vf - nic->num_vf_en] = false;
nic->pqs_vf[vf] = 0;
+ nic_enable_vf(nic, vf, false);
break;
case NIC_MBOX_MSG_ALLOC_SQS:
nic_alloc_sqs(nic, &mbx.sqs_alloc);
@@ -1074,8 +1084,7 @@ static void nic_remove(struct pci_dev *pdev)
if (nic->check_link) {
/* Destroy work Queue */
- cancel_delayed_work(&nic->dwork);
- flush_workqueue(nic->check_link);
+ cancel_delayed_work_sync(&nic->dwork);
destroy_workqueue(nic->check_link);
}
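The workqueue teardown change here (and the matching one in thunder_bgx.c below) is the standard fix for a cancellation race: cancel_delayed_work() returns without waiting for an already-running instance, and flush_workqueue() cannot stop that instance from re-queueing itself before destroy_workqueue() runs. A generic sketch of the safe pattern, with illustrative names:

	static void safe_teardown(struct delayed_work *dwork,
				  struct workqueue_struct *wq)
	{
		/* Cancels the work and waits for a running instance to
		 * finish; also prevents it from re-arming in the meantime.
		 */
		cancel_delayed_work_sync(dwork);
		/* Now guaranteed idle, so this cannot race. */
		destroy_workqueue(wq);
	}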
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index af54c10945c2..a12b2e38cf61 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -112,6 +112,13 @@ static int nicvf_get_settings(struct net_device *netdev,
cmd->supported = 0;
cmd->transceiver = XCVR_EXTERNAL;
+
+ if (!nic->link_up) {
+ cmd->duplex = DUPLEX_UNKNOWN;
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ return 0;
+ }
+
if (nic->speed <= 1000) {
cmd->port = PORT_MII;
cmd->autoneg = AUTONEG_ENABLE;
@@ -125,6 +132,13 @@ static int nicvf_get_settings(struct net_device *netdev,
return 0;
}
+static u32 nicvf_get_link(struct net_device *netdev)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ return nic->link_up;
+}
+
static void nicvf_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
@@ -660,7 +674,7 @@ static int nicvf_set_channels(struct net_device *dev,
static const struct ethtool_ops nicvf_ethtool_ops = {
.get_settings = nicvf_get_settings,
- .get_link = ethtool_op_get_link,
+ .get_link = nicvf_get_link,
.get_drvinfo = nicvf_get_drvinfo,
.get_msglevel = nicvf_get_msglevel,
.set_msglevel = nicvf_set_msglevel,
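For comparison, the generic helper being replaced essentially reduces to the carrier flag:

	u32 ethtool_op_get_link(struct net_device *dev)
	{
		return netif_carrier_ok(dev) ? 1 : 0;
	}

Reporting the driver's own nic->link_up instead keeps the ethtool view consistent with the mailbox-driven link state, including across nicvf_stop(), which now clears it explicitly.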
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index a9377727c11c..c24cb2a86a42 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -525,14 +525,22 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
__func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
cqe_tx->sqe_ptr, hdr->subdesc_cnt);
- nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
- /* For TSO offloaded packets only one head SKB needs to be freed */
+ /* For TSO offloaded packets only one SQE will have a valid SKB */
if (skb) {
+ nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
prefetch(skb);
dev_consume_skb_any(skb);
sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
+ } else {
+ /* In case of HW TSO, HW sends a CQE for each segment of a TSO
+ * packet instead of a single CQE for the whole TSO packet
+ * transmitted. Each of these CQEs points to the same SQE, so
+ * avoid freeing the same SQE multiple times.
+ */
+ if (!nic->hw_tso)
+ nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
}
}
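To illustrate the accounting, assume a TSO skb that the hardware splits into three segments: the driver posts one header-plus-gather descriptor set and stores the skb at its sqe_ptr, then the hardware raises three CQEs that all carry that same sqe_ptr. Roughly:

	SQ:   skbuff[sqe_ptr] = skb, subdesc_cnt descriptors posted once
	CQEs: seg0 (sqe_ptr) -> skb != NULL: free skb, put descriptors
	      seg1 (sqe_ptr) -> skb == NULL: hw_tso, nothing left to put
	      seg2 (sqe_ptr) -> skb == NULL: hw_tso, nothing left to put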
@@ -1057,6 +1065,7 @@ int nicvf_stop(struct net_device *netdev)
netif_carrier_off(netdev);
netif_tx_stop_all_queues(nic->netdev);
+ nic->link_up = false;
/* Teardown secondary qsets first */
if (!nic->sqs_mode) {
@@ -1211,9 +1220,6 @@ int nicvf_open(struct net_device *netdev)
nic->drv_stats.txq_stop = 0;
nic->drv_stats.txq_wake = 0;
- netif_carrier_on(netdev);
- netif_tx_start_all_queues(netdev);
-
return 0;
cleanup:
nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
@@ -1551,6 +1557,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+ if (!pass1_silicon(nic->pdev))
+ nic->hw_tso = true;
+
netdev->netdev_ops = &nicvf_netdev_ops;
netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
@@ -1583,8 +1592,14 @@ err_disable_device:
static void nicvf_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
- struct nicvf *nic = netdev_priv(netdev);
- struct net_device *pnetdev = nic->pnicvf->netdev;
+ struct nicvf *nic;
+ struct net_device *pnetdev;
+
+ if (!netdev)
+ return;
+
+ nic = netdev_priv(netdev);
+ pnetdev = nic->pnicvf->netdev;
/* Check if this Qset is assigned to different VF.
* If yes, clean primary and all secondary Qsets.
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index e404ea837727..d0d1b5490061 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -18,14 +18,6 @@
#include "q_struct.h"
#include "nicvf_queues.h"
-struct rbuf_info {
- struct page *page;
- void *data;
- u64 offset;
-};
-
-#define GET_RBUF_INFO(x) ((struct rbuf_info *)(x - NICVF_RCV_BUF_ALIGN_BYTES))
-
/* Poll a register for a specific value */
static int nicvf_poll_reg(struct nicvf *nic, int qidx,
u64 reg, int bit_pos, int bits, int val)
@@ -86,8 +78,6 @@ static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
u32 buf_len, u64 **rbuf)
{
- u64 data;
- struct rbuf_info *rinfo;
int order = get_order(buf_len);
/* Check if request can be accommodated in previously allocated page */
@@ -113,46 +103,28 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
nic->rb_page_offset = 0;
}
- data = (u64)page_address(nic->rb_page) + nic->rb_page_offset;
-
- /* Align buffer addr to cache line i.e 128 bytes */
- rinfo = (struct rbuf_info *)(data + NICVF_RCV_BUF_ALIGN_LEN(data));
- /* Save page address for reference updation */
- rinfo->page = nic->rb_page;
- /* Store start address for later retrieval */
- rinfo->data = (void *)data;
- /* Store alignment offset */
- rinfo->offset = NICVF_RCV_BUF_ALIGN_LEN(data);
+ *rbuf = (u64 *)((u64)page_address(nic->rb_page) + nic->rb_page_offset);
- data += rinfo->offset;
-
- /* Give next aligned address to hw for DMA */
- *rbuf = (u64 *)(data + NICVF_RCV_BUF_ALIGN_BYTES);
return 0;
}
-/* Retrieve actual buffer start address and build skb for received packet */
+/* Build skb around receive buffer */
static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
u64 rb_ptr, int len)
{
+ void *data;
struct sk_buff *skb;
- struct rbuf_info *rinfo;
- rb_ptr = (u64)phys_to_virt(rb_ptr);
- /* Get buffer start address and alignment offset */
- rinfo = GET_RBUF_INFO(rb_ptr);
+ data = phys_to_virt(rb_ptr);
/* Now build an skb to give to stack */
- skb = build_skb(rinfo->data, RCV_FRAG_LEN);
+ skb = build_skb(data, RCV_FRAG_LEN);
if (!skb) {
- put_page(rinfo->page);
+ put_page(virt_to_page(data));
return NULL;
}
- /* Set correct skb->data */
- skb_reserve(skb, rinfo->offset + NICVF_RCV_BUF_ALIGN_BYTES);
-
- prefetch((void *)rb_ptr);
+ prefetch(skb->data);
return skb;
}
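Dropping struct rbuf_info works because build_skb() wraps the buffer in place and virt_to_page() recovers the owning page from the buffer address alone, so no per-buffer metadata (and hence no extra alignment headroom) is needed. The buffer must still reserve tail room for the skb_shared_info that build_skb() places at the end; with the RCV_FRAG_LEN definition from this patch the fragment layout is roughly:

	/* One receive fragment, carved from nic->rb_page at rb_page_offset:
	 *
	 *  +-------------------------------------+------------------------+
	 *  | packet data:                        | struct skb_shared_info |
	 *  | SKB_DATA_ALIGN(DMA_BUFFER_LEN +     | (used by build_skb())  |
	 *  |                NET_SKB_PAD)         |                        |
	 *  +-------------------------------------+------------------------+
	 *  |<--------------------- RCV_FRAG_LEN ------------------------->|
	 */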
@@ -196,7 +168,6 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
int head, tail;
u64 buf_addr;
struct rbdr_entry_t *desc;
- struct rbuf_info *rinfo;
if (!rbdr)
return;
@@ -212,16 +183,14 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
while (head != tail) {
desc = GET_RBDR_DESC(rbdr, head);
buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
- rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
- put_page(rinfo->page);
+ put_page(virt_to_page(phys_to_virt(buf_addr)));
head++;
head &= (rbdr->dmem.q_len - 1);
}
/* Free SKB of tail desc */
desc = GET_RBDR_DESC(rbdr, tail);
buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
- rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
- put_page(rinfo->page);
+ put_page(virt_to_page(phys_to_virt(buf_addr)));
/* Free RBDR ring */
nicvf_free_q_desc_mem(nic, &rbdr->dmem);
@@ -330,7 +299,7 @@ static int nicvf_init_cmp_queue(struct nicvf *nic,
return err;
cq->desc = cq->dmem.base;
- cq->thresh = CMP_QUEUE_CQE_THRESH;
+ cq->thresh = pass1_silicon(nic->pdev) ? 0 : CMP_QUEUE_CQE_THRESH;
nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;
return 0;
@@ -592,7 +561,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
/* Set threshold value for interrupt generation */
nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
- qidx, nic->cq_coalesce_usecs);
+ qidx, CMP_QUEUE_TIMER_THRESH);
}
/* Configures transmit queue */
@@ -956,7 +925,7 @@ static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
{
int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
- if (skb_shinfo(skb)->gso_size) {
+ if (skb_shinfo(skb)->gso_size && !nic->hw_tso) {
subdesc_cnt = nicvf_tso_count_subdescs(skb);
return subdesc_cnt;
}
@@ -971,7 +940,7 @@ static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
* First subdescriptor for every send descriptor.
*/
static inline void
-nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
+nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
int subdesc_cnt, struct sk_buff *skb, int len)
{
int proto;
@@ -1007,6 +976,15 @@ nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
break;
}
}
+
+ if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
+ hdr->tso = 1;
+ hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
+ /* For non-tunneled pkts, point this to L2 ethertype */
+ hdr->inner_l3_offset = skb_network_offset(skb) - 2;
+ nic->drv_stats.tx_tso++;
+ }
}
/* SQ GATHER subdescriptor
@@ -1076,7 +1054,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
data_left -= size;
tso_build_data(skb, &tso, size);
}
- nicvf_sq_add_hdr_subdesc(sq, hdr_qentry,
+ nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry,
seg_subdescs - 1, skb, seg_len);
sq->skbuff[hdr_qentry] = (u64)NULL;
qentry = nicvf_get_nxt_sqentry(sq, qentry);
@@ -1129,11 +1107,12 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
/* Check if its a TSO packet */
- if (skb_shinfo(skb)->gso_size)
+ if (skb_shinfo(skb)->gso_size && !nic->hw_tso)
return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);
/* Add SQ header subdesc */
- nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len);
+ nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
+ skb, skb->len);
/* Add SQ gather subdescs */
qentry = nicvf_get_nxt_sqentry(sq, qentry);
@@ -1234,153 +1213,93 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
return skb;
}
-/* Enable interrupt */
-void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
+static u64 nicvf_int_type_to_mask(int int_type, int q_idx)
{
u64 reg_val;
- reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
-
switch (int_type) {
case NICVF_INTR_CQ:
- reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
+ reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
break;
case NICVF_INTR_SQ:
- reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
+ reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
break;
case NICVF_INTR_RBDR:
- reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
+ reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
break;
case NICVF_INTR_PKT_DROP:
- reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
+ reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
break;
case NICVF_INTR_TCP_TIMER:
- reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
+ reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
break;
case NICVF_INTR_MBOX:
- reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
+ reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
break;
case NICVF_INTR_QS_ERR:
- reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
+ reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
break;
default:
- netdev_err(nic->netdev,
- "Failed to enable interrupt: unknown type\n");
- break;
+ reg_val = 0;
}
- nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
+ return reg_val;
+}
+
+/* Enable interrupt */
+void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
+{
+ u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
+
+ if (!mask) {
+ netdev_dbg(nic->netdev,
+ "Failed to enable interrupt: unknown type\n");
+ return;
+ }
+ nicvf_reg_write(nic, NIC_VF_ENA_W1S,
+ nicvf_reg_read(nic, NIC_VF_ENA_W1S) | mask);
}
/* Disable interrupt */
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
{
- u64 reg_val = 0;
+ u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
- switch (int_type) {
- case NICVF_INTR_CQ:
- reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
- break;
- case NICVF_INTR_SQ:
- reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
- break;
- case NICVF_INTR_RBDR:
- reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
- break;
- case NICVF_INTR_PKT_DROP:
- reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
- break;
- case NICVF_INTR_TCP_TIMER:
- reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
- break;
- case NICVF_INTR_MBOX:
- reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
- break;
- case NICVF_INTR_QS_ERR:
- reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
- break;
- default:
- netdev_err(nic->netdev,
+ if (!mask) {
+ netdev_dbg(nic->netdev,
"Failed to disable interrupt: unknown type\n");
- break;
+ return;
}
- nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
+ nicvf_reg_write(nic, NIC_VF_ENA_W1C, mask);
}
/* Clear interrupt */
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
{
- u64 reg_val = 0;
+ u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
- switch (int_type) {
- case NICVF_INTR_CQ:
- reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
- break;
- case NICVF_INTR_SQ:
- reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
- break;
- case NICVF_INTR_RBDR:
- reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
- break;
- case NICVF_INTR_PKT_DROP:
- reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
- break;
- case NICVF_INTR_TCP_TIMER:
- reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
- break;
- case NICVF_INTR_MBOX:
- reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
- break;
- case NICVF_INTR_QS_ERR:
- reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
- break;
- default:
- netdev_err(nic->netdev,
+ if (!mask) {
+ netdev_dbg(nic->netdev,
"Failed to clear interrupt: unknown type\n");
- break;
+ return;
}
- nicvf_reg_write(nic, NIC_VF_INT, reg_val);
+ nicvf_reg_write(nic, NIC_VF_INT, mask);
}
/* Check if interrupt is enabled */
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
{
- u64 reg_val;
- u64 mask = 0xff;
-
- reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
-
- switch (int_type) {
- case NICVF_INTR_CQ:
- mask = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
- break;
- case NICVF_INTR_SQ:
- mask = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
- break;
- case NICVF_INTR_RBDR:
- mask = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
- break;
- case NICVF_INTR_PKT_DROP:
- mask = NICVF_INTR_PKT_DROP_MASK;
- break;
- case NICVF_INTR_TCP_TIMER:
- mask = NICVF_INTR_TCP_TIMER_MASK;
- break;
- case NICVF_INTR_MBOX:
- mask = NICVF_INTR_MBOX_MASK;
- break;
- case NICVF_INTR_QS_ERR:
- mask = NICVF_INTR_QS_ERR_MASK;
- break;
- default:
- netdev_err(nic->netdev,
+ u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
+ /* If the interrupt type is unknown, treat it as disabled. */
+ if (!mask) {
+ netdev_dbg(nic->netdev,
"Failed to check interrupt enable: unknown type\n");
- break;
+ return 0;
}
- return (reg_val & mask);
+ return mask & nicvf_reg_read(nic, NIC_VF_ENA_W1S);
}
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
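Collapsing the four switch statements into nicvf_int_type_to_mask() also makes the register convention explicit: the same per-queue bit is written to the set register, the clear register, or the interrupt status register depending on the operation. Condensed from the call sites above:

	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	/* enable:  set the bit via the write-1-to-set register */
	nicvf_reg_write(nic, NIC_VF_ENA_W1S,
			nicvf_reg_read(nic, NIC_VF_ENA_W1S) | mask);
	/* disable: clear the bit via the write-1-to-clear register */
	nicvf_reg_write(nic, NIC_VF_ENA_W1C, mask);
	/* ack:     clear the pending bit in the interrupt register */
	nicvf_reg_write(nic, NIC_VF_INT, mask);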
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index fb4957d09914..c5030a7f213a 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -75,18 +75,16 @@
*/
#define CMP_QSIZE CMP_QUEUE_SIZE2
#define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10))
-#define CMP_QUEUE_CQE_THRESH 0
-#define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */
+#define CMP_QUEUE_CQE_THRESH (NAPI_POLL_WEIGHT / 2)
+#define CMP_QUEUE_TIMER_THRESH 80 /* ~2usec */
#define RBDR_SIZE RBDR_SIZE0
#define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13))
#define MAX_RCV_BUF_COUNT (1ULL << (RBDR_SIZE6 + 13))
#define RBDR_THRESH (RCV_BUF_COUNT / 2)
#define DMA_BUFFER_LEN 2048 /* In multiples of 128bytes */
-#define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \
- (NICVF_RCV_BUF_ALIGN_BYTES * 2))
-#define RCV_DATA_OFFSET NICVF_RCV_BUF_ALIGN_BYTES
+#define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
MAX_CQE_PER_PKT_XMIT)
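The new timer threshold lines up with the NICPF_CLK_PER_INT_TICK change in nic.h: with one tick now ≈ 0.025 usec, 80 ticks give the ~2 usec in the comment, whereas the old 220 ticks at ≈ 0.05 usec/tick were the "10usec" (≈ 11 usec) of the removed comment.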
@@ -108,10 +106,6 @@
#define NICVF_SQ_BASE_ALIGN_BYTES 128 /* 7 bits */
#define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES) ALIGN(ADDR, ALIGN_BYTES)
-#define NICVF_ADDR_ALIGN_LEN(ADDR, BYTES)\
- (NICVF_ALIGNED_ADDR(ADDR, BYTES) - BYTES)
-#define NICVF_RCV_BUF_ALIGN_LEN(X)\
- (NICVF_ALIGNED_ADDR(X, NICVF_RCV_BUF_ALIGN_BYTES) - X)
/* Queue enable/disable */
#define NICVF_SQ_EN BIT_ULL(19)
diff --git a/drivers/net/ethernet/cavium/thunder/q_struct.h b/drivers/net/ethernet/cavium/thunder/q_struct.h
index 3c1de97b1add..9e6d9876bfd0 100644
--- a/drivers/net/ethernet/cavium/thunder/q_struct.h
+++ b/drivers/net/ethernet/cavium/thunder/q_struct.h
@@ -545,25 +545,28 @@ struct sq_hdr_subdesc {
u64 subdesc_cnt:8;
u64 csum_l4:2;
u64 csum_l3:1;
- u64 rsvd0:5;
+ u64 csum_inner_l4:2;
+ u64 csum_inner_l3:1;
+ u64 rsvd0:2;
u64 l4_offset:8;
u64 l3_offset:8;
u64 rsvd1:4;
u64 tot_len:20; /* W0 */
- u64 tso_sdc_cont:8;
- u64 tso_sdc_first:8;
- u64 tso_l4_offset:8;
- u64 tso_flags_last:12;
- u64 tso_flags_first:12;
- u64 rsvd2:2;
+ u64 rsvd2:24;
+ u64 inner_l4_offset:8;
+ u64 inner_l3_offset:8;
+ u64 tso_start:8;
+ u64 rsvd3:2;
u64 tso_max_paysize:14; /* W1 */
#elif defined(__LITTLE_ENDIAN_BITFIELD)
u64 tot_len:20;
u64 rsvd1:4;
u64 l3_offset:8;
u64 l4_offset:8;
- u64 rsvd0:5;
+ u64 rsvd0:2;
+ u64 csum_inner_l3:1;
+ u64 csum_inner_l4:2;
u64 csum_l3:1;
u64 csum_l4:2;
u64 subdesc_cnt:8;
@@ -574,12 +577,11 @@ struct sq_hdr_subdesc {
u64 subdesc_type:4; /* W0 */
u64 tso_max_paysize:14;
- u64 rsvd2:2;
- u64 tso_flags_first:12;
- u64 tso_flags_last:12;
- u64 tso_l4_offset:8;
- u64 tso_sdc_first:8;
- u64 tso_sdc_cont:8; /* W1 */
+ u64 rsvd3:2;
+ u64 tso_start:8;
+ u64 inner_l3_offset:8;
+ u64 inner_l4_offset:8;
+ u64 rsvd2:24; /* W1 */
#endif
};
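Both endian variants of W1 must still pack into a single 64-bit word; a quick width check (a hypothetical compile-time assert, not part of the patch) confirms the new layout:

	/* tso_max_paysize(14) + rsvd3(2) + tso_start(8) + inner_l3_offset(8)
	 * + inner_l4_offset(8) + rsvd2(24) == 64 bits, same as the old layout.
	 */
	_Static_assert(14 + 2 + 8 + 8 + 8 + 24 == 64,
		       "sq_hdr_subdesc word 1 must be 64 bits");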
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 180aa9fabf48..9df26c2263bc 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -186,6 +186,23 @@ void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
}
EXPORT_SYMBOL(bgx_set_lmac_mac);
+void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
+{
+ struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ u64 cfg;
+
+ if (!bgx)
+ return;
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ if (enable)
+ cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN;
+ else
+ cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+}
+EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
+
static void bgx_sgmii_change_link_state(struct lmac *lmac)
{
struct bgx *bgx = lmac->bgx;
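This is the BGX-side half of the nic_enable_vf() change in nic_main.c: packet RX/TX on an LMAC is now gated by VF state rather than switched on unconditionally when the LMAC is brought up (note the CMR_EN-only change in bgx_lmac_enable() below). The call flow wired up by this patch:

	NIC_MBOX_MSG_CFG_DONE -> nic_enable_vf(nic, vf, true)
	                      -> bgx_lmac_rx_tx_enable(node, bgx, lmac, true)
	NIC_MBOX_MSG_SHUTDOWN -> nic_enable_vf(nic, vf, false)
	                      -> bgx_lmac_rx_tx_enable(node, bgx, lmac, false)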
@@ -612,6 +629,8 @@ static void bgx_poll_for_link(struct work_struct *work)
lmac->last_duplex = 1;
} else {
lmac->link_up = 0;
+ lmac->last_speed = SPEED_UNKNOWN;
+ lmac->last_duplex = DUPLEX_UNKNOWN;
}
if (lmac->last_link != lmac->link_up) {
@@ -654,8 +673,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
}
/* Enable lmac */
- bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG,
- CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN);
+ bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
/* Restore default cfg, in case low level firmware changed it */
bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);
@@ -695,8 +713,7 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
lmac = &bgx->lmac[lmacid];
if (lmac->check_link) {
/* Destroy work queue */
- cancel_delayed_work(&lmac->dwork);
- flush_workqueue(lmac->check_link);
+ cancel_delayed_work_sync(&lmac->dwork);
destroy_workqueue(lmac->check_link);
}
@@ -1009,6 +1026,9 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct bgx *bgx = NULL;
u8 lmac;
+ /* Load octeon mdio driver */
+ octeon_mdiobus_force_mod_depencency();
+
bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
if (!bgx)
return -ENOMEM;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 07b7ec66c60d..149e179363a1 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -182,6 +182,8 @@ enum MCAST_MODE {
#define BCAST_ACCEPT 1
#define CAM_ACCEPT 1
+void octeon_mdiobus_force_mod_depencency(void);
+void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable);
void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac);
unsigned bgx_get_map(int node);
int bgx_get_lmac_count(int node, int bgx);