Diffstat (limited to 'drivers/net/ethernet/cisco/enic/enic_main.c')
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 222
1 file changed, 107 insertions(+), 115 deletions(-)
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index cd5fe4f6b54c..fb269d587b74 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -176,50 +176,18 @@ static void enic_unset_affinity_hint(struct enic *enic)
irq_set_affinity_hint(enic->msix_entry[i].vector, NULL);
}
-static void enic_udp_tunnel_add(struct net_device *netdev,
- struct udp_tunnel_info *ti)
+static int enic_udp_tunnel_set_port(struct net_device *netdev,
+ unsigned int table, unsigned int entry,
+ struct udp_tunnel_info *ti)
{
struct enic *enic = netdev_priv(netdev);
- __be16 port = ti->port;
int err;
spin_lock_bh(&enic->devcmd_lock);
- if (ti->type != UDP_TUNNEL_TYPE_VXLAN) {
- netdev_info(netdev, "udp_tnl: only vxlan tunnel offload supported");
- goto error;
- }
-
- switch (ti->sa_family) {
- case AF_INET6:
- if (!(enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6)) {
- netdev_info(netdev, "vxlan: only IPv4 offload supported");
- goto error;
- }
- /* Fall through */
- case AF_INET:
- break;
- default:
- goto error;
- }
-
- if (enic->vxlan.vxlan_udp_port_number) {
- if (ntohs(port) == enic->vxlan.vxlan_udp_port_number)
- netdev_warn(netdev, "vxlan: udp port already offloaded");
- else
- netdev_info(netdev, "vxlan: offload supported for only one UDP port");
-
- goto error;
- }
- if ((vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ) != 1) &&
- !(enic->vxlan.flags & ENIC_VXLAN_MULTI_WQ)) {
- netdev_info(netdev, "vxlan: vxlan offload with multi wq not supported on this adapter");
- goto error;
- }
-
err = vnic_dev_overlay_offload_cfg(enic->vdev,
OVERLAY_CFG_VXLAN_PORT_UPDATE,
- ntohs(port));
+ ntohs(ti->port));
if (err)
goto error;
@@ -228,52 +196,50 @@ static void enic_udp_tunnel_add(struct net_device *netdev,
if (err)
goto error;
- enic->vxlan.vxlan_udp_port_number = ntohs(port);
-
- netdev_info(netdev, "vxlan fw-vers-%d: offload enabled for udp port: %d, sa_family: %d ",
- (int)enic->vxlan.patch_level, ntohs(port), ti->sa_family);
-
- goto unlock;
-
+ enic->vxlan.vxlan_udp_port_number = ntohs(ti->port);
error:
- netdev_info(netdev, "failed to offload udp port: %d, sa_family: %d, type: %d",
- ntohs(port), ti->sa_family, ti->type);
-unlock:
spin_unlock_bh(&enic->devcmd_lock);
+
+ return err;
}
-static void enic_udp_tunnel_del(struct net_device *netdev,
- struct udp_tunnel_info *ti)
+static int enic_udp_tunnel_unset_port(struct net_device *netdev,
+ unsigned int table, unsigned int entry,
+ struct udp_tunnel_info *ti)
{
struct enic *enic = netdev_priv(netdev);
int err;
spin_lock_bh(&enic->devcmd_lock);
- if ((ntohs(ti->port) != enic->vxlan.vxlan_udp_port_number) ||
- ti->type != UDP_TUNNEL_TYPE_VXLAN) {
- netdev_info(netdev, "udp_tnl: port:%d, sa_family: %d, type: %d not offloaded",
- ntohs(ti->port), ti->sa_family, ti->type);
- goto unlock;
- }
-
err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN,
OVERLAY_OFFLOAD_DISABLE);
- if (err) {
- netdev_err(netdev, "vxlan: del offload udp port: %d failed",
- ntohs(ti->port));
+ if (err)
goto unlock;
- }
enic->vxlan.vxlan_udp_port_number = 0;
- netdev_info(netdev, "vxlan: del offload udp port %d, family %d\n",
- ntohs(ti->port), ti->sa_family);
-
unlock:
spin_unlock_bh(&enic->devcmd_lock);
+
+ return err;
}
+static const struct udp_tunnel_nic_info enic_udp_tunnels = {
+ .set_port = enic_udp_tunnel_set_port,
+ .unset_port = enic_udp_tunnel_unset_port,
+ .tables = {
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+ },
+}, enic_udp_tunnels_v4 = {
+ .set_port = enic_udp_tunnel_set_port,
+ .unset_port = enic_udp_tunnel_unset_port,
+ .flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
+ .tables = {
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+ },
+};
+
static netdev_features_t enic_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
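
[note] With the conversion above, the validation enic_udp_tunnel_add() used to do by hand (tunnel type, one-port limit, IPv4-only restriction) moves into the udp_tunnel_nic core, driven by the .tables and .flags declarations. A minimal sketch of the resulting contract, assuming the core's behavior (the real arbitration lives in net/ipv4/udp_tunnel_nic.c; function name here is hypothetical):

	/* Sketch only: the core filters requests before calling the driver. */
	static int sketch_offload_vxlan_port(struct net_device *dev,
					     struct udp_tunnel_info *ti)
	{
		const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;

		/* Ports whose type is not in tables[].tunnel_types, or IPv6
		 * ports when UDP_TUNNEL_NIC_INFO_IPV4_ONLY is set, never reach
		 * the driver; set_port() only ever sees a valid VXLAN port. */
		return info->set_port(dev, 0 /* table */, 0 /* entry */, ti);
	}

The single-entry table also means the core, not the driver, enforces the "only one offloaded UDP port at a time" rule that the deleted netdev_info() messages used to report.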
@@ -306,7 +272,7 @@ static netdev_features_t enic_features_check(struct sk_buff *skb,
case ntohs(ETH_P_IPV6):
if (!(enic->vxlan.flags & ENIC_VXLAN_INNER_IPV6))
goto out;
- /* Fall through */
+ fallthrough;
case ntohs(ETH_P_IP):
break;
default:
@@ -360,11 +326,11 @@ static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
struct enic *enic = vnic_dev_priv(wq->vdev);
if (buf->sop)
- pci_unmap_single(enic->pdev, buf->dma_addr,
- buf->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_TO_DEVICE);
else
- pci_unmap_page(enic->pdev, buf->dma_addr,
- buf->len, PCI_DMA_TODEVICE);
+ dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_TO_DEVICE);
if (buf->os_buf)
dev_kfree_skb_any(buf->os_buf);
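
[note] The pci_(un)map_* helpers and PCI_DMA_* direction constants were thin, long-deprecated wrappers around the generic DMA API, so these hunks are behavior-preserving renames. For reference, the pattern the converted call sites follow (enic_dma_map_check() performs the equivalent of the dma_mapping_error() test plus the driver's error accounting):

	dma_addr = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma_addr))
		return -ENOMEM;
	/* ... hand dma_addr to the hardware ... */
	dma_unmap_single(&pdev->dev, dma_addr, len, DMA_TO_DEVICE);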
@@ -608,8 +574,8 @@ static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
dma_addr_t dma_addr;
int err = 0;
- dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
- PCI_DMA_TODEVICE);
+ dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len,
+ DMA_TO_DEVICE);
if (unlikely(enic_dma_map_check(enic, dma_addr)))
return -ENOMEM;
@@ -639,8 +605,8 @@ static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
dma_addr_t dma_addr;
int err = 0;
- dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
- PCI_DMA_TODEVICE);
+ dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len,
+ DMA_TO_DEVICE);
if (unlikely(enic_dma_map_check(enic, dma_addr)))
return -ENOMEM;
@@ -727,8 +693,9 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
*/
while (frag_len_left) {
len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
- dma_addr = pci_map_single(enic->pdev, skb->data + offset, len,
- PCI_DMA_TODEVICE);
+ dma_addr = dma_map_single(&enic->pdev->dev,
+ skb->data + offset, len,
+ DMA_TO_DEVICE);
if (unlikely(enic_dma_map_check(enic, dma_addr)))
return -ENOMEM;
enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len,
@@ -786,8 +753,8 @@ static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
dma_addr_t dma_addr;
int err = 0;
- dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
- PCI_DMA_TODEVICE);
+ dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len,
+ DMA_TO_DEVICE);
if (unlikely(enic_dma_map_check(enic, dma_addr)))
return -ENOMEM;
@@ -1256,8 +1223,8 @@ static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
if (!buf->os_buf)
return;
- pci_unmap_single(enic->pdev, buf->dma_addr,
- buf->len, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(buf->os_buf);
buf->os_buf = NULL;
}
@@ -1282,8 +1249,8 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
if (!skb)
return -ENOMEM;
- dma_addr = pci_map_single(enic->pdev, skb->data, len,
- PCI_DMA_FROMDEVICE);
+ dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len,
+ DMA_FROM_DEVICE);
if (unlikely(enic_dma_map_check(enic, dma_addr))) {
dev_kfree_skb(skb);
return -ENOMEM;
@@ -1315,8 +1282,8 @@ static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
new_skb = netdev_alloc_skb_ip_align(netdev, len);
if (!new_skb)
return false;
- pci_dma_sync_single_for_cpu(enic->pdev, buf->dma_addr, len,
- DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, len,
+ DMA_FROM_DEVICE);
memcpy(new_skb->data, (*skb)->data, len);
*skb = new_skb;
@@ -1365,8 +1332,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
enic->rq_truncated_pkts++;
}
- pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
buf->os_buf = NULL;
@@ -1380,8 +1347,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
buf->os_buf = NULL;
- pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&enic->pdev->dev, buf->dma_addr,
+ buf->len, DMA_FROM_DEVICE);
}
prefetch(skb->data - NET_IP_ALIGN);
@@ -1454,8 +1421,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
/* Buffer overflow
*/
- pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
buf->os_buf = NULL;
}
@@ -2140,8 +2107,6 @@ static int enic_dev_wait(struct vnic_dev *vdev,
int done;
int err;
- BUG_ON(in_interrupt());
-
err = start(vdev, arg);
if (err)
return err;
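
[note] Dropping BUG_ON(in_interrupt()) follows the tree-wide removal of in_interrupt() checks from drivers: the check cannot reliably prove that sleeping is safe, and enic_dev_wait() sleeps between polls regardless. If an assertion were still wanted here, the idiomatic form would be:

	might_sleep();	/* hypothetical; the patch simply drops the check */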
@@ -2212,9 +2177,9 @@ int __enic_set_rsskey(struct enic *enic)
dma_addr_t rss_key_buf_pa;
int i, kidx, bidx, err;
- rss_key_buf_va = pci_zalloc_consistent(enic->pdev,
- sizeof(union vnic_rss_key),
- &rss_key_buf_pa);
+ rss_key_buf_va = dma_alloc_coherent(&enic->pdev->dev,
+ sizeof(union vnic_rss_key),
+ &rss_key_buf_pa, GFP_ATOMIC);
if (!rss_key_buf_va)
return -ENOMEM;
@@ -2229,8 +2194,8 @@ int __enic_set_rsskey(struct enic *enic)
sizeof(union vnic_rss_key));
spin_unlock_bh(&enic->devcmd_lock);
- pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
- rss_key_buf_va, rss_key_buf_pa);
+ dma_free_coherent(&enic->pdev->dev, sizeof(union vnic_rss_key),
+ rss_key_buf_va, rss_key_buf_pa);
return err;
}
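
[note] pci_zalloc_consistent()/pci_free_consistent() map to dma_alloc_coherent()/dma_free_coherent() with the gfp made explicit; GFP_ATOMIC matches the implicit gfp of the old pci_* wrappers, and in this kernel generation dma_alloc_coherent() already returns zeroed memory, so the zeroing behavior is preserved too. A sketch of the converted pattern ('size' standing in for sizeof(union vnic_rss_key)):

	va = dma_alloc_coherent(&pdev->dev, size, &pa, GFP_ATOMIC);
	if (!va)
		return -ENOMEM;
	/* ... issue the devcmd that consumes the buffer ... */
	dma_free_coherent(&pdev->dev, size, va, pa);

A caller known to be sleepable could pass GFP_KERNEL instead, but keeping GFP_ATOMIC makes the conversion strictly equivalent.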
@@ -2249,8 +2214,9 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
unsigned int i;
int err;
- rss_cpu_buf_va = pci_alloc_consistent(enic->pdev,
- sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa);
+ rss_cpu_buf_va = dma_alloc_coherent(&enic->pdev->dev,
+ sizeof(union vnic_rss_cpu),
+ &rss_cpu_buf_pa, GFP_ATOMIC);
if (!rss_cpu_buf_va)
return -ENOMEM;
@@ -2263,8 +2229,8 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
sizeof(union vnic_rss_cpu));
spin_unlock_bh(&enic->devcmd_lock);
- pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
- rss_cpu_buf_va, rss_cpu_buf_pa);
+ dma_free_coherent(&enic->pdev->dev, sizeof(union vnic_rss_cpu),
+ rss_cpu_buf_va, rss_cpu_buf_pa);
return err;
}
@@ -2329,6 +2295,13 @@ static int enic_set_rss_nic_cfg(struct enic *enic)
rss_hash_bits, rss_base_cpu, rss_enable);
}
+static void enic_set_api_busy(struct enic *enic, bool busy)
+{
+ spin_lock(&enic->enic_api_lock);
+ enic->enic_api_busy = busy;
+ spin_unlock(&enic->enic_api_lock);
+}
+
static void enic_reset(struct work_struct *work)
{
struct enic *enic = container_of(work, struct enic, reset);
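
[note] The point of enic_set_api_busy() is that the old code held enic_api_lock (a spinlock) across enic_stop()/enic_open(), which can sleep; now the spinlock only guards a busy flag and the reset path itself runs unlocked. The infiniband-facing proxy path is expected to test the flag and back off. A sketch of that consumer side, assuming the shape of the check in enic_api.c (function name hypothetical):

	static int enic_api_try_devcmd(struct enic *enic)
	{
		int err = 0;

		spin_lock(&enic->enic_api_lock);
		if (enic->enic_api_busy)
			err = -EAGAIN;	/* reset in progress; caller retries */
		/* else: issue the proxied devcmd while holding the lock */
		spin_unlock(&enic->enic_api_lock);

		return err;
	}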
@@ -2338,7 +2311,9 @@ static void enic_reset(struct work_struct *work)
rtnl_lock();
- spin_lock(&enic->enic_api_lock);
+ /* Stop any activity from infiniband */
+ enic_set_api_busy(enic, true);
+
enic_stop(enic->netdev);
enic_dev_soft_reset(enic);
enic_reset_addr_lists(enic);
@@ -2346,7 +2321,10 @@ static void enic_reset(struct work_struct *work)
enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic);
enic_open(enic->netdev);
- spin_unlock(&enic->enic_api_lock);
+
+ /* Allow infiniband to fiddle with the device again */
+ enic_set_api_busy(enic, false);
+
call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
rtnl_unlock();
@@ -2358,7 +2336,9 @@ static void enic_tx_hang_reset(struct work_struct *work)
rtnl_lock();
- spin_lock(&enic->enic_api_lock);
+ /* Stop any activity from infiniband */
+ enic_set_api_busy(enic, true);
+
enic_dev_hang_notify(enic);
enic_stop(enic->netdev);
enic_dev_hang_reset(enic);
@@ -2367,7 +2347,10 @@ static void enic_tx_hang_reset(struct work_struct *work)
enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic);
enic_open(enic->netdev);
- spin_unlock(&enic->enic_api_lock);
+
+ /* Allow infiniband to fiddle with the device again */
+ enic_set_api_busy(enic, false);
+
call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
rtnl_unlock();
@@ -2526,8 +2509,8 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = enic_rx_flow_steer,
#endif
- .ndo_udp_tunnel_add = enic_udp_tunnel_add,
- .ndo_udp_tunnel_del = enic_udp_tunnel_del,
+ .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
+ .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = enic_features_check,
};
@@ -2552,8 +2535,8 @@ static const struct net_device_ops enic_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = enic_rx_flow_steer,
#endif
- .ndo_udp_tunnel_add = enic_udp_tunnel_add,
- .ndo_udp_tunnel_del = enic_udp_tunnel_del,
+ .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
+ .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = enic_features_check,
};
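
[note] udp_tunnel_nic_add_port()/udp_tunnel_nic_del_port() are the transitional shims that feed the legacy ndo calls into the new port-table core. In later kernels these two ndo entries disappear entirely; registration then reduces to the udp_tunnel_nic_info assignment done in probe below, roughly:

	/* sketch: post-transition kernels need only this */
	netdev->udp_tunnel_nic_info = &enic_udp_tunnels;
	/* the core then calls enic_udp_tunnel_set_port() directly */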
@@ -2561,13 +2544,15 @@ static void enic_dev_deinit(struct enic *enic)
{
unsigned int i;
- for (i = 0; i < enic->rq_count; i++) {
- napi_hash_del(&enic->napi[i]);
- netif_napi_del(&enic->napi[i]);
- }
+ for (i = 0; i < enic->rq_count; i++)
+ __netif_napi_del(&enic->napi[i]);
+
if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
for (i = 0; i < enic->wq_count; i++)
- netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]);
+ __netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]);
+
+ /* observe RCU grace period after __netif_napi_del() calls */
+ synchronize_net();
enic_free_vnic_resources(enic);
enic_clear_intr_mode(enic);
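
[note] netif_napi_del() performs its own synchronize_net() per instance; switching to __netif_napi_del() batches the teardown so the driver pays for a single RCU grace period covering every queue:

	for (i = 0; i < n; i++)
		__netif_napi_del(&napi[i]);
	synchronize_net();	/* after this, the napi structs may be freed */

The explicit napi_hash_del() calls go away because __netif_napi_del() unhashes internally.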
@@ -2733,21 +2718,21 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* fail to 32-bit.
*/
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(47));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(47));
if (err) {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(dev, "No usable DMA configuration, aborting\n");
goto err_out_release_regions;
}
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(dev, "Unable to obtain %u-bit DMA "
"for consistent allocations, aborting\n", 32);
goto err_out_release_regions;
}
} else {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(47));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(47));
if (err) {
dev_err(dev, "Unable to obtain %u-bit DMA "
"for consistent allocations, aborting\n", 47);
@@ -2963,6 +2948,13 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
patch_level = fls(patch_level);
patch_level = patch_level ? patch_level - 1 : 0;
enic->vxlan.patch_level = patch_level;
+
+ if (vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ) == 1 ||
+ enic->vxlan.flags & ENIC_VXLAN_MULTI_WQ) {
+ netdev->udp_tunnel_nic_info = &enic_udp_tunnels_v4;
+ if (enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6)
+ netdev->udp_tunnel_nic_info = &enic_udp_tunnels;
+ }
}
netdev->features |= netdev->hw_features;