path: root/drivers/net/ethernet/sfc
author    Rodrigo Vivi <rodrigo.vivi@intel.com>    2018-07-23 09:13:12 -0700
committer Rodrigo Vivi <rodrigo.vivi@intel.com>    2018-07-23 09:13:12 -0700
commit    c74a7469f97c0f40b46e82ee979f9fb1bb6e847c (patch)
tree      f2690a1a916b73ef94657fbf0e0141ae57701825 /drivers/net/ethernet/sfc
parent    6f15a7de86c8cf2dc09fc9e6d07047efa40ef809 (diff)
parent    500775074f88d9cf5416bed2ca19592812d62c41 (diff)
Merge drm/drm-next into drm-intel-next-queued
We need a backmerge to get DP_DPCD_REV_14 before we push other i915 changes to dinq that could break compilation.

Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Diffstat (limited to 'drivers/net/ethernet/sfc')
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c            8
-rw-r--r--  drivers/net/ethernet/sfc/efx.c            42
-rw-r--r--  drivers/net/ethernet/sfc/falcon/efx.c      5
-rw-r--r--  drivers/net/ethernet/sfc/falcon/farch.c    3
-rw-r--r--  drivers/net/ethernet/sfc/farch.c           3
-rw-r--r--  drivers/net/ethernet/sfc/rx.c              2
-rw-r--r--  drivers/net/ethernet/sfc/tx.c             33
7 files changed, 77 insertions, 19 deletions
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 63036d9bf3e6..23f0785c0573 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -4784,8 +4784,9 @@ expire:
* will set rule->filter_id to EFX_ARFS_FILTER_ID_PENDING, meaning that
* the rule is not removed by efx_rps_hash_del() below.
*/
- ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority,
- filter_idx, true) == 0;
+ if (ret)
+ ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority,
+ filter_idx, true) == 0;
/* While we can't safely dereference rule (we dropped the lock), we can
* still test it for NULL.
*/
@@ -4983,7 +4984,8 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx)
net_dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
}
- table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry));
+ table->entry = vzalloc(array_size(HUNT_FILTER_TBL_ROWS,
+ sizeof(*table->entry)));
if (!table->entry) {
rc = -ENOMEM;
goto fail;
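
The ef10.c hunk above is part of the treewide move to overflow-checked allocation sizes: array_size() from include/linux/overflow.h saturates to SIZE_MAX when the multiplication would overflow, so vzalloc() fails cleanly instead of returning an undersized buffer. A minimal sketch of that pattern, using a hypothetical entry type rather than the driver's real table layout:

#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

/* Hypothetical table entry, for illustration only. */
struct demo_entry {
	u32 handle;
	u32 flags;
};

static struct demo_entry *demo_alloc_table(size_t nentries)
{
	/* array_size() returns SIZE_MAX on overflow, so a huge nentries
	 * makes vzalloc() fail rather than under-allocate.
	 */
	return vzalloc(array_size(nentries, sizeof(struct demo_entry)));
}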
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index a4ebd8715494..570ec72266f3 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1289,9 +1289,8 @@ static int efx_init_io(struct efx_nic *efx)
pci_set_master(pci_dev);
- /* Set the PCI DMA mask. Try all possibilities from our
- * genuine mask down to 32 bits, because some architectures
- * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
+ /* Set the PCI DMA mask. Try all possibilities from our genuine mask
+ * down to 32 bits, because some architectures will allow 40 bit
* masks event though they reject 46 bit masks.
*/
while (dma_mask > 0x7fffffffUL) {
@@ -1551,6 +1550,38 @@ static int efx_probe_interrupts(struct efx_nic *efx)
return 0;
}
+#if defined(CONFIG_SMP)
+static void efx_set_interrupt_affinity(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ unsigned int cpu;
+
+ efx_for_each_channel(channel, efx) {
+ cpu = cpumask_local_spread(channel->channel,
+ pcibus_to_node(efx->pci_dev->bus));
+ irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
+ }
+}
+
+static void efx_clear_interrupt_affinity(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ irq_set_affinity_hint(channel->irq, NULL);
+}
+#else
+static void
+efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
+{
+}
+
+static void
+efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
+{
+}
+#endif /* CONFIG_SMP */
+
static int efx_soft_enable_interrupts(struct efx_nic *efx)
{
struct efx_channel *channel, *end_channel;
@@ -3149,6 +3180,7 @@ bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
return true;
}
+static
struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
const struct efx_filter_spec *spec)
{
@@ -3308,6 +3340,7 @@ static void efx_pci_remove_main(struct efx_nic *efx)
cancel_work_sync(&efx->reset_work);
efx_disable_interrupts(efx);
+ efx_clear_interrupt_affinity(efx);
efx_nic_fini_interrupt(efx);
efx_fini_port(efx);
efx->type->fini(efx);
@@ -3457,6 +3490,8 @@ static int efx_pci_probe_main(struct efx_nic *efx)
rc = efx_nic_init_interrupt(efx);
if (rc)
goto fail5;
+
+ efx_set_interrupt_affinity(efx);
rc = efx_enable_interrupts(efx);
if (rc)
goto fail6;
@@ -3464,6 +3499,7 @@ static int efx_pci_probe_main(struct efx_nic *efx)
return 0;
fail6:
+ efx_clear_interrupt_affinity(efx);
efx_nic_fini_interrupt(efx);
fail5:
efx_fini_port(efx);
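
The efx.c hunks above spread each channel's IRQ over CPUs close to the NIC's NUMA node at probe time and clear the hints again before the interrupts are torn down. A minimal sketch of that spread/clear pairing, with a hypothetical device structure (nr_queues, irqs[], numa_node) standing in for the driver's channel list:

#include <linux/cpumask.h>
#include <linux/interrupt.h>

/* Hypothetical per-device state, for illustration only. */
struct demo_dev {
	unsigned int nr_queues;
	int irqs[32];
	int numa_node;
};

static void demo_set_irq_affinity(struct demo_dev *d)
{
	unsigned int i, cpu;

	for (i = 0; i < d->nr_queues; i++) {
		/* Pick the i-th CPU, preferring CPUs on the device's node. */
		cpu = cpumask_local_spread(i, d->numa_node);
		irq_set_affinity_hint(d->irqs[i], cpumask_of(cpu));
	}
}

static void demo_clear_irq_affinity(struct demo_dev *d)
{
	unsigned int i;

	/* Clear the hints before the IRQs are freed, mirroring the
	 * teardown order in the hunk above.
	 */
	for (i = 0; i < d->nr_queues; i++)
		irq_set_affinity_hint(d->irqs[i], NULL);
}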
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index 3d6c91e96589..dd5530a4f8c8 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -1242,9 +1242,8 @@ static int ef4_init_io(struct ef4_nic *efx)
pci_set_master(pci_dev);
- /* Set the PCI DMA mask. Try all possibilities from our
- * genuine mask down to 32 bits, because some architectures
- * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
+ /* Set the PCI DMA mask. Try all possibilities from our genuine mask
+ * down to 32 bits, because some architectures will allow 40 bit
* masks event though they reject 46 bit masks.
*/
while (dma_mask > 0x7fffffffUL) {
diff --git a/drivers/net/ethernet/sfc/falcon/farch.c b/drivers/net/ethernet/sfc/falcon/farch.c
index 494884f6af4a..411a2f419447 100644
--- a/drivers/net/ethernet/sfc/falcon/farch.c
+++ b/drivers/net/ethernet/sfc/falcon/farch.c
@@ -2755,7 +2755,8 @@ int ef4_farch_filter_table_probe(struct ef4_nic *efx)
GFP_KERNEL);
if (!table->used_bitmap)
goto fail;
- table->spec = vzalloc(table->size * sizeof(*table->spec));
+ table->spec = vzalloc(array_size(sizeof(*table->spec),
+ table->size));
if (!table->spec)
goto fail;
}
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index c72adf8b52ea..8edf20967c82 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -2826,7 +2826,8 @@ int efx_farch_filter_table_probe(struct efx_nic *efx)
GFP_KERNEL);
if (!table->used_bitmap)
goto fail;
- table->spec = vzalloc(table->size * sizeof(*table->spec));
+ table->spec = vzalloc(array_size(sizeof(*table->spec),
+ table->size));
if (!table->spec)
goto fail;
}
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 64a94f242027..d2e254f2f72b 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -839,6 +839,8 @@ static void efx_filter_rfs_work(struct work_struct *data)
int rc;
rc = efx->type->filter_insert(efx, &req->spec, true);
+ if (rc >= 0)
+ rc %= efx->type->max_rx_ip_filters;
if (efx->rps_hash_table) {
spin_lock_bh(&efx->rps_hash_lock);
rule = efx_rps_hash_find(efx, &req->spec);
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index cece961f2e82..c3ad564ac4c0 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -435,17 +435,18 @@ static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
} while (1);
}
-/* Remove buffers put into a tx_queue. None of the buffers must have
- * an skb attached.
+/* Remove buffers put into a tx_queue for the current packet.
+ * None of the buffers must have an skb attached.
*/
-static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
+static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
+ unsigned int insert_count)
{
struct efx_tx_buffer *buffer;
unsigned int bytes_compl = 0;
unsigned int pkts_compl = 0;
/* Work backwards until we hit the original insert pointer value */
- while (tx_queue->insert_count != tx_queue->write_count) {
+ while (tx_queue->insert_count != insert_count) {
--tx_queue->insert_count;
buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
@@ -504,6 +505,8 @@ static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
*/
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
+ unsigned int old_insert_count = tx_queue->insert_count;
+ bool xmit_more = skb->xmit_more;
bool data_mapped = false;
unsigned int segments;
unsigned int skb_len;
@@ -553,8 +556,10 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
/* Update BQL */
netdev_tx_sent_queue(tx_queue->core_txq, skb_len);
+ efx_tx_maybe_stop_queue(tx_queue);
+
/* Pass off to hardware */
- if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
+ if (!xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
/* There could be packets left on the partner queue if those
@@ -577,14 +582,26 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
tx_queue->tx_packets++;
}
- efx_tx_maybe_stop_queue(tx_queue);
-
return NETDEV_TX_OK;
err:
- efx_enqueue_unwind(tx_queue);
+ efx_enqueue_unwind(tx_queue, old_insert_count);
dev_kfree_skb_any(skb);
+
+ /* If we're not expecting another transmit and we had something to push
+ * on this queue or a partner queue then we need to push here to get the
+ * previous packets out.
+ */
+ if (!xmit_more) {
+ struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
+
+ if (txq2->xmit_more_available)
+ efx_nic_push_buffers(txq2);
+
+ efx_nic_push_buffers(tx_queue);
+ }
+
return NETDEV_TX_OK;
}
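
The tx.c hunks above latch skb->xmit_more before the skb can be freed and make sure descriptors queued by earlier packets are still pushed to the hardware on the error path. A minimal sketch of that doorbell-batching pattern for a 4.17-era ndo_start_xmit(), with hypothetical helpers demo_map_and_queue() and demo_ring_doorbell():

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical driver helpers, for illustration only. */
static int demo_map_and_queue(struct net_device *dev, struct sk_buff *skb);
static void demo_ring_doorbell(struct net_device *dev);

static netdev_tx_t demo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq =
		netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	bool xmit_more = skb->xmit_more;	/* read before skb may be freed */

	if (demo_map_and_queue(dev, skb) < 0) {
		dev_kfree_skb_any(skb);
		/* Still flush descriptors queued by previous skbs if the
		 * stack is not going to send more right away.
		 */
		if (!xmit_more)
			demo_ring_doorbell(dev);
		return NETDEV_TX_OK;
	}

	/* Ring the doorbell only when no further packets are pending,
	 * or when the queue has been stopped.
	 */
	if (!xmit_more || netif_xmit_stopped(txq))
		demo_ring_doorbell(dev);

	return NETDEV_TX_OK;
}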