Diffstat (limited to 'drivers/net/ethernet/ibm/ibmvnic.c')
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c  1402
1 file changed, 777 insertions, 625 deletions
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 1b702a43a5d0..1c0e4beb56e7 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -84,8 +84,6 @@ static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
-static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
- union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
@@ -97,15 +95,14 @@ static int pending_scrq(struct ibmvnic_adapter *,
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
-static void send_map_query(struct ibmvnic_adapter *adapter);
+static void send_query_map(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
-static void send_cap_queries(struct ibmvnic_adapter *adapter);
+static void send_query_cap(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
-static int ibmvnic_init(struct ibmvnic_adapter *);
-static int ibmvnic_reset_init(struct ibmvnic_adapter *);
+static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
@@ -118,7 +115,7 @@ struct ibmvnic_stat {
#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
offsetof(struct ibmvnic_statistics, stat))
-#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
+#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off))))
static const struct ibmvnic_stat ibmvnic_stats[] = {
{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
@@ -250,46 +247,23 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
if (!ltb->buff)
return;
+ /* VIOS automatically unmaps the long term buffer at remote
+ * end for the following resets:
+ * FAILOVER, MOBILITY, TIMEOUT.
+ */
if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
- adapter->reset_reason != VNIC_RESET_MOBILITY)
+ adapter->reset_reason != VNIC_RESET_MOBILITY &&
+ adapter->reset_reason != VNIC_RESET_TIMEOUT)
send_request_unmap(adapter, ltb->map_id);
dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}
-static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
- struct ibmvnic_long_term_buff *ltb)
+static int reset_long_term_buff(struct ibmvnic_long_term_buff *ltb)
{
- struct device *dev = &adapter->vdev->dev;
- int rc;
+ if (!ltb->buff)
+ return -EINVAL;
memset(ltb->buff, 0, ltb->size);
-
- mutex_lock(&adapter->fw_lock);
- adapter->fw_done_rc = 0;
-
- reinit_completion(&adapter->fw_done);
- rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
- if (rc) {
- mutex_unlock(&adapter->fw_lock);
- return rc;
- }
-
- rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
- if (rc) {
- dev_info(dev,
- "Reset failed, long term map request timed out or aborted\n");
- mutex_unlock(&adapter->fw_lock);
- return rc;
- }
-
- if (adapter->fw_done_rc) {
- dev_info(dev,
- "Reset failed, attempting to free and reallocate buffer\n");
- free_long_term_buff(adapter, ltb);
- mutex_unlock(&adapter->fw_lock);
- return alloc_long_term_buff(adapter, ltb, ltb->size);
- }
- mutex_unlock(&adapter->fw_lock);
return 0;
}
@@ -297,8 +271,7 @@ static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
int i;
- for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
- i++)
+ for (i = 0; i < adapter->num_active_rx_pools; i++)
adapter->rx_pool[i].active = 0;
}
@@ -306,15 +279,17 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
struct ibmvnic_rx_pool *pool)
{
int count = pool->size - atomic_read(&pool->available);
+ u64 handle = adapter->rx_scrq[pool->index]->handle;
struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_ind_xmit_queue *ind_bufp;
+ struct ibmvnic_sub_crq_queue *rx_scrq;
+ union sub_crq *sub_crq;
int buffers_added = 0;
unsigned long lpar_rc;
- union sub_crq sub_crq;
struct sk_buff *skb;
unsigned int offset;
dma_addr_t dma_addr;
unsigned char *dst;
- u64 *handle_array;
int shift = 0;
int index;
int i;
@@ -322,12 +297,10 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
if (!pool->active)
return;
- handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
- be32_to_cpu(adapter->login_rsp_buf->
- off_rxadd_subcrqs));
-
+ rx_scrq = adapter->rx_scrq[pool->index];
+ ind_bufp = &rx_scrq->ind_buf;
for (i = 0; i < count; ++i) {
- skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
+ skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
if (!skb) {
dev_err(dev, "Couldn't replenish rx buff\n");
adapter->replenish_no_mem++;
@@ -352,12 +325,13 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
pool->rx_buff[index].pool_index = pool->index;
pool->rx_buff[index].size = pool->buff_size;
- memset(&sub_crq, 0, sizeof(sub_crq));
- sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
- sub_crq.rx_add.correlator =
+ sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
+ memset(sub_crq, 0, sizeof(*sub_crq));
+ sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
+ sub_crq->rx_add.correlator =
cpu_to_be64((u64)&pool->rx_buff[index]);
- sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
- sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
+ sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
+ sub_crq->rx_add.map_id = pool->long_term_buff.map_id;
/* The length field of the sCRQ is defined to be 24 bits so the
* buffer size needs to be left shifted by a byte before it is
@@ -367,16 +341,20 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
#ifdef __LITTLE_ENDIAN__
shift = 8;
#endif
- sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
-
- lpar_rc = send_subcrq(adapter, handle_array[pool->index],
- &sub_crq);
- if (lpar_rc != H_SUCCESS)
- goto failure;
-
- buffers_added++;
- adapter->replenish_add_buff_success++;
+ sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
pool->next_free = (pool->next_free + 1) % pool->size;
+ if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
+ i == count - 1) {
+ lpar_rc =
+ send_subcrq_indirect(adapter, handle,
+ (u64)ind_bufp->indir_dma,
+ (u64)ind_bufp->index);
+ if (lpar_rc != H_SUCCESS)
+ goto failure;
+ buffers_added += ind_bufp->index;
+ adapter->replenish_add_buff_success += ind_bufp->index;
+ ind_bufp->index = 0;
+ }
}
atomic_add(buffers_added, &pool->available);
return;
@@ -384,13 +362,22 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
failure:
if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
- pool->free_map[pool->next_free] = index;
- pool->rx_buff[index].skb = NULL;
+ for (i = ind_bufp->index - 1; i >= 0; --i) {
+ struct ibmvnic_rx_buff *rx_buff;
- dev_kfree_skb_any(skb);
- adapter->replenish_add_buff_failure++;
+ pool->next_free = pool->next_free == 0 ?
+ pool->size - 1 : pool->next_free - 1;
+ sub_crq = &ind_bufp->indir_arr[i];
+ rx_buff = (struct ibmvnic_rx_buff *)
+ be64_to_cpu(sub_crq->rx_add.correlator);
+ index = (int)(rx_buff - pool->rx_buff);
+ pool->free_map[pool->next_free] = index;
+ dev_kfree_skb_any(pool->rx_buff[index].skb);
+ pool->rx_buff[index].skb = NULL;
+ }
+ adapter->replenish_add_buff_failure += ind_bufp->index;
atomic_add(buffers_added, &pool->available);
-
+ ind_bufp->index = 0;
if (lpar_rc == H_CLOSED || adapter->failover_pending) {
/* Disable buffer pool replenishment and report carrier off if
* queue is closed or pending failover.
@@ -407,11 +394,12 @@ static void replenish_pools(struct ibmvnic_adapter *adapter)
int i;
adapter->replenish_task_cycles++;
- for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
- i++) {
+ for (i = 0; i < adapter->num_active_rx_pools; i++) {
if (adapter->rx_pool[i].active)
replenish_rx_pool(adapter, &adapter->rx_pool[i]);
}
+
+ netdev_dbg(adapter->netdev, "Replenished %d pools\n", i);
}
static void release_stats_buffers(struct ibmvnic_adapter *adapter)
@@ -475,32 +463,29 @@ static int init_stats_token(struct ibmvnic_adapter *adapter)
static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_rx_pool *rx_pool;
+ u64 buff_size;
int rx_scrqs;
int i, j, rc;
- u64 *size_array;
if (!adapter->rx_pool)
return -1;
- size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
- be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
-
- rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+ buff_size = adapter->cur_rx_buf_sz;
+ rx_scrqs = adapter->num_active_rx_pools;
for (i = 0; i < rx_scrqs; i++) {
rx_pool = &adapter->rx_pool[i];
netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
- if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
+ if (rx_pool->buff_size != buff_size) {
free_long_term_buff(adapter, &rx_pool->long_term_buff);
- rx_pool->buff_size = be64_to_cpu(size_array[i]);
+ rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
rc = alloc_long_term_buff(adapter,
&rx_pool->long_term_buff,
rx_pool->size *
rx_pool->buff_size);
} else {
- rc = reset_long_term_buff(adapter,
- &rx_pool->long_term_buff);
+ rc = reset_long_term_buff(&rx_pool->long_term_buff);
}
if (rc)
@@ -561,13 +546,11 @@ static int init_rx_pools(struct net_device *netdev)
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_rx_pool *rx_pool;
int rxadd_subcrqs;
- u64 *size_array;
+ u64 buff_size;
int i, j;
- rxadd_subcrqs =
- be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
- size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
- be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
+ rxadd_subcrqs = adapter->num_active_rx_scrqs;
+ buff_size = adapter->cur_rx_buf_sz;
adapter->rx_pool = kcalloc(rxadd_subcrqs,
sizeof(struct ibmvnic_rx_pool),
@@ -585,11 +568,11 @@ static int init_rx_pools(struct net_device *netdev)
netdev_dbg(adapter->netdev,
"Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
i, adapter->req_rx_add_entries_per_subcrq,
- be64_to_cpu(size_array[i]));
+ buff_size);
rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
rx_pool->index = i;
- rx_pool->buff_size = be64_to_cpu(size_array[i]);
+ rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
rx_pool->active = 1;
rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
@@ -625,12 +608,11 @@ static int init_rx_pools(struct net_device *netdev)
return 0;
}
-static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
- struct ibmvnic_tx_pool *tx_pool)
+static int reset_one_tx_pool(struct ibmvnic_tx_pool *tx_pool)
{
int rc, i;
- rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
+ rc = reset_long_term_buff(&tx_pool->long_term_buff);
if (rc)
return rc;
@@ -655,12 +637,12 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter)
if (!adapter->tx_pool)
return -1;
- tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ tx_scrqs = adapter->num_active_tx_pools;
for (i = 0; i < tx_scrqs; i++) {
- rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
+ rc = reset_one_tx_pool(&adapter->tso_pool[i]);
if (rc)
return rc;
- rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
+ rc = reset_one_tx_pool(&adapter->tx_pool[i]);
if (rc)
return rc;
}
@@ -742,9 +724,10 @@ static int init_tx_pools(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int tx_subcrqs;
+ u64 buff_size;
int i, rc;
- tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ tx_subcrqs = adapter->num_active_tx_scrqs;
adapter->tx_pool = kcalloc(tx_subcrqs,
sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
if (!adapter->tx_pool)
@@ -758,9 +741,11 @@ static int init_tx_pools(struct net_device *netdev)
adapter->num_active_tx_pools = tx_subcrqs;
for (i = 0; i < tx_subcrqs; i++) {
+ buff_size = adapter->req_mtu + VLAN_HLEN;
+ buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
adapter->req_tx_entries_per_subcrq,
- adapter->req_mtu + VLAN_HLEN);
+ buff_size);
if (rc) {
release_tx_pools(adapter);
return rc;
@@ -846,7 +831,7 @@ static void release_napi(struct ibmvnic_adapter *adapter)
static int ibmvnic_login(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- unsigned long timeout = msecs_to_jiffies(30000);
+ unsigned long timeout = msecs_to_jiffies(20000);
int retry_count = 0;
int retries = 10;
bool retry;
@@ -862,10 +847,8 @@ static int ibmvnic_login(struct net_device *netdev)
adapter->init_done_rc = 0;
reinit_completion(&adapter->init_done);
rc = send_login(adapter);
- if (rc) {
- netdev_warn(netdev, "Unable to login\n");
+ if (rc)
return rc;
- }
if (!wait_for_completion_timeout(&adapter->init_done,
timeout)) {
@@ -894,7 +877,7 @@ static int ibmvnic_login(struct net_device *netdev)
"Received partial success, retrying...\n");
adapter->init_done_rc = 0;
reinit_completion(&adapter->init_done);
- send_cap_queries(adapter);
+ send_query_cap(adapter);
if (!wait_for_completion_timeout(&adapter->init_done,
timeout)) {
netdev_warn(netdev,
@@ -923,6 +906,7 @@ static int ibmvnic_login(struct net_device *netdev)
__ibmvnic_set_mac(netdev, adapter->mac_addr);
+ netdev_dbg(netdev, "[S:%d] Login succeeded\n", adapter->state);
return 0;
}
@@ -946,13 +930,14 @@ static void release_resources(struct ibmvnic_adapter *adapter)
release_rx_pools(adapter);
release_napi(adapter);
+ release_login_buffer(adapter);
release_login_rsp_buffer(adapter);
}
static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
struct net_device *netdev = adapter->netdev;
- unsigned long timeout = msecs_to_jiffies(30000);
+ unsigned long timeout = msecs_to_jiffies(20000);
union ibmvnic_crq crq;
bool resend;
int rc;
@@ -980,7 +965,7 @@ static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
return -1;
}
- if (adapter->init_done_rc == 1) {
+ if (adapter->init_done_rc == PARTIALSUCCESS) {
/* Partial success, delay and re-send */
mdelay(1000);
resend = true;
@@ -1125,7 +1110,7 @@ static int init_resources(struct ibmvnic_adapter *adapter)
if (rc)
return rc;
- send_map_query(adapter);
+ send_query_map(adapter);
rc = init_rx_pools(netdev);
if (rc)
@@ -1160,6 +1145,7 @@ static int __ibmvnic_open(struct net_device *netdev)
if (prev_state == VNIC_CLOSED)
enable_irq(adapter->tx_scrq[i]->irq);
enable_scrq_irq(adapter, adapter->tx_scrq[i]);
+ netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
}
rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
@@ -1197,18 +1183,26 @@ static int ibmvnic_open(struct net_device *netdev)
if (adapter->state != VNIC_CLOSED) {
rc = ibmvnic_login(netdev);
if (rc)
- return rc;
+ goto out;
rc = init_resources(adapter);
if (rc) {
netdev_err(netdev, "failed to initialize resources\n");
release_resources(adapter);
- return rc;
+ goto out;
}
}
rc = __ibmvnic_open(netdev);
+out:
+ /* If open fails due to a pending failover, set device state and
+ * return. Device operation will be handled by reset routine.
+ */
+ if (rc && adapter->failover_pending) {
+ adapter->state = VNIC_OPEN;
+ rc = 0;
+ }
return rc;
}
@@ -1333,10 +1327,8 @@ static int __ibmvnic_close(struct net_device *netdev)
adapter->state = VNIC_CLOSING;
rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
- if (rc)
- return rc;
adapter->state = VNIC_CLOSED;
- return 0;
+ return rc;
}
static int ibmvnic_close(struct net_device *netdev)
@@ -1344,6 +1336,10 @@ static int ibmvnic_close(struct net_device *netdev)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int rc;
+ netdev_dbg(netdev, "[S:%d FOP:%d FRR:%d] Closing\n",
+ adapter->state, adapter->failover_pending,
+ adapter->force_reset_recovery);
+
/* If device failover is pending, just set device state and return.
* Device operation will be handled by reset routine.
*/
@@ -1360,10 +1356,10 @@ static int ibmvnic_close(struct net_device *netdev)
/**
* build_hdr_data - creates L2/L3/L4 header data buffer
- * @hdr_field - bitfield determining needed headers
- * @skb - socket buffer
- * @hdr_len - array of header lengths
- * @tot_len - total length of data
+ * @hdr_field: bitfield determining needed headers
+ * @skb: socket buffer
+ * @hdr_len: array of header lengths
+ * @hdr_data: buffer to write the header to
*
* Reads hdr_field to determine which headers are needed by firmware.
* Builds a buffer containing these headers. Saves individual header
@@ -1420,11 +1416,11 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
/**
* create_hdr_descs - create header and header extension descriptors
- * @hdr_field - bitfield determining needed headers
- * @data - buffer containing header data
- * @len - length of data buffer
- * @hdr_len - array of individual header lengths
- * @scrq_arr - descriptor array
+ * @hdr_field: bitfield determining needed headers
+ * @hdr_data: buffer containing header data
+ * @len: length of data buffer
+ * @hdr_len: array of individual header lengths
+ * @scrq_arr: descriptor array
*
* Creates header and, if needed, header extension descriptors and
* places them in a descriptor array, scrq_arr
@@ -1472,26 +1468,26 @@ static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
/**
* build_hdr_descs_arr - build a header descriptor array
- * @skb - socket buffer
- * @num_entries - number of descriptors to be sent
- * @subcrq - first TX descriptor
- * @hdr_field - bit field determining which headers will be sent
+ * @txbuff: tx buffer
+ * @num_entries: number of descriptors to be sent
+ * @hdr_field: bit field determining which headers will be sent
*
* This function will build a TX descriptor array with applicable
* L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
*/
-static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
+static void build_hdr_descs_arr(struct sk_buff *skb,
+ union sub_crq *indir_arr,
int *num_entries, u8 hdr_field)
{
int hdr_len[3] = {0, 0, 0};
+ u8 hdr_data[140] = {0};
int tot_len;
- u8 *hdr_data = txbuff->hdr_data;
- tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
- txbuff->hdr_data);
+ tot_len = build_hdr_data(hdr_field, skb, hdr_len,
+ hdr_data);
*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
- txbuff->indir_arr + 1);
+ indir_arr + 1);
}
static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
@@ -1509,17 +1505,95 @@ static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
return 0;
}
+static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_sub_crq_queue *tx_scrq)
+{
+ struct ibmvnic_ind_xmit_queue *ind_bufp;
+ struct ibmvnic_tx_buff *tx_buff;
+ struct ibmvnic_tx_pool *tx_pool;
+ union sub_crq tx_scrq_entry;
+ int queue_num;
+ int entries;
+ int index;
+ int i;
+
+ ind_bufp = &tx_scrq->ind_buf;
+ entries = (u64)ind_bufp->index;
+ queue_num = tx_scrq->pool_index;
+
+ for (i = entries - 1; i >= 0; --i) {
+ tx_scrq_entry = ind_bufp->indir_arr[i];
+ if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC)
+ continue;
+ index = be32_to_cpu(tx_scrq_entry.v1.correlator);
+ if (index & IBMVNIC_TSO_POOL_MASK) {
+ tx_pool = &adapter->tso_pool[queue_num];
+ index &= ~IBMVNIC_TSO_POOL_MASK;
+ } else {
+ tx_pool = &adapter->tx_pool[queue_num];
+ }
+ tx_pool->free_map[tx_pool->consumer_index] = index;
+ tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
+ tx_pool->num_buffers - 1 :
+ tx_pool->consumer_index - 1;
+ tx_buff = &tx_pool->tx_buff[index];
+ adapter->netdev->stats.tx_packets--;
+ adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
+ adapter->tx_stats_buffers[queue_num].packets--;
+ adapter->tx_stats_buffers[queue_num].bytes -=
+ tx_buff->skb->len;
+ dev_kfree_skb_any(tx_buff->skb);
+ tx_buff->skb = NULL;
+ adapter->netdev->stats.tx_dropped++;
+ }
+ ind_bufp->index = 0;
+ if (atomic_sub_return(entries, &tx_scrq->used) <=
+ (adapter->req_tx_entries_per_subcrq / 2) &&
+ __netif_subqueue_stopped(adapter->netdev, queue_num)) {
+ netif_wake_subqueue(adapter->netdev, queue_num);
+ netdev_dbg(adapter->netdev, "Started queue %d\n",
+ queue_num);
+ }
+}
+
+static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_sub_crq_queue *tx_scrq)
+{
+ struct ibmvnic_ind_xmit_queue *ind_bufp;
+ u64 dma_addr;
+ u64 entries;
+ u64 handle;
+ int rc;
+
+ ind_bufp = &tx_scrq->ind_buf;
+ dma_addr = (u64)ind_bufp->indir_dma;
+ entries = (u64)ind_bufp->index;
+ handle = tx_scrq->handle;
+
+ if (!entries)
+ return 0;
+ rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
+ if (rc)
+ ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
+ else
+ ind_bufp->index = 0;
+ return 0;
+}
+
static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int queue_num = skb_get_queue_mapping(skb);
u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_ind_xmit_queue *ind_bufp;
struct ibmvnic_tx_buff *tx_buff = NULL;
struct ibmvnic_sub_crq_queue *tx_scrq;
struct ibmvnic_tx_pool *tx_pool;
unsigned int tx_send_failed = 0;
+ netdev_tx_t ret = NETDEV_TX_OK;
unsigned int tx_map_failed = 0;
+ union sub_crq indir_arr[16];
unsigned int tx_dropped = 0;
unsigned int tx_packets = 0;
unsigned int tx_bytes = 0;
@@ -1530,10 +1604,12 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned int offset;
int num_entries = 1;
unsigned char *dst;
- u64 *handle_array;
int index = 0;
u8 proto = 0;
- netdev_tx_t ret = NETDEV_TX_OK;
+
+ tx_scrq = adapter->tx_scrq[queue_num];
+ txq = netdev_get_tx_queue(netdev, queue_num);
+ ind_bufp = &tx_scrq->ind_buf;
if (test_bit(0, &adapter->resetting)) {
if (!netif_subqueue_stopped(netdev, skb))
@@ -1543,6 +1619,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_send_failed++;
tx_dropped++;
ret = NETDEV_TX_OK;
+ ibmvnic_tx_scrq_flush(adapter, tx_scrq);
goto out;
}
@@ -1550,6 +1627,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_dropped++;
tx_send_failed++;
ret = NETDEV_TX_OK;
+ ibmvnic_tx_scrq_flush(adapter, tx_scrq);
goto out;
}
if (skb_is_gso(skb))
@@ -1557,11 +1635,6 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
else
tx_pool = &adapter->tx_pool[queue_num];
- tx_scrq = adapter->tx_scrq[queue_num];
- txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
- handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
- be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
-
index = tx_pool->free_map[tx_pool->consumer_index];
if (index == IBMVNIC_INVALID_MAP) {
@@ -1569,6 +1642,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_send_failed++;
tx_dropped++;
ret = NETDEV_TX_OK;
+ ibmvnic_tx_scrq_flush(adapter, tx_scrq);
goto out;
}
@@ -1599,16 +1673,16 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
skb_copy_from_linear_data(skb, dst, skb->len);
}
+ /* post changes to long_term_buff *dst before VIOS accesses it */
+ dma_wmb();
+
tx_pool->consumer_index =
(tx_pool->consumer_index + 1) % tx_pool->num_buffers;
tx_buff = &tx_pool->tx_buff[index];
tx_buff->skb = skb;
- tx_buff->data_dma[0] = data_dma_addr;
- tx_buff->data_len[0] = skb->len;
tx_buff->index = index;
tx_buff->pool_index = queue_num;
- tx_buff->last_frag = true;
memset(&tx_crq, 0, sizeof(tx_crq));
tx_crq.v1.first = IBMVNIC_CRQ_CMD;
@@ -1653,55 +1727,29 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
hdrs += 2;
}
- /* determine if l2/3/4 headers are sent to firmware */
- if ((*hdrs >> 7) & 1) {
- build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
- tx_crq.v1.n_crq_elem = num_entries;
- tx_buff->num_entries = num_entries;
- tx_buff->indir_arr[0] = tx_crq;
- tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
- sizeof(tx_buff->indir_arr),
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, tx_buff->indir_dma)) {
- dev_kfree_skb_any(skb);
- tx_buff->skb = NULL;
- if (!firmware_has_feature(FW_FEATURE_CMO))
- dev_err(dev, "tx: unable to map descriptor array\n");
- tx_map_failed++;
- tx_dropped++;
- ret = NETDEV_TX_OK;
- goto tx_err_out;
- }
- lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
- (u64)tx_buff->indir_dma,
- (u64)num_entries);
- dma_unmap_single(dev, tx_buff->indir_dma,
- sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
- } else {
- tx_buff->num_entries = num_entries;
- lpar_rc = send_subcrq(adapter, handle_array[queue_num],
- &tx_crq);
- }
- if (lpar_rc != H_SUCCESS) {
- if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
- dev_err_ratelimited(dev, "tx: send failed\n");
- dev_kfree_skb_any(skb);
- tx_buff->skb = NULL;
- if (lpar_rc == H_CLOSED || adapter->failover_pending) {
- /* Disable TX and report carrier off if queue is closed
- * or pending failover.
- * Firmware guarantees that a signal will be sent to the
- * driver, triggering a reset or some other action.
- */
- netif_tx_stop_all_queues(netdev);
- netif_carrier_off(netdev);
- }
+ if ((*hdrs >> 7) & 1)
+ build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs);
- tx_send_failed++;
- tx_dropped++;
- ret = NETDEV_TX_OK;
- goto tx_err_out;
+ tx_crq.v1.n_crq_elem = num_entries;
+ tx_buff->num_entries = num_entries;
+ /* flush buffer if the current entry cannot fit */
+ if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+ if (lpar_rc != H_SUCCESS)
+ goto tx_flush_err;
+ }
+
+ indir_arr[0] = tx_crq;
+ memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
+ num_entries * sizeof(struct ibmvnic_generic_scrq));
+ ind_bufp->index += num_entries;
+ if (__netdev_tx_sent_queue(txq, skb->len,
+ netdev_xmit_more() &&
+ ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+ if (lpar_rc != H_SUCCESS)
+ goto tx_err;
}
if (atomic_add_return(num_entries, &tx_scrq->used)
@@ -1716,14 +1764,26 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
ret = NETDEV_TX_OK;
goto out;
-tx_err_out:
- /* roll back consumer index and map array*/
- if (tx_pool->consumer_index == 0)
- tx_pool->consumer_index =
- tx_pool->num_buffers - 1;
- else
- tx_pool->consumer_index--;
- tx_pool->free_map[tx_pool->consumer_index] = index;
+tx_flush_err:
+ dev_kfree_skb_any(skb);
+ tx_buff->skb = NULL;
+ tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
+ tx_pool->num_buffers - 1 :
+ tx_pool->consumer_index - 1;
+ tx_dropped++;
+tx_err:
+ if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
+ dev_err_ratelimited(dev, "tx: send failed\n");
+
+ if (lpar_rc == H_CLOSED || adapter->failover_pending) {
+ /* Disable TX and report carrier off if queue is closed
+ * or pending failover.
+ * Firmware guarantees that a signal will be sent to the
+ * driver, triggering a reset or some other action.
+ */
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+ }
out:
netdev->stats.tx_dropped += tx_dropped;
netdev->stats.tx_bytes += tx_bytes;
@@ -1828,94 +1888,18 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
int rc;
rc = 0;
- ether_addr_copy(adapter->mac_addr, addr->sa_data);
- if (adapter->state != VNIC_PROBED)
- rc = __ibmvnic_set_mac(netdev, addr->sa_data);
-
- return rc;
-}
-
-/**
- * do_change_param_reset returns zero if we are able to keep processing reset
- * events, or non-zero if we hit a fatal error and must halt.
- */
-static int do_change_param_reset(struct ibmvnic_adapter *adapter,
- struct ibmvnic_rwi *rwi,
- u32 reset_state)
-{
- struct net_device *netdev = adapter->netdev;
- int i, rc;
-
- netdev_dbg(adapter->netdev, "Change param resetting driver (%d)\n",
- rwi->reset_reason);
-
- netif_carrier_off(netdev);
- adapter->reset_reason = rwi->reset_reason;
-
- ibmvnic_cleanup(netdev);
-
- if (reset_state == VNIC_OPEN) {
- rc = __ibmvnic_close(netdev);
- if (rc)
- return rc;
- }
-
- release_resources(adapter);
- release_sub_crqs(adapter, 1);
- release_crq_queue(adapter);
-
- adapter->state = VNIC_PROBED;
-
- rc = init_crq_queue(adapter);
-
- if (rc) {
- netdev_err(adapter->netdev,
- "Couldn't initialize crq. rc=%d\n", rc);
- return rc;
- }
-
- rc = ibmvnic_reset_init(adapter);
- if (rc)
- return IBMVNIC_INIT_FAILED;
-
- /* If the adapter was in PROBE state prior to the reset,
- * exit here.
- */
- if (reset_state == VNIC_PROBED)
- return 0;
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
- rc = ibmvnic_login(netdev);
- if (rc) {
- adapter->state = reset_state;
- return rc;
+ if (adapter->state != VNIC_PROBED) {
+ ether_addr_copy(adapter->mac_addr, addr->sa_data);
+ rc = __ibmvnic_set_mac(netdev, addr->sa_data);
}
- rc = init_resources(adapter);
- if (rc)
- return rc;
-
- ibmvnic_disable_irqs(adapter);
-
- adapter->state = VNIC_CLOSED;
-
- if (reset_state == VNIC_CLOSED)
- return 0;
-
- rc = __ibmvnic_open(netdev);
- if (rc)
- return IBMVNIC_OPEN_FAILED;
-
- /* refresh device's multicast list */
- ibmvnic_set_multi(netdev);
-
- /* kick napi */
- for (i = 0; i < adapter->req_rx_queues; i++)
- napi_schedule(&adapter->napi[i]);
-
- return 0;
+ return rc;
}
-/**
+/*
* do_reset returns zero if we are able to keep processing reset events, or
* non-zero if we hit a fatal error and must halt.
*/
@@ -1927,13 +1911,24 @@ static int do_reset(struct ibmvnic_adapter *adapter,
struct net_device *netdev = adapter->netdev;
int i, rc;
- netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
- rwi->reset_reason);
+ netdev_dbg(adapter->netdev,
+ "[S:%d FOP:%d] Reset reason %d, reset_state %d\n",
+ adapter->state, adapter->failover_pending,
+ rwi->reset_reason, reset_state);
- rtnl_lock();
+ adapter->reset_reason = rwi->reset_reason;
+ /* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */
+ if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
+ rtnl_lock();
+
+ /* Now that we have the rtnl lock, clear any pending failover.
+ * This will ensure ibmvnic_open() has either completed or will
+ * block until failover is complete.
+ */
+ if (rwi->reset_reason == VNIC_RESET_FAILOVER)
+ adapter->failover_pending = false;
netif_carrier_off(netdev);
- adapter->reset_reason = rwi->reset_reason;
old_num_rx_queues = adapter->req_rx_queues;
old_num_tx_queues = adapter->req_tx_queues;
@@ -1945,25 +1940,37 @@ static int do_reset(struct ibmvnic_adapter *adapter,
if (reset_state == VNIC_OPEN &&
adapter->reset_reason != VNIC_RESET_MOBILITY &&
adapter->reset_reason != VNIC_RESET_FAILOVER) {
- adapter->state = VNIC_CLOSING;
+ if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
+ rc = __ibmvnic_close(netdev);
+ if (rc)
+ goto out;
+ } else {
+ adapter->state = VNIC_CLOSING;
- /* Release the RTNL lock before link state change and
- * re-acquire after the link state change to allow
- * linkwatch_event to grab the RTNL lock and run during
- * a reset.
- */
- rtnl_unlock();
- rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
- rtnl_lock();
- if (rc)
- goto out;
+ /* Release the RTNL lock before link state change and
+ * re-acquire after the link state change to allow
+ * linkwatch_event to grab the RTNL lock and run during
+ * a reset.
+ */
+ rtnl_unlock();
+ rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
+ rtnl_lock();
+ if (rc)
+ goto out;
- if (adapter->state != VNIC_CLOSING) {
- rc = -1;
- goto out;
+ if (adapter->state != VNIC_CLOSING) {
+ rc = -1;
+ goto out;
+ }
+
+ adapter->state = VNIC_CLOSED;
}
+ }
- adapter->state = VNIC_CLOSED;
+ if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
+ release_resources(adapter);
+ release_sub_crqs(adapter, 1);
+ release_crq_queue(adapter);
}
if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
@@ -1972,7 +1979,9 @@ static int do_reset(struct ibmvnic_adapter *adapter,
*/
adapter->state = VNIC_PROBED;
- if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
+ if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
+ rc = init_crq_queue(adapter);
+ } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
rc = ibmvnic_reenable_crq_queue(adapter);
release_sub_crqs(adapter, 1);
} else {
@@ -1992,7 +2001,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
goto out;
}
- rc = ibmvnic_reset_init(adapter);
+ rc = ibmvnic_reset_init(adapter, true);
if (rc) {
rc = IBMVNIC_INIT_FAILED;
goto out;
@@ -2007,12 +2016,14 @@ static int do_reset(struct ibmvnic_adapter *adapter,
}
rc = ibmvnic_login(netdev);
- if (rc) {
- adapter->state = reset_state;
+ if (rc)
goto out;
- }
- if (adapter->req_rx_queues != old_num_rx_queues ||
+ if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
+ rc = init_resources(adapter);
+ if (rc)
+ goto out;
+ } else if (adapter->req_rx_queues != old_num_rx_queues ||
adapter->req_tx_queues != old_num_tx_queues ||
adapter->req_rx_add_entries_per_subcrq !=
old_num_rx_slots ||
@@ -2034,14 +2045,14 @@ static int do_reset(struct ibmvnic_adapter *adapter,
rc = reset_tx_pools(adapter);
if (rc) {
netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
- rc);
+ rc);
goto out;
}
rc = reset_rx_pools(adapter);
if (rc) {
netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
- rc);
+ rc);
goto out;
}
}
@@ -2067,14 +2078,22 @@ static int do_reset(struct ibmvnic_adapter *adapter,
for (i = 0; i < adapter->req_rx_queues; i++)
napi_schedule(&adapter->napi[i]);
- if (adapter->reset_reason != VNIC_RESET_FAILOVER)
- call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
+ if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
+ adapter->reset_reason == VNIC_RESET_MOBILITY)
+ __netdev_notify_peers(netdev);
rc = 0;
out:
- rtnl_unlock();
+ /* restore the adapter state if reset failed */
+ if (rc)
+ adapter->state = reset_state;
+ /* requestor of VNIC_RESET_CHANGE_PARAM should still hold the rtnl lock */
+ if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
+ rtnl_unlock();
+ netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Reset done, rc %d\n",
+ adapter->state, adapter->failover_pending, rc);
return rc;
}
@@ -2105,40 +2124,47 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
if (rc) {
netdev_err(adapter->netdev,
"Couldn't initialize crq. rc=%d\n", rc);
- return rc;
+ goto out;
}
- rc = ibmvnic_init(adapter);
+ rc = ibmvnic_reset_init(adapter, false);
if (rc)
- return rc;
+ goto out;
/* If the adapter was in PROBE state prior to the reset,
* exit here.
*/
if (reset_state == VNIC_PROBED)
- return 0;
+ goto out;
rc = ibmvnic_login(netdev);
- if (rc) {
- adapter->state = VNIC_PROBED;
- return 0;
- }
+ if (rc)
+ goto out;
rc = init_resources(adapter);
if (rc)
- return rc;
+ goto out;
ibmvnic_disable_irqs(adapter);
adapter->state = VNIC_CLOSED;
if (reset_state == VNIC_CLOSED)
- return 0;
+ goto out;
rc = __ibmvnic_open(netdev);
- if (rc)
- return IBMVNIC_OPEN_FAILED;
+ if (rc) {
+ rc = IBMVNIC_OPEN_FAILED;
+ goto out;
+ }
- return 0;
+ __netdev_notify_peers(netdev);
+out:
+ /* restore adapter state if reset failed */
+ if (rc)
+ adapter->state = reset_state;
+ netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Hard reset done, rc %d\n",
+ adapter->state, adapter->failover_pending, rc);
+ return rc;
}
static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
@@ -2160,17 +2186,6 @@ static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
return rwi;
}
-static void free_all_rwi(struct ibmvnic_adapter *adapter)
-{
- struct ibmvnic_rwi *rwi;
-
- rwi = get_next_rwi(adapter);
- while (rwi) {
- kfree(rwi);
- rwi = get_next_rwi(adapter);
- }
-}
-
static void __ibmvnic_reset(struct work_struct *work)
{
struct ibmvnic_rwi *rwi;
@@ -2202,15 +2217,17 @@ static void __ibmvnic_reset(struct work_struct *work)
if (!saved_state) {
reset_state = adapter->state;
- adapter->state = VNIC_RESETTING;
saved_state = true;
}
spin_unlock_irqrestore(&adapter->state_lock, flags);
- if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
- /* CHANGE_PARAM requestor holds rtnl_lock */
- rc = do_change_param_reset(adapter, rwi, reset_state);
- } else if (adapter->force_reset_recovery) {
+ if (adapter->force_reset_recovery) {
+ /* Since we are doing a hard reset now, clear the
+ * failover_pending flag so we don't ignore any
+ * future MOBILITY or other resets.
+ */
+ adapter->failover_pending = false;
+
/* Transport event occurred during previous reset */
if (adapter->wait_for_reset) {
/* Previous was CHANGE_PARAM; caller locked */
@@ -2222,20 +2239,22 @@ static void __ibmvnic_reset(struct work_struct *work)
rc = do_hard_reset(adapter, rwi, reset_state);
rtnl_unlock();
}
- } else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
- adapter->from_passive_init)) {
+ if (rc) {
+ /* give backing device time to settle down */
+ netdev_dbg(adapter->netdev,
+ "[S:%d] Hard reset failed, waiting 60 secs\n",
+ adapter->state);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(60 * HZ);
+ }
+ } else {
rc = do_reset(adapter, rwi, reset_state);
}
kfree(rwi);
- if (rc == IBMVNIC_OPEN_FAILED) {
- if (list_empty(&adapter->rwi_list))
- adapter->state = VNIC_CLOSED;
- else
- adapter->state = reset_state;
- rc = 0;
- } else if (rc && rc != IBMVNIC_INIT_FAILED &&
- !adapter->force_reset_recovery)
- break;
+ adapter->last_reset_time = jiffies;
+
+ if (rc)
+ netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc);
rwi = get_next_rwi(adapter);
@@ -2249,12 +2268,12 @@ static void __ibmvnic_reset(struct work_struct *work)
complete(&adapter->reset_done);
}
- if (rc) {
- netdev_dbg(adapter->netdev, "Reset failed\n");
- free_all_rwi(adapter);
- }
-
clear_bit_unlock(0, &adapter->resetting);
+
+ netdev_dbg(adapter->netdev,
+ "[S:%d FRR:%d WFR:%d] Done processing resets\n",
+ adapter->state, adapter->force_reset_recovery,
+ adapter->wait_for_reset);
}
static void __ibmvnic_delayed_reset(struct work_struct *work)
@@ -2275,9 +2294,16 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
unsigned long flags;
int ret;
+ spin_lock_irqsave(&adapter->rwi_lock, flags);
+
+ /* If failover is pending don't schedule any other reset.
+ * Instead let the failover complete. If there is already
+ * a failover reset scheduled, we will detect and drop the
+ * duplicate reset when walking the ->rwi_list below.
+ */
if (adapter->state == VNIC_REMOVING ||
adapter->state == VNIC_REMOVED ||
- adapter->failover_pending) {
+ (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
ret = EBUSY;
netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
goto err;
@@ -2285,17 +2311,16 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
if (adapter->state == VNIC_PROBING) {
netdev_warn(netdev, "Adapter reset during probe\n");
- ret = adapter->init_done_rc = EAGAIN;
+ adapter->init_done_rc = EAGAIN;
+ ret = EAGAIN;
goto err;
}
- spin_lock_irqsave(&adapter->rwi_lock, flags);
-
list_for_each(entry, &adapter->rwi_list) {
tmp = list_entry(entry, struct ibmvnic_rwi, list);
if (tmp->reset_reason == reason) {
- netdev_dbg(netdev, "Skipping matching reset\n");
- spin_unlock_irqrestore(&adapter->rwi_lock, flags);
+ netdev_dbg(netdev, "Skipping matching reset, reason=%d\n",
+ reason);
ret = EBUSY;
goto err;
}
@@ -2303,8 +2328,6 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
if (!rwi) {
- spin_unlock_irqrestore(&adapter->rwi_lock, flags);
- ibmvnic_close(netdev);
ret = ENOMEM;
goto err;
}
@@ -2317,12 +2340,17 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
}
rwi->reset_reason = reason;
list_add_tail(&rwi->list, &adapter->rwi_list);
- spin_unlock_irqrestore(&adapter->rwi_lock, flags);
netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
schedule_work(&adapter->ibmvnic_reset);
- return 0;
+ ret = 0;
err:
+ /* ibmvnic_close() below can block, so drop the lock first */
+ spin_unlock_irqrestore(&adapter->rwi_lock, flags);
+
+ if (ret == ENOMEM)
+ ibmvnic_close(netdev);
+
return -ret;
}
@@ -2330,6 +2358,18 @@ static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ibmvnic_adapter *adapter = netdev_priv(dev);
+ if (test_bit(0, &adapter->resetting)) {
+ netdev_err(adapter->netdev,
+ "Adapter is resetting, skip timeout reset\n");
+ return;
+ }
+ /* Do not queue a reset until at least 5 seconds (the default watchdog
+ * timeout) after the last reset
+ */
+ if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) {
+ netdev_dbg(dev, "Not yet time to tx timeout.\n");
+ return;
+ }
ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
}
@@ -2348,10 +2388,17 @@ static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
- struct net_device *netdev = napi->dev;
- struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- int scrq_num = (int)(napi - adapter->napi);
- int frames_processed = 0;
+ struct ibmvnic_sub_crq_queue *rx_scrq;
+ struct ibmvnic_adapter *adapter;
+ struct net_device *netdev;
+ int frames_processed;
+ int scrq_num;
+
+ netdev = napi->dev;
+ adapter = netdev_priv(netdev);
+ scrq_num = (int)(napi - adapter->napi);
+ frames_processed = 0;
+ rx_scrq = adapter->rx_scrq[scrq_num];
restart_poll:
while (frames_processed < budget) {
@@ -2364,17 +2411,16 @@ restart_poll:
if (unlikely(test_bit(0, &adapter->resetting) &&
adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
- enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
+ enable_scrq_irq(adapter, rx_scrq);
napi_complete_done(napi, frames_processed);
return frames_processed;
}
- if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
+ if (!pending_scrq(adapter, rx_scrq))
break;
- next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
- rx_buff =
- (struct ibmvnic_rx_buff *)be64_to_cpu(next->
- rx_comp.correlator);
+ next = ibmvnic_next_scrq(adapter, rx_scrq);
+ rx_buff = (struct ibmvnic_rx_buff *)
+ be64_to_cpu(next->rx_comp.correlator);
/* do error checking */
if (next->rx_comp.rc) {
netdev_dbg(netdev, "rx buffer returned with rc %x\n",
@@ -2395,6 +2441,8 @@ restart_poll:
offset = be16_to_cpu(next->rx_comp.off_frame_data);
flags = next->rx_comp.flags;
skb = rx_buff->skb;
+ /* load long_term_buff before copying to skb */
+ dma_rmb();
skb_copy_to_linear_data(skb, rx_buff->data + offset,
length);
@@ -2428,16 +2476,20 @@ restart_poll:
frames_processed++;
}
- if (adapter->state != VNIC_CLOSING)
+ if (adapter->state != VNIC_CLOSING &&
+ ((atomic_read(&adapter->rx_pool[scrq_num].available) <
+ adapter->req_rx_add_entries_per_subcrq / 2) ||
+ frames_processed < budget))
replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
-
if (frames_processed < budget) {
- enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
- napi_complete_done(napi, frames_processed);
- if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
- napi_reschedule(napi)) {
- disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
- goto restart_poll;
+ if (napi_complete_done(napi, frames_processed)) {
+ enable_scrq_irq(adapter, rx_scrq);
+ if (pending_scrq(adapter, rx_scrq)) {
+ if (napi_reschedule(napi)) {
+ disable_scrq_irq(adapter, rx_scrq);
+ goto restart_poll;
+ }
+ }
}
}
return frames_processed;
@@ -2561,9 +2613,9 @@ static void ibmvnic_get_drvinfo(struct net_device *netdev,
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
- strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
- strlcpy(info->fw_version, adapter->fw_version,
+ strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
+ strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
+ strscpy(info->fw_version, adapter->fw_version,
sizeof(info->fw_version));
}
@@ -2675,7 +2727,6 @@ static int ibmvnic_set_channels(struct net_device *netdev,
channels->rx_count, channels->tx_count,
adapter->req_rx_queues, adapter->req_tx_queues);
return ret;
-
}
static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -2764,8 +2815,8 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
return;
for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
- data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
- ibmvnic_stats[i].offset));
+ data[i] = be64_to_cpu(IBMVNIC_GET_STAT
+ (adapter, ibmvnic_stats[i].offset));
for (j = 0; j < adapter->req_tx_queues; j++) {
data[i] = adapter->tx_stats_buffers[j].packets;
@@ -2805,6 +2856,7 @@ static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
return 0;
}
+
static const struct ethtool_ops ibmvnic_ethtool_ops = {
.get_drvinfo = ibmvnic_get_drvinfo,
.get_msglevel = ibmvnic_get_msglevel,
@@ -2829,15 +2881,26 @@ static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
{
int rc;
+ if (!scrq) {
+ netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
+ return -EINVAL;
+ }
+
if (scrq->irq) {
free_irq(scrq->irq, scrq);
irq_dispose_mapping(scrq->irq);
scrq->irq = 0;
}
- memset(scrq->msgs, 0, 4 * PAGE_SIZE);
- atomic_set(&scrq->used, 0);
- scrq->cur = 0;
+ if (scrq->msgs) {
+ memset(scrq->msgs, 0, 4 * PAGE_SIZE);
+ atomic_set(&scrq->used, 0);
+ scrq->cur = 0;
+ scrq->ind_buf.index = 0;
+ } else {
+ netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
+ return -EINVAL;
+ }
rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
@@ -2848,6 +2911,9 @@ static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
{
int i, rc;
+ if (!adapter->tx_scrq || !adapter->rx_scrq)
+ return -EINVAL;
+
for (i = 0; i < adapter->req_tx_queues; i++) {
netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
@@ -2889,6 +2955,11 @@ static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
}
}
+ dma_free_coherent(dev,
+ IBMVNIC_IND_ARR_SZ,
+ scrq->ind_buf.indir_arr,
+ scrq->ind_buf.indir_dma);
+
dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
DMA_BIDIRECTIONAL);
free_pages((unsigned long)scrq->msgs, 2);
@@ -2935,6 +3006,17 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
scrq->adapter = adapter;
scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
+ scrq->ind_buf.index = 0;
+
+ scrq->ind_buf.indir_arr =
+ dma_alloc_coherent(dev,
+ IBMVNIC_IND_ARR_SZ,
+ &scrq->ind_buf.indir_dma,
+ GFP_KERNEL);
+
+ if (!scrq->ind_buf.indir_arr)
+ goto indir_failed;
+
spin_lock_init(&scrq->lock);
netdev_dbg(adapter->netdev,
@@ -2943,6 +3025,12 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
return scrq;
+indir_failed:
+ do {
+ rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
+ adapter->vdev->unit_address,
+ scrq->crq_num);
+ } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_failed:
dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
DMA_BIDIRECTIONAL);
@@ -3038,7 +3126,7 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
/* H_EOI would fail with rc = H_FUNCTION when running
* in XIVE mode which is expected, but not an error.
*/
- if (rc && (rc != H_FUNCTION))
+ if (rc && rc != H_FUNCTION)
dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
val, rc);
}
@@ -3057,22 +3145,23 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_tx_pool *tx_pool;
struct ibmvnic_tx_buff *txbuff;
+ struct netdev_queue *txq;
union sub_crq *next;
int index;
- int i, j;
+ int i;
restart_loop:
while (pending_scrq(adapter, scrq)) {
unsigned int pool = scrq->pool_index;
int num_entries = 0;
+ int total_bytes = 0;
+ int num_packets = 0;
next = ibmvnic_next_scrq(adapter, scrq);
for (i = 0; i < next->tx_comp.num_comps; i++) {
- if (next->tx_comp.rcs[i]) {
+ if (next->tx_comp.rcs[i])
dev_err(dev, "tx error %x\n",
next->tx_comp.rcs[i]);
- continue;
- }
index = be32_to_cpu(next->tx_comp.correlators[i]);
if (index & IBMVNIC_TSO_POOL_MASK) {
tx_pool = &adapter->tso_pool[pool];
@@ -3082,21 +3171,16 @@ restart_loop:
}
txbuff = &tx_pool->tx_buff[index];
-
- for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
- if (!txbuff->data_dma[j])
- continue;
-
- txbuff->data_dma[j] = 0;
- }
-
- if (txbuff->last_frag) {
- dev_kfree_skb_any(txbuff->skb);
+ num_packets++;
+ num_entries += txbuff->num_entries;
+ if (txbuff->skb) {
+ total_bytes += txbuff->skb->len;
+ dev_consume_skb_irq(txbuff->skb);
txbuff->skb = NULL;
+ } else {
+ netdev_warn(adapter->netdev,
+ "TX completion received with NULL socket buffer\n");
}
-
- num_entries += txbuff->num_entries;
-
tx_pool->free_map[tx_pool->producer_index] = index;
tx_pool->producer_index =
(tx_pool->producer_index + 1) %
@@ -3105,6 +3189,9 @@ restart_loop:
/* remove tx_comp scrq*/
next->tx_comp.first = 0;
+ txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
+ netdev_tx_completed_queue(txq, num_packets, total_bytes);
+
if (atomic_sub_return(num_entries, &scrq->used) <=
(adapter->req_tx_entries_per_subcrq / 2) &&
__netif_subqueue_stopped(adapter->netdev,
@@ -3312,7 +3399,7 @@ tx_failed:
return -1;
}
-static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
+static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
{
struct device *dev = &adapter->vdev->dev;
union ibmvnic_crq crq;
@@ -3443,11 +3530,16 @@ static int pending_scrq(struct ibmvnic_adapter *adapter,
struct ibmvnic_sub_crq_queue *scrq)
{
union sub_crq *entry = &scrq->msgs[scrq->cur];
+ int rc;
- if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
- return 1;
- else
- return 0;
+ rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP);
+
+ /* Ensure that the SCRQ valid flag is loaded prior to loading the
+ * contents of the SCRQ descriptor
+ */
+ dma_rmb();
+
+ return rc;
}
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
@@ -3466,6 +3558,11 @@ static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
}
spin_unlock_irqrestore(&scrq->lock, flags);
+ /* Ensure that the SCRQ valid flag is loaded prior to loading the
+ * contents of the SCRQ descriptor
+ */
+ dma_rmb();
+
return entry;
}
@@ -3504,38 +3601,6 @@ static void print_subcrq_error(struct device *dev, int rc, const char *func)
}
}
-static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
- union sub_crq *sub_crq)
-{
- unsigned int ua = adapter->vdev->unit_address;
- struct device *dev = &adapter->vdev->dev;
- u64 *u64_crq = (u64 *)sub_crq;
- int rc;
-
- netdev_dbg(adapter->netdev,
- "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
- (unsigned long int)cpu_to_be64(remote_handle),
- (unsigned long int)cpu_to_be64(u64_crq[0]),
- (unsigned long int)cpu_to_be64(u64_crq[1]),
- (unsigned long int)cpu_to_be64(u64_crq[2]),
- (unsigned long int)cpu_to_be64(u64_crq[3]));
-
- /* Make sure the hypervisor sees the complete request */
- mb();
-
- rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
- cpu_to_be64(remote_handle),
- cpu_to_be64(u64_crq[0]),
- cpu_to_be64(u64_crq[1]),
- cpu_to_be64(u64_crq[2]),
- cpu_to_be64(u64_crq[3]));
-
- if (rc)
- print_subcrq_error(dev, rc, __func__);
-
- return rc;
-}
-
static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
u64 remote_handle, u64 ioba, u64 num_entries)
{
@@ -3544,7 +3609,7 @@ static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
int rc;
/* Make sure the hypervisor sees the complete request */
- mb();
+ dma_wmb();
rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
cpu_to_be64(remote_handle),
ioba, num_entries);
@@ -3564,8 +3629,8 @@ static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
int rc;
netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
- (unsigned long int)cpu_to_be64(u64_crq[0]),
- (unsigned long int)cpu_to_be64(u64_crq[1]));
+ (unsigned long)cpu_to_be64(u64_crq[0]),
+ (unsigned long)cpu_to_be64(u64_crq[1]));
if (!adapter->crq.active &&
crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
@@ -3574,7 +3639,7 @@ static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
}
/* Make sure the hypervisor sees the complete request */
- mb();
+ dma_wmb();
rc = plpar_hcall_norets(H_SEND_CRQ, ua,
cpu_to_be64(u64_crq[0]),
@@ -3583,8 +3648,7 @@ static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
if (rc) {
if (rc == H_CLOSED) {
dev_warn(dev, "CRQ Queue closed\n");
- if (test_bit(0, &adapter->resetting))
- ibmvnic_reset(adapter, VNIC_RESET_FATAL);
+ /* do not reset, report the failure, wait for passive init from server */
}
dev_warn(dev, "Send error (rc=%d)\n", rc);
@@ -3595,14 +3659,31 @@ static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
+ struct device *dev = &adapter->vdev->dev;
union ibmvnic_crq crq;
+ int retries = 100;
+ int rc;
memset(&crq, 0, sizeof(crq));
crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
crq.generic.cmd = IBMVNIC_CRQ_INIT;
netdev_dbg(adapter->netdev, "Sending CRQ init\n");
- return ibmvnic_send_crq(adapter, &crq);
+ do {
+ rc = ibmvnic_send_crq(adapter, &crq);
+ if (rc != H_CLOSED)
+ break;
+ retries--;
+ msleep(50);
+
+ } while (retries > 0);
+
+ if (rc) {
+ dev_err(dev, "Failed to send init request, rc = %d\n", rc);
+ return rc;
+ }
+
+ return 0;
}
static int send_version_xchg(struct ibmvnic_adapter *adapter)
@@ -3671,15 +3752,16 @@ static int send_login(struct ibmvnic_adapter *adapter)
struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
struct ibmvnic_login_buffer *login_buffer;
struct device *dev = &adapter->vdev->dev;
+ struct vnic_login_client_data *vlcd;
dma_addr_t rsp_buffer_token;
dma_addr_t buffer_token;
size_t rsp_buffer_size;
union ibmvnic_crq crq;
+ int client_data_len;
size_t buffer_size;
__be64 *tx_list_p;
__be64 *rx_list_p;
- int client_data_len;
- struct vnic_login_client_data *vlcd;
+ int rc;
int i;
if (!adapter->tx_scrq || !adapter->rx_scrq) {
@@ -3688,7 +3770,9 @@ static int send_login(struct ibmvnic_adapter *adapter)
return -1;
}
+ release_login_buffer(adapter);
release_login_rsp_buffer(adapter);
+
client_data_len = vnic_client_data_len(adapter);
buffer_size =
@@ -3751,15 +3835,15 @@ static int send_login(struct ibmvnic_adapter *adapter)
for (i = 0; i < adapter->req_tx_queues; i++) {
if (adapter->tx_scrq[i]) {
- tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
- crq_num);
+ tx_list_p[i] =
+ cpu_to_be64(adapter->tx_scrq[i]->crq_num);
}
}
for (i = 0; i < adapter->req_rx_queues; i++) {
if (adapter->rx_scrq[i]) {
- rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
- crq_num);
+ rx_list_p[i] =
+ cpu_to_be64(adapter->rx_scrq[i]->crq_num);
}
}
@@ -3775,7 +3859,7 @@ static int send_login(struct ibmvnic_adapter *adapter)
netdev_dbg(adapter->netdev, "Login Buffer:\n");
for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
netdev_dbg(adapter->netdev, "%016lx\n",
- ((unsigned long int *)(adapter->login_buf))[i]);
+ ((unsigned long *)(adapter->login_buf))[i]);
}
memset(&crq, 0, sizeof(crq));
@@ -3783,16 +3867,25 @@ static int send_login(struct ibmvnic_adapter *adapter)
crq.login.cmd = LOGIN;
crq.login.ioba = cpu_to_be32(buffer_token);
crq.login.len = cpu_to_be32(buffer_size);
- ibmvnic_send_crq(adapter, &crq);
+
+ adapter->login_pending = true;
+ rc = ibmvnic_send_crq(adapter, &crq);
+ if (rc) {
+ adapter->login_pending = false;
+ netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
+ goto buf_rsp_map_failed;
+ }
return 0;
buf_rsp_map_failed:
kfree(login_rsp_buffer);
+ adapter->login_rsp_buf = NULL;
buf_rsp_alloc_failed:
dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
kfree(login_buffer);
+ adapter->login_buf = NULL;
buf_alloc_failed:
return -1;
}
@@ -3822,7 +3915,7 @@ static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
return ibmvnic_send_crq(adapter, &crq);
}
-static void send_map_query(struct ibmvnic_adapter *adapter)
+static void send_query_map(struct ibmvnic_adapter *adapter)
{
union ibmvnic_crq crq;
@@ -3833,7 +3926,7 @@ static void send_map_query(struct ibmvnic_adapter *adapter)
}
/* Send a series of CRQs requesting various capabilities of the VNIC server */
-static void send_cap_queries(struct ibmvnic_adapter *adapter)
+static void send_query_cap(struct ibmvnic_adapter *adapter)
{
union ibmvnic_crq crq;
@@ -3950,6 +4043,113 @@ static void send_cap_queries(struct ibmvnic_adapter *adapter)
ibmvnic_send_crq(adapter, &crq);
}
+static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
+{
+ int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
+ struct device *dev = &adapter->vdev->dev;
+ union ibmvnic_crq crq;
+
+ adapter->ip_offload_tok =
+ dma_map_single(dev,
+ &adapter->ip_offload_buf,
+ buf_sz,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
+ if (!firmware_has_feature(FW_FEATURE_CMO))
+ dev_err(dev, "Couldn't map offload buffer\n");
+ return;
+ }
+
+ memset(&crq, 0, sizeof(crq));
+ crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
+ crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
+ crq.query_ip_offload.len = cpu_to_be32(buf_sz);
+ crq.query_ip_offload.ioba =
+ cpu_to_be32(adapter->ip_offload_tok);
+
+ ibmvnic_send_crq(adapter, &crq);
+}
+
+static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
+{
+ struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
+ struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
+ struct device *dev = &adapter->vdev->dev;
+ netdev_features_t old_hw_features = 0;
+ union ibmvnic_crq crq;
+
+ adapter->ip_offload_ctrl_tok =
+ dma_map_single(dev,
+ ctrl_buf,
+ sizeof(adapter->ip_offload_ctrl),
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
+ dev_err(dev, "Couldn't map ip offload control buffer\n");
+ return;
+ }
+
+ ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
+ ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
+ ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
+ ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
+ ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
+ ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
+ ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
+ ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
+ ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
+ ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;
+
+ /* large_rx disabled for now, additional features needed */
+ ctrl_buf->large_rx_ipv4 = 0;
+ ctrl_buf->large_rx_ipv6 = 0;
+
+ if (adapter->state != VNIC_PROBING) {
+ old_hw_features = adapter->netdev->hw_features;
+ adapter->netdev->hw_features = 0;
+ }
+
+ adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
+
+ if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
+ adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
+
+ if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
+ adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
+
+ if ((adapter->netdev->features &
+ (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
+ adapter->netdev->hw_features |= NETIF_F_RXCSUM;
+
+ if (buf->large_tx_ipv4)
+ adapter->netdev->hw_features |= NETIF_F_TSO;
+ if (buf->large_tx_ipv6)
+ adapter->netdev->hw_features |= NETIF_F_TSO6;
+
+ if (adapter->state == VNIC_PROBING) {
+ adapter->netdev->features |= adapter->netdev->hw_features;
+ } else if (old_hw_features != adapter->netdev->hw_features) {
+ netdev_features_t tmp = 0;
+
+ /* disable features no longer supported */
+ adapter->netdev->features &= adapter->netdev->hw_features;
+ /* turn on features now supported if previously enabled */
+ tmp = (old_hw_features ^ adapter->netdev->hw_features) &
+ adapter->netdev->hw_features;
+ adapter->netdev->features |=
+ tmp & adapter->netdev->wanted_features;
+ }
+
+ memset(&crq, 0, sizeof(crq));
+ crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
+ crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
+ crq.control_ip_offload.len =
+ cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
+ crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
+ ibmvnic_send_crq(adapter, &crq);
+}
+
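Note: the hw_features bookkeeping in send_control_ip_offload() is plain mask arithmetic: features the device no longer reports are masked out of netdev->features, and (old ^ new) & new isolates the newly reported bits, which are re-enabled only if user space had wanted them. A small self-contained illustration of the same arithmetic with made-up feature bits (user-space C, not kernel code):

#include <stdint.h>
#include <stdio.h>

#define F_CSUM	0x1ULL	/* hypothetical stand-ins for NETIF_F_* bits */
#define F_TSO	0x2ULL
#define F_TSO6	0x4ULL

int main(void)
{
	uint64_t old_hw   = F_CSUM | F_TSO;		/* previously advertised */
	uint64_t new_hw   = F_CSUM | F_TSO6;		/* advertised after reset */
	uint64_t features = F_CSUM | F_TSO;		/* currently enabled */
	uint64_t wanted   = F_CSUM | F_TSO | F_TSO6;	/* requested by user space */
	uint64_t added;

	/* drop features the device no longer supports */
	features &= new_hw;

	/* bits that changed and are supported now == newly added features */
	added = (old_hw ^ new_hw) & new_hw;

	/* turn newly supported features on only if they were wanted */
	features |= added & wanted;

	/* prints 0x5, i.e. F_CSUM | F_TSO6 */
	printf("features = %#llx\n", (unsigned long long)features);
	return 0;
}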
static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
struct ibmvnic_adapter *adapter)
{
@@ -4019,8 +4219,6 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
- netdev_features_t old_hw_features = 0;
- union ibmvnic_crq crq;
int i;
dma_unmap_single(dev, adapter->ip_offload_tok,
@@ -4029,7 +4227,7 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
netdev_dbg(adapter->netdev, "%016lx\n",
- ((unsigned long int *)(buf))[i]);
+ ((unsigned long *)(buf))[i]);
netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
@@ -4070,74 +4268,7 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
buf->off_ipv6_ext_headers);
- adapter->ip_offload_ctrl_tok =
- dma_map_single(dev, &adapter->ip_offload_ctrl,
- sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
-
- if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
- dev_err(dev, "Couldn't map ip offload control buffer\n");
- return;
- }
-
- adapter->ip_offload_ctrl.len =
- cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
- adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
- adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
- adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
- adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
- adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
- adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
- adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
- adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4;
- adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6;
-
- /* large_rx disabled for now, additional features needed */
- adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
- adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
-
- if (adapter->state != VNIC_PROBING) {
- old_hw_features = adapter->netdev->hw_features;
- adapter->netdev->hw_features = 0;
- }
-
- adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
-
- if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
- adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
-
- if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
- adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
-
- if ((adapter->netdev->features &
- (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
- adapter->netdev->hw_features |= NETIF_F_RXCSUM;
-
- if (buf->large_tx_ipv4)
- adapter->netdev->hw_features |= NETIF_F_TSO;
- if (buf->large_tx_ipv6)
- adapter->netdev->hw_features |= NETIF_F_TSO6;
-
- if (adapter->state == VNIC_PROBING) {
- adapter->netdev->features |= adapter->netdev->hw_features;
- } else if (old_hw_features != adapter->netdev->hw_features) {
- netdev_features_t tmp = 0;
-
- /* disable features no longer supported */
- adapter->netdev->features &= adapter->netdev->hw_features;
- /* turn on features now supported if previously enabled */
- tmp = (old_hw_features ^ adapter->netdev->hw_features) &
- adapter->netdev->hw_features;
- adapter->netdev->features |=
- tmp & adapter->netdev->wanted_features;
- }
-
- memset(&crq, 0, sizeof(crq));
- crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
- crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
- crq.control_ip_offload.len =
- cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
- crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
- ibmvnic_send_crq(adapter, &crq);
+ send_control_ip_offload(adapter);
}
static const char *ibmvnic_fw_err_cause(u16 cause)
@@ -4194,8 +4325,13 @@ static int handle_change_mac_rsp(union ibmvnic_crq *crq,
dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
goto out;
}
+ /* crq->change_mac_addr.mac_addr is the requested one
+ * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
+ */
ether_addr_copy(netdev->dev_addr,
&crq->change_mac_addr_rsp.mac_addr[0]);
+ ether_addr_copy(adapter->mac_addr,
+ &crq->change_mac_addr_rsp.mac_addr[0]);
out:
complete(&adapter->fw_done);
return rc;
@@ -4250,8 +4386,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
case PARTIALSUCCESS:
dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
*req_value,
- (long int)be64_to_cpu(crq->request_capability_rsp.
- number), name);
+ (long)be64_to_cpu(crq->request_capability_rsp.number),
+ name);
if (be16_to_cpu(crq->request_capability_rsp.capability) ==
REQ_MTU) {
@@ -4263,7 +4399,7 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
be64_to_cpu(crq->request_capability_rsp.number);
}
- ibmvnic_send_req_caps(adapter, 1);
+ send_request_cap(adapter, 1);
return;
default:
dev_err(dev, "Error %d in request cap rsp\n",
@@ -4273,30 +4409,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
/* Done receiving requested capabilities, query IP offload support */
if (atomic_read(&adapter->running_cap_crqs) == 0) {
- union ibmvnic_crq newcrq;
- int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
- struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
- &adapter->ip_offload_buf;
-
adapter->wait_capability = false;
- adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
- buf_sz,
- DMA_FROM_DEVICE);
-
- if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
- if (!firmware_has_feature(FW_FEATURE_CMO))
- dev_err(dev, "Couldn't map offload buffer\n");
- return;
- }
-
- memset(&newcrq, 0, sizeof(newcrq));
- newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
- newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
- newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
- newcrq.query_ip_offload.ioba =
- cpu_to_be32(adapter->ip_offload_tok);
-
- ibmvnic_send_crq(adapter, &newcrq);
+ send_query_ip_offload(adapter);
}
}
@@ -4307,8 +4421,22 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
struct net_device *netdev = adapter->netdev;
struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
struct ibmvnic_login_buffer *login = adapter->login_buf;
+ u64 *tx_handle_array;
+ u64 *rx_handle_array;
+ int num_tx_pools;
+ int num_rx_pools;
+ u64 *size_array;
int i;
+ /* CHECK: Test/set of login_pending does not need to be atomic
+ * because only ibmvnic_tasklet tests/clears this.
+ */
+ if (!adapter->login_pending) {
+ netdev_warn(netdev, "Ignoring unexpected login response\n");
+ return 0;
+ }
+ adapter->login_pending = false;
+
dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
DMA_TO_DEVICE);
dma_unmap_single(dev, adapter->login_rsp_buf_token,
@@ -4329,7 +4457,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
netdev_dbg(adapter->netdev, "%016lx\n",
- ((unsigned long int *)(adapter->login_rsp_buf))[i]);
+ ((unsigned long *)(adapter->login_rsp_buf))[i]);
}
/* Sanity checks */
@@ -4338,9 +4466,33 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
adapter->req_rx_add_queues !=
be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
- ibmvnic_remove(adapter->vdev);
+ ibmvnic_reset(adapter, VNIC_RESET_FATAL);
return -EIO;
}
+ size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+ be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
+ /* variable buffer sizes are not supported, so just read the
+ * first entry.
+ */
+ adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);
+
+ num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+
+ tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+ be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
+ rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+ be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));
+
+ for (i = 0; i < num_tx_pools; i++)
+ adapter->tx_scrq[i]->handle = tx_handle_array[i];
+
+ for (i = 0; i < num_rx_pools; i++)
+ adapter->rx_scrq[i]->handle = rx_handle_array[i];
+
+ adapter->num_active_tx_scrqs = num_tx_pools;
+ adapter->num_active_rx_scrqs = num_rx_pools;
+ release_login_rsp_buffer(adapter);
release_login_buffer(adapter);
complete(&adapter->init_done);
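Note: the login response is parsed the same way throughout this handler: the buffer carries 32-bit big-endian byte offsets to variable-length arrays of 64-bit big-endian values, so each array base is computed as buffer + be32_to_cpu(offset). A stand-alone sketch of that layout and pointer math (user-space C with <endian.h>; the field names are hypothetical):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical response layout: header with big-endian offsets,
 * followed by an array of big-endian 64-bit entries.
 */
struct rsp_hdr {
	uint32_t num_entries;	/* big-endian on the wire */
	uint32_t off_entries;	/* byte offset from start of buffer */
};

int main(void)
{
	uint8_t buf[64] = { 0 };
	struct rsp_hdr hdr = {
		.num_entries = htobe32(2),
		.off_entries = htobe32(16),
	};
	uint64_t raw, i;

	/* build a fake response: two entries starting at byte offset 16 */
	memcpy(buf, &hdr, sizeof(hdr));
	raw = htobe64(0x1111);
	memcpy(buf + 16, &raw, sizeof(raw));
	raw = htobe64(0x2222);
	memcpy(buf + 24, &raw, sizeof(raw));

	/* parse it: array base = buffer + be32-to-host(offset), values are be64 */
	memcpy(&hdr, buf, sizeof(hdr));
	for (i = 0; i < be32toh(hdr.num_entries); i++) {
		memcpy(&raw, buf + be32toh(hdr.off_entries) + i * sizeof(raw),
		       sizeof(raw));
		printf("entry[%llu] = %#llx\n",
		       (unsigned long long)i, (unsigned long long)be64toh(raw));
	}
	return 0;
}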
@@ -4550,7 +4702,7 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
out:
if (atomic_read(&adapter->running_cap_crqs) == 0) {
adapter->wait_capability = false;
- ibmvnic_send_req_caps(adapter, 0);
+ send_request_cap(adapter, 0);
}
}
@@ -4605,7 +4757,7 @@ static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
case IBMVNIC_1GBPS:
adapter->speed = SPEED_1000;
break;
- case IBMVNIC_10GBP:
+ case IBMVNIC_10GBPS:
adapter->speed = SPEED_10000;
break;
case IBMVNIC_25GBPS:
@@ -4620,6 +4772,9 @@ static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
case IBMVNIC_100GBPS:
adapter->speed = SPEED_100000;
break;
+ case IBMVNIC_200GBPS:
+ adapter->speed = SPEED_200000;
+ break;
default:
if (netif_carrier_ok(netdev))
netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
@@ -4645,20 +4800,39 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
long rc;
netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
- (unsigned long int)cpu_to_be64(u64_crq[0]),
- (unsigned long int)cpu_to_be64(u64_crq[1]));
+ (unsigned long)cpu_to_be64(u64_crq[0]),
+ (unsigned long)cpu_to_be64(u64_crq[1]));
switch (gen_crq->first) {
case IBMVNIC_CRQ_INIT_RSP:
switch (gen_crq->cmd) {
case IBMVNIC_CRQ_INIT:
dev_info(dev, "Partner initialized\n");
adapter->from_passive_init = true;
- adapter->failover_pending = false;
+ /* Discard any stale login responses from prev reset.
+ * CHECK: should we clear even on INIT_COMPLETE?
+ */
+ adapter->login_pending = false;
+
if (!completion_done(&adapter->init_done)) {
complete(&adapter->init_done);
adapter->init_done_rc = -EIO;
}
- ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
+ rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
+ if (rc && rc != -EBUSY) {
+ /* We were unable to schedule the failover
+ * reset either because the adapter was still
+ * probing (eg: during kexec) or we could not
+ * allocate memory. Clear the failover_pending
+ * flag since no one else will. We ignore
+ * EBUSY because it means either FAILOVER reset
+ * is already scheduled or the adapter is
+ * being removed.
+ */
+ netdev_err(netdev,
+ "Error %ld scheduling failover reset\n",
+ rc);
+ adapter->failover_pending = false;
+ }
break;
case IBMVNIC_CRQ_INIT_COMPLETE:
dev_info(dev, "Partner initialization complete\n");
@@ -4715,7 +4889,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
be16_to_cpu(crq->version_exchange_rsp.version);
dev_info(dev, "Partner protocol version is %d\n",
ibmvnic_version);
- send_cap_queries(adapter);
+ send_query_cap(adapter);
break;
case QUERY_CAPABILITY_RSP:
handle_query_cap_rsp(crq, adapter);
@@ -4812,9 +4986,9 @@ static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
return IRQ_HANDLED;
}
-static void ibmvnic_tasklet(void *data)
+static void ibmvnic_tasklet(struct tasklet_struct *t)
{
- struct ibmvnic_adapter *adapter = data;
+ struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
struct ibmvnic_crq_queue *queue = &adapter->crq;
union ibmvnic_crq *crq;
unsigned long flags;
@@ -4824,6 +4998,12 @@ static void ibmvnic_tasklet(void *data)
while (!done) {
/* Pull all the valid messages off the CRQ */
while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
+ /* This barrier makes sure ibmvnic_next_crq()'s
+ * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
+ * before ibmvnic_handle_crq()'s
+ * switch(gen_crq->first) and switch(gen_crq->cmd).
+ */
+ dma_rmb();
ibmvnic_handle_crq(crq, adapter);
crq->generic.first = 0;
}
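Note: the dma_rmb() above enforces the usual ordering rule for device-written rings: the flag marking an entry valid must be observed before any other field of that entry is loaded. A kernel-style sketch of the same consumer pattern over a hypothetical descriptor ring (made-up structures, only meaningful inside a kernel build):

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

struct foo_desc {		/* hypothetical device-written descriptor */
	u8 valid;		/* written by the device last */
	u8 cmd;
	u64 data;
};

static void foo_handle(u8 cmd, u64 data);	/* hypothetical handler */

static void foo_poll_ring(struct foo_desc *ring, int size, int *cur)
{
	struct foo_desc *desc = &ring[*cur];

	while (READ_ONCE(desc->valid)) {
		/* Make sure 'valid' is loaded before 'cmd' and 'data';
		 * without the barrier the CPU may read the payload early
		 * and observe contents from before the device marked the
		 * entry valid.
		 */
		dma_rmb();

		foo_handle(desc->cmd, desc->data);

		desc->valid = 0;
		*cur = (*cur + 1) % size;
		desc = &ring[*cur];
	}
}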
@@ -4870,6 +5050,9 @@ static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
/* Clean out the queue */
+ if (!crq->msgs)
+ return -EINVAL;
+
memset(crq->msgs, 0, PAGE_SIZE);
crq->cur = 0;
crq->active = false;
@@ -4949,8 +5132,7 @@ static int init_crq_queue(struct ibmvnic_adapter *adapter)
retrc = 0;
- tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
- (unsigned long)adapter);
+ tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet);
netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
@@ -4986,21 +5168,28 @@ map_failed:
return retrc;
}
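Note: the tasklet conversion above follows the generic tasklet_setup() pattern: the callback now receives the tasklet_struct itself and recovers the containing object with from_tasklet() (a container_of() wrapper) instead of an opaque unsigned long. A minimal sketch of that pattern with a made-up structure:

#include <linux/interrupt.h>

struct foo_adapter {		/* hypothetical driver context */
	struct tasklet_struct tasklet;
	int pending;
};

static void foo_tasklet(struct tasklet_struct *t)
{
	/* from_tasklet() is container_of() keyed on the tasklet member */
	struct foo_adapter *foo = from_tasklet(foo, t, tasklet);

	foo->pending = 0;	/* deferred work goes here */
}

static void foo_init(struct foo_adapter *foo)
{
	/* no (unsigned long) data argument, unlike the old tasklet_init() */
	tasklet_setup(&foo->tasklet, foo_tasklet);
}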
-static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
+static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
{
struct device *dev = &adapter->vdev->dev;
- unsigned long timeout = msecs_to_jiffies(30000);
+ unsigned long timeout = msecs_to_jiffies(20000);
u64 old_num_rx_queues, old_num_tx_queues;
int rc;
adapter->from_passive_init = false;
- old_num_rx_queues = adapter->req_rx_queues;
- old_num_tx_queues = adapter->req_tx_queues;
+ if (reset) {
+ old_num_rx_queues = adapter->req_rx_queues;
+ old_num_tx_queues = adapter->req_tx_queues;
+ reinit_completion(&adapter->init_done);
+ }
- reinit_completion(&adapter->init_done);
adapter->init_done_rc = 0;
- ibmvnic_send_crq_init(adapter);
+ rc = ibmvnic_send_crq_init(adapter);
+ if (rc) {
+ dev_err(dev, "Send crq init failed with error %d\n", rc);
+ return rc;
+ }
+
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
dev_err(dev, "Initialization sequence timed out\n");
return -1;
@@ -5017,7 +5206,8 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
return -1;
}
- if (test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
+ if (reset &&
+ test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
adapter->reset_reason != VNIC_RESET_MOBILITY) {
if (adapter->req_rx_queues != old_num_rx_queues ||
adapter->req_tx_queues != old_num_tx_queues) {
@@ -5045,48 +5235,6 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
return rc;
}
-static int ibmvnic_init(struct ibmvnic_adapter *adapter)
-{
- struct device *dev = &adapter->vdev->dev;
- unsigned long timeout = msecs_to_jiffies(30000);
- int rc;
-
- adapter->from_passive_init = false;
-
- adapter->init_done_rc = 0;
- ibmvnic_send_crq_init(adapter);
- if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
- dev_err(dev, "Initialization sequence timed out\n");
- return -1;
- }
-
- if (adapter->init_done_rc) {
- release_crq_queue(adapter);
- return adapter->init_done_rc;
- }
-
- if (adapter->from_passive_init) {
- adapter->state = VNIC_OPEN;
- adapter->from_passive_init = false;
- return -1;
- }
-
- rc = init_sub_crqs(adapter);
- if (rc) {
- dev_err(dev, "Initialization of sub crqs failed\n");
- release_crq_queue(adapter);
- return rc;
- }
-
- rc = init_sub_crq_irqs(adapter);
- if (rc) {
- dev_err(dev, "Failed to initialize sub crq irqs\n");
- release_crq_queue(adapter);
- }
-
- return rc;
-}
-
static struct device_attribute dev_attr_failover;
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
@@ -5118,6 +5266,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
dev_set_drvdata(&dev->dev, netdev);
adapter->vdev = dev;
adapter->netdev = netdev;
+ adapter->login_pending = false;
ether_addr_copy(adapter->mac_addr, mac_addr_p);
ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
@@ -5126,8 +5275,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
netdev->ethtool_ops = &ibmvnic_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->dev);
- spin_lock_init(&adapter->stats_lock);
-
INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
__ibmvnic_delayed_reset);
@@ -5149,7 +5296,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
goto ibmvnic_init_fail;
}
- rc = ibmvnic_init(adapter);
+ rc = ibmvnic_reset_init(adapter, false);
if (rc && rc != EAGAIN)
goto ibmvnic_init_fail;
} while (rc == EAGAIN);
@@ -5181,7 +5328,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->state = VNIC_PROBED;
adapter->wait_for_reset = false;
-
+ adapter->last_reset_time = jiffies;
return 0;
ibmvnic_register_fail:
@@ -5209,12 +5356,18 @@ static int ibmvnic_remove(struct vio_dev *dev)
unsigned long flags;
spin_lock_irqsave(&adapter->state_lock, flags);
- if (adapter->state == VNIC_RESETTING) {
- spin_unlock_irqrestore(&adapter->state_lock, flags);
- return -EBUSY;
- }
+ /* If ibmvnic_reset() is scheduling a reset, wait for it to
+ * finish. Then, set the state to REMOVING to prevent it from
+ * scheduling any more work and to have reset functions ignore
+ * any resets that have already been scheduled. Drop the lock
+ * after setting state, so __ibmvnic_reset() which is called
+ * from the flush_work() below, can make progress.
+ */
+	spin_lock(&adapter->rwi_lock);
	adapter->state = VNIC_REMOVING;
+	spin_unlock(&adapter->rwi_lock);
+
spin_unlock_irqrestore(&adapter->state_lock, flags);
flush_work(&adapter->ibmvnic_reset);
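Note: because state_lock is taken with spin_lock_irqsave(), interrupts are already disabled when rwi_lock is acquired, so the nested lock can be a plain spin_lock(); nesting a second irqsave into the same flags variable would overwrite the saved state and leave IRQs disabled after the outer unlock. A short sketch of the nesting rule with hypothetical locks:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(outer_lock);	/* hypothetical locks */
static DEFINE_SPINLOCK(inner_lock);

static void foo_remove_path(void)
{
	unsigned long flags;

	/* outer lock saves the interrupt state and disables IRQs */
	spin_lock_irqsave(&outer_lock, flags);

	/* IRQs are already off here, so the nested lock does not need
	 * its own irqsave; reusing 'flags' for a second irqsave would
	 * clobber the state restored by the outer unlock.
	 */
	spin_lock(&inner_lock);
	/* ... update state guarded by both locks ... */
	spin_unlock(&inner_lock);

	spin_unlock_irqrestore(&outer_lock, flags);
}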
@@ -5299,8 +5452,7 @@ static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
ret += 4 * PAGE_SIZE; /* the scrq message queue */
- for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
- i++)
+ for (i = 0; i < adapter->num_active_rx_pools; i++)
ret += adapter->rx_pool[i].size *
IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);