Diffstat (limited to 'drivers/net/ethernet/ibm/ibmvnic.c')
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c  167
1 file changed, 100 insertions(+), 67 deletions(-)
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 59536bd5cab1..bda7a2a9d211 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2602,6 +2602,7 @@ static void __ibmvnic_reset(struct work_struct *work)
 	struct ibmvnic_rwi *rwi;
 	unsigned long flags;
 	u32 reset_state;
+	int num_fails = 0;
 	int rc = 0;
 
 	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
@@ -2655,11 +2656,23 @@ static void __ibmvnic_reset(struct work_struct *work)
 				rc = do_hard_reset(adapter, rwi, reset_state);
 				rtnl_unlock();
 			}
-			if (rc) {
-				/* give backing device time to settle down */
+			if (rc)
+				num_fails++;
+			else
+				num_fails = 0;
+
+			/* If auto-priority-failover is enabled we can get
+			 * back to back failovers during resets, resulting
+			 * in at least two failed resets (from high-priority
+			 * backing device to low-priority one and then back)
+			 * If resets continue to fail beyond that, give the
+			 * adapter some time to settle down before retrying.
+			 */
+			if (num_fails >= 3) {
 				netdev_dbg(adapter->netdev,
-					   "[S:%s] Hard reset failed, waiting 60 secs\n",
-					   adapter_state_to_string(adapter->state));
+					   "[S:%s] Hard reset failed %d times, waiting 60 secs\n",
+					   adapter_state_to_string(adapter->state),
+					   num_fails);
 				set_current_state(TASK_UNINTERRUPTIBLE);
 				schedule_timeout(60 * HZ);
 			}
@@ -3844,11 +3857,25 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
 	struct device *dev = &adapter->vdev->dev;
 	union ibmvnic_crq crq;
 	int max_entries;
+	int cap_reqs;
+
+	/* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on
+	 * the PROMISC flag). Initialize this count upfront. When the tasklet
+	 * receives a response to all of these, it will send the next protocol
+	 * message (QUERY_IP_OFFLOAD).
+	 */
+	if (!(adapter->netdev->flags & IFF_PROMISC) ||
+	    adapter->promisc_supported)
+		cap_reqs = 7;
+	else
+		cap_reqs = 6;
 
 	if (!retry) {
 		/* Sub-CRQ entries are 32 byte long */
 		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
 
+		atomic_set(&adapter->running_cap_crqs, cap_reqs);
+
 		if (adapter->min_tx_entries_per_subcrq > entries_page ||
 		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
 			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
@@ -3909,44 +3936,45 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
 					adapter->opt_rx_comp_queues;
 
 		adapter->req_rx_add_queues = adapter->max_rx_add_queues;
+	} else {
+		atomic_add(cap_reqs, &adapter->running_cap_crqs);
 	}
-
 	memset(&crq, 0, sizeof(crq));
 	crq.request_capability.first = IBMVNIC_CRQ_CMD;
 	crq.request_capability.cmd = REQUEST_CAPABILITY;
 
 	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
 	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
-	atomic_inc(&adapter->running_cap_crqs);
+	cap_reqs--;
 	ibmvnic_send_crq(adapter, &crq);
 
 	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
 	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
-	atomic_inc(&adapter->running_cap_crqs);
+	cap_reqs--;
 	ibmvnic_send_crq(adapter, &crq);
 
 	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
 	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
-	atomic_inc(&adapter->running_cap_crqs);
+	cap_reqs--;
 	ibmvnic_send_crq(adapter, &crq);
 
 	crq.request_capability.capability =
 	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
 	crq.request_capability.number =
 	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
-	atomic_inc(&adapter->running_cap_crqs);
+	cap_reqs--;
 	ibmvnic_send_crq(adapter, &crq);
 
 	crq.request_capability.capability =
 	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
 	crq.request_capability.number =
 	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
-	atomic_inc(&adapter->running_cap_crqs);
+	cap_reqs--;
 	ibmvnic_send_crq(adapter, &crq);
 
 	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
 	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
-	atomic_inc(&adapter->running_cap_crqs);
+	cap_reqs--;
 	ibmvnic_send_crq(adapter, &crq);
 
 	if (adapter->netdev->flags & IFF_PROMISC) {
@@ -3954,16 +3982,21 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
 			crq.request_capability.capability =
 			    cpu_to_be16(PROMISC_REQUESTED);
 			crq.request_capability.number = cpu_to_be64(1);
-			atomic_inc(&adapter->running_cap_crqs);
+			cap_reqs--;
 			ibmvnic_send_crq(adapter, &crq);
 		}
 	} else {
 		crq.request_capability.capability =
 		    cpu_to_be16(PROMISC_REQUESTED);
 		crq.request_capability.number = cpu_to_be64(0);
-		atomic_inc(&adapter->running_cap_crqs);
+		cap_reqs--;
 		ibmvnic_send_crq(adapter, &crq);
 	}
+
+	/* Keep at end to catch any discrepancy between expected and actual
+	 * CRQs sent.
+	 */
+	WARN_ON(cap_reqs != 0);
 }
 
 static int pending_scrq(struct ibmvnic_adapter *adapter,
@@ -4357,118 +4390,132 @@ static void send_query_map(struct ibmvnic_adapter *adapter)
 static void send_query_cap(struct ibmvnic_adapter *adapter)
 {
 	union ibmvnic_crq crq;
+	int cap_reqs;
+
+	/* We send out 25 QUERY_CAPABILITY CRQs below.  Initialize this count
+	 * upfront. When the tasklet receives a response to all of these, it
+	 * can send out the next protocol messaage (REQUEST_CAPABILITY).
+	 */
+	cap_reqs = 25;
+
+	atomic_set(&adapter->running_cap_crqs, cap_reqs);
 
-	atomic_set(&adapter->running_cap_crqs, 0);
 	memset(&crq, 0, sizeof(crq));
 	crq.query_capability.first = IBMVNIC_CRQ_CMD;
 	crq.query_capability.cmd = QUERY_CAPABILITY;
 
 	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
-	atomic_inc(&adapter->running_cap_crqs);
 	ibmvnic_send_crq(adapter, &crq);
+	cap_reqs--;
 
 	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
-	atomic_inc(&adapter->running_cap_crqs);
 	ibmvnic_send_crq(adapter, &crq);
+	cap_reqs--;
 
 	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
-	atomic_inc(&adapter->running_cap_crqs);
 	ibmvnic_send_crq(adapter, &crq);
+	cap_reqs--;
 
 	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
-	atomic_inc(&adapter->running_cap_crqs);
 	ibmvnic_send_crq(adapter, &crq);
+	cap_reqs--;
 
 	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
-	atomic_inc(&adapter->running_cap_crqs);
 	ibmvnic_send_crq(adapter, &crq);
+	cap_reqs--;
 
 	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
-	atomic_inc(&adapter->running_cap_crqs);
 	ibmvnic_send_crq(adapter, &crq);
+	cap_reqs--;
 
 	crq.query_capability.capability =
 	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
-	atomic_inc(&adapter->running_cap_crqs);
 	ibmvnic_send_crq(adapter, &crq);
+	cap_reqs--;
 
 	crq.query_capability.capability =
 	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
-	atomic_inc(&adapter->running_cap_crqs);
 	ibmvnic_send_crq(adapter, &crq);
+	cap_reqs--;
 
 	crq.query_capability.capability =
 	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
-	atomic_inc(&adapter->running_cap_crqs);
 	ibmvnic_send_crq(adapter, &crq);
+	cap_reqs--;
 
 	crq.query_capability.capability =
 	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
-	atomic_inc(&adapter->running_cap_crqs);
 	ibmvnic_send_crq(adapter, &crq);
+	cap_reqs--;
 
 	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
-	atomic_inc(&adapter->running_cap_crqs);
 	ibmvnic_send_crq(adapter, &crq);
+	cap_reqs--;
 
 	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
-	atomic_inc(&adapter->running_cap_crqs);
 	ibmvnic_send_crq(adapter, &crq);
+	cap_reqs--;
 
 	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
-	atomic_inc(&adapter->running_cap_crqs);
 	ibmvnic_send_crq(adapter, &crq);
+	cap_reqs--;
 
 	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
-	atomic_inc(&adapter->running_cap_crqs);
 	ibmvnic_send_crq(adapter, &crq);
+	cap_reqs--;
 
 	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
-	atomic_inc(&adapter->running_cap_crqs);
 	ibmvnic_send_crq(adapter, &crq);
+	cap_reqs--;
 
 	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
-	atomic_inc(&adapter->running_cap_crqs);
 	ibmvnic_send_crq(adapter, &crq);
+	cap_reqs--;
 
 	crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
-	atomic_inc(&adapter->running_cap_crqs);
 	ibmvnic_send_crq(adapter, &crq);
+	cap_reqs--;
 
 	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
-	atomic_inc(&adapter->running_cap_crqs);
 	ibmvnic_send_crq(adapter, &crq);
+	cap_reqs--;
 
 	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
-	atomic_inc(&adapter->running_cap_crqs);
 	ibmvnic_send_crq(adapter, &crq);
+	cap_reqs--;
 
 	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
-	atomic_inc(&adapter->running_cap_crqs);
 	ibmvnic_send_crq(adapter, &crq);
+	cap_reqs--;
 
 	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
-	atomic_inc(&adapter->running_cap_crqs);
 	ibmvnic_send_crq(adapter, &crq);
+	cap_reqs--;
 
 	crq.query_capability.capability =
 			cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
-	atomic_inc(&adapter->running_cap_crqs);
 	ibmvnic_send_crq(adapter, &crq);
+	cap_reqs--;
 
 	crq.query_capability.capability =
 			cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
-	atomic_inc(&adapter->running_cap_crqs);
 	ibmvnic_send_crq(adapter, &crq);
+	cap_reqs--;
 
 	crq.query_capability.capability =
 			cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
-	atomic_inc(&adapter->running_cap_crqs);
 	ibmvnic_send_crq(adapter, &crq);
+	cap_reqs--;
 
 	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
-	atomic_inc(&adapter->running_cap_crqs);
+
 	ibmvnic_send_crq(adapter, &crq);
+	cap_reqs--;
+
+	/* Keep at end to catch any discrepancy between expected and actual
+	 * CRQs sent.
+	 */
+	WARN_ON(cap_reqs != 0);
 }
 
 static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
@@ -4772,6 +4819,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
 	char *name;
 
 	atomic_dec(&adapter->running_cap_crqs);
+	netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n",
+		   atomic_read(&adapter->running_cap_crqs));
 	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
 	case REQ_TX_QUEUES:
 		req_value = &adapter->req_tx_queues;
@@ -4835,10 +4884,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
 	}
 
 	/* Done receiving requested capabilities, query IP offload support */
-	if (atomic_read(&adapter->running_cap_crqs) == 0) {
-		adapter->wait_capability = false;
+	if (atomic_read(&adapter->running_cap_crqs) == 0)
 		send_query_ip_offload(adapter);
-	}
 }
 
 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
@@ -5136,10 +5183,8 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
 	}
 
 out:
-	if (atomic_read(&adapter->running_cap_crqs) == 0) {
-		adapter->wait_capability = false;
+	if (atomic_read(&adapter->running_cap_crqs) == 0)
 		send_request_cap(adapter, 0);
-	}
 }
 
 static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
@@ -5435,33 +5480,21 @@ static void ibmvnic_tasklet(struct tasklet_struct *t)
 	struct ibmvnic_crq_queue *queue = &adapter->crq;
 	union ibmvnic_crq *crq;
 	unsigned long flags;
-	bool done = false;
 
 	spin_lock_irqsave(&queue->lock, flags);
-	while (!done) {
-		/* Pull all the valid messages off the CRQ */
-		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
-			/* This barrier makes sure ibmvnic_next_crq()'s
-			 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
-			 * before ibmvnic_handle_crq()'s
-			 * switch(gen_crq->first) and switch(gen_crq->cmd).
-			 */
-			dma_rmb();
-			ibmvnic_handle_crq(crq, adapter);
-			crq->generic.first = 0;
-		}
 
-		/* remain in tasklet until all
-		 * capabilities responses are received
+	/* Pull all the valid messages off the CRQ */
+	while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
+		/* This barrier makes sure ibmvnic_next_crq()'s
+		 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
+		 * before ibmvnic_handle_crq()'s
+		 * switch(gen_crq->first) and switch(gen_crq->cmd).
 		 */
-		if (!adapter->wait_capability)
-			done = true;
+		dma_rmb();
+		ibmvnic_handle_crq(crq, adapter);
+		crq->generic.first = 0;
 	}
-	/* if capabilities CRQ's were sent in this tasklet, the following
-	 * tasklet must wait until all responses are received
-	 */
-	if (atomic_read(&adapter->running_cap_crqs) != 0)
-		adapter->wait_capability = true;
+
 	spin_unlock_irqrestore(&queue->lock, flags);
 }
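
The running_cap_crqs changes above all follow one pattern: publish the full count of expected responses before the first request goes out, keep a local countdown of what was actually sent, and warn if the two ever disagree. A minimal user-space sketch of that counting scheme follows; it is not the driver code, and names such as demo_ctx, fake_send(), send_all() and handle_response() are hypothetical stand-ins for the CRQ send and response paths.

/* Standalone sketch of the "count expected responses up front" scheme.
 * This is NOT the ibmvnic code; demo_ctx, fake_send(), send_all() and
 * handle_response() are hypothetical names used only for illustration.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

struct demo_ctx {
	atomic_int running;	/* outstanding requests, like running_cap_crqs */
};

static void fake_send(struct demo_ctx *ctx, int cap)
{
	(void)ctx;
	printf("sent capability request %d\n", cap);
}

static void send_all(struct demo_ctx *ctx, int promisc)
{
	int expected = promisc ? 7 : 6;	/* mirrors the 6-or-7 CRQ count */
	int to_send = expected;		/* local countdown, like cap_reqs */
	int cap;

	/* Publish the full count before the first send; responses may
	 * arrive (and decrement) while this loop is still running.
	 */
	atomic_store(&ctx->running, expected);

	for (cap = 0; cap < expected; cap++) {
		fake_send(ctx, cap);
		to_send--;
	}

	/* Catch a mismatch between advertised and actual sends,
	 * analogous to WARN_ON(cap_reqs != 0).
	 */
	assert(to_send == 0);
}

/* Response side: only the final response triggers the next step. */
static void handle_response(struct demo_ctx *ctx)
{
	if (atomic_fetch_sub(&ctx->running, 1) == 1)
		printf("all responses received, start next protocol step\n");
}

int main(void)
{
	struct demo_ctx ctx;
	int i;

	atomic_init(&ctx.running, 0);
	send_all(&ctx, 1);
	for (i = 0; i < 7; i++)
		handle_response(&ctx);
	return 0;
}

Because the shared counter is set to its final value up front and only ever decremented, it can never pass through a transient zero while requests are still being issued, so the last-response check fires exactly once; that is what lets the patch drop the wait_capability spinning in the tasklet.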