| field | value | date |
|---|---|---|
| author | Dmitry Torokhov <[email protected]> | 2023-08-30 16:06:38 -0700 |
| committer | Dmitry Torokhov <[email protected]> | 2023-08-30 16:06:38 -0700 |
| commit | 1ac731c529cd4d6adbce134754b51ff7d822b145 (patch) | |
| tree | 143ab3f35ca5f3b69f583c84e6964b17139c2ec1 /drivers/usb/mtu3/mtu3_qmu.c | |
| parent | 07b4c950f27bef0362dc6ad7ee713aab61d58149 (diff) | |
| parent | 54116d442e001e1b6bd482122043b1870998a1f3 (diff) | |
Merge branch 'next' into for-linus
Prepare input updates for 6.6 merge window.
Diffstat (limited to 'drivers/usb/mtu3/mtu3_qmu.c')
| mode | path | lines |
|---|---|---|
| -rw-r--r-- | drivers/usb/mtu3/mtu3_qmu.c | 44 |

1 file changed, 40 insertions, 4 deletions
diff --git a/drivers/usb/mtu3/mtu3_qmu.c b/drivers/usb/mtu3/mtu3_qmu.c
index a2fdab8b63b2..3d77408e3133 100644
--- a/drivers/usb/mtu3/mtu3_qmu.c
+++ b/drivers/usb/mtu3/mtu3_qmu.c
@@ -210,6 +210,7 @@ static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring)
 	return ring->enqueue;
 }
 
+/* @dequeue may be NULL if ring is unallocated or freed */
 static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
 {
 	if (ring->dequeue < ring->end)
@@ -221,7 +222,7 @@ static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
 }
 
 /* check if a ring is emtpy */
-static int gpd_ring_empty(struct mtu3_gpd_ring *ring)
+static bool gpd_ring_empty(struct mtu3_gpd_ring *ring)
 {
 	struct qmu_gpd *enq = ring->enqueue;
 	struct qmu_gpd *next;
@@ -467,6 +468,37 @@ static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
 }
 
 /*
+ * when rx error happens (except zlperr), QMU will stop, and RQCPR saves
+ * the GPD encountered error, Done irq will arise after resuming QMU again.
+ */
+static void qmu_error_rx(struct mtu3 *mtu, u8 epnum)
+{
+	struct mtu3_ep *mep = mtu->out_eps + epnum;
+	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+	struct qmu_gpd *gpd_current = NULL;
+	struct mtu3_request *mreq;
+	dma_addr_t cur_gpd_dma;
+
+	cur_gpd_dma = read_rxq_cur_addr(mtu->mac_base, epnum);
+	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);
+
+	mreq = next_request(mep);
+	if (!mreq || mreq->gpd != gpd_current) {
+		dev_err(mtu->dev, "no correct RX req is found\n");
+		return;
+	}
+
+	mreq->request.status = -EAGAIN;
+
+	/* by pass the current GDP */
+	gpd_current->dw0_info |= cpu_to_le32(GPD_FLAGS_BPS | GPD_FLAGS_HWO);
+	mtu3_qmu_resume(mep);
+
+	dev_dbg(mtu->dev, "%s EP%d, current=%p, req=%p\n",
+		__func__, epnum, gpd_current, mreq);
+}
+
+/*
  * NOTE: request list maybe is already empty as following case:
  * queue_tx --> qmu_interrupt(clear interrupt pending, schedule tasklet)-->
  * queue_tx --> process_tasklet(meanwhile, the second one is transferred,
@@ -491,7 +523,7 @@ static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
 	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
 		__func__, epnum, gpd, gpd_current, ring->enqueue);
 
-	while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {
+	while (gpd && gpd != gpd_current && !GET_GPD_HWO(gpd)) {
 
 		mreq = next_request(mep);
 
@@ -530,7 +562,7 @@ static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
 	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
 		__func__, epnum, gpd, gpd_current, ring->enqueue);
 
-	while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {
+	while (gpd && gpd != gpd_current && !GET_GPD_HWO(gpd)) {
 
 		mreq = next_request(mep);
 
@@ -571,14 +603,18 @@ static void qmu_exception_isr(struct mtu3 *mtu, u32 qmu_status)
 
 	if ((qmu_status & RXQ_CSERR_INT) || (qmu_status & RXQ_LENERR_INT)) {
 		errval = mtu3_readl(mbase, U3D_RQERRIR0);
+		mtu3_writel(mbase, U3D_RQERRIR0, errval);
+
 		for (i = 1; i < mtu->num_eps; i++) {
 			if (errval & QMU_RX_CS_ERR(i))
 				dev_err(mtu->dev, "Rx %d CS error!\n", i);
 
 			if (errval & QMU_RX_LEN_ERR(i))
 				dev_err(mtu->dev, "RX %d Length error\n", i);
+
+			if (errval & (QMU_RX_CS_ERR(i) | QMU_RX_LEN_ERR(i)))
+				qmu_error_rx(mtu, i);
 		}
-		mtu3_writel(mbase, U3D_RQERRIR0, errval);
 	}
 
 	if (qmu_status & RXQ_ZLPERR_INT) {