Diffstat (limited to 'drivers/net/wireless/intel/ipw2x00/ipw2200.c')
-rw-r--r--  drivers/net/wireless/intel/ipw2x00/ipw2200.c | 167
 1 file changed, 69 insertions(+), 98 deletions(-)
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index 60b5e08dd6df..ada6ce32c1f1 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -1945,12 +1945,11 @@ static void notify_wx_assoc_event(struct ipw_priv *priv)
wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
}
-static void ipw_irq_tasklet(unsigned long data)
+static void ipw_irq_tasklet(struct tasklet_struct *t)
{
- struct ipw_priv *priv = (struct ipw_priv *)data;
+ struct ipw_priv *priv = from_tasklet(priv, t, irq_tasklet);
u32 inta, inta_mask, handled = 0;
unsigned long flags;
- int rc = 0;
spin_lock_irqsave(&priv->irq_lock, flags);
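The hunk above, together with the tasklet_setup() change in ipw_setup_deferred_work() further down, moves the driver from the old "unsigned long data" tasklet callback to the type-safe API. A minimal sketch of that pattern, using illustrative names (my_priv, my_irq_tasklet) rather than anything from this driver:

#include <linux/interrupt.h>

struct my_priv {
	struct tasklet_struct irq_tasklet;
	/* ... rest of the driver state ... */
};

static void my_irq_tasklet(struct tasklet_struct *t)
{
	/* from_tasklet() is container_of() for tasklets: it recovers the
	 * structure embedding the tasklet_struct the core passed in. */
	struct my_priv *priv = from_tasklet(priv, t, irq_tasklet);

	/* ... deferred interrupt handling using priv ... */
}

static void my_setup(struct my_priv *priv)
{
	/* tasklet_setup() replaces tasklet_init(); no (unsigned long) cast. */
	tasklet_setup(&priv->irq_tasklet, my_irq_tasklet);
}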
@@ -1980,7 +1979,7 @@ static void ipw_irq_tasklet(unsigned long data)
if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
IPW_DEBUG_HC("Command completed.\n");
- rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
+ ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
priv->status &= ~STATUS_HCMD_ACTIVE;
wake_up_interruptible(&priv->wait_command_queue);
handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
@@ -1988,25 +1987,25 @@ static void ipw_irq_tasklet(unsigned long data)
if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
IPW_DEBUG_TX("TX_QUEUE_1\n");
- rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
+ ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
handled |= IPW_INTA_BIT_TX_QUEUE_1;
}
if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
IPW_DEBUG_TX("TX_QUEUE_2\n");
- rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
+ ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
handled |= IPW_INTA_BIT_TX_QUEUE_2;
}
if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
IPW_DEBUG_TX("TX_QUEUE_3\n");
- rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
+ ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
handled |= IPW_INTA_BIT_TX_QUEUE_3;
}
if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
IPW_DEBUG_TX("TX_QUEUE_4\n");
- rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
+ ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
handled |= IPW_INTA_BIT_TX_QUEUE_4;
}
@@ -2999,7 +2998,7 @@ static void ipw_remove_current_network(struct ipw_priv *priv)
spin_unlock_irqrestore(&priv->ieee->lock, flags);
}
-/**
+/*
* Check that card is still alive.
* Reads debug register from domain0.
* If card is present, pre-defined value should
@@ -3114,7 +3113,7 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
mdelay(1);
/* write ucode */
- /**
+ /*
* @bug
* Do NOT set indirect address register once and then
* store data to indirect data register in the loop.
@@ -3386,7 +3385,7 @@ struct ipw_fw {
__le32 boot_size;
__le32 ucode_size;
__le32 fw_size;
- u8 data[0];
+ u8 data[];
};
static int ipw_get_fw(struct ipw_priv *priv,
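The data[0] to data[] change above turns the trailing member of struct ipw_fw into a C99 flexible array member. A hedged sketch of how such a member is usually sized at allocation time; struct my_fw and my_fw_alloc() are illustrative stand-ins, not code from this driver:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_fw {
	__le32 fw_size;
	u8 data[];		/* flexible array member, formerly "u8 data[0]" */
};

static struct my_fw *my_fw_alloc(size_t payload)
{
	struct my_fw *fw;

	/* struct_size() computes sizeof(*fw) plus payload bytes of data[]
	 * with overflow checking, the idiomatic companion of data[]. */
	fw = kzalloc(struct_size(fw, data, payload), GFP_KERNEL);
	return fw;
}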
@@ -3442,8 +3441,9 @@ static void ipw_rx_queue_reset(struct ipw_priv *priv,
/* In the reset function, these buffers may have been allocated
* to an SKB, so we need to unmap and free potential storage */
if (rxq->pool[i].skb != NULL) {
- pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
- IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&priv->pci_dev->dev,
+ rxq->pool[i].dma_addr,
+ IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
dev_kfree_skb(rxq->pool[i].skb);
rxq->pool[i].skb = NULL;
}
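This hunk and the later Rx/Tx ones replace the legacy pci_map_single()/pci_unmap_single()/pci_dma_sync_single_for_cpu() calls and PCI_DMA_* direction flags with the generic DMA API operating on &pci_dev->dev. A minimal sketch of the streaming mapping pair under that API, with placeholder helpers (my_map_rx_buffer, my_unmap_rx_buffer) rather than driver functions:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int my_map_rx_buffer(struct pci_dev *pdev, void *buf, size_t len,
			    dma_addr_t *dma)
{
	/* pci_map_single(pdev, ..., PCI_DMA_FROMDEVICE) becomes
	 * dma_map_single(&pdev->dev, ..., DMA_FROM_DEVICE). */
	*dma = dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, *dma))
		return -ENOMEM;
	return 0;
}

static void my_unmap_rx_buffer(struct pci_dev *pdev, dma_addr_t dma,
			       size_t len)
{
	/* pci_unmap_single() likewise becomes dma_unmap_single() with the
	 * same struct device and the translated direction constant. */
	dma_unmap_single(&pdev->dev, dma, len, DMA_FROM_DEVICE);
}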
@@ -3666,7 +3666,7 @@ static int ipw_load(struct ipw_priv *priv)
return rc;
}
-/**
+/*
* DMA services
*
* Theory of operation
@@ -3689,11 +3689,11 @@ static int ipw_load(struct ipw_priv *priv)
* we only utilize the first data transmit queue (queue1).
*/
-/**
+/*
* Driver allocates buffers of this size for Rx
*/
-/**
+/*
* ipw_rx_queue_space - Return number of free slots available in queue.
*/
static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
@@ -3724,7 +3724,7 @@ static inline int ipw_queue_inc_wrap(int index, int n_bd)
return (++index == n_bd) ? 0 : index;
}
-/**
+/*
* Initialize common DMA queue structure
*
* @param q queue to init
@@ -3770,13 +3770,12 @@ static int ipw_queue_tx_init(struct ipw_priv *priv,
struct pci_dev *dev = priv->pci_dev;
q->txb = kmalloc_array(count, sizeof(q->txb[0]), GFP_KERNEL);
- if (!q->txb) {
- IPW_ERROR("vmalloc for auxiliary BD structures failed\n");
+ if (!q->txb)
return -ENOMEM;
- }
q->bd =
- pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
+ dma_alloc_coherent(&dev->dev, sizeof(q->bd[0]) * count,
+ &q->q.dma_addr, GFP_KERNEL);
if (!q->bd) {
IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
sizeof(q->bd[0]) * count);
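Here the BD ring moves from pci_alloc_consistent()/pci_free_consistent() to dma_alloc_coherent()/dma_free_coherent(), which take the struct device and an explicit gfp_t (the legacy wrapper implied GFP_ATOMIC). A short sketch under the assumption of a placeholder descriptor type struct my_bd:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

struct my_bd { u32 flags; };	/* stand-in descriptor layout */

static struct my_bd *my_ring_alloc(struct pci_dev *pdev, int n,
				   dma_addr_t *dma)
{
	/* Coherent memory for a descriptor ring shared with the NIC;
	 * GFP_KERNEL is fine because allocation happens in process
	 * context. */
	return dma_alloc_coherent(&pdev->dev, n * sizeof(struct my_bd),
				  dma, GFP_KERNEL);
}

static void my_ring_free(struct pci_dev *pdev, struct my_bd *bd, int n,
			 dma_addr_t dma)
{
	dma_free_coherent(&pdev->dev, n * sizeof(struct my_bd), bd, dma);
}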
@@ -3789,7 +3788,7 @@ static int ipw_queue_tx_init(struct ipw_priv *priv,
return 0;
}
-/**
+/*
* Free one TFD, those at index [txq->q.last_used].
* Do NOT advance any indexes
*
@@ -3812,15 +3811,16 @@ static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
IPW_ERROR("Too many chunks: %i\n",
le32_to_cpu(bd->u.data.num_chunks));
- /** @todo issue fatal error, it is quite serious situation */
+ /* @todo issue fatal error, it is quite serious situation */
return;
}
/* unmap chunks if any */
for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
- pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
+ dma_unmap_single(&dev->dev,
+ le32_to_cpu(bd->u.data.chunk_ptr[i]),
le16_to_cpu(bd->u.data.chunk_len[i]),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
if (txq->txb[txq->q.last_used]) {
libipw_txb_free(txq->txb[txq->q.last_used]);
txq->txb[txq->q.last_used] = NULL;
@@ -3828,7 +3828,7 @@ static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
}
}
-/**
+/*
* Deallocate DMA queue.
*
* Empty queue by removing and destroying all BD's.
@@ -3852,15 +3852,15 @@ static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
}
/* free buffers belonging to queue itself */
- pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
- q->dma_addr);
+ dma_free_coherent(&dev->dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
+ q->dma_addr);
kfree(txq->txb);
/* 0 fill whole structure */
memset(txq, 0, sizeof(*txq));
}
-/**
+/*
* Destroy all DMA queues and structures
*
* @param priv
@@ -4465,7 +4465,7 @@ static void handle_scan_event(struct ipw_priv *priv)
}
}
-/**
+/*
* Handle host notification packet.
* Called from interrupt routine
*/
@@ -4925,7 +4925,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
}
}
-/**
+/*
* Destroys all DMA structures and initialise them again
*
* @param priv
@@ -4934,7 +4934,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
static int ipw_queue_reset(struct ipw_priv *priv)
{
int rc = 0;
- /** @todo customize queue sizes */
+ /* @todo customize queue sizes */
int nTx = 64, nTxCmd = 8;
ipw_tx_queue_free(priv);
/* Tx CMD queue */
@@ -4990,7 +4990,7 @@ static int ipw_queue_reset(struct ipw_priv *priv)
return rc;
}
-/**
+/*
* Reclaim Tx queue entries no more used by NIC.
*
* When FW advances 'R' index, all entries between old and
@@ -5198,8 +5198,8 @@ static void ipw_rx_queue_replenish(void *data)
list_del(element);
rxb->dma_addr =
- pci_map_single(priv->pci_dev, rxb->skb->data,
- IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ dma_map_single(&priv->pci_dev->dev, rxb->skb->data,
+ IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
@@ -5232,8 +5232,9 @@ static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
if (rxq->pool[i].skb != NULL) {
- pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
- IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&priv->pci_dev->dev,
+ rxq->pool[i].dma_addr,
+ IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
dev_kfree_skb(rxq->pool[i].skb);
}
}
@@ -7042,23 +7043,22 @@ static int ipw_qos_association(struct ipw_priv *priv,
* off the network from the associated setting, adjust the QoS
* setting
*/
-static int ipw_qos_association_resp(struct ipw_priv *priv,
+static void ipw_qos_association_resp(struct ipw_priv *priv,
struct libipw_network *network)
{
- int ret = 0;
unsigned long flags;
u32 size = sizeof(struct libipw_qos_parameters);
int set_qos_param = 0;
if ((priv == NULL) || (network == NULL) ||
(priv->assoc_network == NULL))
- return ret;
+ return;
if (!(priv->status & STATUS_ASSOCIATED))
- return ret;
+ return;
if ((priv->ieee->iw_mode != IW_MODE_INFRA))
- return ret;
+ return;
spin_lock_irqsave(&priv->ieee->lock, flags);
if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
@@ -7088,8 +7088,6 @@ static int ipw_qos_association_resp(struct ipw_priv *priv,
if (set_qos_param == 1)
schedule_work(&priv->qos_activate);
-
- return ret;
}
static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
@@ -8249,12 +8247,12 @@ static void ipw_rx(struct ipw_priv *priv)
struct ipw_rx_mem_buffer *rxb;
struct ipw_rx_packet *pkt;
struct libipw_hdr_4addr *header;
- u32 r, w, i;
+ u32 r, i;
u8 network_packet;
u8 fill_rx = 0;
r = ipw_read32(priv, IPW_RX_READ_INDEX);
- w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
+ ipw_read32(priv, IPW_RX_WRITE_INDEX);
i = priv->rxq->read;
if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
@@ -8268,9 +8266,8 @@ static void ipw_rx(struct ipw_priv *priv)
}
priv->rxq->queue[i] = NULL;
- pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
- IPW_RX_BUF_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&priv->pci_dev->dev, rxb->dma_addr,
+ IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
pkt = (struct ipw_rx_packet *)rxb->skb->data;
IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
@@ -8422,8 +8419,8 @@ static void ipw_rx(struct ipw_priv *priv)
rxb->skb = NULL;
}
- pci_unmap_single(priv->pci_dev, rxb->dma_addr,
- IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&priv->pci_dev->dev, rxb->dma_addr,
+ IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
list_add_tail(&rxb->list, &priv->rxq->rx_used);
i = (i + 1) % RX_QUEUE_SIZE;
@@ -8448,7 +8445,7 @@ static void ipw_rx(struct ipw_priv *priv)
#define DEFAULT_SHORT_RETRY_LIMIT 7U
#define DEFAULT_LONG_RETRY_LIMIT 4U
-/**
+/*
* ipw_sw_reset
* @option: options to control different reset behaviour
* 0 = reset everything except the 'disable' module_param
@@ -10222,11 +10219,10 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb,
txb->fragments[i]->len - hdr_len);
tfd->u.data.chunk_ptr[i] =
- cpu_to_le32(pci_map_single
- (priv->pci_dev,
- txb->fragments[i]->data + hdr_len,
- txb->fragments[i]->len - hdr_len,
- PCI_DMA_TODEVICE));
+ cpu_to_le32(dma_map_single(&priv->pci_dev->dev,
+ txb->fragments[i]->data + hdr_len,
+ txb->fragments[i]->len - hdr_len,
+ DMA_TO_DEVICE));
tfd->u.data.chunk_len[i] =
cpu_to_le16(txb->fragments[i]->len - hdr_len);
}
@@ -10256,10 +10252,10 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb,
dev_kfree_skb_any(txb->fragments[i]);
txb->fragments[i] = skb;
tfd->u.data.chunk_ptr[i] =
- cpu_to_le32(pci_map_single
- (priv->pci_dev, skb->data,
- remaining_bytes,
- PCI_DMA_TODEVICE));
+ cpu_to_le32(dma_map_single(&priv->pci_dev->dev,
+ skb->data,
+ remaining_bytes,
+ DMA_TO_DEVICE));
le32_add_cpu(&tfd->u.data.num_chunks, 1);
}
@@ -10643,10 +10639,8 @@ static void ipw_bg_link_down(struct work_struct *work)
mutex_unlock(&priv->mutex);
}
-static int ipw_setup_deferred_work(struct ipw_priv *priv)
+static void ipw_setup_deferred_work(struct ipw_priv *priv)
{
- int ret = 0;
-
init_waitqueue_head(&priv->wait_command_queue);
init_waitqueue_head(&priv->wait_state);
@@ -10678,10 +10672,7 @@ static int ipw_setup_deferred_work(struct ipw_priv *priv)
INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
#endif /* CONFIG_IPW2200_QOS */
- tasklet_init(&priv->irq_tasklet,
- ipw_irq_tasklet, (unsigned long)priv);
-
- return ret;
+ tasklet_setup(&priv->irq_tasklet, ipw_irq_tasklet);
}
static void shim__set_security(struct net_device *dev,
@@ -11629,9 +11620,9 @@ static int ipw_pci_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
goto out_pci_disable_device;
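dma_set_mask() and dma_set_coherent_mask() replace the pci_set_*_dma_mask() wrappers here. Since both masks are identical, the kernel's dma_set_mask_and_coherent() helper could express this as one call; a hedged alternative sketch, not what this patch does:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int my_set_dma_masks(struct pci_dev *pdev)
{
	/* Single call covering both the streaming and coherent masks. */
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}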
@@ -11662,11 +11653,7 @@ static int ipw_pci_probe(struct pci_dev *pdev,
IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
- err = ipw_setup_deferred_work(priv);
- if (err) {
- IPW_ERROR("Unable to setup deferred work\n");
- goto out_iounmap;
- }
+ ipw_setup_deferred_work(priv);
ipw_sw_reset(priv, 1);
@@ -11851,10 +11838,9 @@ static void ipw_pci_remove(struct pci_dev *pdev)
free_firmware();
}
-#ifdef CONFIG_PM
-static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused ipw_pci_suspend(struct device *dev_d)
{
- struct ipw_priv *priv = pci_get_drvdata(pdev);
+ struct ipw_priv *priv = dev_get_drvdata(dev_d);
struct net_device *dev = priv->net_dev;
printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
@@ -11865,33 +11851,20 @@ static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
/* Remove the PRESENT state of the device */
netif_device_detach(dev);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
priv->suspend_at = ktime_get_boottime_seconds();
return 0;
}
-static int ipw_pci_resume(struct pci_dev *pdev)
+static int __maybe_unused ipw_pci_resume(struct device *dev_d)
{
+ struct pci_dev *pdev = to_pci_dev(dev_d);
struct ipw_priv *priv = pci_get_drvdata(pdev);
struct net_device *dev = priv->net_dev;
- int err;
u32 val;
printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
- pci_set_power_state(pdev, PCI_D0);
- err = pci_enable_device(pdev);
- if (err) {
- printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
- dev->name);
- return err;
- }
- pci_restore_state(pdev);
-
/*
* Suspend/Resume resets the PCI configuration space, so we have to
* re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
@@ -11913,7 +11886,6 @@ static int ipw_pci_resume(struct pci_dev *pdev)
return 0;
}
-#endif
static void ipw_pci_shutdown(struct pci_dev *pdev)
{
@@ -11925,16 +11897,15 @@ static void ipw_pci_shutdown(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+static SIMPLE_DEV_PM_OPS(ipw_pci_pm_ops, ipw_pci_suspend, ipw_pci_resume);
+
/* driver initialization stuff */
static struct pci_driver ipw_driver = {
.name = DRV_NAME,
.id_table = card_ids,
.probe = ipw_pci_probe,
.remove = ipw_pci_remove,
-#ifdef CONFIG_PM
- .suspend = ipw_pci_suspend,
- .resume = ipw_pci_resume,
-#endif
+ .driver.pm = &ipw_pci_pm_ops,
.shutdown = ipw_pci_shutdown,
};
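The final hunks drop the CONFIG_PM-guarded pci_driver .suspend/.resume hooks in favor of dev_pm_ops installed via SIMPLE_DEV_PM_OPS; the PCI core now saves and restores config space and handles power-state changes, so the callbacks shrink to driver-specific work. A minimal sketch of that shape for a hypothetical driver (my_suspend, my_resume and my_driver are placeholders):

#include <linux/pci.h>
#include <linux/pm.h>

static int __maybe_unused my_suspend(struct device *dev)
{
	/* Quiesce the device only; pci_save_state(), pci_disable_device()
	 * and pci_set_power_state() are handled by the PCI core around
	 * this callback. */
	return 0;
}

static int __maybe_unused my_resume(struct device *dev)
{
	/* Likewise no pci_enable_device()/pci_restore_state() needed. */
	return 0;
}

static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);

static struct pci_driver my_driver = {
	.name		= "my_pci_driver",
	/* .id_table, .probe, .remove, .shutdown as usual */
	.driver.pm	= &my_pm_ops,
};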