Diffstat (limited to 'drivers/net/ethernet/sun/cassini.c')
-rw-r--r--	drivers/net/ethernet/sun/cassini.c	| 143
1 file changed, 66 insertions(+), 77 deletions(-)
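
The diff below folds together several cleanups to the Cassini driver: the deprecated pci_* DMA wrappers are replaced by the generic DMA API (dma_set_mask(), dma_alloc_coherent(), dma_map_page() and friends, with the PCI_DMA_* direction flags becoming DMA_*); the legacy #ifdef CONFIG_PM suspend/resume hooks are converted to generic power management via SIMPLE_DEV_PM_OPS(); the unused cas_lock_all()/cas_unlock_all() helpers are dropped; the empty RX_USED_ADD()/RX_USED_SET() stubs become do { } while(0) so they stay safe inside if/else bodies; and uninitialized_var() plus the old "/* Fallthrough... */" comment give way to a plain declaration and the fallthrough; keyword. Two illustrative sketches of the DMA and PM conversion patterns follow the diff.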
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index f1c8615ab6f0..9ff894ba8d3e 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -237,12 +237,6 @@ static inline void cas_lock_tx(struct cas *cp)
 		spin_lock_nested(&cp->tx_lock[i], i);
 }
 
-static inline void cas_lock_all(struct cas *cp)
-{
-	spin_lock_irq(&cp->lock);
-	cas_lock_tx(cp);
-}
-
 /* WTZ: QA was finding deadlock problems with the previous
  * versions after long test runs with multiple cards per machine.
  * See if replacing cas_lock_all with safer versions helps. The
@@ -266,12 +260,6 @@ static inline void cas_unlock_tx(struct cas *cp)
 		spin_unlock(&cp->tx_lock[i - 1]);
 }
 
-static inline void cas_unlock_all(struct cas *cp)
-{
-	cas_unlock_tx(cp);
-	spin_unlock_irq(&cp->lock);
-}
-
 #define cas_unlock_all_restore(cp, flags) \
 do { \
 	struct cas *xxxcp = (cp); \
@@ -455,8 +443,8 @@ static void cas_phy_powerdown(struct cas *cp)
 /* cp->lock held. note: the last put_page will free the buffer */
 static int cas_page_free(struct cas *cp, cas_page_t *page)
 {
-	pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
-		       PCI_DMA_FROMDEVICE);
+	dma_unmap_page(&cp->pdev->dev, page->dma_addr, cp->page_size,
+		       DMA_FROM_DEVICE);
 	__free_pages(page->buffer, cp->page_order);
 	kfree(page);
 	return 0;
@@ -466,8 +454,8 @@ static int cas_page_free(struct cas *cp, cas_page_t *page)
 #define RX_USED_ADD(x, y)	((x)->used += (y))
 #define RX_USED_SET(x, y)	((x)->used  = (y))
 #else
-#define RX_USED_ADD(x, y)
-#define RX_USED_SET(x, y)
+#define RX_USED_ADD(x, y) do { } while(0)
+#define RX_USED_SET(x, y) do { } while(0)
 #endif
 
 /* local page allocation routines for the receive buffers. jumbo pages
@@ -486,8 +474,8 @@ static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
 	page->buffer = alloc_pages(flags, cp->page_order);
 	if (!page->buffer)
 		goto page_err;
-	page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
-				      cp->page_size, PCI_DMA_FROMDEVICE);
+	page->dma_addr = dma_map_page(&cp->pdev->dev, page->buffer, 0,
+				      cp->page_size, DMA_FROM_DEVICE);
 	return page;
 
 page_err:
@@ -1875,8 +1863,8 @@ static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
 			daddr = le64_to_cpu(txd->buffer);
 			dlen = CAS_VAL(TX_DESC_BUFLEN,
 				       le64_to_cpu(txd->control));
-			pci_unmap_page(cp->pdev, daddr, dlen,
-				       PCI_DMA_TODEVICE);
+			dma_unmap_page(&cp->pdev->dev, daddr, dlen,
+				       DMA_TO_DEVICE);
 			entry = TX_DESC_NEXT(ring, entry);
 
 			/* tiny buffer may follow */
@@ -1969,12 +1957,13 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 		i = hlen;
 		if (!dlen) /* attach FCS */
 			i += cp->crc_size;
-		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
-					    PCI_DMA_FROMDEVICE);
+		dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
+					i, DMA_FROM_DEVICE);
 		addr = cas_page_map(page->buffer);
 		memcpy(p, addr + off, i);
-		pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
-					       PCI_DMA_FROMDEVICE);
+		dma_sync_single_for_device(&cp->pdev->dev,
+					   page->dma_addr + off, i,
+					   DMA_FROM_DEVICE);
 		cas_page_unmap(addr);
 		RX_USED_ADD(page, 0x100);
 		p += hlen;
@@ -2000,16 +1989,17 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 		i = hlen;
 		if (i == dlen)  /* attach FCS */
 			i += cp->crc_size;
-		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
-					    PCI_DMA_FROMDEVICE);
+		dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
+					i, DMA_FROM_DEVICE);
 
 		/* make sure we always copy a header */
 		swivel = 0;
 		if (p == (char *) skb->data) { /* not split */
 			addr = cas_page_map(page->buffer);
 			memcpy(p, addr + off, RX_COPY_MIN);
-			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
-						       PCI_DMA_FROMDEVICE);
+			dma_sync_single_for_device(&cp->pdev->dev,
+						   page->dma_addr + off, i,
+						   DMA_FROM_DEVICE);
 			cas_page_unmap(addr);
 			off += RX_COPY_MIN;
 			swivel = RX_COPY_MIN;
@@ -2036,12 +2026,14 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 
 			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
 			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
-			pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
-						    hlen + cp->crc_size,
-						    PCI_DMA_FROMDEVICE);
-			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
-						       hlen + cp->crc_size,
-						       PCI_DMA_FROMDEVICE);
+			dma_sync_single_for_cpu(&cp->pdev->dev,
+						page->dma_addr,
+						hlen + cp->crc_size,
+						DMA_FROM_DEVICE);
+			dma_sync_single_for_device(&cp->pdev->dev,
+						   page->dma_addr,
+						   hlen + cp->crc_size,
+						   DMA_FROM_DEVICE);
 
 			skb_shinfo(skb)->nr_frags++;
 			skb->data_len += hlen;
@@ -2078,12 +2070,13 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 		i = hlen;
 		if (i == dlen) /* attach FCS */
 			i += cp->crc_size;
-		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
-					    PCI_DMA_FROMDEVICE);
+		dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
+					i, DMA_FROM_DEVICE);
 		addr = cas_page_map(page->buffer);
 		memcpy(p, addr + off, i);
-		pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
-					       PCI_DMA_FROMDEVICE);
+		dma_sync_single_for_device(&cp->pdev->dev,
+					   page->dma_addr + off, i,
+					   DMA_FROM_DEVICE);
 		cas_page_unmap(addr);
 		if (p == (char *) skb->data) /* not split */
 			RX_USED_ADD(page, cp->mtu_stride);
@@ -2095,14 +2088,16 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 			p += hlen;
 			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
 			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
-			pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
-						    dlen + cp->crc_size,
-						    PCI_DMA_FROMDEVICE);
+			dma_sync_single_for_cpu(&cp->pdev->dev,
+						page->dma_addr,
+						dlen + cp->crc_size,
+						DMA_FROM_DEVICE);
 			addr = cas_page_map(page->buffer);
 			memcpy(p, addr, dlen + cp->crc_size);
-			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
-						       dlen + cp->crc_size,
-						       PCI_DMA_FROMDEVICE);
+			dma_sync_single_for_device(&cp->pdev->dev,
+						   page->dma_addr,
+						   dlen + cp->crc_size,
+						   DMA_FROM_DEVICE);
 			cas_page_unmap(addr);
 			RX_USED_ADD(page, dlen + cp->crc_size);
 		}
@@ -2283,7 +2278,7 @@ static int cas_rx_ringN(struct cas *cp, int ring, int budget)
 	drops = 0;
 	while (1) {
 		struct cas_rx_comp *rxc = rxcs + entry;
-		struct sk_buff *uninitialized_var(skb);
+		struct sk_buff *skb;
 		int type, len;
 		u64 words[4];
 		int i, dring;
@@ -2778,9 +2773,8 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
 
 	nr_frags = skb_shinfo(skb)->nr_frags;
 	len = skb_headlen(skb);
-	mapping = pci_map_page(cp->pdev, virt_to_page(skb->data),
-			       offset_in_page(skb->data), len,
-			       PCI_DMA_TODEVICE);
+	mapping = dma_map_page(&cp->pdev->dev, virt_to_page(skb->data),
+			       offset_in_page(skb->data), len, DMA_TO_DEVICE);
 
 	tentry = entry;
 	tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
@@ -3894,8 +3888,8 @@ static void cas_clean_txd(struct cas *cp, int ring)
 			daddr = le64_to_cpu(txd[ent].buffer);
 			dlen = CAS_VAL(TX_DESC_BUFLEN,
 				       le64_to_cpu(txd[ent].control));
-			pci_unmap_page(cp->pdev, daddr, dlen,
-				       PCI_DMA_TODEVICE);
+			dma_unmap_page(&cp->pdev->dev, daddr, dlen,
+				       DMA_TO_DEVICE);
 
 			if (frag != skb_shinfo(skb)->nr_frags) {
 				i++;
@@ -4193,9 +4187,8 @@ static void cas_tx_tiny_free(struct cas *cp)
 		if (!cp->tx_tiny_bufs[i])
 			continue;
 
-		pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
-				    cp->tx_tiny_bufs[i],
-				    cp->tx_tiny_dvma[i]);
+		dma_free_coherent(&pdev->dev, TX_TINY_BUF_BLOCK,
+				  cp->tx_tiny_bufs[i], cp->tx_tiny_dvma[i]);
 		cp->tx_tiny_bufs[i] = NULL;
 	}
 }
@@ -4207,8 +4200,8 @@ static int cas_tx_tiny_alloc(struct cas *cp)
 
 	for (i = 0; i < N_TX_RINGS; i++) {
 		cp->tx_tiny_bufs[i] =
-			pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
-					     &cp->tx_tiny_dvma[i]);
+			dma_alloc_coherent(&pdev->dev, TX_TINY_BUF_BLOCK,
+					   &cp->tx_tiny_dvma[i], GFP_KERNEL);
 		if (!cp->tx_tiny_bufs[i]) {
 			cas_tx_tiny_free(cp);
 			return -1;
@@ -4766,7 +4759,7 @@ static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 	switch (cmd) {
 	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
 		data->phy_id = cp->phy_addr;
-		/* Fallthrough... */
+		fallthrough;
 
 	case SIOCGMIIREG:		/* Read MII PHY register. */
 		spin_lock_irqsave(&cp->lock, flags);
@@ -4970,10 +4963,9 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 
 	/* Configure DMA attributes. */
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
 		pci_using_dac = 1;
-		err = pci_set_consistent_dma_mask(pdev,
-						  DMA_BIT_MASK(64));
+		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
 		if (err < 0) {
 			dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
 				"for consistent allocations\n");
@@ -4981,7 +4973,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		}
 	} else {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
 			dev_err(&pdev->dev, "No usable DMA configuration, "
 				"aborting\n");
 			goto err_out_free_res;
 		}
@@ -5059,9 +5051,9 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (cp->cas_flags & CAS_FLAG_SATURN)
 		cas_saturn_firmware_init(cp);
 
-	cp->init_block = (struct cas_init_block *)
-		pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
-				     &cp->block_dvma);
+	cp->init_block =
+		dma_alloc_coherent(&pdev->dev, sizeof(struct cas_init_block),
+				   &cp->block_dvma, GFP_KERNEL);
 	if (!cp->init_block) {
 		dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
 		goto err_out_iounmap;
@@ -5121,8 +5113,8 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	return 0;
 
 err_out_free_consistent:
-	pci_free_consistent(pdev, sizeof(struct cas_init_block),
-			    cp->init_block, cp->block_dvma);
+	dma_free_coherent(&pdev->dev, sizeof(struct cas_init_block),
+			  cp->init_block, cp->block_dvma);
 
 err_out_iounmap:
 	mutex_lock(&cp->pm_mutex);
@@ -5176,18 +5168,17 @@ static void cas_remove_one(struct pci_dev *pdev)
 				    cp->orig_cacheline_size);
 	}
 #endif
-	pci_free_consistent(pdev, sizeof(struct cas_init_block),
-			    cp->init_block, cp->block_dvma);
+	dma_free_coherent(&pdev->dev, sizeof(struct cas_init_block),
+			  cp->init_block, cp->block_dvma);
 	pci_iounmap(pdev, cp->regs);
 	free_netdev(dev);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
 }
 
-#ifdef CONFIG_PM
-static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused cas_suspend(struct device *dev_d)
 {
-	struct net_device *dev = pci_get_drvdata(pdev);
+	struct net_device *dev = dev_get_drvdata(dev_d);
 	struct cas *cp = netdev_priv(dev);
 	unsigned long flags;
 
@@ -5216,9 +5207,9 @@ static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
 	return 0;
 }
 
-static int cas_resume(struct pci_dev *pdev)
+static int __maybe_unused cas_resume(struct device *dev_d)
 {
-	struct net_device *dev = pci_get_drvdata(pdev);
+	struct net_device *dev = dev_get_drvdata(dev_d);
 	struct cas *cp = netdev_priv(dev);
 
 	netdev_info(dev, "resuming\n");
@@ -5239,17 +5230,15 @@ static int cas_resume(struct pci_dev *pdev)
 	mutex_unlock(&cp->pm_mutex);
 	return 0;
 }
-#endif /* CONFIG_PM */
+
+static SIMPLE_DEV_PM_OPS(cas_pm_ops, cas_suspend, cas_resume);
 
 static struct pci_driver cas_driver = {
 	.name		= DRV_MODULE_NAME,
 	.id_table	= cas_pci_tbl,
 	.probe		= cas_init_one,
 	.remove		= cas_remove_one,
-#ifdef CONFIG_PM
-	.suspend	= cas_suspend,
-	.resume		= cas_resume
-#endif
+	.driver.pm	= &cas_pm_ops,
 };
 
 static int __init cas_init(void)
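
For reference, here is a minimal sketch of the old-to-new DMA API mapping applied throughout the diff above. The function and variable names (example_probe and friends) are hypothetical, not from cassini.c; the dma_* calls are the real generic DMA API from <linux/dma-mapping.h>. One behavioral nuance worth noting: pci_alloc_consistent() always allocated with GFP_ATOMIC, while dma_alloc_coherent() takes an explicit gfp argument, and the diff passes GFP_KERNEL in these sleepable allocation paths.

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/errno.h>

/* Hypothetical probe-time setup showing each pci_* -> dma_* replacement. */
static int example_probe(struct pci_dev *pdev)
{
	void *block;
	dma_addr_t block_dvma, mapping;
	struct page *pg;

	/* pci_set_dma_mask(pdev, m)            -> dma_set_mask(&pdev->dev, m)
	 * pci_set_consistent_dma_mask(pdev, m) -> dma_set_coherent_mask(&pdev->dev, m) */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)))
		return -EIO;

	/* pci_alloc_consistent(pdev, sz, &h) -> dma_alloc_coherent(&pdev->dev, sz, &h, gfp);
	 * the old wrapper hard-coded GFP_ATOMIC, the new call makes the flag explicit. */
	block = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &block_dvma, GFP_KERNEL);
	if (!block)
		return -ENOMEM;

	/* pci_map_page(pdev, ..., PCI_DMA_FROMDEVICE)
	 *   -> dma_map_page(&pdev->dev, ..., DMA_FROM_DEVICE) */
	pg = alloc_page(GFP_KERNEL);
	if (!pg) {
		dma_free_coherent(&pdev->dev, PAGE_SIZE, block, block_dvma);
		return -ENOMEM;
	}
	mapping = dma_map_page(&pdev->dev, pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, mapping)) {
		__free_page(pg);
		dma_free_coherent(&pdev->dev, PAGE_SIZE, block, block_dvma);
		return -ENOMEM;
	}

	/* pci_dma_sync_single_for_{cpu,device} -> dma_sync_single_for_{cpu,device} */
	dma_sync_single_for_cpu(&pdev->dev, mapping, PAGE_SIZE, DMA_FROM_DEVICE);
	/* ... CPU reads the buffer here ... */
	dma_sync_single_for_device(&pdev->dev, mapping, PAGE_SIZE, DMA_FROM_DEVICE);

	/* pci_unmap_page -> dma_unmap_page, pci_free_consistent -> dma_free_coherent */
	dma_unmap_page(&pdev->dev, mapping, PAGE_SIZE, DMA_FROM_DEVICE);
	__free_page(pg);
	dma_free_coherent(&pdev->dev, PAGE_SIZE, block, block_dvma);
	return 0;
}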
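
The tail of the diff swaps the legacy struct pci_driver .suspend/.resume callbacks (guarded by #ifdef CONFIG_PM) for the generic power-management interface. Below is a sketch of the resulting shape, again with hypothetical example_* names; SIMPLE_DEV_PM_OPS(), __maybe_unused and .driver.pm are the real kernel interfaces involved. __maybe_unused replaces the #ifdef because SIMPLE_DEV_PM_OPS() only references the callbacks when CONFIG_PM_SLEEP is enabled, and under generic PM the PCI core performs the pci_save_state()/pci_set_power_state() housekeeping itself.

#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pm.h>

/* Callbacks now take a struct device; driver data comes via dev_get_drvdata(). */
static int __maybe_unused example_suspend(struct device *dev_d)
{
	struct net_device *ndev = dev_get_drvdata(dev_d);

	netif_device_detach(ndev);	/* quiesce; no PCI state juggling here */
	return 0;
}

static int __maybe_unused example_resume(struct device *dev_d)
{
	struct net_device *ndev = dev_get_drvdata(dev_d);

	netif_device_attach(ndev);
	return 0;
}

/* Expands to a struct dev_pm_ops with the two callbacks wired into the
 * system-sleep slots (suspend/resume, freeze/thaw, poweroff/restore). */
static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct pci_driver example_driver = {
	.name		= "example",
	/* was: .suspend = ..., .resume = ... under #ifdef CONFIG_PM */
	.driver.pm	= &example_pm_ops,
};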