Diffstat (limited to 'drivers/net'): 81 files changed, 3477 insertions, 559 deletions
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig index a966128c2a7a..547098086773 100644 --- a/drivers/net/caif/Kconfig +++ b/drivers/net/caif/Kconfig @@ -40,3 +40,17 @@ config CAIF_HSI The caif low level driver for CAIF over HSI. Be aware that if you enable this then you also need to enable a low-level HSI driver. + +config CAIF_VIRTIO + tristate "CAIF virtio transport driver" + depends on CAIF && HAS_DMA + select VHOST_RING + select VIRTIO + select GENERIC_ALLOCATOR + default n + ---help--- + The caif driver for CAIF over Virtio. + +if CAIF_VIRTIO +source "drivers/vhost/Kconfig" +endif diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile index 15a9d2fc753d..9bbd45391f6c 100644 --- a/drivers/net/caif/Makefile +++ b/drivers/net/caif/Makefile @@ -9,3 +9,6 @@ obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o # HSI interface obj-$(CONFIG_CAIF_HSI) += caif_hsi.o + +# Virtio interface +obj-$(CONFIG_CAIF_VIRTIO) += caif_virtio.o diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c new file mode 100644 index 000000000000..b9ed1288ce2d --- /dev/null +++ b/drivers/net/caif/caif_virtio.c @@ -0,0 +1,790 @@ +/* + * Copyright (C) ST-Ericsson AB 2013 + * Authors: Vicram Arv + * Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> + * Sjur Brendeland + * License terms: GNU General Public License (GPL) version 2 + */ +#include <linux/module.h> +#include <linux/if_arp.h> +#include <linux/virtio.h> +#include <linux/vringh.h> +#include <linux/debugfs.h> +#include <linux/spinlock.h> +#include <linux/genalloc.h> +#include <linux/interrupt.h> +#include <linux/netdevice.h> +#include <linux/rtnetlink.h> +#include <linux/virtio_ids.h> +#include <linux/virtio_caif.h> +#include <linux/virtio_ring.h> +#include <linux/dma-mapping.h> +#include <net/caif/caif_dev.h> +#include <linux/virtio_config.h> + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Vicram Arv"); +MODULE_AUTHOR("Sjur Brendeland"); +MODULE_DESCRIPTION("Virtio CAIF Driver"); + +/* NAPI schedule quota */ +#define CFV_DEFAULT_QUOTA 32 + +/* Defaults used if virtio config space is unavailable */ +#define CFV_DEF_MTU_SIZE 4096 +#define CFV_DEF_HEADROOM 32 +#define CFV_DEF_TAILROOM 32 + +/* Required IP header alignment */ +#define IP_HDR_ALIGN 4 + +/* struct cfv_napi_contxt - NAPI context info + * @riov: IOV holding data read from the ring. Note that riov may + * still hold data when cfv_rx_poll() returns. + * @head: Last descriptor ID we received from vringh_getdesc_kern. + * We use this to put descriptor back on the used ring. USHRT_MAX is + * used to indicate invalid head-id. 
+ */ +struct cfv_napi_context { + struct vringh_kiov riov; + unsigned short head; +}; + +/* struct cfv_stats - statistics for debugfs + * @rx_napi_complete: Number of NAPI completions (RX) + * @rx_napi_resched: Number of calls where the full quota was used (RX) + * @rx_nomem: Number of SKB alloc failures (RX) + * @rx_kicks: Number of RX kicks + * @tx_full_ring: Number times TX ring was full + * @tx_no_mem: Number of times TX went out of memory + * @tx_flow_on: Number of flow on (TX) + * @tx_kicks: Number of TX kicks + */ +struct cfv_stats { + u32 rx_napi_complete; + u32 rx_napi_resched; + u32 rx_nomem; + u32 rx_kicks; + u32 tx_full_ring; + u32 tx_no_mem; + u32 tx_flow_on; + u32 tx_kicks; +}; + +/* struct cfv_info - Caif Virtio control structure + * @cfdev: caif common header + * @vdev: Associated virtio device + * @vr_rx: rx/downlink host vring + * @vq_tx: tx/uplink virtqueue + * @ndev: CAIF link layer device + * @watermark_tx: indicates number of free descriptors we need + * to reopen the tx-queues after overload. + * @tx_lock: protects vq_tx from concurrent use + * @tx_release_tasklet: Tasklet for freeing consumed TX buffers + * @napi: Napi context used in cfv_rx_poll() + * @ctx: Context data used in cfv_rx_poll() + * @tx_hr: transmit headroom + * @rx_hr: receive headroom + * @tx_tr: transmit tail room + * @rx_tr: receive tail room + * @mtu: transmit max size + * @mru: receive max size + * @allocsz: size of dma memory reserved for TX buffers + * @alloc_addr: virtual address to dma memory for TX buffers + * @alloc_dma: dma address to dma memory for TX buffers + * @genpool: Gen Pool used for allocating TX buffers + * @reserved_mem: Pointer to memory reserve allocated from genpool + * @reserved_size: Size of memory reserve allocated from genpool + * @stats: Statistics exposed in sysfs + * @debugfs: Debugfs dentry for statistic counters + */ +struct cfv_info { + struct caif_dev_common cfdev; + struct virtio_device *vdev; + struct vringh *vr_rx; + struct virtqueue *vq_tx; + struct net_device *ndev; + unsigned int watermark_tx; + /* Protect access to vq_tx */ + spinlock_t tx_lock; + struct tasklet_struct tx_release_tasklet; + struct napi_struct napi; + struct cfv_napi_context ctx; + u16 tx_hr; + u16 rx_hr; + u16 tx_tr; + u16 rx_tr; + u32 mtu; + u32 mru; + size_t allocsz; + void *alloc_addr; + dma_addr_t alloc_dma; + struct gen_pool *genpool; + unsigned long reserved_mem; + size_t reserved_size; + struct cfv_stats stats; + struct dentry *debugfs; +}; + +/* struct buf_info - maintains transmit buffer data handle + * @size: size of transmit buffer + * @dma_handle: handle to allocated dma device memory area + * @vaddr: virtual address mapping to allocated memory area + */ +struct buf_info { + size_t size; + u8 *vaddr; +}; + +/* Called from virtio device, in IRQ context */ +static void cfv_release_cb(struct virtqueue *vq_tx) +{ + struct cfv_info *cfv = vq_tx->vdev->priv; + + ++cfv->stats.tx_kicks; + tasklet_schedule(&cfv->tx_release_tasklet); +} + +static void free_buf_info(struct cfv_info *cfv, struct buf_info *buf_info) +{ + if (!buf_info) + return; + gen_pool_free(cfv->genpool, (unsigned long) buf_info->vaddr, + buf_info->size); + kfree(buf_info); +} + +/* This is invoked whenever the remote processor completed processing + * a TX msg we just sent, and the buffer is put back to the used ring. 
+ */ +static void cfv_release_used_buf(struct virtqueue *vq_tx) +{ + struct cfv_info *cfv = vq_tx->vdev->priv; + unsigned long flags; + + BUG_ON(vq_tx != cfv->vq_tx); + + for (;;) { + unsigned int len; + struct buf_info *buf_info; + + /* Get used buffer from used ring to recycle used descriptors */ + spin_lock_irqsave(&cfv->tx_lock, flags); + buf_info = virtqueue_get_buf(vq_tx, &len); + spin_unlock_irqrestore(&cfv->tx_lock, flags); + + /* Stop looping if there are no more buffers to free */ + if (!buf_info) + break; + + free_buf_info(cfv, buf_info); + + /* watermark_tx indicates if we previously stopped the tx + * queues. If we have enough free stots in the virtio ring, + * re-establish memory reserved and open up tx queues. + */ + if (cfv->vq_tx->num_free <= cfv->watermark_tx) + continue; + + /* Re-establish memory reserve */ + if (cfv->reserved_mem == 0 && cfv->genpool) + cfv->reserved_mem = + gen_pool_alloc(cfv->genpool, + cfv->reserved_size); + + /* Open up the tx queues */ + if (cfv->reserved_mem) { + cfv->watermark_tx = + virtqueue_get_vring_size(cfv->vq_tx); + netif_tx_wake_all_queues(cfv->ndev); + /* Buffers are recycled in cfv_netdev_tx, so + * disable notifications when queues are opened. + */ + virtqueue_disable_cb(cfv->vq_tx); + ++cfv->stats.tx_flow_on; + } else { + /* if no memory reserve, wait for more free slots */ + WARN_ON(cfv->watermark_tx > + virtqueue_get_vring_size(cfv->vq_tx)); + cfv->watermark_tx += + virtqueue_get_vring_size(cfv->vq_tx) / 4; + } + } +} + +/* Allocate a SKB and copy packet data to it */ +static struct sk_buff *cfv_alloc_and_copy_skb(int *err, + struct cfv_info *cfv, + u8 *frm, u32 frm_len) +{ + struct sk_buff *skb; + u32 cfpkt_len, pad_len; + + *err = 0; + /* Verify that packet size with down-link header and mtu size */ + if (frm_len > cfv->mru || frm_len <= cfv->rx_hr + cfv->rx_tr) { + netdev_err(cfv->ndev, + "Invalid frmlen:%u mtu:%u hr:%d tr:%d\n", + frm_len, cfv->mru, cfv->rx_hr, + cfv->rx_tr); + *err = -EPROTO; + return NULL; + } + + cfpkt_len = frm_len - (cfv->rx_hr + cfv->rx_tr); + pad_len = (unsigned long)(frm + cfv->rx_hr) & (IP_HDR_ALIGN - 1); + + skb = netdev_alloc_skb(cfv->ndev, frm_len + pad_len); + if (!skb) { + *err = -ENOMEM; + return NULL; + } + + skb_reserve(skb, cfv->rx_hr + pad_len); + + memcpy(skb_put(skb, cfpkt_len), frm + cfv->rx_hr, cfpkt_len); + return skb; +} + +/* Get packets from the host vring */ +static int cfv_rx_poll(struct napi_struct *napi, int quota) +{ + struct cfv_info *cfv = container_of(napi, struct cfv_info, napi); + int rxcnt = 0; + int err = 0; + void *buf; + struct sk_buff *skb; + struct vringh_kiov *riov = &cfv->ctx.riov; + unsigned int skb_len; + +again: + do { + skb = NULL; + + /* Put the previous iovec back on the used ring and + * fetch a new iovec if we have processed all elements. + */ + if (riov->i == riov->used) { + if (cfv->ctx.head != USHRT_MAX) { + vringh_complete_kern(cfv->vr_rx, + cfv->ctx.head, + 0); + cfv->ctx.head = USHRT_MAX; + } + + err = vringh_getdesc_kern( + cfv->vr_rx, + riov, + NULL, + &cfv->ctx.head, + GFP_ATOMIC); + + if (err <= 0) + goto exit; + } + + buf = phys_to_virt((unsigned long) riov->iov[riov->i].iov_base); + /* TODO: Add check on valid buffer address */ + + skb = cfv_alloc_and_copy_skb(&err, cfv, buf, + riov->iov[riov->i].iov_len); + if (unlikely(err)) + goto exit; + + /* Push received packet up the stack. 
*/ + skb_len = skb->len; + skb->protocol = htons(ETH_P_CAIF); + skb_reset_mac_header(skb); + skb->dev = cfv->ndev; + err = netif_receive_skb(skb); + if (unlikely(err)) { + ++cfv->ndev->stats.rx_dropped; + } else { + ++cfv->ndev->stats.rx_packets; + cfv->ndev->stats.rx_bytes += skb_len; + } + + ++riov->i; + ++rxcnt; + } while (rxcnt < quota); + + ++cfv->stats.rx_napi_resched; + goto out; + +exit: + switch (err) { + case 0: + ++cfv->stats.rx_napi_complete; + + /* Really out of patckets? (stolen from virtio_net)*/ + napi_complete(napi); + if (unlikely(!vringh_notify_enable_kern(cfv->vr_rx)) && + napi_schedule_prep(napi)) { + vringh_notify_disable_kern(cfv->vr_rx); + __napi_schedule(napi); + goto again; + } + break; + + case -ENOMEM: + ++cfv->stats.rx_nomem; + dev_kfree_skb(skb); + /* Stop NAPI poll on OOM, we hope to be polled later */ + napi_complete(napi); + vringh_notify_enable_kern(cfv->vr_rx); + break; + + default: + /* We're doomed, any modem fault is fatal */ + netdev_warn(cfv->ndev, "Bad ring, disable device\n"); + cfv->ndev->stats.rx_dropped = riov->used - riov->i; + napi_complete(napi); + vringh_notify_disable_kern(cfv->vr_rx); + netif_carrier_off(cfv->ndev); + break; + } +out: + if (rxcnt && vringh_need_notify_kern(cfv->vr_rx) > 0) + vringh_notify(cfv->vr_rx); + return rxcnt; +} + +static void cfv_recv(struct virtio_device *vdev, struct vringh *vr_rx) +{ + struct cfv_info *cfv = vdev->priv; + + ++cfv->stats.rx_kicks; + vringh_notify_disable_kern(cfv->vr_rx); + napi_schedule(&cfv->napi); +} + +static void cfv_destroy_genpool(struct cfv_info *cfv) +{ + if (cfv->alloc_addr) + dma_free_coherent(cfv->vdev->dev.parent->parent, + cfv->allocsz, cfv->alloc_addr, + cfv->alloc_dma); + + if (!cfv->genpool) + return; + gen_pool_free(cfv->genpool, cfv->reserved_mem, + cfv->reserved_size); + gen_pool_destroy(cfv->genpool); + cfv->genpool = NULL; +} + +static int cfv_create_genpool(struct cfv_info *cfv) +{ + int err; + + /* dma_alloc can only allocate whole pages, and we need a more + * fine graned allocation so we use genpool. We ask for space needed + * by IP and a full ring. If the dma allcoation fails we retry with a + * smaller allocation size. + */ + err = -ENOMEM; + cfv->allocsz = (virtqueue_get_vring_size(cfv->vq_tx) * + (ETH_DATA_LEN + cfv->tx_hr + cfv->tx_tr) * 11)/10; + if (cfv->allocsz <= (num_possible_cpus() + 1) * cfv->ndev->mtu) + return -EINVAL; + + for (;;) { + if (cfv->allocsz <= num_possible_cpus() * cfv->ndev->mtu) { + netdev_info(cfv->ndev, "Not enough device memory\n"); + return -ENOMEM; + } + + cfv->alloc_addr = dma_alloc_coherent( + cfv->vdev->dev.parent->parent, + cfv->allocsz, &cfv->alloc_dma, + GFP_ATOMIC); + if (cfv->alloc_addr) + break; + + cfv->allocsz = (cfv->allocsz * 3) >> 2; + } + + netdev_dbg(cfv->ndev, "Allocated %zd bytes from dma-memory\n", + cfv->allocsz); + + /* Allocate on 128 bytes boundaries (1 << 7)*/ + cfv->genpool = gen_pool_create(7, -1); + if (!cfv->genpool) + goto err; + + err = gen_pool_add_virt(cfv->genpool, (unsigned long)cfv->alloc_addr, + (phys_addr_t)virt_to_phys(cfv->alloc_addr), + cfv->allocsz, -1); + if (err) + goto err; + + /* Reserve some memory for low memory situations. If we hit the roof + * in the memory pool, we stop TX flow and release the reserve. 
+ */ + cfv->reserved_size = num_possible_cpus() * cfv->ndev->mtu; + cfv->reserved_mem = gen_pool_alloc(cfv->genpool, + cfv->reserved_size); + if (!cfv->reserved_mem) { + err = -ENOMEM; + goto err; + } + + cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx); + return 0; +err: + cfv_destroy_genpool(cfv); + return err; +} + +/* Enable the CAIF interface and allocate the memory-pool */ +static int cfv_netdev_open(struct net_device *netdev) +{ + struct cfv_info *cfv = netdev_priv(netdev); + + if (cfv_create_genpool(cfv)) + return -ENOMEM; + + netif_carrier_on(netdev); + napi_enable(&cfv->napi); + + /* Schedule NAPI to read any pending packets */ + napi_schedule(&cfv->napi); + return 0; +} + +/* Disable the CAIF interface and free the memory-pool */ +static int cfv_netdev_close(struct net_device *netdev) +{ + struct cfv_info *cfv = netdev_priv(netdev); + unsigned long flags; + struct buf_info *buf_info; + + /* Disable interrupts, queues and NAPI polling */ + netif_carrier_off(netdev); + virtqueue_disable_cb(cfv->vq_tx); + vringh_notify_disable_kern(cfv->vr_rx); + napi_disable(&cfv->napi); + + /* Release any TX buffers on both used and avilable rings */ + cfv_release_used_buf(cfv->vq_tx); + spin_lock_irqsave(&cfv->tx_lock, flags); + while ((buf_info = virtqueue_detach_unused_buf(cfv->vq_tx))) + free_buf_info(cfv, buf_info); + spin_unlock_irqrestore(&cfv->tx_lock, flags); + + /* Release all dma allocated memory and destroy the pool */ + cfv_destroy_genpool(cfv); + return 0; +} + +/* Allocate a buffer in dma-memory and copy skb to it */ +static struct buf_info *cfv_alloc_and_copy_to_shm(struct cfv_info *cfv, + struct sk_buff *skb, + struct scatterlist *sg) +{ + struct caif_payload_info *info = (void *)&skb->cb; + struct buf_info *buf_info = NULL; + u8 pad_len, hdr_ofs; + + if (!cfv->genpool) + goto err; + + if (unlikely(cfv->tx_hr + skb->len + cfv->tx_tr > cfv->mtu)) { + netdev_warn(cfv->ndev, "Invalid packet len (%d > %d)\n", + cfv->tx_hr + skb->len + cfv->tx_tr, cfv->mtu); + goto err; + } + + buf_info = kmalloc(sizeof(struct buf_info), GFP_ATOMIC); + if (unlikely(!buf_info)) + goto err; + + /* Make the IP header aligned in tbe buffer */ + hdr_ofs = cfv->tx_hr + info->hdr_len; + pad_len = hdr_ofs & (IP_HDR_ALIGN - 1); + buf_info->size = cfv->tx_hr + skb->len + cfv->tx_tr + pad_len; + + /* allocate dma memory buffer */ + buf_info->vaddr = (void *)gen_pool_alloc(cfv->genpool, buf_info->size); + if (unlikely(!buf_info->vaddr)) + goto err; + + /* copy skbuf contents to send buffer */ + skb_copy_bits(skb, 0, buf_info->vaddr + cfv->tx_hr + pad_len, skb->len); + sg_init_one(sg, buf_info->vaddr + pad_len, + skb->len + cfv->tx_hr + cfv->rx_hr); + + return buf_info; +err: + kfree(buf_info); + return NULL; +} + +/* Put the CAIF packet on the virtio ring and kick the receiver */ +static int cfv_netdev_tx(struct sk_buff *skb, struct net_device *netdev) +{ + struct cfv_info *cfv = netdev_priv(netdev); + struct buf_info *buf_info; + struct scatterlist sg; + unsigned long flags; + bool flow_off = false; + int ret; + + /* garbage collect released buffers */ + cfv_release_used_buf(cfv->vq_tx); + spin_lock_irqsave(&cfv->tx_lock, flags); + + /* Flow-off check takes into account number of cpus to make sure + * virtqueue will not be overfilled in any possible smp conditions. 
+ * + * Flow-on is triggered when sufficient buffers are freed + */ + if (unlikely(cfv->vq_tx->num_free <= num_present_cpus())) { + flow_off = true; + cfv->stats.tx_full_ring++; + } + + /* If we run out of memory, we release the memory reserve and retry + * allocation. + */ + buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg); + if (unlikely(!buf_info)) { + cfv->stats.tx_no_mem++; + flow_off = true; + + if (cfv->reserved_mem && cfv->genpool) { + gen_pool_free(cfv->genpool, cfv->reserved_mem, + cfv->reserved_size); + cfv->reserved_mem = 0; + buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg); + } + } + + if (unlikely(flow_off)) { + /* Turn flow on when a 1/4 of the descriptors are released */ + cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx) / 4; + /* Enable notifications of recycled TX buffers */ + virtqueue_enable_cb(cfv->vq_tx); + netif_tx_stop_all_queues(netdev); + } + + if (unlikely(!buf_info)) { + /* If the memory reserve does it's job, this shouldn't happen */ + netdev_warn(cfv->ndev, "Out of gen_pool memory\n"); + goto err; + } + + ret = virtqueue_add_outbuf(cfv->vq_tx, &sg, 1, buf_info, GFP_ATOMIC); + if (unlikely((ret < 0))) { + /* If flow control works, this shouldn't happen */ + netdev_warn(cfv->ndev, "Failed adding buffer to TX vring:%d\n", + ret); + goto err; + } + + /* update netdev statistics */ + cfv->ndev->stats.tx_packets++; + cfv->ndev->stats.tx_bytes += skb->len; + spin_unlock_irqrestore(&cfv->tx_lock, flags); + + /* tell the remote processor it has a pending message to read */ + virtqueue_kick(cfv->vq_tx); + + dev_kfree_skb(skb); + return NETDEV_TX_OK; +err: + spin_unlock_irqrestore(&cfv->tx_lock, flags); + cfv->ndev->stats.tx_dropped++; + free_buf_info(cfv, buf_info); + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +static void cfv_tx_release_tasklet(unsigned long drv) +{ + struct cfv_info *cfv = (struct cfv_info *)drv; + cfv_release_used_buf(cfv->vq_tx); +} + +static const struct net_device_ops cfv_netdev_ops = { + .ndo_open = cfv_netdev_open, + .ndo_stop = cfv_netdev_close, + .ndo_start_xmit = cfv_netdev_tx, +}; + +static void cfv_netdev_setup(struct net_device *netdev) +{ + netdev->netdev_ops = &cfv_netdev_ops; + netdev->type = ARPHRD_CAIF; + netdev->tx_queue_len = 100; + netdev->flags = IFF_POINTOPOINT | IFF_NOARP; + netdev->mtu = CFV_DEF_MTU_SIZE; + netdev->destructor = free_netdev; +} + +/* Create debugfs counters for the device */ +static inline void debugfs_init(struct cfv_info *cfv) +{ + cfv->debugfs = + debugfs_create_dir(netdev_name(cfv->ndev), NULL); + + if (IS_ERR(cfv->debugfs)) + return; + + debugfs_create_u32("rx-napi-complete", S_IRUSR, cfv->debugfs, + &cfv->stats.rx_napi_complete); + debugfs_create_u32("rx-napi-resched", S_IRUSR, cfv->debugfs, + &cfv->stats.rx_napi_resched); + debugfs_create_u32("rx-nomem", S_IRUSR, cfv->debugfs, + &cfv->stats.rx_nomem); + debugfs_create_u32("rx-kicks", S_IRUSR, cfv->debugfs, + &cfv->stats.rx_kicks); + debugfs_create_u32("tx-full-ring", S_IRUSR, cfv->debugfs, + &cfv->stats.tx_full_ring); + debugfs_create_u32("tx-no-mem", S_IRUSR, cfv->debugfs, + &cfv->stats.tx_no_mem); + debugfs_create_u32("tx-kicks", S_IRUSR, cfv->debugfs, + &cfv->stats.tx_kicks); + debugfs_create_u32("tx-flow-on", S_IRUSR, cfv->debugfs, + &cfv->stats.tx_flow_on); +} + +/* Setup CAIF for the a virtio device */ +static int cfv_probe(struct virtio_device *vdev) +{ + vq_callback_t *vq_cbs = cfv_release_cb; + vrh_callback_t *vrh_cbs = cfv_recv; + const char *names = "output"; + const char *cfv_netdev_name = "cfvrt"; + struct net_device 
*netdev; + struct cfv_info *cfv; + int err = -EINVAL; + + netdev = alloc_netdev(sizeof(struct cfv_info), cfv_netdev_name, + cfv_netdev_setup); + if (!netdev) + return -ENOMEM; + + cfv = netdev_priv(netdev); + cfv->vdev = vdev; + cfv->ndev = netdev; + + spin_lock_init(&cfv->tx_lock); + + /* Get the RX virtio ring. This is a "host side vring". */ + err = -ENODEV; + if (!vdev->vringh_config || !vdev->vringh_config->find_vrhs) + goto err; + + err = vdev->vringh_config->find_vrhs(vdev, 1, &cfv->vr_rx, &vrh_cbs); + if (err) + goto err; + + /* Get the TX virtio ring. This is a "guest side vring". */ + err = vdev->config->find_vqs(vdev, 1, &cfv->vq_tx, &vq_cbs, &names); + if (err) + goto err; + + /* Get the CAIF configuration from virtio config space, if available */ +#define GET_VIRTIO_CONFIG_OPS(_v, _var, _f) \ + ((_v)->config->get(_v, offsetof(struct virtio_caif_transf_config, _f), \ + &_var, \ + FIELD_SIZEOF(struct virtio_caif_transf_config, _f))) + + if (vdev->config->get) { + GET_VIRTIO_CONFIG_OPS(vdev, cfv->tx_hr, headroom); + GET_VIRTIO_CONFIG_OPS(vdev, cfv->rx_hr, headroom); + GET_VIRTIO_CONFIG_OPS(vdev, cfv->tx_tr, tailroom); + GET_VIRTIO_CONFIG_OPS(vdev, cfv->rx_tr, tailroom); + GET_VIRTIO_CONFIG_OPS(vdev, cfv->mtu, mtu); + GET_VIRTIO_CONFIG_OPS(vdev, cfv->mru, mtu); + } else { + cfv->tx_hr = CFV_DEF_HEADROOM; + cfv->rx_hr = CFV_DEF_HEADROOM; + cfv->tx_tr = CFV_DEF_TAILROOM; + cfv->rx_tr = CFV_DEF_TAILROOM; + cfv->mtu = CFV_DEF_MTU_SIZE; + cfv->mru = CFV_DEF_MTU_SIZE; + } + + netdev->needed_headroom = cfv->tx_hr; + netdev->needed_tailroom = cfv->tx_tr; + + /* Disable buffer release interrupts unless we have stopped TX queues */ + virtqueue_disable_cb(cfv->vq_tx); + + netdev->mtu = cfv->mtu - cfv->tx_tr; + vdev->priv = cfv; + + /* Initialize NAPI poll context data */ + vringh_kiov_init(&cfv->ctx.riov, NULL, 0); + cfv->ctx.head = USHRT_MAX; + netif_napi_add(netdev, &cfv->napi, cfv_rx_poll, CFV_DEFAULT_QUOTA); + + tasklet_init(&cfv->tx_release_tasklet, + cfv_tx_release_tasklet, + (unsigned long)cfv); + + /* Carrier is off until netdevice is opened */ + netif_carrier_off(netdev); + + /* register Netdev */ + err = register_netdev(netdev); + if (err) { + dev_err(&vdev->dev, "Unable to register netdev (%d)\n", err); + goto err; + } + + debugfs_init(cfv); + + return 0; +err: + netdev_warn(cfv->ndev, "CAIF Virtio probe failed:%d\n", err); + + if (cfv->vr_rx) + vdev->vringh_config->del_vrhs(cfv->vdev); + if (cfv->vdev) + vdev->config->del_vqs(cfv->vdev); + free_netdev(netdev); + return err; +} + +static void cfv_remove(struct virtio_device *vdev) +{ + struct cfv_info *cfv = vdev->priv; + + rtnl_lock(); + dev_close(cfv->ndev); + rtnl_unlock(); + + tasklet_kill(&cfv->tx_release_tasklet); + debugfs_remove_recursive(cfv->debugfs); + + vringh_kiov_cleanup(&cfv->ctx.riov); + vdev->config->reset(vdev); + vdev->vringh_config->del_vrhs(cfv->vdev); + cfv->vr_rx = NULL; + vdev->config->del_vqs(cfv->vdev); + unregister_netdev(cfv->ndev); +} + +static struct virtio_device_id id_table[] = { + { VIRTIO_ID_CAIF, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +static unsigned int features[] = { +}; + +static struct virtio_driver caif_virtio_driver = { + .feature_table = features, + .feature_table_size = ARRAY_SIZE(features), + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .id_table = id_table, + .probe = cfv_probe, + .remove = cfv_remove, +}; + +module_virtio_driver(caif_virtio_driver); +MODULE_DEVICE_TABLE(virtio, id_table); diff --git a/drivers/net/ethernet/3com/3c59x.c 
b/drivers/net/ethernet/3com/3c59x.c index 1928e2001587..072c6f14e8fc 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -632,7 +632,6 @@ struct vortex_private { pm_state_valid:1, /* pci_dev->saved_config_space has sane contents */ open:1, medialock:1, - must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */ large_frames:1, /* accept large frames */ handling_irq:1; /* private in_irq indicator */ /* {get|set}_wol operations are already serialized by rtnl. @@ -951,7 +950,7 @@ static int vortex_eisa_remove(struct device *device) unregister_netdev(dev); iowrite16(TotalReset|0x14, ioaddr + EL3_CMD); - release_region(dev->base_addr, VORTEX_TOTAL_SIZE); + release_region(edev->base_addr, VORTEX_TOTAL_SIZE); free_netdev(dev); return 0; @@ -1012,6 +1011,12 @@ static int vortex_init_one(struct pci_dev *pdev, if (rc < 0) goto out; + rc = pci_request_regions(pdev, DRV_NAME); + if (rc < 0) { + pci_disable_device(pdev); + goto out; + } + unit = vortex_cards_found; if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) { @@ -1027,6 +1032,7 @@ static int vortex_init_one(struct pci_dev *pdev, if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */ ioaddr = pci_iomap(pdev, 0, 0); if (!ioaddr) { + pci_release_regions(pdev); pci_disable_device(pdev); rc = -ENOMEM; goto out; @@ -1036,6 +1042,7 @@ static int vortex_init_one(struct pci_dev *pdev, ent->driver_data, unit); if (rc < 0) { pci_iounmap(pdev, ioaddr); + pci_release_regions(pdev); pci_disable_device(pdev); goto out; } @@ -1178,11 +1185,6 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, /* PCI-only startup logic */ if (pdev) { - /* EISA resources already marked, so only PCI needs to do this here */ - /* Ignore return value, because Cardbus drivers already allocate for us */ - if (request_region(dev->base_addr, vci->io_size, print_name) != NULL) - vp->must_free_region = 1; - /* enable bus-mastering if necessary */ if (vci->flags & PCI_USES_MASTER) pci_set_master(pdev); @@ -1220,7 +1222,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, &vp->rx_ring_dma); retval = -ENOMEM; if (!vp->rx_ring) - goto free_region; + goto free_device; vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE); vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE; @@ -1484,9 +1486,7 @@ free_ring: + sizeof(struct boom_tx_desc) * TX_RING_SIZE, vp->rx_ring, vp->rx_ring_dma); -free_region: - if (vp->must_free_region) - release_region(dev->base_addr, vci->io_size); +free_device: free_netdev(dev); pr_err(PFX "vortex_probe1 fails. 
Returns %d\n", retval); out: @@ -3254,8 +3254,9 @@ static void vortex_remove_one(struct pci_dev *pdev) + sizeof(struct boom_tx_desc) * TX_RING_SIZE, vp->rx_ring, vp->rx_ring_dma); - if (vp->must_free_region) - release_region(dev->base_addr, vp->io_size); + + pci_release_regions(pdev); + free_netdev(dev); } diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c index ee705771bd2c..dada66bfe0d6 100644 --- a/drivers/net/ethernet/adi/bfin_mac.c +++ b/drivers/net/ethernet/adi/bfin_mac.c @@ -1700,7 +1700,8 @@ static int bfin_mac_probe(struct platform_device *pdev) } bfin_mac_hwtstamp_init(ndev); - if (bfin_phc_init(ndev, &pdev->dev)) { + rc = bfin_phc_init(ndev, &pdev->dev); + if (rc) { dev_err(&pdev->dev, "Cannot register PHC device!\n"); goto out_err_phc; } diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 40649a8bf390..6b0dc131b20e 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -4085,7 +4085,7 @@ static int cnic_cm_alloc_mem(struct cnic_dev *dev) if (!cp->csk_tbl) return -ENOMEM; - port_id = random32(); + port_id = prandom_u32(); port_id %= CNIC_LOCAL_PORT_RANGE; if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE, CNIC_LOCAL_PORT_MIN, port_id)) { @@ -4145,7 +4145,7 @@ static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev) { u32 seed; - seed = random32(); + seed = prandom_u32(); cnic_ctx_wr(dev, 45, 0, seed); return 0; } diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index ce4a030d3d0c..07f7ef05c3f2 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -3236,9 +3236,10 @@ bnad_init(struct bnad *bnad, sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id); bnad->work_q = create_singlethread_workqueue(bnad->wq_name); - - if (!bnad->work_q) + if (!bnad->work_q) { + iounmap(bnad->bar0); return -ENOMEM; + } return 0; } diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig index 1194446f859a..768285ec10f4 100644 --- a/drivers/net/ethernet/cadence/Kconfig +++ b/drivers/net/ethernet/cadence/Kconfig @@ -22,7 +22,7 @@ if NET_CADENCE config ARM_AT91_ETHER tristate "AT91RM9200 Ethernet support" - depends on GENERIC_HARDIRQS + depends on GENERIC_HARDIRQS && HAS_DMA select NET_CORE select MACB ---help--- @@ -31,6 +31,7 @@ config ARM_AT91_ETHER config MACB tristate "Cadence MACB/GEM support" + depends on HAS_DMA select PHYLIB ---help--- The Cadence MACB ethernet interface is found on many Atmel AT32 and diff --git a/drivers/net/ethernet/calxeda/Kconfig b/drivers/net/ethernet/calxeda/Kconfig index aba435c3d4ae..184a063bed5f 100644 --- a/drivers/net/ethernet/calxeda/Kconfig +++ b/drivers/net/ethernet/calxeda/Kconfig @@ -1,6 +1,6 @@ config NET_CALXEDA_XGMAC tristate "Calxeda 1G/10G XGMAC Ethernet driver" - depends on HAS_IOMEM + depends on HAS_IOMEM && HAS_DMA select CRC32 help This is the driver for the XGMAC Ethernet IP block found on Calxeda diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index c59ec3ddaa66..3cd397d60434 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -5204,7 +5204,7 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev) if (t4_wait_dev_ready(adap) < 0) return PCI_ERS_RESULT_DISCONNECT; - if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL)) + if (t4_fw_hello(adap, adap->fn, 
adap->fn, MASTER_MUST, NULL) < 0) return PCI_ERS_RESULT_DISCONNECT; adap->flags |= FW_OK; if (adap_init1(adap, &c)) diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 234ce6f07544..f544b297c9ab 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -327,6 +327,7 @@ enum vf_state { #define BE_FLAGS_LINK_STATUS_INIT 1 #define BE_FLAGS_WORKER_SCHEDULED (1 << 3) +#define BE_FLAGS_NAPI_ENABLED (1 << 9) #define BE_UC_PMAC_COUNT 30 #define BE_VF_UC_PMAC_COUNT 2 #define BE_FLAGS_QNQ_ASYNC_EVT_RCVD (1 << 11) diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 25d3290b8cac..fd7b547698ab 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -961,19 +961,8 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq, OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL); req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); - if (lancer_chip(adapter)) { - req->hdr.version = 2; - req->page_size = 1; /* 1 for 4K */ - AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt, - no_delay); - AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt, - __ilog2_u32(cq->len/256)); - AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1); - AMAP_SET_BITS(struct amap_cq_context_lancer, eventable, - ctxt, 1); - AMAP_SET_BITS(struct amap_cq_context_lancer, eqid, - ctxt, eq->id); - } else { + + if (BEx_chip(adapter)) { AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt, coalesce_wm); AMAP_SET_BITS(struct amap_cq_context_be, nodelay, @@ -983,6 +972,18 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq, AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1); AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1); AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id); + } else { + req->hdr.version = 2; + req->page_size = 1; /* 1 for 4K */ + AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt, + no_delay); + AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt, + __ilog2_u32(cq->len/256)); + AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1); + AMAP_SET_BITS(struct amap_cq_context_v2, eventable, + ctxt, 1); + AMAP_SET_BITS(struct amap_cq_context_v2, eqid, + ctxt, eq->id); } be_dws_cpu_to_le(ctxt, sizeof(req->context)); @@ -1763,10 +1764,12 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) req->if_id = cpu_to_le32(adapter->if_handle); if (flags & IFF_PROMISC) { req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS | - BE_IF_FLAGS_VLAN_PROMISCUOUS); + BE_IF_FLAGS_VLAN_PROMISCUOUS | + BE_IF_FLAGS_MCAST_PROMISCUOUS); if (value == ON) req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS | - BE_IF_FLAGS_VLAN_PROMISCUOUS); + BE_IF_FLAGS_VLAN_PROMISCUOUS | + BE_IF_FLAGS_MCAST_PROMISCUOUS); } else if (flags & IFF_ALLMULTI) { req->if_flags_mask = req->if_flags = cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); @@ -2084,7 +2087,7 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, spin_unlock_bh(&adapter->mcc_lock); if (!wait_for_completion_timeout(&adapter->flash_compl, - msecs_to_jiffies(30000))) + msecs_to_jiffies(60000))) status = -1; else status = adapter->flash_status; @@ -2637,9 +2640,8 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, req = get_mac_list_cmd.va; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, - OPCODE_COMMON_GET_MAC_LIST, 
sizeof(*req), - wrb, &get_mac_list_cmd); - + OPCODE_COMMON_GET_MAC_LIST, + get_mac_list_cmd.size, wrb, &get_mac_list_cmd); req->hdr.domain = domain; req->mac_type = MAC_ADDRESS_TYPE_NETWORK; req->perm_override = 1; diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index a855668e0cc5..025bdb0d1764 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -381,7 +381,7 @@ struct amap_cq_context_be { u8 rsvd5[32]; /* dword 3*/ } __packed; -struct amap_cq_context_lancer { +struct amap_cq_context_v2 { u8 rsvd0[12]; /* dword 0*/ u8 coalescwm[2]; /* dword 0*/ u8 nodelay; /* dword 0*/ diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 5733cde88e2c..3d4461adb3b4 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -85,6 +85,7 @@ static const struct be_ethtool_stat et_stats[] = { {DRVSTAT_INFO(tx_pauseframes)}, {DRVSTAT_INFO(tx_controlframes)}, {DRVSTAT_INFO(rx_priority_pause_frames)}, + {DRVSTAT_INFO(tx_priority_pauseframes)}, /* Received packets dropped when an internal fifo going into * main packet buffer tank (PMEM) overflows. */ diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 4babc8a4a543..a444110b060f 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -410,6 +410,7 @@ static void populate_be_v1_stats(struct be_adapter *adapter) drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop; drvs->tx_pauseframes = port_stats->tx_pauseframes; drvs->tx_controlframes = port_stats->tx_controlframes; + drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes; drvs->jabber_events = port_stats->jabber_events; drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf; drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr; @@ -471,11 +472,26 @@ static void accumulate_16bit_val(u32 *acc, u16 val) ACCESS_ONCE(*acc) = newacc; } +void populate_erx_stats(struct be_adapter *adapter, + struct be_rx_obj *rxo, + u32 erx_stat) +{ + if (!BEx_chip(adapter)) + rx_stats(rxo)->rx_drops_no_frags = erx_stat; + else + /* below erx HW counter can actually wrap around after + * 65535. Driver accumulates a 32-bit value + */ + accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags, + (u16)erx_stat); +} + void be_parse_stats(struct be_adapter *adapter) { struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter); struct be_rx_obj *rxo; int i; + u32 erx_stat; if (lancer_chip(adapter)) { populate_lancer_stats(adapter); @@ -488,12 +504,8 @@ void be_parse_stats(struct be_adapter *adapter) /* as erx_v1 is longer than v0, ok to use v1 for v0 access */ for_all_rx_queues(adapter, rxo, i) { - /* below erx HW counter can actually wrap around after - * 65535. 
Driver accumulates a 32-bit value - */ - accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags, - (u16)erx->rx_drops_no_fragments \ - [rxo->q.id]); + erx_stat = erx->rx_drops_no_fragments[rxo->q.id]; + populate_erx_stats(adapter, rxo, erx_stat); } } } @@ -1815,7 +1827,7 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo) mdelay(1); } else { be_rx_compl_discard(rxo, rxcp); - be_cq_notify(adapter, rx_cq->id, true, 1); + be_cq_notify(adapter, rx_cq->id, false, 1); if (rxcp->num_rcvd == 0) break; } @@ -2378,7 +2390,7 @@ static uint be_num_rss_want(struct be_adapter *adapter) return num; } -static void be_msix_enable(struct be_adapter *adapter) +static int be_msix_enable(struct be_adapter *adapter) { #define BE_MIN_MSIX_VECTORS 1 int i, status, num_vec, num_roce_vec = 0; @@ -2403,13 +2415,17 @@ static void be_msix_enable(struct be_adapter *adapter) goto done; } else if (status >= BE_MIN_MSIX_VECTORS) { num_vec = status; - if (pci_enable_msix(adapter->pdev, adapter->msix_entries, - num_vec) == 0) + status = pci_enable_msix(adapter->pdev, adapter->msix_entries, + num_vec); + if (!status) goto done; } dev_warn(dev, "MSIx enable failed\n"); - return; + /* INTx is not supported in VFs, so fail probe if enable_msix fails */ + if (!be_physfn(adapter)) + return status; + return 0; done: if (be_roce_supported(adapter)) { if (num_vec > num_roce_vec) { @@ -2423,7 +2439,7 @@ done: } else adapter->num_msix_vec = num_vec; dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec); - return; + return 0; } static inline int be_msix_vec_get(struct be_adapter *adapter, @@ -2517,11 +2533,6 @@ static void be_rx_qs_destroy(struct be_adapter *adapter) q = &rxo->q; if (q->created) { be_cmd_rxq_destroy(adapter, q); - /* After the rxq is invalidated, wait for a grace time - * of 1ms for all dma to end and the flush compl to - * arrive - */ - mdelay(1); be_rx_cq_clean(rxo); } be_queue_free(adapter, q); @@ -2536,8 +2547,11 @@ static int be_close(struct net_device *netdev) be_roce_dev_close(adapter); - for_all_evt_queues(adapter, eqo, i) - napi_disable(&eqo->napi); + if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { + for_all_evt_queues(adapter, eqo, i) + napi_disable(&eqo->napi); + adapter->flags &= ~BE_FLAGS_NAPI_ENABLED; + } be_async_mcc_disable(adapter); @@ -2545,6 +2559,7 @@ static int be_close(struct net_device *netdev) * all tx skbs are freed. 
*/ be_tx_compl_clean(adapter); + netif_tx_disable(netdev); be_rx_qs_destroy(adapter); @@ -2631,7 +2646,9 @@ static int be_open(struct net_device *netdev) if (status) goto err; - be_irq_register(adapter); + status = be_irq_register(adapter); + if (status) + goto err; for_all_rx_queues(adapter, rxo, i) be_cq_notify(adapter, rxo->cq.id, true, 0); @@ -2645,11 +2662,13 @@ static int be_open(struct net_device *netdev) napi_enable(&eqo->napi); be_eq_notify(adapter, eqo->q.id, true, false, 0); } + adapter->flags |= BE_FLAGS_NAPI_ENABLED; status = be_cmd_link_status_query(adapter, NULL, &link_status, 0); if (!status) be_link_status_update(adapter, link_status); + netif_tx_start_all_queues(netdev); be_roce_dev_open(adapter); return 0; err: @@ -2761,6 +2780,8 @@ static void be_vf_clear(struct be_adapter *adapter) goto done; } + pci_disable_sriov(adapter->pdev); + for_all_vfs(adapter, vf_cfg, vf) { if (lancer_chip(adapter)) be_cmd_set_mac_list(adapter, NULL, 0, vf + 1); @@ -2770,7 +2791,6 @@ static void be_vf_clear(struct be_adapter *adapter) be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1); } - pci_disable_sriov(adapter->pdev); done: kfree(adapter->vf_cfg); adapter->num_vfs = 0; @@ -2867,13 +2887,8 @@ static int be_vf_setup(struct be_adapter *adapter) dev_info(dev, "Device supports %d VFs and not %d\n", adapter->dev_num_vfs, num_vfs); adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs); - - status = pci_enable_sriov(adapter->pdev, num_vfs); - if (status) { - dev_err(dev, "SRIOV enable failed\n"); - adapter->num_vfs = 0; + if (!adapter->num_vfs) return 0; - } } status = be_vf_setup_init(adapter); @@ -2922,6 +2937,15 @@ static int be_vf_setup(struct be_adapter *adapter) be_cmd_enable_vf(adapter, vf + 1); } + + if (!old_vfs) { + status = pci_enable_sriov(adapter->pdev, adapter->num_vfs); + if (status) { + dev_err(dev, "SRIOV enable failed\n"); + adapter->num_vfs = 0; + goto err; + } + } return 0; err: dev_err(dev, "VF setup failed\n"); @@ -3100,7 +3124,9 @@ static int be_setup(struct be_adapter *adapter) if (status) goto err; - be_msix_enable(adapter); + status = be_msix_enable(adapter); + if (status) + goto err; status = be_evt_queues_create(adapter); if (status) @@ -3174,7 +3200,7 @@ static int be_setup(struct be_adapter *adapter) be_cmd_set_flow_control(adapter, adapter->tx_fc, adapter->rx_fc); - if (be_physfn(adapter) && num_vfs) { + if (be_physfn(adapter)) { if (adapter->dev_num_vfs) be_vf_setup(adapter); else diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index d44f65bac1d4..9ce5b7185fda 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -198,6 +198,11 @@ struct bufdesc_ex { #define FLAG_RX_CSUM_ENABLED (BD_ENET_RX_ICE | BD_ENET_RX_PCR) #define FLAG_RX_CSUM_ERROR (BD_ENET_RX_ICE | BD_ENET_RX_PCR) +struct fec_enet_delayed_work { + struct delayed_work delay_work; + bool timeout; +}; + /* The FEC buffer descriptors track the ring buffers. The rx_bd_base and * tx_bd_base always point to the base of the buffer descriptors. The * cur_rx and cur_tx point to the currently available buffer. @@ -214,6 +219,7 @@ struct fec_enet_private { struct clk *clk_ipg; struct clk *clk_ahb; + struct clk *clk_enet_out; struct clk *clk_ptp; /* The saved address of a sent-in-place packet/buffer, for skfree(). 
*/ @@ -231,9 +237,6 @@ struct fec_enet_private { /* The ring entries to be free()ed */ struct bufdesc *dirty_tx; - /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */ - spinlock_t hw_lock; - struct platform_device *pdev; int opened; @@ -268,7 +271,7 @@ struct fec_enet_private { int hwts_rx_en; int hwts_tx_en; struct timer_list time_keep; - + struct fec_enet_delayed_work delay_work; }; void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index b9748f14ea78..ca9825ca88c9 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -87,6 +87,8 @@ #define FEC_QUIRK_HAS_GBIT (1 << 3) /* Controller has extend desc buffer */ #define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4) +/* Controller has hardware checksum support */ +#define FEC_QUIRK_HAS_CSUM (1 << 5) static struct platform_device_id fec_devtype[] = { { @@ -105,7 +107,7 @@ static struct platform_device_id fec_devtype[] = { }, { .name = "imx6q-fec", .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | - FEC_QUIRK_HAS_BUFDESC_EX, + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM, }, { .name = "mvf-fec", .driver_data = FEC_QUIRK_ENET_MAC, @@ -445,6 +447,13 @@ fec_restart(struct net_device *ndev, int duplex) u32 rcntl = OPT_FRAME_SIZE | 0x04; u32 ecntl = 0x2; /* ETHEREN */ + if (netif_running(ndev)) { + netif_device_detach(ndev); + napi_disable(&fep->napi); + netif_stop_queue(ndev); + netif_tx_lock(ndev); + } + /* Whack a reset. We should wait for this. */ writel(1, fep->hwp + FEC_ECNTRL); udelay(10); @@ -605,6 +614,13 @@ fec_restart(struct net_device *ndev, int duplex) /* Enable interrupts we wish to service */ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); + + if (netif_running(ndev)) { + netif_device_attach(ndev); + napi_enable(&fep->napi); + netif_wake_queue(ndev); + netif_tx_unlock(ndev); + } } static void @@ -644,8 +660,22 @@ fec_timeout(struct net_device *ndev) ndev->stats.tx_errors++; - fec_restart(ndev, fep->full_duplex); - netif_wake_queue(ndev); + fep->delay_work.timeout = true; + schedule_delayed_work(&(fep->delay_work.delay_work), 0); +} + +static void fec_enet_work(struct work_struct *work) +{ + struct fec_enet_private *fep = + container_of(work, + struct fec_enet_private, + delay_work.delay_work.work); + + if (fep->delay_work.timeout) { + fep->delay_work.timeout = false; + fec_restart(fep->netdev, fep->full_duplex); + netif_wake_queue(fep->netdev); + } } static void @@ -1024,16 +1054,12 @@ static void fec_enet_adjust_link(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); struct phy_device *phy_dev = fep->phy_dev; - unsigned long flags; - int status_change = 0; - spin_lock_irqsave(&fep->hw_lock, flags); - /* Prevent a state halted on mii error */ if (fep->mii_timeout && phy_dev->state == PHY_HALTED) { phy_dev->state = PHY_RESUMING; - goto spin_unlock; + return; } if (phy_dev->link) { @@ -1061,9 +1087,6 @@ static void fec_enet_adjust_link(struct net_device *ndev) } } -spin_unlock: - spin_unlock_irqrestore(&fep->hw_lock, flags); - if (status_change) phy_print_status(phy_dev); } @@ -1723,6 +1746,8 @@ static const struct net_device_ops fec_netdev_ops = { static int fec_enet_init(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); + const struct platform_device_id *id_entry = + platform_get_device_id(fep->pdev); struct bufdesc *cbd_base; /* Allocate memory for buffer descriptors. 
*/ @@ -1732,7 +1757,6 @@ static int fec_enet_init(struct net_device *ndev) return -ENOMEM; memset(cbd_base, 0, PAGE_SIZE); - spin_lock_init(&fep->hw_lock); fep->netdev = ndev; @@ -1755,12 +1779,14 @@ static int fec_enet_init(struct net_device *ndev) writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT); - /* enable hw accelerator */ - ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM - | NETIF_F_RXCSUM); - ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM - | NETIF_F_RXCSUM); - fep->csum_flags |= FLAG_RX_CSUM_ENABLED; + if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) { + /* enable hw accelerator */ + ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM + | NETIF_F_RXCSUM); + ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM + | NETIF_F_RXCSUM); + fep->csum_flags |= FLAG_RX_CSUM_ENABLED; + } fec_restart(ndev, 0); @@ -1883,18 +1909,23 @@ fec_probe(struct platform_device *pdev) goto failed_clk; } + /* enet_out is optional, depends on board */ + fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out"); + if (IS_ERR(fep->clk_enet_out)) + fep->clk_enet_out = NULL; + fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); fep->bufdesc_ex = pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX; if (IS_ERR(fep->clk_ptp)) { - ret = PTR_ERR(fep->clk_ptp); + fep->clk_ptp = NULL; fep->bufdesc_ex = 0; } clk_prepare_enable(fep->clk_ahb); clk_prepare_enable(fep->clk_ipg); - if (!IS_ERR(fep->clk_ptp)) - clk_prepare_enable(fep->clk_ptp); + clk_prepare_enable(fep->clk_enet_out); + clk_prepare_enable(fep->clk_ptp); reg_phy = devm_regulator_get(&pdev->dev, "phy"); if (!IS_ERR(reg_phy)) { @@ -1947,6 +1978,7 @@ fec_probe(struct platform_device *pdev) if (fep->bufdesc_ex && fep->ptp_clock) netdev_info(ndev, "registered PHC device %d\n", fep->dev_id); + INIT_DELAYED_WORK(&(fep->delay_work.delay_work), fec_enet_work); return 0; failed_register: @@ -1962,8 +1994,8 @@ failed_irq: failed_regulator: clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_ipg); - if (!IS_ERR(fep->clk_ptp)) - clk_disable_unprepare(fep->clk_ptp); + clk_disable_unprepare(fep->clk_enet_out); + clk_disable_unprepare(fep->clk_ptp); failed_pin: failed_clk: failed_ioremap: @@ -1979,12 +2011,14 @@ fec_drv_remove(struct platform_device *pdev) struct fec_enet_private *fep = netdev_priv(ndev); int i; + cancel_delayed_work_sync(&(fep->delay_work.delay_work)); unregister_netdev(ndev); fec_enet_mii_remove(fep); del_timer_sync(&fep->time_keep); clk_disable_unprepare(fep->clk_ptp); if (fep->ptp_clock) ptp_clock_unregister(fep->ptp_clock); + clk_disable_unprepare(fep->clk_enet_out); clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_ipg); for (i = 0; i < FEC_IRQ_NUM; i++) { @@ -2010,6 +2044,7 @@ fec_suspend(struct device *dev) fec_stop(ndev); netif_device_detach(ndev); } + clk_disable_unprepare(fep->clk_enet_out); clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_ipg); @@ -2022,6 +2057,7 @@ fec_resume(struct device *dev) struct net_device *ndev = dev_get_drvdata(dev); struct fec_enet_private *fep = netdev_priv(ndev); + clk_prepare_enable(fep->clk_enet_out); clk_prepare_enable(fep->clk_ahb); clk_prepare_enable(fep->clk_ipg); if (netif_running(ndev)) { diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 4989481c19f0..d300a0c0eafc 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -359,10 +359,26 @@ static int emac_reset(struct emac_instance 
*dev) } #ifdef CONFIG_PPC_DCR_NATIVE - /* Enable internal clock source */ - if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) - dcri_clrset(SDR0, SDR0_ETH_CFG, - 0, SDR0_ETH_CFG_ECS << dev->cell_index); + /* + * PPC460EX/GT Embedded Processor Advanced User's Manual + * section 28.10.1 Mode Register 0 (EMACx_MR0) states: + * Note: The PHY must provide a TX Clk in order to perform a soft reset + * of the EMAC. If none is present, select the internal clock + * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1). + * After a soft reset, select the external clock. + */ + if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { + if (dev->phy_address == 0xffffffff && + dev->phy_map == 0xffffffff) { + /* No PHY: select internal loop clock before reset */ + dcri_clrset(SDR0, SDR0_ETH_CFG, + 0, SDR0_ETH_CFG_ECS << dev->cell_index); + } else { + /* PHY present: select external clock before reset */ + dcri_clrset(SDR0, SDR0_ETH_CFG, + SDR0_ETH_CFG_ECS << dev->cell_index, 0); + } + } #endif out_be32(&p->mr0, EMAC_MR0_SRST); @@ -370,10 +386,14 @@ static int emac_reset(struct emac_instance *dev) --n; #ifdef CONFIG_PPC_DCR_NATIVE - /* Enable external clock source */ - if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) - dcri_clrset(SDR0, SDR0_ETH_CFG, - SDR0_ETH_CFG_ECS << dev->cell_index, 0); + if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { + if (dev->phy_address == 0xffffffff && + dev->phy_map == 0xffffffff) { + /* No PHY: restore external clock source after reset */ + dcri_clrset(SDR0, SDR0_ETH_CFG, + SDR0_ETH_CFG_ECS << dev->cell_index, 0); + } + } #endif if (n) { diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 302d59401065..70fd55968844 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1322,7 +1322,7 @@ static const struct net_device_ops ibmveth_netdev_ops = { static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) { - int rc, i; + int rc, i, mac_len; struct net_device *netdev; struct ibmveth_adapter *adapter; unsigned char *mac_addr_p; @@ -1332,11 +1332,19 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) dev->unit_address); mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR, - NULL); + &mac_len); if (!mac_addr_p) { dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n"); return -EINVAL; } + /* Workaround for old/broken pHyp */ + if (mac_len == 8) + mac_addr_p += 2; + else if (mac_len != 6) { + dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n", + mac_len); + return -EINVAL; + } mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev, VETH_MCAST_FILTER_SIZE, NULL); @@ -1361,17 +1369,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); - /* - * Some older boxes running PHYP non-natively have an OF that returns - * a 8-byte local-mac-address field (and the first 2 bytes have to be - * ignored) while newer boxes' OF return a 6-byte field. Note that - * IEEE 1275 specifies that local-mac-address must be a 6-byte field. - * The RPA doc specifies that the first byte must be 10b, so we'll - * just look for it to solve this 8 vs. 
6 byte field issue - */ - if ((*mac_addr_p & 0x3) != 0x02) - mac_addr_p += 2; - adapter->mac_addr = 0; memcpy(&adapter->mac_addr, mac_addr_p, 6); diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 82f1c84282db..ffbc08f56c40 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -600,7 +600,7 @@ static inline s32 __ew32_prepare(struct e1000_hw *hw) s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT; while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i) - usleep_range(50, 100); + udelay(50); return i; } diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 256ae789c143..d175bbd3ffd3 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -2496,10 +2496,12 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2, skb->ip_summed = re->skb->ip_summed; skb->csum = re->skb->csum; skb->rxhash = re->skb->rxhash; + skb->vlan_proto = re->skb->vlan_proto; skb->vlan_tci = re->skb->vlan_tci; pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr, length, PCI_DMA_FROMDEVICE); + re->skb->vlan_proto = 0; re->skb->vlan_tci = 0; re->skb->rxhash = 0; re->skb->ip_summed = CHECKSUM_NONE; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index bcf4d118e98c..c9e6b62dd000 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -889,7 +889,7 @@ static int mlx4_en_flow_replace(struct net_device *dev, .queue_mode = MLX4_NET_TRANS_Q_FIFO, .exclusive = 0, .allow_loopback = 1, - .promisc_mode = MLX4_FS_PROMISC_NONE, + .promisc_mode = MLX4_FS_REGULAR, }; rule.port = priv->port; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index a69a908614e6..b35f94700093 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -127,7 +127,7 @@ static void mlx4_en_filter_work(struct work_struct *work) .queue_mode = MLX4_NET_TRANS_Q_LIFO, .exclusive = 1, .allow_loopback = 1, - .promisc_mode = MLX4_FS_PROMISC_NONE, + .promisc_mode = MLX4_FS_REGULAR, .port = priv->port, .priority = MLX4_DOMAIN_RFS, }; @@ -448,7 +448,7 @@ static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv, .queue_mode = MLX4_NET_TRANS_Q_FIFO, .exclusive = 0, .allow_loopback = 1, - .promisc_mode = MLX4_FS_PROMISC_NONE, + .promisc_mode = MLX4_FS_REGULAR, .priority = MLX4_DOMAIN_NIC, }; @@ -795,7 +795,7 @@ static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv, err = mlx4_flow_steer_promisc_add(mdev->dev, priv->port, priv->base_qpn, - MLX4_FS_PROMISC_UPLINK); + MLX4_FS_ALL_DEFAULT); if (err) en_err(priv, "Failed enabling promiscuous mode\n"); priv->flags |= MLX4_EN_FLAG_MC_PROMISC; @@ -858,7 +858,7 @@ static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv, case MLX4_STEERING_MODE_DEVICE_MANAGED: err = mlx4_flow_steer_promisc_remove(mdev->dev, priv->port, - MLX4_FS_PROMISC_UPLINK); + MLX4_FS_ALL_DEFAULT); if (err) en_err(priv, "Failed disabling promiscuous mode\n"); priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; @@ -919,7 +919,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv, err = mlx4_flow_steer_promisc_add(mdev->dev, priv->port, priv->base_qpn, - MLX4_FS_PROMISC_ALL_MULTI); + MLX4_FS_MC_DEFAULT); break; case MLX4_STEERING_MODE_B0: @@ -942,7 +942,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv, case 
MLX4_STEERING_MODE_DEVICE_MANAGED: err = mlx4_flow_steer_promisc_remove(mdev->dev, priv->port, - MLX4_FS_PROMISC_ALL_MULTI); + MLX4_FS_MC_DEFAULT); break; case MLX4_STEERING_MODE_B0: @@ -1621,10 +1621,10 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) MLX4_EN_FLAG_MC_PROMISC); mlx4_flow_steer_promisc_remove(mdev->dev, priv->port, - MLX4_FS_PROMISC_UPLINK); + MLX4_FS_ALL_DEFAULT); mlx4_flow_steer_promisc_remove(mdev->dev, priv->port, - MLX4_FS_PROMISC_ALL_MULTI); + MLX4_FS_MC_DEFAULT); } else if (priv->flags & MLX4_EN_FLAG_PROMISC) { priv->flags &= ~MLX4_EN_FLAG_PROMISC; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c index 91f2b2c43c12..d3f508697a3d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c @@ -60,7 +60,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6; if (user_prio >= 0) { context->pri_path.sched_queue |= user_prio << 3; - context->pri_path.feup = 1 << 6; + context->pri_path.feup = MLX4_FEUP_FORCE_ETH_UP; } context->pri_path.counter_index = 0xff; context->cqn_send = cpu_to_be32(cqn); diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 8e3123a1df88..6000342f9725 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -497,8 +497,8 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) break; case MLX4_EVENT_TYPE_SRQ_LIMIT: - mlx4_warn(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n", - __func__); + mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n", + __func__); case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR: if (mlx4_is_master(dev)) { /* forward only to slave owning the SRQ */ diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index b147bdd40768..58a8e535d698 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -131,7 +131,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [2] = "RSS XOR Hash Function support", [3] = "Device manage flow steering support", [4] = "Automatic MAC reassignment support", - [5] = "Time stamping support" + [5] = "Time stamping support", + [6] = "VST (control vlan insertion/stripping) support", + [7] = "FSM (MAC anti-spoofing) support" }; int i; diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index ffc78d2cb0cf..f3e804f2a35f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -645,25 +645,37 @@ static int find_entry(struct mlx4_dev *dev, u8 port, return err; } +static const u8 __promisc_mode[] = { + [MLX4_FS_REGULAR] = 0x0, + [MLX4_FS_ALL_DEFAULT] = 0x1, + [MLX4_FS_MC_DEFAULT] = 0x3, + [MLX4_FS_UC_SNIFFER] = 0x4, + [MLX4_FS_MC_SNIFFER] = 0x5, +}; + +int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev, + enum mlx4_net_trans_promisc_mode flow_type) +{ + if (flow_type >= MLX4_FS_MODE_NUM || flow_type < 0) { + mlx4_err(dev, "Invalid flow type. 
type = %d\n", flow_type); + return -EINVAL; + } + return __promisc_mode[flow_type]; +} +EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_mode); + static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl, struct mlx4_net_trans_rule_hw_ctrl *hw) { - static const u8 __promisc_mode[] = { - [MLX4_FS_PROMISC_NONE] = 0x0, - [MLX4_FS_PROMISC_UPLINK] = 0x1, - [MLX4_FS_PROMISC_FUNCTION_PORT] = 0x2, - [MLX4_FS_PROMISC_ALL_MULTI] = 0x3, - }; - - u32 dw = 0; - - dw = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0; - dw |= ctrl->exclusive ? (1 << 2) : 0; - dw |= ctrl->allow_loopback ? (1 << 3) : 0; - dw |= __promisc_mode[ctrl->promisc_mode] << 8; - dw |= ctrl->priority << 16; - - hw->ctrl = cpu_to_be32(dw); + u8 flags = 0; + + flags = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0; + flags |= ctrl->exclusive ? (1 << 2) : 0; + flags |= ctrl->allow_loopback ? (1 << 3) : 0; + + hw->flags = flags; + hw->type = __promisc_mode[ctrl->promisc_mode]; + hw->prio = cpu_to_be16(ctrl->priority); hw->port = ctrl->port; hw->qpn = cpu_to_be32(ctrl->qpn); } @@ -677,29 +689,51 @@ const u16 __sw_id_hw[] = { [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006 }; +int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev, + enum mlx4_net_trans_rule_id id) +{ + if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) { + mlx4_err(dev, "Invalid network rule id. id = %d\n", id); + return -EINVAL; + } + return __sw_id_hw[id]; +} +EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_id); + +static const int __rule_hw_sz[] = { + [MLX4_NET_TRANS_RULE_ID_ETH] = + sizeof(struct mlx4_net_trans_rule_hw_eth), + [MLX4_NET_TRANS_RULE_ID_IB] = + sizeof(struct mlx4_net_trans_rule_hw_ib), + [MLX4_NET_TRANS_RULE_ID_IPV6] = 0, + [MLX4_NET_TRANS_RULE_ID_IPV4] = + sizeof(struct mlx4_net_trans_rule_hw_ipv4), + [MLX4_NET_TRANS_RULE_ID_TCP] = + sizeof(struct mlx4_net_trans_rule_hw_tcp_udp), + [MLX4_NET_TRANS_RULE_ID_UDP] = + sizeof(struct mlx4_net_trans_rule_hw_tcp_udp) +}; + +int mlx4_hw_rule_sz(struct mlx4_dev *dev, + enum mlx4_net_trans_rule_id id) +{ + if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) { + mlx4_err(dev, "Invalid network rule id. id = %d\n", id); + return -EINVAL; + } + + return __rule_hw_sz[id]; +} +EXPORT_SYMBOL_GPL(mlx4_hw_rule_sz); + static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec, struct _rule_hw *rule_hw) { - static const size_t __rule_hw_sz[] = { - [MLX4_NET_TRANS_RULE_ID_ETH] = - sizeof(struct mlx4_net_trans_rule_hw_eth), - [MLX4_NET_TRANS_RULE_ID_IB] = - sizeof(struct mlx4_net_trans_rule_hw_ib), - [MLX4_NET_TRANS_RULE_ID_IPV6] = 0, - [MLX4_NET_TRANS_RULE_ID_IPV4] = - sizeof(struct mlx4_net_trans_rule_hw_ipv4), - [MLX4_NET_TRANS_RULE_ID_TCP] = - sizeof(struct mlx4_net_trans_rule_hw_tcp_udp), - [MLX4_NET_TRANS_RULE_ID_UDP] = - sizeof(struct mlx4_net_trans_rule_hw_tcp_udp) - }; - if (spec->id >= MLX4_NET_TRANS_RULE_NUM) { - mlx4_err(dev, "Invalid network rule id. 
id = %d\n", spec->id); + if (mlx4_hw_rule_sz(dev, spec->id) < 0) return -EINVAL; - } - memset(rule_hw, 0, __rule_hw_sz[spec->id]); + memset(rule_hw, 0, mlx4_hw_rule_sz(dev, spec->id)); rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]); - rule_hw->size = __rule_hw_sz[spec->id] >> 2; + rule_hw->size = mlx4_hw_rule_sz(dev, spec->id) >> 2; switch (spec->id) { case MLX4_NET_TRANS_RULE_ID_ETH: @@ -713,12 +747,12 @@ static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec, rule_hw->eth.ether_type_enable = 1; rule_hw->eth.ether_type = spec->eth.ether_type; } - rule_hw->eth.vlan_id = spec->eth.vlan_id; - rule_hw->eth.vlan_id_msk = spec->eth.vlan_id_msk; + rule_hw->eth.vlan_tag = spec->eth.vlan_id; + rule_hw->eth.vlan_tag_msk = spec->eth.vlan_id_msk; break; case MLX4_NET_TRANS_RULE_ID_IB: - rule_hw->ib.qpn = spec->ib.r_qpn; + rule_hw->ib.l3_qpn = spec->ib.l3_qpn; rule_hw->ib.qpn_mask = spec->ib.qpn_msk; memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16); memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16); @@ -1136,7 +1170,7 @@ int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, struct mlx4_net_trans_rule rule = { .queue_mode = MLX4_NET_TRANS_Q_FIFO, .exclusive = 0, - .promisc_mode = MLX4_FS_PROMISC_NONE, + .promisc_mode = MLX4_FS_REGULAR, .priority = MLX4_DOMAIN_NIC, }; @@ -1229,11 +1263,10 @@ int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u64 *regid_p; switch (mode) { - case MLX4_FS_PROMISC_UPLINK: - case MLX4_FS_PROMISC_FUNCTION_PORT: + case MLX4_FS_ALL_DEFAULT: regid_p = &dev->regid_promisc_array[port]; break; - case MLX4_FS_PROMISC_ALL_MULTI: + case MLX4_FS_MC_DEFAULT: regid_p = &dev->regid_allmulti_array[port]; break; default: @@ -1260,11 +1293,10 @@ int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port, u64 *regid_p; switch (mode) { - case MLX4_FS_PROMISC_UPLINK: - case MLX4_FS_PROMISC_FUNCTION_PORT: + case MLX4_FS_ALL_DEFAULT: regid_p = &dev->regid_promisc_array[port]; break; - case MLX4_FS_PROMISC_ALL_MULTI: + case MLX4_FS_MC_DEFAULT: regid_p = &dev->regid_allmulti_array[port]; break; default: diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index eac3dae10efe..df15bb6631cc 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -730,85 +730,6 @@ struct mlx4_steer { struct list_head steer_entries[MLX4_NUM_STEERS]; }; -struct mlx4_net_trans_rule_hw_ctrl { - __be32 ctrl; - u8 rsvd1; - u8 funcid; - u8 vep; - u8 port; - __be32 qpn; - __be32 rsvd2; -}; - -struct mlx4_net_trans_rule_hw_ib { - u8 size; - u8 rsvd1; - __be16 id; - u32 rsvd2; - __be32 qpn; - __be32 qpn_mask; - u8 dst_gid[16]; - u8 dst_gid_msk[16]; -} __packed; - -struct mlx4_net_trans_rule_hw_eth { - u8 size; - u8 rsvd; - __be16 id; - u8 rsvd1[6]; - u8 dst_mac[6]; - u16 rsvd2; - u8 dst_mac_msk[6]; - u16 rsvd3; - u8 src_mac[6]; - u16 rsvd4; - u8 src_mac_msk[6]; - u8 rsvd5; - u8 ether_type_enable; - __be16 ether_type; - __be16 vlan_id_msk; - __be16 vlan_id; -} __packed; - -struct mlx4_net_trans_rule_hw_tcp_udp { - u8 size; - u8 rsvd; - __be16 id; - __be16 rsvd1[3]; - __be16 dst_port; - __be16 rsvd2; - __be16 dst_port_msk; - __be16 rsvd3; - __be16 src_port; - __be16 rsvd4; - __be16 src_port_msk; -} __packed; - -struct mlx4_net_trans_rule_hw_ipv4 { - u8 size; - u8 rsvd; - __be16 id; - __be32 rsvd1; - __be32 dst_ip; - __be32 dst_ip_msk; - __be32 src_ip; - __be32 src_ip_msk; -} __packed; - -struct _rule_hw { - union { - struct { - u8 size; - u8 rsvd; - __be16 id; - }; 
- struct mlx4_net_trans_rule_hw_eth eth; - struct mlx4_net_trans_rule_hw_ib ib; - struct mlx4_net_trans_rule_hw_ipv4 ipv4; - struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp; - }; -}; - enum { MLX4_PCI_DEV_IS_VF = 1 << 0, MLX4_PCI_DEV_FORCE_SENSE_PORT = 1 << 1, diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index e12e0d2e0ee0..1157f028a90f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -372,24 +372,29 @@ static int update_vport_qp_param(struct mlx4_dev *dev, if (MLX4_QP_ST_RC == qp_type) return -EINVAL; + /* force strip vlan by clear vsd */ + qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN); + if (0 != vp_oper->state.default_vlan) { + qpc->pri_path.vlan_control = + MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED; + } else { /* priority tagged */ + qpc->pri_path.vlan_control = + MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; + } + + qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN; qpc->pri_path.vlan_index = vp_oper->vlan_idx; - qpc->pri_path.fl = (1 << 6) | (1 << 2); /* set cv bit and hide_cqe_vlan bit*/ - qpc->pri_path.feup |= 1 << 3; /* set fvl bit */ + qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN; + qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN; qpc->pri_path.sched_queue &= 0xC7; qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3; - mlx4_dbg(dev, "qp %d port %d Q 0x%x set vlan to %d vidx %d feup %x fl %x\n", - be32_to_cpu(qpc->local_qpn) & 0xffffff, port, - (int)(qpc->pri_path.sched_queue), vp_oper->state.default_vlan, - vp_oper->vlan_idx, (int)(qpc->pri_path.feup), - (int)(qpc->pri_path.fl)); } if (vp_oper->state.spoofchk) { - qpc->pri_path.feup |= 1 << 5; /* set fsm bit */; + qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC; qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx; - mlx4_dbg(dev, "spoof qp %d port %d feup 0x%x, myLmc 0x%x mindx %d\n", - be32_to_cpu(qpc->local_qpn) & 0xffffff, port, - (int)qpc->pri_path.feup, (int)qpc->pri_path.grh_mylmc, - vp_oper->mac_idx); } return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c index e329fe1f11b7..79fd269e2c54 100644 --- a/drivers/net/ethernet/mellanox/mlx4/srq.c +++ b/drivers/net/ethernet/mellanox/mlx4/srq.c @@ -298,3 +298,18 @@ void mlx4_cleanup_srq_table(struct mlx4_dev *dev) return; mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap); } + +struct mlx4_srq *mlx4_srq_lookup(struct mlx4_dev *dev, u32 srqn) +{ + struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; + struct mlx4_srq *srq; + unsigned long flags; + + spin_lock_irqsave(&srq_table->lock, flags); + srq = radix_tree_lookup(&srq_table->tree, + srqn & (dev->caps.num_srqs - 1)); + spin_unlock_irqrestore(&srq_table->lock, flags); + + return srq; +} +EXPORT_SYMBOL_GPL(mlx4_srq_lookup); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 90c253b145ef..019c5f78732e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -429,6 +429,7 @@ struct qlcnic_hardware_context { u16 port_type; u16 board_type; + u16 supported_type; u16 link_speed; u16 link_duplex; @@ -1514,6 +1515,7 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter); void qlcnic_remove_diag_entries(struct 
qlcnic_adapter *adapter); void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter); void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter); +int qlcnic_82xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *); int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32); int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index ea790a93ee7c..b4ff1e35a11d 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -696,15 +696,14 @@ u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter) return 1; } -u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter) +u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter, u32 *wait_time) { u32 data; - unsigned long wait_time = 0; struct qlcnic_hardware_context *ahw = adapter->ahw; /* wait for mailbox completion */ do { data = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL); - if (++wait_time > QLCNIC_MBX_TIMEOUT) { + if (++(*wait_time) > QLCNIC_MBX_TIMEOUT) { data = QLCNIC_RCODE_TIMEOUT; break; } @@ -720,8 +719,8 @@ int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter, u16 opcode; u8 mbx_err_code; unsigned long flags; - u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd; struct qlcnic_hardware_context *ahw = adapter->ahw; + u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, wait_time = 0; opcode = LSW(cmd->req.arg[0]); if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) { @@ -754,15 +753,13 @@ int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter, /* Signal FW about the impending command */ QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER); poll: - rsp = qlcnic_83xx_mbx_poll(adapter); + rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time); if (rsp != QLCNIC_RCODE_TIMEOUT) { /* Get the FW response data */ fw_data = readl(QLCNIC_MBX_FW(ahw, 0)); if (fw_data & QLCNIC_MBX_ASYNC_EVENT) { __qlcnic_83xx_process_aen(adapter); - mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL); - if (mbx_val) - goto poll; + goto poll; } mbx_err_code = QLCNIC_MBX_STATUS(fw_data); rsp_num = QLCNIC_MBX_NUM_REGS(fw_data); @@ -1276,11 +1273,13 @@ out: return err; } -static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test) +static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test, + int num_sds_ring) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_host_rds_ring *rds_ring; + u16 adapter_state = adapter->is_up; u8 ring; int ret; @@ -1304,6 +1303,10 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test) ret = qlcnic_fw_create_ctx(adapter); if (ret) { qlcnic_detach(adapter); + if (adapter_state == QLCNIC_ADAPTER_UP_MAGIC) { + adapter->max_sds_rings = num_sds_ring; + qlcnic_attach(adapter); + } netif_device_attach(netdev); return ret; } @@ -1596,7 +1599,8 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode) if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) return -EBUSY; - ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST); + ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST, + max_sds_rings); if (ret) goto fail_diag_alloc; @@ -2830,6 +2834,23 @@ int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter) break; } config = cmd.rsp.arg[3]; + if (QLC_83XX_SFP_PRESENT(config)) { + switch (ahw->module_type) { + case LINKEVENT_MODULE_OPTICAL_UNKNOWN: + case LINKEVENT_MODULE_OPTICAL_SRLR: + case LINKEVENT_MODULE_OPTICAL_LRM: + case 
LINKEVENT_MODULE_OPTICAL_SFP_1G: + ahw->supported_type = PORT_FIBRE; + break; + case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE: + case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN: + case LINKEVENT_MODULE_TWINAX: + ahw->supported_type = PORT_TP; + break; + default: + ahw->supported_type = PORT_OTHER; + } + } if (config & 1) err = 1; } @@ -2838,7 +2859,8 @@ out: return config; } -int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter) +int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter, + struct ethtool_cmd *ecmd) { u32 config = 0; int status = 0; @@ -2851,6 +2873,54 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter) ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config); /* hard code until there is a way to get it from flash */ ahw->board_type = QLCNIC_BRDTYPE_83XX_10G; + + if (netif_running(adapter->netdev) && ahw->has_link_events) { + ethtool_cmd_speed_set(ecmd, ahw->link_speed); + ecmd->duplex = ahw->link_duplex; + ecmd->autoneg = ahw->link_autoneg; + } else { + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->duplex = DUPLEX_UNKNOWN; + ecmd->autoneg = AUTONEG_DISABLE; + } + + if (ahw->port_type == QLCNIC_XGBE) { + ecmd->supported = SUPPORTED_1000baseT_Full; + ecmd->advertising = ADVERTISED_1000baseT_Full; + } else { + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full); + ecmd->advertising = (ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full); + } + + switch (ahw->supported_type) { + case PORT_FIBRE: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_FIBRE; + ecmd->transceiver = XCVR_EXTERNAL; + break; + case PORT_TP: + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; + ecmd->port = PORT_TP; + ecmd->transceiver = XCVR_INTERNAL; + break; + default: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_OTHER; + ecmd->transceiver = XCVR_EXTERNAL; + break; + } + ecmd->phy_address = ahw->physical_port; return status; } @@ -3046,7 +3116,8 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) return -EIO; - ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST); + ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST, + max_sds_rings); if (ret) goto fail_diag_irq; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index 1f1d85e6f2af..f5db67fc9f55 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -603,7 +603,7 @@ int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *); void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *); void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data); -int qlcnic_83xx_get_settings(struct qlcnic_adapter *); +int qlcnic_83xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *); int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *); void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *, struct ethtool_pauseparam *); @@ -620,7 +620,7 @@ int qlcnic_83xx_flash_test(struct qlcnic_adapter *); int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *); int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *); u32 
qlcnic_83xx_mac_rcode(struct qlcnic_adapter *); -u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *); +u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *, u32 *); void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *); void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *); #endif diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index ab1d8d99cbd5..c67d1eb35e8f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -435,10 +435,6 @@ static void qlcnic_83xx_idc_attach_driver(struct qlcnic_adapter *adapter) } done: netif_device_attach(netdev); - if (netif_running(netdev)) { - netif_carrier_on(netdev); - netif_wake_queue(netdev); - } } static int qlcnic_83xx_idc_enter_failed_state(struct qlcnic_adapter *adapter, @@ -642,15 +638,21 @@ static int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter) { + struct qlcnic_hardware_context *ahw = adapter->ahw; + qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1); - clear_bit(__QLCNIC_RESETTING, &adapter->state); set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1); set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status); - adapter->ahw->idc.quiesce_req = 0; - adapter->ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY; - adapter->ahw->idc.err_code = 0; - adapter->ahw->idc.collect_dump = 0; + + ahw->idc.quiesce_req = 0; + ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY; + ahw->idc.err_code = 0; + ahw->idc.collect_dump = 0; + ahw->reset_context = 0; + adapter->tx_timeo_cnt = 0; + + clear_bit(__QLCNIC_RESETTING, &adapter->state); } /** @@ -851,6 +853,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) /* Check for soft reset request */ if (ahw->reset_context && !(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) { + adapter->ahw->reset_context = 0; qlcnic_83xx_idc_tx_soft_reset(adapter); return ret; } @@ -914,6 +917,7 @@ static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter) static int qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter) { dev_err(&adapter->pdev->dev, "%s: please restart!!\n", __func__); + clear_bit(__QLCNIC_RESETTING, &adapter->state); adapter->ahw->idc.err_code = -EIO; return 0; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 08efb4635007..f67652de5a63 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -131,12 +131,13 @@ static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = { "ctx_lro_pkt_cnt", "ctx_ip_csum_error", "ctx_rx_pkts_wo_ctx", - "ctx_rx_pkts_dropped_wo_sts", + "ctx_rx_pkts_drop_wo_sds_on_card", + "ctx_rx_pkts_drop_wo_sds_on_host", "ctx_rx_osized_pkts", "ctx_rx_pkts_dropped_wo_rds", "ctx_rx_unexpected_mcast_pkts", "ctx_invalid_mac_address", - "ctx_rx_rds_ring_prim_attemoted", + "ctx_rx_rds_ring_prim_attempted", "ctx_rx_rds_ring_prim_success", "ctx_num_lro_flows_added", "ctx_num_lro_flows_removed", @@ -251,6 +252,18 @@ static int qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) { struct qlcnic_adapter *adapter = netdev_priv(dev); + + if (qlcnic_82xx_check(adapter)) + return qlcnic_82xx_get_settings(adapter, ecmd); + else if (qlcnic_83xx_check(adapter)) + return qlcnic_83xx_get_settings(adapter, ecmd); + + 
return -EIO; +} + +int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter, + struct ethtool_cmd *ecmd) +{ struct qlcnic_hardware_context *ahw = adapter->ahw; u32 speed, reg; int check_sfp_module = 0; @@ -276,10 +289,7 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) } else if (adapter->ahw->port_type == QLCNIC_XGBE) { u32 val = 0; - if (qlcnic_83xx_check(adapter)) - qlcnic_83xx_get_settings(adapter); - else - val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR); + val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR); if (val == QLCNIC_PORT_MODE_802_3_AP) { ecmd->supported = SUPPORTED_1000baseT_Full; @@ -289,16 +299,13 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) ecmd->advertising = ADVERTISED_10000baseT_Full; } - if (netif_running(dev) && adapter->ahw->has_link_events) { - if (qlcnic_82xx_check(adapter)) { - reg = QLCRD32(adapter, - P3P_LINK_SPEED_REG(pcifn)); - speed = P3P_LINK_SPEED_VAL(pcifn, reg); - ahw->link_speed = speed * P3P_LINK_SPEED_MHZ; - } - ethtool_cmd_speed_set(ecmd, adapter->ahw->link_speed); - ecmd->autoneg = adapter->ahw->link_autoneg; - ecmd->duplex = adapter->ahw->link_duplex; + if (netif_running(adapter->netdev) && ahw->has_link_events) { + reg = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn)); + speed = P3P_LINK_SPEED_VAL(pcifn, reg); + ahw->link_speed = speed * P3P_LINK_SPEED_MHZ; + ethtool_cmd_speed_set(ecmd, ahw->link_speed); + ecmd->autoneg = ahw->link_autoneg; + ecmd->duplex = ahw->link_duplex; goto skip; } @@ -340,8 +347,8 @@ skip: case QLCNIC_BRDTYPE_P3P_10G_SFP_QT: ecmd->advertising |= ADVERTISED_TP; ecmd->supported |= SUPPORTED_TP; - check_sfp_module = netif_running(dev) && - adapter->ahw->has_link_events; + check_sfp_module = netif_running(adapter->netdev) && + ahw->has_link_events; case QLCNIC_BRDTYPE_P3P_10G_XFP: ecmd->supported |= SUPPORTED_FIBRE; ecmd->advertising |= ADVERTISED_FIBRE; @@ -355,8 +362,8 @@ skip: ecmd->advertising |= (ADVERTISED_FIBRE | ADVERTISED_TP); ecmd->port = PORT_FIBRE; - check_sfp_module = netif_running(dev) && - adapter->ahw->has_link_events; + check_sfp_module = netif_running(adapter->netdev) && + ahw->has_link_events; } else { ecmd->autoneg = AUTONEG_ENABLE; ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); @@ -365,13 +372,6 @@ skip: ecmd->port = PORT_TP; } break; - case QLCNIC_BRDTYPE_83XX_10G: - ecmd->autoneg = AUTONEG_DISABLE; - ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP); - ecmd->advertising |= (ADVERTISED_FIBRE | ADVERTISED_TP); - ecmd->port = PORT_FIBRE; - check_sfp_module = netif_running(dev) && ahw->has_link_events; - break; default: dev_err(&adapter->pdev->dev, "Unsupported board model %d\n", adapter->ahw->board_type); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h index 95b1b5732838..b6818f4356b9 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h @@ -134,7 +134,7 @@ struct qlcnic_mailbox_metadata { #define QLCNIC_SET_OWNER 1 #define QLCNIC_CLR_OWNER 0 -#define QLCNIC_MBX_TIMEOUT 10000 +#define QLCNIC_MBX_TIMEOUT 5000 #define QLCNIC_MBX_RSP_OK 1 #define QLCNIC_MBX_PORT_RSP_OK 0x1a diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 264d5a4f8153..8fb836d4129f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -37,24 +37,24 @@ MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=learning is disabled, 1=Driver learning 
is enabled, 2=FDB learning is enabled)"); int qlcnic_use_msi = 1; -MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled"); +MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)"); module_param_named(use_msi, qlcnic_use_msi, int, 0444); int qlcnic_use_msi_x = 1; -MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled"); +MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)"); module_param_named(use_msi_x, qlcnic_use_msi_x, int, 0444); int qlcnic_auto_fw_reset = 1; -MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled"); +MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)"); module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644); int qlcnic_load_fw_file; -MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file"); +MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)"); module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444); int qlcnic_config_npars; module_param(qlcnic_config_npars, int, 0444); -MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled"); +MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)"); static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent); static void qlcnic_remove(struct pci_dev *pdev); @@ -308,6 +308,23 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter) return 0; } +static void qlcnic_delete_adapter_mac(struct qlcnic_adapter *adapter) +{ + struct qlcnic_mac_list_s *cur; + struct list_head *head; + + list_for_each(head, &adapter->mac_list) { + cur = list_entry(head, struct qlcnic_mac_list_s, list); + if (!memcmp(adapter->mac_addr, cur->mac_addr, ETH_ALEN)) { + qlcnic_sre_macaddr_change(adapter, cur->mac_addr, + 0, QLCNIC_MAC_DEL); + list_del(&cur->list); + kfree(cur); + return; + } + } +} + static int qlcnic_set_mac(struct net_device *netdev, void *p) { struct qlcnic_adapter *adapter = netdev_priv(netdev); @@ -322,11 +339,15 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EINVAL; + if (!memcmp(adapter->mac_addr, addr->sa_data, ETH_ALEN)) + return 0; + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { netif_device_detach(netdev); qlcnic_napi_disable(adapter); } + qlcnic_delete_adapter_mac(adapter); memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len); memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); qlcnic_set_multi(adapter->netdev); @@ -2481,12 +2502,17 @@ static void qlcnic_tx_timeout(struct net_device *netdev) if (test_bit(__QLCNIC_RESETTING, &adapter->state)) return; - dev_err(&netdev->dev, "transmit timeout, resetting.\n"); - - if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS) - adapter->need_fw_reset = 1; - else + if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS) { + netdev_info(netdev, "Tx timeout, reset the adapter.\n"); + if (qlcnic_82xx_check(adapter)) + adapter->need_fw_reset = 1; + else if (qlcnic_83xx_check(adapter)) + qlcnic_83xx_idc_request_reset(adapter, + QLCNIC_FORCE_FW_DUMP_KEY); + } else { + netdev_info(netdev, "Tx timeout, reset adapter context.\n"); adapter->ahw->reset_context = 1; + } } static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 44d547d78b84..3869c3864deb 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ 
b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -280,9 +280,9 @@ void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter) static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr, u32 *pay, u8 pci_func, u8 size) { + u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val, wait_time = 0; struct qlcnic_hardware_context *ahw = adapter->ahw; unsigned long flags; - u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val; u16 opcode; u8 mbx_err_code; int i, j; @@ -330,15 +330,13 @@ static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr, * assume something is wrong. */ poll: - rsp = qlcnic_83xx_mbx_poll(adapter); + rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time); if (rsp != QLCNIC_RCODE_TIMEOUT) { /* Get the FW response data */ fw_data = readl(QLCNIC_MBX_FW(ahw, 0)); if (fw_data & QLCNIC_MBX_ASYNC_EVENT) { __qlcnic_83xx_process_aen(adapter); - mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL); - if (mbx_val) - goto poll; + goto poll; } mbx_err_code = QLCNIC_MBX_STATUS(fw_data); rsp_num = QLCNIC_MBX_NUM_REGS(fw_data); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index c81be2da119b..1a66ccded235 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c @@ -1133,9 +1133,6 @@ static int qlcnic_sriov_validate_linkevent(struct qlcnic_vf_info *vf, if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id) return -EINVAL; - if (!(cmd->req.arg[1] & BIT_8)) - return -EINVAL; - return 0; } diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 87463bc701a6..50235d201592 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -1106,6 +1106,7 @@ static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring, if (pci_dma_mapping_error(qdev->pdev, map)) { __free_pages(rx_ring->pg_chunk.page, qdev->lbq_buf_order); + rx_ring->pg_chunk.page = NULL; netif_err(qdev, drv, qdev->ndev, "PCI mapping failed.\n"); return -ENOMEM; @@ -2777,6 +2778,12 @@ static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring curr_idx = 0; } + if (rx_ring->pg_chunk.page) { + pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map, + ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE); + put_page(rx_ring->pg_chunk.page); + rx_ring->pg_chunk.page = NULL; + } } static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index 4486102fa9b3..71998e7995d9 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -1528,7 +1528,7 @@ static int falcon_probe_nic(struct efx_nic *efx) return 0; fail6: - BUG_ON(i2c_del_adapter(&board->i2c_adap)); + i2c_del_adapter(&board->i2c_adap); memset(&board->i2c_adap, 0, sizeof(board->i2c_adap)); fail5: efx_nic_free_buffer(efx, &efx->irq_status); @@ -1666,13 +1666,11 @@ static void falcon_remove_nic(struct efx_nic *efx) { struct falcon_nic_data *nic_data = efx->nic_data; struct falcon_board *board = falcon_board(efx); - int rc; board->type->fini(efx); /* Remove I2C adapter and clear it in preparation for a retry */ - rc = i2c_del_adapter(&board->i2c_adap); - BUG_ON(rc); + i2c_del_adapter(&board->i2c_adap); memset(&board->i2c_adap, 0, sizeof(board->i2c_adap)); efx_nic_free_buffer(efx, &efx->irq_status); diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c 
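
For context on the sfc/ptp.c hunk that follows: ptp_clock_register() reports failure through an ERR_PTR()-encoded pointer rather than NULL, which is why the probe check is switched to IS_ERR()/PTR_ERR(). The stand-alone sketch below (userspace, with a local reimplementation of the <linux/err.h> helpers and a hypothetical register_clock() stand-in) shows why a NULL test misses such errors.

/* Local reimplementation of the ERR_PTR()/IS_ERR()/PTR_ERR() idiom,
 * for illustration only. */
#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical stand-in for an API like ptp_clock_register() that
 * returns an ERR_PTR()-encoded errno on failure, never NULL. */
static void *register_clock(int fail)
{
	static int dummy;
	return fail ? ERR_PTR(-ENOMEM) : (void *)&dummy;
}

int main(void)
{
	void *clk = register_clock(1);

	if (IS_ERR(clk)) {		/* a NULL test would miss this */
		printf("register failed: %ld\n", PTR_ERR(clk));
		return 1;
	}
	printf("registered at %p\n", clk);
	return 0;
}
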
index 07f6baa15c0c..9a95abf2dedf 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -912,8 +912,10 @@ static int efx_ptp_probe_channel(struct efx_channel *channel) ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info, &efx->pci_dev->dev); - if (!ptp->phc_clock) + if (IS_ERR(ptp->phc_clock)) { + rc = PTR_ERR(ptp->phc_clock); goto fail3; + } INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker); ptp->pps_workwq = create_singlethread_workqueue("sfc_pps"); diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index f695a50bac47..43c1f3223322 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -1,6 +1,6 @@ config STMMAC_ETH tristate "STMicroelectronics 10/100/1000 Ethernet driver" - depends on HAS_IOMEM + depends on HAS_IOMEM && HAS_DMA select NET_CORE select MII select PHYLIB diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 59c43918883e..21a5b291b4b3 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -555,8 +555,8 @@ static int cpsw_poll(struct napi_struct *napi, int budget) cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); prim_cpsw = cpsw_get_slave_priv(priv, 0); if (prim_cpsw->irq_enabled == false) { - cpsw_enable_irq(priv); prim_cpsw->irq_enabled = true; + cpsw_enable_irq(priv); } } diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 66e025ad5df1..f3c2d034b32c 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -930,7 +930,7 @@ static int tile_net_setup_interrupts(struct net_device *dev) if (info->has_iqueue) { gxio_mpipe_request_notif_ring_interrupt( &context, cpu_x(cpu), cpu_y(cpu), - 1, ingress_irq, info->iqueue.ring); + KERNEL_PL, ingress_irq, info->iqueue.ring); } } diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index c655fe60121e..5734480c1ecf 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -1990,7 +1990,8 @@ spider_net_open(struct net_device *netdev) goto alloc_rx_failed; /* Allocate rx skbs */ - if (spider_net_alloc_rx_skbs(card)) + result = spider_net_alloc_rx_skbs(card); + if (result) goto alloc_skbs_failed; spider_net_set_multi(netdev); diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index 49b8b58fc5c6..484f77ec2ce1 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c @@ -449,7 +449,7 @@ static int transmit(struct baycom_state *bc, int cnt, unsigned char stat) if ((--bc->hdlctx.slotcnt) > 0) return 0; bc->hdlctx.slotcnt = bc->ch_params.slottime; - if ((random32() % 256) > bc->ch_params.ppersist) + if ((prandom_u32() % 256) > bc->ch_params.ppersist) return 0; } } diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c index a4a3516b6bbf..3169252613fa 100644 --- a/drivers/net/hamradio/hdlcdrv.c +++ b/drivers/net/hamradio/hdlcdrv.c @@ -389,7 +389,7 @@ void hdlcdrv_arbitrate(struct net_device *dev, struct hdlcdrv_state *s) if ((--s->hdlctx.slotcnt) > 0) return; s->hdlctx.slotcnt = s->ch_params.slottime; - if ((random32() % 256) > s->ch_params.ppersist) + if ((prandom_u32() % 256) > s->ch_params.ppersist) return; start_tx(dev, s); } diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index b2d863f2ea42..0721e72f9299 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c @@ -638,7 
+638,7 @@ static void yam_arbitrate(struct net_device *dev) yp->slotcnt = yp->slot / 10; /* is random > persist ? */ - if ((random32() % 256) > yp->pers) + if ((prandom_u32() % 256) > yp->pers) return; yam_start_tx(dev, yp); diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c index a06fca61c9a0..22b4527321b1 100644 --- a/drivers/net/irda/bfin_sir.c +++ b/drivers/net/irda/bfin_sir.c @@ -609,7 +609,7 @@ static int bfin_sir_open(struct net_device *dev) { struct bfin_sir_self *self = netdev_priv(dev); struct bfin_sir_port *port = self->sir_port; - int err = -ENOMEM; + int err; self->newspeed = 0; self->speed = 9600; @@ -623,8 +623,10 @@ static int bfin_sir_open(struct net_device *dev) bfin_sir_set_speed(port, 9600); self->irlap = irlap_open(dev, &self->qos, DRIVER_NAME); - if (!self->irlap) + if (!self->irlap) { + err = -ENOMEM; goto err_irlap; + } INIT_WORK(&self->work, bfin_sir_send_work); diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index d5a141c7c4e7..1c502bb0c916 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -229,7 +229,8 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) } if (port->passthru) - vlan = list_first_entry(&port->vlans, struct macvlan_dev, list); + vlan = list_first_or_null_rcu(&port->vlans, + struct macvlan_dev, list); else vlan = macvlan_hash_lookup(port, eth->h_dest); if (vlan == NULL) @@ -814,7 +815,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, if (err < 0) goto upper_dev_unlink; - list_add_tail(&vlan->list, &port->vlans); + list_add_tail_rcu(&vlan->list, &port->vlans); netif_stacked_transfer_operstate(lowerdev, dev); return 0; @@ -842,7 +843,7 @@ void macvlan_dellink(struct net_device *dev, struct list_head *head) { struct macvlan_dev *vlan = netdev_priv(dev); - list_del(&vlan->list); + list_del_rcu(&vlan->list); unregister_netdevice_queue(dev, head); netdev_upper_dev_unlink(vlan->lowerdev, dev); } diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 450345261bd3..1e11f2bfd9ce 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -126,7 +126,7 @@ config MDIO_BITBANG config MDIO_GPIO tristate "Support for GPIO lib-based bitbanged MDIO buses" - depends on MDIO_BITBANG && GENERIC_GPIO + depends on MDIO_BITBANG && GPIOLIB ---help--- Supports GPIO lib-based MDIO busses. diff --git a/drivers/net/team/team_mode_random.c b/drivers/net/team/team_mode_random.c index 9eabfaa22f3e..5ca14d463ba7 100644 --- a/drivers/net/team/team_mode_random.c +++ b/drivers/net/team/team_mode_random.c @@ -18,7 +18,7 @@ static u32 random_N(unsigned int N) { - return reciprocal_divide(random32(), N); + return reciprocal_divide(prandom_u32(), N); } static bool rnd_transmit(struct team *team, struct sk_buff *skb) diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 7c769d8e25ad..287cc624b90b 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@ -93,6 +93,17 @@ config USB_RTL8150 To compile this driver as a module, choose M here: the module will be called rtl8150. +config USB_RTL8152 + tristate "Realtek RTL8152 Based USB 2.0 Ethernet Adapters" + select NET_CORE + select MII + help + This option adds support for Realtek RTL8152 based USB 2.0 + 10/100 Ethernet adapters. + + To compile this driver as a module, choose M here: the + module will be called r8152. 
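
As an aside on the random32() -> prandom_u32() conversions in the hamradio drivers above (baycom_epp, hdlcdrv, yam): the random value only gates the p-persistence test, so any pseudo-random source works. A minimal userspace sketch of that test follows, with rand() standing in for prandom_u32() and an example ppersist value chosen purely for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Transmit in a free slot only if a random byte is <= ppersist,
 * i.e. with probability (ppersist + 1)/256. */
static int may_transmit(unsigned int ppersist)
{
	return ((unsigned int)rand() % 256) <= ppersist;
}

int main(void)
{
	unsigned int ppersist = 64;	/* example channel parameter */
	int i, sent = 0;

	srand((unsigned int)time(NULL));
	for (i = 0; i < 10000; i++)
		sent += may_transmit(ppersist);

	/* expect roughly (64 + 1)/256, about 25% of slots used */
	printf("transmitted in %d of 10000 slots\n", sent);
	return 0;
}
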
+ config USB_USBNET tristate "Multi-purpose USB Networking Framework" select NET_CORE diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile index 119b06c9aa16..9ab5c9d4b45a 100644 --- a/drivers/net/usb/Makefile +++ b/drivers/net/usb/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_USB_CATC) += catc.o obj-$(CONFIG_USB_KAWETH) += kaweth.o obj-$(CONFIG_USB_PEGASUS) += pegasus.o obj-$(CONFIG_USB_RTL8150) += rtl8150.o +obj-$(CONFIG_USB_RTL8152) += r8152.o obj-$(CONFIG_USB_HSO) += hso.o obj-$(CONFIG_USB_NET_AX8817X) += asix.o asix-y := asix_devices.o asix_common.o ax88172a.o diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index f7f623a5390e..577c72d5f369 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c @@ -100,6 +100,9 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n", rx->size); kfree_skb(rx->ax_skb); + rx->ax_skb = NULL; + rx->size = 0U; + return 0; } diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 4ff71d619cd8..078795fe6e31 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -479,6 +479,7 @@ static const struct driver_info wwan_info = { #define NOVATEL_VENDOR_ID 0x1410 #define ZTE_VENDOR_ID 0x19D2 #define DELL_VENDOR_ID 0x413C +#define REALTEK_VENDOR_ID 0x0bda static const struct usb_device_id products [] = { /* @@ -612,6 +613,13 @@ static const struct usb_device_id products [] = { .driver_info = 0, }, +/* Dell Wireless 5804 (Novatel E371) - handled by qmi_wwan */ +{ + USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x819b, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), + .driver_info = 0, +}, + /* AnyDATA ADU960S - handled by qmi_wwan */ { USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM, @@ -619,6 +627,15 @@ static const struct usb_device_id products [] = { .driver_info = 0, }, +/* Realtek RTL8152 Based USB 2.0 Ethernet Adapters */ +#if defined(CONFIG_USB_RTL8152) || defined(CONFIG_USB_RTL8152_MODULE) +{ + USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8152, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), + .driver_info = 0, +}, +#endif + /* * WHITELIST!!! 
* diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index 09699054b54f..03e8a15d7deb 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -256,8 +256,9 @@ static int mdio_read(struct net_device *dev, int phy_id, int loc) static void mdio_write(struct net_device *dev, int phy_id, int loc, int val) { pegasus_t *pegasus = netdev_priv(dev); + u16 data = val; - write_mii_word(pegasus, phy_id, loc, (__u16 *)&val); + write_mii_word(pegasus, phy_id, loc, &data); } static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 5a88e72090ce..cf887c2384e9 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -501,6 +501,13 @@ static const struct usb_device_id products[] = { USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&qmi_wwan_info, }, + { /* Dell Wireless 5804 (Novatel E371) */ + USB_DEVICE_AND_INTERFACE_INFO(0x413C, 0x819b, + USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, + USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&qmi_wwan_info, + }, { /* ADU960S */ USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM, @@ -548,6 +555,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x19d2, 0x0265, 4)}, /* ONDA MT8205 4G LTE */ {QMI_FIXED_INTF(0x19d2, 0x0284, 4)}, /* ZTE MF880 */ {QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */ + {QMI_FIXED_INTF(0x19d2, 0x0412, 4)}, /* Telewell TW-LTE 4G */ {QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */ {QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */ {QMI_FIXED_INTF(0x19d2, 0x1012, 4)}, diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c new file mode 100644 index 000000000000..14e519888631 --- /dev/null +++ b/drivers/net/usb/r8152.c @@ -0,0 +1,1767 @@ +/* + * Copyright (c) 2013 Realtek Semiconductor Corp. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + */ + +#include <linux/init.h> +#include <linux/signal.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/version.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/mii.h> +#include <linux/ethtool.h> +#include <linux/usb.h> +#include <linux/crc32.h> +#include <linux/if_vlan.h> +#include <linux/uaccess.h> + +/* Version Information */ +#define DRIVER_VERSION "v1.0.0 (2013/05/03)" +#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" +#define DRIVER_DESC "Realtek RTL8152 Based USB 2.0 Ethernet Adapters" +#define MODULENAME "r8152" + +#define R8152_PHY_ID 32 + +#define PLA_IDR 0xc000 +#define PLA_RCR 0xc010 +#define PLA_RMS 0xc016 +#define PLA_RXFIFO_CTRL0 0xc0a0 +#define PLA_RXFIFO_CTRL1 0xc0a4 +#define PLA_RXFIFO_CTRL2 0xc0a8 +#define PLA_FMC 0xc0b4 +#define PLA_CFG_WOL 0xc0b6 +#define PLA_MAR 0xcd00 +#define PAL_BDC_CR 0xd1a0 +#define PLA_LEDSEL 0xdd90 +#define PLA_LED_FEATURE 0xdd92 +#define PLA_PHYAR 0xde00 +#define PLA_GPHY_INTR_IMR 0xe022 +#define PLA_EEE_CR 0xe040 +#define PLA_EEEP_CR 0xe080 +#define PLA_MAC_PWR_CTRL 0xe0c0 +#define PLA_TCR0 0xe610 +#define PLA_TCR1 0xe612 +#define PLA_TXFIFO_CTRL 0xe618 +#define PLA_RSTTELLY 0xe800 +#define PLA_CR 0xe813 +#define PLA_CRWECR 0xe81c +#define PLA_CONFIG5 0xe822 +#define PLA_PHY_PWR 0xe84c +#define PLA_OOB_CTRL 0xe84f +#define PLA_CPCR 0xe854 +#define PLA_MISC_0 0xe858 +#define PLA_MISC_1 0xe85a +#define PLA_OCP_GPHY_BASE 0xe86c +#define PLA_TELLYCNT 0xe890 +#define PLA_SFF_STS_7 0xe8de +#define PLA_PHYSTATUS 0xe908 +#define PLA_BP_BA 0xfc26 +#define PLA_BP_0 0xfc28 +#define PLA_BP_1 0xfc2a +#define PLA_BP_2 0xfc2c +#define PLA_BP_3 0xfc2e +#define PLA_BP_4 0xfc30 +#define PLA_BP_5 0xfc32 +#define PLA_BP_6 0xfc34 +#define PLA_BP_7 0xfc36 + +#define USB_DEV_STAT 0xb808 +#define USB_USB_CTRL 0xd406 +#define USB_PHY_CTRL 0xd408 +#define USB_TX_AGG 0xd40a +#define USB_RX_BUF_TH 0xd40c +#define USB_USB_TIMER 0xd428 +#define USB_PM_CTRL_STATUS 0xd432 +#define USB_TX_DMA 0xd434 +#define USB_UPS_CTRL 0xd800 +#define USB_BP_BA 0xfc26 +#define USB_BP_0 0xfc28 +#define USB_BP_1 0xfc2a +#define USB_BP_2 0xfc2c +#define USB_BP_3 0xfc2e +#define USB_BP_4 0xfc30 +#define USB_BP_5 0xfc32 +#define USB_BP_6 0xfc34 +#define USB_BP_7 0xfc36 + +/* OCP Registers */ +#define OCP_ALDPS_CONFIG 0x2010 +#define OCP_EEE_CONFIG1 0x2080 +#define OCP_EEE_CONFIG2 0x2092 +#define OCP_EEE_CONFIG3 0x2094 +#define OCP_EEE_AR 0xa41a +#define OCP_EEE_DATA 0xa41c + +/* PLA_RCR */ +#define RCR_AAP 0x00000001 +#define RCR_APM 0x00000002 +#define RCR_AM 0x00000004 +#define RCR_AB 0x00000008 +#define RCR_ACPT_ALL (RCR_AAP | RCR_APM | RCR_AM | RCR_AB) + +/* PLA_RXFIFO_CTRL0 */ +#define RXFIFO_THR1_NORMAL 0x00080002 +#define RXFIFO_THR1_OOB 0x01800003 + +/* PLA_RXFIFO_CTRL1 */ +#define RXFIFO_THR2_FULL 0x00000060 +#define RXFIFO_THR2_HIGH 0x00000038 +#define RXFIFO_THR2_OOB 0x0000004a + +/* PLA_RXFIFO_CTRL2 */ +#define RXFIFO_THR3_FULL 0x00000078 +#define RXFIFO_THR3_HIGH 0x00000048 +#define RXFIFO_THR3_OOB 0x0000005a + +/* PLA_TXFIFO_CTRL */ +#define TXFIFO_THR_NORMAL 0x00400008 + +/* PLA_FMC */ +#define FMC_FCR_MCU_EN 0x0001 + +/* PLA_EEEP_CR */ +#define EEEP_CR_EEEP_TX 0x0002 + +/* PLA_TCR0 */ +#define TCR0_TX_EMPTY 0x0800 +#define TCR0_AUTO_FIFO 0x0080 + +/* PLA_TCR1 */ +#define VERSION_MASK 0x7cf0 + +/* PLA_CR */ +#define CR_RST 0x10 +#define CR_RE 0x08 +#define CR_TE 0x04 + +/* PLA_CRWECR */ +#define CRWECR_NORAML 0x00 +#define CRWECR_CONFIG 0xc0 + +/* PLA_OOB_CTRL */ +#define NOW_IS_OOB 0x80 
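
The accessor routines further down (ocp_read_word()/ocp_write_word() and the byte variants) reach these registers only in aligned 32-bit transfers, extracting narrower fields by shift and mask and updating them by read-modify-write of the containing dword. A stand-alone userspace sketch of that scheme follows, using a fake register array and ignoring the driver's endianness conversion and byte-enable details.

#include <stdio.h>
#include <stdint.h>

static uint32_t regs[2] = { 0x11223344, 0x55667788 }; /* fake register file */

static uint32_t read_dword(uint16_t index)  { return regs[(index & ~3u) / 4]; }
static void write_dword(uint16_t index, uint32_t v) { regs[(index & ~3u) / 4] = v; }

/* 16-bit read: fetch the aligned dword, shift the wanted half down. */
static uint16_t read_word(uint16_t index)
{
	unsigned int shift = (index & 2) * 8;

	return (uint16_t)(read_dword(index) >> shift);
}

/* 16-bit write: read-modify-write of the aligned dword. */
static void write_word(uint16_t index, uint16_t data)
{
	unsigned int shift = (index & 2) * 8;
	uint32_t tmp = read_dword(index);

	tmp &= ~(0xffffu << shift);
	tmp |= (uint32_t)data << shift;
	write_dword(index, tmp);
}

int main(void)
{
	printf("word at 2: 0x%04x\n", read_word(2));	/* 0x1122 */
	write_word(2, 0xabcd);
	printf("dword at 0: 0x%08x\n", regs[0]);	/* 0xabcd3344 */
	return 0;
}
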
+#define TXFIFO_EMPTY 0x20 +#define RXFIFO_EMPTY 0x10 +#define LINK_LIST_READY 0x02 +#define DIS_MCU_CLROOB 0x01 +#define FIFO_EMPTY (TXFIFO_EMPTY | RXFIFO_EMPTY) + +/* PLA_MISC_1 */ +#define RXDY_GATED_EN 0x0008 + +/* PLA_SFF_STS_7 */ +#define RE_INIT_LL 0x8000 +#define MCU_BORW_EN 0x4000 + +/* PLA_CPCR */ +#define CPCR_RX_VLAN 0x0040 + +/* PLA_CFG_WOL */ +#define MAGIC_EN 0x0001 + +/* PAL_BDC_CR */ +#define ALDPS_PROXY_MODE 0x0001 + +/* PLA_CONFIG5 */ +#define LAN_WAKE_EN 0x0002 + +/* PLA_LED_FEATURE */ +#define LED_MODE_MASK 0x0700 + +/* PLA_PHY_PWR */ +#define TX_10M_IDLE_EN 0x0080 +#define PFM_PWM_SWITCH 0x0040 + +/* PLA_MAC_PWR_CTRL */ +#define D3_CLK_GATED_EN 0x00004000 +#define MCU_CLK_RATIO 0x07010f07 +#define MCU_CLK_RATIO_MASK 0x0f0f0f0f + +/* PLA_GPHY_INTR_IMR */ +#define GPHY_STS_MSK 0x0001 +#define SPEED_DOWN_MSK 0x0002 +#define SPDWN_RXDV_MSK 0x0004 +#define SPDWN_LINKCHG_MSK 0x0008 + +/* PLA_PHYAR */ +#define PHYAR_FLAG 0x80000000 + +/* PLA_EEE_CR */ +#define EEE_RX_EN 0x0001 +#define EEE_TX_EN 0x0002 + +/* USB_DEV_STAT */ +#define STAT_SPEED_MASK 0x0006 +#define STAT_SPEED_HIGH 0x0000 +#define STAT_SPEED_FULL 0x0001 + +/* USB_TX_AGG */ +#define TX_AGG_MAX_THRESHOLD 0x03 + +/* USB_RX_BUF_TH */ +#define RX_BUF_THR 0x7a120180 + +/* USB_TX_DMA */ +#define TEST_MODE_DISABLE 0x00000001 +#define TX_SIZE_ADJUST1 0x00000100 + +/* USB_UPS_CTRL */ +#define POWER_CUT 0x0100 + +/* USB_PM_CTRL_STATUS */ +#define RWSUME_INDICATE 0x0001 + +/* USB_USB_CTRL */ +#define RX_AGG_DISABLE 0x0010 + +/* OCP_ALDPS_CONFIG */ +#define ENPWRSAVE 0x8000 +#define ENPDNPS 0x0200 +#define LINKENA 0x0100 +#define DIS_SDSAVE 0x0010 + +/* OCP_EEE_CONFIG1 */ +#define RG_TXLPI_MSK_HFDUP 0x8000 +#define RG_MATCLR_EN 0x4000 +#define EEE_10_CAP 0x2000 +#define EEE_NWAY_EN 0x1000 +#define TX_QUIET_EN 0x0200 +#define RX_QUIET_EN 0x0100 +#define SDRISETIME 0x0010 /* bit 4 ~ 6 */ +#define RG_RXLPI_MSK_HFDUP 0x0008 +#define SDFALLTIME 0x0007 /* bit 0 ~ 2 */ + +/* OCP_EEE_CONFIG2 */ +#define RG_LPIHYS_NUM 0x7000 /* bit 12 ~ 15 */ +#define RG_DACQUIET_EN 0x0400 +#define RG_LDVQUIET_EN 0x0200 +#define RG_CKRSEL 0x0020 +#define RG_EEEPRG_EN 0x0010 + +/* OCP_EEE_CONFIG3 */ +#define FST_SNR_EYE_R 0x1500 /* bit 7 ~ 15 */ +#define RG_LFS_SEL 0x0060 /* bit 6 ~ 5 */ +#define MSK_PH 0x0006 /* bit 0 ~ 3 */ + +/* OCP_EEE_AR */ +/* bit[15:14] function */ +#define FUN_ADDR 0x0000 +#define FUN_DATA 0x4000 +/* bit[4:0] device addr */ +#define DEVICE_ADDR 0x0007 + +/* OCP_EEE_DATA */ +#define EEE_ADDR 0x003C +#define EEE_DATA 0x0002 + +enum rtl_register_content { + _100bps = 0x08, + _10bps = 0x04, + LINK_STATUS = 0x02, + FULL_DUP = 0x01, +}; + +#define RTL8152_REQT_READ 0xc0 +#define RTL8152_REQT_WRITE 0x40 +#define RTL8152_REQ_GET_REGS 0x05 +#define RTL8152_REQ_SET_REGS 0x05 + +#define BYTE_EN_DWORD 0xff +#define BYTE_EN_WORD 0x33 +#define BYTE_EN_BYTE 0x11 +#define BYTE_EN_SIX_BYTES 0x3f +#define BYTE_EN_START_MASK 0x0f +#define BYTE_EN_END_MASK 0xf0 + +#define RTL8152_RMS (VLAN_ETH_FRAME_LEN + VLAN_HLEN) +#define RTL8152_TX_TIMEOUT (HZ) + +/* rtl8152 flags */ +enum rtl8152_flags { + RTL8152_UNPLUG = 0, + RX_URB_FAIL, + RTL8152_SET_RX_MODE, + WORK_ENABLE +}; + +/* Define these values to match your device */ +#define VENDOR_ID_REALTEK 0x0bda +#define PRODUCT_ID_RTL8152 0x8152 + +#define MCU_TYPE_PLA 0x0100 +#define MCU_TYPE_USB 0x0000 + +struct rx_desc { + u32 opts1; +#define RX_LEN_MASK 0x7fff + u32 opts2; + u32 opts3; + u32 opts4; + u32 opts5; + u32 opts6; +}; + +struct tx_desc { + u32 opts1; +#define TX_FS (1 << 31) /* First 
segment of a packet */ +#define TX_LS (1 << 30) /* Final segment of a packet */ +#define TX_LEN_MASK 0xffff + u32 opts2; +}; + +struct r8152 { + unsigned long flags; + struct usb_device *udev; + struct tasklet_struct tl; + struct net_device *netdev; + struct urb *rx_urb, *tx_urb; + struct sk_buff *tx_skb, *rx_skb; + struct delayed_work schedule; + struct mii_if_info mii; + u32 msg_enable; + u16 ocp_base; + u8 version; + u8 speed; +}; + +enum rtl_version { + RTL_VER_UNKNOWN = 0, + RTL_VER_01, + RTL_VER_02 +}; + +/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). + * The RTL chips use a 64 element hash table based on the Ethernet CRC. + */ +static const int multicast_filter_limit = 32; + +static +int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) +{ + return usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0), + RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, + value, index, data, size, 500); +} + +static +int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) +{ + return usb_control_msg(tp->udev, usb_sndctrlpipe(tp->udev, 0), + RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE, + value, index, data, size, 500); +} + +static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size, + void *data, u16 type) +{ + u16 limit = 64; + int ret = 0; + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return -ENODEV; + + /* both size and indix must be 4 bytes align */ + if ((size & 3) || !size || (index & 3) || !data) + return -EPERM; + + if ((u32)index + (u32)size > 0xffff) + return -EPERM; + + while (size) { + if (size > limit) { + ret = get_registers(tp, index, type, limit, data); + if (ret < 0) + break; + + index += limit; + data += limit; + size -= limit; + } else { + ret = get_registers(tp, index, type, size, data); + if (ret < 0) + break; + + index += size; + data += size; + size = 0; + break; + } + } + + return ret; +} + +static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen, + u16 size, void *data, u16 type) +{ + int ret; + u16 byteen_start, byteen_end, byen; + u16 limit = 512; + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return -ENODEV; + + /* both size and indix must be 4 bytes align */ + if ((size & 3) || !size || (index & 3) || !data) + return -EPERM; + + if ((u32)index + (u32)size > 0xffff) + return -EPERM; + + byteen_start = byteen & BYTE_EN_START_MASK; + byteen_end = byteen & BYTE_EN_END_MASK; + + byen = byteen_start | (byteen_start << 4); + ret = set_registers(tp, index, type | byen, 4, data); + if (ret < 0) + goto error1; + + index += 4; + data += 4; + size -= 4; + + if (size) { + size -= 4; + + while (size) { + if (size > limit) { + ret = set_registers(tp, index, + type | BYTE_EN_DWORD, + limit, data); + if (ret < 0) + goto error1; + + index += limit; + data += limit; + size -= limit; + } else { + ret = set_registers(tp, index, + type | BYTE_EN_DWORD, + size, data); + if (ret < 0) + goto error1; + + index += size; + data += size; + size = 0; + break; + } + } + + byen = byteen_end | (byteen_end >> 4); + ret = set_registers(tp, index, type | byen, 4, data); + if (ret < 0) + goto error1; + } + +error1: + return ret; +} + +static inline +int pla_ocp_read(struct r8152 *tp, u16 index, u16 size, void *data) +{ + return generic_ocp_read(tp, index, size, data, MCU_TYPE_PLA); +} + +static inline +int pla_ocp_write(struct r8152 *tp, u16 index, u16 byteen, u16 size, void *data) +{ + return generic_ocp_write(tp, index, byteen, size, data, MCU_TYPE_PLA); +} + +static inline +int usb_ocp_read(struct r8152 *tp, u16 
index, u16 size, void *data) +{ + return generic_ocp_read(tp, index, size, data, MCU_TYPE_USB); +} + +static inline +int usb_ocp_write(struct r8152 *tp, u16 index, u16 byteen, u16 size, void *data) +{ + return generic_ocp_write(tp, index, byteen, size, data, MCU_TYPE_USB); +} + +static u32 ocp_read_dword(struct r8152 *tp, u16 type, u16 index) +{ + u32 data; + + if (type == MCU_TYPE_PLA) + pla_ocp_read(tp, index, sizeof(data), &data); + else + usb_ocp_read(tp, index, sizeof(data), &data); + + return __le32_to_cpu(data); +} + +static void ocp_write_dword(struct r8152 *tp, u16 type, u16 index, u32 data) +{ + if (type == MCU_TYPE_PLA) + pla_ocp_write(tp, index, BYTE_EN_DWORD, sizeof(data), &data); + else + usb_ocp_write(tp, index, BYTE_EN_DWORD, sizeof(data), &data); +} + +static u16 ocp_read_word(struct r8152 *tp, u16 type, u16 index) +{ + u32 data; + u8 shift = index & 2; + + index &= ~3; + + if (type == MCU_TYPE_PLA) + pla_ocp_read(tp, index, sizeof(data), &data); + else + usb_ocp_read(tp, index, sizeof(data), &data); + + data = __le32_to_cpu(data); + data >>= (shift * 8); + data &= 0xffff; + + return (u16)data; +} + +static void ocp_write_word(struct r8152 *tp, u16 type, u16 index, u32 data) +{ + u32 tmp, mask = 0xffff; + u16 byen = BYTE_EN_WORD; + u8 shift = index & 2; + + data &= mask; + + if (index & 2) { + byen <<= shift; + mask <<= (shift * 8); + data <<= (shift * 8); + index &= ~3; + } + + if (type == MCU_TYPE_PLA) + pla_ocp_read(tp, index, sizeof(tmp), &tmp); + else + usb_ocp_read(tp, index, sizeof(tmp), &tmp); + + tmp = __le32_to_cpu(tmp) & ~mask; + tmp |= data; + tmp = __cpu_to_le32(tmp); + + if (type == MCU_TYPE_PLA) + pla_ocp_write(tp, index, byen, sizeof(tmp), &tmp); + else + usb_ocp_write(tp, index, byen, sizeof(tmp), &tmp); +} + +static u8 ocp_read_byte(struct r8152 *tp, u16 type, u16 index) +{ + u32 data; + u8 shift = index & 3; + + index &= ~3; + + if (type == MCU_TYPE_PLA) + pla_ocp_read(tp, index, sizeof(data), &data); + else + usb_ocp_read(tp, index, sizeof(data), &data); + + data = __le32_to_cpu(data); + data >>= (shift * 8); + data &= 0xff; + + return (u8)data; +} + +static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data) +{ + u32 tmp, mask = 0xff; + u16 byen = BYTE_EN_BYTE; + u8 shift = index & 3; + + data &= mask; + + if (index & 3) { + byen <<= shift; + mask <<= (shift * 8); + data <<= (shift * 8); + index &= ~3; + } + + if (type == MCU_TYPE_PLA) + pla_ocp_read(tp, index, sizeof(tmp), &tmp); + else + usb_ocp_read(tp, index, sizeof(tmp), &tmp); + + tmp = __le32_to_cpu(tmp) & ~mask; + tmp |= data; + tmp = __cpu_to_le32(tmp); + + if (type == MCU_TYPE_PLA) + pla_ocp_write(tp, index, byen, sizeof(tmp), &tmp); + else + usb_ocp_write(tp, index, byen, sizeof(tmp), &tmp); +} + +static void r8152_mdio_write(struct r8152 *tp, u32 reg_addr, u32 value) +{ + u32 ocp_data; + int i; + + ocp_data = PHYAR_FLAG | ((reg_addr & 0x1f) << 16) | + (value & 0xffff); + + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_PHYAR, ocp_data); + + for (i = 20; i > 0; i--) { + udelay(25); + ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_PHYAR); + if (!(ocp_data & PHYAR_FLAG)) + break; + } + udelay(20); +} + +static int r8152_mdio_read(struct r8152 *tp, u32 reg_addr) +{ + u32 ocp_data; + int i; + + ocp_data = (reg_addr & 0x1f) << 16; + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_PHYAR, ocp_data); + + for (i = 20; i > 0; i--) { + udelay(25); + ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_PHYAR); + if (ocp_data & PHYAR_FLAG) + break; + } + udelay(20); + + if (!(ocp_data & PHYAR_FLAG)) + return 
-EAGAIN; + + return (u16)(ocp_data & 0xffff); +} + +static int read_mii_word(struct net_device *netdev, int phy_id, int reg) +{ + struct r8152 *tp = netdev_priv(netdev); + + if (phy_id != R8152_PHY_ID) + return -EINVAL; + + return r8152_mdio_read(tp, reg); +} + +static +void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val) +{ + struct r8152 *tp = netdev_priv(netdev); + + if (phy_id != R8152_PHY_ID) + return; + + r8152_mdio_write(tp, reg, val); +} + +static void ocp_reg_write(struct r8152 *tp, u16 addr, u16 data) +{ + u16 ocp_base, ocp_index; + + ocp_base = addr & 0xf000; + if (ocp_base != tp->ocp_base) { + ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, ocp_base); + tp->ocp_base = ocp_base; + } + + ocp_index = (addr & 0x0fff) | 0xb000; + ocp_write_word(tp, MCU_TYPE_PLA, ocp_index, data); +} + +static inline void set_ethernet_addr(struct r8152 *tp) +{ + struct net_device *dev = tp->netdev; + u8 *node_id; + + node_id = kmalloc(sizeof(u8) * 8, GFP_KERNEL); + if (!node_id) { + netif_err(tp, probe, dev, "out of memory"); + return; + } + + if (pla_ocp_read(tp, PLA_IDR, sizeof(u8) * 8, node_id) < 0) + netif_notice(tp, probe, dev, "inet addr fail\n"); + else { + memcpy(dev->dev_addr, node_id, dev->addr_len); + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); + } + kfree(node_id); +} + +static int rtl8152_set_mac_address(struct net_device *netdev, void *p) +{ + struct r8152 *tp = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); + pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES, 8, addr->sa_data); + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); + + return 0; +} + +static int alloc_all_urbs(struct r8152 *tp) +{ + tp->rx_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!tp->rx_urb) + return 0; + tp->tx_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!tp->tx_urb) { + usb_free_urb(tp->rx_urb); + return 0; + } + + return 1; +} + +static void free_all_urbs(struct r8152 *tp) +{ + usb_free_urb(tp->rx_urb); + usb_free_urb(tp->tx_urb); +} + +static struct net_device_stats *rtl8152_get_stats(struct net_device *dev) +{ + return &dev->stats; +} + +static void read_bulk_callback(struct urb *urb) +{ + struct r8152 *tp; + unsigned pkt_len; + struct sk_buff *skb; + struct net_device *netdev; + struct net_device_stats *stats; + int status = urb->status; + int result; + struct rx_desc *rx_desc; + + tp = urb->context; + if (!tp) + return; + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; + netdev = tp->netdev; + if (!netif_device_present(netdev)) + return; + + stats = rtl8152_get_stats(netdev); + switch (status) { + case 0: + break; + case -ESHUTDOWN: + set_bit(RTL8152_UNPLUG, &tp->flags); + netif_device_detach(tp->netdev); + case -ENOENT: + return; /* the urb is in unlink state */ + case -ETIME: + pr_warn_ratelimited("may be reset is needed?..\n"); + goto goon; + default: + pr_warn_ratelimited("Rx status %d\n", status); + goto goon; + } + + /* protect against short packets (tell me why we got some?!?) 
*/ + if (urb->actual_length < sizeof(*rx_desc)) + goto goon; + + + rx_desc = (struct rx_desc *)urb->transfer_buffer; + pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK; + if (urb->actual_length < sizeof(struct rx_desc) + pkt_len) + goto goon; + + skb = netdev_alloc_skb_ip_align(netdev, pkt_len); + if (!skb) + goto goon; + + memcpy(skb->data, tp->rx_skb->data + sizeof(struct rx_desc), pkt_len); + skb_put(skb, pkt_len); + skb->protocol = eth_type_trans(skb, netdev); + netif_rx(skb); + stats->rx_packets++; + stats->rx_bytes += pkt_len; +goon: + usb_fill_bulk_urb(tp->rx_urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1), + tp->rx_skb->data, RTL8152_RMS + sizeof(struct rx_desc), + (usb_complete_t)read_bulk_callback, tp); + result = usb_submit_urb(tp->rx_urb, GFP_ATOMIC); + if (result == -ENODEV) { + netif_device_detach(tp->netdev); + } else if (result) { + set_bit(RX_URB_FAIL, &tp->flags); + goto resched; + } else { + clear_bit(RX_URB_FAIL, &tp->flags); + } + + return; +resched: + tasklet_schedule(&tp->tl); +} + +static void rx_fixup(unsigned long data) +{ + struct r8152 *tp; + int status; + + tp = (struct r8152 *)data; + if (!test_bit(WORK_ENABLE, &tp->flags)) + return; + + status = usb_submit_urb(tp->rx_urb, GFP_ATOMIC); + if (status == -ENODEV) { + netif_device_detach(tp->netdev); + } else if (status) { + set_bit(RX_URB_FAIL, &tp->flags); + goto tlsched; + } else { + clear_bit(RX_URB_FAIL, &tp->flags); + } + + return; +tlsched: + tasklet_schedule(&tp->tl); +} + +static void write_bulk_callback(struct urb *urb) +{ + struct r8152 *tp; + int status = urb->status; + + tp = urb->context; + if (!tp) + return; + dev_kfree_skb_irq(tp->tx_skb); + if (!netif_device_present(tp->netdev)) + return; + if (status) + dev_info(&urb->dev->dev, "%s: Tx status %d\n", + tp->netdev->name, status); + tp->netdev->trans_start = jiffies; + netif_wake_queue(tp->netdev); +} + +static void rtl8152_tx_timeout(struct net_device *netdev) +{ + struct r8152 *tp = netdev_priv(netdev); + struct net_device_stats *stats = rtl8152_get_stats(netdev); + netif_warn(tp, tx_err, netdev, "Tx timeout.\n"); + usb_unlink_urb(tp->tx_urb); + stats->tx_errors++; +} + +static void rtl8152_set_rx_mode(struct net_device *netdev) +{ + struct r8152 *tp = netdev_priv(netdev); + + if (tp->speed & LINK_STATUS) + set_bit(RTL8152_SET_RX_MODE, &tp->flags); +} + +static void _rtl8152_set_rx_mode(struct net_device *netdev) +{ + struct r8152 *tp = netdev_priv(netdev); + u32 tmp, *mc_filter; /* Multicast hash filter */ + u32 ocp_data; + + mc_filter = kmalloc(sizeof(u32) * 2, GFP_KERNEL); + if (!mc_filter) { + netif_err(tp, link, netdev, "out of memory"); + return; + } + + clear_bit(RTL8152_SET_RX_MODE, &tp->flags); + netif_stop_queue(netdev); + ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); + ocp_data &= ~RCR_ACPT_ALL; + ocp_data |= RCR_AB | RCR_APM; + + if (netdev->flags & IFF_PROMISC) { + /* Unconditionally log net taps. */ + netif_notice(tp, link, netdev, "Promiscuous mode enabled\n"); + ocp_data |= RCR_AM | RCR_AAP; + mc_filter[1] = mc_filter[0] = 0xffffffff; + } else if ((netdev_mc_count(netdev) > multicast_filter_limit) || + (netdev->flags & IFF_ALLMULTI)) { + /* Too many to filter perfectly -- accept all multicasts. 
*/ + ocp_data |= RCR_AM; + mc_filter[1] = mc_filter[0] = 0xffffffff; + } else { + struct netdev_hw_addr *ha; + + mc_filter[1] = mc_filter[0] = 0; + netdev_for_each_mc_addr(ha, netdev) { + int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + ocp_data |= RCR_AM; + } + } + + tmp = mc_filter[0]; + mc_filter[0] = __cpu_to_le32(swab32(mc_filter[1])); + mc_filter[1] = __cpu_to_le32(swab32(tmp)); + + pla_ocp_write(tp, PLA_MAR, BYTE_EN_DWORD, sizeof(u32) * 2, mc_filter); + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); + netif_wake_queue(netdev); + kfree(mc_filter); +} + +static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct r8152 *tp = netdev_priv(netdev); + struct net_device_stats *stats = rtl8152_get_stats(netdev); + struct tx_desc *tx_desc; + int len, res; + + netif_stop_queue(netdev); + len = skb->len; + if (skb_header_cloned(skb) || skb_headroom(skb) < sizeof(*tx_desc)) { + struct sk_buff *tx_skb; + + tx_skb = skb_copy_expand(skb, sizeof(*tx_desc), 0, GFP_ATOMIC); + dev_kfree_skb_any(skb); + if (!tx_skb) { + stats->tx_dropped++; + netif_wake_queue(netdev); + return NETDEV_TX_OK; + } + skb = tx_skb; + } + tx_desc = (struct tx_desc *)skb_push(skb, sizeof(*tx_desc)); + memset(tx_desc, 0, sizeof(*tx_desc)); + tx_desc->opts1 = cpu_to_le32((len & TX_LEN_MASK) | TX_FS | TX_LS); + tp->tx_skb = skb; + skb_tx_timestamp(skb); + usb_fill_bulk_urb(tp->tx_urb, tp->udev, usb_sndbulkpipe(tp->udev, 2), + skb->data, skb->len, + (usb_complete_t)write_bulk_callback, tp); + res = usb_submit_urb(tp->tx_urb, GFP_ATOMIC); + if (res) { + /* Can we get/handle EPIPE here? */ + if (res == -ENODEV) { + netif_device_detach(tp->netdev); + } else { + netif_warn(tp, tx_err, netdev, + "failed tx_urb %d\n", res); + stats->tx_errors++; + netif_start_queue(netdev); + } + } else { + stats->tx_packets++; + stats->tx_bytes += skb->len; + } + + return NETDEV_TX_OK; +} + +static void r8152b_reset_packet_filter(struct r8152 *tp) +{ + u32 ocp_data; + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_FMC); + ocp_data &= ~FMC_FCR_MCU_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_FMC, ocp_data); + ocp_data |= FMC_FCR_MCU_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_FMC, ocp_data); +} + +static void rtl8152_nic_reset(struct r8152 *tp) +{ + int i; + + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, CR_RST); + + for (i = 0; i < 1000; i++) { + if (!(ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR) & CR_RST)) + break; + udelay(100); + } +} + +static inline u8 rtl8152_get_speed(struct r8152 *tp) +{ + return ocp_read_byte(tp, MCU_TYPE_PLA, PLA_PHYSTATUS); +} + +static int rtl8152_enable(struct r8152 *tp) +{ + u32 ocp_data; + u8 speed; + + speed = rtl8152_get_speed(tp); + if (speed & _100bps) { + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR); + ocp_data &= ~EEEP_CR_EEEP_TX; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data); + } else { + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR); + ocp_data |= EEEP_CR_EEEP_TX; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data); + } + + r8152b_reset_packet_filter(tp); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR); + ocp_data |= CR_RE | CR_TE; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1); + ocp_data &= ~RXDY_GATED_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data); + + usb_fill_bulk_urb(tp->rx_urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1), + tp->rx_skb->data, RTL8152_RMS + sizeof(struct rx_desc), + 
(usb_complete_t)read_bulk_callback, tp); + + return usb_submit_urb(tp->rx_urb, GFP_KERNEL); +} + +static void rtl8152_disable(struct r8152 *tp) +{ + u32 ocp_data; + int i; + + ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); + ocp_data &= ~RCR_ACPT_ALL; + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); + + usb_kill_urb(tp->tx_urb); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1); + ocp_data |= RXDY_GATED_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data); + + for (i = 0; i < 1000; i++) { + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); + if ((ocp_data & FIFO_EMPTY) == FIFO_EMPTY) + break; + mdelay(1); + } + + for (i = 0; i < 1000; i++) { + if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0) & TCR0_TX_EMPTY) + break; + mdelay(1); + } + + usb_kill_urb(tp->rx_urb); + + rtl8152_nic_reset(tp); +} + +static void r8152b_exit_oob(struct r8152 *tp) +{ + u32 ocp_data; + int i; + + ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); + ocp_data &= ~RCR_ACPT_ALL; + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1); + ocp_data |= RXDY_GATED_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data); + + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, 0x00); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); + ocp_data &= ~NOW_IS_OOB; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); + ocp_data &= ~MCU_BORW_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); + + for (i = 0; i < 1000; i++) { + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); + if (ocp_data & LINK_LIST_READY) + break; + mdelay(1); + } + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); + ocp_data |= RE_INIT_LL; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); + + for (i = 0; i < 1000; i++) { + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); + if (ocp_data & LINK_LIST_READY) + break; + mdelay(1); + } + + rtl8152_nic_reset(tp); + + /* rx share fifo credit full threshold */ + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_NORMAL); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_DEV_STAT); + ocp_data &= STAT_SPEED_MASK; + if (ocp_data == STAT_SPEED_FULL) { + /* rx share fifo credit near full threshold */ + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, + RXFIFO_THR2_FULL); + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, + RXFIFO_THR3_FULL); + } else { + /* rx share fifo credit near full threshold */ + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, + RXFIFO_THR2_HIGH); + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, + RXFIFO_THR3_HIGH); + } + + /* TX share fifo free credit full threshold */ + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, TXFIFO_THR_NORMAL); + + ocp_write_byte(tp, MCU_TYPE_USB, USB_TX_AGG, TX_AGG_MAX_THRESHOLD); + ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_BUF_THR); + ocp_write_dword(tp, MCU_TYPE_USB, USB_TX_DMA, + TEST_MODE_DISABLE | TX_SIZE_ADJUST1); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR); + ocp_data &= ~CPCR_RX_VLAN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data); + + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0); + ocp_data |= TCR0_AUTO_FIFO; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_TCR0, ocp_data); +} + +static void r8152b_enter_oob(struct r8152 *tp) +{ + u32 ocp_data; + int i; + + 
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); + ocp_data &= ~NOW_IS_OOB; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); + + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_OOB); + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB); + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB); + + rtl8152_disable(tp); + + for (i = 0; i < 1000; i++) { + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); + if (ocp_data & LINK_LIST_READY) + break; + mdelay(1); + } + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); + ocp_data |= RE_INIT_LL; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); + + for (i = 0; i < 1000; i++) { + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); + if (ocp_data & LINK_LIST_READY) + break; + mdelay(1); + } + + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL); + ocp_data |= MAGIC_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR); + ocp_data |= CPCR_RX_VLAN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PAL_BDC_CR); + ocp_data |= ALDPS_PROXY_MODE; + ocp_write_word(tp, MCU_TYPE_PLA, PAL_BDC_CR, ocp_data); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); + ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); + + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5, LAN_WAKE_EN); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1); + ocp_data &= ~RXDY_GATED_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data); + + ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); + ocp_data |= RCR_APM | RCR_AM | RCR_AB; + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); +} + +static void r8152b_disable_aldps(struct r8152 *tp) +{ + ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPDNPS | LINKENA | DIS_SDSAVE); + msleep(20); +} + +static inline void r8152b_enable_aldps(struct r8152 *tp) +{ + ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPWRSAVE | ENPDNPS | + LINKENA | DIS_SDSAVE); +} + +static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex) +{ + u16 bmcr, anar; + int ret = 0; + + cancel_delayed_work_sync(&tp->schedule); + anar = r8152_mdio_read(tp, MII_ADVERTISE); + anar &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL | + ADVERTISE_100HALF | ADVERTISE_100FULL); + + if (autoneg == AUTONEG_DISABLE) { + if (speed == SPEED_10) { + bmcr = 0; + anar |= ADVERTISE_10HALF | ADVERTISE_10FULL; + } else if (speed == SPEED_100) { + bmcr = BMCR_SPEED100; + anar |= ADVERTISE_100HALF | ADVERTISE_100FULL; + } else { + ret = -EINVAL; + goto out; + } + + if (duplex == DUPLEX_FULL) + bmcr |= BMCR_FULLDPLX; + } else { + if (speed == SPEED_10) { + if (duplex == DUPLEX_FULL) + anar |= ADVERTISE_10HALF | ADVERTISE_10FULL; + else + anar |= ADVERTISE_10HALF; + } else if (speed == SPEED_100) { + if (duplex == DUPLEX_FULL) { + anar |= ADVERTISE_10HALF | ADVERTISE_10FULL; + anar |= ADVERTISE_100HALF | ADVERTISE_100FULL; + } else { + anar |= ADVERTISE_10HALF; + anar |= ADVERTISE_100HALF; + } + } else { + ret = -EINVAL; + goto out; + } + + bmcr = BMCR_ANENABLE | BMCR_ANRESTART; + } + + r8152_mdio_write(tp, MII_ADVERTISE, anar); + r8152_mdio_write(tp, MII_BMCR, bmcr); + +out: + schedule_delayed_work(&tp->schedule, 5 * HZ); + + return ret; +} + +static void rtl8152_down(struct r8152 *tp) +{ + u32 ocp_data; + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, 
USB_UPS_CTRL); + ocp_data &= ~POWER_CUT; + ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data); + + r8152b_disable_aldps(tp); + r8152b_enter_oob(tp); + r8152b_enable_aldps(tp); +} + +static void set_carrier(struct r8152 *tp) +{ + struct net_device *netdev = tp->netdev; + u8 speed; + + speed = rtl8152_get_speed(tp); + + if (speed & LINK_STATUS) { + if (!(tp->speed & LINK_STATUS)) { + rtl8152_enable(tp); + set_bit(RTL8152_SET_RX_MODE, &tp->flags); + netif_carrier_on(netdev); + } + } else { + if (tp->speed & LINK_STATUS) { + netif_carrier_off(netdev); + rtl8152_disable(tp); + } + } + tp->speed = speed; +} + +static void rtl_work_func_t(struct work_struct *work) +{ + struct r8152 *tp = container_of(work, struct r8152, schedule.work); + + if (!test_bit(WORK_ENABLE, &tp->flags)) + goto out1; + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + goto out1; + + set_carrier(tp); + + if (test_bit(RTL8152_SET_RX_MODE, &tp->flags)) + _rtl8152_set_rx_mode(tp->netdev); + + schedule_delayed_work(&tp->schedule, HZ); + +out1: + return; +} + +static int rtl8152_open(struct net_device *netdev) +{ + struct r8152 *tp = netdev_priv(netdev); + int res = 0; + + tp->speed = rtl8152_get_speed(tp); + if (tp->speed & LINK_STATUS) { + res = rtl8152_enable(tp); + if (res) { + if (res == -ENODEV) + netif_device_detach(tp->netdev); + + netif_err(tp, ifup, netdev, + "rtl8152_open failed: %d\n", res); + return res; + } + + netif_carrier_on(netdev); + } else { + netif_stop_queue(netdev); + netif_carrier_off(netdev); + } + + rtl8152_set_speed(tp, AUTONEG_ENABLE, SPEED_100, DUPLEX_FULL); + netif_start_queue(netdev); + set_bit(WORK_ENABLE, &tp->flags); + schedule_delayed_work(&tp->schedule, 0); + + return res; +} + +static int rtl8152_close(struct net_device *netdev) +{ + struct r8152 *tp = netdev_priv(netdev); + int res = 0; + + clear_bit(WORK_ENABLE, &tp->flags); + cancel_delayed_work_sync(&tp->schedule); + netif_stop_queue(netdev); + rtl8152_disable(tp); + + return res; +} + +static void rtl_clear_bp(struct r8152 *tp) +{ + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_0, 0); + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_2, 0); + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_4, 0); + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_6, 0); + ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_0, 0); + ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_2, 0); + ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_4, 0); + ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_6, 0); + mdelay(3); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_BA, 0); + ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0); +} + +static void r8152b_enable_eee(struct r8152 *tp) +{ + u32 ocp_data; + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR); + ocp_data |= EEE_RX_EN | EEE_TX_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data); + ocp_reg_write(tp, OCP_EEE_CONFIG1, RG_TXLPI_MSK_HFDUP | RG_MATCLR_EN | + EEE_10_CAP | EEE_NWAY_EN | + TX_QUIET_EN | RX_QUIET_EN | + SDRISETIME | RG_RXLPI_MSK_HFDUP | + SDFALLTIME); + ocp_reg_write(tp, OCP_EEE_CONFIG2, RG_LPIHYS_NUM | RG_DACQUIET_EN | + RG_LDVQUIET_EN | RG_CKRSEL | + RG_EEEPRG_EN); + ocp_reg_write(tp, OCP_EEE_CONFIG3, FST_SNR_EYE_R | RG_LFS_SEL | MSK_PH); + ocp_reg_write(tp, OCP_EEE_AR, FUN_ADDR | DEVICE_ADDR); + ocp_reg_write(tp, OCP_EEE_DATA, EEE_ADDR); + ocp_reg_write(tp, OCP_EEE_AR, FUN_DATA | DEVICE_ADDR); + ocp_reg_write(tp, OCP_EEE_DATA, EEE_DATA); + ocp_reg_write(tp, OCP_EEE_AR, 0x0000); +} + +static void r8152b_enable_fc(struct r8152 *tp) +{ + u16 anar; + + anar = r8152_mdio_read(tp, MII_ADVERTISE); + anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + 
r8152_mdio_write(tp, MII_ADVERTISE, anar); +} + +static void r8152b_hw_phy_cfg(struct r8152 *tp) +{ + r8152_mdio_write(tp, MII_BMCR, BMCR_ANENABLE); + r8152b_disable_aldps(tp); +} + +static void r8152b_init(struct r8152 *tp) +{ + u32 ocp_data; + int i; + + rtl_clear_bp(tp); + + if (tp->version == RTL_VER_01) { + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE); + ocp_data &= ~LED_MODE_MASK; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data); + } + + r8152b_hw_phy_cfg(tp); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL); + ocp_data &= ~POWER_CUT; + ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS); + ocp_data &= ~RWSUME_INDICATE; + ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data); + + r8152b_exit_oob(tp); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR); + ocp_data |= TX_10M_IDLE_EN | PFM_PWM_SWITCH; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data); + ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL); + ocp_data &= ~MCU_CLK_RATIO_MASK; + ocp_data |= MCU_CLK_RATIO | D3_CLK_GATED_EN; + ocp_write_dword(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, ocp_data); + ocp_data = GPHY_STS_MSK | SPEED_DOWN_MSK | + SPDWN_RXDV_MSK | SPDWN_LINKCHG_MSK; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_GPHY_INTR_IMR, ocp_data); + + r8152b_enable_eee(tp); + r8152b_enable_aldps(tp); + r8152b_enable_fc(tp); + + r8152_mdio_write(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE | + BMCR_ANRESTART); + for (i = 0; i < 100; i++) { + udelay(100); + if (!(r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET)) + break; + } + + /* disable rx aggregation */ + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); + ocp_data |= RX_AGG_DISABLE; + ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); +} + +static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) +{ + struct r8152 *tp = usb_get_intfdata(intf); + + netif_device_detach(tp->netdev); + + if (netif_running(tp->netdev)) { + clear_bit(WORK_ENABLE, &tp->flags); + cancel_delayed_work_sync(&tp->schedule); + } + + rtl8152_down(tp); + + return 0; +} + +static int rtl8152_resume(struct usb_interface *intf) +{ + struct r8152 *tp = usb_get_intfdata(intf); + + r8152b_init(tp); + netif_device_attach(tp->netdev); + if (netif_running(tp->netdev)) { + rtl8152_enable(tp); + set_bit(WORK_ENABLE, &tp->flags); + set_bit(RTL8152_SET_RX_MODE, &tp->flags); + schedule_delayed_work(&tp->schedule, 0); + } + + return 0; +} + +static void rtl8152_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct r8152 *tp = netdev_priv(netdev); + + strncpy(info->driver, MODULENAME, ETHTOOL_BUSINFO_LEN); + strncpy(info->version, DRIVER_VERSION, ETHTOOL_BUSINFO_LEN); + usb_make_path(tp->udev, info->bus_info, sizeof(info->bus_info)); +} + +static +int rtl8152_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +{ + struct r8152 *tp = netdev_priv(netdev); + + if (!tp->mii.mdio_read) + return -EOPNOTSUPP; + + return mii_ethtool_gset(&tp->mii, cmd); +} + +static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct r8152 *tp = netdev_priv(dev); + + return rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex); +} + +static struct ethtool_ops ops = { + .get_drvinfo = rtl8152_get_drvinfo, + .get_settings = rtl8152_get_settings, + .set_settings = rtl8152_set_settings, + .get_link = ethtool_op_get_link, +}; + +static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) +{ 
+ struct r8152 *tp = netdev_priv(netdev); + struct mii_ioctl_data *data = if_mii(rq); + int res = 0; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = R8152_PHY_ID; /* Internal PHY */ + break; + + case SIOCGMIIREG: + data->val_out = r8152_mdio_read(tp, data->reg_num); + break; + + case SIOCSMIIREG: + if (!capable(CAP_NET_ADMIN)) { + res = -EPERM; + break; + } + r8152_mdio_write(tp, data->reg_num, data->val_in); + break; + + default: + res = -EOPNOTSUPP; + } + + return res; +} + +static const struct net_device_ops rtl8152_netdev_ops = { + .ndo_open = rtl8152_open, + .ndo_stop = rtl8152_close, + .ndo_do_ioctl = rtl8152_ioctl, + .ndo_start_xmit = rtl8152_start_xmit, + .ndo_tx_timeout = rtl8152_tx_timeout, + .ndo_set_rx_mode = rtl8152_set_rx_mode, + .ndo_set_mac_address = rtl8152_set_mac_address, + + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, +}; + +static void r8152b_get_version(struct r8152 *tp) +{ + u32 ocp_data; + u16 version; + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR1); + version = (u16)(ocp_data & VERSION_MASK); + + switch (version) { + case 0x4c00: + tp->version = RTL_VER_01; + break; + case 0x4c10: + tp->version = RTL_VER_02; + break; + default: + netif_info(tp, probe, tp->netdev, + "Unknown version 0x%04x\n", version); + break; + } +} + +static int rtl8152_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct usb_device *udev = interface_to_usbdev(intf); + struct r8152 *tp; + struct net_device *netdev; + + if (udev->actconfig->desc.bConfigurationValue != 1) { + usb_driver_set_configuration(udev, 1); + return -ENODEV; + } + + netdev = alloc_etherdev(sizeof(struct r8152)); + if (!netdev) { + dev_err(&intf->dev, "Out of memory"); + return -ENOMEM; + } + + tp = netdev_priv(netdev); + tp->msg_enable = 0x7FFF; + + tasklet_init(&tp->tl, rx_fixup, (unsigned long)tp); + INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t); + + tp->udev = udev; + tp->netdev = netdev; + netdev->netdev_ops = &rtl8152_netdev_ops; + netdev->watchdog_timeo = RTL8152_TX_TIMEOUT; + netdev->features &= ~NETIF_F_IP_CSUM; + SET_ETHTOOL_OPS(netdev, &ops); + tp->speed = 0; + + tp->mii.dev = netdev; + tp->mii.mdio_read = read_mii_word; + tp->mii.mdio_write = write_mii_word; + tp->mii.phy_id_mask = 0x3f; + tp->mii.reg_num_mask = 0x1f; + tp->mii.phy_id = R8152_PHY_ID; + tp->mii.supports_gmii = 0; + + r8152b_get_version(tp); + r8152b_init(tp); + set_ethernet_addr(tp); + + if (!alloc_all_urbs(tp)) { + netif_err(tp, probe, netdev, "out of memory"); + goto out; + } + + tp->rx_skb = netdev_alloc_skb(netdev, + RTL8152_RMS + sizeof(struct rx_desc)); + if (!tp->rx_skb) + goto out1; + + usb_set_intfdata(intf, tp); + SET_NETDEV_DEV(netdev, &intf->dev); + + + if (register_netdev(netdev) != 0) { + netif_err(tp, probe, netdev, "couldn't register the device"); + goto out2; + } + + netif_info(tp, probe, netdev, "%s", DRIVER_VERSION); + + return 0; + +out2: + usb_set_intfdata(intf, NULL); + dev_kfree_skb(tp->rx_skb); +out1: + free_all_urbs(tp); +out: + free_netdev(netdev); + return -EIO; +} + +static void rtl8152_unload(struct r8152 *tp) +{ + u32 ocp_data; + + if (tp->version != RTL_VER_01) { + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL); + ocp_data |= POWER_CUT; + ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data); + } + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS); + ocp_data &= ~RWSUME_INDICATE; + ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data); +} + +static void rtl8152_disconnect(struct usb_interface *intf) +{ 
+ struct r8152 *tp = usb_get_intfdata(intf); + + usb_set_intfdata(intf, NULL); + if (tp) { + set_bit(RTL8152_UNPLUG, &tp->flags); + tasklet_kill(&tp->tl); + unregister_netdev(tp->netdev); + rtl8152_unload(tp); + free_all_urbs(tp); + if (tp->rx_skb) + dev_kfree_skb(tp->rx_skb); + free_netdev(tp->netdev); + } +} + +/* table of devices that work with this driver */ +static struct usb_device_id rtl8152_table[] = { + {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8152)}, + {} +}; + +MODULE_DEVICE_TABLE(usb, rtl8152_table); + +static struct usb_driver rtl8152_driver = { + .name = MODULENAME, + .probe = rtl8152_probe, + .disconnect = rtl8152_disconnect, + .id_table = rtl8152_table, + .suspend = rtl8152_suspend, + .resume = rtl8152_resume +}; + +static int __init usb_rtl8152_init(void) +{ + return usb_register(&rtl8152_driver); +} + +static void __exit usb_rtl8152_exit(void) +{ + usb_deregister(&rtl8152_driver); +} + +module_init(usb_rtl8152_init); +module_exit(usb_rtl8152_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c index a923d61c6fc5..a79e9d334928 100644 --- a/drivers/net/usb/sierra_net.c +++ b/drivers/net/usb/sierra_net.c @@ -426,6 +426,13 @@ static void sierra_net_dosync(struct usbnet *dev) dev_dbg(&dev->udev->dev, "%s", __func__); + /* The SIERRA_NET_HIP_MSYNC_ID command appears to request that the + * firmware restart itself. After restarting, the modem will respond + * with the SIERRA_NET_HIP_RESTART_ID indication. The driver continues + * sending MSYNC commands every few seconds until it receives the + * RESTART event from the firmware + */ + /* tell modem we are ready */ status = sierra_net_send_sync(dev); if (status < 0) @@ -704,6 +711,9 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf) /* set context index initially to 0 - prepares tx hdr template */ sierra_net_set_ctx_index(priv, 0); + /* prepare sync message template */ + memcpy(priv->sync_msg, sync_tmplate, sizeof(priv->sync_msg)); + /* decrease the rx_urb_size and max_tx_size to 4k on USB 1.1 */ dev->rx_urb_size = SIERRA_NET_RX_URB_SIZE; if (dev->udev->speed != USB_SPEED_HIGH) @@ -739,11 +749,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf) kfree(priv); return -ENODEV; } - /* prepare sync message from template */ - memcpy(priv->sync_msg, sync_tmplate, sizeof(priv->sync_msg)); - - /* initiate the sync sequence */ - sierra_net_dosync(dev); return 0; } @@ -766,8 +771,9 @@ static void sierra_net_unbind(struct usbnet *dev, struct usb_interface *intf) netdev_err(dev->net, "usb_control_msg failed, status %d\n", status); - sierra_net_set_private(dev, NULL); + usbnet_status_stop(dev); + sierra_net_set_private(dev, NULL); kfree(priv); } @@ -908,6 +914,24 @@ static const struct driver_info sierra_net_info_direct_ip = { .tx_fixup = sierra_net_tx_fixup, }; +static int +sierra_net_probe(struct usb_interface *udev, const struct usb_device_id *prod) +{ + int ret; + + ret = usbnet_probe(udev, prod); + if (ret == 0) { + struct usbnet *dev = usb_get_intfdata(udev); + + ret = usbnet_status_start(dev, GFP_KERNEL); + if (ret == 0) { + /* Interrupt URB now set up; initiate sync sequence */ + sierra_net_dosync(dev); + } + } + return ret; +} + #define DIRECT_IP_DEVICE(vend, prod) \ {USB_DEVICE_INTERFACE_NUMBER(vend, prod, 7), \ .driver_info = (unsigned long)&sierra_net_info_direct_ip}, \ @@ -930,7 +954,7 @@ MODULE_DEVICE_TABLE(usb, products); static struct usb_driver 
sierra_net_driver = { .name = "sierra_net", .id_table = products, - .probe = usbnet_probe, + .probe = sierra_net_probe, .disconnect = usbnet_disconnect, .suspend = usbnet_suspend, .resume = usbnet_resume, diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 1e5a9b72650e..f95cb032394b 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -252,6 +252,70 @@ static int init_status (struct usbnet *dev, struct usb_interface *intf) return 0; } +/* Submit the interrupt URB if not previously submitted, increasing refcount */ +int usbnet_status_start(struct usbnet *dev, gfp_t mem_flags) +{ + int ret = 0; + + WARN_ON_ONCE(dev->interrupt == NULL); + if (dev->interrupt) { + mutex_lock(&dev->interrupt_mutex); + + if (++dev->interrupt_count == 1) + ret = usb_submit_urb(dev->interrupt, mem_flags); + + dev_dbg(&dev->udev->dev, "incremented interrupt URB count to %d\n", + dev->interrupt_count); + mutex_unlock(&dev->interrupt_mutex); + } + return ret; +} +EXPORT_SYMBOL_GPL(usbnet_status_start); + +/* For resume; submit interrupt URB if previously submitted */ +static int __usbnet_status_start_force(struct usbnet *dev, gfp_t mem_flags) +{ + int ret = 0; + + mutex_lock(&dev->interrupt_mutex); + if (dev->interrupt_count) { + ret = usb_submit_urb(dev->interrupt, mem_flags); + dev_dbg(&dev->udev->dev, + "submitted interrupt URB for resume\n"); + } + mutex_unlock(&dev->interrupt_mutex); + return ret; +} + +/* Kill the interrupt URB if all submitters want it killed */ +void usbnet_status_stop(struct usbnet *dev) +{ + if (dev->interrupt) { + mutex_lock(&dev->interrupt_mutex); + WARN_ON(dev->interrupt_count == 0); + + if (dev->interrupt_count && --dev->interrupt_count == 0) + usb_kill_urb(dev->interrupt); + + dev_dbg(&dev->udev->dev, + "decremented interrupt URB count to %d\n", + dev->interrupt_count); + mutex_unlock(&dev->interrupt_mutex); + } +} +EXPORT_SYMBOL_GPL(usbnet_status_stop); + +/* For suspend; always kill interrupt URB */ +static void __usbnet_status_stop_force(struct usbnet *dev) +{ + if (dev->interrupt) { + mutex_lock(&dev->interrupt_mutex); + usb_kill_urb(dev->interrupt); + dev_dbg(&dev->udev->dev, "killed interrupt URB for suspend\n"); + mutex_unlock(&dev->interrupt_mutex); + } +} + /* Passes this packet up the stack, updating its accounting. * Some link protocols batch packets, so their rx_fixup paths * can return clones as well as just modify the original skb. 
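
The usbnet hunk above adds refcounted control of the shared interrupt (status) URB: usbnet_status_start() submits the URB only when the first user asks for it, and usbnet_status_stop() kills it only when the last user is done, which is what lets sierra_net_probe() bring the status endpoint up early (before the MSYNC handshake) without fighting usbnet_open()/usbnet_stop(). The following is a minimal, self-contained C sketch of that counting discipline only; the names (status_ctx, status_start, status_stop, submit_urb, kill_urb) are hypothetical stand-ins for illustration, not kernel APIs, and the real code additionally handles suspend/resume via the *_force variants shown in the hunk.

/* Standalone model of the refcounted start/stop idiom added above.
 * All identifiers here are illustrative; only the counting pattern
 * mirrors usbnet_status_start()/usbnet_status_stop().
 */
#include <pthread.h>
#include <stdio.h>

struct status_ctx {
	pthread_mutex_t lock;   /* serializes count changes with submit/kill */
	int count;              /* how many users currently want the URB */
	int submitted;          /* stand-in for "URB is in flight" */
};

static void submit_urb(struct status_ctx *s) { s->submitted = 1; puts("URB submitted"); }
static void kill_urb(struct status_ctx *s)   { s->submitted = 0; puts("URB killed"); }

/* First caller actually submits; later callers only bump the count. */
static void status_start(struct status_ctx *s)
{
	pthread_mutex_lock(&s->lock);
	if (++s->count == 1)
		submit_urb(s);
	pthread_mutex_unlock(&s->lock);
}

/* Only the last caller tears the URB down. */
static void status_stop(struct status_ctx *s)
{
	pthread_mutex_lock(&s->lock);
	if (s->count && --s->count == 0)
		kill_urb(s);
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct status_ctx s = { .lock = PTHREAD_MUTEX_INITIALIZER };

	status_start(&s);  /* e.g. sierra_net_probe(): first user, submits   */
	status_start(&s);  /* e.g. usbnet_open():      count only            */
	status_stop(&s);   /* e.g. usbnet_stop():      count only            */
	status_stop(&s);   /* last user gone: URB is killed                  */
	return 0;
}

A plain count under a mutex is used rather than an atomic because the submit/kill action has to happen atomically with the transition through 1 or 0; splitting the two would allow a start racing with the final stop to leave the URB killed while the count says it is wanted, which is exactly what the interrupt_mutex in the patch prevents.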
@@ -725,7 +789,7 @@ int usbnet_stop (struct net_device *net) if (!(info->flags & FLAG_AVOID_UNLINK_URBS)) usbnet_terminate_urbs(dev); - usb_kill_urb(dev->interrupt); + usbnet_status_stop(dev); usbnet_purge_paused_rxq(dev); @@ -787,7 +851,7 @@ int usbnet_open (struct net_device *net) /* start any status interrupt transfer */ if (dev->interrupt) { - retval = usb_submit_urb (dev->interrupt, GFP_KERNEL); + retval = usbnet_status_start(dev, GFP_KERNEL); if (retval < 0) { netif_err(dev, ifup, dev->net, "intr submit %d\n", retval); @@ -1458,6 +1522,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) dev->delay.data = (unsigned long) dev; init_timer (&dev->delay); mutex_init (&dev->phy_mutex); + mutex_init(&dev->interrupt_mutex); + dev->interrupt_count = 0; dev->net = net; strcpy (net->name, "usb%d"); @@ -1593,7 +1659,7 @@ int usbnet_suspend (struct usb_interface *intf, pm_message_t message) */ netif_device_detach (dev->net); usbnet_terminate_urbs(dev); - usb_kill_urb(dev->interrupt); + __usbnet_status_stop_force(dev); /* * reattach so runtime management can use and @@ -1613,9 +1679,8 @@ int usbnet_resume (struct usb_interface *intf) int retval; if (!--dev->suspend_count) { - /* resume interrupt URBs */ - if (dev->interrupt && test_bit(EVENT_DEV_OPEN, &dev->flags)) - usb_submit_urb(dev->interrupt, GFP_NOIO); + /* resume interrupt URB if it was previously submitted */ + __usbnet_status_start_force(dev, GFP_NOIO); spin_lock_irq(&dev->txq.lock); while ((res = usb_get_from_anchor(&dev->deferred))) { diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 50077753a0e5..655bb25eed2b 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -28,7 +28,7 @@ #include <linux/slab.h> #include <linux/cpu.h> -static int napi_weight = 128; +static int napi_weight = NAPI_POLL_WEIGHT; module_param(napi_weight, int, 0444); static bool csum = true, gso = true; @@ -39,7 +39,6 @@ module_param(gso, bool, 0444); #define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) #define GOOD_COPY_LEN 128 -#define VIRTNET_SEND_COMMAND_SG_MAX 2 #define VIRTNET_DRIVER_VERSION "1.0.0" struct virtnet_stats { @@ -444,7 +443,7 @@ static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp) skb_to_sgvec(skb, rq->sg + 1, 0, skb->len); - err = virtqueue_add_buf(rq->vq, rq->sg, 0, 2, skb, gfp); + err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp); if (err < 0) dev_kfree_skb(skb); @@ -489,8 +488,8 @@ static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp) /* chain first in list head */ first->private = (unsigned long)list; - err = virtqueue_add_buf(rq->vq, rq->sg, 0, MAX_SKB_FRAGS + 2, - first, gfp); + err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2, + first, gfp); if (err < 0) give_pages(rq, first); @@ -508,7 +507,7 @@ static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp) sg_init_one(rq->sg, page_address(page), PAGE_SIZE); - err = virtqueue_add_buf(rq->vq, rq->sg, 0, 1, page, gfp); + err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, page, gfp); if (err < 0) give_pages(rq, page); @@ -582,7 +581,7 @@ static void refill_work(struct work_struct *work) bool still_empty; int i; - for (i = 0; i < vi->max_queue_pairs; i++) { + for (i = 0; i < vi->curr_queue_pairs; i++) { struct receive_queue *rq = &vi->rq[i]; napi_disable(&rq->napi); @@ -637,7 +636,7 @@ static int virtnet_open(struct net_device *dev) struct virtnet_info *vi = netdev_priv(dev); int i; - for (i = 0; i < vi->max_queue_pairs; i++) { + for (i = 0; i < vi->curr_queue_pairs; i++) { /* Make 
sure we have some buffers: if oom use wq. */ if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) schedule_delayed_work(&vi->refill, 0); @@ -711,8 +710,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) sg_set_buf(sq->sg, &hdr->hdr, sizeof hdr->hdr); num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1; - return virtqueue_add_buf(sq->vq, sq->sg, num_sg, - 0, skb, GFP_ATOMIC); + return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); } static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) @@ -767,32 +765,35 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) * never fail unless improperly formated. */ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, - struct scatterlist *data, int out, int in) + struct scatterlist *out, + struct scatterlist *in) { - struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2]; + struct scatterlist *sgs[4], hdr, stat; struct virtio_net_ctrl_hdr ctrl; virtio_net_ctrl_ack status = ~0; - unsigned int tmp; - int i; + unsigned out_num = 0, in_num = 0, tmp; /* Caller should know better */ - BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) || - (out + in > VIRTNET_SEND_COMMAND_SG_MAX)); - - out++; /* Add header */ - in++; /* Add return status */ + BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); ctrl.class = class; ctrl.cmd = cmd; + /* Add header */ + sg_init_one(&hdr, &ctrl, sizeof(ctrl)); + sgs[out_num++] = &hdr; - sg_init_table(sg, out + in); + if (out) + sgs[out_num++] = out; + if (in) + sgs[out_num + in_num++] = in; - sg_set_buf(&sg[0], &ctrl, sizeof(ctrl)); - for_each_sg(data, s, out + in - 2, i) - sg_set_buf(&sg[i + 1], sg_virt(s), s->length); - sg_set_buf(&sg[out + in - 1], &status, sizeof(status)); + /* Add return status. 
*/ + sg_init_one(&stat, &status, sizeof(status)); + sgs[out_num + in_num++] = &stat; - BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0); + BUG_ON(out_num + in_num > ARRAY_SIZE(sgs)); + BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC) + < 0); virtqueue_kick(vi->cvq); @@ -821,7 +822,7 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p) sg_init_one(&sg, addr->sa_data, dev->addr_len); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MAC_ADDR_SET, - &sg, 1, 0)) { + &sg, NULL)) { dev_warn(&vdev->dev, "Failed to set mac address by vq command.\n"); return -EINVAL; @@ -889,8 +890,7 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi) { rtnl_lock(); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, - VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, - 0, 0)) + VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, NULL)) dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); rtnl_unlock(); } @@ -900,6 +900,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) struct scatterlist sg; struct virtio_net_ctrl_mq s; struct net_device *dev = vi->dev; + int i; if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) return 0; @@ -908,12 +909,16 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) sg_init_one(&sg, &s, sizeof(s)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, - VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, 1, 0)){ + VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, NULL)) { dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n", queue_pairs); return -EINVAL; - } else + } else { + for (i = vi->curr_queue_pairs; i < queue_pairs; i++) + if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) + schedule_delayed_work(&vi->refill, 0); vi->curr_queue_pairs = queue_pairs; + } return 0; } @@ -955,7 +960,7 @@ static void virtnet_set_rx_mode(struct net_device *dev) if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_PROMISC, - sg, 1, 0)) + sg, NULL)) dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", promisc ? "en" : "dis"); @@ -963,7 +968,7 @@ static void virtnet_set_rx_mode(struct net_device *dev) if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_ALLMULTI, - sg, 1, 0)) + sg, NULL)) dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", allmulti ? "en" : "dis"); @@ -1000,7 +1005,7 @@ static void virtnet_set_rx_mode(struct net_device *dev) if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MAC_TABLE_SET, - sg, 2, 0)) + sg, NULL)) dev_warn(&dev->dev, "Failed to set MAC fitler table.\n"); kfree(buf); @@ -1015,7 +1020,7 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev, sg_init_one(&sg, &vid, sizeof(vid)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, - VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0)) + VIRTIO_NET_CTRL_VLAN_ADD, &sg, NULL)) dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); return 0; } @@ -1029,7 +1034,7 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, sg_init_one(&sg, &vid, sizeof(vid)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, - VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0)) + VIRTIO_NET_CTRL_VLAN_DEL, &sg, NULL)) dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); return 0; } @@ -1570,7 +1575,7 @@ static int virtnet_probe(struct virtio_device *vdev) } /* Last of all, set up some receive buffers. */ - for (i = 0; i < vi->max_queue_pairs; i++) { + for (i = 0; i < vi->curr_queue_pairs; i++) { try_fill_recv(&vi->rq[i], GFP_KERNEL); /* If we didn't even get one input buffer, we're useless. 
*/ @@ -1694,7 +1699,7 @@ static int virtnet_restore(struct virtio_device *vdev) netif_device_attach(vi->dev); - for (i = 0; i < vi->max_queue_pairs; i++) + for (i = 0; i < vi->curr_queue_pairs; i++) if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) schedule_delayed_work(&vi->refill, 0); diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 9b20d9ee2719..7f702fe3ecc2 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -2369,6 +2369,9 @@ ath5k_tx_complete_poll_work(struct work_struct *work) int i; bool needreset = false; + if (!test_bit(ATH_STAT_STARTED, ah->status)) + return; + mutex_lock(&ah->lock); for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) { @@ -2676,6 +2679,7 @@ done: mmiowb(); mutex_unlock(&ah->lock); + set_bit(ATH_STAT_STARTED, ah->status); ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work, msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT)); @@ -2737,6 +2741,7 @@ void ath5k_stop(struct ieee80211_hw *hw) ath5k_stop_tasklets(ah); + clear_bit(ATH_STAT_STARTED, ah->status); cancel_delayed_work_sync(&ah->tx_complete_work); if (!ath5k_modparam_no_hw_rfkill_switch) diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig index 17507dc8a1e7..f3dc124c60c7 100644 --- a/drivers/net/wireless/ath/ath9k/Kconfig +++ b/drivers/net/wireless/ath/ath9k/Kconfig @@ -17,7 +17,7 @@ config ATH9K_BTCOEX_SUPPORT config ATH9K tristate "Atheros 802.11n wireless cards support" - depends on MAC80211 + depends on MAC80211 && HAS_DMA select ATH9K_HW select MAC80211_LEDS select LEDS_CLASS diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h index 0c2ac0c6dc89..e85a8b076c22 100644 --- a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h @@ -233,9 +233,9 @@ static const u32 ar9565_1p0_baseband_core[][2] = { {0x00009d10, 0x01834061}, {0x00009d14, 0x00c00400}, {0x00009d18, 0x00000000}, - {0x00009e08, 0x0078230c}, - {0x00009e24, 0x990bb515}, - {0x00009e28, 0x126f0000}, + {0x00009e08, 0x0038230c}, + {0x00009e24, 0x9907b515}, + {0x00009e28, 0x126f0600}, {0x00009e30, 0x06336f77}, {0x00009e34, 0x6af6532f}, {0x00009e38, 0x0cc80c00}, @@ -337,7 +337,7 @@ static const u32 ar9565_1p0_baseband_core[][2] = { static const u32 ar9565_1p0_baseband_postamble[][5] = { /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ - {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a800d}, + {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8009}, {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae}, {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x63c640da}, {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x09143c81}, @@ -345,9 +345,9 @@ static const u32 ar9565_1p0_baseband_postamble[][5] = { {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c}, {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4}, {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0}, - {0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020}, - {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8}, - {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e}, + {0x00009e04, 0x00802020, 0x00802020, 0x00142020, 0x00142020}, + {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2}, + {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e}, {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e}, {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x00009e1c, 
0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, @@ -450,6 +450,8 @@ static const u32 ar9565_1p0_soc_postamble[][5] = { static const u32 ar9565_1p0_Common_rx_gain_table[][2] = { /* Addr allmodes */ + {0x00004050, 0x00300300}, + {0x0000406c, 0x00100000}, {0x0000a000, 0x00010000}, {0x0000a004, 0x00030002}, {0x0000a008, 0x00050004}, @@ -498,27 +500,27 @@ static const u32 ar9565_1p0_Common_rx_gain_table[][2] = { {0x0000a0b4, 0x00000000}, {0x0000a0b8, 0x00000000}, {0x0000a0bc, 0x00000000}, - {0x0000a0c0, 0x001f0000}, - {0x0000a0c4, 0x01000101}, - {0x0000a0c8, 0x011e011f}, - {0x0000a0cc, 0x011c011d}, - {0x0000a0d0, 0x02030204}, - {0x0000a0d4, 0x02010202}, - {0x0000a0d8, 0x021f0200}, - {0x0000a0dc, 0x0302021e}, - {0x0000a0e0, 0x03000301}, - {0x0000a0e4, 0x031e031f}, - {0x0000a0e8, 0x0402031d}, - {0x0000a0ec, 0x04000401}, - {0x0000a0f0, 0x041e041f}, - {0x0000a0f4, 0x0502041d}, - {0x0000a0f8, 0x05000501}, - {0x0000a0fc, 0x051e051f}, - {0x0000a100, 0x06010602}, - {0x0000a104, 0x061f0600}, - {0x0000a108, 0x061d061e}, - {0x0000a10c, 0x07020703}, - {0x0000a110, 0x07000701}, + {0x0000a0c0, 0x00bf00a0}, + {0x0000a0c4, 0x11a011a1}, + {0x0000a0c8, 0x11be11bf}, + {0x0000a0cc, 0x11bc11bd}, + {0x0000a0d0, 0x22632264}, + {0x0000a0d4, 0x22612262}, + {0x0000a0d8, 0x227f2260}, + {0x0000a0dc, 0x4322227e}, + {0x0000a0e0, 0x43204321}, + {0x0000a0e4, 0x433e433f}, + {0x0000a0e8, 0x4462433d}, + {0x0000a0ec, 0x44604461}, + {0x0000a0f0, 0x447e447f}, + {0x0000a0f4, 0x5582447d}, + {0x0000a0f8, 0x55805581}, + {0x0000a0fc, 0x559e559f}, + {0x0000a100, 0x66816682}, + {0x0000a104, 0x669f6680}, + {0x0000a108, 0x669d669e}, + {0x0000a10c, 0x77627763}, + {0x0000a110, 0x77607761}, {0x0000a114, 0x00000000}, {0x0000a118, 0x00000000}, {0x0000a11c, 0x00000000}, @@ -530,27 +532,27 @@ static const u32 ar9565_1p0_Common_rx_gain_table[][2] = { {0x0000a134, 0x00000000}, {0x0000a138, 0x00000000}, {0x0000a13c, 0x00000000}, - {0x0000a140, 0x001f0000}, - {0x0000a144, 0x01000101}, - {0x0000a148, 0x011e011f}, - {0x0000a14c, 0x011c011d}, - {0x0000a150, 0x02030204}, - {0x0000a154, 0x02010202}, - {0x0000a158, 0x021f0200}, - {0x0000a15c, 0x0302021e}, - {0x0000a160, 0x03000301}, - {0x0000a164, 0x031e031f}, - {0x0000a168, 0x0402031d}, - {0x0000a16c, 0x04000401}, - {0x0000a170, 0x041e041f}, - {0x0000a174, 0x0502041d}, - {0x0000a178, 0x05000501}, - {0x0000a17c, 0x051e051f}, - {0x0000a180, 0x06010602}, - {0x0000a184, 0x061f0600}, - {0x0000a188, 0x061d061e}, - {0x0000a18c, 0x07020703}, - {0x0000a190, 0x07000701}, + {0x0000a140, 0x00bf00a0}, + {0x0000a144, 0x11a011a1}, + {0x0000a148, 0x11be11bf}, + {0x0000a14c, 0x11bc11bd}, + {0x0000a150, 0x22632264}, + {0x0000a154, 0x22612262}, + {0x0000a158, 0x227f2260}, + {0x0000a15c, 0x4322227e}, + {0x0000a160, 0x43204321}, + {0x0000a164, 0x433e433f}, + {0x0000a168, 0x4462433d}, + {0x0000a16c, 0x44604461}, + {0x0000a170, 0x447e447f}, + {0x0000a174, 0x5582447d}, + {0x0000a178, 0x55805581}, + {0x0000a17c, 0x559e559f}, + {0x0000a180, 0x66816682}, + {0x0000a184, 0x669f6680}, + {0x0000a188, 0x669d669e}, + {0x0000a18c, 0x77e677e7}, + {0x0000a190, 0x77e477e5}, {0x0000a194, 0x00000000}, {0x0000a198, 0x00000000}, {0x0000a19c, 0x00000000}, @@ -770,7 +772,7 @@ static const u32 ar9565_1p0_Modes_lowest_ob_db_tx_gain_table[][5] = { static const u32 ar9565_1p0_pciephy_clkreq_disable_L1[][2] = { /* Addr allmodes */ - {0x00018c00, 0x18213ede}, + {0x00018c00, 0x18212ede}, {0x00018c04, 0x000801d8}, {0x00018c08, 0x0003780c}, }; @@ -889,8 +891,8 @@ static const u32 ar9565_1p0_common_wo_xlna_rx_gain_table[][2] = { {0x0000a180, 0x66816682}, 
{0x0000a184, 0x669f6680}, {0x0000a188, 0x669d669e}, - {0x0000a18c, 0x77627763}, - {0x0000a190, 0x77607761}, + {0x0000a18c, 0x77e677e7}, + {0x0000a190, 0x77e477e5}, {0x0000a194, 0x00000000}, {0x0000a198, 0x00000000}, {0x0000a19c, 0x00000000}, @@ -1114,7 +1116,7 @@ static const u32 ar9565_1p0_modes_high_ob_db_tx_gain_table[][5] = { {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84}, {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000}, {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000}, - {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, + {0x0000a410, 0x000050d9, 0x000050d9, 0x000050df, 0x000050df}, {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002}, {0x0000a508, 0x0b022220, 0x0b022220, 0x08000004, 0x08000004}, @@ -1140,13 +1142,13 @@ static const u32 ar9565_1p0_modes_high_ob_db_tx_gain_table[][5] = { {0x0000a558, 0x69027f56, 0x69027f56, 0x53001ce5, 0x53001ce5}, {0x0000a55c, 0x6d029f56, 0x6d029f56, 0x57001ce9, 0x57001ce9}, {0x0000a560, 0x73049f56, 0x73049f56, 0x5b001ceb, 0x5b001ceb}, - {0x0000a564, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, - {0x0000a568, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, - {0x0000a56c, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, - {0x0000a570, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, - {0x0000a574, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, - {0x0000a578, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, - {0x0000a57c, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, + {0x0000a564, 0x7804ff56, 0x7804ff56, 0x60001cf0, 0x60001cf0}, + {0x0000a568, 0x7804ff56, 0x7804ff56, 0x61001cf1, 0x61001cf1}, + {0x0000a56c, 0x7804ff56, 0x7804ff56, 0x62001cf2, 0x62001cf2}, + {0x0000a570, 0x7804ff56, 0x7804ff56, 0x63001cf3, 0x63001cf3}, + {0x0000a574, 0x7804ff56, 0x7804ff56, 0x64001cf4, 0x64001cf4}, + {0x0000a578, 0x7804ff56, 0x7804ff56, 0x66001ff6, 0x66001ff6}, + {0x0000a57c, 0x7804ff56, 0x7804ff56, 0x66001ff6, 0x66001ff6}, {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, @@ -1174,7 +1176,7 @@ static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = { {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84}, {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000}, {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000}, - {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, + {0x0000a410, 0x000050d9, 0x000050d9, 0x000050df, 0x000050df}, {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002}, {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004}, @@ -1200,13 +1202,13 @@ static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = { {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5}, {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9}, {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb}, - {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, - {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, - {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, - {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, - {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, - {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, - {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, + {0x0000a564, 0x7504ff56, 0x7504ff56, 
0x59001cf0, 0x59001cf0}, + {0x0000a568, 0x7504ff56, 0x7504ff56, 0x5a001cf1, 0x5a001cf1}, + {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x5b001cf2, 0x5b001cf2}, + {0x0000a570, 0x7504ff56, 0x7504ff56, 0x5c001cf3, 0x5c001cf3}, + {0x0000a574, 0x7504ff56, 0x7504ff56, 0x5d001cf4, 0x5d001cf4}, + {0x0000a578, 0x7504ff56, 0x7504ff56, 0x5f001ff6, 0x5f001ff6}, + {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x5f001ff6, 0x5f001ff6}, {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 6963862a1872..a18414b5948b 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -227,13 +227,13 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start) if (!test_bit(SC_OP_BEACONS, &sc->sc_flags)) goto work; - ath9k_set_beacon(sc); - if (ah->opmode == NL80211_IFTYPE_STATION && test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) { spin_lock_irqsave(&sc->sc_pm_lock, flags); sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON; spin_unlock_irqrestore(&sc->sc_pm_lock, flags); + } else { + ath9k_set_beacon(sc); } work: ath_restart_work(sc); @@ -1332,6 +1332,7 @@ static int ath9k_sta_add(struct ieee80211_hw *hw, struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_node *an = (struct ath_node *) sta->drv_priv; struct ieee80211_key_conf ps_key = { }; + int key; ath_node_attach(sc, sta, vif); @@ -1339,7 +1340,9 @@ static int ath9k_sta_add(struct ieee80211_hw *hw, vif->type != NL80211_IFTYPE_AP_VLAN) return 0; - an->ps_key = ath_key_config(common, vif, sta, &ps_key); + key = ath_key_config(common, vif, sta, &ps_key); + if (key > 0) + an->ps_key = key; return 0; } @@ -1356,6 +1359,7 @@ static void ath9k_del_ps_key(struct ath_softc *sc, return; ath_key_delete(common, &ps_key); + an->ps_key = 0; } static int ath9k_sta_remove(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c index 23a3498f14d4..830bb1d1f957 100644 --- a/drivers/net/wireless/atmel.c +++ b/drivers/net/wireless/atmel.c @@ -1502,7 +1502,7 @@ static const struct file_operations atmel_proc_fops = { .open = atmel_proc_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = single_release, }; static const struct net_device_ops atmel_netdev_ops = { diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c index 523355b87659..f7c70b3a6ea9 100644 --- a/drivers/net/wireless/b43/dma.c +++ b/drivers/net/wireless/b43/dma.c @@ -1728,6 +1728,25 @@ drop_recycle_buffer: sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize); } +void b43_dma_handle_rx_overflow(struct b43_dmaring *ring) +{ + int current_slot, previous_slot; + + B43_WARN_ON(ring->tx); + + /* Device has filled all buffers, drop all packets and let TCP + * decrease speed. + * Decrement RX index by one will let the device to see all slots + * as free again + */ + /* + *TODO: How to increase rx_drop in mac80211? 
+ */ + current_slot = ring->ops->get_current_rxslot(ring); + previous_slot = prev_slot(ring, current_slot); + ring->ops->set_current_rxslot(ring, previous_slot); +} + void b43_dma_rx(struct b43_dmaring *ring) { const struct b43_dma_ops *ops = ring->ops; diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h index 9fdd1983079c..df8c8cdcbdb5 100644 --- a/drivers/net/wireless/b43/dma.h +++ b/drivers/net/wireless/b43/dma.h @@ -9,7 +9,7 @@ /* DMA-Interrupt reasons. */ #define B43_DMAIRQ_FATALMASK ((1 << 10) | (1 << 11) | (1 << 12) \ | (1 << 14) | (1 << 15)) -#define B43_DMAIRQ_NONFATALMASK (1 << 13) +#define B43_DMAIRQ_RDESC_UFLOW (1 << 13) #define B43_DMAIRQ_RX_DONE (1 << 16) /*** 32-bit DMA Engine. ***/ @@ -295,6 +295,8 @@ int b43_dma_tx(struct b43_wldev *dev, void b43_dma_handle_txstatus(struct b43_wldev *dev, const struct b43_txstatus *status); +void b43_dma_handle_rx_overflow(struct b43_dmaring *ring); + void b43_dma_rx(struct b43_dmaring *ring); void b43_dma_direct_fifo_rx(struct b43_wldev *dev, diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index d377f77d30b5..6dd07e2ec595 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -1902,30 +1902,18 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev) } } - if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK | - B43_DMAIRQ_NONFATALMASK))) { - if (merged_dma_reason & B43_DMAIRQ_FATALMASK) { - b43err(dev->wl, "Fatal DMA error: " - "0x%08X, 0x%08X, 0x%08X, " - "0x%08X, 0x%08X, 0x%08X\n", - dma_reason[0], dma_reason[1], - dma_reason[2], dma_reason[3], - dma_reason[4], dma_reason[5]); - b43err(dev->wl, "This device does not support DMA " + if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK))) { + b43err(dev->wl, + "Fatal DMA error: 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X\n", + dma_reason[0], dma_reason[1], + dma_reason[2], dma_reason[3], + dma_reason[4], dma_reason[5]); + b43err(dev->wl, "This device does not support DMA " "on your system. It will now be switched to PIO.\n"); - /* Fall back to PIO transfers if we get fatal DMA errors! */ - dev->use_pio = true; - b43_controller_restart(dev, "DMA error"); - return; - } - if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) { - b43err(dev->wl, "DMA error: " - "0x%08X, 0x%08X, 0x%08X, " - "0x%08X, 0x%08X, 0x%08X\n", - dma_reason[0], dma_reason[1], - dma_reason[2], dma_reason[3], - dma_reason[4], dma_reason[5]); - } + /* Fall back to PIO transfers if we get fatal DMA errors! */ + dev->use_pio = true; + b43_controller_restart(dev, "DMA error"); + return; } if (unlikely(reason & B43_IRQ_UCODE_DEBUG)) @@ -1944,6 +1932,11 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev) handle_irq_noise(dev); /* Check the DMA reason registers for received data. 
*/ + if (dma_reason[0] & B43_DMAIRQ_RDESC_UFLOW) { + if (B43_DEBUG) + b43warn(dev->wl, "RX descriptor underrun\n"); + b43_dma_handle_rx_overflow(dev->dma.rx_ring); + } if (dma_reason[0] & B43_DMAIRQ_RX_DONE) { if (b43_using_pio_transfers(dev)) b43_pio_rx(dev->pio.rx_queue); @@ -2001,7 +1994,7 @@ static irqreturn_t b43_do_interrupt(struct b43_wldev *dev) return IRQ_NONE; dev->dma_reason[0] = b43_read32(dev, B43_MMIO_DMA0_REASON) - & 0x0001DC00; + & 0x0001FC00; dev->dma_reason[1] = b43_read32(dev, B43_MMIO_DMA1_REASON) & 0x0000DC00; dev->dma_reason[2] = b43_read32(dev, B43_MMIO_DMA2_REASON) @@ -3130,7 +3123,7 @@ static int b43_chip_init(struct b43_wldev *dev) b43_write32(dev, 0x018C, 0x02000000); } b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, 0x00004000); - b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001DC00); + b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001FC00); b43_write32(dev, B43_MMIO_DMA1_IRQ_MASK, 0x0000DC00); b43_write32(dev, B43_MMIO_DMA2_IRQ_MASK, 0x0000DC00); b43_write32(dev, B43_MMIO_DMA3_IRQ_MASK, 0x0001DC00); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c index 2b90da0d85f3..e7a1a4770996 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c @@ -1117,7 +1117,7 @@ static void brcmf_p2p_afx_handler(struct work_struct *work) if (afx_hdl->is_listen && afx_hdl->my_listen_chan) /* 100ms ~ 300ms */ err = brcmf_p2p_discover_listen(p2p, afx_hdl->my_listen_chan, - 100 * (1 + (random32() % 3))); + 100 * (1 + prandom_u32() % 3)); else err = brcmf_p2p_act_frm_search(p2p, afx_hdl->peer_listen_chan); diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c index 19c45e363aa7..d6033a8e5dea 100644 --- a/drivers/net/wireless/hostap/hostap_ap.c +++ b/drivers/net/wireless/hostap/hostap_ap.c @@ -89,7 +89,7 @@ static const struct file_operations ap_debug_proc_fops = { .open = ap_debug_proc_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = single_release, }; #endif /* PRISM2_NO_PROCFS_DEBUG */ @@ -1116,7 +1116,7 @@ static const struct file_operations prism2_sta_proc_fops = { .open = prism2_sta_proc_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = single_release, }; static void handle_add_proc_queue(struct work_struct *work) diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c index 507ab99eef4e..6307a4e36c85 100644 --- a/drivers/net/wireless/hostap/hostap_hw.c +++ b/drivers/net/wireless/hostap/hostap_hw.c @@ -2957,7 +2957,7 @@ static const struct file_operations prism2_registers_proc_fops = { .open = prism2_registers_proc_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = single_release, }; #endif /* PRISM2_NO_PROCFS_DEBUG */ diff --git a/drivers/net/wireless/hostap/hostap_proc.c b/drivers/net/wireless/hostap/hostap_proc.c index 7491dab2c105..aa7ad3a7a69b 100644 --- a/drivers/net/wireless/hostap/hostap_proc.c +++ b/drivers/net/wireless/hostap/hostap_proc.c @@ -52,7 +52,7 @@ static const struct file_operations prism2_debug_proc_fops = { .open = prism2_debug_proc_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = single_release, }; #endif /* PRISM2_NO_PROCFS_DEBUG */ @@ -103,7 +103,7 @@ static const struct file_operations prism2_stats_proc_fops = { .open = prism2_stats_proc_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = single_release, }; @@ 
@@ -265,7 +265,7 @@ static const struct file_operations prism2_crypt_proc_fops = {
 	.open = prism2_crypt_proc_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
-	.release = seq_release,
+	.release = single_release,
 };
 
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index b8f82e688c72..9a95045c97b6 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -5741,8 +5741,7 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
 	hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
 	    IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT |
-	    IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_PS |
-	    IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+	    IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
 
 	if (il->cfg->sku & IL_SKU_N)
 		hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index a0cb0770d319..e42b266a023a 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -216,7 +216,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 	mwifiex_form_mgmt_frame(skb, buf, len);
 	mwifiex_queue_tx_pkt(priv, skb);
 
-	*cookie = random32() | 1;
+	*cookie = prandom_u32() | 1;
 	cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true, GFP_ATOMIC);
 	wiphy_dbg(wiphy, "info: management frame transmitted\n");
 
@@ -271,7 +271,7 @@ mwifiex_cfg80211_remain_on_channel(struct wiphy *wiphy,
 					 duration);
 
 	if (!ret) {
-		*cookie = random32() | 1;
+		*cookie = prandom_u32() | 1;
 		priv->roc_cfg.cookie = *cookie;
 		priv->roc_cfg.chan = *chan;
 
@@ -2234,9 +2234,6 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
 	if (wdev->netdev->reg_state == NETREG_REGISTERED)
 		unregister_netdevice(wdev->netdev);
 
-	if (wdev->netdev->reg_state == NETREG_UNREGISTERED)
-		free_netdev(wdev->netdev);
-
 	/* Clear the priv in adapter */
 	priv->netdev = NULL;
 
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 74db0d24a579..26755d9acb55 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -1191,6 +1191,7 @@ mwifiex_process_hs_config(struct mwifiex_adapter *adapter)
 		adapter->if_ops.wakeup(adapter);
 	adapter->hs_activated = false;
 	adapter->is_hs_configured = false;
+	adapter->is_suspended = false;
 	mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
 						    MWIFIEX_BSS_ROLE_ANY),
 				   false);
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 121443a0f2a1..2eb88ea9acf7 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -655,6 +655,7 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv,
 					      struct net_device *dev)
 {
 	dev->netdev_ops = &mwifiex_netdev_ops;
+	dev->destructor = free_netdev;
 	/* Initialize private structure */
 	priv->current_key_index = 0;
 	priv->media_connected = false;
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 311d0b26b81c..1a8a19dbd635 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -96,7 +96,7 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
 	} else {
 		/* Multicast */
 		priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE;
-		if (mcast_list->mode == MWIFIEX_MULTICAST_MODE) {
+		if (mcast_list->mode == MWIFIEX_ALL_MULTI_MODE) {
 			dev_dbg(priv->adapter->dev,
 				"info: Enabling All Multicast!\n");
 			priv->curr_pkt_filter |=
@@ -108,20 +108,11 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
 			dev_dbg(priv->adapter->dev,
 				"info: Set multicast list=%d\n",
 				mcast_list->num_multicast_addr);
-			/* Set multicast addresses to firmware */
-			if (old_pkt_filter == priv->curr_pkt_filter) {
-				/* Send request to firmware */
-				ret = mwifiex_send_cmd_async(priv,
-						HostCmd_CMD_MAC_MULTICAST_ADR,
-						HostCmd_ACT_GEN_SET, 0,
-						mcast_list);
-			} else {
-				/* Send request to firmware */
-				ret = mwifiex_send_cmd_async(priv,
-						HostCmd_CMD_MAC_MULTICAST_ADR,
-						HostCmd_ACT_GEN_SET, 0,
-						mcast_list);
-			}
+			/* Send multicast addresses to firmware */
+			ret = mwifiex_send_cmd_async(priv,
+						     HostCmd_CMD_MAC_MULTICAST_ADR,
+						     HostCmd_ACT_GEN_SET, 0,
+						     mcast_list);
 		}
 	}
 }
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index a2865f17c667..37984e6d4e99 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -51,9 +51,17 @@
  * This is the maximum slots a skb can have. If a guest sends a skb
  * which exceeds this limit it is considered malicious.
  */
-#define MAX_SKB_SLOTS_DEFAULT 20
-static unsigned int max_skb_slots = MAX_SKB_SLOTS_DEFAULT;
-module_param(max_skb_slots, uint, 0444);
+#define FATAL_SKB_SLOTS_DEFAULT 20
+static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
+module_param(fatal_skb_slots, uint, 0444);
+
+/*
+ * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
+ * the maximum slots a valid packet can use. Now this value is defined
+ * to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by
+ * all backend.
+ */
+#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
 
 typedef unsigned int pending_ring_idx_t;
 #define INVALID_PENDING_RING_IDX (~0U)
@@ -928,18 +936,20 @@ static void netbk_fatal_tx_err(struct xenvif *vif)
 
 static int netbk_count_requests(struct xenvif *vif,
 				struct xen_netif_tx_request *first,
-				RING_IDX first_idx,
 				struct xen_netif_tx_request *txp,
 				int work_to_do)
 {
 	RING_IDX cons = vif->tx.req_cons;
 	int slots = 0;
 	int drop_err = 0;
+	int more_data;
 
 	if (!(first->flags & XEN_NETTXF_more_data))
 		return 0;
 
 	do {
+		struct xen_netif_tx_request dropped_tx = { 0 };
+
 		if (slots >= work_to_do) {
 			netdev_err(vif->dev,
 				   "Asked for %d slots but exceeds this limit\n",
@@ -951,28 +961,32 @@ static int netbk_count_requests(struct xenvif *vif,
 		/* This guest is really using too many slots and
 		 * considered malicious.
 		 */
-		if (unlikely(slots >= max_skb_slots)) {
+		if (unlikely(slots >= fatal_skb_slots)) {
 			netdev_err(vif->dev,
 				   "Malicious frontend using %d slots, threshold %u\n",
-				   slots, max_skb_slots);
+				   slots, fatal_skb_slots);
 			netbk_fatal_tx_err(vif);
 			return -E2BIG;
 		}
 
 		/* Xen network protocol had implicit dependency on
-		 * MAX_SKB_FRAGS. XEN_NETIF_NR_SLOTS_MIN is set to the
-		 * historical MAX_SKB_FRAGS value 18 to honor the same
-		 * behavior as before. Any packet using more than 18
-		 * slots but less than max_skb_slots slots is dropped
+		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
+		 * the historical MAX_SKB_FRAGS value 18 to honor the
+		 * same behavior as before. Any packet using more than
+		 * 18 slots but less than fatal_skb_slots slots is
+		 * dropped
 		 */
-		if (!drop_err && slots >= XEN_NETIF_NR_SLOTS_MIN) {
+		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
 			if (net_ratelimit())
 				netdev_dbg(vif->dev,
 					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
-					   slots, XEN_NETIF_NR_SLOTS_MIN);
+					   slots, XEN_NETBK_LEGACY_SLOTS_MAX);
 			drop_err = -E2BIG;
 		}
 
+		if (drop_err)
+			txp = &dropped_tx;
+
 		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
 		       sizeof(*txp));
@@ -1002,10 +1016,16 @@ static int netbk_count_requests(struct xenvif *vif,
 			netbk_fatal_tx_err(vif);
 			return -EINVAL;
 		}
-	} while ((txp++)->flags & XEN_NETTXF_more_data);
+
+		more_data = txp->flags & XEN_NETTXF_more_data;
+
+		if (!drop_err)
+			txp++;
+
+	} while (more_data);
 
 	if (drop_err) {
-		netbk_tx_err(vif, first, first_idx + slots);
+		netbk_tx_err(vif, first, cons + slots);
 		return drop_err;
 	}
 
@@ -1042,7 +1062,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
 	struct pending_tx_info *first = NULL;
 
 	/* At this point shinfo->nr_frags is in fact the number of
-	 * slots, which can be as large as XEN_NETIF_NR_SLOTS_MIN.
+	 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
 	 */
 	nr_slots = shinfo->nr_frags;
 
@@ -1404,12 +1424,12 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 	struct sk_buff *skb;
 	int ret;
 
-	while ((nr_pending_reqs(netbk) + XEN_NETIF_NR_SLOTS_MIN
+	while ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX
 		< MAX_PENDING_REQS) &&
 		!list_empty(&netbk->net_schedule_list)) {
 		struct xenvif *vif;
 		struct xen_netif_tx_request txreq;
-		struct xen_netif_tx_request txfrags[max_skb_slots];
+		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
 		struct page *page;
 		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
 		u16 pending_idx;
@@ -1470,8 +1490,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 			continue;
 		}
 
-		ret = netbk_count_requests(vif, &txreq, idx,
-					   txfrags, work_to_do);
+		ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
 		if (unlikely(ret < 0))
 			continue;
 
@@ -1498,7 +1517,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 		pending_idx = netbk->pending_ring[index];
 
 		data_len = (txreq.size > PKT_PROT_LEN &&
-			    ret < XEN_NETIF_NR_SLOTS_MIN) ?
+			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
 			PKT_PROT_LEN : txreq.size;
 
 		skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
@@ -1777,7 +1796,7 @@ static inline int rx_work_todo(struct xen_netbk *netbk)
 
 static inline int tx_work_todo(struct xen_netbk *netbk)
 {
-	if ((nr_pending_reqs(netbk) + XEN_NETIF_NR_SLOTS_MIN
+	if ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX
 	     < MAX_PENDING_REQS) &&
 	     !list_empty(&netbk->net_schedule_list))
 		return 1;
@@ -1862,11 +1881,11 @@ static int __init netback_init(void)
 	if (!xen_domain())
 		return -ENODEV;
 
-	if (max_skb_slots < XEN_NETIF_NR_SLOTS_MIN) {
+	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
 		printk(KERN_INFO
-		       "xen-netback: max_skb_slots too small (%d), bump it to XEN_NETIF_NR_SLOTS_MIN (%d)\n",
-		       max_skb_slots, XEN_NETIF_NR_SLOTS_MIN);
-		max_skb_slots = XEN_NETIF_NR_SLOTS_MIN;
+		       "xen-netback: fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
+		       fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
+		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
 	}
 
 	xen_netbk_group_nr = num_online_cpus();
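
Editor's note: the xen-netback hunks above split the old single max_skb_slots threshold into two limits: XEN_NETBK_LEGACY_SLOTS_MAX (the historical MAX_SKB_FRAGS value of 18, which every accepted packet must fit within) and the fatal_skb_slots module parameter (default 20), at or beyond which the frontend is treated as malicious. The standalone C sketch below is illustrative only and not part of the patch; classify(), its enum, and the way it takes a plain slot count are invented for this example (the real checks run on the slots counter inside netbk_count_requests(), which counts follow-on requests and additionally redirects dropped requests into the scratch dropped_tx). It applies the same two comparisons in the same order:

/* Illustrative userspace sketch of the three-tier slot policy described in
 * the xen-netback comments above. classify() and its verdicts are
 * hypothetical helpers, not kernel API; the driver reacts by dropping the
 * packet (drop) or disabling the frontend (fatal).
 */
#include <stdio.h>

#define XEN_NETBK_LEGACY_SLOTS_MAX 18		/* historical MAX_SKB_FRAGS value */
static unsigned int fatal_skb_slots = 20;	/* FATAL_SKB_SLOTS_DEFAULT */

enum slot_verdict { SLOT_OK, SLOT_DROP, SLOT_FATAL };

static enum slot_verdict classify(unsigned int slots)
{
	if (slots >= fatal_skb_slots)
		return SLOT_FATAL;	/* frontend considered malicious */
	if (slots >= XEN_NETBK_LEGACY_SLOTS_MAX)
		return SLOT_DROP;	/* legal but oversized: drop the packet */
	return SLOT_OK;			/* fits in txfrags[], process normally */
}

int main(void)
{
	static const char *verdict[] = { "ok", "drop", "fatal" };
	unsigned int samples[] = { 4, 17, 18, 19, 20, 32 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%2u slots -> %s\n", samples[i], verdict[classify(samples[i])]);
	return 0;
}

The ordering of the checks matters: the fatal threshold is evaluated before the drop threshold, so a frontend that reaches fatal_skb_slots is shut down rather than merely having its packet dropped, while packets between the two limits are discarded without counting against the guest.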