Diffstat (limited to 'drivers')
146 files changed, 3269 insertions, 894 deletions
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
index 7e3fd375a627..92f6e4deee74 100644
--- a/drivers/clocksource/bcm_kona_timer.c
+++ b/drivers/clocksource/bcm_kona_timer.c
@@ -66,10 +66,10 @@ static void kona_timer_disable_and_clear(void __iomem *base)
 }
 
-static void
+static int
 kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw)
 {
-    int loop_limit = 4;
+    int loop_limit = 3;
 
    /*
     * Read 64-bit free running counter
@@ -83,18 +83,19 @@ kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw)
     * if new hi-word is equal to previously read hi-word then stop.
     */
 
-    while (--loop_limit) {
+    do {
        *msw = readl(timer_base + KONA_GPTIMER_STCHI_OFFSET);
        *lsw = readl(timer_base + KONA_GPTIMER_STCLO_OFFSET);
        if (*msw == readl(timer_base + KONA_GPTIMER_STCHI_OFFSET))
            break;
-    }
+    } while (--loop_limit);
 
    if (!loop_limit) {
        pr_err("bcm_kona_timer: getting counter failed.\n");
        pr_err(" Timer will be impacted\n");
+        return -ETIMEDOUT;
    }
 
-    return;
+    return 0;
 }
 
 static int kona_timer_set_next_event(unsigned long clc,
@@ -112,8 +113,11 @@ static int kona_timer_set_next_event(unsigned long clc,
    uint32_t lsw, msw;
    uint32_t reg;
+    int ret;
 
-    kona_timer_get_counter(timers.tmr_regs, &msw, &lsw);
+    ret = kona_timer_get_counter(timers.tmr_regs, &msw, &lsw);
+    if (ret)
+        return ret;
 
    /* Load the "next" event tick value */
    writel(lsw + clc, timers.tmr_regs + KONA_GPTIMER_STCM0_OFFSET);
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index d91e8725917c..b4b3ab5a11ad 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -164,7 +164,7 @@ void __init gic_clocksource_init(unsigned int frequency)
    gic_start_count();
 }
 
-static void __init gic_clocksource_of_init(struct device_node *node)
+static int __init gic_clocksource_of_init(struct device_node *node)
 {
    struct clk *clk;
    int ret;
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index 719b478d136e..3c39e6f45971 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -338,7 +338,6 @@ static int __init armada_xp_timer_init(struct device_node *np)
    struct clk *clk = of_clk_get_by_name(np, "fixed");
    int ret;
 
-    clk = of_clk_get(np, 0);
    if (IS_ERR(clk)) {
        pr_err("Failed to get clock");
        return PTR_ERR(clk);
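The time-armada-370-xp.c hunk above removes a stray of_clk_get(np, 0) that overwrote the clock already looked up by name, so the IS_ERR() check now validates the handle actually used. A minimal sketch of the intended pattern (illustrative only; example_timer_get_clk is a hypothetical name, the clock name "fixed" is taken from the hunk above):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of.h>

static int example_timer_get_clk(struct device_node *np)
{
    /* Look the clock up by name once; fetching index 0 afterwards
     * would silently discard this handle and its refcount. */
    struct clk *clk = of_clk_get_by_name(np, "fixed");

    if (IS_ERR(clk))
        return PTR_ERR(clk);    /* propagates -EPROBE_DEFER etc. */

    return clk_prepare_enable(clk);
}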
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index ea8189f4b021..6dc597126b79 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -441,6 +441,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
                   OP_ALG_AAI_CTR_MOD128);
    const bool is_rfc3686 = alg->caam.rfc3686;
 
+    if (!ctx->authsize)
+        return 0;
+
    /* NULL encryption / decryption */
    if (!ctx->enckeylen)
        return aead_null_set_sh_desc(aead);
@@ -614,7 +617,7 @@ skip_enc:
        keys_fit_inline = true;
 
    /* aead_givencrypt shared descriptor */
-    desc = ctx->sh_desc_givenc;
+    desc = ctx->sh_desc_enc;
 
    /* Note: Context registers are saved. */
    init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
@@ -645,13 +648,13 @@ copy_iv:
    append_operation(desc, ctx->class2_alg_type |
             OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
 
-    /* ivsize + cryptlen = seqoutlen - authsize */
-    append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
-
    /* Read and write assoclen bytes */
    append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
    append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
 
+    /* ivsize + cryptlen = seqoutlen - authsize */
+    append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
    /* Skip assoc data */
    append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -697,7 +700,7 @@ copy_iv:
    ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
                          desc_bytes(desc),
                          DMA_TO_DEVICE);
-    if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+    if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
        dev_err(jrdev, "unable to map shared descriptor\n");
        return -ENOMEM;
    }
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index f1ecc8df8d41..36365b3efdfd 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1898,6 +1898,7 @@ caam_hash_alloc(struct caam_hash_template *template,
             template->name);
        snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
             template->driver_name);
+        t_alg->ahash_alg.setkey = NULL;
    }
    alg->cra_module = THIS_MODULE;
    alg->cra_init = caam_hash_cra_init;
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index d0c1dab9b435..dff1a4a6dc1b 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -251,6 +251,14 @@ config EDAC_SBRIDGE
      Support for error detection and correction the Intel
      Sandy Bridge, Ivy Bridge and Haswell Integrated Memory Controllers.
 
+config EDAC_SKX
+    tristate "Intel Skylake server Integrated MC"
+    depends on EDAC_MM_EDAC && PCI && X86_64 && X86_MCE_INTEL
+    depends on PCI_MMCONFIG
+    help
+      Support for error detection and correction the Intel
+      Skylake server Integrated Memory Controllers.
+
 config EDAC_MPC85XX
    tristate "Freescale MPC83xx / MPC85xx"
    depends on EDAC_MM_EDAC && FSL_SOC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index f9e4a3e0e6e9..986049925b08 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_EDAC_I5400) += i5400_edac.o
 obj-$(CONFIG_EDAC_I7300) += i7300_edac.o
 obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o
 obj-$(CONFIG_EDAC_SBRIDGE) += sb_edac.o
+obj-$(CONFIG_EDAC_SKX) += skx_edac.o
 obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o
 obj-$(CONFIG_EDAC_E752X) += e752x_edac.o
 obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o
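The caamalg.c fix earlier in this patch is a classic copy/paste hazard: dma_map_single() was followed by a dma_mapping_error() check on a different handle (sh_desc_givenc_dma), so a failed mapping of sh_desc_enc_dma went unnoticed. The general rule, sketched below with hypothetical names:

#include <linux/dma-mapping.h>

static int example_map_desc(struct device *dev, void *desc, size_t len,
                            dma_addr_t *handle)
{
    *handle = dma_map_single(dev, desc, len, DMA_TO_DEVICE);

    /* Always test the handle dma_map_single() just returned,
     * not some other field that happens to be nearby. */
    if (dma_mapping_error(dev, *handle))
        return -ENOMEM;

    return 0;
}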
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 4fb2eb7c800d..ce0067b7a2f6 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -552,9 +552,9 @@ static const struct pci_id_table pci_dev_descr_haswell_table[] = {
 /* Knight's Landing Support */
 /*
  * KNL's memory channels are swizzled between memory controllers.
- * MC0 is mapped to CH3,5,6 and MC1 is mapped to CH0,1,2
+ * MC0 is mapped to CH3,4,5 and MC1 is mapped to CH0,1,2
  */
-#define knl_channel_remap(channel) ((channel + 3) % 6)
+#define knl_channel_remap(mc, chan) ((mc) ? (chan) : (chan) + 3)
 
 /* Memory controller, TAD tables, error injection - 2-8-0, 2-9-0 (2 of these) */
 #define PCI_DEVICE_ID_INTEL_KNL_IMC_MC       0x7840
@@ -1286,7 +1286,7 @@ static u32 knl_get_mc_route(int entry, u32 reg)
    mc = GET_BITFIELD(reg, entry*3, (entry*3)+2);
    chan = GET_BITFIELD(reg, (entry*2) + 18, (entry*2) + 18 + 1);
 
-    return knl_channel_remap(mc*3 + chan);
+    return knl_channel_remap(mc, chan);
 }
 
 /*
@@ -2997,8 +2997,15 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
    } else {
        char A = *("A");
 
-        channel = knl_channel_remap(channel);
+        /*
+         * Reported channel is in range 0-2, so we can't map it
+         * back to mc. To figure out mc we check machine check
+         * bank register that reported this error.
+         * bank15 means mc0 and bank16 means mc1.
+         */
+        channel = knl_channel_remap(m->bank == 16, channel);
        channel_mask = 1 << channel;
+
        snprintf(msg, sizeof(msg),
             "%s%s err_code:%04x:%04x channel:%d (DIMM_%c)",
             overflow ? " OVERFLOW" : "",
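With the two-argument macro, the swizzle described in the comment can be tabulated directly; all six cases, for reference:

/* knl_channel_remap(mc, chan) with the fixed definition:
 *   mc=1 maps to CH0,1,2:  (1,0)->0  (1,1)->1  (1,2)->2
 *   mc=0 maps to CH3,4,5:  (0,0)->3  (0,1)->4  (0,2)->5
 * Taking mc as a separate argument matters in the MCE path above:
 * the reported channel is only 0-2, and the controller is recovered
 * from the machine-check bank (bank 15 -> mc0, bank 16 -> mc1)
 * rather than being folded into the channel number.
 */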
diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_edac.c
new file mode 100644
index 000000000000..0ff4878c2aa1
--- /dev/null
+++ b/drivers/edac/skx_edac.c
@@ -0,0 +1,1121 @@
+/*
+ * EDAC driver for Intel(R) Xeon(R) Skylake processors
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/edac.h>
+#include <linux/mmzone.h>
+#include <linux/smp.h>
+#include <linux/bitmap.h>
+#include <linux/math64.h>
+#include <linux/mod_devicetable.h>
+#include <asm/cpu_device_id.h>
+#include <asm/processor.h>
+#include <asm/mce.h>
+
+#include "edac_core.h"
+
+#define SKX_REVISION    " Ver: 1.0 "
+
+/*
+ * Debug macros
+ */
+#define skx_printk(level, fmt, arg...) \
+    edac_printk(level, "skx", fmt, ##arg)
+
+#define skx_mc_printk(mci, level, fmt, arg...) \
+    edac_mc_chipset_printk(mci, level, "skx", fmt, ##arg)
+
+/*
+ * Get a bit field at register value <v>, from bit <lo> to bit <hi>
+ */
+#define GET_BITFIELD(v, lo, hi) \
+    (((v) & GENMASK_ULL((hi), (lo))) >> (lo))
+
+static LIST_HEAD(skx_edac_list);
+
+static u64 skx_tolm, skx_tohm;
+
+#define NUM_IMC        2    /* memory controllers per socket */
+#define NUM_CHANNELS   3    /* channels per memory controller */
+#define NUM_DIMMS      2    /* Max DIMMS per channel */
+
+#define MASK26    0x3FFFFFF     /* Mask for 2^26 */
+#define MASK29    0x1FFFFFFF    /* Mask for 2^29 */
+
+/*
+ * Each cpu socket contains some pci devices that provide global
+ * information, and also some that are local to each of the two
+ * memory controllers on the die.
+ */
+struct skx_dev {
+    struct list_head list;
+    u8 bus[4];
+    struct pci_dev *sad_all;
+    struct pci_dev *util_all;
+    u32 mcroute;
+    struct skx_imc {
+        struct mem_ctl_info *mci;
+        u8 mc;    /* system wide mc# */
+        u8 lmc;   /* socket relative mc# */
+        u8 src_id, node_id;
+        struct skx_channel {
+            struct pci_dev *cdev;
+            struct skx_dimm {
+                u8 close_pg;
+                u8 bank_xor_enable;
+                u8 fine_grain_bank;
+                u8 rowbits;
+                u8 colbits;
+            } dimms[NUM_DIMMS];
+        } chan[NUM_CHANNELS];
+    } imc[NUM_IMC];
+};
+static int skx_num_sockets;
+
+struct skx_pvt {
+    struct skx_imc *imc;
+};
+
+struct decoded_addr {
+    struct skx_dev *dev;
+    u64 addr;
+    int socket;
+    int imc;
+    int channel;
+    u64 chan_addr;
+    int sktways;
+    int chanways;
+    int dimm;
+    int rank;
+    int channel_rank;
+    u64 rank_address;
+    int row;
+    int column;
+    int bank_address;
+    int bank_group;
+};
+
+static struct skx_dev *get_skx_dev(u8 bus, u8 idx)
+{
+    struct skx_dev *d;
+
+    list_for_each_entry(d, &skx_edac_list, list) {
+        if (d->bus[idx] == bus)
+            return d;
+    }
+
+    return NULL;
+}
+
+enum munittype {
+    CHAN0, CHAN1, CHAN2, SAD_ALL, UTIL_ALL, SAD
+};
+
+struct munit {
+    u16 did;
+    u16 devfn[NUM_IMC];
+    u8 busidx;
+    u8 per_socket;
+    enum munittype mtype;
+};
+
+/*
+ * List of PCI device ids that we need together with some device
+ * number and function numbers to tell which memory controller the
+ * device belongs to.
+ */
+static const struct munit skx_all_munits[] = {
+    { 0x2054, { }, 1, 1, SAD_ALL },
+    { 0x2055, { }, 1, 1, UTIL_ALL },
+    { 0x2040, { PCI_DEVFN(10, 0), PCI_DEVFN(12, 0) }, 2, 2, CHAN0 },
+    { 0x2044, { PCI_DEVFN(10, 4), PCI_DEVFN(12, 4) }, 2, 2, CHAN1 },
+    { 0x2048, { PCI_DEVFN(11, 0), PCI_DEVFN(13, 0) }, 2, 2, CHAN2 },
+    { 0x208e, { }, 1, 0, SAD },
+    { }
+};
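Most of the decode in this new driver leans on the GET_BITFIELD() helper defined above; a quick worked example of what it extracts:

/* GET_BITFIELD(v, lo, hi) masks v with GENMASK_ULL(hi, lo) and shifts
 * right by lo, i.e. it returns bits lo..hi inclusive as a small integer:
 *
 *   GET_BITFIELD(0x12345678, 8, 15) == 0x56   (second byte)
 *   GET_BITFIELD(0x12345678, 0, 7)  == 0x78   (lowest byte)
 *
 * get_all_bus_mappings() below uses exactly this to split the 0xCC
 * config dword into the four per-socket bus numbers.
 */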
+/*
+ * We use the per-socket device 0x2016 to count how many sockets are present,
+ * and to detemine which PCI buses are associated with each socket. Allocate
+ * and build the full list of all the skx_dev structures that we need here.
+ */
+static int get_all_bus_mappings(void)
+{
+    struct pci_dev *pdev, *prev;
+    struct skx_dev *d;
+    u32 reg;
+    int ndev = 0;
+
+    prev = NULL;
+    for (;;) {
+        pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2016, prev);
+        if (!pdev)
+            break;
+        ndev++;
+        d = kzalloc(sizeof(*d), GFP_KERNEL);
+        if (!d) {
+            pci_dev_put(pdev);
+            return -ENOMEM;
+        }
+        pci_read_config_dword(pdev, 0xCC, &reg);
+        d->bus[0] = GET_BITFIELD(reg, 0, 7);
+        d->bus[1] = GET_BITFIELD(reg, 8, 15);
+        d->bus[2] = GET_BITFIELD(reg, 16, 23);
+        d->bus[3] = GET_BITFIELD(reg, 24, 31);
+        edac_dbg(2, "busses: %x, %x, %x, %x\n",
+             d->bus[0], d->bus[1], d->bus[2], d->bus[3]);
+        list_add_tail(&d->list, &skx_edac_list);
+        skx_num_sockets++;
+        prev = pdev;
+    }
+
+    return ndev;
+}
+
+static int get_all_munits(const struct munit *m)
+{
+    struct pci_dev *pdev, *prev;
+    struct skx_dev *d;
+    u32 reg;
+    int i = 0, ndev = 0;
+
+    prev = NULL;
+    for (;;) {
+        pdev = pci_get_device(PCI_VENDOR_ID_INTEL, m->did, prev);
+        if (!pdev)
+            break;
+        ndev++;
+        if (m->per_socket == NUM_IMC) {
+            for (i = 0; i < NUM_IMC; i++)
+                if (m->devfn[i] == pdev->devfn)
+                    break;
+            if (i == NUM_IMC)
+                goto fail;
+        }
+        d = get_skx_dev(pdev->bus->number, m->busidx);
+        if (!d)
+            goto fail;
+
+        /* Be sure that the device is enabled */
+        if (unlikely(pci_enable_device(pdev) < 0)) {
+            skx_printk(KERN_ERR,
+                   "Couldn't enable %04x:%04x\n", PCI_VENDOR_ID_INTEL, m->did);
+            goto fail;
+        }
+
+        switch (m->mtype) {
+        case CHAN0: case CHAN1: case CHAN2:
+            pci_dev_get(pdev);
+            d->imc[i].chan[m->mtype].cdev = pdev;
+            break;
+        case SAD_ALL:
+            pci_dev_get(pdev);
+            d->sad_all = pdev;
+            break;
+        case UTIL_ALL:
+            pci_dev_get(pdev);
+            d->util_all = pdev;
+            break;
+        case SAD:
+            /*
+             * one of these devices per core, including cores
+             * that don't exist on this SKU. Ignore any that
+             * read a route table of zero, make sure all the
+             * non-zero values match.
+             */
+            pci_read_config_dword(pdev, 0xB4, &reg);
+            if (reg != 0) {
+                if (d->mcroute == 0)
+                    d->mcroute = reg;
+                else if (d->mcroute != reg) {
+                    skx_printk(KERN_ERR,
+                           "mcroute mismatch\n");
+                    goto fail;
+                }
+            }
+            ndev--;
+            break;
+        }
+
+        prev = pdev;
+    }
+
+    return ndev;
+fail:
+    pci_dev_put(pdev);
+    return -ENODEV;
+}
+
+const struct x86_cpu_id skx_cpuids[] = {
+    { X86_VENDOR_INTEL, 6, 0x55, 0, 0 },    /* Skylake */
+    { }
+};
+MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);
+
+static u8 get_src_id(struct skx_dev *d)
+{
+    u32 reg;
+
+    pci_read_config_dword(d->util_all, 0xF0, &reg);
+
+    return GET_BITFIELD(reg, 12, 14);
+}
+
+static u8 skx_get_node_id(struct skx_dev *d)
+{
+    u32 reg;
+
+    pci_read_config_dword(d->util_all, 0xF4, &reg);
+
+    return GET_BITFIELD(reg, 0, 2);
+}
+
+static int get_dimm_attr(u32 reg, int lobit, int hibit, int add, int minval,
+             int maxval, char *name)
+{
+    u32 val = GET_BITFIELD(reg, lobit, hibit);
+
+    if (val < minval || val > maxval) {
+        edac_dbg(2, "bad %s = %d (raw=%x)\n", name, val, reg);
+        return -EINVAL;
+    }
+    return val + add;
+}
+
+#define IS_DIMM_PRESENT(mtr)    GET_BITFIELD((mtr), 15, 15)
+
+#define numrank(reg) get_dimm_attr((reg), 12, 13, 0, 1, 2, "ranks")
+#define numrow(reg) get_dimm_attr((reg), 2, 4, 12, 1, 6, "rows")
+#define numcol(reg) get_dimm_attr((reg), 0, 1, 10, 0, 2, "cols")
+
+static int get_width(u32 mtr)
+{
+    switch (GET_BITFIELD(mtr, 8, 9)) {
+    case 0:
+        return DEV_X4;
+    case 1:
+        return DEV_X8;
+    case 2:
+        return DEV_X16;
+    }
+    return DEV_UNKNOWN;
+}
+
+static int skx_get_hi_lo(void)
+{
+    struct pci_dev *pdev;
+    u32 reg;
+
+    pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2034, NULL);
+    if (!pdev) {
+        edac_dbg(0, "Can't get tolm/tohm\n");
+        return -ENODEV;
+    }
+
+    pci_read_config_dword(pdev, 0xD0, &reg);
+    skx_tolm = reg;
+    pci_read_config_dword(pdev, 0xD4, &reg);
+    skx_tohm = reg;
+    pci_read_config_dword(pdev, 0xD8, &reg);
+    skx_tohm |= (u64)reg << 32;
+
+    pci_dev_put(pdev);
+    edac_dbg(2, "tolm=%llx tohm=%llx\n", skx_tolm, skx_tohm);
+
+    return 0;
+}
+
+static int get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
+             struct skx_imc *imc, int chan, int dimmno)
+{
+    int banks = 16, ranks, rows, cols, npages;
+    u64 size;
+
+    if (!IS_DIMM_PRESENT(mtr))
+        return 0;
+    ranks = numrank(mtr);
+    rows = numrow(mtr);
+    cols = numcol(mtr);
+
+    /*
+     * Compute size in 8-byte (2^3) words, then shift to MiB (2^20)
+     */
+    size = ((1ull << (rows + cols + ranks)) * banks) >> (20 - 3);
+    npages = MiB_TO_PAGES(size);
+
+    edac_dbg(0, "mc#%d: channel %d, dimm %d, %lld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
+         imc->mc, chan, dimmno, size, npages,
+         banks, ranks, rows, cols);
+
+    imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mtr, 0, 0);
+    imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mtr, 9, 9);
+    imc->chan[chan].dimms[dimmno].fine_grain_bank = GET_BITFIELD(amap, 0, 0);
+    imc->chan[chan].dimms[dimmno].rowbits = rows;
+    imc->chan[chan].dimms[dimmno].colbits = cols;
+
+    dimm->nr_pages = npages;
+    dimm->grain = 32;
+    dimm->dtype = get_width(mtr);
+    dimm->mtype = MEM_DDR4;
+    dimm->edac_mode = EDAC_SECDED; /* likely better than this */
+    snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_MC#%u_Chan#%u_DIMM#%u",
+         imc->src_id, imc->lmc, chan, dimmno);
+
+    return 1;
+}
+
+#define SKX_GET_MTMTR(dev, reg) \
+    pci_read_config_dword((dev), 0x87c, &reg)
+
+static bool skx_check_ecc(struct pci_dev *pdev)
+{
+    u32 mtmtr;
+
+    SKX_GET_MTMTR(pdev, mtmtr);
+
+    return !!GET_BITFIELD(mtmtr, 2, 2);
+}
+
+static int skx_get_dimm_config(struct mem_ctl_info *mci)
+{
+    struct skx_pvt *pvt = mci->pvt_info;
+    struct skx_imc *imc = pvt->imc;
+    struct dimm_info *dimm;
+    int i, j;
+    u32 mtr, amap;
+    int ndimms;
+
+    for (i = 0; i < NUM_CHANNELS; i++) {
+        ndimms = 0;
+        pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap);
+        for (j = 0; j < NUM_DIMMS; j++) {
+            dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
+                         mci->n_layers, i, j, 0);
+            pci_read_config_dword(imc->chan[i].cdev,
+                          0x80 + 4*j, &mtr);
+            ndimms += get_dimm_info(mtr, amap, dimm, imc, i, j);
+        }
+        if (ndimms && !skx_check_ecc(imc->chan[0].cdev)) {
+            skx_printk(KERN_ERR, "ECC is disabled on imc %d\n", imc->mc);
+            return -ENODEV;
+        }
+    }
+
+    return 0;
+}
+
+static void skx_unregister_mci(struct skx_imc *imc)
+{
+    struct mem_ctl_info *mci = imc->mci;
+
+    if (!mci)
+        return;
+
+    edac_dbg(0, "MC%d: mci = %p\n", imc->mc, mci);
+
+    /* Remove MC sysfs nodes */
+    edac_mc_del_mc(mci->pdev);
+
+    edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
+    kfree(mci->ctl_name);
+    edac_mc_free(mci);
+}
+
+static int skx_register_mci(struct skx_imc *imc)
+{
+    struct mem_ctl_info *mci;
+    struct edac_mc_layer layers[2];
+    struct pci_dev *pdev = imc->chan[0].cdev;
+    struct skx_pvt *pvt;
+    int rc;
+
+    /* allocate a new MC control structure */
+    layers[0].type = EDAC_MC_LAYER_CHANNEL;
+    layers[0].size = NUM_CHANNELS;
+    layers[0].is_virt_csrow = false;
+    layers[1].type = EDAC_MC_LAYER_SLOT;
+    layers[1].size = NUM_DIMMS;
+    layers[1].is_virt_csrow = true;
+    mci = edac_mc_alloc(imc->mc, ARRAY_SIZE(layers), layers,
+                sizeof(struct skx_pvt));
+
+    if (unlikely(!mci))
+        return -ENOMEM;
+
+    edac_dbg(0, "MC#%d: mci = %p\n", imc->mc, mci);
+
+    /* Associate skx_dev and mci for future usage */
+    imc->mci = mci;
+    pvt = mci->pvt_info;
+    pvt->imc = imc;
+
+    mci->ctl_name = kasprintf(GFP_KERNEL, "Skylake Socket#%d IMC#%d",
+                  imc->node_id, imc->lmc);
+    mci->mtype_cap = MEM_FLAG_DDR4;
+    mci->edac_ctl_cap = EDAC_FLAG_NONE;
+    mci->edac_cap = EDAC_FLAG_NONE;
+    mci->mod_name = "skx_edac.c";
+    mci->dev_name = pci_name(imc->chan[0].cdev);
+    mci->mod_ver = SKX_REVISION;
+    mci->ctl_page_to_phys = NULL;
+
+    rc = skx_get_dimm_config(mci);
+    if (rc < 0)
+        goto fail;
+
+    /* record ptr to the generic device */
+    mci->pdev = &pdev->dev;
+
+    /* add this new MC control structure to EDAC's list of MCs */
+    if (unlikely(edac_mc_add_mc(mci))) {
+        edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
+        rc = -EINVAL;
+        goto fail;
+    }
+
+    return 0;
+
+fail:
+    kfree(mci->ctl_name);
+    edac_mc_free(mci);
+    imc->mci = NULL;
+    return rc;
+}
+
+#define SKX_MAX_SAD 24
+
+#define SKX_GET_SAD(d, i, reg) \
+    pci_read_config_dword((d)->sad_all, 0x60 + 8 * (i), &reg)
+#define SKX_GET_ILV(d, i, reg) \
+    pci_read_config_dword((d)->sad_all, 0x64 + 8 * (i), &reg)
+
+#define SKX_SAD_MOD3MODE(sad)   GET_BITFIELD((sad), 30, 31)
+#define SKX_SAD_MOD3(sad)       GET_BITFIELD((sad), 27, 27)
+#define SKX_SAD_LIMIT(sad)      (((u64)GET_BITFIELD((sad), 7, 26) << 26) | MASK26)
+#define SKX_SAD_MOD3ASMOD2(sad) GET_BITFIELD((sad), 5, 6)
+#define SKX_SAD_ATTR(sad)       GET_BITFIELD((sad), 3, 4)
+#define SKX_SAD_INTERLEAVE(sad) GET_BITFIELD((sad), 1, 2)
+#define SKX_SAD_ENABLE(sad)     GET_BITFIELD((sad), 0, 0)
+
+#define SKX_ILV_REMOTE(tgt)     (((tgt) & 8) == 0)
+#define SKX_ILV_TARGET(tgt)     ((tgt) & 7)
+
+static bool skx_sad_decode(struct decoded_addr *res)
+{
+    struct skx_dev *d = list_first_entry(&skx_edac_list, typeof(*d), list);
+    u64 addr = res->addr;
+    int i, idx, tgt, lchan, shift;
+    u32 sad, ilv;
+    u64 limit, prev_limit;
+    int remote = 0;
+
+    /* Simple sanity check for I/O space or out of range */
+    if (addr >= skx_tohm || (addr >= skx_tolm && addr < BIT_ULL(32))) {
+        edac_dbg(0, "Address %llx out of range\n", addr);
+        return false;
+    }
+
+restart:
+    prev_limit = 0;
+    for (i = 0; i < SKX_MAX_SAD; i++) {
+        SKX_GET_SAD(d, i, sad);
+        limit = SKX_SAD_LIMIT(sad);
+        if (SKX_SAD_ENABLE(sad)) {
+            if (addr >= prev_limit && addr <= limit)
+                goto sad_found;
+        }
+        prev_limit = limit + 1;
+    }
+    edac_dbg(0, "No SAD entry for %llx\n", addr);
+    return false;
+
+sad_found:
+    SKX_GET_ILV(d, i, ilv);
+
+    switch (SKX_SAD_INTERLEAVE(sad)) {
+    case 0:
+        idx = GET_BITFIELD(addr, 6, 8);
+        break;
+    case 1:
+        idx = GET_BITFIELD(addr, 8, 10);
+        break;
+    case 2:
+        idx = GET_BITFIELD(addr, 12, 14);
+        break;
+    case 3:
+        idx = GET_BITFIELD(addr, 30, 32);
+        break;
+    }
+
+    tgt = GET_BITFIELD(ilv, 4 * idx, 4 * idx + 3);
+
+    /* If point to another node, find it and start over */
+    if (SKX_ILV_REMOTE(tgt)) {
+        if (remote) {
+            edac_dbg(0, "Double remote!\n");
+            return false;
+        }
+        remote = 1;
+        list_for_each_entry(d, &skx_edac_list, list) {
+            if (d->imc[0].src_id == SKX_ILV_TARGET(tgt))
+                goto restart;
+        }
+        edac_dbg(0, "Can't find node %d\n", SKX_ILV_TARGET(tgt));
+        return false;
+    }
+
+    if (SKX_SAD_MOD3(sad) == 0)
+        lchan = SKX_ILV_TARGET(tgt);
+    else {
+        switch (SKX_SAD_MOD3MODE(sad)) {
+        case 0:
+            shift = 6;
+            break;
+        case 1:
+            shift = 8;
+            break;
+        case 2:
+            shift = 12;
+            break;
+        default:
+            edac_dbg(0, "illegal mod3mode\n");
+            return false;
+        }
+        switch (SKX_SAD_MOD3ASMOD2(sad)) {
+        case 0:
+            lchan = (addr >> shift) % 3;
+            break;
+        case 1:
+            lchan = (addr >> shift) % 2;
+            break;
+        case 2:
+            lchan = (addr >> shift) % 2;
+            lchan = (lchan << 1) | ~lchan;
+            break;
+        case 3:
+            lchan = ((addr >> shift) % 2) << 1;
+            break;
+        }
+        lchan = (lchan << 1) | (SKX_ILV_TARGET(tgt) & 1);
+    }
+
+    res->dev = d;
+    res->socket = d->imc[0].src_id;
+    res->imc = GET_BITFIELD(d->mcroute, lchan * 3, lchan * 3 + 2);
+    res->channel = GET_BITFIELD(d->mcroute, lchan * 2 + 18, lchan * 2 + 19);
+
+    edac_dbg(2, "%llx: socket=%d imc=%d channel=%d\n",
+         res->addr, res->socket, res->imc, res->channel);
+    return true;
+}
+
+#define SKX_MAX_TAD 8
+
+#define SKX_GET_TADBASE(d, mc, i, reg) \
+    pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x850 + 4 * (i), &reg)
+#define SKX_GET_TADWAYNESS(d, mc, i, reg) \
+    pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x880 + 4 * (i), &reg)
+#define SKX_GET_TADCHNILVOFFSET(d, mc, ch, i, reg) \
+    pci_read_config_dword((d)->imc[mc].chan[ch].cdev, 0x90 + 4 * (i), &reg)
+
+#define SKX_TAD_BASE(b)      ((u64)GET_BITFIELD((b), 12, 31) << 26)
+#define SKX_TAD_SKT_GRAN(b)  GET_BITFIELD((b), 4, 5)
+#define SKX_TAD_CHN_GRAN(b)  GET_BITFIELD((b), 6, 7)
+#define SKX_TAD_LIMIT(b)     (((u64)GET_BITFIELD((b), 12, 31) << 26) | MASK26)
+#define SKX_TAD_OFFSET(b)    ((u64)GET_BITFIELD((b), 4, 23) << 26)
+#define SKX_TAD_SKTWAYS(b)   (1 << GET_BITFIELD((b), 10, 11))
+#define SKX_TAD_CHNWAYS(b)   (GET_BITFIELD((b), 8, 9) + 1)
+
+/* which bit used for both socket and channel interleave */
+static int skx_granularity[] = { 6, 8, 12, 30 };
+
+static u64 skx_do_interleave(u64 addr, int shift, int ways, u64 lowbits)
+{
+    addr >>= shift;
+    addr /= ways;
+    addr <<= shift;
+
+    return addr | (lowbits & ((1ull << shift) - 1));
+}
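skx_do_interleave() strips one level of interleaving: it deletes the interleave contribution above `shift` by dividing by the number of ways, then splices the untouched low bits back in. A worked example, assuming 2-way interleave on bit 8:

/* addr = 0x1234, shift = 8, ways = 2, lowbits = addr:
 *   0x1234 >> 8    = 0x12
 *   0x12 / 2       = 0x9
 *   0x9 << 8       = 0x900
 *   0x900 | 0x34   = 0x934   (low 8 bits preserved from lowbits)
 */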
+static bool skx_tad_decode(struct decoded_addr *res)
+{
+    int i;
+    u32 base, wayness, chnilvoffset;
+    int skt_interleave_bit, chn_interleave_bit;
+    u64 channel_addr;
+
+    for (i = 0; i < SKX_MAX_TAD; i++) {
+        SKX_GET_TADBASE(res->dev, res->imc, i, base);
+        SKX_GET_TADWAYNESS(res->dev, res->imc, i, wayness);
+        if (SKX_TAD_BASE(base) <= res->addr && res->addr <= SKX_TAD_LIMIT(wayness))
+            goto tad_found;
+    }
+    edac_dbg(0, "No TAD entry for %llx\n", res->addr);
+    return false;
+
+tad_found:
+    res->sktways = SKX_TAD_SKTWAYS(wayness);
+    res->chanways = SKX_TAD_CHNWAYS(wayness);
+    skt_interleave_bit = skx_granularity[SKX_TAD_SKT_GRAN(base)];
+    chn_interleave_bit = skx_granularity[SKX_TAD_CHN_GRAN(base)];
+
+    SKX_GET_TADCHNILVOFFSET(res->dev, res->imc, res->channel, i, chnilvoffset);
+    channel_addr = res->addr - SKX_TAD_OFFSET(chnilvoffset);
+
+    if (res->chanways == 3 && skt_interleave_bit > chn_interleave_bit) {
+        /* Must handle channel first, then socket */
+        channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit,
+                         res->chanways, channel_addr);
+        channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit,
+                         res->sktways, channel_addr);
+    } else {
+        /* Handle socket then channel. Preserve low bits from original address */
+        channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit,
+                         res->sktways, res->addr);
+        channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit,
+                         res->chanways, res->addr);
+    }
+
+    res->chan_addr = channel_addr;
+
+    edac_dbg(2, "%llx: chan_addr=%llx sktways=%d chanways=%d\n",
+         res->addr, res->chan_addr, res->sktways, res->chanways);
+    return true;
+}
+
+#define SKX_MAX_RIR 4
+
+#define SKX_GET_RIRWAYNESS(d, mc, ch, i, reg) \
+    pci_read_config_dword((d)->imc[mc].chan[ch].cdev, \
+                  0x108 + 4 * (i), &reg)
+#define SKX_GET_RIRILV(d, mc, ch, idx, i, reg) \
+    pci_read_config_dword((d)->imc[mc].chan[ch].cdev, \
+                  0x120 + 16 * idx + 4 * (i), &reg)
+
+#define SKX_RIR_VALID(b)      GET_BITFIELD((b), 31, 31)
+#define SKX_RIR_LIMIT(b)      (((u64)GET_BITFIELD((b), 1, 11) << 29) | MASK29)
+#define SKX_RIR_WAYS(b)       (1 << GET_BITFIELD((b), 28, 29))
+#define SKX_RIR_CHAN_RANK(b)  GET_BITFIELD((b), 16, 19)
+#define SKX_RIR_OFFSET(b)     ((u64)(GET_BITFIELD((b), 2, 15) << 26))
+
+static bool skx_rir_decode(struct decoded_addr *res)
+{
+    int i, idx, chan_rank;
+    int shift;
+    u32 rirway, rirlv;
+    u64 rank_addr, prev_limit = 0, limit;
+
+    if (res->dev->imc[res->imc].chan[res->channel].dimms[0].close_pg)
+        shift = 6;
+    else
+        shift = 13;
+
+    for (i = 0; i < SKX_MAX_RIR; i++) {
+        SKX_GET_RIRWAYNESS(res->dev, res->imc, res->channel, i, rirway);
+        limit = SKX_RIR_LIMIT(rirway);
+        if (SKX_RIR_VALID(rirway)) {
+            if (prev_limit <= res->chan_addr &&
+                res->chan_addr <= limit)
+                goto rir_found;
+        }
+        prev_limit = limit;
+    }
+    edac_dbg(0, "No RIR entry for %llx\n", res->addr);
+    return false;
+
+rir_found:
+    rank_addr = res->chan_addr >> shift;
+    rank_addr /= SKX_RIR_WAYS(rirway);
+    rank_addr <<= shift;
+    rank_addr |= res->chan_addr & GENMASK_ULL(shift - 1, 0);
+
+    res->rank_address = rank_addr;
+    idx = (res->chan_addr >> shift) % SKX_RIR_WAYS(rirway);
+
+    SKX_GET_RIRILV(res->dev, res->imc, res->channel, idx, i, rirlv);
+    res->rank_address = rank_addr - SKX_RIR_OFFSET(rirlv);
+    chan_rank = SKX_RIR_CHAN_RANK(rirlv);
+    res->channel_rank = chan_rank;
+    res->dimm = chan_rank / 4;
+    res->rank = chan_rank % 4;
+
+    edac_dbg(2, "%llx: dimm=%d rank=%d chan_rank=%d rank_addr=%llx\n",
+         res->addr, res->dimm, res->rank,
+         res->channel_rank, res->rank_address);
+    return true;
+}
+
+static u8 skx_close_row[] = {
+    15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33
+};
+static u8 skx_close_column[] = {
+    3, 4, 5, 14, 19, 23, 24, 25, 26, 27
+};
+static u8 skx_open_row[] = {
+    14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33
+};
+static u8 skx_open_column[] = {
+    3, 4, 5, 6, 7, 8, 9, 10, 11, 12
+};
+static u8 skx_open_fine_column[] = {
+    3, 4, 5, 7, 8, 9, 10, 11, 12, 13
+};
+
+static int skx_bits(u64 addr, int nbits, u8 *bits)
+{
+    int i, res = 0;
+
+    for (i = 0; i < nbits; i++)
+        res |= ((addr >> bits[i]) & 1) << i;
+    return res;
+}
+
+static int skx_bank_bits(u64 addr, int b0, int b1, int do_xor, int x0, int x1)
+{
+    int ret = GET_BITFIELD(addr, b0, b0) | (GET_BITFIELD(addr, b1, b1) << 1);
+
+    if (do_xor)
+        ret ^= GET_BITFIELD(addr, x0, x0) | (GET_BITFIELD(addr, x1, x1) << 1);
+
+    return ret;
+}
+
+static bool skx_mad_decode(struct decoded_addr *r)
+{
+    struct skx_dimm *dimm = &r->dev->imc[r->imc].chan[r->channel].dimms[r->dimm];
+    int bg0 = dimm->fine_grain_bank ? 6 : 13;
+
+    if (dimm->close_pg) {
+        r->row = skx_bits(r->rank_address, dimm->rowbits, skx_close_row);
+        r->column = skx_bits(r->rank_address, dimm->colbits, skx_close_column);
+        r->column |= 0x400; /* C10 is autoprecharge, always set */
+        r->bank_address = skx_bank_bits(r->rank_address, 8, 9, dimm->bank_xor_enable, 22, 28);
+        r->bank_group = skx_bank_bits(r->rank_address, 6, 7, dimm->bank_xor_enable, 20, 21);
+    } else {
+        r->row = skx_bits(r->rank_address, dimm->rowbits, skx_open_row);
+        if (dimm->fine_grain_bank)
+            r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_fine_column);
+        else
+            r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_column);
+        r->bank_address = skx_bank_bits(r->rank_address, 18, 19, dimm->bank_xor_enable, 22, 23);
+        r->bank_group = skx_bank_bits(r->rank_address, bg0, 17, dimm->bank_xor_enable, 20, 21);
+    }
+    r->row &= (1u << dimm->rowbits) - 1;
+
+    edac_dbg(2, "%llx: row=%x col=%x bank_addr=%d bank_group=%d\n",
+         r->addr, r->row, r->column, r->bank_address,
+         r->bank_group);
+    return true;
+}
+
+static bool skx_decode(struct decoded_addr *res)
+{
+
+    return skx_sad_decode(res) && skx_tad_decode(res) &&
+        skx_rir_decode(res) && skx_mad_decode(res);
+}
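skx_bits() above gathers scattered rank-address bits into a contiguous row or column value using the per-geometry tables. A worked example using the first three entries of skx_close_column (bits 3, 4, 5):

/* res bit i is taken from addr bit bits[i]:
 *   addr = 0x28 (binary 101000), bits[] = {3, 4, 5}, nbits = 3
 *   addr bit 3 = 1 -> res bit 0
 *   addr bit 4 = 0 -> res bit 1
 *   addr bit 5 = 1 -> res bit 2
 *   res = 0b101 = 5
 */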
+#ifdef CONFIG_EDAC_DEBUG
+/*
+ * Debug feature. Make /sys/kernel/debug/skx_edac_test/addr.
+ * Write an address to this file to exercise the address decode
+ * logic in this driver.
+ */
+static struct dentry *skx_test;
+static u64 skx_fake_addr;
+
+static int debugfs_u64_set(void *data, u64 val)
+{
+    struct decoded_addr res;
+
+    res.addr = val;
+    skx_decode(&res);
+
+    return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
+
+static struct dentry *mydebugfs_create(const char *name, umode_t mode,
+                       struct dentry *parent, u64 *value)
+{
+    return debugfs_create_file(name, mode, parent, value, &fops_u64_wo);
+}
+
+static void setup_skx_debug(void)
+{
+    skx_test = debugfs_create_dir("skx_edac_test", NULL);
+    mydebugfs_create("addr", S_IWUSR, skx_test, &skx_fake_addr);
+}
+
+static void teardown_skx_debug(void)
+{
+    debugfs_remove_recursive(skx_test);
+}
+#else
+static void setup_skx_debug(void)
+{
+}
+
+static void teardown_skx_debug(void)
+{
+}
+#endif /*CONFIG_EDAC_DEBUG*/
+
+static void skx_mce_output_error(struct mem_ctl_info *mci,
+                 const struct mce *m,
+                 struct decoded_addr *res)
+{
+    enum hw_event_mc_err_type tp_event;
+    char *type, *optype, msg[256];
+    bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
+    bool overflow = GET_BITFIELD(m->status, 62, 62);
+    bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
+    bool recoverable;
+    u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
+    u32 mscod = GET_BITFIELD(m->status, 16, 31);
+    u32 errcode = GET_BITFIELD(m->status, 0, 15);
+    u32 optypenum = GET_BITFIELD(m->status, 4, 6);
+
+    recoverable = GET_BITFIELD(m->status, 56, 56);
+
+    if (uncorrected_error) {
+        if (ripv) {
+            type = "FATAL";
+            tp_event = HW_EVENT_ERR_FATAL;
+        } else {
+            type = "NON_FATAL";
+            tp_event = HW_EVENT_ERR_UNCORRECTED;
+        }
+    } else {
+        type = "CORRECTED";
+        tp_event = HW_EVENT_ERR_CORRECTED;
+    }
+
+    /*
+     * According with Table 15-9 of the Intel Architecture spec vol 3A,
+     * memory errors should fit in this mask:
+     *    000f 0000 1mmm cccc (binary)
+     * where:
+     *    f = Correction Report Filtering Bit. If 1, subsequent errors
+     *        won't be shown
+     *    mmm = error type
+     *    cccc = channel
+     * If the mask doesn't match, report an error to the parsing logic
+     */
+    if (!((errcode & 0xef80) == 0x80)) {
+        optype = "Can't parse: it is not a mem";
+    } else {
+        switch (optypenum) {
+        case 0:
+            optype = "generic undef request error";
+            break;
+        case 1:
+            optype = "memory read error";
+            break;
+        case 2:
+            optype = "memory write error";
+            break;
+        case 3:
+            optype = "addr/cmd error";
+            break;
+        case 4:
+            optype = "memory scrubbing error";
+            break;
+        default:
+            optype = "reserved";
+            break;
+        }
+    }
+
+    snprintf(msg, sizeof(msg),
+         "%s%s err_code:%04x:%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:%x col:%x",
+         overflow ? " OVERFLOW" : "",
+         (uncorrected_error && recoverable) ? " recoverable" : "",
+         mscod, errcode,
+         res->socket, res->imc, res->rank,
+         res->bank_group, res->bank_address, res->row, res->column);
+
+    edac_dbg(0, "%s\n", msg);
+
+    /* Call the helper to output message */
+    edac_mc_handle_error(tp_event, mci, core_err_cnt,
+                 m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
+                 res->channel, res->dimm, -1,
+                 optype, msg);
+}
+static int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
+                   void *data)
+{
+    struct mce *mce = (struct mce *)data;
+    struct decoded_addr res;
+    struct mem_ctl_info *mci;
+    char *type;
+
+    if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
+        return NOTIFY_DONE;
+
+    /* ignore unless this is memory related with an address */
+    if ((mce->status & 0xefff) >> 7 != 1 || !(mce->status & MCI_STATUS_ADDRV))
+        return NOTIFY_DONE;
+
+    res.addr = mce->addr;
+    if (!skx_decode(&res))
+        return NOTIFY_DONE;
+    mci = res.dev->imc[res.imc].mci;
+
+    if (mce->mcgstatus & MCG_STATUS_MCIP)
+        type = "Exception";
+    else
+        type = "Event";
+
+    skx_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n");
+
+    skx_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx "
+              "Bank %d: %016Lx\n", mce->extcpu, type,
+              mce->mcgstatus, mce->bank, mce->status);
+    skx_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc);
+    skx_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr);
+    skx_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc);
+
+    skx_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET "
+              "%u APIC %x\n", mce->cpuvendor, mce->cpuid,
+              mce->time, mce->socketid, mce->apicid);
+
+    skx_mce_output_error(mci, mce, &res);
+
+    return NOTIFY_DONE;
+}
+
+static struct notifier_block skx_mce_dec = {
+    .notifier_call = skx_mce_check_error,
+};
+
+static void skx_remove(void)
+{
+    int i, j;
+    struct skx_dev *d, *tmp;
+
+    edac_dbg(0, "\n");
+
+    list_for_each_entry_safe(d, tmp, &skx_edac_list, list) {
+        list_del(&d->list);
+        for (i = 0; i < NUM_IMC; i++) {
+            skx_unregister_mci(&d->imc[i]);
+            for (j = 0; j < NUM_CHANNELS; j++)
+                pci_dev_put(d->imc[i].chan[j].cdev);
+        }
+        pci_dev_put(d->util_all);
+        pci_dev_put(d->sad_all);
+
+        kfree(d);
+    }
+}
+
+/*
+ * skx_init:
+ *    make sure we are running on the correct cpu model
+ *    search for all the devices we need
+ *    check which DIMMs are present.
+ */
+int __init skx_init(void)
+{
+    const struct x86_cpu_id *id;
+    const struct munit *m;
+    int rc = 0, i;
+    u8 mc = 0, src_id, node_id;
+    struct skx_dev *d;
+
+    edac_dbg(2, "\n");
+
+    id = x86_match_cpu(skx_cpuids);
+    if (!id)
+        return -ENODEV;
+
+    rc = skx_get_hi_lo();
+    if (rc)
+        return rc;
+
+    rc = get_all_bus_mappings();
+    if (rc < 0)
+        goto fail;
+    if (rc == 0) {
+        edac_dbg(2, "No memory controllers found\n");
+        return -ENODEV;
+    }
+
+    for (m = skx_all_munits; m->did; m++) {
+        rc = get_all_munits(m);
+        if (rc < 0)
+            goto fail;
+        if (rc != m->per_socket * skx_num_sockets) {
+            edac_dbg(2, "Expected %d, got %d of %x\n",
+                 m->per_socket * skx_num_sockets, rc, m->did);
+            rc = -ENODEV;
+            goto fail;
+        }
+    }
+
+    list_for_each_entry(d, &skx_edac_list, list) {
+        src_id = get_src_id(d);
+        node_id = skx_get_node_id(d);
+        edac_dbg(2, "src_id=%d node_id=%d\n", src_id, node_id);
+        for (i = 0; i < NUM_IMC; i++) {
+            d->imc[i].mc = mc++;
+            d->imc[i].lmc = i;
+            d->imc[i].src_id = src_id;
+            d->imc[i].node_id = node_id;
+            rc = skx_register_mci(&d->imc[i]);
+            if (rc < 0)
+                goto fail;
+        }
+    }
+
+    /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+    opstate_init();
+
+    setup_skx_debug();
+
+    mce_register_decode_chain(&skx_mce_dec);
+
+    return 0;
+fail:
+    skx_remove();
+    return rc;
+}
+
+static void __exit skx_exit(void)
+{
+    edac_dbg(2, "\n");
+    mce_unregister_decode_chain(&skx_mce_dec);
+    skx_remove();
+    teardown_skx_debug();
+}
+
+module_init(skx_init);
+module_exit(skx_exit);
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Tony Luck");
+MODULE_DESCRIPTION("MC Driver for Intel Skylake server processors");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 8ebc5f1eb4c0..8c704c86597b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -646,9 +646,9 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
 void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
 int amdgpu_gart_init(struct amdgpu_device *adev);
 void amdgpu_gart_fini(struct amdgpu_device *adev);
-void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
            int pages);
-int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
+int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
             int pages, struct page **pagelist,
             dma_addr_t *dma_addr, uint32_t flags);
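The amdgpu.h prototype change above widens the GART offset from unsigned to uint64_t. The likely motivation (my reading, not stated in the patch) is plain truncation arithmetic: a 32-bit unsigned cannot represent offsets at or above 4 GiB. Illustration:

/* With a GART window larger than 4 GiB: */
uint64_t offset = 5ULL << 30;           /* 5 GiB                  */
unsigned truncated = (unsigned)offset;  /* 0x40000000 == 1 GiB    */
/* Binding pages at 'truncated' would silently address the wrong
 * GART range; the 64-bit parameter keeps the full offset intact. */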
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 49de92600074..10b5ddf2c588 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -200,16 +200,7 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
    atpx->is_hybrid = false;
    if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
        printk("ATPX Hybrid Graphics\n");
-#if 1
-        /* This is a temporary hack until the D3 cold support
-         * makes it upstream.  The ATPX power_control method seems
-         * to still work on even if the system should be using
-         * the new standardized hybrid D3 cold ACPI interface.
-         */
-        atpx->functions.power_cntl = true;
-#else
        atpx->functions.power_cntl = false;
-#endif
        atpx->is_hybrid = true;
    }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 921bce2df0b0..0feea347f680 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -221,7 +221,7 @@ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
  * Unbinds the requested pages from the gart page table and
  * replaces them with the dummy page (all asics).
  */
-void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
            int pages)
 {
    unsigned t;
@@ -268,7 +268,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
  * (all asics).
  * Returns 0 for success, -EINVAL for failure.
  */
-int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
+int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
             int pages, struct page **pagelist, dma_addr_t *dma_addr,
             uint32_t flags)
 {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index b11f4e8868d7..4aa993d19018 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1187,7 +1187,8 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        r = 0;
    }
 
-error:
    fence_put(fence);
+
+error:
    return r;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 8e642fc48df4..80120fa4092c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1535,7 +1535,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
    r = amd_sched_entity_init(&ring->sched, &vm->entity,
                  rq, amdgpu_sched_jobs);
    if (r)
-        return r;
+        goto err;
 
    vm->page_directory_fence = NULL;
 
@@ -1565,6 +1565,9 @@ error_free_page_directory:
 error_free_sched_entity:
    amd_sched_entity_fini(&ring->sched, &vm->entity);
 
+err:
+    drm_free_large(vm->page_tables);
+
    return r;
 }
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index e621eba63126..a7d3cb3fead0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -184,7 +184,7 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
                    sizeof(u32)) + inx;
 
    pr_debug("kfd: get kernel queue doorbell\n"
-             "     doorbell offset   == 0x%08d\n"
+             "     doorbell offset   == 0x%08X\n"
             "     kernel address    == 0x%08lX\n",
        *doorbell_off, (uintptr_t)(kfd->doorbell_kernel_ptr + inx));
 
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index ce54e985d91b..0a06f9120b5a 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -464,7 +464,7 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
    /* Sometimes user space wants everything disabled, so don't steal the
     * display if there's a master.
     */
-    if (lockless_dereference(dev->master))
+    if (READ_ONCE(dev->master))
        return false;
 
    drm_for_each_crtc(crtc, dev) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 87ef34150d46..b382cf505262 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1333,8 +1333,6 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
    if (ret < 0)
        return ret;
 
-    mutex_lock(&gpu->lock);
-
    /*
     * TODO
     *
@@ -1348,16 +1346,18 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
    if (unlikely(event == ~0U)) {
        DRM_ERROR("no free event\n");
        ret = -EBUSY;
-        goto out_unlock;
+        goto out_pm_put;
    }
 
    fence = etnaviv_gpu_fence_alloc(gpu);
    if (!fence) {
        event_free(gpu, event);
        ret = -ENOMEM;
-        goto out_unlock;
+        goto out_pm_put;
    }
 
+    mutex_lock(&gpu->lock);
+
    gpu->event[event].fence = fence;
    submit->fence = fence->seqno;
    gpu->active_fence = submit->fence;
@@ -1395,9 +1395,9 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
    hangcheck_timer_reset(gpu);
    ret = 0;
 
-out_unlock:
    mutex_unlock(&gpu->lock);
 
+out_pm_put:
    etnaviv_gpu_pm_put(gpu);
 
    return ret;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 21f939074abc..20fe9d52e256 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1854,6 +1854,7 @@ struct drm_i915_private {
    enum modeset_restore modeset_restore;
    struct mutex modeset_restore_lock;
    struct drm_atomic_state *modeset_restore_state;
+    struct drm_modeset_acquire_ctx reset_ctx;
 
    struct list_head vm_list; /* Global list of all address spaces */
    struct i915_ggtt ggtt; /* VM representing the global address space */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 11681501d7b1..a77ce9983f69 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -879,9 +879,12 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
    ret = i915_gem_shmem_pread(dev, obj, args, file);
 
    /* pread for non shmem backed objects */
-    if (ret == -EFAULT || ret == -ENODEV)
+    if (ret == -EFAULT || ret == -ENODEV) {
+        intel_runtime_pm_get(to_i915(dev));
        ret = i915_gem_gtt_pread(dev, obj, args->size,
                    args->offset, args->data_ptr);
+        intel_runtime_pm_put(to_i915(dev));
+    }
 
 out:
    drm_gem_object_unreference(&obj->base);
@@ -1306,7 +1309,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
         * textures). Fallback to the shmem path in that case.
         */
    }
 
-    if (ret == -EFAULT) {
+    if (ret == -EFAULT || ret == -ENOSPC) {
        if (obj->phys_handle)
            ret = i915_gem_phys_pwrite(obj, args, file);
        else if (i915_gem_object_has_struct_page(obj))
@@ -3169,6 +3172,8 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
    }
 
    intel_ring_init_seqno(engine, engine->last_submitted_seqno);
+
+    engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
 }
 
 void i915_gem_reset(struct drm_device *dev)
@@ -3186,6 +3191,7 @@ void i915_gem_reset(struct drm_device *dev)
 
    for_each_engine(engine, dev_priv)
        i915_gem_reset_engine_cleanup(engine);
+    mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
 
    i915_gem_context_reset(dev);
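The i915_gem.c pread change above brackets the GGTT fallback with intel_runtime_pm_get()/put(), since that path touches hardware registers that are powered down while the device is runtime-suspended. The shape of the pattern, as a sketch of the hunk above:

/* Hardware-touching fallback path: wake the device, do the MMIO
 * work, then drop the wakeref so runtime suspend can resume. Both
 * calls must pair on every path between them. */
intel_runtime_pm_get(to_i915(dev));
ret = i915_gem_gtt_pread(dev, obj, args->size,
                         args->offset, args->data_ptr);
intel_runtime_pm_put(to_i915(dev));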
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 10f1e32767e6..7a30af79d799 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2873,6 +2873,7 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev)
        struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 
        ppgtt->base.cleanup(&ppgtt->base);
+        kfree(ppgtt);
    }
 
    i915_gem_cleanup_stolen(dev);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index ce14fe09d962..5c06413ae0e6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1536,6 +1536,7 @@ enum skl_disp_power_wells {
 #define BALANCE_LEG_MASK(port)        (7<<(8+3*(port)))
 /* Balance leg disable bits */
 #define BALANCE_LEG_DISABLE_SHIFT    23
+#define BALANCE_LEG_DISABLE(port)    (1 << (23 + (port)))
 
 /*
  * Fence registers
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 6700a7be7f78..d32f586f9c05 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -600,6 +600,8 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
    if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
        return;
 
+    i915_audio_component_get_power(dev);
+
    /*
     * Enable/disable generating the codec wake signal, overriding the
     * internal logic to generate the codec wake to controller.
@@ -615,6 +617,8 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
        I915_WRITE(HSW_AUD_CHICKENBIT, tmp);
        usleep_range(1000, 1500);
    }
+
+    i915_audio_component_put_power(dev);
 }
 
 /* Get CDCLK in kHz  */
@@ -648,6 +652,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
        !IS_HASWELL(dev_priv))
        return 0;
 
+    i915_audio_component_get_power(dev);
    mutex_lock(&dev_priv->av_mutex);
    /* 1. get the pipe */
    intel_encoder = dev_priv->dig_port_map[port];
@@ -698,6 +703,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
 
  unlock:
    mutex_unlock(&dev_priv->av_mutex);
+    i915_audio_component_put_power(dev);
    return err;
 }
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index dd1d6fe12297..1a7efac65fd5 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -145,7 +145,7 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
 static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
    { 0x0000201B, 0x000000A2, 0x0 },
    { 0x00005012, 0x00000088, 0x0 },
-    { 0x80007011, 0x000000CD, 0x0 },
+    { 0x80007011, 0x000000CD, 0x1 },
    { 0x80009010, 0x000000C0, 0x1 },
    { 0x0000201B, 0x0000009D, 0x0 },
    { 0x80005012, 0x000000C0, 0x1 },
@@ -158,7 +158,7 @@ static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
 static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
    { 0x00000018, 0x000000A2, 0x0 },
    { 0x00005012, 0x00000088, 0x0 },
-    { 0x80007011, 0x000000CD, 0x0 },
+    { 0x80007011, 0x000000CD, 0x3 },
    { 0x80009010, 0x000000C0, 0x3 },
    { 0x00000018, 0x0000009D, 0x0 },
    { 0x80005012, 0x000000C0, 0x3 },
@@ -388,6 +388,40 @@ skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
    }
 }
 
+static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
+{
+    int n_hdmi_entries;
+    int hdmi_level;
+    int hdmi_default_entry;
+
+    hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+
+    if (IS_BROXTON(dev_priv))
+        return hdmi_level;
+
+    if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+        skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
+        hdmi_default_entry = 8;
+    } else if (IS_BROADWELL(dev_priv)) {
+        n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+        hdmi_default_entry = 7;
+    } else if (IS_HASWELL(dev_priv)) {
+        n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
+        hdmi_default_entry = 6;
+    } else {
+        WARN(1, "ddi translation table missing\n");
+        n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
+        hdmi_default_entry = 7;
+    }
+
+    /* Choose a good default if VBT is badly populated */
+    if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
+        hdmi_level >= n_hdmi_entries)
+        hdmi_level = hdmi_default_entry;
+
+    return hdmi_level;
+}
+
 /*
  * Starting with Haswell, DDI port buffers must be programmed with correct
  * values in advance. The buffer values are different for FDI and DP modes,
@@ -399,7 +433,7 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
 {
    struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
    u32 iboost_bit = 0;
-    int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry,
+    int i, n_hdmi_entries, n_dp_entries, n_edp_entries,
        size;
    int hdmi_level;
    enum port port;
@@ -410,7 +444,7 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
    const struct ddi_buf_trans *ddi_translations;
 
    port = intel_ddi_get_encoder_port(encoder);
-    hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+    hdmi_level = intel_ddi_hdmi_level(dev_priv, port);
 
    if (IS_BROXTON(dev_priv)) {
        if (encoder->type != INTEL_OUTPUT_HDMI)
@@ -430,7 +464,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
            skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
        ddi_translations_hdmi =
            skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
-        hdmi_default_entry = 8;
        /* If we're boosting the current, set bit 31 of trans1 */
        if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level ||
            dev_priv->vbt.ddi_port_info[port].dp_boost_level)
@@ -456,7 +489,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
 
        n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
        n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
-        hdmi_default_entry = 7;
    } else if (IS_HASWELL(dev_priv)) {
        ddi_translations_fdi = hsw_ddi_translations_fdi;
        ddi_translations_dp = hsw_ddi_translations_dp;
@@ -464,7 +496,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
        ddi_translations_hdmi = hsw_ddi_translations_hdmi;
        n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
        n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
-        hdmi_default_entry = 6;
    } else {
        WARN(1, "ddi translation table missing\n");
        ddi_translations_edp = bdw_ddi_translations_dp;
@@ -474,7 +505,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
        n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
        n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
        n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
-        hdmi_default_entry = 7;
    }
 
    switch (encoder->type) {
@@ -505,11 +535,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
    if (encoder->type != INTEL_OUTPUT_HDMI)
        return;
 
-    /* Choose a good default if VBT is badly populated */
-    if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
-        hdmi_level >= n_hdmi_entries)
-        hdmi_level = hdmi_default_entry;
-
    /* Entry 9 is for HDMI: */
    I915_WRITE(DDI_BUF_TRANS_LO(port, i),
           ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit);
@@ -1379,14 +1404,30 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
               TRANS_CLK_SEL_DISABLED);
 }
 
-static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
-                   u32 level, enum port port, int type)
+static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
+                enum port port, uint8_t iboost)
 {
+    u32 tmp;
+
+    tmp = I915_READ(DISPIO_CR_TX_BMU_CR0);
+    tmp &= ~(BALANCE_LEG_MASK(port) | BALANCE_LEG_DISABLE(port));
+    if (iboost)
+        tmp |= iboost << BALANCE_LEG_SHIFT(port);
+    else
+        tmp |= BALANCE_LEG_DISABLE(port);
+    I915_WRITE(DISPIO_CR_TX_BMU_CR0, tmp);
+}
+
+static void skl_ddi_set_iboost(struct intel_encoder *encoder, u32 level)
+{
+    struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+    struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+    enum port port = intel_dig_port->port;
+    int type = encoder->type;
    const struct ddi_buf_trans *ddi_translations;
    uint8_t iboost;
    uint8_t dp_iboost, hdmi_iboost;
    int n_entries;
-    u32 reg;
 
    /* VBT may override standard boost values */
    dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
@@ -1428,16 +1469,10 @@ static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
        return;
    }
 
-    reg = I915_READ(DISPIO_CR_TX_BMU_CR0);
-    reg &= ~BALANCE_LEG_MASK(port);
-    reg &= ~(1 << (BALANCE_LEG_DISABLE_SHIFT + port));
-
-    if (iboost)
-        reg |= iboost << BALANCE_LEG_SHIFT(port);
-    else
-        reg |= 1 << (BALANCE_LEG_DISABLE_SHIFT + port);
+    _skl_ddi_set_iboost(dev_priv, port, iboost);
 
-    I915_WRITE(DISPIO_CR_TX_BMU_CR0, reg);
+    if (port == PORT_A && intel_dig_port->max_lanes == 4)
+        _skl_ddi_set_iboost(dev_priv, PORT_E, iboost);
 }
 
 static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
@@ -1568,7 +1603,7 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
    level = translate_signal_level(signal_levels);
 
    if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
-        skl_ddi_set_iboost(dev_priv, level, port, encoder->type);
+        skl_ddi_set_iboost(encoder, level);
    else if (IS_BROXTON(dev_priv))
        bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
 
@@ -1637,6 +1672,10 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
        intel_dp_stop_link_train(intel_dp);
    } else if (type == INTEL_OUTPUT_HDMI) {
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+        int level = intel_ddi_hdmi_level(dev_priv, port);
+
+        if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+            skl_ddi_set_iboost(intel_encoder, level);
 
        intel_hdmi->set_infoframes(encoder,
                       crtc->config->has_hdmi_sink,
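The intel_display.c rework below replaces drm_modeset_lock_all() with an explicit acquire context, so GPU reset can hold the same context across prepare/finish. The deadlock-avoidance loop it uses follows the standard drm acquire/backoff pattern, sketched here (error handling trimmed; APIs are the ones used in the hunks below):

struct drm_modeset_acquire_ctx ctx;
int ret;

drm_modeset_acquire_init(&ctx, 0);
while (1) {
    ret = drm_modeset_lock_all_ctx(dev, &ctx);
    if (ret != -EDEADLK)
        break;                     /* got all locks, or a real error */
    drm_modeset_backoff(&ctx);     /* drop everything, wait, retry */
}
/* ... modify state under the locks ... */
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);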
+ */ + mutex_lock(&dev->mode_config.mutex); + drm_modeset_acquire_init(ctx, 0); + while (1) { + ret = drm_modeset_lock_all_ctx(dev, ctx); + if (ret != -EDEADLK) + break; + + drm_modeset_backoff(ctx); + } + + /* reset doesn't touch the display, but flips might get nuked anyway, */ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) return; - drm_modeset_lock_all(&dev_priv->drm); /* * Disabling the crtcs gracefully seems nicer. Also the * g33 docs say we should at least disable all the planes. */ - intel_display_suspend(&dev_priv->drm); + state = drm_atomic_helper_duplicate_state(dev, ctx); + if (IS_ERR(state)) { + ret = PTR_ERR(state); + state = NULL; + DRM_ERROR("Duplicating state failed with %i\n", ret); + goto err; + } + + ret = drm_atomic_helper_disable_all(dev, ctx); + if (ret) { + DRM_ERROR("Suspending crtc's failed with %i\n", ret); + goto err; + } + + dev_priv->modeset_restore_state = state; + state->acquire_ctx = ctx; + return; + +err: + drm_atomic_state_free(state); } void intel_finish_reset(struct drm_i915_private *dev_priv) { + struct drm_device *dev = &dev_priv->drm; + struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; + struct drm_atomic_state *state = dev_priv->modeset_restore_state; + int ret; + /* * Flips in the rings will be nuked by the reset, * so complete all pending flips so that user space @@ -3138,6 +3208,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) if (IS_GEN2(dev_priv)) return; + dev_priv->modeset_restore_state = NULL; + /* reset doesn't touch the display */ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) { /* @@ -3149,29 +3221,32 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) * FIXME: Atomic will make this obsolete since we won't schedule * CS-based flips (which might get lost in gpu resets) any more. */ - intel_update_primary_planes(&dev_priv->drm); - return; - } - - /* - * The display has been reset as well, - * so need a full re-initialization. - */ - intel_runtime_pm_disable_interrupts(dev_priv); - intel_runtime_pm_enable_interrupts(dev_priv); + intel_update_primary_planes(dev); + } else { + /* + * The display has been reset as well, + * so need a full re-initialization. 
+ */ + intel_runtime_pm_disable_interrupts(dev_priv); + intel_runtime_pm_enable_interrupts(dev_priv); - intel_modeset_init_hw(&dev_priv->drm); + intel_modeset_init_hw(dev); - spin_lock_irq(&dev_priv->irq_lock); - if (dev_priv->display.hpd_irq_setup) - dev_priv->display.hpd_irq_setup(dev_priv); - spin_unlock_irq(&dev_priv->irq_lock); + spin_lock_irq(&dev_priv->irq_lock); + if (dev_priv->display.hpd_irq_setup) + dev_priv->display.hpd_irq_setup(dev_priv); + spin_unlock_irq(&dev_priv->irq_lock); - intel_display_resume(&dev_priv->drm); + ret = __intel_display_resume(dev, state); + if (ret) + DRM_ERROR("Restoring old state failed with %i\n", ret); - intel_hpd_init(dev_priv); + intel_hpd_init(dev_priv); + } - drm_modeset_unlock_all(&dev_priv->drm); + drm_modeset_drop_locks(ctx); + drm_modeset_acquire_fini(ctx); + mutex_unlock(&dev->mode_config.mutex); } static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) @@ -16156,9 +16231,10 @@ void intel_display_resume(struct drm_device *dev) struct drm_atomic_state *state = dev_priv->modeset_restore_state; struct drm_modeset_acquire_ctx ctx; int ret; - bool setup = false; dev_priv->modeset_restore_state = NULL; + if (state) + state->acquire_ctx = &ctx; /* * This is a cludge because with real atomic modeset mode_config.mutex @@ -16169,43 +16245,17 @@ void intel_display_resume(struct drm_device *dev) mutex_lock(&dev->mode_config.mutex); drm_modeset_acquire_init(&ctx, 0); -retry: - ret = drm_modeset_lock_all_ctx(dev, &ctx); - - if (ret == 0 && !setup) { - setup = true; - - intel_modeset_setup_hw_state(dev); - i915_redisable_vga(dev); - } - - if (ret == 0 && state) { - struct drm_crtc_state *crtc_state; - struct drm_crtc *crtc; - int i; - - state->acquire_ctx = &ctx; - - /* ignore any reset values/BIOS leftovers in the WM registers */ - to_intel_atomic_state(state)->skip_intermediate_wm = true; - - for_each_crtc_in_state(state, crtc, crtc_state, i) { - /* - * Force recalculation even if we restore - * current state. With fast modeset this may not result - * in a modeset when the state is compatible. 
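Taken together, intel_prepare_reset() and intel_finish_reset() now form a save/disable/restore cycle around GPU reset: duplicate the atomic state, disable all CRTCs, then commit the duplicate afterwards. A hedged sketch of that shape with error handling trimmed and invented function names; it assumes the contract of this kernel generation, where drm_atomic_commit() consumes the state on success and drm_atomic_state_free() cleans up otherwise:

        static struct drm_atomic_state *example_saved_state;

        static void example_before_reset(struct drm_device *dev,
                                         struct drm_modeset_acquire_ctx *ctx)
        {
                struct drm_atomic_state *state;

                state = drm_atomic_helper_duplicate_state(dev, ctx);
                if (IS_ERR(state))
                        return;                 /* nothing saved; restore becomes a no-op */

                if (drm_atomic_helper_disable_all(dev, ctx)) {
                        drm_atomic_state_free(state);
                        return;
                }
                example_saved_state = state;
        }

        static void example_after_reset(struct drm_device *dev,
                                        struct drm_modeset_acquire_ctx *ctx)
        {
                struct drm_atomic_state *state = example_saved_state;

                example_saved_state = NULL;
                if (!state)
                        return;

                state->acquire_ctx = ctx;
                if (drm_atomic_commit(state))   /* on success the commit owns the state */
                        drm_atomic_state_free(state);
        }
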
- */ - crtc_state->mode_changed = true; - } - - ret = drm_atomic_commit(state); - } + while (1) { + ret = drm_modeset_lock_all_ctx(dev, &ctx); + if (ret != -EDEADLK) + break; - if (ret == -EDEADLK) { drm_modeset_backoff(&ctx); - goto retry; } + if (!ret) + ret = __intel_display_resume(dev, state); + drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); mutex_unlock(&dev->mode_config.mutex); diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index 6a7ad3ed1463..3836a1c79714 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -1230,12 +1230,29 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv) if (i915.enable_fbc >= 0) return !!i915.enable_fbc; + if (!HAS_FBC(dev_priv)) + return 0; + if (IS_BROADWELL(dev_priv)) return 1; return 0; } +static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv) +{ +#ifdef CONFIG_INTEL_IOMMU + /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */ + if (intel_iommu_gfx_mapped && + (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) { + DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n"); + return true; + } +#endif + + return false; +} + /** * intel_fbc_init - Initialize FBC * @dev_priv: the i915 device @@ -1253,6 +1270,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv) fbc->active = false; fbc->work.scheduled = false; + if (need_fbc_vtd_wa(dev_priv)) + mkwrite_device_info(dev_priv)->has_fbc = false; + i915.enable_fbc = intel_sanitize_fbc_option(dev_priv); DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 97ba6c8cf907..d5deb58a2128 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3344,6 +3344,8 @@ static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, plane_bytes_per_line *= 4; plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); plane_blocks_per_line /= 4; + } else if (tiling == DRM_FORMAT_MOD_NONE) { + plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1; } else { plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); } @@ -6574,9 +6576,7 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv) void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv) { - if (IS_CHERRYVIEW(dev_priv)) - return; - else if (IS_VALLEYVIEW(dev_priv)) + if (IS_VALLEYVIEW(dev_priv)) valleyview_cleanup_gt_powersave(dev_priv); if (!i915.enable_rc6) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index cca7792f26d5..1d3161bbea24 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1178,8 +1178,8 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2)); - /* WaInsertDummyPushConstPs:bxt */ - if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) + /* WaToEnableHwFixForPushConstHWBug:bxt */ + if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER)) WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); @@ -1222,8 +1222,8 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine) I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_RO_PERF_DIS); - /* WaInsertDummyPushConstPs:kbl */ - if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) + /* WaToEnableHwFixForPushConstHWBug:kbl */ + if (IS_KBL_REVID(dev_priv, 
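The intel_pm.c hunk above reserves one extra 512-byte block per line for linear (DRM_FORMAT_MOD_NONE) framebuffers when computing display watermarks. A small worked example of the arithmetic; the plane width and bytes-per-pixel are made-up values:

        #include <stdio.h>

        #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

        int main(void)
        {
                unsigned int width = 3840, cpp = 4;             /* hypothetical 4K XRGB plane */
                unsigned int bytes_per_line = width * cpp;      /* 15360 */

                unsigned int tiled  = DIV_ROUND_UP(bytes_per_line, 512);        /* 30 */
                unsigned int linear = DIV_ROUND_UP(bytes_per_line, 512) + 1;    /* 31 */

                printf("tiled=%u blocks, linear=%u blocks\n", tiled, linear);
                return 0;
        }
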
KBL_REVID_C0, REVID_FOREVER)) WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig index 23ac8041c562..294de4549922 100644 --- a/drivers/gpu/drm/mediatek/Kconfig +++ b/drivers/gpu/drm/mediatek/Kconfig @@ -2,6 +2,9 @@ config DRM_MEDIATEK tristate "DRM Support for Mediatek SoCs" depends on DRM depends on ARCH_MEDIATEK || (ARM && COMPILE_TEST) + depends on COMMON_CLK + depends on HAVE_ARM_SMCCC + depends on OF select DRM_GEM_CMA_HELPER select DRM_KMS_HELPER select DRM_MIPI_DSI diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index 6de342861202..ddef0d494084 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -198,16 +198,7 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx) atpx->is_hybrid = false; if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { printk("ATPX Hybrid Graphics\n"); -#if 1 - /* This is a temporary hack until the D3 cold support - * makes it upstream. The ATPX power_control method seems - * to still work on even if the system should be using - * the new standardized hybrid D3 cold ACPI interface. - */ - atpx->functions.power_cntl = true; -#else atpx->functions.power_cntl = false; -#endif atpx->is_hybrid = true; } diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c index 730d84028260..d0203a115eff 100644 --- a/drivers/hwmon/it87.c +++ b/drivers/hwmon/it87.c @@ -491,7 +491,7 @@ struct it87_sio_data { struct it87_data { const struct attribute_group *groups[7]; enum chips type; - u16 features; + u32 features; u8 peci_mask; u8 old_peci_mask; diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c index f23372669f77..1bb97f658b47 100644 --- a/drivers/i2c/busses/i2c-at91.c +++ b/drivers/i2c/busses/i2c-at91.c @@ -38,6 +38,7 @@ #define AT91_I2C_TIMEOUT msecs_to_jiffies(100) /* transfer timeout */ #define AT91_I2C_DMA_THRESHOLD 8 /* enable DMA if transfer size is bigger than this threshold */ #define AUTOSUSPEND_TIMEOUT 2000 +#define AT91_I2C_MAX_ALT_CMD_DATA_SIZE 256 /* AT91 TWI register definitions */ #define AT91_TWI_CR 0x0000 /* Control Register */ @@ -141,6 +142,7 @@ struct at91_twi_dev { unsigned twi_cwgr_reg; struct at91_twi_pdata *pdata; bool use_dma; + bool use_alt_cmd; bool recv_len_abort; u32 fifo_size; struct at91_twi_dma dma; @@ -269,7 +271,7 @@ static void at91_twi_write_next_byte(struct at91_twi_dev *dev) /* send stop when last byte has been written */ if (--dev->buf_len == 0) - if (!dev->pdata->has_alt_cmd) + if (!dev->use_alt_cmd) at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); dev_dbg(dev->dev, "wrote 0x%x, to go %d\n", *dev->buf, dev->buf_len); @@ -292,7 +294,7 @@ static void at91_twi_write_data_dma_callback(void *data) * we just have to enable TXCOMP one. 
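The it87 change widens the per-chip feature bitmap from u16 to u32, presumably because the driver's FEAT_* flags had outgrown 16 bits; any flag at bit 16 or above silently truncates to zero when stored in a u16, so feature tests would always fail. A userspace illustration with a hypothetical 17th flag:

        #include <stdio.h>
        #include <stdint.h>

        #define FEAT_SEVENTEENTH        (1U << 16)      /* hypothetical 17th feature bit */

        int main(void)
        {
                uint16_t narrow = FEAT_SEVENTEENTH;     /* truncates: the flag is lost */
                uint32_t wide   = FEAT_SEVENTEENTH;

                printf("u16=%#x u32=%#x\n", narrow, wide);      /* u16=0 u32=0x10000 */
                return 0;
        }
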
*/ at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP); - if (!dev->pdata->has_alt_cmd) + if (!dev->use_alt_cmd) at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); } @@ -410,7 +412,7 @@ static void at91_twi_read_next_byte(struct at91_twi_dev *dev) } /* send stop if second but last byte has been read */ - if (!dev->pdata->has_alt_cmd && dev->buf_len == 1) + if (!dev->use_alt_cmd && dev->buf_len == 1) at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); dev_dbg(dev->dev, "read 0x%x, to go %d\n", *dev->buf, dev->buf_len); @@ -426,7 +428,7 @@ static void at91_twi_read_data_dma_callback(void *data) dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]), dev->buf_len, DMA_FROM_DEVICE); - if (!dev->pdata->has_alt_cmd) { + if (!dev->use_alt_cmd) { /* The last two bytes have to be read without using dma */ dev->buf += dev->buf_len - 2; dev->buf_len = 2; @@ -443,7 +445,7 @@ static void at91_twi_read_data_dma(struct at91_twi_dev *dev) struct dma_chan *chan_rx = dma->chan_rx; size_t buf_len; - buf_len = (dev->pdata->has_alt_cmd) ? dev->buf_len : dev->buf_len - 2; + buf_len = (dev->use_alt_cmd) ? dev->buf_len : dev->buf_len - 2; dma->direction = DMA_FROM_DEVICE; /* Keep in mind that we won't use dma to read the last two bytes */ @@ -651,7 +653,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) unsigned start_flags = AT91_TWI_START; /* if only one byte is to be read, immediately stop transfer */ - if (!has_alt_cmd && dev->buf_len <= 1 && + if (!dev->use_alt_cmd && dev->buf_len <= 1 && !(dev->msg->flags & I2C_M_RECV_LEN)) start_flags |= AT91_TWI_STOP; at91_twi_write(dev, AT91_TWI_CR, start_flags); @@ -745,7 +747,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) int ret; unsigned int_addr_flag = 0; struct i2c_msg *m_start = msg; - bool is_read, use_alt_cmd = false; + bool is_read; dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num); @@ -768,14 +770,16 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) at91_twi_write(dev, AT91_TWI_IADR, internal_address); } + dev->use_alt_cmd = false; is_read = (m_start->flags & I2C_M_RD); if (dev->pdata->has_alt_cmd) { - if (m_start->len > 0) { + if (m_start->len > 0 && + m_start->len < AT91_I2C_MAX_ALT_CMD_DATA_SIZE) { at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMEN); at91_twi_write(dev, AT91_TWI_ACR, AT91_TWI_ACR_DATAL(m_start->len) | ((is_read) ? AT91_TWI_ACR_DIR : 0)); - use_alt_cmd = true; + dev->use_alt_cmd = true; } else { at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMDIS); } @@ -784,7 +788,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) at91_twi_write(dev, AT91_TWI_MMR, (m_start->addr << 16) | int_addr_flag | - ((!use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0)); + ((!dev->use_alt_cmd && is_read) ? 
AT91_TWI_MREAD : 0)); dev->buf_len = m_start->len; dev->buf = m_start->buf; diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c index 19c843828fe2..95f7cac76f89 100644 --- a/drivers/i2c/busses/i2c-bcm-iproc.c +++ b/drivers/i2c/busses/i2c-bcm-iproc.c @@ -158,7 +158,7 @@ static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data) if (status & BIT(IS_M_START_BUSY_SHIFT)) { iproc_i2c->xfer_is_done = 1; - complete_all(&iproc_i2c->done); + complete(&iproc_i2c->done); } writel(status, iproc_i2c->base + IS_OFFSET); diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c index ac9f47679c3a..f98743277e3c 100644 --- a/drivers/i2c/busses/i2c-bcm-kona.c +++ b/drivers/i2c/busses/i2c-bcm-kona.c @@ -229,7 +229,7 @@ static irqreturn_t bcm_kona_i2c_isr(int irq, void *devid) dev->base + TXFCR_OFFSET); writel(status & ~ISR_RESERVED_MASK, dev->base + ISR_OFFSET); - complete_all(&dev->done); + complete(&dev->done); return IRQ_HANDLED; } diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c index 3f5a4d71d3bf..385b57bfcb38 100644 --- a/drivers/i2c/busses/i2c-brcmstb.c +++ b/drivers/i2c/busses/i2c-brcmstb.c @@ -228,7 +228,7 @@ static irqreturn_t brcmstb_i2c_isr(int irq, void *devid) return IRQ_NONE; brcmstb_i2c_enable_disable_irq(dev, INT_DISABLE); - complete_all(&dev->done); + complete(&dev->done); dev_dbg(dev->device, "isr handled"); return IRQ_HANDLED; diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c index a0d95ff682ae..2d5ff86398d0 100644 --- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c +++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c @@ -215,7 +215,7 @@ static int ec_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg i2c_msgs[], msg->outsize = request_len; msg->insize = response_len; - result = cros_ec_cmd_xfer(bus->ec, msg); + result = cros_ec_cmd_xfer_status(bus->ec, msg); if (result < 0) { dev_err(dev, "Error transferring EC i2c message %d\n", result); goto exit; diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c index 71d3929adf54..76e28980904f 100644 --- a/drivers/i2c/busses/i2c-meson.c +++ b/drivers/i2c/busses/i2c-meson.c @@ -211,7 +211,7 @@ static void meson_i2c_stop(struct meson_i2c *i2c) meson_i2c_add_token(i2c, TOKEN_STOP); } else { i2c->state = STATE_IDLE; - complete_all(&i2c->done); + complete(&i2c->done); } } @@ -238,7 +238,7 @@ static irqreturn_t meson_i2c_irq(int irqno, void *dev_id) dev_dbg(i2c->dev, "error bit set\n"); i2c->error = -ENXIO; i2c->state = STATE_IDLE; - complete_all(&i2c->done); + complete(&i2c->done); goto out; } @@ -269,7 +269,7 @@ static irqreturn_t meson_i2c_irq(int irqno, void *dev_id) break; case STATE_STOP: i2c->state = STATE_IDLE; - complete_all(&i2c->done); + complete(&i2c->done); break; case STATE_IDLE: break; diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c index dfa7a4b4a91d..ac88a524143e 100644 --- a/drivers/i2c/busses/i2c-ocores.c +++ b/drivers/i2c/busses/i2c-ocores.c @@ -379,6 +379,7 @@ static int ocores_i2c_of_probe(struct platform_device *pdev, if (!clock_frequency_present) { dev_err(&pdev->dev, "Missing required parameter 'opencores,ip-clock-frequency'\n"); + clk_disable_unprepare(i2c->clk); return -ENODEV; } i2c->ip_clock_khz = clock_frequency / 1000; @@ -467,20 +468,21 @@ static int ocores_i2c_probe(struct platform_device *pdev) default: dev_err(&pdev->dev, "Unsupported I/O width (%d)\n", i2c->reg_io_width); - return -EINVAL; + ret = -EINVAL; + goto err_clk; } } ret = 
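Several i2c ISRs in this run change complete_all() to complete(). complete_all() marks the completion done permanently, so after the first transfer every later wait_for_completion() would return immediately unless the driver remembered to reinit_completion() first; complete() wakes exactly one waiter and keeps the done count balanced, which matches the one-waiter-per-transfer pattern of these drivers. A kernel-style sketch with hypothetical names:

        static DECLARE_COMPLETION(example_done);

        static irqreturn_t example_isr(int irq, void *dev_id)
        {
                /* ... read and ack the hardware status here ... */
                complete(&example_done);        /* wake the single waiting transfer */
                return IRQ_HANDLED;
        }

        static int example_do_transfer(void)
        {
                reinit_completion(&example_done);       /* still wise before each transfer */
                /* ... kick off the hardware ... */
                if (!wait_for_completion_timeout(&example_done,
                                                 msecs_to_jiffies(100)))
                        return -ETIMEDOUT;
                return 0;
        }
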
ocores_init(&pdev->dev, i2c); if (ret) - return ret; + goto err_clk; init_waitqueue_head(&i2c->wait); ret = devm_request_irq(&pdev->dev, irq, ocores_isr, 0, pdev->name, i2c); if (ret) { dev_err(&pdev->dev, "Cannot claim IRQ\n"); - return ret; + goto err_clk; } /* hook up driver to tree */ @@ -494,7 +496,7 @@ static int ocores_i2c_probe(struct platform_device *pdev) ret = i2c_add_adapter(&i2c->adap); if (ret) { dev_err(&pdev->dev, "Failed to add adapter\n"); - return ret; + goto err_clk; } /* add in known devices to the bus */ @@ -504,6 +506,10 @@ static int ocores_i2c_probe(struct platform_device *pdev) } return 0; + +err_clk: + clk_disable_unprepare(i2c->clk); + return ret; } static int ocores_i2c_remove(struct platform_device *pdev) diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c index 8de073aed001..215ac87f606d 100644 --- a/drivers/i2c/muxes/i2c-demux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c @@ -68,7 +68,7 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne adap = of_find_i2c_adapter_by_node(priv->chan[new_chan].parent_np); if (!adap) { ret = -ENODEV; - goto err; + goto err_with_revert; } p = devm_pinctrl_get_select(adap->dev.parent, priv->bus_name); @@ -103,6 +103,8 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne err_with_put: i2c_put_adapter(adap); + err_with_revert: + of_changeset_revert(&priv->chan[new_chan].chgset); err: dev_err(priv->dev, "failed to setup demux-adapter %d (%d)\n", new_chan, ret); return ret; diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 96de97a46079..a7aa0e76eebd 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -137,6 +137,7 @@ struct iommu_dev_data { bool pri_tlp; /* PASID TLB required for PPR completions */ u32 errata; /* Bitmap for errata to apply */ + bool use_vapic; /* Enable device to use vapic mode */ }; /* @@ -707,14 +708,74 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu) } } +#ifdef CONFIG_IRQ_REMAP +static int (*iommu_ga_log_notifier)(u32); + +int amd_iommu_register_ga_log_notifier(int (*notifier)(u32)) +{ + iommu_ga_log_notifier = notifier; + + return 0; +} +EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier); + +static void iommu_poll_ga_log(struct amd_iommu *iommu) +{ + u32 head, tail, cnt = 0; + + if (iommu->ga_log == NULL) + return; + + head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET); + tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET); + + while (head != tail) { + volatile u64 *raw; + u64 log_entry; + + raw = (u64 *)(iommu->ga_log + head); + cnt++; + + /* Avoid memcpy function-call overhead */ + log_entry = *raw; + + /* Update head pointer of hardware ring-buffer */ + head = (head + GA_ENTRY_SIZE) % GA_LOG_SIZE; + writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); + + /* Handle GA entry */ + switch (GA_REQ_TYPE(log_entry)) { + case GA_GUEST_NR: + if (!iommu_ga_log_notifier) + break; + + pr_debug("AMD-Vi: %s: devid=%#x, ga_tag=%#x\n", + __func__, GA_DEVID(log_entry), + GA_TAG(log_entry)); + + if (iommu_ga_log_notifier(GA_TAG(log_entry)) != 0) + pr_err("AMD-Vi: GA log notifier failed.\n"); + break; + default: + break; + } + } +} +#endif /* CONFIG_IRQ_REMAP */ + +#define AMD_IOMMU_INT_MASK \ + (MMIO_STATUS_EVT_INT_MASK | \ + MMIO_STATUS_PPR_INT_MASK | \ + MMIO_STATUS_GALOG_INT_MASK) + irqreturn_t amd_iommu_int_thread(int irq, void *data) { struct amd_iommu *iommu = (struct amd_iommu *) data; u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); - while 
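The ocores probe fix is the standard reverse-order goto unwind: once the clock is enabled, every later failure must disable it again before returning. A self-contained illustration; the acquire/release pairs below are stand-ins for clk_prepare_enable()/clk_disable_unprepare(), devm_request_irq(), and i2c_add_adapter():

        #include <stdio.h>

        static int  enable_clock(void)   { puts("clk on");   return 0; }
        static void disable_clock(void)  { puts("clk off");  }
        static int  claim_irq(void)      { puts("irq ok");   return 0; }
        static int  add_adapter(void)    { puts("adap bad"); return -1; }   /* fails */

        static int example_probe(void)
        {
                int ret;

                ret = enable_clock();
                if (ret)
                        return ret;

                ret = claim_irq();
                if (ret)
                        goto err_clk;

                ret = add_adapter();
                if (ret)
                        goto err_clk;   /* devm-managed IRQ needs no manual free */

                return 0;

        err_clk:
                disable_clock();        /* undo in reverse order of acquisition */
                return ret;
        }

        int main(void)
        {
                printf("probe -> %d\n", example_probe());
                return 0;
        }

The i2c-demux fix in the same run is the same idea applied to an existing ladder: a new err_with_revert label slots in so the of_changeset is reverted in the correct reverse order.
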
(status & (MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK)) { - /* Enable EVT and PPR interrupts again */ - writel((MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK), + while (status & AMD_IOMMU_INT_MASK) { + /* Enable EVT and PPR and GA interrupts again */ + writel(AMD_IOMMU_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET); if (status & MMIO_STATUS_EVT_INT_MASK) { @@ -727,6 +788,13 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data) iommu_poll_ppr_log(iommu); } +#ifdef CONFIG_IRQ_REMAP + if (status & MMIO_STATUS_GALOG_INT_MASK) { + pr_devel("AMD-Vi: Processing IOMMU GA Log\n"); + iommu_poll_ga_log(iommu); + } +#endif + /* * Hardware bug: ERBT1312 * When re-enabling interrupt (by writing 1 @@ -2948,6 +3016,12 @@ static void amd_iommu_detach_device(struct iommu_domain *dom, if (!iommu) return; +#ifdef CONFIG_IRQ_REMAP + if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) && + (dom->type == IOMMU_DOMAIN_UNMANAGED)) + dev_data->use_vapic = 0; +#endif + iommu_completion_wait(iommu); } @@ -2973,6 +3047,15 @@ static int amd_iommu_attach_device(struct iommu_domain *dom, ret = attach_device(dev, domain); +#ifdef CONFIG_IRQ_REMAP + if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) { + if (dom->type == IOMMU_DOMAIN_UNMANAGED) + dev_data->use_vapic = 1; + else + dev_data->use_vapic = 0; + } +#endif + iommu_completion_wait(iommu); return ret; @@ -3511,34 +3594,6 @@ EXPORT_SYMBOL(amd_iommu_device_info); * *****************************************************************************/ -union irte { - u32 val; - struct { - u32 valid : 1, - no_fault : 1, - int_type : 3, - rq_eoi : 1, - dm : 1, - rsvd_1 : 1, - destination : 8, - vector : 8, - rsvd_2 : 8; - } fields; -}; - -struct irq_2_irte { - u16 devid; /* Device ID for IRTE table */ - u16 index; /* Index into IRTE table*/ -}; - -struct amd_ir_data { - struct irq_2_irte irq_2_irte; - union irte irte_entry; - union { - struct msi_msg msi_entry; - }; -}; - static struct irq_chip amd_ir_chip; #define DTE_IRQ_PHYS_ADDR_MASK (((1ULL << 45)-1) << 6) @@ -3560,8 +3615,6 @@ static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table) amd_iommu_dev_table[devid].data[2] = dte; } -#define IRTE_ALLOCATED (~1U) - static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic) { struct irq_remap_table *table = NULL; @@ -3607,13 +3660,18 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic) goto out; } - memset(table->table, 0, MAX_IRQS_PER_TABLE * sizeof(u32)); + if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir)) + memset(table->table, 0, + MAX_IRQS_PER_TABLE * sizeof(u32)); + else + memset(table->table, 0, + (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2))); if (ioapic) { int i; for (i = 0; i < 32; ++i) - table->table[i] = IRTE_ALLOCATED; + iommu->irte_ops->set_allocated(table, i); } irq_lookup_table[devid] = table; @@ -3639,6 +3697,10 @@ static int alloc_irq_index(u16 devid, int count) struct irq_remap_table *table; unsigned long flags; int index, c; + struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; + + if (!iommu) + return -ENODEV; table = get_irq_table(devid, false); if (!table) @@ -3650,14 +3712,14 @@ static int alloc_irq_index(u16 devid, int count) for (c = 0, index = table->min_index; index < MAX_IRQS_PER_TABLE; ++index) { - if (table->table[index] == 0) + if (!iommu->irte_ops->is_allocated(table, index)) c += 1; else c = 0; if (c == count) { for (; c != 0; --c) - table->table[index - c + 1] = IRTE_ALLOCATED; + iommu->irte_ops->set_allocated(table, index - c + 1); index -= count - 1; goto out; @@ -3672,7 +3734,42 @@ out: 
return index; } -static int modify_irte(u16 devid, int index, union irte irte) +static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte, + struct amd_ir_data *data) +{ + struct irq_remap_table *table; + struct amd_iommu *iommu; + unsigned long flags; + struct irte_ga *entry; + + iommu = amd_iommu_rlookup_table[devid]; + if (iommu == NULL) + return -EINVAL; + + table = get_irq_table(devid, false); + if (!table) + return -ENOMEM; + + spin_lock_irqsave(&table->lock, flags); + + entry = (struct irte_ga *)table->table; + entry = &entry[index]; + entry->lo.fields_remap.valid = 0; + entry->hi.val = irte->hi.val; + entry->lo.val = irte->lo.val; + entry->lo.fields_remap.valid = 1; + if (data) + data->ref = entry; + + spin_unlock_irqrestore(&table->lock, flags); + + iommu_flush_irt(iommu, devid); + iommu_completion_wait(iommu); + + return 0; +} + +static int modify_irte(u16 devid, int index, union irte *irte) { struct irq_remap_table *table; struct amd_iommu *iommu; @@ -3687,7 +3784,7 @@ static int modify_irte(u16 devid, int index, union irte irte) return -ENOMEM; spin_lock_irqsave(&table->lock, flags); - table->table[index] = irte.val; + table->table[index] = irte->val; spin_unlock_irqrestore(&table->lock, flags); iommu_flush_irt(iommu, devid); @@ -3711,13 +3808,146 @@ static void free_irte(u16 devid, int index) return; spin_lock_irqsave(&table->lock, flags); - table->table[index] = 0; + iommu->irte_ops->clear_allocated(table, index); spin_unlock_irqrestore(&table->lock, flags); iommu_flush_irt(iommu, devid); iommu_completion_wait(iommu); } +static void irte_prepare(void *entry, + u32 delivery_mode, u32 dest_mode, + u8 vector, u32 dest_apicid, int devid) +{ + union irte *irte = (union irte *) entry; + + irte->val = 0; + irte->fields.vector = vector; + irte->fields.int_type = delivery_mode; + irte->fields.destination = dest_apicid; + irte->fields.dm = dest_mode; + irte->fields.valid = 1; +} + +static void irte_ga_prepare(void *entry, + u32 delivery_mode, u32 dest_mode, + u8 vector, u32 dest_apicid, int devid) +{ + struct irte_ga *irte = (struct irte_ga *) entry; + struct iommu_dev_data *dev_data = search_dev_data(devid); + + irte->lo.val = 0; + irte->hi.val = 0; + irte->lo.fields_remap.guest_mode = dev_data ? 
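modify_irte_ga() cannot rewrite a 16-byte IRTE atomically while the IOMMU may be reading it, so it clears the valid bit, rewrites both halves, and only then sets valid again; during the window the hardware skips the entry instead of seeing a torn mix of old and new fields. A hedged sketch of that sequencing; the struct and names are invented here, with bit 0 of the low word as valid, as in irte_ga:

        #include <stdint.h>

        struct example_irte {
                volatile uint64_t lo;   /* bit 0 = valid */
                volatile uint64_t hi;
        };

        static void example_irte_update(struct example_irte *e,
                                        uint64_t new_lo, uint64_t new_hi)
        {
                e->lo &= ~1ULL;         /* 1: invalidate; hardware ignores the entry */
                e->hi  = new_hi;        /* 2: rewrite both halves at leisure */
                e->lo  = new_lo | 1ULL; /* 3: republish with valid set */
                /* 4: in the driver, flush the IRT cache and wait for completion */
        }
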
dev_data->use_vapic : 0; + irte->lo.fields_remap.int_type = delivery_mode; + irte->lo.fields_remap.dm = dest_mode; + irte->hi.fields.vector = vector; + irte->lo.fields_remap.destination = dest_apicid; + irte->lo.fields_remap.valid = 1; +} + +static void irte_activate(void *entry, u16 devid, u16 index) +{ + union irte *irte = (union irte *) entry; + + irte->fields.valid = 1; + modify_irte(devid, index, irte); +} + +static void irte_ga_activate(void *entry, u16 devid, u16 index) +{ + struct irte_ga *irte = (struct irte_ga *) entry; + + irte->lo.fields_remap.valid = 1; + modify_irte_ga(devid, index, irte, NULL); +} + +static void irte_deactivate(void *entry, u16 devid, u16 index) +{ + union irte *irte = (union irte *) entry; + + irte->fields.valid = 0; + modify_irte(devid, index, irte); +} + +static void irte_ga_deactivate(void *entry, u16 devid, u16 index) +{ + struct irte_ga *irte = (struct irte_ga *) entry; + + irte->lo.fields_remap.valid = 0; + modify_irte_ga(devid, index, irte, NULL); +} + +static void irte_set_affinity(void *entry, u16 devid, u16 index, + u8 vector, u32 dest_apicid) +{ + union irte *irte = (union irte *) entry; + + irte->fields.vector = vector; + irte->fields.destination = dest_apicid; + modify_irte(devid, index, irte); +} + +static void irte_ga_set_affinity(void *entry, u16 devid, u16 index, + u8 vector, u32 dest_apicid) +{ + struct irte_ga *irte = (struct irte_ga *) entry; + struct iommu_dev_data *dev_data = search_dev_data(devid); + + if (!dev_data || !dev_data->use_vapic) { + irte->hi.fields.vector = vector; + irte->lo.fields_remap.destination = dest_apicid; + irte->lo.fields_remap.guest_mode = 0; + modify_irte_ga(devid, index, irte, NULL); + } +} + +#define IRTE_ALLOCATED (~1U) +static void irte_set_allocated(struct irq_remap_table *table, int index) +{ + table->table[index] = IRTE_ALLOCATED; +} + +static void irte_ga_set_allocated(struct irq_remap_table *table, int index) +{ + struct irte_ga *ptr = (struct irte_ga *)table->table; + struct irte_ga *irte = &ptr[index]; + + memset(&irte->lo.val, 0, sizeof(u64)); + memset(&irte->hi.val, 0, sizeof(u64)); + irte->hi.fields.vector = 0xff; +} + +static bool irte_is_allocated(struct irq_remap_table *table, int index) +{ + union irte *ptr = (union irte *)table->table; + union irte *irte = &ptr[index]; + + return irte->val != 0; +} + +static bool irte_ga_is_allocated(struct irq_remap_table *table, int index) +{ + struct irte_ga *ptr = (struct irte_ga *)table->table; + struct irte_ga *irte = &ptr[index]; + + return irte->hi.fields.vector != 0; +} + +static void irte_clear_allocated(struct irq_remap_table *table, int index) +{ + table->table[index] = 0; +} + +static void irte_ga_clear_allocated(struct irq_remap_table *table, int index) +{ + struct irte_ga *ptr = (struct irte_ga *)table->table; + struct irte_ga *irte = &ptr[index]; + + memset(&irte->lo.val, 0, sizeof(u64)); + memset(&irte->hi.val, 0, sizeof(u64)); +} + static int get_devid(struct irq_alloc_info *info) { int devid = -1; @@ -3802,19 +4032,17 @@ static void irq_remapping_prepare_irte(struct amd_ir_data *data, { struct irq_2_irte *irte_info = &data->irq_2_irte; struct msi_msg *msg = &data->msi_entry; - union irte *irte = &data->irte_entry; struct IO_APIC_route_entry *entry; + struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; + + if (!iommu) + return; data->irq_2_irte.devid = devid; data->irq_2_irte.index = index + sub_handle; - - /* Setup IRTE for IOMMU */ - irte->val = 0; - irte->fields.vector = irq_cfg->vector; - irte->fields.int_type = 
apic->irq_delivery_mode; - irte->fields.destination = irq_cfg->dest_apicid; - irte->fields.dm = apic->irq_dest_mode; - irte->fields.valid = 1; + iommu->irte_ops->prepare(data->entry, apic->irq_delivery_mode, + apic->irq_dest_mode, irq_cfg->vector, + irq_cfg->dest_apicid, devid); switch (info->type) { case X86_IRQ_ALLOC_TYPE_IOAPIC: @@ -3845,12 +4073,32 @@ static void irq_remapping_prepare_irte(struct amd_ir_data *data, } } +struct amd_irte_ops irte_32_ops = { + .prepare = irte_prepare, + .activate = irte_activate, + .deactivate = irte_deactivate, + .set_affinity = irte_set_affinity, + .set_allocated = irte_set_allocated, + .is_allocated = irte_is_allocated, + .clear_allocated = irte_clear_allocated, +}; + +struct amd_irte_ops irte_128_ops = { + .prepare = irte_ga_prepare, + .activate = irte_ga_activate, + .deactivate = irte_ga_deactivate, + .set_affinity = irte_ga_set_affinity, + .set_allocated = irte_ga_set_allocated, + .is_allocated = irte_ga_is_allocated, + .clear_allocated = irte_ga_clear_allocated, +}; + static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *arg) { struct irq_alloc_info *info = arg; struct irq_data *irq_data; - struct amd_ir_data *data; + struct amd_ir_data *data = NULL; struct irq_cfg *cfg; int i, ret, devid; int index = -1; @@ -3902,6 +4150,16 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq, if (!data) goto out_free_data; + if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir)) + data->entry = kzalloc(sizeof(union irte), GFP_KERNEL); + else + data->entry = kzalloc(sizeof(struct irte_ga), + GFP_KERNEL); + if (!data->entry) { + kfree(data); + goto out_free_data; + } + irq_data->hwirq = (devid << 16) + i; irq_data->chip_data = data; irq_data->chip = &amd_ir_chip; @@ -3938,6 +4196,7 @@ static void irq_remapping_free(struct irq_domain *domain, unsigned int virq, data = irq_data->chip_data; irte_info = &data->irq_2_irte; free_irte(irte_info->devid, irte_info->index); + kfree(data->entry); kfree(data); } } @@ -3949,8 +4208,11 @@ static void irq_remapping_activate(struct irq_domain *domain, { struct amd_ir_data *data = irq_data->chip_data; struct irq_2_irte *irte_info = &data->irq_2_irte; + struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid]; - modify_irte(irte_info->devid, irte_info->index, data->irte_entry); + if (iommu) + iommu->irte_ops->activate(data->entry, irte_info->devid, + irte_info->index); } static void irq_remapping_deactivate(struct irq_domain *domain, @@ -3958,10 +4220,11 @@ static void irq_remapping_deactivate(struct irq_domain *domain, { struct amd_ir_data *data = irq_data->chip_data; struct irq_2_irte *irte_info = &data->irq_2_irte; - union irte entry; + struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid]; - entry.val = 0; - modify_irte(irte_info->devid, irte_info->index, data->irte_entry); + if (iommu) + iommu->irte_ops->deactivate(data->entry, irte_info->devid, + irte_info->index); } static struct irq_domain_ops amd_ir_domain_ops = { @@ -3971,6 +4234,70 @@ static struct irq_domain_ops amd_ir_domain_ops = { .deactivate = irq_remapping_deactivate, }; +static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info) +{ + struct amd_iommu *iommu; + struct amd_iommu_pi_data *pi_data = vcpu_info; + struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data; + struct amd_ir_data *ir_data = data->chip_data; + struct irte_ga *irte = (struct irte_ga *) ir_data->entry; + struct irq_2_irte *irte_info = &ir_data->irq_2_irte; + struct iommu_dev_data *dev_data 
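irte_32_ops and irte_128_ops form a function-pointer ops table: every IRTE touch goes through iommu->irte_ops, chosen once at init, so the legacy 32-bit and new 128-bit formats never leak into call sites. A minimal userspace demo of the dispatch idiom, with invented names:

        #include <stdio.h>

        struct fmt_ops {
                const char *name;
                unsigned int (*entry_size)(void);
        };

        static unsigned int size32(void)  { return 4;  }    /* one u32 per entry */
        static unsigned int size128(void) { return 16; }    /* two u64s per entry */

        static const struct fmt_ops ops32  = { "legacy", size32  };
        static const struct fmt_ops ops128 = { "ga",     size128 };

        int main(void)
        {
                int vapic_mode = 1;     /* hypothetical: decided once at init time */
                const struct fmt_ops *ops = vapic_mode ? &ops128 : &ops32;

                /* callers never branch on the format again */
                printf("%s IRTE: %u bytes\n", ops->name, ops->entry_size());
                return 0;
        }
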
= search_dev_data(irte_info->devid); + + /* Note: + * This device has never been set up for guest mode. + * we should not modify the IRTE + */ + if (!dev_data || !dev_data->use_vapic) + return 0; + + pi_data->ir_data = ir_data; + + /* Note: + * SVM tries to set up for VAPIC mode, but we are in + * legacy mode. So, we force legacy mode instead. + */ + if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) { + pr_debug("AMD-Vi: %s: Fall back to using intr legacy remap\n", + __func__); + pi_data->is_guest_mode = false; + } + + iommu = amd_iommu_rlookup_table[irte_info->devid]; + if (iommu == NULL) + return -EINVAL; + + pi_data->prev_ga_tag = ir_data->cached_ga_tag; + if (pi_data->is_guest_mode) { + /* Setting */ + irte->hi.fields.ga_root_ptr = (pi_data->base >> 12); + irte->hi.fields.vector = vcpu_pi_info->vector; + irte->lo.fields_vapic.guest_mode = 1; + irte->lo.fields_vapic.ga_tag = pi_data->ga_tag; + + ir_data->cached_ga_tag = pi_data->ga_tag; + } else { + /* Un-Setting */ + struct irq_cfg *cfg = irqd_cfg(data); + + irte->hi.val = 0; + irte->lo.val = 0; + irte->hi.fields.vector = cfg->vector; + irte->lo.fields_remap.guest_mode = 0; + irte->lo.fields_remap.destination = cfg->dest_apicid; + irte->lo.fields_remap.int_type = apic->irq_delivery_mode; + irte->lo.fields_remap.dm = apic->irq_dest_mode; + + /* + * This communicates the ga_tag back to the caller + * so that it can do all the necessary clean up. + */ + ir_data->cached_ga_tag = 0; + } + + return modify_irte_ga(irte_info->devid, irte_info->index, irte, ir_data); +} + static int amd_ir_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) { @@ -3978,8 +4305,12 @@ static int amd_ir_set_affinity(struct irq_data *data, struct irq_2_irte *irte_info = &ir_data->irq_2_irte; struct irq_cfg *cfg = irqd_cfg(data); struct irq_data *parent = data->parent_data; + struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid]; int ret; + if (!iommu) + return -ENODEV; + ret = parent->chip->irq_set_affinity(parent, mask, force); if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE) return ret; @@ -3988,9 +4319,8 @@ static int amd_ir_set_affinity(struct irq_data *data, * Atomically updates the IRTE with the new destination, vector * and flushes the interrupt entry cache. 
*/ - ir_data->irte_entry.fields.vector = cfg->vector; - ir_data->irte_entry.fields.destination = cfg->dest_apicid; - modify_irte(irte_info->devid, irte_info->index, ir_data->irte_entry); + iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid, + irte_info->index, cfg->vector, cfg->dest_apicid); /* * After this point, all the interrupts will start arriving @@ -4012,6 +4342,7 @@ static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg) static struct irq_chip amd_ir_chip = { .irq_ack = ir_ack_apic_edge, .irq_set_affinity = amd_ir_set_affinity, + .irq_set_vcpu_affinity = amd_ir_set_vcpu_affinity, .irq_compose_msi_msg = ir_compose_msi_msg, }; @@ -4026,4 +4357,43 @@ int amd_iommu_create_irq_domain(struct amd_iommu *iommu) return 0; } + +int amd_iommu_update_ga(int cpu, bool is_run, void *data) +{ + unsigned long flags; + struct amd_iommu *iommu; + struct irq_remap_table *irt; + struct amd_ir_data *ir_data = (struct amd_ir_data *)data; + int devid = ir_data->irq_2_irte.devid; + struct irte_ga *entry = (struct irte_ga *) ir_data->entry; + struct irte_ga *ref = (struct irte_ga *) ir_data->ref; + + if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || + !ref || !entry || !entry->lo.fields_vapic.guest_mode) + return 0; + + iommu = amd_iommu_rlookup_table[devid]; + if (!iommu) + return -ENODEV; + + irt = get_irq_table(devid, false); + if (!irt) + return -ENODEV; + + spin_lock_irqsave(&irt->lock, flags); + + if (ref->lo.fields_vapic.guest_mode) { + if (cpu >= 0) + ref->lo.fields_vapic.destination = cpu; + ref->lo.fields_vapic.is_run = is_run; + barrier(); + } + + spin_unlock_irqrestore(&irt->lock, flags); + + iommu_flush_irt(iommu, devid); + iommu_completion_wait(iommu); + return 0; +} +EXPORT_SYMBOL(amd_iommu_update_ga); #endif diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 59741ead7e15..cd1713631a4a 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -84,6 +84,7 @@ #define ACPI_DEVFLAG_LINT1 0x80 #define ACPI_DEVFLAG_ATSDIS 0x10000000 +#define LOOP_TIMEOUT 100000 /* * ACPI table definitions * @@ -145,6 +146,8 @@ struct ivmd_header { bool amd_iommu_dump; bool amd_iommu_irq_remap __read_mostly; +int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC; + static bool amd_iommu_detected; static bool __initdata amd_iommu_disabled; static int amd_iommu_target_ivhd_type; @@ -386,6 +389,10 @@ static void iommu_disable(struct amd_iommu *iommu) iommu_feature_disable(iommu, CONTROL_EVT_INT_EN); iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); + /* Disable IOMMU GA_LOG */ + iommu_feature_disable(iommu, CONTROL_GALOG_EN); + iommu_feature_disable(iommu, CONTROL_GAINT_EN); + /* Disable IOMMU hardware itself */ iommu_feature_disable(iommu, CONTROL_IOMMU_EN); } @@ -671,6 +678,99 @@ static void __init free_ppr_log(struct amd_iommu *iommu) free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE)); } +static void free_ga_log(struct amd_iommu *iommu) +{ +#ifdef CONFIG_IRQ_REMAP + if (iommu->ga_log) + free_pages((unsigned long)iommu->ga_log, + get_order(GA_LOG_SIZE)); + if (iommu->ga_log_tail) + free_pages((unsigned long)iommu->ga_log_tail, + get_order(8)); +#endif +} + +static int iommu_ga_log_enable(struct amd_iommu *iommu) +{ +#ifdef CONFIG_IRQ_REMAP + u32 status, i; + + if (!iommu->ga_log) + return -EINVAL; + + status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); + + /* Check if already running */ + if (status & (MMIO_STATUS_GALOG_RUN_MASK)) + return 0; + + iommu_feature_enable(iommu, CONTROL_GAINT_EN); + 
iommu_feature_enable(iommu, CONTROL_GALOG_EN); + + for (i = 0; i < LOOP_TIMEOUT; ++i) { + status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); + if (status & (MMIO_STATUS_GALOG_RUN_MASK)) + break; + } + + if (i >= LOOP_TIMEOUT) + return -EINVAL; +#endif /* CONFIG_IRQ_REMAP */ + return 0; +} + +#ifdef CONFIG_IRQ_REMAP +static int iommu_init_ga_log(struct amd_iommu *iommu) +{ + u64 entry; + + if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) + return 0; + + iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + get_order(GA_LOG_SIZE)); + if (!iommu->ga_log) + goto err_out; + + iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + get_order(8)); + if (!iommu->ga_log_tail) + goto err_out; + + entry = (u64)virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512; + memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET, + &entry, sizeof(entry)); + entry = ((u64)virt_to_phys(iommu->ga_log) & 0xFFFFFFFFFFFFFULL) & ~7ULL; + memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET, + &entry, sizeof(entry)); + writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); + writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET); + + return 0; +err_out: + free_ga_log(iommu); + return -EINVAL; +} +#endif /* CONFIG_IRQ_REMAP */ + +static int iommu_init_ga(struct amd_iommu *iommu) +{ + int ret = 0; + +#ifdef CONFIG_IRQ_REMAP + /* Note: We have already checked GASup from IVRS table. + * Now, we need to make sure that GAMSup is set. + */ + if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) && + !iommu_feature(iommu, FEATURE_GAM_VAPIC)) + amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA; + + ret = iommu_init_ga_log(iommu); +#endif /* CONFIG_IRQ_REMAP */ + + return ret; +} + static void iommu_enable_gt(struct amd_iommu *iommu) { if (!iommu_feature(iommu, FEATURE_GT)) @@ -1144,6 +1244,7 @@ static void __init free_iommu_one(struct amd_iommu *iommu) free_command_buffer(iommu); free_event_buffer(iommu); free_ppr_log(iommu); + free_ga_log(iommu); iommu_unmap_mmio_space(iommu); } @@ -1258,6 +1359,8 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) iommu->mmio_phys_end = MMIO_REG_END_OFFSET; else iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; + if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0)) + amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; break; case 0x11: case 0x40: @@ -1265,6 +1368,8 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) iommu->mmio_phys_end = MMIO_REG_END_OFFSET; else iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; + if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) + amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; break; default: return -EINVAL; @@ -1432,6 +1537,7 @@ static int iommu_init_pci(struct amd_iommu *iommu) { int cap_ptr = iommu->cap_ptr; u32 range, misc, low, high; + int ret; iommu->dev = pci_get_bus_and_slot(PCI_BUS_NUM(iommu->devid), iommu->devid & 0xff); @@ -1488,6 +1594,10 @@ static int iommu_init_pci(struct amd_iommu *iommu) if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu)) return -ENOMEM; + ret = iommu_init_ga(iommu); + if (ret) + return ret; + if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) amd_iommu_np_cache = true; @@ -1545,16 +1655,24 @@ static void print_iommu_info(void) dev_name(&iommu->dev->dev), iommu->cap_ptr); if (iommu->cap & (1 << IOMMU_CAP_EFR)) { - pr_info("AMD-Vi: Extended features: "); + pr_info("AMD-Vi: Extended features (%#llx):\n", + iommu->features); for (i = 0; i < ARRAY_SIZE(feat_str); ++i) { if (iommu_feature(iommu, (1ULL << i))) pr_cont(" %s", feat_str[i]); 
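iommu_ga_log_enable() bounds its status poll with LOOP_TIMEOUT (100000 iterations) rather than spinning forever on hardware that never sets GALOG_RUN. A generic hedged sketch of a bounded MMIO poll; the driver itself returns -EINVAL on timeout and has no cpu_relax(), both of which are swapped in here as assumptions:

        static int example_wait_for_run(void __iomem *status_reg, u32 run_mask)
        {
                u32 i;

                for (i = 0; i < 100000; ++i) {          /* mirrors LOOP_TIMEOUT */
                        if (readl(status_reg) & run_mask)
                                return 0;               /* log engine is running */
                        cpu_relax();
                }
                return -ETIMEDOUT;                      /* never came up; bail out */
        }
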
} + + if (iommu->features & FEATURE_GAM_VAPIC) + pr_cont(" GA_vAPIC"); + pr_cont("\n"); } } - if (irq_remapping_enabled) + if (irq_remapping_enabled) { pr_info("AMD-Vi: Interrupt remapping enabled\n"); + if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) + pr_info("AMD-Vi: virtual APIC enabled\n"); + } } static int __init amd_iommu_init_pci(void) @@ -1645,6 +1763,8 @@ enable_faults: if (iommu->ppr_log != NULL) iommu_feature_enable(iommu, CONTROL_PPFINT_EN); + iommu_ga_log_enable(iommu); + return 0; } @@ -1862,6 +1982,24 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu) iommu->stored_addr_lo | 1); } +static void iommu_enable_ga(struct amd_iommu *iommu) +{ +#ifdef CONFIG_IRQ_REMAP + switch (amd_iommu_guest_ir) { + case AMD_IOMMU_GUEST_IR_VAPIC: + iommu_feature_enable(iommu, CONTROL_GAM_EN); + /* Fall through */ + case AMD_IOMMU_GUEST_IR_LEGACY_GA: + iommu_feature_enable(iommu, CONTROL_GA_EN); + iommu->irte_ops = &irte_128_ops; + break; + default: + iommu->irte_ops = &irte_32_ops; + break; + } +#endif +} + /* * This function finally enables all IOMMUs found in the system after * they have been initialized @@ -1877,9 +2015,15 @@ static void early_enable_iommus(void) iommu_enable_command_buffer(iommu); iommu_enable_event_buffer(iommu); iommu_set_exclusion_range(iommu); + iommu_enable_ga(iommu); iommu_enable(iommu); iommu_flush_all_caches(iommu); } + +#ifdef CONFIG_IRQ_REMAP + if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) + amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP); +#endif } static void enable_iommus_v2(void) @@ -1905,6 +2049,11 @@ static void disable_iommus(void) for_each_iommu(iommu) iommu_disable(iommu); + +#ifdef CONFIG_IRQ_REMAP + if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) + amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP); +#endif } /* @@ -2059,7 +2208,7 @@ static int __init early_amd_iommu_init(void) struct acpi_table_header *ivrs_base; acpi_size ivrs_size; acpi_status status; - int i, ret = 0; + int i, remap_cache_sz, ret = 0; if (!amd_iommu_detected) return -ENODEV; @@ -2157,10 +2306,14 @@ static int __init early_amd_iommu_init(void) * remapping tables. 
*/ ret = -ENOMEM; + if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir)) + remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32); + else + remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2); amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache", - MAX_IRQS_PER_TABLE * sizeof(u32), - IRQ_TABLE_ALIGNMENT, - 0, NULL); + remap_cache_sz, + IRQ_TABLE_ALIGNMENT, + 0, NULL); if (!amd_iommu_irq_cache) goto out; @@ -2413,6 +2566,21 @@ static int __init parse_amd_iommu_dump(char *str) return 1; } +static int __init parse_amd_iommu_intr(char *str) +{ + for (; *str; ++str) { + if (strncmp(str, "legacy", 6) == 0) { + amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; + break; + } + if (strncmp(str, "vapic", 5) == 0) { + amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC; + break; + } + } + return 1; +} + static int __init parse_amd_iommu_options(char *str) { for (; *str; ++str) { @@ -2521,6 +2689,7 @@ static int __init parse_ivrs_acpihid(char *str) __setup("amd_iommu_dump", parse_amd_iommu_dump); __setup("amd_iommu=", parse_amd_iommu_options); +__setup("amd_iommu_intr=", parse_amd_iommu_intr); __setup("ivrs_ioapic", parse_ivrs_ioapic); __setup("ivrs_hpet", parse_ivrs_hpet); __setup("ivrs_acpihid", parse_ivrs_acpihid); diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h index 0bd9eb374462..faa3b4895cf0 100644 --- a/drivers/iommu/amd_iommu_proto.h +++ b/drivers/iommu/amd_iommu_proto.h @@ -38,6 +38,7 @@ extern int amd_iommu_enable(void); extern void amd_iommu_disable(void); extern int amd_iommu_reenable(int); extern int amd_iommu_enable_faulting(void); +extern int amd_iommu_guest_ir; /* IOMMUv2 specific functions */ struct iommu_domain; diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index caf5e3822715..fa766eefd590 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -22,6 +22,7 @@ #include <linux/types.h> #include <linux/mutex.h> +#include <linux/msi.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/pci.h> @@ -69,6 +70,8 @@ #define MMIO_EXCL_LIMIT_OFFSET 0x0028 #define MMIO_EXT_FEATURES 0x0030 #define MMIO_PPR_LOG_OFFSET 0x0038 +#define MMIO_GA_LOG_BASE_OFFSET 0x00e0 +#define MMIO_GA_LOG_TAIL_OFFSET 0x00e8 #define MMIO_CMD_HEAD_OFFSET 0x2000 #define MMIO_CMD_TAIL_OFFSET 0x2008 #define MMIO_EVT_HEAD_OFFSET 0x2010 @@ -76,6 +79,8 @@ #define MMIO_STATUS_OFFSET 0x2020 #define MMIO_PPR_HEAD_OFFSET 0x2030 #define MMIO_PPR_TAIL_OFFSET 0x2038 +#define MMIO_GA_HEAD_OFFSET 0x2040 +#define MMIO_GA_TAIL_OFFSET 0x2048 #define MMIO_CNTR_CONF_OFFSET 0x4000 #define MMIO_CNTR_REG_OFFSET 0x40000 #define MMIO_REG_END_OFFSET 0x80000 @@ -92,6 +97,7 @@ #define FEATURE_GA (1ULL<<7) #define FEATURE_HE (1ULL<<8) #define FEATURE_PC (1ULL<<9) +#define FEATURE_GAM_VAPIC (1ULL<<21) #define FEATURE_PASID_SHIFT 32 #define FEATURE_PASID_MASK (0x1fULL << FEATURE_PASID_SHIFT) @@ -110,6 +116,9 @@ #define MMIO_STATUS_EVT_INT_MASK (1 << 1) #define MMIO_STATUS_COM_WAIT_INT_MASK (1 << 2) #define MMIO_STATUS_PPR_INT_MASK (1 << 6) +#define MMIO_STATUS_GALOG_RUN_MASK (1 << 8) +#define MMIO_STATUS_GALOG_OVERFLOW_MASK (1 << 9) +#define MMIO_STATUS_GALOG_INT_MASK (1 << 10) /* event logging constants */ #define EVENT_ENTRY_SIZE 0x10 @@ -146,6 +155,10 @@ #define CONTROL_PPFINT_EN 0x0eULL #define CONTROL_PPR_EN 0x0fULL #define CONTROL_GT_EN 0x10ULL +#define CONTROL_GA_EN 0x11ULL +#define CONTROL_GAM_EN 0x19ULL +#define CONTROL_GALOG_EN 0x1CULL +#define CONTROL_GAINT_EN 0x1DULL #define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT) #define 
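The new amd_iommu_intr= option is registered with __setup(), the hook for boot parameters that must be parsed before the driver initializes. A sketch of the shape with invented names; returning 1 tells the early-param code the option was consumed:

        static int example_ir_mode;     /* 0 = legacy, 1 = vapic; hypothetical */

        static int __init parse_example_intr(char *str)
        {
                if (str && strncmp(str, "vapic", 5) == 0)
                        example_ir_mode = 1;
                else if (str && strncmp(str, "legacy", 6) == 0)
                        example_ir_mode = 0;
                return 1;
        }
        __setup("example_intr=", parse_example_intr);
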
CTRL_INV_TO_NONE 0 @@ -224,6 +237,19 @@ #define PPR_REQ_FAULT 0x01 +/* Constants for GA Log handling */ +#define GA_LOG_ENTRIES 512 +#define GA_LOG_SIZE_SHIFT 56 +#define GA_LOG_SIZE_512 (0x8ULL << GA_LOG_SIZE_SHIFT) +#define GA_ENTRY_SIZE 8 +#define GA_LOG_SIZE (GA_ENTRY_SIZE * GA_LOG_ENTRIES) + +#define GA_TAG(x) (u32)(x & 0xffffffffULL) +#define GA_DEVID(x) (u16)(((x) >> 32) & 0xffffULL) +#define GA_REQ_TYPE(x) (((x) >> 60) & 0xfULL) + +#define GA_GUEST_NR 0x1 + #define PAGE_MODE_NONE 0x00 #define PAGE_MODE_1_LEVEL 0x01 #define PAGE_MODE_2_LEVEL 0x02 @@ -329,6 +355,12 @@ #define IOMMU_CAP_NPCACHE 26 #define IOMMU_CAP_EFR 27 +/* IOMMU Feature Reporting Field (for IVHD type 10h */ +#define IOMMU_FEAT_GASUP_SHIFT 6 + +/* IOMMU Extended Feature Register (EFR) */ +#define IOMMU_EFR_GASUP_SHIFT 7 + #define MAX_DOMAIN_ID 65536 /* Protection domain flags */ @@ -400,6 +432,7 @@ struct amd_iommu_fault { struct iommu_domain; struct irq_domain; +struct amd_irte_ops; /* * This structure contains generic data for IOMMU protection domains @@ -490,6 +523,12 @@ struct amd_iommu { /* Base of the PPR log, if present */ u8 *ppr_log; + /* Base of the GA log, if present */ + u8 *ga_log; + + /* Tail of the GA log, if present */ + u8 *ga_log_tail; + /* true if interrupts for this IOMMU are already enabled */ bool int_enabled; @@ -523,6 +562,8 @@ struct amd_iommu { #ifdef CONFIG_IRQ_REMAP struct irq_domain *ir_domain; struct irq_domain *msi_domain; + + struct amd_irte_ops *irte_ops; #endif }; @@ -681,4 +722,112 @@ static inline int get_hpet_devid(int id) return -EINVAL; } +enum amd_iommu_intr_mode_type { + AMD_IOMMU_GUEST_IR_LEGACY, + + /* This mode is not visible to users. It is used when + * we cannot fully enable vAPIC and fallback to only support + * legacy interrupt remapping via 128-bit IRTE. 
+ */ + AMD_IOMMU_GUEST_IR_LEGACY_GA, + AMD_IOMMU_GUEST_IR_VAPIC, +}; + +#define AMD_IOMMU_GUEST_IR_GA(x) (x == AMD_IOMMU_GUEST_IR_VAPIC || \ + x == AMD_IOMMU_GUEST_IR_LEGACY_GA) + +#define AMD_IOMMU_GUEST_IR_VAPIC(x) (x == AMD_IOMMU_GUEST_IR_VAPIC) + +union irte { + u32 val; + struct { + u32 valid : 1, + no_fault : 1, + int_type : 3, + rq_eoi : 1, + dm : 1, + rsvd_1 : 1, + destination : 8, + vector : 8, + rsvd_2 : 8; + } fields; +}; + +union irte_ga_lo { + u64 val; + + /* For int remapping */ + struct { + u64 valid : 1, + no_fault : 1, + /* ------ */ + int_type : 3, + rq_eoi : 1, + dm : 1, + /* ------ */ + guest_mode : 1, + destination : 8, + rsvd : 48; + } fields_remap; + + /* For guest vAPIC */ + struct { + u64 valid : 1, + no_fault : 1, + /* ------ */ + ga_log_intr : 1, + rsvd1 : 3, + is_run : 1, + /* ------ */ + guest_mode : 1, + destination : 8, + rsvd2 : 16, + ga_tag : 32; + } fields_vapic; +}; + +union irte_ga_hi { + u64 val; + struct { + u64 vector : 8, + rsvd_1 : 4, + ga_root_ptr : 40, + rsvd_2 : 12; + } fields; +}; + +struct irte_ga { + union irte_ga_lo lo; + union irte_ga_hi hi; +}; + +struct irq_2_irte { + u16 devid; /* Device ID for IRTE table */ + u16 index; /* Index into IRTE table*/ +}; + +struct amd_ir_data { + u32 cached_ga_tag; + struct irq_2_irte irq_2_irte; + struct msi_msg msi_entry; + void *entry; /* Pointer to union irte or struct irte_ga */ + void *ref; /* Pointer to the actual irte */ +}; + +struct amd_irte_ops { + void (*prepare)(void *, u32, u32, u8, u32, int); + void (*activate)(void *, u16, u16); + void (*deactivate)(void *, u16, u16); + void (*set_affinity)(void *, u16, u16, u8, u32); + void *(*get)(struct irq_remap_table *, int); + void (*set_allocated)(struct irq_remap_table *, int); + bool (*is_allocated)(struct irq_remap_table *, int); + void (*clear_allocated)(struct irq_remap_table *, int); +}; + +#ifdef CONFIG_IRQ_REMAP +extern struct amd_irte_ops irte_32_ops; +extern struct amd_irte_ops irte_128_ops; +#endif + #endif /* _ASM_X86_AMD_IOMMU_TYPES_H */ diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 08a1e2f3690f..00c8a08d56e7 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -68,7 +68,8 @@ void iommu_put_dma_cookie(struct iommu_domain *domain) if (!iovad) return; - put_iova_domain(iovad); + if (iovad->granule) + put_iova_domain(iovad); kfree(iovad); domain->iova_cookie = NULL; } @@ -151,12 +152,15 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent) } } -static struct iova *__alloc_iova(struct iova_domain *iovad, size_t size, +static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size, dma_addr_t dma_limit) { + struct iova_domain *iovad = domain->iova_cookie; unsigned long shift = iova_shift(iovad); unsigned long length = iova_align(iovad, size) >> shift; + if (domain->geometry.force_aperture) + dma_limit = min(dma_limit, domain->geometry.aperture_end); /* * Enforce size-alignment to be safe - there could perhaps be an * attribute to control this per-device, or at least per-domain... 
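The dma-iommu change passes the domain into __alloc_iova() so the device's DMA limit can be clamped to the domain's aperture: a 64-bit-capable device behind an IOMMU that only maps, say, a 36-bit space must never be handed IOVAs above that. A worked example with made-up numbers:

        #include <stdio.h>
        #include <stdint.h>

        static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

        int main(void)
        {
                uint64_t dma_mask     = UINT64_MAX;             /* 64-bit device */
                uint64_t aperture_end = (1ULL << 36) - 1;       /* 36-bit domain */

                printf("alloc limit = %#llx\n",
                       (unsigned long long)min_u64(dma_mask, aperture_end));
                return 0;
        }
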
@@ -314,7 +318,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, if (!pages) return NULL; - iova = __alloc_iova(iovad, size, dev->coherent_dma_mask); + iova = __alloc_iova(domain, size, dev->coherent_dma_mask); if (!iova) goto out_free_pages; @@ -386,7 +390,7 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, phys_addr_t phys = page_to_phys(page) + offset; size_t iova_off = iova_offset(iovad, phys); size_t len = iova_align(iovad, size + iova_off); - struct iova *iova = __alloc_iova(iovad, len, dma_get_mask(dev)); + struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev)); if (!iova) return DMA_ERROR_CODE; @@ -538,7 +542,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, prev = s; } - iova = __alloc_iova(iovad, iova_len, dma_get_mask(dev)); + iova = __alloc_iova(domain, iova_len, dma_get_mask(dev)); if (!iova) goto out_restore_sg; diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h index 9ed0a8462ccf..3dab13b4a211 100644 --- a/drivers/iommu/mtk_iommu.h +++ b/drivers/iommu/mtk_iommu.h @@ -55,19 +55,19 @@ struct mtk_iommu_data { bool enable_4GB; }; -static int compare_of(struct device *dev, void *data) +static inline int compare_of(struct device *dev, void *data) { return dev->of_node == data; } -static int mtk_iommu_bind(struct device *dev) +static inline int mtk_iommu_bind(struct device *dev) { struct mtk_iommu_data *data = dev_get_drvdata(dev); return component_bind_all(dev, &data->smi_imu); } -static void mtk_iommu_unbind(struct device *dev) +static inline void mtk_iommu_unbind(struct device *dev) { struct mtk_iommu_data *data = dev_get_drvdata(dev); diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 4e9784b4e0ac..eedba67b0e3e 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -181,7 +181,7 @@ struct crypt_config { u8 key[0]; }; -#define MIN_IOS 16 +#define MIN_IOS 64 static void clone_init(struct dm_crypt_io *, struct bio *); static void kcryptd_queue_crypt(struct dm_crypt_io *io); diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 1b9795d75ef8..8abde6b8cedc 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -191,7 +191,6 @@ struct raid_dev { #define RT_FLAG_RS_BITMAP_LOADED 2 #define RT_FLAG_UPDATE_SBS 3 #define RT_FLAG_RESHAPE_RS 4 -#define RT_FLAG_KEEP_RS_FROZEN 5 /* Array elements of 64 bit needed for rebuild/failed disk bits */ #define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8) @@ -861,6 +860,9 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size) { unsigned long min_region_size = rs->ti->len / (1 << 21); + if (rs_is_raid0(rs)) + return 0; + if (!region_size) { /* * Choose a reasonable default. All figures in sectors. @@ -930,6 +932,8 @@ static int validate_raid_redundancy(struct raid_set *rs) rebuild_cnt++; switch (rs->raid_type->level) { + case 0: + break; case 1: if (rebuild_cnt >= rs->md.raid_disks) goto too_many; @@ -2335,6 +2339,13 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) case 0: break; default: + /* + * We have to keep any raid0 data/metadata device pairs or + * the MD raid0 personality will fail to start the array. 
+ */ + if (rs_is_raid0(rs)) + continue; + dev = container_of(rdev, struct raid_dev, rdev); if (dev->meta_dev) dm_put_device(ti, dev->meta_dev); @@ -2579,7 +2590,6 @@ static int rs_prepare_reshape(struct raid_set *rs) } else { /* Process raid1 without delta_disks */ mddev->raid_disks = rs->raid_disks; - set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags); reshape = false; } } else { @@ -2590,7 +2600,6 @@ static int rs_prepare_reshape(struct raid_set *rs) if (reshape) { set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags); set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); - set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags); } else if (mddev->raid_disks < rs->raid_disks) /* Create new superblocks and bitmaps, if any new disks */ set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); @@ -2902,7 +2911,6 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad; set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); - set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags); /* Takeover ain't recovery, so disable recovery */ rs_setup_recovery(rs, MaxSector); rs_set_new(rs); @@ -3386,21 +3394,28 @@ static void raid_postsuspend(struct dm_target *ti) { struct raid_set *rs = ti->private; - if (test_and_clear_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) { - if (!rs->md.suspended) - mddev_suspend(&rs->md); - rs->md.ro = 1; - } + if (!rs->md.suspended) + mddev_suspend(&rs->md); + + rs->md.ro = 1; } static void attempt_restore_of_faulty_devices(struct raid_set *rs) { int i; - uint64_t failed_devices, cleared_failed_devices = 0; + uint64_t cleared_failed_devices[DISKS_ARRAY_ELEMS]; unsigned long flags; + bool cleared = false; struct dm_raid_superblock *sb; + struct mddev *mddev = &rs->md; struct md_rdev *r; + /* RAID personalities have to provide hot add/remove methods or we need to bail out. */ + if (!mddev->pers || !mddev->pers->hot_add_disk || !mddev->pers->hot_remove_disk) + return; + + memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices)); + for (i = 0; i < rs->md.raid_disks; i++) { r = &rs->dev[i].rdev; if (test_bit(Faulty, &r->flags) && r->sb_page && @@ -3420,7 +3435,7 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs) * ourselves. 
*/ if ((r->raid_disk >= 0) && - (r->mddev->pers->hot_remove_disk(r->mddev, r) != 0)) + (mddev->pers->hot_remove_disk(mddev, r) != 0)) /* Failed to revive this device, try next */ continue; @@ -3430,22 +3445,30 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs) clear_bit(Faulty, &r->flags); clear_bit(WriteErrorSeen, &r->flags); clear_bit(In_sync, &r->flags); - if (r->mddev->pers->hot_add_disk(r->mddev, r)) { + if (mddev->pers->hot_add_disk(mddev, r)) { r->raid_disk = -1; r->saved_raid_disk = -1; r->flags = flags; } else { r->recovery_offset = 0; - cleared_failed_devices |= 1 << i; + set_bit(i, (void *) cleared_failed_devices); + cleared = true; } } } - if (cleared_failed_devices) { + + /* If any failed devices could be cleared, update all sbs failed_devices bits */ + if (cleared) { + uint64_t failed_devices[DISKS_ARRAY_ELEMS]; + rdev_for_each(r, &rs->md) { sb = page_address(r->sb_page); - failed_devices = le64_to_cpu(sb->failed_devices); - failed_devices &= ~cleared_failed_devices; - sb->failed_devices = cpu_to_le64(failed_devices); + sb_retrieve_failed_devices(sb, failed_devices); + + for (i = 0; i < DISKS_ARRAY_ELEMS; i++) + failed_devices[i] &= ~cleared_failed_devices[i]; + + sb_update_failed_devices(sb, failed_devices); } } } @@ -3610,26 +3633,15 @@ static void raid_resume(struct dm_target *ti) * devices are reachable again. */ attempt_restore_of_faulty_devices(rs); - } else { - mddev->ro = 0; - mddev->in_sync = 0; + } - /* - * When passing in flags to the ctr, we expect userspace - * to reset them because they made it to the superblocks - * and reload the mapping anyway. - * - * -> only unfreeze recovery in case of a table reload or - * we'll have a bogus recovery/reshape position - * retrieved from the superblock by the ctr because - * the ongoing recovery/reshape will change it after read. 
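The dm-raid hunk replaces a single uint64_t mask built with "1 << i" by an array of words driven through set_bit(). The old expression shifted a plain int, which is undefined behaviour from bit 31 up and could never cover more than 64 devices in any case; the array form scales to DISKS_ARRAY_ELEMS words. A userspace illustration of the array form:

        #include <stdio.h>
        #include <stdint.h>

        #define BITS_PER_WORD   64
        #define WORDS(n)        (((n) + BITS_PER_WORD - 1) / BITS_PER_WORD)

        static void set_bit64(unsigned int nr, uint64_t *map)
        {
                map[nr / BITS_PER_WORD] |= 1ULL << (nr % BITS_PER_WORD);
        }

        int main(void)
        {
                uint64_t cleared[WORDS(253)] = { 0 };   /* sized like DISKS_ARRAY_ELEMS */

                set_bit64(70, cleared);         /* "1 << 70" on an int would be UB */
                printf("word 1 = %#llx\n", (unsigned long long)cleared[1]); /* 0x40 */
                return 0;
        }
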
- */ - if (!test_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags)) - clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + mddev->ro = 0; + mddev->in_sync = 0; - if (mddev->suspended) - mddev_resume(mddev); - } + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + + if (mddev->suspended) + mddev_resume(mddev); } static struct target_type raid_target = { diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c index 4ace1da17db8..6c25213ab38c 100644 --- a/drivers/md/dm-round-robin.c +++ b/drivers/md/dm-round-robin.c @@ -210,14 +210,17 @@ static struct dm_path *rr_select_path(struct path_selector *ps, size_t nr_bytes) struct path_info *pi = NULL; struct dm_path *current_path = NULL; + local_irq_save(flags); current_path = *this_cpu_ptr(s->current_path); if (current_path) { percpu_counter_dec(&s->repeat_count); - if (percpu_counter_read_positive(&s->repeat_count) > 0) + if (percpu_counter_read_positive(&s->repeat_count) > 0) { + local_irq_restore(flags); return current_path; + } } - spin_lock_irqsave(&s->lock, flags); + spin_lock(&s->lock); if (!list_empty(&s->valid_paths)) { pi = list_entry(s->valid_paths.next, struct path_info, list); list_move_tail(&pi->list, &s->valid_paths); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 1f276fa30ba6..217e8da0628c 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -152,7 +152,7 @@ module_param(lacp_rate, charp, 0); MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; " "0 for slow, 1 for fast"); module_param(ad_select, charp, 0); -MODULE_PARM_DESC(ad_select, "803.ad aggregation selection logic; " +MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; " "0 for stable (default), 1 for bandwidth, " "2 for count"); module_param(min_links, int, 0); diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h index 8f12bddd5dc9..a0b453ea34c9 100644 --- a/drivers/net/dsa/b53/b53_regs.h +++ b/drivers/net/dsa/b53/b53_regs.h @@ -258,7 +258,7 @@ * BCM5325 and BCM5365 share most definitions below */ #define B53_ARLTBL_MAC_VID_ENTRY(n) (0x10 * (n)) -#define ARLTBL_MAC_MASK 0xffffffffffff +#define ARLTBL_MAC_MASK 0xffffffffffffULL #define ARLTBL_VID_S 48 #define ARLTBL_VID_MASK_25 0xff #define ARLTBL_VID_MASK 0xfff diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index d36aedde8cb9..d1d9d3cf9139 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3187,6 +3187,7 @@ static int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr) return err; } +#ifdef CONFIG_NET_DSA_HWMON static int mv88e6xxx_mdio_page_read(struct dsa_switch *ds, int port, int page, int reg) { @@ -3212,6 +3213,7 @@ static int mv88e6xxx_mdio_page_write(struct dsa_switch *ds, int port, int page, return ret; } +#endif static int mv88e6xxx_port_to_mdio_addr(struct mv88e6xxx_chip *chip, int port) { diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index 37a0f463b8de..18bb9556dd00 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -793,6 +793,8 @@ int xgene_enet_phy_connect(struct net_device *ndev) netdev_err(ndev, "Could not connect to PHY\n"); return -ENODEV; } +#else + return -ENODEV; #endif } diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index 4bff0f3040df..b0da9693f28a 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ 
b/drivers/net/ethernet/arc/emac_main.c @@ -771,8 +771,10 @@ int arc_emac_probe(struct net_device *ndev, int interface) priv->dev = dev; priv->regs = devm_ioremap_resource(dev, &res_regs); - if (IS_ERR(priv->regs)) - return PTR_ERR(priv->regs); + if (IS_ERR(priv->regs)) { + err = PTR_ERR(priv->regs); + goto out_put_node; + } dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index ff300f7cf529..659261218d9f 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -12552,10 +12552,6 @@ static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, info->data = TG3_RSS_MAX_NUM_QS; } - /* The first interrupt vector only - * handles link interrupts. - */ - info->data -= 1; return 0; default: @@ -14014,6 +14010,7 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) } if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || + (!ec->rx_coalesce_usecs) || (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 36893d8958d4..b6fcf10621b6 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -403,11 +403,11 @@ #define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII 0x00000004 #define MACB_CAPS_NO_GIGABIT_HALF 0x00000008 #define MACB_CAPS_USRIO_DISABLED 0x00000010 +#define MACB_CAPS_JUMBO 0x00000020 #define MACB_CAPS_FIFO_MODE 0x10000000 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 #define MACB_CAPS_SG_DISABLED 0x40000000 #define MACB_CAPS_MACB_IS_GEM 0x80000000 -#define MACB_CAPS_JUMBO 0x00000010 /* Bit manipulation macros */ #define MACB_BIT(name) \ diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 1471e16ba719..f45385f5c6e5 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1299,6 +1299,7 @@ static int dm9000_open(struct net_device *dev) { struct board_info *db = netdev_priv(dev); + unsigned int irq_flags = irq_get_trigger_type(dev->irq); if (netif_msg_ifup(db)) dev_dbg(db->dev, "enabling %s\n", dev->name); @@ -1306,9 +1307,11 @@ dm9000_open(struct net_device *dev) /* If there is no IRQ type specified, tell the user that this is a * problem */ - if (irq_get_trigger_type(dev->irq) == IRQF_TRIGGER_NONE) + if (irq_flags == IRQF_TRIGGER_NONE) dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n"); + irq_flags |= IRQF_SHARED; + /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */ iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ mdelay(1); /* delay needs by DM9000B */ @@ -1316,8 +1319,7 @@ dm9000_open(struct net_device *dev) /* Initialize DM9000 board */ dm9000_init_dm9000(dev); - if (request_irq(dev->irq, dm9000_interrupt, IRQF_SHARED, - dev->name, dev)) + if (request_irq(dev->irq, dm9000_interrupt, irq_flags, dev->name, dev)) return -EAGAIN; /* Now that we have an interrupt handler hooked up we can unmask * our interrupts diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c index 1235c7f2564b..1e1eb92998fb 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c @@ -17,7 +17,7 @@ static const struct mac_stats_string g_gmac_stats_string[] = { {"gmac_rx_octets_total_ok", 
MAC_STATS_FIELD_OFF(rx_good_bytes)}, {"gmac_rx_octets_bad", MAC_STATS_FIELD_OFF(rx_bad_bytes)}, {"gmac_rx_uc_pkts", MAC_STATS_FIELD_OFF(rx_uc_pkts)}, - {"gamc_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)}, + {"gmac_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)}, {"gmac_rx_bc_pkts", MAC_STATS_FIELD_OFF(rx_bc_pkts)}, {"gmac_rx_pkts_64octets", MAC_STATS_FIELD_OFF(rx_64bytes)}, {"gmac_rx_pkts_65to127", MAC_STATS_FIELD_OFF(rx_65to127)}, diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index 7fd4d54599e4..6b03c8553e59 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -2032,7 +2032,8 @@ const struct e1000_info e1000_82574_info = { | FLAG2_DISABLE_ASPM_L0S | FLAG2_DISABLE_ASPM_L1 | FLAG2_NO_DISABLE_RX - | FLAG2_DMA_BURST, + | FLAG2_DMA_BURST + | FLAG2_CHECK_SYSTIM_OVERFLOW, .pba = 32, .max_hw_frame_size = DEFAULT_JUMBO, .get_variants = e1000_get_variants_82571, @@ -2053,7 +2054,8 @@ const struct e1000_info e1000_82583_info = { | FLAG_HAS_CTRLEXT_ON_LOAD, .flags2 = FLAG2_DISABLE_ASPM_L0S | FLAG2_DISABLE_ASPM_L1 - | FLAG2_NO_DISABLE_RX, + | FLAG2_NO_DISABLE_RX + | FLAG2_CHECK_SYSTIM_OVERFLOW, .pba = 32, .max_hw_frame_size = DEFAULT_JUMBO, .get_variants = e1000_get_variants_82571, diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index ef96cd11d6d2..879cca47b021 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -452,6 +452,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca); #define FLAG2_PCIM2PCI_ARBITER_WA BIT(11) #define FLAG2_DFLT_CRC_STRIPPING BIT(12) #define FLAG2_CHECK_RX_HWTSTAMP BIT(13) +#define FLAG2_CHECK_SYSTIM_OVERFLOW BIT(14) #define E1000_RX_DESC_PS(R, i) \ (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 3e11322d8d58..f3aaca743ea3 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -5885,7 +5885,8 @@ const struct e1000_info e1000_pch_lpt_info = { | FLAG_HAS_JUMBO_FRAMES | FLAG_APME_IN_WUC, .flags2 = FLAG2_HAS_PHY_STATS - | FLAG2_HAS_EEE, + | FLAG2_HAS_EEE + | FLAG2_CHECK_SYSTIM_OVERFLOW, .pba = 26, .max_hw_frame_size = 9022, .get_variants = e1000_get_variants_ich8lan, diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 02f443958f31..7017281ba2dc 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -4303,6 +4303,42 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter) } /** + * e1000e_sanitize_systim - sanitize raw cycle counter reads + * @hw: pointer to the HW structure + * @systim: cycle_t value read, sanitized and returned + * + * Errata for 82574/82583 possible bad bits read from SYSTIMH/L: + * check to see that the time is incrementing at a reasonable + * rate and is a multiple of incvalue. 
+ **/ +static cycle_t e1000e_sanitize_systim(struct e1000_hw *hw, cycle_t systim) +{ + u64 time_delta, rem, temp; + cycle_t systim_next; + u32 incvalue; + int i; + + incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK; + for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) { + /* latch SYSTIMH on read of SYSTIML */ + systim_next = (cycle_t)er32(SYSTIML); + systim_next |= (cycle_t)er32(SYSTIMH) << 32; + + time_delta = systim_next - systim; + temp = time_delta; + /* VMWare users have seen incvalue of zero, don't div / 0 */ + rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0); + + systim = systim_next; + + if ((time_delta < E1000_82574_SYSTIM_EPSILON) && (rem == 0)) + break; + } + + return systim; +} + +/** * e1000e_cyclecounter_read - read raw cycle counter (used by time counter) * @cc: cyclecounter structure **/ @@ -4312,7 +4348,7 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc) cc); struct e1000_hw *hw = &adapter->hw; u32 systimel, systimeh; - cycle_t systim, systim_next; + cycle_t systim; /* SYSTIMH latching upon SYSTIML read does not work well. * This means that if SYSTIML overflows after we read it but before * we read SYSTIMH, the value of SYSTIMH has been incremented and we @@ -4335,33 +4371,9 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc) systim = (cycle_t)systimel; systim |= (cycle_t)systimeh << 32; - if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) { - u64 time_delta, rem, temp; - u32 incvalue; - int i; - - /* errata for 82574/82583 possible bad bits read from SYSTIMH/L - * check to see that the time is incrementing at a reasonable - * rate and is a multiple of incvalue - */ - incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK; - for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) { - /* latch SYSTIMH on read of SYSTIML */ - systim_next = (cycle_t)er32(SYSTIML); - systim_next |= (cycle_t)er32(SYSTIMH) << 32; - - time_delta = systim_next - systim; - temp = time_delta; - /* VMWare users have seen incvalue of zero, don't div / 0 */ - rem = incvalue ? 
do_div(temp, incvalue) : (time_delta != 0); - - systim = systim_next; + if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW) + systim = e1000e_sanitize_systim(hw, systim); - if ((time_delta < E1000_82574_SYSTIM_EPSILON) && - (rem == 0)) - break; - } - } return systim; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 81c99e1be708..c6ac7a61812f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -4554,23 +4554,38 @@ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf) **/ static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) { + int i, tc_unused = 0; u8 num_tc = 0; - int i; + u8 ret = 0; /* Scan the ETS Config Priority Table to find * traffic class enabled for a given priority - * and use the traffic class index to get the - * number of traffic classes enabled + * and create a bitmask of enabled TCs */ - for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { - if (dcbcfg->etscfg.prioritytable[i] > num_tc) - num_tc = dcbcfg->etscfg.prioritytable[i]; - } + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) + num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]); - /* Traffic class index starts from zero so - * increment to return the actual count + /* Now scan the bitmask to check for + * contiguous TCs starting with TC0 */ - return num_tc + 1; + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (num_tc & BIT(i)) { + if (!tc_unused) { + ret++; + } else { + pr_err("Non-contiguous TC - Disabling DCB\n"); + return 1; + } + } else { + tc_unused = 1; + } + } + + /* There is always at least TC0 */ + if (!ret) + ret = 1; + + return ret; } /** diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index e61b647f5f2a..336c103ae374 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -744,7 +744,8 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) } } - shhwtstamps.hwtstamp = ktime_sub_ns(shhwtstamps.hwtstamp, adjust); + shhwtstamps.hwtstamp = + ktime_add_ns(shhwtstamps.hwtstamp, adjust); skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); dev_kfree_skb_any(adapter->ptp_tx_skb); @@ -767,13 +768,32 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, struct sk_buff *skb) { __le64 *regval = (__le64 *)va; + struct igb_adapter *adapter = q_vector->adapter; + int adjust = 0; /* The timestamp is recorded in little endian format. * DWORD: 0 1 2 3 * Field: Reserved Reserved SYSTIML SYSTIMH */ - igb_ptp_systim_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb), + igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), le64_to_cpu(regval[1])); + + /* adjust timestamp for the RX latency based on link speed */ + if (adapter->hw.mac.type == e1000_i210) { + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGB_I210_RX_LATENCY_10; + break; + case SPEED_100: + adjust = IGB_I210_RX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGB_I210_RX_LATENCY_1000; + break; + } + } + skb_hwtstamps(skb)->hwtstamp = + ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust); } /** @@ -825,7 +845,7 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, } } skb_hwtstamps(skb)->hwtstamp = - ktime_add_ns(skb_hwtstamps(skb)->hwtstamp, adjust); + ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust); /* Update the last_rx_timestamp timer in order to enable watchdog check * for error case of latched timestamp on a dropped packet. 
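The igb hunks above appear to encode a simple convention: fixed MAC/PHY latency is added to TX hardware timestamps (the packet is stamped before it reaches the wire) and subtracted from RX timestamps (it is stamped after it left the wire), which is why the TX path switches to ktime_add_ns() while both RX paths switch to ktime_sub_ns(). A minimal sketch of the per-link-speed RX correction follows; the IGB_I210_RX_LATENCY_* constants and SPEED_* values are the ones referenced by the hunk, but the standalone helper itself is hypothetical, not a function in the driver:

	/* Compensate an i210 RX hardware timestamp for the fixed latency
	 * between the wire and the stamping point. The latency depends
	 * only on the negotiated link speed.
	 */
	static ktime_t igb_rx_latency_sketch(ktime_t hwtstamp, int link_speed)
	{
		int adjust = 0;

		switch (link_speed) {
		case SPEED_10:
			adjust = IGB_I210_RX_LATENCY_10;
			break;
		case SPEED_100:
			adjust = IGB_I210_RX_LATENCY_100;
			break;
		case SPEED_1000:
			adjust = IGB_I210_RX_LATENCY_1000;
			break;
		}

		/* RX latency elapsed before the stamp was taken: subtract. */
		return ktime_sub_ns(hwtstamp, adjust);
	}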
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 5418c69a7463..b4f03748adc0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -4100,6 +4100,8 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; u32 vlnctrl, i; + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + switch (hw->mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: @@ -4112,8 +4114,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) /* fall through */ case ixgbe_mac_82598EB: /* legacy case, we can just disable VLAN filtering */ - vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); - vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); + vlnctrl &= ~IXGBE_VLNCTRL_VFE; IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); return; } @@ -4125,6 +4126,10 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) /* Set flag so we don't redo unnecessary work */ adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC; + /* For VMDq and SR-IOV we must leave VLAN filtering enabled */ + vlnctrl |= IXGBE_VLNCTRL_VFE; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + /* Add PF to all active pools */ for (i = IXGBE_VLVF_ENTRIES; --i;) { u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32); @@ -4191,6 +4196,11 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; u32 vlnctrl, i; + /* Set VLAN filtering to enabled */ + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlnctrl |= IXGBE_VLNCTRL_VFE; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + switch (hw->mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: @@ -4202,10 +4212,6 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter) break; /* fall through */ case ixgbe_mac_82598EB: - vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); - vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; - vlnctrl |= IXGBE_VLNCTRL_VFE; - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); return; } @@ -8390,12 +8396,14 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter, struct tcf_exts *exts, u64 *action, u8 *queue) { const struct tc_action *a; + LIST_HEAD(actions); int err; if (tc_no_actions(exts)) return -EINVAL; - tc_for_each_action(a, exts) { + tcf_exts_to_list(exts, &actions); + list_for_each_entry(a, &actions, list) { /* Drop action */ if (is_tcf_gact_shot(a)) { @@ -9517,6 +9525,7 @@ skip_sriov: /* copy netdev features into list of user selectable features */ netdev->hw_features |= netdev->features | + NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXALL | diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index b57ae3afb994..f1609542adf1 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -245,12 +245,16 @@ static int mtk_phy_connect(struct mtk_mac *mac) case PHY_INTERFACE_MODE_MII: ge_mode = 1; break; - case PHY_INTERFACE_MODE_RMII: + case PHY_INTERFACE_MODE_REVMII: ge_mode = 2; break; + case PHY_INTERFACE_MODE_RMII: + if (!mac->id) + goto err_phy; + ge_mode = 3; + break; default: - dev_err(eth->dev, "invalid phy_mode\n"); - return -1; + goto err_phy; } /* put the gmac into the right mode */ @@ -263,13 +267,25 @@ static int mtk_phy_connect(struct mtk_mac *mac) mac->phy_dev->autoneg = AUTONEG_ENABLE; mac->phy_dev->speed = 0; mac->phy_dev->duplex = 0; + + if (of_phy_is_fixed_link(mac->of_node)) + 
mac->phy_dev->supported |= + SUPPORTED_Pause | SUPPORTED_Asym_Pause; + mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause; mac->phy_dev->advertising = mac->phy_dev->supported | ADVERTISED_Autoneg; phy_start_aneg(mac->phy_dev); + of_node_put(np); + return 0; + +err_phy: + of_node_put(np); + dev_err(eth->dev, "invalid phy_mode\n"); + return -EINVAL; } static int mtk_mdio_init(struct mtk_eth *eth) @@ -542,15 +558,15 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring, return &ring->buf[idx]; } -static void mtk_tx_unmap(struct device *dev, struct mtk_tx_buf *tx_buf) +static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf) { if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { - dma_unmap_single(dev, + dma_unmap_single(eth->dev, dma_unmap_addr(tx_buf, dma_addr0), dma_unmap_len(tx_buf, dma_len0), DMA_TO_DEVICE); } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { - dma_unmap_page(dev, + dma_unmap_page(eth->dev, dma_unmap_addr(tx_buf, dma_addr0), dma_unmap_len(tx_buf, dma_len0), DMA_TO_DEVICE); @@ -595,9 +611,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, if (skb_vlan_tag_present(skb)) txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb); - mapped_addr = dma_map_single(&dev->dev, skb->data, + mapped_addr = dma_map_single(eth->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) + if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) return -ENOMEM; WRITE_ONCE(itxd->txd1, mapped_addr); @@ -623,10 +639,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, n_desc++; frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN); - mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset, + mapped_addr = skb_frag_dma_map(eth->dev, frag, offset, frag_map_size, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) + if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) goto err_dma; if (i == nr_frags - 1 && @@ -679,7 +695,7 @@ err_dma: tx_buf = mtk_desc_to_tx_buf(ring, itxd); /* unmap dma */ - mtk_tx_unmap(&dev->dev, tx_buf); + mtk_tx_unmap(eth, tx_buf); itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2); @@ -836,11 +852,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, netdev->stats.rx_dropped++; goto release_desc; } - dma_addr = dma_map_single(ð->netdev[mac]->dev, + dma_addr = dma_map_single(eth->dev, new_data + NET_SKB_PAD, ring->buf_size, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) { + if (unlikely(dma_mapping_error(eth->dev, dma_addr))) { skb_free_frag(new_data); netdev->stats.rx_dropped++; goto release_desc; @@ -855,7 +871,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, } skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); - dma_unmap_single(&netdev->dev, trxd.rxd1, + dma_unmap_single(eth->dev, trxd.rxd1, ring->buf_size, DMA_FROM_DEVICE); pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); skb->dev = netdev; @@ -937,7 +953,7 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget) done[mac]++; budget--; } - mtk_tx_unmap(eth->dev, tx_buf); + mtk_tx_unmap(eth, tx_buf); ring->last_free = desc; atomic_inc(&ring->free_count); @@ -1092,7 +1108,7 @@ static void mtk_tx_clean(struct mtk_eth *eth) if (ring->buf) { for (i = 0; i < MTK_DMA_SIZE; i++) - mtk_tx_unmap(eth->dev, &ring->buf[i]); + mtk_tx_unmap(eth, &ring->buf[i]); kfree(ring->buf); ring->buf = NULL; } @@ -1751,6 +1767,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node 
*np) goto free_netdev; } spin_lock_init(&mac->hw_stats->stats_lock); + u64_stats_init(&mac->hw_stats->syncp); mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET; SET_NETDEV_DEV(eth->netdev[id], eth->dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 0f19b01e3fff..dc8b1cb0fdc8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -318,6 +318,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, u32 *action, u32 *flow_tag) { const struct tc_action *a; + LIST_HEAD(actions); if (tc_no_actions(exts)) return -EINVAL; @@ -325,7 +326,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; *action = 0; - tc_for_each_action(a, exts) { + tcf_exts_to_list(exts, &actions); + list_for_each_entry(a, &actions, list) { /* Only support a single action per rule */ if (*action) return -EINVAL; @@ -362,13 +364,15 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, u32 *action, u32 *dest_vport) { const struct tc_action *a; + LIST_HEAD(actions); if (tc_no_actions(exts)) return -EINVAL; *action = 0; - tc_for_each_action(a, exts) { + tcf_exts_to_list(exts, &actions); + list_for_each_entry(a, &actions, list) { /* Only support a single action per rule */ if (*action) return -EINVAL; @@ -503,6 +507,7 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow; struct tc_action *a; struct mlx5_fc *counter; + LIST_HEAD(actions); u64 bytes; u64 packets; u64 lastuse; @@ -518,7 +523,8 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv, mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); - tc_for_each_action(a, f->exts) + tcf_exts_to_list(f->exts, &actions); + list_for_each_entry(a, &actions, list) tcf_action_stats_update(a, bytes, packets, lastuse); return 0; diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 7ca9201f7dcb..1721098eef13 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -3383,6 +3383,15 @@ MLXSW_ITEM32(reg, ritr, ipv4_fe, 0x04, 29, 1); */ MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1); +/* reg_ritr_lb_en + * Loop-back filter enable for unicast packets. + * If the flag is set then loop-back filter for unicast packets is + * implemented on the RIF. Multicast packets are always subject to + * loop-back filtering. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, lb_en, 0x04, 24, 1); + /* reg_ritr_virtual_router * Virtual router ID associated with the router interface. 
* Access: RW @@ -3484,6 +3493,7 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable, mlxsw_reg_ritr_op_set(payload, op); mlxsw_reg_ritr_rif_set(payload, rif); mlxsw_reg_ritr_ipv4_fe_set(payload, 1); + mlxsw_reg_ritr_lb_en_set(payload, 1); mlxsw_reg_ritr_mtu_set(payload, mtu); mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac); } @@ -4000,6 +4010,7 @@ static inline void mlxsw_reg_ralue_pack(char *payload, { MLXSW_REG_ZERO(ralue, payload); mlxsw_reg_ralue_protocol_set(payload, protocol); + mlxsw_reg_ralue_op_set(payload, op); mlxsw_reg_ralue_virtual_router_set(payload, virtual_router); mlxsw_reg_ralue_prefix_len_set(payload, prefix_len); mlxsw_reg_ralue_entry_type_set(payload, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index c3e61500819d..1f8168906811 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -942,8 +942,8 @@ static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport) kfree(mlxsw_sp_vport); } -int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, - u16 vid) +static int mlxsw_sp_port_add_vid(struct net_device *dev, + __be16 __always_unused proto, u16 vid) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp_port *mlxsw_sp_vport; @@ -956,16 +956,12 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, if (!vid) return 0; - if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) { - netdev_warn(dev, "VID=%d already configured\n", vid); + if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) return 0; - } mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid); - if (!mlxsw_sp_vport) { - netdev_err(dev, "Failed to create vPort for VID=%d\n", vid); + if (!mlxsw_sp_vport) return -ENOMEM; - } /* When adding the first VLAN interface on a bridged port we need to * transition all the active 802.1Q bridge VLANs to use explicit @@ -973,24 +969,17 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, */ if (list_is_singular(&mlxsw_sp_port->vports_list)) { err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port); - if (err) { - netdev_err(dev, "Failed to set to Virtual mode\n"); + if (err) goto err_port_vp_mode_trans; - } } err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); - if (err) { - netdev_err(dev, "Failed to disable learning for VID=%d\n", vid); + if (err) goto err_port_vid_learning_set; - } err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged); - if (err) { - netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", - vid); + if (err) goto err_port_add_vid; - } return 0; @@ -1010,7 +999,6 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev, struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp_port *mlxsw_sp_vport; struct mlxsw_sp_fid *f; - int err; /* VLAN 0 is removed from HW filter when device goes down, but * it is reserved in our case, so simply return. 
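One line in the RALUE hunk above is worth a callout: mlxsw_reg_ralue_pack() takes an op argument but, judging by the context shown, never encoded it into the payload, so route updates would go out with the default (zero) opcode unless a caller packed it separately; the added mlxsw_reg_ralue_op_set(payload, op) call closes that gap. The general pack-helper pattern, reduced to a sketch (the payload layout and field offsets below are illustrative only, not the real MLXSW_ITEM32 encoding):

	/* A pack helper must store every argument it accepts into the
	 * payload; an accepted-but-never-packed argument is silently
	 * dropped, which is the bug class fixed above.
	 */
	static inline void ralue_pack_sketch(u32 *payload, u8 protocol, u8 op,
					     u16 virtual_router, u8 prefix_len)
	{
		memset(payload, 0, 16 * sizeof(u32));
		payload[0] |= (u32)protocol << 24;
		payload[0] |= (u32)op << 20;	/* the previously missing store */
		payload[1] |= (u32)virtual_router << 16;
		payload[1] |= prefix_len;
	}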
@@ -1019,23 +1007,12 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev, return 0; mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); - if (!mlxsw_sp_vport) { - netdev_warn(dev, "VID=%d does not exist\n", vid); + if (WARN_ON(!mlxsw_sp_vport)) return 0; - } - err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false); - if (err) { - netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", - vid); - return err; - } + mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false); - err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); - if (err) { - netdev_err(dev, "Failed to enable learning for VID=%d\n", vid); - return err; - } + mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); /* Drop FID reference. If this was the last reference the * resources will be freed. @@ -1048,13 +1025,8 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev, * transition all active 802.1Q bridge VLANs to use VID to FID * mappings and set port's mode to VLAN mode. */ - if (list_is_singular(&mlxsw_sp_port->vports_list)) { - err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); - if (err) { - netdev_err(dev, "Failed to set to VLAN mode\n"); - return err; - } - } + if (list_is_singular(&mlxsw_sp_port->vports_list)) + mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); mlxsw_sp_port_vport_destroy(mlxsw_sp_vport); @@ -1149,6 +1121,7 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress) { const struct tc_action *a; + LIST_HEAD(actions); int err; if (!tc_single_action(cls->exts)) { @@ -1156,7 +1129,8 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, return -ENOTSUPP; } - tc_for_each_action(a, cls->exts) { + tcf_exts_to_list(cls->exts, &actions); + list_for_each_entry(a, &actions, list) { if (!is_tcf_mirred_mirror(a) || protocol != htons(ETH_P_ALL)) return -ENOTSUPP; @@ -2076,6 +2050,18 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) return 0; } +static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port) +{ + mlxsw_sp_port->pvid = 1; + + return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1); +} + +static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port) +{ + return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1); +} + static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, bool split, u8 module, u8 width, u8 lane) { @@ -2191,7 +2177,15 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, goto err_port_dcb_init; } + err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n", + mlxsw_sp_port->local_port); + goto err_port_pvid_vport_create; + } + mlxsw_sp_port_switchdev_init(mlxsw_sp_port); + mlxsw_sp->ports[local_port] = mlxsw_sp_port; err = register_netdev(dev); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", @@ -2208,24 +2202,23 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, goto err_core_port_init; } - err = mlxsw_sp_port_vlan_init(mlxsw_sp_port); - if (err) - goto err_port_vlan_init; - - mlxsw_sp->ports[local_port] = mlxsw_sp_port; return 0; -err_port_vlan_init: - mlxsw_core_port_fini(&mlxsw_sp_port->core_port); err_core_port_init: unregister_netdev(dev); err_register_netdev: + mlxsw_sp->ports[local_port] = NULL; + mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); + mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port); 
+err_port_pvid_vport_create: + mlxsw_sp_port_dcb_fini(mlxsw_sp_port); err_port_dcb_init: err_port_ets_init: err_port_buffers_init: err_port_admin_status_set: err_port_mtu_set: err_port_speed_by_width_set: + mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); err_port_swid_set: err_port_system_port_mapping_set: err_dev_addr_init: @@ -2245,12 +2238,12 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) if (!mlxsw_sp_port) return; - mlxsw_sp->ports[local_port] = NULL; mlxsw_core_port_fini(&mlxsw_sp_port->core_port); unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ - mlxsw_sp_port_dcb_fini(mlxsw_sp_port); - mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1); + mlxsw_sp->ports[local_port] = NULL; mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); + mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port); + mlxsw_sp_port_dcb_fini(mlxsw_sp_port); mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port); free_percpu(mlxsw_sp_port->pcpu_stats); @@ -2662,6 +2655,26 @@ static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = { { .func = mlxsw_sp_rx_listener_func, .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_MTUERROR, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_TTLERROR, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_LBERROR, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_OSPF, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, .trap_id = MLXSW_TRAP_ID_IP2ME, }, { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index f69aa37d1521..ab3feb81bd43 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -536,8 +536,6 @@ int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid); int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, u16 vid_end, bool is_member, bool untagged); -int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, - u16 vid); int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid, bool set); void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 074cdda7b6f3..237418a0e6e0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -330,7 +330,7 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = { MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, - MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 0, 0), MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c index 01cfb7512827..b6ed7f7c531e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c @@ -341,6 +341,8 @@ static int mlxsw_sp_port_pfc_set(struct mlxsw_sp_port *mlxsw_sp_port, char pfcc_pl[MLXSW_REG_PFCC_LEN]; mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); + 
mlxsw_reg_pfcc_pprx_set(pfcc_pl, mlxsw_sp_port->link.rx_pause); + mlxsw_reg_pfcc_pptx_set(pfcc_pl, mlxsw_sp_port->link.tx_pause); mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc->pfc_en); return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), @@ -351,17 +353,17 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); int err; - if ((mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) && - pfc->pfc_en) { + if (pause_en && pfc->pfc_en) { netdev_err(dev, "PAUSE frames already enabled on port\n"); return -EINVAL; } err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, mlxsw_sp_port->dcb.ets->prio_tc, - false, pfc); + pause_en, pfc); if (err) { netdev_err(dev, "Failed to configure port's headroom for PFC\n"); return err; @@ -380,7 +382,7 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev, err_port_pfc_set: __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, - mlxsw_sp_port->dcb.ets->prio_tc, false, + mlxsw_sp_port->dcb.ets->prio_tc, pause_en, mlxsw_sp_port->dcb.pfc); return err; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 81418d629231..90bb93b037ec 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1651,9 +1651,10 @@ static void mlxsw_sp_router_fib4_add_info_destroy(void const *data) const struct mlxsw_sp_router_fib4_add_info *info = data; struct mlxsw_sp_fib_entry *fib_entry = info->fib_entry; struct mlxsw_sp *mlxsw_sp = info->mlxsw_sp; + struct mlxsw_sp_vr *vr = fib_entry->vr; mlxsw_sp_fib_entry_destroy(fib_entry); - mlxsw_sp_vr_put(mlxsw_sp, fib_entry->vr); + mlxsw_sp_vr_put(mlxsw_sp, vr); kfree(info); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index a1ad5e6bdfa8..d1b59cdfacc1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -450,6 +450,8 @@ void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f) kfree(f); + mlxsw_sp_fid_map(mlxsw_sp, fid, false); + mlxsw_sp_fid_op(mlxsw_sp, fid, false); } @@ -997,13 +999,13 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev, } static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, - u16 vid_begin, u16 vid_end, bool init) + u16 vid_begin, u16 vid_end) { struct net_device *dev = mlxsw_sp_port->dev; u16 vid, pvid; int err; - if (!init && !mlxsw_sp_port->bridged) + if (!mlxsw_sp_port->bridged) return -EINVAL; err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, @@ -1014,9 +1016,6 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, return err; } - if (init) - goto out; - pvid = mlxsw_sp_port->pvid; if (pvid >= vid_begin && pvid <= vid_end) { err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0); @@ -1028,7 +1027,6 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end); -out: /* Changing activity bits only if HW operation succeded */ for (vid = vid_begin; vid <= vid_end; vid++) clear_bit(vid, mlxsw_sp_port->active_vlans); @@ -1039,8 +1037,8 @@ out: static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, const struct switchdev_obj_port_vlan *vlan) { - return 
__mlxsw_sp_port_vlans_del(mlxsw_sp_port, - vlan->vid_begin, vlan->vid_end, false); + return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin, + vlan->vid_end); } void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port) @@ -1048,7 +1046,7 @@ void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port) u16 vid; for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) - __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false); + __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid); } static int @@ -1546,32 +1544,6 @@ void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp) mlxsw_sp_fdb_fini(mlxsw_sp); } -int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port) -{ - struct net_device *dev = mlxsw_sp_port->dev; - int err; - - /* Allow only untagged packets to ingress and tag them internally - * with VID 1. - */ - mlxsw_sp_port->pvid = 1; - err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1, - true); - if (err) { - netdev_err(dev, "Unable to init VLANs\n"); - return err; - } - - /* Add implicit VLAN interface in the device, so that untagged - * packets will be classified to the default vFID. - */ - err = mlxsw_sp_port_add_vid(dev, 0, 1); - if (err) - netdev_err(dev, "Failed to configure default vFID\n"); - - return err; -} - void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port) { mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops; diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 470d7696e9fe..ed8e30186400 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -56,6 +56,10 @@ enum { MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34, MLXSW_TRAP_ID_ARPBC = 0x50, MLXSW_TRAP_ID_ARPUC = 0x51, + MLXSW_TRAP_ID_MTUERROR = 0x52, + MLXSW_TRAP_ID_TTLERROR = 0x53, + MLXSW_TRAP_ID_LBERROR = 0x54, + MLXSW_TRAP_ID_OSPF = 0x55, MLXSW_TRAP_ID_IP2ME = 0x5F, MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70, MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90, diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index d0dc28f93c0e..226cb08cc055 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -52,40 +52,94 @@ static bool qed_dcbx_app_ethtype(u32 app_info_bitmap) DCBX_APP_SF_ETHTYPE); } +static bool qed_dcbx_ieee_app_ethtype(u32 app_info_bitmap) +{ + u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE); + + /* Old MFW */ + if (mfw_val == DCBX_APP_SF_IEEE_RESERVED) + return qed_dcbx_app_ethtype(app_info_bitmap); + + return !!(mfw_val == DCBX_APP_SF_IEEE_ETHTYPE); +} + static bool qed_dcbx_app_port(u32 app_info_bitmap) { return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) == DCBX_APP_SF_PORT); } -static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id) +static bool qed_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type) { - return !!(qed_dcbx_app_ethtype(app_info_bitmap) && - proto_id == QED_ETH_TYPE_DEFAULT); + u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE); + + /* Old MFW */ + if (mfw_val == DCBX_APP_SF_IEEE_RESERVED) + return qed_dcbx_app_port(app_info_bitmap); + + return !!(mfw_val == type || mfw_val == DCBX_APP_SF_IEEE_TCP_UDP_PORT); } -static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id) +static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) { - return !!(qed_dcbx_app_port(app_info_bitmap) && - proto_id == QED_TCP_PORT_ISCSI); + bool ethtype; + + if (ieee) + ethtype = 
qed_dcbx_ieee_app_ethtype(app_info_bitmap); + else + ethtype = qed_dcbx_app_ethtype(app_info_bitmap); + + return !!(ethtype && (proto_id == QED_ETH_TYPE_DEFAULT)); } -static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id) +static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) { - return !!(qed_dcbx_app_ethtype(app_info_bitmap) && - proto_id == QED_ETH_TYPE_FCOE); + bool port; + + if (ieee) + port = qed_dcbx_ieee_app_port(app_info_bitmap, + DCBX_APP_SF_IEEE_TCP_PORT); + else + port = qed_dcbx_app_port(app_info_bitmap); + + return !!(port && (proto_id == QED_TCP_PORT_ISCSI)); } -static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id) +static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) { - return !!(qed_dcbx_app_ethtype(app_info_bitmap) && - proto_id == QED_ETH_TYPE_ROCE); + bool ethtype; + + if (ieee) + ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap); + else + ethtype = qed_dcbx_app_ethtype(app_info_bitmap); + + return !!(ethtype && (proto_id == QED_ETH_TYPE_FCOE)); } -static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id) +static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) { - return !!(qed_dcbx_app_port(app_info_bitmap) && - proto_id == QED_UDP_PORT_TYPE_ROCE_V2); + bool ethtype; + + if (ieee) + ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap); + else + ethtype = qed_dcbx_app_ethtype(app_info_bitmap); + + return !!(ethtype && (proto_id == QED_ETH_TYPE_ROCE)); +} + +static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) +{ + bool port; + + if (ieee) + port = qed_dcbx_ieee_app_port(app_info_bitmap, + DCBX_APP_SF_IEEE_UDP_PORT); + else + port = qed_dcbx_app_port(app_info_bitmap); + + return !!(port && (proto_id == QED_UDP_PORT_TYPE_ROCE_V2)); } static void @@ -164,17 +218,17 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, static bool qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn, u32 app_prio_bitmap, - u16 id, enum dcbx_protocol_type *type) + u16 id, enum dcbx_protocol_type *type, bool ieee) { - if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id)) { + if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id, ieee)) { *type = DCBX_PROTOCOL_FCOE; - } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id)) { + } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id, ieee)) { *type = DCBX_PROTOCOL_ROCE; - } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id)) { + } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id, ieee)) { *type = DCBX_PROTOCOL_ISCSI; - } else if (qed_dcbx_default_tlv(app_prio_bitmap, id)) { + } else if (qed_dcbx_default_tlv(app_prio_bitmap, id, ieee)) { *type = DCBX_PROTOCOL_ETH; - } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id)) { + } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id, ieee)) { *type = DCBX_PROTOCOL_ROCE_V2; } else { *type = DCBX_MAX_PROTOCOL_TYPE; @@ -194,17 +248,18 @@ static int qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data, struct dcbx_app_priority_entry *p_tbl, - u32 pri_tc_tbl, int count, bool dcbx_enabled) + u32 pri_tc_tbl, int count, u8 dcbx_version) { u8 tc, priority_map; enum dcbx_protocol_type type; + bool enable, ieee; u16 protocol_id; int priority; - bool enable; int i; DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count); + ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE); /* Parse APP TLV */ for (i = 0; i < count; i++) { protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry, @@ -219,7 +274,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority); if 
(qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, - protocol_id, &type)) { + protocol_id, &type, ieee)) { /* ETH always have the enable bit reset, as it gets * vlan information per packet. For other protocols, * should be set according to the dcbx_enabled @@ -275,15 +330,12 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) struct dcbx_ets_feature *p_ets; struct qed_hw_info *p_info; u32 pri_tc_tbl, flags; - bool dcbx_enabled; + u8 dcbx_version; int num_entries; int rc = 0; - /* If DCBx version is non zero, then negotiation was - * successfuly performed - */ flags = p_hwfn->p_dcbx_info->operational.flags; - dcbx_enabled = !!QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION); + dcbx_version = QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION); p_app = &p_hwfn->p_dcbx_info->operational.features.app; p_tbl = p_app->app_pri_tbl; @@ -295,13 +347,13 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES); rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl, - num_entries, dcbx_enabled); + num_entries, dcbx_version); if (rc) return rc; p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS); data.pf_id = p_hwfn->rel_pf_id; - data.dcbx_enabled = dcbx_enabled; + data.dcbx_enabled = !!dcbx_version; qed_dcbx_dp_protocol(p_hwfn, &data); @@ -400,7 +452,7 @@ static void qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn, struct dcbx_app_priority_feature *p_app, struct dcbx_app_priority_entry *p_tbl, - struct qed_dcbx_params *p_params) + struct qed_dcbx_params *p_params, bool ieee) { struct qed_app_entry *entry; u8 pri_map; @@ -414,15 +466,46 @@ qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn, DCBX_APP_NUM_ENTRIES); for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { entry = &p_params->app_entry[i]; - entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry, - DCBX_APP_SF)); + if (ieee) { + u8 sf_ieee; + u32 val; + + sf_ieee = QED_MFW_GET_FIELD(p_tbl[i].entry, + DCBX_APP_SF_IEEE); + switch (sf_ieee) { + case DCBX_APP_SF_IEEE_RESERVED: + /* Old MFW */ + val = QED_MFW_GET_FIELD(p_tbl[i].entry, + DCBX_APP_SF); + entry->sf_ieee = val ? 
+ QED_DCBX_SF_IEEE_TCP_UDP_PORT : + QED_DCBX_SF_IEEE_ETHTYPE; + break; + case DCBX_APP_SF_IEEE_ETHTYPE: + entry->sf_ieee = QED_DCBX_SF_IEEE_ETHTYPE; + break; + case DCBX_APP_SF_IEEE_TCP_PORT: + entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_PORT; + break; + case DCBX_APP_SF_IEEE_UDP_PORT: + entry->sf_ieee = QED_DCBX_SF_IEEE_UDP_PORT; + break; + case DCBX_APP_SF_IEEE_TCP_UDP_PORT: + entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_UDP_PORT; + break; + } + } else { + entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry, + DCBX_APP_SF)); + } + pri_map = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP); entry->prio = ffs(pri_map) - 1; entry->proto_id = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PROTOCOL_ID); qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, entry->proto_id, - &entry->proto_type); + &entry->proto_type, ieee); } DP_VERBOSE(p_hwfn, QED_MSG_DCB, @@ -483,7 +566,7 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn, bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]); tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]); tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]); - pri_map = be32_to_cpu(p_ets->pri_tc_tbl[0]); + pri_map = p_ets->pri_tc_tbl[0]; for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) { p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i]; p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i]; @@ -500,9 +583,9 @@ qed_dcbx_get_common_params(struct qed_hwfn *p_hwfn, struct dcbx_app_priority_feature *p_app, struct dcbx_app_priority_entry *p_tbl, struct dcbx_ets_feature *p_ets, - u32 pfc, struct qed_dcbx_params *p_params) + u32 pfc, struct qed_dcbx_params *p_params, bool ieee) { - qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params); + qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params, ieee); qed_dcbx_get_ets_data(p_hwfn, p_ets, p_params); qed_dcbx_get_pfc_data(p_hwfn, pfc, p_params); } @@ -516,7 +599,7 @@ qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn, p_feat = &p_hwfn->p_dcbx_info->local_admin.features; qed_dcbx_get_common_params(p_hwfn, &p_feat->app, p_feat->app.app_pri_tbl, &p_feat->ets, - p_feat->pfc, ¶ms->local.params); + p_feat->pfc, ¶ms->local.params, false); params->local.valid = true; } @@ -529,7 +612,7 @@ qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn, p_feat = &p_hwfn->p_dcbx_info->remote.features; qed_dcbx_get_common_params(p_hwfn, &p_feat->app, p_feat->app.app_pri_tbl, &p_feat->ets, - p_feat->pfc, ¶ms->remote.params); + p_feat->pfc, ¶ms->remote.params, false); params->remote.valid = true; } @@ -574,7 +657,8 @@ qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn, qed_dcbx_get_common_params(p_hwfn, &p_feat->app, p_feat->app.app_pri_tbl, &p_feat->ets, - p_feat->pfc, ¶ms->operational.params); + p_feat->pfc, ¶ms->operational.params, + p_operational->ieee); qed_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, p_results); err = QED_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR); p_operational->err = err; @@ -944,7 +1028,6 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn, val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4)); p_ets->pri_tc_tbl[0] |= val; } - p_ets->pri_tc_tbl[0] = cpu_to_be32(p_ets->pri_tc_tbl[0]); for (i = 0; i < 2; i++) { p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]); p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]); @@ -954,7 +1037,7 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn, static void qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn, struct dcbx_app_priority_feature *p_app, - struct qed_dcbx_params *p_params) + struct qed_dcbx_params *p_params, bool ieee) { u32 *entry; int i; @@ -975,12 +1058,36 @@ qed_dcbx_set_app_data(struct qed_hwfn 
*p_hwfn, for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { entry = &p_app->app_pri_tbl[i].entry; - *entry &= ~DCBX_APP_SF_MASK; - if (p_params->app_entry[i].ethtype) - *entry |= ((u32)DCBX_APP_SF_ETHTYPE << - DCBX_APP_SF_SHIFT); - else - *entry |= ((u32)DCBX_APP_SF_PORT << DCBX_APP_SF_SHIFT); + if (ieee) { + *entry &= ~DCBX_APP_SF_IEEE_MASK; + switch (p_params->app_entry[i].sf_ieee) { + case QED_DCBX_SF_IEEE_ETHTYPE: + *entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE << + DCBX_APP_SF_IEEE_SHIFT); + break; + case QED_DCBX_SF_IEEE_TCP_PORT: + *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT << + DCBX_APP_SF_IEEE_SHIFT); + break; + case QED_DCBX_SF_IEEE_UDP_PORT: + *entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT << + DCBX_APP_SF_IEEE_SHIFT); + break; + case QED_DCBX_SF_IEEE_TCP_UDP_PORT: + *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT << + DCBX_APP_SF_IEEE_SHIFT); + break; + } + } else { + *entry &= ~DCBX_APP_SF_MASK; + if (p_params->app_entry[i].ethtype) + *entry |= ((u32)DCBX_APP_SF_ETHTYPE << + DCBX_APP_SF_SHIFT); + else + *entry |= ((u32)DCBX_APP_SF_PORT << + DCBX_APP_SF_SHIFT); + } + *entry &= ~DCBX_APP_PROTOCOL_ID_MASK; *entry |= ((u32)p_params->app_entry[i].proto_id << DCBX_APP_PROTOCOL_ID_SHIFT); @@ -995,15 +1102,19 @@ qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn, struct dcbx_local_params *local_admin, struct qed_dcbx_set *params) { + bool ieee = false; + local_admin->flags = 0; memcpy(&local_admin->features, &p_hwfn->p_dcbx_info->operational.features, sizeof(local_admin->features)); - if (params->enabled) + if (params->enabled) { local_admin->config = params->ver_num; - else + ieee = !!(params->ver_num & DCBX_CONFIG_VERSION_IEEE); + } else { local_admin->config = DCBX_CONFIG_VERSION_DISABLED; + } if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG) qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc, @@ -1015,7 +1126,7 @@ qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn, if (params->override_flags & QED_DCBX_OVERRIDE_APP_CFG) qed_dcbx_set_app_data(p_hwfn, &local_admin->features.app, - ¶ms->config.params); + ¶ms->config.params, ieee); } int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -1596,8 +1707,10 @@ static int qed_dcbnl_setapp(struct qed_dev *cdev, if ((entry->ethtype == ethtype) && (entry->proto_id == idval)) break; /* First empty slot */ - if (!entry->proto_id) + if (!entry->proto_id) { + dcbx_set.config.params.num_app_entries++; break; + } } if (i == QED_DCBX_MAX_APP_PROTOCOL) { @@ -2117,8 +2230,10 @@ int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app) (entry->proto_id == app->protocol)) break; /* First empty slot */ - if (!entry->proto_id) + if (!entry->proto_id) { + dcbx_set.config.params.num_app_entries++; break; + } } if (i == QED_DCBX_MAX_APP_PROTOCOL) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 592784019994..6f9d3b831a2a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -6850,6 +6850,14 @@ struct dcbx_app_priority_entry { #define DCBX_APP_SF_SHIFT 8 #define DCBX_APP_SF_ETHTYPE 0 #define DCBX_APP_SF_PORT 1 +#define DCBX_APP_SF_IEEE_MASK 0x0000f000 +#define DCBX_APP_SF_IEEE_SHIFT 12 +#define DCBX_APP_SF_IEEE_RESERVED 0 +#define DCBX_APP_SF_IEEE_ETHTYPE 1 +#define DCBX_APP_SF_IEEE_TCP_PORT 2 +#define DCBX_APP_SF_IEEE_UDP_PORT 3 +#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4 + #define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000 #define DCBX_APP_PROTOCOL_ID_SHIFT 16 }; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h 
b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index fd973f4f16c7..49bad00a0f8f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -37,8 +37,8 @@ #define _QLCNIC_LINUX_MAJOR 5 #define _QLCNIC_LINUX_MINOR 3 -#define _QLCNIC_LINUX_SUBVERSION 64 -#define QLCNIC_LINUX_VERSIONID "5.3.64" +#define _QLCNIC_LINUX_SUBVERSION 65 +#define QLCNIC_LINUX_VERSIONID "5.3.65" #define QLCNIC_DRV_IDC_VER 0x01 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 87c642d3b075..fedd7366713c 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -102,7 +102,6 @@ #define QLCNIC_RESPONSE_DESC 0x05 #define QLCNIC_LRO_DESC 0x12 -#define QLCNIC_TX_POLL_BUDGET 128 #define QLCNIC_TCP_HDR_SIZE 20 #define QLCNIC_TCP_TS_OPTION_SIZE 12 #define QLCNIC_FETCH_RING_ID(handle) ((handle) >> 63) @@ -2008,7 +2007,6 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget) struct qlcnic_host_tx_ring *tx_ring; struct qlcnic_adapter *adapter; - budget = QLCNIC_TX_POLL_BUDGET; tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi); adapter = tx_ring->adapter; work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h index 017d8c2c8285..24061b9b92e8 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h @@ -156,10 +156,8 @@ struct qlcnic_vf_info { spinlock_t vlan_list_lock; /* Lock for VLAN list */ }; -struct qlcnic_async_work_list { +struct qlcnic_async_cmd { struct list_head list; - struct work_struct work; - void *ptr; struct qlcnic_cmd_args *cmd; }; @@ -168,7 +166,10 @@ struct qlcnic_back_channel { struct workqueue_struct *bc_trans_wq; struct workqueue_struct *bc_async_wq; struct workqueue_struct *bc_flr_wq; - struct list_head async_list; + struct qlcnic_adapter *adapter; + struct list_head async_cmd_list; + struct work_struct vf_async_work; + spinlock_t queue_lock; /* async_cmd_list queue lock */ }; struct qlcnic_sriov { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 7327b729ba2e..d7107055ec60 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -29,6 +29,7 @@ #define QLC_83XX_VF_RESET_FAIL_THRESH 8 #define QLC_BC_CMD_MAX_RETRY_CNT 5 +static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work); static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *); static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32); static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *); @@ -177,7 +178,10 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs) } bc->bc_async_wq = wq; - INIT_LIST_HEAD(&bc->async_list); + INIT_LIST_HEAD(&bc->async_cmd_list); + INIT_WORK(&bc->vf_async_work, qlcnic_sriov_handle_async_issue_cmd); + spin_lock_init(&bc->queue_lock); + bc->adapter = adapter; for (i = 0; i < num_vfs; i++) { vf = &sriov->vf_info[i]; @@ -1517,17 +1521,21 @@ static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac, void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc) { - struct 
list_head *head = &bc->async_list; - struct qlcnic_async_work_list *entry; + struct list_head *head = &bc->async_cmd_list; + struct qlcnic_async_cmd *entry; flush_workqueue(bc->bc_async_wq); + cancel_work_sync(&bc->vf_async_work); + + spin_lock(&bc->queue_lock); while (!list_empty(head)) { - entry = list_entry(head->next, struct qlcnic_async_work_list, + entry = list_entry(head->next, struct qlcnic_async_cmd, list); - cancel_work_sync(&entry->work); list_del(&entry->list); + kfree(entry->cmd); kfree(entry); } + spin_unlock(&bc->queue_lock); } void qlcnic_sriov_vf_set_multi(struct net_device *netdev) @@ -1587,57 +1595,64 @@ void qlcnic_sriov_vf_set_multi(struct net_device *netdev) static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work) { - struct qlcnic_async_work_list *entry; - struct qlcnic_adapter *adapter; + struct qlcnic_async_cmd *entry, *tmp; + struct qlcnic_back_channel *bc; struct qlcnic_cmd_args *cmd; + struct list_head *head; + LIST_HEAD(del_list); + + bc = container_of(work, struct qlcnic_back_channel, vf_async_work); + head = &bc->async_cmd_list; + + spin_lock(&bc->queue_lock); + list_splice_init(head, &del_list); + spin_unlock(&bc->queue_lock); + + list_for_each_entry_safe(entry, tmp, &del_list, list) { + list_del(&entry->list); + cmd = entry->cmd; + __qlcnic_sriov_issue_cmd(bc->adapter, cmd); + kfree(entry); + } + + if (!list_empty(head)) + queue_work(bc->bc_async_wq, &bc->vf_async_work); - entry = container_of(work, struct qlcnic_async_work_list, work); - adapter = entry->ptr; - cmd = entry->cmd; - __qlcnic_sriov_issue_cmd(adapter, cmd); return; } -static struct qlcnic_async_work_list * -qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc) +static struct qlcnic_async_cmd * +qlcnic_sriov_alloc_async_cmd(struct qlcnic_back_channel *bc, + struct qlcnic_cmd_args *cmd) { - struct list_head *node; - struct qlcnic_async_work_list *entry = NULL; - u8 empty = 0; + struct qlcnic_async_cmd *entry = NULL; - list_for_each(node, &bc->async_list) { - entry = list_entry(node, struct qlcnic_async_work_list, list); - if (!work_pending(&entry->work)) { - empty = 1; - break; - } - } + entry = kzalloc(sizeof(*entry), GFP_ATOMIC); + if (!entry) + return NULL; - if (!empty) { - entry = kzalloc(sizeof(struct qlcnic_async_work_list), - GFP_ATOMIC); - if (entry == NULL) - return NULL; - list_add_tail(&entry->list, &bc->async_list); - } + entry->cmd = cmd; + + spin_lock(&bc->queue_lock); + list_add_tail(&entry->list, &bc->async_cmd_list); + spin_unlock(&bc->queue_lock); return entry; } static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc, - work_func_t func, void *data, struct qlcnic_cmd_args *cmd) { - struct qlcnic_async_work_list *entry = NULL; + struct qlcnic_async_cmd *entry = NULL; - entry = qlcnic_sriov_get_free_node_async_work(bc); - if (!entry) + entry = qlcnic_sriov_alloc_async_cmd(bc, cmd); + if (!entry) { + qlcnic_free_mbx_args(cmd); + kfree(cmd); return; + } - entry->ptr = data; - entry->cmd = cmd; - INIT_WORK(&entry->work, func); - queue_work(bc->bc_async_wq, &entry->work); + queue_work(bc->bc_async_wq, &bc->vf_async_work); } static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter, @@ -1649,8 +1664,8 @@ static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter, if (adapter->need_fw_reset) return -EIO; - qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd, - adapter, cmd); + qlcnic_sriov_schedule_async_cmd(bc, cmd); + return 0; } diff --git a/drivers/net/ethernet/ti/cpsw.c 
b/drivers/net/ethernet/ti/cpsw.c index c51f34693eae..f85d605e4560 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -734,6 +734,7 @@ static void cpsw_rx_handler(void *token, int len, int status) netif_receive_skb(skb); ndev->stats.rx_bytes += len; ndev->stats.rx_packets++; + kmemleak_not_leak(new_skb); } else { ndev->stats.rx_dropped++; new_skb = skb; @@ -1325,6 +1326,7 @@ static int cpsw_ndo_open(struct net_device *ndev) kfree_skb(skb); goto err_cleanup; } + kmemleak_not_leak(skb); } /* continue even if we didn't manage to submit all * receive descs diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index 01a77145a0fa..8fd131207ee1 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -166,6 +166,7 @@ static struct platform_driver tsi_eth_driver = { static void tsi108_timed_checker(unsigned long dev_ptr); +#ifdef DEBUG static void dump_eth_one(struct net_device *dev) { struct tsi108_prv_data *data = netdev_priv(dev); @@ -190,6 +191,7 @@ static void dump_eth_one(struct net_device *dev) TSI_READ(TSI108_EC_RXESTAT), TSI_READ(TSI108_EC_RXERR), data->rxpending); } +#endif /* Synchronization is needed between the thread and up/down events. * Note that the PHY is accessed through the same registers for both diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 467fb8b4d083..591af71eae56 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -644,12 +644,6 @@ struct netvsc_reconfig { u32 event; }; -struct garp_wrk { - struct work_struct dwrk; - struct net_device *netdev; - struct netvsc_device *netvsc_dev; -}; - /* The context of the netvsc device */ struct net_device_context { /* point back to our device context */ @@ -667,7 +661,6 @@ struct net_device_context { struct work_struct work; u32 msg_enable; /* debug level */ - struct garp_wrk gwrk; struct netvsc_stats __percpu *tx_stats; struct netvsc_stats __percpu *rx_stats; @@ -678,6 +671,15 @@ struct net_device_context { /* the device is going away */ bool start_remove; + + /* State to manage the associated VF interface. */ + struct net_device *vf_netdev; + bool vf_inject; + atomic_t vf_use_cnt; + /* 1: allocated, serial number is valid. 0: not allocated */ + u32 vf_alloc; + /* Serial number of the VF to team with */ + u32 vf_serial; }; /* Per netvsc device */ @@ -733,15 +735,7 @@ struct netvsc_device { u32 max_pkt; /* max number of pkt in one send, e.g. 8 */ u32 pkt_align; /* alignment bytes, e.g. 8 */ - /* 1: allocated, serial number is valid. 0: not allocated */ - u32 vf_alloc; - /* Serial number of the VF to team with */ - u32 vf_serial; atomic_t open_cnt; - /* State to manage the associated VF interface. 
*/ - bool vf_inject; - struct net_device *vf_netdev; - atomic_t vf_use_cnt; }; static inline struct netvsc_device * diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 20e09174ff62..410fb8e81376 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -77,13 +77,9 @@ static struct netvsc_device *alloc_net_device(void) init_waitqueue_head(&net_device->wait_drain); net_device->destroy = false; atomic_set(&net_device->open_cnt, 0); - atomic_set(&net_device->vf_use_cnt, 0); net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; - net_device->vf_netdev = NULL; - net_device->vf_inject = false; - return net_device; } @@ -1106,16 +1102,16 @@ static void netvsc_send_table(struct hv_device *hdev, nvscdev->send_table[i] = tab[i]; } -static void netvsc_send_vf(struct netvsc_device *nvdev, +static void netvsc_send_vf(struct net_device_context *net_device_ctx, struct nvsp_message *nvmsg) { - nvdev->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; - nvdev->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; + net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; + net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; } static inline void netvsc_receive_inband(struct hv_device *hdev, - struct netvsc_device *nvdev, - struct nvsp_message *nvmsg) + struct net_device_context *net_device_ctx, + struct nvsp_message *nvmsg) { switch (nvmsg->hdr.msg_type) { case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE: @@ -1123,7 +1119,7 @@ static inline void netvsc_receive_inband(struct hv_device *hdev, break; case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION: - netvsc_send_vf(nvdev, nvmsg); + netvsc_send_vf(net_device_ctx, nvmsg); break; } } @@ -1136,6 +1132,7 @@ static void netvsc_process_raw_pkt(struct hv_device *device, struct vmpacket_descriptor *desc) { struct nvsp_message *nvmsg; + struct net_device_context *net_device_ctx = netdev_priv(ndev); nvmsg = (struct nvsp_message *)((unsigned long) desc + (desc->offset8 << 3)); @@ -1150,7 +1147,7 @@ static void netvsc_process_raw_pkt(struct hv_device *device, break; case VM_PKT_DATA_INBAND: - netvsc_receive_inband(device, net_device, nvmsg); + netvsc_receive_inband(device, net_device_ctx, nvmsg); break; default: diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 41bd952cc28d..3ba29fc80d05 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -658,20 +658,19 @@ int netvsc_recv_callback(struct hv_device *device_obj, struct sk_buff *skb; struct sk_buff *vf_skb; struct netvsc_stats *rx_stats; - struct netvsc_device *netvsc_dev = net_device_ctx->nvdev; u32 bytes_recvd = packet->total_data_buflen; int ret = 0; if (!net || net->reg_state != NETREG_REGISTERED) return NVSP_STAT_FAIL; - if (READ_ONCE(netvsc_dev->vf_inject)) { - atomic_inc(&netvsc_dev->vf_use_cnt); - if (!READ_ONCE(netvsc_dev->vf_inject)) { + if (READ_ONCE(net_device_ctx->vf_inject)) { + atomic_inc(&net_device_ctx->vf_use_cnt); + if (!READ_ONCE(net_device_ctx->vf_inject)) { /* * We raced; just move on. */ - atomic_dec(&netvsc_dev->vf_use_cnt); + atomic_dec(&net_device_ctx->vf_use_cnt); goto vf_injection_done; } @@ -683,17 +682,19 @@ int netvsc_recv_callback(struct hv_device *device_obj, * the host). Deliver these via the VF interface * in the guest. 
*/ - vf_skb = netvsc_alloc_recv_skb(netvsc_dev->vf_netdev, packet, - csum_info, *data, vlan_tci); + vf_skb = netvsc_alloc_recv_skb(net_device_ctx->vf_netdev, + packet, csum_info, *data, + vlan_tci); if (vf_skb != NULL) { - ++netvsc_dev->vf_netdev->stats.rx_packets; - netvsc_dev->vf_netdev->stats.rx_bytes += bytes_recvd; + ++net_device_ctx->vf_netdev->stats.rx_packets; + net_device_ctx->vf_netdev->stats.rx_bytes += + bytes_recvd; netif_receive_skb(vf_skb); } else { ++net->stats.rx_dropped; ret = NVSP_STAT_FAIL; } - atomic_dec(&netvsc_dev->vf_use_cnt); + atomic_dec(&net_device_ctx->vf_use_cnt); return ret; } @@ -1150,17 +1151,6 @@ static void netvsc_free_netdev(struct net_device *netdev) free_netdev(netdev); } -static void netvsc_notify_peers(struct work_struct *wrk) -{ - struct garp_wrk *gwrk; - - gwrk = container_of(wrk, struct garp_wrk, dwrk); - - netdev_notify_peers(gwrk->netdev); - - atomic_dec(&gwrk->netvsc_dev->vf_use_cnt); -} - static struct net_device *get_netvsc_net_device(char *mac) { struct net_device *dev, *found = NULL; @@ -1203,7 +1193,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev) net_device_ctx = netdev_priv(ndev); netvsc_dev = net_device_ctx->nvdev; - if (netvsc_dev == NULL) + if (!netvsc_dev || net_device_ctx->vf_netdev) return NOTIFY_DONE; netdev_info(ndev, "VF registering: %s\n", vf_netdev->name); @@ -1211,10 +1201,23 @@ static int netvsc_register_vf(struct net_device *vf_netdev) * Take a reference on the module. */ try_module_get(THIS_MODULE); - netvsc_dev->vf_netdev = vf_netdev; + net_device_ctx->vf_netdev = vf_netdev; return NOTIFY_OK; } +static void netvsc_inject_enable(struct net_device_context *net_device_ctx) +{ + net_device_ctx->vf_inject = true; +} + +static void netvsc_inject_disable(struct net_device_context *net_device_ctx) +{ + net_device_ctx->vf_inject = false; + + /* Wait for currently active users to drain out. */ + while (atomic_read(&net_device_ctx->vf_use_cnt) != 0) + udelay(50); +} static int netvsc_vf_up(struct net_device *vf_netdev) { @@ -1233,11 +1236,11 @@ static int netvsc_vf_up(struct net_device *vf_netdev) net_device_ctx = netdev_priv(ndev); netvsc_dev = net_device_ctx->nvdev; - if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL)) + if (!netvsc_dev || !net_device_ctx->vf_netdev) return NOTIFY_DONE; netdev_info(ndev, "VF up: %s\n", vf_netdev->name); - netvsc_dev->vf_inject = true; + netvsc_inject_enable(net_device_ctx); /* * Open the device before switching data path. @@ -1252,15 +1255,8 @@ static int netvsc_vf_up(struct net_device *vf_netdev) netif_carrier_off(ndev); - /* - * Now notify peers. We are scheduling work to - * notify peers; take a reference to prevent - * the VF interface from vanishing. - */ - atomic_inc(&netvsc_dev->vf_use_cnt); - net_device_ctx->gwrk.netdev = vf_netdev; - net_device_ctx->gwrk.netvsc_dev = netvsc_dev; - schedule_work(&net_device_ctx->gwrk.dwrk); + /* Now notify peers through VF device. */ + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev); return NOTIFY_OK; } @@ -1283,29 +1279,18 @@ static int netvsc_vf_down(struct net_device *vf_netdev) net_device_ctx = netdev_priv(ndev); netvsc_dev = net_device_ctx->nvdev; - if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL)) + if (!netvsc_dev || !net_device_ctx->vf_netdev) return NOTIFY_DONE; netdev_info(ndev, "VF down: %s\n", vf_netdev->name); - netvsc_dev->vf_inject = false; - /* - * Wait for currently active users to - * drain out. 
- */ - - while (atomic_read(&netvsc_dev->vf_use_cnt) != 0) - udelay(50); + netvsc_inject_disable(net_device_ctx); netvsc_switch_datapath(ndev, false); netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name); rndis_filter_close(netvsc_dev); netif_carrier_on(ndev); - /* - * Notify peers. - */ - atomic_inc(&netvsc_dev->vf_use_cnt); - net_device_ctx->gwrk.netdev = ndev; - net_device_ctx->gwrk.netvsc_dev = netvsc_dev; - schedule_work(&net_device_ctx->gwrk.dwrk); + + /* Now notify peers through netvsc device. */ + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev); return NOTIFY_OK; } @@ -1327,11 +1312,11 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev) net_device_ctx = netdev_priv(ndev); netvsc_dev = net_device_ctx->nvdev; - if (netvsc_dev == NULL) + if (!netvsc_dev || !net_device_ctx->vf_netdev) return NOTIFY_DONE; netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name); - - netvsc_dev->vf_netdev = NULL; + netvsc_inject_disable(net_device_ctx); + net_device_ctx->vf_netdev = NULL; module_put(THIS_MODULE); return NOTIFY_OK; } @@ -1377,11 +1362,14 @@ static int netvsc_probe(struct hv_device *dev, INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); INIT_WORK(&net_device_ctx->work, do_set_multicast); - INIT_WORK(&net_device_ctx->gwrk.dwrk, netvsc_notify_peers); spin_lock_init(&net_device_ctx->lock); INIT_LIST_HEAD(&net_device_ctx->reconfig_events); + atomic_set(&net_device_ctx->vf_use_cnt, 0); + net_device_ctx->vf_netdev = NULL; + net_device_ctx->vf_inject = false; + net->netdev_ops = &device_ops; net->hw_features = NETVSC_HW_FEATURES; @@ -1494,8 +1482,13 @@ static int netvsc_netdev_event(struct notifier_block *this, { struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); - /* Avoid Vlan, Bonding dev with same MAC registering as VF */ - if (event_dev->priv_flags & (IFF_802_1Q_VLAN | IFF_BONDING)) + /* Avoid Vlan dev with same MAC registering as VF */ + if (event_dev->priv_flags & IFF_802_1Q_VLAN) + return NOTIFY_DONE; + + /* Avoid Bonding master dev with same MAC registering as VF */ + if (event_dev->priv_flags & IFF_BONDING && + event_dev->flags & IFF_MASTER) return NOTIFY_DONE; switch (event) { diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index d13e6e15d7b5..351e701eb043 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -270,6 +270,7 @@ struct macsec_dev { struct pcpu_secy_stats __percpu *stats; struct list_head secys; struct gro_cells gro_cells; + unsigned int nest_level; }; /** @@ -2699,6 +2700,8 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, #define MACSEC_FEATURES \ (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST) +static struct lock_class_key macsec_netdev_addr_lock_key; + static int macsec_dev_init(struct net_device *dev) { struct macsec_dev *macsec = macsec_priv(dev); @@ -2910,6 +2913,13 @@ static int macsec_get_iflink(const struct net_device *dev) return macsec_priv(dev)->real_dev->ifindex; } + +static int macsec_get_nest_level(struct net_device *dev) +{ + return macsec_priv(dev)->nest_level; +} + + static const struct net_device_ops macsec_netdev_ops = { .ndo_init = macsec_dev_init, .ndo_uninit = macsec_dev_uninit, @@ -2923,6 +2933,7 @@ static const struct net_device_ops macsec_netdev_ops = { .ndo_start_xmit = macsec_start_xmit, .ndo_get_stats64 = macsec_get_stats64, .ndo_get_iflink = macsec_get_iflink, + .ndo_get_lock_subclass = macsec_get_nest_level, }; static const struct device_type macsec_type = { @@ -3047,22 +3058,31 @@ static void macsec_del_dev(struct macsec_dev *macsec) } } 
+static void macsec_common_dellink(struct net_device *dev, struct list_head *head) +{ + struct macsec_dev *macsec = macsec_priv(dev); + struct net_device *real_dev = macsec->real_dev; + + unregister_netdevice_queue(dev, head); + list_del_rcu(&macsec->secys); + macsec_del_dev(macsec); + netdev_upper_dev_unlink(real_dev, dev); + + macsec_generation++; +} + static void macsec_dellink(struct net_device *dev, struct list_head *head) { struct macsec_dev *macsec = macsec_priv(dev); struct net_device *real_dev = macsec->real_dev; struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); - macsec_generation++; + macsec_common_dellink(dev, head); - unregister_netdevice_queue(dev, head); - list_del_rcu(&macsec->secys); if (list_empty(&rxd->secys)) { netdev_rx_handler_unregister(real_dev); kfree(rxd); } - - macsec_del_dev(macsec); } static int register_macsec_dev(struct net_device *real_dev, @@ -3181,6 +3201,16 @@ static int macsec_newlink(struct net *net, struct net_device *dev, dev_hold(real_dev); + macsec->nest_level = dev_get_nest_level(real_dev) + 1; + netdev_lockdep_set_classes(dev); + lockdep_set_class_and_subclass(&dev->addr_list_lock, + &macsec_netdev_addr_lock_key, + macsec_get_nest_level(dev)); + + err = netdev_upper_dev_link(real_dev, dev); + if (err < 0) + goto unregister; + /* need to be already registered so that ->init has run and * the MAC addr is set */ @@ -3193,12 +3223,12 @@ static int macsec_newlink(struct net *net, struct net_device *dev, if (rx_handler && sci_exists(real_dev, sci)) { err = -EBUSY; - goto unregister; + goto unlink; } err = macsec_add_dev(dev, sci, icv_len); if (err) - goto unregister; + goto unlink; if (data) macsec_changelink_common(dev, data); @@ -3213,6 +3243,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev, del_dev: macsec_del_dev(macsec); +unlink: + netdev_upper_dev_unlink(real_dev, dev); unregister: unregister_netdevice(dev); return err; @@ -3382,8 +3414,12 @@ static int macsec_notify(struct notifier_block *this, unsigned long event, rxd = macsec_data_rtnl(real_dev); list_for_each_entry_safe(m, n, &rxd->secys, secys) { - macsec_dellink(m->secy.netdev, &head); + macsec_common_dellink(m->secy.netdev, &head); } + + netdev_rx_handler_unregister(real_dev); + kfree(rxd); + unregister_netdevice_many(&head); break; } diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index cd9b53834bf6..3234fcdea317 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -1315,7 +1315,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, vlan->dev = dev; vlan->port = port; vlan->set_features = MACVLAN_FEATURES; - vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1; + vlan->nest_level = dev_get_nest_level(lowerdev) + 1; vlan->mode = MACVLAN_MODE_VEPA; if (data && data[IFLA_MACVLAN_MODE]) diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index a38c0dac514b..070e3290aa6e 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -275,7 +275,6 @@ static void macvtap_put_queue(struct macvtap_queue *q) rtnl_unlock(); synchronize_rcu(); - skb_array_cleanup(&q->skb_array); sock_put(&q->sk); } @@ -533,10 +532,8 @@ static void macvtap_sock_write_space(struct sock *sk) static void macvtap_sock_destruct(struct sock *sk) { struct macvtap_queue *q = container_of(sk, struct macvtap_queue, sk); - struct sk_buff *skb; - while ((skb = skb_array_consume(&q->skb_array)) != NULL) - kfree_skb(skb); + skb_array_cleanup(&q->skb_array); } static int macvtap_open(struct inode *inode, struct file *file) diff 
--git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 1882d9828c99..053e87905b94 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -677,17 +677,28 @@ static void kszphy_get_stats(struct phy_device *phydev, data[i] = kszphy_get_stat(phydev, i); } -static int kszphy_resume(struct phy_device *phydev) +static int kszphy_suspend(struct phy_device *phydev) { - int value; + /* Disable PHY Interrupts */ + if (phy_interrupt_is_valid(phydev)) { + phydev->interrupts = PHY_INTERRUPT_DISABLED; + if (phydev->drv->config_intr) + phydev->drv->config_intr(phydev); + } - mutex_lock(&phydev->lock); + return genphy_suspend(phydev); +} - value = phy_read(phydev, MII_BMCR); - phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN); +static int kszphy_resume(struct phy_device *phydev) +{ + genphy_resume(phydev); - kszphy_config_intr(phydev); - mutex_unlock(&phydev->lock); + /* Enable PHY Interrupts */ + if (phy_interrupt_is_valid(phydev)) { + phydev->interrupts = PHY_INTERRUPT_ENABLED; + if (phydev->drv->config_intr) + phydev->drv->config_intr(phydev); + } return 0; } @@ -900,7 +911,7 @@ static struct phy_driver ksphy_driver[] = { .get_sset_count = kszphy_get_sset_count, .get_strings = kszphy_get_strings, .get_stats = kszphy_get_stats, - .suspend = genphy_suspend, + .suspend = kszphy_suspend, .resume = kszphy_resume, }, { .phy_id = PHY_ID_KSZ8061, diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index da4e3d6632f6..c0dda6fc0921 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1811,7 +1811,7 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, fl4.flowi4_mark = skb->mark; fl4.flowi4_proto = IPPROTO_UDP; fl4.daddr = daddr; - fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr; + fl4.saddr = *saddr; rt = ip_route_output_key(vxlan->net, &fl4); if (!IS_ERR(rt)) { @@ -1847,7 +1847,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_oif = oif; fl6.daddr = *daddr; - fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr; + fl6.saddr = *saddr; fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label); fl6.flowi6_mark = skb->mark; fl6.flowi6_proto = IPPROTO_UDP; @@ -1920,7 +1920,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, struct rtable *rt = NULL; const struct iphdr *old_iph; union vxlan_addr *dst; - union vxlan_addr remote_ip; + union vxlan_addr remote_ip, local_ip; + union vxlan_addr *src; struct vxlan_metadata _md; struct vxlan_metadata *md = &_md; __be16 src_port = 0, dst_port; @@ -1938,6 +1939,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; vni = rdst->remote_vni; dst = &rdst->remote_ip; + src = &vxlan->cfg.saddr; dst_cache = &rdst->dst_cache; } else { if (!info) { @@ -1948,11 +1950,15 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, dst_port = info->key.tp_dst ? 
: vxlan->cfg.dst_port; vni = vxlan_tun_id_to_vni(info->key.tun_id); remote_ip.sa.sa_family = ip_tunnel_info_af(info); - if (remote_ip.sa.sa_family == AF_INET) + if (remote_ip.sa.sa_family == AF_INET) { remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst; - else + local_ip.sin.sin_addr.s_addr = info->key.u.ipv4.src; + } else { remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst; + local_ip.sin6.sin6_addr = info->key.u.ipv6.src; + } dst = &remote_ip; + src = &local_ip; dst_cache = &info->dst_cache; } @@ -1992,15 +1998,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } if (dst->sa.sa_family == AF_INET) { - __be32 saddr; - if (!vxlan->vn4_sock) goto drop; sk = vxlan->vn4_sock->sock->sk; rt = vxlan_get_route(vxlan, skb, rdst ? rdst->remote_ifindex : 0, tos, - dst->sin.sin_addr.s_addr, &saddr, + dst->sin.sin_addr.s_addr, + &src->sin.sin_addr.s_addr, dst_cache, info); if (IS_ERR(rt)) { netdev_dbg(dev, "no route to %pI4\n", @@ -2017,7 +2022,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } /* Bypass encapsulation if the destination is local */ - if (rt->rt_flags & RTCF_LOCAL && + if (!info && rt->rt_flags & RTCF_LOCAL && !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { struct vxlan_dev *dst_vxlan; @@ -2043,13 +2048,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, if (err < 0) goto xmit_tx_error; - udp_tunnel_xmit_skb(rt, sk, skb, saddr, + udp_tunnel_xmit_skb(rt, sk, skb, src->sin.sin_addr.s_addr, dst->sin.sin_addr.s_addr, tos, ttl, df, src_port, dst_port, xnet, !udp_sum); #if IS_ENABLED(CONFIG_IPV6) } else { struct dst_entry *ndst; - struct in6_addr saddr; u32 rt6i_flags; if (!vxlan->vn6_sock) @@ -2058,7 +2062,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, ndst = vxlan6_get_route(vxlan, skb, rdst ? 
rdst->remote_ifindex : 0, tos, - label, &dst->sin6.sin6_addr, &saddr, + label, &dst->sin6.sin6_addr, + &src->sin6.sin6_addr, dst_cache, info); if (IS_ERR(ndst)) { netdev_dbg(dev, "no route to %pI6\n", @@ -2077,7 +2082,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, /* Bypass encapsulation if the destination is local */ rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags; - if (rt6i_flags & RTF_LOCAL && + if (!info && rt6i_flags & RTF_LOCAL && !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { struct vxlan_dev *dst_vxlan; @@ -2104,7 +2109,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, return; } udp_tunnel6_xmit_skb(ndst, sk, skb, dev, - &saddr, &dst->sin6.sin6_addr, tos, ttl, + &src->sin6.sin6_addr, + &dst->sin6.sin6_addr, tos, ttl, label, src_port, dst_port, !udp_sum); #endif } diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 1d689169da76..9e1f2d9c9865 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -5700,10 +5700,11 @@ out: mutex_unlock(&wl->mutex); } -static u32 wlcore_op_get_expected_throughput(struct ieee80211_sta *sta) +static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw, + struct ieee80211_sta *sta) { struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv; - struct wl1271 *wl = wl_sta->wl; + struct wl1271 *wl = hw->priv; u8 hlid = wl_sta->hlid; /* return in units of Kbps */ diff --git a/drivers/of/base.c b/drivers/of/base.c index 7792266db259..3ce69536a7b3 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -1631,8 +1631,7 @@ static int __of_parse_phandle_with_args(const struct device_node *np, */ err: - if (it.node) - of_node_put(it.node); + of_node_put(it.node); return rc; } @@ -2343,20 +2342,13 @@ struct device_node *of_graph_get_endpoint_by_regs( const struct device_node *parent, int port_reg, int reg) { struct of_endpoint endpoint; - struct device_node *node, *prev_node = NULL; - - while (1) { - node = of_graph_get_next_endpoint(parent, prev_node); - of_node_put(prev_node); - if (!node) - break; + struct device_node *node = NULL; + for_each_endpoint_of_node(parent, node) { of_graph_parse_endpoint(node, &endpoint); if (((port_reg == -1) || (endpoint.port == port_reg)) && ((reg == -1) || (endpoint.id == reg))) return node; - - prev_node = node; } return NULL; diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 55f1b8391149..085c6389afd1 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -517,7 +517,7 @@ static void *__unflatten_device_tree(const void *blob, pr_warning("End of tree marker overwritten: %08x\n", be32_to_cpup(mem + size)); - if (detached) { + if (detached && mynodes) { of_node_set_flag(*mynodes, OF_DETACHED); pr_debug("unflattened tree is detached\n"); } diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 89a71c6074fc..a2e68f740eda 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -544,12 +544,15 @@ void __init of_irq_init(const struct of_device_id *matches) list_del(&desc->list); + of_node_set_flag(desc->dev, OF_POPULATED); + pr_debug("of_irq_init: init %s (%p), parent %p\n", desc->dev->full_name, desc->dev, desc->interrupt_parent); ret = desc->irq_init_cb(desc->dev, desc->interrupt_parent); if (ret) { + of_node_clear_flag(desc->dev, OF_POPULATED); kfree(desc); continue; } @@ -559,8 +562,6 @@ void __init of_irq_init(const struct of_device_id *matches) * its children can get processed in a subsequent pass. 
*/ list_add_tail(&desc->list, &intc_parent_list); - - of_node_set_flag(desc->dev, OF_POPULATED); } /* Get the next pending parent that might have children */ diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 8aa197691074..f39ccd5aa701 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -497,6 +497,7 @@ int of_platform_default_populate(struct device_node *root, } EXPORT_SYMBOL_GPL(of_platform_default_populate); +#ifndef CONFIG_PPC static int __init of_platform_default_populate_init(void) { struct device_node *node; @@ -521,6 +522,7 @@ static int __init of_platform_default_populate_init(void) return 0; } arch_initcall_sync(of_platform_default_populate_init); +#endif static int of_platform_device_destroy(struct device *dev, void *data) { diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c index eb4990ff26ca..7fb765642ee7 100644 --- a/drivers/pinctrl/intel/pinctrl-merrifield.c +++ b/drivers/pinctrl/intel/pinctrl-merrifield.c @@ -11,6 +11,7 @@ #include <linux/bitops.h> #include <linux/err.h> +#include <linux/io.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pinctrl/pinconf.h> diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c index 11623c6b0cb3..44e69c963f5d 100644 --- a/drivers/pinctrl/meson/pinctrl-meson.c +++ b/drivers/pinctrl/meson/pinctrl-meson.c @@ -727,13 +727,7 @@ static int meson_pinctrl_probe(struct platform_device *pdev) return PTR_ERR(pc->pcdev); } - ret = meson_gpiolib_register(pc); - if (ret) { - pinctrl_unregister(pc->pcdev); - return ret; - } - - return 0; + return meson_gpiolib_register(pc); } static struct platform_driver meson_pinctrl_driver = { diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 634b4d30eefb..b3e772390ab6 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -43,17 +43,6 @@ static int amd_gpio_direction_input(struct gpio_chip *gc, unsigned offset) spin_lock_irqsave(&gpio_dev->lock, flags); pin_reg = readl(gpio_dev->base + offset * 4); - /* - * Suppose BIOS or Bootloader sets specific debounce for the - * GPIO. if not, set debounce to be 2.75ms and remove glitch. - */ - if ((pin_reg & DB_TMR_OUT_MASK) == 0) { - pin_reg |= 0xf; - pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF); - pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF; - pin_reg &= ~BIT(DB_TMR_LARGE_OFF); - } - pin_reg &= ~BIT(OUTPUT_ENABLE_OFF); writel(pin_reg, gpio_dev->base + offset * 4); spin_unlock_irqrestore(&gpio_dev->lock, flags); @@ -326,15 +315,6 @@ static void amd_gpio_irq_enable(struct irq_data *d) spin_lock_irqsave(&gpio_dev->lock, flags); pin_reg = readl(gpio_dev->base + (d->hwirq)*4); - /* - Suppose BIOS or Bootloader sets specific debounce for the - GPIO. if not, set debounce to be 2.75ms. 
- */ - if ((pin_reg & DB_TMR_OUT_MASK) == 0) { - pin_reg |= 0xf; - pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF); - pin_reg &= ~BIT(DB_TMR_LARGE_OFF); - } pin_reg |= BIT(INTERRUPT_ENABLE_OFF); pin_reg |= BIT(INTERRUPT_MASK_OFF); writel(pin_reg, gpio_dev->base + (d->hwirq)*4); diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c index c6d410ef8de0..7bad200bd67c 100644 --- a/drivers/pinctrl/pinctrl-pistachio.c +++ b/drivers/pinctrl/pinctrl-pistachio.c @@ -1432,7 +1432,6 @@ static int pistachio_pinctrl_probe(struct platform_device *pdev) { struct pistachio_pinctrl *pctl; struct resource *res; - int ret; pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL); if (!pctl) @@ -1464,13 +1463,7 @@ static int pistachio_pinctrl_probe(struct platform_device *pdev) return PTR_ERR(pctl->pctldev); } - ret = pistachio_gpio_register(pctl); - if (ret < 0) { - pinctrl_unregister(pctl->pctldev); - return ret; - } - - return 0; + return pistachio_gpio_register(pctl); } static struct platform_driver pistachio_pinctrl_driver = { diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c index 9c65f134d447..da7a75f82489 100644 --- a/drivers/power/max17042_battery.c +++ b/drivers/power/max17042_battery.c @@ -457,13 +457,16 @@ static inline void max17042_write_model_data(struct max17042_chip *chip, } static inline void max17042_read_model_data(struct max17042_chip *chip, - u8 addr, u32 *data, int size) + u8 addr, u16 *data, int size) { struct regmap *map = chip->regmap; int i; + u32 tmp; - for (i = 0; i < size; i++) - regmap_read(map, addr + i, &data[i]); + for (i = 0; i < size; i++) { + regmap_read(map, addr + i, &tmp); + data[i] = (u16)tmp; + } } static inline int max17042_model_data_compare(struct max17042_chip *chip, @@ -486,7 +489,7 @@ static int max17042_init_model(struct max17042_chip *chip) { int ret; int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl); - u32 *temp_data; + u16 *temp_data; temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL); if (!temp_data) @@ -501,7 +504,7 @@ static int max17042_init_model(struct max17042_chip *chip) ret = max17042_model_data_compare( chip, chip->pdata->config_data->cell_char_tbl, - (u16 *)temp_data, + temp_data, table_size); max10742_lock_model(chip); @@ -514,7 +517,7 @@ static int max17042_verify_model_lock(struct max17042_chip *chip) { int i; int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl); - u32 *temp_data; + u16 *temp_data; int ret = 0; temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL); diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig index 3bfac539334b..c74c3f67b8da 100644 --- a/drivers/power/reset/Kconfig +++ b/drivers/power/reset/Kconfig @@ -200,8 +200,8 @@ config REBOOT_MODE config SYSCON_REBOOT_MODE tristate "Generic SYSCON regmap reboot mode driver" depends on OF + depends on MFD_SYSCON select REBOOT_MODE - select MFD_SYSCON help Say y here will enable reboot mode driver. 
This will get reboot mode arguments and store it in SYSCON mapped diff --git a/drivers/power/reset/hisi-reboot.c b/drivers/power/reset/hisi-reboot.c index 9ab7f562a83b..f69387e12c1e 100644 --- a/drivers/power/reset/hisi-reboot.c +++ b/drivers/power/reset/hisi-reboot.c @@ -53,13 +53,16 @@ static int hisi_reboot_probe(struct platform_device *pdev) if (of_property_read_u32(np, "reboot-offset", &reboot_offset) < 0) { pr_err("failed to find reboot-offset property\n"); + iounmap(base); return -EINVAL; } err = register_restart_handler(&hisi_restart_nb); - if (err) + if (err) { dev_err(&pdev->dev, "cannot register restart handler (err=%d)\n", err); + iounmap(base); + } return err; } diff --git a/drivers/power/tps65217_charger.c b/drivers/power/tps65217_charger.c index 73dfae41def8..4c56e54af6ac 100644 --- a/drivers/power/tps65217_charger.c +++ b/drivers/power/tps65217_charger.c @@ -206,6 +206,7 @@ static int tps65217_charger_probe(struct platform_device *pdev) if (!charger) return -ENOMEM; + platform_set_drvdata(pdev, charger); charger->tps = tps; charger->dev = &pdev->dev; diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 8973d34ce5ba..fb1b56a71475 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -1643,9 +1643,18 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, u8 *sense = NULL; int expires; + cqr = (struct dasd_ccw_req *) intparm; if (IS_ERR(irb)) { switch (PTR_ERR(irb)) { case -EIO: + if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) { + device = (struct dasd_device *) cqr->startdev; + cqr->status = DASD_CQR_CLEARED; + dasd_device_clear_timer(device); + wake_up(&dasd_flush_wq); + dasd_schedule_device_bh(device); + return; + } break; case -ETIMEDOUT: DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " @@ -1661,7 +1670,6 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, } now = get_tod_clock(); - cqr = (struct dasd_ccw_req *) intparm; /* check for conditions that should be handled immediately */ if (!cqr || !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index fd2eff440098..98bbec44bcd0 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -5078,6 +5078,8 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device, return PTR_ERR(cqr); } + cqr->lpm = lpum; +retry: cqr->startdev = device; cqr->memdev = device; cqr->block = NULL; @@ -5122,6 +5124,14 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device, (prssdp + 1); memcpy(messages, message_buf, sizeof(struct dasd_rssd_messages)); + } else if (cqr->lpm) { + /* + * on z/VM we might not be able to do I/O on the requested path + * but instead we get the required information on any path + * so retry with open path mask + */ + cqr->lpm = 0; + goto retry; } else DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "Reading messages failed with rc=%d\n" diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 7ada078ffdd0..6a58bc8f46e2 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -762,7 +762,6 @@ static int io_subchannel_initialize_dev(struct subchannel *sch, priv->state = DEV_STATE_NOT_OPER; priv->dev_id.devno = sch->schib.pmcw.dev; priv->dev_id.ssid = sch->schid.ssid; - priv->schid = sch->schid; INIT_WORK(&priv->todo_work, ccw_device_todo); INIT_LIST_HEAD(&priv->cmb_list); @@ -1000,7 +999,6 @@ static int ccw_device_move_to_sch(struct ccw_device *cdev, put_device(&old_sch->dev); /* 
Initialize new subchannel. */ spin_lock_irq(sch->lock); - cdev->private->schid = sch->schid; cdev->ccwlock = sch->lock; if (!sch_is_pseudo_sch(sch)) sch_set_cdev(sch, cdev); diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c index 15b56a15db15..9bc3512374c9 100644 --- a/drivers/s390/cio/device_status.c +++ b/drivers/s390/cio/device_status.c @@ -26,6 +26,7 @@ static void ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) { + struct subchannel *sch = to_subchannel(cdev->dev.parent); char dbf_text[15]; if (!scsw_is_valid_cstat(&irb->scsw) || @@ -36,10 +37,10 @@ ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) "received" " ... device %04x on subchannel 0.%x.%04x, dev_stat " ": %02X sch_stat : %02X\n", - cdev->private->dev_id.devno, cdev->private->schid.ssid, - cdev->private->schid.sch_no, + cdev->private->dev_id.devno, sch->schid.ssid, + sch->schid.sch_no, scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw)); - sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no); + sprintf(dbf_text, "chk%x", sch->schid.sch_no); CIO_TRACE_EVENT(0, dbf_text); CIO_HEX_EVENT(0, irb, sizeof(struct irb)); } diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h index 8975060af96c..220f49145b2f 100644 --- a/drivers/s390/cio/io_sch.h +++ b/drivers/s390/cio/io_sch.h @@ -120,7 +120,6 @@ struct ccw_device_private { int state; /* device state */ atomic_t onoff; struct ccw_dev_id dev_id; /* device id */ - struct subchannel_id schid; /* subchannel number */ struct ccw_request req; /* internal I/O request */ int iretry; u8 pgid_valid_mask; /* mask of valid PGIDs */ diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 4bb5262f7aee..71bf9bded485 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -686,6 +686,15 @@ static void qdio_kick_handler(struct qdio_q *q) q->qdio_error = 0; } +static inline int qdio_tasklet_schedule(struct qdio_q *q) +{ + if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) { + tasklet_schedule(&q->tasklet); + return 0; + } + return -EPERM; +} + static void __qdio_inbound_processing(struct qdio_q *q) { qperf_inc(q, tasklet_inbound); @@ -698,10 +707,8 @@ static void __qdio_inbound_processing(struct qdio_q *q) if (!qdio_inbound_q_done(q)) { /* means poll time is not yet over */ qperf_inc(q, tasklet_inbound_resched); - if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) { - tasklet_schedule(&q->tasklet); + if (!qdio_tasklet_schedule(q)) return; - } } qdio_stop_polling(q); @@ -711,8 +718,7 @@ static void __qdio_inbound_processing(struct qdio_q *q) */ if (!qdio_inbound_q_done(q)) { qperf_inc(q, tasklet_inbound_resched2); - if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) - tasklet_schedule(&q->tasklet); + qdio_tasklet_schedule(q); } } @@ -869,16 +875,15 @@ static void __qdio_outbound_processing(struct qdio_q *q) * is noticed and outbound_handler is called after some time. 
*/ if (qdio_outbound_q_done(q)) - del_timer(&q->u.out.timer); + del_timer_sync(&q->u.out.timer); else - if (!timer_pending(&q->u.out.timer)) + if (!timer_pending(&q->u.out.timer) && + likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) mod_timer(&q->u.out.timer, jiffies + 10 * HZ); return; sched: - if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) - return; - tasklet_schedule(&q->tasklet); + qdio_tasklet_schedule(q); } /* outbound tasklet */ @@ -892,9 +897,7 @@ void qdio_outbound_timer(unsigned long data) { struct qdio_q *q = (struct qdio_q *)data; - if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) - return; - tasklet_schedule(&q->tasklet); + qdio_tasklet_schedule(q); } static inline void qdio_check_outbound_after_thinint(struct qdio_q *q) @@ -907,7 +910,7 @@ static inline void qdio_check_outbound_after_thinint(struct qdio_q *q) for_each_output_queue(q->irq_ptr, out, i) if (!qdio_outbound_q_done(out)) - tasklet_schedule(&out->tasklet); + qdio_tasklet_schedule(out); } static void __tiqdio_inbound_processing(struct qdio_q *q) @@ -929,10 +932,8 @@ static void __tiqdio_inbound_processing(struct qdio_q *q) if (!qdio_inbound_q_done(q)) { qperf_inc(q, tasklet_inbound_resched); - if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) { - tasklet_schedule(&q->tasklet); + if (!qdio_tasklet_schedule(q)) return; - } } qdio_stop_polling(q); @@ -942,8 +943,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q) */ if (!qdio_inbound_q_done(q)) { qperf_inc(q, tasklet_inbound_resched2); - if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) - tasklet_schedule(&q->tasklet); + qdio_tasklet_schedule(q); } } @@ -977,7 +977,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) int i; struct qdio_q *q; - if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) + if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) return; for_each_input_queue(irq_ptr, q, i) { @@ -1003,7 +1003,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) continue; if (need_siga_sync(q) && need_siga_sync_out_after_pci(q)) qdio_siga_sync_q(q); - tasklet_schedule(&q->tasklet); + qdio_tasklet_schedule(q); } } @@ -1066,10 +1066,12 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) { struct qdio_irq *irq_ptr = cdev->private->qdio_data; + struct subchannel_id schid; int cstat, dstat; if (!intparm || !irq_ptr) { - DBF_ERROR("qint:%4x", cdev->private->schid.sch_no); + ccw_device_get_schid(cdev, &schid); + DBF_ERROR("qint:%4x", schid.sch_no); return; } @@ -1122,12 +1124,14 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, int qdio_get_ssqd_desc(struct ccw_device *cdev, struct qdio_ssqd_desc *data) { + struct subchannel_id schid; if (!cdev || !cdev->private) return -EINVAL; - DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no); - return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data); + ccw_device_get_schid(cdev, &schid); + DBF_EVENT("get ssqd:%4x", schid.sch_no); + return qdio_setup_get_ssqd(NULL, &schid, data); } EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); @@ -1141,7 +1145,7 @@ static void qdio_shutdown_queues(struct ccw_device *cdev) tasklet_kill(&q->tasklet); for_each_output_queue(irq_ptr, q, i) { - del_timer(&q->u.out.timer); + del_timer_sync(&q->u.out.timer); tasklet_kill(&q->tasklet); } } @@ -1154,14 +1158,15 @@ static void qdio_shutdown_queues(struct ccw_device *cdev) int qdio_shutdown(struct ccw_device *cdev, int how) { struct qdio_irq *irq_ptr = cdev->private->qdio_data; + struct subchannel_id schid; int rc; - unsigned 
long flags; if (!irq_ptr) return -ENODEV; WARN_ON_ONCE(irqs_disabled()); - DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no); + ccw_device_get_schid(cdev, &schid); + DBF_EVENT("qshutdown:%4x", schid.sch_no); mutex_lock(&irq_ptr->setup_mutex); /* @@ -1184,7 +1189,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how) qdio_shutdown_debug_entries(irq_ptr); /* cleanup subchannel */ - spin_lock_irqsave(get_ccwdev_lock(cdev), flags); + spin_lock_irq(get_ccwdev_lock(cdev)); if (how & QDIO_FLAG_CLEANUP_USING_CLEAR) rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP); @@ -1198,12 +1203,12 @@ int qdio_shutdown(struct ccw_device *cdev, int how) } qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); - spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); + spin_unlock_irq(get_ccwdev_lock(cdev)); wait_event_interruptible_timeout(cdev->private->wait_q, irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || irq_ptr->state == QDIO_IRQ_STATE_ERR, 10 * HZ); - spin_lock_irqsave(get_ccwdev_lock(cdev), flags); + spin_lock_irq(get_ccwdev_lock(cdev)); no_cleanup: qdio_shutdown_thinint(irq_ptr); @@ -1211,7 +1216,7 @@ no_cleanup: /* restore interrupt handler */ if ((void *)cdev->handler == (void *)qdio_int_handler) cdev->handler = irq_ptr->orig_handler; - spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); + spin_unlock_irq(get_ccwdev_lock(cdev)); qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); mutex_unlock(&irq_ptr->setup_mutex); @@ -1228,11 +1233,13 @@ EXPORT_SYMBOL_GPL(qdio_shutdown); int qdio_free(struct ccw_device *cdev) { struct qdio_irq *irq_ptr = cdev->private->qdio_data; + struct subchannel_id schid; if (!irq_ptr) return -ENODEV; - DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no); + ccw_device_get_schid(cdev, &schid); + DBF_EVENT("qfree:%4x", schid.sch_no); DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned"); mutex_lock(&irq_ptr->setup_mutex); @@ -1251,9 +1258,11 @@ EXPORT_SYMBOL_GPL(qdio_free); */ int qdio_allocate(struct qdio_initialize *init_data) { + struct subchannel_id schid; struct qdio_irq *irq_ptr; - DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no); + ccw_device_get_schid(init_data->cdev, &schid); + DBF_EVENT("qallocate:%4x", schid.sch_no); if ((init_data->no_input_qs && !init_data->input_handler) || (init_data->no_output_qs && !init_data->output_handler)) @@ -1331,20 +1340,18 @@ static void qdio_detect_hsicq(struct qdio_irq *irq_ptr) */ int qdio_establish(struct qdio_initialize *init_data) { - struct qdio_irq *irq_ptr; struct ccw_device *cdev = init_data->cdev; - unsigned long saveflags; + struct subchannel_id schid; + struct qdio_irq *irq_ptr; int rc; - DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no); + ccw_device_get_schid(cdev, &schid); + DBF_EVENT("qestablish:%4x", schid.sch_no); irq_ptr = cdev->private->qdio_data; if (!irq_ptr) return -ENODEV; - if (cdev->private->state != DEV_STATE_ONLINE) - return -EINVAL; - mutex_lock(&irq_ptr->setup_mutex); qdio_setup_irq(init_data); @@ -1361,17 +1368,14 @@ int qdio_establish(struct qdio_initialize *init_data) irq_ptr->ccw.count = irq_ptr->equeue.count; irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr); - spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags); + spin_lock_irq(get_ccwdev_lock(cdev)); ccw_device_set_options_mask(cdev, 0); rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0); + spin_unlock_irq(get_ccwdev_lock(cdev)); if (rc) { DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no); DBF_ERROR("rc:%4x", rc); - } - spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); - - if (rc) { 
mutex_unlock(&irq_ptr->setup_mutex); qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); return rc; @@ -1407,19 +1411,17 @@ EXPORT_SYMBOL_GPL(qdio_establish); */ int qdio_activate(struct ccw_device *cdev) { + struct subchannel_id schid; struct qdio_irq *irq_ptr; int rc; - unsigned long saveflags; - DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no); + ccw_device_get_schid(cdev, &schid); + DBF_EVENT("qactivate:%4x", schid.sch_no); irq_ptr = cdev->private->qdio_data; if (!irq_ptr) return -ENODEV; - if (cdev->private->state != DEV_STATE_ONLINE) - return -EINVAL; - mutex_lock(&irq_ptr->setup_mutex); if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) { rc = -EBUSY; @@ -1431,19 +1433,17 @@ int qdio_activate(struct ccw_device *cdev) irq_ptr->ccw.count = irq_ptr->aqueue.count; irq_ptr->ccw.cda = 0; - spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags); + spin_lock_irq(get_ccwdev_lock(cdev)); ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE, 0, DOIO_DENY_PREFETCH); + spin_unlock_irq(get_ccwdev_lock(cdev)); if (rc) { DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no); DBF_ERROR("rc:%4x", rc); - } - spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); - - if (rc) goto out; + } if (is_thinint_irq(irq_ptr)) tiqdio_add_input_queues(irq_ptr); @@ -1585,10 +1585,11 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags, /* in case of SIGA errors we must process the error immediately */ if (used >= q->u.out.scan_threshold || rc) - tasklet_schedule(&q->tasklet); + qdio_tasklet_schedule(q); else /* free the SBALs in case of no further traffic */ - if (!timer_pending(&q->u.out.timer)) + if (!timer_pending(&q->u.out.timer) && + likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) mod_timer(&q->u.out.timer, jiffies + HZ); return rc; } diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index b381b3718a98..5648b715fed9 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -63,7 +63,7 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg) struct fib *fibptr; struct hw_fib * hw_fib = (struct hw_fib *)0; dma_addr_t hw_fib_pa = (dma_addr_t)0LL; - unsigned size; + unsigned int size, osize; int retval; if (dev->in_reset) { @@ -87,7 +87,8 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg) * will not overrun the buffer when we copy the memory. Return * an error if we would. 
*/ - size = le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr); + osize = size = le16_to_cpu(kfib->header.Size) + + sizeof(struct aac_fibhdr); if (size < le16_to_cpu(kfib->header.SenderSize)) size = le16_to_cpu(kfib->header.SenderSize); if (size > dev->max_fib_size) { @@ -118,6 +119,14 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg) goto cleanup; } + /* Sanity check the second copy */ + if ((osize != le16_to_cpu(kfib->header.Size) + + sizeof(struct aac_fibhdr)) + || (size < le16_to_cpu(kfib->header.SenderSize))) { + retval = -EINVAL; + goto cleanup; + } + if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) { aac_adapter_interrupt(dev); /* diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index a569c65f22b1..dcf36537a767 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -2923,7 +2923,7 @@ static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) mutex_unlock(&fip->ctlr_mutex); drop: - kfree(skb); + kfree_skb(skb); return rc; } diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 2dab3dc2aa69..c1ed25adb17e 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -5037,7 +5037,7 @@ static int megasas_init_fw(struct megasas_instance *instance) /* Find first memory bar */ bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); instance->bar = find_first_bit(&bar_list, sizeof(unsigned long)); - if (pci_request_selected_regions(instance->pdev, instance->bar, + if (pci_request_selected_regions(instance->pdev, 1<<instance->bar, "megasas: LSI")) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n"); return -EBUSY; @@ -5339,7 +5339,7 @@ fail_ready_state: iounmap(instance->reg_set); fail_ioremap: - pci_release_selected_regions(instance->pdev, instance->bar); + pci_release_selected_regions(instance->pdev, 1<<instance->bar); return -EINVAL; } @@ -5360,7 +5360,7 @@ static void megasas_release_mfi(struct megasas_instance *instance) iounmap(instance->reg_set); - pci_release_selected_regions(instance->pdev, instance->bar); + pci_release_selected_regions(instance->pdev, 1<<instance->bar); } /** diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index ec837544f784..52d8bbf7feb5 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -2603,7 +2603,7 @@ megasas_release_fusion(struct megasas_instance *instance) iounmap(instance->reg_set); - pci_release_selected_regions(instance->pdev, instance->bar); + pci_release_selected_regions(instance->pdev, 1<<instance->bar); } /** diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 751f13edece0..750f82c339d4 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -2188,6 +2188,17 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) } else ioc->msix96_vector = 0; + if (ioc->is_warpdrive) { + ioc->reply_post_host_index[0] = (resource_size_t __iomem *) + &ioc->chip->ReplyPostHostIndex; + + for (i = 1; i < ioc->cpu_msix_table_sz; i++) + ioc->reply_post_host_index[i] = + (resource_size_t __iomem *) + ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1) + * 4))); + } + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) pr_info(MPT3SAS_FMT "%s: IRQ %d\n", reply_q->name, ((ioc->msix_enable) ? 
"PCI-MSI-X enabled" : @@ -5280,17 +5291,6 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) if (r) goto out_free_resources; - if (ioc->is_warpdrive) { - ioc->reply_post_host_index[0] = (resource_size_t __iomem *) - &ioc->chip->ReplyPostHostIndex; - - for (i = 1; i < ioc->cpu_msix_table_sz; i++) - ioc->reply_post_host_index[i] = - (resource_size_t __iomem *) - ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1) - * 4))); - } - pci_set_drvdata(ioc->pdev, ioc->shost); r = _base_get_ioc_facts(ioc, CAN_SLEEP); if (r) diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 53ef1cb6418e..0e8601aa877a 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c @@ -778,6 +778,8 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev) if (!edev) return; + enclosure_unregister(edev); + ses_dev = edev->scratch; edev->scratch = NULL; @@ -789,7 +791,6 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev) kfree(edev->component[0].scratch); put_device(&edev->edev); - enclosure_unregister(edev); } static void ses_intf_remove(struct device *cdev, diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 71912301ef7f..0f3f62e81e5b 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1354,7 +1354,6 @@ made_compressed_probe: spin_lock_init(&acm->write_lock); spin_lock_init(&acm->read_lock); mutex_init(&acm->mutex); - acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress); acm->is_int_ep = usb_endpoint_xfer_int(epread); if (acm->is_int_ep) acm->bInterval = epread->bInterval; @@ -1394,14 +1393,14 @@ made_compressed_probe: urb->transfer_dma = rb->dma; if (acm->is_int_ep) { usb_fill_int_urb(urb, acm->dev, - acm->rx_endpoint, + usb_rcvintpipe(usb_dev, epread->bEndpointAddress), rb->base, acm->readsize, acm_read_bulk_callback, rb, acm->bInterval); } else { usb_fill_bulk_urb(urb, acm->dev, - acm->rx_endpoint, + usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress), rb->base, acm->readsize, acm_read_bulk_callback, rb); diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h index 05ce308d5d2a..1f1eabfd8462 100644 --- a/drivers/usb/class/cdc-acm.h +++ b/drivers/usb/class/cdc-acm.h @@ -96,7 +96,6 @@ struct acm { struct acm_rb read_buffers[ACM_NR]; struct acm_wb *putbuffer; /* for acm_tty_put_char() */ int rx_buflimit; - int rx_endpoint; spinlock_t read_lock; int write_used; /* number of non-empty write buffers */ int transmitting; diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 31ccdccd7a04..051163189810 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -171,6 +171,31 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, ep, buffer, size); } +static const unsigned short low_speed_maxpacket_maxes[4] = { + [USB_ENDPOINT_XFER_CONTROL] = 8, + [USB_ENDPOINT_XFER_ISOC] = 0, + [USB_ENDPOINT_XFER_BULK] = 0, + [USB_ENDPOINT_XFER_INT] = 8, +}; +static const unsigned short full_speed_maxpacket_maxes[4] = { + [USB_ENDPOINT_XFER_CONTROL] = 64, + [USB_ENDPOINT_XFER_ISOC] = 1023, + [USB_ENDPOINT_XFER_BULK] = 64, + [USB_ENDPOINT_XFER_INT] = 64, +}; +static const unsigned short high_speed_maxpacket_maxes[4] = { + [USB_ENDPOINT_XFER_CONTROL] = 64, + [USB_ENDPOINT_XFER_ISOC] = 1024, + [USB_ENDPOINT_XFER_BULK] = 512, + [USB_ENDPOINT_XFER_INT] = 1023, +}; +static const unsigned short super_speed_maxpacket_maxes[4] = { + [USB_ENDPOINT_XFER_CONTROL] = 512, + [USB_ENDPOINT_XFER_ISOC] = 1024, + [USB_ENDPOINT_XFER_BULK] = 1024, + [USB_ENDPOINT_XFER_INT] = 1024, +}; + 
static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, int asnum, struct usb_host_interface *ifp, int num_ep, unsigned char *buffer, int size) @@ -179,6 +204,8 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, struct usb_endpoint_descriptor *d; struct usb_host_endpoint *endpoint; int n, i, j, retval; + unsigned int maxp; + const unsigned short *maxpacket_maxes; d = (struct usb_endpoint_descriptor *) buffer; buffer += d->bLength; @@ -286,6 +313,42 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, endpoint->desc.wMaxPacketSize = cpu_to_le16(8); } + /* Validate the wMaxPacketSize field */ + maxp = usb_endpoint_maxp(&endpoint->desc); + + /* Find the highest legal maxpacket size for this endpoint */ + i = 0; /* additional transactions per microframe */ + switch (to_usb_device(ddev)->speed) { + case USB_SPEED_LOW: + maxpacket_maxes = low_speed_maxpacket_maxes; + break; + case USB_SPEED_FULL: + maxpacket_maxes = full_speed_maxpacket_maxes; + break; + case USB_SPEED_HIGH: + /* Bits 12..11 are allowed only for HS periodic endpoints */ + if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) { + i = maxp & (BIT(12) | BIT(11)); + maxp &= ~i; + } + /* fallthrough */ + default: + maxpacket_maxes = high_speed_maxpacket_maxes; + break; + case USB_SPEED_SUPER: + case USB_SPEED_SUPER_PLUS: + maxpacket_maxes = super_speed_maxpacket_maxes; + break; + } + j = maxpacket_maxes[usb_endpoint_type(&endpoint->desc)]; + + if (maxp > j) { + dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n", + cfgno, inum, asnum, d->bEndpointAddress, maxp, j); + maxp = j; + endpoint->desc.wMaxPacketSize = cpu_to_le16(i | maxp); + } + /* * Some buggy high speed devices have bulk endpoints using * maxpacket sizes other than 512. High speed HCDs may not @@ -293,9 +356,6 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, */ if (to_usb_device(ddev)->speed == USB_SPEED_HIGH && usb_endpoint_xfer_bulk(d)) { - unsigned maxp; - - maxp = usb_endpoint_maxp(&endpoint->desc) & 0x07ff; if (maxp != 512) dev_warn(ddev, "config %d interface %d altsetting %d " "bulk endpoint 0x%X has invalid maxpacket %d\n", diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index e9f5043a2167..e6a6d67c8705 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -241,7 +241,8 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma) goto error_decrease_mem; } - mem = usb_alloc_coherent(ps->dev, size, GFP_USER, &dma_handle); + mem = usb_alloc_coherent(ps->dev, size, GFP_USER | __GFP_NOWARN, + &dma_handle); if (!mem) { ret = -ENOMEM; goto error_free_usbm; @@ -2582,7 +2583,9 @@ static unsigned int usbdev_poll(struct file *file, if (file->f_mode & FMODE_WRITE && !list_empty(&ps->async_completed)) mask |= POLLOUT | POLLWRNORM; if (!connected(ps)) - mask |= POLLERR | POLLHUP; + mask |= POLLHUP; + if (list_empty(&ps->list)) + mask |= POLLERR; return mask; } diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index bee13517676f..1d5fc32d06d0 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -1052,14 +1052,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) /* Continue a partial initialization */ if (type == HUB_INIT2 || type == HUB_INIT3) { - device_lock(hub->intfdev); + device_lock(&hdev->dev); /* Was the hub disconnected while we were waiting? 
*/ - if (hub->disconnected) { - device_unlock(hub->intfdev); - kref_put(&hub->kref, hub_release); - return; - } + if (hub->disconnected) + goto disconnected; if (type == HUB_INIT2) goto init2; goto init3; @@ -1262,7 +1259,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) queue_delayed_work(system_power_efficient_wq, &hub->init_work, msecs_to_jiffies(delay)); - device_unlock(hub->intfdev); + device_unlock(&hdev->dev); return; /* Continues at init3: below */ } else { msleep(delay); @@ -1281,12 +1278,12 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) /* Scan all ports that need attention */ kick_hub_wq(hub); - /* Allow autosuspend if it was suppressed */ - if (type <= HUB_INIT3) + if (type == HUB_INIT2 || type == HUB_INIT3) { + /* Allow autosuspend if it was suppressed */ + disconnected: usb_autopm_put_interface_async(to_usb_interface(hub->intfdev)); - - if (type == HUB_INIT2 || type == HUB_INIT3) - device_unlock(hub->intfdev); + device_unlock(&hdev->dev); + } kref_put(&hub->kref, hub_release); } @@ -1315,8 +1312,6 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type) struct usb_device *hdev = hub->hdev; int i; - cancel_delayed_work_sync(&hub->init_work); - /* hub_wq and related activity won't re-trigger */ hub->quiescing = 1; diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c index 974335377d9f..e56d59b19a0e 100644 --- a/drivers/usb/dwc3/dwc3-of-simple.c +++ b/drivers/usb/dwc3/dwc3-of-simple.c @@ -61,6 +61,7 @@ static int dwc3_of_simple_probe(struct platform_device *pdev) if (!simple->clks) return -ENOMEM; + platform_set_drvdata(pdev, simple); simple->dev = dev; for (i = 0; i < simple->num_clocks; i++) { diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 45f5a232d9fb..2eb84d6c24a6 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -37,6 +37,7 @@ #define PCI_DEVICE_ID_INTEL_BXT 0x0aaa #define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa #define PCI_DEVICE_ID_INTEL_APL 0x5aaa +#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0 static const struct acpi_gpio_params reset_gpios = { 0, 0, false }; static const struct acpi_gpio_params cs_gpios = { 1, 0, false }; @@ -227,6 +228,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, { } /* Terminating Entry */ }; diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 8f8c2157910e..1f5597ef945d 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -829,7 +829,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, if (!req->request.no_interrupt && !chain) trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI; - if (last) + if (last && !usb_endpoint_xfer_isoc(dep->endpoint.desc)) trb->ctrl |= DWC3_TRB_CTRL_LST; if (chain) @@ -1955,7 +1955,8 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc) static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, struct dwc3_request *req, struct dwc3_trb *trb, - const struct dwc3_event_depevt *event, int status) + const struct dwc3_event_depevt *event, int status, + int chain) { unsigned int count; unsigned int s_pkt = 0; @@ -1964,17 +1965,22 @@ static int 
__dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, dep->queued_requests--; trace_dwc3_complete_trb(dep, trb); + /* + * If we're in the middle of a series of chained TRBs and we + * receive a short transfer along the way, DWC3 will skip + * through all TRBs including the last TRB in the chain (the + * one where CHN bit is zero). DWC3 will also avoid clearing the HWO + * bit and SW has to do it manually. + * + * We're going to do that here to avoid problems of HW trying + * to use bogus TRBs for transfers. + */ + if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO)) + trb->ctrl &= ~DWC3_TRB_CTRL_HWO; + if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) - /* - * We continue despite the error. There is not much we - * can do. If we don't clean it up we loop forever. If - * we skip the TRB then it gets overwritten after a - * while since we use them in a ring buffer. A BUG() - * would help. Lets hope that if this occurs, someone - * fixes the root cause instead of looking away :) - */ - dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n", - dep->name, trb); + return 1; + count = trb->size & DWC3_TRB_SIZE_MASK; if (dep->direction) { @@ -2013,15 +2019,7 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, s_pkt = 1; } - /* - * We assume here we will always receive the entire data block - * which we should receive. Meaning, if we program RX to - * receive 4K but we receive only 2K, we assume that's all we - * should receive and we simply bounce the request back to the - * gadget driver for further processing. - */ - req->request.actual += req->request.length - count; - if (s_pkt) + if (s_pkt && !chain) return 1; if ((event->status & DEPEVT_STATUS_LST) && (trb->ctrl & (DWC3_TRB_CTRL_LST | @@ -2040,13 +2038,17 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, struct dwc3_trb *trb; unsigned int slot; unsigned int i; + int count = 0; int ret; do { + int chain; + req = next_request(&dep->started_list); if (WARN_ON_ONCE(!req)) return 1; + chain = req->request.num_mapped_sgs > 0; i = 0; do { slot = req->first_trb_index + i; @@ -2054,13 +2056,22 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, slot++; slot %= DWC3_TRB_NUM; trb = &dep->trb_pool[slot]; + count += trb->size & DWC3_TRB_SIZE_MASK; ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, - event, status); + event, status, chain); if (ret) break; } while (++i < req->request.num_mapped_sgs); + /* + * We assume here we will always receive the entire data block + * which we should receive. Meaning, if we program RX to + * receive 4K but we receive only 2K, we assume that's all we + * should receive and we simply bounce the request back to the + * gadget driver for further processing. 
+ */ + req->request.actual += req->request.length - count; dwc3_gadget_giveback(dep, req, status); if (ret) diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index eb648485a58c..5ebe6af7976e 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -1913,6 +1913,8 @@ unknown: break; case USB_RECIP_ENDPOINT: + if (!cdev->config) + break; endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f); list_for_each_entry(f, &cdev->config->functions, list) { if (test_bit(endp, f->endpoints)) @@ -2124,14 +2126,14 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev, cdev->os_desc_req = usb_ep_alloc_request(ep0, GFP_KERNEL); if (!cdev->os_desc_req) { - ret = PTR_ERR(cdev->os_desc_req); + ret = -ENOMEM; goto end; } /* OS feature descriptor length <= 4kB */ cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL); if (!cdev->os_desc_req->buf) { - ret = PTR_ERR(cdev->os_desc_req->buf); + ret = -ENOMEM; kfree(cdev->os_desc_req); goto end; } diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 70cf3477f951..f9237fe2be05 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -1490,7 +1490,9 @@ void unregister_gadget_item(struct config_item *item) { struct gadget_info *gi = to_gadget_info(item); + mutex_lock(&gi->lock); unregister_gadget(gi); + mutex_unlock(&gi->lock); } EXPORT_SYMBOL_GPL(unregister_gadget_item); diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c index 943c21aafd3b..ab6ac1b74ac0 100644 --- a/drivers/usb/gadget/function/rndis.c +++ b/drivers/usb/gadget/function/rndis.c @@ -680,6 +680,12 @@ static int rndis_reset_response(struct rndis_params *params, { rndis_reset_cmplt_type *resp; rndis_resp_t *r; + u8 *xbuf; + u32 length; + + /* drain the response queue */ + while ((xbuf = rndis_get_next_response(params, &length))) + rndis_free_response(params, xbuf); r = rndis_add_response(params, sizeof(rndis_reset_cmplt_type)); if (!r) diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index a3f7e7c55ebb..5f562c1ec795 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c @@ -556,7 +556,8 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb, /* Multi frame CDC protocols may store the frame for * later which is not a dropped frame. 
*/ - if (dev->port_usb->supports_multi_frame) + if (dev->port_usb && + dev->port_usb->supports_multi_frame) goto multiframe; goto drop; } diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c index 66753ba7a42e..31125a4a2658 100644 --- a/drivers/usb/gadget/function/uvc_configfs.c +++ b/drivers/usb/gadget/function/uvc_configfs.c @@ -2023,7 +2023,7 @@ static int uvcg_streaming_class_allow_link(struct config_item *src, if (!data) { kfree(*class_array); *class_array = NULL; - ret = PTR_ERR(data); + ret = -ENOMEM; goto unlock; } cl_arr = *class_array; diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index aa3707bdebb4..16104b5ebdcb 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -542,7 +542,7 @@ static ssize_t ep_aio(struct kiocb *iocb, */ spin_lock_irq(&epdata->dev->lock); value = -ENODEV; - if (unlikely(epdata->ep)) + if (unlikely(epdata->ep == NULL)) goto fail; req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC); @@ -606,7 +606,7 @@ ep_read_iter(struct kiocb *iocb, struct iov_iter *to) } if (is_sync_kiocb(iocb)) { value = ep_io(epdata, buf, len); - if (value >= 0 && copy_to_iter(buf, value, to)) + if (value >= 0 && (copy_to_iter(buf, value, to) != value)) value = -EFAULT; } else { struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL); diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index ff8685ea7219..934f83881c30 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -1145,7 +1145,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, if (ret != -EPROBE_DEFER) list_del(&driver->pending); if (ret) - goto err4; + goto err5; break; } } @@ -1154,6 +1154,9 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, return 0; +err5: + device_del(&udc->dev); + err4: list_del(&udc->list); mutex_unlock(&udc_lock); diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c index 93d28cb00b76..cf8819a5c5b2 100644 --- a/drivers/usb/gadget/udc/fsl_qe_udc.c +++ b/drivers/usb/gadget/udc/fsl_qe_udc.c @@ -2053,7 +2053,7 @@ static void setup_received_handle(struct qe_udc *udc, struct qe_ep *ep; if (wValue != 0 || wLength != 0 - || pipe > USB_MAX_ENDPOINTS) + || pipe >= USB_MAX_ENDPOINTS) break; ep = &udc->eps[pipe]; diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index a962b89b65a6..1e5f529d51a2 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -332,11 +332,11 @@ static void ehci_turn_off_all_ports(struct ehci_hcd *ehci) int port = HCS_N_PORTS(ehci->hcs_params); while (port--) { - ehci_writel(ehci, PORT_RWC_BITS, - &ehci->regs->port_status[port]); spin_unlock_irq(&ehci->lock); ehci_port_power(ehci, port, false); spin_lock_irq(&ehci->lock); + ehci_writel(ehci, PORT_RWC_BITS, + &ehci->regs->port_status[port]); } } diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c index c369c29e496d..2f7690092a7f 100644 --- a/drivers/usb/host/max3421-hcd.c +++ b/drivers/usb/host/max3421-hcd.c @@ -1675,7 +1675,7 @@ max3421_gpout_set_value(struct usb_hcd *hcd, u8 pin_number, u8 value) if (pin_number > 7) return; - mask = 1u << pin_number; + mask = 1u << (pin_number % 4); idx = pin_number / 4; if (value) diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index d61fcc48099e..730b9fd26685 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -386,6 
+386,9 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend) ret = 0; virt_dev = xhci->devs[slot_id]; + if (!virt_dev) + return -ENODEV; + cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO); if (!cmd) { xhci_dbg(xhci, "Couldn't allocate command structure.\n"); diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 4fd041bec332..d7b0f97abbad 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -314,11 +314,12 @@ static void xhci_pci_remove(struct pci_dev *dev) usb_remove_hcd(xhci->shared_hcd); usb_put_hcd(xhci->shared_hcd); } - usb_hcd_pci_remove(dev); /* Workaround for spurious wakeups at shutdown with HSW */ if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) pci_set_power_state(dev, PCI_D3hot); + + usb_hcd_pci_remove(dev); } #ifdef CONFIG_PM diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 918e0c739b79..fd9fd12e4861 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1334,12 +1334,6 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list); - if (cmd->command_trb != xhci->cmd_ring->dequeue) { - xhci_err(xhci, - "Command completion event does not match command\n"); - return; - } - del_timer(&xhci->cmd_timer); trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event); @@ -1351,6 +1345,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, xhci_handle_stopped_cmd_ring(xhci, cmd); return; } + + if (cmd->command_trb != xhci->cmd_ring->dequeue) { + xhci_err(xhci, + "Command completion event does not match command\n"); + return; + } + /* * Host aborted the command ring, check if the current command was * supposed to be aborted, otherwise continue normally. */
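The handle_cmd_completion() reorder above moves the dequeue-match check after the stop/abort handling, since an aborted command ring may legitimately present a completion that does not line up with the dequeue pointer. A reduced sketch of the resulting control flow, with stand-in types and a hypothetical handle_stopped_ring() in place of the real xHCI internals:

#include <stdio.h>

/* Stand-in types; only the ordering of the two checks mirrors the hunk. */
enum comp_code { COMP_SUCCESS, COMP_CMD_STOP, COMP_CMD_ABORT };

struct trb      { int id; };
struct cmd      { struct trb *command_trb; };
struct cmd_ring { struct trb *dequeue; };

/* Hypothetical stand-in for xhci_handle_stopped_cmd_ring(). */
static void handle_stopped_ring(struct cmd *cmd)
{
	printf("servicing stopped/aborted ring for command %p\n", (void *)cmd);
}

static void handle_completion(struct cmd_ring *ring, struct cmd *cmd,
			      enum comp_code code)
{
	/*
	 * Stop/abort completions are handled before the dequeue-match
	 * check: an aborted ring can legitimately present a completion
	 * that does not line up with the dequeue pointer, and bailing
	 * out early would skip the stopped-ring cleanup.
	 */
	if (code == COMP_CMD_STOP || code == COMP_CMD_ABORT) {
		handle_stopped_ring(cmd);
		return;
	}

	/* Only a normal completion must match the command at dequeue. */
	if (cmd->command_trb != ring->dequeue) {
		fprintf(stderr, "completion event does not match command\n");
		return;
	}

	/* ... normal per-command handling would continue here ... */
}

int main(void)
{
	struct trb t = { 1 }, other = { 2 };
	struct cmd c = { &t };
	struct cmd_ring r = { &other };

	handle_completion(&r, &c, COMP_CMD_ABORT); /* serviced despite mismatch */
	handle_completion(&r, &c, COMP_SUCCESS);   /* rejected: mismatch */
	return 0;
}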
@@ -3243,7 +3244,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, send_addr = addr; /* Queue the TRBs, even if they are zero-length */ - for (enqd_len = 0; enqd_len < full_len; enqd_len += trb_buff_len) { + for (enqd_len = 0; first_trb || enqd_len < full_len; + enqd_len += trb_buff_len) { field = TRB_TYPE(TRB_NORMAL); /* TRB buffer should not cross 64KB boundaries */ diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c index 52c27cab78c3..9b5b3b2281ca 100644 --- a/drivers/usb/misc/ftdi-elan.c +++ b/drivers/usb/misc/ftdi-elan.c @@ -665,7 +665,7 @@ static ssize_t ftdi_elan_read(struct file *file, char __user *buffer, { char data[30 *3 + 4]; char *d = data; - int m = (sizeof(data) - 1) / 3; + int m = (sizeof(data) - 1) / 3 - 1; int bytes_read = 0; int retry_on_empty = 10; int retry_on_timeout = 5; @@ -1684,7 +1684,7 @@ wait:if (ftdi->disconnected > 0) { int i = 0; char data[30 *3 + 4]; char *d = data; - int m = (sizeof(data) - 1) / 3; + int m = (sizeof(data) - 1) / 3 - 1; int l = 0; struct u132_target *target = &ftdi->target[ed]; struct u132_command *command = &ftdi->command[ @@ -1876,7 +1876,7 @@ more:{ if (packet_bytes > 2) { char diag[30 *3 + 4]; char *d = diag; - int m = (sizeof(diag) - 1) / 3; + int m = (sizeof(diag) - 1) / 3 - 1; char *b = ftdi->bulk_in_buffer; int bytes_read = 0; diag[0] = 0; @@ -2053,7 +2053,7 @@ static int ftdi_elan_synchronize(struct usb_ftdi *ftdi) if (packet_bytes > 2) { char diag[30 *3 + 4]; char *d = diag; - int m = (sizeof(diag) - 1) / 3; + int m = (sizeof(diag) - 1) / 3 - 1; char *b = ftdi->bulk_in_buffer; int bytes_read = 0; unsigned char c = 0; @@ -2155,7 +2155,7 @@ more:{ if (packet_bytes > 2) { char diag[30 *3 + 4]; char *d = diag; - int m = (sizeof(diag) - 1) / 3; + int m = (sizeof(diag) - 1) / 3 - 1; char *b = ftdi->bulk_in_buffer; int bytes_read = 0; diag[0] = 0; diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index 6b978f04b8d7..5c8210dc6fd9 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c @@ -585,7 +585,6 @@ static void sg_timeout(unsigned long _req) { struct usb_sg_request *req = (struct usb_sg_request *) _req; - req->status = -ETIMEDOUT; usb_sg_cancel(req); } @@ -616,8 +615,10 @@ static int perform_sglist( mod_timer(&sg_timer, jiffies + msecs_to_jiffies(SIMPLE_IO_TIMEOUT)); usb_sg_wait(req); - del_timer_sync(&sg_timer); - retval = req->status; + if (!del_timer_sync(&sg_timer)) + retval = -ETIMEDOUT; + else + retval = req->status; /* FIXME check resulting data pattern */ @@ -2602,7 +2603,7 @@ usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf) ktime_get_ts64(&start); retval = usbtest_do_ioctl(intf, param_32); - if (retval) + if (retval < 0) goto free_mutex; ktime_get_ts64(&end); diff --git a/drivers/usb/phy/phy-omap-otg.c b/drivers/usb/phy/phy-omap-otg.c index 6f6d2a7fd5a0..6523af4f8f93 100644 --- a/drivers/usb/phy/phy-omap-otg.c +++ b/drivers/usb/phy/phy-omap-otg.c @@ -140,6 +140,8 @@ static int omap_otg_probe(struct platform_device *pdev) (rev >> 4) & 0xf, rev & 0xf, config->extcon, otg_dev->id, otg_dev->vbus); + platform_set_drvdata(pdev, otg_dev); + return 0; } diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c index 8fbbc2d32371..ac67bab9124c 100644 --- a/drivers/usb/renesas_usbhs/common.c +++ b/drivers/usb/renesas_usbhs/common.c @@ -514,7 +514,8 @@ static struct renesas_usbhs_platform_info *usbhs_parse_dt(struct device *dev) if (gpio > 0) dparam->enable_gpio = gpio; - if (dparam->type == USBHS_TYPE_RCAR_GEN2) 
+ if (dparam->type == USBHS_TYPE_RCAR_GEN2 || + dparam->type == USBHS_TYPE_RCAR_GEN3) dparam->has_usb_dmac = 1; return info; diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 280ed5ff021b..857e78337324 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -871,7 +871,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done) /* use PIO if packet is less than pio_dma_border or pipe is DCP */ if ((len < usbhs_get_dparam(priv, pio_dma_border)) || - usbhs_pipe_is_dcp(pipe)) + usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC)) goto usbhsf_pio_prepare_push; /* check data length if this driver don't use USB-DMAC */ @@ -976,7 +976,7 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt, /* use PIO if packet is less than pio_dma_border or pipe is DCP */ if ((pkt->length < usbhs_get_dparam(priv, pio_dma_border)) || - usbhs_pipe_is_dcp(pipe)) + usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC)) goto usbhsf_pio_prepare_pop; fifo = usbhsf_get_dma_fifo(priv, pkt); diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c index 50f3363cc382..92bc83b92d10 100644 --- a/drivers/usb/renesas_usbhs/mod_gadget.c +++ b/drivers/usb/renesas_usbhs/mod_gadget.c @@ -617,10 +617,13 @@ static int usbhsg_ep_enable(struct usb_ep *ep, * use dmaengine if possible. * It will use pio handler if impossible. */ - if (usb_endpoint_dir_in(desc)) + if (usb_endpoint_dir_in(desc)) { pipe->handler = &usbhs_fifo_dma_push_handler; - else + } else { pipe->handler = &usbhs_fifo_dma_pop_handler; + usbhs_xxxsts_clear(priv, BRDYSTS, + usbhs_pipe_number(pipe)); + } ret = 0; } diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 00820809139a..b2d767e743fc 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -648,6 +648,8 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_PALMSENS_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_IVIUM_XSTAT_PID) }, { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) }, @@ -1008,6 +1010,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) }, { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) }, { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) }, + { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index c5d6c1e73e8e..f87a938cf005 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -406,6 +406,12 @@ #define FTDI_4N_GALAXY_DE_3_PID 0xF3C2 /* + * Ivium Technologies product IDs + */ +#define FTDI_PALMSENS_PID 0xf440 +#define FTDI_IVIUM_XSTAT_PID 0xf441 + +/* * Linx Technologies product ids */ #define LINX_SDMUSBQSS_PID 0xF448 /* Linx SDM-USB-QS-S */ @@ -673,6 +679,12 @@ #define INTREPID_NEOVI_PID 0x0701 /* + * WICED USB UART + */ +#define WICED_VID 0x0A5C +#define WICED_USB20706V2_PID 0x6422 + +/* * Definitions for ID TECH (www.idt-net.com) devices */ #define IDTECH_VID 0x0ACD /* ID TECH Vendor ID */ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 8e07536c233a..bc472584a229 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ 
-274,6 +274,12 @@ static void option_instat_callback(struct urb *urb); #define TELIT_PRODUCT_LE920 0x1200 #define TELIT_PRODUCT_LE910 0x1201 #define TELIT_PRODUCT_LE910_USBCFG4 0x1206 +#define TELIT_PRODUCT_LE920A4_1207 0x1207 +#define TELIT_PRODUCT_LE920A4_1208 0x1208 +#define TELIT_PRODUCT_LE920A4_1211 0x1211 +#define TELIT_PRODUCT_LE920A4_1212 0x1212 +#define TELIT_PRODUCT_LE920A4_1213 0x1213 +#define TELIT_PRODUCT_LE920A4_1214 0x1214 /* ZTE PRODUCTS */ #define ZTE_VENDOR_ID 0x19d2 @@ -628,6 +634,11 @@ static const struct option_blacklist_info telit_le920_blacklist = { .reserved = BIT(1) | BIT(5), }; +static const struct option_blacklist_info telit_le920a4_blacklist_1 = { + .sendsetup = BIT(0), + .reserved = BIT(1), +}; + static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = { .sendsetup = BIT(2), .reserved = BIT(0) | BIT(1) | BIT(3), @@ -1203,6 +1214,16 @@ static const struct usb_device_id option_ids[] = { .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), .driver_info = (kernel_ulong_t)&telit_le920_blacklist }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1207) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1208), + .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1211), + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1212), + .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214), + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, @@ -1966,6 +1987,7 @@ static const struct usb_device_id option_ids[] = { .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index b1b9bac44016..d213cf44a7e4 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -1433,7 +1433,7 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[] rc = usb_register(udriver); if (rc) - return rc; + goto failed_usb_register; for (sd = serial_drivers; *sd; ++sd) { (*sd)->usb_driver = udriver; @@ -1451,6 +1451,8 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[] while (sd-- > serial_drivers) usb_serial_deregister(*sd); usb_deregister(udriver); +failed_usb_register: + kfree(udriver); return rc; } EXPORT_SYMBOL_GPL(usb_serial_register_drivers); diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c index 388eec4e1a90..97fb2f8fa930 100644 --- a/drivers/vhost/test.c +++ b/drivers/vhost/test.c @@ -220,20 +220,20 @@ static long 
vhost_test_reset_owner(struct vhost_test *n) { void *priv = NULL; long err; - struct vhost_memory *memory; + struct vhost_umem *umem; mutex_lock(&n->dev.mutex); err = vhost_dev_check_owner(&n->dev); if (err) goto done; - memory = vhost_dev_reset_owner_prepare(); - if (!memory) { + umem = vhost_dev_reset_owner_prepare(); + if (!umem) { err = -ENOMEM; goto done; } vhost_test_stop(n, &priv); vhost_test_flush(n); - vhost_dev_reset_owner(&n->dev, memory); + vhost_dev_reset_owner(&n->dev, umem); done: mutex_unlock(&n->dev.mutex); return err;
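The vhost_test change above tracks the vhost core's switch from struct vhost_memory to struct vhost_umem; the reset sequence itself, allocate the replacement mapping first, then stop, flush and swap, is unchanged. A generic sketch of that allocate-before-teardown pattern (the names here are invented, not the vhost API):

#include <stdio.h>
#include <stdlib.h>

/* Invented types; only the allocate-first ordering mirrors the driver. */
struct mapping_table { int dummy; };
struct test_dev      { struct mapping_table *umem; };

/* Counterpart of vhost_dev_reset_owner_prepare(): obtain the
 * replacement table before tearing anything down. */
static struct mapping_table *reset_owner_prepare(void)
{
	return calloc(1, sizeof(struct mapping_table));
}

static int reset_owner(struct test_dev *d)
{
	struct mapping_table *umem = reset_owner_prepare();

	if (!umem)
		return -1;	/* the driver returns -ENOMEM here */

	/* Only once the allocation has succeeded: stop activity, flush
	 * in-flight work, then install the fresh table. */
	printf("stop, flush, install new table\n");
	free(d->umem);
	d->umem = umem;
	return 0;
}

int main(void)
{
	struct test_dev d = { NULL };
	return reset_owner(&d) ? 1 : 0;
}

Splitting out the prepare step means a failed allocation leaves the device state untouched, which is the point of doing it before the stop/flush.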