Diffstat (limited to 'drivers')
66 files changed, 761 insertions, 293 deletions
diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c index 976019429164..18be8b98e9a8 100644 --- a/drivers/accel/ivpu/ivpu_hw_37xx.c +++ b/drivers/accel/ivpu/ivpu_hw_37xx.c @@ -940,9 +940,6 @@ static u32 ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq) if (status == 0) return 0; - /* Disable global interrupt before handling local buttress interrupts */ - REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1); - if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status)) ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", REGB_RD32(VPU_37XX_BUTTRESS_CURRENT_PLL)); @@ -974,9 +971,6 @@ static u32 ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq) else REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, status); - /* Re-enable global interrupt */ - REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0); - if (schedule_recovery) ivpu_pm_schedule_recovery(vdev); @@ -988,9 +982,14 @@ static irqreturn_t ivpu_hw_37xx_irq_handler(int irq, void *ptr) struct ivpu_device *vdev = ptr; u32 ret_irqv, ret_irqb; + REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1); + ret_irqv = ivpu_hw_37xx_irqv_handler(vdev, irq); ret_irqb = ivpu_hw_37xx_irqb_handler(vdev, irq); + /* Re-enable global interrupts to re-trigger MSI for pending interrupts */ + REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0); + return IRQ_RETVAL(ret_irqb | ret_irqv); } diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index f96bf32cd368..7d88db451cfb 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -3339,6 +3339,16 @@ static int acpi_nfit_add(struct acpi_device *adev) acpi_size sz; int rc = 0; + rc = acpi_dev_install_notify_handler(adev, ACPI_DEVICE_NOTIFY, + acpi_nfit_notify); + if (rc) + return rc; + + rc = devm_add_action_or_reset(dev, acpi_nfit_remove_notify_handler, + adev); + if (rc) + return rc; + status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl); if (ACPI_FAILURE(status)) { /* The NVDIMM root device allows OS to trigger enumeration of @@ -3386,17 +3396,7 @@ static int acpi_nfit_add(struct acpi_device *adev) if (rc) return rc; - rc = devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc); - if (rc) - return rc; - - rc = acpi_dev_install_notify_handler(adev, ACPI_DEVICE_NOTIFY, - acpi_nfit_notify); - if (rc) - return rc; - - return devm_add_action_or_reset(dev, acpi_nfit_remove_notify_handler, - adev); + return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc); } static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle) diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index a371b497035e..3a957c4da409 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1053,10 +1053,11 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev) /* * Ask the sd driver to issue START STOP UNIT on runtime suspend - * and resume only. For system level suspend/resume, devices - * power state is handled directly by libata EH. + * and resume and shutdown only. For system level suspend/resume, + * devices power state is handled directly by libata EH. 
*/ sdev->manage_runtime_start_stop = true; + sdev->manage_shutdown = true; } /* diff --git a/drivers/cache/Kconfig b/drivers/cache/Kconfig index a57677f908f3..d6e5e3abaad8 100644 --- a/drivers/cache/Kconfig +++ b/drivers/cache/Kconfig @@ -3,7 +3,7 @@ menu "Cache Drivers" config AX45MP_L2_CACHE bool "Andes Technology AX45MP L2 Cache controller" - depends on RISCV_DMA_NONCOHERENT + depends on RISCV select RISCV_NONSTANDARD_CACHE_OPS help Support for the L2 cache controller on Andes Technology AX45MP platforms. diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index c249f9791ae8..473563bc7496 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -3416,6 +3416,7 @@ static void possible_parent_show(struct seq_file *s, struct clk_core *core, unsigned int i, char terminator) { struct clk_core *parent; + const char *name = NULL; /* * Go through the following options to fetch a parent's name. @@ -3430,18 +3431,20 @@ static void possible_parent_show(struct seq_file *s, struct clk_core *core, * registered (yet). */ parent = clk_core_get_parent_by_index(core, i); - if (parent) + if (parent) { seq_puts(s, parent->name); - else if (core->parents[i].name) + } else if (core->parents[i].name) { seq_puts(s, core->parents[i].name); - else if (core->parents[i].fw_name) + } else if (core->parents[i].fw_name) { seq_printf(s, "<%s>(fw)", core->parents[i].fw_name); - else if (core->parents[i].index >= 0) - seq_puts(s, - of_clk_get_parent_name(core->of_node, - core->parents[i].index)); - else - seq_puts(s, "(missing)"); + } else { + if (core->parents[i].index >= 0) + name = of_clk_get_parent_name(core->of_node, core->parents[i].index); + if (!name) + name = "(missing)"; + + seq_puts(s, name); + } seq_putc(s, terminator); } diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c index 8dd601bd8538..0a5a95e0267f 100644 --- a/drivers/clk/socfpga/clk-gate.c +++ b/drivers/clk/socfpga/clk-gate.c @@ -87,10 +87,8 @@ static int socfpga_clk_set_parent(struct clk_hw *hwclk, u8 parent) return 0; } -static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk, - unsigned long parent_rate) +static u32 socfpga_clk_get_div(struct socfpga_gate_clk *socfpgaclk) { - struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(hwclk); u32 div = 1, val; if (socfpgaclk->fixed_div) @@ -105,12 +103,33 @@ static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk, div = (1 << val); } + return div; +} + +static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk, + unsigned long parent_rate) +{ + struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(hwclk); + u32 div = socfpga_clk_get_div(socfpgaclk); + return parent_rate / div; } + +static int socfpga_clk_determine_rate(struct clk_hw *hwclk, + struct clk_rate_request *req) +{ + struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(hwclk); + u32 div = socfpga_clk_get_div(socfpgaclk); + + req->rate = req->best_parent_rate / div; + + return 0; +} + static struct clk_ops gateclk_ops = { .recalc_rate = socfpga_clk_recalc_rate, - .determine_rate = clk_hw_determine_rate_no_reparent, + .determine_rate = socfpga_clk_determine_rate, .get_parent = socfpga_clk_get_parent, .set_parent = socfpga_clk_set_parent, }; diff --git a/drivers/clk/stm32/clk-stm32-core.c b/drivers/clk/stm32/clk-stm32-core.c index d5aa09e9fce4..067b918a8894 100644 --- a/drivers/clk/stm32/clk-stm32-core.c +++ b/drivers/clk/stm32/clk-stm32-core.c @@ -431,7 +431,7 @@ static int clk_stm32_composite_determine_rate(struct clk_hw *hw, { struct clk_stm32_composite *composite = 
to_clk_stm32_composite(hw); const struct stm32_div_cfg *divider; - unsigned long rate; + long rate; if (composite->div_id == NO_STM32_DIV) return 0; diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c index 868bc7af21b0..9b2824ed785b 100644 --- a/drivers/clk/ti/clk-44xx.c +++ b/drivers/clk/ti/clk-44xx.c @@ -749,9 +749,14 @@ static struct ti_dt_clk omap44xx_clks[] = { DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe-clkctrl:0028:26"), DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe-clkctrl:0030:26"), DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe-clkctrl:0038:26"), + DT_CLK("40122000.mcbsp", "prcm_fck", "abe-clkctrl:0028:26"), + DT_CLK("40124000.mcbsp", "prcm_fck", "abe-clkctrl:0030:26"), + DT_CLK("40126000.mcbsp", "prcm_fck", "abe-clkctrl:0038:26"), DT_CLK(NULL, "mcbsp4_sync_mux_ck", "l4-per-clkctrl:00c0:26"), + DT_CLK("48096000.mcbsp", "prcm_fck", "l4-per-clkctrl:00c0:26"), DT_CLK(NULL, "ocp2scp_usb_phy_phy_48m", "l3-init-clkctrl:00c0:8"), DT_CLK(NULL, "otg_60m_gfclk", "l3-init-clkctrl:0040:24"), + DT_CLK(NULL, "pad_fck", "pad_clks_ck"), DT_CLK(NULL, "per_mcbsp4_gfclk", "l4-per-clkctrl:00c0:24"), DT_CLK(NULL, "pmd_stm_clock_mux_ck", "emu-sys-clkctrl:0000:20"), DT_CLK(NULL, "pmd_trace_clk_mux_ck", "emu-sys-clkctrl:0000:22"), diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c index b4aff76eb373..74dfd5823f83 100644 --- a/drivers/clk/ti/clk-54xx.c +++ b/drivers/clk/ti/clk-54xx.c @@ -565,15 +565,19 @@ static struct ti_dt_clk omap54xx_clks[] = { DT_CLK(NULL, "gpio8_dbclk", "l4per-clkctrl:00f8:8"), DT_CLK(NULL, "mcbsp1_gfclk", "abe-clkctrl:0028:24"), DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe-clkctrl:0028:26"), + DT_CLK("40122000.mcbsp", "prcm_fck", "abe-clkctrl:0028:26"), DT_CLK(NULL, "mcbsp2_gfclk", "abe-clkctrl:0030:24"), DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe-clkctrl:0030:26"), + DT_CLK("40124000.mcbsp", "prcm_fck", "abe-clkctrl:0030:26"), DT_CLK(NULL, "mcbsp3_gfclk", "abe-clkctrl:0038:24"), DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe-clkctrl:0038:26"), + DT_CLK("40126000.mcbsp", "prcm_fck", "abe-clkctrl:0038:26"), DT_CLK(NULL, "mmc1_32khz_clk", "l3init-clkctrl:0008:8"), DT_CLK(NULL, "mmc1_fclk", "l3init-clkctrl:0008:25"), DT_CLK(NULL, "mmc1_fclk_mux", "l3init-clkctrl:0008:24"), DT_CLK(NULL, "mmc2_fclk", "l3init-clkctrl:0010:25"), DT_CLK(NULL, "mmc2_fclk_mux", "l3init-clkctrl:0010:24"), + DT_CLK(NULL, "pad_fck", "pad_clks_ck"), DT_CLK(NULL, "sata_ref_clk", "l3init-clkctrl:0068:8"), DT_CLK(NULL, "timer10_gfclk_mux", "l4per-clkctrl:0008:24"), DT_CLK(NULL, "timer11_gfclk_mux", "l4per-clkctrl:0010:24"), diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c index 05d562e9c8b1..44b19e696176 100644 --- a/drivers/connector/cn_proc.c +++ b/drivers/connector/cn_proc.c @@ -54,7 +54,7 @@ static int cn_filter(struct sock *dsk, struct sk_buff *skb, void *data) enum proc_cn_mcast_op mc_op; uintptr_t val; - if (!dsk || !data) + if (!dsk || !dsk->sk_user_data || !data) return 0; ptr = (__u32 *)data; diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h index 59a4c0259456..154590e1f764 100644 --- a/drivers/crypto/virtio/virtio_crypto_common.h +++ b/drivers/crypto/virtio/virtio_crypto_common.h @@ -35,6 +35,9 @@ struct virtio_crypto { struct virtqueue *ctrl_vq; struct data_queue *data_vq; + /* Work struct for config space updates */ + struct work_struct config_work; + /* To protect the vq operations for the controlq */ spinlock_t ctrl_lock; diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c 
index 94849fa3bd74..43a0838d31ff 100644 --- a/drivers/crypto/virtio/virtio_crypto_core.c +++ b/drivers/crypto/virtio/virtio_crypto_core.c @@ -335,6 +335,14 @@ static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto) virtcrypto_free_queues(vcrypto); } +static void vcrypto_config_changed_work(struct work_struct *work) +{ + struct virtio_crypto *vcrypto = + container_of(work, struct virtio_crypto, config_work); + + virtcrypto_update_status(vcrypto); +} + static int virtcrypto_probe(struct virtio_device *vdev) { int err = -EFAULT; @@ -454,6 +462,8 @@ static int virtcrypto_probe(struct virtio_device *vdev) if (err) goto free_engines; + INIT_WORK(&vcrypto->config_work, vcrypto_config_changed_work); + return 0; free_engines: @@ -490,6 +500,7 @@ static void virtcrypto_remove(struct virtio_device *vdev) dev_info(&vdev->dev, "Start virtcrypto_remove.\n"); + flush_work(&vcrypto->config_work); if (virtcrypto_dev_started(vcrypto)) virtcrypto_dev_stop(vcrypto); virtio_reset_device(vdev); @@ -504,7 +515,7 @@ static void virtcrypto_config_changed(struct virtio_device *vdev) { struct virtio_crypto *vcrypto = vdev->priv; - virtcrypto_update_status(vcrypto); + schedule_work(&vcrypto->config_work); } #ifdef CONFIG_PM_SLEEP @@ -512,6 +523,7 @@ static int virtcrypto_freeze(struct virtio_device *vdev) { struct virtio_crypto *vcrypto = vdev->priv; + flush_work(&vcrypto->config_work); virtio_reset_device(vdev); virtcrypto_free_unused_reqs(vcrypto); if (virtcrypto_dev_started(vcrypto)) diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index 749868b9e80d..7edf2c95282f 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c @@ -1521,6 +1521,7 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev) if (sbp2_param_exclusive_login) { sdev->manage_system_start_stop = true; sdev->manage_runtime_start_stop = true; + sdev->manage_shutdown = true; } if (sdev->type == TYPE_ROM) diff --git a/drivers/firmware/imx/imx-dsp.c b/drivers/firmware/imx/imx-dsp.c index 508eab346fc6..a48a58e0c61f 100644 --- a/drivers/firmware/imx/imx-dsp.c +++ b/drivers/firmware/imx/imx-dsp.c @@ -114,11 +114,11 @@ static int imx_dsp_setup_channels(struct imx_dsp_ipc *dsp_ipc) dsp_chan->idx = i % 2; dsp_chan->ch = mbox_request_channel_byname(cl, chan_name); if (IS_ERR(dsp_chan->ch)) { - kfree(dsp_chan->name); ret = PTR_ERR(dsp_chan->ch); if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to request mbox chan %s ret %d\n", chan_name, ret); + kfree(dsp_chan->name); goto out; } diff --git a/drivers/fpga/tests/Kconfig b/drivers/fpga/tests/Kconfig index e4a64815f16d..d4e55204c092 100644 --- a/drivers/fpga/tests/Kconfig +++ b/drivers/fpga/tests/Kconfig @@ -1,6 +1,6 @@ config FPGA_KUNIT_TESTS - tristate "KUnit test for the FPGA subsystem" if !KUNIT_ALL_TESTS - depends on FPGA && FPGA_REGION && FPGA_BRIDGE && KUNIT=y + bool "KUnit test for the FPGA subsystem" if !KUNIT_ALL_TESTS + depends on FPGA=y && FPGA_REGION=y && FPGA_BRIDGE=y && KUNIT=y && MODULES=n default KUNIT_ALL_TESTS help This builds unit tests for the FPGA subsystem diff --git a/drivers/fpga/tests/fpga-region-test.c b/drivers/fpga/tests/fpga-region-test.c index 9f9d50ee7871..baab07e3fc59 100644 --- a/drivers/fpga/tests/fpga-region-test.c +++ b/drivers/fpga/tests/fpga-region-test.c @@ -93,6 +93,8 @@ static void fpga_region_test_class_find(struct kunit *test) region = fpga_region_class_find(NULL, &ctx->region_pdev->dev, fake_region_match); KUNIT_EXPECT_PTR_EQ(test, region, ctx->region); + + put_device(&region->dev); + } /* diff --git
a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 7d6daf8d2bfa..e036011137aa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1103,7 +1103,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem, if (unlikely(ret)) goto error; - ret = drm_exec_lock_obj(&ctx->exec, &bo->tbo.base); + ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1); drm_exec_retry_on_contention(&ctx->exec); if (unlikely(ret)) goto error; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index efdb1c48f431..d93a8961274c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -65,7 +65,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, } amdgpu_sync_create(&p->sync); - drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT); + drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT | + DRM_EXEC_IGNORE_DUPLICATES); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index aac52d9754e6..76549c2cffeb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -55,6 +55,10 @@ bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio) return true; default: case AMDGPU_CTX_PRIORITY_UNSET: + /* UNSET priority is not valid and we don't carry that + * around, but set it to NORMAL in the only place this + * function is called, amdgpu_ctx_ioctl(). + */ return false; } } @@ -95,9 +99,6 @@ amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio) static int amdgpu_ctx_priority_permit(struct drm_file *filp, int32_t priority) { - if (!amdgpu_ctx_priority_is_valid(priority)) - return -EINVAL; - /* NORMAL and below are accessible by everyone */ if (priority <= AMDGPU_CTX_PRIORITY_NORMAL) return 0; @@ -632,8 +633,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev, return 0; } - - static int amdgpu_ctx_stable_pstate(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t id, bool set, u32 *stable_pstate) @@ -676,8 +675,10 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, id = args->in.ctx_id; priority = args->in.priority; - /* For backwards compatibility reasons, we need to accept - * ioctls with garbage in the priority field */ + /* For backwards compatibility, we need to accept ioctls with garbage + * in the priority field. Garbage values in the priority field, result + * in the priority being set to NORMAL. 
+ */ if (!amdgpu_ctx_priority_is_valid(priority)) priority = AMDGPU_CTX_PRIORITY_NORMAL; diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 6a8494f98d3e..fe8ba9e9837b 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -1124,7 +1124,7 @@ static void vi_program_aspm(struct amdgpu_device *adev) bool bL1SS = false; bool bClkReqSupport = true; - if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk()) + if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_pcie_dynamic_switching_supported()) return; if (adev->flags & AMD_IS_APU || diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c index ed96cfcfa304..8c929ef72c72 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c @@ -2574,14 +2574,14 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper( struct drm_dp_mst_branch *found_mstb; struct drm_dp_mst_port *port; + if (!mstb) + return NULL; + if (memcmp(mstb->guid, guid, 16) == 0) return mstb; list_for_each_entry(port, &mstb->ports, next) { - if (!port->mstb) - continue; - found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid); if (found_mstb) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c index 0b414eae1683..2c0f1f3e28ff 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c @@ -376,9 +376,26 @@ void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags) * driver threads, but also with hardware/firmware agents. A dedicated * locking register is used. */ - if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) + if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) { + /* + * The steering control and semaphore registers are inside an + * "always on" power domain with respect to RC6. However there + * are some issues if higher-level platform sleep states are + * entering/exiting at the same time these registers are + * accessed. Grabbing GT forcewake and holding it over the + * entire lock/steer/unlock cycle ensures that those sleep + * states have been fully exited before we access these + * registers. This wakeref will be released in the unlock + * routine. + * + * This is expected to become a formally documented/numbered + * workaround soon. 
+ */ + intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_GT); + err = wait_for(intel_uncore_read_fw(gt->uncore, MTL_STEER_SEMAPHORE) == 0x1, 100); + } /* * Even on platforms with a hardware lock, we'll continue to grab @@ -415,8 +432,11 @@ void intel_gt_mcr_unlock(struct intel_gt *gt, unsigned long flags) { spin_unlock_irqrestore(&gt->mcr_lock, flags); - if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) + if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) { intel_uncore_write_fw(gt->uncore, MTL_STEER_SEMAPHORE, 0x1); + + intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_GT); + } } /** diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 04bc1f4a1115..59e1e21df271 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -482,8 +482,7 @@ static void oa_report_id_clear(struct i915_perf_stream *stream, u32 *report) static bool oa_report_ctx_invalid(struct i915_perf_stream *stream, void *report) { return !(oa_report_id(stream, report) & - stream->perf->gen8_valid_ctx_bit) && - GRAPHICS_VER(stream->perf->i915) <= 11; + stream->perf->gen8_valid_ctx_bit); } static u64 oa_timestamp(struct i915_perf_stream *stream, void *report) @@ -5106,6 +5105,7 @@ static void i915_perf_init_info(struct drm_i915_private *i915) perf->gen8_valid_ctx_bit = BIT(16); break; case 12: + perf->gen8_valid_ctx_bit = BIT(16); /* * Calculate offset at runtime in oa_pin_context for gen12 and * cache the value in perf->ctx_oactxctrl_offset. diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index d35973b41186..7b1076b5e748 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -832,9 +832,18 @@ static void i915_pmu_event_start(struct perf_event *event, int flags) static void i915_pmu_event_stop(struct perf_event *event, int flags) { + struct drm_i915_private *i915 = + container_of(event->pmu, typeof(*i915), pmu.base); + struct i915_pmu *pmu = &i915->pmu; + + if (pmu->closed) + goto out; + if (flags & PERF_EF_UPDATE) i915_pmu_event_read(event); i915_pmu_disable(event); + +out: event->hw.state = PERF_HES_STOPPED; } diff --git a/drivers/gpu/drm/logicvc/Kconfig b/drivers/gpu/drm/logicvc/Kconfig index fa7a88368809..1df22a852a23 100644 --- a/drivers/gpu/drm/logicvc/Kconfig +++ b/drivers/gpu/drm/logicvc/Kconfig @@ -5,5 +5,7 @@ config DRM_LOGICVC select DRM_KMS_HELPER select DRM_KMS_DMA_HELPER select DRM_GEM_DMA_HELPER + select REGMAP + select REGMAP_MMIO help DRM display driver for the logiCVC programmable logic block from Xylon diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c index 5a416b39b818..28e2a5fc4528 100644 --- a/drivers/i2c/busses/i2c-aspeed.c +++ b/drivers/i2c/busses/i2c-aspeed.c @@ -749,6 +749,8 @@ static void __aspeed_i2c_reg_slave(struct aspeed_i2c_bus *bus, u16 slave_addr) func_ctrl_reg_val = readl(bus->base + ASPEED_I2C_FUN_CTRL_REG); func_ctrl_reg_val |= ASPEED_I2CD_SLAVE_EN; writel(func_ctrl_reg_val, bus->base + ASPEED_I2C_FUN_CTRL_REG); + + bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE; } static int aspeed_i2c_reg_slave(struct i2c_client *client) @@ -765,7 +767,6 @@ static int aspeed_i2c_reg_slave(struct i2c_client *client) __aspeed_i2c_reg_slave(bus, client->addr); bus->slave = client; - bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE; spin_unlock_irqrestore(&bus->lock, flags); return 0; diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c index 579b30581725..0d3c9a041b56 100644 --- a/drivers/i2c/busses/i2c-stm32f7.c +++
b/drivers/i2c/busses/i2c-stm32f7.c @@ -1059,9 +1059,10 @@ static int stm32f7_i2c_smbus_xfer_msg(struct stm32f7_i2c_dev *i2c_dev, /* Configure PEC */ if ((flags & I2C_CLIENT_PEC) && f7_msg->size != I2C_SMBUS_QUICK) { cr1 |= STM32F7_I2C_CR1_PECEN; - cr2 |= STM32F7_I2C_CR2_PECBYTE; - if (!f7_msg->read_write) + if (!f7_msg->read_write) { + cr2 |= STM32F7_I2C_CR2_PECBYTE; f7_msg->count++; + } } else { cr1 &= ~STM32F7_I2C_CR1_PECEN; cr2 &= ~STM32F7_I2C_CR2_PECBYTE; @@ -1149,8 +1150,10 @@ static void stm32f7_i2c_smbus_rep_start(struct stm32f7_i2c_dev *i2c_dev) f7_msg->stop = true; /* Add one byte for PEC if needed */ - if (cr1 & STM32F7_I2C_CR1_PECEN) + if (cr1 & STM32F7_I2C_CR1_PECEN) { + cr2 |= STM32F7_I2C_CR2_PECBYTE; f7_msg->count++; + } /* Set number of bytes to be transferred */ cr2 &= ~(STM32F7_I2C_CR2_NBYTES_MASK); diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c index 22f2280eab7f..9f2e4aa28159 100644 --- a/drivers/i2c/muxes/i2c-demux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c @@ -61,7 +61,7 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne if (ret) goto err; - adap = of_find_i2c_adapter_by_node(priv->chan[new_chan].parent_np); + adap = of_get_i2c_adapter_by_node(priv->chan[new_chan].parent_np); if (!adap) { ret = -ENODEV; goto err_with_revert; diff --git a/drivers/i2c/muxes/i2c-mux-gpmux.c b/drivers/i2c/muxes/i2c-mux-gpmux.c index baccf4bfaf02..8305661e1253 100644 --- a/drivers/i2c/muxes/i2c-mux-gpmux.c +++ b/drivers/i2c/muxes/i2c-mux-gpmux.c @@ -52,7 +52,7 @@ static struct i2c_adapter *mux_parent_adapter(struct device *dev) dev_err(dev, "Cannot parse i2c-parent\n"); return ERR_PTR(-ENODEV); } - parent = of_find_i2c_adapter_by_node(parent_np); + parent = of_get_i2c_adapter_by_node(parent_np); of_node_put(parent_np); if (!parent) return ERR_PTR(-EPROBE_DEFER); diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c index 18236b9fa14a..6ebca7bfd8a2 100644 --- a/drivers/i2c/muxes/i2c-mux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c @@ -62,7 +62,7 @@ static struct i2c_adapter *i2c_mux_pinctrl_parent_adapter(struct device *dev) dev_err(dev, "Cannot parse i2c-parent\n"); return ERR_PTR(-ENODEV); } - parent = of_find_i2c_adapter_by_node(parent_np); + parent = of_get_i2c_adapter_by_node(parent_np); of_node_put(parent_np); if (!parent) return ERR_PTR(-EPROBE_DEFER); diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c index cff1ba57fb16..43c8af41b4a9 100644 --- a/drivers/iio/adc/exynos_adc.c +++ b/drivers/iio/adc/exynos_adc.c @@ -826,16 +826,26 @@ static int exynos_adc_probe(struct platform_device *pdev) } } + /* leave out any TS related code if unreachable */ + if (IS_REACHABLE(CONFIG_INPUT)) { + has_ts = of_property_read_bool(pdev->dev.of_node, + "has-touchscreen") || pdata; + } + irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; info->irq = irq; - irq = platform_get_irq(pdev, 1); - if (irq == -EPROBE_DEFER) - return irq; + if (has_ts) { + irq = platform_get_irq(pdev, 1); + if (irq == -EPROBE_DEFER) + return irq; - info->tsirq = irq; + info->tsirq = irq; + } else { + info->tsirq = -1; + } info->dev = &pdev->dev; @@ -900,12 +910,6 @@ static int exynos_adc_probe(struct platform_device *pdev) if (info->data->init_hw) info->data->init_hw(info); - /* leave out any TS related code if unreachable */ - if (IS_REACHABLE(CONFIG_INPUT)) { - has_ts = of_property_read_bool(pdev->dev.of_node, - "has-touchscreen") || pdata; - } - if (pdata) info->delay = 
pdata->delay; else diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c index dba73300f894..564c0cad0fc7 100644 --- a/drivers/iio/adc/xilinx-xadc-core.c +++ b/drivers/iio/adc/xilinx-xadc-core.c @@ -456,6 +456,9 @@ static const struct xadc_ops xadc_zynq_ops = { .interrupt_handler = xadc_zynq_interrupt_handler, .update_alarm = xadc_zynq_update_alarm, .type = XADC_TYPE_S7, + /* Temp in C = (val * 503.975) / 2**bits - 273.15 */ + .temp_scale = 503975, + .temp_offset = 273150, }; static const unsigned int xadc_axi_reg_offsets[] = { @@ -566,6 +569,9 @@ static const struct xadc_ops xadc_7s_axi_ops = { .interrupt_handler = xadc_axi_interrupt_handler, .flags = XADC_FLAGS_BUFFERED | XADC_FLAGS_IRQ_OPTIONAL, .type = XADC_TYPE_S7, + /* Temp in C = (val * 503.975) / 2**bits - 273.15 */ + .temp_scale = 503975, + .temp_offset = 273150, }; static const struct xadc_ops xadc_us_axi_ops = { @@ -577,6 +583,12 @@ static const struct xadc_ops xadc_us_axi_ops = { .interrupt_handler = xadc_axi_interrupt_handler, .flags = XADC_FLAGS_BUFFERED | XADC_FLAGS_IRQ_OPTIONAL, .type = XADC_TYPE_US, + /** + * Values below are for UltraScale+ (SYSMONE4) using internal reference. + * See https://docs.xilinx.com/v/u/en-US/ug580-ultrascale-sysmon + */ + .temp_scale = 509314, + .temp_offset = 280231, }; static int _xadc_update_adc_reg(struct xadc *xadc, unsigned int reg, @@ -945,8 +957,7 @@ static int xadc_read_raw(struct iio_dev *indio_dev, *val2 = bits; return IIO_VAL_FRACTIONAL_LOG2; case IIO_TEMP: - /* Temp in C = (val * 503.975) / 2**bits - 273.15 */ - *val = 503975; + *val = xadc->ops->temp_scale; *val2 = bits; return IIO_VAL_FRACTIONAL_LOG2; default: @@ -954,7 +965,7 @@ static int xadc_read_raw(struct iio_dev *indio_dev, } case IIO_CHAN_INFO_OFFSET: /* Only the temperature channel has an offset */ - *val = -((273150 << bits) / 503975); + *val = -((xadc->ops->temp_offset << bits) / xadc->ops->temp_scale); return IIO_VAL_INT; case IIO_CHAN_INFO_SAMP_FREQ: ret = xadc_read_samplerate(xadc); @@ -1423,28 +1434,6 @@ static int xadc_probe(struct platform_device *pdev) if (ret) return ret; - /* Disable all alarms */ - ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_ALARM_MASK, - XADC_CONF1_ALARM_MASK); - if (ret) - return ret; - - /* Set thresholds to min/max */ - for (i = 0; i < 16; i++) { - /* - * Set max voltage threshold and both temperature thresholds to - * 0xffff, min voltage threshold to 0. - */ - if (i % 8 < 4 || i == 7) - xadc->threshold[i] = 0xffff; - else - xadc->threshold[i] = 0; - ret = xadc_write_adc_reg(xadc, XADC_REG_THRESHOLD(i), - xadc->threshold[i]); - if (ret) - return ret; - } - /* Go to non-buffered mode */ xadc_postdisable(indio_dev); diff --git a/drivers/iio/adc/xilinx-xadc.h b/drivers/iio/adc/xilinx-xadc.h index 7d78ce698967..3036f4d613ff 100644 --- a/drivers/iio/adc/xilinx-xadc.h +++ b/drivers/iio/adc/xilinx-xadc.h @@ -85,6 +85,8 @@ struct xadc_ops { unsigned int flags; enum xadc_type type; + int temp_scale; + int temp_offset; }; static inline int _xadc_read_adc_reg(struct xadc *xadc, unsigned int reg, diff --git a/drivers/iio/afe/iio-rescale.c b/drivers/iio/afe/iio-rescale.c index 1f280c360701..56e5913ab82d 100644 --- a/drivers/iio/afe/iio-rescale.c +++ b/drivers/iio/afe/iio-rescale.c @@ -214,8 +214,18 @@ static int rescale_read_raw(struct iio_dev *indio_dev, return ret < 0 ? 
ret : -EOPNOTSUPP; } - ret = iio_read_channel_scale(rescale->source, &scale, &scale2); - return rescale_process_offset(rescale, ret, scale, scale2, + if (iio_channel_has_info(rescale->source->channel, + IIO_CHAN_INFO_SCALE)) { + ret = iio_read_channel_scale(rescale->source, &scale, &scale2); + return rescale_process_offset(rescale, ret, scale, scale2, + schan_off, val, val2); + } + + /* + * If we get here we have no scale so scale 1:1 but apply + * rescaler and offset, if any. + */ + return rescale_process_offset(rescale, IIO_VAL_FRACTIONAL, 1, 1, schan_off, val, val2); default: return -EINVAL; @@ -280,8 +290,9 @@ static int rescale_configure_channel(struct device *dev, chan->type = rescale->cfg->type; if (iio_channel_has_info(schan, IIO_CHAN_INFO_RAW) && - iio_channel_has_info(schan, IIO_CHAN_INFO_SCALE)) { - dev_info(dev, "using raw+scale source channel\n"); + (iio_channel_has_info(schan, IIO_CHAN_INFO_SCALE) || + iio_channel_has_info(schan, IIO_CHAN_INFO_OFFSET))) { + dev_info(dev, "using raw+scale/offset source channel\n"); } else if (iio_channel_has_info(schan, IIO_CHAN_INFO_PROCESSED)) { dev_info(dev, "using processed channel\n"); rescale->chan_processed = true; diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 3bfc56df4f78..c146378c7d03 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -1108,7 +1108,8 @@ map_end: } - iommu_flush_iotlb_all(domain); + if (!list_empty(&mappings) && iommu_is_dma_domain(domain)) + iommu_flush_iotlb_all(domain); out: iommu_put_resv_regions(dev, &mappings); diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c index 1efd17979f24..b82b89888a5e 100644 --- a/drivers/isdn/hardware/mISDN/hfcsusb.c +++ b/drivers/isdn/hardware/mISDN/hfcsusb.c @@ -678,7 +678,7 @@ ph_state(struct dchannel *dch) } /* - * disable/enable BChannel for desired protocoll + * disable/enable BChannel for desired protocol */ static int hfcsusb_setup_bch(struct bchannel *bch, int protocol) diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index a66b7c111cd5..1c6c62a7f7f5 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -958,6 +958,7 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx) if (err) return err; + memset(ctx->buf->virt, 0, pkt_size); rpra = ctx->buf->virt; list = fastrpc_invoke_buf_start(rpra, ctx->nscalars); pages = fastrpc_phy_page_start(list, ctx->nscalars); @@ -1090,6 +1091,7 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx, } } + /* Clean up fdlist which is updated by DSP */ for (i = 0; i < FASTRPC_MAX_FDLIST; i++) { if (!fdlist[i]) break; @@ -1156,11 +1158,9 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel, if (IS_ERR(ctx)) return PTR_ERR(ctx); - if (ctx->nscalars) { - err = fastrpc_get_args(kernel, ctx); - if (err) - goto bail; - } + err = fastrpc_get_args(kernel, ctx); + if (err) + goto bail; /* make sure that all CPU memory writes are seen by DSP */ dma_wmb(); @@ -1179,20 +1179,18 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel, if (err) goto bail; + /* make sure that all memory writes by DSP are seen by CPU */ + dma_rmb(); + /* populate all the output buffers with results */ + err = fastrpc_put_args(ctx, kernel); + if (err) + goto bail; + /* Check the response from remote dsp */ err = ctx->retval; if (err) goto bail; - if (ctx->nscalars) { - /* make sure that all memory writes by DSP are seen by CPU */ - dma_rmb(); - /* populate all the output buffers with results */ - err = 
fastrpc_put_args(ctx, kernel); - if (err) - goto bail; - } - bail: if (err != -ERESTARTSYS && err != -ETIMEDOUT) { /* We are done with this compute context */ @@ -1983,11 +1981,13 @@ static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_me sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_UNMAP, 1, 0); err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]); - fastrpc_map_put(map); - if (err) + if (err) { dev_err(dev, "unmmap\tpt fd = %d, 0x%09llx error\n", map->fd, map->raddr); + return err; + } + fastrpc_map_put(map); - return err; + return 0; } static int fastrpc_req_mem_unmap(struct fastrpc_user *fl, char __user *argp) diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c index ca66b747b7c5..d7c274af6d4d 100644 --- a/drivers/net/ethernet/adi/adin1110.c +++ b/drivers/net/ethernet/adi/adin1110.c @@ -294,7 +294,7 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv) { struct adin1110_priv *priv = port_priv->priv; u32 header_len = ADIN1110_RD_HEADER_LEN; - struct spi_transfer t; + struct spi_transfer t = {0}; u32 frame_size_no_fcs; struct sk_buff *rxb; u32 frame_size; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 4d4140b7c450..f3305c434c95 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -2170,7 +2170,7 @@ static void xgene_enet_shutdown(struct platform_device *pdev) static struct platform_driver xgene_enet_driver = { .driver = { .name = "xgene-enet", - .of_match_table = of_match_ptr(xgene_enet_of_match), + .of_match_table = xgene_enet_of_match, .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match), }, .probe = xgene_enet_probe, diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 8d719f82854a..76de55306c4d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3816,6 +3816,8 @@ int t4_load_phy_fw(struct adapter *adap, int win, FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD)); ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, &param, &val, 30000); + if (ret) + return ret; /* If we have version number support, then check to see that the new * firmware got loaded properly. diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 6e310a539467..55bb0b5310d5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -580,7 +580,6 @@ struct i40e_pf { #define I40E_FLAG_DISABLE_FW_LLDP BIT(24) #define I40E_FLAG_RS_FEC BIT(25) #define I40E_FLAG_BASE_R_FEC BIT(26) -#define I40E_FLAG_VF_VLAN_PRUNING BIT(27) /* TOTAL_PORT_SHUTDOWN * Allows to physically disable the link on the NIC's port.
* If enabled, (after link down request from the OS) @@ -603,6 +602,7 @@ struct i40e_pf { * in abilities field of i40e_aq_set_phy_config structure */ #define I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED BIT(27) +#define I40E_FLAG_VF_VLAN_PRUNING BIT(28) struct i40e_client_instance *cinst; bool stat_offsets_loaded; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 0b3a27f118fb..b047c587629b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2544,7 +2544,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget, rx_buffer = i40e_rx_bi(rx_ring, ntp); i40e_inc_ntp(rx_ring); i40e_reuse_rx_page(rx_ring, rx_buffer); - cleaned_count++; + /* Update ntc and bump cleaned count if not in the + * middle of mb packet. + */ + if (rx_ring->next_to_clean == ntp) { + rx_ring->next_to_clean = + rx_ring->next_to_process; + cleaned_count++; + } continue; } @@ -2847,7 +2854,7 @@ tx_only: return budget; } - if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR) + if (q_vector->tx.ring[0].flags & I40E_TXR_FLAGS_WB_ON_ITR) q_vector->arm_wb_state = false; /* Exit the polling mode, but don't re-enable interrupts if stack might diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 37f41c8a682f..7d991e4d9b89 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -437,12 +437,12 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 next_to_process = rx_ring->next_to_process; u16 next_to_clean = rx_ring->next_to_clean; - u16 count_mask = rx_ring->count - 1; unsigned int xdp_res, xdp_xmit = 0; struct xdp_buff *first = NULL; + u32 count = rx_ring->count; struct bpf_prog *xdp_prog; + u32 entries_to_alloc; bool failure = false; - u16 cleaned_count; if (next_to_process != next_to_clean) first = *i40e_rx_bi(rx_ring, next_to_clean); @@ -475,7 +475,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) qword); bi = *i40e_rx_bi(rx_ring, next_to_process); xsk_buff_free(bi); - next_to_process = (next_to_process + 1) & count_mask; + if (++next_to_process == count) + next_to_process = 0; continue; } @@ -493,7 +494,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) else if (i40e_add_xsk_frag(rx_ring, first, bi, size)) break; - next_to_process = (next_to_process + 1) & count_mask; + if (++next_to_process == count) + next_to_process = 0; if (i40e_is_non_eop(rx_ring, rx_desc)) continue; @@ -513,10 +515,10 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) rx_ring->next_to_clean = next_to_clean; rx_ring->next_to_process = next_to_process; - cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask; - if (cleaned_count >= I40E_RX_BUFFER_WRITE) - failure |= !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count); + entries_to_alloc = I40E_DESC_UNUSED(rx_ring); + if (entries_to_alloc >= I40E_RX_BUFFER_WRITE) + failure |= !i40e_alloc_rx_buffers_zc(rx_ring, entries_to_alloc); i40e_finalize_xdp_rx(rx_ring, xdp_xmit); i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets); @@ -752,14 +754,16 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring) { - u16 count_mask = rx_ring->count - 1; u16 ntc = rx_ring->next_to_clean; u16 ntu = rx_ring->next_to_use; - for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) { + while (ntc 
!= ntu) { struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, ntc); xsk_buff_free(rx_bi); + ntc++; + if (ntc >= rx_ring->count) + ntc = 0; } } diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 6a2e6d64bc3a..b3434dbc90d6 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -1437,9 +1437,9 @@ void iavf_down(struct iavf_adapter *adapter) adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; if (!list_empty(&adapter->adv_rss_list_head)) adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG; - adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES; } + adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES; mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); } @@ -4982,8 +4982,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&adapter->finish_config, iavf_finish_config); INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task); INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task); - queue_delayed_work(adapter->wq, &adapter->watchdog_task, - msecs_to_jiffies(5 * (pdev->devfn & 0x07))); /* Setup the wait queue for indicating transition to down status */ init_waitqueue_head(&adapter->down_waitqueue); @@ -4994,6 +4992,9 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Setup the wait queue for indicating virtchannel events */ init_waitqueue_head(&adapter->vc_waitqueue); + queue_delayed_work(adapter->wq, &adapter->watchdog_task, + msecs_to_jiffies(5 * (pdev->devfn & 0x07))); + /* Initialization goes on in the work. Do not add more of it below. */ return 0; err_ioremap: diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 319ed601eaa1..4ee849985e2b 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2978,11 +2978,15 @@ static int igb_add_ethtool_nfc_entry(struct igb_adapter *adapter, if (err) goto err_out_w_lock; - igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx); + err = igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx); + if (err) + goto err_out_input_filter; spin_unlock(&adapter->nfc_lock); return 0; +err_out_input_filter: + igb_erase_filter(adapter, input); err_out_w_lock: spin_unlock(&adapter->nfc_lock); err_out: diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index 7ab6dd58e400..dd8a9d27a167 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -1817,7 +1817,7 @@ igc_ethtool_set_link_ksettings(struct net_device *netdev, struct igc_adapter *adapter = netdev_priv(netdev); struct net_device *dev = adapter->netdev; struct igc_hw *hw = &adapter->hw; - u32 advertising; + u16 advertised = 0; /* When adapter in resetting mode, autoneg/speed/duplex * cannot be changed @@ -1842,18 +1842,33 @@ igc_ethtool_set_link_ksettings(struct net_device *netdev, while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) usleep_range(1000, 2000); - ethtool_convert_link_mode_to_legacy_u32(&advertising, - cmd->link_modes.advertising); - /* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT. - * We have to check this and convert it to ADVERTISE_2500_FULL - * (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly. 
- */ - if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full)) - advertising |= ADVERTISE_2500_FULL; + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 2500baseT_Full)) + advertised |= ADVERTISE_2500_FULL; + + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseT_Full)) + advertised |= ADVERTISE_1000_FULL; + + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 100baseT_Full)) + advertised |= ADVERTISE_100_FULL; + + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 100baseT_Half)) + advertised |= ADVERTISE_100_HALF; + + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10baseT_Full)) + advertised |= ADVERTISE_10_FULL; + + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10baseT_Half)) + advertised |= ADVERTISE_10_HALF; if (cmd->base.autoneg == AUTONEG_ENABLE) { hw->mac.autoneg = 1; - hw->phy.autoneg_advertised = advertising; + hw->phy.autoneg_advertised = advertised; if (adapter->fc_autoneg) hw->fc.requested_mode = igc_fc_default; } else { diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 6351a2dc13bc..361b90007148 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -4364,7 +4364,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp, unsigned int entry = dirty_tx % NUM_TX_DESC; u32 status; - status = le32_to_cpu(tp->TxDescArray[entry].opts1); + status = le32_to_cpu(READ_ONCE(tp->TxDescArray[entry].opts1)); if (status & DescOwn) break; @@ -4394,7 +4394,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp, * If skb is NULL then we come here again once a tx irq is * triggered after the last fragment is marked transmitted. 
*/ - if (tp->cur_tx != dirty_tx && skb) + if (READ_ONCE(tp->cur_tx) != dirty_tx && skb) rtl8169_doorbell(tp); } } @@ -4427,7 +4427,7 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, int budget dma_addr_t addr; u32 status; - status = le32_to_cpu(desc->opts1); + status = le32_to_cpu(READ_ONCE(desc->opts1)); if (status & DescOwn) break; diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c index 834f000ba1c4..30ebef88248d 100644 --- a/drivers/net/ethernet/sfc/tc.c +++ b/drivers/net/ethernet/sfc/tc.c @@ -629,14 +629,14 @@ static int efx_tc_flower_record_encap_match(struct efx_nic *efx, } if (child_ip_tos_mask != old->child_ip_tos_mask) { NL_SET_ERR_MSG_FMT_MOD(extack, - "Pseudo encap match for TOS mask %#04x conflicts with existing pseudo(MASK) entry for TOS mask %#04x", + "Pseudo encap match for TOS mask %#04x conflicts with existing mask %#04x", child_ip_tos_mask, old->child_ip_tos_mask); return -EEXIST; } if (child_udp_sport_mask != old->child_udp_sport_mask) { NL_SET_ERR_MSG_FMT_MOD(extack, - "Pseudo encap match for UDP src port mask %#x conflicts with existing pseudo(MASK) entry for mask %#x", + "Pseudo encap match for UDP src port mask %#x conflicts with existing mask %#x", child_udp_sport_mask, old->child_udp_sport_mask); return -EEXIST; @@ -1081,7 +1081,7 @@ static int efx_tc_pedit_add(struct efx_nic *efx, struct efx_tc_action_set *act, /* check that we do not decrement ttl twice */ if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_DEC_TTL)) { - NL_SET_ERR_MSG_MOD(extack, "Unsupported: multiple dec ttl"); + NL_SET_ERR_MSG_MOD(extack, "multiple dec ttl are not supported"); return -EOPNOTSUPP; } act->do_ttl_dec = 1; @@ -1106,7 +1106,7 @@ static int efx_tc_pedit_add(struct efx_nic *efx, struct efx_tc_action_set *act, /* check that we do not decrement hoplimit twice */ if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_DEC_TTL)) { - NL_SET_ERR_MSG_MOD(extack, "Unsupported: multiple dec ttl"); + NL_SET_ERR_MSG_MOD(extack, "multiple dec ttl are not supported"); return -EOPNOTSUPP; } act->do_ttl_dec = 1; @@ -1120,7 +1120,7 @@ static int efx_tc_pedit_add(struct efx_nic *efx, struct efx_tc_action_set *act, } NL_SET_ERR_MSG_FMT_MOD(extack, - "Unsupported: ttl add action type %x %x %x/%x", + "ttl add action type %x %x %x/%x is not supported", fa->mangle.htype, fa->mangle.offset, fa->mangle.val, fa->mangle.mask); return -EOPNOTSUPP; @@ -1164,7 +1164,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act, case 0: if (fa->mangle.mask) { NL_SET_ERR_MSG_FMT_MOD(extack, - "Unsupported: mask (%#x) of eth.dst32 mangle", + "mask (%#x) of eth.dst32 mangle is not supported", fa->mangle.mask); return -EOPNOTSUPP; } @@ -1184,7 +1184,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act, mung->dst_mac_16 = 1; } else { NL_SET_ERR_MSG_FMT_MOD(extack, - "Unsupported: mask (%#x) of eth+4 mangle is not high or low 16b", + "mask (%#x) of eth+4 mangle is not high or low 16b", fa->mangle.mask); return -EOPNOTSUPP; } @@ -1192,7 +1192,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act, case 8: if (fa->mangle.mask) { NL_SET_ERR_MSG_FMT_MOD(extack, - "Unsupported: mask (%#x) of eth.src32 mangle", + "mask (%#x) of eth.src32 mangle is not supported", fa->mangle.mask); return -EOPNOTSUPP; } @@ -1201,7 +1201,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act, mung->src_mac_32 = 1; return efx_tc_complete_mac_mangle(efx, act, mung, extack); default: - 
NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported: mangle eth+%u %x/%x", + NL_SET_ERR_MSG_FMT_MOD(extack, "mangle eth+%u %x/%x is not supported", fa->mangle.offset, fa->mangle.val, fa->mangle.mask); return -EOPNOTSUPP; } @@ -1217,7 +1217,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act, /* check that pedit applies to ttl only */ if (fa->mangle.mask != ~EFX_TC_HDR_TYPE_TTL_MASK) { NL_SET_ERR_MSG_FMT_MOD(extack, - "Unsupported: mask (%#x) out of range, only support mangle action on ipv4.ttl", + "mask (%#x) out of range, only support mangle action on ipv4.ttl", fa->mangle.mask); return -EOPNOTSUPP; } @@ -1227,7 +1227,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act, */ if (match->mask.ip_ttl != U8_MAX) { NL_SET_ERR_MSG_FMT_MOD(extack, - "Unsupported: only support mangle ipv4.ttl when we have an exact match on ttl, mask used for match (%#x)", + "only support mangle ttl when we have an exact match, current mask (%#x)", match->mask.ip_ttl); return -EOPNOTSUPP; } @@ -1237,7 +1237,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act, */ if (match->value.ip_ttl == 0) { NL_SET_ERR_MSG_MOD(extack, - "Unsupported: we cannot decrement ttl past 0"); + "decrement ttl past 0 is not supported"); return -EOPNOTSUPP; } @@ -1245,7 +1245,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act, if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_DEC_TTL)) { NL_SET_ERR_MSG_MOD(extack, - "Unsupported: multiple dec ttl"); + "multiple dec ttl is not supported"); return -EOPNOTSUPP; } @@ -1259,7 +1259,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act, fallthrough; default: NL_SET_ERR_MSG_FMT_MOD(extack, - "Unsupported: only support mangle on the ttl field (offset is %u)", + "only support mangle on the ttl field (offset is %u)", fa->mangle.offset); return -EOPNOTSUPP; } @@ -1275,7 +1275,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act, /* check that pedit applies to ttl only */ if (fa->mangle.mask != EFX_TC_HDR_TYPE_HLIMIT_MASK) { NL_SET_ERR_MSG_FMT_MOD(extack, - "Unsupported: mask (%#x) out of range, only support mangle action on ipv6.hop_limit", + "mask (%#x) out of range, only support mangle action on ipv6.hop_limit", fa->mangle.mask); return -EOPNOTSUPP; @@ -1286,7 +1286,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act, */ if (match->mask.ip_ttl != U8_MAX) { NL_SET_ERR_MSG_FMT_MOD(extack, - "Unsupported: only support mangle ipv6.hop_limit when we have an exact match on ttl, mask used for match (%#x)", + "only support hop_limit when we have an exact match, current mask (%#x)", match->mask.ip_ttl); return -EOPNOTSUPP; } @@ -1296,7 +1296,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act, */ if (match->value.ip_ttl == 0) { NL_SET_ERR_MSG_MOD(extack, - "Unsupported: we cannot decrement hop_limit past 0"); + "decrementing hop_limit past 0 is not supported"); return -EOPNOTSUPP; } @@ -1304,7 +1304,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act, if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_DEC_TTL)) { NL_SET_ERR_MSG_MOD(extack, - "Unsupported: multiple dec ttl"); + "multiple dec ttl is not supported"); return -EOPNOTSUPP; } @@ -1318,7 +1318,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act, fallthrough; default: NL_SET_ERR_MSG_FMT_MOD(extack, - "Unsupported: only support mangle on the hop_limit field"); + "only support 
mangle on the hop_limit field"); return -EOPNOTSUPP; } default: diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index ed1a5a31a491..5801f4d50f95 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1197,6 +1197,17 @@ static int stmmac_init_phy(struct net_device *dev) return ret; } +static void stmmac_set_half_duplex(struct stmmac_priv *priv) +{ + /* Half-Duplex can only work with single tx queue */ + if (priv->plat->tx_queues_to_use > 1) + priv->phylink_config.mac_capabilities &= + ~(MAC_10HD | MAC_100HD | MAC_1000HD); + else + priv->phylink_config.mac_capabilities |= + (MAC_10HD | MAC_100HD | MAC_1000HD); +} + static int stmmac_phy_setup(struct stmmac_priv *priv) { struct stmmac_mdio_bus_data *mdio_bus_data; @@ -1228,10 +1239,7 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) MAC_10FD | MAC_100FD | MAC_1000FD; - /* Half-Duplex can only work with single queue */ - if (priv->plat->tx_queues_to_use <= 1) - priv->phylink_config.mac_capabilities |= MAC_10HD | MAC_100HD | - MAC_1000HD; + stmmac_set_half_duplex(priv); /* Get the MAC specific capabilities */ stmmac_mac_phylink_get_caps(priv); @@ -7172,6 +7180,7 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt) priv->rss.table[i] = ethtool_rxfh_indir_default(i, rx_cnt); + stmmac_set_half_duplex(priv); stmmac_napi_add(dev); if (netif_running(dev)) diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c index dc14a66583ff..44488c153ea2 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c @@ -1217,7 +1217,7 @@ static int gelic_wl_set_encodeext(struct net_device *netdev, key_index = wl->current_key; if (!enc->length && (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)) { - /* reques to change default key index */ + /* request to change default key index */ pr_debug("%s: request to change default key to %d\n", __func__, key_index); wl->current_key = key_index; diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 144ec626230d..b22596b18ee8 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -872,8 +872,9 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev, skb_dst_update_pmtu_no_confirm(skb, mtu); - if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) && - mtu < ntohs(iph->tot_len)) { + if (iph->frag_off & htons(IP_DF) && + ((!skb_is_gso(skb) && skb->len > mtu) || + (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu)))) { netdev_dbg(dev, "packet too big, fragmentation needed\n"); icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c index a03490ba2e5b..cc7ddc40020f 100644 --- a/drivers/net/ieee802154/adf7242.c +++ b/drivers/net/ieee802154/adf7242.c @@ -1162,9 +1162,10 @@ static int adf7242_stats_show(struct seq_file *file, void *offset) static void adf7242_debugfs_init(struct adf7242_local *lp) { - char debugfs_dir_name[DNAME_INLINE_LEN + 1] = "adf7242-"; + char debugfs_dir_name[DNAME_INLINE_LEN + 1]; - strncat(debugfs_dir_name, dev_name(&lp->spi->dev), DNAME_INLINE_LEN); + snprintf(debugfs_dir_name, sizeof(debugfs_dir_name), + "adf7242-%s", dev_name(&lp->spi->dev)); lp->debugfs_root = debugfs_create_dir(debugfs_dir_name, NULL); diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 0c13d9950cd8..afb20c0ed688 100644 
--- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -764,7 +764,7 @@ enum rtl_register_content { /* rtl8152 flags */ enum rtl8152_flags { - RTL8152_UNPLUG = 0, + RTL8152_INACCESSIBLE = 0, RTL8152_SET_RX_MODE, WORK_ENABLE, RTL8152_LINK_CHG, @@ -773,6 +773,9 @@ enum rtl8152_flags { SCHEDULE_TASKLET, GREEN_ETHERNET, RX_EPROTO, + IN_PRE_RESET, + PROBED_WITH_NO_ERRORS, + PROBE_SHOULD_RETRY, }; #define DEVICE_ID_LENOVO_USB_C_TRAVEL_HUB 0x721e @@ -953,6 +956,8 @@ struct r8152 { u8 version; u8 duplex; u8 autoneg; + + unsigned int reg_access_reset_count; }; /** @@ -1200,6 +1205,96 @@ static unsigned int agg_buf_sz = 16384; #define RTL_LIMITED_TSO_SIZE (size_to_mtu(agg_buf_sz) - sizeof(struct tx_desc)) +/* If register access fails then we block access and issue a reset. If this + * happens too many times in a row without a successful access then we stop + * trying to reset and just leave access blocked. + */ +#define REGISTER_ACCESS_MAX_RESETS 3 + +static void rtl_set_inaccessible(struct r8152 *tp) +{ + set_bit(RTL8152_INACCESSIBLE, &tp->flags); + smp_mb__after_atomic(); +} + +static void rtl_set_accessible(struct r8152 *tp) +{ + clear_bit(RTL8152_INACCESSIBLE, &tp->flags); + smp_mb__after_atomic(); +} + +static +int r8152_control_msg(struct r8152 *tp, unsigned int pipe, __u8 request, + __u8 requesttype, __u16 value, __u16 index, void *data, + __u16 size, const char *msg_tag) +{ + struct usb_device *udev = tp->udev; + int ret; + + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) + return -ENODEV; + + ret = usb_control_msg(udev, pipe, request, requesttype, + value, index, data, size, + USB_CTRL_GET_TIMEOUT); + + /* No need to issue a reset to report an error if the USB device got + * unplugged; just return immediately. + */ + if (ret == -ENODEV) + return ret; + + /* If the write was successful then we're done */ + if (ret >= 0) { + tp->reg_access_reset_count = 0; + return ret; + } + + dev_err(&udev->dev, + "Failed to %s %d bytes at %#06x/%#06x (%d)\n", + msg_tag, size, value, index, ret); + + /* Block all future register access until we reset. Much of the code + * in the driver doesn't check for errors. Notably, many parts of the + * driver do a read/modify/write of a register value without + * confirming that the read succeeded. Writing back modified garbage + * like this can fully wedge the adapter, requiring a power cycle. + */ + rtl_set_inaccessible(tp); + + /* If probe hasn't yet finished, then we'll request a retry of the + * whole probe routine if we get any control transfer errors. We + * never have to clear this bit since we free/reallocate the whole "tp" + * structure if we retry probe. + */ + if (!test_bit(PROBED_WITH_NO_ERRORS, &tp->flags)) { + set_bit(PROBE_SHOULD_RETRY, &tp->flags); + return ret; + } + + /* Failing to access registers in pre-reset is not surprising since we + * wouldn't be resetting if things were behaving normally. The register + * access we do in pre-reset isn't truly mandatory--we're just reusing + * the disable() function and trying to be nice by powering the + * adapter down before resetting it. Thus, if we're in pre-reset, + * we'll return right away and not try to queue up yet another reset. + * We know the post-reset is already coming. 
+ */ + if (test_bit(IN_PRE_RESET, &tp->flags)) + return ret; + + if (tp->reg_access_reset_count < REGISTER_ACCESS_MAX_RESETS) { + usb_queue_reset_device(tp->intf); + tp->reg_access_reset_count++; + } else if (tp->reg_access_reset_count == REGISTER_ACCESS_MAX_RESETS) { + dev_err(&udev->dev, + "Tried to reset %d times; giving up.\n", + REGISTER_ACCESS_MAX_RESETS); + } + + return ret; +} + static int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) { @@ -1210,9 +1305,10 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) if (!tmp) return -ENOMEM; - ret = usb_control_msg(tp->udev, tp->pipe_ctrl_in, - RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, - value, index, tmp, size, 500); + ret = r8152_control_msg(tp, tp->pipe_ctrl_in, + RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, + value, index, tmp, size, "read"); + if (ret < 0) memset(data, 0xff, size); else @@ -1233,9 +1329,9 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) if (!tmp) return -ENOMEM; - ret = usb_control_msg(tp->udev, tp->pipe_ctrl_out, - RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE, - value, index, tmp, size, 500); + ret = r8152_control_msg(tp, tp->pipe_ctrl_out, + RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE, + value, index, tmp, size, "write"); kfree(tmp); @@ -1244,10 +1340,8 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) static void rtl_set_unplug(struct r8152 *tp) { - if (tp->udev->state == USB_STATE_NOTATTACHED) { - set_bit(RTL8152_UNPLUG, &tp->flags); - smp_mb__after_atomic(); - } + if (tp->udev->state == USB_STATE_NOTATTACHED) + rtl_set_inaccessible(tp); } static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size, @@ -1256,7 +1350,7 @@ static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size, u16 limit = 64; int ret = 0; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; /* both size and indix must be 4 bytes align */ @@ -1300,7 +1394,7 @@ static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen, u16 byteen_start, byteen_end, byen; u16 limit = 512; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; /* both size and indix must be 4 bytes align */ @@ -1537,7 +1631,7 @@ static int read_mii_word(struct net_device *netdev, int phy_id, int reg) struct r8152 *tp = netdev_priv(netdev); int ret; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; if (phy_id != R8152_PHY_ID) @@ -1553,7 +1647,7 @@ void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val) { struct r8152 *tp = netdev_priv(netdev); - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; if (phy_id != R8152_PHY_ID) @@ -1758,7 +1852,7 @@ static void read_bulk_callback(struct urb *urb) if (!tp) return; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; if (!test_bit(WORK_ENABLE, &tp->flags)) @@ -1850,7 +1944,7 @@ static void write_bulk_callback(struct urb *urb) if (!test_bit(WORK_ENABLE, &tp->flags)) return; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; if (!skb_queue_empty(&tp->tx_queue)) @@ -1871,7 +1965,7 @@ static void intr_callback(struct urb *urb) if (!test_bit(WORK_ENABLE, &tp->flags)) return; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) 
return; switch (status) { @@ -2615,7 +2709,7 @@ static void bottom_half(struct tasklet_struct *t) { struct r8152 *tp = from_tasklet(tp, t, tx_tl); - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; if (!test_bit(WORK_ENABLE, &tp->flags)) @@ -2658,7 +2752,7 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags) int ret; /* The rx would be stopped, so skip submitting */ - if (test_bit(RTL8152_UNPLUG, &tp->flags) || + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags) || !test_bit(WORK_ENABLE, &tp->flags) || !netif_carrier_ok(tp->netdev)) return 0; @@ -3058,7 +3152,7 @@ static int rtl_enable(struct r8152 *tp) static int rtl8152_enable(struct r8152 *tp) { - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; set_tx_qlen(tp); @@ -3145,7 +3239,7 @@ static int rtl8153_enable(struct r8152 *tp) { u32 ocp_data; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; set_tx_qlen(tp); @@ -3177,7 +3271,7 @@ static void rtl_disable(struct r8152 *tp) u32 ocp_data; int i; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) { + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) { rtl_drop_queued_tx(tp); return; } @@ -3631,7 +3725,7 @@ static u16 r8153_phy_status(struct r8152 *tp, u16 desired) } msleep(20); - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) break; } @@ -3663,6 +3757,8 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable) int i; for (i = 0; i < 500; i++) { + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) + return; if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & AUTOLOAD_DONE) break; @@ -3703,6 +3799,8 @@ static void r8153c_ups_en(struct r8152 *tp, bool enable) int i; for (i = 0; i < 500; i++) { + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) + return; if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) & AUTOLOAD_DONE) break; @@ -4046,6 +4144,9 @@ static int rtl_phy_patch_request(struct r8152 *tp, bool request, bool wait) for (i = 0; wait && i < 5000; i++) { u32 ocp_data; + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) + return -ENODEV; + usleep_range(1000, 2000); ocp_data = ocp_reg_read(tp, OCP_PHY_PATCH_STAT); if ((ocp_data & PATCH_READY) ^ check) @@ -6002,7 +6103,7 @@ static int rtl8156_enable(struct r8152 *tp) u32 ocp_data; u16 speed; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; r8156_fc_parameter(tp); @@ -6060,7 +6161,7 @@ static int rtl8156b_enable(struct r8152 *tp) u32 ocp_data; u16 speed; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; set_tx_qlen(tp); @@ -6246,7 +6347,7 @@ out: static void rtl8152_up(struct r8152 *tp) { - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8152_aldps_en(tp, false); @@ -6256,7 +6357,7 @@ static void rtl8152_up(struct r8152 *tp) static void rtl8152_down(struct r8152 *tp) { - if (test_bit(RTL8152_UNPLUG, &tp->flags)) { + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) { rtl_drop_queued_tx(tp); return; } @@ -6271,7 +6372,7 @@ static void rtl8153_up(struct r8152 *tp) { u32 ocp_data; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153_u1u2en(tp, false); @@ -6311,7 +6412,7 @@ static void rtl8153_down(struct r8152 *tp) { u32 ocp_data; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) { + if 
(test_bit(RTL8152_INACCESSIBLE, &tp->flags)) { rtl_drop_queued_tx(tp); return; } @@ -6332,7 +6433,7 @@ static void rtl8153b_up(struct r8152 *tp) { u32 ocp_data; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153b_u1u2en(tp, false); @@ -6356,7 +6457,7 @@ static void rtl8153b_down(struct r8152 *tp) { u32 ocp_data; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) { + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) { rtl_drop_queued_tx(tp); return; } @@ -6393,7 +6494,7 @@ static void rtl8153c_up(struct r8152 *tp) { u32 ocp_data; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153b_u1u2en(tp, false); @@ -6474,7 +6575,7 @@ static void rtl8156_up(struct r8152 *tp) { u32 ocp_data; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153b_u1u2en(tp, false); @@ -6547,7 +6648,7 @@ static void rtl8156_down(struct r8152 *tp) { u32 ocp_data; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) { + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) { rtl_drop_queued_tx(tp); return; } @@ -6685,7 +6786,7 @@ static void rtl_work_func_t(struct work_struct *work) /* If the device is unplugged or !netif_running(), the workqueue * doesn't need to wake the device, and could return directly. */ - if (test_bit(RTL8152_UNPLUG, &tp->flags) || !netif_running(tp->netdev)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags) || !netif_running(tp->netdev)) return; if (usb_autopm_get_interface(tp->intf) < 0) @@ -6724,7 +6825,7 @@ static void rtl_hw_phy_work_func_t(struct work_struct *work) { struct r8152 *tp = container_of(work, struct r8152, hw_phy_work.work); - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; if (usb_autopm_get_interface(tp->intf) < 0) @@ -6851,7 +6952,7 @@ static int rtl8152_close(struct net_device *netdev) netif_stop_queue(netdev); res = usb_autopm_get_interface(tp->intf); - if (res < 0 || test_bit(RTL8152_UNPLUG, &tp->flags)) { + if (res < 0 || test_bit(RTL8152_INACCESSIBLE, &tp->flags)) { rtl_drop_queued_tx(tp); rtl_stop_rx(tp); } else { @@ -6884,7 +6985,7 @@ static void r8152b_init(struct r8152 *tp) u32 ocp_data; u16 data; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; data = r8152_mdio_read(tp, MII_BMCR); @@ -6928,7 +7029,7 @@ static void r8153_init(struct r8152 *tp) u16 data; int i; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153_u1u2en(tp, false); @@ -6939,7 +7040,7 @@ static void r8153_init(struct r8152 *tp) break; msleep(20); - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) break; } @@ -7068,7 +7169,7 @@ static void r8153b_init(struct r8152 *tp) u16 data; int i; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153b_u1u2en(tp, false); @@ -7079,7 +7180,7 @@ static void r8153b_init(struct r8152 *tp) break; msleep(20); - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) break; } @@ -7150,7 +7251,7 @@ static void r8153c_init(struct r8152 *tp) u16 data; int i; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153b_u1u2en(tp, false); @@ -7170,7 +7271,7 @@ static void r8153c_init(struct r8152 *tp) break; msleep(20); - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, 
&tp->flags)) return; } @@ -7999,7 +8100,7 @@ static void r8156_init(struct r8152 *tp) u16 data; int i; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP); @@ -8020,7 +8121,7 @@ static void r8156_init(struct r8152 *tp) break; msleep(20); - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; } @@ -8095,7 +8196,7 @@ static void r8156b_init(struct r8152 *tp) u16 data; int i; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP); @@ -8129,7 +8230,7 @@ static void r8156b_init(struct r8152 *tp) break; msleep(20); - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; } @@ -8255,7 +8356,7 @@ static int rtl8152_pre_reset(struct usb_interface *intf) struct r8152 *tp = usb_get_intfdata(intf); struct net_device *netdev; - if (!tp) + if (!tp || !test_bit(PROBED_WITH_NO_ERRORS, &tp->flags)) return 0; netdev = tp->netdev; @@ -8270,7 +8371,9 @@ static int rtl8152_pre_reset(struct usb_interface *intf) napi_disable(&tp->napi); if (netif_carrier_ok(netdev)) { mutex_lock(&tp->control); + set_bit(IN_PRE_RESET, &tp->flags); tp->rtl_ops.disable(tp); + clear_bit(IN_PRE_RESET, &tp->flags); mutex_unlock(&tp->control); } @@ -8283,9 +8386,11 @@ static int rtl8152_post_reset(struct usb_interface *intf) struct net_device *netdev; struct sockaddr sa; - if (!tp) + if (!tp || !test_bit(PROBED_WITH_NO_ERRORS, &tp->flags)) return 0; + rtl_set_accessible(tp); + /* reset the MAC address in case of policy change */ if (determine_ethernet_addr(tp, &sa) >= 0) { rtnl_lock(); @@ -9158,7 +9263,7 @@ static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) struct mii_ioctl_data *data = if_mii(rq); int res; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return -ENODEV; res = usb_autopm_get_interface(tp->intf); @@ -9260,7 +9365,7 @@ static const struct net_device_ops rtl8152_netdev_ops = { static void rtl8152_unload(struct r8152 *tp) { - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; if (tp->version != RTL_VER_01) @@ -9269,7 +9374,7 @@ static void rtl8152_unload(struct r8152 *tp) static void rtl8153_unload(struct r8152 *tp) { - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153_power_cut_en(tp, false); @@ -9277,7 +9382,7 @@ static void rtl8153b_unload(struct r8152 *tp) { - if (test_bit(RTL8152_UNPLUG, &tp->flags)) + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) return; r8153b_power_cut_en(tp, false); @@ -9487,16 +9592,29 @@ static u8 __rtl_get_hw_ver(struct usb_device *udev) __le32 *tmp; u8 version; int ret; + int i; tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); if (!tmp) return 0; - ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), - RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, - PLA_TCR0, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500); - if (ret > 0) - ocp_data = (__le32_to_cpu(*tmp) >> 16) & VERSION_MASK; + /* Retry up to 3 times in case there is a transitory error. We do this + * since retrying a read of the version is always safe and this + * function doesn't take advantage of r8152_control_msg().
+ */ + for (i = 0; i < 3; i++) { + ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), + RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, + PLA_TCR0, MCU_TYPE_PLA, tmp, sizeof(*tmp), + USB_CTRL_GET_TIMEOUT); + if (ret > 0) { + ocp_data = (__le32_to_cpu(*tmp) >> 16) & VERSION_MASK; + break; + } + } + + if (i != 0 && ret > 0) + dev_warn(&udev->dev, "Needed %d retries to read version\n", i); kfree(tmp); @@ -9595,25 +9713,14 @@ static bool rtl8152_supports_lenovo_macpassthru(struct usb_device *udev) return 0; } -static int rtl8152_probe(struct usb_interface *intf, - const struct usb_device_id *id) +static int rtl8152_probe_once(struct usb_interface *intf, + const struct usb_device_id *id, u8 version) { struct usb_device *udev = interface_to_usbdev(intf); struct r8152 *tp; struct net_device *netdev; - u8 version; int ret; - if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC) - return -ENODEV; - - if (!rtl_check_vendor_ok(intf)) - return -ENODEV; - - version = rtl8152_get_version(intf); - if (version == RTL_VER_UNKNOWN) - return -ENODEV; - usb_reset_device(udev); netdev = alloc_etherdev(sizeof(struct r8152)); if (!netdev) { @@ -9776,18 +9883,68 @@ static int rtl8152_probe(struct usb_interface *intf, else device_set_wakeup_enable(&udev->dev, false); + /* If we saw a control transfer error while probing then we may + * want to try probe() again. Consider this an error. + */ + if (test_bit(PROBE_SHOULD_RETRY, &tp->flags)) + goto out2; + + set_bit(PROBED_WITH_NO_ERRORS, &tp->flags); netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION); return 0; +out2: + unregister_netdev(netdev); + out1: tasklet_kill(&tp->tx_tl); + cancel_delayed_work_sync(&tp->hw_phy_work); + if (tp->rtl_ops.unload) + tp->rtl_ops.unload(tp); + rtl8152_release_firmware(tp); usb_set_intfdata(intf, NULL); out: + if (test_bit(PROBE_SHOULD_RETRY, &tp->flags)) + ret = -EAGAIN; + free_netdev(netdev); return ret; } +#define RTL8152_PROBE_TRIES 3 + +static int rtl8152_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + u8 version; + int ret; + int i; + + if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC) + return -ENODEV; + + if (!rtl_check_vendor_ok(intf)) + return -ENODEV; + + version = rtl8152_get_version(intf); + if (version == RTL_VER_UNKNOWN) + return -ENODEV; + + for (i = 0; i < RTL8152_PROBE_TRIES; i++) { + ret = rtl8152_probe_once(intf, id, version); + if (ret != -EAGAIN) + break; + } + if (ret == -EAGAIN) { + dev_err(&intf->dev, + "r8152 failed probe after %d tries; giving up\n", i); + return -ENODEV; + } + + return ret; +} + static void rtl8152_disconnect(struct usb_interface *intf) { struct r8152 *tp = usb_get_intfdata(intf); diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 17da42fe605c..a530f20ee257 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -95,7 +95,9 @@ static int __must_check smsc95xx_read_reg(struct usbnet *dev, u32 index, ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, &buf, 4); - if (ret < 0) { + if (ret < 4) { + ret = ret < 0 ?
ret : -ENODATA; + if (ret != -ENODEV) netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n", index, ret); diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c index a223d9537f22..e8b6f194925d 100644 --- a/drivers/nvmem/imx-ocotp.c +++ b/drivers/nvmem/imx-ocotp.c @@ -498,7 +498,7 @@ static const struct ocotp_params imx6sl_params = { }; static const struct ocotp_params imx6sll_params = { - .nregs = 128, + .nregs = 80, .bank_address_words = 0, .set_timing = imx_ocotp_set_imx6_timing, .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT, @@ -512,14 +512,14 @@ static const struct ocotp_params imx6sx_params = { }; static const struct ocotp_params imx6ul_params = { - .nregs = 128, + .nregs = 144, .bank_address_words = 0, .set_timing = imx_ocotp_set_imx6_timing, .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT, }; static const struct ocotp_params imx6ull_params = { - .nregs = 64, + .nregs = 80, .bank_address_words = 0, .set_timing = imx_ocotp_set_imx6_timing, .ctrl = IMX_OCOTP_BM_CTRL_DEFAULT, diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c index ad702463a65d..6bbffb081053 100644 --- a/drivers/platform/x86/amd/pmc/pmc-quirks.c +++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c @@ -111,6 +111,79 @@ static const struct dmi_system_id fwbug_list[] = { DMI_MATCH(DMI_PRODUCT_NAME, "21A1"), } }, + /* https://bugzilla.kernel.org/show_bug.cgi?id=218024 */ + { + .ident = "V14 G4 AMN", + .driver_data = &quirk_s2idle_bug, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82YT"), + } + }, + { + .ident = "V14 G4 AMN", + .driver_data = &quirk_s2idle_bug, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "83GE"), + } + }, + { + .ident = "V15 G4 AMN", + .driver_data = &quirk_s2idle_bug, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82YU"), + } + }, + { + .ident = "V15 G4 AMN", + .driver_data = &quirk_s2idle_bug, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "83CQ"), + } + }, + { + .ident = "IdeaPad 1 14AMN7", + .driver_data = &quirk_s2idle_bug, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82VF"), + } + }, + { + .ident = "IdeaPad 1 15AMN7", + .driver_data = &quirk_s2idle_bug, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82VG"), + } + }, + { + .ident = "IdeaPad 1 15AMN7", + .driver_data = &quirk_s2idle_bug, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82X5"), + } + }, + { + .ident = "IdeaPad Slim 3 14AMN8", + .driver_data = &quirk_s2idle_bug, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82XN"), + } + }, + { + .ident = "IdeaPad Slim 3 15AMN8", + .driver_data = &quirk_s2idle_bug, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "82XQ"), + } + }, /* https://gitlab.freedesktop.org/drm/amd/-/issues/2684 */ { .ident = "HP Laptop 15s-eq2xxx", diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 83b6a3f3863b..6effa13039f3 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -209,7 +209,8 @@ manage_start_stop_show(struct device *dev, return sysfs_emit(buf, "%u\n", sdp->manage_system_start_stop && - sdp->manage_runtime_start_stop); + sdp->manage_runtime_start_stop && + sdp->manage_shutdown); } static DEVICE_ATTR_RO(manage_start_stop); @@ -275,6 +276,35 @@ manage_runtime_start_stop_store(struct device *dev, } static 
DEVICE_ATTR_RW(manage_runtime_start_stop); +static ssize_t manage_shutdown_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct scsi_device *sdp = sdkp->device; + + return sysfs_emit(buf, "%u\n", sdp->manage_shutdown); +} + +static ssize_t manage_shutdown_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct scsi_device *sdp = sdkp->device; + bool v; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + if (kstrtobool(buf, &v)) + return -EINVAL; + + sdp->manage_shutdown = v; + + return count; +} +static DEVICE_ATTR_RW(manage_shutdown); + static ssize_t allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -607,6 +637,7 @@ static struct attribute *sd_disk_attrs[] = { &dev_attr_manage_start_stop.attr, &dev_attr_manage_system_start_stop.attr, &dev_attr_manage_runtime_start_stop.attr, + &dev_attr_manage_shutdown.attr, &dev_attr_protection_type.attr, &dev_attr_protection_mode.attr, &dev_attr_app_tag_own.attr, @@ -3819,8 +3850,10 @@ static void sd_shutdown(struct device *dev) sd_sync_cache(sdkp, NULL); } - if (system_state != SYSTEM_RESTART && - sdkp->device->manage_system_start_stop) { + if ((system_state != SYSTEM_RESTART && + sdkp->device->manage_system_start_stop) || + (system_state == SYSTEM_POWER_OFF && + sdkp->device->manage_shutdown)) { sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); sd_start_stop_device(sdkp, 0); } diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig index 12040ce116a5..acc812e490d0 100644 --- a/drivers/soc/renesas/Kconfig +++ b/drivers/soc/renesas/Kconfig @@ -334,12 +334,14 @@ if RISCV config ARCH_R9A07G043 bool "RISC-V Platform support for RZ/Five" depends on NONPORTABLE + depends on RISCV_ALTERNATIVE + depends on !RISCV_ISA_ZICBOM + depends on RISCV_SBI select ARCH_RZG2L - select AX45MP_L2_CACHE if RISCV_DMA_NONCOHERENT + select AX45MP_L2_CACHE select DMA_GLOBAL_POOL - select ERRATA_ANDES if RISCV_SBI - select ERRATA_ANDES_CMO if ERRATA_ANDES - + select ERRATA_ANDES + select ERRATA_ANDES_CMO help This enables support for the Renesas RZ/Five SoC.
diff --git a/drivers/vdpa/mlx5/net/debug.c b/drivers/vdpa/mlx5/net/debug.c index 60d6ac68cdc4..9c85162c19fc 100644 --- a/drivers/vdpa/mlx5/net/debug.c +++ b/drivers/vdpa/mlx5/net/debug.c @@ -146,7 +146,8 @@ void mlx5_vdpa_add_debugfs(struct mlx5_vdpa_net *ndev) ndev->rx_dent = debugfs_create_dir("rx", ndev->debugfs); } -void mlx5_vdpa_remove_debugfs(struct dentry *dbg) +void mlx5_vdpa_remove_debugfs(struct mlx5_vdpa_net *ndev) { - debugfs_remove_recursive(dbg); + debugfs_remove_recursive(ndev->debugfs); + ndev->debugfs = NULL; } diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index 40a03b08d7cf..946488b8989f 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -625,30 +625,70 @@ static void cq_destroy(struct mlx5_vdpa_net *ndev, u16 idx) mlx5_db_free(ndev->mvdev.mdev, &vcq->db); } +static int read_umem_params(struct mlx5_vdpa_net *ndev) +{ + u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {}; + u16 opmod = (MLX5_CAP_VDPA_EMULATION << 1) | (HCA_CAP_OPMOD_GET_CUR & 0x01); + struct mlx5_core_dev *mdev = ndev->mvdev.mdev; + int out_size; + void *caps; + void *out; + int err; + + out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out); + out = kzalloc(out_size, GFP_KERNEL); + if (!out) + return -ENOMEM; + + MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); + MLX5_SET(query_hca_cap_in, in, op_mod, opmod); + err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out); + if (err) { + mlx5_vdpa_warn(&ndev->mvdev, + "Failed reading vdpa umem capabilities with err %d\n", err); + goto out; + } + + caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability); + + ndev->umem_1_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_1_buffer_param_a); + ndev->umem_1_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_1_buffer_param_b); + + ndev->umem_2_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_2_buffer_param_a); + ndev->umem_2_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_2_buffer_param_b); + + ndev->umem_3_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_3_buffer_param_a); + ndev->umem_3_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_3_buffer_param_b); + +out: + kfree(out); + return 0; +} + static void set_umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num, struct mlx5_vdpa_umem **umemp) { - struct mlx5_core_dev *mdev = ndev->mvdev.mdev; - int p_a; - int p_b; + u32 p_a; + u32 p_b; switch (num) { case 1: - p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_a); - p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_b); + p_a = ndev->umem_1_buffer_param_a; + p_b = ndev->umem_1_buffer_param_b; *umemp = &mvq->umem1; break; case 2: - p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_a); - p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_b); + p_a = ndev->umem_2_buffer_param_a; + p_b = ndev->umem_2_buffer_param_b; *umemp = &mvq->umem2; break; case 3: - p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_a); - p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_b); + p_a = ndev->umem_3_buffer_param_a; + p_b = ndev->umem_3_buffer_param_b; *umemp = &mvq->umem3; break; } + (*umemp)->size = p_a * mvq->num_ent + p_b; } @@ -2679,6 +2719,11 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev) goto out; } mlx5_vdpa_add_debugfs(ndev); + + err = read_umem_params(ndev); + if (err) + goto err_setup; + err = setup_virtqueues(mvdev); if (err) { mlx5_vdpa_warn(mvdev, "setup_virtqueues\n"); @@ -2713,7 +2758,7 @@ 
err_tir: err_rqt: teardown_virtqueues(ndev); err_setup: - mlx5_vdpa_remove_debugfs(ndev->debugfs); + mlx5_vdpa_remove_debugfs(ndev); out: return err; } @@ -2727,8 +2772,7 @@ static void teardown_driver(struct mlx5_vdpa_net *ndev) if (!ndev->setup) return; - mlx5_vdpa_remove_debugfs(ndev->debugfs); - ndev->debugfs = NULL; + mlx5_vdpa_remove_debugfs(ndev); teardown_steering(ndev); destroy_tir(ndev); destroy_rqt(ndev); @@ -3489,8 +3533,6 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device * struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); struct workqueue_struct *wq; - mlx5_vdpa_remove_debugfs(ndev->debugfs); - ndev->debugfs = NULL; unregister_link_notifier(ndev); _vdpa_unregister_device(dev); wq = mvdev->wq; diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.h b/drivers/vdpa/mlx5/net/mlx5_vnet.h index 36c44d9fdd16..90b556a57971 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.h +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.h @@ -65,6 +65,15 @@ struct mlx5_vdpa_net { struct hlist_head macvlan_hash[MLX5V_MACVLAN_SIZE]; struct mlx5_vdpa_irq_pool irqp; struct dentry *debugfs; + + u32 umem_1_buffer_param_a; + u32 umem_1_buffer_param_b; + + u32 umem_2_buffer_param_a; + u32 umem_2_buffer_param_b; + + u32 umem_3_buffer_param_a; + u32 umem_3_buffer_param_b; }; struct mlx5_vdpa_counter { @@ -88,7 +97,7 @@ struct macvlan_node { }; void mlx5_vdpa_add_debugfs(struct mlx5_vdpa_net *ndev); -void mlx5_vdpa_remove_debugfs(struct dentry *dbg); +void mlx5_vdpa_remove_debugfs(struct mlx5_vdpa_net *ndev); void mlx5_vdpa_add_rx_flow_table(struct mlx5_vdpa_net *ndev); void mlx5_vdpa_remove_rx_flow_table(struct mlx5_vdpa_net *ndev); void mlx5_vdpa_add_tirn(struct mlx5_vdpa_net *ndev); diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c index 00d7d72713be..b3a3cb165795 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c @@ -499,12 +499,13 @@ static int __init vdpasim_blk_init(void) GFP_KERNEL); if (!shared_buffer) { ret = -ENOMEM; - goto parent_err; + goto mgmt_dev_err; } } return 0; - +mgmt_dev_err: + vdpa_mgmtdev_unregister(&mgmt_dev); parent_err: device_unregister(&vdpasim_blk_mgmtdev); return ret; diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index c71d573f1c94..e0c181ad17e3 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -1458,9 +1458,7 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev, goto done; } - if ((msg.type == VHOST_IOTLB_UPDATE || - msg.type == VHOST_IOTLB_INVALIDATE) && - msg.size == 0) { + if (msg.type == VHOST_IOTLB_UPDATE && msg.size == 0) { ret = -EINVAL; goto done; } diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 5b15936a5214..2d5d252ef419 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -395,7 +395,11 @@ static inline s64 towards_target(struct virtio_balloon *vb) virtio_cread_le(vb->vdev, struct virtio_balloon_config, num_pages, &num_pages); - target = num_pages; + /* + * Aligned up to guest page size to avoid inflating and deflating + * balloon endlessly.
+ */ + target = ALIGN(num_pages, VIRTIO_BALLOON_PAGES_PER_PAGE); return target - vb->num_pages; } diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index 97760f611295..59892a31cf76 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -631,14 +631,17 @@ static int virtio_mmio_probe(struct platform_device *pdev) spin_lock_init(&vm_dev->lock); vm_dev->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(vm_dev->base)) - return PTR_ERR(vm_dev->base); + if (IS_ERR(vm_dev->base)) { + rc = PTR_ERR(vm_dev->base); + goto free_vm_dev; + } /* Check magic value */ magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE); if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) { dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic); - return -ENODEV; + rc = -ENODEV; + goto free_vm_dev; } /* Check device version */ @@ -646,7 +649,8 @@ static int virtio_mmio_probe(struct platform_device *pdev) if (vm_dev->version < 1 || vm_dev->version > 2) { dev_err(&pdev->dev, "Version %ld not supported!\n", vm_dev->version); - return -ENXIO; + rc = -ENXIO; + goto free_vm_dev; } vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID); @@ -655,7 +659,8 @@ static int virtio_mmio_probe(struct platform_device *pdev) * virtio-mmio device with an ID 0 is a (dummy) placeholder * with no function. End probing now with no error reported. */ - return -ENODEV; + rc = -ENODEV; + goto free_vm_dev; } vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID); @@ -685,6 +690,10 @@ static int virtio_mmio_probe(struct platform_device *pdev) put_device(&vm_dev->vdev.dev); return rc; + +free_vm_dev: + kfree(vm_dev); + return rc; } static int virtio_mmio_remove(struct platform_device *pdev) diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c index aad7d9296e77..9cb601e16688 100644 --- a/drivers/virtio/virtio_pci_modern_dev.c +++ b/drivers/virtio/virtio_pci_modern_dev.c @@ -291,7 +291,7 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev) err = -EINVAL; mdev->common = vp_modern_map_capability(mdev, common, sizeof(struct virtio_pci_common_cfg), 4, - 0, sizeof(struct virtio_pci_common_cfg), + 0, sizeof(struct virtio_pci_modern_common_cfg), NULL, NULL); if (!mdev->common) goto err_map_common;