Diffstat (limited to 'drivers/gpu')
26 files changed, 189 insertions, 63 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index cdf0818088b3..7606e3b6361e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1342,9 +1342,11 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
 #if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
 bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
+bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
 bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
 #else
 static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
+static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
 static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 0e12315fa0cb..98ac53ee6bb5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -1046,6 +1046,20 @@ bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev)
 }
 
 /**
+ * amdgpu_acpi_should_gpu_reset
+ *
+ * @adev: amdgpu_device_pointer
+ *
+ * returns true if should reset GPU, false if not
+ */
+bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
+{
+	if (adev->flags & AMD_IS_APU)
+		return false;
+	return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
+}
+
+/**
  * amdgpu_acpi_is_s0ix_active
  *
  * @adev: amdgpu_device_pointer
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 8f0e6d93bb9c..c317078d1afd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -296,6 +296,7 @@ static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
 {
 	struct amdgpu_device *adev = ctx->adev;
 	enum amd_dpm_forced_level level;
+	u32 current_stable_pstate;
 	int r;
 
 	mutex_lock(&adev->pm.stable_pstate_ctx_lock);
@@ -304,6 +305,10 @@ static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
 		goto done;
 	}
 
+	r = amdgpu_ctx_get_stable_pstate(ctx, &current_stable_pstate);
+	if (r || (stable_pstate == current_stable_pstate))
+		goto done;
+
 	switch (stable_pstate) {
 	case AMDGPU_CTX_STABLE_PSTATE_NONE:
 		level = AMD_DPM_FORCED_LEVEL_AUTO;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 7fd0277b2805..46ef57b07c15 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2336,7 +2336,7 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev)
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
 
-	if (!adev->in_s0ix)
+	if (amdgpu_acpi_should_gpu_reset(adev))
 		return amdgpu_asic_reset(adev);
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 039b90cdc3bc..45f0188c4273 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -81,6 +81,10 @@
 #include "mxgpu_vi.h"
 #include "amdgpu_dm.h"
 
+#if IS_ENABLED(CONFIG_X86)
+#include <asm/intel-family.h>
+#endif
+
 #define ixPCIE_LC_L1_PM_SUBSTATE	0x100100C6
 #define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK	0x00000001L
 #define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK	0x00000002L
@@ -1134,13 +1138,24 @@ static void vi_enable_aspm(struct amdgpu_device *adev)
 	WREG32_PCIE(ixPCIE_LC_CNTL, data);
 }
 
+static bool aspm_support_quirk_check(void)
+{
+#if IS_ENABLED(CONFIG_X86)
+	struct cpuinfo_x86 *c = &cpu_data(0);
+
+	return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
+#else
+	return true;
+#endif
+}
+
 static void vi_program_aspm(struct amdgpu_device *adev)
 {
 	u32 data, data1, orig;
 	bool bL1SS = false;
 	bool bClkReqSupport = true;
 
-	if (!amdgpu_device_should_use_aspm(adev))
+	if (!amdgpu_device_should_use_aspm(adev) || !aspm_support_quirk_check())
 		return;
 
 	if (adev->flags & AMD_IS_APU ||
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
index d7559e5a99ce..e708f07fe75a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -153,9 +153,4 @@ void dcn31_hw_sequencer_construct(struct dc *dc)
 		dc->hwss.init_hw = dcn20_fpga_init_hw;
 		dc->hwseq->funcs.init_pipes = NULL;
 	}
-	if (dc->debug.disable_z10) {
-		/*hw not support z10 or sw disable it*/
-		dc->hwss.z10_restore = NULL;
-		dc->hwss.z10_save_init = NULL;
-	}
 }
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index f1544755d8b4..f10a0256413e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1351,14 +1351,8 @@ static int smu_disable_dpms(struct smu_context *smu)
 {
 	struct amdgpu_device *adev = smu->adev;
 	int ret = 0;
-	/*
-	 * TODO: (adev->in_suspend && !adev->in_s0ix) is added to pair
-	 * the workaround which always reset the asic in suspend.
-	 * It's likely that workaround will be dropped in the future.
-	 * Then the change here should be dropped together.
-	 */
 	bool use_baco = !smu->is_apu &&
-		(((amdgpu_in_reset(adev) || (adev->in_suspend && !adev->in_s0ix)) &&
+		((amdgpu_in_reset(adev) &&
 		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
 		 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
diff --git a/drivers/gpu/drm/dp/drm_dp_mst_topology.c b/drivers/gpu/drm/dp/drm_dp_mst_topology.c
index 11300b53d24f..7a7cc44686f9 100644
--- a/drivers/gpu/drm/dp/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/dp/drm_dp_mst_topology.c
@@ -4852,6 +4852,7 @@ static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
 
 	mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
 	drm_edid_get_monitor_name(mst_edid, name, namelen);
+	kfree(mst_edid);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 7616a3906b9e..1b774dcfb281 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -367,6 +367,44 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc,
 	}
 }
 
+static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
+				       const u32 *mmioaddr, u32 mmio_count,
+				       int header_ver, u8 dmc_id)
+{
+	struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
+	u32 start_range, end_range;
+	int i;
+
+	if (dmc_id >= DMC_FW_MAX) {
+		drm_warn(&i915->drm, "Unsupported firmware id %u\n", dmc_id);
+		return false;
+	}
+
+	if (header_ver == 1) {
+		start_range = DMC_MMIO_START_RANGE;
+		end_range = DMC_MMIO_END_RANGE;
+	} else if (dmc_id == DMC_FW_MAIN) {
+		start_range = TGL_MAIN_MMIO_START;
+		end_range = TGL_MAIN_MMIO_END;
+	} else if (DISPLAY_VER(i915) >= 13) {
+		start_range = ADLP_PIPE_MMIO_START;
+		end_range = ADLP_PIPE_MMIO_END;
+	} else if (DISPLAY_VER(i915) >= 12) {
+		start_range = TGL_PIPE_MMIO_START(dmc_id);
+		end_range = TGL_PIPE_MMIO_END(dmc_id);
+	} else {
+		drm_warn(&i915->drm, "Unknown mmio range for sanity check");
+		return false;
+	}
+
+	for (i = 0; i < mmio_count; i++) {
+		if (mmioaddr[i] < start_range || mmioaddr[i] > end_range)
+			return false;
+	}
+
+	return true;
+}
+
 static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
 			       const struct intel_dmc_header_base *dmc_header,
 			       size_t rem_size, u8 dmc_id)
@@ -436,6 +474,12 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
 		return 0;
 	}
 
+	if (!dmc_mmio_addr_sanity_check(dmc, mmioaddr, mmio_count,
+					dmc_header->header_ver, dmc_id)) {
+		drm_err(&i915->drm, "DMC firmware has Wrong MMIO Addresses\n");
+		return 0;
+	}
+
 	for (i = 0; i < mmio_count; i++) {
 		dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
 		dmc_info->mmiodata[i] = mmiodata[i];
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index d42f437149c9..6ca8929cf6e1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1252,14 +1252,12 @@ static void *reloc_iomap(struct i915_vma *batch,
 		 * Only attempt to pin the batch buffer to ggtt if the current batch
 		 * is not inside ggtt, or the batch buffer is not misplaced.
 		 */
-		if (!i915_is_ggtt(batch->vm)) {
+		if (!i915_is_ggtt(batch->vm) ||
+		    !i915_vma_misplaced(batch, 0, 0, PIN_MAPPABLE)) {
 			vma = i915_gem_object_ggtt_pin_ww(obj, &eb->ww, NULL, 0, 0,
							  PIN_MAPPABLE |
							  PIN_NONBLOCK /* NOWARN */ |
							  PIN_NOEVICT);
-		} else if (i915_vma_is_map_and_fenceable(batch)) {
-			__i915_vma_pin(batch);
-			vma = batch;
 		}
 
 		if (vma == ERR_PTR(-EDEADLK))
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 82713264b96c..b7c6d4462ec5 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -806,7 +806,7 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
 		__intel_engine_reset(engine, stalled_mask & engine->mask);
 	local_bh_enable();
 
-	intel_uc_reset(&gt->uc, true);
+	intel_uc_reset(&gt->uc, ALL_ENGINES);
 
 	intel_ggtt_restore_fences(gt->ggtt);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index bf7079480d47..2488d1197f3e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -438,7 +438,7 @@ int intel_guc_global_policies_update(struct intel_guc *guc);
 void intel_guc_context_ban(struct intel_context *ce, struct i915_request *rq);
 
 void intel_guc_submission_reset_prepare(struct intel_guc *guc);
-void intel_guc_submission_reset(struct intel_guc *guc, bool stalled);
+void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled);
 void intel_guc_submission_reset_finish(struct intel_guc *guc);
 void intel_guc_submission_cancel_requests(struct intel_guc *guc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 1ce7e04aa837..28f9aac0201d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1590,9 +1590,9 @@ __unwind_incomplete_requests(struct intel_context *ce)
 	spin_unlock_irqrestore(&sched_engine->lock, flags);
 }
 
-static void __guc_reset_context(struct intel_context *ce, bool stalled)
+static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t stalled)
 {
-	bool local_stalled;
+	bool guilty;
 	struct i915_request *rq;
 	unsigned long flags;
 	u32 head;
@@ -1620,7 +1620,7 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled)
 		if (!intel_context_is_pinned(ce))
 			goto next_context;
 
-		local_stalled = false;
+		guilty = false;
 		rq = intel_context_find_active_request(ce);
 		if (!rq) {
 			head = ce->ring->tail;
@@ -1628,14 +1628,14 @@ static void __guc_reset_context(struct intel_context *ce, bool stalled)
 		}
 
 		if (i915_request_started(rq))
-			local_stalled = true;
+			guilty = stalled & ce->engine->mask;
 
 		GEM_BUG_ON(i915_active_is_idle(&ce->active));
 		head = intel_ring_wrap(ce->ring, rq->head);
 
-		__i915_request_reset(rq, local_stalled && stalled);
+		__i915_request_reset(rq, guilty);
 out_replay:
-		guc_reset_state(ce, head, local_stalled && stalled);
+		guc_reset_state(ce, head, guilty);
 next_context:
 		if (i != number_children)
 			ce = list_next_entry(ce, parallel.child_link);
@@ -1645,7 +1645,7 @@ next_context:
 	intel_context_put(parent);
 }
 
-void intel_guc_submission_reset(struct intel_guc *guc, bool stalled)
+void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled)
 {
 	struct intel_context *ce;
 	unsigned long index;
@@ -4013,7 +4013,7 @@ static void guc_context_replay(struct intel_context *ce)
 {
 	struct i915_sched_engine *sched_engine = ce->engine->sched_engine;
 
-	__guc_reset_context(ce, true);
+	__guc_reset_context(ce, ce->engine->mask);
 	tasklet_hi_schedule(&sched_engine->tasklet);
 }
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index da199aa6989f..8eb34de2f20c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -593,7 +593,7 @@ sanitize:
 	__uc_sanitize(uc);
 }
 
-void intel_uc_reset(struct intel_uc *uc, bool stalled)
+void intel_uc_reset(struct intel_uc *uc, intel_engine_mask_t stalled)
 {
 	struct intel_guc *guc = &uc->guc;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
index 866b462821c0..a8f38c2c60e2 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
@@ -42,7 +42,7 @@ void intel_uc_driver_late_release(struct intel_uc *uc);
 void intel_uc_driver_remove(struct intel_uc *uc);
 void intel_uc_init_mmio(struct intel_uc *uc);
 void intel_uc_reset_prepare(struct intel_uc *uc);
-void intel_uc_reset(struct intel_uc *uc, bool stalled);
+void intel_uc_reset(struct intel_uc *uc, intel_engine_mask_t stalled);
 void intel_uc_reset_finish(struct intel_uc *uc);
 void intel_uc_cancel_requests(struct intel_uc *uc);
 void intel_uc_suspend(struct intel_uc *uc);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index a9354f8f110d..fe960c204362 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -5501,6 +5501,22 @@
 /* MMIO address range for DMC program (0x80000 - 0x82FFF) */
 #define DMC_MMIO_START_RANGE	0x80000
 #define DMC_MMIO_END_RANGE	0x8FFFF
+#define DMC_V1_MMIO_START_RANGE	0x80000
+#define TGL_MAIN_MMIO_START	0x8F000
+#define TGL_MAIN_MMIO_END	0x8FFFF
+#define _TGL_PIPEA_MMIO_START	0x92000
+#define _TGL_PIPEA_MMIO_END	0x93FFF
+#define _TGL_PIPEB_MMIO_START	0x96000
+#define _TGL_PIPEB_MMIO_END	0x97FFF
+#define ADLP_PIPE_MMIO_START	0x5F000
+#define ADLP_PIPE_MMIO_END	0x5FFFF
+
+#define TGL_PIPE_MMIO_START(dmc_id)	_PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_START,\
+						   _TGL_PIPEB_MMIO_START)
+
+#define TGL_PIPE_MMIO_END(dmc_id)	_PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_END,\
+						   _TGL_PIPEB_MMIO_END)
+
 #define SKL_DMC_DC3_DC5_COUNT	_MMIO(0x80030)
 #define SKL_DMC_DC5_DC6_COUNT	_MMIO(0x8002C)
 #define BXT_DMC_DC3_DC5_COUNT	_MMIO(0x80038)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 94fcdb7bd21d..eeaa8d0d0407 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1605,17 +1605,17 @@ void i915_vma_close(struct i915_vma *vma)
 
 static void __i915_vma_remove_closed(struct i915_vma *vma)
 {
-	struct intel_gt *gt = vma->vm->gt;
-
-	spin_lock_irq(&gt->closed_lock);
 	list_del_init(&vma->closed_link);
-	spin_unlock_irq(&gt->closed_lock);
 }
 
 void i915_vma_reopen(struct i915_vma *vma)
 {
+	struct intel_gt *gt = vma->vm->gt;
+
+	spin_lock_irq(&gt->closed_lock);
 	if (i915_vma_is_closed(vma))
 		__i915_vma_remove_closed(vma);
+	spin_unlock_irq(&gt->closed_lock);
 }
 
 void i915_vma_release(struct kref *ref)
@@ -1641,6 +1641,7 @@ static void force_unbind(struct i915_vma *vma)
 static void release_references(struct i915_vma *vma)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
+	struct intel_gt *gt = vma->vm->gt;
 
 	GEM_BUG_ON(i915_vma_is_active(vma));
 
@@ -1650,7 +1651,9 @@ static void release_references(struct i915_vma *vma)
 	rb_erase(&vma->obj_node, &obj->vma.tree);
 	spin_unlock(&obj->vma.lock);
 
+	spin_lock_irq(&gt->closed_lock);
 	__i915_vma_remove_closed(vma);
+	spin_unlock_irq(&gt->closed_lock);
 
 	__i915_vma_put(vma);
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index daf9f87477ba..a2141d3d9b1d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -46,8 +46,9 @@ static bool
 nouveau_get_backlight_name(char backlight_name[BL_NAME_SIZE],
 			   struct nouveau_backlight *bl)
 {
-	const int nb = ida_simple_get(&bl_ida, 0, 0, GFP_KERNEL);
-	if (nb < 0 || nb >= 100)
+	const int nb = ida_alloc_max(&bl_ida, 99, GFP_KERNEL);
+
+	if (nb < 0)
 		return false;
 	if (nb > 0)
 		snprintf(backlight_name, BL_NAME_SIZE, "nv_backlight%d", nb);
@@ -414,7 +415,7 @@ nouveau_backlight_init(struct drm_connector *connector)
 					    nv_encoder, ops, &props);
 	if (IS_ERR(bl->dev)) {
 		if (bl->id >= 0)
-			ida_simple_remove(&bl_ida, bl->id);
+			ida_free(&bl_ida, bl->id);
 		ret = PTR_ERR(bl->dev);
 		goto fail_alloc;
 	}
@@ -442,7 +443,7 @@ nouveau_backlight_fini(struct drm_connector *connector)
 		return;
 
 	if (bl->id >= 0)
-		ida_simple_remove(&bl_ida, bl->id);
+		ida_free(&bl_ida, bl->id);
 
 	backlight_device_unregister(bl->dev);
 	nv_conn->backlight = NULL;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 992cc285f2fe..2ed528c065fa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -123,7 +123,7 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
 
 	mutex_init(&tdev->iommu.mutex);
 
-	if (iommu_present(&platform_bus_type)) {
+	if (device_iommu_mapped(dev)) {
 		tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
 		if (!tdev->iommu.domain)
 			goto error;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 6c58b0fd13fb..98b78ec6b37d 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -38,6 +38,7 @@
 #include <drm/drm_scdc_helper.h>
 #include <linux/clk.h>
 #include <linux/component.h>
+#include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
 #include <linux/of_address.h>
 #include <linux/of_gpio.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
index a3bfbb6c3e14..162dfeb1cc5a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
@@ -528,7 +528,7 @@ int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
 		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
 	} while (*seqno == 0);
 
-	if (!(vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_FENCE)) {
+	if (!vmw_has_fences(dev_priv)) {
 
 		/*
 		 * Don't request hardware to send a fence. The
@@ -675,11 +675,14 @@ int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
  */
 bool vmw_cmd_supported(struct vmw_private *vmw)
 {
-	if ((vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
-				  SVGA_CAP_CMD_BUFFERS_2)) != 0)
-		return true;
+	bool has_cmdbufs =
+		(vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
+				      SVGA_CAP_CMD_BUFFERS_2)) != 0;
+	if (vmw_is_svga_v3(vmw))
+		return (has_cmdbufs &&
+			(vmw->capabilities & SVGA_CAP_GBOBJECTS) != 0);
 	/*
 	 * We have FIFO cmd's
 	 */
-	return vmw->fifo_mem != NULL;
+	return has_cmdbufs || vmw->fifo_mem != NULL;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index ea3ecdda561d..6de0b9ef5c77 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1679,4 +1679,12 @@ static inline void vmw_irq_status_write(struct vmw_private *vmw,
 		outl(status, vmw->io_start + SVGA_IRQSTATUS_PORT);
 }
 
+static inline bool vmw_has_fences(struct vmw_private *vmw)
+{
+	if ((vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
+				  SVGA_CAP_CMD_BUFFERS_2)) != 0)
+		return true;
+	return (vmw_fifo_caps(vmw) & SVGA_FIFO_CAP_FENCE) != 0;
+}
+
 #endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 8ee34576c7d0..adf17c740656 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -483,7 +483,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
 
 static int vmw_fb_kms_framebuffer(struct fb_info *info)
 {
-	struct drm_mode_fb_cmd2 mode_cmd;
+	struct drm_mode_fb_cmd2 mode_cmd = {0};
 	struct vmw_fb_par *par = info->par;
 	struct fb_var_screeninfo *var = &info->var;
 	struct drm_framebuffer *cur_fb;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 59d6a2dd4c2e..66cc35dc223e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -82,6 +82,22 @@ fman_from_fence(struct vmw_fence_obj *fence)
 	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
 }
 
+static u32 vmw_fence_goal_read(struct vmw_private *vmw)
+{
+	if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
+		return vmw_read(vmw, SVGA_REG_FENCE_GOAL);
+	else
+		return vmw_fifo_mem_read(vmw, SVGA_FIFO_FENCE_GOAL);
+}
+
+static void vmw_fence_goal_write(struct vmw_private *vmw, u32 value)
+{
+	if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
+		vmw_write(vmw, SVGA_REG_FENCE_GOAL, value);
+	else
+		vmw_fifo_mem_write(vmw, SVGA_FIFO_FENCE_GOAL, value);
+}
+
 /*
  * Note on fencing subsystem usage of irqs:
  * Typically the vmw_fences_update function is called
@@ -392,7 +408,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
 	if (likely(!fman->seqno_valid))
 		return false;
 
-	goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
+	goal_seqno = vmw_fence_goal_read(fman->dev_priv);
 	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
 		return false;
 
@@ -400,9 +416,8 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
 	list_for_each_entry(fence, &fman->fence_list, head) {
 		if (!list_empty(&fence->seq_passed_actions)) {
 			fman->seqno_valid = true;
-			vmw_fifo_mem_write(fman->dev_priv,
-					   SVGA_FIFO_FENCE_GOAL,
-					   fence->base.seqno);
+			vmw_fence_goal_write(fman->dev_priv,
+					     fence->base.seqno);
 			break;
 		}
 	}
@@ -434,13 +449,12 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
 	if (dma_fence_is_signaled_locked(&fence->base))
 		return false;
 
-	goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
+	goal_seqno = vmw_fence_goal_read(fman->dev_priv);
 	if (likely(fman->seqno_valid &&
 		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
 		return false;
 
-	vmw_fifo_mem_write(fman->dev_priv, SVGA_FIFO_FENCE_GOAL,
-			   fence->base.seqno);
+	vmw_fence_goal_write(fman->dev_priv, fence->base.seqno);
 	fman->seqno_valid = true;
 
 	return true;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index c5191de365ca..fe4732bf2c9d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -32,6 +32,14 @@
 
 #define VMW_FENCE_WRAP (1 << 24)
 
+static u32 vmw_irqflag_fence_goal(struct vmw_private *vmw)
+{
+	if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
+		return SVGA_IRQFLAG_REG_FENCE_GOAL;
+	else
+		return SVGA_IRQFLAG_FENCE_GOAL;
+}
+
 /**
  * vmw_thread_fn - Deferred (process context) irq handler
  *
@@ -96,7 +104,7 @@ static irqreturn_t vmw_irq_handler(int irq, void *arg)
 		wake_up_all(&dev_priv->fifo_queue);
 
 	if ((masked_status & (SVGA_IRQFLAG_ANY_FENCE |
-			      SVGA_IRQFLAG_FENCE_GOAL)) &&
+			      vmw_irqflag_fence_goal(dev_priv))) &&
 	    !test_and_set_bit(VMW_IRQTHREAD_FENCE, dev_priv->irqthread_pending))
 		ret = IRQ_WAKE_THREAD;
 
@@ -137,8 +145,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
 	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
 		return true;
 
-	if (!(vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_FENCE) &&
-	    vmw_fifo_idle(dev_priv, seqno))
+	if (!vmw_has_fences(dev_priv) && vmw_fifo_idle(dev_priv, seqno))
 		return true;
 
 	/**
@@ -160,6 +167,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 		      unsigned long timeout)
 {
 	struct vmw_fifo_state *fifo_state = dev_priv->fifo;
+	bool fifo_down = false;
 	uint32_t count = 0;
 	uint32_t signal_seq;
@@ -176,12 +184,14 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 	 */
 
 	if (fifo_idle) {
-		down_read(&fifo_state->rwsem);
 		if (dev_priv->cman) {
 			ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
 					      10*HZ);
 			if (ret)
 				goto out_err;
+		} else if (fifo_state) {
+			down_read(&fifo_state->rwsem);
+			fifo_down = true;
 		}
 	}
 
@@ -218,12 +228,12 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 		}
 	}
 	finish_wait(&dev_priv->fence_queue, &__wait);
-	if (ret == 0 && fifo_idle)
+	if (ret == 0 && fifo_idle && fifo_state)
 		vmw_fence_write(dev_priv, signal_seq);
 
 	wake_up_all(&dev_priv->fence_queue);
 out_err:
-	if (fifo_idle)
+	if (fifo_down)
 		up_read(&fifo_state->rwsem);
 
 	return ret;
@@ -266,13 +276,13 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
 
 void vmw_goal_waiter_add(struct vmw_private *dev_priv)
 {
-	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
+	vmw_generic_waiter_add(dev_priv, vmw_irqflag_fence_goal(dev_priv),
			       &dev_priv->goal_queue_waiters);
 }
 
 void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
 {
-	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
+	vmw_generic_waiter_remove(dev_priv, vmw_irqflag_fence_goal(dev_priv),
				  &dev_priv->goal_queue_waiters);
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index bbd2f4ec08ec..93431e8f6606 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1344,7 +1344,6 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
 		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
						      mode_cmd,
						      is_bo_proxy);
-
	/*
	 * vmw_create_bo_proxy() adds a reference that is no longer
	 * needed
@@ -1385,13 +1384,16 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
	ret = vmw_user_lookup_handle(dev_priv, file_priv,
				     mode_cmd->handles[0],
				     &surface, &bo);
-	if (ret)
+	if (ret) {
+		DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
+			  mode_cmd->handles[0], mode_cmd->handles[0]);
		goto err_out;
+	}

	if (!bo &&
	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
-		DRM_ERROR("Surface size cannot exceed %dx%d",
+		DRM_ERROR("Surface size cannot exceed %dx%d\n",
			  dev_priv->texture_max_width,
			  dev_priv->texture_max_height);
		goto err_out;