Diffstat (limited to 'drivers/gpu/drm/i915/gt')
76 files changed, 1713 insertions, 1298 deletions
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
index d4f4452ce5ed..4270b5a34a83 100644
--- a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
@@ -85,14 +85,14 @@ static int gen6_drpc(struct seq_file *m)
 	gt_core_status = intel_uncore_read_fw(uncore, GEN6_GT_CORE_STATUS);
 	rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
-	if (INTEL_GEN(i915) >= 9) {
+	if (GRAPHICS_VER(i915) >= 9) {
 		gen9_powergate_enable =
 			intel_uncore_read(uncore, GEN9_PG_ENABLE);
 		gen9_powergate_status =
 			intel_uncore_read(uncore, GEN9_PWRGT_DOMAIN_STATUS);
 	}
 
-	if (INTEL_GEN(i915) <= 7)
+	if (GRAPHICS_VER(i915) <= 7)
 		sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
 				       &rc6vids, NULL);
 
@@ -100,7 +100,7 @@ static int gen6_drpc(struct seq_file *m)
 		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
 	seq_printf(m, "RC6 Enabled: %s\n",
 		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
-	if (INTEL_GEN(i915) >= 9) {
+	if (GRAPHICS_VER(i915) >= 9) {
 		seq_printf(m, "Render Well Gating Enabled: %s\n",
 			   yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
 		seq_printf(m, "Media Well Gating Enabled: %s\n",
@@ -134,7 +134,7 @@ static int gen6_drpc(struct seq_file *m)
 	seq_printf(m, "Core Power Down: %s\n",
 		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
 
-	if (INTEL_GEN(i915) >= 9) {
+	if (GRAPHICS_VER(i915) >= 9) {
 		seq_printf(m, "Render Power Well: %s\n",
 			   (gen9_powergate_status &
 			    GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
@@ -150,7 +150,7 @@ static int gen6_drpc(struct seq_file *m)
 	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
 	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
 
-	if (INTEL_GEN(i915) <= 7) {
+	if (GRAPHICS_VER(i915) <= 7) {
 		seq_printf(m, "RC6 voltage: %dmV\n",
 			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
 		seq_printf(m, "RC6+ voltage: %dmV\n",
@@ -230,7 +230,7 @@ static int drpc_show(struct seq_file *m, void *unused)
 	with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
 		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
 			err = vlv_drpc(m);
-		else if (INTEL_GEN(i915) >= 6)
+		else if (GRAPHICS_VER(i915) >= 6)
 			err = gen6_drpc(m);
 		else
 			err = ilk_drpc(m);
@@ -250,7 +250,7 @@ static int frequency_show(struct seq_file *m, void *unused)
 
 	wakeref = intel_runtime_pm_get(uncore->rpm);
 
-	if (IS_GEN(i915, 5)) {
+	if (GRAPHICS_VER(i915) == 5) {
 		u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
 		u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
 
@@ -296,7 +296,7 @@ static int frequency_show(struct seq_file *m, void *unused)
 		seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
 			   intel_gpu_freq(rps, rps->efficient_freq));
-	} else if (INTEL_GEN(i915) >= 6) {
+	} else if (GRAPHICS_VER(i915) >= 6) {
 		u32 rp_state_limits;
 		u32 gt_perf_status;
 		u32 rp_state_cap;
@@ -321,7 +321,7 @@ static int frequency_show(struct seq_file *m, void *unused)
 		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
 		reqf = intel_uncore_read(uncore, GEN6_RPNSWREQ);
-		if (INTEL_GEN(i915) >= 9) {
+		if (GRAPHICS_VER(i915) >= 9) {
 			reqf >>= 23;
 		} else {
 			reqf &= ~GEN6_TURBO_DISABLE;
@@ -354,7 +354,7 @@ static int frequency_show(struct seq_file *m, void *unused)
 
 		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
 
-		if (INTEL_GEN(i915) >= 11) {
+		if (GRAPHICS_VER(i915) >= 11) {
 			pm_ier = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE);
 			pm_imr = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK);
 			/*
@@ -363,7 +363,7 @@ static int frequency_show(struct seq_file *m, void *unused)
 			 */
 			pm_isr = 0;
 			pm_iir = 0;
-		} else if (INTEL_GEN(i915) >= 8) {
+		} else if (GRAPHICS_VER(i915) >= 8) {
 			pm_ier = intel_uncore_read(uncore, GEN8_GT_IER(2));
 			pm_imr = intel_uncore_read(uncore, GEN8_GT_IMR(2));
 			pm_isr = intel_uncore_read(uncore, GEN8_GT_ISR(2));
@@ -386,14 +386,14 @@ static int frequency_show(struct seq_file *m, void *unused)
 
 		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
 			   pm_ier, pm_imr, pm_mask);
-		if (INTEL_GEN(i915) <= 10)
+		if (GRAPHICS_VER(i915) <= 10)
 			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
 				   pm_isr, pm_iir);
 		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
 			   rps->pm_intrmsk_mbz);
 		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
 		seq_printf(m, "Render p-state ratio: %d\n",
-			   (gt_perf_status & (INTEL_GEN(i915) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
+			   (gt_perf_status & (GRAPHICS_VER(i915) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
 		seq_printf(m, "Render p-state VID: %d\n",
 			   gt_perf_status & 0xff);
 		seq_printf(m, "Render p-state limit: %d\n",
@@ -437,20 +437,20 @@ static int frequency_show(struct seq_file *m, void *unused)
 		max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 0 : rp_state_cap >> 16) & 0xff;
 		max_freq *= (IS_GEN9_BC(i915) ||
-			     INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
+			     GRAPHICS_VER(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
 			   intel_gpu_freq(rps, max_freq));
 
 		max_freq = (rp_state_cap & 0xff00) >> 8;
 		max_freq *= (IS_GEN9_BC(i915) ||
-			     INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
+			     GRAPHICS_VER(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
 			   intel_gpu_freq(rps, max_freq));
 
 		max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 16 : rp_state_cap >> 0) & 0xff;
 		max_freq *= (IS_GEN9_BC(i915) ||
-			     INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
+			     GRAPHICS_VER(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
 			   intel_gpu_freq(rps, max_freq));
 
 		seq_printf(m, "Max overclocked frequency: %dMHz\n",
@@ -488,7 +488,7 @@ static int llc_show(struct seq_file *m, void *data)
 {
 	struct intel_gt *gt = m->private;
 	struct drm_i915_private *i915 = gt->i915;
-	const bool edram = INTEL_GEN(i915) > 8;
+	const bool edram = GRAPHICS_VER(i915) > 8;
 	struct intel_rps *rps = &gt->rps;
 	unsigned int max_gpu_freq, min_gpu_freq;
 	intel_wakeref_t wakeref;
@@ -500,7 +500,7 @@ static int llc_show(struct seq_file *m, void *data)
 	min_gpu_freq = rps->min_freq;
 	max_gpu_freq = rps->max_freq;
-	if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
+	if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 10) {
 		/* Convert GT frequency to 50 HZ units */
 		min_gpu_freq /= GEN9_FREQ_SCALER;
 		max_gpu_freq /= GEN9_FREQ_SCALER;
@@ -518,7 +518,7 @@ static int llc_show(struct seq_file *m, void *data)
 			   intel_gpu_freq(rps,
 					  (gpu_freq *
 					   (IS_GEN9_BC(i915) ||
-					    INTEL_GEN(i915) >= 10 ?
+					    GRAPHICS_VER(i915) >= 10 ?
 					    GEN9_FREQ_SCALER : 1))),
 			   ((ia_freq >> 0) & 0xff) * 100,
 			   ((ia_freq >> 8) & 0xff) * 100);
@@ -580,7 +580,7 @@ static int rps_boost_show(struct seq_file *m, void *data)
 	seq_printf(m, "Wait boosts: %d\n", READ_ONCE(rps->boosts));
 
-	if (INTEL_GEN(i915) >= 6 && intel_rps_is_active(rps)) {
+	if (GRAPHICS_VER(i915) >= 6 && intel_rps_is_active(rps)) {
 		struct intel_uncore *uncore = gt->uncore;
 		u32 rpup, rpupei;
 		u32 rpdown, rpdownei;
diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
index 9646200d2792..61383830505e 100644
--- a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
@@ -74,7 +74,7 @@ int gen4_emit_flush_rcs(struct i915_request *rq, u32 mode)
 	cmd = MI_FLUSH;
 	if (mode & EMIT_INVALIDATE) {
 		cmd |= MI_EXE_FLUSH;
-		if (IS_G4X(rq->engine->i915) || IS_GEN(rq->engine->i915, 5))
+		if (IS_G4X(rq->engine->i915) || GRAPHICS_VER(rq->engine->i915) == 5)
 			cmd |= MI_INVALIDATE_ISP;
 	}
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
index e08dff376339..1aee5e6b1b23 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -96,9 +96,8 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
 		 * entries back to scratch.
 		 */
 
-		vaddr = kmap_atomic_px(pt);
+		vaddr = px_vaddr(pt);
 		memset32(vaddr + pte, scratch_pte, count);
-		kunmap_atomic(vaddr);
 
 		pte = 0;
 	}
@@ -120,7 +119,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 
 	GEM_BUG_ON(!pd->entry[act_pt]);
 
-	vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
+	vaddr = px_vaddr(i915_pt_entry(pd, act_pt));
 	do {
 		GEM_BUG_ON(sg_dma_len(iter.sg) < I915_GTT_PAGE_SIZE);
 		vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
@@ -136,12 +135,10 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 		}
 
 		if (++act_pte == GEN6_PTES) {
-			kunmap_atomic(vaddr);
-			vaddr = kmap_atomic_px(i915_pt_entry(pd, ++act_pt));
+			vaddr = px_vaddr(i915_pt_entry(pd, ++act_pt));
 			act_pte = 0;
 		}
 	} while (1);
-	kunmap_atomic(vaddr);
 
 	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
 }
@@ -235,7 +232,7 @@ static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
 		goto err_scratch0;
 	}
 
-	ret = pin_pt_dma(vm, vm->scratch[1]);
+	ret = map_pt_dma(vm, vm->scratch[1]);
 	if (ret)
 		goto err_scratch1;
 
@@ -346,7 +343,7 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
 	if (!vma)
 		return ERR_PTR(-ENOMEM);
 
-	i915_active_init(&vma->active, NULL, NULL);
+	i915_active_init(&vma->active, NULL, NULL, 0);
 
 	kref_init(&vma->ref);
 	mutex_init(&vma->pages_mutex);
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
index de575fdb033f..21f08e53889c 100644
--- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
@@ -397,7 +397,10 @@ static void emit_batch(struct i915_vma * const vma,
 	gen7_emit_pipeline_invalidate(&cmds);
 	batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
 	batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
-	batch_add(&cmds, 0xffff0000);
+	batch_add(&cmds, 0xffff0000 |
+		  ((IS_IVB_GT1(i915) || IS_VALLEYVIEW(i915)) ?
+		   HIZ_RAW_STALL_OPT_DISABLE :
+		   0));
 	batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
 	batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
 	gen7_emit_pipeline_invalidate(&cmds);
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
index 732c2ed1d933..94e0a5669f90 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -38,7 +38,7 @@ int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)
 		 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
 		 * pipe control.
 		 */
-		if (IS_GEN(rq->engine->i915, 9))
+		if (GRAPHICS_VER(rq->engine->i915) == 9)
 			vf_flush_wa = true;
 
 		/* WaForGAMHang:kbl */
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 74bf6fc8461f..da4f5eb43ac2 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -242,11 +242,10 @@ static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
 			    atomic_read(&pt->used));
 			GEM_BUG_ON(!count || count >= atomic_read(&pt->used));
 
-			vaddr = kmap_atomic_px(pt);
+			vaddr = px_vaddr(pt);
 			memset64(vaddr + gen8_pd_index(start, 0),
 				 vm->scratch[0]->encode,
 				 count);
-			kunmap_atomic(vaddr);
 
 			atomic_sub(count, &pt->used);
 			start += count;
@@ -304,10 +303,7 @@ static void __gen8_ppgtt_alloc(struct i915_address_space * const vm,
 			__i915_gem_object_pin_pages(pt->base);
 			i915_gem_object_make_unshrinkable(pt->base);
 
-			if (lvl ||
-			    gen8_pt_count(*start, end) < I915_PDES ||
-			    intel_vgpu_active(vm->i915))
-				fill_px(pt, vm->scratch[lvl]->encode);
+			fill_px(pt, vm->scratch[lvl]->encode);
 
 			spin_lock(&pd->lock);
 			if (likely(!pd->entry[idx])) {
@@ -375,7 +371,7 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
 	gen8_pte_t *vaddr;
 
 	pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
-	vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
+	vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
 	do {
 		GEM_BUG_ON(sg_dma_len(iter->sg) < I915_GTT_PAGE_SIZE);
 		vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
@@ -402,12 +398,10 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
 			}
 
 			clflush_cache_range(vaddr, PAGE_SIZE);
-			kunmap_atomic(vaddr);
-			vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
+			vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
 		}
 	} while (1);
 	clflush_cache_range(vaddr, PAGE_SIZE);
-	kunmap_atomic(vaddr);
 
 	return idx;
 }
@@ -442,7 +436,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
 			encode |= GEN8_PDE_PS_2M;
 			page_size = I915_GTT_PAGE_SIZE_2M;
 
-			vaddr = kmap_atomic_px(pd);
+			vaddr = px_vaddr(pd);
 		} else {
 			struct i915_page_table *pt =
 				i915_pt_entry(pd, __gen8_pte_index(start, 1));
@@ -457,7 +451,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
 			    rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
 				maybe_64K = __gen8_pte_index(start, 1);
 
-			vaddr = kmap_atomic_px(pt);
+			vaddr = px_vaddr(pt);
 		}
 
 		do {
@@ -491,7 +485,6 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
 		} while (rem >= page_size && index < I915_PDES);
 
 		clflush_cache_range(vaddr, PAGE_SIZE);
-		kunmap_atomic(vaddr);
 
 		/*
		 * Is it safe to mark the 2M block as 64K? -- Either we have
@@ -505,9 +498,8 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
 		      !iter->sg && IS_ALIGNED(vma->node.start +
 					      vma->node.size,
 					      I915_GTT_PAGE_SIZE_2M)))) {
-			vaddr = kmap_atomic_px(pd);
+			vaddr = px_vaddr(pd);
 			vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
-			kunmap_atomic(vaddr);
 			page_size = I915_GTT_PAGE_SIZE_64K;
 
 			/*
@@ -523,12 +515,11 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
 				u16 i;
 
 				encode = vma->vm->scratch[0]->encode;
-				vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));
+				vaddr = px_vaddr(i915_pt_entry(pd, maybe_64K));
 
 				for (i = 1; i < index; i += 16)
 					memset64(vaddr + i, encode, 15);
 
-				kunmap_atomic(vaddr);
 			}
 		}
 
@@ -602,7 +593,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
 		if (IS_ERR(obj))
 			goto free_scratch;
 
-		ret = pin_pt_dma(vm, obj);
+		ret = map_pt_dma(vm, obj);
 		if (ret) {
 			i915_gem_object_put(obj);
 			goto free_scratch;
 		}
@@ -639,7 +630,7 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
 		if (IS_ERR(pde))
 			return PTR_ERR(pde);
 
-		err = pin_pt_dma(vm, pde->pt.base);
+		err = map_pt_dma(vm, pde->pt.base);
 		if (err) {
 			free_pd(vm, pde);
 			return err;
 		}
@@ -674,7 +665,7 @@ gen8_alloc_top_pd(struct i915_address_space *vm)
 		goto err_pd;
 	}
 
-	err = pin_pt_dma(vm, pd->pt.base);
+	err = map_pt_dma(vm, pd->pt.base);
 	if (err)
 		goto err_pd;
 
@@ -715,9 +706,12 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
 	 *
	 * Gen12 has inherited the same read-only fault issue from gen11.
	 */
-	ppgtt->vm.has_read_only = !IS_GEN_RANGE(gt->i915, 11, 12);
+	ppgtt->vm.has_read_only = !IS_GRAPHICS_VER(gt->i915, 11, 12);
 
-	ppgtt->vm.alloc_pt_dma = alloc_pt_dma;
+	if (HAS_LMEM(gt->i915))
+		ppgtt->vm.alloc_pt_dma = alloc_pt_lmem;
+	else
+		ppgtt->vm.alloc_pt_dma = alloc_pt_dma;
 
 	err = gen8_init_scratch(&ppgtt->vm);
 	if (err)
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.h b/drivers/gpu/drm/i915/gt/gen8_ppgtt.h
index 76a08b9c1f5c..b9028c2ad3c7 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.h
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.h
@@ -6,8 +6,15 @@
 #ifndef __GEN8_PPGTT_H__
 #define __GEN8_PPGTT_H__
 
+#include <linux/kernel.h>
+
+struct i915_address_space;
 struct intel_gt;
+enum i915_cache_level;
 
 struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt);
 
+u64 gen8_ggtt_pte_encode(dma_addr_t addr,
+			 enum i915_cache_level level,
+			 u32 flags);
+
 #endif
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 17cf2640b082..4033184f13b9 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -326,7 +326,6 @@ void intel_context_unpin(struct intel_context *ce)
 	intel_context_put(ce);
 }
 
-__i915_active_call
 static void __intel_context_retire(struct i915_active *active)
 {
 	struct intel_context *ce = container_of(active, typeof(*ce), active);
@@ -385,7 +384,7 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
 	mutex_init(&ce->pin_mutex);
 
 	i915_active_init(&ce->active,
-			 __intel_context_active, __intel_context_retire);
+			 __intel_context_active, __intel_context_retire, 0);
 }
 
 void intel_context_fini(struct intel_context *ce)
diff --git a/drivers/gpu/drm/i915/gt/intel_context_sseu.c b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
index 8dfd8f656aaa..e86d8255feec 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
@@ -76,7 +76,7 @@ intel_context_reconfigure_sseu(struct intel_context *ce,
 {
 	int ret;
 
-	GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8);
+	GEM_BUG_ON(GRAPHICS_VER(ce->engine->i915) < 8);
 
 	ret = intel_context_lock_pinned(ce);
 	if (ret)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 47ee8578e511..8d9184920c51 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -13,8 +13,9 @@
 #include "i915_reg.h"
 #include "i915_request.h"
 #include "i915_selftest.h"
-#include "gt/intel_timeline.h"
 #include "intel_engine_types.h"
+#include "intel_gt_types.h"
+#include "intel_timeline.h"
 #include "intel_workarounds.h"
 
 struct drm_printer;
@@ -262,6 +263,11 @@ void intel_engine_init_active(struct intel_engine_cs *engine,
 #define ENGINE_MOCK	1
 #define ENGINE_VIRTUAL	2
 
+static inline bool intel_engine_uses_guc(const struct intel_engine_cs *engine)
+{
+	return engine->gt->submission_method >= INTEL_SUBMISSION_GUC;
+}
+
 static inline bool
 intel_engine_has_preempt_reset(const struct intel_engine_cs *engine)
 {
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index efe935f80c1a..7f03df236613 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -45,9 +45,9 @@ struct engine_info {
 	unsigned int hw_id;
 	u8 class;
 	u8 instance;
-	/* mmio bases table *must* be sorted in reverse gen order */
+	/* mmio bases table *must* be sorted in reverse graphics_ver order */
 	struct engine_mmio_base {
-		u32 gen : 8;
+		u32 graphics_ver : 8;
 		u32 base : 24;
 	} mmio_bases[MAX_MMIO_BASES];
 };
@@ -58,7 +58,7 @@ static const struct engine_info intel_engines[] = {
 		.class = RENDER_CLASS,
 		.instance = 0,
 		.mmio_bases = {
-			{ .gen = 1, .base = RENDER_RING_BASE }
+			{ .graphics_ver = 1, .base = RENDER_RING_BASE }
 		},
 	},
 	[BCS0] = {
@@ -66,7 +66,7 @@ static const struct engine_info intel_engines[] = {
 		.class = COPY_ENGINE_CLASS,
 		.instance = 0,
 		.mmio_bases = {
-			{ .gen = 6, .base = BLT_RING_BASE }
+			{ .graphics_ver = 6, .base = BLT_RING_BASE }
 		},
 	},
 	[VCS0] = {
@@ -74,9 +74,9 @@ static const struct engine_info intel_engines[] = {
 		.class = VIDEO_DECODE_CLASS,
 		.instance = 0,
 		.mmio_bases = {
-			{ .gen = 11, .base = GEN11_BSD_RING_BASE },
-			{ .gen = 6, .base = GEN6_BSD_RING_BASE },
-			{ .gen = 4, .base = BSD_RING_BASE }
+			{ .graphics_ver = 11, .base = GEN11_BSD_RING_BASE },
+			{ .graphics_ver = 6, .base = GEN6_BSD_RING_BASE },
+			{ .graphics_ver = 4, .base = BSD_RING_BASE }
 		},
 	},
 	[VCS1] = {
@@ -84,8 +84,8 @@ static const struct engine_info intel_engines[] = {
 		.class = VIDEO_DECODE_CLASS,
 		.instance = 1,
 		.mmio_bases = {
-			{ .gen = 11, .base = GEN11_BSD2_RING_BASE },
-			{ .gen = 8, .base = GEN8_BSD2_RING_BASE }
+			{ .graphics_ver = 11, .base = GEN11_BSD2_RING_BASE },
+			{ .graphics_ver = 8, .base = GEN8_BSD2_RING_BASE }
 		},
 	},
 	[VCS2] = {
@@ -93,7 +93,7 @@ static const struct engine_info intel_engines[] = {
 		.class = VIDEO_DECODE_CLASS,
 		.instance = 2,
 		.mmio_bases = {
-			{ .gen = 11, .base = GEN11_BSD3_RING_BASE }
+			{ .graphics_ver = 11, .base = GEN11_BSD3_RING_BASE }
 		},
 	},
 	[VCS3] = {
@@ -101,7 +101,7 @@ static const struct engine_info intel_engines[] = {
 		.class = VIDEO_DECODE_CLASS,
 		.instance = 3,
 		.mmio_bases = {
-			{ .gen = 11, .base = GEN11_BSD4_RING_BASE }
+			{ .graphics_ver = 11, .base = GEN11_BSD4_RING_BASE }
 		},
 	},
 	[VECS0] = {
@@ -109,8 +109,8 @@ static const struct engine_info intel_engines[] = {
 		.class = VIDEO_ENHANCEMENT_CLASS,
 		.instance = 0,
 		.mmio_bases = {
-			{ .gen = 11, .base = GEN11_VEBOX_RING_BASE },
-			{ .gen = 7, .base = VEBOX_RING_BASE }
+			{ .graphics_ver = 11, .base = GEN11_VEBOX_RING_BASE },
+			{ .graphics_ver = 7, .base = VEBOX_RING_BASE }
 		},
 	},
 	[VECS1] = {
@@ -118,7 +118,7 @@ static const struct engine_info intel_engines[] = {
 		.class = VIDEO_ENHANCEMENT_CLASS,
 		.instance = 1,
 		.mmio_bases = {
-			{ .gen = 11, .base = GEN11_VEBOX2_RING_BASE }
+			{ .graphics_ver = 11, .base = GEN11_VEBOX2_RING_BASE }
 		},
 	},
 };
@@ -146,9 +146,9 @@ u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
 
 	switch (class) {
 	case RENDER_CLASS:
-		switch (INTEL_GEN(gt->i915)) {
+		switch (GRAPHICS_VER(gt->i915)) {
 		default:
-			MISSING_CASE(INTEL_GEN(gt->i915));
+			MISSING_CASE(GRAPHICS_VER(gt->i915));
 			return DEFAULT_LR_CONTEXT_RENDER_SIZE;
 		case 12:
 		case 11:
@@ -184,8 +184,8 @@ u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
 			 */
 			cxt_size = intel_uncore_read(uncore, CXT_SIZE) + 1;
 			drm_dbg(&gt->i915->drm,
-				"gen%d CXT_SIZE = %d bytes [0x%08x]\n",
-				INTEL_GEN(gt->i915), cxt_size * 64,
+				"graphics_ver = %d CXT_SIZE = %d bytes [0x%08x]\n",
+				GRAPHICS_VER(gt->i915), cxt_size * 64,
 				cxt_size - 1);
 			return round_up(cxt_size * 64, PAGE_SIZE);
 		case 3:
@@ -201,7 +201,7 @@ u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
 	case VIDEO_DECODE_CLASS:
 	case VIDEO_ENHANCEMENT_CLASS:
 	case COPY_ENGINE_CLASS:
-		if (INTEL_GEN(gt->i915) < 8)
+		if (GRAPHICS_VER(gt->i915) < 8)
 			return 0;
 		return GEN8_LR_CONTEXT_OTHER_SIZE;
 	}
@@ -213,7 +213,7 @@ static u32 __engine_mmio_base(struct drm_i915_private *i915,
 	int i;
 
 	for (i = 0; i < MAX_MMIO_BASES; i++)
-		if (INTEL_GEN(i915) >= bases[i].gen)
+		if (GRAPHICS_VER(i915) >= bases[i].graphics_ver)
 			break;
 
 	GEM_BUG_ON(i == MAX_MMIO_BASES);
@@ -240,10 +240,10 @@ void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
 	 * Though they added more rings on g4x/ilk, they did not add
	 * per-engine HWSTAM until gen6.
	 */
-	if (INTEL_GEN(engine->i915) < 6 && engine->class != RENDER_CLASS)
+	if (GRAPHICS_VER(engine->i915) < 6 && engine->class != RENDER_CLASS)
 		return;
 
-	if (INTEL_GEN(engine->i915) >= 3)
+	if (GRAPHICS_VER(engine->i915) >= 3)
 		ENGINE_WRITE(engine, RING_HWSTAM, mask);
 	else
 		ENGINE_WRITE16(engine, RING_HWSTAM, mask);
@@ -255,11 +255,17 @@ static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
 	intel_engine_set_hwsp_writemask(engine, ~0u);
 }
 
+static void nop_irq_handler(struct intel_engine_cs *engine, u16 iir)
+{
+	GEM_DEBUG_WARN_ON(iir);
+}
+
 static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
 {
 	const struct engine_info *info = &intel_engines[id];
 	struct drm_i915_private *i915 = gt->i915;
 	struct intel_engine_cs *engine;
+	u8 guc_class;
 
 	BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
 	BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
@@ -288,9 +294,12 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
 	engine->i915 = i915;
 	engine->gt = gt;
 	engine->uncore = gt->uncore;
-	engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases);
 	engine->hw_id = info->hw_id;
-	engine->guc_id = MAKE_GUC_ID(info->class, info->instance);
+	guc_class = engine_class_to_guc_class(info->class);
+	engine->guc_id = MAKE_GUC_ID(guc_class, info->instance);
+	engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases);
+
+	engine->irq_handler = nop_irq_handler;
 
 	engine->class = info->class;
 	engine->instance = info->instance;
@@ -308,7 +317,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
 		CONFIG_DRM_I915_TIMESLICE_DURATION;
 
 	/* Override to uninterruptible for OpenCL workloads. */
-	if (INTEL_GEN(i915) == 12 && engine->class == RENDER_CLASS)
+	if (GRAPHICS_VER(i915) == 12 && engine->class == RENDER_CLASS)
 		engine->props.preempt_timeout_ms = 0;
 
 	engine->defaults = engine->props; /* never to change again */
@@ -345,8 +354,8 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine)
 		 * HEVC support is present on first engine instance
		 * before Gen11 and on all instances afterwards.
		 */
-		if (INTEL_GEN(i915) >= 11 ||
-		    (INTEL_GEN(i915) >= 9 && engine->instance == 0))
+		if (GRAPHICS_VER(i915) >= 11 ||
+		    (GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
 			engine->uabi_capabilities |=
 				I915_VIDEO_CLASS_CAPABILITY_HEVC;
 
@@ -354,14 +363,14 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine)
 		 * SFC block is present only on even logical engine
		 * instances.
		 */
-		if ((INTEL_GEN(i915) >= 11 &&
+		if ((GRAPHICS_VER(i915) >= 11 &&
 		     (engine->gt->info.vdbox_sfc_access &
 		      BIT(engine->instance))) ||
-		    (INTEL_GEN(i915) >= 9 && engine->instance == 0))
+		    (GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
 			engine->uabi_capabilities |=
 				I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
 	} else if (engine->class == VIDEO_ENHANCEMENT_CLASS) {
-		if (INTEL_GEN(i915) >= 9)
+		if (GRAPHICS_VER(i915) >= 9)
 			engine->uabi_capabilities |=
 				I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
 	}
@@ -459,7 +468,7 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
 
 	info->engine_mask = INTEL_INFO(i915)->platform_engine_mask;
 
-	if (INTEL_GEN(i915) < 11)
+	if (GRAPHICS_VER(i915) < 11)
 		return info->engine_mask;
 
 	media_fuse = ~intel_uncore_read(uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
@@ -485,7 +494,7 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
 		 * hooked up to an SFC (Scaler & Format Converter) unit.
		 * In TGL each VDBOX has access to an SFC.
		 */
-		if (INTEL_GEN(i915) >= 12 || logical_vdbox++ % 2 == 0)
+		if (GRAPHICS_VER(i915) >= 12 || logical_vdbox++ % 2 == 0)
 			gt->info.vdbox_sfc_access |= BIT(i);
 	}
 	drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
@@ -722,7 +731,7 @@ static int engine_setup_common(struct intel_engine_cs *engine)
 	intel_engine_init_whitelist(engine);
 	intel_engine_init_ctx_wa(engine);
 
-	if (INTEL_GEN(engine->i915) >= 12)
+	if (GRAPHICS_VER(engine->i915) >= 12)
 		engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;
 
 	return 0;
@@ -898,7 +907,7 @@ static int engine_init_common(struct intel_engine_cs *engine)
 	return 0;
 
 err_context:
-	intel_context_put(ce);
+	destroy_pinned_context(ce);
 	return ret;
 }
 
@@ -909,12 +918,16 @@ int intel_engines_init(struct intel_gt *gt)
 	enum intel_engine_id id;
 	int err;
 
-	if (intel_uc_uses_guc_submission(&gt->uc))
+	if (intel_uc_uses_guc_submission(&gt->uc)) {
+		gt->submission_method = INTEL_SUBMISSION_GUC;
 		setup = intel_guc_submission_setup;
-	else if (HAS_EXECLISTS(gt->i915))
+	} else if (HAS_EXECLISTS(gt->i915)) {
+		gt->submission_method = INTEL_SUBMISSION_ELSP;
 		setup = intel_execlists_submission_setup;
-	else
+	} else {
+		gt->submission_method = INTEL_SUBMISSION_RING;
 		setup = intel_ring_submission_setup;
+	}
 
 	for_each_engine(engine, gt, id) {
 		err = engine_setup_common(engine);
@@ -986,9 +999,9 @@ u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
 
 	u64 acthd;
 
-	if (INTEL_GEN(i915) >= 8)
+	if (GRAPHICS_VER(i915) >= 8)
 		acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW);
-	else if (INTEL_GEN(i915) >= 4)
+	else if (GRAPHICS_VER(i915) >= 4)
 		acthd = ENGINE_READ(engine, RING_ACTHD);
 	else
 		acthd = ENGINE_READ(engine, ACTHD);
@@ -1000,7 +1013,7 @@ u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
 {
 	u64 bbaddr;
 
-	if (INTEL_GEN(engine->i915) >= 8)
+	if (GRAPHICS_VER(engine->i915) >= 8)
 		bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW);
 	else
 		bbaddr = ENGINE_READ(engine, RING_BBADDR);
@@ -1047,7 +1060,7 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
 {
 	int err = 0;
 
-	if (INTEL_GEN(engine->i915) < 3)
+	if (GRAPHICS_VER(engine->i915) < 3)
 		return -ENODEV;
 
 	ENGINE_TRACE(engine, "\n");
@@ -1097,7 +1110,7 @@ read_subslice_reg(const struct intel_engine_cs *engine,
 	u32 mcr_mask, mcr_ss, mcr, old_mcr, val;
 	enum forcewake_domains fw_domains;
 
-	if (INTEL_GEN(i915) >= 11) {
+	if (GRAPHICS_VER(i915) >= 11) {
 		mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
 		mcr_ss = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
 	} else {
@@ -1146,7 +1159,7 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine,
 
 	memset(instdone, 0, sizeof(*instdone));
 
-	switch (INTEL_GEN(i915)) {
+	switch (GRAPHICS_VER(i915)) {
 	default:
 		instdone->instdone =
 			intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
@@ -1156,7 +1169,7 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine,
 
 		instdone->slice_common =
 			intel_uncore_read(uncore, GEN7_SC_INSTDONE);
-		if (INTEL_GEN(i915) >= 12) {
+		if (GRAPHICS_VER(i915) >= 12) {
 			instdone->slice_common_extra[0] =
 				intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA);
 			instdone->slice_common_extra[1] =
@@ -1219,7 +1232,7 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
 		idle = false;
 
 	/* No bit for gen2, so assume the CS parser is idle */
-	if (INTEL_GEN(engine->i915) > 2 &&
+	if (GRAPHICS_VER(engine->i915) > 2 &&
 	    !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
 		idle = false;
 
@@ -1266,7 +1279,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
 		return true;
 
 	/* Waiting to drain ELSP? */
-	synchronize_hardirq(to_pci_dev(engine->i915->drm.dev)->irq);
+	intel_synchronize_hardirq(engine->i915);
 	intel_engine_flush_submission(engine);
 
 	/* ELSP is empty, but there are ready requests? E.g. after reset */
@@ -1316,7 +1329,7 @@ void intel_engines_reset_default_submission(struct intel_gt *gt)
 
 bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
 {
-	switch (INTEL_GEN(engine->i915)) {
+	switch (GRAPHICS_VER(engine->i915)) {
 	case 2:
 		return false; /* uses physical not virtual addresses */
 	case 3:
@@ -1421,7 +1434,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	u64 addr;
 
-	if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7))
+	if (engine->id == RENDER_CLASS && IS_GRAPHICS_VER(dev_priv, 4, 7))
 		drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
 	if (HAS_EXECLISTS(dev_priv)) {
 		drm_printf(m, "\tEL_STAT_HI: 0x%08x\n",
@@ -1438,13 +1451,13 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 	drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
 		   ENGINE_READ(engine, RING_CTL),
 		   ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
-	if (INTEL_GEN(engine->i915) > 2) {
+	if (GRAPHICS_VER(engine->i915) > 2) {
 		drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
 			   ENGINE_READ(engine, RING_MI_MODE),
 			   ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
 	}
 
-	if (INTEL_GEN(dev_priv) >= 6) {
+	if (GRAPHICS_VER(dev_priv) >= 6) {
 		drm_printf(m, "\tRING_IMR: 0x%08x\n",
 			   ENGINE_READ(engine, RING_IMR));
 		drm_printf(m, "\tRING_ESR: 0x%08x\n",
@@ -1461,15 +1474,15 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 	addr = intel_engine_get_last_batch_head(engine);
 	drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
 		   upper_32_bits(addr), lower_32_bits(addr));
-	if (INTEL_GEN(dev_priv) >= 8)
+	if (GRAPHICS_VER(dev_priv) >= 8)
 		addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
-	else if (INTEL_GEN(dev_priv) >= 4)
+	else if (GRAPHICS_VER(dev_priv) >= 4)
 		addr = ENGINE_READ(engine, RING_DMA_FADD);
 	else
 		addr = ENGINE_READ(engine, DMA_FADD_I8XX);
 	drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
 		   upper_32_bits(addr), lower_32_bits(addr));
-	if (INTEL_GEN(dev_priv) >= 4) {
+	if (GRAPHICS_VER(dev_priv) >= 4) {
 		drm_printf(m, "\tIPEIR: 0x%08x\n",
 			   ENGINE_READ(engine, RING_IPEIR));
 		drm_printf(m, "\tIPEHR: 0x%08x\n",
@@ -1479,7 +1492,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 		drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
 	}
 
-	if (intel_engine_in_guc_submission_mode(engine)) {
+	if (intel_engine_uses_guc(engine)) {
 		/* nothing to print yet */
 	} else if (HAS_EXECLISTS(dev_priv)) {
 		struct i915_request * const *port, *rq;
@@ -1548,7 +1561,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 		}
 		rcu_read_unlock();
 		execlists_active_unlock_bh(execlists);
-	} else if (INTEL_GEN(dev_priv) > 6) {
+	} else if (GRAPHICS_VER(dev_priv) > 6) {
 		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
 			   ENGINE_READ(engine, RING_PP_DIR_BASE));
 		drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 7c9af86fdb1e..47f4397095e5 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -23,7 +23,7 @@ static void dbg_poison_ce(struct intel_context *ce)
 
 	if (ce->state) {
 		struct drm_i915_gem_object *obj = ce->state->obj;
-		int type = i915_coherent_map_type(ce->engine->i915);
+		int type = i915_coherent_map_type(ce->engine->i915, obj, true);
 		void *map;
 
 		if (!i915_gem_object_trylock(obj))
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 883bafc44902..e113f93b3274 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -402,6 +402,7 @@ struct intel_engine_cs {
 	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
 	void (*irq_enable)(struct intel_engine_cs *engine);
 	void (*irq_disable)(struct intel_engine_cs *engine);
+	void (*irq_handler)(struct intel_engine_cs *engine, u16 iir);
 
 	void (*sanitize)(struct intel_engine_cs *engine);
 	int (*resume)(struct intel_engine_cs *engine);
@@ -481,10 +482,9 @@ struct intel_engine_cs {
#define I915_ENGINE_HAS_PREEMPTION   BIT(2)
 #define I915_ENGINE_HAS_SEMAPHORES  BIT(3)
 #define I915_ENGINE_HAS_TIMESLICES  BIT(4)
-#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(5)
-#define I915_ENGINE_IS_VIRTUAL       BIT(6)
-#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(7)
-#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(8)
+#define I915_ENGINE_IS_VIRTUAL       BIT(5)
+#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(6)
+#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(7)
 	unsigned int flags;
 
 	/*
@@ -594,12 +594,6 @@ intel_engine_has_timeslices(const struct intel_engine_cs *engine)
 }
 
 static inline bool
-intel_engine_needs_breadcrumb_tasklet(const struct intel_engine_cs *engine)
-{
-	return engine->flags & I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
-}
-
-static inline bool
 intel_engine_is_virtual(const struct intel_engine_cs *engine)
 {
 	return engine->flags & I915_ENGINE_IS_VIRTUAL;
@@ -612,10 +606,10 @@ intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine)
 }
 
 #define instdone_has_slice(dev_priv___, sseu___, slice___) \
-	((IS_GEN(dev_priv___, 7) ? 1 : ((sseu___)->slice_mask)) & BIT(slice___))
+	((GRAPHICS_VER(dev_priv___) == 7 ? 1 : ((sseu___)->slice_mask)) & BIT(slice___))
 
 #define instdone_has_subslice(dev_priv__, sseu__, slice__, subslice__) \
-	(IS_GEN(dev_priv__, 7) ? (1 & BIT(subslice__)) : \
+	(GRAPHICS_VER(dev_priv__) == 7 ? (1 & BIT(subslice__)) : \
 	 intel_sseu_has_subslice(sseu__, 0, subslice__))
 
 #define for_each_instdone_slice_subslice(dev_priv_, sseu_, slice_, subslice_) \
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index de124870af44..fc77592d88a9 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -118,6 +118,7 @@
 #include "intel_engine_stats.h"
 #include "intel_execlists_submission.h"
 #include "intel_gt.h"
+#include "intel_gt_irq.h"
 #include "intel_gt_pm.h"
 #include "intel_gt_requests.h"
 #include "intel_lrc.h"
@@ -1768,7 +1769,6 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
 	 */
 	GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) &&
 		   !reset_in_progress(execlists));
-	GEM_BUG_ON(!intel_engine_in_execlists_submission_mode(engine));
 
 	/*
	 * Note that csb_write, csb_status may be either in HWSP or mmio.
@@ -1847,7 +1847,7 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
 		ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
 			     head, upper_32_bits(csb), lower_32_bits(csb));
 
-		if (INTEL_GEN(engine->i915) >= 12)
+		if (GRAPHICS_VER(engine->i915) >= 12)
 			promote = gen12_csb_parse(csb);
 		else
 			promote = gen8_csb_parse(csb);
@@ -2385,6 +2385,45 @@ static void execlists_submission_tasklet(struct tasklet_struct *t)
 	rcu_read_unlock();
 }
 
+static void execlists_irq_handler(struct intel_engine_cs *engine, u16 iir)
+{
+	bool tasklet = false;
+
+	if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) {
+		u32 eir;
+
+		/* Upper 16b are the enabling mask, rsvd for internal errors */
+		eir = ENGINE_READ(engine, RING_EIR) & GENMASK(15, 0);
+		ENGINE_TRACE(engine, "CS error: %x\n", eir);
+
+		/* Disable the error interrupt until after the reset */
+		if (likely(eir)) {
+			ENGINE_WRITE(engine, RING_EMR, ~0u);
+			ENGINE_WRITE(engine, RING_EIR, eir);
+			WRITE_ONCE(engine->execlists.error_interrupt, eir);
+			tasklet = true;
+		}
+	}
+
+	if (iir & GT_WAIT_SEMAPHORE_INTERRUPT) {
+		WRITE_ONCE(engine->execlists.yield,
+			   ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI));
+		ENGINE_TRACE(engine, "semaphore yield: %08x\n",
+			     engine->execlists.yield);
+		if (del_timer(&engine->execlists.timer))
+			tasklet = true;
+	}
+
+	if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
+		tasklet = true;
+
+	if (iir & GT_RENDER_USER_INTERRUPT)
+		intel_engine_signal_breadcrumbs(engine);
+
+	if (tasklet)
+		tasklet_hi_schedule(&engine->execlists.tasklet);
+}
+
 static void __execlists_kick(struct intel_engine_execlists *execlists)
 {
 	/* Kick the tasklet for some interrupt coalescing and reset handling */
@@ -2733,7 +2772,7 @@ static void enable_execlists(struct intel_engine_cs *engine)
 
 	intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
 
-	if (INTEL_GEN(engine->i915) >= 11)
+	if (GRAPHICS_VER(engine->i915) >= 11)
 		mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE);
 	else
 		mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE);
@@ -3064,7 +3103,7 @@ static void execlists_park(struct intel_engine_cs *engine)
 
 static bool can_preempt(struct intel_engine_cs *engine)
 {
-	if (INTEL_GEN(engine->i915) > 8)
+	if (GRAPHICS_VER(engine->i915) > 8)
 		return true;
 
 	/* GPGPU on bdw requires extra w/a; not implemented */
@@ -3076,29 +3115,6 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine)
 	engine->submit_request = execlists_submit_request;
 	engine->schedule = i915_schedule;
 	engine->execlists.tasklet.callback = execlists_submission_tasklet;
-
-	engine->reset.prepare = execlists_reset_prepare;
-	engine->reset.rewind = execlists_reset_rewind;
-	engine->reset.cancel = execlists_reset_cancel;
-	engine->reset.finish = execlists_reset_finish;
-
-	engine->park = execlists_park;
-	engine->unpark = NULL;
-
-	engine->flags |= I915_ENGINE_SUPPORTS_STATS;
-	if (!intel_vgpu_active(engine->i915)) {
-		engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
-		if (can_preempt(engine)) {
-			engine->flags |= I915_ENGINE_HAS_PREEMPTION;
-			if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
-				engine->flags |= I915_ENGINE_HAS_TIMESLICES;
-		}
-	}
-
-	if (intel_engine_has_preemption(engine))
-		engine->emit_bb_start = gen8_emit_bb_start;
-	else
-		engine->emit_bb_start = gen8_emit_bb_start_noarb;
 }
 
 static void execlists_shutdown(struct intel_engine_cs *engine)
@@ -3129,16 +3145,24 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 	engine->cops = &execlists_context_ops;
 	engine->request_alloc = execlists_request_alloc;
 
+	engine->reset.prepare = execlists_reset_prepare;
+	engine->reset.rewind = execlists_reset_rewind;
+	engine->reset.cancel = execlists_reset_cancel;
+	engine->reset.finish = execlists_reset_finish;
+
+	engine->park = execlists_park;
+	engine->unpark = NULL;
+
 	engine->emit_flush = gen8_emit_flush_xcs;
 	engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
 	engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
-	if (INTEL_GEN(engine->i915) >= 12) {
+	if (GRAPHICS_VER(engine->i915) >= 12) {
 		engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
 		engine->emit_flush = gen12_emit_flush_xcs;
 	}
 	engine->set_default_submission = execlists_set_default_submission;
 
-	if (INTEL_GEN(engine->i915) < 11) {
+	if (GRAPHICS_VER(engine->i915) < 11) {
 		engine->irq_enable = gen8_logical_ring_enable_irq;
 		engine->irq_disable = gen8_logical_ring_disable_irq;
 	} else {
@@ -3149,13 +3173,29 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 		 * until a more refined solution exists.
		 */
 	}
+	intel_engine_set_irq_handler(engine, execlists_irq_handler);
+
+	engine->flags |= I915_ENGINE_SUPPORTS_STATS;
+	if (!intel_vgpu_active(engine->i915)) {
+		engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
+		if (can_preempt(engine)) {
+			engine->flags |= I915_ENGINE_HAS_PREEMPTION;
+			if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+				engine->flags |= I915_ENGINE_HAS_TIMESLICES;
+		}
+	}
+
+	if (intel_engine_has_preemption(engine))
+		engine->emit_bb_start = gen8_emit_bb_start;
+	else
+		engine->emit_bb_start = gen8_emit_bb_start_noarb;
 }
 
 static void logical_ring_default_irqs(struct intel_engine_cs *engine)
 {
 	unsigned int shift = 0;
 
-	if (INTEL_GEN(engine->i915) < 11) {
+	if (GRAPHICS_VER(engine->i915) < 11) {
 		const u8 irq_shifts[] = {
 			[RCS0] = GEN8_RCS_IRQ_SHIFT,
 			[BCS0] = GEN8_BCS_IRQ_SHIFT,
@@ -3175,7 +3215,7 @@ static void logical_ring_default_irqs(struct intel_engine_cs *engine)
 
 static void rcs_submission_override(struct intel_engine_cs *engine)
 {
-	switch (INTEL_GEN(engine->i915)) {
+	switch (GRAPHICS_VER(engine->i915)) {
 	case 12:
 		engine->emit_flush = gen12_emit_flush_rcs;
 		engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
@@ -3226,13 +3266,13 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
 	execlists->csb_write =
 		&engine->status_page.addr[intel_hws_csb_write_index(i915)];
 
-	if (INTEL_GEN(i915) < 11)
+	if (GRAPHICS_VER(i915) < 11)
 		execlists->csb_size = GEN8_CSB_ENTRIES;
 	else
 		execlists->csb_size = GEN11_CSB_ENTRIES;
 
 	engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
-	if (INTEL_GEN(engine->i915) >= 11) {
+	if (GRAPHICS_VER(engine->i915) >= 11) {
 		execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
 		execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
 	}
@@ -3884,13 +3924,6 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
 	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
 
-bool
-intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine)
-{
-	return engine->set_default_submission ==
-	       execlists_set_default_submission;
-}
-
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftest_execlists.c"
 #endif
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.h b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
index fd61dae820e9..4ca9b475e252 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
@@ -43,7 +43,4 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
 				     const struct intel_engine_cs *master,
 				     const struct intel_engine_cs *sibling);
 
-bool
-intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine);
-
 #endif /* __INTEL_EXECLISTS_SUBMISSION_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 670c1271e7d5..20e46b843324 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -18,6 +18,7 @@
 #include "i915_vgpu.h"
 
 #include "intel_gtt.h"
+#include "gen8_ppgtt.h"
 
 static int i915_get_ggtt_vma_pages(struct i915_vma *vma);
 
@@ -106,10 +107,10 @@ static bool needs_idle_maps(struct drm_i915_private *i915)
 	if (!intel_vtd_active())
 		return false;
 
-	if (IS_GEN(i915, 5) && IS_MOBILE(i915))
+	if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915))
 		return true;
 
-	if (IS_GEN(i915, 12))
+	if (GRAPHICS_VER(i915) == 12)
 		return true; /* XXX DMAR fault reason 7 */
 
 	return false;
@@ -175,7 +176,7 @@ static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
 
 	gen8_ggtt_invalidate(ggtt);
 
-	if (INTEL_GEN(i915) >= 12)
+	if (GRAPHICS_VER(i915) >= 12)
 		intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
 				      GEN12_GUC_TLB_INV_CR_INVALIDATE);
 	else
@@ -187,9 +188,9 @@ static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
 	intel_gtt_chipset_flush();
 }
 
-static u64 gen8_ggtt_pte_encode(dma_addr_t addr,
-				enum i915_cache_level level,
-				u32 flags)
+u64 gen8_ggtt_pte_encode(dma_addr_t addr,
+			 enum i915_cache_level level,
+			 u32 flags)
 {
 	gen8_pte_t pte = addr | _PAGE_PRESENT;
 
@@ -657,7 +658,7 @@ static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
 		goto err_ppgtt;
 
 	i915_gem_object_lock(ppgtt->vm.scratch[0], NULL);
-	err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
+	err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
 	i915_gem_object_unlock(ppgtt->vm.scratch[0]);
 	if (err)
 		goto err_stash;
@@ -745,7 +746,6 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
 	mutex_unlock(&ggtt->vm.mutex);
 	i915_address_space_fini(&ggtt->vm);
-	dma_resv_fini(&ggtt->vm.resv);
 
 	arch_phys_wc_del(ggtt->mtrr);
 
@@ -767,6 +767,19 @@ void i915_ggtt_driver_release(struct drm_i915_private *i915)
 	ggtt_cleanup_hw(ggtt);
 }
 
+/**
+ * i915_ggtt_driver_late_release - Cleanup of GGTT that needs to be done after
+ * all free objects have been drained.
+ * @i915: i915 device
+ */
+void i915_ggtt_driver_late_release(struct drm_i915_private *i915)
+{
+	struct i915_ggtt *ggtt = &i915->ggtt;
+
+	GEM_WARN_ON(kref_read(&ggtt->vm.resv_ref) != 1);
+	dma_resv_fini(&ggtt->vm._resv);
+}
+
 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
 {
 	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
@@ -819,7 +832,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
 	 * resort to an uncached mapping. The WC issue is easily caught by the
	 * readback check when writing GTT PTE entries.
	 */
-	if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10)
+	if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 10)
 		ggtt->gsm = ioremap(phys_addr, size);
 	else
 		ggtt->gsm = ioremap_wc(phys_addr, size);
@@ -828,6 +841,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
 		return -ENOMEM;
 	}
 
+	kref_init(&ggtt->vm.resv_ref);
 	ret = setup_scratch_page(&ggtt->vm);
 	if (ret) {
 		drm_err(&i915->drm, "Scratch setup failed\n");
@@ -906,9 +920,11 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 
 	ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
 
-	/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
-	if (intel_ggtt_update_needs_vtd_wa(i915) ||
-	    IS_CHERRYVIEW(i915) /* fails with concurrent use/update */) {
+	/*
+	 * Serialize GTT updates with aperture access on BXT if VT-d is on,
+	 * and always on CHV.
+	 */
+	if (intel_vm_no_concurrent_access_wa(i915)) {
 		ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
 		ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
 		ggtt->vm.bind_async_flags =
@@ -1062,7 +1078,7 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
 		ggtt->vm.pte_encode = hsw_pte_encode;
 	else if (IS_VALLEYVIEW(i915))
 		ggtt->vm.pte_encode = byt_pte_encode;
-	else if (INTEL_GEN(i915) >= 7)
+	else if (GRAPHICS_VER(i915) >= 7)
 		ggtt->vm.pte_encode = ivb_pte_encode;
 	else
 		ggtt->vm.pte_encode = snb_pte_encode;
@@ -1132,16 +1148,16 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
 	ggtt->vm.gt = gt;
 	ggtt->vm.i915 = i915;
 	ggtt->vm.dma = i915->drm.dev;
-	dma_resv_init(&ggtt->vm.resv);
+	dma_resv_init(&ggtt->vm._resv);
 
-	if (INTEL_GEN(i915) <= 5)
+	if (GRAPHICS_VER(i915) <= 5)
 		ret = i915_gmch_probe(ggtt);
-	else if (INTEL_GEN(i915) < 8)
+	else if (GRAPHICS_VER(i915) < 8)
 		ret = gen6_gmch_probe(ggtt);
 	else
 		ret = gen8_gmch_probe(ggtt);
 	if (ret) {
-		dma_resv_fini(&ggtt->vm.resv);
+		dma_resv_fini(&ggtt->vm._resv);
 		return ret;
 	}
 
@@ -1193,7 +1209,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915)
 
 int i915_ggtt_enable_hw(struct drm_i915_private *i915)
 {
-	if (INTEL_GEN(i915) < 6 && !intel_enable_gtt())
+	if (GRAPHICS_VER(i915) < 6 && !intel_enable_gtt())
 		return -EIO;
 
 	return 0;
@@ -1258,7 +1274,7 @@ void i915_ggtt_resume(struct i915_ggtt *ggtt)
 	if (flush)
 		wbinvd_on_all_cpus();
 
-	if (INTEL_GEN(ggtt->vm.i915) >= 8)
+	if (GRAPHICS_VER(ggtt->vm.i915) >= 8)
 		setup_private_pat(ggtt->vm.gt->uncore);
 
 	intel_ggtt_restore_fences(ggtt);
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 8a322594210c..f8948de72036 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -56,7 +56,7 @@ static void i965_write_fence_reg(struct i915_fence_reg *fence)
 	int fence_pitch_shift;
 	u64 val;
 
-	if (INTEL_GEN(fence_to_i915(fence)) >= 6) {
+	if (GRAPHICS_VER(fence_to_i915(fence)) >= 6) {
 		fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
 		fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
 		fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
@@ -173,9 +173,9 @@ static void fence_write(struct i915_fence_reg *fence)
 	 * and explicitly managed for internal users.
	 */
 
-	if (IS_GEN(i915, 2))
+	if (GRAPHICS_VER(i915) == 2)
 		i830_write_fence_reg(fence);
-	else if (IS_GEN(i915, 3))
+	else if (GRAPHICS_VER(i915) == 3)
 		i915_write_fence_reg(fence);
 	else
 		i965_write_fence_reg(fence);
@@ -188,7 +188,7 @@ static void fence_write(struct i915_fence_reg *fence)
 
 static bool gpu_uses_fence_registers(struct i915_fence_reg *fence)
 {
-	return INTEL_GEN(fence_to_i915(fence)) < 4;
+	return GRAPHICS_VER(fence_to_i915(fence)) < 4;
 }
 
 static int fence_update(struct i915_fence_reg *fence,
@@ -348,7 +348,7 @@ static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
 	if (intel_has_pending_fb_unpin(ggtt->vm.i915))
 		return ERR_PTR(-EAGAIN);
 
-	return ERR_PTR(-EDEADLK);
+	return ERR_PTR(-ENOBUFS);
 }
 
 int __i915_vma_pin_fence(struct i915_vma *vma)
@@ -569,7 +569,7 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
 	u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
 	u32 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
 
-	if (INTEL_GEN(i915) >= 8 || IS_VALLEYVIEW(i915)) {
+	if (GRAPHICS_VER(i915) >= 8 || IS_VALLEYVIEW(i915)) {
 		/*
		 * On BDW+, swizzling is not used. We leave the CPU memory
		 * controller in charge of optimizing memory accesses without
@@ -579,7 +579,7 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
 		 */
 		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
 		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-	} else if (INTEL_GEN(i915) >= 6) {
+	} else if (GRAPHICS_VER(i915) >= 6) {
 		if (i915->preserve_bios_swizzle) {
 			if (intel_uncore_read(uncore, DISP_ARB_CTL) &
 			    DISP_TILE_SURFACE_SWIZZLING) {
@@ -611,14 +611,14 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
 				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
 			}
 		}
-	} else if (IS_GEN(i915, 5)) {
+	} else if (GRAPHICS_VER(i915) == 5) {
 		/*
		 * On Ironlake whatever DRAM config, GPU always do
		 * same swizzling setup.
		 */
 		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
 		swizzle_y = I915_BIT_6_SWIZZLE_9;
-	} else if (IS_GEN(i915, 2)) {
+	} else if (GRAPHICS_VER(i915) == 2) {
 		/*
		 * As far as we know, the 865 doesn't have these bit 6
		 * swizzling issues.
@@ -653,8 +653,8 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
 			 * banks of memory are paired and unswizzled on the
			 * uneven portion, so leave that as unknown.
			 */
-			if (intel_uncore_read16(uncore, C0DRB3) ==
-			    intel_uncore_read16(uncore, C1DRB3)) {
+			if (intel_uncore_read16(uncore, C0DRB3_BW) ==
+			    intel_uncore_read16(uncore, C1DRB3_BW)) {
 				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
 				swizzle_y = I915_BIT_6_SWIZZLE_9;
 			}
@@ -697,7 +697,7 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
 	}
 
 	/* check for L-shaped memory aka modified enhanced addressing */
-	if (IS_GEN(i915, 4) &&
+	if (GRAPHICS_VER(i915) == 4 &&
 	    !(intel_uncore_read(uncore, DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
 		swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
 		swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
@@ -844,10 +844,10 @@ void intel_ggtt_init_fences(struct i915_ggtt *ggtt)
 
 	if (!i915_ggtt_has_aperture(ggtt))
 		num_fences = 0;
-	else if (INTEL_GEN(i915) >= 7 &&
+	else if (GRAPHICS_VER(i915) >= 7 &&
 		 !(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)))
 		num_fences = 32;
-	else if (INTEL_GEN(i915) >= 4 ||
+	else if (GRAPHICS_VER(i915) >= 4 ||
 		 IS_I945G(i915) || IS_I945GM(i915) ||
		 IS_G33(i915) || IS_PINEVIEW(i915))
 		num_fences = 16;
@@ -867,7 +867,7 @@ void intel_ggtt_init_fences(struct i915_ggtt *ggtt)
 	for (i = 0; i < num_fences; i++) {
 		struct i915_fence_reg *fence = &ggtt->fence_regs[i];
 
-		i915_active_init(&fence->active, NULL, NULL);
+		i915_active_init(&fence->active, NULL, NULL, 0);
 		fence->ggtt = ggtt;
 		fence->id = i;
 		list_add_tail(&fence->link, &ggtt->fence_list);
@@ -895,29 +895,29 @@ void intel_gt_init_swizzling(struct intel_gt *gt)
 	struct drm_i915_private *i915 = gt->i915;
 	struct intel_uncore *uncore = gt->uncore;
 
-	if (INTEL_GEN(i915) < 5 ||
+	if (GRAPHICS_VER(i915) < 5 ||
 	    i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
 		return;
 
 	intel_uncore_rmw(uncore, DISP_ARB_CTL, 0, DISP_TILE_SURFACE_SWIZZLING);
 
-	if (IS_GEN(i915, 5))
+	if (GRAPHICS_VER(i915) == 5)
 		return;
 
 	intel_uncore_rmw(uncore, TILECTL, 0, TILECTL_SWZCTL);
 
-	if (IS_GEN(i915, 6))
+	if (GRAPHICS_VER(i915) == 6)
 		intel_uncore_write(uncore, ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
-	else if (IS_GEN(i915, 7))
+	else if (GRAPHICS_VER(i915) == 7)
 		intel_uncore_write(uncore, ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
-	else if (IS_GEN(i915, 8))
+	else if (GRAPHICS_VER(i915) == 8)
 		intel_uncore_write(uncore, GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
 	else
-		MISSING_CASE(INTEL_GEN(i915));
+		MISSING_CASE(GRAPHICS_VER(i915));
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index 14e2ffb6c0e5..2694dbb9967e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: MIT*/
 /*
- * Copyright � 2003-2018 Intel Corporation
+ * Copyright © 2003-2018 Intel Corporation
 */
 
 #ifndef _INTEL_GPU_COMMANDS_H_
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 8d77dcbad059..2161bf01ef8b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -68,8 +68,6 @@ int intel_gt_probe_lmem(struct intel_gt *gt)
 	id = INTEL_REGION_LMEM;
 
 	mem->id = id;
-	mem->type = INTEL_MEMORY_LOCAL;
-	mem->instance = 0;
 
 	intel_memory_region_set_name(mem, "local%u", mem->instance);
 
@@ -115,10 +113,10 @@ static void init_unused_rings(struct intel_gt *gt)
 		init_unused_ring(gt, SRB1_BASE);
 		init_unused_ring(gt, SRB2_BASE);
 		init_unused_ring(gt, SRB3_BASE);
-	} else if (IS_GEN(i915, 2)) {
+	} else if (GRAPHICS_VER(i915) == 2) {
 		init_unused_ring(gt, SRB0_BASE);
 		init_unused_ring(gt, SRB1_BASE);
-	} else if (IS_GEN(i915, 3)) {
+	} else if (GRAPHICS_VER(i915) == 3) {
 		init_unused_ring(gt, PRB1_BASE);
 		init_unused_ring(gt, PRB2_BASE);
 	}
@@ -135,7 +133,7 @@ int intel_gt_init_hw(struct intel_gt *gt)
 	/* Double layer security blanket, see i915_gem_init() */
 	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
 
-	if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
+	if (HAS_EDRAM(i915) && GRAPHICS_VER(i915) < 9)
 		intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));
 
 	if (IS_HASWELL(i915))
@@ -208,10 +206,10 @@ intel_gt_clear_error_registers(struct intel_gt *gt,
 	struct intel_uncore *uncore = gt->uncore;
 	u32 eir;
 
-	if (!IS_GEN(i915, 2))
+	if (GRAPHICS_VER(i915) != 2)
 		clear_register(uncore, PGTBL_ER);
 
-	if (INTEL_GEN(i915) < 4)
+	if (GRAPHICS_VER(i915) < 4)
 		clear_register(uncore, IPEIR(RENDER_RING_BASE));
 	else
 		clear_register(uncore, IPEIR_I965);
@@ -229,13 +227,13 @@ intel_gt_clear_error_registers(struct intel_gt *gt,
 				   I915_MASTER_ERROR_INTERRUPT);
 	}
 
-	if (INTEL_GEN(i915) >= 12) {
+	if (GRAPHICS_VER(i915) >= 12) {
 		rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID);
 		intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
-	} else if (INTEL_GEN(i915) >= 8) {
+	} else if (GRAPHICS_VER(i915) >= 8) {
 		rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
 		intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
-	} else if (INTEL_GEN(i915) >= 6) {
+	} else if (GRAPHICS_VER(i915) >= 6) {
 		struct intel_engine_cs *engine;
 		enum intel_engine_id id;
 
@@ -273,7 +271,7 @@ static void gen8_check_faults(struct intel_gt *gt)
 	i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg;
 	u32 fault;
 
-	if (INTEL_GEN(gt->i915) >= 12) {
+	if (GRAPHICS_VER(gt->i915) >= 12) {
 		fault_reg = GEN12_RING_FAULT_REG;
 		fault_data0_reg = GEN12_FAULT_TLB_DATA0;
 		fault_data1_reg = GEN12_FAULT_TLB_DATA1;
@@ -313,9 +311,9 @@ void intel_gt_check_and_clear_faults(struct intel_gt *gt)
 	struct drm_i915_private *i915 = gt->i915;
 
 	/* From GEN8 onwards we only have one 'All Engine Fault Register' */
-	if (INTEL_GEN(i915) >= 8)
+	if (GRAPHICS_VER(i915) >= 8)
 		gen8_check_faults(gt);
-	else if (INTEL_GEN(i915) >= 6)
+	else if (GRAPHICS_VER(i915) >= 6)
 		gen6_check_faults(gt);
 	else
 		return;
@@ -367,7 +365,7 @@ void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
 void intel_gt_chipset_flush(struct intel_gt *gt)
 {
 	wmb();
-	if (INTEL_GEN(gt->i915) < 6)
+	if (GRAPHICS_VER(gt->i915) < 6)
 		intel_gtt_chipset_flush();
 }
 
@@ -591,7 +589,8 @@ int intel_gt_init(struct intel_gt *gt)
 	 */
 	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
 
-	err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
+	err = intel_gt_init_scratch(gt,
+				    GRAPHICS_VER(gt->i915) == 2 ? SZ_256K : SZ_4K);
 	if (err)
 		goto out_fw;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
index c59468107598..aa0a59c5b614 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
@@ -98,7 +98,6 @@ static void pool_free_work(struct work_struct *wrk)
 				      round_jiffies_up_relative(HZ));
 }
 
-__i915_active_call
 static void pool_retire(struct i915_active *ref)
 {
 	struct intel_gt_buffer_pool_node *node =
@@ -154,7 +153,7 @@ node_create(struct intel_gt_buffer_pool *pool, size_t sz,
 	node->age = 0;
 	node->pool = pool;
 	node->pinned = false;
-	i915_active_init(&node->active, NULL, pool_retire);
+	i915_active_init(&node->active, NULL, pool_retire, 0);
 
 	obj = i915_gem_object_create_internal(gt->i915, sz);
 	if (IS_ERR(obj)) {
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
index 582fcaee11aa..9f0e729d2d15 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
@@ -76,7 +76,7 @@ static u32 read_clock_frequency(struct intel_uncore *uncore)
 	u32 f19_2_mhz = 19200000;
 	u32 f24_mhz = 24000000;
 
-	if (INTEL_GEN(uncore->i915) <= 4) {
+	if (GRAPHICS_VER(uncore->i915) <= 4) {
 		/*
		 * PRMs say:
		 *
@@ -85,7 +85,7 @@ static u32 read_clock_frequency(struct intel_uncore *uncore)
 		 *     (“CLKCFG”) MCHBAR register)
		 */
 		return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000 / 16;
-	} else if (INTEL_GEN(uncore->i915) <= 8) {
+	} else if (GRAPHICS_VER(uncore->i915) <= 8) {
 		/*
		 * PRMs say:
		 *
@@ -94,7 +94,7 @@ static u32 read_clock_frequency(struct intel_uncore *uncore)
 		 *      rolling over every 1.5 hours).
		 */
 		return f12_5_mhz;
-	} else if (INTEL_GEN(uncore->i915) <= 9) {
+	} else if (GRAPHICS_VER(uncore->i915) <= 9) {
 		u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
 		u32 freq = 0;
 
@@ -113,7 +113,7 @@ static u32 read_clock_frequency(struct intel_uncore *uncore)
 		}
 
 		return freq;
-	} else if (INTEL_GEN(uncore->i915) <= 12) {
+	} else if (GRAPHICS_VER(uncore->i915) <= 12) {
 		u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
 		u32 freq = 0;
 
@@ -128,7 +128,7 @@ static u32 read_clock_frequency(struct intel_uncore *uncore)
 		} else {
 			u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0);
 
-			if (INTEL_GEN(uncore->i915) <= 10)
+			if (GRAPHICS_VER(uncore->i915) <= 10)
 				freq = gen10_get_crystal_clock_freq(uncore, c0);
 			else
 				freq = gen11_get_crystal_clock_freq(uncore, c0);
@@ -211,7 +211,7 @@ u64 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u64 ns)
 	 * frozen machine.
*/ val = div_u64_roundup(intel_gt_ns_to_clock_interval(gt, ns), 16); - if (IS_GEN(gt->i915, 6)) + if (GRAPHICS_VER(gt->i915) == 6) val = div_u64_roundup(val, 25) * 25; return val; diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c index 9fc6c912a4e5..c13462274fe8 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c @@ -20,48 +20,6 @@ static void guc_irq_handler(struct intel_guc *guc, u16 iir) intel_guc_to_host_event_handler(guc); } -static void -cs_irq_handler(struct intel_engine_cs *engine, u32 iir) -{ - bool tasklet = false; - - if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) { - u32 eir; - - /* Upper 16b are the enabling mask, rsvd for internal errors */ - eir = ENGINE_READ(engine, RING_EIR) & GENMASK(15, 0); - ENGINE_TRACE(engine, "CS error: %x\n", eir); - - /* Disable the error interrupt until after the reset */ - if (likely(eir)) { - ENGINE_WRITE(engine, RING_EMR, ~0u); - ENGINE_WRITE(engine, RING_EIR, eir); - WRITE_ONCE(engine->execlists.error_interrupt, eir); - tasklet = true; - } - } - - if (iir & GT_WAIT_SEMAPHORE_INTERRUPT) { - WRITE_ONCE(engine->execlists.yield, - ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI)); - ENGINE_TRACE(engine, "semaphore yield: %08x\n", - engine->execlists.yield); - if (del_timer(&engine->execlists.timer)) - tasklet = true; - } - - if (iir & GT_CONTEXT_SWITCH_INTERRUPT) - tasklet = true; - - if (iir & GT_RENDER_USER_INTERRUPT) { - intel_engine_signal_breadcrumbs(engine); - tasklet |= intel_engine_needs_breadcrumb_tasklet(engine); - } - - if (tasklet) - tasklet_hi_schedule(&engine->execlists.tasklet); -} - static u32 gen11_gt_engine_identity(struct intel_gt *gt, const unsigned int bank, const unsigned int bit) @@ -122,7 +80,7 @@ gen11_engine_irq_handler(struct intel_gt *gt, const u8 class, engine = NULL; if (likely(engine)) - return cs_irq_handler(engine, iir); + return intel_engine_cs_irq(engine, iir); WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n", class, instance); @@ -236,14 +194,18 @@ void gen11_gt_irq_reset(struct intel_gt *gt) void gen11_gt_irq_postinstall(struct intel_gt *gt) { - const u32 irqs = - GT_CS_MASTER_ERROR_INTERRUPT | - GT_RENDER_USER_INTERRUPT | - GT_CONTEXT_SWITCH_INTERRUPT | - GT_WAIT_SEMAPHORE_INTERRUPT; struct intel_uncore *uncore = gt->uncore; + u32 irqs = GT_RENDER_USER_INTERRUPT; - const u32 dmask = irqs << 16 | irqs; - const u32 smask = irqs << 16; + u32 dmask; + u32 smask; + + if (!intel_uc_wants_guc_submission(&gt->uc)) + irqs |= GT_CS_MASTER_ERROR_INTERRUPT | + GT_CONTEXT_SWITCH_INTERRUPT | + GT_WAIT_SEMAPHORE_INTERRUPT; + + dmask = irqs << 16 | irqs; + smask = irqs << 16; BUILD_BUG_ON(irqs & 0xffff0000); @@ -275,9 +237,12 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt) void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir) { if (gt_iir & GT_RENDER_USER_INTERRUPT) - intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]); + intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0], + gt_iir); + if (gt_iir & ILK_BSD_USER_INTERRUPT) - intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]); + intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0], + gt_iir); } static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir) @@ -301,11 +266,16 @@ static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir) void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir) { if (gt_iir & GT_RENDER_USER_INTERRUPT) - 
intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]); + intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0], + gt_iir); + if (gt_iir & GT_BSD_USER_INTERRUPT) - intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]); + intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0], + gt_iir >> 12); + if (gt_iir & GT_BLT_USER_INTERRUPT) - intel_engine_signal_breadcrumbs(gt->engine_class[COPY_ENGINE_CLASS][0]); + intel_engine_cs_irq(gt->engine_class[COPY_ENGINE_CLASS][0], + gt_iir >> 22); if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | GT_BSD_CS_ERROR_INTERRUPT | @@ -324,10 +294,10 @@ void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl) if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { iir = raw_reg_read(regs, GEN8_GT_IIR(0)); if (likely(iir)) { - cs_irq_handler(gt->engine_class[RENDER_CLASS][0], - iir >> GEN8_RCS_IRQ_SHIFT); - cs_irq_handler(gt->engine_class[COPY_ENGINE_CLASS][0], - iir >> GEN8_BCS_IRQ_SHIFT); + intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0], + iir >> GEN8_RCS_IRQ_SHIFT); + intel_engine_cs_irq(gt->engine_class[COPY_ENGINE_CLASS][0], + iir >> GEN8_BCS_IRQ_SHIFT); raw_reg_write(regs, GEN8_GT_IIR(0), iir); } } @@ -335,10 +305,10 @@ void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl) if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) { iir = raw_reg_read(regs, GEN8_GT_IIR(1)); if (likely(iir)) { - cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][0], - iir >> GEN8_VCS0_IRQ_SHIFT); - cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][1], - iir >> GEN8_VCS1_IRQ_SHIFT); + intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0], + iir >> GEN8_VCS0_IRQ_SHIFT); + intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][1], + iir >> GEN8_VCS1_IRQ_SHIFT); raw_reg_write(regs, GEN8_GT_IIR(1), iir); } } @@ -346,8 +316,8 @@ void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl) if (master_ctl & GEN8_GT_VECS_IRQ) { iir = raw_reg_read(regs, GEN8_GT_IIR(3)); if (likely(iir)) { - cs_irq_handler(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0], - iir >> GEN8_VECS_IRQ_SHIFT); + intel_engine_cs_irq(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0], + iir >> GEN8_VECS_IRQ_SHIFT); raw_reg_write(regs, GEN8_GT_IIR(3), iir); } } @@ -429,7 +399,7 @@ void gen5_gt_irq_reset(struct intel_gt *gt) struct intel_uncore *uncore = gt->uncore; GEN3_IRQ_RESET(uncore, GT); - if (INTEL_GEN(gt->i915) >= 6) + if (GRAPHICS_VER(gt->i915) >= 6) GEN3_IRQ_RESET(uncore, GEN6_PM); } @@ -447,14 +417,14 @@ void gen5_gt_irq_postinstall(struct intel_gt *gt) } gt_irqs |= GT_RENDER_USER_INTERRUPT; - if (IS_GEN(gt->i915, 5)) + if (GRAPHICS_VER(gt->i915) == 5) gt_irqs |= ILK_BSD_USER_INTERRUPT; else gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; GEN3_IRQ_INIT(uncore, GT, gt->gt_imr, gt_irqs); - if (INTEL_GEN(gt->i915) >= 6) { + if (GRAPHICS_VER(gt->i915) >= 6) { /* * RPS interrupts will get enabled/disabled on demand when RPS * itself is enabled/disabled. 
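The gen5/gen6/gen8 handlers above all demultiplex a single packed IIR word: each engine's 16-bit payload sits at a fixed shift, so the handler tests the engine's bit and shifts the payload down before dispatching, and intel_engine_cs_irq() simply drops empty payloads. A minimal userspace sketch of that pattern, assuming the gen6 bit positions shown above (the engine names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define RCS_SHIFT  0	/* GT_RENDER_USER_INTERRUPT at bit 0 */
#define VCS_SHIFT 12	/* GT_BSD_USER_INTERRUPT at bit 12, hence "gt_iir >> 12" */
#define BCS_SHIFT 22	/* GT_BLT_USER_INTERRUPT at bit 22, hence "gt_iir >> 22" */

static void engine_cs_irq(const char *name, uint16_t iir)
{
	if (iir)	/* mirrors intel_engine_cs_irq(): ignore an empty payload */
		printf("%s: iir=%#x\n", name, iir);
}

int main(void)
{
	uint32_t gt_iir = (1u << RCS_SHIFT) | (1u << BCS_SHIFT);

	if (gt_iir & (1u << RCS_SHIFT))
		engine_cs_irq("rcs0", gt_iir >> RCS_SHIFT);
	if (gt_iir & (1u << VCS_SHIFT))
		engine_cs_irq("vcs0", gt_iir >> VCS_SHIFT);
	if (gt_iir & (1u << BCS_SHIFT))
		engine_cs_irq("bcs0", gt_iir >> BCS_SHIFT);
	return 0;
}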
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.h b/drivers/gpu/drm/i915/gt/intel_gt_irq.h index f667e976fb2b..41cad38668c5 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_irq.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.h @@ -8,6 +8,8 @@ #include <linux/types.h> +#include "intel_engine_types.h" + struct intel_gt; #define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \ @@ -39,4 +41,25 @@ void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl); void gen8_gt_irq_reset(struct intel_gt *gt); void gen8_gt_irq_postinstall(struct intel_gt *gt); +static inline void intel_engine_cs_irq(struct intel_engine_cs *engine, u16 iir) +{ + if (iir) + engine->irq_handler(engine, iir); +} + +static inline void +intel_engine_set_irq_handler(struct intel_engine_cs *engine, + void (*fn)(struct intel_engine_cs *engine, + u16 iir)) +{ + /* + * As the interrupt is live while we allocate and set up the engines, + * err on the side of caution and apply barriers to updating + * the irq handler callback. This ensures that when we do use + * the engine, we will receive interrupts only to ourselves, + * and not lose any. + */ + smp_store_mb(engine->irq_handler, fn); +} + #endif /* INTEL_GT_IRQ_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c index 811a11ed181c..fe51f894b073 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c @@ -16,10 +16,10 @@ static void write_pm_imr(struct intel_gt *gt) u32 mask = gt->pm_imr; i915_reg_t reg; - if (INTEL_GEN(i915) >= 11) { + if (GRAPHICS_VER(i915) >= 11) { reg = GEN11_GPM_WGBOXPERF_INTR_MASK; mask <<= 16; /* pm is in upper half */ - } else if (INTEL_GEN(i915) >= 8) { + } else if (GRAPHICS_VER(i915) >= 8) { reg = GEN8_GT_IMR(2); } else { reg = GEN6_PMIMR; @@ -61,7 +61,7 @@ void gen6_gt_pm_mask_irq(struct intel_gt *gt, u32 mask) void gen6_gt_pm_reset_iir(struct intel_gt *gt, u32 reset_mask) { struct intel_uncore *uncore = gt->uncore; - i915_reg_t reg = INTEL_GEN(gt->i915) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR; + i915_reg_t reg = GRAPHICS_VER(gt->i915) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR; lockdep_assert_held(&gt->irq_lock); @@ -77,10 +77,10 @@ static void write_pm_ier(struct intel_gt *gt) u32 mask = gt->pm_ier; i915_reg_t reg; - if (INTEL_GEN(i915) >= 11) { + if (GRAPHICS_VER(i915) >= 11) { reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE; mask <<= 16; /* pm is in upper half */ - } else if (INTEL_GEN(i915) >= 8) { + } else if (GRAPHICS_VER(i915) >= 8) { reg = GEN8_GT_IER(2); } else { reg = GEN6_PMIER; diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index 0caf6ca0a784..fecfacf551d5 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -31,6 +31,12 @@ struct i915_ggtt; struct intel_engine_cs; struct intel_uncore; +enum intel_submission_method { + INTEL_SUBMISSION_RING, + INTEL_SUBMISSION_ELSP, + INTEL_SUBMISSION_GUC, +}; + struct intel_gt { struct drm_i915_private *i915; struct intel_uncore *uncore; @@ -118,6 +124,7 @@ struct intel_gt { struct intel_engine_cs *engine[I915_NUM_ENGINES]; struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1] [MAX_ENGINE_INSTANCE + 1]; + enum intel_submission_method submission_method; /* * Default address space (either GGTT or ppGTT depending on arch). 
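intel_engine_set_irq_handler() above publishes the callback with smp_store_mb(), a store followed by a full barrier, so an interrupt that fires immediately afterwards can never observe a stale pointer; the ring backend later in this patch installs its handler through exactly this helper. Roughly the same ordering expressed with C11 atomics (a sketch; the types and names are illustrative, not driver API):

#include <stdatomic.h>

typedef void (*irq_fn)(void *engine, unsigned short iir);

struct fake_engine {
	_Atomic(irq_fn) irq_handler;
};

static void set_irq_handler(struct fake_engine *e, irq_fn fn)
{
	/* seq_cst store ~= smp_store_mb(): the new pointer is globally
	 * visible before anything the caller does next, e.g. unmasking
	 * the interrupt in hardware. */
	atomic_store(&e->irq_handler, fn);
}

static void cs_irq(struct fake_engine *e, unsigned short iir)
{
	irq_fn fn = atomic_load(&e->irq_handler);	/* the paired read */

	if (iir && fn)
		fn(e, iir);
}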
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c index 941f8af016d6..084ea65d59c0 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.c +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c @@ -7,10 +7,29 @@ #include <linux/fault-inject.h> +#include "gem/i915_gem_lmem.h" #include "i915_trace.h" #include "intel_gt.h" #include "intel_gtt.h" +struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz) +{ + struct drm_i915_gem_object *obj; + + obj = i915_gem_object_create_lmem(vm->i915, sz, 0); + /* + * Ensure all paging structures for this vm share the same dma-resv + * object underneath, with the idea that one object_lock() will lock + * them all at once. + */ + if (!IS_ERR(obj)) { + obj->base.resv = i915_vm_resv_get(vm); + obj->shares_resv_from = vm; + } + + return obj; +} + struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz) { struct drm_i915_gem_object *obj; @@ -19,33 +38,42 @@ struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz) i915_gem_shrink_all(vm->i915); obj = i915_gem_object_create_internal(vm->i915, sz); - /* ensure all dma objects have the same reservation class */ - if (!IS_ERR(obj)) - obj->base.resv = &vm->resv; + /* + * Ensure all paging structures for this vm share the same dma-resv + * object underneath, with the idea that one object_lock() will lock + * them all at once. + */ + if (!IS_ERR(obj)) { + obj->base.resv = i915_vm_resv_get(vm); + obj->shares_resv_from = vm; + } + return obj; } -int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj) +int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj) { - int err; + enum i915_map_type type; + void *vaddr; - i915_gem_object_lock(obj, NULL); - err = i915_gem_object_pin_pages(obj); - i915_gem_object_unlock(obj); - if (err) - return err; + type = i915_coherent_map_type(vm->i915, obj, true); + vaddr = i915_gem_object_pin_map_unlocked(obj, type); + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); i915_gem_object_make_unshrinkable(obj); return 0; } -int pin_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj) +int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj) { - int err; + enum i915_map_type type; + void *vaddr; - err = i915_gem_object_pin_pages(obj); - if (err) - return err; + type = i915_coherent_map_type(vm->i915, obj, true); + vaddr = i915_gem_object_pin_map(obj, type); + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); i915_gem_object_make_unshrinkable(obj); return 0; @@ -80,7 +108,7 @@ void __i915_vm_close(struct i915_address_space *vm) int i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww) { - if (vm->scratch[0]->base.resv == &vm->resv) { + if (vm->scratch[0]->base.resv == &vm->_resv) { return i915_gem_object_lock(vm->scratch[0], ww); } else { struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); @@ -96,6 +124,22 @@ void i915_address_space_fini(struct i915_address_space *vm) mutex_destroy(&vm->mutex); } +/** + * i915_vm_resv_release - Final struct i915_address_space destructor + * @kref: Pointer to the &i915_address_space.resv_ref member. + * + * This function is called when the last lock sharer no longer shares the + * &i915_address_space._resv lock. 
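Both allocators above point every page-table object at the vm's embedded reservation lock, so that lock allocation has to survive until the last such object is gone, independently of the vm's own lifetime; that is what the separate resv_ref kref achieves. The lifetime rule in miniature, with a plain counter standing in for struct kref (a sketch under those assumptions, not the driver's code):

#include <stdlib.h>

struct vm {
	int ref;	/* users of the address space itself */
	int resv_ref;	/* sharers of the embedded reservation lock */
	/* the shared lock (struct dma_resv _resv) lives in here */
};

static struct vm *vm_resv_get(struct vm *vm)
{
	vm->resv_ref++;		/* cf. kref_get(&vm->resv_ref) */
	return vm;
}

static void vm_resv_put(struct vm *vm)
{
	if (--vm->resv_ref == 0)
		free(vm);	/* the last put frees the allocation, as in i915_vm_resv_release() */
}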
+ */ +void i915_vm_resv_release(struct kref *kref) +{ + struct i915_address_space *vm = + container_of(kref, typeof(*vm), resv_ref); + + dma_resv_fini(&vm->_resv); + kfree(vm); +} + static void __i915_vm_release(struct work_struct *work) { struct i915_address_space *vm = @@ -103,9 +147,8 @@ static void __i915_vm_release(struct work_struct *work) vm->cleanup(vm); i915_address_space_fini(vm); - dma_resv_fini(&vm->resv); - kfree(vm); + i915_vm_resv_put(vm); } void i915_vm_release(struct kref *kref) @@ -122,6 +165,14 @@ void i915_vm_release(struct kref *kref) void i915_address_space_init(struct i915_address_space *vm, int subclass) { kref_init(&vm->ref); + + /* + * Special case for GGTT that has already done an early + * kref_init here. + */ + if (!kref_read(&vm->resv_ref)) + kref_init(&vm->resv_ref); + INIT_RCU_WORK(&vm->rcu, __i915_vm_release); atomic_set(&vm->open, 1); @@ -132,8 +183,23 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass) */ mutex_init(&vm->mutex); lockdep_set_subclass(&vm->mutex, subclass); - i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex); - dma_resv_init(&vm->resv); + + if (!intel_vm_no_concurrent_access_wa(vm->i915)) { + i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex); + } else { + /* + * CHV + BXT VTD workaround use stop_machine(), + * which is allowed to allocate memory. This means &vm->mutex + * is the outer lock, and in theory we can allocate memory inside + * it through stop_machine(). + * + * Add the annotation for this, we use trylock in shrinker. + */ + mutex_acquire(&vm->mutex.dep_map, 0, 0, _THIS_IP_); + might_alloc(GFP_KERNEL); + mutex_release(&vm->mutex.dep_map, _THIS_IP_); + } + dma_resv_init(&vm->_resv); GEM_BUG_ON(!vm->total); drm_mm_init(&vm->mm, 0, vm->total); @@ -155,6 +221,14 @@ void clear_pages(struct i915_vma *vma) memset(&vma->page_sizes, 0, sizeof(vma->page_sizes)); } +void *__px_vaddr(struct drm_i915_gem_object *p) +{ + enum i915_map_type type; + + GEM_BUG_ON(!i915_gem_object_has_pages(p)); + return page_unpack_bits(p->mm.mapping, &type); +} + dma_addr_t __px_dma(struct drm_i915_gem_object *p) { GEM_BUG_ON(!i915_gem_object_has_pages(p)); @@ -170,32 +244,22 @@ struct page *__px_page(struct drm_i915_gem_object *p) void fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count) { - struct page *page = __px_page(p); - void *vaddr; + void *vaddr = __px_vaddr(p); - vaddr = kmap(page); memset64(vaddr, val, count); clflush_cache_range(vaddr, PAGE_SIZE); - kunmap(page); } static void poison_scratch_page(struct drm_i915_gem_object *scratch) { - struct sgt_iter sgt; - struct page *page; + void *vaddr = __px_vaddr(scratch); u8 val; val = 0; if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) val = POISON_FREE; - for_each_sgt_page(page, sgt, scratch->mm.pages) { - void *vaddr; - - vaddr = kmap(page); - memset(vaddr, val, PAGE_SIZE); - kunmap(page); - } + memset(vaddr, val, scratch->base.size); } int setup_scratch_page(struct i915_address_space *vm) @@ -225,7 +289,7 @@ int setup_scratch_page(struct i915_address_space *vm) if (IS_ERR(obj)) goto skip; - if (pin_pt_dma(vm, obj)) + if (map_pt_dma(vm, obj)) goto skip_obj; /* We need a single contiguous page for our scratch */ @@ -292,7 +356,7 @@ void gtt_write_workarounds(struct intel_gt *gt) intel_uncore_write(uncore, GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); - else if (INTEL_GEN(i915) >= 9 && INTEL_GEN(i915) <= 11) + else if (GRAPHICS_VER(i915) >= 9 && GRAPHICS_VER(i915) <= 11) intel_uncore_write(uncore, GEN8_L3_LRA_1_GPGPU, 
GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL); @@ -309,13 +373,13 @@ void gtt_write_workarounds(struct intel_gt *gt) * driver. */ if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) && - INTEL_GEN(i915) <= 10) + GRAPHICS_VER(i915) <= 10) intel_uncore_rmw(uncore, GEN8_GAMW_ECO_DEV_RW_IA, 0, GAMW_ECO_ENABLE_64K_IPS_FIELD); - if (IS_GEN_RANGE(i915, 8, 11)) { + if (IS_GRAPHICS_VER(i915, 8, 11)) { bool can_use_gtt_cache = true; /* @@ -397,7 +461,7 @@ static void bdw_setup_private_ppat(struct intel_uncore *uncore) GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); /* for scanout with eLLC */ - if (INTEL_GEN(i915) >= 9) + if (GRAPHICS_VER(i915) >= 9) pat |= GEN8_PPAT(2, GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE); else pat |= GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); @@ -446,11 +510,11 @@ void setup_private_pat(struct intel_uncore *uncore) { struct drm_i915_private *i915 = uncore->i915; - GEM_BUG_ON(INTEL_GEN(i915) < 8); + GEM_BUG_ON(GRAPHICS_VER(i915) < 8); - if (INTEL_GEN(i915) >= 12) + if (GRAPHICS_VER(i915) >= 12) tgl_setup_private_ppat(uncore); - else if (INTEL_GEN(i915) >= 10) + else if (GRAPHICS_VER(i915) >= 10) cnl_setup_private_ppat(uncore); else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915)) chv_setup_private_ppat(uncore); diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h index e67e34e17913..edea95b97c36 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.h +++ b/drivers/gpu/drm/i915/gt/intel_gtt.h @@ -180,6 +180,9 @@ struct page *__px_page(struct drm_i915_gem_object *p); dma_addr_t __px_dma(struct drm_i915_gem_object *p); #define px_dma(px) (__px_dma(px_base(px))) +void *__px_vaddr(struct drm_i915_gem_object *p); +#define px_vaddr(px) (__px_vaddr(px_base(px))) + #define px_pt(px) \ __px_choose_expr(px, struct i915_page_table *, __x, \ __px_choose_expr(px, struct i915_page_directory *, &__x->pt, \ @@ -242,9 +245,12 @@ struct i915_address_space { atomic_t open; struct mutex mutex; /* protects vma and our lists */ - struct dma_resv resv; /* reservation lock for all pd objects, and buffer pool */ + + struct kref resv_ref; /* kref to keep the reservation lock alive. */ + struct dma_resv _resv; /* reservation lock for all pd objects, and buffer pool */ #define VM_CLASS_GGTT 0 #define VM_CLASS_PPGTT 1 +#define VM_CLASS_DPT 2 struct drm_i915_gem_object *scratch[4]; /** @@ -255,6 +261,9 @@ struct i915_address_space { /* Global GTT */ bool is_ggtt:1; + /* Display page table */ + bool is_dpt:1; + /* Some systems support read-only mappings for GGTT and/or PPGTT */ bool has_read_only:1; @@ -351,6 +360,8 @@ struct i915_ppgtt { }; #define i915_is_ggtt(vm) ((vm)->is_ggtt) +#define i915_is_dpt(vm) ((vm)->is_dpt) +#define i915_is_ggtt_or_dpt(vm) (i915_is_ggtt(vm) || i915_is_dpt(vm)) int __must_check i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww); @@ -385,7 +396,7 @@ static inline struct i915_ppgtt * i915_vm_to_ppgtt(struct i915_address_space *vm) { BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm)); - GEM_BUG_ON(i915_is_ggtt(vm)); + GEM_BUG_ON(i915_is_ggtt_or_dpt(vm)); return container_of(vm, struct i915_ppgtt, vm); } @@ -396,13 +407,36 @@ i915_vm_get(struct i915_address_space *vm) return vm; } +/** + * i915_vm_resv_get - Obtain a reference on the vm's reservation lock + * @vm: The vm whose reservation lock we want to share. + * + * Return: A pointer to the vm's reservation lock. 
+ */ +static inline struct dma_resv *i915_vm_resv_get(struct i915_address_space *vm) +{ + kref_get(&vm->resv_ref); + return &vm->_resv; +} + void i915_vm_release(struct kref *kref); +void i915_vm_resv_release(struct kref *kref); + static inline void i915_vm_put(struct i915_address_space *vm) { kref_put(&vm->ref, i915_vm_release); } +/** + * i915_vm_resv_put - Release a reference on the vm's reservation lock + * @resv: Pointer to a reservation lock obtained from i915_vm_resv_get() + */ +static inline void i915_vm_resv_put(struct i915_address_space *vm) +{ + kref_put(&vm->resv_ref, i915_vm_resv_release); +} + static inline struct i915_address_space * i915_vm_open(struct i915_address_space *vm) { @@ -498,6 +532,7 @@ void i915_ggtt_enable_guc(struct i915_ggtt *ggtt); void i915_ggtt_disable_guc(struct i915_ggtt *ggtt); int i915_init_ggtt(struct drm_i915_private *i915); void i915_ggtt_driver_release(struct drm_i915_private *i915); +void i915_ggtt_driver_late_release(struct drm_i915_private *i915); static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt) { @@ -511,8 +546,6 @@ struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt); void i915_ggtt_suspend(struct i915_ggtt *gtt); void i915_ggtt_resume(struct i915_ggtt *ggtt); -#define kmap_atomic_px(px) kmap_atomic(__px_page(px_base(px))) - void fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count); @@ -526,12 +559,13 @@ int setup_scratch_page(struct i915_address_space *vm); void free_scratch(struct i915_address_space *vm); struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz); +struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz); struct i915_page_table *alloc_pt(struct i915_address_space *vm); struct i915_page_directory *alloc_pd(struct i915_address_space *vm); struct i915_page_directory *__alloc_pd(int npde); -int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj); -int pin_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj); +int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj); +int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj); void free_px(struct i915_address_space *vm, struct i915_page_table *pt, int lvl); @@ -578,7 +612,7 @@ void setup_private_pat(struct intel_uncore *uncore); int i915_vm_alloc_pt_stash(struct i915_address_space *vm, struct i915_vm_pt_stash *stash, u64 size); -int i915_vm_pin_pt_stash(struct i915_address_space *vm, +int i915_vm_map_pt_stash(struct i915_address_space *vm, struct i915_vm_pt_stash *stash); void i915_vm_free_pt_stash(struct i915_address_space *vm, struct i915_vm_pt_stash *stash); diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c index 075d741644ae..eb1a15deed22 100644 --- a/drivers/gpu/drm/i915/gt/intel_llc.c +++ b/drivers/gpu/drm/i915/gt/intel_llc.c @@ -64,7 +64,7 @@ static bool get_ia_constants(struct intel_llc *llc, consts->min_gpu_freq = rps->min_freq; consts->max_gpu_freq = rps->max_freq; - if (INTEL_GEN(i915) >= 9) { + if (GRAPHICS_VER(i915) >= 9) { /* Convert GT frequency to 50 HZ units */ consts->min_gpu_freq /= GEN9_FREQ_SCALER; consts->max_gpu_freq /= GEN9_FREQ_SCALER; @@ -83,13 +83,13 @@ static void calc_ia_freq(struct intel_llc *llc, const int diff = consts->max_gpu_freq - gpu_freq; unsigned int ia_freq = 0, ring_freq = 0; - if (INTEL_GEN(i915) >= 9) { + if (GRAPHICS_VER(i915) >= 9) { /* * ring_freq = 2 * GT. 
ring_freq is in 100MHz units * No floor required for ring frequency on SKL. */ ring_freq = gpu_freq; - } else if (INTEL_GEN(i915) >= 8) { + } else if (GRAPHICS_VER(i915) >= 8) { /* max(2 * GT, DDR). NB: GT is 50MHz units */ ring_freq = max(consts->min_ring_freq, gpu_freq); } else if (IS_HASWELL(i915)) { diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index e86897cde984..a27bac0a4bfb 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -47,7 +47,7 @@ static void set_offsets(u32 *regs, *regs = MI_LOAD_REGISTER_IMM(count); if (flags & POSTED) *regs |= MI_LRI_FORCE_POSTED; - if (INTEL_GEN(engine->i915) >= 11) + if (GRAPHICS_VER(engine->i915) >= 11) *regs |= MI_LRI_LRM_CS_MMIO; regs++; @@ -70,7 +70,7 @@ static void set_offsets(u32 *regs, if (close) { /* Close the batch; used mainly by live_lrc_layout() */ *regs = MI_BATCH_BUFFER_END; - if (INTEL_GEN(engine->i915) >= 10) + if (GRAPHICS_VER(engine->i915) >= 10) *regs |= BIT(0); } } @@ -498,22 +498,22 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine) * addressing to automatic fixup the register state between the * physical engines for virtual engine. */ - GEM_BUG_ON(INTEL_GEN(engine->i915) >= 12 && + GEM_BUG_ON(GRAPHICS_VER(engine->i915) >= 12 && !intel_engine_has_relative_mmio(engine)); if (engine->class == RENDER_CLASS) { - if (INTEL_GEN(engine->i915) >= 12) + if (GRAPHICS_VER(engine->i915) >= 12) return gen12_rcs_offsets; - else if (INTEL_GEN(engine->i915) >= 11) + else if (GRAPHICS_VER(engine->i915) >= 11) return gen11_rcs_offsets; - else if (INTEL_GEN(engine->i915) >= 9) + else if (GRAPHICS_VER(engine->i915) >= 9) return gen9_rcs_offsets; else return gen8_rcs_offsets; } else { - if (INTEL_GEN(engine->i915) >= 12) + if (GRAPHICS_VER(engine->i915) >= 12) return gen12_xcs_offsets; - else if (INTEL_GEN(engine->i915) >= 9) + else if (GRAPHICS_VER(engine->i915) >= 9) return gen9_xcs_offsets; else return gen8_xcs_offsets; @@ -522,9 +522,9 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine) static int lrc_ring_mi_mode(const struct intel_engine_cs *engine) { - if (INTEL_GEN(engine->i915) >= 12) + if (GRAPHICS_VER(engine->i915) >= 12) return 0x60; - else if (INTEL_GEN(engine->i915) >= 9) + else if (GRAPHICS_VER(engine->i915) >= 9) return 0x54; else if (engine->class == RENDER_CLASS) return 0x58; @@ -534,9 +534,9 @@ static int lrc_ring_mi_mode(const struct intel_engine_cs *engine) static int lrc_ring_gpr0(const struct intel_engine_cs *engine) { - if (INTEL_GEN(engine->i915) >= 12) + if (GRAPHICS_VER(engine->i915) >= 12) return 0x74; - else if (INTEL_GEN(engine->i915) >= 9) + else if (GRAPHICS_VER(engine->i915) >= 9) return 0x68; else if (engine->class == RENDER_CLASS) return 0xd8; @@ -546,9 +546,9 @@ static int lrc_ring_gpr0(const struct intel_engine_cs *engine) static int lrc_ring_wa_bb_per_ctx(const struct intel_engine_cs *engine) { - if (INTEL_GEN(engine->i915) >= 12) + if (GRAPHICS_VER(engine->i915) >= 12) return 0x12; - else if (INTEL_GEN(engine->i915) >= 9 || engine->class == RENDER_CLASS) + else if (GRAPHICS_VER(engine->i915) >= 9 || engine->class == RENDER_CLASS) return 0x18; else return -1; @@ -581,9 +581,9 @@ static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine) if (engine->class != RENDER_CLASS) return -1; - if (INTEL_GEN(engine->i915) >= 12) + if (GRAPHICS_VER(engine->i915) >= 12) return 0xb6; - else if (INTEL_GEN(engine->i915) >= 11) + else if (GRAPHICS_VER(engine->i915) >= 11) return 0xaa; else return -1; 
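All the lrc_ring_*() helpers above share one shape: pick a Dword offset into the per-context register state by graphics version, returning -1 when the register has no slot in that layout. Reduced to a standalone sketch, with the offsets copied from lrc_ring_mi_mode():

static int ring_mi_mode_offset(int graphics_ver, int is_render)
{
	if (graphics_ver >= 12)
		return 0x60;
	if (graphics_ver >= 9)
		return 0x54;
	if (is_render)
		return 0x58;
	return -1;	/* no MI_MODE slot in this context layout */
}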
@@ -592,9 +592,9 @@ static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine) static u32 lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine) { - switch (INTEL_GEN(engine->i915)) { + switch (GRAPHICS_VER(engine->i915)) { default: - MISSING_CASE(INTEL_GEN(engine->i915)); + MISSING_CASE(GRAPHICS_VER(engine->i915)); fallthrough; case 12: return GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; @@ -637,7 +637,7 @@ static void init_common_regs(u32 * const regs, ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); if (inhibit) ctl |= CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT; - if (INTEL_GEN(engine->i915) < 11) + if (GRAPHICS_VER(engine->i915) < 11) ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT | CTX_CTRL_RS_CTX_ENABLE); regs[CTX_CONTEXT_CONTROL] = ctl; @@ -805,7 +805,7 @@ __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine) if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) context_size += I915_GTT_PAGE_SIZE; /* for redzone */ - if (INTEL_GEN(engine->i915) == 12) { + if (GRAPHICS_VER(engine->i915) == 12) { ce->wa_bb_page = context_size / PAGE_SIZE; context_size += PAGE_SIZE; } @@ -903,7 +903,9 @@ lrc_pre_pin(struct intel_context *ce, GEM_BUG_ON(!i915_vma_is_pinned(ce->state)); *vaddr = i915_gem_object_pin_map(ce->state->obj, - i915_coherent_map_type(ce->engine->i915) | + i915_coherent_map_type(ce->engine->i915, + ce->state->obj, + false) | I915_MAP_OVERRIDE); return PTR_ERR_OR_ZERO(*vaddr); @@ -1112,7 +1114,7 @@ static u32 lrc_descriptor(const struct intel_context *ce) desc <<= GEN8_CTX_ADDRESSING_MODE_SHIFT; desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE; - if (IS_GEN(ce->vm->i915, 8)) + if (GRAPHICS_VER(ce->vm->i915) == 8) desc |= GEN8_CTX_L3LLC_COHERENT; return i915_ggtt_offset(ce->state) | desc; @@ -1467,7 +1469,7 @@ void lrc_init_wa_ctx(struct intel_engine_cs *engine) if (engine->class != RENDER_CLASS) return; - switch (INTEL_GEN(engine->i915)) { + switch (GRAPHICS_VER(engine->i915)) { case 12: case 11: return; @@ -1484,7 +1486,7 @@ void lrc_init_wa_ctx(struct intel_engine_cs *engine) wa_bb_fn[1] = NULL; break; default: - MISSING_CASE(INTEL_GEN(engine->i915)); + MISSING_CASE(GRAPHICS_VER(engine->i915)); return; } diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index b14138fd505c..17848807f111 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -344,11 +344,11 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915, table->size = ARRAY_SIZE(dg1_mocs_table); table->table = dg1_mocs_table; table->n_entries = GEN9_NUM_MOCS_ENTRIES; - } else if (INTEL_GEN(i915) >= 12) { + } else if (GRAPHICS_VER(i915) >= 12) { table->size = ARRAY_SIZE(tgl_mocs_table); table->table = tgl_mocs_table; table->n_entries = GEN9_NUM_MOCS_ENTRIES; - } else if (IS_GEN(i915, 11)) { + } else if (GRAPHICS_VER(i915) == 11) { table->size = ARRAY_SIZE(icl_mocs_table); table->table = icl_mocs_table; table->n_entries = GEN9_NUM_MOCS_ENTRIES; @@ -361,7 +361,7 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915, table->n_entries = GEN9_NUM_MOCS_ENTRIES; table->table = broxton_mocs_table; } else { - drm_WARN_ONCE(&i915->drm, INTEL_GEN(i915) >= 9, + drm_WARN_ONCE(&i915->drm, GRAPHICS_VER(i915) >= 9, "Platform that should have a MOCS table does not.\n"); return 0; } @@ -370,7 +370,7 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915, return 0; /* WaDisableSkipCaching:skl,bxt,kbl,glk */ - if (IS_GEN(i915, 9)) { + if 
(GRAPHICS_VER(i915) == 9) { int i; for (i = 0; i < table->size; i++) diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c index 014ae8ac4480..886060f7e6fc 100644 --- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c @@ -87,11 +87,10 @@ write_dma_entry(struct drm_i915_gem_object * const pdma, const unsigned short idx, const u64 encoded_entry) { - u64 * const vaddr = kmap_atomic(__px_page(pdma)); + u64 * const vaddr = __px_vaddr(pdma); vaddr[idx] = encoded_entry; clflush_cache_range(&vaddr[idx], sizeof(u64)); - kunmap_atomic(vaddr); } void @@ -147,9 +146,9 @@ int i915_ppgtt_init_hw(struct intel_gt *gt) gtt_write_workarounds(gt); - if (IS_GEN(i915, 6)) + if (GRAPHICS_VER(i915) == 6) gen6_ppgtt_enable(gt); - else if (IS_GEN(i915, 7)) + else if (GRAPHICS_VER(i915) == 7) gen7_ppgtt_enable(gt); return 0; @@ -158,7 +157,7 @@ int i915_ppgtt_init_hw(struct intel_gt *gt) static struct i915_ppgtt * __ppgtt_create(struct intel_gt *gt) { - if (INTEL_GEN(gt->i915) < 8) + if (GRAPHICS_VER(gt->i915) < 8) return gen6_ppgtt_create(gt); else return gen8_ppgtt_create(gt); @@ -258,7 +257,7 @@ int i915_vm_alloc_pt_stash(struct i915_address_space *vm, return 0; } -int i915_vm_pin_pt_stash(struct i915_address_space *vm, +int i915_vm_map_pt_stash(struct i915_address_space *vm, struct i915_vm_pt_stash *stash) { struct i915_page_table *pt; @@ -266,7 +265,7 @@ int i915_vm_pin_pt_stash(struct i915_address_space *vm, for (n = 0; n < ARRAY_SIZE(stash->pt); n++) { for (pt = stash->pt[n]; pt; pt = pt->stash) { - err = pin_pt_dma_locked(vm, pt->base); + err = map_pt_dma_locked(vm, pt->base); if (err) return err; } @@ -308,7 +307,7 @@ void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt) ppgtt->vm.dma = i915->drm.dev; ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size); - dma_resv_init(&ppgtt->vm.resv); + dma_resv_init(&ppgtt->vm._resv); i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT); ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma; diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c index 3b7e62debe7e..259d7eb4e165 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6.c +++ b/drivers/gpu/drm/i915/gt/intel_rc6.c @@ -109,7 +109,7 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6) GEN9_MEDIA_PG_ENABLE | GEN11_MEDIA_SAMPLER_PG_ENABLE; - if (INTEL_GEN(gt->i915) >= 12) { + if (GRAPHICS_VER(gt->i915) >= 12) { for (i = 0; i < I915_MAX_VCS; i++) if (HAS_ENGINE(gt, _VCS(i))) pg_enable |= (VDN_HCP_POWERGATE_ENABLE(i) | @@ -126,7 +126,7 @@ static void gen9_rc6_enable(struct intel_rc6 *rc6) enum intel_engine_id id; /* 2b: Program RC6 thresholds.*/ - if (INTEL_GEN(rc6_to_i915(rc6)) >= 10) { + if (GRAPHICS_VER(rc6_to_i915(rc6)) >= 10) { set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85); set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150); } else if (IS_SKYLAKE(rc6_to_i915(rc6))) { @@ -249,9 +249,9 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6) rc6vids = 0; ret = sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL); - if (IS_GEN(i915, 6) && ret) { + if (GRAPHICS_VER(i915) == 6 && ret) { drm_dbg(&i915->drm, "Couldn't check for BIOS workaround\n"); - } else if (IS_GEN(i915, 6) && + } else if (GRAPHICS_VER(i915) == 6 && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { drm_dbg(&i915->drm, "You should update your BIOS. 
Correcting minimum rc6 voltage (%dmV->%dmV)\n", @@ -515,7 +515,7 @@ static void __intel_rc6_disable(struct intel_rc6 *rc6) struct intel_uncore *uncore = rc6_to_uncore(rc6); intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); - if (INTEL_GEN(i915) >= 9) + if (GRAPHICS_VER(i915) >= 9) set(uncore, GEN9_PG_ENABLE, 0); set(uncore, GEN6_RC_CONTROL, 0); set(uncore, GEN6_RC_STATE, 0); @@ -575,13 +575,13 @@ void intel_rc6_enable(struct intel_rc6 *rc6) chv_rc6_enable(rc6); else if (IS_VALLEYVIEW(i915)) vlv_rc6_enable(rc6); - else if (INTEL_GEN(i915) >= 11) + else if (GRAPHICS_VER(i915) >= 11) gen11_rc6_enable(rc6); - else if (INTEL_GEN(i915) >= 9) + else if (GRAPHICS_VER(i915) >= 9) gen9_rc6_enable(rc6); else if (IS_BROADWELL(i915)) gen8_rc6_enable(rc6); - else if (INTEL_GEN(i915) >= 6) + else if (GRAPHICS_VER(i915) >= 6) gen6_rc6_enable(rc6); rc6->manual = rc6->ctl_enable & GEN6_RC_CTL_RC6_ENABLE; diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c index be6f2c8f5184..f7366b054f8e 100644 --- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c +++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c @@ -5,6 +5,8 @@ #include "i915_drv.h" #include "intel_memory_region.h" +#include "intel_region_lmem.h" +#include "intel_region_ttm.h" #include "gem/i915_gem_lmem.h" #include "gem/i915_gem_region.h" #include "intel_region_lmem.h" @@ -66,9 +68,9 @@ static void release_fake_lmem_bar(struct intel_memory_region *mem) static void region_lmem_release(struct intel_memory_region *mem) { - release_fake_lmem_bar(mem); + intel_region_ttm_fini(mem); io_mapping_fini(&mem->iomap); - intel_memory_region_release_buddy(mem); + release_fake_lmem_bar(mem); } static int @@ -83,12 +85,21 @@ region_lmem_init(struct intel_memory_region *mem) if (!io_mapping_init_wc(&mem->iomap, mem->io_start, - resource_size(&mem->region))) - return -EIO; + resource_size(&mem->region))) { + ret = -EIO; + goto out_no_io; + } - ret = intel_memory_region_init_buddy(mem); + ret = intel_region_ttm_init(mem); if (ret) - io_mapping_fini(&mem->iomap); + goto out_no_buddy; + + return 0; + +out_no_buddy: + io_mapping_fini(&mem->iomap); +out_no_io: + release_fake_lmem_bar(mem); return ret; } @@ -127,6 +138,8 @@ intel_gt_setup_fake_lmem(struct intel_gt *gt) mappable_end, PAGE_SIZE, io_start, + INTEL_MEMORY_LOCAL, + 0, &intel_region_lmem_ops); if (!IS_ERR(mem)) { drm_info(&i915->drm, "Intel graphics fake LMEM: %pR\n", @@ -177,7 +190,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt) { struct drm_i915_private *i915 = gt->i915; struct intel_uncore *uncore = gt->uncore; - struct pci_dev *pdev = i915->drm.pdev; + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); struct intel_memory_region *mem; resource_size_t io_start; resource_size_t lmem_size; @@ -198,6 +211,8 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt) lmem_size, I915_GTT_PAGE_SIZE_4K, io_start, + INTEL_MEMORY_LOCAL, + 0, &intel_region_lmem_ops); if (IS_ERR(mem)) return mem; diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c index b03e197b1d99..b575cd6e0b7a 100644 --- a/drivers/gpu/drm/i915/gt/intel_renderstate.c +++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c @@ -15,7 +15,7 @@ render_state_get_rodata(const struct intel_engine_cs *engine) if (engine->class != RENDER_CLASS) return NULL; - switch (INTEL_GEN(engine->i915)) { + switch (GRAPHICS_VER(engine->i915)) { case 6: return &gen6_null_state; case 7: diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c 
b/drivers/gpu/drm/i915/gt/intel_reset.c index a377c4588aaa..72251638d4ea 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -338,15 +338,69 @@ static int gen6_reset_engines(struct intel_gt *gt, return gen6_hw_domain_reset(gt, hw_mask); } -static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask) +static struct intel_engine_cs *find_sfc_paired_vecs_engine(struct intel_engine_cs *engine) +{ + int vecs_id; + + GEM_BUG_ON(engine->class != VIDEO_DECODE_CLASS); + + vecs_id = _VECS((engine->instance) / 2); + + return engine->gt->engine[vecs_id]; +} + +struct sfc_lock_data { + i915_reg_t lock_reg; + i915_reg_t ack_reg; + i915_reg_t usage_reg; + u32 lock_bit; + u32 ack_bit; + u32 usage_bit; + u32 reset_bit; +}; + +static void get_sfc_forced_lock_data(struct intel_engine_cs *engine, + struct sfc_lock_data *sfc_lock) +{ + switch (engine->class) { + default: + MISSING_CASE(engine->class); + fallthrough; + case VIDEO_DECODE_CLASS: + sfc_lock->lock_reg = GEN11_VCS_SFC_FORCED_LOCK(engine); + sfc_lock->lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT; + + sfc_lock->ack_reg = GEN11_VCS_SFC_LOCK_STATUS(engine); + sfc_lock->ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT; + + sfc_lock->usage_reg = GEN11_VCS_SFC_LOCK_STATUS(engine); + sfc_lock->usage_bit = GEN11_VCS_SFC_USAGE_BIT; + sfc_lock->reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance); + + break; + case VIDEO_ENHANCEMENT_CLASS: + sfc_lock->lock_reg = GEN11_VECS_SFC_FORCED_LOCK(engine); + sfc_lock->lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT; + + sfc_lock->ack_reg = GEN11_VECS_SFC_LOCK_ACK(engine); + sfc_lock->ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT; + + sfc_lock->usage_reg = GEN11_VECS_SFC_USAGE(engine); + sfc_lock->usage_bit = GEN11_VECS_SFC_USAGE_BIT; + sfc_lock->reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance); + + break; + } +} + +static int gen11_lock_sfc(struct intel_engine_cs *engine, + u32 *reset_mask, + u32 *unlock_mask) { struct intel_uncore *uncore = engine->uncore; u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access; - i915_reg_t sfc_forced_lock, sfc_forced_lock_ack; - u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit; - i915_reg_t sfc_usage; - u32 sfc_usage_bit; - u32 sfc_reset_bit; + struct sfc_lock_data sfc_lock; + bool lock_obtained, lock_to_other = false; int ret; switch (engine->class) { @@ -354,53 +408,72 @@ static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask) if ((BIT(engine->instance) & vdbox_sfc_access) == 0) return 0; - sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine); - sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT; - - sfc_forced_lock_ack = GEN11_VCS_SFC_LOCK_STATUS(engine); - sfc_forced_lock_ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT; + fallthrough; + case VIDEO_ENHANCEMENT_CLASS: + get_sfc_forced_lock_data(engine, &sfc_lock); - sfc_usage = GEN11_VCS_SFC_LOCK_STATUS(engine); - sfc_usage_bit = GEN11_VCS_SFC_USAGE_BIT; - sfc_reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance); break; + default: + return 0; + } - case VIDEO_ENHANCEMENT_CLASS: - sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine); - sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT; + if (!(intel_uncore_read_fw(uncore, sfc_lock.usage_reg) & sfc_lock.usage_bit)) { + struct intel_engine_cs *paired_vecs; - sfc_forced_lock_ack = GEN11_VECS_SFC_LOCK_ACK(engine); - sfc_forced_lock_ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT; + if (engine->class != VIDEO_DECODE_CLASS || + GRAPHICS_VER(engine->i915) != 12) + return 0; - sfc_usage = GEN11_VECS_SFC_USAGE(engine); - sfc_usage_bit = 
GEN11_VECS_SFC_USAGE_BIT; - sfc_reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance); - break; + /* + * Wa_14010733141 + * + * If the VCS-MFX isn't using the SFC, we also need to check + * whether VCS-HCP is using it. If so, we need to issue a *VE* + * forced lock on the VE engine that shares the same SFC. + */ + if (!(intel_uncore_read_fw(uncore, + GEN12_HCP_SFC_LOCK_STATUS(engine)) & + GEN12_HCP_SFC_USAGE_BIT)) + return 0; - default: - return 0; + paired_vecs = find_sfc_paired_vecs_engine(engine); + get_sfc_forced_lock_data(paired_vecs, &sfc_lock); + lock_to_other = true; + *unlock_mask |= paired_vecs->mask; + } else { + *unlock_mask |= engine->mask; } /* - * If the engine is using a SFC, tell the engine that a software reset + * If the engine is using an SFC, tell the engine that a software reset * is going to happen. The engine will then try to force lock the SFC. * If SFC ends up being locked to the engine we want to reset, we have * to reset it as well (we will unlock it once the reset sequence is * completed). */ - if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)) - return 0; - - rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit); + rmw_set_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit); ret = __intel_wait_for_register_fw(uncore, - sfc_forced_lock_ack, - sfc_forced_lock_ack_bit, - sfc_forced_lock_ack_bit, + sfc_lock.ack_reg, + sfc_lock.ack_bit, + sfc_lock.ack_bit, 1000, 0, NULL); - /* Was the SFC released while we were trying to lock it? */ - if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)) + /* + * Was the SFC released while we were trying to lock it? + * + * We should reset both the engine and the SFC if: + * - We were locking the SFC to this engine and the lock succeeded + * OR + * - We were locking the SFC to a different engine (Wa_14010733141) + * but the SFC was released before the lock was obtained. + * + * Otherwise we need only reset the engine by itself and we can + * leave the SFC alone. 
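The rule being described collapses to one comparison: the SFC joins the reset only when "the engine we locked for" and "whether the lock landed" disagree, i.e. when lock_obtained != lock_to_other. Enumerating the four cases in a tiny standalone program:

#include <stdio.h>

int main(void)
{
	for (int lock_to_other = 0; lock_to_other <= 1; lock_to_other++)
		for (int lock_obtained = 0; lock_obtained <= 1; lock_obtained++)
			printf("lock_to_other=%d lock_obtained=%d -> %s\n",
			       lock_to_other, lock_obtained,
			       lock_obtained == lock_to_other ?
			       "engine reset only" : "also reset the SFC");
	return 0;
}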
+ */ + lock_obtained = (intel_uncore_read_fw(uncore, sfc_lock.usage_reg) & + sfc_lock.usage_bit) != 0; + if (lock_obtained == lock_to_other) return 0; if (ret) { @@ -408,7 +481,7 @@ static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask) return ret; } - *hw_mask |= sfc_reset_bit; + *reset_mask |= sfc_lock.reset_bit; return 0; } @@ -416,28 +489,19 @@ static void gen11_unlock_sfc(struct intel_engine_cs *engine) { struct intel_uncore *uncore = engine->uncore; u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access; - i915_reg_t sfc_forced_lock; - u32 sfc_forced_lock_bit; - - switch (engine->class) { - case VIDEO_DECODE_CLASS: - if ((BIT(engine->instance) & vdbox_sfc_access) == 0) - return; + struct sfc_lock_data sfc_lock = {}; - sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine); - sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT; - break; - - case VIDEO_ENHANCEMENT_CLASS: - sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine); - sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT; - break; + if (engine->class != VIDEO_DECODE_CLASS && + engine->class != VIDEO_ENHANCEMENT_CLASS) + return; - default: + if (engine->class == VIDEO_DECODE_CLASS && + (BIT(engine->instance) & vdbox_sfc_access) == 0) return; - } - rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit); + get_sfc_forced_lock_data(engine, &sfc_lock); + + rmw_clear_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit); } static int gen11_reset_engines(struct intel_gt *gt, @@ -456,23 +520,23 @@ static int gen11_reset_engines(struct intel_gt *gt, }; struct intel_engine_cs *engine; intel_engine_mask_t tmp; - u32 hw_mask; + u32 reset_mask, unlock_mask = 0; int ret; if (engine_mask == ALL_ENGINES) { - hw_mask = GEN11_GRDOM_FULL; + reset_mask = GEN11_GRDOM_FULL; } else { - hw_mask = 0; + reset_mask = 0; for_each_engine_masked(engine, gt, engine_mask, tmp) { GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask)); - hw_mask |= hw_engine_mask[engine->id]; - ret = gen11_lock_sfc(engine, &hw_mask); + reset_mask |= hw_engine_mask[engine->id]; + ret = gen11_lock_sfc(engine, &reset_mask, &unlock_mask); if (ret) goto sfc_unlock; } } - ret = gen6_hw_domain_reset(gt, hw_mask); + ret = gen6_hw_domain_reset(gt, reset_mask); sfc_unlock: /* @@ -480,10 +544,14 @@ sfc_unlock: * gen11_lock_sfc to make sure that we clean properly if something * wrong happened during the lock (e.g. lock acquired after timeout * expiration). + * + * Due to Wa_14010733141, we may have locked an SFC to an engine that + * wasn't being reset. So instead of calling gen11_unlock_sfc() + * on engine_mask, we instead call it on the mask of engines that our + * gen11_lock_sfc() calls told us actually had locks attempted. 
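for_each_engine_masked() then walks only the engines whose bits ended up in unlock_mask, which is a plain set-bits iteration. In miniature (assuming GCC/Clang's __builtin_ctz; the engine ids are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t unlock_mask = 0x5;	/* say, engines 0 and 2 had lock attempts */

	while (unlock_mask) {
		int id = __builtin_ctz(unlock_mask);

		printf("gen11_unlock_sfc(engine[%d])\n", id);
		unlock_mask &= unlock_mask - 1;	/* clear the lowest set bit */
	}
	return 0;
}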
*/ - if (engine_mask != ALL_ENGINES) - for_each_engine_masked(engine, gt, engine_mask, tmp) - gen11_unlock_sfc(engine); + for_each_engine_masked(engine, gt, unlock_mask, tmp) + gen11_unlock_sfc(engine); return ret; } @@ -565,7 +633,7 @@ static int gen8_reset_engines(struct intel_gt *gt, */ } - if (INTEL_GEN(gt->i915) >= 11) + if (GRAPHICS_VER(gt->i915) >= 11) ret = gen11_reset_engines(gt, engine_mask, retry); else ret = gen6_reset_engines(gt, engine_mask, retry); @@ -594,17 +662,17 @@ static reset_func intel_get_gpu_reset(const struct intel_gt *gt) if (is_mock_gt(gt)) return mock_reset; - else if (INTEL_GEN(i915) >= 8) + else if (GRAPHICS_VER(i915) >= 8) return gen8_reset_engines; - else if (INTEL_GEN(i915) >= 6) + else if (GRAPHICS_VER(i915) >= 6) return gen6_reset_engines; - else if (INTEL_GEN(i915) >= 5) + else if (GRAPHICS_VER(i915) >= 5) return ilk_do_reset; else if (IS_G4X(i915)) return g4x_do_reset; else if (IS_G33(i915) || IS_PINEVIEW(i915)) return g33_do_reset; - else if (INTEL_GEN(i915) >= 3) + else if (GRAPHICS_VER(i915) >= 3) return i915_do_reset; else return NULL; @@ -656,7 +724,7 @@ bool intel_has_reset_engine(const struct intel_gt *gt) int intel_reset_guc(struct intel_gt *gt) { u32 guc_domain = - INTEL_GEN(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC; + GRAPHICS_VER(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC; int ret; GEM_BUG_ON(!HAS_GT_UC(gt->i915)); @@ -1118,7 +1186,6 @@ static int intel_gt_reset_engine(struct intel_engine_cs *engine) int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg) { struct intel_gt *gt = engine->gt; - bool uses_guc = intel_engine_in_guc_submission_mode(engine); int ret; ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags); @@ -1134,10 +1201,10 @@ int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg) "Resetting %s for %s\n", engine->name, msg); atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]); - if (!uses_guc) - ret = intel_gt_reset_engine(engine); - else + if (intel_engine_uses_guc(engine)) ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine); + else + ret = intel_gt_reset_engine(engine); if (ret) { /* If we fail here, we expect to fallback to a global reset */ ENGINE_TRACE(engine, "Failed to reset, err: %d\n", ret); diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c index aee0a77c77e0..7c4d5158e03b 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring.c +++ b/drivers/gpu/drm/i915/gt/intel_ring.c @@ -51,11 +51,14 @@ int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww) if (unlikely(ret)) goto err_unpin; - if (i915_vma_is_map_and_fenceable(vma)) + if (i915_vma_is_map_and_fenceable(vma)) { addr = (void __force *)i915_vma_pin_iomap(vma); - else - addr = i915_gem_object_pin_map(vma->obj, - i915_coherent_map_type(vma->vm->i915)); + } else { + int type = i915_coherent_map_type(vma->vm->i915, vma->obj, false); + + addr = i915_gem_object_pin_map(vma->obj, type); + } + if (IS_ERR(addr)) { ret = PTR_ERR(addr); goto err_ring; diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index 9585546556ee..37d74d4ed59b 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -12,6 +12,7 @@ #include "intel_breadcrumbs.h" #include "intel_context.h" #include "intel_gt.h" +#include "intel_gt_irq.h" #include "intel_reset.h" #include "intel_ring.h" #include "shmem_utils.h" @@ -28,7 +29,7 @@ static void 
set_hwstam(struct intel_engine_cs *engine, u32 mask) * lost interrupts following a reset. */ if (engine->class == RENDER_CLASS) { - if (INTEL_GEN(engine->i915) >= 6) + if (GRAPHICS_VER(engine->i915) >= 6) mask &= ~BIT(0); else mask &= ~I915_USER_INTERRUPT; @@ -42,7 +43,7 @@ static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys) u32 addr; addr = lower_32_bits(phys); - if (INTEL_GEN(engine->i915) >= 4) + if (GRAPHICS_VER(engine->i915) >= 4) addr |= (phys >> 28) & 0xf0; intel_uncore_write(engine->uncore, HWS_PGA, addr); @@ -70,7 +71,7 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset) * The ring status page addresses are no longer next to the rest of * the ring registers as of gen7. */ - if (IS_GEN(engine->i915, 7)) { + if (GRAPHICS_VER(engine->i915) == 7) { switch (engine->id) { /* * No more rings exist on Gen7. Default case is only to shut up @@ -92,7 +93,7 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset) hwsp = VEBOX_HWS_PGA_GEN7; break; } - } else if (IS_GEN(engine->i915, 6)) { + } else if (GRAPHICS_VER(engine->i915) == 6) { hwsp = RING_HWS_PGA_GEN6(engine->mmio_base); } else { hwsp = RING_HWS_PGA(engine->mmio_base); @@ -104,7 +105,7 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset) static void flush_cs_tlb(struct intel_engine_cs *engine) { - if (!IS_GEN_RANGE(engine->i915, 6, 7)) + if (!IS_GRAPHICS_VER(engine->i915, 6, 7)) return; /* ring should be idle before issuing a sync flush*/ @@ -152,7 +153,7 @@ static void set_pp_dir(struct intel_engine_cs *engine) ENGINE_WRITE_FW(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G); ENGINE_WRITE_FW(engine, RING_PP_DIR_BASE, pp_dir(vm)); - if (INTEL_GEN(engine->i915) >= 7) { + if (GRAPHICS_VER(engine->i915) >= 7) { ENGINE_WRITE_FW(engine, RING_MODE_GEN7, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); @@ -183,8 +184,11 @@ static int xcs_resume(struct intel_engine_cs *engine) ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n", ring->head, ring->tail); - /* Double check the ring is empty & disabled before we resume */ - synchronize_hardirq(engine->i915->drm.irq); + /* + * Double check the ring is empty & disabled before we resume. Called + * from atomic context during PCI probe, so _hardirq(). + */ + intel_synchronize_hardirq(engine->i915); if (!stop_ring(engine)) goto err; @@ -228,7 +232,7 @@ static int xcs_resume(struct intel_engine_cs *engine) 5000, 0, NULL)) goto err; - if (INTEL_GEN(engine->i915) > 2) + if (GRAPHICS_VER(engine->i915) > 2) ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); @@ -645,9 +649,9 @@ static int mi_set_context(struct i915_request *rq, u32 *cs; len = 4; - if (IS_GEN(i915, 7)) + if (GRAPHICS_VER(i915) == 7) len += 2 + (num_engines ? 4 * num_engines + 6 : 0); - else if (IS_GEN(i915, 5)) + else if (GRAPHICS_VER(i915) == 5) len += 2; if (flags & MI_FORCE_RESTORE) { GEM_BUG_ON(flags & MI_RESTORE_INHIBIT); @@ -661,7 +665,7 @@ static int mi_set_context(struct i915_request *rq, return PTR_ERR(cs); /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */ - if (IS_GEN(i915, 7)) { + if (GRAPHICS_VER(i915) == 7) { *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; if (num_engines) { struct intel_engine_cs *signaller; @@ -677,7 +681,7 @@ static int mi_set_context(struct i915_request *rq, GEN6_PSMI_SLEEP_MSG_DISABLE); } } - } else if (IS_GEN(i915, 5)) { + } else if (GRAPHICS_VER(i915) == 5) { /* * This w/a is only listed for pre-production ilk a/b steppings, * but is also mentioned for programming the powerctx. 
To be @@ -715,7 +719,7 @@ static int mi_set_context(struct i915_request *rq, */ *cs++ = MI_NOOP; - if (IS_GEN(i915, 7)) { + if (GRAPHICS_VER(i915) == 7) { if (num_engines) { struct intel_engine_cs *signaller; i915_reg_t last_reg = {}; /* keep gcc quiet */ @@ -739,7 +743,7 @@ static int mi_set_context(struct i915_request *rq, *cs++ = MI_NOOP; } *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; - } else if (IS_GEN(i915, 5)) { + } else if (GRAPHICS_VER(i915) == 5) { *cs++ = MI_SUSPEND_FLUSH; } @@ -989,14 +993,10 @@ static void gen6_bsd_submit_request(struct i915_request *request) static void i9xx_set_default_submission(struct intel_engine_cs *engine) { engine->submit_request = i9xx_submit_request; - - engine->park = NULL; - engine->unpark = NULL; } static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine) { - i9xx_set_default_submission(engine); engine->submit_request = gen6_bsd_submit_request; } @@ -1004,7 +1004,7 @@ static void ring_release(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; - drm_WARN_ON(&dev_priv->drm, INTEL_GEN(dev_priv) > 2 && + drm_WARN_ON(&dev_priv->drm, GRAPHICS_VER(dev_priv) > 2 && (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0); intel_engine_cleanup_common(engine); @@ -1021,17 +1021,24 @@ static void ring_release(struct intel_engine_cs *engine) intel_timeline_put(engine->legacy.timeline); } +static void irq_handler(struct intel_engine_cs *engine, u16 iir) +{ + intel_engine_signal_breadcrumbs(engine); +} + static void setup_irq(struct intel_engine_cs *engine) { struct drm_i915_private *i915 = engine->i915; - if (INTEL_GEN(i915) >= 6) { + intel_engine_set_irq_handler(engine, irq_handler); + + if (GRAPHICS_VER(i915) >= 6) { engine->irq_enable = gen6_irq_enable; engine->irq_disable = gen6_irq_disable; - } else if (INTEL_GEN(i915) >= 5) { + } else if (GRAPHICS_VER(i915) >= 5) { engine->irq_enable = gen5_irq_enable; engine->irq_disable = gen5_irq_disable; - } else if (INTEL_GEN(i915) >= 3) { + } else if (GRAPHICS_VER(i915) >= 3) { engine->irq_enable = gen3_irq_enable; engine->irq_disable = gen3_irq_disable; } else { @@ -1045,7 +1052,7 @@ static void setup_common(struct intel_engine_cs *engine) struct drm_i915_private *i915 = engine->i915; /* gen8+ are only supported with execlists */ - GEM_BUG_ON(INTEL_GEN(i915) >= 8); + GEM_BUG_ON(GRAPHICS_VER(i915) >= 8); setup_irq(engine); @@ -1066,14 +1073,14 @@ static void setup_common(struct intel_engine_cs *engine) * engine->emit_init_breadcrumb(). 
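Several writes in the resume and context-switch paths above (_MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() on RING_MI_MODE and RING_MODE_GEN7) rely on the masked-register convention: the upper 16 bits of the written value select which of the lower 16 bits the hardware may change. A sketch of the two encodings (the helper names are mine):

#include <stdint.h>

static uint32_t masked_bit_enable(uint16_t bits)
{
	return ((uint32_t)bits << 16) | bits;	/* select bits, write them as 1 */
}

static uint32_t masked_bit_disable(uint16_t bits)
{
	return (uint32_t)bits << 16;		/* select bits, write them as 0 */
}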
*/ engine->emit_fini_breadcrumb = gen3_emit_breadcrumb; - if (IS_GEN(i915, 5)) + if (GRAPHICS_VER(i915) == 5) engine->emit_fini_breadcrumb = gen5_emit_breadcrumb; engine->set_default_submission = i9xx_set_default_submission; - if (INTEL_GEN(i915) >= 6) + if (GRAPHICS_VER(i915) >= 6) engine->emit_bb_start = gen6_emit_bb_start; - else if (INTEL_GEN(i915) >= 4) + else if (GRAPHICS_VER(i915) >= 4) engine->emit_bb_start = gen4_emit_bb_start; else if (IS_I830(i915) || IS_I845G(i915)) engine->emit_bb_start = i830_emit_bb_start; @@ -1090,16 +1097,16 @@ static void setup_rcs(struct intel_engine_cs *engine) engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT; - if (INTEL_GEN(i915) >= 7) { + if (GRAPHICS_VER(i915) >= 7) { engine->emit_flush = gen7_emit_flush_rcs; engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_rcs; - } else if (IS_GEN(i915, 6)) { + } else if (GRAPHICS_VER(i915) == 6) { engine->emit_flush = gen6_emit_flush_rcs; engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_rcs; - } else if (IS_GEN(i915, 5)) { + } else if (GRAPHICS_VER(i915) == 5) { engine->emit_flush = gen4_emit_flush_rcs; } else { - if (INTEL_GEN(i915) < 4) + if (GRAPHICS_VER(i915) < 4) engine->emit_flush = gen2_emit_flush; else engine->emit_flush = gen4_emit_flush_rcs; @@ -1114,20 +1121,20 @@ static void setup_vcs(struct intel_engine_cs *engine) { struct drm_i915_private *i915 = engine->i915; - if (INTEL_GEN(i915) >= 6) { + if (GRAPHICS_VER(i915) >= 6) { /* gen6 bsd needs a special wa for tail updates */ - if (IS_GEN(i915, 6)) + if (GRAPHICS_VER(i915) == 6) engine->set_default_submission = gen6_bsd_set_default_submission; engine->emit_flush = gen6_emit_flush_vcs; engine->irq_enable_mask = GT_BSD_USER_INTERRUPT; - if (IS_GEN(i915, 6)) + if (GRAPHICS_VER(i915) == 6) engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs; else engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs; } else { engine->emit_flush = gen4_emit_flush_vcs; - if (IS_GEN(i915, 5)) + if (GRAPHICS_VER(i915) == 5) engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT; else engine->irq_enable_mask = I915_BSD_USER_INTERRUPT; @@ -1141,7 +1148,7 @@ static void setup_bcs(struct intel_engine_cs *engine) engine->emit_flush = gen6_emit_flush_xcs; engine->irq_enable_mask = GT_BLT_USER_INTERRUPT; - if (IS_GEN(i915, 6)) + if (GRAPHICS_VER(i915) == 6) engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs; else engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs; @@ -1151,7 +1158,7 @@ static void setup_vecs(struct intel_engine_cs *engine) { struct drm_i915_private *i915 = engine->i915; - GEM_BUG_ON(INTEL_GEN(i915) < 7); + GEM_BUG_ON(GRAPHICS_VER(i915) < 7); engine->emit_flush = gen6_emit_flush_xcs; engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; @@ -1199,7 +1206,7 @@ static struct i915_vma *gen7_ctx_vma(struct intel_engine_cs *engine) struct i915_vma *vma; int size, err; - if (!IS_GEN(engine->i915, 7) || engine->class != RENDER_CLASS) + if (GRAPHICS_VER(engine->i915) != 7 || engine->class != RENDER_CLASS) return 0; err = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */); diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index 405d814e9040..06e9a8ed4e03 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -196,7 +196,7 @@ static void rps_reset_interrupts(struct intel_rps *rps) struct intel_gt *gt = rps_to_gt(rps); spin_lock_irq(&gt->irq_lock); - if (INTEL_GEN(gt->i915) >= 11) + if (GRAPHICS_VER(gt->i915) >= 11) gen11_rps_reset_interrupts(rps); else gen6_rps_reset_interrupts(rps); @@ 
-630,7 +630,7 @@ static u32 rps_limits(struct intel_rps *rps, u8 val) * frequency, if the down threshold expires in that window we will not * receive a down interrupt. */ - if (INTEL_GEN(rps_to_i915(rps)) >= 9) { + if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) { limits = rps->max_freq_softlimit << 23; if (val <= rps->min_freq_softlimit) limits |= rps->min_freq_softlimit << 14; @@ -697,7 +697,7 @@ static void rps_set_power(struct intel_rps *rps, int new_power) intel_gt_ns_to_pm_interval(gt, ei_down * threshold_down * 10)); set(uncore, GEN6_RP_CONTROL, - (INTEL_GEN(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) | + (GRAPHICS_VER(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) | GEN6_RP_MEDIA_HW_NORMAL_MODE | GEN6_RP_MEDIA_IS_GFX | GEN6_RP_ENABLE | @@ -771,7 +771,7 @@ static int gen6_rps_set(struct intel_rps *rps, u8 val) struct drm_i915_private *i915 = rps_to_i915(rps); u32 swreq; - if (INTEL_GEN(i915) >= 9) + if (GRAPHICS_VER(i915) >= 9) swreq = GEN9_FREQUENCY(val); else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) swreq = HSW_FREQUENCY(val); @@ -812,14 +812,14 @@ static int rps_set(struct intel_rps *rps, u8 val, bool update) if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) err = vlv_rps_set(rps, val); - else if (INTEL_GEN(i915) >= 6) + else if (GRAPHICS_VER(i915) >= 6) err = gen6_rps_set(rps, val); else err = gen5_rps_set(rps, val); if (err) return err; - if (update && INTEL_GEN(i915) >= 6) + if (update && GRAPHICS_VER(i915) >= 6) gen6_rps_set_thresholds(rps, val); rps->last_freq = val; @@ -853,7 +853,7 @@ void intel_rps_unpark(struct intel_rps *rps) if (intel_rps_uses_timer(rps)) rps_start_timer(rps); - if (IS_GEN(rps_to_i915(rps), 5)) + if (GRAPHICS_VER(rps_to_i915(rps)) == 5) gen5_rps_update(rps); } @@ -999,7 +999,7 @@ static void gen6_rps_init(struct intel_rps *rps) rps->efficient_freq = rps->rp1_freq; if (IS_HASWELL(i915) || IS_BROADWELL(i915) || - IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) { + IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 10) { u32 ddcc_status = 0; if (sandybridge_pcode_read(i915, @@ -1012,7 +1012,7 @@ static void gen6_rps_init(struct intel_rps *rps) rps->max_freq); } - if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) { + if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 10) { /* Store the frequency values in 16.66 MHZ units, which is * the natural hardware unit for SKL */ @@ -1048,7 +1048,7 @@ static bool gen9_rps_enable(struct intel_rps *rps) struct intel_uncore *uncore = gt->uncore; /* Program defaults and thresholds for RPS */ - if (IS_GEN(gt->i915, 9)) + if (GRAPHICS_VER(gt->i915) == 9) intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ, GEN9_FREQUENCY(rps->rp1_freq)); @@ -1365,16 +1365,16 @@ void intel_rps_enable(struct intel_rps *rps) enabled = chv_rps_enable(rps); else if (IS_VALLEYVIEW(i915)) enabled = vlv_rps_enable(rps); - else if (INTEL_GEN(i915) >= 9) + else if (GRAPHICS_VER(i915) >= 9) enabled = gen9_rps_enable(rps); - else if (INTEL_GEN(i915) >= 8) + else if (GRAPHICS_VER(i915) >= 8) enabled = gen8_rps_enable(rps); - else if (INTEL_GEN(i915) >= 6) + else if (GRAPHICS_VER(i915) >= 6) enabled = gen6_rps_enable(rps); else if (IS_IRONLAKE_M(i915)) enabled = gen5_rps_enable(rps); else - MISSING_CASE(INTEL_GEN(i915)); + MISSING_CASE(GRAPHICS_VER(i915)); intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); if (!enabled) return; @@ -1393,7 +1393,7 @@ void intel_rps_enable(struct intel_rps *rps) if (has_busy_stats(rps)) intel_rps_set_timer(rps); - else if (INTEL_GEN(i915) >= 6) + else if (GRAPHICS_VER(i915) >= 6) intel_rps_set_interrupts(rps); else /* Ironlake currently uses intel_ips.ko */ {} 
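[Editor's note: the RPS paths above and below encode frequencies in generation-specific hardware units — 50 MHz steps on gen6-8 and, per the gen6_rps_init() comment above, 16.66 MHz (50/3) steps from gen9 on. A minimal standalone sketch of that scaling, mirroring the intel_gpu_freq()/intel_freq_opcode() helpers in the hunks that follow, and assuming the conventional i915 constant values GT_FREQUENCY_MULTIPLIER == 50 and GEN9_FREQ_SCALER == 3; pre-gen6 and VLV/CHV take separate paths not shown here.]

```c
#include <linux/kernel.h>	/* DIV_ROUND_CLOSEST() */

#define GT_FREQUENCY_MULTIPLIER	50	/* 50 MHz per unit on gen6-8 */
#define GEN9_FREQ_SCALER	3	/* gen9+: 50/3 MHz (~16.66) per unit */

/* Hardware frequency opcode -> MHz. */
static unsigned int sketch_gpu_freq(unsigned int val, int graphics_ver)
{
	if (graphics_ver >= 9)
		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
					 GEN9_FREQ_SCALER);
	return val * GT_FREQUENCY_MULTIPLIER;
}

/* MHz -> hardware frequency opcode. */
static unsigned int sketch_freq_opcode(unsigned int mhz, int graphics_ver)
{
	if (graphics_ver >= 9)
		return DIV_ROUND_CLOSEST(mhz * GEN9_FREQ_SCALER,
					 GT_FREQUENCY_MULTIPLIER);
	return DIV_ROUND_CLOSEST(mhz, GT_FREQUENCY_MULTIPLIER);
}
```

[So a gen9 request value of 18 units corresponds to 300 MHz, while the same 300 MHz is opcode 6 on gen6-8.]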
@@ -1414,7 +1414,7 @@ void intel_rps_disable(struct intel_rps *rps) intel_rps_clear_interrupts(rps); intel_rps_clear_timer(rps); - if (INTEL_GEN(i915) >= 6) + if (GRAPHICS_VER(i915) >= 6) gen6_rps_disable(rps); else if (IS_IRONLAKE_M(i915)) gen5_rps_disable(rps); @@ -1453,14 +1453,14 @@ int intel_gpu_freq(struct intel_rps *rps, int val) { struct drm_i915_private *i915 = rps_to_i915(rps); - if (INTEL_GEN(i915) >= 9) + if (GRAPHICS_VER(i915) >= 9) return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER); else if (IS_CHERRYVIEW(i915)) return chv_gpu_freq(rps, val); else if (IS_VALLEYVIEW(i915)) return byt_gpu_freq(rps, val); - else if (INTEL_GEN(i915) >= 6) + else if (GRAPHICS_VER(i915) >= 6) return val * GT_FREQUENCY_MULTIPLIER; else return val; @@ -1470,14 +1470,14 @@ int intel_freq_opcode(struct intel_rps *rps, int val) { struct drm_i915_private *i915 = rps_to_i915(rps); - if (INTEL_GEN(i915) >= 9) + if (GRAPHICS_VER(i915) >= 9) return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER, GT_FREQUENCY_MULTIPLIER); else if (IS_CHERRYVIEW(i915)) return chv_freq_opcode(rps, val); else if (IS_VALLEYVIEW(i915)) return byt_freq_opcode(rps, val); - else if (INTEL_GEN(i915) >= 6) + else if (GRAPHICS_VER(i915) >= 6) return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER); else return val; @@ -1770,11 +1770,11 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) spin_unlock(>->irq_lock); } - if (INTEL_GEN(gt->i915) >= 8) + if (GRAPHICS_VER(gt->i915) >= 8) return; if (pm_iir & PM_VEBOX_USER_INTERRUPT) - intel_engine_signal_breadcrumbs(gt->engine[VECS0]); + intel_engine_cs_irq(gt->engine[VECS0], pm_iir >> 10); if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); @@ -1833,7 +1833,7 @@ void intel_rps_init(struct intel_rps *rps) chv_rps_init(rps); else if (IS_VALLEYVIEW(i915)) vlv_rps_init(rps); - else if (INTEL_GEN(i915) >= 6) + else if (GRAPHICS_VER(i915) >= 6) gen6_rps_init(rps); else if (IS_IRONLAKE_M(i915)) gen5_rps_init(rps); @@ -1843,7 +1843,7 @@ void intel_rps_init(struct intel_rps *rps) rps->min_freq_softlimit = rps->min_freq; /* After setting max-softlimit, find the overclock max freq */ - if (IS_GEN(i915, 6) || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) { + if (GRAPHICS_VER(i915) == 6 || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) { u32 params = 0; sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS, @@ -1872,16 +1872,16 @@ void intel_rps_init(struct intel_rps *rps) * * TODO: verify if this can be reproduced on VLV,CHV. 
*/ - if (INTEL_GEN(i915) <= 7) + if (GRAPHICS_VER(i915) <= 7) rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED; - if (INTEL_GEN(i915) >= 8 && INTEL_GEN(i915) < 11) + if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) < 11) rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; } void intel_rps_sanitize(struct intel_rps *rps) { - if (INTEL_GEN(rps_to_i915(rps)) >= 6) + if (GRAPHICS_VER(rps_to_i915(rps)) >= 6) rps_disable_interrupts(rps); } @@ -1892,11 +1892,11 @@ u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat) if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) cagf = (rpstat >> 8) & 0xff; - else if (INTEL_GEN(i915) >= 9) + else if (GRAPHICS_VER(i915) >= 9) cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT; else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; - else if (INTEL_GEN(i915) >= 6) + else if (GRAPHICS_VER(i915) >= 6) cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; else cagf = gen5_invert_freq(rps, (rpstat & MEMSTAT_PSTATE_MASK) >> @@ -1915,7 +1915,7 @@ static u32 read_cagf(struct intel_rps *rps) vlv_punit_get(i915); freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); vlv_punit_put(i915); - } else if (INTEL_GEN(i915) >= 6) { + } else if (GRAPHICS_VER(i915) >= 6) { freq = intel_uncore_read(uncore, GEN6_RPSTAT1); } else { freq = intel_uncore_read(uncore, MEMSTAT_ILK); @@ -1968,7 +1968,7 @@ void intel_rps_driver_register(struct intel_rps *rps) * We only register the i915 ips part with intel-ips once everything is * set up, to avoid intel-ips sneaking in and reading bogus values. */ - if (IS_GEN(gt->i915, 5)) { + if (GRAPHICS_VER(gt->i915) == 5) { GEM_BUG_ON(ips_mchdev); rcu_assign_pointer(ips_mchdev, gt->i915); ips_ping_for_i915_load(); diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c index 0d9f74aec8fe..367fd44b81c8 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.c +++ b/drivers/gpu/drm/i915/gt/intel_sseu.c @@ -590,13 +590,13 @@ void intel_sseu_info_init(struct intel_gt *gt) cherryview_sseu_info_init(gt); else if (IS_BROADWELL(i915)) bdw_sseu_info_init(gt); - else if (IS_GEN(i915, 9)) + else if (GRAPHICS_VER(i915) == 9) gen9_sseu_info_init(gt); - else if (IS_GEN(i915, 10)) + else if (GRAPHICS_VER(i915) == 10) gen10_sseu_info_init(gt); - else if (IS_GEN(i915, 11)) + else if (GRAPHICS_VER(i915) == 11) gen11_sseu_info_init(gt); - else if (INTEL_GEN(i915) >= 12) + else if (GRAPHICS_VER(i915) >= 12) gen12_sseu_info_init(gt); } @@ -613,7 +613,7 @@ u32 intel_sseu_make_rpcs(struct intel_gt *gt, * No explicit RPCS request is needed to ensure full * slice/subslice/EU enablement prior to Gen9. */ - if (INTEL_GEN(i915) < 9) + if (GRAPHICS_VER(i915) < 9) return 0; /* @@ -651,7 +651,7 @@ u32 intel_sseu_make_rpcs(struct intel_gt *gt, * subslices are enabled, or a count between one and four on the first * slice. 
*/ - if (IS_GEN(i915, 11) && + if (GRAPHICS_VER(i915) == 11 && slices == 1 && subslices > min_t(u8, 4, hweight8(sseu->subslice_mask[0]) / 2)) { GEM_BUG_ON(subslices & 1); @@ -669,7 +669,7 @@ u32 intel_sseu_make_rpcs(struct intel_gt *gt, if (sseu->has_slice_pg) { u32 mask, val = slices; - if (INTEL_GEN(i915) >= 11) { + if (GRAPHICS_VER(i915) >= 11) { mask = GEN11_RPCS_S_CNT_MASK; val <<= GEN11_RPCS_S_CNT_SHIFT; } else { diff --git a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c index 51780282d872..714fe8495775 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c +++ b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c @@ -248,7 +248,7 @@ int intel_sseu_status(struct seq_file *m, struct intel_gt *gt) struct sseu_dev_info sseu; intel_wakeref_t wakeref; - if (INTEL_GEN(i915) < 8) + if (GRAPHICS_VER(i915) < 8) return -ENODEV; seq_puts(m, "SSEU Device Info\n"); @@ -265,9 +265,9 @@ int intel_sseu_status(struct seq_file *m, struct intel_gt *gt) cherryview_sseu_device_status(gt, &sseu); else if (IS_BROADWELL(i915)) bdw_sseu_device_status(gt, &sseu); - else if (IS_GEN(i915, 9)) + else if (GRAPHICS_VER(i915) == 9) gen9_sseu_device_status(gt, &sseu); - else if (INTEL_GEN(i915) >= 10) + else if (GRAPHICS_VER(i915) >= 10) gen10_sseu_device_status(gt, &sseu); } diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index f19cf6d2fa85..c4a126c8caef 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -32,7 +32,6 @@ static struct i915_vma *hwsp_alloc(struct intel_gt *gt) return vma; } -__i915_active_call static void __timeline_retire(struct i915_active *active) { struct intel_timeline *tl = @@ -104,7 +103,8 @@ static int intel_timeline_init(struct intel_timeline *timeline, INIT_LIST_HEAD(&timeline->requests); i915_syncmap_init(&timeline->sync); - i915_active_init(&timeline->active, __timeline_active, __timeline_retire); + i915_active_init(&timeline->active, __timeline_active, + __timeline_retire, 0); return 0; } diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 2c6f7217469f..b62d1e31a645 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -607,9 +607,38 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine, wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU); } +/* + * These settings aren't actually workarounds, but general tuning settings that + * need to be programmed on several platforms. + */ +static void gen12_ctx_gt_tuning_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) +{ + /* + * Although some platforms refer to it as Wa_1604555607, we need to + * program it even on those that don't explicitly list that + * workaround. + * + * Note that the programming of this register is further modified + * according to the FF_MODE2 guidance given by Wa_1608008084:gen12. + * Wa_1608008084 tells us the FF_MODE2 register will return the wrong + * value when read. The default value for this register is zero for all + * fields and there are no bit masks. So instead of doing a RMW we + * should just write TDS timer value. For the same reason read + * verification is ignored. 
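+ *
+ * (Editorial aside, not part of the original comment: in this file's
+ * wa_add(wal, reg, mask, val, read_mask) helper the final argument is
+ * the readback-verification mask, so the trailing 0 in the call below
+ * is what actually implements "read verification is ignored".)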
+ */ + wa_add(wal, + FF_MODE2, + FF_MODE2_TDS_TIMER_MASK, + FF_MODE2_TDS_TIMER_128, + 0); +} + static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) { + gen12_ctx_gt_tuning_init(engine, wal); + /* * Wa_1409142259:tgl * Wa_1409347922:tgl @@ -628,27 +657,17 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine, wa_masked_field_set(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK, GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL); -} - -static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine, - struct i915_wa_list *wal) -{ - gen12_ctx_workarounds_init(engine, wal); /* - * Wa_1604555607:tgl,rkl + * Wa_16011163337 * - * Note that the implementation of this workaround is further modified - * according to the FF_MODE2 guidance given by Wa_1608008084:gen12. - * FF_MODE2 register will return the wrong value when read. The default - * value for this register is zero for all fields and there are no bit - * masks. So instead of doing a RMW we should just write the GS Timer - * and TDS timer values for Wa_1604555607 and Wa_16011163337. + * Like in gen12_ctx_gt_tuning_init(), read verification is ignored due + * to Wa_1608008084. */ wa_add(wal, FF_MODE2, - FF_MODE2_GS_TIMER_MASK | FF_MODE2_TDS_TIMER_MASK, - FF_MODE2_GS_TIMER_224 | FF_MODE2_TDS_TIMER_128, + FF_MODE2_GS_TIMER_MASK, + FF_MODE2_GS_TIMER_224, 0); } @@ -664,16 +683,6 @@ static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine, /* Wa_22010493298 */ wa_masked_en(wal, HIZ_CHICKEN, DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE); - - /* - * Wa_16011163337 - * - * Like in tgl_ctx_workarounds_init(), read verification is ignored due - * to Wa_1608008084. - */ - wa_add(wal, - FF_MODE2, - FF_MODE2_GS_TIMER_MASK, FF_MODE2_GS_TIMER_224, 0); } static void @@ -690,12 +699,9 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine, if (IS_DG1(i915)) dg1_ctx_workarounds_init(engine, wal); - else if (IS_ALDERLAKE_S(i915) || IS_ROCKETLAKE(i915) || - IS_TIGERLAKE(i915)) - tgl_ctx_workarounds_init(engine, wal); - else if (IS_GEN(i915, 12)) + else if (GRAPHICS_VER(i915) == 12) gen12_ctx_workarounds_init(engine, wal); - else if (IS_GEN(i915, 11)) + else if (GRAPHICS_VER(i915) == 11) icl_ctx_workarounds_init(engine, wal); else if (IS_CANNONLAKE(i915)) cnl_ctx_workarounds_init(engine, wal); @@ -713,14 +719,14 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine, chv_ctx_workarounds_init(engine, wal); else if (IS_BROADWELL(i915)) bdw_ctx_workarounds_init(engine, wal); - else if (IS_GEN(i915, 7)) + else if (GRAPHICS_VER(i915) == 7) gen7_ctx_workarounds_init(engine, wal); - else if (IS_GEN(i915, 6)) + else if (GRAPHICS_VER(i915) == 6) gen6_ctx_workarounds_init(engine, wal); - else if (INTEL_GEN(i915) < 8) + else if (GRAPHICS_VER(i915) < 8) ; else - MISSING_CASE(INTEL_GEN(i915)); + MISSING_CASE(GRAPHICS_VER(i915)); wa_init_finish(wal); } @@ -944,7 +950,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal) unsigned int slice, subslice; u32 l3_en, mcr, mcr_mask; - GEM_BUG_ON(INTEL_GEN(i915) < 10); + GEM_BUG_ON(GRAPHICS_VER(i915) < 10); /* * WaProgramMgsrForL3BankSpecificMmioReads: cnl,icl @@ -974,7 +980,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal) * of every MMIO read. 
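 *
 * (Editorial aside: unicast reads of these multicast registers go
 * through the MCR steering selector, which picks the slice/subslice
 * instance that responds; the code below therefore steers reads to an
 * instance that is actually fused on, so the returned value is valid.)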
*/ - if (INTEL_GEN(i915) >= 10 && is_power_of_2(sseu->slice_mask)) { + if (GRAPHICS_VER(i915) >= 10 && is_power_of_2(sseu->slice_mask)) { u32 l3_fuse = intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3) & GEN10_L3BANK_MASK; @@ -996,7 +1002,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal) } subslice--; - if (INTEL_GEN(i915) >= 11) { + if (GRAPHICS_VER(i915) >= 11) { mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice); mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK; } else { @@ -1078,11 +1084,37 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS); } +/* + * Though there are per-engine instances of these registers, + * they retain their value through engine resets and should + * only be provided on the GT workaround list rather than + * the engine-specific workaround list. + */ +static void +wa_14011060649(struct drm_i915_private *i915, struct i915_wa_list *wal) +{ + struct intel_engine_cs *engine; + struct intel_gt *gt = &i915->gt; + int id; + + for_each_engine(engine, gt, id) { + if (engine->class != VIDEO_DECODE_CLASS || + (engine->instance % 2)) + continue; + + wa_write_or(wal, VDBOX_CGCTL3F10(engine->mmio_base), + IECPUNIT_CLKGATE_DIS); + } +} + static void gen12_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) { wa_init_mcr(i915, wal); + + /* Wa_14011060649:tgl,rkl,dg1,adls */ + wa_14011060649(i915, wal); } static void @@ -1139,9 +1171,9 @@ gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal) dg1_gt_workarounds_init(i915, wal); else if (IS_TIGERLAKE(i915)) tgl_gt_workarounds_init(i915, wal); - else if (IS_GEN(i915, 12)) + else if (GRAPHICS_VER(i915) == 12) gen12_gt_workarounds_init(i915, wal); - else if (IS_GEN(i915, 11)) + else if (GRAPHICS_VER(i915) == 11) icl_gt_workarounds_init(i915, wal); else if (IS_CANNONLAKE(i915)) cnl_gt_workarounds_init(i915, wal); @@ -1161,18 +1193,18 @@ gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal) vlv_gt_workarounds_init(i915, wal); else if (IS_IVYBRIDGE(i915)) ivb_gt_workarounds_init(i915, wal); - else if (IS_GEN(i915, 6)) + else if (GRAPHICS_VER(i915) == 6) snb_gt_workarounds_init(i915, wal); - else if (IS_GEN(i915, 5)) + else if (GRAPHICS_VER(i915) == 5) ilk_gt_workarounds_init(i915, wal); else if (IS_G4X(i915)) g4x_gt_workarounds_init(i915, wal); - else if (IS_GEN(i915, 4)) + else if (GRAPHICS_VER(i915) == 4) gen4_gt_workarounds_init(i915, wal); - else if (INTEL_GEN(i915) <= 8) + else if (GRAPHICS_VER(i915) <= 8) ; else - MISSING_CASE(INTEL_GEN(i915)); + MISSING_CASE(GRAPHICS_VER(i915)); } void intel_gt_init_workarounds(struct drm_i915_private *i915) @@ -1526,9 +1558,9 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine) if (IS_DG1(i915)) dg1_whitelist_build(engine); - else if (IS_GEN(i915, 12)) + else if (GRAPHICS_VER(i915) == 12) tgl_whitelist_build(engine); - else if (IS_GEN(i915, 11)) + else if (GRAPHICS_VER(i915) == 11) icl_whitelist_build(engine); else if (IS_CANNONLAKE(i915)) cnl_whitelist_build(engine); @@ -1544,10 +1576,10 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine) bxt_whitelist_build(engine); else if (IS_SKYLAKE(i915)) skl_whitelist_build(engine); - else if (INTEL_GEN(i915) <= 8) + else if (GRAPHICS_VER(i915) <= 8) ; else - MISSING_CASE(INTEL_GEN(i915)); + MISSING_CASE(GRAPHICS_VER(i915)); wa_init_finish(w); } @@ -1663,7 +1695,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) 
ENABLE_SMALLPL); } - if (IS_GEN(i915, 11)) { + if (GRAPHICS_VER(i915) == 11) { /* This is not an Wa. Enable for better image quality */ wa_masked_en(wal, _3D_CHICKEN3, @@ -1755,14 +1787,13 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) GEN7_FF_THREAD_MODE, GEN12_FF_TESSELATION_DOP_GATE_DISABLE); - /* Wa_22010271021:ehl */ - if (IS_JSL_EHL(i915)) - wa_masked_en(wal, - GEN9_CS_DEBUG_MODE1, - FF_DOP_CLOCK_GATE_DISABLE); + /* Wa_22010271021 */ + wa_masked_en(wal, + GEN9_CS_DEBUG_MODE1, + FF_DOP_CLOCK_GATE_DISABLE); } - if (IS_GEN_RANGE(i915, 9, 12)) { + if (IS_GRAPHICS_VER(i915, 9, 12)) { /* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl,tgl */ wa_masked_en(wal, GEN7_FF_SLICE_CS_CHICKEN1, @@ -1786,7 +1817,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE); } - if (IS_GEN(i915, 9)) { + if (GRAPHICS_VER(i915) == 9) { /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */ wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS, @@ -1828,9 +1859,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) CACHE_MODE_0_GEN7, /* enable HiZ Raw Stall Optimization */ HIZ_RAW_STALL_OPT_DISABLE); - - /* WaDisable4x2SubspanOptimization:hsw */ - wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE); } if (IS_VALLEYVIEW(i915)) { @@ -1893,7 +1921,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE); } - if (IS_GEN(i915, 7)) { + if (GRAPHICS_VER(i915) == 7) { /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */ wa_masked_en(wal, GFX_MODE_GEN7, @@ -1925,7 +1953,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) GEN6_WIZ_HASHING_16x4); } - if (IS_GEN_RANGE(i915, 6, 7)) + if (IS_GRAPHICS_VER(i915, 6, 7)) /* * We need to disable the AsyncFlip performance optimisations in * order to use MI_WAIT_FOR_EVENT within the CS. It should @@ -1937,7 +1965,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) MI_MODE, ASYNC_FLIP_PERF_DISABLE); - if (IS_GEN(i915, 6)) { + if (GRAPHICS_VER(i915) == 6) { /* * Required for the hardware to program scanline values for * waiting @@ -1991,14 +2019,14 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) CM0_STC_EVICT_DISABLE_LRA_SNB); } - if (IS_GEN_RANGE(i915, 4, 6)) + if (IS_GRAPHICS_VER(i915, 4, 6)) /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ wa_add(wal, MI_MODE, 0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH), /* XXX bit doesn't stick on Broadwater */ IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH); - if (IS_GEN(i915, 4)) + if (GRAPHICS_VER(i915) == 4) /* * Disable CONSTANT_BUFFER before it is loaded from the context * image. 
For as it is loaded, it is executed and the stored @@ -2030,7 +2058,7 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) static void engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal) { - if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 4)) + if (I915_SELFTEST_ONLY(GRAPHICS_VER(engine->i915) < 4)) return; if (engine->class == RENDER_CLASS) @@ -2043,7 +2071,7 @@ void intel_engine_init_workarounds(struct intel_engine_cs *engine) { struct i915_wa_list *wal = &engine->wa_list; - if (INTEL_GEN(engine->i915) < 4) + if (GRAPHICS_VER(engine->i915) < 4) return; wa_init_start(wal, "engine", engine->name); @@ -2084,9 +2112,9 @@ static bool mcr_range(struct drm_i915_private *i915, u32 offset) const struct mcr_range *mcr_ranges; int i; - if (INTEL_GEN(i915) >= 12) + if (GRAPHICS_VER(i915) >= 12) mcr_ranges = mcr_ranges_gen12; - else if (INTEL_GEN(i915) >= 8) + else if (GRAPHICS_VER(i915) >= 8) mcr_ranges = mcr_ranges_gen8; else return false; @@ -2115,7 +2143,7 @@ wa_list_srm(struct i915_request *rq, u32 srm, *cs; srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; - if (INTEL_GEN(i915) >= 8) + if (GRAPHICS_VER(i915) >= 8) srm++; for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c index e1ba03b93ffa..32589c6625e1 100644 --- a/drivers/gpu/drm/i915/gt/mock_engine.c +++ b/drivers/gpu/drm/i915/gt/mock_engine.c @@ -55,7 +55,7 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine) kfree(ring); return NULL; } - i915_active_init(&ring->vma->active, NULL, NULL); + i915_active_init(&ring->vma->active, NULL, NULL, 0); __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(ring->vma)); __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &ring->vma->node.flags); ring->vma->node.size = sz; diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c index b9bdd1d23243..26685b927169 100644 --- a/drivers/gpu/drm/i915/gt/selftest_context.c +++ b/drivers/gpu/drm/i915/gt/selftest_context.c @@ -88,7 +88,8 @@ static int __live_context_size(struct intel_engine_cs *engine) goto err; vaddr = i915_gem_object_pin_map_unlocked(ce->state->obj, - i915_coherent_map_type(engine->i915)); + i915_coherent_map_type(engine->i915, + ce->state->obj, false)); if (IS_ERR(vaddr)) { err = PTR_ERR(vaddr); intel_context_unpin(ce); diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c index b32814a1f20b..64abf5feabfa 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c @@ -52,7 +52,7 @@ static int write_timestamp(struct i915_request *rq, int slot) return PTR_ERR(cs); cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT; - if (INTEL_GEN(rq->engine->i915) >= 8) + if (GRAPHICS_VER(rq->engine->i915) >= 8) cmd++; *cs++ = cmd; *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base)); @@ -125,7 +125,7 @@ static int perf_mi_bb_start(void *arg) enum intel_engine_id id; int err = 0; - if (INTEL_GEN(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */ + if (GRAPHICS_VER(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */ return 0; perf_begin(gt); @@ -249,7 +249,7 @@ static int perf_mi_noop(void *arg) enum intel_engine_id id; int err = 0; - if (INTEL_GEN(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */ + if (GRAPHICS_VER(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */ return 0; perf_begin(gt); @@ -376,34 +376,34 @@ static int intel_mmio_bases_check(void *arg) u8 prev = 
U8_MAX; for (j = 0; j < MAX_MMIO_BASES; j++) { - u8 gen = info->mmio_bases[j].gen; + u8 ver = info->mmio_bases[j].graphics_ver; u32 base = info->mmio_bases[j].base; - if (gen >= prev) { - pr_err("%s(%s, class:%d, instance:%d): mmio base for gen %x is before the one for gen %x\n", + if (ver >= prev) { + pr_err("%s(%s, class:%d, instance:%d): mmio base for graphics ver %u is before the one for ver %u\n", __func__, intel_engine_class_repr(info->class), info->class, info->instance, - prev, gen); + prev, ver); return -EINVAL; } - if (gen == 0) + if (ver == 0) break; if (!base) { - pr_err("%s(%s, class:%d, instance:%d): invalid mmio base (%x) for gen %x at entry %u\n", + pr_err("%s(%s, class:%d, instance:%d): invalid mmio base (%x) for graphics ver %u at entry %u\n", __func__, intel_engine_class_repr(info->class), info->class, info->instance, - base, gen, j); + base, ver, j); return -EINVAL; } - prev = gen; + prev = ver; } - pr_debug("%s: min gen supported for %s%d is %d\n", + pr_debug("%s: min graphics version supported for %s%d is %u\n", __func__, intel_engine_class_repr(info->class), info->instance, diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c index b2c369317bf1..4896e4ccad50 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c @@ -77,7 +77,7 @@ static struct pulse *pulse_create(void) return p; kref_init(&p->kref); - i915_active_init(&p->active, pulse_active, pulse_retire); + i915_active_init(&p->active, pulse_active, pulse_retire, 0); return p; } diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c index 2c898622bdfb..72cca3f0da21 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c @@ -198,7 +198,7 @@ static int live_engine_timestamps(void *arg) * the same CS clock. 
*/ - if (INTEL_GEN(gt->i915) < 8) + if (GRAPHICS_VER(gt->i915) < 8) return 0; for_each_engine(engine, gt, id) { diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c index 1081cd36a2bd..1c8108d30b85 100644 --- a/drivers/gpu/drm/i915/gt/selftest_execlists.c +++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c @@ -551,6 +551,32 @@ static int live_pin_rewind(void *arg) return err; } +static int engine_lock_reset_tasklet(struct intel_engine_cs *engine) +{ + tasklet_disable(&engine->execlists.tasklet); + local_bh_disable(); + + if (test_and_set_bit(I915_RESET_ENGINE + engine->id, + &engine->gt->reset.flags)) { + local_bh_enable(); + tasklet_enable(&engine->execlists.tasklet); + + intel_gt_set_wedged(engine->gt); + return -EBUSY; + } + + return 0; +} + +static void engine_unlock_reset_tasklet(struct intel_engine_cs *engine) +{ + clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, + &engine->gt->reset.flags); + + local_bh_enable(); + tasklet_enable(&engine->execlists.tasklet); +} + static int live_hold_reset(void *arg) { struct intel_gt *gt = arg; @@ -598,15 +624,9 @@ static int live_hold_reset(void *arg) /* We have our request executing, now remove it and reset */ - local_bh_disable(); - if (test_and_set_bit(I915_RESET_ENGINE + id, - >->reset.flags)) { - local_bh_enable(); - intel_gt_set_wedged(gt); - err = -EBUSY; + err = engine_lock_reset_tasklet(engine); + if (err) goto out; - } - tasklet_disable(&engine->execlists.tasklet); engine->execlists.tasklet.callback(&engine->execlists.tasklet); GEM_BUG_ON(execlists_active(&engine->execlists) != rq); @@ -618,10 +638,7 @@ static int live_hold_reset(void *arg) __intel_engine_reset_bh(engine, NULL); GEM_BUG_ON(rq->fence.error != -EIO); - tasklet_enable(&engine->execlists.tasklet); - clear_and_wake_up_bit(I915_RESET_ENGINE + id, - >->reset.flags); - local_bh_enable(); + engine_unlock_reset_tasklet(engine); /* Check that we do not resubmit the held request */ if (!i915_request_wait(rq, 0, HZ / 5)) { @@ -3269,7 +3286,7 @@ static int live_preempt_user(void *arg) if (!intel_engine_has_preemption(engine)) continue; - if (IS_GEN(gt->i915, 8) && engine->class != RENDER_CLASS) + if (GRAPHICS_VER(gt->i915) == 8 && engine->class != RENDER_CLASS) continue; /* we need per-context GPR */ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { @@ -4293,7 +4310,7 @@ static int live_virtual_preserved(void *arg) return 0; /* As we use CS_GPR we cannot run before they existed on all engines. 
*/ - if (INTEL_GEN(gt->i915) < 9) + if (GRAPHICS_VER(gt->i915) < 9) return 0; for (class = 0; class <= MAX_ENGINE_CLASS; class++) { @@ -4585,15 +4602,9 @@ static int reset_virtual_engine(struct intel_gt *gt, GEM_BUG_ON(engine == ve->engine); /* Take ownership of the reset and tasklet */ - local_bh_disable(); - if (test_and_set_bit(I915_RESET_ENGINE + engine->id, - >->reset.flags)) { - local_bh_enable(); - intel_gt_set_wedged(gt); - err = -EBUSY; + err = engine_lock_reset_tasklet(engine); + if (err) goto out_heartbeat; - } - tasklet_disable(&engine->execlists.tasklet); engine->execlists.tasklet.callback(&engine->execlists.tasklet); GEM_BUG_ON(execlists_active(&engine->execlists) != rq); @@ -4612,9 +4623,7 @@ static int reset_virtual_engine(struct intel_gt *gt, GEM_BUG_ON(rq->fence.error != -EIO); /* Release our grasp on the engine, letting CS flow again */ - tasklet_enable(&engine->execlists.tasklet); - clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, >->reset.flags); - local_bh_enable(); + engine_unlock_reset_tasklet(engine); /* Check that we do not resubmit the held request */ i915_request_get(rq); @@ -4716,7 +4725,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915) SUBTEST(live_virtual_reset), }; - if (!HAS_EXECLISTS(i915)) + if (i915->gt.submission_method != INTEL_SUBMISSION_ELSP) return 0; if (intel_gt_is_wedged(&i915->gt)) diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c index c0845bf72dd3..b9441217ca3d 100644 --- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c @@ -74,10 +74,10 @@ static int live_gt_clocks(void *arg) return 0; } - if (INTEL_GEN(gt->i915) < 4) /* Any CS_TIMESTAMP? */ + if (GRAPHICS_VER(gt->i915) < 4) /* Any CS_TIMESTAMP? */ return 0; - if (IS_GEN(gt->i915, 5)) + if (GRAPHICS_VER(gt->i915) == 5) /* * XXX CS_TIMESTAMP low dword is dysfunctional? 
* @@ -86,7 +86,7 @@ static int live_gt_clocks(void *arg) */ return 0; - if (IS_GEN(gt->i915, 4)) + if (GRAPHICS_VER(gt->i915) == 4) /* * XXX CS_TIMESTAMP appears gibberish * @@ -105,7 +105,7 @@ static int live_gt_clocks(void *arg) u64 time; u64 dt; - if (INTEL_GEN(engine->i915) < 7 && engine->id != RCS0) + if (GRAPHICS_VER(engine->i915) < 7 && engine->id != RCS0) continue; measure_clocks(engine, &cycles, &dt); diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index 746985971c3a..853246fad05f 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -69,7 +69,7 @@ static int hang_init(struct hang *h, struct intel_gt *gt) h->seqno = memset(vaddr, 0xff, PAGE_SIZE); vaddr = i915_gem_object_pin_map_unlocked(h->obj, - i915_coherent_map_type(gt->i915)); + i915_coherent_map_type(gt->i915, h->obj, false)); if (IS_ERR(vaddr)) { err = PTR_ERR(vaddr); goto err_unpin_hws; @@ -130,7 +130,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine) return ERR_CAST(obj); } - vaddr = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(gt->i915)); + vaddr = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(gt->i915, obj, false)); if (IS_ERR(vaddr)) { i915_gem_object_put(obj); i915_vm_put(vm); @@ -180,7 +180,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine) goto cancel_rq; batch = h->batch; - if (INTEL_GEN(gt->i915) >= 8) { + if (GRAPHICS_VER(gt->i915) >= 8) { *batch++ = MI_STORE_DWORD_IMM_GEN4; *batch++ = lower_32_bits(hws_address(hws, rq)); *batch++ = upper_32_bits(hws_address(hws, rq)); @@ -194,7 +194,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine) *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1; *batch++ = lower_32_bits(vma->node.start); *batch++ = upper_32_bits(vma->node.start); - } else if (INTEL_GEN(gt->i915) >= 6) { + } else if (GRAPHICS_VER(gt->i915) >= 6) { *batch++ = MI_STORE_DWORD_IMM_GEN4; *batch++ = 0; *batch++ = lower_32_bits(hws_address(hws, rq)); @@ -207,7 +207,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine) *batch++ = MI_NOOP; *batch++ = MI_BATCH_BUFFER_START | 1 << 8; *batch++ = lower_32_bits(vma->node.start); - } else if (INTEL_GEN(gt->i915) >= 4) { + } else if (GRAPHICS_VER(gt->i915) >= 4) { *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; *batch++ = 0; *batch++ = lower_32_bits(hws_address(hws, rq)); @@ -243,7 +243,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine) } flags = 0; - if (INTEL_GEN(gt->i915) <= 5) + if (GRAPHICS_VER(gt->i915) <= 5) flags |= I915_DISPATCH_SECURE; err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags); diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.c b/drivers/gpu/drm/i915/gt/selftest_llc.c index 94006f117bbd..459b775f163a 100644 --- a/drivers/gpu/drm/i915/gt/selftest_llc.c +++ b/drivers/gpu/drm/i915/gt/selftest_llc.c @@ -44,7 +44,7 @@ static int gen6_verify_ring_freq(struct intel_llc *llc) if (found != ia_freq) { pr_err("Min freq table(%d/[%d, %d]):%dMHz did not match expected CPU freq, found %d, expected %d\n", gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq, - intel_gpu_freq(rps, gpu_freq * (INTEL_GEN(i915) >= 9 ? GEN9_FREQ_SCALER : 1)), + intel_gpu_freq(rps, gpu_freq * (GRAPHICS_VER(i915) >= 9 ? 
GEN9_FREQ_SCALER : 1)), found, ia_freq); err = -EINVAL; break; @@ -54,7 +54,7 @@ static int gen6_verify_ring_freq(struct intel_llc *llc) if (found != ring_freq) { pr_err("Min freq table(%d/[%d, %d]):%dMHz did not match expected ring freq, found %d, expected %d\n", gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq, - intel_gpu_freq(rps, gpu_freq * (INTEL_GEN(i915) >= 9 ? GEN9_FREQ_SCALER : 1)), + intel_gpu_freq(rps, gpu_freq * (GRAPHICS_VER(i915) >= 9 ? GEN9_FREQ_SCALER : 1)), found, ring_freq); err = -EINVAL; break; diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index 85e7df6a5123..3119016d9910 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -584,7 +584,7 @@ static int __live_lrc_gpr(struct intel_engine_cs *engine, int err; int n; - if (INTEL_GEN(engine->i915) < 9 && engine->class != RENDER_CLASS) + if (GRAPHICS_VER(engine->i915) < 9 && engine->class != RENDER_CLASS) return 0; /* GPR only on rcs0 for gen8 */ err = gpr_make_dirty(engine->kernel_context); @@ -1221,7 +1221,9 @@ static int compare_isolation(struct intel_engine_cs *engine, } lrc = i915_gem_object_pin_map_unlocked(ce->state->obj, - i915_coherent_map_type(engine->i915)); + i915_coherent_map_type(engine->i915, + ce->state->obj, + false)); if (IS_ERR(lrc)) { err = PTR_ERR(lrc); goto err_B1; @@ -1387,10 +1389,10 @@ err_A: static bool skip_isolation(const struct intel_engine_cs *engine) { - if (engine->class == COPY_ENGINE_CLASS && INTEL_GEN(engine->i915) == 9) + if (engine->class == COPY_ENGINE_CLASS && GRAPHICS_VER(engine->i915) == 9) return true; - if (engine->class == RENDER_CLASS && INTEL_GEN(engine->i915) == 11) + if (engine->class == RENDER_CLASS && GRAPHICS_VER(engine->i915) == 11) return true; return false; @@ -1549,7 +1551,7 @@ static int __live_lrc_indirect_ctx_bb(struct intel_engine_cs *engine) /* We use the already reserved extra page in context state */ if (!a->wa_bb_page) { GEM_BUG_ON(b->wa_bb_page); - GEM_BUG_ON(INTEL_GEN(engine->i915) == 12); + GEM_BUG_ON(GRAPHICS_VER(engine->i915) == 12); goto unpin_b; } diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c index e55a887d11e2..b9bb0e6e97f7 100644 --- a/drivers/gpu/drm/i915/gt/selftest_mocs.c +++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c @@ -183,7 +183,7 @@ static bool mcr_range(struct drm_i915_private *i915, u32 offset) * which only controls CPU initiated MMIO. Routing does not * work for CS access so we cannot verify them on this path. 
*/ - return INTEL_GEN(i915) >= 8 && offset >= 0xb000 && offset <= 0xb4ff; + return GRAPHICS_VER(i915) >= 8 && offset >= 0xb000 && offset <= 0xb4ff; } static int check_l3cc_table(struct intel_engine_cs *engine, diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c index f097e420ac45..8c70b7e12074 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rc6.c +++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c @@ -34,6 +34,7 @@ int live_rc6_manual(void *arg) struct intel_rc6 *rc6 = >->rc6; u64 rc0_power, rc6_power; intel_wakeref_t wakeref; + bool has_power; ktime_t dt; u64 res[2]; int err = 0; @@ -50,6 +51,7 @@ int live_rc6_manual(void *arg) if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915)) return 0; + has_power = librapl_supported(gt->i915); wakeref = intel_runtime_pm_get(gt->uncore->rpm); /* Force RC6 off for starters */ @@ -71,11 +73,14 @@ int live_rc6_manual(void *arg) goto out_unlock; } - rc0_power = div64_u64(NSEC_PER_SEC * rc0_power, ktime_to_ns(dt)); - if (!rc0_power) { - pr_err("No power measured while in RC0\n"); - err = -EINVAL; - goto out_unlock; + if (has_power) { + rc0_power = div64_u64(NSEC_PER_SEC * rc0_power, + ktime_to_ns(dt)); + if (!rc0_power) { + pr_err("No power measured while in RC0\n"); + err = -EINVAL; + goto out_unlock; + } } /* Manually enter RC6 */ @@ -97,13 +102,16 @@ int live_rc6_manual(void *arg) err = -EINVAL; } - rc6_power = div64_u64(NSEC_PER_SEC * rc6_power, ktime_to_ns(dt)); - pr_info("GPU consumed %llduW in RC0 and %llduW in RC6\n", - rc0_power, rc6_power); - if (2 * rc6_power > rc0_power) { - pr_err("GPU leaked energy while in RC6!\n"); - err = -EINVAL; - goto out_unlock; + if (has_power) { + rc6_power = div64_u64(NSEC_PER_SEC * rc6_power, + ktime_to_ns(dt)); + pr_info("GPU consumed %llduW in RC0 and %llduW in RC6\n", + rc0_power, rc6_power); + if (2 * rc6_power > rc0_power) { + pr_err("GPU leaked energy while in RC6!\n"); + err = -EINVAL; + goto out_unlock; + } } /* Restore what should have been the original state! */ @@ -132,7 +140,7 @@ static const u32 *__live_rc6_ctx(struct intel_context *ce) } cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT; - if (INTEL_GEN(rq->engine->i915) >= 8) + if (GRAPHICS_VER(rq->engine->i915) >= 8) cmd++; *cs++ = cmd; @@ -185,7 +193,7 @@ int live_rc6_ctx_wa(void *arg) int err = 0; /* A read of CTX_INFO upsets rc6. Poke the bear! */ - if (INTEL_GEN(gt->i915) < 8) + if (GRAPHICS_VER(gt->i915) < 8) return 0; engines = randomised_engines(gt, &prng, &count); diff --git a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c index 99609271c3a7..041954408d0f 100644 --- a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c @@ -41,10 +41,10 @@ static struct i915_vma *create_wally(struct intel_engine_cs *engine) return ERR_CAST(cs); } - if (INTEL_GEN(engine->i915) >= 6) { + if (GRAPHICS_VER(engine->i915) >= 6) { *cs++ = MI_STORE_DWORD_IMM_GEN4; *cs++ = 0; - } else if (INTEL_GEN(engine->i915) >= 4) { + } else if (GRAPHICS_VER(engine->i915) >= 4) { *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; *cs++ = 0; } else { @@ -266,7 +266,7 @@ static int live_ctx_switch_wa(void *arg) if (!intel_engine_can_store_dword(engine)) continue; - if (IS_GEN_RANGE(gt->i915, 4, 5)) + if (IS_GRAPHICS_VER(gt->i915, 4, 5)) continue; /* MI_STORE_DWORD is privileged! 
*/ saved_wa = fetch_and_zero(&engine->wa_ctx.vma); @@ -291,7 +291,7 @@ int intel_ring_submission_live_selftests(struct drm_i915_private *i915) SUBTEST(live_ctx_switch_wa), }; - if (HAS_EXECLISTS(i915)) + if (i915->gt.submission_method > INTEL_SUBMISSION_RING) return 0; return intel_gt_live_subtests(tests, &i915->gt); diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index 967641fee42a..7ee2513e15f9 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -204,7 +204,7 @@ static void show_pstate_limits(struct intel_rps *rps) i915_mmio_reg_offset(BXT_RP_STATE_CAP), intel_uncore_read(rps_to_uncore(rps), BXT_RP_STATE_CAP)); - } else if (IS_GEN(i915, 9)) { + } else if (GRAPHICS_VER(i915) == 9) { pr_info("P_STATE_LIMITS[%x]: 0x%08x\n", i915_mmio_reg_offset(GEN9_RP_STATE_LIMITS), intel_uncore_read(rps_to_uncore(rps), @@ -222,7 +222,7 @@ int live_rps_clock_interval(void *arg) struct igt_spinner spin; int err = 0; - if (!intel_rps_is_enabled(rps) || INTEL_GEN(gt->i915) < 6) + if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6) return 0; if (igt_spinner_init(&spin, gt)) @@ -506,7 +506,7 @@ static void show_pcu_config(struct intel_rps *rps) min_gpu_freq = rps->min_freq; max_gpu_freq = rps->max_freq; - if (INTEL_GEN(i915) >= 9) { + if (GRAPHICS_VER(i915) >= 9) { /* Convert GT frequency to 50 HZ units */ min_gpu_freq /= GEN9_FREQ_SCALER; max_gpu_freq /= GEN9_FREQ_SCALER; @@ -606,7 +606,7 @@ int live_rps_frequency_cs(void *arg) int err = 0; /* - * The premise is that the GPU does change freqency at our behest. + * The premise is that the GPU does change frequency at our behest. * Let's check there is a correspondence between the requested * frequency, the actual frequency, and the observed clock rate. */ @@ -614,7 +614,7 @@ int live_rps_frequency_cs(void *arg) if (!intel_rps_is_enabled(rps)) return 0; - if (INTEL_GEN(gt->i915) < 8) /* for CS simplicity */ + if (GRAPHICS_VER(gt->i915) < 8) /* for CS simplicity */ return 0; if (CPU_LATENCY >= 0) @@ -747,7 +747,7 @@ int live_rps_frequency_srm(void *arg) int err = 0; /* - * The premise is that the GPU does change freqency at our behest. + * The premise is that the GPU does change frequency at our behest. * Let's check there is a correspondence between the requested * frequency, the actual frequency, and the observed clock rate. */ @@ -755,7 +755,7 @@ int live_rps_frequency_srm(void *arg) if (!intel_rps_is_enabled(rps)) return 0; - if (INTEL_GEN(gt->i915) < 8) /* for CS simplicity */ + if (GRAPHICS_VER(gt->i915) < 8) /* for CS simplicity */ return 0; if (CPU_LATENCY >= 0) @@ -1031,7 +1031,7 @@ int live_rps_interrupt(void *arg) * First, let's check whether or not we are receiving interrupts. */ - if (!intel_rps_has_interrupts(rps) || INTEL_GEN(gt->i915) < 6) + if (!intel_rps_has_interrupts(rps) || GRAPHICS_VER(gt->i915) < 6) return 0; intel_gt_pm_get(gt); @@ -1136,10 +1136,10 @@ int live_rps_power(void *arg) * that theory. */ - if (!intel_rps_is_enabled(rps) || INTEL_GEN(gt->i915) < 6) + if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6) return 0; - if (!librapl_energy_uJ()) + if (!librapl_supported(gt->i915)) return 0; if (igt_spinner_init(&spin, gt)) @@ -1240,7 +1240,7 @@ int live_rps_dynamic(void *arg) * moving parts into dynamic reclocking based on load. 
*/ - if (!intel_rps_is_enabled(rps) || INTEL_GEN(gt->i915) < 6) + if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6) return 0; if (igt_spinner_init(&spin, gt)) diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c index 9adbd9d147be..64da0c91dec1 100644 --- a/drivers/gpu/drm/i915/gt/selftest_timeline.c +++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c @@ -457,12 +457,12 @@ static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value) if (IS_ERR(cs)) return PTR_ERR(cs); - if (INTEL_GEN(rq->engine->i915) >= 8) { + if (GRAPHICS_VER(rq->engine->i915) >= 8) { *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; *cs++ = addr; *cs++ = 0; *cs++ = value; - } else if (INTEL_GEN(rq->engine->i915) >= 4) { + } else if (GRAPHICS_VER(rq->engine->i915) >= 4) { *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; *cs++ = 0; *cs++ = addr; @@ -992,7 +992,7 @@ static int live_hwsp_read(void *arg) * even across multiple wraps. */ - if (INTEL_GEN(gt->i915) < 8) /* CS convenience [SRM/LRM] */ + if (GRAPHICS_VER(gt->i915) < 8) /* CS convenience [SRM/LRM] */ return 0; tl = intel_timeline_create(gt); diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index 19850489a3fc..c30754daf4b1 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -145,7 +145,7 @@ read_nonprivs(struct intel_context *ce) goto err_req; srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; - if (INTEL_GEN(engine->i915) >= 8) + if (GRAPHICS_VER(engine->i915) >= 8) srm++; cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS); @@ -546,7 +546,7 @@ retry: srm = MI_STORE_REGISTER_MEM; lrm = MI_LOAD_REGISTER_MEM; - if (INTEL_GEN(engine->i915) >= 8) + if (GRAPHICS_VER(engine->i915) >= 8) lrm++, srm++; pr_debug("%s: Writing garbage to %x\n", @@ -749,7 +749,7 @@ static int live_dirty_whitelist(void *arg) /* Can the user write to the whitelisted registers? */ - if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */ + if (GRAPHICS_VER(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */ return 0; for_each_engine(engine, gt, id) { @@ -829,7 +829,7 @@ static int read_whitelisted_registers(struct intel_context *ce, goto err_req; srm = MI_STORE_REGISTER_MEM; - if (INTEL_GEN(engine->i915) >= 8) + if (GRAPHICS_VER(engine->i915) >= 8) srm++; cs = intel_ring_begin(rq, 4 * engine->whitelist.count); @@ -927,7 +927,7 @@ err_batch: struct regmask { i915_reg_t reg; - unsigned long gen_mask; + u8 graphics_ver; }; static bool find_reg(struct drm_i915_private *i915, @@ -938,7 +938,7 @@ static bool find_reg(struct drm_i915_private *i915, u32 offset = i915_mmio_reg_offset(reg); while (count--) { - if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask && + if (GRAPHICS_VER(i915) == tbl->graphics_ver && i915_mmio_reg_offset(tbl->reg) == offset) return true; tbl++; @@ -951,8 +951,8 @@ static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg) { /* Alas, we must pardon some whitelists. 
Mistakes already made */ static const struct regmask pardon[] = { - { GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) }, - { GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) }, + { GEN9_CTX_PREEMPT_REG, 9 }, + { GEN8_L3SQCREG4, 9 }, }; return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon)); @@ -974,7 +974,7 @@ static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg) { /* Some registers do not seem to behave and our writes unreadable */ static const struct regmask wo[] = { - { GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) }, + { GEN9_SLICE_COMMON_ECO_CHICKEN1, 9 }, }; return find_reg(i915, reg, wo, ARRAY_SIZE(wo)); diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c index f8f02aab842b..0683b27a3890 100644 --- a/drivers/gpu/drm/i915/gt/shmem_utils.c +++ b/drivers/gpu/drm/i915/gt/shmem_utils.c @@ -8,6 +8,7 @@ #include <linux/shmem_fs.h> #include "gem/i915_gem_object.h" +#include "gem/i915_gem_lmem.h" #include "shmem_utils.h" struct file *shmem_create_from_data(const char *name, void *data, size_t len) @@ -39,7 +40,8 @@ struct file *shmem_create_from_object(struct drm_i915_gem_object *obj) return file; } - ptr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB); + ptr = i915_gem_object_pin_map_unlocked(obj, i915_gem_object_is_lmem(obj) ? + I915_MAP_WC : I915_MAP_WB); if (IS_ERR(ptr)) return ERR_CAST(ptr); diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h new file mode 100644 index 000000000000..90efef8a73e4 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2014-2021 Intel Corporation + */ + +#ifndef _ABI_GUC_ACTIONS_ABI_H +#define _ABI_GUC_ACTIONS_ABI_H + +enum intel_guc_action { + INTEL_GUC_ACTION_DEFAULT = 0x0, + INTEL_GUC_ACTION_REQUEST_PREEMPTION = 0x2, + INTEL_GUC_ACTION_REQUEST_ENGINE_RESET = 0x3, + INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10, + INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20, + INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30, + INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x40, + INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302, + INTEL_GUC_ACTION_ENTER_S_STATE = 0x501, + INTEL_GUC_ACTION_EXIT_S_STATE = 0x502, + INTEL_GUC_ACTION_SLPC_REQUEST = 0x3003, + INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000, + INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505, + INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506, + INTEL_GUC_ACTION_LIMIT +}; + +enum intel_guc_preempt_options { + INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q = 0x4, + INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q = 0x8, +}; + +enum intel_guc_report_status { + INTEL_GUC_REPORT_STATUS_UNKNOWN = 0x0, + INTEL_GUC_REPORT_STATUS_ACKED = 0x1, + INTEL_GUC_REPORT_STATUS_ERROR = 0x2, + INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4, +}; + +enum intel_guc_sleep_state_status { + INTEL_GUC_SLEEP_STATE_SUCCESS = 0x1, + INTEL_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x2, + INTEL_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x3 +#define INTEL_GUC_SLEEP_STATE_INVALID_MASK 0x80000000 +}; + +#define GUC_LOG_CONTROL_LOGGING_ENABLED (1 << 0) +#define GUC_LOG_CONTROL_VERBOSITY_SHIFT 4 +#define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT) +#define GUC_LOG_CONTROL_DEFAULT_LOGGING (1 << 8) + +#endif /* _ABI_GUC_ACTIONS_ABI_H */ diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h new file mode 100644 index 000000000000..d38935f47ecf --- /dev/null +++ 
b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2014-2021 Intel Corporation + */ + +#ifndef _ABI_GUC_COMMUNICATION_CTB_ABI_H +#define _ABI_GUC_COMMUNICATION_CTB_ABI_H + +#include <linux/types.h> + +/** + * DOC: CTB based communication + * + * The CTB (command transport buffer) communication between Host and GuC + * is based on a u32 data stream written to the shared buffer. One buffer can + * be used to transmit data only in one direction (one-directional channel). + * + * The current status of each buffer is stored in the buffer descriptor. + * The buffer descriptor holds tail and head fields that represent the active + * data stream. The tail field is updated by the data producer (sender), and + * the head field is updated by the data consumer (receiver):: + * + * +------------+ + * | DESCRIPTOR | +=================+============+========+ + * +============+ | | MESSAGE(s) | | + * | address |--------->+=================+============+========+ + * +------------+ + * | head | ^-----head--------^ + * +------------+ + * | tail | ^---------tail-----------------^ + * +------------+ + * | size | ^---------------size--------------------^ + * +------------+ + * + * Each message in the data stream starts with a single u32 treated as a + * header, followed by an optional set of u32 data that makes up the + * message-specific payload:: + * + * +------------+---------+---------+---------+ + * | MESSAGE | + * +------------+---------+---------+---------+ + * | msg[0] | [1] | ... | [n-1] | + * +------------+---------+---------+---------+ + * | MESSAGE | MESSAGE PAYLOAD | + * + HEADER +---------+---------+---------+ + * | | 0 | ... | n | + * +======+=====+=========+=========+=========+ + * | 31:16| code| | | | + * +------+-----+ | | | + * | 15:5|flags| | | | + * +------+-----+ | | | + * | 4:0| len| | | | + * +------+-----+---------+---------+---------+ + * + * ^-------------len-------------^ + * + * The message header consists of: + * + * - **len**, indicates the length of the message payload (in u32) + * - **code**, indicates the message code + * - **flags**, holds various bits to control message handling + */ + +/* + * Describes a single command transport buffer. + * Used by both guc-master and clients. 
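+ *
+ * (Editorial aside: head and tail make this a single-producer/
+ * single-consumer ring. In linux/circ_buf.h terms, with the CTB tail
+ * as the producer index, the sender may write at most
+ * CIRC_SPACE(tail, head, size) and the receiver drains while
+ * head != tail.)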
+ */ +struct guc_ct_buffer_desc { + u32 addr; /* gfx address */ + u64 host_private; /* host private data */ + u32 size; /* size in bytes */ + u32 head; /* offset updated by GuC */ + u32 tail; /* offset updated by owner */ + u32 is_in_error; /* error indicator */ + u32 reserved1; + u32 reserved2; + u32 owner; /* id of the channel owner */ + u32 owner_sub_id; /* owner-defined field for extra tracking */ + u32 reserved[5]; +} __packed; + +/* Type of command transport buffer */ +#define INTEL_GUC_CT_BUFFER_TYPE_SEND 0x0u +#define INTEL_GUC_CT_BUFFER_TYPE_RECV 0x1u + +/* + * Definition of the command transport message header (DW0) + * + * bit[4..0] message len (in dwords) + * bit[7..5] reserved + * bit[8] response (G2H only) + * bit[8] write fence to desc (H2G only) + * bit[9] write status to H2G buff (H2G only) + * bit[10] send status back via G2H (H2G only) + * bit[15..11] reserved + * bit[31..16] action code + */ +#define GUC_CT_MSG_LEN_SHIFT 0 +#define GUC_CT_MSG_LEN_MASK 0x1F +#define GUC_CT_MSG_IS_RESPONSE (1 << 8) +#define GUC_CT_MSG_WRITE_FENCE_TO_DESC (1 << 8) +#define GUC_CT_MSG_WRITE_STATUS_TO_BUFF (1 << 9) +#define GUC_CT_MSG_SEND_STATUS (1 << 10) +#define GUC_CT_MSG_ACTION_SHIFT 16 +#define GUC_CT_MSG_ACTION_MASK 0xFFFF + +#endif /* _ABI_GUC_COMMUNICATION_CTB_ABI_H */ diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h new file mode 100644 index 000000000000..be066a62e9e0 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2014-2021 Intel Corporation + */ + +#ifndef _ABI_GUC_COMMUNICATION_MMIO_ABI_H +#define _ABI_GUC_COMMUNICATION_MMIO_ABI_H + +/** + * DOC: MMIO based communication + * + * The MMIO based communication between Host and GuC uses software scratch + * registers, where the first register holds data treated as the message + * header, and the remaining registers hold the message payload. + * + * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8, + * but no H2G command takes more than 8 parameters and the GuC FW + * itself uses an 8-element array to store the H2G message. + * + * +-----------+---------+---------+---------+ + * | MMIO[0] | MMIO[1] | ... | MMIO[n] | + * +-----------+---------+---------+---------+ + * | header | optional payload | + * +======+====+=========+=========+=========+ + * | 31:28|type| | | | + * +------+----+ | | | + * | 27:16|data| | | | + * +------+----+ | | | + * | 15:0|code| | | | + * +------+----+---------+---------+---------+ + * + * The message header consists of: + * + * - **type**, indicates the message type + * - **code**, indicates the message code, which is specific to **type** + * - **data**, indicates the message data; optional, depends on **code** + * + * The following message **types** are supported: + * + * - **REQUEST**, indicates a Host-to-GuC request; the requested GuC action code + * must be provided in the **code** field. Optional action-specific parameters + * can be provided in the remaining payload registers or the **data** field. + * + * - **RESPONSE**, indicates a GuC-to-Host response to an earlier GuC request; + * the action response status will be provided in the **code** field. Optional + * response data can be returned in the remaining payload registers or the + * **data** field. 
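+ *
+ * (Editorial example: with the masks from guc_messages_abi.h below, a
+ * REQUEST header for INTEL_GUC_ACTION_AUTHENTICATE_HUC (0x4000) is
+ * (INTEL_GUC_MSG_TYPE_REQUEST << INTEL_GUC_MSG_TYPE_SHIFT) | 0x4000,
+ * i.e. type in bits 31:28 and the action code in bits 15:0.)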
+ */ + +#define GUC_MAX_MMIO_MSG_LEN 8 + +#endif /* _ABI_GUC_COMMUNICATION_MMIO_ABI_H */ diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h new file mode 100644 index 000000000000..488b6061ee89 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2014-2021 Intel Corporation + */ + +#ifndef _ABI_GUC_ERRORS_ABI_H +#define _ABI_GUC_ERRORS_ABI_H + +enum intel_guc_response_status { + INTEL_GUC_RESPONSE_STATUS_SUCCESS = 0x0, + INTEL_GUC_RESPONSE_STATUS_GENERIC_FAIL = 0xF000, +}; + +#endif /* _ABI_GUC_ERRORS_ABI_H */ diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_messages_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_messages_abi.h new file mode 100644 index 000000000000..775e21f3058c --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_messages_abi.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2014-2021 Intel Corporation + */ + +#ifndef _ABI_GUC_MESSAGES_ABI_H +#define _ABI_GUC_MESSAGES_ABI_H + +#define INTEL_GUC_MSG_TYPE_SHIFT 28 +#define INTEL_GUC_MSG_TYPE_MASK (0xF << INTEL_GUC_MSG_TYPE_SHIFT) +#define INTEL_GUC_MSG_DATA_SHIFT 16 +#define INTEL_GUC_MSG_DATA_MASK (0xFFF << INTEL_GUC_MSG_DATA_SHIFT) +#define INTEL_GUC_MSG_CODE_SHIFT 0 +#define INTEL_GUC_MSG_CODE_MASK (0xFFFF << INTEL_GUC_MSG_CODE_SHIFT) + +enum intel_guc_msg_type { + INTEL_GUC_MSG_TYPE_REQUEST = 0x0, + INTEL_GUC_MSG_TYPE_RESPONSE = 0xF, +}; + +#endif /* _ABI_GUC_MESSAGES_ABI_H */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 78305b2ec89d..f147cb389a20 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -60,15 +60,8 @@ void intel_guc_init_send_regs(struct intel_guc *guc) enum forcewake_domains fw_domains = 0; unsigned int i; - if (INTEL_GEN(gt->i915) >= 11) { - guc->send_regs.base = - i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0)); - guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT; - } else { - guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0)); - guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN; - BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT); - } + GEM_BUG_ON(!guc->send_regs.base); + GEM_BUG_ON(!guc->send_regs.count); for (i = 0; i < guc->send_regs.count; i++) { fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore, @@ -96,12 +89,9 @@ static void gen9_enable_guc_interrupts(struct intel_guc *guc) assert_rpm_wakelock_held(>->i915->runtime_pm); spin_lock_irq(>->irq_lock); - if (!guc->interrupts.enabled) { - WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) & - gt->pm_guc_events); - guc->interrupts.enabled = true; - gen6_gt_pm_enable_irq(gt, gt->pm_guc_events); - } + WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) & + gt->pm_guc_events); + gen6_gt_pm_enable_irq(gt, gt->pm_guc_events); spin_unlock_irq(>->irq_lock); } @@ -112,7 +102,6 @@ static void gen9_disable_guc_interrupts(struct intel_guc *guc) assert_rpm_wakelock_held(>->i915->runtime_pm); spin_lock_irq(>->irq_lock); - guc->interrupts.enabled = false; gen6_gt_pm_disable_irq(gt, gt->pm_guc_events); @@ -134,18 +123,14 @@ static void gen11_reset_guc_interrupts(struct intel_guc *guc) static void gen11_enable_guc_interrupts(struct intel_guc *guc) { struct intel_gt *gt = guc_to_gt(guc); + u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST); spin_lock_irq(>->irq_lock); - if (!guc->interrupts.enabled) { - u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST); - - 
WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC)); - intel_uncore_write(gt->uncore, - GEN11_GUC_SG_INTR_ENABLE, events); - intel_uncore_write(gt->uncore, - GEN11_GUC_SG_INTR_MASK, ~events); - guc->interrupts.enabled = true; - } + WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC)); + intel_uncore_write(gt->uncore, + GEN11_GUC_SG_INTR_ENABLE, events); + intel_uncore_write(gt->uncore, + GEN11_GUC_SG_INTR_MASK, ~events); spin_unlock_irq(&gt->irq_lock); } @@ -154,7 +139,6 @@ static void gen11_disable_guc_interrupts(struct intel_guc *guc) { struct intel_gt *gt = guc_to_gt(guc); spin_lock_irq(&gt->irq_lock); - guc->interrupts.enabled = false; intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0); intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0); @@ -176,16 +160,23 @@ void intel_guc_init_early(struct intel_guc *guc) mutex_init(&guc->send_mutex); spin_lock_init(&guc->irq_lock); - if (INTEL_GEN(i915) >= 11) { + if (GRAPHICS_VER(i915) >= 11) { guc->notify_reg = GEN11_GUC_HOST_INTERRUPT; guc->interrupts.reset = gen11_reset_guc_interrupts; guc->interrupts.enable = gen11_enable_guc_interrupts; guc->interrupts.disable = gen11_disable_guc_interrupts; + guc->send_regs.base = + i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0)); + guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT; + } else { guc->notify_reg = GUC_SEND_INTERRUPT; guc->interrupts.reset = gen9_reset_guc_interrupts; guc->interrupts.enable = gen9_enable_guc_interrupts; guc->interrupts.disable = gen9_disable_guc_interrupts; + guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0)); + guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN; + BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT); } } @@ -469,22 +460,6 @@ int intel_guc_to_host_process_recv_msg(struct intel_guc *guc, return 0; } -int intel_guc_sample_forcewake(struct intel_guc *guc) -{ - struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915; - u32 action[2]; - - action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE; - /* WaRsDisableCoarsePowerGating:skl,cnl */ - if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv)) - action[1] = 0; - else - /* bit 0 and 1 are for Render and Media domain separately */ - action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA; - - return intel_guc_send(guc, action, ARRAY_SIZE(action)); -} - /** * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode * @guc: intel_guc structure @@ -682,7 +657,9 @@ int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size, if (IS_ERR(vma)) return PTR_ERR(vma); - vaddr = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WB); + vaddr = i915_gem_object_pin_map_unlocked(vma->obj, + i915_coherent_map_type(guc_to_gt(guc)->i915, + vma->obj, true)); if (IS_ERR(vaddr)) { i915_vma_unpin_and_release(&vma, 0); return PTR_ERR(vaddr); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index bc2ba7d0626c..4abc59f6f3cd 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -33,7 +33,6 @@ struct intel_guc { unsigned int msg_enabled_mask; struct { - bool enabled; void (*reset)(struct intel_guc *guc); void (*enable)(struct intel_guc *guc); void (*disable)(struct intel_guc *guc); @@ -128,7 +127,6 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, u32 *response_buf, u32 response_buf_size); int intel_guc_to_host_process_recv_msg(struct intel_guc *guc, const u32 *payload, u32 len); -int intel_guc_sample_forcewake(struct intel_guc *guc); int intel_guc_auth_huc(struct intel_guc *guc, u32
rsa_offset); int intel_guc_suspend(struct intel_guc *guc); int intel_guc_resume(struct intel_guc *guc); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c index 17526717368c..9abfbc6edbd6 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c @@ -6,6 +6,7 @@ #include "gt/intel_gt.h" #include "gt/intel_lrc.h" #include "intel_guc_ads.h" +#include "intel_guc_fwif.h" #include "intel_uc.h" #include "i915_drv.h" @@ -104,7 +105,7 @@ static void guc_mapping_table_init(struct intel_gt *gt, GUC_MAX_INSTANCES_PER_CLASS; for_each_engine(engine, gt, id) { - u8 guc_class = engine->class; + u8 guc_class = engine_class_to_guc_class(engine->class); system_info->mapping_table[guc_class][engine->instance] = engine->instance; @@ -124,7 +125,7 @@ static void __guc_ads_init(struct intel_guc *guc) struct __guc_ads_blob *blob = guc->ads_blob; const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE; u32 base; - u8 engine_class; + u8 engine_class, guc_class; /* GuC scheduling policies */ guc_policies_init(&blob->policies); @@ -140,29 +141,32 @@ static void __guc_ads_init(struct intel_guc *guc) for (engine_class = 0; engine_class <= MAX_ENGINE_CLASS; ++engine_class) { if (engine_class == OTHER_CLASS) continue; + + guc_class = engine_class_to_guc_class(engine_class); + /* * TODO: Set context pointer to default state to allow * GuC to re-init guilty contexts after internal reset. */ - blob->ads.golden_context_lrca[engine_class] = 0; - blob->ads.eng_state_size[engine_class] = + blob->ads.golden_context_lrca[guc_class] = 0; + blob->ads.eng_state_size[guc_class] = intel_engine_context_size(guc_to_gt(guc), engine_class) - skipped_size; } /* System info */ - blob->system_info.engine_enabled_masks[RENDER_CLASS] = 1; - blob->system_info.engine_enabled_masks[COPY_ENGINE_CLASS] = 1; - blob->system_info.engine_enabled_masks[VIDEO_DECODE_CLASS] = VDBOX_MASK(gt); - blob->system_info.engine_enabled_masks[VIDEO_ENHANCEMENT_CLASS] = VEBOX_MASK(gt); + blob->system_info.engine_enabled_masks[GUC_RENDER_CLASS] = 1; + blob->system_info.engine_enabled_masks[GUC_BLITTER_CLASS] = 1; + blob->system_info.engine_enabled_masks[GUC_VIDEO_CLASS] = VDBOX_MASK(gt); + blob->system_info.engine_enabled_masks[GUC_VIDEOENHANCE_CLASS] = VEBOX_MASK(gt); blob->system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_SLICE_ENABLED] = hweight8(gt->info.sseu.slice_mask); blob->system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_VDBOX_SFC_SUPPORT_MASK] = gt->info.vdbox_sfc_access; - if (INTEL_GEN(i915) >= 12 && !IS_DGFX(i915)) { + if (GRAPHICS_VER(i915) >= 12 && !IS_DGFX(i915)) { u32 distdbreg = intel_uncore_read(gt->uncore, GEN12_DIST_DBS_POPULATED); blob->system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_DOORBELL_COUNT_PER_SQIDI] = diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c index fa9e048cc65f..8f7b148fef58 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c @@ -7,14 +7,62 @@ #include "intel_guc_ct.h" #include "gt/intel_gt.h" +static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct) +{ + return container_of(ct, struct intel_guc, ct); +} + +static inline struct intel_gt *ct_to_gt(struct intel_guc_ct *ct) +{ + return guc_to_gt(ct_to_guc(ct)); +} + +static inline struct drm_i915_private *ct_to_i915(struct intel_guc_ct *ct) +{ + return ct_to_gt(ct)->i915; +} + +static inline struct drm_device *ct_to_drm(struct intel_guc_ct *ct) +{ 
+ return &ct_to_i915(ct)->drm; +} + #define CT_ERROR(_ct, _fmt, ...) \ - DRM_DEV_ERROR(ct_to_dev(_ct), "CT: " _fmt, ##__VA_ARGS__) + drm_err(ct_to_drm(_ct), "CT: " _fmt, ##__VA_ARGS__) #ifdef CONFIG_DRM_I915_DEBUG_GUC #define CT_DEBUG(_ct, _fmt, ...) \ - DRM_DEV_DEBUG_DRIVER(ct_to_dev(_ct), "CT: " _fmt, ##__VA_ARGS__) + drm_dbg(ct_to_drm(_ct), "CT: " _fmt, ##__VA_ARGS__) #else #define CT_DEBUG(...) do { } while (0) #endif +#define CT_PROBE_ERROR(_ct, _fmt, ...) \ + i915_probe_error(ct_to_i915(ct), "CT: " _fmt, ##__VA_ARGS__) + +/** + * DOC: CTB Blob + * + * We allocate a single blob to hold both CTB descriptors and buffers: + * + * +--------+-----------------------------------------------+------+ + * | offset | contents | size | + * +========+===============================================+======+ + * | 0x0000 | H2G `CTB Descriptor`_ (send) | | + * +--------+-----------------------------------------------+ 4K | + * | 0x0800 | G2H `CTB Descriptor`_ (recv) | | + * +--------+-----------------------------------------------+------+ + * | 0x1000 | H2G `CT Buffer`_ (send) | n*4K | + * | | | | + * +--------+-----------------------------------------------+------+ + * | 0x1000 | G2H `CT Buffer`_ (recv) | m*4K | + * | + n*4K | | | + * +--------+-----------------------------------------------+------+ + * + * Size of each `CT Buffer`_ must be a multiple of 4K. + * As we don't expect too many messages, for now use minimum sizes. + */ +#define CTB_DESC_SIZE ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K) +#define CTB_H2G_BUFFER_SIZE (SZ_4K) +#define CTB_G2H_BUFFER_SIZE (SZ_4K) struct ct_request { struct list_head link; @@ -24,8 +72,9 @@ struct ct_request { u32 *response_buf; }; -struct ct_incoming_request { +struct ct_incoming_msg { struct list_head link; + u32 size; u32 msg[]; }; @@ -33,6 +82,7 @@ enum { CTB_SEND = 0, CTB_RECV = 1 }; enum { CTB_OWNER_HOST = 0 }; +static void ct_receive_tasklet_func(struct tasklet_struct *t); static void ct_incoming_request_worker_func(struct work_struct *w); /** @@ -41,30 +91,13 @@ static void ct_incoming_request_worker_func(struct work_struct *w); */ void intel_guc_ct_init_early(struct intel_guc_ct *ct) { + spin_lock_init(&ct->ctbs.send.lock); + spin_lock_init(&ct->ctbs.recv.lock); spin_lock_init(&ct->requests.lock); INIT_LIST_HEAD(&ct->requests.pending); INIT_LIST_HEAD(&ct->requests.incoming); INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func); -} - -static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct) -{ - return container_of(ct, struct intel_guc, ct); -} - -static inline struct intel_gt *ct_to_gt(struct intel_guc_ct *ct) -{ - return guc_to_gt(ct_to_guc(ct)); -} - -static inline struct drm_i915_private *ct_to_i915(struct intel_guc_ct *ct) -{ - return ct_to_gt(ct)->i915; -} - -static inline struct device *ct_to_dev(struct intel_guc_ct *ct) -{ - return ct_to_i915(ct)->drm.dev; + tasklet_setup(&ct->receive_tasklet, ct_receive_tasklet_func); } static inline const char *guc_ct_buffer_type_to_str(u32 type) @@ -88,11 +121,22 @@ static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc, desc->owner = CTB_OWNER_HOST; } -static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc) +static void guc_ct_buffer_reset(struct intel_guc_ct_buffer *ctb, u32 cmds_addr) { - desc->head = 0; - desc->tail = 0; - desc->is_in_error = 0; + guc_ct_buffer_desc_init(ctb->desc, cmds_addr, ctb->size); +} + +static void guc_ct_buffer_init(struct intel_guc_ct_buffer *ctb, + struct guc_ct_buffer_desc *desc, + u32 *cmds, u32 size) +{ + GEM_BUG_ON(size % 4); +
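A minimal sketch of the offset arithmetic implied by the DOC: CTB Blob table above (the local names are illustrative; intel_guc_ct_init() below computes the same values inline):

/* Offsets into the single CTB blob, per the layout table above */
const u32 h2g_desc_offset = 0;
const u32 g2h_desc_offset = CTB_DESC_SIZE;
const u32 h2g_cmds_offset = 2 * CTB_DESC_SIZE;
const u32 g2h_cmds_offset = 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE;
const u32 blob_size = 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE +
		      CTB_G2H_BUFFER_SIZE;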
+ ctb->desc = desc; + ctb->cmds = cmds; + ctb->size = size; + + guc_ct_buffer_reset(ctb, 0); } static int guc_action_register_ct_buffer(struct intel_guc *guc, @@ -153,48 +197,42 @@ static int ct_deregister_buffer(struct intel_guc_ct *ct, u32 type) int intel_guc_ct_init(struct intel_guc_ct *ct) { struct intel_guc *guc = ct_to_guc(ct); + struct guc_ct_buffer_desc *desc; + u32 blob_size; + u32 cmds_size; void *blob; + u32 *cmds; int err; - int i; GEM_BUG_ON(ct->vma); - /* We allocate 1 page to hold both descriptors and both buffers. - * ___________..................... - * |desc (SEND)| : - * |___________| PAGE/4 - * :___________....................: - * |desc (RECV)| : - * |___________| PAGE/4 - * :_______________________________: - * |cmds (SEND) | - * | PAGE/4 - * |_______________________________| - * |cmds (RECV) | - * | PAGE/4 - * |_______________________________| - * - * Each message can use a maximum of 32 dwords and we don't expect to - * have more than 1 in flight at any time, so we have enough space. - * Some logic further ahead will rely on the fact that there is only 1 - * page and that it is always mapped, so if the size is changed the - * other code will need updating as well. - */ - - err = intel_guc_allocate_and_map_vma(guc, PAGE_SIZE, &ct->vma, &blob); + blob_size = 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE + CTB_G2H_BUFFER_SIZE; + err = intel_guc_allocate_and_map_vma(guc, blob_size, &ct->vma, &blob); if (unlikely(err)) { - CT_ERROR(ct, "Failed to allocate CT channel (err=%d)\n", err); + CT_PROBE_ERROR(ct, "Failed to allocate %u for CTB data (%pe)\n", + blob_size, ERR_PTR(err)); return err; } - CT_DEBUG(ct, "vma base=%#x\n", intel_guc_ggtt_offset(guc, ct->vma)); + CT_DEBUG(ct, "base=%#x size=%u\n", intel_guc_ggtt_offset(guc, ct->vma), blob_size); - /* store pointers to desc and cmds */ - for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) { - GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV)); - ct->ctbs[i].desc = blob + PAGE_SIZE/4 * i; - ct->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2; - } + /* store pointers to desc and cmds for send ctb */ + desc = blob; + cmds = blob + 2 * CTB_DESC_SIZE; + cmds_size = CTB_H2G_BUFFER_SIZE; + CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u\n", "send", + ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size); + + guc_ct_buffer_init(&ct->ctbs.send, desc, cmds, cmds_size); + + /* store pointers to desc and cmds for recv ctb */ + desc = blob + CTB_DESC_SIZE; + cmds = blob + 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE; + cmds_size = CTB_G2H_BUFFER_SIZE; + CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u\n", "recv", + ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size); + + guc_ct_buffer_init(&ct->ctbs.recv, desc, cmds, cmds_size); return 0; } @@ -209,6 +247,7 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct) { GEM_BUG_ON(ct->enabled); + tasklet_kill(&ct->receive_tasklet); i915_vma_unpin_and_release(&ct->vma, I915_VMA_RELEASE_MAP); memset(ct, 0, sizeof(*ct)); } @@ -222,37 +261,38 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct) int intel_guc_ct_enable(struct intel_guc_ct *ct) { struct intel_guc *guc = ct_to_guc(ct); - u32 base, cmds, size; + u32 base, cmds; + void *blob; int err; - int i; GEM_BUG_ON(ct->enabled); /* vma should be already allocated and map'ed */ GEM_BUG_ON(!ct->vma); + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(ct->vma->obj)); base = intel_guc_ggtt_offset(guc, ct->vma); - /* (re)initialize descriptors - * cmds buffers are in the second half of the blob page - */ - for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) { - GEM_BUG_ON((i != CTB_SEND) && (i 
!= CTB_RECV)); - cmds = base + PAGE_SIZE / 4 * i + PAGE_SIZE / 2; - size = PAGE_SIZE / 4; - CT_DEBUG(ct, "%d: addr=%#x size=%u\n", i, cmds, size); - guc_ct_buffer_desc_init(ct->ctbs[i].desc, cmds, size); - } + /* blob should start with send descriptor */ + blob = __px_vaddr(ct->vma->obj); + GEM_BUG_ON(blob != ct->ctbs.send.desc); + + /* (re)initialize descriptors */ + cmds = base + ptrdiff(ct->ctbs.send.cmds, blob); + guc_ct_buffer_reset(&ct->ctbs.send, cmds); + + cmds = base + ptrdiff(ct->ctbs.recv.cmds, blob); + guc_ct_buffer_reset(&ct->ctbs.recv, cmds); /* * Register both CT buffers starting with RECV buffer. * Descriptors are in first half of the blob. */ - err = ct_register_buffer(ct, base + PAGE_SIZE / 4 * CTB_RECV, + err = ct_register_buffer(ct, base + ptrdiff(ct->ctbs.recv.desc, blob), INTEL_GUC_CT_BUFFER_TYPE_RECV); if (unlikely(err)) goto err_out; - err = ct_register_buffer(ct, base + PAGE_SIZE / 4 * CTB_SEND, + err = ct_register_buffer(ct, base + ptrdiff(ct->ctbs.send.desc, blob), INTEL_GUC_CT_BUFFER_TYPE_SEND); if (unlikely(err)) goto err_deregister; @@ -264,7 +304,7 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct) err_deregister: ct_deregister_buffer(ct, INTEL_GUC_CT_BUFFER_TYPE_RECV); err_out: - CT_ERROR(ct, "Failed to open open CT channel (err=%d)\n", err); + CT_PROBE_ERROR(ct, "Failed to enable CTB (%pe)\n", ERR_PTR(err)); return err; } @@ -292,6 +332,28 @@ static u32 ct_get_next_fence(struct intel_guc_ct *ct) return ++ct->requests.last_fence; } +static void write_barrier(struct intel_guc_ct *ct) +{ + struct intel_guc *guc = ct_to_guc(ct); + struct intel_gt *gt = guc_to_gt(guc); + + if (i915_gem_object_is_lmem(guc->ct.vma->obj)) { + GEM_BUG_ON(guc->send_regs.fw_domains); + /* + * This register is used by the i915 and GuC for MMIO based + * communication. Once we are in this code CTBs are the only + * method the i915 uses to communicate with the GuC so it is + * safe to write to this register (a value of 0 is NOP for MMIO + * communication). If we ever start mixing CTBs and MMIOs a new + * register will have to be chosen. + */ + intel_uncore_write_fw(gt->uncore, GEN11_SOFT_SCRATCH(0), 0); + } else { + /* wmb() sufficient for a barrier if in smem */ + wmb(); + } +} + /** * DOC: CTB Host to GuC request * @@ -313,14 +375,13 @@ static u32 ct_get_next_fence(struct intel_guc_ct *ct) static int ct_write(struct intel_guc_ct *ct, const u32 *action, u32 len /* in dwords */, - u32 fence, - bool want_response) + u32 fence) { - struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_SEND]; + struct intel_guc_ct_buffer *ctb = &ct->ctbs.send; struct guc_ct_buffer_desc *desc = ctb->desc; u32 head = desc->head; u32 tail = desc->tail; - u32 size = desc->size; + u32 size = ctb->size; u32 used; u32 header; u32 *cmds = ctb->cmds; @@ -329,7 +390,7 @@ static int ct_write(struct intel_guc_ct *ct, if (unlikely(desc->is_in_error)) return -EPIPE; - if (unlikely(!IS_ALIGNED(head | tail | size, 4) || + if (unlikely(!IS_ALIGNED(head | tail, 4) || (tail | head) >= size)) goto corrupted; @@ -358,8 +419,7 @@ static int ct_write(struct intel_guc_ct *ct, * DW2+: action data */ header = (len << GUC_CT_MSG_LEN_SHIFT) | - (GUC_CT_MSG_WRITE_FENCE_TO_DESC) | - (want_response ? 
GUC_CT_MSG_SEND_STATUS : 0) | + GUC_CT_MSG_SEND_STATUS | (action[0] << GUC_CT_MSG_ACTION_SHIFT); CT_DEBUG(ct, "writing %*ph %*ph %*ph\n", @@ -377,6 +437,12 @@ static int ct_write(struct intel_guc_ct *ct, } GEM_BUG_ON(tail > size); + /* + * make sure H2G buffer update and LRC tail update (if this is triggering + * a submission) are visible before updating the descriptor tail + */ + write_barrier(ct); + /* now update desc tail (back in bytes) */ desc->tail = tail * 4; return 0; @@ -389,56 +455,6 @@ corrupted: } /** - * wait_for_ctb_desc_update - Wait for the CT buffer descriptor update. - * @desc: buffer descriptor - * @fence: response fence - * @status: placeholder for status - * - * Guc will update CT buffer descriptor with new fence and status - * after processing the command identified by the fence. Wait for - * specified fence and then read from the descriptor status of the - * command. - * - * Return: - * * 0 response received (status is valid) - * * -ETIMEDOUT no response within hardcoded timeout - * * -EPROTO no response, CT buffer is in error - */ -static int wait_for_ctb_desc_update(struct guc_ct_buffer_desc *desc, - u32 fence, - u32 *status) -{ - int err; - - /* - * Fast commands should complete in less than 10us, so sample quickly - * up to that length of time, then switch to a slower sleep-wait loop. - * No GuC command should ever take longer than 10ms. - */ -#define done (READ_ONCE(desc->fence) == fence) - err = wait_for_us(done, 10); - if (err) - err = wait_for(done, 10); -#undef done - - if (unlikely(err)) { - DRM_ERROR("CT: fence %u failed; reported fence=%u\n", - fence, desc->fence); - - if (WARN_ON(desc->is_in_error)) { - /* Something went wrong with the messaging, try to reset - * the buffer and hope for the best - */ - guc_ct_buffer_desc_reset(desc); - err = -EPROTO; - } - } - - *status = desc->status; - return err; -} - -/** * wait_for_ct_request_update - Wait for CT request state update.
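A worked example of the H2G header packed by ct_write() above, with illustrative values (4 payload dwords, action code 0x4505):

/*
 * header = (4 << GUC_CT_MSG_LEN_SHIFT)          -> 0x00000004
 *        | GUC_CT_MSG_SEND_STATUS               -> 0x00000400
 *        | (0x4505 << GUC_CT_MSG_ACTION_SHIFT)  -> 0x45050000
 *        = 0x45050404
 */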
* @req: pointer to pending request * @status: placeholder for status @@ -481,8 +497,6 @@ static int ct_send(struct intel_guc_ct *ct, u32 response_buf_size, u32 *status) { - struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_SEND]; - struct guc_ct_buffer_desc *desc = ctb->desc; struct ct_request request; unsigned long flags; u32 fence; @@ -493,26 +507,28 @@ static int ct_send(struct intel_guc_ct *ct, GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK); GEM_BUG_ON(!response_buf && response_buf_size); + spin_lock_irqsave(&ct->ctbs.send.lock, flags); + fence = ct_get_next_fence(ct); request.fence = fence; request.status = 0; request.response_len = response_buf_size; request.response_buf = response_buf; - spin_lock_irqsave(&ct->requests.lock, flags); + spin_lock(&ct->requests.lock); list_add_tail(&request.link, &ct->requests.pending); - spin_unlock_irqrestore(&ct->requests.lock, flags); + spin_unlock(&ct->requests.lock); + + err = ct_write(ct, action, len, fence); + + spin_unlock_irqrestore(&ct->ctbs.send.lock, flags); - err = ct_write(ct, action, len, fence, !!response_buf); if (unlikely(err)) goto unlink; intel_guc_notify(ct_to_guc(ct)); - if (response_buf) - err = wait_for_ct_request_update(&request, status); - else - err = wait_for_ctb_desc_update(desc, fence, status); + err = wait_for_ct_request_update(&request, status); if (unlikely(err)) goto unlink; @@ -547,7 +563,6 @@ unlink: int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len, u32 *response_buf, u32 response_buf_size) { - struct intel_guc *guc = ct_to_guc(ct); u32 status = ~0; /* undefined */ int ret; @@ -556,8 +571,6 @@ int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len, return -ENODEV; } - mutex_lock(&guc->send_mutex); - ret = ct_send(ct, action, len, response_buf, response_buf_size, &status); if (unlikely(ret < 0)) { CT_ERROR(ct, "Sending action %#x failed (err=%d status=%#X)\n", @@ -567,7 +580,6 @@ int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len, action[0], ret, ret); } - mutex_unlock(&guc->send_mutex); return ret; } @@ -586,22 +598,42 @@ static inline bool ct_header_is_response(u32 header) return !!(header & GUC_CT_MSG_IS_RESPONSE); } -static int ct_read(struct intel_guc_ct *ct, u32 *data) +static struct ct_incoming_msg *ct_alloc_msg(u32 num_dwords) { - struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_RECV]; + struct ct_incoming_msg *msg; + + msg = kmalloc(sizeof(*msg) + sizeof(u32) * num_dwords, GFP_ATOMIC); + if (msg) + msg->size = num_dwords; + return msg; +} + +static void ct_free_msg(struct ct_incoming_msg *msg) +{ + kfree(msg); +} + +/* + * Return: number of remaining dwords available to read (0 if empty) + * or a negative error code on failure + */ +static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg) +{ + struct intel_guc_ct_buffer *ctb = &ct->ctbs.recv; struct guc_ct_buffer_desc *desc = ctb->desc; u32 head = desc->head; u32 tail = desc->tail; - u32 size = desc->size; + u32 size = ctb->size; u32 *cmds = ctb->cmds; s32 available; unsigned int len; unsigned int i; + u32 header; if (unlikely(desc->is_in_error)) return -EPIPE; - if (unlikely(!IS_ALIGNED(head | tail | size, 4) || + if (unlikely(!IS_ALIGNED(head | tail, 4) || (tail | head) >= size)) goto corrupted; @@ -612,8 +644,10 @@ static int ct_read(struct intel_guc_ct *ct, u32 *data) /* tail == head condition indicates empty */ available = tail - head; - if (unlikely(available == 0)) - return -ENODATA; + if (unlikely(available == 0)) { + *msg = NULL; + return 0; + } /* beware of buffer wrap case */ if
(unlikely(available < 0)) @@ -621,14 +655,14 @@ static int ct_read(struct intel_guc_ct *ct, u32 *data) CT_DEBUG(ct, "available %d (%u:%u)\n", available, head, tail); GEM_BUG_ON(available < 0); - data[0] = cmds[head]; + header = cmds[head]; head = (head + 1) % size; /* message len with header */ - len = ct_header_get_len(data[0]) + 1; + len = ct_header_get_len(header) + 1; if (unlikely(len > (u32)available)) { CT_ERROR(ct, "Incomplete message %*ph %*ph %*ph\n", - 4, data, + 4, &header, 4 * (head + available - 1 > size ? size - head : available - 1), &cmds[head], 4 * (head + available - 1 > size ? @@ -636,14 +670,27 @@ static int ct_read(struct intel_guc_ct *ct, u32 *data) goto corrupted; } + *msg = ct_alloc_msg(len); + if (!*msg) { + CT_ERROR(ct, "No memory for message %*ph %*ph %*ph\n", + 4, &header, + 4 * (head + available - 1 > size ? + size - head : available - 1), &cmds[head], + 4 * (head + available - 1 > size ? + available - 1 - size + head : 0), &cmds[0]); + return available; + } + + (*msg)->msg[0] = header; + for (i = 1; i < len; i++) { - data[i] = cmds[head]; + (*msg)->msg[i] = cmds[head]; head = (head + 1) % size; } - CT_DEBUG(ct, "received %*ph\n", 4 * len, data); + CT_DEBUG(ct, "received %*ph\n", 4 * len, (*msg)->msg); desc->head = head * 4; - return 0; + return available - len; corrupted: CT_ERROR(ct, "Corrupted descriptor addr=%#x head=%u tail=%u size=%u\n", @@ -670,39 +717,39 @@ corrupted: * ^-----------------------len-----------------------^ */ -static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg) +static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *response) { - u32 header = msg[0]; + u32 header = response->msg[0]; u32 len = ct_header_get_len(header); - u32 msgsize = (len + 1) * sizeof(u32); /* msg size in bytes w/header */ u32 fence; u32 status; u32 datalen; struct ct_request *req; + unsigned long flags; bool found = false; + int err = 0; GEM_BUG_ON(!ct_header_is_response(header)); - GEM_BUG_ON(!in_irq()); /* Response payload shall at least include fence and status */ if (unlikely(len < 2)) { - CT_ERROR(ct, "Corrupted response %*ph\n", msgsize, msg); + CT_ERROR(ct, "Corrupted response (len %u)\n", len); return -EPROTO; } - fence = msg[1]; - status = msg[2]; + fence = response->msg[1]; + status = response->msg[2]; datalen = len - 2; /* Format of the status follows RESPONSE message */ if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) { - CT_ERROR(ct, "Corrupted response %*ph\n", msgsize, msg); + CT_ERROR(ct, "Corrupted response (status %#x)\n", status); return -EPROTO; } CT_DEBUG(ct, "response fence %u status %#x\n", fence, status); - spin_lock(&ct->requests.lock); + spin_lock_irqsave(&ct->requests.lock, flags); list_for_each_entry(req, &ct->requests.pending, link) { if (unlikely(fence != req->fence)) { CT_DEBUG(ct, "request %u awaits response\n", @@ -710,58 +757,75 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg) continue; } if (unlikely(datalen > req->response_len)) { - CT_ERROR(ct, "Response for %u is too long %*ph\n", - req->fence, msgsize, msg); - datalen = 0; + CT_ERROR(ct, "Response %u too long (datalen %u > %u)\n", + req->fence, datalen, req->response_len); + datalen = min(datalen, req->response_len); + err = -EMSGSIZE; } if (datalen) - memcpy(req->response_buf, msg + 3, 4 * datalen); + memcpy(req->response_buf, response->msg + 3, 4 * datalen); req->response_len = datalen; WRITE_ONCE(req->status, status); found = true; break; } - spin_unlock(&ct->requests.lock); + 
spin_unlock_irqrestore(&ct->requests.lock, flags); + + if (!found) { + CT_ERROR(ct, "Unsolicited response (fence %u)\n", fence); + return -ENOKEY; + } + + if (unlikely(err)) + return err; - if (!found) - CT_ERROR(ct, "Unsolicited response %*ph\n", msgsize, msg); + ct_free_msg(response); return 0; } -static void ct_process_request(struct intel_guc_ct *ct, - u32 action, u32 len, const u32 *payload) +static int ct_process_request(struct intel_guc_ct *ct, struct ct_incoming_msg *request) { struct intel_guc *guc = ct_to_guc(ct); + u32 header, action, len; + const u32 *payload; int ret; + header = request->msg[0]; + payload = &request->msg[1]; + action = ct_header_get_action(header); + len = ct_header_get_len(header); + CT_DEBUG(ct, "request %x %*ph\n", action, 4 * len, payload); switch (action) { case INTEL_GUC_ACTION_DEFAULT: ret = intel_guc_to_host_process_recv_msg(guc, payload, len); - if (unlikely(ret)) - goto fail_unexpected; break; - default: -fail_unexpected: - CT_ERROR(ct, "Unexpected request %x %*ph\n", - action, 4 * len, payload); + ret = -EOPNOTSUPP; break; } + + if (unlikely(ret)) { + CT_ERROR(ct, "Failed to process request %04x (%pe)\n", + action, ERR_PTR(ret)); + return ret; + } + + ct_free_msg(request); + return 0; } static bool ct_process_incoming_requests(struct intel_guc_ct *ct) { unsigned long flags; - struct ct_incoming_request *request; - u32 header; - u32 *payload; + struct ct_incoming_msg *request; bool done; + int err; spin_lock_irqsave(&ct->requests.lock, flags); request = list_first_entry_or_null(&ct->requests.incoming, - struct ct_incoming_request, link); + struct ct_incoming_msg, link); if (request) list_del(&request->link); done = !!list_empty(&ct->requests.incoming); @@ -770,14 +834,13 @@ static bool ct_process_incoming_requests(struct intel_guc_ct *ct) if (!request) return true; - header = request->msg[0]; - payload = &request->msg[1]; - ct_process_request(ct, - ct_header_get_action(header), - ct_header_get_len(header), - payload); + err = ct_process_request(ct, request); + if (unlikely(err)) { + CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n", + ERR_PTR(err), 4 * request->size, request->msg); + ct_free_msg(request); + } - kfree(request); return done; } @@ -810,22 +873,11 @@ static void ct_incoming_request_worker_func(struct work_struct *w) * ^-----------------------len-----------------------^ */ -static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg) +static int ct_handle_request(struct intel_guc_ct *ct, struct ct_incoming_msg *request) { - u32 header = msg[0]; - u32 len = ct_header_get_len(header); - u32 msgsize = (len + 1) * sizeof(u32); /* msg size in bytes w/header */ - struct ct_incoming_request *request; unsigned long flags; - GEM_BUG_ON(ct_header_is_response(header)); - - request = kmalloc(sizeof(*request) + msgsize, GFP_ATOMIC); - if (unlikely(!request)) { - CT_ERROR(ct, "Dropping request %*ph\n", msgsize, msg); - return 0; /* XXX: -ENOMEM ? 
*/ - } - memcpy(request->msg, msg, msgsize); + GEM_BUG_ON(ct_header_is_response(request->msg[0])); spin_lock_irqsave(&ct->requests.lock, flags); list_add_tail(&request->link, &ct->requests.incoming); @@ -835,28 +887,74 @@ return 0; } +static void ct_handle_msg(struct intel_guc_ct *ct, struct ct_incoming_msg *msg) +{ + u32 header = msg->msg[0]; + int err; + + if (ct_header_is_response(header)) + err = ct_handle_response(ct, msg); + else + err = ct_handle_request(ct, msg); + + if (unlikely(err)) { + CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n", + ERR_PTR(err), 4 * msg->size, msg->msg); + ct_free_msg(msg); + } +} + +/* + * Return: number of remaining dwords available to read (0 if empty) + * or a negative error code on failure + */ +static int ct_receive(struct intel_guc_ct *ct) +{ + struct ct_incoming_msg *msg = NULL; + unsigned long flags; + int ret; + + spin_lock_irqsave(&ct->ctbs.recv.lock, flags); + ret = ct_read(ct, &msg); + spin_unlock_irqrestore(&ct->ctbs.recv.lock, flags); + if (ret < 0) + return ret; + + if (msg) + ct_handle_msg(ct, msg); + + return ret; +} + +static void ct_try_receive_message(struct intel_guc_ct *ct) +{ + int ret; + + if (GEM_WARN_ON(!ct->enabled)) + return; + + ret = ct_receive(ct); + if (ret > 0) + tasklet_hi_schedule(&ct->receive_tasklet); +} + +static void ct_receive_tasklet_func(struct tasklet_struct *t) +{ + struct intel_guc_ct *ct = from_tasklet(ct, t, receive_tasklet); + + ct_try_receive_message(ct); +} + /* * When we're communicating with the GuC over CT, GuC uses events * to notify us about new messages being posted on the RECV buffer. */ void intel_guc_ct_event_handler(struct intel_guc_ct *ct) { - u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */ - int err = 0; - if (unlikely(!ct->enabled)) { WARN(1, "Unexpected GuC event received while CT disabled!\n"); return; } - do { - err = ct_read(ct, msg); - if (err) - break; - - if (ct_header_is_response(msg[0])) - err = ct_handle_response(ct, msg); - else - err = ct_handle_request(ct, msg); - } while (!err); + ct_try_receive_message(ct); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h index 494a51a5200f..cb222f202301 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h @@ -6,6 +6,7 @@ #ifndef _INTEL_GUC_CT_H_ #define _INTEL_GUC_CT_H_ +#include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/workqueue.h> @@ -27,12 +28,16 @@ struct intel_guc; * record (command transport buffer descriptor) and the actual buffer which * holds the commands.
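A worked example of the circular-buffer arithmetic in ct_read() above, with illustrative numbers (head and tail in dwords, after the conversion from bytes; the wrap branch, truncated by the diff context, adds size back):

/*
 * size = 1024, head = 1000, tail = 8: the writer has wrapped, so
 *
 *   available = tail - head;    ->  -992 (negative: wrap case)
 *   available += size;          ->    32 dwords ready to read
 *
 * whereas tail == head would have meant an empty buffer.
 */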
* + * @lock: protects access to the commands buffer and buffer descriptor * @desc: pointer to the buffer descriptor * @cmds: pointer to the commands buffer + * @size: size of the commands buffer */ struct intel_guc_ct_buffer { + spinlock_t lock; struct guc_ct_buffer_desc *desc; u32 *cmds; + u32 size; }; @@ -45,8 +50,13 @@ struct intel_guc_ct { struct i915_vma *vma; bool enabled; - /* buffers for sending(0) and receiving(1) commands */ - struct intel_guc_ct_buffer ctbs[2]; + /* buffers for sending and receiving commands */ + struct { + struct intel_guc_ct_buffer send; + struct intel_guc_ct_buffer recv; + } ctbs; + + struct tasklet_struct receive_tasklet; struct { u32 last_fence; /* last fence used to send request */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c index 2270d6b3b272..76fe766ad1bc 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c @@ -30,7 +30,7 @@ static void guc_prepare_xfer(struct intel_uncore *uncore) else intel_uncore_write(uncore, GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE); - if (IS_GEN(uncore->i915, 9)) { + if (GRAPHICS_VER(uncore->i915) == 9) { /* DOP Clock Gating Enable for GuC clocks */ intel_uncore_rmw(uncore, GEN7_MISCCPCTL, 0, GEN8_DOP_CLOCK_GATE_GUC_ENABLE); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h index 79c560d9c0b6..e9a9d85e2aa3 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h @@ -9,6 +9,13 @@ #include <linux/bits.h> #include <linux/compiler.h> #include <linux/types.h> +#include "gt/intel_engine_types.h" + +#include "abi/guc_actions_abi.h" +#include "abi/guc_errors_abi.h" +#include "abi/guc_communication_mmio_abi.h" +#include "abi/guc_communication_ctb_abi.h" +#include "abi/guc_messages_abi.h" #define GUC_CLIENT_PRIORITY_KMD_HIGH 0 #define GUC_CLIENT_PRIORITY_HIGH 1 @@ -26,6 +33,12 @@ #define GUC_VIDEO_ENGINE2 4 #define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1) +#define GUC_RENDER_CLASS 0 +#define GUC_VIDEO_CLASS 1 +#define GUC_VIDEOENHANCE_CLASS 2 +#define GUC_BLITTER_CLASS 3 +#define GUC_RESERVED_CLASS 4 +#define GUC_LAST_ENGINE_CLASS GUC_RESERVED_CLASS #define GUC_MAX_ENGINE_CLASSES 16 #define GUC_MAX_INSTANCES_PER_CLASS 32 @@ -123,6 +136,25 @@ #define GUC_ID_TO_ENGINE_INSTANCE(guc_id) \ (((guc_id) & GUC_ENGINE_INSTANCE_MASK) >> GUC_ENGINE_INSTANCE_SHIFT) +static inline u8 engine_class_to_guc_class(u8 class) +{ + BUILD_BUG_ON(GUC_RENDER_CLASS != RENDER_CLASS); + BUILD_BUG_ON(GUC_BLITTER_CLASS != COPY_ENGINE_CLASS); + BUILD_BUG_ON(GUC_VIDEO_CLASS != VIDEO_DECODE_CLASS); + BUILD_BUG_ON(GUC_VIDEOENHANCE_CLASS != VIDEO_ENHANCEMENT_CLASS); + GEM_BUG_ON(class > MAX_ENGINE_CLASS || class == OTHER_CLASS); + + return class; +} + +static inline u8 guc_class_to_engine_class(u8 guc_class) +{ + GEM_BUG_ON(guc_class > GUC_LAST_ENGINE_CLASS); + GEM_BUG_ON(guc_class == GUC_RESERVED_CLASS); + + return guc_class; +} + /* Work item for submitting workloads into work queue of GuC. */ struct guc_wq_item { u32 header; @@ -207,104 +239,6 @@ struct guc_stage_desc { u64 desc_private; } __packed; -/** - * DOC: CTB based communication - * - * The CTB (command transport buffer) communication between Host and GuC - * is based on u32 data stream written to the shared buffer. One buffer can - * be used to transmit data only in one direction (one-directional channel). - * - * Current status of the each buffer is stored in the buffer descriptor. 
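A short usage sketch of the class-mapping helpers added to intel_guc_fwif.h above; with the current GUC_*_CLASS values the mapping is an identity, which the BUILD_BUG_ONs enforce:

u8 guc_class = engine_class_to_guc_class(VIDEO_DECODE_CLASS);
/* guc_class == GUC_VIDEO_CLASS == 1 */

u8 engine_class = guc_class_to_engine_class(GUC_BLITTER_CLASS);
/* engine_class == COPY_ENGINE_CLASS == 3 */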
- * Buffer descriptor holds tail and head fields that represents active data - * stream. The tail field is updated by the data producer (sender), and head - * field is updated by the data consumer (receiver):: - * - * +------------+ - * | DESCRIPTOR | +=================+============+========+ - * +============+ | | MESSAGE(s) | | - * | address |--------->+=================+============+========+ - * +------------+ - * | head | ^-----head--------^ - * +------------+ - * | tail | ^---------tail-----------------^ - * +------------+ - * | size | ^---------------size--------------------^ - * +------------+ - * - * Each message in data stream starts with the single u32 treated as a header, - * followed by optional set of u32 data that makes message specific payload:: - * - * +------------+---------+---------+---------+ - * | MESSAGE | - * +------------+---------+---------+---------+ - * | msg[0] | [1] | ... | [n-1] | - * +------------+---------+---------+---------+ - * | MESSAGE | MESSAGE PAYLOAD | - * + HEADER +---------+---------+---------+ - * | | 0 | ... | n | - * +======+=====+=========+=========+=========+ - * | 31:16| code| | | | - * +------+-----+ | | | - * | 15:5|flags| | | | - * +------+-----+ | | | - * | 4:0| len| | | | - * +------+-----+---------+---------+---------+ - * - * ^-------------len-------------^ - * - * The message header consists of: - * - * - **len**, indicates length of the message payload (in u32) - * - **code**, indicates message code - * - **flags**, holds various bits to control message handling - */ - -/* - * Describes single command transport buffer. - * Used by both guc-master and clients. - */ -struct guc_ct_buffer_desc { - u32 addr; /* gfx address */ - u64 host_private; /* host private data */ - u32 size; /* size in bytes */ - u32 head; /* offset updated by GuC*/ - u32 tail; /* offset updated by owner */ - u32 is_in_error; /* error indicator */ - u32 fence; /* fence updated by GuC */ - u32 status; /* status updated by GuC */ - u32 owner; /* id of the channel owner */ - u32 owner_sub_id; /* owner-defined field for extra tracking */ - u32 reserved[5]; -} __packed; - -/* Type of command transport buffer */ -#define INTEL_GUC_CT_BUFFER_TYPE_SEND 0x0u -#define INTEL_GUC_CT_BUFFER_TYPE_RECV 0x1u - -/* - * Definition of the command transport message header (DW0) - * - * bit[4..0] message len (in dwords) - * bit[7..5] reserved - * bit[8] response (G2H only) - * bit[8] write fence to desc (H2G only) - * bit[9] write status to H2G buff (H2G only) - * bit[10] send status back via G2H (H2G only) - * bit[15..11] reserved - * bit[31..16] action code - */ -#define GUC_CT_MSG_LEN_SHIFT 0 -#define GUC_CT_MSG_LEN_MASK 0x1F -#define GUC_CT_MSG_IS_RESPONSE (1 << 8) -#define GUC_CT_MSG_WRITE_FENCE_TO_DESC (1 << 8) -#define GUC_CT_MSG_WRITE_STATUS_TO_BUFF (1 << 9) -#define GUC_CT_MSG_SEND_STATUS (1 << 10) -#define GUC_CT_MSG_ACTION_SHIFT 16 -#define GUC_CT_MSG_ACTION_MASK 0xFFFF - -#define GUC_FORCEWAKE_RENDER (1 << 0) -#define GUC_FORCEWAKE_MEDIA (1 << 1) - #define GUC_POWER_UNSPECIFIED 0 #define GUC_POWER_D0 1 #define GUC_POWER_D1 2 @@ -480,120 +414,17 @@ struct guc_shared_ctx_data { struct guc_ctx_report preempt_ctx_report[GUC_MAX_ENGINES_NUM]; } __packed; -/** - * DOC: MMIO based communication - * - * The MMIO based communication between Host and GuC uses software scratch - * registers, where first register holds data treated as message header, - * and other registers are used to hold message payload. 
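For the guc_ct_buffer_desc being removed above (it moves to the new abi/guc_communication_ctb_abi.h included earlier in this patch), the head/tail convention is that the sender advances tail and the receiver advances head, both in bytes; a hypothetical helper makes the emptiness rule explicit:

static bool ctb_desc_is_empty(const struct guc_ct_buffer_desc *desc)
{
	return desc->head == desc->tail; /* head == tail indicates empty */
}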
- * - * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8, - * but no H2G command takes more than 8 parameters and the GuC FW - * itself uses an 8-element array to store the H2G message. - * - * +-----------+---------+---------+---------+ - * | MMIO[0] | MMIO[1] | ... | MMIO[n] | - * +-----------+---------+---------+---------+ - * | header | optional payload | - * +======+====+=========+=========+=========+ - * | 31:28|type| | | | - * +------+----+ | | | - * | 27:16|data| | | | - * +------+----+ | | | - * | 15:0|code| | | | - * +------+----+---------+---------+---------+ - * - * The message header consists of: - * - * - **type**, indicates message type - * - **code**, indicates message code, is specific for **type** - * - **data**, indicates message data, optional, depends on **code** - * - * The following message **types** are supported: - * - * - **REQUEST**, indicates Host-to-GuC request, requested GuC action code - * must be priovided in **code** field. Optional action specific parameters - * can be provided in remaining payload registers or **data** field. - * - * - **RESPONSE**, indicates GuC-to-Host response from earlier GuC request, - * action response status will be provided in **code** field. Optional - * response data can be returned in remaining payload registers or **data** - * field. - */ - -#define GUC_MAX_MMIO_MSG_LEN 8 - -#define INTEL_GUC_MSG_TYPE_SHIFT 28 -#define INTEL_GUC_MSG_TYPE_MASK (0xF << INTEL_GUC_MSG_TYPE_SHIFT) -#define INTEL_GUC_MSG_DATA_SHIFT 16 -#define INTEL_GUC_MSG_DATA_MASK (0xFFF << INTEL_GUC_MSG_DATA_SHIFT) -#define INTEL_GUC_MSG_CODE_SHIFT 0 -#define INTEL_GUC_MSG_CODE_MASK (0xFFFF << INTEL_GUC_MSG_CODE_SHIFT) - #define __INTEL_GUC_MSG_GET(T, m) \ (((m) & INTEL_GUC_MSG_ ## T ## _MASK) >> INTEL_GUC_MSG_ ## T ## _SHIFT) #define INTEL_GUC_MSG_TO_TYPE(m) __INTEL_GUC_MSG_GET(TYPE, m) #define INTEL_GUC_MSG_TO_DATA(m) __INTEL_GUC_MSG_GET(DATA, m) #define INTEL_GUC_MSG_TO_CODE(m) __INTEL_GUC_MSG_GET(CODE, m) -enum intel_guc_msg_type { - INTEL_GUC_MSG_TYPE_REQUEST = 0x0, - INTEL_GUC_MSG_TYPE_RESPONSE = 0xF, -}; - #define __INTEL_GUC_MSG_TYPE_IS(T, m) \ (INTEL_GUC_MSG_TO_TYPE(m) == INTEL_GUC_MSG_TYPE_ ## T) #define INTEL_GUC_MSG_IS_REQUEST(m) __INTEL_GUC_MSG_TYPE_IS(REQUEST, m) #define INTEL_GUC_MSG_IS_RESPONSE(m) __INTEL_GUC_MSG_TYPE_IS(RESPONSE, m) -enum intel_guc_action { - INTEL_GUC_ACTION_DEFAULT = 0x0, - INTEL_GUC_ACTION_REQUEST_PREEMPTION = 0x2, - INTEL_GUC_ACTION_REQUEST_ENGINE_RESET = 0x3, - INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10, - INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20, - INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30, - INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x40, - INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302, - INTEL_GUC_ACTION_ENTER_S_STATE = 0x501, - INTEL_GUC_ACTION_EXIT_S_STATE = 0x502, - INTEL_GUC_ACTION_SLPC_REQUEST = 0x3003, - INTEL_GUC_ACTION_SAMPLE_FORCEWAKE = 0x3005, - INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000, - INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505, - INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506, - INTEL_GUC_ACTION_LIMIT -}; - -enum intel_guc_preempt_options { - INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q = 0x4, - INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q = 0x8, -}; - -enum intel_guc_report_status { - INTEL_GUC_REPORT_STATUS_UNKNOWN = 0x0, - INTEL_GUC_REPORT_STATUS_ACKED = 0x1, - INTEL_GUC_REPORT_STATUS_ERROR = 0x2, - INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4, -}; - -enum intel_guc_sleep_state_status { - INTEL_GUC_SLEEP_STATE_SUCCESS = 0x1, - 
INTEL_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x2, - INTEL_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x3 -#define INTEL_GUC_SLEEP_STATE_INVALID_MASK 0x80000000 -}; - -#define GUC_LOG_CONTROL_LOGGING_ENABLED (1 << 0) -#define GUC_LOG_CONTROL_VERBOSITY_SHIFT 4 -#define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT) -#define GUC_LOG_CONTROL_DEFAULT_LOGGING (1 << 8) - -enum intel_guc_response_status { - INTEL_GUC_RESPONSE_STATUS_SUCCESS = 0x0, - INTEL_GUC_RESPONSE_STATUS_GENERIC_FAIL = 0xF000, -}; - #define INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(m) \ (typecheck(u32, (m)) && \ ((m) & (INTEL_GUC_MSG_TYPE_MASK | INTEL_GUC_MSG_CODE_MASK)) == \ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c index 129e0cf7dfe2..64e0b86bf258 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c @@ -121,4 +121,3 @@ void intel_guc_log_debugfs_register(struct intel_guc_log *log, intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), log); } - diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 92688a9b6717..7c8ff9792f7b 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -11,6 +11,7 @@ #include "gt/intel_context.h" #include "gt/intel_engine_pm.h" #include "gt/intel_gt.h" +#include "gt/intel_gt_irq.h" #include "gt/intel_gt_pm.h" #include "gt/intel_lrc.h" #include "gt/intel_mocs.h" @@ -264,6 +265,14 @@ static void guc_submission_tasklet(struct tasklet_struct *t) spin_unlock_irqrestore(&engine->active.lock, flags); } +static void cs_irq_handler(struct intel_engine_cs *engine, u16 iir) +{ + if (iir & GT_RENDER_USER_INTERRUPT) { + intel_engine_signal_breadcrumbs(engine); + tasklet_hi_schedule(&engine->execlists.tasklet); + } +} + static void guc_reset_prepare(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; @@ -423,32 +432,6 @@ void intel_guc_submission_fini(struct intel_guc *guc) } } -static void guc_interrupts_capture(struct intel_gt *gt) -{ - struct intel_uncore *uncore = gt->uncore; - u32 irqs = GT_CONTEXT_SWITCH_INTERRUPT; - u32 dmask = irqs << 16 | irqs; - - GEM_BUG_ON(INTEL_GEN(gt->i915) < 11); - - /* Don't handle the ctx switch interrupt in GuC submission mode */ - intel_uncore_rmw(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask, 0); - intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask, 0); -} - -static void guc_interrupts_release(struct intel_gt *gt) -{ - struct intel_uncore *uncore = gt->uncore; - u32 irqs = GT_CONTEXT_SWITCH_INTERRUPT; - u32 dmask = irqs << 16 | irqs; - - GEM_BUG_ON(INTEL_GEN(gt->i915) < 11); - - /* Handle ctx switch interrupts again */ - intel_uncore_rmw(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0, dmask); - intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0, dmask); -} - static int guc_context_alloc(struct intel_context *ce) { return lrc_alloc(ce, ce->engine); @@ -608,35 +591,6 @@ static int guc_resume(struct intel_engine_cs *engine) static void guc_set_default_submission(struct intel_engine_cs *engine) { engine->submit_request = guc_submit_request; - engine->schedule = i915_schedule; - engine->execlists.tasklet.callback = guc_submission_tasklet; - - engine->reset.prepare = guc_reset_prepare; - engine->reset.rewind = guc_reset_rewind; - engine->reset.cancel = guc_reset_cancel; - engine->reset.finish = guc_reset_finish; - - engine->flags |= 
I915_ENGINE_NEEDS_BREADCRUMB_TASKLET; - engine->flags |= I915_ENGINE_HAS_PREEMPTION; - - /* - * TODO: GuC supports timeslicing and semaphores as well, but they're - * handled by the firmware so some minor tweaks are required before - * enabling. - * - * engine->flags |= I915_ENGINE_HAS_TIMESLICES; - * engine->flags |= I915_ENGINE_HAS_SEMAPHORES; - */ - - engine->emit_bb_start = gen8_emit_bb_start; - - /* - * For the breadcrumb irq to work we need the interrupts to stay - * enabled. However, on all platforms on which we'll have support for - * GuC submission we don't allow disabling the interrupts at runtime, so - * we're always safe with the current flow. - */ - GEM_BUG_ON(engine->irq_enable || engine->irq_disable); } static void guc_release(struct intel_engine_cs *engine) @@ -658,19 +612,39 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine) engine->cops = &guc_context_ops; engine->request_alloc = guc_request_alloc; + engine->schedule = i915_schedule; + + engine->reset.prepare = guc_reset_prepare; + engine->reset.rewind = guc_reset_rewind; + engine->reset.cancel = guc_reset_cancel; + engine->reset.finish = guc_reset_finish; + engine->emit_flush = gen8_emit_flush_xcs; engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb; engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs; - if (INTEL_GEN(engine->i915) >= 12) { + if (GRAPHICS_VER(engine->i915) >= 12) { engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs; engine->emit_flush = gen12_emit_flush_xcs; } engine->set_default_submission = guc_set_default_submission; + + engine->flags |= I915_ENGINE_HAS_PREEMPTION; + + /* + * TODO: GuC supports timeslicing and semaphores as well, but they're + * handled by the firmware so some minor tweaks are required before + * enabling. + * + * engine->flags |= I915_ENGINE_HAS_TIMESLICES; + * engine->flags |= I915_ENGINE_HAS_SEMAPHORES; + */ + + engine->emit_bb_start = gen8_emit_bb_start; } static void rcs_submission_override(struct intel_engine_cs *engine) { - switch (INTEL_GEN(engine->i915)) { + switch (GRAPHICS_VER(engine->i915)) { case 12: engine->emit_flush = gen12_emit_flush_rcs; engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs; @@ -689,6 +663,7 @@ static void rcs_submission_override(struct intel_engine_cs *engine) static inline void guc_default_irqs(struct intel_engine_cs *engine) { engine->irq_keep_mask = GT_RENDER_USER_INTERRUPT; + intel_engine_set_irq_handler(engine, cs_irq_handler); } int intel_guc_submission_setup(struct intel_engine_cs *engine) @@ -699,7 +674,7 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine) * The setup relies on several assumptions (e.g. 
irqs always enabled) * that are only valid on gen11+ */ - GEM_BUG_ON(INTEL_GEN(i915) < 11); + GEM_BUG_ON(GRAPHICS_VER(i915) < 11); tasklet_setup(&engine->execlists.tasklet, guc_submission_tasklet); @@ -721,9 +696,6 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine) void intel_guc_submission_enable(struct intel_guc *guc) { guc_stage_desc_init(guc); - - /* Take over from manual control of ELSP (execlists) */ - guc_interrupts_capture(guc_to_gt(guc)); } void intel_guc_submission_disable(struct intel_guc *guc) @@ -734,8 +706,6 @@ void intel_guc_submission_disable(struct intel_guc *guc) /* Note: By the time we're here, GuC may have already been reset */ - guc_interrupts_release(gt); - guc_stage_desc_fini(guc); } @@ -753,8 +723,3 @@ void intel_guc_submission_init_early(struct intel_guc *guc) { guc->submission_selected = __guc_submission_selected(guc); } - -bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine) -{ - return engine->set_default_submission == guc_set_default_submission; -} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h index 5f7b9e6347d0..3f7005018939 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h @@ -20,7 +20,6 @@ void intel_guc_submission_fini(struct intel_guc *guc); int intel_guc_preempt_work_create(struct intel_guc *guc); void intel_guc_preempt_work_destroy(struct intel_guc *guc); int intel_guc_submission_setup(struct intel_engine_cs *engine); -bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine); static inline bool intel_guc_submission_is_supported(struct intel_guc *guc) { diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c index 2126dd81ac38..fc5387b410a2 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -43,7 +43,7 @@ void intel_huc_init_early(struct intel_huc *huc) intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC); - if (INTEL_GEN(i915) >= 11) { + if (GRAPHICS_VER(i915) >= 11) { huc->status.reg = GEN11_HUC_KERNEL_LOAD_INFO; huc->status.mask = HUC_LOAD_SUCCESSFUL; huc->status.value = HUC_LOAD_SUCCESSFUL; @@ -82,7 +82,9 @@ static int intel_huc_rsa_data_create(struct intel_huc *huc) if (IS_ERR(vma)) return PTR_ERR(vma); - vaddr = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WB); + vaddr = i915_gem_object_pin_map_unlocked(vma->obj, + i915_coherent_map_type(gt->i915, + vma->obj, true)); if (IS_ERR(vaddr)) { i915_vma_unpin_and_release(&vma, 0); return PTR_ERR(vaddr); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 6abb8f2dc33d..6d8b9233214e 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -23,7 +23,7 @@ static void uc_expand_default_options(struct intel_uc *uc) return; /* Don't enable GuC/HuC on pre-Gen12 */ - if (INTEL_GEN(i915) < 12) { + if (GRAPHICS_VER(i915) < 12) { i915->params.enable_guc = 0; return; } @@ -467,7 +467,7 @@ static int __uc_init_hw(struct intel_uc *uc) /* WaEnableuKernelHeaderValidFix:skl */ /* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */ - if (IS_GEN(i915, 9)) + if (GRAPHICS_VER(i915) == 9) attempts = 3; else attempts = 1; @@ -502,10 +502,6 @@ static int __uc_init_hw(struct intel_uc *uc) intel_huc_auth(huc); - ret = intel_guc_sample_forcewake(guc); - if (ret) - goto err_communication; - if (intel_uc_uses_guc_submission(uc)) intel_guc_submission_enable(guc); @@ 
-529,8 +525,6 @@ static int __uc_init_hw(struct intel_uc *uc) /* * We've failed to load the firmware :( */ -err_communication: - guc_disable_communication(guc); err_log_capture: __uc_capture_load_err_log(uc); err_out: @@ -558,9 +552,6 @@ static void __uc_fini_hw(struct intel_uc *uc) if (intel_uc_uses_guc_submission(uc)) intel_guc_submission_disable(guc); - if (guc_communication_enabled(guc)) - guc_disable_communication(guc); - __uc_sanitize(uc); } @@ -577,7 +568,6 @@ void intel_uc_reset_prepare(struct intel_uc *uc) if (!intel_guc_is_ready(guc)) return; - guc_disable_communication(guc); __uc_sanitize(uc); } |
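Running through the whole series is a mechanical conversion from the INTEL_GEN()/IS_GEN() helpers to direct GRAPHICS_VER() comparisons; the before/after pattern, shown schematically:

/* before */
if (INTEL_GEN(i915) >= 11) { /* ... */ }
if (IS_GEN(i915, 9)) { /* ... */ }

/* after */
if (GRAPHICS_VER(i915) >= 11) { /* ... */ }
if (GRAPHICS_VER(i915) == 9) { /* ... */ }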