-rw-r--r--  drivers/gpu/drm/i915/gt/gen6_ppgtt.c                  | 15
-rw-r--r--  drivers/gpu/drm/i915/gt/gen7_renderclear.c            |  2
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_engine_cs.c              |  2
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_ppgtt.c                  | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_pm.c             | 12
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_execlists_submission.c  | 63
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c                   |  4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_mocs.c                  |  2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ppgtt.c                 |  2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rc6.c                   |  2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset.c                 |  2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring_submission.c       | 19
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps.c                   |  2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c           |  2
14 files changed, 67 insertions, 75 deletions
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
index 680bd9442eb0..e08dff376339 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -12,9 +12,9 @@
 #include "intel_gt.h"
 
 /* Write pde (index) from the page directory @pd to the page table @pt */
-static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
-                                  const unsigned int pde,
-                                  const struct i915_page_table *pt)
+static void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
+                           const unsigned int pde,
+                           const struct i915_page_table *pt)
 {
         dma_addr_t addr = pt ? px_dma(pt) : px_dma(ppgtt->base.vm.scratch[1]);
 
@@ -27,8 +27,6 @@ void gen7_ppgtt_enable(struct intel_gt *gt)
 {
         struct drm_i915_private *i915 = gt->i915;
         struct intel_uncore *uncore = gt->uncore;
-        struct intel_engine_cs *engine;
-        enum intel_engine_id id;
         u32 ecochk;
 
         intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B);
@@ -41,13 +39,6 @@ void gen7_ppgtt_enable(struct intel_gt *gt)
                 ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
         }
         intel_uncore_write(uncore, GAM_ECOCHK, ecochk);
-
-        for_each_engine(engine, gt, id) {
-                /* GFX_MODE is per-ring on gen7+ */
-                ENGINE_WRITE(engine,
-                             RING_MODE_GEN7,
-                             _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-        }
 }
 
 void gen6_ppgtt_enable(struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
index 94465374ca2f..56bdcdaa9a88 100644
--- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
@@ -40,7 +40,7 @@ struct batch_vals {
         u32 size;
 };
 
-static inline int num_primitives(const struct batch_vals *bv)
+static int num_primitives(const struct batch_vals *bv)
 {
         /*
          * We need to saturate the GPU with work in order to dispatch
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
index 8066b93e6dc4..5b932d2dbfd3 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -330,7 +330,7 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
         return 0;
 }
 
-static inline u32 preempt_address(struct intel_engine_cs *engine)
+static u32 preempt_address(struct intel_engine_cs *engine)
 {
         return (i915_ggtt_offset(engine->status_page.vma) +
                 I915_GEM_HWS_PREEMPT_ADDR);
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index a37c968ef8f7..755522ced60d 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -109,7 +109,7 @@ static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
 
 #define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)
 
-static inline unsigned int
+static unsigned int
 gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
 {
         const int shift = gen8_pd_shift(lvl);
@@ -125,7 +125,7 @@ gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
         return i915_pde_index(end, shift) - *idx;
 }
 
-static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
+static bool gen8_pd_contains(u64 start, u64 end, int lvl)
 {
         const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
 
@@ -133,7 +133,7 @@ static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
         return (start ^ end) & mask && (start & ~mask) == 0;
 }
 
-static inline unsigned int gen8_pt_count(u64 start, u64 end)
+static unsigned int gen8_pt_count(u64 start, u64 end)
 {
         GEM_BUG_ON(start >= end);
         if ((start ^ end) >> gen8_pd_shift(1))
@@ -142,14 +142,13 @@ static inline unsigned int gen8_pt_count(u64 start, u64 end)
         return end - start;
 }
 
-static inline unsigned int
-gen8_pd_top_count(const struct i915_address_space *vm)
+static unsigned int gen8_pd_top_count(const struct i915_address_space *vm)
 {
         unsigned int shift = __gen8_pte_shift(vm->top);
 
         return (vm->total + (1ull << shift) - 1) >> shift;
 }
 
-static inline struct i915_page_directory *
+static struct i915_page_directory *
 gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
 {
         struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
@@ -160,7 +159,7 @@ gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
         return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
 }
 
-static inline struct i915_page_directory *
+static struct i915_page_directory *
 gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
 {
         return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
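The gen8_pd_*() helpers touched above carve a [start, end) page-index range across the 4-level page tables, 9 index bits per level. Below is a minimal standalone sketch of that arithmetic, assuming the usual gen8 layout of 512 entries per level over 4 KiB pages; the constants and helper names are re-declared here for illustration and are not taken verbatim from the driver headers.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GEN8_PDES               512             /* assumed: entries per page-directory level */
#define gen8_pd_shift(lvl)      ((lvl) * 9)     /* log2(512) index bits per level */
#define gen8_pd_index(idx, lvl) (((idx) >> gen8_pd_shift(lvl)) & (GEN8_PDES - 1))

/* Number of PTEs that [start, end) occupies within start's page table */
static unsigned int pt_count(uint64_t start, uint64_t end)
{
        assert(start < end);
        if ((start ^ end) >> gen8_pd_shift(1))  /* range crosses a page-table boundary */
                return GEN8_PDES - (start & (GEN8_PDES - 1));
        else
                return end - start;
}

int main(void)
{
        /* start/end are page indices, i.e. GPU address >> 12 */
        uint64_t start = 0x1f0, end = 0x310;

        printf("level-0 index of start: %u\n", (unsigned int)gen8_pd_index(start, 0)); /* 496 */
        printf("level-1 index of start: %u\n", (unsigned int)gen8_pd_index(start, 1)); /* 0 */
        printf("PTEs used in start's page table: %u\n", pt_count(start, end));         /* 16 */
        return 0;
}

Since the sample range crosses a page-table boundary, only the 16 entries up to the end of start's table are counted, which is the early-return branch of gen8_pt_count() in the hunk above.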
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 2843db731b7d..e67d09259dd0 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -79,7 +79,7 @@ static int __engine_unpark(struct intel_wakeref *wf)
 
 #if IS_ENABLED(CONFIG_LOCKDEP)
 
-static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
+static unsigned long __timeline_mark_lock(struct intel_context *ce)
 {
         unsigned long flags;
 
@@ -89,8 +89,8 @@ static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
         return flags;
 }
 
-static inline void __timeline_mark_unlock(struct intel_context *ce,
-                                          unsigned long flags)
+static void __timeline_mark_unlock(struct intel_context *ce,
+                                   unsigned long flags)
 {
         mutex_release(&ce->timeline->mutex.dep_map, _THIS_IP_);
         local_irq_restore(flags);
@@ -98,13 +98,13 @@ static inline void __timeline_mark_unlock(struct intel_context *ce,
 
 #else
 
-static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
+static unsigned long __timeline_mark_lock(struct intel_context *ce)
 {
         return 0;
 }
 
-static inline void __timeline_mark_unlock(struct intel_context *ce,
-                                          unsigned long flags)
+static void __timeline_mark_unlock(struct intel_context *ce,
+                                   unsigned long flags)
 {
 }
 
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index d7d5a58990bb..33c7495b12b1 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -230,8 +230,7 @@ active_request(const struct intel_timeline * const tl, struct i915_request *rq)
         return __active_request(tl, rq, 0);
 }
 
-static inline void
-ring_set_paused(const struct intel_engine_cs *engine, int state)
+static void ring_set_paused(const struct intel_engine_cs *engine, int state)
 {
         /*
          * We inspect HWS_PREEMPT with a semaphore inside
@@ -244,12 +243,12 @@ ring_set_paused(const struct intel_engine_cs *engine, int state)
         wmb();
 }
 
-static inline struct i915_priolist *to_priolist(struct rb_node *rb)
+static struct i915_priolist *to_priolist(struct rb_node *rb)
 {
         return rb_entry(rb, struct i915_priolist, node);
 }
 
-static inline int rq_prio(const struct i915_request *rq)
+static int rq_prio(const struct i915_request *rq)
 {
         return READ_ONCE(rq->sched.attr.priority);
 }
@@ -299,8 +298,8 @@ static int virtual_prio(const struct intel_engine_execlists *el)
         return rb ? rb_entry(rb, struct ve_node, rb)->prio : INT_MIN;
 }
 
-static inline bool need_preempt(const struct intel_engine_cs *engine,
-                                const struct i915_request *rq)
+static bool need_preempt(const struct intel_engine_cs *engine,
+                         const struct i915_request *rq)
 {
         int last_prio;
 
@@ -351,7 +350,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
                      queue_prio(&engine->execlists)) > last_prio;
 }
 
-__maybe_unused static inline bool
+__maybe_unused static bool
 assert_priority_queue(const struct i915_request *prev,
                       const struct i915_request *next)
 {
@@ -418,7 +417,7 @@ execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
         return __unwind_incomplete_requests(engine);
 }
 
-static inline void
+static void
 execlists_context_status_change(struct i915_request *rq, unsigned long status)
 {
         /*
@@ -503,7 +502,7 @@ static void reset_active(struct i915_request *rq,
         ce->lrc.lrca = lrc_update_regs(ce, engine, head);
 }
 
-static inline struct intel_engine_cs *
+static struct intel_engine_cs *
 __execlists_schedule_in(struct i915_request *rq)
 {
         struct intel_engine_cs * const engine = rq->engine;
@@ -549,7 +548,7 @@ __execlists_schedule_in(struct i915_request *rq)
         return engine;
 }
 
-static inline void execlists_schedule_in(struct i915_request *rq, int idx)
+static void execlists_schedule_in(struct i915_request *rq, int idx)
 {
         struct intel_context * const ce = rq->context;
         struct intel_engine_cs *old;
@@ -608,9 +607,9 @@ static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
                 tasklet_hi_schedule(&ve->base.execlists.tasklet);
 }
 
-static inline void __execlists_schedule_out(struct i915_request *rq)
+static void __execlists_schedule_out(struct i915_request * const rq,
+                                     struct intel_context * const ce)
 {
-        struct intel_context * const ce = rq->context;
         struct intel_engine_cs * const engine = rq->engine;
         unsigned int ccid;
 
@@ -621,6 +620,7 @@ static inline void __execlists_schedule_out(struct i915_request *rq)
          */
 
         CE_TRACE(ce, "schedule-out, ccid:%x\n", ce->lrc.ccid);
+        GEM_BUG_ON(ce->inflight != engine);
 
         if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
                 lrc_check_regs(ce, engine, "after");
@@ -660,10 +660,12 @@ static inline void __execlists_schedule_out(struct i915_request *rq)
          */
         if (ce->engine != engine)
                 kick_siblings(rq, ce);
+
+        WRITE_ONCE(ce->inflight, NULL);
+        intel_context_put(ce);
 }
 
-static inline void
-execlists_schedule_out(struct i915_request *rq)
+static inline void execlists_schedule_out(struct i915_request *rq)
 {
         struct intel_context * const ce = rq->context;
 
@@ -671,12 +673,8 @@ execlists_schedule_out(struct i915_request *rq)
         GEM_BUG_ON(!ce->inflight);
 
         ce->inflight = ptr_dec(ce->inflight);
-        if (!__intel_context_inflight_count(ce->inflight)) {
-                GEM_BUG_ON(ce->inflight != rq->engine);
-                __execlists_schedule_out(rq);
-                WRITE_ONCE(ce->inflight, NULL);
-                intel_context_put(ce);
-        }
+        if (!__intel_context_inflight_count(ce->inflight))
+                __execlists_schedule_out(rq, ce);
 
         i915_request_put(rq);
 }
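The reworked schedule-out above relies on ce->inflight being a pointer-tagged counter: the low alignment bits of the stored engine pointer carry the number of outstanding ELSP submissions, so the final decrement leaves the bare engine pointer behind (hence the new GEM_BUG_ON(ce->inflight != engine)) before the slot is cleared and the context reference dropped. A minimal sketch of that tagging scheme follows, with illustrative helper names rather than the driver's ptr_inc()/ptr_unmask_bits() macros.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define COUNT_MASK      ((uintptr_t)3)  /* engines are at least 4-byte aligned: 2 free tag bits */

struct engine { const char *name; };

static void *ptr_inc(void *p) { return (void *)((uintptr_t)p + 1); }
static void *ptr_dec(void *p) { return (void *)((uintptr_t)p - 1); }
static unsigned long inflight_count(void *p) { return (uintptr_t)p & COUNT_MASK; }
static struct engine *inflight_engine(void *p) { return (struct engine *)((uintptr_t)p & ~COUNT_MASK); }

int main(void)
{
        static struct engine rcs0 = { "rcs0" };
        void *inflight = NULL;

        /* schedule-in: latch the engine on first submission, bump the count each time */
        inflight = ptr_inc(inflight ? inflight : (void *)&rcs0);
        inflight = ptr_inc(inflight);
        printf("submitted twice: count %lu on %s\n",
               inflight_count(inflight), inflight_engine(inflight)->name);

        /* schedule-out: drop the count; at zero only the bare engine pointer remains */
        inflight = ptr_dec(inflight);
        inflight = ptr_dec(inflight);
        assert(inflight_count(inflight) == 0);
        assert(inflight_engine(inflight) == &rcs0);     /* cf. GEM_BUG_ON(ce->inflight != engine) */

        /* only now is the slot cleared and the context reference dropped */
        inflight = NULL;
        return 0;
}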
@@ -728,7 +726,7 @@ static u64 execlists_update_context(struct i915_request *rq)
         return desc;
 }
 
-static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)
+static void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)
 {
         if (execlists->ctrl_reg) {
                 writel(lower_32_bits(desc), execlists->submit_reg + port * 2);
@@ -757,7 +755,7 @@ dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq)
         return buf;
 }
 
-static __maybe_unused void
+static __maybe_unused noinline void
 trace_ports(const struct intel_engine_execlists *execlists,
             const char *msg,
             struct i915_request * const *ports)
@@ -774,13 +772,13 @@ trace_ports(const struct intel_engine_execlists *execlists,
                      dump_port(p1, sizeof(p1), ", ", ports[1]));
 }
 
-static inline bool
+static bool
 reset_in_progress(const struct intel_engine_execlists *execlists)
 {
         return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
 }
 
-static __maybe_unused bool
+static __maybe_unused noinline bool
 assert_pending_valid(const struct intel_engine_execlists *execlists,
                      const char *msg)
 {
@@ -1621,12 +1619,12 @@ static void execlists_dequeue_irq(struct intel_engine_cs *engine)
         local_irq_enable(); /* flush irq_work (e.g. breadcrumb enabling) */
 }
 
-static inline void clear_ports(struct i915_request **ports, int count)
+static void clear_ports(struct i915_request **ports, int count)
 {
         memset_p((void **)ports, NULL, count);
 }
 
-static inline void
+static void
 copy_ports(struct i915_request **dst, struct i915_request **src, int count)
 {
         /* A memcpy_p() would be very useful here! */
@@ -1660,8 +1658,7 @@ cancel_port_requests(struct intel_engine_execlists * const execlists,
         return inactive;
 }
 
-static inline void
-invalidate_csb_entries(const u64 *first, const u64 *last)
+static void invalidate_csb_entries(const u64 *first, const u64 *last)
 {
         clflush((void *)first);
         clflush((void *)last);
@@ -1693,7 +1690,7 @@ invalidate_csb_entries(const u64 *first, const u64 *last)
  *     bits 47-57: sw context id of the lrc the GT switched away from
  *     bits 58-63: sw counter of the lrc the GT switched away from
  */
-static inline bool gen12_csb_parse(const u64 csb)
+static bool gen12_csb_parse(const u64 csb)
 {
         bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_32_bits(csb));
         bool new_queue =
@@ -1720,7 +1717,7 @@ static inline bool gen12_csb_parse(const u64 csb)
         return false;
 }
 
-static inline bool gen8_csb_parse(const u64 csb)
+static bool gen8_csb_parse(const u64 csb)
 {
         return csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED);
 }
@@ -1759,8 +1756,7 @@ wa_csb_read(const struct intel_engine_cs *engine, u64 * const csb)
         return entry;
 }
 
-static inline u64
-csb_read(const struct intel_engine_cs *engine, u64 * const csb)
+static u64 csb_read(const struct intel_engine_cs *engine, u64 * const csb)
 {
         u64 entry = READ_ONCE(*csb);
 
@@ -3180,8 +3176,7 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
         }
 }
 
-static inline void
-logical_ring_default_irqs(struct intel_engine_cs *engine)
+static void logical_ring_default_irqs(struct intel_engine_cs *engine)
 {
         unsigned int shift = 0;
 
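The comment quoted in the gen12_csb_parse() hunk above gives the layout of the "switched away from" half of a Gen12 context-status-buffer entry. The small sketch below decodes just those two fields from a sample value, using only the bit positions stated in that comment; the helper names and the sample entry are made up for illustration and are not the driver's macros.

#include <stdint.h>
#include <stdio.h>

/* bits 47-57: sw context id of the lrc the GT switched away from */
static unsigned int csb_away_ctx_id(uint64_t csb)
{
        return (csb >> 47) & 0x7ff;     /* 11 bits */
}

/* bits 58-63: sw counter of the lrc the GT switched away from */
static unsigned int csb_away_sw_counter(uint64_t csb)
{
        return (csb >> 58) & 0x3f;      /* 6 bits */
}

int main(void)
{
        uint64_t csb = (0x2aull << 58) | (0x123ull << 47);      /* hypothetical CSB entry */

        printf("away: sw context id 0x%x, sw counter %u\n",
               csb_away_ctx_id(csb), csb_away_sw_counter(csb));
        return 0;
}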
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index a0fc78c89b61..94f485b591af 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1035,7 +1035,7 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
         return cs;
 }
 
-static inline u32 context_wa_bb_offset(const struct intel_context *ce)
+static u32 context_wa_bb_offset(const struct intel_context *ce)
 {
         return PAGE_SIZE * ce->wa_bb_page;
 }
@@ -1098,7 +1098,7 @@ setup_indirect_ctx_bb(const struct intel_context *ce,
  * engine info, SW context ID and SW counter need to form a unique number
  * (Context ID) per lrc.
  */
-static inline u32 lrc_descriptor(const struct intel_context *ce)
+static u32 lrc_descriptor(const struct intel_context *ce)
 {
         u32 desc;
 
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index c4512ee4daf2..8acb84960cd0 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -472,7 +472,7 @@ static u16 get_entry_l3cc(const struct drm_i915_mocs_table *table,
         return table->table[I915_MOCS_PTE].l3cc_value;
 }
 
-static inline u32 l3cc_combine(u16 low, u16 high)
+static u32 l3cc_combine(u16 low, u16 high)
 {
         return low | (u32)high << 16;
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index 46d9aceda64c..96b85a10ef33 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -80,7 +80,7 @@ void free_px(struct i915_address_space *vm, struct i915_page_table *pt, int lvl)
         kfree(pt);
 }
 
-static inline void
+static void
 write_dma_entry(struct drm_i915_gem_object * const pdma,
                 const unsigned short idx,
                 const u64 encoded_entry)
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index d7b8e4457fc2..35504c97f11d 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -49,7 +49,7 @@ static struct drm_i915_private *rc6_to_i915(struct intel_rc6 *rc)
         return rc6_to_gt(rc)->i915;
 }
 
-static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
+static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
 {
         intel_uncore_write_fw(uncore, reg, val);
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 9d177297db79..0a1f93f00b14 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -1110,7 +1110,7 @@ error:
         goto finish;
 }
 
-static inline int intel_gt_reset_engine(struct intel_engine_cs *engine)
+static int intel_gt_reset_engine(struct intel_engine_cs *engine)
 {
         return __intel_gt_reset(engine->gt, engine->mask);
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 20f42722be8b..597eaead57f8 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -189,9 +189,16 @@ static void set_pp_dir(struct intel_engine_cs *engine)
 {
         struct i915_address_space *vm = vm_alias(engine->gt->vm);
 
-        if (vm) {
-                ENGINE_WRITE(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
-                ENGINE_WRITE(engine, RING_PP_DIR_BASE, pp_dir(vm));
+        if (!vm)
+                return;
+
+        ENGINE_WRITE(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
+        ENGINE_WRITE(engine, RING_PP_DIR_BASE, pp_dir(vm));
+
+        if (INTEL_GEN(engine->i915) >= 7) {
+                ENGINE_WRITE(engine,
+                             RING_MODE_GEN7,
+                             _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
         }
 }
 
@@ -663,9 +670,9 @@ static int load_pd_dir(struct i915_request *rq,
         return rq->engine->emit_flush(rq, EMIT_FLUSH);
 }
 
-static inline int mi_set_context(struct i915_request *rq,
-                                 struct intel_context *ce,
-                                 u32 flags)
+static int mi_set_context(struct i915_request *rq,
+                          struct intel_context *ce,
+                          u32 flags)
 {
         struct intel_engine_cs *engine = rq->engine;
         struct drm_i915_private *i915 = engine->i915;
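The RING_MODE_GEN7 write that set_pp_dir() now performs (replacing the per-engine loop removed from gen7_ppgtt_enable() at the top of this diff) uses the _MASKED_BIT_ENABLE() convention: the upper 16 bits of the value select which bits the write may touch and the lower 16 bits give their new values, so no read-modify-write is needed. Below is a self-contained sketch of that convention; the bit position chosen for GFX_PPGTT_ENABLE is an assumption for illustration, not taken from the register definitions.

#include <stdint.h>
#include <stdio.h>

#define MASKED_BIT_ENABLE(bit)  (((uint32_t)(bit) << 16) | (bit))
#define MASKED_BIT_DISABLE(bit) ((uint32_t)(bit) << 16)

#define GFX_PPGTT_ENABLE        (1u << 9)       /* assumed bit position, illustration only */

/* Model of how the hardware folds a masked write into the current register value */
static uint32_t apply_masked_write(uint32_t current, uint32_t write)
{
        uint32_t mask = write >> 16;

        return (current & ~mask) | (write & mask);
}

int main(void)
{
        uint32_t mode = 0;

        mode = apply_masked_write(mode, MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
        printf("after enable:  0x%08x\n", mode); /* bit 9 set, other bits untouched */

        mode = apply_masked_write(mode, MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
        printf("after disable: 0x%08x\n", mode); /* bit 9 clear */
        return 0;
}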
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 69e1bd46cc46..ee5835c29c03 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -43,7 +43,7 @@ static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask)
         return mask & ~rps->pm_intrmsk_mbz;
 }
 
-static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
+static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
 {
         intel_uncore_write_fw(uncore, reg, val);
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index d99773e6776e..3fdcd5ff71dd 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -1304,7 +1304,7 @@ bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
 }
 
 __maybe_unused
-static inline bool is_nonpriv_flags_valid(u32 flags)
+static bool is_nonpriv_flags_valid(u32 flags)
 {
         /* Check only valid flag bits are set */
         if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID)