Diffstat (limited to 'drivers/gpu/drm/i915/i915_perf.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_perf.c | 161
1 files changed, 108 insertions, 53 deletions
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 0f556d80ba36..551be589d6f4 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -555,8 +555,9 @@ static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
 			aging_tail = hw_tail;
 			stream->oa_buffer.aging_timestamp = now;
 		} else {
-			DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %x\n",
-				  hw_tail);
+			drm_err(&stream->perf->i915->drm,
+				"Ignoring spurious out of range OA buffer tail pointer = %x\n",
+				hw_tail);
 		}
 	}
 
@@ -686,7 +687,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
 	u32 taken;
 	int ret = 0;
 
-	if (WARN_ON(!stream->enabled))
+	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
 		return -EIO;
 
 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
@@ -718,10 +719,11 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
 	 * only be incremented by multiples of the report size (notably also
 	 * all a power of two).
 	 */
-	if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
-		      tail > OA_BUFFER_SIZE || tail % report_size,
-		      "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
-		      head, tail))
+	if (drm_WARN_ONCE(&uncore->i915->drm,
+			  head > OA_BUFFER_SIZE || head % report_size ||
+			  tail > OA_BUFFER_SIZE || tail % report_size,
+			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
+			  head, tail))
 		return -EIO;
 
 
@@ -742,8 +744,10 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
 		 * here would imply a driver bug that would result
 		 * in an overrun.
 		 */
-		if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
-			DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
+		if (drm_WARN_ON(&uncore->i915->drm,
+				(OA_BUFFER_SIZE - head) < report_size)) {
+			drm_err(&uncore->i915->drm,
+				"Spurious OA head ptr: non-integral report offset\n");
 			break;
 		}
 
@@ -896,7 +900,7 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
 	i915_reg_t oastatus_reg;
 	int ret;
 
-	if (WARN_ON(!stream->oa_buffer.vaddr))
+	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
 		return -EIO;
 
 	oastatus_reg = IS_GEN(stream->perf->i915, 12) ?
@@ -986,7 +990,7 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
 	u32 taken;
 	int ret = 0;
 
-	if (WARN_ON(!stream->enabled))
+	if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
 		return -EIO;
 
 	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
@@ -1015,10 +1019,11 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
 	 * only be incremented by multiples of the report size (notably also
 	 * all a power of two).
 	 */
-	if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
-		      tail > OA_BUFFER_SIZE || tail % report_size,
-		      "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
-		      head, tail))
+	if (drm_WARN_ONCE(&uncore->i915->drm,
+			  head > OA_BUFFER_SIZE || head % report_size ||
+			  tail > OA_BUFFER_SIZE || tail % report_size,
+			  "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
+			  head, tail))
 		return -EIO;
 
 
@@ -1036,8 +1041,10 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
 		 * here would imply a driver bug that would result
 		 * in an overrun.
 		 */
-		if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
-			DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
+		if (drm_WARN_ON(&uncore->i915->drm,
+				(OA_BUFFER_SIZE - head) < report_size)) {
+			drm_err(&uncore->i915->drm,
+				"Spurious OA head ptr: non-integral report offset\n");
 			break;
 		}
 
@@ -1110,7 +1117,7 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
 	u32 oastatus1;
 	int ret;
 
-	if (WARN_ON(!stream->oa_buffer.vaddr))
+	if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
 		return -EIO;
 
 	oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
@@ -1319,7 +1326,13 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
 	case 12: {
 		stream->specific_ctx_id_mask =
 			((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
-		stream->specific_ctx_id = stream->specific_ctx_id_mask;
+		/*
+		 * Pick an unused context id
+		 * 0 - (NUM_CONTEXT_TAG - 1) are used by other contexts
+		 * GEN12_MAX_CONTEXT_HW_ID (0x7ff) is used by idle context
+		 */
+		stream->specific_ctx_id = (GEN12_MAX_CONTEXT_HW_ID - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
+		BUILD_BUG_ON((GEN12_MAX_CONTEXT_HW_ID - 1) < NUM_CONTEXT_TAG);
 		break;
 	}
 
@@ -1327,11 +1340,12 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
 		MISSING_CASE(INTEL_GEN(ce->engine->i915));
 	}
 
-	ce->tag = stream->specific_ctx_id_mask;
+	ce->tag = stream->specific_ctx_id;
 
-	DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
-			 stream->specific_ctx_id,
-			 stream->specific_ctx_id_mask);
+	drm_dbg(&stream->perf->i915->drm,
+		"filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
+		stream->specific_ctx_id,
+		stream->specific_ctx_id_mask);
 
 	return 0;
 }
@@ -1391,8 +1405,10 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
 	/*
 	 * Unset exclusive_stream first, it will be checked while disabling
 	 * the metric set on gen8+.
+	 *
+	 * See i915_oa_init_reg_state() and lrc_configure_all_contexts()
 	 */
-	perf->exclusive_stream = NULL;
+	WRITE_ONCE(perf->exclusive_stream, NULL);
 	perf->ops.disable_metric_set(stream);
 
 	free_oa_buffer(stream);
@@ -1575,11 +1591,12 @@ static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
 
 static int alloc_oa_buffer(struct i915_perf_stream *stream)
 {
+	struct drm_i915_private *i915 = stream->perf->i915;
 	struct drm_i915_gem_object *bo;
 	struct i915_vma *vma;
 	int ret;
 
-	if (WARN_ON(stream->oa_buffer.vma))
+	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma))
 		return -ENODEV;
 
 	BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
@@ -1587,7 +1604,7 @@ static int alloc_oa_buffer(struct i915_perf_stream *stream)
 
 	bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE);
 	if (IS_ERR(bo)) {
-		DRM_ERROR("Failed to allocate OA buffer\n");
+		drm_err(&i915->drm, "Failed to allocate OA buffer\n");
 		return PTR_ERR(bo);
 	}
 
@@ -1669,7 +1686,8 @@ static int alloc_noa_wait(struct i915_perf_stream *stream)
 
 	bo = i915_gem_object_create_internal(i915, 4096);
 	if (IS_ERR(bo)) {
-		DRM_ERROR("Failed to allocate NOA wait batchbuffer\n");
+		drm_err(&i915->drm,
+			"Failed to allocate NOA wait batchbuffer\n");
 		return PTR_ERR(bo);
 	}
 
@@ -1954,9 +1972,10 @@ out:
 	return i915_vma_get(oa_bo->vma);
 }
 
-static int emit_oa_config(struct i915_perf_stream *stream,
-			  struct i915_oa_config *oa_config,
-			  struct intel_context *ce)
+static struct i915_request *
+emit_oa_config(struct i915_perf_stream *stream,
+	       struct i915_oa_config *oa_config,
+	       struct intel_context *ce)
 {
 	struct i915_request *rq;
 	struct i915_vma *vma;
@@ -1964,7 +1983,7 @@ static int emit_oa_config(struct i915_perf_stream *stream,
 
 	vma = get_oa_vma(stream, oa_config);
 	if (IS_ERR(vma))
-		return PTR_ERR(vma);
+		return ERR_CAST(vma);
 
 	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
 	if (err)
@@ -1989,13 +2008,17 @@ static int emit_oa_config(struct i915_perf_stream *stream,
 	err = rq->engine->emit_bb_start(rq,
 					vma->node.start, 0,
 					I915_DISPATCH_SECURE);
+	if (err)
+		goto err_add_request;
+
+	i915_request_get(rq);
 err_add_request:
 	i915_request_add(rq);
 err_vma_unpin:
 	i915_vma_unpin(vma);
 err_vma_put:
 	i915_vma_put(vma);
-	return err;
+	return err ? ERR_PTR(err) : rq;
 }
 
 static struct intel_context *oa_context(struct i915_perf_stream *stream)
@@ -2003,7 +2026,8 @@ static struct intel_context *oa_context(struct i915_perf_stream *stream)
 	return stream->pinned_ctx ?: stream->engine->kernel_context;
 }
 
-static int hsw_enable_metric_set(struct i915_perf_stream *stream)
+static struct i915_request *
+hsw_enable_metric_set(struct i915_perf_stream *stream)
 {
 	struct intel_uncore *uncore = stream->uncore;
 
@@ -2178,7 +2202,9 @@ static int gen8_modify_self(struct intel_context *ce,
 	struct i915_request *rq;
 	int err;
 
+	intel_engine_pm_get(ce->engine);
 	rq = i915_request_create(ce);
+	intel_engine_pm_put(ce->engine);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
@@ -2406,7 +2432,8 @@ static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
 	return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs));
 }
 
-static int gen8_enable_metric_set(struct i915_perf_stream *stream)
+static struct i915_request *
+gen8_enable_metric_set(struct i915_perf_stream *stream)
 {
 	struct intel_uncore *uncore = stream->uncore;
 	struct i915_oa_config *oa_config = stream->oa_config;
@@ -2448,7 +2475,7 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
 	 */
 	ret = lrc_configure_all_contexts(stream, oa_config);
 	if (ret)
-		return ret;
+		return ERR_PTR(ret);
 
 	return emit_oa_config(stream, oa_config, oa_context(stream));
 }
@@ -2460,7 +2487,8 @@ static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
 			     0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
 }
 
-static int gen12_enable_metric_set(struct i915_perf_stream *stream)
+static struct i915_request *
+gen12_enable_metric_set(struct i915_perf_stream *stream)
 {
 	struct intel_uncore *uncore = stream->uncore;
 	struct i915_oa_config *oa_config = stream->oa_config;
@@ -2491,7 +2519,7 @@ static int gen12_enable_metric_set(struct i915_perf_stream *stream)
 	 */
 	ret = gen12_configure_all_contexts(stream, oa_config);
 	if (ret)
-		return ret;
+		return ERR_PTR(ret);
 
 	/*
 	 * For Gen12, performance counters are context
@@ -2501,7 +2529,7 @@ static int gen12_enable_metric_set(struct i915_perf_stream *stream)
 	if (stream->ctx) {
 		ret = gen12_configure_oar_context(stream, true);
 		if (ret)
-			return ret;
+			return ERR_PTR(ret);
 	}
 
 	return emit_oa_config(stream, oa_config, oa_context(stream));
@@ -2645,7 +2673,8 @@ static void gen7_oa_disable(struct i915_perf_stream *stream)
 	if (intel_wait_for_register(uncore,
 				    GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
 				    50))
-		DRM_ERROR("wait for OA to be disabled timed out\n");
+		drm_err(&stream->perf->i915->drm,
+			"wait for OA to be disabled timed out\n");
 }
 
 static void gen8_oa_disable(struct i915_perf_stream *stream)
@@ -2656,7 +2685,8 @@ static void gen8_oa_disable(struct i915_perf_stream *stream)
 	if (intel_wait_for_register(uncore,
 				    GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
 				    50))
-		DRM_ERROR("wait for OA to be disabled timed out\n");
+		drm_err(&stream->perf->i915->drm,
+			"wait for OA to be disabled timed out\n");
 }
 
 static void gen12_oa_disable(struct i915_perf_stream *stream)
@@ -2668,7 +2698,16 @@ static void gen12_oa_disable(struct i915_perf_stream *stream)
 				    GEN12_OAG_OACONTROL,
 				    GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0,
 				    50))
-		DRM_ERROR("wait for OA to be disabled timed out\n");
+		drm_err(&stream->perf->i915->drm,
+			"wait for OA to be disabled timed out\n");
+
+	intel_uncore_write(uncore, GEN12_OA_TLB_INV_CR, 1);
+	if (intel_wait_for_register(uncore,
+				    GEN12_OA_TLB_INV_CR,
+				    1, 0,
+				    50))
+		drm_err(&stream->perf->i915->drm,
+			"wait for OA tlb invalidate timed out\n");
 }
 
 /**
@@ -2696,6 +2735,20 @@ static const struct i915_perf_stream_ops i915_oa_stream_ops = {
 	.read = i915_oa_read,
 };
 
+static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
+{
+	struct i915_request *rq;
+
+	rq = stream->perf->ops.enable_metric_set(stream);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+
+	i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
+	i915_request_put(rq);
+
+	return 0;
+}
+
 /**
  * i915_oa_stream_init - validate combined props for OA stream and init
  * @stream: An i915 perf stream
@@ -2718,6 +2771,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 			       struct drm_i915_perf_open_param *param,
 			       struct perf_open_properties *props)
 {
+	struct drm_i915_private *i915 = stream->perf->i915;
 	struct i915_perf *perf = stream->perf;
 	int format_size;
 	int ret;
@@ -2774,7 +2828,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 	stream->sample_size += format_size;
 
 	stream->oa_buffer.format_size = format_size;
-	if (WARN_ON(stream->oa_buffer.format_size == 0))
+	if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format_size == 0))
 		return -EINVAL;
 
 	stream->hold_preemption = props->hold_preemption;
@@ -2827,9 +2881,9 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 		goto err_oa_buf_alloc;
 
 	stream->ops = &i915_oa_stream_ops;
-	perf->exclusive_stream = stream;
+	WRITE_ONCE(perf->exclusive_stream, stream);
 
-	ret = perf->ops.enable_metric_set(stream);
+	ret = i915_perf_stream_enable_sync(stream);
 	if (ret) {
 		DRM_DEBUG("Unable to enable metric set\n");
 		goto err_enable;
@@ -2847,7 +2901,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 	return 0;
 
 err_enable:
-	perf->exclusive_stream = NULL;
+	WRITE_ONCE(perf->exclusive_stream, NULL);
 	perf->ops.disable_metric_set(stream);
 
 	free_oa_buffer(stream);
@@ -2873,12 +2927,11 @@ void i915_oa_init_reg_state(const struct intel_context *ce,
 {
 	struct i915_perf_stream *stream;
 
-	/* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
-
 	if (engine->class != RENDER_CLASS)
 		return;
 
-	stream = engine->i915->perf.exclusive_stream;
+	/* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
+	stream = READ_ONCE(engine->i915->perf.exclusive_stream);
 	/*
 	 * For gen12, only CTX_R_PWR_CLK_STATE needs update, but the caller
 	 * is already doing that, so nothing to be done for gen12 here.
@@ -3147,7 +3200,7 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream,
 		return -EINVAL;
 
 	if (config != stream->oa_config) {
-		int err;
+		struct i915_request *rq;
 
 		/*
 		 * If OA is bound to a specific context, emit the
@@ -3158,11 +3211,13 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream,
 		 * When set globally, we use a low priority kernel context,
 		 * so it will effectively take effect when idle.
 		 */
-		err = emit_oa_config(stream, config, oa_context(stream));
-		if (err == 0)
+		rq = emit_oa_config(stream, config, oa_context(stream));
+		if (!IS_ERR(rq)) {
 			config = xchg(&stream->oa_config, config);
-		else
-			ret = err;
+			i915_request_put(rq);
+		} else {
+			ret = PTR_ERR(rq);
+		}
 	}
 
 	i915_oa_config_put(config);