75 files changed, 4388 insertions, 2424 deletions
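A recurring cleanup in this series replaces open-coded debugfs seq_file boilerplate (a foo_open() wrapper plus a full file_operations table) with the DEFINE_SHOW_ATTRIBUTE() helper from include/linux/seq_file.h. As a rough sketch of what the macro generates (paraphrased from its upstream definition, so treat the details as illustrative): it assumes a show function named <name>_show and wires inode->i_private through single_open(), which is why the i915_displayport_test_* hunks below also switch m->private from struct drm_device to struct drm_i915_private.

/* Rough expansion of DEFINE_SHOW_ATTRIBUTE(name), per include/linux/seq_file.h: */
#define DEFINE_SHOW_ATTRIBUTE(__name)					\
static int __name ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __name ## _show, inode->i_private);	\
}									\
									\
static const struct file_operations __name ## _fops = {		\
	.owner		= THIS_MODULE,					\
	.open		= __name ## _open,				\
	.read		= seq_read,					\
	.llseek		= seq_lseek,					\
	.release	= single_release,				\
}

So DEFINE_SHOW_ATTRIBUTE(vgpu_mmio_diff) emits a vgpu_mmio_diff_open() and vgpu_mmio_diff_fops equivalent to the hand-written versions deleted from gvt/debugfs.c (modulo the added .owner initializer).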
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst index 41dc881b00dc..7ecad7134677 100644 --- a/Documentation/gpu/i915.rst +++ b/Documentation/gpu/i915.rst @@ -335,6 +335,15 @@ objects, which has the goal to make space in gpu virtual address spaces. .. kernel-doc:: drivers/gpu/drm/i915/i915_gem_shrinker.c :internal: +WOPCM +===== + +WOPCM Layout +------------ + +.. kernel-doc:: drivers/gpu/drm/i915/intel_wopcm.c + :doc: WOPCM Layout + GuC === @@ -359,6 +368,12 @@ GuC Firmware Layout .. kernel-doc:: drivers/gpu/drm/i915/intel_guc_fwif.h :doc: GuC Firmware Layout +GuC Address Space +----------------- + +.. kernel-doc:: drivers/gpu/drm/i915/intel_guc.c + :doc: GuC Address Space + Tracing ======= diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug index 108d21f34777..80efee1ff7f3 100644 --- a/drivers/gpu/drm/i915/Kconfig.debug +++ b/drivers/gpu/drm/i915/Kconfig.debug @@ -25,6 +25,7 @@ config DRM_I915_DEBUG select X86_MSR # used by igt/pm_rpm select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks) select DRM_DEBUG_MM if DRM=y + select STACKDEPOT if DRM=y # for DRM_DEBUG_MM select DRM_DEBUG_MM_SELFTEST select SW_SYNC # signaling validation framework (igt/syncobj*) select DRM_I915_SW_FENCE_DEBUG_OBJECTS @@ -89,6 +90,18 @@ config DRM_I915_SW_FENCE_CHECK_DAG If in doubt, say "N". +config DRM_I915_DEBUG_GUC + bool "Enable additional driver debugging for GuC" + depends on DRM_I915 + default n + help + Choose this option to turn on extra driver debugging that may affect + performance but will help resolve GuC related issues. + + Recommended for driver developers only. + + If in doubt, say "N". + config DRM_I915_SELFTEST bool "Enable selftests upon driver load" depends on DRM_I915 diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 4eee91a3a236..0c79c19223af 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -12,7 +12,7 @@ # Note the danger in using -Wall -Wextra is that when CI updates gcc we # will most likely get a sudden build breakage... Hopefully we will fix # new warnings before CI updates! 
-subdir-ccflags-y := -Wall -Wextra +subdir-ccflags-y := -Wall -Wextra -Wvla subdir-ccflags-y += $(call cc-disable-warning, unused-parameter) subdir-ccflags-y += $(call cc-disable-warning, type-limits) subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers) @@ -79,7 +79,8 @@ i915-y += i915_cmd_parser.o \ intel_lrc.o \ intel_mocs.o \ intel_ringbuffer.o \ - intel_uncore.o + intel_uncore.o \ + intel_wopcm.o # general-purpose microcontroller (GuC) support i915-y += intel_uc.o \ @@ -171,7 +172,8 @@ i915-y += i915_perf.o \ i915_oa_glk.o \ i915_oa_cflgt2.o \ i915_oa_cflgt3.o \ - i915_oa_cnl.o + i915_oa_cnl.o \ + i915_oa_icl.o ifeq ($(CONFIG_DRM_I915_GVT),y) i915-y += intel_gvt.o diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c index 32a66dfdf112..f7d0078eb61b 100644 --- a/drivers/gpu/drm/i915/gvt/debugfs.c +++ b/drivers/gpu/drm/i915/gvt/debugfs.c @@ -122,18 +122,7 @@ static int vgpu_mmio_diff_show(struct seq_file *s, void *unused) seq_printf(s, "Total: %d, Diff: %d\n", param.total, param.diff); return 0; } - -static int vgpu_mmio_diff_open(struct inode *inode, struct file *file) -{ - return single_open(file, vgpu_mmio_diff_show, inode->i_private); -} - -static const struct file_operations vgpu_mmio_diff_fops = { - .open = vgpu_mmio_diff_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(vgpu_mmio_diff); /** * intel_gvt_debugfs_add_vgpu - register debugfs entries for a vGPU diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 89f7ff2c652e..1dba2c451255 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1796,9 +1796,9 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); struct intel_rps *rps = &dev_priv->gt_pm.rps; - int ret = 0; - int gpu_freq, ia_freq; unsigned int max_gpu_freq, min_gpu_freq; + int gpu_freq, ia_freq; + int ret; if (!HAS_LLC(dev_priv)) return -ENODEV; @@ -1809,13 +1809,12 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) if (ret) goto out; + min_gpu_freq = rps->min_freq; + max_gpu_freq = rps->max_freq; if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) { /* Convert GT frequency to 50 HZ units */ - min_gpu_freq = rps->min_freq_softlimit / GEN9_FREQ_SCALER; - max_gpu_freq = rps->max_freq_softlimit / GEN9_FREQ_SCALER; - } else { - min_gpu_freq = rps->min_freq_softlimit; - max_gpu_freq = rps->max_freq_softlimit; + min_gpu_freq /= GEN9_FREQ_SCALER; + max_gpu_freq /= GEN9_FREQ_SCALER; } seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); @@ -1923,8 +1922,8 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring) { - seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u)", - ring->space, ring->head, ring->tail); + seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)", + ring->space, ring->head, ring->tail, ring->emit); } static int i915_context_status(struct seq_file *m, void *unused) @@ -2326,30 +2325,45 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data) return 0; } -static void i915_guc_log_info(struct seq_file *m, - struct drm_i915_private *dev_priv) +static const char * +stringify_guc_log_type(enum guc_log_buffer_type type) { - struct intel_guc *guc = &dev_priv->guc; + switch (type) { + case GUC_ISR_LOG_BUFFER: + return "ISR"; + 
case GUC_DPC_LOG_BUFFER: + return "DPC"; + case GUC_CRASH_DUMP_LOG_BUFFER: + return "CRASH"; + default: + MISSING_CASE(type); + } - seq_puts(m, "\nGuC logging stats:\n"); + return ""; +} - seq_printf(m, "\tISR: flush count %10u, overflow count %10u\n", - guc->log.flush_count[GUC_ISR_LOG_BUFFER], - guc->log.total_overflow_count[GUC_ISR_LOG_BUFFER]); +static void i915_guc_log_info(struct seq_file *m, + struct drm_i915_private *dev_priv) +{ + struct intel_guc_log *log = &dev_priv->guc.log; + enum guc_log_buffer_type type; - seq_printf(m, "\tDPC: flush count %10u, overflow count %10u\n", - guc->log.flush_count[GUC_DPC_LOG_BUFFER], - guc->log.total_overflow_count[GUC_DPC_LOG_BUFFER]); + if (!intel_guc_log_relay_enabled(log)) { + seq_puts(m, "GuC log relay disabled\n"); + return; + } - seq_printf(m, "\tCRASH: flush count %10u, overflow count %10u\n", - guc->log.flush_count[GUC_CRASH_DUMP_LOG_BUFFER], - guc->log.total_overflow_count[GUC_CRASH_DUMP_LOG_BUFFER]); + seq_puts(m, "GuC logging stats:\n"); - seq_printf(m, "\tTotal flush interrupt count: %u\n", - guc->log.flush_interrupt_count); + seq_printf(m, "\tRelay full count: %u\n", + log->relay.full_count); - seq_printf(m, "\tCapture miss count: %u\n", - guc->log.capture_miss_count); + for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) { + seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n", + stringify_guc_log_type(type), + log->stats[type].flush, + log->stats[type].sampled_overflow); + } } static void i915_guc_client_info(struct seq_file *m, @@ -2379,14 +2393,19 @@ static int i915_guc_info(struct seq_file *m, void *data) struct drm_i915_private *dev_priv = node_to_i915(m->private); const struct intel_guc *guc = &dev_priv->guc; - if (!USES_GUC_SUBMISSION(dev_priv)) + if (!USES_GUC(dev_priv)) return -ENODEV; + i915_guc_log_info(m, dev_priv); + + if (!USES_GUC_SUBMISSION(dev_priv)) + return 0; + GEM_BUG_ON(!guc->execbuf_client); - seq_printf(m, "Doorbell map:\n"); + seq_printf(m, "\nDoorbell map:\n"); seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap); - seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline); + seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline); seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client); i915_guc_client_info(m, dev_priv, guc->execbuf_client); @@ -2396,8 +2415,6 @@ static int i915_guc_info(struct seq_file *m, void *data) i915_guc_client_info(m, dev_priv, guc->preempt_client); } - i915_guc_log_info(m, dev_priv); - /* Add more as required ... 
*/ return 0; @@ -2496,35 +2513,73 @@ static int i915_guc_log_dump(struct seq_file *m, void *data) return 0; } -static int i915_guc_log_control_get(void *data, u64 *val) +static int i915_guc_log_level_get(void *data, u64 *val) { struct drm_i915_private *dev_priv = data; - if (!HAS_GUC(dev_priv)) + if (!USES_GUC(dev_priv)) return -ENODEV; - if (!dev_priv->guc.log.vma) - return -EINVAL; - - *val = i915_modparams.guc_log_level; + *val = intel_guc_log_level_get(&dev_priv->guc.log); return 0; } -static int i915_guc_log_control_set(void *data, u64 val) +static int i915_guc_log_level_set(void *data, u64 val) { struct drm_i915_private *dev_priv = data; - if (!HAS_GUC(dev_priv)) + if (!USES_GUC(dev_priv)) return -ENODEV; - return intel_guc_log_control(&dev_priv->guc, val); + return intel_guc_log_level_set(&dev_priv->guc.log, val); } -DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops, - i915_guc_log_control_get, i915_guc_log_control_set, +DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops, + i915_guc_log_level_get, i915_guc_log_level_set, "%lld\n"); +static int i915_guc_log_relay_open(struct inode *inode, struct file *file) +{ + struct drm_i915_private *dev_priv = inode->i_private; + + if (!USES_GUC(dev_priv)) + return -ENODEV; + + file->private_data = &dev_priv->guc.log; + + return intel_guc_log_relay_open(&dev_priv->guc.log); +} + +static ssize_t +i915_guc_log_relay_write(struct file *filp, + const char __user *ubuf, + size_t cnt, + loff_t *ppos) +{ + struct intel_guc_log *log = filp->private_data; + + intel_guc_log_relay_flush(log); + + return cnt; +} + +static int i915_guc_log_relay_release(struct inode *inode, struct file *file) +{ + struct drm_i915_private *dev_priv = inode->i_private; + + intel_guc_log_relay_close(&dev_priv->guc.log); + + return 0; +} + +static const struct file_operations i915_guc_log_relay_fops = { + .owner = THIS_MODULE, + .open = i915_guc_log_relay_open, + .write = i915_guc_log_relay_write, + .release = i915_guc_log_relay_release, +}; + static const char *psr2_live_status(u32 val) { static const char * const live_status[] = { @@ -2569,14 +2624,13 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) mutex_lock(&dev_priv->psr.lock); seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled)); - seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active)); seq_printf(m, "Busy frontbuffer bits: 0x%03x\n", dev_priv->psr.busy_frontbuffer_bits); seq_printf(m, "Re-enable work scheduled: %s\n", yesno(work_busy(&dev_priv->psr.work.work))); if (HAS_DDI(dev_priv)) { - if (dev_priv->psr.psr2_support) + if (dev_priv->psr.psr2_enabled) enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE; else enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE; @@ -2624,7 +2678,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) seq_printf(m, "Performance_Counter: %u\n", psrperf); } - if (dev_priv->psr.psr2_support) { + if (dev_priv->psr.psr2_enabled) { u32 psr2 = I915_READ(EDP_PSR2_STATUS); seq_printf(m, "EDP_PSR2_STATUS: %x [%s]\n", @@ -3231,7 +3285,8 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused) for (i = 0; i < dev_priv->num_shared_dpll; i++) { struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; - seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id); + seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name, + pll->info->id); seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n", pll->state.crtc_mask, pll->active_mask, yesno(pll->on)); seq_printf(m, " tracked hardware state:\n"); @@ -3567,7 +3622,8 @@ static ssize_t 
i915_displayport_test_active_write(struct file *file, static int i915_displayport_test_active_show(struct seq_file *m, void *data) { - struct drm_device *dev = m->private; + struct drm_i915_private *dev_priv = m->private; + struct drm_device *dev = &dev_priv->drm; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; struct intel_dp *intel_dp; @@ -3601,10 +3657,8 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data) static int i915_displayport_test_active_open(struct inode *inode, struct file *file) { - struct drm_i915_private *dev_priv = inode->i_private; - return single_open(file, i915_displayport_test_active_show, - &dev_priv->drm); + inode->i_private); } static const struct file_operations i915_displayport_test_active_fops = { @@ -3618,7 +3672,8 @@ static const struct file_operations i915_displayport_test_active_fops = { static int i915_displayport_test_data_show(struct seq_file *m, void *data) { - struct drm_device *dev = m->private; + struct drm_i915_private *dev_priv = m->private; + struct drm_device *dev = &dev_priv->drm; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; struct intel_dp *intel_dp; @@ -3657,26 +3712,12 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data) return 0; } -static int i915_displayport_test_data_open(struct inode *inode, - struct file *file) -{ - struct drm_i915_private *dev_priv = inode->i_private; - - return single_open(file, i915_displayport_test_data_show, - &dev_priv->drm); -} - -static const struct file_operations i915_displayport_test_data_fops = { - .owner = THIS_MODULE, - .open = i915_displayport_test_data_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release -}; +DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data); static int i915_displayport_test_type_show(struct seq_file *m, void *data) { - struct drm_device *dev = m->private; + struct drm_i915_private *dev_priv = m->private; + struct drm_device *dev = &dev_priv->drm; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; struct intel_dp *intel_dp; @@ -3703,23 +3744,7 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data) return 0; } - -static int i915_displayport_test_type_open(struct inode *inode, - struct file *file) -{ - struct drm_i915_private *dev_priv = inode->i_private; - - return single_open(file, i915_displayport_test_type_show, - &dev_priv->drm); -} - -static const struct file_operations i915_displayport_test_type_fops = { - .owner = THIS_MODULE, - .open = i915_displayport_test_type_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release -}; +DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type); static void wm_latency_show(struct seq_file *m, const uint16_t wm[8]) { @@ -3987,8 +4012,8 @@ i915_wedged_set(void *data, u64 val) engine->hangcheck.stalled = true; } - i915_handle_error(i915, val, "Manually set wedged engine mask = %llx", - val); + i915_handle_error(i915, val, I915_ERROR_CAPTURE, + "Manually set wedged engine mask = %llx", val); wait_on_bit(&i915->gpu_error.flags, I915_RESET_HANDOFF, @@ -4316,9 +4341,10 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops, static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv, struct sseu_dev_info *sseu) { - int ss_max = 2; +#define SS_MAX 2 + const int ss_max = SS_MAX; + u32 sig1[SS_MAX], sig2[SS_MAX]; int ss; - u32 sig1[ss_max], sig2[ss_max]; sig1[0] = I915_READ(CHV_POWER_SS0_SIG1); sig1[1] = I915_READ(CHV_POWER_SS1_SIG1); @@ -4342,15 +4368,16 
@@ static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv, sseu->eu_per_subslice = max_t(unsigned int, sseu->eu_per_subslice, eu_cnt); } +#undef SS_MAX } static void gen10_sseu_device_status(struct drm_i915_private *dev_priv, struct sseu_dev_info *sseu) { +#define SS_MAX 6 const struct intel_device_info *info = INTEL_INFO(dev_priv); + u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2]; int s, ss; - u32 s_reg[info->sseu.max_slices]; - u32 eu_reg[2 * info->sseu.max_subslices], eu_mask[2]; for (s = 0; s < info->sseu.max_slices; s++) { /* @@ -4397,15 +4424,16 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv, eu_cnt); } } +#undef SS_MAX } static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, struct sseu_dev_info *sseu) { +#define SS_MAX 3 const struct intel_device_info *info = INTEL_INFO(dev_priv); + u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2]; int s, ss; - u32 s_reg[info->sseu.max_slices]; - u32 eu_reg[2 * info->sseu.max_subslices], eu_mask[2]; for (s = 0; s < info->sseu.max_slices; s++) { s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s)); @@ -4452,6 +4480,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, eu_cnt); } } +#undef SS_MAX } static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv, @@ -4779,7 +4808,8 @@ static const struct i915_debugfs_files { {"i915_dp_test_data", &i915_displayport_test_data_fops}, {"i915_dp_test_type", &i915_displayport_test_type_fops}, {"i915_dp_test_active", &i915_displayport_test_active_fops}, - {"i915_guc_log_control", &i915_guc_log_control_fops}, + {"i915_guc_log_level", &i915_guc_log_level_fops}, + {"i915_guc_log_relay", &i915_guc_log_relay_fops}, {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops}, {"i915_ipc_status", &i915_ipc_status_fops}, {"i915_drrs_ctl", &i915_drrs_ctl_fops} @@ -4876,19 +4906,7 @@ static int i915_dpcd_show(struct seq_file *m, void *data) return 0; } - -static int i915_dpcd_open(struct inode *inode, struct file *file) -{ - return single_open(file, i915_dpcd_show, inode->i_private); -} - -static const struct file_operations i915_dpcd_fops = { - .owner = THIS_MODULE, - .open = i915_dpcd_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(i915_dpcd); static int i915_panel_show(struct seq_file *m, void *data) { @@ -4910,19 +4928,7 @@ static int i915_panel_show(struct seq_file *m, void *data) return 0; } - -static int i915_panel_open(struct inode *inode, struct file *file) -{ - return single_open(file, i915_panel_show, inode->i_private); -} - -static const struct file_operations i915_panel_fops = { - .owner = THIS_MODULE, - .open = i915_panel_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(i915_panel); /** * i915_debugfs_connector_add - add i915 specific connector debugfs files diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 07c07d55398b..d354627882e3 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -377,9 +377,9 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data, value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool; break; case I915_PARAM_HUC_STATUS: - intel_runtime_pm_get(dev_priv); - value = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED; - intel_runtime_pm_put(dev_priv); + value = intel_huc_check_status(&dev_priv->huc); + if (value < 0) + return value; break; case I915_PARAM_MMAP_GTT_VERSION: /* Though we've started our numbering from 1, and so 
class all @@ -692,11 +692,9 @@ static int i915_load_modeset_init(struct drm_device *dev) if (ret) goto cleanup_irq; - intel_uc_init_fw(dev_priv); - ret = i915_gem_init(dev_priv); if (ret) - goto cleanup_uc; + goto cleanup_irq; intel_setup_overlay(dev_priv); @@ -716,8 +714,6 @@ cleanup_gem: if (i915_gem_suspend(dev_priv)) DRM_ERROR("failed to idle hardware; continuing to unload!\n"); i915_gem_fini(dev_priv); -cleanup_uc: - intel_uc_fini_fw(dev_priv); cleanup_irq: drm_irq_uninstall(dev); intel_teardown_gmbus(dev_priv); @@ -919,16 +915,21 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, mutex_init(&dev_priv->wm.wm_mutex); mutex_init(&dev_priv->pps_mutex); - intel_uc_init_early(dev_priv); i915_memcpy_init_early(dev_priv); ret = i915_workqueues_init(dev_priv); if (ret < 0) goto err_engines; + ret = i915_gem_init_early(dev_priv); + if (ret < 0) + goto err_workqueues; + /* This must be called before any calls to HAS_PCH_* */ intel_detect_pch(dev_priv); + intel_wopcm_init_early(&dev_priv->wopcm); + intel_uc_init_early(dev_priv); intel_pm_setup(dev_priv); intel_init_dpio(dev_priv); intel_power_domains_init(dev_priv); @@ -937,18 +938,13 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, intel_init_display_hooks(dev_priv); intel_init_clock_gating_hooks(dev_priv); intel_init_audio_hooks(dev_priv); - ret = i915_gem_load_init(dev_priv); - if (ret < 0) - goto err_irq; - intel_display_crc_init(dev_priv); intel_detect_preproduction_hw(dev_priv); return 0; -err_irq: - intel_irq_fini(dev_priv); +err_workqueues: i915_workqueues_cleanup(dev_priv); err_engines: i915_engines_cleanup(dev_priv); @@ -961,8 +957,9 @@ err_engines: */ static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv) { - i915_gem_load_cleanup(dev_priv); intel_irq_fini(dev_priv); + intel_uc_cleanup_early(dev_priv); + i915_gem_cleanup_early(dev_priv); i915_workqueues_cleanup(dev_priv); i915_engines_cleanup(dev_priv); } @@ -1032,6 +1029,10 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) intel_uncore_init(dev_priv); + intel_device_info_init_mmio(dev_priv); + + intel_uncore_prune(dev_priv); + intel_uc_init_mmio(dev_priv); ret = intel_engines_init_mmio(dev_priv); @@ -1074,8 +1075,6 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv) i915_modparams.enable_ppgtt); DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915_modparams.enable_ppgtt); - intel_uc_sanitize_options(dev_priv); - intel_gvt_sanitize_options(dev_priv); } @@ -1238,7 +1237,6 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) /* Reveal our presence to userspace */ if (drm_dev_register(dev, 0) == 0) { i915_debugfs_register(dev_priv); - i915_guc_log_register(dev_priv); i915_setup_sysfs(dev_priv); /* Depends on sysfs having been initialized */ @@ -1298,7 +1296,6 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv) i915_pmu_unregister(dev_priv); i915_teardown_sysfs(dev_priv); - i915_guc_log_unregister(dev_priv); drm_dev_unregister(&dev_priv->drm); i915_gem_shrinker_unregister(dev_priv); @@ -1457,7 +1454,6 @@ void i915_driver_unload(struct drm_device *dev) i915_reset_error_state(dev_priv); i915_gem_fini(dev_priv); - intel_uc_fini_fw(dev_priv); intel_fbc_cleanup_cfb(dev_priv); intel_power_domains_fini(dev_priv); @@ -1870,7 +1866,6 @@ static int i915_resume_switcheroo(struct drm_device *dev) /** * i915_reset - reset chip after a hang * @i915: #drm_i915_private to reset - * @flags: Instructions * * Reset the chip. Useful if a hang is detected. 
Marks the device as wedged * on failure. @@ -1885,7 +1880,7 @@ static int i915_resume_switcheroo(struct drm_device *dev) * - re-init interrupt state * - re-init display */ -void i915_reset(struct drm_i915_private *i915, unsigned int flags) +void i915_reset(struct drm_i915_private *i915) { struct i915_gpu_error *error = &i915->gpu_error; int ret; @@ -1902,8 +1897,9 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags) if (!i915_gem_unset_wedged(i915)) goto wakeup; - if (!(flags & I915_RESET_QUIET)) - dev_notice(i915->drm.dev, "Resetting chip after gpu hang\n"); + if (error->reason) + dev_notice(i915->drm.dev, + "Resetting chip for %s\n", error->reason); error->reset_count++; disable_irq(i915->drm.irq); @@ -1992,7 +1988,6 @@ taint: error: i915_gem_set_wedged(i915); i915_retire_requests(i915); - intel_gpu_reset(i915, ALL_ENGINES); goto finish; } @@ -2005,7 +2000,7 @@ static inline int intel_gt_reset_engine(struct drm_i915_private *dev_priv, /** * i915_reset_engine - reset GPU engine to recover from a hang * @engine: engine to reset - * @flags: options + * @msg: reason for GPU reset; or NULL for no dev_notice() * * Reset a specific GPU engine. Useful if a hang is detected. * Returns zero on successful reset or otherwise an error code. @@ -2015,7 +2010,7 @@ static inline int intel_gt_reset_engine(struct drm_i915_private *dev_priv, * - reset engine (which will force the engine to idle) * - re-init/configure engine */ -int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags) +int i915_reset_engine(struct intel_engine_cs *engine, const char *msg) { struct i915_gpu_error *error = &engine->i915->gpu_error; struct i915_request *active_request; @@ -2030,10 +2025,9 @@ int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags) goto out; } - if (!(flags & I915_RESET_QUIET)) { + if (msg) dev_notice(engine->i915->drm.dev, - "Resetting %s after gpu hang\n", engine->name); - } + "Resetting %s for %s\n", engine->name, msg); error->reset_engine_count[engine->id]++; if (!engine->i915->guc.execbuf_client) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ce18b6cf6e68..5373b171bb96 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -64,6 +64,7 @@ #include "intel_opregion.h" #include "intel_ringbuffer.h" #include "intel_uncore.h" +#include "intel_wopcm.h" #include "intel_uc.h" #include "i915_gem.h" @@ -72,7 +73,7 @@ #include "i915_gem_object.h" #include "i915_gem_gtt.h" #include "i915_gem_timeline.h" - +#include "i915_gpu_error.h" #include "i915_request.h" #include "i915_vma.h" @@ -261,6 +262,7 @@ enum hpd_pin { HPD_PORT_C, HPD_PORT_D, HPD_PORT_E, + HPD_PORT_F, HPD_NUM_PINS }; @@ -453,172 +455,6 @@ struct intel_csr { uint32_t allowed_dc_mask; }; -struct intel_display_error_state; - -struct i915_gpu_state { - struct kref ref; - ktime_t time; - ktime_t boottime; - ktime_t uptime; - - struct drm_i915_private *i915; - - char error_msg[128]; - bool simulated; - bool awake; - bool wakelock; - bool suspended; - int iommu; - u32 reset_count; - u32 suspend_count; - struct intel_device_info device_info; - struct intel_driver_caps driver_caps; - struct i915_params params; - - struct i915_error_uc { - struct intel_uc_fw guc_fw; - struct intel_uc_fw huc_fw; - struct drm_i915_error_object *guc_log; - } uc; - - /* Generic register state */ - u32 eir; - u32 pgtbl_er; - u32 ier; - u32 gtier[4], ngtier; - u32 ccid; - u32 derrmr; - u32 forcewake; - u32 error; /* gen6+ */ - u32 err_int; /* gen7 */ - u32 fault_data0; 
/* gen8, gen9 */ - u32 fault_data1; /* gen8, gen9 */ - u32 done_reg; - u32 gac_eco; - u32 gam_ecochk; - u32 gab_ctl; - u32 gfx_mode; - - u32 nfence; - u64 fence[I915_MAX_NUM_FENCES]; - struct intel_overlay_error_state *overlay; - struct intel_display_error_state *display; - - struct drm_i915_error_engine { - int engine_id; - /* Software tracked state */ - bool idle; - bool waiting; - int num_waiters; - unsigned long hangcheck_timestamp; - bool hangcheck_stalled; - enum intel_engine_hangcheck_action hangcheck_action; - struct i915_address_space *vm; - int num_requests; - u32 reset_count; - - /* position of active request inside the ring */ - u32 rq_head, rq_post, rq_tail; - - /* our own tracking of ring head and tail */ - u32 cpu_ring_head; - u32 cpu_ring_tail; - - u32 last_seqno; - - /* Register state */ - u32 start; - u32 tail; - u32 head; - u32 ctl; - u32 mode; - u32 hws; - u32 ipeir; - u32 ipehr; - u32 bbstate; - u32 instpm; - u32 instps; - u32 seqno; - u64 bbaddr; - u64 acthd; - u32 fault_reg; - u64 faddr; - u32 rc_psmi; /* sleep state */ - u32 semaphore_mboxes[I915_NUM_ENGINES - 1]; - struct intel_instdone instdone; - - struct drm_i915_error_context { - char comm[TASK_COMM_LEN]; - pid_t pid; - u32 handle; - u32 hw_id; - int priority; - int ban_score; - int active; - int guilty; - bool bannable; - } context; - - struct drm_i915_error_object { - u64 gtt_offset; - u64 gtt_size; - int page_count; - int unused; - u32 *pages[0]; - } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page; - - struct drm_i915_error_object **user_bo; - long user_bo_count; - - struct drm_i915_error_object *wa_ctx; - struct drm_i915_error_object *default_state; - - struct drm_i915_error_request { - long jiffies; - pid_t pid; - u32 context; - int priority; - int ban_score; - u32 seqno; - u32 head; - u32 tail; - } *requests, execlist[EXECLIST_MAX_PORTS]; - unsigned int num_ports; - - struct drm_i915_error_waiter { - char comm[TASK_COMM_LEN]; - pid_t pid; - u32 seqno; - } *waiters; - - struct { - u32 gfx_mode; - union { - u64 pdp[4]; - u32 pp_dir_base; - }; - } vm_info; - } engine[I915_NUM_ENGINES]; - - struct drm_i915_error_buffer { - u32 size; - u32 name; - u32 rseqno[I915_NUM_ENGINES], wseqno; - u64 gtt_offset; - u32 read_domains; - u32 write_domain; - s32 fence_reg:I915_MAX_NUM_FENCE_BITS; - u32 tiling:2; - u32 dirty:1; - u32 purgeable:1; - u32 userptr:1; - s32 engine:4; - u32 cache_level:3; - } *active_bo[I915_NUM_ENGINES], *pinned_bo; - u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count; - struct i915_address_space *active_vm[I915_NUM_ENGINES]; -}; - enum i915_cache_level { I915_CACHE_NONE = 0, I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */ @@ -766,12 +602,13 @@ struct i915_psr { bool active; struct delayed_work work; unsigned busy_frontbuffer_bits; - bool psr2_support; - bool aux_frame_sync; + bool sink_psr2_support; bool link_standby; - bool y_cord_support; bool colorimetry_support; bool alpm; + bool has_hw_tracking; + bool psr2_enabled; + u8 sink_sync_latency; void (*enable_source)(struct intel_dp *, const struct intel_crtc_state *); @@ -1146,16 +983,6 @@ struct i915_gem_mm { u32 object_count; }; -struct drm_i915_error_state_buf { - struct drm_i915_private *i915; - unsigned bytes; - unsigned size; - int err; - u8 *buf; - loff_t start; - loff_t pos; -}; - #define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */ #define I915_RESET_TIMEOUT (10 * HZ) /* 10s */ @@ -1164,102 +991,6 @@ struct drm_i915_error_state_buf { #define I915_ENGINE_DEAD_TIMEOUT (4 * HZ) /* Seqno, head and subunits dead 
*/ #define I915_SEQNO_DEAD_TIMEOUT (12 * HZ) /* Seqno dead with active head */ -struct i915_gpu_error { - /* For hangcheck timer */ -#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ -#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) - - struct delayed_work hangcheck_work; - - /* For reset and error_state handling. */ - spinlock_t lock; - /* Protected by the above dev->gpu_error.lock. */ - struct i915_gpu_state *first_error; - - atomic_t pending_fb_pin; - - unsigned long missed_irq_rings; - - /** - * State variable controlling the reset flow and count - * - * This is a counter which gets incremented when reset is triggered, - * - * Before the reset commences, the I915_RESET_BACKOFF bit is set - * meaning that any waiters holding onto the struct_mutex should - * relinquish the lock immediately in order for the reset to start. - * - * If reset is not completed succesfully, the I915_WEDGE bit is - * set meaning that hardware is terminally sour and there is no - * recovery. All waiters on the reset_queue will be woken when - * that happens. - * - * This counter is used by the wait_seqno code to notice that reset - * event happened and it needs to restart the entire ioctl (since most - * likely the seqno it waited for won't ever signal anytime soon). - * - * This is important for lock-free wait paths, where no contended lock - * naturally enforces the correct ordering between the bail-out of the - * waiter and the gpu reset work code. - */ - unsigned long reset_count; - - /** - * flags: Control various stages of the GPU reset - * - * #I915_RESET_BACKOFF - When we start a reset, we want to stop any - * other users acquiring the struct_mutex. To do this we set the - * #I915_RESET_BACKOFF bit in the error flags when we detect a reset - * and then check for that bit before acquiring the struct_mutex (in - * i915_mutex_lock_interruptible()?). I915_RESET_BACKOFF serves a - * secondary role in preventing two concurrent global reset attempts. - * - * #I915_RESET_HANDOFF - To perform the actual GPU reset, we need the - * struct_mutex. We try to acquire the struct_mutex in the reset worker, - * but it may be held by some long running waiter (that we cannot - * interrupt without causing trouble). Once we are ready to do the GPU - * reset, we set the I915_RESET_HANDOFF bit and wakeup any waiters. If - * they already hold the struct_mutex and want to participate they can - * inspect the bit and do the reset directly, otherwise the worker - * waits for the struct_mutex. - * - * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to - * acquire the struct_mutex to reset an engine, we need an explicit - * flag to prevent two concurrent reset attempts in the same engine. - * As the number of engines continues to grow, allocate the flags from - * the most significant bits. - * - * #I915_WEDGED - If reset fails and we can no longer use the GPU, - * we set the #I915_WEDGED bit. Prior to command submission, e.g. - * i915_request_alloc(), this bit is checked and the sequence - * aborted (with -EIO reported to userspace) if set. - */ - unsigned long flags; -#define I915_RESET_BACKOFF 0 -#define I915_RESET_HANDOFF 1 -#define I915_RESET_MODESET 2 -#define I915_WEDGED (BITS_PER_LONG - 1) -#define I915_RESET_ENGINE (I915_WEDGED - I915_NUM_ENGINES) - - /** Number of times an engine has been reset */ - u32 reset_engine_count[I915_NUM_ENGINES]; - - /** - * Waitqueue to signal when a hang is detected. Used to for waiters - * to release the struct_mutex for the reset to procede. 
- */ - wait_queue_head_t wait_queue; - - /** - * Waitqueue to signal when the reset has completed. Used by clients - * that wait for dev_priv->mm.wedged to settle. - */ - wait_queue_head_t reset_queue; - - /* For missed irq/seqno simulation. */ - unsigned long test_irq_rings; -}; - enum modeset_restore { MODESET_ON_LID_OPEN, MODESET_DONE, @@ -1860,6 +1591,8 @@ struct drm_i915_private { struct intel_gvt *gvt; + struct intel_wopcm wopcm; + struct intel_huc huc; struct intel_guc guc; @@ -2392,6 +2125,11 @@ static inline struct drm_i915_private *kdev_to_i915(struct device *kdev) return to_i915(dev_get_drvdata(kdev)); } +static inline struct drm_i915_private *wopcm_to_i915(struct intel_wopcm *wopcm) +{ + return container_of(wopcm, struct drm_i915_private, wopcm); +} + static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc) { return container_of(guc, struct drm_i915_private, guc); @@ -2963,10 +2701,8 @@ extern void i915_driver_unload(struct drm_device *dev); extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask); extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv); -#define I915_RESET_QUIET BIT(0) -extern void i915_reset(struct drm_i915_private *i915, unsigned int flags); -extern int i915_reset_engine(struct intel_engine_cs *engine, - unsigned int flags); +extern void i915_reset(struct drm_i915_private *i915); +extern int i915_reset_engine(struct intel_engine_cs *engine, const char *msg); extern bool intel_has_reset_engine(struct drm_i915_private *dev_priv); extern int intel_reset_guc(struct drm_i915_private *dev_priv); @@ -3014,10 +2750,12 @@ static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv) &dev_priv->gpu_error.hangcheck_work, delay); } -__printf(3, 4) +__printf(4, 5) void i915_handle_error(struct drm_i915_private *dev_priv, u32 engine_mask, + unsigned long flags, const char *fmt, ...); +#define I915_ERROR_CAPTURE BIT(0) extern void intel_irq_init(struct drm_i915_private *dev_priv); extern void intel_irq_fini(struct drm_i915_private *dev_priv); @@ -3132,8 +2870,8 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, int i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); void i915_gem_sanitize(struct drm_i915_private *i915); -int i915_gem_load_init(struct drm_i915_private *dev_priv); -void i915_gem_load_cleanup(struct drm_i915_private *dev_priv); +int i915_gem_init_early(struct drm_i915_private *dev_priv); +void i915_gem_cleanup_early(struct drm_i915_private *dev_priv); void i915_gem_load_init_fences(struct drm_i915_private *dev_priv); int i915_gem_freeze(struct drm_i915_private *dev_priv); int i915_gem_freeze_late(struct drm_i915_private *dev_priv); @@ -3589,64 +3327,6 @@ static inline int i915_debugfs_connector_add(struct drm_connector *connector) static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {} #endif -/* i915_gpu_error.c */ -#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) - -__printf(2, 3) -void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); -int i915_error_state_to_str(struct drm_i915_error_state_buf *estr, - const struct i915_gpu_state *gpu); -int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb, - struct drm_i915_private *i915, - size_t count, loff_t pos); -static inline void i915_error_state_buf_release( - struct drm_i915_error_state_buf *eb) -{ - kfree(eb->buf); -} - -struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915); -void i915_capture_error_state(struct 
drm_i915_private *dev_priv, - u32 engine_mask, - const char *error_msg); - -static inline struct i915_gpu_state * -i915_gpu_state_get(struct i915_gpu_state *gpu) -{ - kref_get(&gpu->ref); - return gpu; -} - -void __i915_gpu_state_free(struct kref *kref); -static inline void i915_gpu_state_put(struct i915_gpu_state *gpu) -{ - if (gpu) - kref_put(&gpu->ref, __i915_gpu_state_free); -} - -struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915); -void i915_reset_error_state(struct drm_i915_private *i915); - -#else - -static inline void i915_capture_error_state(struct drm_i915_private *dev_priv, - u32 engine_mask, - const char *error_msg) -{ -} - -static inline struct i915_gpu_state * -i915_first_error_state(struct drm_i915_private *i915) -{ - return NULL; -} - -static inline void i915_reset_error_state(struct drm_i915_private *i915) -{ -} - -#endif - const char *i915_cache_level_str(struct drm_i915_private *i915, int type); /* i915_cmd_parser.c */ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 7b5a9d7c9593..9650a7b10c5f 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2998,6 +2998,7 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv) } i915_gem_revoke_fences(dev_priv); + intel_uc_sanitize(dev_priv); return err; } @@ -3192,6 +3193,9 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv) static void nop_submit_request(struct i915_request *request) { + GEM_TRACE("%s fence %llx:%d -> -EIO\n", + request->engine->name, + request->fence.context, request->fence.seqno); dma_fence_set_error(&request->fence, -EIO); i915_request_submit(request); @@ -3201,6 +3205,9 @@ static void nop_complete_submit_request(struct i915_request *request) { unsigned long flags; + GEM_TRACE("%s fence %llx:%d -> -EIO\n", + request->engine->name, + request->fence.context, request->fence.seqno); dma_fence_set_error(&request->fence, -EIO); spin_lock_irqsave(&request->engine->timeline->lock, flags); @@ -3214,6 +3221,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915) struct intel_engine_cs *engine; enum intel_engine_id id; + GEM_TRACE("start\n"); + if (drm_debug & DRM_UT_DRIVER) { struct drm_printer p = drm_debug_printer(__func__); @@ -3237,6 +3246,9 @@ void i915_gem_set_wedged(struct drm_i915_private *i915) } i915->caps.scheduler = 0; + /* Even if the GPU reset fails, it should still stop the engines */ + intel_gpu_reset(i915, ALL_ENGINES); + /* * Make sure no one is running the old callback before we proceed with * cancelling requests and resetting the completion tracking. Otherwise @@ -3278,6 +3290,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915) i915_gem_reset_finish_engine(engine); } + GEM_TRACE("end\n"); + wake_up_all(&i915->gpu_error.reset_queue); } @@ -3290,7 +3304,10 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915) if (!test_bit(I915_WEDGED, &i915->gpu_error.flags)) return true; - /* Before unwedging, make sure that all pending operations + GEM_TRACE("start\n"); + + /* + * Before unwedging, make sure that all pending operations * are flushed and errored out - we may have requests waiting upon * third party fences. 
We marked all inflight requests as EIO, and * every execbuf since returned EIO, for consistency we want all @@ -3308,7 +3325,8 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915) if (!rq) continue; - /* We can't use our normal waiter as we want to + /* + * We can't use our normal waiter as we want to * avoid recursively trying to handle the current * reset. The basic dma_fence_default_wait() installs * a callback for dma_fence_signal(), which is @@ -3323,8 +3341,11 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915) return false; } } + i915_retire_requests(i915); + GEM_BUG_ON(i915->gt.active_requests); - /* Undo nop_submit_request. We prevent all new i915 requests from + /* + * Undo nop_submit_request. We prevent all new i915 requests from * being queued (by disallowing execbuf whilst wedged) so having * waited for all active requests above, we know the system is idle * and do not have to worry about a thread being inside @@ -3335,6 +3356,8 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915) intel_engines_reset_default_submission(i915); i915_gem_contexts_lost(i915); + GEM_TRACE("end\n"); + smp_mb__before_atomic(); /* complete takeover before enabling execbuf */ clear_bit(I915_WEDGED, &i915->gpu_error.flags); @@ -3666,16 +3689,7 @@ static int wait_for_engines(struct drm_i915_private *i915) if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) { dev_err(i915->drm.dev, "Failed to idle engines, declaring wedged!\n"); - if (drm_debug & DRM_UT_DRIVER) { - struct drm_printer p = drm_debug_printer(__func__); - struct intel_engine_cs *engine; - enum intel_engine_id id; - - for_each_engine(engine, i915, id) - intel_engine_dump(engine, &p, - "%s\n", engine->name); - } - + GEM_TRACE_DUMP(); i915_gem_set_wedged(i915); return -EIO; } @@ -4088,9 +4102,10 @@ out: } /* - * Prepare buffer for display plane (scanout, cursors, etc). - * Can be called from an uninterruptible phase (modesetting) and allows - * any flushes to be pipelined (for pageflips). + * Prepare buffer for display plane (scanout, cursors, etc). Can be called from + * an uninterruptible phase (modesetting) and allows any flushes to be pipelined + * (for pageflips). We only flush the caches while preparing the buffer for + * display, the callers are responsible for frontbuffer flush. */ struct i915_vma * i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, @@ -4146,9 +4161,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, vma->display_alignment = max_t(u64, vma->display_alignment, alignment); - /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */ __i915_gem_object_flush_for_display(obj); - intel_fb_obj_flush(obj, ORIGIN_DIRTYFB); /* It should now be out of any other write domains, and we can update * the domain values for our changes. @@ -4973,6 +4986,7 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv) * machines is a good idea, we don't - just in case it leaves the * machine in an unusable condition. 
*/ + intel_uc_sanitize(dev_priv); i915_gem_sanitize(dev_priv); intel_runtime_pm_put(dev_priv); @@ -5140,6 +5154,12 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv) goto out; } + ret = intel_wopcm_init_hw(&dev_priv->wopcm); + if (ret) { + DRM_ERROR("Enabling WOPCM failed (%d)\n", ret); + goto out; + } + /* We can't enable contexts until all firmware is loaded */ ret = intel_uc_init_hw(dev_priv); if (ret) { @@ -5297,6 +5317,10 @@ int i915_gem_init(struct drm_i915_private *dev_priv) if (ret) return ret; + ret = intel_wopcm_init(&dev_priv->wopcm); + if (ret) + return ret; + ret = intel_uc_init_misc(dev_priv); if (ret) return ret; @@ -5478,8 +5502,7 @@ static void i915_gem_init__mm(struct drm_i915_private *i915) INIT_WORK(&i915->mm.free_work, __i915_gem_free_work); } -int -i915_gem_load_init(struct drm_i915_private *dev_priv) +int i915_gem_init_early(struct drm_i915_private *dev_priv) { int err = -ENOMEM; @@ -5554,7 +5577,7 @@ err_out: return err; } -void i915_gem_load_cleanup(struct drm_i915_private *dev_priv) +void i915_gem_cleanup_early(struct drm_i915_private *dev_priv) { i915_gem_drain_freed_objects(dev_priv); GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list)); diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h index f54c4ff74ded..8922344fc21b 100644 --- a/drivers/gpu/drm/i915/i915_gem.h +++ b/drivers/gpu/drm/i915/i915_gem.h @@ -53,8 +53,10 @@ #if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM) #define GEM_TRACE(...) trace_printk(__VA_ARGS__) +#define GEM_TRACE_DUMP() ftrace_dump(DUMP_ALL) #else #define GEM_TRACE(...) do { } while (0) +#define GEM_TRACE_DUMP() do { } while (0) #endif #define I915_NUM_ENGINES 8 diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c index d3cbe8432f48..f3890b664e3f 100644 --- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c +++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c @@ -1,29 +1,11 @@ /* - * Copyright © 2014 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. 
+ * SPDX-License-Identifier: MIT * + * Copyright © 2014-2018 Intel Corporation */ -#include "i915_drv.h" #include "i915_gem_batch_pool.h" +#include "i915_drv.h" /** * DOC: batch pool @@ -41,11 +23,11 @@ /** * i915_gem_batch_pool_init() - initialize a batch buffer pool - * @engine: the associated request submission engine * @pool: the batch buffer pool + * @engine: the associated request submission engine */ -void i915_gem_batch_pool_init(struct intel_engine_cs *engine, - struct i915_gem_batch_pool *pool) +void i915_gem_batch_pool_init(struct i915_gem_batch_pool *pool, + struct intel_engine_cs *engine) { int n; diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.h b/drivers/gpu/drm/i915/i915_gem_batch_pool.h index 10d5ac4c00d3..56947daaaf65 100644 --- a/drivers/gpu/drm/i915/i915_gem_batch_pool.h +++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.h @@ -1,31 +1,13 @@ /* - * Copyright © 2014 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * SPDX-License-Identifier: MIT * + * Copyright © 2014-2018 Intel Corporation */ #ifndef I915_GEM_BATCH_POOL_H #define I915_GEM_BATCH_POOL_H -#include "i915_drv.h" +#include <linux/types.h> struct intel_engine_cs; @@ -34,9 +16,8 @@ struct i915_gem_batch_pool { struct list_head cache_list[4]; }; -/* i915_gem_batch_pool.c */ -void i915_gem_batch_pool_init(struct intel_engine_cs *engine, - struct i915_gem_batch_pool *pool); +void i915_gem_batch_pool_init(struct i915_gem_batch_pool *pool, + struct intel_engine_cs *engine); void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool); struct drm_i915_gem_object* i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size); diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index f2cbea7cf940..5cfac0255758 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -318,12 +318,13 @@ __create_hw_context(struct drm_i915_private *dev_priv, ctx->desc_template = default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt); - /* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not + /* + * GuC requires the ring to be placed in Non-WOPCM memory. If GuC is not * present or not in use we still need a small bias as ring wraparound * at offset 0 sometimes hangs. No idea why. 
*/ if (USES_GUC(dev_priv)) - ctx->ggtt_offset_bias = GUC_WOPCM_TOP; + ctx->ggtt_offset_bias = dev_priv->guc.ggtt_pin_bias; else ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE; diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 62aa67960bf4..af915d041281 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -121,8 +121,8 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv, if (stolen[0].start != stolen[1].start || stolen[0].end != stolen[1].end) { - DRM_DEBUG_KMS("GTT within stolen memory at %pR\n", &ggtt_res); - DRM_DEBUG_KMS("Stolen memory adjusted to %pR\n", dsm); + DRM_DEBUG_DRIVER("GTT within stolen memory at %pR\n", &ggtt_res); + DRM_DEBUG_DRIVER("Stolen memory adjusted to %pR\n", dsm); } } @@ -174,18 +174,19 @@ void i915_gem_cleanup_stolen(struct drm_device *dev) } static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv, - resource_size_t *base, resource_size_t *size) + resource_size_t *base, + resource_size_t *size) { - uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ? - CTG_STOLEN_RESERVED : - ELK_STOLEN_RESERVED); + u32 reg_val = I915_READ(IS_GM45(dev_priv) ? + CTG_STOLEN_RESERVED : + ELK_STOLEN_RESERVED); resource_size_t stolen_top = dev_priv->dsm.end + 1; - if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0) { - *base = 0; - *size = 0; + DRM_DEBUG_DRIVER("%s_STOLEN_RESERVED = %08x\n", + IS_GM45(dev_priv) ? "CTG" : "ELK", reg_val); + + if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0) return; - } /* * Whether ILK really reuses the ELK register for this is unclear. @@ -193,30 +194,25 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv, */ WARN(IS_GEN5(dev_priv), "ILK stolen reserved found? 0x%08x\n", reg_val); - *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16; + if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK)) + return; + *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16; WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base); - /* On these platforms, the register doesn't have a size field, so the - * size is the distance between the base and the top of the stolen - * memory. We also have the genuine case where base is zero and there's - * nothing reserved. 
*/ - if (*base == 0) - *size = 0; - else - *size = stolen_top - *base; + *size = stolen_top - *base; } static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv, - resource_size_t *base, resource_size_t *size) + resource_size_t *base, + resource_size_t *size) { - uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED); + u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED); + + DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val); - if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) { - *base = 0; - *size = 0; + if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) return; - } *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; @@ -239,17 +235,44 @@ static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv, } } -static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv, - resource_size_t *base, resource_size_t *size) +static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv, + resource_size_t *base, + resource_size_t *size) { - uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED); + u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED); + resource_size_t stolen_top = dev_priv->dsm.end + 1; - if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) { - *base = 0; - *size = 0; + DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val); + + if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) return; + + switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) { + default: + MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK); + case GEN7_STOLEN_RESERVED_1M: + *size = 1024 * 1024; + break; } + /* + * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the + * reserved location as (top - size). + */ + *base = stolen_top - *size; +} + +static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv, + resource_size_t *base, + resource_size_t *size) +{ + u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED); + + DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val); + + if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) + return; + *base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK; switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) { @@ -266,15 +289,15 @@ static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv, } static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv, - resource_size_t *base, resource_size_t *size) + resource_size_t *base, + resource_size_t *size) { - uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED); + u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED); - if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) { - *base = 0; - *size = 0; + DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val); + + if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) return; - } *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; @@ -298,29 +321,22 @@ static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv, } static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv, - resource_size_t *base, resource_size_t *size) + resource_size_t *base, + resource_size_t *size) { - uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED); - resource_size_t stolen_top; + u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED); + resource_size_t stolen_top = dev_priv->dsm.end + 1; - if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) { - *base = 0; - *size = 0; + DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val); + + if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) return; - } - stolen_top = dev_priv->dsm.end + 1; + if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK)) + return; *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; - - /* On these platforms, the register doesn't 
have a size field, so the - * size is the distance between the base and the top of the stolen - * memory. We also have the genuine case where base is zero and there's - * nothing reserved. */ - if (*base == 0) - *size = 0; - else - *size = stolen_top - *base; + *size = stolen_top - *base; } int i915_gem_init_stolen(struct drm_i915_private *dev_priv) @@ -353,7 +369,7 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv) GEM_BUG_ON(dev_priv->dsm.end <= dev_priv->dsm.start); stolen_top = dev_priv->dsm.end + 1; - reserved_base = 0; + reserved_base = stolen_top; reserved_size = 0; switch (INTEL_GEN(dev_priv)) { @@ -373,8 +389,12 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv) &reserved_base, &reserved_size); break; case 7: - gen7_get_stolen_reserved(dev_priv, - &reserved_base, &reserved_size); + if (IS_VALLEYVIEW(dev_priv)) + vlv_get_stolen_reserved(dev_priv, + &reserved_base, &reserved_size); + else + gen7_get_stolen_reserved(dev_priv, + &reserved_base, &reserved_size); break; default: if (IS_LP(dev_priv)) @@ -386,11 +406,16 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv) break; } - /* It is possible for the reserved base to be zero, but the register - * field for size doesn't have a zero option. */ - if (reserved_base == 0) { - reserved_size = 0; + /* + * Our expectation is that the reserved space is at the top of the + * stolen region and *never* at the bottom. If we see !reserved_base, + * it likely means we failed to read the registers correctly. + */ + if (!reserved_base) { + DRM_ERROR("inconsistent reservation %pa + %pa; ignoring\n", + &reserved_base, &reserved_size); reserved_base = stolen_top; + reserved_size = 0; } dev_priv->dsm_reserved = @@ -406,9 +431,9 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv) * memory, so just consider the start. 
*/ reserved_total = stolen_top - reserved_base; - DRM_DEBUG_KMS("Memory reserved for graphics device: %lluK, usable: %lluK\n", - (u64)resource_size(&dev_priv->dsm) >> 10, - ((u64)resource_size(&dev_priv->dsm) - reserved_total) >> 10); + DRM_DEBUG_DRIVER("Memory reserved for graphics device: %lluK, usable: %lluK\n", + (u64)resource_size(&dev_priv->dsm) >> 10, + ((u64)resource_size(&dev_priv->dsm) - reserved_total) >> 10); stolen_usable_start = 0; /* WaSkipStolenMemoryFirstPage:bdw+ */ @@ -580,8 +605,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv lockdep_assert_held(&dev_priv->drm.struct_mutex); - DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n", - &stolen_offset, &gtt_offset, &size); + DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n", + &stolen_offset, &gtt_offset, &size); /* KISS and expect everything to be page-aligned */ if (WARN_ON(size == 0) || @@ -599,14 +624,14 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen); mutex_unlock(&dev_priv->mm.stolen_lock); if (ret) { - DRM_DEBUG_KMS("failed to allocate stolen space\n"); + DRM_DEBUG_DRIVER("failed to allocate stolen space\n"); kfree(stolen); return NULL; } obj = _i915_gem_object_create_stolen(dev_priv, stolen); if (obj == NULL) { - DRM_DEBUG_KMS("failed to allocate stolen object\n"); + DRM_DEBUG_DRIVER("failed to allocate stolen object\n"); i915_gem_stolen_remove_node(dev_priv, stolen); kfree(stolen); return NULL; @@ -635,7 +660,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv size, gtt_offset, obj->cache_level, 0); if (ret) { - DRM_DEBUG_KMS("failed to allocate stolen GTT space\n"); + DRM_DEBUG_DRIVER("failed to allocate stolen GTT space\n"); goto err_pages; } diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index f89ac7a8f95f..effaf982b19b 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -32,6 +32,7 @@ #include <linux/zlib.h> #include <drm/drm_print.h> +#include "i915_gpu_error.h" #include "i915_drv.h" static inline const struct intel_engine_cs * diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h new file mode 100644 index 000000000000..ac5760673cc9 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -0,0 +1,359 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2008-2018 Intel Corporation + */ + +#ifndef _I915_GPU_ERROR_H_ +#define _I915_GPU_ERROR_H_ + +#include <linux/kref.h> +#include <linux/ktime.h> +#include <linux/sched.h> + +#include <drm/drm_mm.h> + +#include "intel_device_info.h" +#include "intel_ringbuffer.h" +#include "intel_uc_fw.h" + +#include "i915_gem.h" +#include "i915_gem_gtt.h" +#include "i915_params.h" + +struct drm_i915_private; +struct intel_overlay_error_state; +struct intel_display_error_state; + +struct i915_gpu_state { + struct kref ref; + ktime_t time; + ktime_t boottime; + ktime_t uptime; + + struct drm_i915_private *i915; + + char error_msg[128]; + bool simulated; + bool awake; + bool wakelock; + bool suspended; + int iommu; + u32 reset_count; + u32 suspend_count; + struct intel_device_info device_info; + struct intel_driver_caps driver_caps; + struct i915_params params; + + struct i915_error_uc { + struct intel_uc_fw guc_fw; + struct intel_uc_fw huc_fw; + struct drm_i915_error_object *guc_log; + } uc; +
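+ /* Snapshot of the GuC/HuC firmware descriptors and of the GuC log buffer, taken at capture time so they can be read next to the register state below. */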
+ /* Generic register state */ + u32 eir; + u32 pgtbl_er; + u32 ier; + u32 gtier[4], ngtier; + u32 ccid; + u32 derrmr; + u32 forcewake; + u32 error; /* gen6+ */ + u32 err_int; /* gen7 */ + u32 fault_data0; /* gen8, gen9 */ + u32 fault_data1; /* gen8, gen9 */ + u32 done_reg; + u32 gac_eco; + u32 gam_ecochk; + u32 gab_ctl; + u32 gfx_mode; + + u32 nfence; + u64 fence[I915_MAX_NUM_FENCES]; + struct intel_overlay_error_state *overlay; + struct intel_display_error_state *display; + + struct drm_i915_error_engine { + int engine_id; + /* Software tracked state */ + bool idle; + bool waiting; + int num_waiters; + unsigned long hangcheck_timestamp; + bool hangcheck_stalled; + enum intel_engine_hangcheck_action hangcheck_action; + struct i915_address_space *vm; + int num_requests; + u32 reset_count; + + /* position of active request inside the ring */ + u32 rq_head, rq_post, rq_tail; + + /* our own tracking of ring head and tail */ + u32 cpu_ring_head; + u32 cpu_ring_tail; + + u32 last_seqno; + + /* Register state */ + u32 start; + u32 tail; + u32 head; + u32 ctl; + u32 mode; + u32 hws; + u32 ipeir; + u32 ipehr; + u32 bbstate; + u32 instpm; + u32 instps; + u32 seqno; + u64 bbaddr; + u64 acthd; + u32 fault_reg; + u64 faddr; + u32 rc_psmi; /* sleep state */ + u32 semaphore_mboxes[I915_NUM_ENGINES - 1]; + struct intel_instdone instdone; + + struct drm_i915_error_context { + char comm[TASK_COMM_LEN]; + pid_t pid; + u32 handle; + u32 hw_id; + int priority; + int ban_score; + int active; + int guilty; + bool bannable; + } context; + + struct drm_i915_error_object { + u64 gtt_offset; + u64 gtt_size; + int page_count; + int unused; + u32 *pages[0]; + } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page; + + struct drm_i915_error_object **user_bo; + long user_bo_count; + + struct drm_i915_error_object *wa_ctx; + struct drm_i915_error_object *default_state; + + struct drm_i915_error_request { + long jiffies; + pid_t pid; + u32 context; + int priority; + int ban_score; + u32 seqno; + u32 head; + u32 tail; + } *requests, execlist[EXECLIST_MAX_PORTS]; + unsigned int num_ports; + + struct drm_i915_error_waiter { + char comm[TASK_COMM_LEN]; + pid_t pid; + u32 seqno; + } *waiters; + + struct { + u32 gfx_mode; + union { + u64 pdp[4]; + u32 pp_dir_base; + }; + } vm_info; + } engine[I915_NUM_ENGINES]; + + struct drm_i915_error_buffer { + u32 size; + u32 name; + u32 rseqno[I915_NUM_ENGINES], wseqno; + u64 gtt_offset; + u32 read_domains; + u32 write_domain; + s32 fence_reg:I915_MAX_NUM_FENCE_BITS; + u32 tiling:2; + u32 dirty:1; + u32 purgeable:1; + u32 userptr:1; + s32 engine:4; + u32 cache_level:3; + } *active_bo[I915_NUM_ENGINES], *pinned_bo; + u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count; + struct i915_address_space *active_vm[I915_NUM_ENGINES]; +}; + +struct i915_gpu_error { + /* For hangcheck timer */ +#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ +#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) + + struct delayed_work hangcheck_work; + + /* For reset and error_state handling. */ + spinlock_t lock; + /* Protected by the above dev->gpu_error.lock. 
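Readers of first_error are expected to follow the usual kref pattern via the helpers declared further down in this header. A minimal sketch, assuming i915 is the drm_i915_private and the reader runs in process context (illustrative, not the canonical implementation):

	struct i915_gpu_state *error;

	spin_lock_irq(&i915->gpu_error.lock);
	error = i915->gpu_error.first_error;
	if (error)
		i915_gpu_state_get(error); /* take a reference under the lock */
	spin_unlock_irq(&i915->gpu_error.lock);

	if (error) {
		/* ... inspect error->engine[], error->eir, ... */
		i915_gpu_state_put(error); /* __i915_gpu_state_free() on last put */
	}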
*/ + struct i915_gpu_state *first_error; + + atomic_t pending_fb_pin; + + unsigned long missed_irq_rings; + + /** + * State variable controlling the reset flow and count + * + * This is a counter which gets incremented when reset is triggered. + * + * Before the reset commences, the I915_RESET_BACKOFF bit is set + * meaning that any waiters holding onto the struct_mutex should + * relinquish the lock immediately in order for the reset to start. + * + * If reset is not completed successfully, the I915_WEDGED bit is + * set meaning that hardware is terminally sour and there is no + * recovery. All waiters on the reset_queue will be woken when + * that happens. + * + * This counter is used by the wait_seqno code to notice that a reset + * event happened and it needs to restart the entire ioctl (since most + * likely the seqno it waited for won't ever signal anytime soon). + * + * This is important for lock-free wait paths, where no contended lock + * naturally enforces the correct ordering between the bail-out of the + * waiter and the gpu reset work code. + */ + unsigned long reset_count; + + /** + * flags: Control various stages of the GPU reset + * + * #I915_RESET_BACKOFF - When we start a reset, we want to stop any + * other users acquiring the struct_mutex. To do this we set the + * #I915_RESET_BACKOFF bit in the error flags when we detect a reset + * and then check for that bit before acquiring the struct_mutex (in + * i915_mutex_lock_interruptible()?). I915_RESET_BACKOFF serves a + * secondary role in preventing two concurrent global reset attempts. + * + * #I915_RESET_HANDOFF - To perform the actual GPU reset, we need the + * struct_mutex. We try to acquire the struct_mutex in the reset worker, + * but it may be held by some long running waiter (that we cannot + * interrupt without causing trouble). Once we are ready to do the GPU + * reset, we set the I915_RESET_HANDOFF bit and wakeup any waiters. If + * they already hold the struct_mutex and want to participate they can + * inspect the bit and do the reset directly, otherwise the worker + * waits for the struct_mutex. + * + * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to + * acquire the struct_mutex to reset an engine, we need an explicit + * flag to prevent two concurrent reset attempts in the same engine. + * As the number of engines continues to grow, allocate the flags from + * the most significant bits. + * + * #I915_WEDGED - If reset fails and we can no longer use the GPU, + * we set the #I915_WEDGED bit. Prior to command submission, e.g. + * i915_request_alloc(), this bit is checked and the sequence + * aborted (with -EIO reported to userspace) if set. + */ + unsigned long flags; +#define I915_RESET_BACKOFF 0 +#define I915_RESET_HANDOFF 1 +#define I915_RESET_MODESET 2 +#define I915_WEDGED (BITS_PER_LONG - 1) +#define I915_RESET_ENGINE (I915_WEDGED - I915_NUM_ENGINES) + + /** Number of times an engine has been reset */ + u32 reset_engine_count[I915_NUM_ENGINES]; + + /** Reason for the current *global* reset */ + const char *reason; + + /** + * Waitqueue to signal when a hang is detected. Used for waiters + * to release the struct_mutex for the reset to proceed. + */ + wait_queue_head_t wait_queue; + + /** + * Waitqueue to signal when the reset has completed. Used by clients + * that wait for dev_priv->mm.wedged to settle. + */ + wait_queue_head_t reset_queue; + + /* For missed irq/seqno simulation.
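The flag layout above grows the per-engine reset bits down from the top of the word, just below I915_WEDGED. A short sketch of the intended tests, mirroring the usage visible in i915_handle_error() further down in this diff (error here stands for &i915->gpu_error; illustrative only):

	if (test_bit(I915_WEDGED, &error->flags))
		return -EIO; /* terminally wedged, no recovery */

	/* engine N owns bit I915_RESET_ENGINE + N */
	if (test_and_set_bit(I915_RESET_ENGINE + engine->id, &error->flags))
		return -EBUSY; /* a reset of this engine is already in flight */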
*/ + unsigned long test_irq_rings; +}; + +struct drm_i915_error_state_buf { + struct drm_i915_private *i915; + unsigned int bytes; + unsigned int size; + int err; + u8 *buf; + loff_t start; + loff_t pos; +}; + +#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) + +__printf(2, 3) +void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); +int i915_error_state_to_str(struct drm_i915_error_state_buf *estr, + const struct i915_gpu_state *gpu); +int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb, + struct drm_i915_private *i915, + size_t count, loff_t pos); + +static inline void +i915_error_state_buf_release(struct drm_i915_error_state_buf *eb) +{ + kfree(eb->buf); +} + +struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915); +void i915_capture_error_state(struct drm_i915_private *dev_priv, + u32 engine_mask, + const char *error_msg); + +static inline struct i915_gpu_state * +i915_gpu_state_get(struct i915_gpu_state *gpu) +{ + kref_get(&gpu->ref); + return gpu; +} + +void __i915_gpu_state_free(struct kref *kref); +static inline void i915_gpu_state_put(struct i915_gpu_state *gpu) +{ + if (gpu) + kref_put(&gpu->ref, __i915_gpu_state_free); +} + +struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915); +void i915_reset_error_state(struct drm_i915_private *i915); + +#else + +static inline void i915_capture_error_state(struct drm_i915_private *dev_priv, + u32 engine_mask, + const char *error_msg) +{ +} + +static inline struct i915_gpu_state * +i915_first_error_state(struct drm_i915_private *i915) +{ + return NULL; +} + +static inline void i915_reset_error_state(struct drm_i915_private *i915) +{ +} + +#endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */ + +#endif /* _I915_GPU_ERROR_H_ */ diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 633c18785c1e..27aee25429b7 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1399,19 +1399,18 @@ static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, } static void -gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift) +gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir) { struct intel_engine_execlists * const execlists = &engine->execlists; bool tasklet = false; - if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) { - if (READ_ONCE(engine->execlists.active)) { - __set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); - tasklet = true; - } + if (iir & GT_CONTEXT_SWITCH_INTERRUPT) { + if (READ_ONCE(engine->execlists.active)) + tasklet = !test_and_set_bit(ENGINE_IRQ_EXECLIST, + &engine->irq_posted); } - if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) { + if (iir & GT_RENDER_USER_INTERRUPT) { notify_ring(engine); tasklet |= USES_GUC_SUBMISSION(engine->i915); } @@ -1466,21 +1465,21 @@ static void gen8_gt_irq_handler(struct drm_i915_private *i915, { if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { gen8_cs_irq_handler(i915->engine[RCS], - gt_iir[0], GEN8_RCS_IRQ_SHIFT); + gt_iir[0] >> GEN8_RCS_IRQ_SHIFT); gen8_cs_irq_handler(i915->engine[BCS], - gt_iir[0], GEN8_BCS_IRQ_SHIFT); + gt_iir[0] >> GEN8_BCS_IRQ_SHIFT); } if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { gen8_cs_irq_handler(i915->engine[VCS], - gt_iir[1], GEN8_VCS1_IRQ_SHIFT); + gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT); gen8_cs_irq_handler(i915->engine[VCS2], - gt_iir[1], GEN8_VCS2_IRQ_SHIFT); + gt_iir[1] >> GEN8_VCS2_IRQ_SHIFT); } if (master_ctl & GEN8_GT_VECS_IRQ) { gen8_cs_irq_handler(i915->engine[VECS], - 
gt_iir[3], GEN8_VECS_IRQ_SHIFT); + gt_iir[3] >> GEN8_VECS_IRQ_SHIFT); } if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { @@ -1627,7 +1626,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, int head, tail; spin_lock(&pipe_crc->lock); - if (pipe_crc->source) { + if (pipe_crc->source && !crtc->base.crc.opened) { if (!pipe_crc->entries) { spin_unlock(&pipe_crc->lock); DRM_DEBUG_KMS("spurious interrupt\n"); @@ -1667,7 +1666,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, * On GEN8+ sometimes the second CRC is bonkers as well, so * don't trust that one either. */ - if (pipe_crc->skipped == 0 || + if (pipe_crc->skipped <= 0 || (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) { pipe_crc->skipped++; spin_unlock(&pipe_crc->lock); @@ -1766,37 +1765,8 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) { - if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) { - /* Sample the log buffer flush related bits & clear them out now - * itself from the message identity register to minimize the - * probability of losing a flush interrupt, when there are back - * to back flush interrupts. - * There can be a new flush interrupt, for different log buffer - * type (like for ISR), whilst Host is handling one (for DPC). - * Since same bit is used in message register for ISR & DPC, it - * could happen that GuC sets the bit for 2nd interrupt but Host - * clears out the bit on handling the 1st interrupt. - */ - u32 msg, flush; - - msg = I915_READ(SOFT_SCRATCH(15)); - flush = msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED | - INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER); - if (flush) { - /* Clear the message bits that are handled */ - I915_WRITE(SOFT_SCRATCH(15), msg & ~flush); - - /* Handle flush interrupt in bottom half */ - queue_work(dev_priv->guc.log.runtime.flush_wq, - &dev_priv->guc.log.runtime.flush_work); - - dev_priv->guc.log.flush_interrupt_count++; - } else { - /* Not clearing of unhandled event bits won't result in - * re-triggering of the interrupt. 
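Condensed, the block being deleted implemented a sample-then-clear acknowledgment of a message register shared by several event sources; the same pattern, using the names from the block above (a sketch, not a drop-in replacement):

	u32 msg = I915_READ(SOFT_SCRATCH(15));
	u32 handled = msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED |
			     INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER);

	if (handled) /* ack only what we consume; unhandled bits stay pending */
		I915_WRITE(SOFT_SCRATCH(15), msg & ~handled);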
- */ - } - } + if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) + intel_guc_to_host_event_handler(&dev_priv->guc); } static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) @@ -2762,12 +2732,6 @@ static void __fini_wedge(struct wedge_me *w) (W)->i915; \ __fini_wedge((W))) -static __always_inline void -gen11_cs_irq_handler(struct intel_engine_cs * const engine, const u32 iir) -{ - gen8_cs_irq_handler(engine, iir, 0); -} - static void gen11_gt_engine_irq_handler(struct drm_i915_private * const i915, const unsigned int bank, @@ -2781,27 +2745,27 @@ gen11_gt_engine_irq_handler(struct drm_i915_private * const i915, switch (engine_n) { case GEN11_RCS0: - return gen11_cs_irq_handler(engine[RCS], iir); + return gen8_cs_irq_handler(engine[RCS], iir); case GEN11_BCS: - return gen11_cs_irq_handler(engine[BCS], iir); + return gen8_cs_irq_handler(engine[BCS], iir); } case 1: switch (engine_n) { case GEN11_VCS(0): - return gen11_cs_irq_handler(engine[_VCS(0)], iir); + return gen8_cs_irq_handler(engine[_VCS(0)], iir); case GEN11_VCS(1): - return gen11_cs_irq_handler(engine[_VCS(1)], iir); + return gen8_cs_irq_handler(engine[_VCS(1)], iir); case GEN11_VCS(2): - return gen11_cs_irq_handler(engine[_VCS(2)], iir); + return gen8_cs_irq_handler(engine[_VCS(2)], iir); case GEN11_VCS(3): - return gen11_cs_irq_handler(engine[_VCS(3)], iir); + return gen8_cs_irq_handler(engine[_VCS(3)], iir); case GEN11_VECS(0): - return gen11_cs_irq_handler(engine[_VECS(0)], iir); + return gen8_cs_irq_handler(engine[_VECS(0)], iir); case GEN11_VECS(1): - return gen11_cs_irq_handler(engine[_VECS(1)], iir); + return gen8_cs_irq_handler(engine[_VECS(1)], iir); } } } @@ -2912,15 +2876,10 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg) return IRQ_HANDLED; } -/** - * i915_reset_device - do process context error handling work - * @dev_priv: i915 device private - * - * Fire an error uevent so userspace can see that a hang or error - * was detected. - */ -static void i915_reset_device(struct drm_i915_private *dev_priv) +static void i915_reset_device(struct drm_i915_private *dev_priv, + const char *msg) { + struct i915_gpu_error *error = &dev_priv->gpu_error; struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; @@ -2936,29 +2895,32 @@ static void i915_reset_device(struct drm_i915_private *dev_priv) i915_wedge_on_timeout(&w, dev_priv, 5*HZ) { intel_prepare_reset(dev_priv); + error->reason = msg; + /* Signal that locked waiters should reset the GPU */ - set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags); - wake_up_all(&dev_priv->gpu_error.wait_queue); + set_bit(I915_RESET_HANDOFF, &error->flags); + wake_up_all(&error->wait_queue); /* Wait for anyone holding the lock to wakeup, without * blocking indefinitely on struct_mutex. 
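* The do/while below keeps retrying: either this worker wins the * trylock and performs the reset itself, or a struct_mutex holder * notices I915_RESET_HANDOFF and resets in our stead; * wait_on_bit_timeout() returns non-zero while the bit is still set, * so the loop sleeps and retries until the handoff completes.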
*/ do { if (mutex_trylock(&dev_priv->drm.struct_mutex)) { - i915_reset(dev_priv, 0); + i915_reset(dev_priv); mutex_unlock(&dev_priv->drm.struct_mutex); } - } while (wait_on_bit_timeout(&dev_priv->gpu_error.flags, + } while (wait_on_bit_timeout(&error->flags, I915_RESET_HANDOFF, TASK_UNINTERRUPTIBLE, 1)); + error->reason = NULL; + intel_finish_reset(dev_priv); } - if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags)) - kobject_uevent_env(kobj, - KOBJ_CHANGE, reset_done_event); + if (!test_bit(I915_WEDGED, &error->flags)) + kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event); } static void i915_clear_error_registers(struct drm_i915_private *dev_priv) @@ -2990,6 +2952,7 @@ static void i915_clear_error_registers(struct drm_i915_private *dev_priv) * i915_handle_error - handle a gpu error * @dev_priv: i915 device private * @engine_mask: mask representing engines that are hung + * @flags: control flags * @fmt: Error message format string * * Do some basic checking of register state at error time and @@ -3000,16 +2963,23 @@ static void i915_clear_error_registers(struct drm_i915_private *dev_priv) */ void i915_handle_error(struct drm_i915_private *dev_priv, u32 engine_mask, + unsigned long flags, const char *fmt, ...) { struct intel_engine_cs *engine; unsigned int tmp; - va_list args; char error_msg[80]; + char *msg = NULL; - va_start(args, fmt); - vscnprintf(error_msg, sizeof(error_msg), fmt, args); - va_end(args); + if (fmt) { + va_list args; + + va_start(args, fmt); + vscnprintf(error_msg, sizeof(error_msg), fmt, args); + va_end(args); + + msg = error_msg; + } /* * In most cases it's guaranteed that we get here with an RPM @@ -3020,8 +2990,12 @@ void i915_handle_error(struct drm_i915_private *dev_priv, */ intel_runtime_pm_get(dev_priv); - i915_capture_error_state(dev_priv, engine_mask, error_msg); - i915_clear_error_registers(dev_priv); + engine_mask &= INTEL_INFO(dev_priv)->ring_mask; + + if (flags & I915_ERROR_CAPTURE) { + i915_capture_error_state(dev_priv, engine_mask, msg); + i915_clear_error_registers(dev_priv); + } /* * Try engine reset when available. We fall back to full reset if @@ -3034,7 +3008,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv, &dev_priv->gpu_error.flags)) continue; - if (i915_reset_engine(engine, 0) == 0) + if (i915_reset_engine(engine, msg) == 0) engine_mask &= ~intel_engine_flag(engine); clear_bit(I915_RESET_ENGINE + engine->id, @@ -3064,7 +3038,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv, TASK_UNINTERRUPTIBLE); } - i915_reset_device(dev_priv); + i915_reset_device(dev_priv, msg); for_each_engine(engine, dev_priv, tmp) { clear_bit(I915_RESET_ENGINE + engine->id, diff --git a/drivers/gpu/drm/i915/i915_oa_icl.c b/drivers/gpu/drm/i915/i915_oa_icl.c new file mode 100644 index 000000000000..a5667926e3de --- /dev/null +++ b/drivers/gpu/drm/i915/i915_oa_icl.c @@ -0,0 +1,118 @@ +/* + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! 
+ * + * + * Copyright (c) 2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include <linux/sysfs.h> + +#include "i915_drv.h" +#include "i915_oa_icl.h" + +static const struct i915_oa_reg b_counter_config_test_oa[] = { + { _MMIO(0x2740), 0x00000000 }, + { _MMIO(0x2710), 0x00000000 }, + { _MMIO(0x2714), 0xf0800000 }, + { _MMIO(0x2720), 0x00000000 }, + { _MMIO(0x2724), 0xf0800000 }, + { _MMIO(0x2770), 0x00000004 }, + { _MMIO(0x2774), 0x0000ffff }, + { _MMIO(0x2778), 0x00000003 }, + { _MMIO(0x277c), 0x0000ffff }, + { _MMIO(0x2780), 0x00000007 }, + { _MMIO(0x2784), 0x0000ffff }, + { _MMIO(0x2788), 0x00100002 }, + { _MMIO(0x278c), 0x0000fff7 }, + { _MMIO(0x2790), 0x00100002 }, + { _MMIO(0x2794), 0x0000ffcf }, + { _MMIO(0x2798), 0x00100082 }, + { _MMIO(0x279c), 0x0000ffef }, + { _MMIO(0x27a0), 0x001000c2 }, + { _MMIO(0x27a4), 0x0000ffe7 }, + { _MMIO(0x27a8), 0x00100001 }, + { _MMIO(0x27ac), 0x0000ffe7 }, +}; + +static const struct i915_oa_reg flex_eu_config_test_oa[] = { +}; + +static const struct i915_oa_reg mux_config_test_oa[] = { + { _MMIO(0xd04), 0x00000200 }, + { _MMIO(0x9840), 0x00000000 }, + { _MMIO(0x9884), 0x00000000 }, + { _MMIO(0x9888), 0x10060000 }, + { _MMIO(0x9888), 0x22060000 }, + { _MMIO(0x9888), 0x16060000 }, + { _MMIO(0x9888), 0x24060000 }, + { _MMIO(0x9888), 0x18060000 }, + { _MMIO(0x9888), 0x1a060000 }, + { _MMIO(0x9888), 0x12060000 }, + { _MMIO(0x9888), 0x14060000 }, + { _MMIO(0x9888), 0x10060000 }, + { _MMIO(0x9888), 0x22060000 }, + { _MMIO(0x9884), 0x00000003 }, + { _MMIO(0x9888), 0x16130000 }, + { _MMIO(0x9888), 0x24000001 }, + { _MMIO(0x9888), 0x0e130056 }, + { _MMIO(0x9888), 0x10130000 }, + { _MMIO(0x9888), 0x1a130000 }, + { _MMIO(0x9888), 0x541f0001 }, + { _MMIO(0x9888), 0x181f0000 }, + { _MMIO(0x9888), 0x4c1f0000 }, + { _MMIO(0x9888), 0x301f0000 }, +}; + +static ssize_t +show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "1\n"); +} + +void +i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv) +{ + strlcpy(dev_priv->perf.oa.test_config.uuid, + "a291665e-244b-4b76-9b9a-01de9d3c8068", + sizeof(dev_priv->perf.oa.test_config.uuid)); + dev_priv->perf.oa.test_config.id = 1; + + dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; + dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); + + dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa; + 
dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); + + dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa; + dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); + + dev_priv->perf.oa.test_config.sysfs_metric.name = "a291665e-244b-4b76-9b9a-01de9d3c8068"; + dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs; + + dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr; + + dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id"; + dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444; + dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id; +} diff --git a/drivers/gpu/drm/i915/i915_oa_icl.h b/drivers/gpu/drm/i915/i915_oa_icl.h new file mode 100644 index 000000000000..ae1c24aafe4f --- /dev/null +++ b/drivers/gpu/drm/i915/i915_oa_icl.h @@ -0,0 +1,34 @@ +/* + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! + * + * + * Copyright (c) 2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#ifndef __I915_OA_ICL_H__ +#define __I915_OA_ICL_H__ + +extern void i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv); + +#endif diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index 430f5f9d0ff4..c96360398072 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -48,7 +48,7 @@ struct drm_printer; param(int, enable_ips, 1) \ param(int, invert_brightness, 0) \ param(int, enable_guc, 0) \ - param(int, guc_log_level, 0) \ + param(int, guc_log_level, -1) \ param(char *, guc_firmware_path, NULL) \ param(char *, huc_firmware_path, NULL) \ param(int, mmio_debug, 0) \ diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 062e91b39085..4364922e935d 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -602,6 +602,7 @@ static const struct intel_device_info intel_icelake_11_info = { PLATFORM(INTEL_ICELAKE), .is_alpha_support = 1, .has_resource_streamer = 0, + .ring_mask = RENDER_RING | BLT_RING | VEBOX_RING | BSD_RING | BSD3_RING, }; #undef GEN diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index abaca6edeb71..bfc906cd4e5e 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -209,6 +209,7 @@ #include "i915_oa_cflgt2.h" #include "i915_oa_cflgt3.h" #include "i915_oa_cnl.h" +#include "i915_oa_icl.h" /* HW requires this to be a power of two, between 128k and 16M, though driver * is currently generally designed assuming the largest 16M size is used such @@ -1042,7 +1043,7 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream, I915_WRITE(GEN7_OASTATUS2, ((head & GEN7_OASTATUS2_HEAD_MASK) | - OA_MEM_SELECT_GGTT)); + GEN7_OASTATUS2_MEM_SELECT_GGTT)); dev_priv->perf.oa.oa_buffer.head = head; spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags); @@ -1332,7 +1333,8 @@ static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv) /* Pre-DevBDW: OABUFFER must be set with counters off, * before OASTATUS1, but after OASTATUS2 */ - I915_WRITE(GEN7_OASTATUS2, gtt_offset | OA_MEM_SELECT_GGTT); /* head */ + I915_WRITE(GEN7_OASTATUS2, + gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT); /* head */ dev_priv->perf.oa.oa_buffer.head = gtt_offset; I915_WRITE(GEN7_OABUFFER, gtt_offset); @@ -1392,7 +1394,7 @@ static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv) * bit." */ I915_WRITE(GEN8_OABUFFER, gtt_offset | - OABUFFER_SIZE_16M | OA_MEM_SELECT_GGTT); + OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT); I915_WRITE(GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK); /* Mark that we need updated tail pointers to read from... */ @@ -1840,7 +1842,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv, * be read back from automatically triggered reports, as part of the * RPT_ID field. 
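The GEN8_OA_DEBUG write in the hunk below relies on i915's masked-register convention: the upper 16 bits of the written value are a write-enable mask for the lower 16, so individual bits can be flipped without a read-modify-write. A sketch of the helper shape (hypothetical names; the driver's own _MASKED_BIT_* macros may differ in detail):

	#define EXAMPLE_MASKED_BIT_ENABLE(a)  (((a) << 16) | (a)) /* set bit(s) a */
	#define EXAMPLE_MASKED_BIT_DISABLE(a) ((a) << 16)         /* clear bit(s) a */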
*/ - if (IS_GEN9(dev_priv) || IS_GEN10(dev_priv)) { + if (IS_GEN(dev_priv, 9, 11)) { I915_WRITE(GEN8_OA_DEBUG, _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS | GEN9_OA_DEBUG_INCLUDE_CLK_RATIO)); @@ -1870,7 +1872,6 @@ static void gen8_disable_metric_set(struct drm_i915_private *dev_priv) I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) & ~GT_NOA_ENABLE)); - } static void gen10_disable_metric_set(struct drm_i915_private *dev_priv) @@ -1885,6 +1886,13 @@ static void gen10_disable_metric_set(struct drm_i915_private *dev_priv) static void gen7_oa_enable(struct drm_i915_private *dev_priv) { + struct i915_gem_context *ctx = + dev_priv->perf.oa.exclusive_stream->ctx; + u32 ctx_id = dev_priv->perf.oa.specific_ctx_id; + bool periodic = dev_priv->perf.oa.periodic; + u32 period_exponent = dev_priv->perf.oa.period_exponent; + u32 report_format = dev_priv->perf.oa.oa_buffer.format; + /* * Reset buf pointers so we don't forward reports from before now. * @@ -1896,25 +1904,14 @@ static void gen7_oa_enable(struct drm_i915_private *dev_priv) */ gen7_init_oa_buffer(dev_priv); - if (dev_priv->perf.oa.exclusive_stream->enabled) { - struct i915_gem_context *ctx = - dev_priv->perf.oa.exclusive_stream->ctx; - u32 ctx_id = dev_priv->perf.oa.specific_ctx_id; - - bool periodic = dev_priv->perf.oa.periodic; - u32 period_exponent = dev_priv->perf.oa.period_exponent; - u32 report_format = dev_priv->perf.oa.oa_buffer.format; - - I915_WRITE(GEN7_OACONTROL, - (ctx_id & GEN7_OACONTROL_CTX_MASK) | - (period_exponent << - GEN7_OACONTROL_TIMER_PERIOD_SHIFT) | - (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) | - (report_format << GEN7_OACONTROL_FORMAT_SHIFT) | - (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) | - GEN7_OACONTROL_ENABLE); - } else - I915_WRITE(GEN7_OACONTROL, 0); + I915_WRITE(GEN7_OACONTROL, + (ctx_id & GEN7_OACONTROL_CTX_MASK) | + (period_exponent << + GEN7_OACONTROL_TIMER_PERIOD_SHIFT) | + (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) | + (report_format << GEN7_OACONTROL_FORMAT_SHIFT) | + (ctx ? 
GEN7_OACONTROL_PER_CTX_ENABLE : 0) | + GEN7_OACONTROL_ENABLE); } static void gen8_oa_enable(struct drm_i915_private *dev_priv) @@ -2099,13 +2096,17 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, if (stream->ctx) { ret = oa_get_render_ctx_id(stream); - if (ret) + if (ret) { + DRM_DEBUG("Invalid context id to filter with\n"); return ret; + } } ret = get_oa_config(dev_priv, props->metrics_set, &stream->oa_config); - if (ret) + if (ret) { + DRM_DEBUG("Invalid OA config id=%i\n", props->metrics_set); goto err_config; + } /* PRM - observability performance counters: * @@ -2132,8 +2133,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv, stream->oa_config); - if (ret) + if (ret) { + DRM_DEBUG("Unable to enable metric set\n"); goto err_enable; + } stream->ops = &i915_oa_stream_ops; @@ -2745,7 +2748,8 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv, props->ctx_handle = value; break; case DRM_I915_PERF_PROP_SAMPLE_OA: - props->sample_flags |= SAMPLE_OA_REPORT; + if (value) + props->sample_flags |= SAMPLE_OA_REPORT; break; case DRM_I915_PERF_PROP_OA_METRICS_SET: if (value == 0) { @@ -2935,6 +2939,8 @@ void i915_perf_register(struct drm_i915_private *dev_priv) i915_perf_load_test_config_cflgt3(dev_priv); } else if (IS_CANNONLAKE(dev_priv)) { i915_perf_load_test_config_cnl(dev_priv); + } else if (IS_ICELAKE(dev_priv)) { + i915_perf_load_test_config_icl(dev_priv); } if (dev_priv->perf.oa.test_config.id == 0) @@ -3292,6 +3298,8 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data, mutex_unlock(&dev_priv->perf.metrics_lock); + DRM_DEBUG("Added config %s id=%i\n", oa_config->uuid, oa_config->id); + return oa_config->id; sysfs_err: @@ -3348,6 +3356,9 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data, &oa_config->sysfs_metric); idr_remove(&dev_priv->perf.metrics_idr, *arg); + + DRM_DEBUG("Removed config %s id=%i\n", oa_config->uuid, oa_config->id); + put_oa_config(dev_priv, oa_config); config_err: @@ -3467,7 +3478,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv) dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16); } - } else if (IS_GEN10(dev_priv)) { + } else if (IS_GEN(dev_priv, 10, 11)) { dev_priv->perf.oa.ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr; dev_priv->perf.oa.ops.is_valid_mux_reg = diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index d8feb9053e0c..11fb76bd3860 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -1,33 +1,12 @@ /* - * Copyright © 2017 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * SPDX-License-Identifier: MIT * + * Copyright © 2017-2018 Intel Corporation */ -#include <linux/perf_event.h> -#include <linux/pm_runtime.h> - -#include "i915_drv.h" #include "i915_pmu.h" #include "intel_ringbuffer.h" +#include "i915_drv.h" /* Frequency for the sampling timer for events which need it. */ #define FREQUENCY 200 diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h index aa1b1a987ea1..2ba735299f7c 100644 --- a/drivers/gpu/drm/i915/i915_pmu.h +++ b/drivers/gpu/drm/i915/i915_pmu.h @@ -1,29 +1,19 @@ /* - * Copyright © 2017 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * SPDX-License-Identifier: MIT * + * Copyright © 2017-2018 Intel Corporation */ + #ifndef __I915_PMU_H__ #define __I915_PMU_H__ +#include <linux/hrtimer.h> +#include <linux/perf_event.h> +#include <linux/spinlock_types.h> +#include <drm/i915_drm.h> + +struct drm_i915_private; + enum { __I915_SAMPLE_FREQ_ACT = 0, __I915_SAMPLE_FREQ_REQ, diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index e6a8c0ee7df1..176dca6554f4 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -153,9 +153,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c)) #define _PLL(pll, a, b) ((a) + (pll)*((b)-(a))) #define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b)) -#define _MMIO_PORT6(port, a, b, c, d, e, f) _MMIO(_PICK(port, a, b, c, d, e, f)) -#define _MMIO_PORT6_LN(port, ln, a0, a1, b, c, d, e, f) \ - _MMIO(_PICK(port, a0, b, c, d, e, f) + (ln * (a1 - a0))) #define _PHY3(phy, ...) 
_PICK(phy, __VA_ARGS__) #define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c)) @@ -430,145 +427,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define VGA_CR_INDEX_CGA 0x3d4 #define VGA_CR_DATA_CGA 0x3d5 -/* - * Instruction field definitions used by the command parser - */ -#define INSTR_CLIENT_SHIFT 29 -#define INSTR_MI_CLIENT 0x0 -#define INSTR_BC_CLIENT 0x2 -#define INSTR_RC_CLIENT 0x3 -#define INSTR_SUBCLIENT_SHIFT 27 -#define INSTR_SUBCLIENT_MASK 0x18000000 -#define INSTR_MEDIA_SUBCLIENT 0x2 -#define INSTR_26_TO_24_MASK 0x7000000 -#define INSTR_26_TO_24_SHIFT 24 - -/* - * Memory interface instructions used by the kernel - */ -#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags)) -/* Many MI commands use bit 22 of the header dword for GGTT vs PPGTT */ -#define MI_GLOBAL_GTT (1<<22) - -#define MI_NOOP MI_INSTR(0, 0) -#define MI_USER_INTERRUPT MI_INSTR(0x02, 0) -#define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0) -#define MI_WAIT_FOR_OVERLAY_FLIP (1<<16) -#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6) -#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2) -#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1) -#define MI_FLUSH MI_INSTR(0x04, 0) -#define MI_READ_FLUSH (1 << 0) -#define MI_EXE_FLUSH (1 << 1) -#define MI_NO_WRITE_FLUSH (1 << 2) -#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */ -#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ -#define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */ -#define MI_REPORT_HEAD MI_INSTR(0x07, 0) -#define MI_ARB_ON_OFF MI_INSTR(0x08, 0) -#define MI_ARB_ENABLE (1<<0) -#define MI_ARB_DISABLE (0<<0) -#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) -#define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0) -#define MI_SUSPEND_FLUSH_EN (1<<0) -#define MI_SET_APPID MI_INSTR(0x0e, 0) -#define MI_OVERLAY_FLIP MI_INSTR(0x11, 0) -#define MI_OVERLAY_CONTINUE (0x0<<21) -#define MI_OVERLAY_ON (0x1<<21) -#define MI_OVERLAY_OFF (0x2<<21) -#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0) -#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) -#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1) -#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) -/* IVB has funny definitions for which plane to flip. 
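A worked decode of the MI header layout defined above (pure arithmetic on the encodings; illustrative only):

	u32 header = MI_INSTR(0x20, 2);            /* MI_STORE_DWORD_IMM_GEN4 == 0x10000002 */
	u32 client = header >> INSTR_CLIENT_SHIFT; /* 0x0 == INSTR_MI_CLIENT */
	u32 opcode = (header >> 23) & 0x3f;        /* 0x20 */
	u32 length = header & 0xff;                /* 2, i.e. total dwords minus 2 */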
*/ -#define MI_DISPLAY_FLIP_IVB_PLANE_A (0 << 19) -#define MI_DISPLAY_FLIP_IVB_PLANE_B (1 << 19) -#define MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19) -#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19) -#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19) -#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19) -/* SKL ones */ -#define MI_DISPLAY_FLIP_SKL_PLANE_1_A (0 << 8) -#define MI_DISPLAY_FLIP_SKL_PLANE_1_B (1 << 8) -#define MI_DISPLAY_FLIP_SKL_PLANE_1_C (2 << 8) -#define MI_DISPLAY_FLIP_SKL_PLANE_2_A (4 << 8) -#define MI_DISPLAY_FLIP_SKL_PLANE_2_B (5 << 8) -#define MI_DISPLAY_FLIP_SKL_PLANE_2_C (6 << 8) -#define MI_DISPLAY_FLIP_SKL_PLANE_3_A (7 << 8) -#define MI_DISPLAY_FLIP_SKL_PLANE_3_B (8 << 8) -#define MI_DISPLAY_FLIP_SKL_PLANE_3_C (9 << 8) -#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6, gen7 */ -#define MI_SEMAPHORE_GLOBAL_GTT (1<<22) -#define MI_SEMAPHORE_UPDATE (1<<21) -#define MI_SEMAPHORE_COMPARE (1<<20) -#define MI_SEMAPHORE_REGISTER (1<<18) -#define MI_SEMAPHORE_SYNC_VR (0<<16) /* RCS wait for VCS (RVSYNC) */ -#define MI_SEMAPHORE_SYNC_VER (1<<16) /* RCS wait for VECS (RVESYNC) */ -#define MI_SEMAPHORE_SYNC_BR (2<<16) /* RCS wait for BCS (RBSYNC) */ -#define MI_SEMAPHORE_SYNC_BV (0<<16) /* VCS wait for BCS (VBSYNC) */ -#define MI_SEMAPHORE_SYNC_VEV (1<<16) /* VCS wait for VECS (VVESYNC) */ -#define MI_SEMAPHORE_SYNC_RV (2<<16) /* VCS wait for RCS (VRSYNC) */ -#define MI_SEMAPHORE_SYNC_RB (0<<16) /* BCS wait for RCS (BRSYNC) */ -#define MI_SEMAPHORE_SYNC_VEB (1<<16) /* BCS wait for VECS (BVESYNC) */ -#define MI_SEMAPHORE_SYNC_VB (2<<16) /* BCS wait for VCS (BVSYNC) */ -#define MI_SEMAPHORE_SYNC_BVE (0<<16) /* VECS wait for BCS (VEBSYNC) */ -#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */ -#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */ -#define MI_SEMAPHORE_SYNC_INVALID (3<<16) -#define MI_SEMAPHORE_SYNC_MASK (3<<16) -#define MI_SET_CONTEXT MI_INSTR(0x18, 0) -#define MI_MM_SPACE_GTT (1<<8) -#define MI_MM_SPACE_PHYSICAL (0<<8) -#define MI_SAVE_EXT_STATE_EN (1<<3) -#define MI_RESTORE_EXT_STATE_EN (1<<2) -#define MI_FORCE_RESTORE (1<<1) -#define MI_RESTORE_INHIBIT (1<<0) -#define HSW_MI_RS_SAVE_STATE_EN (1<<3) -#define HSW_MI_RS_RESTORE_STATE_EN (1<<2) -#define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */ -#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15) -#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */ -#define MI_SEMAPHORE_POLL (1<<15) -#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12) -#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) -#define MI_STORE_DWORD_IMM_GEN4 MI_INSTR(0x20, 2) -#define MI_MEM_VIRTUAL (1 << 22) /* 945,g33,965 */ -#define MI_USE_GGTT (1 << 22) /* g4x+ */ -#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) -#define MI_STORE_DWORD_INDEX_SHIFT 2 -/* Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM: - * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw - * simply ignores the register load under certain conditions. - * - One can actually load arbitrary many arbitrary registers: Simply issue x - * address/value pairs. Don't overdue it, though, x <= 2^4 must hold! 
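Put concretely, the two rules above turn into an emission sequence like the following (a sketch built on the MI_LOAD_REGISTER_IMM macro defined just below, assuming cs is a dword cursor obtained from something like intel_ring_begin()):

	*cs++ = MI_NOOP;                   /* rule 1: NOOP ahead of the LRI */
	*cs++ = MI_LOAD_REGISTER_IMM(1);   /* rule 2: one pair here, x <= 16 */
	*cs++ = i915_mmio_reg_offset(reg); /* address ... */
	*cs++ = value;                     /* ... value */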
- */ -#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1) -#define MI_LRI_FORCE_POSTED (1<<12) -#define MI_STORE_REGISTER_MEM MI_INSTR(0x24, 1) -#define MI_STORE_REGISTER_MEM_GEN8 MI_INSTR(0x24, 2) -#define MI_SRM_LRM_GLOBAL_GTT (1<<22) -#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ -#define MI_FLUSH_DW_STORE_INDEX (1<<21) -#define MI_INVALIDATE_TLB (1<<18) -#define MI_FLUSH_DW_OP_STOREDW (1<<14) -#define MI_FLUSH_DW_OP_MASK (3<<14) -#define MI_FLUSH_DW_NOTIFY (1<<8) -#define MI_INVALIDATE_BSD (1<<7) -#define MI_FLUSH_DW_USE_GTT (1<<2) -#define MI_FLUSH_DW_USE_PPGTT (0<<2) -#define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 1) -#define MI_LOAD_REGISTER_MEM_GEN8 MI_INSTR(0x29, 2) -#define MI_BATCH_BUFFER MI_INSTR(0x30, 1) -#define MI_BATCH_NON_SECURE (1) -/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */ -#define MI_BATCH_NON_SECURE_I965 (1<<8) -#define MI_BATCH_PPGTT_HSW (1<<8) -#define MI_BATCH_NON_SECURE_HSW (1<<13) -#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) -#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */ -#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1) -#define MI_BATCH_RESOURCE_STREAMER (1<<10) - #define MI_PREDICATE_SRC0 _MMIO(0x2400) #define MI_PREDICATE_SRC0_UDW _MMIO(0x2400 + 4) #define MI_PREDICATE_SRC1 _MMIO(0x2408) @@ -579,130 +437,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define LOWER_SLICE_DISABLED (0<<0) /* - * 3D instructions used by the kernel - */ -#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags)) - -#define GEN9_MEDIA_POOL_STATE ((0x3 << 29) | (0x2 << 27) | (0x5 << 16) | 4) -#define GEN9_MEDIA_POOL_ENABLE (1 << 31) -#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24)) -#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) -#define SC_UPDATE_SCISSOR (0x1<<1) -#define SC_ENABLE_MASK (0x1<<0) -#define SC_ENABLE (0x1<<0) -#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16)) -#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1)) -#define SCI_YMIN_MASK (0xffff<<16) -#define SCI_XMIN_MASK (0xffff<<0) -#define SCI_YMAX_MASK (0xffff<<16) -#define SCI_XMAX_MASK (0xffff<<0) -#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19)) -#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1) -#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0) -#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16)) -#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4) -#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0) -#define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) -#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3)) -#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2) - -#define COLOR_BLT_CMD (2<<29 | 0x40<<22 | (5-2)) -#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4) -#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6) -#define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5) -#define BLT_WRITE_A (2<<20) -#define BLT_WRITE_RGB (1<<20) -#define BLT_WRITE_RGBA (BLT_WRITE_RGB | BLT_WRITE_A) -#define BLT_DEPTH_8 (0<<24) -#define BLT_DEPTH_16_565 (1<<24) -#define BLT_DEPTH_16_1555 (2<<24) -#define BLT_DEPTH_32 (3<<24) -#define BLT_ROP_SRC_COPY (0xcc<<16) -#define BLT_ROP_COLOR_COPY (0xf0<<16) -#define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */ -#define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */ -#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2) -#define ASYNC_FLIP (1<<22) -#define DISPLAY_PLANE_A (0<<20) -#define DISPLAY_PLANE_B (1<<20) -#define GFX_OP_PIPE_CONTROL(len) 
((0x3<<29)|(0x3<<27)|(0x2<<24)|((len)-2)) -#define PIPE_CONTROL_FLUSH_L3 (1<<27) -#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */ -#define PIPE_CONTROL_MMIO_WRITE (1<<23) -#define PIPE_CONTROL_STORE_DATA_INDEX (1<<21) -#define PIPE_CONTROL_CS_STALL (1<<20) -#define PIPE_CONTROL_TLB_INVALIDATE (1<<18) -#define PIPE_CONTROL_MEDIA_STATE_CLEAR (1<<16) -#define PIPE_CONTROL_QW_WRITE (1<<14) -#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14) -#define PIPE_CONTROL_DEPTH_STALL (1<<13) -#define PIPE_CONTROL_WRITE_FLUSH (1<<12) -#define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */ -#define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on Ironlake */ -#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */ -#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9) -#define PIPE_CONTROL_NOTIFY (1<<8) -#define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */ -#define PIPE_CONTROL_DC_FLUSH_ENABLE (1<<5) -#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4) -#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3) -#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2) -#define PIPE_CONTROL_STALL_AT_SCOREBOARD (1<<1) -#define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0) -#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */ - -/* - * Commands used only by the command parser - */ -#define MI_SET_PREDICATE MI_INSTR(0x01, 0) -#define MI_ARB_CHECK MI_INSTR(0x05, 0) -#define MI_RS_CONTROL MI_INSTR(0x06, 0) -#define MI_URB_ATOMIC_ALLOC MI_INSTR(0x09, 0) -#define MI_PREDICATE MI_INSTR(0x0C, 0) -#define MI_RS_CONTEXT MI_INSTR(0x0F, 0) -#define MI_TOPOLOGY_FILTER MI_INSTR(0x0D, 0) -#define MI_LOAD_SCAN_LINES_EXCL MI_INSTR(0x13, 0) -#define MI_URB_CLEAR MI_INSTR(0x19, 0) -#define MI_UPDATE_GTT MI_INSTR(0x23, 0) -#define MI_CLFLUSH MI_INSTR(0x27, 0) -#define MI_REPORT_PERF_COUNT MI_INSTR(0x28, 0) -#define MI_REPORT_PERF_COUNT_GGTT (1<<0) -#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 0) -#define MI_RS_STORE_DATA_IMM MI_INSTR(0x2B, 0) -#define MI_LOAD_URB_MEM MI_INSTR(0x2C, 0) -#define MI_STORE_URB_MEM MI_INSTR(0x2D, 0) -#define MI_CONDITIONAL_BATCH_BUFFER_END MI_INSTR(0x36, 0) - -#define PIPELINE_SELECT ((0x3<<29)|(0x1<<27)|(0x1<<24)|(0x4<<16)) -#define GFX_OP_3DSTATE_VF_STATISTICS ((0x3<<29)|(0x1<<27)|(0x0<<24)|(0xB<<16)) -#define MEDIA_VFE_STATE ((0x3<<29)|(0x2<<27)|(0x0<<24)|(0x0<<16)) -#define MEDIA_VFE_STATE_MMIO_ACCESS_MASK (0x18) -#define GPGPU_OBJECT ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x4<<16)) -#define GPGPU_WALKER ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x5<<16)) -#define GFX_OP_3DSTATE_DX9_CONSTANTF_VS \ - ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x39<<16)) -#define GFX_OP_3DSTATE_DX9_CONSTANTF_PS \ - ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x3A<<16)) -#define GFX_OP_3DSTATE_SO_DECL_LIST \ - ((0x3<<29)|(0x3<<27)|(0x1<<24)|(0x17<<16)) - -#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS \ - ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x43<<16)) -#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS \ - ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x44<<16)) -#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS \ - ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x45<<16)) -#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS \ - ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x46<<16)) -#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS \ - ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x47<<16)) - -#define MFX_WAIT ((0x3<<29)|(0x1<<27)|(0x0<<16)) - -#define COLOR_BLT ((0x2<<29)|(0x40<<22)) -#define SRC_COPY_BLT ((0x2<<29)|(0x43<<22)) - -/* * Registers used only by the command parser */ #define BCS_SWCTRL _MMIO(0x22200) @@ -802,6 +536,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN8_OABUFFER_UDW 
_MMIO(0x23b4) #define GEN8_OABUFFER _MMIO(0x2b14) +#define GEN8_OABUFFER_MEM_SELECT_GGTT (1 << 0) /* 0: PPGTT, 1: GGTT */ #define GEN7_OASTATUS1 _MMIO(0x2364) #define GEN7_OASTATUS1_TAIL_MASK 0xffffffc0 @@ -810,7 +545,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN7_OASTATUS1_REPORT_LOST (1<<0) #define GEN7_OASTATUS2 _MMIO(0x2368) -#define GEN7_OASTATUS2_HEAD_MASK 0xffffffc0 +#define GEN7_OASTATUS2_HEAD_MASK 0xffffffc0 +#define GEN7_OASTATUS2_MEM_SELECT_GGTT (1 << 0) /* 0: PPGTT, 1: GGTT */ #define GEN8_OASTATUS _MMIO(0x2b08) #define GEN8_OASTATUS_OVERRUN_STATUS (1<<3) @@ -832,8 +568,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define OABUFFER_SIZE_8M (6<<3) #define OABUFFER_SIZE_16M (7<<3) -#define OA_MEM_SELECT_GGTT (1<<0) - /* * Flexible, Aggregate EU Counter Registers. * Note: these aren't contiguous @@ -1127,6 +861,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK (1 << GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT) #define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ 0 #define GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ 1 +#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT 3 +#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK (0x7 << GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT) +#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ 0 +#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ 1 +#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ 2 +#define GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ 3 #define GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT 1 #define GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK (0x3 << GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT) @@ -1948,79 +1688,100 @@ enum i915_power_well_id { #define _CNL_PORT_PCS_DW1_LN0_C 0x162C04 #define _CNL_PORT_PCS_DW1_LN0_D 0x162E04 #define _CNL_PORT_PCS_DW1_LN0_F 0x162804 -#define CNL_PORT_PCS_DW1_GRP(port) _MMIO_PORT6(port, \ +#define CNL_PORT_PCS_DW1_GRP(port) _MMIO(_PICK(port, \ _CNL_PORT_PCS_DW1_GRP_AE, \ _CNL_PORT_PCS_DW1_GRP_B, \ _CNL_PORT_PCS_DW1_GRP_C, \ _CNL_PORT_PCS_DW1_GRP_D, \ _CNL_PORT_PCS_DW1_GRP_AE, \ - _CNL_PORT_PCS_DW1_GRP_F) -#define CNL_PORT_PCS_DW1_LN0(port) _MMIO_PORT6(port, \ + _CNL_PORT_PCS_DW1_GRP_F)) + +#define CNL_PORT_PCS_DW1_LN0(port) _MMIO(_PICK(port, \ _CNL_PORT_PCS_DW1_LN0_AE, \ _CNL_PORT_PCS_DW1_LN0_B, \ _CNL_PORT_PCS_DW1_LN0_C, \ _CNL_PORT_PCS_DW1_LN0_D, \ _CNL_PORT_PCS_DW1_LN0_AE, \ - _CNL_PORT_PCS_DW1_LN0_F) + _CNL_PORT_PCS_DW1_LN0_F)) +#define _ICL_PORT_PCS_DW1_GRP_A 0x162604 +#define _ICL_PORT_PCS_DW1_GRP_B 0x6C604 +#define _ICL_PORT_PCS_DW1_LN0_A 0x162804 +#define _ICL_PORT_PCS_DW1_LN0_B 0x6C804 +#define ICL_PORT_PCS_DW1_GRP(port) _MMIO_PORT(port,\ + _ICL_PORT_PCS_DW1_GRP_A, \ + _ICL_PORT_PCS_DW1_GRP_B) +#define ICL_PORT_PCS_DW1_LN0(port) _MMIO_PORT(port, \ + _ICL_PORT_PCS_DW1_LN0_A, \ + _ICL_PORT_PCS_DW1_LN0_B) #define COMMON_KEEPER_EN (1 << 26) -#define _CNL_PORT_TX_DW2_GRP_AE 0x162348 -#define _CNL_PORT_TX_DW2_GRP_B 0x1623C8 -#define _CNL_PORT_TX_DW2_GRP_C 0x162B48 -#define _CNL_PORT_TX_DW2_GRP_D 0x162BC8 -#define _CNL_PORT_TX_DW2_GRP_F 0x162A48 -#define _CNL_PORT_TX_DW2_LN0_AE 0x162448 -#define _CNL_PORT_TX_DW2_LN0_B 0x162648 -#define _CNL_PORT_TX_DW2_LN0_C 0x162C48 -#define _CNL_PORT_TX_DW2_LN0_D 0x162E48 -#define _CNL_PORT_TX_DW2_LN0_F 0x162848 -#define CNL_PORT_TX_DW2_GRP(port) _MMIO_PORT6(port, \ - _CNL_PORT_TX_DW2_GRP_AE, \ - _CNL_PORT_TX_DW2_GRP_B, \ - _CNL_PORT_TX_DW2_GRP_C, \ - _CNL_PORT_TX_DW2_GRP_D, \ - _CNL_PORT_TX_DW2_GRP_AE, \ - _CNL_PORT_TX_DW2_GRP_F) -#define CNL_PORT_TX_DW2_LN0(port) 
_MMIO_PORT6(port, \ - _CNL_PORT_TX_DW2_LN0_AE, \ - _CNL_PORT_TX_DW2_LN0_B, \ - _CNL_PORT_TX_DW2_LN0_C, \ - _CNL_PORT_TX_DW2_LN0_D, \ - _CNL_PORT_TX_DW2_LN0_AE, \ - _CNL_PORT_TX_DW2_LN0_F) -#define SWING_SEL_UPPER(x) ((x >> 3) << 15) +/* CNL Port TX registers */ +#define _CNL_PORT_TX_AE_GRP_OFFSET 0x162340 +#define _CNL_PORT_TX_B_GRP_OFFSET 0x1623C0 +#define _CNL_PORT_TX_C_GRP_OFFSET 0x162B40 +#define _CNL_PORT_TX_D_GRP_OFFSET 0x162BC0 +#define _CNL_PORT_TX_F_GRP_OFFSET 0x162A40 +#define _CNL_PORT_TX_AE_LN0_OFFSET 0x162440 +#define _CNL_PORT_TX_B_LN0_OFFSET 0x162640 +#define _CNL_PORT_TX_C_LN0_OFFSET 0x162C40 +#define _CNL_PORT_TX_D_LN0_OFFSET 0x162E40 +#define _CNL_PORT_TX_F_LN0_OFFSET 0x162840 +#define _CNL_PORT_TX_DW_GRP(port, dw) (_PICK((port), \ + _CNL_PORT_TX_AE_GRP_OFFSET, \ + _CNL_PORT_TX_B_GRP_OFFSET, \ + _CNL_PORT_TX_C_GRP_OFFSET, \ + _CNL_PORT_TX_D_GRP_OFFSET, \ + _CNL_PORT_TX_AE_GRP_OFFSET, \ + _CNL_PORT_TX_F_GRP_OFFSET) + \ + 4*(dw)) +#define _CNL_PORT_TX_DW_LN0(port, dw) (_PICK((port), \ + _CNL_PORT_TX_AE_LN0_OFFSET, \ + _CNL_PORT_TX_B_LN0_OFFSET, \ + _CNL_PORT_TX_C_LN0_OFFSET, \ + _CNL_PORT_TX_D_LN0_OFFSET, \ + _CNL_PORT_TX_AE_LN0_OFFSET, \ + _CNL_PORT_TX_F_LN0_OFFSET) + \ + 4*(dw)) + +#define CNL_PORT_TX_DW2_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 2)) +#define CNL_PORT_TX_DW2_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 2)) +#define _ICL_PORT_TX_DW2_GRP_A 0x162688 +#define _ICL_PORT_TX_DW2_GRP_B 0x6C688 +#define _ICL_PORT_TX_DW2_LN0_A 0x162888 +#define _ICL_PORT_TX_DW2_LN0_B 0x6C888 +#define ICL_PORT_TX_DW2_GRP(port) _MMIO_PORT(port, \ + _ICL_PORT_TX_DW2_GRP_A, \ + _ICL_PORT_TX_DW2_GRP_B) +#define ICL_PORT_TX_DW2_LN0(port) _MMIO_PORT(port, \ + _ICL_PORT_TX_DW2_LN0_A, \ + _ICL_PORT_TX_DW2_LN0_B) +#define SWING_SEL_UPPER(x) (((x) >> 3) << 15) #define SWING_SEL_UPPER_MASK (1 << 15) -#define SWING_SEL_LOWER(x) ((x & 0x7) << 11) +#define SWING_SEL_LOWER(x) (((x) & 0x7) << 11) #define SWING_SEL_LOWER_MASK (0x7 << 11) #define RCOMP_SCALAR(x) ((x) << 0) #define RCOMP_SCALAR_MASK (0xFF << 0) -#define _CNL_PORT_TX_DW4_GRP_AE 0x162350 -#define _CNL_PORT_TX_DW4_GRP_B 0x1623D0 -#define _CNL_PORT_TX_DW4_GRP_C 0x162B50 -#define _CNL_PORT_TX_DW4_GRP_D 0x162BD0 -#define _CNL_PORT_TX_DW4_GRP_F 0x162A50 #define _CNL_PORT_TX_DW4_LN0_AE 0x162450 #define _CNL_PORT_TX_DW4_LN1_AE 0x1624D0 -#define _CNL_PORT_TX_DW4_LN0_B 0x162650 -#define _CNL_PORT_TX_DW4_LN0_C 0x162C50 -#define _CNL_PORT_TX_DW4_LN0_D 0x162E50 -#define _CNL_PORT_TX_DW4_LN0_F 0x162850 -#define CNL_PORT_TX_DW4_GRP(port) _MMIO_PORT6(port, \ - _CNL_PORT_TX_DW4_GRP_AE, \ - _CNL_PORT_TX_DW4_GRP_B, \ - _CNL_PORT_TX_DW4_GRP_C, \ - _CNL_PORT_TX_DW4_GRP_D, \ - _CNL_PORT_TX_DW4_GRP_AE, \ - _CNL_PORT_TX_DW4_GRP_F) -#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO_PORT6_LN(port, ln, \ - _CNL_PORT_TX_DW4_LN0_AE, \ - _CNL_PORT_TX_DW4_LN1_AE, \ - _CNL_PORT_TX_DW4_LN0_B, \ - _CNL_PORT_TX_DW4_LN0_C, \ - _CNL_PORT_TX_DW4_LN0_D, \ - _CNL_PORT_TX_DW4_LN0_AE, \ - _CNL_PORT_TX_DW4_LN0_F) +#define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 4)) +#define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4)) +#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \ + (ln * (_CNL_PORT_TX_DW4_LN1_AE - \ + _CNL_PORT_TX_DW4_LN0_AE))) +#define _ICL_PORT_TX_DW4_GRP_A 0x162690 +#define _ICL_PORT_TX_DW4_GRP_B 0x6C690 +#define _ICL_PORT_TX_DW4_LN0_A 0x162890 +#define _ICL_PORT_TX_DW4_LN1_A 0x162990 +#define _ICL_PORT_TX_DW4_LN0_B 0x6C890 +#define ICL_PORT_TX_DW4_GRP(port) _MMIO_PORT(port, \ + _ICL_PORT_TX_DW4_GRP_A, \ +
_ICL_PORT_TX_DW4_GRP_B) +#define ICL_PORT_TX_DW4_LN(port, ln) _MMIO(_PORT(port, \ + _ICL_PORT_TX_DW4_LN0_A, \ + _ICL_PORT_TX_DW4_LN0_B) + \ + (ln * (_ICL_PORT_TX_DW4_LN1_A - \ + _ICL_PORT_TX_DW4_LN0_A))) #define LOADGEN_SELECT (1 << 31) #define POST_CURSOR_1(x) ((x) << 12) #define POST_CURSOR_1_MASK (0x3F << 12) @@ -2029,64 +1790,147 @@ enum i915_power_well_id { #define CURSOR_COEFF(x) ((x) << 0) #define CURSOR_COEFF_MASK (0x3F << 0) -#define _CNL_PORT_TX_DW5_GRP_AE 0x162354 -#define _CNL_PORT_TX_DW5_GRP_B 0x1623D4 -#define _CNL_PORT_TX_DW5_GRP_C 0x162B54 -#define _CNL_PORT_TX_DW5_GRP_D 0x162BD4 -#define _CNL_PORT_TX_DW5_GRP_F 0x162A54 -#define _CNL_PORT_TX_DW5_LN0_AE 0x162454 -#define _CNL_PORT_TX_DW5_LN0_B 0x162654 -#define _CNL_PORT_TX_DW5_LN0_C 0x162C54 -#define _CNL_PORT_TX_DW5_LN0_D 0x162E54 -#define _CNL_PORT_TX_DW5_LN0_F 0x162854 -#define CNL_PORT_TX_DW5_GRP(port) _MMIO_PORT6(port, \ - _CNL_PORT_TX_DW5_GRP_AE, \ - _CNL_PORT_TX_DW5_GRP_B, \ - _CNL_PORT_TX_DW5_GRP_C, \ - _CNL_PORT_TX_DW5_GRP_D, \ - _CNL_PORT_TX_DW5_GRP_AE, \ - _CNL_PORT_TX_DW5_GRP_F) -#define CNL_PORT_TX_DW5_LN0(port) _MMIO_PORT6(port, \ - _CNL_PORT_TX_DW5_LN0_AE, \ - _CNL_PORT_TX_DW5_LN0_B, \ - _CNL_PORT_TX_DW5_LN0_C, \ - _CNL_PORT_TX_DW5_LN0_D, \ - _CNL_PORT_TX_DW5_LN0_AE, \ - _CNL_PORT_TX_DW5_LN0_F) +#define CNL_PORT_TX_DW5_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 5)) +#define CNL_PORT_TX_DW5_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 5)) +#define _ICL_PORT_TX_DW5_GRP_A 0x162694 +#define _ICL_PORT_TX_DW5_GRP_B 0x6C694 +#define _ICL_PORT_TX_DW5_LN0_A 0x162894 +#define _ICL_PORT_TX_DW5_LN0_B 0x6C894 +#define ICL_PORT_TX_DW5_GRP(port) _MMIO_PORT(port, \ + _ICL_PORT_TX_DW5_GRP_A, \ + _ICL_PORT_TX_DW5_GRP_B) +#define ICL_PORT_TX_DW5_LN0(port) _MMIO_PORT(port, \ + _ICL_PORT_TX_DW5_LN0_A, \ + _ICL_PORT_TX_DW5_LN0_B) #define TX_TRAINING_EN (1 << 31) +#define TAP2_DISABLE (1 << 30) #define TAP3_DISABLE (1 << 29) #define SCALING_MODE_SEL(x) ((x) << 18) #define SCALING_MODE_SEL_MASK (0x7 << 18) #define RTERM_SELECT(x) ((x) << 3) #define RTERM_SELECT_MASK (0x7 << 3) -#define _CNL_PORT_TX_DW7_GRP_AE 0x16235C -#define _CNL_PORT_TX_DW7_GRP_B 0x1623DC -#define _CNL_PORT_TX_DW7_GRP_C 0x162B5C -#define _CNL_PORT_TX_DW7_GRP_D 0x162BDC -#define _CNL_PORT_TX_DW7_GRP_F 0x162A5C -#define _CNL_PORT_TX_DW7_LN0_AE 0x16245C -#define _CNL_PORT_TX_DW7_LN0_B 0x16265C -#define _CNL_PORT_TX_DW7_LN0_C 0x162C5C -#define _CNL_PORT_TX_DW7_LN0_D 0x162E5C -#define _CNL_PORT_TX_DW7_LN0_F 0x16285C -#define CNL_PORT_TX_DW7_GRP(port) _MMIO_PORT6(port, \ - _CNL_PORT_TX_DW7_GRP_AE, \ - _CNL_PORT_TX_DW7_GRP_B, \ - _CNL_PORT_TX_DW7_GRP_C, \ - _CNL_PORT_TX_DW7_GRP_D, \ - _CNL_PORT_TX_DW7_GRP_AE, \ - _CNL_PORT_TX_DW7_GRP_F) -#define CNL_PORT_TX_DW7_LN0(port) _MMIO_PORT6(port, \ - _CNL_PORT_TX_DW7_LN0_AE, \ - _CNL_PORT_TX_DW7_LN0_B, \ - _CNL_PORT_TX_DW7_LN0_C, \ - _CNL_PORT_TX_DW7_LN0_D, \ - _CNL_PORT_TX_DW7_LN0_AE, \ - _CNL_PORT_TX_DW7_LN0_F) +#define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 7)) +#define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 7)) #define N_SCALAR(x) ((x) << 24) #define N_SCALAR_MASK (0x7F << 24) +#define _ICL_MG_PHY_PORT_LN(port, ln, ln0p1, ln0p2, ln1p1) \ + _MMIO(_PORT((port) - PORT_C, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1))) + +#define _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT1 0x16812C +#define _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT1 0x16852C +#define _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT2 0x16912C +#define _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT2 0x16952C +#define _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT3 0x16A12C 
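+/*
+ * The MG PHY registers in this block come in four per-port instances
+ * (PORT1..PORT4 for ports C..F) spaced 0x1000 apart, with a 0x400 stride
+ * between lanes; _ICL_MG_PHY_PORT_LN() above rebuilds an address from the
+ * PORT1/PORT2 lane-0 values plus the PORT1 lane-1 value, and _PORT()
+ * extends the same 0x1000 stride to PORT3/PORT4. For example,
+ * ICL_PORT_MG_TX1_LINK_PARAMS(PORT_D, 1) picks the PORT2 base 0x16912C via
+ * _PORT(PORT_D - PORT_C, ...) and adds 1 * (0x16852C - 0x16812C) = 0x400,
+ * giving 0x16952C, i.e. _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT2.
+ */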
+#define _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT3 0x16A52C +#define _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT4 0x16B12C +#define _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT4 0x16B52C +#define ICL_PORT_MG_TX1_LINK_PARAMS(port, ln) \ + _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT1, \ + _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT2, \ + _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT1) + +#define _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT1 0x1680AC +#define _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT1 0x1684AC +#define _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT2 0x1690AC +#define _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT2 0x1694AC +#define _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT3 0x16A0AC +#define _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT3 0x16A4AC +#define _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT4 0x16B0AC +#define _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT4 0x16B4AC +#define ICL_PORT_MG_TX2_LINK_PARAMS(port, ln) \ + _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT1, \ + _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT2, \ + _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT1) +#define CRI_USE_FS32 (1 << 5) + +#define _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT1 0x16814C +#define _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT1 0x16854C +#define _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT2 0x16914C +#define _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT2 0x16954C +#define _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT3 0x16A14C +#define _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT3 0x16A54C +#define _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT4 0x16B14C +#define _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT4 0x16B54C +#define ICL_PORT_MG_TX1_PISO_READLOAD(port, ln) \ + _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT1, \ + _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT2, \ + _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT1) + +#define _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT1 0x1680CC +#define _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT1 0x1684CC +#define _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT2 0x1690CC +#define _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT2 0x1694CC +#define _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT3 0x16A0CC +#define _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT3 0x16A4CC +#define _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT4 0x16B0CC +#define _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT4 0x16B4CC +#define ICL_PORT_MG_TX2_PISO_READLOAD(port, ln) \ + _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT1, \ + _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT2, \ + _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT1) +#define CRI_CALCINIT (1 << 1) + +#define _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT1 0x168148 +#define _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT1 0x168548 +#define _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT2 0x169148 +#define _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT2 0x169548 +#define _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT3 0x16A148 +#define _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT3 0x16A548 +#define _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT4 0x16B148 +#define _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT4 0x16B548 +#define ICL_PORT_MG_TX1_SWINGCTRL(port, ln) \ + _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT1, \ + _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT2, \ + _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT1) + +#define _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT1 0x1680C8 +#define _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT1 0x1684C8 +#define _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT2 0x1690C8 +#define _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT2 0x1694C8 +#define _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT3 0x16A0C8 +#define _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT3 0x16A4C8 +#define _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT4 0x16B0C8 +#define _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT4 0x16B4C8 +#define ICL_PORT_MG_TX2_SWINGCTRL(port, ln) \ + _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT1, \ + _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT2, 
\ + _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT1) +#define CRI_TXDEEMPH_OVERRIDE_17_12(x) ((x) << 0) +#define CRI_TXDEEMPH_OVERRIDE_17_12_MASK (0x3F << 0) + +#define _ICL_MG_TX_DRVCTRL_TX1LN0_PORT1 0x168144 +#define _ICL_MG_TX_DRVCTRL_TX1LN1_PORT1 0x168544 +#define _ICL_MG_TX_DRVCTRL_TX1LN0_PORT2 0x169144 +#define _ICL_MG_TX_DRVCTRL_TX1LN1_PORT2 0x169544 +#define _ICL_MG_TX_DRVCTRL_TX1LN0_PORT3 0x16A144 +#define _ICL_MG_TX_DRVCTRL_TX1LN1_PORT3 0x16A544 +#define _ICL_MG_TX_DRVCTRL_TX1LN0_PORT4 0x16B144 +#define _ICL_MG_TX_DRVCTRL_TX1LN1_PORT4 0x16B544 +#define ICL_PORT_MG_TX1_DRVCTRL(port, ln) \ + _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_DRVCTRL_TX1LN0_PORT1, \ + _ICL_MG_TX_DRVCTRL_TX1LN0_PORT2, \ + _ICL_MG_TX_DRVCTRL_TX1LN1_PORT1) + +#define _ICL_MG_TX_DRVCTRL_TX2LN0_PORT1 0x1680C4 +#define _ICL_MG_TX_DRVCTRL_TX2LN1_PORT1 0x1684C4 +#define _ICL_MG_TX_DRVCTRL_TX2LN0_PORT2 0x1690C4 +#define _ICL_MG_TX_DRVCTRL_TX2LN1_PORT2 0x1694C4 +#define _ICL_MG_TX_DRVCTRL_TX2LN0_PORT3 0x16A0C4 +#define _ICL_MG_TX_DRVCTRL_TX2LN1_PORT3 0x16A4C4 +#define _ICL_MG_TX_DRVCTRL_TX2LN0_PORT4 0x16B0C4 +#define _ICL_MG_TX_DRVCTRL_TX2LN1_PORT4 0x16B4C4 +#define ICL_PORT_MG_TX2_DRVCTRL(port, ln) \ + _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_DRVCTRL_TX2LN0_PORT1, \ + _ICL_MG_TX_DRVCTRL_TX2LN0_PORT2, \ + _ICL_MG_TX_DRVCTRL_TX2LN1_PORT1) +#define CRI_TXDEEMPH_OVERRIDE_11_6(x) ((x) << 24) +#define CRI_TXDEEMPH_OVERRIDE_11_6_MASK (0x3F << 24) +#define CRI_TXDEEMPH_OVERRIDE_EN (1 << 22) +#define CRI_TXDEEMPH_OVERRIDE_5_0(x) ((x) << 16) +#define CRI_TXDEEMPH_OVERRIDE_5_0_MASK (0x3F << 16) + /* The spec defines this only for BXT PHY0, but lets assume that this * would exist for PHY1 too if it had a second channel. */ @@ -2473,6 +2317,10 @@ enum i915_power_well_id { #define GEN8_MCR_SLICE_MASK GEN8_MCR_SLICE(3) #define GEN8_MCR_SUBSLICE(subslice) (((subslice) & 3) << 24) #define GEN8_MCR_SUBSLICE_MASK GEN8_MCR_SUBSLICE(3) +#define GEN11_MCR_SLICE(slice) (((slice) & 0xf) << 27) +#define GEN11_MCR_SLICE_MASK GEN11_MCR_SLICE(0xf) +#define GEN11_MCR_SUBSLICE(subslice) (((subslice) & 0x7) << 24) +#define GEN11_MCR_SUBSLICE_MASK GEN11_MCR_SUBSLICE(0x7) #define RING_IPEIR(base) _MMIO((base)+0x64) #define RING_IPEHR(base) _MMIO((base)+0x68) /* @@ -2867,6 +2715,19 @@ enum i915_power_well_id { #define GEN10_EU_DISABLE3 _MMIO(0x9140) #define GEN10_EU_DIS_SS_MASK 0xff +#define GEN11_GT_VEBOX_VDBOX_DISABLE _MMIO(0x9140) +#define GEN11_GT_VDBOX_DISABLE_MASK 0xff +#define GEN11_GT_VEBOX_DISABLE_SHIFT 16 +#define GEN11_GT_VEBOX_DISABLE_MASK (0xff << GEN11_GT_VEBOX_DISABLE_SHIFT) + +#define GEN11_EU_DISABLE _MMIO(0x9134) +#define GEN11_EU_DIS_MASK 0xFF + +#define GEN11_GT_SLICE_ENABLE _MMIO(0x9138) +#define GEN11_GT_S_ENA_MASK 0xFF + +#define GEN11_GT_SUBSLICE_DISABLE _MMIO(0x913C) + #define GEN6_BSD_SLEEP_PSMI_CONTROL _MMIO(0x12050) #define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0) #define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2) @@ -4151,6 +4012,12 @@ enum { #define EDP_PSR_IDLE_FRAME_SHIFT 0 #define EDP_PSR_AUX_CTL _MMIO(dev_priv->psr_mmio_base + 0x10) +#define EDP_PSR_AUX_CTL_TIME_OUT_MASK (3 << 26) +#define EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK (0x1f << 20) +#define EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK (0xf << 16) +#define EDP_PSR_AUX_CTL_ERROR_INTERRUPT (1 << 11) +#define EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK (0x7ff) + #define EDP_PSR_AUX_DATA(i) _MMIO(dev_priv->psr_mmio_base + 0x14 + (i) * 4) /* 5 registers */ #define EDP_PSR_STATUS _MMIO(dev_priv->psr_mmio_base + 0x40) @@ -4180,17 +4047,19 @@ enum { #define EDP_PSR_PERF_CNT _MMIO(dev_priv->psr_mmio_base + 0x44) 
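The _CNL_PORT_TX_DW_GRP()/_LN0() helpers introduced earlier in this i915_reg.h hunk collapse the removed per-dword defines into one per-port base table plus a 4-byte-per-dword offset. A minimal standalone sketch of that lookup, with _PICK() re-implemented the way i915_reg.h defines it (indexing a compound-literal array), so the arithmetic can be checked outside the driver:

	#include <assert.h>
	#include <stdint.h>

	/* stand-in for the i915_reg.h helper */
	#define _PICK(idx, ...) (((const uint32_t []){ __VA_ARGS__ })[idx])

	/* six port slots A..F, with A/E sharing the AE base; note the hunk
	 * above feeds the B offset into the port C slot, reproduced as-is */
	#define _CNL_PORT_TX_DW_GRP(port, dw) (_PICK((port), \
		0x162340, 0x1623C0, 0x1623C0, 0x162BC0, \
		0x162340, 0x162A40) + 4 * (dw))

	int main(void)
	{
		/* port B (index 1), dword 2: the removed _CNL_PORT_TX_DW2_GRP_B */
		assert(_CNL_PORT_TX_DW_GRP(1, 2) == 0x1623C8);
		/* port AE (index 0), dword 4: the removed _CNL_PORT_TX_DW4_GRP_AE */
		assert(_CNL_PORT_TX_DW_GRP(0, 4) == 0x162350);
		return 0;
	}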
#define EDP_PSR_PERF_CNT_MASK 0xffffff -#define EDP_PSR_DEBUG _MMIO(dev_priv->psr_mmio_base + 0x60) +#define EDP_PSR_DEBUG _MMIO(dev_priv->psr_mmio_base + 0x60) /* PSR_MASK on SKL+ */ #define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1<<28) #define EDP_PSR_DEBUG_MASK_LPSP (1<<27) #define EDP_PSR_DEBUG_MASK_MEMUP (1<<26) #define EDP_PSR_DEBUG_MASK_HPD (1<<25) #define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1<<16) -#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1<<15) +#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1<<15) /* SKL+ */ #define EDP_PSR2_CTL _MMIO(0x6f900) #define EDP_PSR2_ENABLE (1<<31) #define EDP_SU_TRACK_ENABLE (1<<30) +#define EDP_Y_COORDINATE_VALID (1<<26) /* GLK and CNL+ */ +#define EDP_Y_COORDINATE_ENABLE (1<<25) /* GLK and CNL+ */ #define EDP_MAX_SU_DISABLE_TIME(t) ((t)<<20) #define EDP_MAX_SU_DISABLE_TIME_MASK (0x1f<<20) #define EDP_PSR2_TP2_TIME_500 (0<<8) @@ -4200,8 +4069,9 @@ enum { #define EDP_PSR2_TP2_TIME_MASK (3<<8) #define EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4 #define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf<<4) -#define EDP_PSR2_IDLE_MASK 0xf #define EDP_PSR2_FRAME_BEFORE_SU(a) ((a)<<4) +#define EDP_PSR2_IDLE_FRAME_MASK 0xf +#define EDP_PSR2_IDLE_FRAME_SHIFT 0 #define EDP_PSR2_STATUS _MMIO(0x6f940) #define EDP_PSR2_STATUS_STATE_MASK (0xf<<28) @@ -5265,8 +5135,6 @@ enum { #define DP_LINK_TRAIN_OFF (3 << 28) #define DP_LINK_TRAIN_MASK (3 << 28) #define DP_LINK_TRAIN_SHIFT 28 -#define DP_LINK_TRAIN_PAT_3_CHV (1 << 14) -#define DP_LINK_TRAIN_MASK_CHV ((3 << 28)|(1<<14)) /* CPT Link training mode */ #define DP_LINK_TRAIN_PAT_1_CPT (0 << 8) @@ -6009,6 +5877,7 @@ enum { #define CURSIZE _MMIO(0x700a0) /* 845/865 */ #define _CUR_FBC_CTL_A 0x700a0 /* ivb+ */ #define CUR_FBC_CTL_EN (1 << 31) +#define _CURASURFLIVE 0x700ac /* g4x+ */ #define _CURBCNTR 0x700c0 #define _CURBBASE 0x700c4 #define _CURBPOS 0x700c8 @@ -6025,6 +5894,7 @@ enum { #define CURBASE(pipe) _CURSOR2(pipe, _CURABASE) #define CURPOS(pipe) _CURSOR2(pipe, _CURAPOS) #define CUR_FBC_CTL(pipe) _CURSOR2(pipe, _CUR_FBC_CTL_A) +#define CURSURFLIVE(pipe) _CURSOR2(pipe, _CURASURFLIVE) #define CURSOR_A_OFFSET 0x70080 #define CURSOR_B_OFFSET 0x700c0 @@ -7197,6 +7067,7 @@ enum { #define CHICKEN_TRANS_A 0x420c0 #define CHICKEN_TRANS_B 0x420c4 #define CHICKEN_TRANS(trans) _MMIO_TRANS(trans, CHICKEN_TRANS_A, CHICKEN_TRANS_B) +#define VSC_DATA_SEL_SOFTWARE_CONTROL (1<<25) /* GLK and CNL+ */ #define DDI_TRAINING_OVERRIDE_ENABLE (1<<19) #define DDI_TRAINING_OVERRIDE_VALUE (1<<18) #define DDIE_TRAINING_OVERRIDE_ENABLE (1<<17) /* CHICKEN_TRANS_A only */ diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 282f57630cc1..585242831974 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -59,11 +59,7 @@ static bool i915_fence_signaled(struct dma_fence *fence) static bool i915_fence_enable_signaling(struct dma_fence *fence) { - if (i915_fence_signaled(fence)) - return false; - - intel_engine_enable_signaling(to_request(fence), true); - return !i915_fence_signaled(fence); + return intel_engine_enable_signaling(to_request(fence), true); } static signed long i915_fence_wait(struct dma_fence *fence, @@ -211,11 +207,19 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno) if (ret) return ret; + GEM_BUG_ON(i915->gt.active_requests); + /* If the seqno wraps around, we need to clear the breadcrumb rbtree */ for_each_engine(engine, i915, id) { struct i915_gem_timeline *timeline; struct intel_timeline *tl = engine->timeline; + GEM_TRACE("%s seqno %d (current %d) -> 
%d\n", + engine->name, + tl->seqno, + intel_engine_get_seqno(engine), + seqno); + if (!i915_seqno_passed(seqno, tl->seqno)) { /* Flush any waiters before we reuse the seqno */ intel_engine_disarm_breadcrumbs(engine); @@ -358,7 +362,7 @@ static void advance_ring(struct i915_request *request) * is just about to be. Either works, if we miss the last two * noops - they are safe to be replayed on a reset. */ - tail = READ_ONCE(request->ring->tail); + tail = READ_ONCE(request->tail); } else { tail = request->postfix; } @@ -385,6 +389,12 @@ static void i915_request_retire(struct i915_request *request) struct intel_engine_cs *engine = request->engine; struct i915_gem_active *active, *next; + GEM_TRACE("%s fence %llx:%d, global_seqno %d, current %d\n", + engine->name, + request->fence.context, request->fence.seqno, + request->global_seqno, + intel_engine_get_seqno(engine)); + lockdep_assert_held(&request->i915->drm.struct_mutex); GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit)); GEM_BUG_ON(!i915_request_completed(request)); @@ -486,21 +496,34 @@ static u32 timeline_get_seqno(struct intel_timeline *tl) return ++tl->seqno; } +static void move_to_timeline(struct i915_request *request, + struct intel_timeline *timeline) +{ + GEM_BUG_ON(request->timeline == request->engine->timeline); + lockdep_assert_held(&request->engine->timeline->lock); + + spin_lock(&request->timeline->lock); + list_move_tail(&request->link, &timeline->requests); + spin_unlock(&request->timeline->lock); +} + void __i915_request_submit(struct i915_request *request) { struct intel_engine_cs *engine = request->engine; - struct intel_timeline *timeline; u32 seqno; + GEM_TRACE("%s fence %llx:%d -> global_seqno %d, current %d\n", + engine->name, + request->fence.context, request->fence.seqno, + engine->timeline->seqno + 1, + intel_engine_get_seqno(engine)); + GEM_BUG_ON(!irqs_disabled()); lockdep_assert_held(&engine->timeline->lock); - /* Transfer from per-context onto the global per-engine timeline */ - timeline = engine->timeline; - GEM_BUG_ON(timeline == request->timeline); GEM_BUG_ON(request->global_seqno); - seqno = timeline_get_seqno(timeline); + seqno = timeline_get_seqno(engine->timeline); GEM_BUG_ON(!seqno); GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno)); @@ -514,9 +537,8 @@ void __i915_request_submit(struct i915_request *request) engine->emit_breadcrumb(request, request->ring->vaddr + request->postfix); - spin_lock(&request->timeline->lock); - list_move_tail(&request->link, &timeline->requests); - spin_unlock(&request->timeline->lock); + /* Transfer from per-context onto the global per-engine timeline */ + move_to_timeline(request, engine->timeline); trace_i915_request_execute(request); @@ -539,7 +561,12 @@ void i915_request_submit(struct i915_request *request) void __i915_request_unsubmit(struct i915_request *request) { struct intel_engine_cs *engine = request->engine; - struct intel_timeline *timeline; + + GEM_TRACE("%s fence %llx:%d <- global_seqno %d, current %d\n", + engine->name, + request->fence.context, request->fence.seqno, + request->global_seqno, + intel_engine_get_seqno(engine)); GEM_BUG_ON(!irqs_disabled()); lockdep_assert_held(&engine->timeline->lock); @@ -562,12 +589,7 @@ void __i915_request_unsubmit(struct i915_request *request) spin_unlock(&request->lock); /* Transfer back from the global per-engine timeline to per-context */ - timeline = request->timeline; - GEM_BUG_ON(timeline == engine->timeline); - - spin_lock(&timeline->lock); - list_move(&request->link, &timeline->requests); - 
spin_unlock(&timeline->lock); + move_to_timeline(request, request->timeline); /* * We don't need to wake_up any waiters on request->execute, they @@ -1000,6 +1022,9 @@ void __i915_request_add(struct i915_request *request, bool flush_caches) u32 *cs; int err; + GEM_TRACE("%s fence %llx:%d\n", + engine->name, request->fence.context, request->fence.seqno); + lockdep_assert_held(&request->i915->drm.struct_mutex); trace_i915_request_add(request); @@ -1210,7 +1235,7 @@ static bool __i915_wait_request_check_and_reset(struct i915_request *request) return false; __set_current_state(TASK_RUNNING); - i915_reset(request->i915, 0); + i915_reset(request->i915); return true; } diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index 51dbfe5bb418..0695717522ea 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -40,8 +40,8 @@ #undef WARN_ON_ONCE #define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")") -#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \ - (long)(x), __func__) +#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \ + __stringify(x), (long)(x)) #if GCC_VERSION >= 70000 #define add_overflows(A, B) \ diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index 1f79e7a47433..671a6d61e29d 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c @@ -730,10 +730,11 @@ static void insert_signal(struct intel_breadcrumbs *b, list_add(&request->signaling.link, &iter->signaling.link); } -void intel_engine_enable_signaling(struct i915_request *request, bool wakeup) +bool intel_engine_enable_signaling(struct i915_request *request, bool wakeup) { struct intel_engine_cs *engine = request->engine; struct intel_breadcrumbs *b = &engine->breadcrumbs; + struct intel_wait *wait = &request->signaling.wait; u32 seqno; /* @@ -750,12 +751,12 @@ void intel_engine_enable_signaling(struct i915_request *request, bool wakeup) seqno = i915_request_global_seqno(request); if (!seqno) /* will be enabled later upon execution */ - return; + return true; - GEM_BUG_ON(request->signaling.wait.seqno); - request->signaling.wait.tsk = b->signaler; - request->signaling.wait.request = request; - request->signaling.wait.seqno = seqno; + GEM_BUG_ON(wait->seqno); + wait->tsk = b->signaler; + wait->request = request; + wait->seqno = seqno; /* * Add ourselves into the list of waiters, but registering our @@ -768,11 +769,15 @@ void intel_engine_enable_signaling(struct i915_request *request, bool wakeup) */ spin_lock(&b->rb_lock); insert_signal(b, request, seqno); - wakeup &= __intel_engine_add_wait(engine, &request->signaling.wait); + wakeup &= __intel_engine_add_wait(engine, wait); spin_unlock(&b->rb_lock); - if (wakeup) + if (wakeup) { wake_up_process(b->signaler); + return !intel_wait_complete(wait); + } + + return true; } void intel_engine_cancel_signaling(struct i915_request *request) diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 8c2d778560f0..a6672a9abd85 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -493,6 +493,125 @@ static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = { { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ }; +struct icl_combo_phy_ddi_buf_trans { + u32 dw2_swing_select; + u32 dw2_swing_scalar; + u32 dw4_scaling; +}; + +/* Voltage Swing Programming for VccIO 0.85V for DP */ +static const struct 
icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_85V[] = { + /* Voltage mV db */ + { 0x2, 0x98, 0x0018 }, /* 400 0.0 */ + { 0x2, 0x98, 0x3015 }, /* 400 3.5 */ + { 0x2, 0x98, 0x6012 }, /* 400 6.0 */ + { 0x2, 0x98, 0x900F }, /* 400 9.5 */ + { 0xB, 0x70, 0x0018 }, /* 600 0.0 */ + { 0xB, 0x70, 0x3015 }, /* 600 3.5 */ + { 0xB, 0x70, 0x6012 }, /* 600 6.0 */ + { 0x5, 0x00, 0x0018 }, /* 800 0.0 */ + { 0x5, 0x00, 0x3015 }, /* 800 3.5 */ + { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */ +}; + +/* FIXME - After table is updated in Bspec */ +/* Voltage Swing Programming for VccIO 0.85V for eDP */ +static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_85V[] = { + /* Voltage mV db */ + { 0x0, 0x00, 0x00 }, /* 200 0.0 */ + { 0x0, 0x00, 0x00 }, /* 200 1.5 */ + { 0x0, 0x00, 0x00 }, /* 200 4.0 */ + { 0x0, 0x00, 0x00 }, /* 200 6.0 */ + { 0x0, 0x00, 0x00 }, /* 250 0.0 */ + { 0x0, 0x00, 0x00 }, /* 250 1.5 */ + { 0x0, 0x00, 0x00 }, /* 250 4.0 */ + { 0x0, 0x00, 0x00 }, /* 300 0.0 */ + { 0x0, 0x00, 0x00 }, /* 300 1.5 */ + { 0x0, 0x00, 0x00 }, /* 350 0.0 */ +}; + +/* Voltage Swing Programming for VccIO 0.95V for DP */ +static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_95V[] = { + /* Voltage mV db */ + { 0x2, 0x98, 0x0018 }, /* 400 0.0 */ + { 0x2, 0x98, 0x3015 }, /* 400 3.5 */ + { 0x2, 0x98, 0x6012 }, /* 400 6.0 */ + { 0x2, 0x98, 0x900F }, /* 400 9.5 */ + { 0x4, 0x98, 0x0018 }, /* 600 0.0 */ + { 0x4, 0x98, 0x3015 }, /* 600 3.5 */ + { 0x4, 0x98, 0x6012 }, /* 600 6.0 */ + { 0x5, 0x76, 0x0018 }, /* 800 0.0 */ + { 0x5, 0x76, 0x3015 }, /* 800 3.5 */ + { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */ +}; + +/* FIXME - After table is updated in Bspec */ +/* Voltage Swing Programming for VccIO 0.95V for eDP */ +static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_95V[] = { + /* Voltage mV db */ + { 0x0, 0x00, 0x00 }, /* 200 0.0 */ + { 0x0, 0x00, 0x00 }, /* 200 1.5 */ + { 0x0, 0x00, 0x00 }, /* 200 4.0 */ + { 0x0, 0x00, 0x00 }, /* 200 6.0 */ + { 0x0, 0x00, 0x00 }, /* 250 0.0 */ + { 0x0, 0x00, 0x00 }, /* 250 1.5 */ + { 0x0, 0x00, 0x00 }, /* 250 4.0 */ + { 0x0, 0x00, 0x00 }, /* 300 0.0 */ + { 0x0, 0x00, 0x00 }, /* 300 1.5 */ + { 0x0, 0x00, 0x00 }, /* 350 0.0 */ +}; + +/* Voltage Swing Programming for VccIO 1.05V for DP */ +static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_1_05V[] = { + /* Voltage mV db */ + { 0x2, 0x98, 0x0018 }, /* 400 0.0 */ + { 0x2, 0x98, 0x3015 }, /* 400 3.5 */ + { 0x2, 0x98, 0x6012 }, /* 400 6.0 */ + { 0x2, 0x98, 0x900F }, /* 400 9.5 */ + { 0x4, 0x98, 0x0018 }, /* 600 0.0 */ + { 0x4, 0x98, 0x3015 }, /* 600 3.5 */ + { 0x4, 0x98, 0x6012 }, /* 600 6.0 */ + { 0x5, 0x71, 0x0018 }, /* 800 0.0 */ + { 0x5, 0x71, 0x3015 }, /* 800 3.5 */ + { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */ +}; + +/* FIXME - After table is updated in Bspec */ +/* Voltage Swing Programming for VccIO 1.05V for eDP */ +static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_1_05V[] = { + /* Voltage mV db */ + { 0x0, 0x00, 0x00 }, /* 200 0.0 */ + { 0x0, 0x00, 0x00 }, /* 200 1.5 */ + { 0x0, 0x00, 0x00 }, /* 200 4.0 */ + { 0x0, 0x00, 0x00 }, /* 200 6.0 */ + { 0x0, 0x00, 0x00 }, /* 250 0.0 */ + { 0x0, 0x00, 0x00 }, /* 250 1.5 */ + { 0x0, 0x00, 0x00 }, /* 250 4.0 */ + { 0x0, 0x00, 0x00 }, /* 300 0.0 */ + { 0x0, 0x00, 0x00 }, /* 300 1.5 */ + { 0x0, 0x00, 0x00 }, /* 350 0.0 */ +}; + +struct icl_mg_phy_ddi_buf_trans { + u32 cri_txdeemph_override_5_0; + u32 cri_txdeemph_override_11_6; + u32 
cri_txdeemph_override_17_12; +}; + +static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations[] = { + /* Voltage swing pre-emphasis */ + { 0x0, 0x1B, 0x00 }, /* 0 0 */ + { 0x0, 0x23, 0x08 }, /* 0 1 */ + { 0x0, 0x2D, 0x12 }, /* 0 2 */ + { 0x0, 0x00, 0x00 }, /* 0 3 */ + { 0x0, 0x23, 0x00 }, /* 1 0 */ + { 0x0, 0x2B, 0x09 }, /* 1 1 */ + { 0x0, 0x2E, 0x11 }, /* 1 2 */ + { 0x0, 0x2F, 0x00 }, /* 2 0 */ + { 0x0, 0x33, 0x0C }, /* 2 1 */ + { 0x0, 0x00, 0x00 }, /* 3 0 */ +}; + static const struct ddi_buf_trans * bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) { @@ -875,7 +994,7 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll) { - switch (pll->id) { + switch (pll->info->id) { case DPLL_ID_WRPLL1: return PORT_CLK_SEL_WRPLL1; case DPLL_ID_WRPLL2: @@ -889,7 +1008,7 @@ static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll) case DPLL_ID_LCPLL_2700: return PORT_CLK_SEL_LCPLL_2700; default: - MISSING_CASE(pll->id); + MISSING_CASE(pll->info->id); return PORT_CLK_SEL_NONE; } } @@ -2131,7 +2250,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder, /* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */ val = I915_READ(DPCLKA_CFGCR0); val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); - val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->id, port); + val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port); I915_WRITE(DPCLKA_CFGCR0, val); /* @@ -2148,7 +2267,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder, val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) | DPLL_CTRL2_DDI_CLK_SEL_MASK(port)); - val |= (DPLL_CTRL2_DDI_CLK_SEL(pll->id, port) | + val |= (DPLL_CTRL2_DDI_CLK_SEL(pll->info->id, port) | DPLL_CTRL2_DDI_SEL_OVERRIDE(port)); I915_WRITE(DPLL_CTRL2, val); @@ -2424,12 +2543,14 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + struct drm_connector *connector = conn_state->connector; enum port port = encoder->port; - intel_hdmi_handle_sink_scrambling(encoder, - conn_state->connector, - crtc_state->hdmi_high_tmds_clock_ratio, - crtc_state->hdmi_scrambling); + if (!intel_hdmi_handle_sink_scrambling(encoder, connector, + crtc_state->hdmi_high_tmds_clock_ratio, + crtc_state->hdmi_scrambling)) + DRM_ERROR("[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n", + connector->base.id, connector->name); /* Display WA #1143: skl,kbl,cfl */ if (IS_GEN9_BC(dev_priv)) { @@ -2520,13 +2641,16 @@ static void intel_disable_ddi_hdmi(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { + struct drm_connector *connector = old_conn_state->connector; + if (old_crtc_state->has_audio) intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state); - intel_hdmi_handle_sink_scrambling(encoder, - old_conn_state->connector, - false, false); + if (!intel_hdmi_handle_sink_scrambling(encoder, connector, + false, false)) + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Failed to reset sink scrambling/TMDS bit clock ratio\n", + connector->base.id, connector->name); } static void intel_disable_ddi(struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 3dd350f7b8e6..a32ba72c514e 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ 
b/drivers/gpu/drm/i915/intel_device_info.c @@ -83,11 +83,11 @@ static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p) { int s; - drm_printf(p, "slice mask: %04x\n", sseu->slice_mask); - drm_printf(p, "slice total: %u\n", hweight8(sseu->slice_mask)); + drm_printf(p, "slice total: %u, mask=%04x\n", + hweight8(sseu->slice_mask), sseu->slice_mask); drm_printf(p, "subslice total: %u\n", sseu_subslice_total(sseu)); - for (s = 0; s < ARRAY_SIZE(sseu->subslice_mask); s++) { - drm_printf(p, "slice%d %u subslices mask=%04x\n", + for (s = 0; s < sseu->max_slices; s++) { + drm_printf(p, "slice%d: %u subslices, mask=%04x\n", s, hweight8(sseu->subslice_mask[s]), sseu->subslice_mask[s]); } @@ -158,6 +158,45 @@ static u16 compute_eu_total(const struct sseu_dev_info *sseu) return total; } +static void gen11_sseu_info_init(struct drm_i915_private *dev_priv) +{ + struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu; + u8 s_en; + u32 ss_en, ss_en_mask; + u8 eu_en; + int s; + + sseu->max_slices = 1; + sseu->max_subslices = 8; + sseu->max_eus_per_subslice = 8; + + s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK; + ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE); + ss_en_mask = BIT(sseu->max_subslices) - 1; + eu_en = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK); + + for (s = 0; s < sseu->max_slices; s++) { + if (s_en & BIT(s)) { + int ss_idx = sseu->max_subslices * s; + int ss; + + sseu->slice_mask |= BIT(s); + sseu->subslice_mask[s] = (ss_en >> ss_idx) & ss_en_mask; + for (ss = 0; ss < sseu->max_subslices; ss++) { + if (sseu->subslice_mask[s] & BIT(ss)) + sseu_set_eus(sseu, s, ss, eu_en); + } + } + } + sseu->eu_per_subslice = hweight8(eu_en); + sseu->eu_total = compute_eu_total(sseu); + + /* ICL has no power gating restrictions. 
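+	 * Slice, subslice and EU power gating can therefore all be
+	 * advertised, regardless of the fuse values decoded above.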
*/ + sseu->has_slice_pg = 1; + sseu->has_subslice_pg = 1; + sseu->has_eu_pg = 1; +} + static void gen10_sseu_info_init(struct drm_i915_private *dev_priv) { struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu; @@ -557,6 +596,52 @@ static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv) return base_freq + frac_freq; } +static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv, + u32 rpm_config_reg) +{ + u32 f19_2_mhz = 19200; + u32 f24_mhz = 24000; + u32 crystal_clock = (rpm_config_reg & + GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >> + GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT; + + switch (crystal_clock) { + case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ: + return f19_2_mhz; + case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ: + return f24_mhz; + default: + MISSING_CASE(crystal_clock); + return 0; + } +} + +static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv, + u32 rpm_config_reg) +{ + u32 f19_2_mhz = 19200; + u32 f24_mhz = 24000; + u32 f25_mhz = 25000; + u32 f38_4_mhz = 38400; + u32 crystal_clock = (rpm_config_reg & + GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >> + GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT; + + switch (crystal_clock) { + case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ: + return f24_mhz; + case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ: + return f19_2_mhz; + case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ: + return f38_4_mhz; + case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ: + return f25_mhz; + default: + MISSING_CASE(crystal_clock); + return 0; + } +} + static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv) { u32 f12_5_mhz = 12500; @@ -597,10 +682,9 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv) } return freq; - } else if (INTEL_GEN(dev_priv) <= 10) { + } else if (INTEL_GEN(dev_priv) <= 11) { u32 ctc_reg = I915_READ(CTC_MODE); u32 freq = 0; - u32 rpm_config_reg = 0; /* First figure out the reference frequency. 
There are 2 ways * we can compute the frequency, either through the @@ -610,20 +694,14 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv) if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) { freq = read_reference_ts_freq(dev_priv); } else { - u32 crystal_clock; - - rpm_config_reg = I915_READ(RPM_CONFIG0); - crystal_clock = (rpm_config_reg & - GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >> - GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT; - switch (crystal_clock) { - case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ: - freq = f19_2_mhz; - break; - case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ: - freq = f24_mhz; - break; - } + u32 rpm_config_reg = I915_READ(RPM_CONFIG0); + + if (INTEL_GEN(dev_priv) <= 10) + freq = gen10_get_crystal_clock_freq(dev_priv, + rpm_config_reg); + else + freq = gen11_get_crystal_clock_freq(dev_priv, + rpm_config_reg); /* Now figure out how the command stream's timestamp * register increments from this frequency (it might @@ -768,8 +846,10 @@ void intel_device_info_runtime_init(struct intel_device_info *info) broadwell_sseu_info_init(dev_priv); else if (INTEL_GEN(dev_priv) == 9) gen9_sseu_info_init(dev_priv); - else if (INTEL_GEN(dev_priv) >= 10) + else if (INTEL_GEN(dev_priv) == 10) gen10_sseu_info_init(dev_priv); + else if (INTEL_INFO(dev_priv)->gen >= 11) + gen11_sseu_info_init(dev_priv); /* Initialize command stream timestamp frequency */ info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv); @@ -780,3 +860,50 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps, { drm_printf(p, "scheduler: %x\n", caps->scheduler); } + +/* + * Determine which engines are fused off in our particular hardware. Since the + * fuse register is in the blitter powerwell, we need forcewake to be ready at + * this point (but later we need to prune the forcewake domains for engines that + * are indeed fused off). 
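+ *
+ * E.g. a (hypothetical) media_fuse of 0x00030002 decodes to
+ * vdbox_disable = 0x02, i.e. VCS1 fused off, and vebox_disable = 0x03,
+ * i.e. VECS0 and VECS1 fused off (engines absent from ring_mask are
+ * skipped).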
+ */ +void intel_device_info_init_mmio(struct drm_i915_private *dev_priv) +{ + struct intel_device_info *info = mkwrite_device_info(dev_priv); + u8 vdbox_disable, vebox_disable; + u32 media_fuse; + int i; + + if (INTEL_GEN(dev_priv) < 11) + return; + + media_fuse = I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE); + + vdbox_disable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK; + vebox_disable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >> + GEN11_GT_VEBOX_DISABLE_SHIFT; + + DRM_DEBUG_DRIVER("vdbox disable: %04x\n", vdbox_disable); + for (i = 0; i < I915_MAX_VCS; i++) { + if (!HAS_ENGINE(dev_priv, _VCS(i))) + continue; + + if (!(BIT(i) & vdbox_disable)) + continue; + + info->ring_mask &= ~ENGINE_MASK(_VCS(i)); + DRM_DEBUG_DRIVER("vcs%u fused off\n", i); + } + + DRM_DEBUG_DRIVER("vebox disable: %04x\n", vebox_disable); + for (i = 0; i < I915_MAX_VECS; i++) { + if (!HAS_ENGINE(dev_priv, _VECS(i))) + continue; + + if (!(BIT(i) & vebox_disable)) + continue; + + info->ring_mask &= ~ENGINE_MASK(_VECS(i)); + DRM_DEBUG_DRIVER("vecs%u fused off\n", i); + } +} diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 0835752c8b22..933e31669557 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -114,7 +114,7 @@ enum intel_platform { func(has_ipc); #define GEN_MAX_SLICES (6) /* CNL upper bound */ -#define GEN_MAX_SUBSLICES (7) +#define GEN_MAX_SUBSLICES (8) /* ICL upper bound */ struct sseu_dev_info { u8 slice_mask; @@ -247,6 +247,8 @@ void intel_device_info_dump_runtime(const struct intel_device_info *info, void intel_device_info_dump_topology(const struct sseu_dev_info *sseu, struct drm_printer *p); +void intel_device_info_init_mmio(struct drm_i915_private *dev_priv); + void intel_driver_caps_print(const struct intel_driver_caps *caps, struct drm_printer *p); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3b48fd2561fe..3acd75753a31 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2858,6 +2858,9 @@ valid_fb: return; } + obj = intel_fb_obj(fb); + intel_fb_obj_flush(obj, ORIGIN_DIRTYFB); + plane_state->src_x = 0; plane_state->src_y = 0; plane_state->src_w = fb->width << 16; @@ -2871,7 +2874,6 @@ valid_fb: intel_state->base.src = drm_plane_state_src(plane_state); intel_state->base.dst = drm_plane_state_dest(plane_state); - obj = intel_fb_obj(fb); if (i915_gem_object_is_tiled(obj)) dev_priv->preserve_bios_swizzle = true; @@ -4762,10 +4764,13 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, /* range checks */ if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H || - dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H || - - src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H || - dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) { + dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H || + (IS_GEN11(dev_priv) && + (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H || + dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) || + (!IS_GEN11(dev_priv) && + (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H || + dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) { DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u " "size is out of scaler range\n", intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h); @@ -8766,8 +8771,8 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, intel_get_shared_dpll_by_id(dev_priv, pll_id); pll = pipe_config->shared_dpll; - WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll, - 
&pipe_config->dpll_hw_state)); + WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll, + &pipe_config->dpll_hw_state)); tmp = pipe_config->dpll_hw_state.dpll; pipe_config->pixel_multiplier = @@ -9243,8 +9248,8 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc, pll = pipe_config->shared_dpll; if (pll) { - WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll, - &pipe_config->dpll_hw_state)); + WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll, + &pipe_config->dpll_hw_state)); } /* @@ -11643,11 +11648,11 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv, memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); - DRM_DEBUG_KMS("%s\n", pll->name); + DRM_DEBUG_KMS("%s\n", pll->info->name); - active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state); + active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state); - if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) { + if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) { I915_STATE_WARN(!pll->on && pll->active_mask, "pll in active use but not on in sw tracking\n"); I915_STATE_WARN(pll->on && !pll->active_mask, @@ -12140,6 +12145,9 @@ static void intel_update_crtc(struct drm_crtc *crtc, if (modeset) { update_scanline_offset(intel_crtc); dev_priv->display.crtc_enable(pipe_config, state); + + /* vblanks work again, re-enable pipe CRC. */ + intel_crtc_enable_pipe_crc(intel_crtc); } else { intel_pre_plane_update(to_intel_crtc_state(old_crtc_state), pipe_config); @@ -12320,6 +12328,13 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) if (old_crtc_state->active) { intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask); + + /* + * We need to disable pipe CRC before disabling the pipe, + * or we race against vblank off. + */ + intel_crtc_disable_pipe_crc(intel_crtc); + dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state); intel_crtc->active = false; intel_fbc_disable(intel_crtc); @@ -12778,6 +12793,8 @@ intel_prepare_plane_fb(struct drm_plane *plane, if (ret) return ret; + intel_fb_obj_flush(obj, ORIGIN_DIRTYFB); + if (!new_state->fence) { /* implicit fencing */ struct dma_fence *fence; @@ -13165,8 +13182,9 @@ intel_legacy_cursor_update(struct drm_plane *plane, if (ret) goto out_unlock; - old_fb = old_plane_state->fb; + intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP); + old_fb = old_plane_state->fb; i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb), intel_plane->frontbuffer_bit); @@ -13553,10 +13571,17 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) /* initialize shared scalers */ intel_crtc_init_scalers(intel_crtc, crtc_state); - BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || - dev_priv->plane_to_crtc_mapping[primary->i9xx_plane] != NULL); - dev_priv->plane_to_crtc_mapping[primary->i9xx_plane] = intel_crtc; - dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = intel_crtc; + BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) || + dev_priv->pipe_to_crtc_mapping[pipe] != NULL); + dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc; + + if (INTEL_GEN(dev_priv) < 9) { + enum i9xx_plane_id i9xx_plane = primary->i9xx_plane; + + BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || + dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL); + dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc; + } drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); @@ -15101,8 +15126,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) for (i = 0; i < dev_priv->num_shared_dpll; i++) { struct 
intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; - pll->on = pll->funcs.get_hw_state(dev_priv, pll, - &pll->state.hw_state); + pll->on = pll->info->funcs->get_hw_state(dev_priv, pll, + &pll->state.hw_state); pll->state.crtc_mask = 0; for_each_intel_crtc(dev, crtc) { struct intel_crtc_state *crtc_state = @@ -15115,7 +15140,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) pll->active_mask = pll->state.crtc_mask; DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n", - pll->name, pll->state.crtc_mask, pll->on); + pll->info->name, pll->state.crtc_mask, pll->on); } for_each_intel_encoder(dev, encoder) { @@ -15289,9 +15314,10 @@ intel_modeset_setup_hw_state(struct drm_device *dev, if (!pll->on || pll->active_mask) continue; - DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name); + DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", + pll->info->name); - pll->funcs.disable(dev_priv, pll); + pll->info->funcs->disable(dev_priv, pll); pll->on = false; } diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 9a4a51e79fa1..62f82c4298ac 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -43,7 +43,6 @@ #include <drm/i915_drm.h> #include "i915_drv.h" -#define DP_LINK_CHECK_TIMEOUT (10 * 1000) #define DP_DPRX_ESI_LEN 14 /* Compliance test status bits */ @@ -92,8 +91,6 @@ static const struct dp_link_dpll chv_dpll[] = { { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } }, { 270000, /* m2_int = 27, m2_fraction = 0 */ { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }, - { 540000, /* m2_int = 27, m2_fraction = 0 */ - { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } } }; /** @@ -2901,10 +2898,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp, } } else { - if (IS_CHERRYVIEW(dev_priv)) - *DP &= ~DP_LINK_TRAIN_MASK_CHV; - else - *DP &= ~DP_LINK_TRAIN_MASK; + *DP &= ~DP_LINK_TRAIN_MASK; switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { case DP_TRAINING_PATTERN_DISABLE: @@ -2917,12 +2911,8 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp, *DP |= DP_LINK_TRAIN_PAT_2; break; case DP_TRAINING_PATTERN_3: - if (IS_CHERRYVIEW(dev_priv)) { - *DP |= DP_LINK_TRAIN_PAT_3_CHV; - } else { - DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n"); - *DP |= DP_LINK_TRAIN_PAT_2; - } + DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n"); + *DP |= DP_LINK_TRAIN_PAT_2; break; } } @@ -3661,10 +3651,7 @@ intel_dp_link_down(struct intel_encoder *encoder, DP &= ~DP_LINK_TRAIN_MASK_CPT; DP |= DP_LINK_TRAIN_PAT_IDLE_CPT; } else { - if (IS_CHERRYVIEW(dev_priv)) - DP &= ~DP_LINK_TRAIN_MASK_CHV; - else - DP &= ~DP_LINK_TRAIN_MASK; + DP &= ~DP_LINK_TRAIN_MASK; DP |= DP_LINK_TRAIN_PAT_IDLE; } I915_WRITE(intel_dp->output_reg, DP); diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index 51c5ae4e9116..d5e114e9660b 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -118,10 +118,10 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv, if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state))) return; - cur_state = pll->funcs.get_hw_state(dev_priv, pll, &hw_state); + cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state); I915_STATE_WARN(cur_state != state, "%s assertion failure (expected %s, current %s)\n", - pll->name, onoff(state), onoff(cur_state)); + pll->info->name, onoff(state), onoff(cur_state)); } /** @@ -143,11 +143,11 @@ void intel_prepare_shared_dpll(struct intel_crtc 
*crtc) mutex_lock(&dev_priv->dpll_lock); WARN_ON(!pll->state.crtc_mask); if (!pll->active_mask) { - DRM_DEBUG_DRIVER("setting up %s\n", pll->name); + DRM_DEBUG_DRIVER("setting up %s\n", pll->info->name); WARN_ON(pll->on); assert_shared_dpll_disabled(dev_priv, pll); - pll->funcs.prepare(dev_priv, pll); + pll->info->funcs->prepare(dev_priv, pll); } mutex_unlock(&dev_priv->dpll_lock); } @@ -179,7 +179,7 @@ void intel_enable_shared_dpll(struct intel_crtc *crtc) pll->active_mask |= crtc_mask; DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n", - pll->name, pll->active_mask, pll->on, + pll->info->name, pll->active_mask, pll->on, crtc->base.base.id); if (old_mask) { @@ -189,8 +189,8 @@ void intel_enable_shared_dpll(struct intel_crtc *crtc) } WARN_ON(pll->on); - DRM_DEBUG_KMS("enabling %s\n", pll->name); - pll->funcs.enable(dev_priv, pll); + DRM_DEBUG_KMS("enabling %s\n", pll->info->name); + pll->info->funcs->enable(dev_priv, pll); pll->on = true; out: @@ -221,7 +221,7 @@ void intel_disable_shared_dpll(struct intel_crtc *crtc) goto out; DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n", - pll->name, pll->active_mask, pll->on, + pll->info->name, pll->active_mask, pll->on, crtc->base.base.id); assert_shared_dpll_enabled(dev_priv, pll); @@ -231,8 +231,8 @@ void intel_disable_shared_dpll(struct intel_crtc *crtc) if (pll->active_mask) goto out; - DRM_DEBUG_KMS("disabling %s\n", pll->name); - pll->funcs.disable(dev_priv, pll); + DRM_DEBUG_KMS("disabling %s\n", pll->info->name); + pll->info->funcs->disable(dev_priv, pll); pll->on = false; out: @@ -263,7 +263,8 @@ intel_find_shared_dpll(struct intel_crtc *crtc, &shared_dpll[i].hw_state, sizeof(crtc_state->dpll_hw_state)) == 0) { DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n", - crtc->base.base.id, crtc->base.name, pll->name, + crtc->base.base.id, crtc->base.name, + pll->info->name, shared_dpll[i].crtc_mask, pll->active_mask); return pll; @@ -275,7 +276,8 @@ intel_find_shared_dpll(struct intel_crtc *crtc, pll = &dev_priv->shared_dplls[i]; if (shared_dpll[i].crtc_mask == 0) { DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n", - crtc->base.base.id, crtc->base.name, pll->name); + crtc->base.base.id, crtc->base.name, + pll->info->name); return pll; } } @@ -289,19 +291,19 @@ intel_reference_shared_dpll(struct intel_shared_dpll *pll, { struct intel_shared_dpll_state *shared_dpll; struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); - enum intel_dpll_id i = pll->id; + const enum intel_dpll_id id = pll->info->id; shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state); - if (shared_dpll[i].crtc_mask == 0) - shared_dpll[i].hw_state = + if (shared_dpll[id].crtc_mask == 0) + shared_dpll[id].hw_state = crtc_state->dpll_hw_state; crtc_state->shared_dpll = pll; - DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name, + DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->info->name, pipe_name(crtc->pipe)); - shared_dpll[pll->id].crtc_mask |= 1 << crtc->pipe; + shared_dpll[id].crtc_mask |= 1 << crtc->pipe; } /** @@ -341,15 +343,16 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { + const enum intel_dpll_id id = pll->info->id; uint32_t val; if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) return false; - val = I915_READ(PCH_DPLL(pll->id)); + val = I915_READ(PCH_DPLL(id)); hw_state->dpll = val; - hw_state->fp0 = I915_READ(PCH_FP0(pll->id)); - hw_state->fp1 = I915_READ(PCH_FP1(pll->id)); + 
hw_state->fp0 = I915_READ(PCH_FP0(id)); + hw_state->fp1 = I915_READ(PCH_FP1(id)); intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); @@ -359,8 +362,10 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { - I915_WRITE(PCH_FP0(pll->id), pll->state.hw_state.fp0); - I915_WRITE(PCH_FP1(pll->id), pll->state.hw_state.fp1); + const enum intel_dpll_id id = pll->info->id; + + I915_WRITE(PCH_FP0(id), pll->state.hw_state.fp0); + I915_WRITE(PCH_FP1(id), pll->state.hw_state.fp1); } static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) @@ -379,13 +384,15 @@ static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { + const enum intel_dpll_id id = pll->info->id; + /* PCH refclock must be enabled first */ ibx_assert_pch_refclk_enabled(dev_priv); - I915_WRITE(PCH_DPLL(pll->id), pll->state.hw_state.dpll); + I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll); /* Wait for the clocks to stabilize. */ - POSTING_READ(PCH_DPLL(pll->id)); + POSTING_READ(PCH_DPLL(id)); udelay(150); /* The pixel multiplier can only be updated once the @@ -393,14 +400,15 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv, * * So write it again. */ - I915_WRITE(PCH_DPLL(pll->id), pll->state.hw_state.dpll); - POSTING_READ(PCH_DPLL(pll->id)); + I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll); + POSTING_READ(PCH_DPLL(id)); udelay(200); } static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { + const enum intel_dpll_id id = pll->info->id; struct drm_device *dev = &dev_priv->drm; struct intel_crtc *crtc; @@ -410,8 +418,8 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv, assert_pch_transcoder_disabled(dev_priv, crtc->pipe); } - I915_WRITE(PCH_DPLL(pll->id), 0); - POSTING_READ(PCH_DPLL(pll->id)); + I915_WRITE(PCH_DPLL(id), 0); + POSTING_READ(PCH_DPLL(id)); udelay(200); } @@ -429,7 +437,8 @@ ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, pll = &dev_priv->shared_dplls[i]; DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n", - crtc->base.base.id, crtc->base.name, pll->name); + crtc->base.base.id, crtc->base.name, + pll->info->name); } else { pll = intel_find_shared_dpll(crtc, crtc_state, DPLL_ID_PCH_PLL_A, @@ -466,8 +475,10 @@ static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = { static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { - I915_WRITE(WRPLL_CTL(pll->id), pll->state.hw_state.wrpll); - POSTING_READ(WRPLL_CTL(pll->id)); + const enum intel_dpll_id id = pll->info->id; + + I915_WRITE(WRPLL_CTL(id), pll->state.hw_state.wrpll); + POSTING_READ(WRPLL_CTL(id)); udelay(20); } @@ -482,11 +493,12 @@ static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv, static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { + const enum intel_dpll_id id = pll->info->id; uint32_t val; - val = I915_READ(WRPLL_CTL(pll->id)); - I915_WRITE(WRPLL_CTL(pll->id), val & ~WRPLL_PLL_ENABLE); - POSTING_READ(WRPLL_CTL(pll->id)); + val = I915_READ(WRPLL_CTL(id)); + I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE); + POSTING_READ(WRPLL_CTL(id)); } static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv, @@ -503,12 +515,13 @@ static bool hsw_ddi_wrpll_get_hw_state(struct 
drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { + const enum intel_dpll_id id = pll->info->id; uint32_t val; if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) return false; - val = I915_READ(WRPLL_CTL(pll->id)); + val = I915_READ(WRPLL_CTL(id)); hw_state->wrpll = val; intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); @@ -914,13 +927,15 @@ static const struct skl_dpll_regs skl_dpll_regs[4] = { static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { + const enum intel_dpll_id id = pll->info->id; uint32_t val; val = I915_READ(DPLL_CTRL1); - val &= ~(DPLL_CTRL1_HDMI_MODE(pll->id) | DPLL_CTRL1_SSC(pll->id) | - DPLL_CTRL1_LINK_RATE_MASK(pll->id)); - val |= pll->state.hw_state.ctrl1 << (pll->id * 6); + val &= ~(DPLL_CTRL1_HDMI_MODE(id) | + DPLL_CTRL1_SSC(id) | + DPLL_CTRL1_LINK_RATE_MASK(id)); + val |= pll->state.hw_state.ctrl1 << (id * 6); I915_WRITE(DPLL_CTRL1, val); POSTING_READ(DPLL_CTRL1); @@ -930,24 +945,25 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { const struct skl_dpll_regs *regs = skl_dpll_regs; + const enum intel_dpll_id id = pll->info->id; skl_ddi_pll_write_ctrl1(dev_priv, pll); - I915_WRITE(regs[pll->id].cfgcr1, pll->state.hw_state.cfgcr1); - I915_WRITE(regs[pll->id].cfgcr2, pll->state.hw_state.cfgcr2); - POSTING_READ(regs[pll->id].cfgcr1); - POSTING_READ(regs[pll->id].cfgcr2); + I915_WRITE(regs[id].cfgcr1, pll->state.hw_state.cfgcr1); + I915_WRITE(regs[id].cfgcr2, pll->state.hw_state.cfgcr2); + POSTING_READ(regs[id].cfgcr1); + POSTING_READ(regs[id].cfgcr2); /* the enable bit is always bit 31 */ - I915_WRITE(regs[pll->id].ctl, - I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE); + I915_WRITE(regs[id].ctl, + I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE); if (intel_wait_for_register(dev_priv, DPLL_STATUS, - DPLL_LOCK(pll->id), - DPLL_LOCK(pll->id), + DPLL_LOCK(id), + DPLL_LOCK(id), 5)) - DRM_ERROR("DPLL %d not locked\n", pll->id); + DRM_ERROR("DPLL %d not locked\n", id); } static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv, @@ -960,11 +976,12 @@ static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { const struct skl_dpll_regs *regs = skl_dpll_regs; + const enum intel_dpll_id id = pll->info->id; /* the enable bit is always bit 31 */ - I915_WRITE(regs[pll->id].ctl, - I915_READ(regs[pll->id].ctl) & ~LCPLL_PLL_ENABLE); - POSTING_READ(regs[pll->id].ctl); + I915_WRITE(regs[id].ctl, + I915_READ(regs[id].ctl) & ~LCPLL_PLL_ENABLE); + POSTING_READ(regs[id].ctl); } static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv, @@ -978,6 +995,7 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, { uint32_t val; const struct skl_dpll_regs *regs = skl_dpll_regs; + const enum intel_dpll_id id = pll->info->id; bool ret; if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) @@ -985,17 +1003,17 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, ret = false; - val = I915_READ(regs[pll->id].ctl); + val = I915_READ(regs[id].ctl); if (!(val & LCPLL_PLL_ENABLE)) goto out; val = I915_READ(DPLL_CTRL1); - hw_state->ctrl1 = (val >> (pll->id * 6)) & 0x3f; + hw_state->ctrl1 = (val >> (id * 6)) & 0x3f; /* avoid reading back stale values if HDMI mode is not enabled */ - if (val & DPLL_CTRL1_HDMI_MODE(pll->id)) { - hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1); - hw_state->cfgcr2 = 
I915_READ(regs[pll->id].cfgcr2); + if (val & DPLL_CTRL1_HDMI_MODE(id)) { + hw_state->cfgcr1 = I915_READ(regs[id].cfgcr1); + hw_state->cfgcr2 = I915_READ(regs[id].cfgcr2); } ret = true; @@ -1011,6 +1029,7 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv, { uint32_t val; const struct skl_dpll_regs *regs = skl_dpll_regs; + const enum intel_dpll_id id = pll->info->id; bool ret; if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) @@ -1019,12 +1038,12 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv, ret = false; /* DPLL0 is always enabled since it drives CDCLK */ - val = I915_READ(regs[pll->id].ctl); + val = I915_READ(regs[id].ctl); if (WARN_ON(!(val & LCPLL_PLL_ENABLE))) goto out; val = I915_READ(DPLL_CTRL1); - hw_state->ctrl1 = (val >> (pll->id * 6)) & 0x3f; + hw_state->ctrl1 = (val >> (id * 6)) & 0x3f; ret = true; @@ -1424,7 +1443,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { uint32_t temp; - enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */ + enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */ enum dpio_phy phy; enum dpio_channel ch; @@ -1543,7 +1562,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { - enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */ + enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */ uint32_t temp; temp = I915_READ(BXT_PORT_PLL_ENABLE(port)); @@ -1566,7 +1585,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { - enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */ + enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */ uint32_t val; bool ret; enum dpio_phy phy; @@ -1824,7 +1843,7 @@ bxt_get_dpll(struct intel_crtc *crtc, pll = intel_get_shared_dpll_by_id(dev_priv, i); DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n", - crtc->base.base.id, crtc->base.name, pll->name); + crtc->base.base.id, crtc->base.name, pll->info->name); intel_reference_shared_dpll(pll, crtc_state); @@ -1877,13 +1896,6 @@ static void intel_ddi_pll_init(struct drm_device *dev) } } -struct dpll_info { - const char *name; - const int id; - const struct intel_shared_dpll_funcs *funcs; - uint32_t flags; -}; - struct intel_dpll_mgr { const struct dpll_info *dpll_info; @@ -1896,9 +1908,9 @@ struct intel_dpll_mgr { }; static const struct dpll_info pch_plls[] = { - { "PCH DPLL A", DPLL_ID_PCH_PLL_A, &ibx_pch_dpll_funcs, 0 }, - { "PCH DPLL B", DPLL_ID_PCH_PLL_B, &ibx_pch_dpll_funcs, 0 }, - { NULL, -1, NULL, 0 }, + { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 }, + { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 }, + { }, }; static const struct intel_dpll_mgr pch_pll_mgr = { @@ -1908,13 +1920,13 @@ static const struct intel_dpll_mgr pch_pll_mgr = { }; static const struct dpll_info hsw_plls[] = { - { "WRPLL 1", DPLL_ID_WRPLL1, &hsw_ddi_wrpll_funcs, 0 }, - { "WRPLL 2", DPLL_ID_WRPLL2, &hsw_ddi_wrpll_funcs, 0 }, - { "SPLL", DPLL_ID_SPLL, &hsw_ddi_spll_funcs, 0 }, - { "LCPLL 810", DPLL_ID_LCPLL_810, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON }, - { "LCPLL 1350", DPLL_ID_LCPLL_1350, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON }, - { "LCPLL 2700", DPLL_ID_LCPLL_2700, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON }, - { NULL, -1, NULL, }, + { "WRPLL 1", 
&hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 }, + { "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 }, + { "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 }, + { "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON }, + { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON }, + { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON }, + { }, }; static const struct intel_dpll_mgr hsw_pll_mgr = { @@ -1924,11 +1936,11 @@ static const struct intel_dpll_mgr hsw_pll_mgr = { }; static const struct dpll_info skl_plls[] = { - { "DPLL 0", DPLL_ID_SKL_DPLL0, &skl_ddi_dpll0_funcs, INTEL_DPLL_ALWAYS_ON }, - { "DPLL 1", DPLL_ID_SKL_DPLL1, &skl_ddi_pll_funcs, 0 }, - { "DPLL 2", DPLL_ID_SKL_DPLL2, &skl_ddi_pll_funcs, 0 }, - { "DPLL 3", DPLL_ID_SKL_DPLL3, &skl_ddi_pll_funcs, 0 }, - { NULL, -1, NULL, }, + { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON }, + { "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 }, + { "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 }, + { "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 }, + { }, }; static const struct intel_dpll_mgr skl_pll_mgr = { @@ -1938,10 +1950,10 @@ static const struct intel_dpll_mgr skl_pll_mgr = { }; static const struct dpll_info bxt_plls[] = { - { "PORT PLL A", DPLL_ID_SKL_DPLL0, &bxt_ddi_pll_funcs, 0 }, - { "PORT PLL B", DPLL_ID_SKL_DPLL1, &bxt_ddi_pll_funcs, 0 }, - { "PORT PLL C", DPLL_ID_SKL_DPLL2, &bxt_ddi_pll_funcs, 0 }, - { NULL, -1, NULL, }, + { "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 }, + { "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 }, + { "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 }, + { }, }; static const struct intel_dpll_mgr bxt_pll_mgr = { @@ -1953,38 +1965,39 @@ static const struct intel_dpll_mgr bxt_pll_mgr = { static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { + const enum intel_dpll_id id = pll->info->id; uint32_t val; /* 1. Enable DPLL power in DPLL_ENABLE. */ - val = I915_READ(CNL_DPLL_ENABLE(pll->id)); + val = I915_READ(CNL_DPLL_ENABLE(id)); val |= PLL_POWER_ENABLE; - I915_WRITE(CNL_DPLL_ENABLE(pll->id), val); + I915_WRITE(CNL_DPLL_ENABLE(id), val); /* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */ if (intel_wait_for_register(dev_priv, - CNL_DPLL_ENABLE(pll->id), + CNL_DPLL_ENABLE(id), PLL_POWER_STATE, PLL_POWER_STATE, 5)) - DRM_ERROR("PLL %d Power not enabled\n", pll->id); + DRM_ERROR("PLL %d Power not enabled\n", id); /* * 3. Configure DPLL_CFGCR0 to set SSC enable/disable, * select DP mode, and set DP link rate. */ val = pll->state.hw_state.cfgcr0; - I915_WRITE(CNL_DPLL_CFGCR0(pll->id), val); + I915_WRITE(CNL_DPLL_CFGCR0(id), val); /* 4. Reab back to ensure writes completed */ - POSTING_READ(CNL_DPLL_CFGCR0(pll->id)); + POSTING_READ(CNL_DPLL_CFGCR0(id)); /* 3. Configure DPLL_CFGCR0 */ /* Avoid touch CFGCR1 if HDMI mode is not enabled */ if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) { val = pll->state.hw_state.cfgcr1; - I915_WRITE(CNL_DPLL_CFGCR1(pll->id), val); + I915_WRITE(CNL_DPLL_CFGCR1(id), val); /* 4. Reab back to ensure writes completed */ - POSTING_READ(CNL_DPLL_CFGCR1(pll->id)); + POSTING_READ(CNL_DPLL_CFGCR1(id)); } /* @@ -1997,17 +2010,17 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv, */ /* 6. Enable DPLL in DPLL_ENABLE. 
*/ - val = I915_READ(CNL_DPLL_ENABLE(pll->id)); + val = I915_READ(CNL_DPLL_ENABLE(id)); val |= PLL_ENABLE; - I915_WRITE(CNL_DPLL_ENABLE(pll->id), val); + I915_WRITE(CNL_DPLL_ENABLE(id), val); /* 7. Wait for PLL lock status in DPLL_ENABLE. */ if (intel_wait_for_register(dev_priv, - CNL_DPLL_ENABLE(pll->id), + CNL_DPLL_ENABLE(id), PLL_LOCK, PLL_LOCK, 5)) - DRM_ERROR("PLL %d not locked\n", pll->id); + DRM_ERROR("PLL %d not locked\n", id); /* * 8. If the frequency will result in a change to the voltage @@ -2027,6 +2040,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv, static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { + const enum intel_dpll_id id = pll->info->id; uint32_t val; /* @@ -2044,17 +2058,17 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv, */ /* 3. Disable DPLL through DPLL_ENABLE. */ - val = I915_READ(CNL_DPLL_ENABLE(pll->id)); + val = I915_READ(CNL_DPLL_ENABLE(id)); val &= ~PLL_ENABLE; - I915_WRITE(CNL_DPLL_ENABLE(pll->id), val); + I915_WRITE(CNL_DPLL_ENABLE(id), val); /* 4. Wait for PLL not locked status in DPLL_ENABLE. */ if (intel_wait_for_register(dev_priv, - CNL_DPLL_ENABLE(pll->id), + CNL_DPLL_ENABLE(id), PLL_LOCK, 0, 5)) - DRM_ERROR("PLL %d locked\n", pll->id); + DRM_ERROR("PLL %d locked\n", id); /* * 5. If the frequency will result in a change to the voltage @@ -2066,23 +2080,24 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv, */ /* 6. Disable DPLL power in DPLL_ENABLE. */ - val = I915_READ(CNL_DPLL_ENABLE(pll->id)); + val = I915_READ(CNL_DPLL_ENABLE(id)); val &= ~PLL_POWER_ENABLE; - I915_WRITE(CNL_DPLL_ENABLE(pll->id), val); + I915_WRITE(CNL_DPLL_ENABLE(id), val); /* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */ if (intel_wait_for_register(dev_priv, - CNL_DPLL_ENABLE(pll->id), + CNL_DPLL_ENABLE(id), PLL_POWER_STATE, 0, 5)) - DRM_ERROR("PLL %d Power not disabled\n", pll->id); + DRM_ERROR("PLL %d Power not disabled\n", id); } static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { + const enum intel_dpll_id id = pll->info->id; uint32_t val; bool ret; @@ -2091,16 +2106,16 @@ static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, ret = false; - val = I915_READ(CNL_DPLL_ENABLE(pll->id)); + val = I915_READ(CNL_DPLL_ENABLE(id)); if (!(val & PLL_ENABLE)) goto out; - val = I915_READ(CNL_DPLL_CFGCR0(pll->id)); + val = I915_READ(CNL_DPLL_CFGCR0(id)); hw_state->cfgcr0 = val; /* avoid reading back stale values if HDMI mode is not enabled */ if (val & DPLL_CFGCR0_HDMI_MODE) { - hw_state->cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll->id)); + hw_state->cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(id)); } ret = true; @@ -2372,10 +2387,10 @@ static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = { }; static const struct dpll_info cnl_plls[] = { - { "DPLL 0", DPLL_ID_SKL_DPLL0, &cnl_ddi_pll_funcs, 0 }, - { "DPLL 1", DPLL_ID_SKL_DPLL1, &cnl_ddi_pll_funcs, 0 }, - { "DPLL 2", DPLL_ID_SKL_DPLL2, &cnl_ddi_pll_funcs, 0 }, - { NULL, -1, NULL, }, + { "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 }, + { "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 }, + { "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 }, + { }, }; static const struct intel_dpll_mgr cnl_pll_mgr = { @@ -2415,13 +2430,9 @@ void intel_shared_dpll_init(struct drm_device *dev) dpll_info = dpll_mgr->dpll_info; - for (i = 0; dpll_info[i].id >= 0; i++) { + for (i = 0; dpll_info[i].name; i++) { WARN_ON(i != 
dpll_info[i].id); - - dev_priv->shared_dplls[i].id = dpll_info[i].id; - dev_priv->shared_dplls[i].name = dpll_info[i].name; - dev_priv->shared_dplls[i].funcs = *dpll_info[i].funcs; - dev_priv->shared_dplls[i].flags = dpll_info[i].flags; + dev_priv->shared_dplls[i].info = &dpll_info[i]; } dev_priv->dpll_mgr = dpll_mgr; @@ -2481,7 +2492,7 @@ void intel_release_shared_dpll(struct intel_shared_dpll *dpll, struct intel_shared_dpll_state *shared_dpll_state; shared_dpll_state = intel_atomic_get_shared_dpll_state(state); - shared_dpll_state[dpll->id].crtc_mask &= ~(1 << crtc->pipe); + shared_dpll_state[dpll->info->id].crtc_mask &= ~(1 << crtc->pipe); } /** diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h index f24ccf443d25..4febfaa90bde 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h @@ -206,6 +206,37 @@ struct intel_shared_dpll_funcs { }; /** + * struct dpll_info - display PLL platform specific info + */ +struct dpll_info { + /** + * @name: DPLL name; used for logging + */ + const char *name; + + /** + * @funcs: platform specific hooks + */ + const struct intel_shared_dpll_funcs *funcs; + + /** + * @id: unique identifier for this DPLL; should match the index in the + * dev_priv->shared_dplls array + */ + enum intel_dpll_id id; + +#define INTEL_DPLL_ALWAYS_ON (1 << 0) + /** + * @flags: + * + * INTEL_DPLL_ALWAYS_ON + * Inform the state checker that the DPLL is kept enabled even if + * not in use by any CRTC. + */ + uint32_t flags; +}; + +/** * struct intel_shared_dpll - display PLL with tracked state and users */ struct intel_shared_dpll { @@ -228,30 +259,9 @@ struct intel_shared_dpll { bool on; /** - * @name: DPLL name; used for logging + * @info: platform specific info */ - const char *name; - - /** - * @id: unique indentifier for this DPLL; should match the index in the - * dev_priv->shared_dplls array - */ - enum intel_dpll_id id; - - /** - * @funcs: platform specific hooks - */ - struct intel_shared_dpll_funcs funcs; - -#define INTEL_DPLL_ALWAYS_ON (1 << 0) - /** - * @flags: - * - * INTEL_DPLL_ALWAYS_ON - * Inform the state checker that the DPLL is kept enabled even if - * not in use by any CRTC.
- */ - uint32_t flags; + const struct dpll_info *info; }; #define SKL_DPLL0 0 diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index d4368589b355..d1452fd2a58d 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -548,6 +548,10 @@ struct intel_initial_plane_config { #define SKL_MAX_DST_W 4096 #define SKL_MIN_DST_H 8 #define SKL_MAX_DST_H 4096 +#define ICL_MAX_SRC_W 5120 +#define ICL_MAX_SRC_H 4096 +#define ICL_MAX_DST_W 5120 +#define ICL_MAX_DST_H 4096 struct intel_scaler { int in_use; @@ -1783,7 +1787,7 @@ struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); bool intel_hdmi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state); -void intel_hdmi_handle_sink_scrambling(struct intel_encoder *intel_encoder, +bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder, struct drm_connector *connector, bool high_tmds_clock_ratio, bool scrambling); @@ -1877,7 +1881,8 @@ void intel_psr_enable(struct intel_dp *intel_dp, void intel_psr_disable(struct intel_dp *intel_dp, const struct intel_crtc_state *old_crtc_state); void intel_psr_invalidate(struct drm_i915_private *dev_priv, - unsigned frontbuffer_bits); + unsigned frontbuffer_bits, + enum fb_op_origin origin); void intel_psr_flush(struct drm_i915_private *dev_priv, unsigned frontbuffer_bits, enum fb_op_origin origin); @@ -2138,8 +2143,17 @@ int intel_pipe_crc_create(struct drm_minor *minor); #ifdef CONFIG_DEBUG_FS int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name, size_t *values_cnt); +void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc); +void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc); #else #define intel_crtc_set_crc_source NULL +static inline void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc) +{ +} + +static inline void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc) +{ +} #endif extern const struct file_operations i915_display_crc_ctl_fops; #endif /* __INTEL_DRV_H__ */ diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 4ba139c27fba..12486d8f534b 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -81,13 +81,17 @@ static const struct engine_class_info intel_engine_classes[] = { }, }; +#define MAX_MMIO_BASES 3 struct engine_info { unsigned int hw_id; unsigned int uabi_id; u8 class; u8 instance; - u32 mmio_base; - unsigned irq_shift; + /* mmio bases table *must* be sorted in reverse gen order */ + struct engine_mmio_base { + u32 gen : 8; + u32 base : 24; + } mmio_bases[MAX_MMIO_BASES]; }; static const struct engine_info intel_engines[] = { @@ -96,64 +100,76 @@ static const struct engine_info intel_engines[] = { .uabi_id = I915_EXEC_RENDER, .class = RENDER_CLASS, .instance = 0, - .mmio_base = RENDER_RING_BASE, - .irq_shift = GEN8_RCS_IRQ_SHIFT, + .mmio_bases = { + { .gen = 1, .base = RENDER_RING_BASE } + }, }, [BCS] = { .hw_id = BCS_HW, .uabi_id = I915_EXEC_BLT, .class = COPY_ENGINE_CLASS, .instance = 0, - .mmio_base = BLT_RING_BASE, - .irq_shift = GEN8_BCS_IRQ_SHIFT, + .mmio_bases = { + { .gen = 6, .base = BLT_RING_BASE } + }, }, [VCS] = { .hw_id = VCS_HW, .uabi_id = I915_EXEC_BSD, .class = VIDEO_DECODE_CLASS, .instance = 0, - .mmio_base = GEN6_BSD_RING_BASE, - .irq_shift = GEN8_VCS1_IRQ_SHIFT, + .mmio_bases = { + { .gen = 11, .base = GEN11_BSD_RING_BASE }, + { .gen = 6, .base = GEN6_BSD_RING_BASE }, + { .gen = 4, .base = BSD_RING_BASE } + 
}, }, [VCS2] = { .hw_id = VCS2_HW, .uabi_id = I915_EXEC_BSD, .class = VIDEO_DECODE_CLASS, .instance = 1, - .mmio_base = GEN8_BSD2_RING_BASE, - .irq_shift = GEN8_VCS2_IRQ_SHIFT, + .mmio_bases = { + { .gen = 11, .base = GEN11_BSD2_RING_BASE }, + { .gen = 8, .base = GEN8_BSD2_RING_BASE } + }, }, [VCS3] = { .hw_id = VCS3_HW, .uabi_id = I915_EXEC_BSD, .class = VIDEO_DECODE_CLASS, .instance = 2, - .mmio_base = GEN11_BSD3_RING_BASE, - .irq_shift = 0, /* not used */ + .mmio_bases = { + { .gen = 11, .base = GEN11_BSD3_RING_BASE } + }, }, [VCS4] = { .hw_id = VCS4_HW, .uabi_id = I915_EXEC_BSD, .class = VIDEO_DECODE_CLASS, .instance = 3, - .mmio_base = GEN11_BSD4_RING_BASE, - .irq_shift = 0, /* not used */ + .mmio_bases = { + { .gen = 11, .base = GEN11_BSD4_RING_BASE } + }, }, [VECS] = { .hw_id = VECS_HW, .uabi_id = I915_EXEC_VEBOX, .class = VIDEO_ENHANCEMENT_CLASS, .instance = 0, - .mmio_base = VEBOX_RING_BASE, - .irq_shift = GEN8_VECS_IRQ_SHIFT, + .mmio_bases = { + { .gen = 11, .base = GEN11_VEBOX_RING_BASE }, + { .gen = 7, .base = VEBOX_RING_BASE } + }, }, [VECS2] = { .hw_id = VECS2_HW, .uabi_id = I915_EXEC_VEBOX, .class = VIDEO_ENHANCEMENT_CLASS, .instance = 1, - .mmio_base = GEN11_VEBOX2_RING_BASE, - .irq_shift = 0, /* not used */ + .mmio_bases = { + { .gen = 11, .base = GEN11_VEBOX2_RING_BASE } + }, }, }; @@ -223,16 +239,36 @@ __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class) } } +static u32 __engine_mmio_base(struct drm_i915_private *i915, + const struct engine_mmio_base *bases) +{ + int i; + + for (i = 0; i < MAX_MMIO_BASES; i++) + if (INTEL_GEN(i915) >= bases[i].gen) + break; + + GEM_BUG_ON(i == MAX_MMIO_BASES); + GEM_BUG_ON(!bases[i].base); + + return bases[i].base; +} + +static void __sprint_engine_name(char *name, const struct engine_info *info) +{ + WARN_ON(snprintf(name, INTEL_ENGINE_CS_MAX_NAME, "%s%u", + intel_engine_classes[info->class].name, + info->instance) >= INTEL_ENGINE_CS_MAX_NAME); +} + static int intel_engine_setup(struct drm_i915_private *dev_priv, enum intel_engine_id id) { const struct engine_info *info = &intel_engines[id]; - const struct engine_class_info *class_info; struct intel_engine_cs *engine; GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes)); - class_info = &intel_engine_classes[info->class]; BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH)); BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH)); @@ -253,35 +289,14 @@ intel_engine_setup(struct drm_i915_private *dev_priv, engine->id = id; engine->i915 = dev_priv; - WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s%u", - class_info->name, info->instance) >= - sizeof(engine->name)); + __sprint_engine_name(engine->name, info); engine->hw_id = engine->guc_id = info->hw_id; - if (INTEL_GEN(dev_priv) >= 11) { - switch (engine->id) { - case VCS: - engine->mmio_base = GEN11_BSD_RING_BASE; - break; - case VCS2: - engine->mmio_base = GEN11_BSD2_RING_BASE; - break; - case VECS: - engine->mmio_base = GEN11_VEBOX_RING_BASE; - break; - default: - /* take the original value for all other engines */ - engine->mmio_base = info->mmio_base; - break; - } - } else { - engine->mmio_base = info->mmio_base; - } - engine->irq_shift = info->irq_shift; + engine->mmio_base = __engine_mmio_base(dev_priv, info->mmio_bases); engine->class = info->class; engine->instance = info->instance; engine->uabi_id = info->uabi_id; - engine->uabi_class = class_info->uabi_class; + engine->uabi_class = intel_engine_classes[info->class].uabi_class; engine->context_size = 
__intel_engine_context_size(dev_priv, engine->class); @@ -441,6 +456,11 @@ static void intel_engine_init_timeline(struct intel_engine_cs *engine) engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id]; } +static void intel_engine_init_batch_pool(struct intel_engine_cs *engine) +{ + i915_gem_batch_pool_init(&engine->batch_pool, engine); +} + static bool csb_force_mmio(struct drm_i915_private *i915) { /* @@ -485,11 +505,9 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine) void intel_engine_setup_common(struct intel_engine_cs *engine) { intel_engine_init_execlist(engine); - intel_engine_init_timeline(engine); intel_engine_init_hangcheck(engine); - i915_gem_batch_pool_init(engine, &engine->batch_pool); - + intel_engine_init_batch_pool(engine); intel_engine_init_cmd_parser(engine); } @@ -782,10 +800,24 @@ static inline uint32_t read_subslice_reg(struct drm_i915_private *dev_priv, int slice, int subslice, i915_reg_t reg) { + uint32_t mcr_slice_subslice_mask; + uint32_t mcr_slice_subslice_select; uint32_t mcr; uint32_t ret; enum forcewake_domains fw_domains; + if (INTEL_GEN(dev_priv) >= 11) { + mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK | + GEN11_MCR_SUBSLICE_MASK; + mcr_slice_subslice_select = GEN11_MCR_SLICE(slice) | + GEN11_MCR_SUBSLICE(subslice); + } else { + mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK | + GEN8_MCR_SUBSLICE_MASK; + mcr_slice_subslice_select = GEN8_MCR_SLICE(slice) | + GEN8_MCR_SUBSLICE(subslice); + } + fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ); fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, @@ -800,14 +832,14 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice, * The HW expects the slice and sublice selectors to be reset to 0 * after reading out the registers. */ - WARN_ON_ONCE(mcr & (GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK)); - mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK); - mcr |= GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice); + WARN_ON_ONCE(mcr & mcr_slice_subslice_mask); + mcr &= ~mcr_slice_subslice_mask; + mcr |= mcr_slice_subslice_select; I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr); ret = I915_READ_FW(reg); - mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK); + mcr &= ~mcr_slice_subslice_mask; I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr); intel_uncore_forcewake_put__locked(dev_priv, fw_domains); @@ -1713,13 +1745,15 @@ static void print_request(struct drm_printer *m, struct i915_request *rq, const char *prefix) { + const char *name = rq->fence.ops->get_timeline_name(&rq->fence); + drm_printf(m, "%s%x%s [%llx:%x] prio=%d @ %dms: %s\n", prefix, rq->global_seqno, i915_request_completed(rq) ? "!" : "", rq->fence.context, rq->fence.seqno, rq->priotree.priority, jiffies_to_msecs(jiffies - rq->emitted_jiffies), - rq->timeline->common->name); + name); } static void hexdump(struct drm_printer *m, const void *buf, size_t len) @@ -1825,12 +1859,15 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine, ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine)); read = GEN8_CSB_READ_PTR(ptr); write = GEN8_CSB_WRITE_PTR(ptr); - drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s\n", + drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s, tasklet queued? 
%s (%s)\n", read, execlists->csb_head, write, intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)), yesno(test_bit(ENGINE_IRQ_EXECLIST, - &engine->irq_posted))); + &engine->irq_posted)), + yesno(test_bit(TASKLET_STATE_SCHED, + &engine->execlists.tasklet.state)), + enableddisabled(!atomic_read(&engine->execlists.tasklet.count))); if (read >= GEN8_CSB_ENTRIES) read = 0; if (write >= GEN8_CSB_ENTRIES) @@ -1929,12 +1966,16 @@ void intel_engine_dump(struct intel_engine_cs *engine, rq->head, rq->postfix, rq->tail, rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u, rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u); - drm_printf(m, "\t\tring->start: 0x%08x\n", + drm_printf(m, "\t\tring->start: 0x%08x\n", i915_ggtt_offset(rq->ring->vma)); - drm_printf(m, "\t\tring->head: 0x%08x\n", + drm_printf(m, "\t\tring->head: 0x%08x\n", rq->ring->head); - drm_printf(m, "\t\tring->tail: 0x%08x\n", + drm_printf(m, "\t\tring->tail: 0x%08x\n", rq->ring->tail); + drm_printf(m, "\t\tring->emit: 0x%08x\n", + rq->ring->emit); + drm_printf(m, "\t\tring->space: 0x%08x\n", + rq->ring->space); } rcu_read_unlock(); @@ -2109,4 +2150,5 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine) #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/mock_engine.c" +#include "selftests/intel_engine_cs.c" #endif diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 6f12adc06365..65a3313723c9 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -221,6 +221,9 @@ static int intelfb_create(struct drm_fb_helper *helper, goto out_unlock; } + fb = &ifbdev->fb->base; + intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_DIRTYFB); + info = drm_fb_helper_alloc_fbi(helper); if (IS_ERR(info)) { DRM_ERROR("Failed to allocate fb_info\n"); @@ -230,8 +233,6 @@ static int intelfb_create(struct drm_fb_helper *helper, info->par = helper; - fb = &ifbdev->fb->base; - ifbdev->helper.fb = fb; strcpy(info->fix.id, "inteldrmfb"); diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c index 3a8d3d06c26a..7fff0a0eceb4 100644 --- a/drivers/gpu/drm/i915/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/intel_frontbuffer.c @@ -80,7 +80,7 @@ void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, } might_sleep(); - intel_psr_invalidate(dev_priv, frontbuffer_bits); + intel_psr_invalidate(dev_priv, frontbuffer_bits, origin); intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits); intel_fbc_invalidate(dev_priv, frontbuffer_bits, origin); } diff --git a/drivers/gpu/drm/i915/intel_gpu_commands.h b/drivers/gpu/drm/i915/intel_gpu_commands.h new file mode 100644 index 000000000000..105e2a9e874a --- /dev/null +++ b/drivers/gpu/drm/i915/intel_gpu_commands.h @@ -0,0 +1,274 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright � 2003-2018 Intel Corporation + */ + +#ifndef _INTEL_GPU_COMMANDS_H_ +#define _INTEL_GPU_COMMANDS_H_ + +/* + * Instruction field definitions used by the command parser + */ +#define INSTR_CLIENT_SHIFT 29 +#define INSTR_MI_CLIENT 0x0 +#define INSTR_BC_CLIENT 0x2 +#define INSTR_RC_CLIENT 0x3 +#define INSTR_SUBCLIENT_SHIFT 27 +#define INSTR_SUBCLIENT_MASK 0x18000000 +#define INSTR_MEDIA_SUBCLIENT 0x2 +#define INSTR_26_TO_24_MASK 0x7000000 +#define INSTR_26_TO_24_SHIFT 24 + +/* + * Memory interface instructions used by the kernel + */ +#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags)) +/* Many MI commands use bit 22 of the header dword for GGTT vs PPGTT */ +#define MI_GLOBAL_GTT 
(1<<22) + +#define MI_NOOP MI_INSTR(0, 0) +#define MI_USER_INTERRUPT MI_INSTR(0x02, 0) +#define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0) +#define MI_WAIT_FOR_OVERLAY_FLIP (1<<16) +#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6) +#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2) +#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1) +#define MI_FLUSH MI_INSTR(0x04, 0) +#define MI_READ_FLUSH (1 << 0) +#define MI_EXE_FLUSH (1 << 1) +#define MI_NO_WRITE_FLUSH (1 << 2) +#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */ +#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ +#define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */ +#define MI_REPORT_HEAD MI_INSTR(0x07, 0) +#define MI_ARB_ON_OFF MI_INSTR(0x08, 0) +#define MI_ARB_ENABLE (1<<0) +#define MI_ARB_DISABLE (0<<0) +#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) +#define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0) +#define MI_SUSPEND_FLUSH_EN (1<<0) +#define MI_SET_APPID MI_INSTR(0x0e, 0) +#define MI_OVERLAY_FLIP MI_INSTR(0x11, 0) +#define MI_OVERLAY_CONTINUE (0x0<<21) +#define MI_OVERLAY_ON (0x1<<21) +#define MI_OVERLAY_OFF (0x2<<21) +#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0) +#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) +#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1) +#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) +/* IVB has funny definitions for which plane to flip. */ +#define MI_DISPLAY_FLIP_IVB_PLANE_A (0 << 19) +#define MI_DISPLAY_FLIP_IVB_PLANE_B (1 << 19) +#define MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19) +#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19) +#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19) +#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19) +/* SKL ones */ +#define MI_DISPLAY_FLIP_SKL_PLANE_1_A (0 << 8) +#define MI_DISPLAY_FLIP_SKL_PLANE_1_B (1 << 8) +#define MI_DISPLAY_FLIP_SKL_PLANE_1_C (2 << 8) +#define MI_DISPLAY_FLIP_SKL_PLANE_2_A (4 << 8) +#define MI_DISPLAY_FLIP_SKL_PLANE_2_B (5 << 8) +#define MI_DISPLAY_FLIP_SKL_PLANE_2_C (6 << 8) +#define MI_DISPLAY_FLIP_SKL_PLANE_3_A (7 << 8) +#define MI_DISPLAY_FLIP_SKL_PLANE_3_B (8 << 8) +#define MI_DISPLAY_FLIP_SKL_PLANE_3_C (9 << 8) +#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6, gen7 */ +#define MI_SEMAPHORE_GLOBAL_GTT (1<<22) +#define MI_SEMAPHORE_UPDATE (1<<21) +#define MI_SEMAPHORE_COMPARE (1<<20) +#define MI_SEMAPHORE_REGISTER (1<<18) +#define MI_SEMAPHORE_SYNC_VR (0<<16) /* RCS wait for VCS (RVSYNC) */ +#define MI_SEMAPHORE_SYNC_VER (1<<16) /* RCS wait for VECS (RVESYNC) */ +#define MI_SEMAPHORE_SYNC_BR (2<<16) /* RCS wait for BCS (RBSYNC) */ +#define MI_SEMAPHORE_SYNC_BV (0<<16) /* VCS wait for BCS (VBSYNC) */ +#define MI_SEMAPHORE_SYNC_VEV (1<<16) /* VCS wait for VECS (VVESYNC) */ +#define MI_SEMAPHORE_SYNC_RV (2<<16) /* VCS wait for RCS (VRSYNC) */ +#define MI_SEMAPHORE_SYNC_RB (0<<16) /* BCS wait for RCS (BRSYNC) */ +#define MI_SEMAPHORE_SYNC_VEB (1<<16) /* BCS wait for VECS (BVESYNC) */ +#define MI_SEMAPHORE_SYNC_VB (2<<16) /* BCS wait for VCS (BVSYNC) */ +#define MI_SEMAPHORE_SYNC_BVE (0<<16) /* VECS wait for BCS (VEBSYNC) */ +#define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */ +#define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */ +#define MI_SEMAPHORE_SYNC_INVALID (3<<16) +#define MI_SEMAPHORE_SYNC_MASK (3<<16) +#define MI_SET_CONTEXT MI_INSTR(0x18, 0) +#define MI_MM_SPACE_GTT (1<<8) +#define MI_MM_SPACE_PHYSICAL (0<<8) +#define MI_SAVE_EXT_STATE_EN (1<<3) +#define MI_RESTORE_EXT_STATE_EN (1<<2) +#define MI_FORCE_RESTORE (1<<1) +#define MI_RESTORE_INHIBIT (1<<0) +#define HSW_MI_RS_SAVE_STATE_EN (1<<3) +#define 
HSW_MI_RS_RESTORE_STATE_EN (1<<2) +#define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */ +#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15) +#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */ +#define MI_SEMAPHORE_POLL (1<<15) +#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12) +#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) +#define MI_STORE_DWORD_IMM_GEN4 MI_INSTR(0x20, 2) +#define MI_MEM_VIRTUAL (1 << 22) /* 945,g33,965 */ +#define MI_USE_GGTT (1 << 22) /* g4x+ */ +#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) +#define MI_STORE_DWORD_INDEX_SHIFT 2 +/* + * Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM: + * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw + * simply ignores the register load under certain conditions. + * - One can actually load arbitrarily many arbitrary registers: Simply issue x + * address/value pairs. Don't overdo it, though, x <= 2^4 must hold! + */ +#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1) +#define MI_LRI_FORCE_POSTED (1<<12) +#define MI_STORE_REGISTER_MEM MI_INSTR(0x24, 1) +#define MI_STORE_REGISTER_MEM_GEN8 MI_INSTR(0x24, 2) +#define MI_SRM_LRM_GLOBAL_GTT (1<<22) +#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ +#define MI_FLUSH_DW_STORE_INDEX (1<<21) +#define MI_INVALIDATE_TLB (1<<18) +#define MI_FLUSH_DW_OP_STOREDW (1<<14) +#define MI_FLUSH_DW_OP_MASK (3<<14) +#define MI_FLUSH_DW_NOTIFY (1<<8) +#define MI_INVALIDATE_BSD (1<<7) +#define MI_FLUSH_DW_USE_GTT (1<<2) +#define MI_FLUSH_DW_USE_PPGTT (0<<2) +#define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 1) +#define MI_LOAD_REGISTER_MEM_GEN8 MI_INSTR(0x29, 2) +#define MI_BATCH_BUFFER MI_INSTR(0x30, 1) +#define MI_BATCH_NON_SECURE (1) +/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */ +#define MI_BATCH_NON_SECURE_I965 (1<<8) +#define MI_BATCH_PPGTT_HSW (1<<8) +#define MI_BATCH_NON_SECURE_HSW (1<<13) +#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) +#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */ +#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1) +#define MI_BATCH_RESOURCE_STREAMER (1<<10) + +/* + * 3D instructions used by the kernel + */ +#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags)) + +#define GEN9_MEDIA_POOL_STATE ((0x3 << 29) | (0x2 << 27) | (0x5 << 16) | 4) +#define GEN9_MEDIA_POOL_ENABLE (1 << 31) +#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24)) +#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) +#define SC_UPDATE_SCISSOR (0x1<<1) +#define SC_ENABLE_MASK (0x1<<0) +#define SC_ENABLE (0x1<<0) +#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16)) +#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1)) +#define SCI_YMIN_MASK (0xffff<<16) +#define SCI_XMIN_MASK (0xffff<<0) +#define SCI_YMAX_MASK (0xffff<<16) +#define SCI_XMAX_MASK (0xffff<<0) +#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19)) +#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1) +#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0) +#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16)) +#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4) +#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0) +#define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) +#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3)) +#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2) + +#define COLOR_BLT_CMD (2<<29 | 0x40<<22 | (5-2)) +#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4) +#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
+#define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5) +#define BLT_WRITE_A (2<<20) +#define BLT_WRITE_RGB (1<<20) +#define BLT_WRITE_RGBA (BLT_WRITE_RGB | BLT_WRITE_A) +#define BLT_DEPTH_8 (0<<24) +#define BLT_DEPTH_16_565 (1<<24) +#define BLT_DEPTH_16_1555 (2<<24) +#define BLT_DEPTH_32 (3<<24) +#define BLT_ROP_SRC_COPY (0xcc<<16) +#define BLT_ROP_COLOR_COPY (0xf0<<16) +#define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */ +#define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */ +#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2) +#define ASYNC_FLIP (1<<22) +#define DISPLAY_PLANE_A (0<<20) +#define DISPLAY_PLANE_B (1<<20) +#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|((len)-2)) +#define PIPE_CONTROL_FLUSH_L3 (1<<27) +#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */ +#define PIPE_CONTROL_MMIO_WRITE (1<<23) +#define PIPE_CONTROL_STORE_DATA_INDEX (1<<21) +#define PIPE_CONTROL_CS_STALL (1<<20) +#define PIPE_CONTROL_TLB_INVALIDATE (1<<18) +#define PIPE_CONTROL_MEDIA_STATE_CLEAR (1<<16) +#define PIPE_CONTROL_QW_WRITE (1<<14) +#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14) +#define PIPE_CONTROL_DEPTH_STALL (1<<13) +#define PIPE_CONTROL_WRITE_FLUSH (1<<12) +#define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */ +#define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on ILK */ +#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */ +#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9) +#define PIPE_CONTROL_NOTIFY (1<<8) +#define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */ +#define PIPE_CONTROL_DC_FLUSH_ENABLE (1<<5) +#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4) +#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3) +#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2) +#define PIPE_CONTROL_STALL_AT_SCOREBOARD (1<<1) +#define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0) +#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */ + +/* + * Commands used only by the command parser + */ +#define MI_SET_PREDICATE MI_INSTR(0x01, 0) +#define MI_ARB_CHECK MI_INSTR(0x05, 0) +#define MI_RS_CONTROL MI_INSTR(0x06, 0) +#define MI_URB_ATOMIC_ALLOC MI_INSTR(0x09, 0) +#define MI_PREDICATE MI_INSTR(0x0C, 0) +#define MI_RS_CONTEXT MI_INSTR(0x0F, 0) +#define MI_TOPOLOGY_FILTER MI_INSTR(0x0D, 0) +#define MI_LOAD_SCAN_LINES_EXCL MI_INSTR(0x13, 0) +#define MI_URB_CLEAR MI_INSTR(0x19, 0) +#define MI_UPDATE_GTT MI_INSTR(0x23, 0) +#define MI_CLFLUSH MI_INSTR(0x27, 0) +#define MI_REPORT_PERF_COUNT MI_INSTR(0x28, 0) +#define MI_REPORT_PERF_COUNT_GGTT (1<<0) +#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 0) +#define MI_RS_STORE_DATA_IMM MI_INSTR(0x2B, 0) +#define MI_LOAD_URB_MEM MI_INSTR(0x2C, 0) +#define MI_STORE_URB_MEM MI_INSTR(0x2D, 0) +#define MI_CONDITIONAL_BATCH_BUFFER_END MI_INSTR(0x36, 0) + +#define PIPELINE_SELECT ((0x3<<29)|(0x1<<27)|(0x1<<24)|(0x4<<16)) +#define GFX_OP_3DSTATE_VF_STATISTICS ((0x3<<29)|(0x1<<27)|(0x0<<24)|(0xB<<16)) +#define MEDIA_VFE_STATE ((0x3<<29)|(0x2<<27)|(0x0<<24)|(0x0<<16)) +#define MEDIA_VFE_STATE_MMIO_ACCESS_MASK (0x18) +#define GPGPU_OBJECT ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x4<<16)) +#define GPGPU_WALKER ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x5<<16)) +#define GFX_OP_3DSTATE_DX9_CONSTANTF_VS \ + ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x39<<16)) +#define GFX_OP_3DSTATE_DX9_CONSTANTF_PS \ + ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x3A<<16)) +#define GFX_OP_3DSTATE_SO_DECL_LIST \ + ((0x3<<29)|(0x3<<27)|(0x1<<24)|(0x17<<16)) + +#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS \ + ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x43<<16)) +#define 
GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS \ + ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x44<<16)) +#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS \ + ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x45<<16)) +#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS \ + ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x46<<16)) +#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS \ + ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x47<<16)) + +#define MFX_WAIT ((0x3<<29)|(0x1<<27)|(0x0<<16)) + +#define COLOR_BLT ((0x2<<29)|(0x40<<22)) +#define SRC_COPY_BLT ((0x2<<29)|(0x43<<22)) + +#endif /* _INTEL_GPU_COMMANDS_H_ */ diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c index ff08ea0ebf49..a00a59a7d9ec 100644 --- a/drivers/gpu/drm/i915/intel_guc.c +++ b/drivers/gpu/drm/i915/intel_guc.c @@ -64,10 +64,12 @@ void intel_guc_init_early(struct intel_guc *guc) { intel_guc_fw_init_early(guc); intel_guc_ct_init_early(&guc->ct); - intel_guc_log_init_early(guc); + intel_guc_log_init_early(&guc->log); mutex_init(&guc->send_mutex); + spin_lock_init(&guc->irq_lock); guc->send = intel_guc_send_nop; + guc->handler = intel_guc_to_host_event_handler_nop; guc->notify = gen8_guc_raise_irq; } @@ -86,9 +88,10 @@ int intel_guc_init_wq(struct intel_guc *guc) * or scheduled later on resume. This way the handling of work * item can be kept same between system suspend & rpm suspend. */ - guc->log.runtime.flush_wq = alloc_ordered_workqueue("i915-guc_log", - WQ_HIGHPRI | WQ_FREEZABLE); - if (!guc->log.runtime.flush_wq) { + guc->log.relay.flush_wq = + alloc_ordered_workqueue("i915-guc_log", + WQ_HIGHPRI | WQ_FREEZABLE); + if (!guc->log.relay.flush_wq) { DRM_ERROR("Couldn't allocate workqueue for GuC log\n"); return -ENOMEM; } @@ -111,7 +114,7 @@ int intel_guc_init_wq(struct intel_guc *guc) guc->preempt_wq = alloc_ordered_workqueue("i915-guc_preempt", WQ_HIGHPRI); if (!guc->preempt_wq) { - destroy_workqueue(guc->log.runtime.flush_wq); + destroy_workqueue(guc->log.relay.flush_wq); DRM_ERROR("Couldn't allocate workqueue for GuC " "preemption\n"); return -ENOMEM; @@ -129,7 +132,7 @@ void intel_guc_fini_wq(struct intel_guc *guc) USES_GUC_SUBMISSION(dev_priv)) destroy_workqueue(guc->preempt_wq); - destroy_workqueue(guc->log.runtime.flush_wq); + destroy_workqueue(guc->log.relay.flush_wq); } static int guc_shared_data_create(struct intel_guc *guc) @@ -169,7 +172,7 @@ int intel_guc_init(struct intel_guc *guc) return ret; GEM_BUG_ON(!guc->shared_data); - ret = intel_guc_log_create(guc); + ret = intel_guc_log_create(&guc->log); if (ret) goto err_shared; @@ -184,7 +187,7 @@ int intel_guc_init(struct intel_guc *guc) return 0; err_log: - intel_guc_log_destroy(guc); + intel_guc_log_destroy(&guc->log); err_shared: guc_shared_data_destroy(guc); return ret; @@ -196,7 +199,7 @@ void intel_guc_fini(struct intel_guc *guc) i915_ggtt_disable_guc(dev_priv); intel_guc_ads_destroy(guc); - intel_guc_log_destroy(guc); + intel_guc_log_destroy(&guc->log); guc_shared_data_destroy(guc); } @@ -220,17 +223,23 @@ static u32 get_core_family(struct drm_i915_private *dev_priv) } } -static u32 get_log_verbosity_flags(void) +static u32 get_log_control_flags(void) { - if (i915_modparams.guc_log_level > 0) { - u32 verbosity = i915_modparams.guc_log_level - 1; + u32 level = i915_modparams.guc_log_level; + u32 flags = 0; - GEM_BUG_ON(verbosity > GUC_LOG_VERBOSITY_MAX); - return verbosity << GUC_LOG_VERBOSITY_SHIFT; - } + GEM_BUG_ON(level < 0); + + if (!GUC_LOG_LEVEL_IS_ENABLED(level)) + flags |= GUC_LOG_DEFAULT_DISABLED; + + if (!GUC_LOG_LEVEL_IS_VERBOSE(level)) + flags |= GUC_LOG_DISABLED; + else + flags |= 
GUC_LOG_LEVEL_TO_VERBOSITY(level) << + GUC_LOG_VERBOSITY_SHIFT; - GEM_BUG_ON(i915_modparams.enable_guc < 0); - return GUC_LOG_DISABLED; + return flags; } /* @@ -265,12 +274,13 @@ void intel_guc_init_params(struct intel_guc *guc) params[GUC_CTL_LOG_PARAMS] = guc->log.flags; - params[GUC_CTL_DEBUG] = get_log_verbosity_flags(); + params[GUC_CTL_DEBUG] = get_log_control_flags(); /* If GuC submission is enabled, set up additional parameters here */ if (USES_GUC_SUBMISSION(dev_priv)) { - u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT; - u32 pgs = guc_ggtt_offset(dev_priv->guc.stage_desc_pool); + u32 ads = intel_guc_ggtt_offset(guc, + guc->ads_vma) >> PAGE_SHIFT; + u32 pgs = intel_guc_ggtt_offset(guc, guc->stage_desc_pool); u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16; params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT; @@ -301,16 +311,23 @@ void intel_guc_init_params(struct intel_guc *guc) intel_uncore_forcewake_put(dev_priv, FORCEWAKE_BLITTER); } -int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len) +int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len, + u32 *response_buf, u32 response_buf_size) { WARN(1, "Unexpected send: action=%#x\n", *action); return -ENODEV; } +void intel_guc_to_host_event_handler_nop(struct intel_guc *guc) +{ + WARN(1, "Unexpected event: no suitable handler\n"); +} + /* * This function implements the MMIO based host to GuC interface. */ -int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len) +int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, + u32 *response_buf, u32 response_buf_size) { struct drm_i915_private *dev_priv = guc_to_i915(guc); u32 status; @@ -320,6 +337,9 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len) GEM_BUG_ON(!len); GEM_BUG_ON(len > guc->send_regs.count); + /* We expect only action code */ + GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK); + /* If CT is available, we expect to use MMIO only during init/fini */ GEM_BUG_ON(HAS_GUC_CT(dev_priv) && *action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER && @@ -341,29 +361,74 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len) */ ret = __intel_wait_for_register_fw(dev_priv, guc_send_reg(guc, 0), - INTEL_GUC_RECV_MASK, - INTEL_GUC_RECV_MASK, + INTEL_GUC_MSG_TYPE_MASK, + INTEL_GUC_MSG_TYPE_RESPONSE << + INTEL_GUC_MSG_TYPE_SHIFT, 10, 10, &status); - if (status != INTEL_GUC_STATUS_SUCCESS) { - /* - * Either the GuC explicitly returned an error (which - * we convert to -EIO here) or no response at all was - * received within the timeout limit (-ETIMEDOUT) - */ - if (ret != -ETIMEDOUT) - ret = -EIO; - - DRM_WARN("INTEL_GUC_SEND: Action 0x%X failed;" - " ret=%d status=0x%08X response=0x%08X\n", - action[0], ret, status, I915_READ(SOFT_SCRATCH(15))); + /* If GuC explicitly returned an error, convert it to -EIO */ + if (!ret && !INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(status)) + ret = -EIO; + + if (ret) { + DRM_DEBUG_DRIVER("INTEL_GUC_SEND: Action 0x%X failed;" + " ret=%d status=0x%08X response=0x%08X\n", + action[0], ret, status, + I915_READ(SOFT_SCRATCH(15))); + goto out; } + if (response_buf) { + int count = min(response_buf_size, guc->send_regs.count - 1); + + for (i = 0; i < count; i++) + response_buf[i] = I915_READ(guc_send_reg(guc, i + 1)); + } + + /* Use data from the GuC response as our return value */ + ret = INTEL_GUC_MSG_TO_DATA(status); + +out: intel_uncore_forcewake_put(dev_priv, guc->send_regs.fw_domains); mutex_unlock(&guc->send_mutex); return ret; } 
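A minimal usage sketch of the extended send() contract above (illustration only, not part of the patch; the action code below is a made-up placeholder, real callers pass an INTEL_GUC_ACTION_* value, and intel_guc_send_and_receive() is the wrapper added in intel_guc.h later in this series):

static int example_guc_query(struct intel_guc *guc)
{
	u32 action[] = { 0x4242 };	/* hypothetical action code */
	u32 response[2];
	int ret;

	ret = intel_guc_send_and_receive(guc, action, ARRAY_SIZE(action),
					 response, ARRAY_SIZE(response));
	if (ret < 0)
		return ret;	/* transport error or GuC-reported failure */

	/*
	 * On success, ret carries data decoded from the status dword on
	 * the MMIO path, or the actual response length on the CT path;
	 * response[] holds any returned payload dwords.
	 */
	return 0;
}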
+void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + u32 msg, val; + + /* + * Sample the log buffer flush related bits & clear them out right + * away from the message identity register to minimize the + * probability of losing a flush interrupt, when there are back + * to back flush interrupts. + * There can be a new flush interrupt, for different log buffer + * type (like for ISR), whilst Host is handling one (for DPC). + * Since the same bit is used in the message register for ISR & DPC, + * it could happen that GuC sets the bit for the 2nd interrupt but + * Host clears out the bit on handling the 1st interrupt. + */ + spin_lock(&guc->irq_lock); + val = I915_READ(SOFT_SCRATCH(15)); + msg = val & guc->msg_enabled_mask; + I915_WRITE(SOFT_SCRATCH(15), val & ~msg); + spin_unlock(&guc->irq_lock); + + intel_guc_to_host_process_recv_msg(guc, msg); +} + +void intel_guc_to_host_process_recv_msg(struct intel_guc *guc, u32 msg) +{ + /* Make sure to handle only enabled messages */ + msg &= guc->msg_enabled_mask; + + if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER | + INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED)) + intel_guc_log_handle_flush_event(&guc->log); +} + int intel_guc_sample_forcewake(struct intel_guc *guc) { struct drm_i915_private *dev_priv = guc_to_i915(guc); @@ -410,7 +475,7 @@ int intel_guc_suspend(struct intel_guc *guc) u32 data[] = { INTEL_GUC_ACTION_ENTER_S_STATE, GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */ - guc_ggtt_offset(guc->shared_data) + intel_guc_ggtt_offset(guc, guc->shared_data) }; return intel_guc_send(guc, data, ARRAY_SIZE(data)); @@ -434,7 +499,7 @@ int intel_guc_reset_engine(struct intel_guc *guc, data[3] = 0; data[4] = 0; data[5] = guc->execbuf_client->stage_id; - data[6] = guc_ggtt_offset(guc->shared_data); + data[6] = intel_guc_ggtt_offset(guc, guc->shared_data); return intel_guc_send(guc, data, ARRAY_SIZE(data)); } @@ -448,13 +513,66 @@ int intel_guc_resume(struct intel_guc *guc) u32 data[] = { INTEL_GUC_ACTION_EXIT_S_STATE, GUC_POWER_D0, - guc_ggtt_offset(guc->shared_data) + intel_guc_ggtt_offset(guc, guc->shared_data) }; return intel_guc_send(guc, data, ARRAY_SIZE(data)); } /** + * DOC: GuC Address Space + * + * The layout of GuC address space is shown below: + * + * :: + * + * +==============> +====================+ <== GUC_GGTT_TOP + * ^ | | + * | | | + * | | DRAM | + * | | Memory | + * | | | + * GuC | | + * Address +========> +====================+ <== WOPCM Top + * Space ^ | HW contexts RSVD | + * | | | WOPCM | + * | | +==> +--------------------+ <== GuC WOPCM Top + * | GuC ^ | | + * | GGTT | | | + * | Pin GuC | GuC | + * | Bias WOPCM | WOPCM | + * | | Size | | + * | | | | | + * v v v | | + * +=====+=====+==> +====================+ <== GuC WOPCM Base + * | Non-GuC WOPCM | + * | (HuC/Reserved) | + * +====================+ <== WOPCM Base + * + * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to WOPCM + * while the upper part of GuC Address Space [ggtt_pin_bias, GUC_GGTT_TOP) is + * mapped to DRAM. The value of the GuC ggtt_pin_bias is determined by WOPCM + * size and actual GuC WOPCM size. + */ + +/** + * intel_guc_init_ggtt_pin_bias() - Initialize the GuC ggtt_pin_bias value. + * @guc: intel_guc structure. + * + * This function will calculate and initialize the ggtt_pin_bias value based on + * overall WOPCM size and GuC WOPCM size.
+ */ +void intel_guc_init_ggtt_pin_bias(struct intel_guc *guc) +{ + struct drm_i915_private *i915 = guc_to_i915(guc); + + GEM_BUG_ON(!i915->wopcm.size); + GEM_BUG_ON(i915->wopcm.size < i915->wopcm.guc.base); + + guc->ggtt_pin_bias = i915->wopcm.size - i915->wopcm.guc.base; +} + +/** * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage * @guc: the guc * @size: size of area to allocate (both virtual space and memory) * @@ -462,7 +580,7 @@ int intel_guc_resume(struct intel_guc *guc) * This is a wrapper to create an object for use with the GuC. In order to * use it inside the GuC, an object needs to be pinned lifetime, so we allocate * both some backing storage and a range inside the Global GTT. We must pin - * it in the GGTT somewhere other than than [0, GUC_WOPCM_TOP) because that + * it in the GGTT somewhere other than [0, GUC ggtt_pin_bias) because that * range is reserved inside GuC. * * Return: A i915_vma if successful, otherwise an ERR_PTR. */ @@ -483,7 +601,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size) goto err; ret = i915_vma_pin(vma, 0, PAGE_SIZE, - PIN_GLOBAL | PIN_OFFSET_BIAS | GUC_WOPCM_TOP); + PIN_GLOBAL | PIN_OFFSET_BIAS | guc->ggtt_pin_bias); if (ret) { vma = ERR_PTR(ret); goto err; @@ -495,14 +613,3 @@ err: i915_gem_object_put(obj); return vma; } - -u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv) -{ - u32 wopcm_size = GUC_WOPCM_TOP; - - /* On BXT, the top of WOPCM is reserved for RC6 context */ - if (IS_GEN9_LP(dev_priv)) - wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED; - - return wopcm_size; -} diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index b9424ac644ac..f1265e122d30 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h @@ -49,11 +49,16 @@ struct intel_guc { struct intel_guc_log log; struct intel_guc_ct ct; + /* Offset where Non-WOPCM memory starts. */ + u32 ggtt_pin_bias; + /* Log snapshot if GuC errors during load */ struct drm_i915_gem_object *load_err_log; /* intel_guc_recv interrupt related state */ + spinlock_t irq_lock; bool interrupts_enabled; + unsigned int msg_enabled_mask; struct i915_vma *ads_vma; struct i915_vma *stage_desc_pool; @@ -83,7 +88,11 @@ struct intel_guc { struct mutex send_mutex; /* GuC's FW specific send function */ - int (*send)(struct intel_guc *guc, const u32 *data, u32 len); + int (*send)(struct intel_guc *guc, const u32 *data, u32 len, + u32 *response_buf, u32 response_buf_size); + + /* GuC's FW specific event handler function */ + void (*handler)(struct intel_guc *guc); /* GuC's FW specific notify function */ void (*notify)(struct intel_guc *guc); @@ -92,7 +101,14 @@ struct intel_guc { static inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len) { - return guc->send(guc, action, len); + return guc->send(guc, action, len, NULL, 0); +} + +static inline int +intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len, + u32 *response_buf, u32 response_buf_size) +{ + return guc->send(guc, action, len, response_buf, response_buf_size); } static inline void intel_guc_notify(struct intel_guc *guc) @@ -100,17 +116,33 @@ static inline void intel_guc_notify(struct intel_guc *guc) guc->notify(guc); } -/* - * GuC does not allow any gfx GGTT address that falls into range [0, WOPCM_TOP), - * which is reserved for Boot ROM, SRAM and WOPCM. Currently this top address is
In order to exclude 0-512K address space from GGTT, all gfx objects - * used by GuC is pinned with PIN_OFFSET_BIAS along with size of WOPCM. +static inline void intel_guc_to_host_event_handler(struct intel_guc *guc) +{ + guc->handler(guc); +} + +/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */ +#define GUC_GGTT_TOP 0xFEE00000 + +/** + * intel_guc_ggtt_offset() - Get and validate the GGTT offset of @vma + * @guc: intel_guc structure. + * @vma: i915 graphics virtual memory area. + * + * GuC does not allow any gfx GGTT address that falls into range + * [0, GuC ggtt_pin_bias), which is reserved for Boot ROM, SRAM and WOPCM. + * Currently, in order to exclude [0, GuC ggtt_pin_bias) address space from + * GGTT, all gfx objects used by GuC are allocated with intel_guc_allocate_vma() + * and pinned with PIN_OFFSET_BIAS along with the value of GuC ggtt_pin_bias. + * + * Return: GGTT offset of the @vma. */ -static inline u32 guc_ggtt_offset(struct i915_vma *vma) +static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc, + struct i915_vma *vma) { u32 offset = i915_ggtt_offset(vma); - GEM_BUG_ON(offset < GUC_WOPCM_TOP); + GEM_BUG_ON(offset < guc->ggtt_pin_bias); GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP)); return offset; @@ -119,17 +151,43 @@ static inline u32 guc_ggtt_offset(struct i915_vma *vma) void intel_guc_init_early(struct intel_guc *guc); void intel_guc_init_send_regs(struct intel_guc *guc); void intel_guc_init_params(struct intel_guc *guc); +void intel_guc_init_ggtt_pin_bias(struct intel_guc *guc); int intel_guc_init_wq(struct intel_guc *guc); void intel_guc_fini_wq(struct intel_guc *guc); int intel_guc_init(struct intel_guc *guc); void intel_guc_fini(struct intel_guc *guc); -int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len); -int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len); +int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len, + u32 *response_buf, u32 response_buf_size); +int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, + u32 *response_buf, u32 response_buf_size); +void intel_guc_to_host_event_handler(struct intel_guc *guc); +void intel_guc_to_host_event_handler_nop(struct intel_guc *guc); +void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc); +void intel_guc_to_host_process_recv_msg(struct intel_guc *guc, u32 msg); int intel_guc_sample_forcewake(struct intel_guc *guc); int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset); int intel_guc_suspend(struct intel_guc *guc); int intel_guc_resume(struct intel_guc *guc); struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size); -u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv); + +static inline int intel_guc_sanitize(struct intel_guc *guc) +{ + intel_uc_fw_sanitize(&guc->fw); + return 0; +} + +static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask) +{ + spin_lock_irq(&guc->irq_lock); + guc->msg_enabled_mask |= mask; + spin_unlock_irq(&guc->irq_lock); +} + +static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask) +{ + spin_lock_irq(&guc->irq_lock); + guc->msg_enabled_mask &= ~mask; + spin_unlock_irq(&guc->irq_lock); +} #endif diff --git a/drivers/gpu/drm/i915/intel_guc_ads.c b/drivers/gpu/drm/i915/intel_guc_ads.c index ac627534667d..334cb5202e1c 100644 --- a/drivers/gpu/drm/i915/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/intel_guc_ads.c @@ -75,7 +75,7 @@ static void guc_policies_init(struct guc_policies *policies) int 
intel_guc_ads_create(struct intel_guc *guc) { struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct i915_vma *vma; + struct i915_vma *vma, *kernel_ctx_vma; struct page *page; /* The ads obj includes the struct itself and buffers passed to GuC */ struct { @@ -121,9 +121,9 @@ int intel_guc_ads_create(struct intel_guc *guc) * to find it. Note that we have to skip our header (1 page), * because our GuC shared data is there. */ + kernel_ctx_vma = dev_priv->kernel_context->engine[RCS].state; blob->ads.golden_context_lrca = - guc_ggtt_offset(dev_priv->kernel_context->engine[RCS].state) + - skipped_offset; + intel_guc_ggtt_offset(guc, kernel_ctx_vma) + skipped_offset; /* * The GuC expects us to exclude the portion of the context image that @@ -135,7 +135,7 @@ int intel_guc_ads_create(struct intel_guc *guc) blob->ads.eng_state_size[engine->guc_id] = engine->context_size - skipped_size; - base = guc_ggtt_offset(vma); + base = intel_guc_ggtt_offset(guc, vma); blob->ads.scheduler_policies = base + ptr_offset(blob, policies); blob->ads.reg_state_buffer = base + ptr_offset(blob, reg_state_buffer); blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state); diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c index 24ad55752396..990141d5f195 100644 --- a/drivers/gpu/drm/i915/intel_guc_ct.c +++ b/drivers/gpu/drm/i915/intel_guc_ct.c @@ -24,14 +24,49 @@ #include "i915_drv.h" #include "intel_guc_ct.h" +#ifdef CONFIG_DRM_I915_DEBUG_GUC +#define CT_DEBUG_DRIVER(...) DRM_DEBUG_DRIVER(__VA_ARGS__) +#else +#define CT_DEBUG_DRIVER(...) do { } while (0) +#endif + +struct ct_request { + struct list_head link; + u32 fence; + u32 status; + u32 response_len; + u32 *response_buf; +}; + +struct ct_incoming_request { + struct list_head link; + u32 msg[]; +}; + enum { CTB_SEND = 0, CTB_RECV = 1 }; enum { CTB_OWNER_HOST = 0 }; +static void ct_incoming_request_worker_func(struct work_struct *w); + +/** + * intel_guc_ct_init_early - Initialize CT state without requiring device access + * @ct: pointer to CT struct + */ void intel_guc_ct_init_early(struct intel_guc_ct *ct) { /* we're using static channel owners */ ct->host_channel.owner = CTB_OWNER_HOST; + + spin_lock_init(&ct->lock); + INIT_LIST_HEAD(&ct->pending_requests); + INIT_LIST_HEAD(&ct->incoming_requests); + INIT_WORK(&ct->worker, ct_incoming_request_worker_func); +} + +static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct) +{ + return container_of(ct, struct intel_guc, ct); } static inline const char *guc_ct_buffer_type_to_str(u32 type) @@ -49,8 +84,8 @@ static inline const char *guc_ct_buffer_type_to_str(u32 type) static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc, u32 cmds_addr, u32 size, u32 owner) { - DRM_DEBUG_DRIVER("CT: desc %p init addr=%#x size=%u owner=%u\n", - desc, cmds_addr, size, owner); + CT_DEBUG_DRIVER("CT: desc %p init addr=%#x size=%u owner=%u\n", + desc, cmds_addr, size, owner); memset(desc, 0, sizeof(*desc)); desc->addr = cmds_addr; desc->size = size; @@ -59,8 +94,8 @@ static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc, static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc) { - DRM_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n", - desc, desc->head, desc->tail); + CT_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n", + desc, desc->head, desc->tail); desc->head = 0; desc->tail = 0; desc->is_in_error = 0; @@ -79,7 +114,7 @@ static int guc_action_register_ct_buffer(struct intel_guc *guc, int err; /* Can't use generic send(), CT registration 
must go over MMIO */ - err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action)); + err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0); if (err) DRM_ERROR("CT: register %s buffer failed; err=%d\n", guc_ct_buffer_type_to_str(type), err); @@ -98,7 +133,7 @@ static int guc_action_deregister_ct_buffer(struct intel_guc *guc, int err; /* Can't use generic send(), CT deregistration must go over MMIO */ - err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action)); + err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0); if (err) DRM_ERROR("CT: deregister %s buffer failed; owner=%d err=%d\n", guc_ct_buffer_type_to_str(type), owner, err); @@ -156,7 +191,8 @@ static int ctch_init(struct intel_guc *guc, err = PTR_ERR(blob); goto err_vma; } - DRM_DEBUG_DRIVER("CT: vma base=%#x\n", guc_ggtt_offset(ctch->vma)); + CT_DEBUG_DRIVER("CT: vma base=%#x\n", + intel_guc_ggtt_offset(guc, ctch->vma)); /* store pointers to desc and cmds */ for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) { @@ -170,8 +206,8 @@ static int ctch_init(struct intel_guc *guc, err_vma: i915_vma_unpin_and_release(&ctch->vma); err_out: - DRM_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n", - ctch->owner, err); + CT_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n", + ctch->owner, err); return err; } @@ -191,8 +227,8 @@ static int ctch_open(struct intel_guc *guc, int err; int i; - DRM_DEBUG_DRIVER("CT: channel %d reopen=%s\n", - ctch->owner, yesno(ctch_is_open(ctch))); + CT_DEBUG_DRIVER("CT: channel %d reopen=%s\n", + ctch->owner, yesno(ctch_is_open(ctch))); if (!ctch->vma) { err = ctch_init(guc, ctch); @@ -202,7 +238,7 @@ static int ctch_open(struct intel_guc *guc, } /* vma should be already allocated and map'ed */ - base = guc_ggtt_offset(ctch->vma); + base = intel_guc_ggtt_offset(guc, ctch->vma); /* (re)initialize descriptors * cmds buffers are in the second half of the blob page @@ -263,10 +299,29 @@ static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch) return ++ctch->next_fence; } +/** + * DOC: CTB Host to GuC request + * + * Format of the CTB Host to GuC request message is as follows:: + * + * +------------+---------+---------+---------+---------+ + * | msg[0] | [1] | [2] | ... | [n-1] | + * +------------+---------+---------+---------+---------+ + * | MESSAGE | MESSAGE PAYLOAD | + * + HEADER +---------+---------+---------+---------+ + * | | 0 | 1 | ... | n | + * +============+=========+=========+=========+=========+ + * | len >= 1 | FENCE | request specific data | + * +------+-----+---------+---------+---------+---------+ + * + * ^-----------------len-------------------^ + */ + static int ctb_write(struct intel_guc_ct_buffer *ctb, const u32 *action, u32 len /* in dwords */, - u32 fence) + u32 fence, + bool want_response) { struct guc_ct_buffer_desc *desc = ctb->desc; u32 head = desc->head / 4; /* in dwords */ @@ -295,15 +350,21 @@ static int ctb_write(struct intel_guc_ct_buffer *ctb, if (unlikely(used + len + 1 >= size)) return -ENOSPC; - /* Write the message. The format is the following: + /* + * Write the message. The format is the following: * DW0: header (including action code) * DW1: fence * DW2+: action data */ header = (len << GUC_CT_MSG_LEN_SHIFT) | (GUC_CT_MSG_WRITE_FENCE_TO_DESC) | + (want_response ? 
GUC_CT_MSG_SEND_STATUS : 0) | (action[0] << GUC_CT_MSG_ACTION_SHIFT); + CT_DEBUG_DRIVER("CT: writing %*phn %*phn %*phn\n", + 4, &header, 4, &fence, + 4 * (len - 1), &action[1]); + cmds[tail] = header; tail = (tail + 1) % size; @@ -322,16 +383,25 @@ static int ctb_write(struct intel_guc_ct_buffer *ctb, return 0; } -/* Wait for the response from the GuC. +/** + * wait_for_ctb_desc_update - Wait for the CT buffer descriptor update. + * @desc: buffer descriptor * @fence: response fence * @status: placeholder for status - * return: 0 response received (status is valid) - * -ETIMEDOUT no response within hardcoded timeout - * -EPROTO no response, ct buffer was in error + * + * GuC will update the CT buffer descriptor with the new fence and status + * after processing the command identified by the fence. Wait for the + * specified fence and then read the status of the command from the + * descriptor. + * + * Return: + * * 0 response received (status is valid) + * * -ETIMEDOUT no response within hardcoded timeout + * * -EPROTO no response, CT buffer is in error */ -static int wait_for_response(struct guc_ct_buffer_desc *desc, - u32 fence, - u32 *status) +static int wait_for_ctb_desc_update(struct guc_ct_buffer_desc *desc, + u32 fence, + u32 *status) { int err; @@ -363,71 +433,440 @@ static int wait_for_response(struct guc_ct_buffer_desc *desc, return err; } -static int ctch_send(struct intel_guc *guc, +/** + * wait_for_ct_request_update - Wait for CT request state update. + * @req: pointer to pending request + * @status: placeholder for status + * + * For each sent request, GuC shall send back a CT response message. + * Our message handler will update the status of the tracked request + * once a response message with the given fence is received. Wait here + * and check for a valid response status value. + * + * Return: + * * 0 response received (status is valid) + * * -ETIMEDOUT no response within hardcoded timeout + */ +static int wait_for_ct_request_update(struct ct_request *req, u32 *status) +{ + int err; + + /* + * Fast commands should complete in less than 10us, so sample quickly + * up to that length of time, then switch to a slower sleep-wait loop. + * No GuC command should ever take longer than 10ms.
+ */ +#define done INTEL_GUC_MSG_IS_RESPONSE(READ_ONCE(req->status)) + err = wait_for_us(done, 10); + if (err) + err = wait_for(done, 10); +#undef done + + if (unlikely(err)) + DRM_ERROR("CT: fence %u err %d\n", req->fence, err); + + *status = req->status; + return err; +} + +static int ctch_send(struct intel_guc_ct *ct, struct intel_guc_ct_channel *ctch, const u32 *action, u32 len, + u32 *response_buf, + u32 response_buf_size, u32 *status) { struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_SEND]; struct guc_ct_buffer_desc *desc = ctb->desc; + struct ct_request request; + unsigned long flags; u32 fence; int err; GEM_BUG_ON(!ctch_is_open(ctch)); GEM_BUG_ON(!len); GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK); + GEM_BUG_ON(!response_buf && response_buf_size); fence = ctch_get_next_fence(ctch); - err = ctb_write(ctb, action, len, fence); + request.fence = fence; + request.status = 0; + request.response_len = response_buf_size; + request.response_buf = response_buf; + + spin_lock_irqsave(&ct->lock, flags); + list_add_tail(&request.link, &ct->pending_requests); + spin_unlock_irqrestore(&ct->lock, flags); + + err = ctb_write(ctb, action, len, fence, !!response_buf); if (unlikely(err)) - return err; + goto unlink; - intel_guc_notify(guc); + intel_guc_notify(ct_to_guc(ct)); - err = wait_for_response(desc, fence, status); + if (response_buf) + err = wait_for_ct_request_update(&request, status); + else + err = wait_for_ctb_desc_update(desc, fence, status); if (unlikely(err)) - return err; - if (*status != INTEL_GUC_STATUS_SUCCESS) - return -EIO; - return 0; + goto unlink; + + if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status)) { + err = -EIO; + goto unlink; + } + + if (response_buf) { + /* There shall be no data in the status */ + WARN_ON(INTEL_GUC_MSG_TO_DATA(request.status)); + /* Return actual response len */ + err = request.response_len; + } else { + /* There shall be no response payload */ + WARN_ON(request.response_len); + /* Return data decoded from the status dword */ + err = INTEL_GUC_MSG_TO_DATA(*status); + } + +unlink: + spin_lock_irqsave(&ct->lock, flags); + list_del(&request.link); + spin_unlock_irqrestore(&ct->lock, flags); + + return err; } /* * Command Transport (CT) buffer based GuC send function. 
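The dword ring arithmetic shared by ctb_write() above and its read-side twin ctb_read() further down is easy to get subtly wrong: tail == head means empty, a negative difference means the indices wrapped, and one dword is always kept unused so a full ring is never mistaken for an empty one (hence the `used + len + 1 >= size` rejection). A minimal user-space model of just that accounting, a sketch whose names are local to this example rather than taken from i915:

    #include <stdint.h>
    #include <stdio.h>

    /* Dword-granular ring, mirroring the CTB descriptor head/tail fields. */
    static int32_t ring_used(uint32_t head, uint32_t tail, uint32_t size)
    {
            int32_t used = (int32_t)tail - (int32_t)head;

            if (used < 0)   /* tail wrapped around the end of the buffer */
                    used += size;
            return used;
    }

    /* Mirrors the -ENOSPC test in ctb_write(): header + payload + one spare. */
    static int ring_can_write(uint32_t head, uint32_t tail, uint32_t size,
                              uint32_t len)
    {
            return ring_used(head, tail, size) + len + 1 < size;
    }

    int main(void)
    {
            /* 1024-dword ring; the producer has wrapped past the end. */
            printf("used=%d\n", ring_used(1000, 8, 1024));          /* 32 */
            printf("ok=%d\n", ring_can_write(1000, 8, 1024, 1000)); /* 0 */
            return 0;
    }

Note also the return convention ctch_send() adopts here: negative errno on failure, otherwise either the actual response length (when the caller supplied a response buffer) or the data field decoded from the status dword, which is why intel_guc_send_ct() treats `ret < 0` and a positive `ret` differently.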
*/ -static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len) +static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len, + u32 *response_buf, u32 response_buf_size) { - struct intel_guc_ct_channel *ctch = &guc->ct.host_channel; + struct intel_guc_ct *ct = &guc->ct; + struct intel_guc_ct_channel *ctch = &ct->host_channel; u32 status = ~0; /* undefined */ - int err; + int ret; mutex_lock(&guc->send_mutex); - err = ctch_send(guc, ctch, action, len, &status); - if (unlikely(err)) { + ret = ctch_send(ct, ctch, action, len, response_buf, response_buf_size, + &status); + if (unlikely(ret < 0)) { DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n", - action[0], err, status); + action[0], ret, status); + } else if (unlikely(ret)) { + CT_DEBUG_DRIVER("CT: send action %#x returned %d (%#x)\n", + action[0], ret, ret); } mutex_unlock(&guc->send_mutex); - return err; + return ret; +} + +static inline unsigned int ct_header_get_len(u32 header) +{ + return (header >> GUC_CT_MSG_LEN_SHIFT) & GUC_CT_MSG_LEN_MASK; +} + +static inline unsigned int ct_header_get_action(u32 header) +{ + return (header >> GUC_CT_MSG_ACTION_SHIFT) & GUC_CT_MSG_ACTION_MASK; +} + +static inline bool ct_header_is_response(u32 header) +{ + return ct_header_get_action(header) == INTEL_GUC_ACTION_DEFAULT; +} + +static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data) +{ + struct guc_ct_buffer_desc *desc = ctb->desc; + u32 head = desc->head / 4; /* in dwords */ + u32 tail = desc->tail / 4; /* in dwords */ + u32 size = desc->size / 4; /* in dwords */ + u32 *cmds = ctb->cmds; + s32 available; /* in dwords */ + unsigned int len; + unsigned int i; + + GEM_BUG_ON(desc->size % 4); + GEM_BUG_ON(desc->head % 4); + GEM_BUG_ON(desc->tail % 4); + GEM_BUG_ON(tail >= size); + GEM_BUG_ON(head >= size); + + /* tail == head condition indicates empty */ + available = tail - head; + if (unlikely(available == 0)) + return -ENODATA; + + /* beware of buffer wrap case */ + if (unlikely(available < 0)) + available += size; + CT_DEBUG_DRIVER("CT: available %d (%u:%u)\n", available, head, tail); + GEM_BUG_ON(available < 0); + + data[0] = cmds[head]; + head = (head + 1) % size; + + /* message len with header */ + len = ct_header_get_len(data[0]) + 1; + if (unlikely(len > (u32)available)) { + DRM_ERROR("CT: incomplete message %*phn %*phn %*phn\n", + 4, data, + 4 * (head + available - 1 > size ? + size - head : available - 1), &cmds[head], + 4 * (head + available - 1 > size ? + available - 1 - size + head : 0), &cmds[0]); + return -EPROTO; + } + + for (i = 1; i < len; i++) { + data[i] = cmds[head]; + head = (head + 1) % size; + } + CT_DEBUG_DRIVER("CT: received %*phn\n", 4 * len, data); + + desc->head = head * 4; + return 0; } /** - * Enable buffer based command transport + * DOC: CTB GuC to Host response + * + * Format of the CTB GuC to Host response message is as follows:: + * + * +------------+---------+---------+---------+---------+---------+ + * | msg[0] | [1] | [2] | [3] | ... | [n-1] | + * +------------+---------+---------+---------+---------+---------+ + * | MESSAGE | MESSAGE PAYLOAD | + * + HEADER +---------+---------+---------+---------+---------+ + * | | 0 | 1 | 2 | ... 
| n | + * +============+=========+=========+=========+=========+=========+ + * | len >= 2 | FENCE | STATUS | response specific data | + * +------+-----+---------+---------+---------+---------+---------+ + * + * ^-----------------------len-----------------------^ + */ + +static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg) +{ + u32 header = msg[0]; + u32 len = ct_header_get_len(header); + u32 msglen = len + 1; /* total message length including header */ + u32 fence; + u32 status; + u32 datalen; + struct ct_request *req; + bool found = false; + + GEM_BUG_ON(!ct_header_is_response(header)); + GEM_BUG_ON(!in_irq()); + + /* Response payload shall at least include fence and status */ + if (unlikely(len < 2)) { + DRM_ERROR("CT: corrupted response %*phn\n", 4 * msglen, msg); + return -EPROTO; + } + + fence = msg[1]; + status = msg[2]; + datalen = len - 2; + + /* Format of the status follows RESPONSE message */ + if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) { + DRM_ERROR("CT: corrupted response %*phn\n", 4 * msglen, msg); + return -EPROTO; + } + + CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status); + + spin_lock(&ct->lock); + list_for_each_entry(req, &ct->pending_requests, link) { + if (unlikely(fence != req->fence)) { + CT_DEBUG_DRIVER("CT: request %u awaits response\n", + req->fence); + continue; + } + if (unlikely(datalen > req->response_len)) { + DRM_ERROR("CT: response %u too long %*phn\n", + req->fence, 4 * msglen, msg); + datalen = 0; + } + if (datalen) + memcpy(req->response_buf, msg + 3, 4 * datalen); + req->response_len = datalen; + WRITE_ONCE(req->status, status); + found = true; + break; + } + spin_unlock(&ct->lock); + + if (!found) + DRM_ERROR("CT: unsolicited response %*phn\n", 4 * msglen, msg); + return 0; +} + +static void ct_process_request(struct intel_guc_ct *ct, + u32 action, u32 len, const u32 *payload) +{ + struct intel_guc *guc = ct_to_guc(ct); + + CT_DEBUG_DRIVER("CT: request %x %*phn\n", action, 4 * len, payload); + + switch (action) { + case INTEL_GUC_ACTION_DEFAULT: + if (unlikely(len < 1)) + goto fail_unexpected; + intel_guc_to_host_process_recv_msg(guc, *payload); + break; + + default: +fail_unexpected: + DRM_ERROR("CT: unexpected request %x %*phn\n", + action, 4 * len, payload); + break; + } +} + +static bool ct_process_incoming_requests(struct intel_guc_ct *ct) +{ + unsigned long flags; + struct ct_incoming_request *request; + u32 header; + u32 *payload; + bool done; + + spin_lock_irqsave(&ct->lock, flags); + request = list_first_entry_or_null(&ct->incoming_requests, + struct ct_incoming_request, link); + if (request) + list_del(&request->link); + done = !!list_empty(&ct->incoming_requests); + spin_unlock_irqrestore(&ct->lock, flags); + + if (!request) + return true; + + header = request->msg[0]; + payload = &request->msg[1]; + ct_process_request(ct, + ct_header_get_action(header), + ct_header_get_len(header), + payload); + + kfree(request); + return done; +} + +static void ct_incoming_request_worker_func(struct work_struct *w) +{ + struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, worker); + bool done; + + done = ct_process_incoming_requests(ct); + if (!done) + queue_work(system_unbound_wq, &ct->worker); +} + +/** + * DOC: CTB GuC to Host request + * + * Format of the CTB GuC to Host request message is as follows:: + * + * +------------+---------+---------+---------+---------+---------+ + * | msg[0] | [1] | [2] | [3] | ... 
| [n-1] | + * +------------+---------+---------+---------+---------+---------+ + * | MESSAGE | MESSAGE PAYLOAD | + * + HEADER +---------+---------+---------+---------+---------+ + * | | 0 | 1 | 2 | ... | n | + * +============+=========+=========+=========+=========+=========+ + * | len | request specific data | + * +------+-----+---------+---------+---------+---------+---------+ + * + * ^-----------------------len-----------------------^ + */ + +static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg) +{ + u32 header = msg[0]; + u32 len = ct_header_get_len(header); + u32 msglen = len + 1; /* total message length including header */ + struct ct_incoming_request *request; + unsigned long flags; + + GEM_BUG_ON(ct_header_is_response(header)); + + request = kmalloc(sizeof(*request) + 4 * msglen, GFP_ATOMIC); + if (unlikely(!request)) { + DRM_ERROR("CT: dropping request %*phn\n", 4 * msglen, msg); + return 0; /* XXX: -ENOMEM ? */ + } + memcpy(request->msg, msg, 4 * msglen); + + spin_lock_irqsave(&ct->lock, flags); + list_add_tail(&request->link, &ct->incoming_requests); + spin_unlock_irqrestore(&ct->lock, flags); + + queue_work(system_unbound_wq, &ct->worker); + return 0; +} + +static void ct_process_host_channel(struct intel_guc_ct *ct) +{ + struct intel_guc_ct_channel *ctch = &ct->host_channel; + struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_RECV]; + u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */ + int err = 0; + + if (!ctch_is_open(ctch)) + return; + + do { + err = ctb_read(ctb, msg); + if (err) + break; + + if (ct_header_is_response(msg[0])) + err = ct_handle_response(ct, msg); + else + err = ct_handle_request(ct, msg); + } while (!err); + + if (GEM_WARN_ON(err == -EPROTO)) { + DRM_ERROR("CT: corrupted message detected!\n"); + ctb->desc->is_in_error = 1; + } +} + +/* + * When we're communicating with the GuC over CT, GuC uses events + * to notify us about new messages being posted on the RECV buffer. + */ +static void intel_guc_to_host_event_handler_ct(struct intel_guc *guc) +{ + struct intel_guc_ct *ct = &guc->ct; + + ct_process_host_channel(ct); +} + +/** + * intel_guc_ct_enable - Enable buffer based command transport. + * @ct: pointer to CT struct + * * Shall only be called for platforms with HAS_GUC_CT. - * @guc: the guc - * return: 0 on success - * non-zero on failure + * + * Return: 0 on success, a negative errno code on failure. */ -int intel_guc_enable_ct(struct intel_guc *guc) +int intel_guc_ct_enable(struct intel_guc_ct *ct) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct intel_guc_ct_channel *ctch = &guc->ct.host_channel; + struct intel_guc *guc = ct_to_guc(ct); + struct drm_i915_private *i915 = guc_to_i915(guc); + struct intel_guc_ct_channel *ctch = &ct->host_channel; int err; - GEM_BUG_ON(!HAS_GUC_CT(dev_priv)); + GEM_BUG_ON(!HAS_GUC_CT(i915)); err = ctch_open(guc, ctch); if (unlikely(err)) @@ -435,21 +874,24 @@ int intel_guc_enable_ct(struct intel_guc *guc) /* Switch into cmd transport buffer based send() */ guc->send = intel_guc_send_ct; + guc->handler = intel_guc_to_host_event_handler_ct; DRM_INFO("CT: %s\n", enableddisabled(true)); return 0; } /** - * Disable buffer based command transport. + * intel_guc_ct_disable - Disable buffer based command transport. + * @ct: pointer to CT struct + * * Shall only be called for platforms with HAS_GUC_CT. 
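ct_process_host_channel() above drains the RECV ring one message at a time and routes each on a single test: a message whose action field equals INTEL_GUC_ACTION_DEFAULT (0x0) is a response to an earlier request, anything else is a GuC-originated request. The sketch below models that dispatch with illustrative shift/mask values; the authoritative GUC_CT_MSG_* definitions live in intel_guc_fwif.h and may differ:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative header layout: payload len in the low bits, action above. */
    #define CT_MSG_LEN_SHIFT    0
    #define CT_MSG_LEN_MASK     0xFFu
    #define CT_MSG_ACTION_SHIFT 16
    #define CT_MSG_ACTION_MASK  0xFFFFu

    #define ACTION_DEFAULT 0x0  /* responses are tagged with this action */

    static unsigned int header_len(uint32_t header)
    {
            return (header >> CT_MSG_LEN_SHIFT) & CT_MSG_LEN_MASK;
    }

    static unsigned int header_action(uint32_t header)
    {
            return (header >> CT_MSG_ACTION_SHIFT) & CT_MSG_ACTION_MASK;
    }

    static void dispatch(uint32_t header)
    {
            if (header_action(header) == ACTION_DEFAULT)
                    printf("response, %u payload dwords\n", header_len(header));
            else
                    printf("request %#x, %u payload dwords\n",
                           header_action(header), header_len(header));
    }

    int main(void)
    {
            dispatch((0x0u << CT_MSG_ACTION_SHIFT) | 3u);  /* fence, status, data */
            dispatch((0x30u << CT_MSG_ACTION_SHIFT) | 1u); /* made-up request */
            return 0;
    }

Responses are matched against ct->pending_requests by fence and complete the waiter in place, while requests are copied onto ct->incoming_requests and handed to the worker, because ct_handle_response() runs in interrupt context (note the GEM_BUG_ON(!in_irq())).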
- * @guc: the guc */ -void intel_guc_disable_ct(struct intel_guc *guc) +void intel_guc_ct_disable(struct intel_guc_ct *ct) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct intel_guc_ct_channel *ctch = &guc->ct.host_channel; + struct intel_guc *guc = ct_to_guc(ct); + struct drm_i915_private *i915 = guc_to_i915(guc); + struct intel_guc_ct_channel *ctch = &ct->host_channel; - GEM_BUG_ON(!HAS_GUC_CT(dev_priv)); + GEM_BUG_ON(!HAS_GUC_CT(i915)); if (!ctch_is_open(ctch)) return; @@ -458,5 +900,6 @@ void intel_guc_disable_ct(struct intel_guc *guc) /* Disable send */ guc->send = intel_guc_send_nop; + guc->handler = intel_guc_to_host_event_handler_nop; DRM_INFO("CT: %s\n", enableddisabled(false)); } diff --git a/drivers/gpu/drm/i915/intel_guc_ct.h b/drivers/gpu/drm/i915/intel_guc_ct.h index 6d97f36fcc62..d774895ab143 100644 --- a/drivers/gpu/drm/i915/intel_guc_ct.h +++ b/drivers/gpu/drm/i915/intel_guc_ct.h @@ -75,12 +75,22 @@ struct intel_guc_ct_channel { struct intel_guc_ct { struct intel_guc_ct_channel host_channel; /* other channels are tbd */ + + /** @lock: protects pending requests list */ + spinlock_t lock; + + /** @pending_requests: list of requests waiting for response */ + struct list_head pending_requests; + + /** @incoming_requests: list of incoming requests */ + struct list_head incoming_requests; + + /** @worker: worker for handling incoming requests */ + struct work_struct worker; }; void intel_guc_ct_init_early(struct intel_guc_ct *ct); - -/* XXX: move to intel_uc.h ? don't fit there either */ -int intel_guc_enable_ct(struct intel_guc *guc); -void intel_guc_disable_ct(struct intel_guc *guc); +int intel_guc_ct_enable(struct intel_guc_ct *ct); +void intel_guc_ct_disable(struct intel_guc_ct *ct); #endif /* _INTEL_GUC_CT_H_ */ diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c index d07f2b985f1c..a9e6fcce467c 100644 --- a/drivers/gpu/drm/i915/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/intel_guc_fw.c @@ -165,7 +165,7 @@ static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma) I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size); /* Set the source address for the new blob */ - offset = guc_ggtt_offset(vma) + guc_fw->header_offset; + offset = intel_guc_ggtt_offset(guc, vma) + guc_fw->header_offset; I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset)); I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF); @@ -275,9 +275,8 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma) * Called from intel_uc_init_hw() during driver load, resume from sleep and * after a GPU reset. * - * The firmware image should have already been fetched into memory by the - * earlier call to intel_uc_init_fw(), so here we need to only check that - * fetch succeeded, and then transfer the image to the h/w. + * The firmware image should have already been fetched into memory, so only + * check that fetch succeeded, and then transfer the image to the h/w. 
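guc_xfer_ucode() in the hunk above programs the DMA source as a 64-bit address split across two registers, keeping only the low 16 bits of the upper half, which suggests the engine accepts at most a 48-bit address; in practice a GGTT offset fits entirely in DMA_ADDR_0_LOW. A user-space model of the split (the helper names mimic the kernel's lower_32_bits()/upper_32_bits(), everything else is local to the sketch):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
    static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

    int main(void)
    {
            /* A made-up GGTT offset plus firmware header offset. */
            uint64_t offset = 0x00fe1000u;

            /* Mirrors the DMA_ADDR_0_LOW / DMA_ADDR_0_HIGH writes above. */
            printf("LOW=%#x HIGH=%#x\n",
                   lower_32_bits(offset), upper_32_bits(offset) & 0xFFFF);
            return 0;
    }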
* * Return: non-zero code on error */ diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h index 6a10aa6f04d3..d73673f5d30c 100644 --- a/drivers/gpu/drm/i915/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/intel_guc_fwif.h @@ -127,7 +127,7 @@ #define GUC_PROFILE_ENABLED (1 << 7) #define GUC_WQ_TRACK_ENABLED (1 << 8) #define GUC_ADS_ENABLED (1 << 9) -#define GUC_DEBUG_RESERVED (1 << 10) +#define GUC_LOG_DEFAULT_DISABLED (1 << 10) #define GUC_ADS_ADDR_SHIFT 11 #define GUC_ADS_ADDR_MASK 0xfffff800 @@ -327,6 +327,58 @@ struct guc_stage_desc { u64 desc_private; } __packed; +/** + * DOC: CTB based communication + * + * The CTB (command transport buffer) communication between Host and GuC + * is based on a u32 data stream written to the shared buffer. One buffer can + * be used to transmit data only in one direction (one-directional channel). + * + * The current status of each buffer is stored in the buffer descriptor. + * The buffer descriptor holds tail and head fields that represent the active + * data stream. The tail field is updated by the data producer (sender), and + * the head field is updated by the data consumer (receiver):: + * + * +------------+ + * | DESCRIPTOR | +=================+============+========+ + * +============+ | | MESSAGE(s) | | + * | address |--------->+=================+============+========+ + * +------------+ + * | head | ^-----head--------^ + * +------------+ + * | tail | ^---------tail-----------------^ + * +------------+ + * | size | ^---------------size--------------------^ + * +------------+ + * + * Each message in the data stream starts with a single u32 treated as a + * header, followed by an optional set of u32 data that makes up the + * message-specific payload:: + * + * +------------+---------+---------+---------+ + * | MESSAGE | + * +------------+---------+---------+---------+ + * | msg[0] | [1] | ... | [n-1] | + * +------------+---------+---------+---------+ + * | MESSAGE | MESSAGE PAYLOAD | + * + HEADER +---------+---------+---------+ + * | | 0 | ... | n | + * +======+=====+=========+=========+=========+ + * | 31:16| code| | | | + * +------+-----+ | | | + * | 15:5|flags| | | | + * +------+-----+ | | | + * | 4:0| len| | | | + * +------+-----+---------+---------+---------+ + * + * ^-------------len-------------^ + * + * The message header consists of: + * + * - **len**, indicates the length of the message payload (in u32) + * - **code**, indicates the message code + * - **flags**, holds various bits to control message handling + */ + /* * Describes single command transport buffer. * Used by both guc-master and clients. @@ -534,16 +586,6 @@ struct guc_log_buffer_state { u32 version; } __packed; -union guc_log_control { - struct { - u32 logging_enabled:1; - u32 reserved1:3; - u32 verbosity:4; - u32 reserved2:24; - }; - u32 value; -} __packed; - struct guc_ctx_report { u32 report_return_status; u32 reserved1[64]; @@ -570,7 +612,68 @@ struct guc_shared_ctx_data { struct guc_ctx_report preempt_ctx_report[GUC_MAX_ENGINES_NUM]; } __packed; -/* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */ +/** + * DOC: MMIO based communication + * + * The MMIO based communication between Host and GuC uses software scratch + * registers, where the first register holds data treated as the message + * header, and the remaining registers hold the message payload. + * + * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8 + * + * +-----------+---------+---------+---------+ + * | MMIO[0] | MMIO[1] | ...
| MMIO[n] | + +-----------+---------+---------+---------+ + | header | optional payload | + +======+====+=========+=========+=========+ + | 31:28|type| | | | + +------+----+ | | | + | 27:16|data| | | | + +------+----+ | | | + | 15:0|code| | | | + +------+----+---------+---------+---------+ + * + * The message header consists of: + * + * - **type**, indicates the message type + * - **code**, indicates the message code, specific to **type** + * - **data**, indicates the message data, optional, depends on **code** + * + * The following message **types** are supported: + * + * - **REQUEST**, indicates a Host-to-GuC request, the requested GuC action code + * must be provided in the **code** field. Optional action-specific parameters + * can be provided in the remaining payload registers or in the **data** field. + * + * - **RESPONSE**, indicates a GuC-to-Host response to an earlier GuC request, + * the action response status will be provided in the **code** field. Optional + * response data can be returned in the remaining payload registers or in the + * **data** field. + */ + +#define INTEL_GUC_MSG_TYPE_SHIFT 28 +#define INTEL_GUC_MSG_TYPE_MASK (0xF << INTEL_GUC_MSG_TYPE_SHIFT) +#define INTEL_GUC_MSG_DATA_SHIFT 16 +#define INTEL_GUC_MSG_DATA_MASK (0xFFF << INTEL_GUC_MSG_DATA_SHIFT) +#define INTEL_GUC_MSG_CODE_SHIFT 0 +#define INTEL_GUC_MSG_CODE_MASK (0xFFFF << INTEL_GUC_MSG_CODE_SHIFT) + +#define __INTEL_GUC_MSG_GET(T, m) \ + (((m) & INTEL_GUC_MSG_ ## T ## _MASK) >> INTEL_GUC_MSG_ ## T ## _SHIFT) +#define INTEL_GUC_MSG_TO_TYPE(m) __INTEL_GUC_MSG_GET(TYPE, m) +#define INTEL_GUC_MSG_TO_DATA(m) __INTEL_GUC_MSG_GET(DATA, m) +#define INTEL_GUC_MSG_TO_CODE(m) __INTEL_GUC_MSG_GET(CODE, m) + +enum intel_guc_msg_type { + INTEL_GUC_MSG_TYPE_REQUEST = 0x0, + INTEL_GUC_MSG_TYPE_RESPONSE = 0xF, +}; + +#define __INTEL_GUC_MSG_TYPE_IS(T, m) \ + (INTEL_GUC_MSG_TO_TYPE(m) == INTEL_GUC_MSG_TYPE_ ## T) +#define INTEL_GUC_MSG_IS_REQUEST(m) __INTEL_GUC_MSG_TYPE_IS(REQUEST, m) +#define INTEL_GUC_MSG_IS_RESPONSE(m) __INTEL_GUC_MSG_TYPE_IS(RESPONSE, m) + enum intel_guc_action { INTEL_GUC_ACTION_DEFAULT = 0x0, INTEL_GUC_ACTION_REQUEST_PREEMPTION = 0x2, @@ -602,24 +705,22 @@ enum intel_guc_report_status { INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4, }; -/* - * The GuC sends its response to a command by overwriting the - * command in SS0. The response is distinguishable from a command - * by the fact that all the MASK bits are set. The remaining bits - * give more detail.
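The INTEL_GUC_MSG_* accessors above are plain shift-and-mask helpers and compile unchanged outside the kernel. A short sketch exercising them; only the scaffolding (includes, main(), the `u` literal suffixes) is added here, the field layout is copied from the hunk above:

    #include <stdint.h>
    #include <stdio.h>

    #define INTEL_GUC_MSG_TYPE_SHIFT 28
    #define INTEL_GUC_MSG_TYPE_MASK  (0xFu << INTEL_GUC_MSG_TYPE_SHIFT)
    #define INTEL_GUC_MSG_DATA_SHIFT 16
    #define INTEL_GUC_MSG_DATA_MASK  (0xFFFu << INTEL_GUC_MSG_DATA_SHIFT)
    #define INTEL_GUC_MSG_CODE_SHIFT 0
    #define INTEL_GUC_MSG_CODE_MASK  (0xFFFFu << INTEL_GUC_MSG_CODE_SHIFT)

    #define __INTEL_GUC_MSG_GET(T, m) \
            (((m) & INTEL_GUC_MSG_##T##_MASK) >> INTEL_GUC_MSG_##T##_SHIFT)
    #define INTEL_GUC_MSG_TO_TYPE(m) __INTEL_GUC_MSG_GET(TYPE, m)
    #define INTEL_GUC_MSG_TO_DATA(m) __INTEL_GUC_MSG_GET(DATA, m)
    #define INTEL_GUC_MSG_TO_CODE(m) __INTEL_GUC_MSG_GET(CODE, m)

    int main(void)
    {
            /* A RESPONSE (type 0xF) carrying data 0x123 and SUCCESS code 0x0. */
            uint32_t msg = (0xFu << INTEL_GUC_MSG_TYPE_SHIFT) |
                           (0x123u << INTEL_GUC_MSG_DATA_SHIFT);

            printf("type=%#x data=%#x code=%#x\n",
                   INTEL_GUC_MSG_TO_TYPE(msg),
                   INTEL_GUC_MSG_TO_DATA(msg),
                   INTEL_GUC_MSG_TO_CODE(msg)); /* type=0xf data=0x123 code=0 */
            return 0;
    }

INTEL_GUC_MSG_IS_RESPONSE_SUCCESS() in the next hunk is exactly these two tests fused into one mask compare: the type field must be RESPONSE (0xF) and the code field must be SUCCESS (0x0).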
- */ -#define INTEL_GUC_RECV_MASK ((u32)0xF0000000) -#define INTEL_GUC_RECV_IS_RESPONSE(x) ((u32)(x) >= INTEL_GUC_RECV_MASK) -#define INTEL_GUC_RECV_STATUS(x) (INTEL_GUC_RECV_MASK | (x)) - -/* GUC will return status back to SOFT_SCRATCH_O_REG */ -enum intel_guc_status { - INTEL_GUC_STATUS_SUCCESS = INTEL_GUC_RECV_STATUS(0x0), - INTEL_GUC_STATUS_ALLOCATE_DOORBELL_FAIL = INTEL_GUC_RECV_STATUS(0x10), - INTEL_GUC_STATUS_DEALLOCATE_DOORBELL_FAIL = INTEL_GUC_RECV_STATUS(0x20), - INTEL_GUC_STATUS_GENERIC_FAIL = INTEL_GUC_RECV_STATUS(0x0000F000) +#define GUC_LOG_CONTROL_LOGGING_ENABLED (1 << 0) +#define GUC_LOG_CONTROL_VERBOSITY_SHIFT 4 +#define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT) +#define GUC_LOG_CONTROL_DEFAULT_LOGGING (1 << 8) + +enum intel_guc_response_status { + INTEL_GUC_RESPONSE_STATUS_SUCCESS = 0x0, + INTEL_GUC_RESPONSE_STATUS_GENERIC_FAIL = 0xF000, }; +#define INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(m) \ + (typecheck(u32, (m)) && \ + ((m) & (INTEL_GUC_MSG_TYPE_MASK | INTEL_GUC_MSG_CODE_MASK)) == \ + ((INTEL_GUC_MSG_TYPE_RESPONSE << INTEL_GUC_MSG_TYPE_SHIFT) | \ + (INTEL_GUC_RESPONSE_STATUS_SUCCESS << INTEL_GUC_MSG_CODE_SHIFT))) + /* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */ enum intel_guc_recv_message { INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED = BIT(1), diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c index c0c2e7d1c7d7..401e1704d61e 100644 --- a/drivers/gpu/drm/i915/intel_guc_log.c +++ b/drivers/gpu/drm/i915/intel_guc_log.c @@ -23,12 +23,11 @@ */ #include <linux/debugfs.h> -#include <linux/relay.h> #include "intel_guc_log.h" #include "i915_drv.h" -static void guc_log_capture_logs(struct intel_guc *guc); +static void guc_log_capture_logs(struct intel_guc_log *log); /** * DOC: GuC firmware log @@ -39,7 +38,7 @@ static void guc_log_capture_logs(struct intel_guc *guc); * registers value. */ -static int guc_log_flush_complete(struct intel_guc *guc) +static int guc_action_flush_log_complete(struct intel_guc *guc) { u32 action[] = { INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE @@ -48,7 +47,7 @@ static int guc_log_flush_complete(struct intel_guc *guc) return intel_guc_send(guc, action, ARRAY_SIZE(action)); } -static int guc_log_flush(struct intel_guc *guc) +static int guc_action_flush_log(struct intel_guc *guc) { u32 action[] = { INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH, @@ -58,22 +57,40 @@ static int guc_log_flush(struct intel_guc *guc) return intel_guc_send(guc, action, ARRAY_SIZE(action)); } -static int guc_log_control(struct intel_guc *guc, bool enable, u32 verbosity) +static int guc_action_control_log(struct intel_guc *guc, bool enable, + bool default_logging, u32 verbosity) { - union guc_log_control control_val = { - { - .logging_enabled = enable, - .verbosity = verbosity, - }, - }; u32 action[] = { INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING, - control_val.value + (enable ? GUC_LOG_CONTROL_LOGGING_ENABLED : 0) | + (verbosity << GUC_LOG_CONTROL_VERBOSITY_SHIFT) | + (default_logging ? 
GUC_LOG_CONTROL_DEFAULT_LOGGING : 0) }; + GEM_BUG_ON(verbosity > GUC_LOG_VERBOSITY_MAX); + return intel_guc_send(guc, action, ARRAY_SIZE(action)); } +static inline struct intel_guc *log_to_guc(struct intel_guc_log *log) +{ + return container_of(log, struct intel_guc, log); +} + +static void guc_log_enable_flush_events(struct intel_guc_log *log) +{ + intel_guc_enable_msg(log_to_guc(log), + INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER | + INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED); +} + +static void guc_log_disable_flush_events(struct intel_guc_log *log) +{ + intel_guc_disable_msg(log_to_guc(log), + INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER | + INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED); +} + /* * Sub buffer switch callback. Called whenever relay has to switch to a new * sub buffer, relay stays on the same sub buffer if 0 is returned. @@ -121,14 +138,7 @@ static struct dentry *create_buf_file_callback(const char *filename, if (!parent) return NULL; - /* - * Not using the channel filename passed as an argument, since for each - * channel relay appends the corresponding CPU number to the filename - * passed in relay_open(). This should be fine as relay just needs a - * dentry of the file associated with the channel buffer and that file's - * name need not be same as the filename passed as an argument. - */ - buf_file = debugfs_create_file("guc_log", mode, + buf_file = debugfs_create_file(filename, mode, parent, buf, &relay_file_operations); return buf_file; } @@ -149,59 +159,7 @@ static struct rchan_callbacks relay_callbacks = { .remove_buf_file = remove_buf_file_callback, }; -static int guc_log_relay_file_create(struct intel_guc *guc) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct dentry *log_dir; - int ret; - - if (!i915_modparams.guc_log_level) - return 0; - - mutex_lock(&guc->log.runtime.relay_lock); - - /* For now create the log file in /sys/kernel/debug/dri/0 dir */ - log_dir = dev_priv->drm.primary->debugfs_root; - - /* - * If /sys/kernel/debug/dri/0 location do not exist, then debugfs is - * not mounted and so can't create the relay file. - * The relay API seems to fit well with debugfs only, for availing relay - * there are 3 requirements which can be met for debugfs file only in a - * straightforward/clean manner :- - * i) Need the associated dentry pointer of the file, while opening the - * relay channel. - * ii) Should be able to use 'relay_file_operations' fops for the file. - * iii) Set the 'i_private' field of file's inode to the pointer of - * relay channel buffer. - */ - if (!log_dir) { - DRM_ERROR("Debugfs dir not available yet for GuC log file\n"); - ret = -ENODEV; - goto out_unlock; - } - - ret = relay_late_setup_files(guc->log.runtime.relay_chan, "guc_log", log_dir); - if (ret < 0 && ret != -EEXIST) { - DRM_ERROR("Couldn't associate relay chan with file %d\n", ret); - goto out_unlock; - } - - ret = 0; - -out_unlock: - mutex_unlock(&guc->log.runtime.relay_lock); - return ret; -} - -static bool guc_log_has_relay(struct intel_guc *guc) -{ - lockdep_assert_held(&guc->log.runtime.relay_lock); - - return guc->log.runtime.relay_chan != NULL; -} - -static void guc_move_to_next_buf(struct intel_guc *guc) +static void guc_move_to_next_buf(struct intel_guc_log *log) { /* * Make sure the updates made in the sub buffer are visible when @@ -209,21 +167,15 @@ static void guc_move_to_next_buf(struct intel_guc *guc) */ smp_wmb(); - if (!guc_log_has_relay(guc)) - return; - /* All data has been written, so now move the offset of sub buffer. 
*/ - relay_reserve(guc->log.runtime.relay_chan, guc->log.vma->obj->base.size); + relay_reserve(log->relay.channel, log->vma->obj->base.size); /* Switch to the next sub buffer */ - relay_flush(guc->log.runtime.relay_chan); + relay_flush(log->relay.channel); } -static void *guc_get_write_buffer(struct intel_guc *guc) +static void *guc_get_write_buffer(struct intel_guc_log *log) { - if (!guc_log_has_relay(guc)) - return NULL; - /* * Just get the base address of a new sub buffer and copy data into it * ourselves. NULL will be returned in no-overwrite mode, if all sub @@ -233,25 +185,25 @@ static void *guc_get_write_buffer(struct intel_guc *guc) * done without using relay_reserve() along with relay_write(). So its * better to use relay_reserve() alone. */ - return relay_reserve(guc->log.runtime.relay_chan, 0); + return relay_reserve(log->relay.channel, 0); } -static bool guc_check_log_buf_overflow(struct intel_guc *guc, +static bool guc_check_log_buf_overflow(struct intel_guc_log *log, enum guc_log_buffer_type type, unsigned int full_cnt) { - unsigned int prev_full_cnt = guc->log.prev_overflow_count[type]; + unsigned int prev_full_cnt = log->stats[type].sampled_overflow; bool overflow = false; if (full_cnt != prev_full_cnt) { overflow = true; - guc->log.prev_overflow_count[type] = full_cnt; - guc->log.total_overflow_count[type] += full_cnt - prev_full_cnt; + log->stats[type].overflow = full_cnt; + log->stats[type].sampled_overflow += full_cnt - prev_full_cnt; if (full_cnt < prev_full_cnt) { /* buffer_full_cnt is a 4 bit counter */ - guc->log.total_overflow_count[type] += 16; + log->stats[type].sampled_overflow += 16; } DRM_ERROR_RATELIMITED("GuC log buffer overflow\n"); } @@ -275,7 +227,7 @@ static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type) return 0; } -static void guc_read_update_log_buffer(struct intel_guc *guc) +static void guc_read_update_log_buffer(struct intel_guc_log *log) { unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt; struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state; @@ -284,16 +236,16 @@ static void guc_read_update_log_buffer(struct intel_guc *guc) void *src_data, *dst_data; bool new_overflow; - if (WARN_ON(!guc->log.runtime.buf_addr)) - return; + mutex_lock(&log->relay.lock); - /* Get the pointer to shared GuC log buffer */ - log_buf_state = src_data = guc->log.runtime.buf_addr; + if (WARN_ON(!intel_guc_log_relay_enabled(log))) + goto out_unlock; - mutex_lock(&guc->log.runtime.relay_lock); + /* Get the pointer to shared GuC log buffer */ + log_buf_state = src_data = log->relay.buf_addr; /* Get the pointer to local buffer to store the logs */ - log_buf_snapshot_state = dst_data = guc_get_write_buffer(guc); + log_buf_snapshot_state = dst_data = guc_get_write_buffer(log); if (unlikely(!log_buf_snapshot_state)) { /* @@ -301,10 +253,9 @@ static void guc_read_update_log_buffer(struct intel_guc *guc) * getting consumed by User at a slow rate. 
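The wrap handling in guc_check_log_buf_overflow() above is worth a close read: buffer_full_cnt is only a 4-bit hardware counter, so a sample smaller than the previous one means the counter wrapped and 16 must be added back. Because the arithmetic is unsigned, and therefore modular, the underflowing `full_cnt - prev_full_cnt` and the `+ 16` correction cancel out to the true delta. A standalone sketch of just that accounting (names local to the sketch):

    #include <stdint.h>
    #include <stdio.h>

    /* total accumulates overflows; prev/cur are 4-bit counter samples. */
    static uint32_t account(uint32_t total, uint32_t prev, uint32_t cur)
    {
            if (cur != prev) {
                    total += cur - prev; /* unsigned: may wrap below zero... */
                    if (cur < prev)
                            total += 16; /* ...which the 4-bit fixup undoes */
            }
            return total;
    }

    int main(void)
    {
            /* The counter went 14 -> 15 -> 0 -> 1 -> 2: four overflows. */
            printf("%u\n", account(0, 14, 2)); /* prints 4 */
            return 0;
    }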
*/ DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n"); - guc->log.capture_miss_count++; - mutex_unlock(&guc->log.runtime.relay_lock); + log->relay.full_count++; - return; + goto out_unlock; } /* Actual logs are present from the 2nd page */ @@ -325,8 +276,8 @@ static void guc_read_update_log_buffer(struct intel_guc *guc) full_cnt = log_buf_state_local.buffer_full_cnt; /* Bookkeeping stuff */ - guc->log.flush_count[type] += log_buf_state_local.flush_to_file; - new_overflow = guc_check_log_buf_overflow(guc, type, full_cnt); + log->stats[type].flush += log_buf_state_local.flush_to_file; + new_overflow = guc_check_log_buf_overflow(log, type, full_cnt); /* Update the state of shared log buffer */ log_buf_state->read_ptr = write_offset; @@ -373,38 +324,35 @@ static void guc_read_update_log_buffer(struct intel_guc *guc) dst_data += buffer_size; } - guc_move_to_next_buf(guc); + guc_move_to_next_buf(log); - mutex_unlock(&guc->log.runtime.relay_lock); +out_unlock: + mutex_unlock(&log->relay.lock); } static void capture_logs_work(struct work_struct *work) { - struct intel_guc *guc = - container_of(work, struct intel_guc, log.runtime.flush_work); - - guc_log_capture_logs(guc); -} + struct intel_guc_log *log = + container_of(work, struct intel_guc_log, relay.flush_work); -static bool guc_log_has_runtime(struct intel_guc *guc) -{ - return guc->log.runtime.buf_addr != NULL; + guc_log_capture_logs(log); } -static int guc_log_runtime_create(struct intel_guc *guc) +static int guc_log_map(struct intel_guc_log *log) { + struct intel_guc *guc = log_to_guc(log); struct drm_i915_private *dev_priv = guc_to_i915(guc); void *vaddr; int ret; - lockdep_assert_held(&dev_priv->drm.struct_mutex); + lockdep_assert_held(&log->relay.lock); - if (!guc->log.vma) + if (!log->vma) return -ENODEV; - GEM_BUG_ON(guc_log_has_runtime(guc)); - - ret = i915_gem_object_set_to_wc_domain(guc->log.vma->obj, true); + mutex_lock(&dev_priv->drm.struct_mutex); + ret = i915_gem_object_set_to_wc_domain(log->vma->obj, true); + mutex_unlock(&dev_priv->drm.struct_mutex); if (ret) return ret; @@ -413,49 +361,40 @@ static int guc_log_runtime_create(struct intel_guc *guc) * buffer pages, so that we can directly get the data * (up-to-date) from memory. */ - vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC); + vaddr = i915_gem_object_pin_map(log->vma->obj, I915_MAP_WC); if (IS_ERR(vaddr)) { DRM_ERROR("Couldn't map log buffer pages %d\n", ret); return PTR_ERR(vaddr); } - guc->log.runtime.buf_addr = vaddr; + log->relay.buf_addr = vaddr; return 0; } -static void guc_log_runtime_destroy(struct intel_guc *guc) +static void guc_log_unmap(struct intel_guc_log *log) { - /* - * It's possible that the runtime stuff was never allocated because - * GuC log was disabled at the boot time. 
- */ - if (!guc_log_has_runtime(guc)) - return; + lockdep_assert_held(&log->relay.lock); - i915_gem_object_unpin_map(guc->log.vma->obj); - guc->log.runtime.buf_addr = NULL; + i915_gem_object_unpin_map(log->vma->obj); + log->relay.buf_addr = NULL; } -void intel_guc_log_init_early(struct intel_guc *guc) +void intel_guc_log_init_early(struct intel_guc_log *log) { - mutex_init(&guc->log.runtime.relay_lock); - INIT_WORK(&guc->log.runtime.flush_work, capture_logs_work); + mutex_init(&log->relay.lock); + INIT_WORK(&log->relay.flush_work, capture_logs_work); } -int intel_guc_log_relay_create(struct intel_guc *guc) +static int guc_log_relay_create(struct intel_guc_log *log) { + struct intel_guc *guc = log_to_guc(log); struct drm_i915_private *dev_priv = guc_to_i915(guc); struct rchan *guc_log_relay_chan; size_t n_subbufs, subbuf_size; int ret; - if (!i915_modparams.guc_log_level) - return 0; - - mutex_lock(&guc->log.runtime.relay_lock); - - GEM_BUG_ON(guc_log_has_relay(guc)); + lockdep_assert_held(&log->relay.lock); /* Keep the size of sub buffers same as shared log buffer */ subbuf_size = GUC_LOG_SIZE; @@ -468,157 +407,56 @@ int intel_guc_log_relay_create(struct intel_guc *guc) */ n_subbufs = 8; - /* - * Create a relay channel, so that we have buffers for storing - * the GuC firmware logs, the channel will be linked with a file - * later on when debugfs is registered. - */ - guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size, - n_subbufs, &relay_callbacks, dev_priv); + guc_log_relay_chan = relay_open("guc_log", + dev_priv->drm.primary->debugfs_root, + subbuf_size, n_subbufs, + &relay_callbacks, dev_priv); if (!guc_log_relay_chan) { DRM_ERROR("Couldn't create relay chan for GuC logging\n"); ret = -ENOMEM; - goto err; + return ret; } GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size); - guc->log.runtime.relay_chan = guc_log_relay_chan; - - mutex_unlock(&guc->log.runtime.relay_lock); + log->relay.channel = guc_log_relay_chan; return 0; - -err: - mutex_unlock(&guc->log.runtime.relay_lock); - /* logging will be off */ - i915_modparams.guc_log_level = 0; - return ret; -} - -void intel_guc_log_relay_destroy(struct intel_guc *guc) -{ - mutex_lock(&guc->log.runtime.relay_lock); - - /* - * It's possible that the relay was never allocated because - * GuC log was disabled at the boot time. - */ - if (!guc_log_has_relay(guc)) - goto out_unlock; - - relay_close(guc->log.runtime.relay_chan); - guc->log.runtime.relay_chan = NULL; - -out_unlock: - mutex_unlock(&guc->log.runtime.relay_lock); } -static int guc_log_late_setup(struct intel_guc *guc) +static void guc_log_relay_destroy(struct intel_guc_log *log) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); - int ret; - - if (!guc_log_has_runtime(guc)) { - /* - * If log was disabled at boot time, then setup needed to handle - * log buffer flush interrupts would not have been done yet, so - * do that now. 
- */ - ret = intel_guc_log_relay_create(guc); - if (ret) - goto err; - - mutex_lock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_get(dev_priv); - ret = guc_log_runtime_create(guc); - intel_runtime_pm_put(dev_priv); - mutex_unlock(&dev_priv->drm.struct_mutex); - - if (ret) - goto err_relay; - } - - ret = guc_log_relay_file_create(guc); - if (ret) - goto err_runtime; - - return 0; + lockdep_assert_held(&log->relay.lock); -err_runtime: - mutex_lock(&dev_priv->drm.struct_mutex); - guc_log_runtime_destroy(guc); - mutex_unlock(&dev_priv->drm.struct_mutex); -err_relay: - intel_guc_log_relay_destroy(guc); -err: - /* logging will remain off */ - i915_modparams.guc_log_level = 0; - return ret; + relay_close(log->relay.channel); + log->relay.channel = NULL; } -static void guc_log_capture_logs(struct intel_guc *guc) +static void guc_log_capture_logs(struct intel_guc_log *log) { + struct intel_guc *guc = log_to_guc(log); struct drm_i915_private *dev_priv = guc_to_i915(guc); - guc_read_update_log_buffer(guc); + guc_read_update_log_buffer(log); /* * Generally device is expected to be active only at this * time, so get/put should be really quick. */ intel_runtime_pm_get(dev_priv); - guc_log_flush_complete(guc); - intel_runtime_pm_put(dev_priv); -} - -static void guc_flush_logs(struct intel_guc *guc) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - - if (!USES_GUC_SUBMISSION(dev_priv) || !i915_modparams.guc_log_level) - return; - - /* First disable the interrupts, will be renabled afterwards */ - mutex_lock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_get(dev_priv); - gen9_disable_guc_interrupts(dev_priv); - intel_runtime_pm_put(dev_priv); - mutex_unlock(&dev_priv->drm.struct_mutex); - - /* - * Before initiating the forceful flush, wait for any pending/ongoing - * flush to complete otherwise forceful flush may not actually happen. - */ - flush_work(&guc->log.runtime.flush_work); - - /* Ask GuC to update the log buffer state */ - intel_runtime_pm_get(dev_priv); - guc_log_flush(guc); + guc_action_flush_log_complete(guc); intel_runtime_pm_put(dev_priv); - - /* GuC would have updated log buffer by now, so capture it */ - guc_log_capture_logs(guc); } -int intel_guc_log_create(struct intel_guc *guc) +int intel_guc_log_create(struct intel_guc_log *log) { + struct intel_guc *guc = log_to_guc(log); struct i915_vma *vma; unsigned long offset; u32 flags; int ret; - GEM_BUG_ON(guc->log.vma); - - /* - * We require SSE 4.1 for fast reads from the GuC log buffer and - * it should be present on the chipsets supporting GuC based - * submisssions. 
- */ - if (WARN_ON(!i915_has_memcpy_from_wc())) { - ret = -EINVAL; - goto err; - } + GEM_BUG_ON(log->vma); vma = intel_guc_allocate_vma(guc, GUC_LOG_SIZE); if (IS_ERR(vma)) { @@ -626,13 +464,7 @@ int intel_guc_log_create(struct intel_guc *guc) goto err; } - guc->log.vma = vma; - - if (i915_modparams.guc_log_level) { - ret = guc_log_runtime_create(guc); - if (ret < 0) - goto err_vma; - } + log->vma = vma; /* each allocated unit is a page */ flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL | (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) | (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT); - offset = guc_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */ - guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags; + offset = intel_guc_ggtt_offset(guc, vma) >> PAGE_SHIFT; + log->flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags; return 0; -err_vma: - i915_vma_unpin_and_release(&guc->log.vma); err: /* logging will be off */ i915_modparams.guc_log_level = 0; return ret; } -void intel_guc_log_destroy(struct intel_guc *guc) +void intel_guc_log_destroy(struct intel_guc_log *log) +{ + i915_vma_unpin_and_release(&log->vma); +} + +int intel_guc_log_level_get(struct intel_guc_log *log) { - guc_log_runtime_destroy(guc); - i915_vma_unpin_and_release(&guc->log.vma); + GEM_BUG_ON(!log->vma); + GEM_BUG_ON(i915_modparams.guc_log_level < 0); + + return i915_modparams.guc_log_level; } -int intel_guc_log_control(struct intel_guc *guc, u64 control_val) +int intel_guc_log_level_set(struct intel_guc_log *log, u64 val) { + struct intel_guc *guc = log_to_guc(log); struct drm_i915_private *dev_priv = guc_to_i915(guc); - bool enable_logging = control_val > 0; - u32 verbosity; int ret; - if (!guc->log.vma) - return -ENODEV; + BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0); + GEM_BUG_ON(!log->vma); + GEM_BUG_ON(i915_modparams.guc_log_level < 0); - BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN); - if (control_val > 1 + GUC_LOG_VERBOSITY_MAX) + /* + * GuC recognizes log levels starting from 0 up to max, and we use 0 + * as an indication that logging should be disabled. + */ + if (val < GUC_LOG_LEVEL_DISABLED || val > GUC_LOG_LEVEL_MAX) return -EINVAL; - /* This combination doesn't make sense & won't have any effect */ - if (!enable_logging && !i915_modparams.guc_log_level) - return 0; + mutex_lock(&dev_priv->drm.struct_mutex); - verbosity = enable_logging ? control_val - 1 : 0; + if (i915_modparams.guc_log_level == val) { + ret = 0; + goto out_unlock; + } - ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex); - if (ret) - return ret; intel_runtime_pm_get(dev_priv); - ret = guc_log_control(guc, enable_logging, verbosity); + ret = guc_action_control_log(guc, GUC_LOG_LEVEL_IS_VERBOSE(val), + GUC_LOG_LEVEL_IS_ENABLED(val), + GUC_LOG_LEVEL_TO_VERBOSITY(val)); intel_runtime_pm_put(dev_priv); + if (ret) { + DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret); + goto out_unlock; + } + + i915_modparams.guc_log_level = val; + +out_unlock: mutex_unlock(&dev_priv->drm.struct_mutex); - if (ret < 0) { - DRM_DEBUG_DRIVER("guc_logging_control action failed %d\n", ret); - return ret; - } + return ret; +} - if (enable_logging) { - i915_modparams.guc_log_level = 1 + verbosity; +bool intel_guc_log_relay_enabled(const struct intel_guc_log *log) +{ + return log->relay.buf_addr; +} - /* - * If log was disabled at boot time, then the relay channel file - * wouldn't have been created by now and interrupts also would - * not have been enabled. Try again now, just in case.
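intel_guc_log_level_set() above folds the single guc_log_level modparam into the three separate controls of the H2G action, using the GUC_LOG_LEVEL_* helpers from the intel_guc_log.h hunk further down. The mapping is easiest to see tabulated; this sketch inlines the helpers as plain ternaries (the header version uses a statement expression to avoid evaluating `x` twice) and prints, for each level, the IS_VERBOSE, IS_ENABLED and TO_VERBOSITY values fed to guc_action_control_log():

    #include <stdio.h>

    #define GUC_LOG_LEVEL_DISABLED    0
    #define GUC_LOG_LEVEL_NON_VERBOSE 1
    #define GUC_LOG_LEVEL_IS_ENABLED(x) ((x) > GUC_LOG_LEVEL_DISABLED)
    #define GUC_LOG_LEVEL_IS_VERBOSE(x) ((x) > GUC_LOG_LEVEL_NON_VERBOSE)
    #define GUC_LOG_LEVEL_TO_VERBOSITY(x) \
            (GUC_LOG_LEVEL_IS_VERBOSE(x) ? (x) - 2 : 0)

    int main(void)
    {
            for (int level = 0; level <= 4; level++)
                    printf("level %d -> verbose=%d enabled=%d verbosity=%d\n",
                           level,
                           GUC_LOG_LEVEL_IS_VERBOSE(level),
                           GUC_LOG_LEVEL_IS_ENABLED(level),
                           GUC_LOG_LEVEL_TO_VERBOSITY(level));
            return 0;
    }

So level 0 disables logging, level 1 enables only the default non-verbose logging, and levels 2 through GUC_LOG_LEVEL_MAX map onto GuC verbosities 0 and up.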
- */ - ret = guc_log_late_setup(guc); - if (ret < 0) { - DRM_DEBUG_DRIVER("GuC log late setup failed %d\n", ret); - return ret; - } +int intel_guc_log_relay_open(struct intel_guc_log *log) +{ + int ret; - /* GuC logging is currently the only user of Guc2Host interrupts */ - mutex_lock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_get(dev_priv); - gen9_enable_guc_interrupts(dev_priv); - intel_runtime_pm_put(dev_priv); - mutex_unlock(&dev_priv->drm.struct_mutex); - } else { - /* - * Once logging is disabled, GuC won't generate logs & send an - * interrupt. But there could be some data in the log buffer - * which is yet to be captured. So request GuC to update the log - * buffer state and then collect the left over logs. - */ - guc_flush_logs(guc); + mutex_lock(&log->relay.lock); - /* As logging is disabled, update log level to reflect that */ - i915_modparams.guc_log_level = 0; + if (intel_guc_log_relay_enabled(log)) { + ret = -EEXIST; + goto out_unlock; } - return ret; -} + /* + * We require SSE 4.1 for fast reads from the GuC log buffer and + * it should be present on the chipsets supporting GuC based + * submissions. + */ + if (!i915_has_memcpy_from_wc()) { + ret = -ENXIO; + goto out_unlock; + } -void i915_guc_log_register(struct drm_i915_private *dev_priv) -{ - if (!USES_GUC_SUBMISSION(dev_priv) || !i915_modparams.guc_log_level) - return; + ret = guc_log_relay_create(log); + if (ret) + goto out_unlock; + + ret = guc_log_map(log); + if (ret) + goto out_relay; - guc_log_late_setup(&dev_priv->guc); + mutex_unlock(&log->relay.lock); + + guc_log_enable_flush_events(log); + + /* + * When GuC is logging without us relaying to userspace, we're ignoring + * the flush notification. This means that we need to unconditionally + * flush on relay enabling, since GuC only notifies us once. + */ + queue_work(log->relay.flush_wq, &log->relay.flush_work); + + return 0; + +out_relay: + guc_log_relay_destroy(log); +out_unlock: + mutex_unlock(&log->relay.lock); + + return ret; } -void i915_guc_log_unregister(struct drm_i915_private *dev_priv) +void intel_guc_log_relay_flush(struct intel_guc_log *log) { - struct intel_guc *guc = &dev_priv->guc; + struct intel_guc *guc = log_to_guc(log); + struct drm_i915_private *i915 = guc_to_i915(guc); + + /* + * Before initiating the forceful flush, wait for any pending/ongoing + * flush to complete, otherwise the forceful flush may not actually happen.
+ */ + flush_work(&log->relay.flush_work); - if (!USES_GUC_SUBMISSION(dev_priv)) - return; + intel_runtime_pm_get(i915); + guc_action_flush_log(guc); + intel_runtime_pm_put(i915); - mutex_lock(&dev_priv->drm.struct_mutex); - /* GuC logging is currently the only user of Guc2Host interrupts */ - intel_runtime_pm_get(dev_priv); - gen9_disable_guc_interrupts(dev_priv); - intel_runtime_pm_put(dev_priv); + /* GuC would have updated log buffer by now, so capture it */ + guc_log_capture_logs(log); +} - guc_log_runtime_destroy(guc); - mutex_unlock(&dev_priv->drm.struct_mutex); +void intel_guc_log_relay_close(struct intel_guc_log *log) +{ + guc_log_disable_flush_events(log); + flush_work(&log->relay.flush_work); + + mutex_lock(&log->relay.lock); + GEM_BUG_ON(!intel_guc_log_relay_enabled(log)); + guc_log_unmap(log); + guc_log_relay_destroy(log); + mutex_unlock(&log->relay.lock); +} - intel_guc_log_relay_destroy(guc); +void intel_guc_log_handle_flush_event(struct intel_guc_log *log) +{ + queue_work(log->relay.flush_wq, &log->relay.flush_work); } diff --git a/drivers/gpu/drm/i915/intel_guc_log.h b/drivers/gpu/drm/i915/intel_guc_log.h index dab0e949567a..fa80535a6f9d 100644 --- a/drivers/gpu/drm/i915/intel_guc_log.h +++ b/drivers/gpu/drm/i915/intel_guc_log.h @@ -25,11 +25,12 @@ #ifndef _INTEL_GUC_LOG_H_ #define _INTEL_GUC_LOG_H_ +#include <linux/mutex.h> +#include <linux/relay.h> #include <linux/workqueue.h> #include "intel_guc_fwif.h" -struct drm_i915_private; struct intel_guc; /* @@ -39,33 +40,53 @@ struct intel_guc; #define GUC_LOG_SIZE ((1 + GUC_LOG_DPC_PAGES + 1 + GUC_LOG_ISR_PAGES + \ 1 + GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT) +/* + * While we're using plain log level in i915, GuC controls are much more... + * "elaborate"? We have a couple of bits for verbosity, separate bit for actual + * log enabling, and separate bit for default logging - which "conveniently" + * ignores the enable bit. + */ +#define GUC_LOG_LEVEL_DISABLED 0 +#define GUC_LOG_LEVEL_NON_VERBOSE 1 +#define GUC_LOG_LEVEL_IS_ENABLED(x) ((x) > GUC_LOG_LEVEL_DISABLED) +#define GUC_LOG_LEVEL_IS_VERBOSE(x) ((x) > GUC_LOG_LEVEL_NON_VERBOSE) +#define GUC_LOG_LEVEL_TO_VERBOSITY(x) ({ \ + typeof(x) _x = (x); \ + GUC_LOG_LEVEL_IS_VERBOSE(_x) ? 
_x - 2 : 0; \ +}) +#define GUC_VERBOSITY_TO_LOG_LEVEL(x) ((x) + 2) +#define GUC_LOG_LEVEL_MAX GUC_VERBOSITY_TO_LOG_LEVEL(GUC_LOG_VERBOSITY_MAX) + struct intel_guc_log { u32 flags; struct i915_vma *vma; - /* The runtime stuff gets created only when GuC logging gets enabled */ struct { void *buf_addr; struct workqueue_struct *flush_wq; struct work_struct flush_work; - struct rchan *relay_chan; - /* To serialize the access to relay_chan */ - struct mutex relay_lock; - } runtime; + struct rchan *channel; + struct mutex lock; + u32 full_count; + } relay; /* logging related stats */ - u32 capture_miss_count; - u32 flush_interrupt_count; - u32 prev_overflow_count[GUC_MAX_LOG_BUFFER]; - u32 total_overflow_count[GUC_MAX_LOG_BUFFER]; - u32 flush_count[GUC_MAX_LOG_BUFFER]; + struct { + u32 sampled_overflow; + u32 overflow; + u32 flush; + } stats[GUC_MAX_LOG_BUFFER]; }; -int intel_guc_log_create(struct intel_guc *guc); -void intel_guc_log_destroy(struct intel_guc *guc); -void intel_guc_log_init_early(struct intel_guc *guc); -int intel_guc_log_relay_create(struct intel_guc *guc); -void intel_guc_log_relay_destroy(struct intel_guc *guc); -int intel_guc_log_control(struct intel_guc *guc, u64 control_val); -void i915_guc_log_register(struct drm_i915_private *dev_priv); -void i915_guc_log_unregister(struct drm_i915_private *dev_priv); +void intel_guc_log_init_early(struct intel_guc_log *log); +int intel_guc_log_create(struct intel_guc_log *log); +void intel_guc_log_destroy(struct intel_guc_log *log); + +int intel_guc_log_level_get(struct intel_guc_log *log); +int intel_guc_log_level_set(struct intel_guc_log *log, u64 control_val); +bool intel_guc_log_relay_enabled(const struct intel_guc_log *log); +int intel_guc_log_relay_open(struct intel_guc_log *log); +void intel_guc_log_relay_flush(struct intel_guc_log *log); +void intel_guc_log_relay_close(struct intel_guc_log *log); + +void intel_guc_log_handle_flush_event(struct intel_guc_log *log); #endif diff --git a/drivers/gpu/drm/i915/intel_guc_reg.h b/drivers/gpu/drm/i915/intel_guc_reg.h index 19a9247c5664..d86084742a4a 100644 --- a/drivers/gpu/drm/i915/intel_guc_reg.h +++ b/drivers/gpu/drm/i915/intel_guc_reg.h @@ -66,22 +66,20 @@ #define UOS_MOVE (1<<4) #define START_DMA (1<<0) #define DMA_GUC_WOPCM_OFFSET _MMIO(0xc340) +#define GUC_WOPCM_OFFSET_VALID (1<<0) #define HUC_LOADING_AGENT_VCR (0<<1) #define HUC_LOADING_AGENT_GUC (1<<1) -#define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */ +#define GUC_WOPCM_OFFSET_SHIFT 14 +#define GUC_WOPCM_OFFSET_MASK (0x3ffff << GUC_WOPCM_OFFSET_SHIFT) #define GUC_MAX_IDLE_COUNT _MMIO(0xC3E4) #define HUC_STATUS2 _MMIO(0xD3B0) #define HUC_FW_VERIFIED (1<<7) -/* Defines WOPCM space available to GuC firmware */ #define GUC_WOPCM_SIZE _MMIO(0xc050) -/* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */ -#define GUC_WOPCM_TOP (0x80 << 12) /* 512KB */ -#define BXT_GUC_WOPCM_RC6_RESERVED (0x10 << 12) /* 64KB */ - -/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */ -#define GUC_GGTT_TOP 0xFEE00000 +#define GUC_WOPCM_SIZE_LOCKED (1<<0) +#define GUC_WOPCM_SIZE_SHIFT 12 +#define GUC_WOPCM_SIZE_MASK (0xfffff << GUC_WOPCM_SIZE_SHIFT) #define GEN8_GT_PM_CONFIG _MMIO(0x138140) #define GEN9LP_GT_PM_CONFIG _MMIO(0x138140) diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c index 8a8ad2fe158d..207cda062626 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/intel_guc_submission.c @@ -231,8 +231,8 @@ static int create_doorbell(struct 
intel_guc_client *client) if (ret) { __destroy_doorbell(client); __update_doorbell_desc(client, GUC_DOORBELL_INVALID); - DRM_ERROR("Couldn't create client %u doorbell: %d\n", - client->stage_id, ret); + DRM_DEBUG_DRIVER("Couldn't create client %u doorbell: %d\n", + client->stage_id, ret); return ret; } @@ -386,8 +386,8 @@ static void guc_stage_desc_init(struct intel_guc *guc, lrc->context_desc = lower_32_bits(ce->lrc_desc); /* The state page is after PPHWSP */ - lrc->ring_lrca = - guc_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE; + lrc->ring_lrca = intel_guc_ggtt_offset(guc, ce->state) + + LRC_STATE_PN * PAGE_SIZE; /* XXX: In direct submission, the GuC wants the HW context id * here. In proxy submission, it wants the stage id @@ -395,7 +395,7 @@ static void guc_stage_desc_init(struct intel_guc *guc, lrc->context_id = (client->stage_id << GUC_ELC_CTXID_OFFSET) | (guc_engine_id << GUC_ELC_ENGINE_OFFSET); - lrc->ring_begin = guc_ggtt_offset(ce->ring->vma); + lrc->ring_begin = intel_guc_ggtt_offset(guc, ce->ring->vma); lrc->ring_end = lrc->ring_begin + ce->ring->size - 1; lrc->ring_next_free_location = lrc->ring_begin; lrc->ring_current_tail_pointer_value = 0; @@ -411,7 +411,7 @@ static void guc_stage_desc_init(struct intel_guc *guc, * The doorbell, process descriptor, and workqueue are all parts * of the client object, which the GuC will reference via the GGTT */ - gfx_addr = guc_ggtt_offset(client->vma); + gfx_addr = intel_guc_ggtt_offset(guc, client->vma); desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) + client->doorbell_offset; desc->db_trigger_cpu = ptr_to_u64(__get_doorbell(client)); @@ -584,7 +584,7 @@ static void inject_preempt_context(struct work_struct *work) data[3] = engine->guc_id; data[4] = guc->execbuf_client->priority; data[5] = guc->execbuf_client->stage_id; - data[6] = guc_ggtt_offset(guc->shared_data); + data[6] = intel_guc_ggtt_offset(guc, guc->shared_data); if (WARN_ON(intel_guc_send(guc, data, ARRAY_SIZE(data)))) { execlists_clear_active(&engine->execlists, diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c index c8ea510629fa..fd0ffb8328d0 100644 --- a/drivers/gpu/drm/i915/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/intel_hangcheck.c @@ -246,9 +246,8 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd) */ tmp = I915_READ_CTL(engine); if (tmp & RING_WAIT) { - i915_handle_error(dev_priv, BIT(engine->id), - "Kicking stuck wait on %s", - engine->name); + i915_handle_error(dev_priv, BIT(engine->id), 0, + "stuck wait on %s", engine->name); I915_WRITE_CTL(engine, tmp); return ENGINE_WAIT_KICK; } @@ -258,8 +257,8 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd) default: return ENGINE_DEAD; case 1: - i915_handle_error(dev_priv, ALL_ENGINES, - "Kicking stuck semaphore on %s", + i915_handle_error(dev_priv, ALL_ENGINES, 0, + "stuck semaphore on %s", engine->name); I915_WRITE_CTL(engine, tmp); return ENGINE_WAIT_KICK; @@ -386,13 +385,13 @@ static void hangcheck_declare_hang(struct drm_i915_private *i915, if (stuck != hung) hung &= ~stuck; len = scnprintf(msg, sizeof(msg), - "%s on ", stuck == hung ? "No progress" : "Hang"); + "%s on ", stuck == hung ? 
"no progress" : "hang"); for_each_engine_masked(engine, i915, hung, tmp) len += scnprintf(msg + len, sizeof(msg) - len, "%s, ", engine->name); msg[len-2] = '\0'; - return i915_handle_error(i915, hung, "%s", msg); + return i915_handle_error(i915, hung, I915_ERROR_CAPTURE, "%s", msg); } /* diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 1baef4ac7ecb..ee929f31f7db 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -2082,41 +2082,33 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c * it enables scrambling. This should be called before enabling the HDMI * 2.0 port, as the sink can choose to disable the scrambling if it doesn't * detect a scrambled clock within 100 ms. + * + * Returns: + * True on success, false on failure. */ -void intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder, +bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder, struct drm_connector *connector, bool high_tmds_clock_ratio, bool scrambling) { + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); - struct drm_i915_private *dev_priv = connector->dev->dev_private; struct drm_scrambling *sink_scrambling = - &connector->display_info.hdmi.scdc.scrambling; - struct i2c_adapter *adptr = intel_gmbus_get_adapter(dev_priv, - intel_hdmi->ddc_bus); - bool ret; + &connector->display_info.hdmi.scdc.scrambling; + struct i2c_adapter *adapter = + intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus); if (!sink_scrambling->supported) - return; - - DRM_DEBUG_KMS("Setting sink scrambling for enc:%s connector:%s\n", - encoder->base.name, connector->name); + return true; - /* Set TMDS bit clock ratio to 1/40 or 1/10 */ - ret = drm_scdc_set_high_tmds_clock_ratio(adptr, high_tmds_clock_ratio); - if (!ret) { - DRM_ERROR("Set TMDS ratio failed\n"); - return; - } - - /* Enable/disable sink scrambling */ - ret = drm_scdc_set_scrambling(adptr, scrambling); - if (!ret) { - DRM_ERROR("Set sink scrambling failed\n"); - return; - } + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] scrambling=%s, TMDS bit clock ratio=1/%d\n", + connector->base.id, connector->name, + yesno(scrambling), high_tmds_clock_ratio ? 
40 : 10);
 
-	DRM_DEBUG_KMS("sink scrambling handled\n");
+	/* Set TMDS bit clock ratio to 1/40 or 1/10, and enable/disable scrambling */
+	return drm_scdc_set_high_tmds_clock_ratio(adapter,
+						  high_tmds_clock_ratio) &&
+	       drm_scdc_set_scrambling(adapter, scrambling);
 }
 
 static u8 chv_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 0e3d3e89d66a..43aa92beff2a 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -100,6 +100,8 @@ enum port intel_hpd_pin_to_port(struct drm_i915_private *dev_priv,
 		if (IS_CNL_WITH_PORT_F(dev_priv))
 			return PORT_F;
 		return PORT_E;
+	case HPD_PORT_F:
+		return PORT_F;
 	default:
 		return PORT_NONE; /* no port for this pin */
 	}
@@ -132,6 +134,7 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
 	case PORT_F:
 		if (IS_CNL_WITH_PORT_F(dev_priv))
 			return HPD_PORT_E;
+		return HPD_PORT_F;
 	default:
 		MISSING_CASE(port);
 		return HPD_NONE;
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 65e2afb9b955..291285277403 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -55,7 +55,7 @@ int intel_huc_auth(struct intel_huc *huc)
 		return -ENOEXEC;
 
 	vma = i915_gem_object_ggtt_pin(huc->fw.obj, NULL, 0, 0,
-				       PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+				       PIN_OFFSET_BIAS | guc->ggtt_pin_bias);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		DRM_ERROR("HuC: Failed to pin huc fw object %d\n", ret);
@@ -63,7 +63,8 @@ int intel_huc_auth(struct intel_huc *huc)
 	}
 
 	ret = intel_guc_auth_huc(guc,
-				 guc_ggtt_offset(vma) + huc->fw.rsa_offset);
+				 intel_guc_ggtt_offset(guc, vma) +
+				 huc->fw.rsa_offset);
 	if (ret) {
 		DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret);
 		goto fail_unpin;
@@ -91,3 +92,28 @@ fail:
 	DRM_ERROR("HuC: Authentication failed %d\n", ret);
 	return ret;
 }
+
+/**
+ * intel_huc_check_status() - check HuC status
+ * @huc: intel_huc structure
+ *
+ * This function reads the status register to verify whether the HuC
+ * firmware was successfully loaded.
+ *
+ * Return: a positive value if HuC firmware is loaded and verified,
+ * and -ENODEV if HuC is not present.
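+ *
+ * A typical caller (an illustrative sketch only, not part of this patch;
+ * names such as "m" assume a debugfs/seq_file context):
+ *
+ * ::
+ *
+ *	status = intel_huc_check_status(huc);
+ *	if (status < 0)
+ *		return status;
+ *	seq_printf(m, "HuC verified: %s\n", yesno(status));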
+ */ +int intel_huc_check_status(struct intel_huc *huc) +{ + struct drm_i915_private *dev_priv = huc_to_i915(huc); + u32 status; + + if (!HAS_HUC(dev_priv)) + return -ENODEV; + + intel_runtime_pm_get(dev_priv); + status = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED; + intel_runtime_pm_put(dev_priv); + + return status; +} diff --git a/drivers/gpu/drm/i915/intel_huc.h b/drivers/gpu/drm/i915/intel_huc.h index 5d6e804f9771..aa854907abac 100644 --- a/drivers/gpu/drm/i915/intel_huc.h +++ b/drivers/gpu/drm/i915/intel_huc.h @@ -37,5 +37,12 @@ struct intel_huc { void intel_huc_init_early(struct intel_huc *huc); int intel_huc_auth(struct intel_huc *huc); +int intel_huc_check_status(struct intel_huc *huc); + +static inline int intel_huc_sanitize(struct intel_huc *huc) +{ + intel_uc_fw_sanitize(&huc->fw); + return 0; +} #endif diff --git a/drivers/gpu/drm/i915/intel_huc_fw.c b/drivers/gpu/drm/i915/intel_huc_fw.c index c66afa9b989a..f93d2384d482 100644 --- a/drivers/gpu/drm/i915/intel_huc_fw.c +++ b/drivers/gpu/drm/i915/intel_huc_fw.c @@ -118,7 +118,8 @@ static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma) intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); /* Set the source address for the uCode */ - offset = guc_ggtt_offset(vma) + huc_fw->header_offset; + offset = intel_guc_ggtt_offset(&dev_priv->guc, vma) + + huc_fw->header_offset; I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset)); I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF); @@ -154,9 +155,8 @@ static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma) * Called from intel_uc_init_hw() during driver load, resume from sleep and * after a GPU reset. Note that HuC must be loaded before GuC. * - * The firmware image should have already been fetched into memory by the - * earlier call to intel_uc_init_fw(), so here we need to only check that - * fetch succeeded, and then transfer the image to the h/w. + * The firmware image should have already been fetched into memory, so only + * check that fetch succeeded, and then transfer the image to the h/w. * * Return: non-zero code on error */ diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 697af5add78b..f60b61bf8b3b 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -454,10 +454,11 @@ static void execlists_submit_ports(struct intel_engine_cs *engine) desc = execlists_update_context(rq); GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc)); - GEM_TRACE("%s in[%d]: ctx=%d.%d, seqno=%x, prio=%d\n", + GEM_TRACE("%s in[%d]: ctx=%d.%d, seqno=%d (current %d), prio=%d\n", engine->name, n, port[n].context_id, count, rq->global_seqno, + intel_engine_get_seqno(engine), rq_prio(rq)); } else { GEM_BUG_ON(!n); @@ -577,6 +578,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine) * know the next preemption status we see corresponds * to this ELSP update. 
*/ + GEM_BUG_ON(!execlists_is_active(execlists, + EXECLISTS_ACTIVE_USER)); GEM_BUG_ON(!port_count(&port[0])); if (port_count(&port[0]) > 1) goto unlock; @@ -738,6 +741,59 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists) memset(port, 0, sizeof(*port)); port++; } + + execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER); +} + +static void clear_gtiir(struct intel_engine_cs *engine) +{ + static const u8 gtiir[] = { + [RCS] = 0, + [BCS] = 0, + [VCS] = 1, + [VCS2] = 1, + [VECS] = 3, + }; + struct drm_i915_private *dev_priv = engine->i915; + int i; + + /* TODO: correctly reset irqs for gen11 */ + if (WARN_ON_ONCE(INTEL_GEN(engine->i915) >= 11)) + return; + + GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir)); + + /* + * Clear any pending interrupt state. + * + * We do it twice out of paranoia that some of the IIR are + * double buffered, and so if we only reset it once there may + * still be an interrupt pending. + */ + for (i = 0; i < 2; i++) { + I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]), + engine->irq_keep_mask); + POSTING_READ(GEN8_GT_IIR(gtiir[engine->id])); + } + GEM_BUG_ON(I915_READ(GEN8_GT_IIR(gtiir[engine->id])) & + engine->irq_keep_mask); +} + +static void reset_irq(struct intel_engine_cs *engine) +{ + /* Mark all CS interrupts as complete */ + smp_store_mb(engine->execlists.active, 0); + synchronize_hardirq(engine->i915->drm.irq); + + clear_gtiir(engine); + + /* + * The port is checked prior to scheduling a tasklet, but + * just in case we have suspended the tasklet to do the + * wedging make sure that when it wakes, it decides there + * is no work to do by clearing the irq_posted bit. + */ + clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); } static void execlists_cancel_requests(struct intel_engine_cs *engine) @@ -767,6 +823,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) /* Cancel the requests on the HW and clear the ELSP tracker. */ execlists_cancel_port_requests(execlists); + reset_irq(engine); spin_lock(&engine->timeline->lock); @@ -805,17 +862,6 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) spin_unlock(&engine->timeline->lock); - /* - * The port is checked prior to scheduling a tasklet, but - * just in case we have suspended the tasklet to do the - * wedging make sure that when it wakes, it decides there - * is no work to do by clearing the irq_posted bit. - */ - clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); - - /* Mark all CS interrupts as complete */ - execlists->active = 0; - local_irq_restore(flags); } @@ -954,10 +1000,11 @@ static void execlists_submission_tasklet(unsigned long data) EXECLISTS_ACTIVE_USER)); rq = port_unpack(port, &count); - GEM_TRACE("%s out[0]: ctx=%d.%d, seqno=%x, prio=%d\n", + GEM_TRACE("%s out[0]: ctx=%d.%d, seqno=%d (current %d), prio=%d\n", engine->name, port->context_id, count, rq ? rq->global_seqno : 0, + intel_engine_get_seqno(engine), rq ? rq_prio(rq) : 0); /* Check the context/desc id for this event matches */ @@ -1001,6 +1048,11 @@ static void execlists_submission_tasklet(unsigned long data) if (fw) intel_uncore_forcewake_put(dev_priv, execlists->fw_domains); + + /* If the engine is now idle, so should be the flag; and vice versa. 
*/ + GEM_BUG_ON(execlists_is_active(&engine->execlists, + EXECLISTS_ACTIVE_USER) == + !port_isset(engine->execlists.port)); } static void queue_request(struct intel_engine_cs *engine, @@ -1010,12 +1062,16 @@ static void queue_request(struct intel_engine_cs *engine, list_add_tail(&pt->link, &lookup_priolist(engine, pt, prio)->requests); } +static void __submit_queue(struct intel_engine_cs *engine, int prio) +{ + engine->execlists.queue_priority = prio; + tasklet_hi_schedule(&engine->execlists.tasklet); +} + static void submit_queue(struct intel_engine_cs *engine, int prio) { - if (prio > engine->execlists.queue_priority) { - engine->execlists.queue_priority = prio; - tasklet_hi_schedule(&engine->execlists.tasklet); - } + if (prio > engine->execlists.queue_priority) + __submit_queue(engine, prio); } static void execlists_submit_request(struct i915_request *request) @@ -1148,7 +1204,10 @@ static void execlists_schedule(struct i915_request *request, int prio) __list_del_entry(&pt->link); queue_request(engine, pt, prio); } - submit_queue(engine, prio); + + if (prio > engine->execlists.queue_priority && + i915_sw_fence_done(&pt_to_request(pt)->submit)) + __submit_queue(engine, prio); } spin_unlock_irq(&engine->timeline->lock); @@ -1215,6 +1274,7 @@ execlists_context_pin(struct intel_engine_cs *engine, ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; ce->lrc_reg_state[CTX_RING_BUFFER_START+1] = i915_ggtt_offset(ce->ring->vma); + ce->lrc_reg_state[CTX_RING_HEAD+1] = ce->ring->head; ce->state->obj->pin_global++; i915_gem_context_get(ctx); @@ -1565,14 +1625,6 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) return ret; } -static u8 gtiir[] = { - [RCS] = 0, - [BCS] = 0, - [VCS] = 1, - [VCS2] = 1, - [VECS] = 3, -}; - static void enable_execlists(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; @@ -1656,31 +1708,6 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine) return init_workarounds_ring(engine); } -static void reset_irq(struct intel_engine_cs *engine) -{ - struct drm_i915_private *dev_priv = engine->i915; - int i; - - GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir)); - - /* - * Clear any pending interrupt state. - * - * We do it twice out of paranoia that some of the IIR are double - * buffered, and if we only reset it once there may still be - * an interrupt pending. - */ - for (i = 0; i < 2; i++) { - I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]), - GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift); - POSTING_READ(GEN8_GT_IIR(gtiir[engine->id])); - } - GEM_BUG_ON(I915_READ(GEN8_GT_IIR(gtiir[engine->id])) & - (GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift)); - - clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); -} - static void reset_common_ring(struct intel_engine_cs *engine, struct i915_request *request) { @@ -1694,8 +1721,6 @@ static void reset_common_ring(struct intel_engine_cs *engine, /* See execlists_cancel_requests() for the irq/spinlock split. */ local_irq_save(flags); - reset_irq(engine); - /* * Catch up with any missed context-switch interrupts. * @@ -1706,15 +1731,13 @@ static void reset_common_ring(struct intel_engine_cs *engine, * requests were completed. */ execlists_cancel_port_requests(execlists); + reset_irq(engine); /* Push back any incomplete requests for replay after the reset. 
*/ spin_lock(&engine->timeline->lock); __unwind_incomplete_requests(engine); spin_unlock(&engine->timeline->lock); - /* Mark all CS interrupts as complete */ - execlists->active = 0; - local_irq_restore(flags); /* @@ -2109,7 +2132,20 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine) static inline void logical_ring_default_irqs(struct intel_engine_cs *engine) { - unsigned shift = engine->irq_shift; + unsigned int shift = 0; + + if (INTEL_GEN(engine->i915) < 11) { + const u8 irq_shifts[] = { + [RCS] = GEN8_RCS_IRQ_SHIFT, + [BCS] = GEN8_BCS_IRQ_SHIFT, + [VCS] = GEN8_VCS1_IRQ_SHIFT, + [VCS2] = GEN8_VCS2_IRQ_SHIFT, + [VECS] = GEN8_VECS_IRQ_SHIFT, + }; + + shift = irq_shifts[engine->id]; + } + engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift; engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; } diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 36671a937fa4..c2f10d899329 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -807,6 +807,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, ret = PTR_ERR(vma); goto out_pin_section; } + intel_fb_obj_flush(new_bo, ORIGIN_DIRTYFB); ret = i915_vma_put_fence(vma); if (ret) diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c index 1f5cd572a7ff..4f367c16e9e5 100644 --- a/drivers/gpu/drm/i915/intel_pipe_crc.c +++ b/drivers/gpu/drm/i915/intel_pipe_crc.c @@ -569,7 +569,8 @@ unlock: static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, enum pipe pipe, enum intel_pipe_crc_source *source, - uint32_t *val) + uint32_t *val, + bool set_wa) { if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) *source = INTEL_PIPE_CRC_SOURCE_PF; @@ -582,7 +583,7 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB; break; case INTEL_PIPE_CRC_SOURCE_PF: - if ((IS_HASWELL(dev_priv) || + if (set_wa && (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) && pipe == PIPE_A) hsw_pipe_A_crc_wa(dev_priv, true); @@ -600,7 +601,8 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv, enum pipe pipe, - enum intel_pipe_crc_source *source, u32 *val) + enum intel_pipe_crc_source *source, u32 *val, + bool set_wa) { if (IS_GEN2(dev_priv)) return i8xx_pipe_crc_ctl_reg(source, val); @@ -611,7 +613,7 @@ static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv, else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) return ilk_pipe_crc_ctl_reg(source, val); else - return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val); + return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val, set_wa); } static int pipe_crc_set_source(struct drm_i915_private *dev_priv, @@ -636,7 +638,7 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv, return -EIO; } - ret = get_new_crc_ctl_reg(dev_priv, pipe, &source, &val); + ret = get_new_crc_ctl_reg(dev_priv, pipe, &source, &val, true); if (ret != 0) goto out; @@ -916,7 +918,7 @@ int intel_pipe_crc_create(struct drm_minor *minor) int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name, size_t *values_cnt) { - struct drm_i915_private *dev_priv = crtc->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index]; enum intel_display_power_domain power_domain; enum intel_pipe_crc_source source; @@ -934,10 +936,11 @@ int 
intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name, return -EIO; } - ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val); + ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val, true); if (ret != 0) goto out; + pipe_crc->source = source; I915_WRITE(PIPE_CRC_CTL(crtc->index), val); POSTING_READ(PIPE_CRC_CTL(crtc->index)); @@ -959,3 +962,39 @@ out: return ret; } + +void intel_crtc_enable_pipe_crc(struct intel_crtc *intel_crtc) +{ + struct drm_crtc *crtc = &intel_crtc->base; + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index]; + u32 val = 0; + + if (!crtc->crc.opened) + return; + + if (get_new_crc_ctl_reg(dev_priv, crtc->index, &pipe_crc->source, &val, false) < 0) + return; + + /* Don't need pipe_crc->lock here, IRQs are not generated. */ + pipe_crc->skipped = 0; + + I915_WRITE(PIPE_CRC_CTL(crtc->index), val); + POSTING_READ(PIPE_CRC_CTL(crtc->index)); +} + +void intel_crtc_disable_pipe_crc(struct intel_crtc *intel_crtc) +{ + struct drm_crtc *crtc = &intel_crtc->base; + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index]; + + /* Swallow crc's until we stop generating them. */ + spin_lock_irq(&pipe_crc->lock); + pipe_crc->skipped = INT_MIN; + spin_unlock_irq(&pipe_crc->lock); + + I915_WRITE(PIPE_CRC_CTL(crtc->index), 0); + POSTING_READ(PIPE_CRC_CTL(crtc->index)); + synchronize_irq(dev_priv->drm.irq); +} diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index b8da4dcdd584..19e82aaa9863 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -6890,15 +6890,18 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv) static void gen6_update_ring_freq(struct drm_i915_private *dev_priv) { struct intel_rps *rps = &dev_priv->gt_pm.rps; - int min_freq = 15; + const int min_freq = 15; + const int scaling_factor = 180; unsigned int gpu_freq; unsigned int max_ia_freq, min_ring_freq; unsigned int max_gpu_freq, min_gpu_freq; - int scaling_factor = 180; struct cpufreq_policy *policy; WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); + if (rps->max_freq <= rps->min_freq) + return; + policy = cpufreq_cpu_get(0); if (policy) { max_ia_freq = policy->cpuinfo.max_freq; @@ -6918,13 +6921,12 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv) /* convert DDR frequency from units of 266.6MHz to bandwidth */ min_ring_freq = mult_frac(min_ring_freq, 8, 3); + min_gpu_freq = rps->min_freq; + max_gpu_freq = rps->max_freq; if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) { /* Convert GT frequency to 50 HZ units */ - min_gpu_freq = rps->min_freq / GEN9_FREQ_SCALER; - max_gpu_freq = rps->max_freq / GEN9_FREQ_SCALER; - } else { - min_gpu_freq = rps->min_freq; - max_gpu_freq = rps->max_freq; + min_gpu_freq /= GEN9_FREQ_SCALER; + max_gpu_freq /= GEN9_FREQ_SCALER; } /* @@ -6933,7 +6935,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv) * the PCU should use as a reference to determine the ring frequency. 
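 *
 * Worked example (illustrative register values only, not from this
 * patch): with rps->min_freq = 0x0c and rps->max_freq = 0x2d on a
 * GEN9_BC part, the earlier division by GEN9_FREQ_SCALER (assuming its
 * usual value of 3) converts the 16.66 MHz RPS units into the 50 MHz
 * units the PCU expects, so this loop walks gpu_freq from 15 down to 4.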
 */
	for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
-		int diff = max_gpu_freq - gpu_freq;
+		const int diff = max_gpu_freq - gpu_freq;
 		unsigned int ia_freq = 0, ring_freq = 0;
 
 		if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 23175c5c4a50..2d53f7398a6d 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -93,7 +93,7 @@ static void psr_aux_io_power_put(struct intel_dp *intel_dp)
 	intel_display_power_put(dev_priv, psr_aux_domain(intel_dp));
 }
 
-static bool intel_dp_get_y_cord_status(struct intel_dp *intel_dp)
+static bool intel_dp_get_y_coord_required(struct intel_dp *intel_dp)
 {
 	uint8_t psr_caps = 0;
 
@@ -122,6 +122,18 @@ static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
 	return alpm_caps & DP_ALPM_CAP;
 }
 
+static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
+{
+	u8 val = 0;
+
+	if (drm_dp_dpcd_readb(&intel_dp->aux,
+			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
+		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
+	else
+		DRM_ERROR("Unable to get sink synchronization latency\n");
+	return val;
+}
+
 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv =
@@ -130,33 +142,36 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
 	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
 			 sizeof(intel_dp->psr_dpcd));
 
-	if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
+	if (intel_dp->psr_dpcd[0]) {
 		dev_priv->psr.sink_support = true;
 		DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
 	}
 
 	if (INTEL_GEN(dev_priv) >= 9 &&
-	    (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
-		uint8_t frame_sync_cap;
+	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
+		/*
+		 * All panels that support PSR version 03h (PSR2 +
+		 * Y-coordinate) can handle Y-coordinates in the VSC, but we
+		 * are only sure it is going to be used when required by the
+		 * panel. This way the panel can do selective updates
+		 * without an AUX frame sync.
+		 *
+		 * To support panels with PSR version 02h, or 03h without the
+		 * Y-coordinate requirement, we would need to enable GTC
+		 * first.
+		 */
+		dev_priv->psr.sink_psr2_support =
+			intel_dp_get_y_coord_required(intel_dp);
+		DRM_DEBUG_KMS("PSR2 %s on sink", dev_priv->psr.sink_psr2_support
+			      ? "supported" : "not supported");
 
-		dev_priv->psr.sink_support = true;
-		if (drm_dp_dpcd_readb(&intel_dp->aux,
-				      DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
-				      &frame_sync_cap) != 1)
-			frame_sync_cap = 0;
-		dev_priv->psr.aux_frame_sync = frame_sync_cap & DP_AUX_FRAME_SYNC_CAP;
-		/* PSR2 needs frame sync as well */
-		dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
-		DRM_DEBUG_KMS("PSR2 %s on sink",
-			      dev_priv->psr.psr2_support ?
"supported" : "not supported"); - - if (dev_priv->psr.psr2_support) { - dev_priv->psr.y_cord_support = - intel_dp_get_y_cord_status(intel_dp); + if (dev_priv->psr.sink_psr2_support) { dev_priv->psr.colorimetry_support = intel_dp_get_colorimetry_status(intel_dp); dev_priv->psr.alpm = intel_dp_get_alpm_status(intel_dp); + dev_priv->psr.sink_sync_latency = + intel_dp_get_sink_sync_latency(intel_dp); } } } @@ -193,21 +208,17 @@ static void hsw_psr_setup_vsc(struct intel_dp *intel_dp, struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); struct edp_vsc_psr psr_vsc; - if (dev_priv->psr.psr2_support) { + if (dev_priv->psr.psr2_enabled) { /* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */ memset(&psr_vsc, 0, sizeof(psr_vsc)); psr_vsc.sdp_header.HB0 = 0; psr_vsc.sdp_header.HB1 = 0x7; - if (dev_priv->psr.colorimetry_support && - dev_priv->psr.y_cord_support) { + if (dev_priv->psr.colorimetry_support) { psr_vsc.sdp_header.HB2 = 0x5; psr_vsc.sdp_header.HB3 = 0x13; - } else if (dev_priv->psr.y_cord_support) { + } else { psr_vsc.sdp_header.HB2 = 0x4; psr_vsc.sdp_header.HB3 = 0xe; - } else { - psr_vsc.sdp_header.HB2 = 0x3; - psr_vsc.sdp_header.HB3 = 0xc; } } else { /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */ @@ -228,31 +239,12 @@ static void vlv_psr_enable_sink(struct intel_dp *intel_dp) DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE); } -static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv, - enum port port) -{ - if (INTEL_GEN(dev_priv) >= 9) - return DP_AUX_CH_CTL(port); - else - return EDP_PSR_AUX_CTL; -} - -static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv, - enum port port, int index) -{ - if (INTEL_GEN(dev_priv) >= 9) - return DP_AUX_CH_DATA(port, index); - else - return EDP_PSR_AUX_DATA(index); -} - -static void hsw_psr_enable_sink(struct intel_dp *intel_dp) +static void hsw_psr_setup_aux(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - uint32_t aux_clock_divider; - i915_reg_t aux_ctl_reg; + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + u32 aux_clock_divider, aux_ctl; + int i; static const uint8_t aux_msg[] = { [0] = DP_AUX_NATIVE_WRITE << 4, [1] = DP_SET_POWER >> 8, @@ -260,41 +252,47 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp) [3] = 1 - 1, [4] = DP_SET_POWER_D0, }; - enum port port = dig_port->base.port; - u32 aux_ctl; - int i; + u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK | + EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK | + EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK | + EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK; BUILD_BUG_ON(sizeof(aux_msg) > 20); + for (i = 0; i < sizeof(aux_msg); i += 4) + I915_WRITE(EDP_PSR_AUX_DATA(i >> 2), + intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i)); aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); - /* Enable AUX frame sync at sink */ - if (dev_priv->psr.aux_frame_sync) - drm_dp_dpcd_writeb(&intel_dp->aux, - DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF, - DP_AUX_FRAME_SYNC_ENABLE); + /* Start with bits set for DDI_AUX_CTL register */ + aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg), + aux_clock_divider); + + /* Select only valid bits for SRD_AUX_CTL */ + aux_ctl &= psr_aux_mask; + I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl); +} + +static void hsw_psr_enable_sink(struct intel_dp *intel_dp) +{ + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = 
dig_port->base.base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + u8 dpcd_val = DP_PSR_ENABLE; + /* Enable ALPM at sink for psr2 */ - if (dev_priv->psr.psr2_support && dev_priv->psr.alpm) + if (dev_priv->psr.psr2_enabled && dev_priv->psr.alpm) drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, DP_ALPM_ENABLE); - if (dev_priv->psr.link_standby) - drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, - DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE); - else - drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, - DP_PSR_ENABLE); - aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port); - - /* Setup AUX registers */ - for (i = 0; i < sizeof(aux_msg); i += 4) - I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2), - intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i)); + if (dev_priv->psr.psr2_enabled) + dpcd_val |= DP_PSR_ENABLE_PSR2; + if (dev_priv->psr.link_standby) + dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE; + drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val); - aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg), - aux_clock_divider); - I915_WRITE(aux_ctl_reg, aux_ctl); + drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0); } static void vlv_psr_enable_source(struct intel_dp *intel_dp, @@ -396,25 +394,17 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) * with the 5 or 6 idle patterns. */ uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames); - uint32_t val; - uint8_t sink_latency; - - val = idle_frames << EDP_PSR_IDLE_FRAME_SHIFT; + u32 val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT; /* FIXME: selective update is probably totally broken because it doesn't * mesh at all with our frontbuffer tracking. And the hw alone isn't * good enough. */ - val |= EDP_PSR2_ENABLE | - EDP_SU_TRACK_ENABLE; - - if (drm_dp_dpcd_readb(&intel_dp->aux, - DP_SYNCHRONIZATION_LATENCY_IN_SINK, - &sink_latency) == 1) { - sink_latency &= DP_MAX_RESYNC_FRAME_COUNT_MASK; - } else { - sink_latency = 0; + val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE; + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { + val |= EDP_Y_COORDINATE_VALID | EDP_Y_COORDINATE_ENABLE; } - val |= EDP_PSR2_FRAME_BEFORE_SU(sink_latency + 1); + + val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1); if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5) val |= EDP_PSR2_TP2_TIME_2500; @@ -440,7 +430,7 @@ static void hsw_psr_activate(struct intel_dp *intel_dp) */ /* psr1 and psr2 are mutually exclusive.*/ - if (dev_priv->psr.psr2_support) + if (dev_priv->psr.psr2_enabled) hsw_activate_psr2(intel_dp); else hsw_activate_psr1(intel_dp); @@ -460,7 +450,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, * dynamically during PSR enable, and extracted from sink * caps during eDP detection. */ - if (!dev_priv->psr.psr2_support) + if (!dev_priv->psr.sink_psr2_support) return false; if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { @@ -478,15 +468,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, return false; } - /* - * FIXME:enable psr2 only for y-cordinate psr2 panels - * After gtc implementation , remove this restriction. 
- */
-
-	if (!dev_priv->psr.y_cord_support) {
-		DRM_DEBUG_KMS("PSR2 not enabled, panel does not support Y coordinate\n");
-		return false;
-	}
-
 	return true;
 }
@@ -568,7 +549,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
 	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	if (dev_priv->psr.psr2_support)
+	if (dev_priv->psr.psr2_enabled)
 		WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
 	else
 		WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
@@ -586,14 +567,24 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp,
 	struct drm_device *dev = dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
-	u32 chicken;
 
 	psr_aux_io_power_get(intel_dp);
 
-	if (dev_priv->psr.psr2_support) {
-		chicken = PSR2_VSC_ENABLE_PROG_HEADER;
-		if (dev_priv->psr.y_cord_support)
-			chicken |= PSR2_ADD_VERTICAL_LINE_COUNT;
+	/* Only HSW and BDW have PSR AUX registers that need to be set up.
+	 * SKL+ use hardcoded values for PSR AUX transactions.
+	 */
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+		hsw_psr_setup_aux(intel_dp);
+
+	if (dev_priv->psr.psr2_enabled) {
+		u32 chicken = I915_READ(CHICKEN_TRANS(cpu_transcoder));
+
+		if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv))
+			chicken |= (PSR2_VSC_ENABLE_PROG_HEADER
+				   | PSR2_ADD_VERTICAL_LINE_COUNT);
+
+		else
+			chicken &= ~VSC_DATA_SEL_SOFTWARE_CONTROL;
 		I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);
 
 		I915_WRITE(EDP_PSR_DEBUG,
@@ -644,7 +635,7 @@ void intel_psr_enable(struct intel_dp *intel_dp,
 		goto unlock;
 	}
 
-	dev_priv->psr.psr2_support = crtc_state->has_psr2;
+	dev_priv->psr.psr2_enabled = crtc_state->has_psr2;
 	dev_priv->psr.busy_frontbuffer_bits = 0;
 
 	dev_priv->psr.setup_vsc(intel_dp, crtc_state);
@@ -714,12 +705,7 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
 	i915_reg_t psr_status;
 	u32 psr_status_mask;
 
-	if (dev_priv->psr.aux_frame_sync)
-		drm_dp_dpcd_writeb(&intel_dp->aux,
-				   DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
-				   0);
-
-	if (dev_priv->psr.psr2_support) {
+	if (dev_priv->psr.psr2_enabled) {
 		psr_status = EDP_PSR2_STATUS;
 		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
 
@@ -743,7 +729,7 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
 
 		dev_priv->psr.active = false;
 	} else {
-		if (dev_priv->psr.psr2_support)
+		if (dev_priv->psr.psr2_enabled)
 			WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
 		else
 			WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
@@ -803,7 +789,7 @@ static void intel_psr_work(struct work_struct *work)
 	 * and be ready for re-enable.
 	 */
 	if (HAS_DDI(dev_priv)) {
-		if (dev_priv->psr.psr2_support) {
+		if (dev_priv->psr.psr2_enabled) {
 			if (intel_wait_for_register(dev_priv,
 						    EDP_PSR2_STATUS,
 						    EDP_PSR2_STATUS_STATE_MASK,
@@ -862,11 +848,7 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv)
 		return;
 
 	if (HAS_DDI(dev_priv)) {
-		if (dev_priv->psr.aux_frame_sync)
-			drm_dp_dpcd_writeb(&intel_dp->aux,
-					   DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
-					   0);
-		if (dev_priv->psr.psr2_support) {
+		if (dev_priv->psr.psr2_enabled) {
 			val = I915_READ(EDP_PSR2_CTL);
 			WARN_ON(!(val & EDP_PSR2_ENABLE));
 			I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
@@ -957,6 +939,7 @@ void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
  * intel_psr_invalidate - Invalidate PSR
  * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
+ * @origin: which operation caused the invalidate
  *
  * Since the hardware frontbuffer tracking has gaps we need to integrate
  * with the software frontbuffer tracking. This function gets called every
@@ -966,7 +949,7 @@ void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
  */
 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
-			  unsigned frontbuffer_bits)
+			  unsigned frontbuffer_bits, enum fb_op_origin origin)
 {
 	struct drm_crtc *crtc;
 	enum pipe pipe;
@@ -974,6 +957,9 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
 	if (!CAN_PSR(dev_priv))
 		return;
 
+	if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP)
+		return;
+
 	mutex_lock(&dev_priv->psr.lock);
 	if (!dev_priv->psr.enabled) {
 		mutex_unlock(&dev_priv->psr.lock);
@@ -1014,6 +1000,9 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
 	if (!CAN_PSR(dev_priv))
 		return;
 
+	if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP)
+		return;
+
 	mutex_lock(&dev_priv->psr.lock);
 	if (!dev_priv->psr.enabled) {
 		mutex_unlock(&dev_priv->psr.lock);
@@ -1027,8 +1016,23 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
 	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
 
 	/* By definition flush = invalidate + flush */
-	if (frontbuffer_bits)
-		intel_psr_exit(dev_priv);
+	if (frontbuffer_bits) {
+		if (dev_priv->psr.psr2_enabled ||
+		    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+			intel_psr_exit(dev_priv);
+		} else {
+			/*
+			 * Display WA #0884: all
+			 * This documented WA for bxt can be safely applied
+			 * broadly so we can force HW tracking to exit PSR
+			 * instead of disabling and re-enabling.
+			 * The workaround tells us to write 0 to
+			 * CUR_SURFLIVE_A, but it makes more sense to write to
+			 * the currently active pipe.
+ */ + I915_WRITE(CURSURFLIVE(pipe), 0); + } + } if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits) if (!work_busy(&dev_priv->psr.work.work)) @@ -1090,6 +1094,7 @@ void intel_psr_init(struct drm_i915_private *dev_priv) dev_priv->psr.activate = vlv_psr_activate; dev_priv->psr.setup_vsc = vlv_psr_setup_vsc; } else { + dev_priv->psr.has_hw_tracking = true; dev_priv->psr.enable_source = hsw_psr_enable_source; dev_priv->psr.disable_source = hsw_psr_disable; dev_priv->psr.enable_sink = hsw_psr_enable_sink; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 1d599524a759..04d9d9a946a7 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1593,6 +1593,7 @@ static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes) if (intel_ring_update_space(ring) >= bytes) return 0; + GEM_BUG_ON(list_empty(&ring->request_list)); list_for_each_entry(target, &ring->request_list, ring_link) { /* Would completion of this request free enough space? */ if (bytes <= __intel_ring_space(target->postfix, @@ -1692,17 +1693,18 @@ u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords) need_wrap &= ~1; GEM_BUG_ON(need_wrap > ring->space); GEM_BUG_ON(ring->emit + need_wrap > ring->size); + GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64))); /* Fill the tail with MI_NOOP */ - memset(ring->vaddr + ring->emit, 0, need_wrap); - ring->emit = 0; + memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64)); ring->space -= need_wrap; + ring->emit = 0; } GEM_BUG_ON(ring->emit > ring->size - bytes); GEM_BUG_ON(ring->space < bytes); cs = ring->vaddr + ring->emit; - GEM_DEBUG_EXEC(memset(cs, POISON_INUSE, bytes)); + GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs))); ring->emit += bytes; ring->space -= bytes; @@ -1943,8 +1945,6 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv, static void intel_ring_init_irq(struct drm_i915_private *dev_priv, struct intel_engine_cs *engine) { - engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << engine->irq_shift; - if (INTEL_GEN(dev_priv) >= 6) { engine->irq_enable = gen6_irq_enable; engine->irq_disable = gen6_irq_disable; @@ -2029,6 +2029,8 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine) if (HAS_L3_DPF(dev_priv)) engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT; + engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT; + if (INTEL_GEN(dev_priv) >= 6) { engine->init_context = intel_rcs_ctx_init; engine->emit_flush = gen7_render_ring_flush; @@ -2079,7 +2081,6 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine) engine->emit_flush = gen6_bsd_ring_flush; engine->irq_enable_mask = GT_BSD_USER_INTERRUPT; } else { - engine->mmio_base = BSD_RING_BASE; engine->emit_flush = bsd_ring_flush; if (IS_GEN5(dev_priv)) engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 0320c2c4cfba..a02c7b3b9d55 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -7,9 +7,11 @@ #include "i915_gem_batch_pool.h" #include "i915_gem_timeline.h" +#include "i915_reg.h" #include "i915_pmu.h" #include "i915_request.h" #include "i915_selftest.h" +#include "intel_gpu_commands.h" struct drm_printer; @@ -84,7 +86,7 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a) } #define I915_MAX_SLICES 3 -#define I915_MAX_SUBSLICES 3 +#define I915_MAX_SUBSLICES 8 
#define instdone_slice_mask(dev_priv__) \ (INTEL_GEN(dev_priv__) == 7 ? \ @@ -330,7 +332,6 @@ struct intel_engine_cs { u8 instance; u32 context_size; u32 mmio_base; - unsigned int irq_shift; struct intel_ring *buffer; struct intel_timeline *timeline; @@ -939,7 +940,7 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine, struct intel_wait *wait); void intel_engine_remove_wait(struct intel_engine_cs *engine, struct intel_wait *wait); -void intel_engine_enable_signaling(struct i915_request *request, bool wakeup); +bool intel_engine_enable_signaling(struct i915_request *request, bool wakeup); void intel_engine_cancel_signaling(struct i915_request *request); static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine) diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c index e5bf0d37bf43..1cffaf7b5dbe 100644 --- a/drivers/gpu/drm/i915/intel_uc.c +++ b/drivers/gpu/drm/i915/intel_uc.c @@ -69,13 +69,15 @@ static int __get_platform_enable_guc(struct drm_i915_private *dev_priv) static int __get_default_guc_log_level(struct drm_i915_private *dev_priv) { - int guc_log_level = 0; /* disabled */ + int guc_log_level; - /* Enable if we're running on platform with GuC and debug config */ - if (HAS_GUC(dev_priv) && intel_uc_is_using_guc() && - (IS_ENABLED(CONFIG_DRM_I915_DEBUG) || - IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))) - guc_log_level = 1 + GUC_LOG_VERBOSITY_MAX; + if (!HAS_GUC(dev_priv) || !intel_uc_is_using_guc()) + guc_log_level = GUC_LOG_LEVEL_DISABLED; + else if (IS_ENABLED(CONFIG_DRM_I915_DEBUG) || + IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) + guc_log_level = GUC_LOG_LEVEL_MAX; + else + guc_log_level = GUC_LOG_LEVEL_NON_VERBOSE; /* Any platform specific fine-tuning can be done here */ @@ -83,7 +85,7 @@ static int __get_default_guc_log_level(struct drm_i915_private *dev_priv) } /** - * intel_uc_sanitize_options - sanitize uC related modparam options + * sanitize_options_early - sanitize uC related modparam options * @dev_priv: device private * * In case of "enable_guc" option this function will attempt to modify @@ -99,7 +101,7 @@ static int __get_default_guc_log_level(struct drm_i915_private *dev_priv) * unless GuC is enabled on given platform and the driver is compiled with * debug config when this modparam will default to "enable(1..4)". 
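 *
 * Illustrative mapping (an assumption based on the GUC_LOG_LEVEL_*
 * helpers used above, with GUC_LOG_VERBOSITY_MAX = 3; the exact macro
 * definitions live elsewhere in this series):
 *
 * ::
 *
 *	guc_log_level=0      logging disabled (GUC_LOG_LEVEL_DISABLED)
 *	guc_log_level=1      enabled, non-verbose (GUC_LOG_LEVEL_NON_VERBOSE)
 *	guc_log_level=2..5   enabled, verbosity 0..3 (GUC_LOG_LEVEL_TO_VERBOSITY)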
*/ -void intel_uc_sanitize_options(struct drm_i915_private *dev_priv) +static void sanitize_options_early(struct drm_i915_private *dev_priv) { struct intel_uc_fw *guc_fw = &dev_priv->guc.fw; struct intel_uc_fw *huc_fw = &dev_priv->huc.fw; @@ -142,51 +144,53 @@ void intel_uc_sanitize_options(struct drm_i915_private *dev_priv) i915_modparams.guc_log_level = 0; } - if (i915_modparams.guc_log_level > 1 + GUC_LOG_VERBOSITY_MAX) { + if (i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX) { DRM_WARN("Incompatible option detected: %s=%d, %s!\n", "guc_log_level", i915_modparams.guc_log_level, "verbosity too high"); - i915_modparams.guc_log_level = 1 + GUC_LOG_VERBOSITY_MAX; + i915_modparams.guc_log_level = GUC_LOG_LEVEL_MAX; } - DRM_DEBUG_DRIVER("guc_log_level=%d (enabled:%s verbosity:%d)\n", + DRM_DEBUG_DRIVER("guc_log_level=%d (enabled:%s, verbose:%s, verbosity:%d)\n", i915_modparams.guc_log_level, yesno(i915_modparams.guc_log_level), - i915_modparams.guc_log_level - 1); + yesno(GUC_LOG_LEVEL_IS_VERBOSE(i915_modparams.guc_log_level)), + GUC_LOG_LEVEL_TO_VERBOSITY(i915_modparams.guc_log_level)); /* Make sure that sanitization was done */ GEM_BUG_ON(i915_modparams.enable_guc < 0); GEM_BUG_ON(i915_modparams.guc_log_level < 0); } -void intel_uc_init_early(struct drm_i915_private *dev_priv) +void intel_uc_init_early(struct drm_i915_private *i915) { - intel_guc_init_early(&dev_priv->guc); - intel_huc_init_early(&dev_priv->huc); -} + struct intel_guc *guc = &i915->guc; + struct intel_huc *huc = &i915->huc; -void intel_uc_init_fw(struct drm_i915_private *dev_priv) -{ - if (!USES_GUC(dev_priv)) - return; + intel_guc_init_early(guc); + intel_huc_init_early(huc); - if (USES_HUC(dev_priv)) - intel_uc_fw_fetch(dev_priv, &dev_priv->huc.fw); + sanitize_options_early(i915); - intel_uc_fw_fetch(dev_priv, &dev_priv->guc.fw); + if (USES_GUC(i915)) + intel_uc_fw_fetch(i915, &guc->fw); + + if (USES_HUC(i915)) + intel_uc_fw_fetch(i915, &huc->fw); } -void intel_uc_fini_fw(struct drm_i915_private *dev_priv) +void intel_uc_cleanup_early(struct drm_i915_private *i915) { - if (!USES_GUC(dev_priv)) - return; + struct intel_guc *guc = &i915->guc; + struct intel_huc *huc = &i915->huc; - intel_uc_fw_fini(&dev_priv->guc.fw); + if (USES_HUC(i915)) + intel_uc_fw_fini(&huc->fw); - if (USES_HUC(dev_priv)) - intel_uc_fw_fini(&dev_priv->huc.fw); + if (USES_GUC(i915)) + intel_uc_fw_fini(&guc->fw); - guc_free_load_err_log(&dev_priv->guc); + guc_free_load_err_log(guc); } /** @@ -223,10 +227,13 @@ static int guc_enable_communication(struct intel_guc *guc) { struct drm_i915_private *dev_priv = guc_to_i915(guc); + gen9_enable_guc_interrupts(dev_priv); + if (HAS_GUC_CT(dev_priv)) - return intel_guc_enable_ct(guc); + return intel_guc_ct_enable(&guc->ct); guc->send = intel_guc_send_mmio; + guc->handler = intel_guc_to_host_event_handler_mmio; return 0; } @@ -235,9 +242,12 @@ static void guc_disable_communication(struct intel_guc *guc) struct drm_i915_private *dev_priv = guc_to_i915(guc); if (HAS_GUC_CT(dev_priv)) - intel_guc_disable_ct(guc); + intel_guc_ct_disable(&guc->ct); + + gen9_disable_guc_interrupts(dev_priv); guc->send = intel_guc_send_nop; + guc->handler = intel_guc_to_host_event_handler_nop; } int intel_uc_init_misc(struct drm_i915_private *dev_priv) @@ -248,24 +258,13 @@ int intel_uc_init_misc(struct drm_i915_private *dev_priv) if (!USES_GUC(dev_priv)) return 0; - ret = intel_guc_init_wq(guc); - if (ret) { - DRM_ERROR("Couldn't allocate workqueues for GuC\n"); - goto err; - } + intel_guc_init_ggtt_pin_bias(guc); - ret = 
intel_guc_log_relay_create(guc); - if (ret) { - DRM_ERROR("Couldn't allocate relay for GuC log\n"); - goto err_relay; - } + ret = intel_guc_init_wq(guc); + if (ret) + return ret; return 0; - -err_relay: - intel_guc_fini_wq(guc); -err: - return ret; } void intel_uc_fini_misc(struct drm_i915_private *dev_priv) @@ -276,8 +275,6 @@ void intel_uc_fini_misc(struct drm_i915_private *dev_priv) return; intel_guc_fini_wq(guc); - - intel_guc_log_relay_destroy(guc); } int intel_uc_init(struct drm_i915_private *dev_priv) @@ -325,6 +322,24 @@ void intel_uc_fini(struct drm_i915_private *dev_priv) intel_guc_fini(guc); } +void intel_uc_sanitize(struct drm_i915_private *i915) +{ + struct intel_guc *guc = &i915->guc; + struct intel_huc *huc = &i915->huc; + + if (!USES_GUC(i915)) + return; + + GEM_BUG_ON(!HAS_GUC(i915)); + + guc_disable_communication(guc); + + intel_huc_sanitize(huc); + intel_guc_sanitize(guc); + + __intel_uc_reset_hw(i915); +} + int intel_uc_init_hw(struct drm_i915_private *dev_priv) { struct intel_guc *guc = &dev_priv->guc; @@ -336,14 +351,8 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv) GEM_BUG_ON(!HAS_GUC(dev_priv)); - guc_disable_communication(guc); gen9_reset_guc_interrupts(dev_priv); - /* init WOPCM */ - I915_WRITE(GUC_WOPCM_SIZE, intel_guc_wopcm_size(dev_priv)); - I915_WRITE(DMA_GUC_WOPCM_OFFSET, - GUC_WOPCM_OFFSET_VALUE | HUC_LOADING_AGENT_GUC); - /* WaEnableuKernelHeaderValidFix:skl */ /* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */ if (IS_GEN9(dev_priv)) @@ -390,12 +399,9 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv) } if (USES_GUC_SUBMISSION(dev_priv)) { - if (i915_modparams.guc_log_level) - gen9_enable_guc_interrupts(dev_priv); - ret = intel_guc_submission_enable(guc); if (ret) - goto err_interrupts; + goto err_communication; } dev_info(dev_priv->drm.dev, "GuC firmware version %u.%u\n", @@ -410,8 +416,6 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv) /* * We've failed to load the firmware :( */ -err_interrupts: - gen9_disable_guc_interrupts(dev_priv); err_communication: guc_disable_communication(guc); err_log_capture: @@ -441,9 +445,6 @@ void intel_uc_fini_hw(struct drm_i915_private *dev_priv) intel_guc_submission_disable(guc); guc_disable_communication(guc); - - if (USES_GUC_SUBMISSION(dev_priv)) - gen9_disable_guc_interrupts(dev_priv); } int intel_uc_suspend(struct drm_i915_private *i915) @@ -479,8 +480,7 @@ int intel_uc_resume(struct drm_i915_private *i915) if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS) return 0; - if (i915_modparams.guc_log_level) - gen9_enable_guc_interrupts(i915); + gen9_enable_guc_interrupts(i915); err = intel_guc_resume(guc); if (err) { diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h index f76d51d1ce70..25d73ada74ae 100644 --- a/drivers/gpu/drm/i915/intel_uc.h +++ b/drivers/gpu/drm/i915/intel_uc.h @@ -28,13 +28,12 @@ #include "intel_huc.h" #include "i915_params.h" -void intel_uc_sanitize_options(struct drm_i915_private *dev_priv); void intel_uc_init_early(struct drm_i915_private *dev_priv); +void intel_uc_cleanup_early(struct drm_i915_private *dev_priv); void intel_uc_init_mmio(struct drm_i915_private *dev_priv); -void intel_uc_init_fw(struct drm_i915_private *dev_priv); -void intel_uc_fini_fw(struct drm_i915_private *dev_priv); int intel_uc_init_misc(struct drm_i915_private *dev_priv); void intel_uc_fini_misc(struct drm_i915_private *dev_priv); +void intel_uc_sanitize(struct drm_i915_private *dev_priv); int intel_uc_init_hw(struct drm_i915_private *dev_priv); void 
intel_uc_fini_hw(struct drm_i915_private *dev_priv);
 int intel_uc_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c
index 3ec0ce505b76..6e8e0b546743 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/intel_uc_fw.c
@@ -95,15 +95,6 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
 	uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size;
 	uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
 
-	/* Header and uCode will be loaded to WOPCM */
-	size = uc_fw->header_size + uc_fw->ucode_size;
-	if (size > intel_guc_wopcm_size(dev_priv)) {
-		DRM_WARN("%s: Firmware is too large to fit in WOPCM\n",
-			 intel_uc_fw_type_repr(uc_fw->type));
-		err = -E2BIG;
-		goto fail;
-	}
-
 	/* now RSA */
 	if (css->key_size_dw != UOS_RSA_SCRATCH_COUNT) {
 		DRM_WARN("%s: Mismatched firmware RSA key size (%u)\n",
@@ -209,6 +200,7 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
 				   struct i915_vma *vma))
 {
 	struct i915_vma *vma;
+	u32 ggtt_pin_bias;
 	int err;
 
 	DRM_DEBUG_DRIVER("%s fw load %s\n",
@@ -230,8 +222,9 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
 		goto fail;
 	}
 
+	ggtt_pin_bias = to_i915(uc_fw->obj->base.dev)->guc.ggtt_pin_bias;
 	vma = i915_gem_object_ggtt_pin(uc_fw->obj, NULL, 0, 0,
-				       PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+				       PIN_OFFSET_BIAS | ggtt_pin_bias);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
 		DRM_DEBUG_DRIVER("%s fw ggtt-pin err=%d\n",
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.h b/drivers/gpu/drm/i915/intel_uc_fw.h
index d5fd4609c785..dc33b12394de 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/intel_uc_fw.h
@@ -115,6 +115,28 @@ static inline bool intel_uc_fw_is_selected(struct intel_uc_fw *uc_fw)
 	return uc_fw->path != NULL;
 }
 
+static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw)
+{
+	if (uc_fw->load_status == INTEL_UC_FIRMWARE_SUCCESS)
+		uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
+}
+
+/**
+ * intel_uc_fw_get_upload_size() - Get the size of the firmware to be uploaded.
+ * @uc_fw: uC firmware.
+ *
+ * Get the size of the firmware and header that will be uploaded to WOPCM.
+ *
+ * Return: the upload size of the firmware, or zero on firmware fetch failure.
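+ *
+ * Illustrative use (a sketch of what intel_wopcm_init() introduced below
+ * does with this helper when sizing the WOPCM partitions):
+ *
+ * ::
+ *
+ *	u32 guc_fw_size = intel_uc_fw_get_upload_size(&i915->guc.fw);
+ *	u32 huc_fw_size = intel_uc_fw_get_upload_size(&i915->huc.fw);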
+ */ +static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw) +{ + if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS) + return 0; + + return uc_fw->header_size + uc_fw->ucode_size; +} + void intel_uc_fw_fetch(struct drm_i915_private *dev_priv, struct intel_uc_fw *uc_fw); int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 4df7c2ef8576..e7540bb9786c 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -62,6 +62,11 @@ static inline void fw_domain_reset(struct drm_i915_private *i915, const struct intel_uncore_forcewake_domain *d) { + /* + * We don't really know if the powerwell for the forcewake domain we are + * trying to reset here does exist at this point (engines could be fused + * off in ICL+), so no waiting for acks + */ __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset); } @@ -1353,6 +1358,23 @@ static void fw_domain_init(struct drm_i915_private *dev_priv, fw_domain_reset(dev_priv, d); } +static void fw_domain_fini(struct drm_i915_private *dev_priv, + enum forcewake_domain_id domain_id) +{ + struct intel_uncore_forcewake_domain *d; + + if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT)) + return; + + d = &dev_priv->uncore.fw_domain[domain_id]; + + WARN_ON(d->wake_count); + WARN_ON(hrtimer_cancel(&d->timer)); + memset(d, 0, sizeof(*d)); + + dev_priv->uncore.fw_domains &= ~BIT(domain_id); +} + static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv) { if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv)) @@ -1565,6 +1587,40 @@ void intel_uncore_init(struct drm_i915_private *dev_priv) &dev_priv->uncore.pmic_bus_access_nb); } +/* + * We might have detected that some engines are fused off after we initialized + * the forcewake domains. Prune them, to make sure they only reference existing + * engines. + */ +void intel_uncore_prune(struct drm_i915_private *dev_priv) +{ + if (INTEL_GEN(dev_priv) >= 11) { + enum forcewake_domains fw_domains = dev_priv->uncore.fw_domains; + enum forcewake_domain_id domain_id; + int i; + + for (i = 0; i < I915_MAX_VCS; i++) { + domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i; + + if (HAS_ENGINE(dev_priv, _VCS(i))) + continue; + + if (fw_domains & BIT(domain_id)) + fw_domain_fini(dev_priv, domain_id); + } + + for (i = 0; i < I915_MAX_VECS; i++) { + domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i; + + if (HAS_ENGINE(dev_priv, _VECS(i))) + continue; + + if (fw_domains & BIT(domain_id)) + fw_domain_fini(dev_priv, domain_id); + } + } +} + void intel_uncore_fini(struct drm_i915_private *dev_priv) { /* Paranoia: make sure we have disabled everything before we exit. 
*/ @@ -1646,11 +1702,10 @@ static void gen3_stop_engine(struct intel_engine_cs *engine) const i915_reg_t mode = RING_MI_MODE(base); I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING)); - if (intel_wait_for_register_fw(dev_priv, - mode, - MODE_IDLE, - MODE_IDLE, - 500)) + if (__intel_wait_for_register_fw(dev_priv, + mode, MODE_IDLE, MODE_IDLE, + 500, 0, + NULL)) DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", engine->name); @@ -1804,9 +1859,10 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv, __raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask); /* Wait for the device to ack the reset requests */ - err = intel_wait_for_register_fw(dev_priv, - GEN6_GDRST, hw_domain_mask, 0, - 500); + err = __intel_wait_for_register_fw(dev_priv, + GEN6_GDRST, hw_domain_mask, 0, + 500, 0, + NULL); if (err) DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n", hw_domain_mask); @@ -1940,7 +1996,7 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv, u32 reg_value; int ret; - might_sleep(); + might_sleep_if(slow_timeout_ms); spin_lock_irq(&dev_priv->uncore.lock); intel_uncore_forcewake_get__locked(dev_priv, fw); @@ -1952,7 +2008,7 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv, intel_uncore_forcewake_put__locked(dev_priv, fw); spin_unlock_irq(&dev_priv->uncore.lock); - if (ret) + if (ret && slow_timeout_ms) ret = __wait_for(reg_value = I915_READ_NOTRACE(reg), (reg_value & mask) == value, slow_timeout_ms * 1000, 10, 1000); @@ -1971,11 +2027,12 @@ static int gen8_reset_engine_start(struct intel_engine_cs *engine) I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET)); - ret = intel_wait_for_register_fw(dev_priv, - RING_RESET_CTL(engine->mmio_base), - RESET_CTL_READY_TO_RESET, - RESET_CTL_READY_TO_RESET, - 700); + ret = __intel_wait_for_register_fw(dev_priv, + RING_RESET_CTL(engine->mmio_base), + RESET_CTL_READY_TO_RESET, + RESET_CTL_READY_TO_RESET, + 700, 0, + NULL); if (ret) DRM_ERROR("%s: reset request timeout\n", engine->name); @@ -2038,15 +2095,31 @@ int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) int retry; int ret; - might_sleep(); + /* + * We want to perform per-engine reset from atomic context (e.g. + * softirq), which imposes the constraint that we cannot sleep. + * However, experience suggests that spending a bit of time waiting + * for a reset helps in various cases, so for a full-device reset + * we apply the opposite rule and wait if we want to. As we should + * always follow up a failed per-engine reset with a full device reset, + * being a little faster, stricter and more error prone for the + * atomic case seems an acceptable compromise. + * + * Unfortunately this leads to a bimodal routine, when the goal was + * to have a single reset function that worked for resetting any + * number of engines simultaneously. + */ + might_sleep_if(engine_mask == ALL_ENGINES); - /* If the power well sleeps during the reset, the reset + /* + * If the power well sleeps during the reset, the reset * request may be dropped and never completes (causing -EIO). */ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); for (retry = 0; retry < 3; retry++) { - /* We stop engines, otherwise we might get failed reset and a + /* + * We stop engines, otherwise we might get failed reset and a * dead gpu (on elk). Also as modern gpu as kbl can suffer * from system hang if batchbuffer is progressing when * the reset is issued, regardless of READY_TO_RESET ack. 
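+	 *
+	 * Concretely (an illustrative sketch, not taken from this patch),
+	 * the intended calling pattern is a fast per-engine attempt first,
+	 * with a sleepable full-device fallback:
+	 *
+	 *	err = intel_gpu_reset(dev_priv, BIT(engine->id));
+	 *	if (err)
+	 *		err = intel_gpu_reset(dev_priv, ALL_ENGINES);
+	 *
+	 * where only the ALL_ENGINES case may sleep and retry on -ETIMEDOUT.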
@@ -2060,9 +2133,11 @@ int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
 		i915_stop_engines(dev_priv, engine_mask);
 
 		ret = -ENODEV;
-		if (reset)
+		if (reset) {
+			GEM_TRACE("engine_mask=%x\n", engine_mask);
 			ret = reset(dev_priv, engine_mask);
-		if (ret != -ETIMEDOUT)
+		}
+		if (ret != -ETIMEDOUT || engine_mask != ALL_ENGINES)
 			break;
 
 		cond_resched();
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index dfdf444e4bcc..47478d609630 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -140,6 +140,7 @@ struct intel_uncore {
 
 void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
 void intel_uncore_init(struct drm_i915_private *dev_priv);
+void intel_uncore_prune(struct drm_i915_private *dev_priv);
 bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
 bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
 void intel_uncore_fini(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c
new file mode 100644
index 000000000000..74bf76f3fddc
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_wopcm.c
@@ -0,0 +1,275 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2017-2018 Intel Corporation
+ */
+
+#include "intel_wopcm.h"
+#include "i915_drv.h"
+
+/**
+ * DOC: WOPCM Layout
+ *
+ * The layout of the WOPCM is fixed after writing the GuC WOPCM size and
+ * offset registers, whose values are calculated from the HuC/GuC firmware
+ * sizes and a set of hardware requirements and restrictions, as shown below:
+ *
+ * ::
+ *
+ *     +=========> +====================+ <== WOPCM Top
+ *     ^           |  HW contexts RSVD  |
+ *     |     +===> +====================+ <== GuC WOPCM Top
+ *     |     ^     |                    |
+ *     |     |     |                    |
+ *     |     |     |                    |
+ *     |    GuC    |                    |
+ *     |   WOPCM   |                    |
+ *     |    Size   +--------------------+
+ *   WOPCM   |     |    GuC FW RSVD     |
+ *     |     |     +--------------------+
+ *     |     |     |  GuC Stack RSVD    |
+ *     |     |     +------------------- +
+ *     |     v     |   GuC WOPCM RSVD   |
+ *     |     +===> +====================+ <== GuC WOPCM base
+ *     |           |     WOPCM RSVD     |
+ *     |           +------------------- + <== HuC Firmware Top
+ *     v           |       HuC FW       |
+ *     +=========> +====================+ <== WOPCM Base
+ *
+ * The GuC-accessible WOPCM starts at the GuC WOPCM base and ends at the GuC
+ * WOPCM top. The top part of the WOPCM is reserved for hardware contexts
+ * (e.g. the RC6 context).
+ */
+
+/* Default WOPCM size 1MB. */
+#define GEN9_WOPCM_SIZE			(1024 * 1024)
+/* 16KB WOPCM (RSVD WOPCM) is reserved from HuC firmware top. */
+#define WOPCM_RESERVED_SIZE		(16 * 1024)
+
+/* 16KB reserved at the beginning of GuC WOPCM. */
+#define GUC_WOPCM_RESERVED		(16 * 1024)
+/* 8KB from GUC_WOPCM_RESERVED is reserved for GuC stack. */
+#define GUC_WOPCM_STACK_RESERVED	(8 * 1024)
+
+/* GuC WOPCM Offset value needs to be aligned to 16KB. */
+#define GUC_WOPCM_OFFSET_ALIGNMENT	(1UL << GUC_WOPCM_OFFSET_SHIFT)
+
+/* 24KB at the end of WOPCM is reserved for RC6 CTX on BXT. */
+#define BXT_WOPCM_RC6_CTX_RESERVED	(24 * 1024)
+/* 36KB WOPCM reserved at the end of WOPCM on CNL. */
+#define CNL_WOPCM_HW_CTX_RESERVED	(36 * 1024)
+
+/* 128KB from GUC_WOPCM_RESERVED is reserved for FW on Gen9. */
+#define GEN9_GUC_FW_RESERVED	(128 * 1024)
+#define GEN9_GUC_WOPCM_OFFSET	(GUC_WOPCM_RESERVED + GEN9_GUC_FW_RESERVED)
+
+/**
+ * intel_wopcm_init_early() - Early initialization of the WOPCM.
+ * @wopcm: pointer to intel_wopcm.
+ *
+ * Set up the size of the WOPCM, which will be used later for WOPCM
+ * partitioning.
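+ *
+ * Illustrative ordering (a sketch of the intended flow; the call sites are
+ * wired up elsewhere in this series and the i915->wopcm member name is an
+ * assumption):
+ *
+ * ::
+ *
+ *	intel_wopcm_init_early(&i915->wopcm);	(before firmware fetch)
+ *	intel_uc_init_early(i915);		(fetches GuC/HuC firmware)
+ *	intel_wopcm_init(&i915->wopcm);		(partitions using fw sizes)
+ *	intel_wopcm_init_hw(&i915->wopcm);	(programs and locks registers)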
+ */
+void intel_wopcm_init_early(struct intel_wopcm *wopcm)
+{
+	wopcm->size = GEN9_WOPCM_SIZE;
+
+	DRM_DEBUG_DRIVER("WOPCM size: %uKiB\n", wopcm->size / 1024);
+}
+
+static inline u32 context_reserved_size(struct drm_i915_private *i915)
+{
+	if (IS_GEN9_LP(i915))
+		return BXT_WOPCM_RC6_CTX_RESERVED;
+	else if (INTEL_GEN(i915) >= 10)
+		return CNL_WOPCM_HW_CTX_RESERVED;
+	else
+		return 0;
+}
+
+static inline int gen9_check_dword_gap(u32 guc_wopcm_base, u32 guc_wopcm_size)
+{
+	u32 offset;
+
+	/*
+	 * GuC WOPCM size shall be at least a dword larger than the offset from
+	 * WOPCM base (GuC WOPCM offset from WOPCM base + GEN9_GUC_WOPCM_OFFSET)
+	 * due to a hardware limitation on Gen9.
+	 */
+	offset = guc_wopcm_base + GEN9_GUC_WOPCM_OFFSET;
+	if (offset > guc_wopcm_size ||
+	    (guc_wopcm_size - offset) < sizeof(u32)) {
+		DRM_ERROR("GuC WOPCM size %uKiB is too small. %uKiB needed.\n",
+			  guc_wopcm_size / 1024,
+			  (u32)(offset + sizeof(u32)) / 1024);
+		return -E2BIG;
+	}
+
+	return 0;
+}
+
+static inline int gen9_check_huc_fw_fits(u32 guc_wopcm_size, u32 huc_fw_size)
+{
+	/*
+	 * On Gen9 & CNL A0, hardware requires the total available GuC WOPCM
+	 * size to be larger than or equal to the HuC firmware size; otherwise,
+	 * the firmware upload will fail.
+	 */
+	if (huc_fw_size > guc_wopcm_size - GUC_WOPCM_RESERVED) {
+		DRM_ERROR("HuC FW (%uKiB) won't fit in GuC WOPCM (%uKiB).\n",
+			  huc_fw_size / 1024,
+			  (guc_wopcm_size - GUC_WOPCM_RESERVED) / 1024);
+		return -E2BIG;
+	}
+
+	return 0;
+}
+
+static inline int check_hw_restriction(struct drm_i915_private *i915,
+				       u32 guc_wopcm_base, u32 guc_wopcm_size,
+				       u32 huc_fw_size)
+{
+	int err = 0;
+
+	if (IS_GEN9(i915))
+		err = gen9_check_dword_gap(guc_wopcm_base, guc_wopcm_size);
+
+	if (!err &&
+	    (IS_GEN9(i915) || IS_CNL_REVID(i915, CNL_REVID_A0, CNL_REVID_A0)))
+		err = gen9_check_huc_fw_fits(guc_wopcm_size, huc_fw_size);
+
+	return err;
+}
+
+/**
+ * intel_wopcm_init() - Initialize the WOPCM structure.
+ * @wopcm: pointer to intel_wopcm.
+ *
+ * This function will partition the WOPCM space based on the GuC and HuC
+ * firmware sizes and will allocate the maximum remaining space for use by
+ * GuC. It will also enforce platform-dependent hardware restrictions on the
+ * GuC WOPCM offset and size. It will fail the WOPCM init if any of these
+ * checks fail, so that the subsequent GuC firmware upload will be aborted.
+ *
+ * Return: 0 on success, non-zero error code on failure.
+ */
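To make the partitioning described above concrete, here is the same arithmetic as a standalone program. The 200 KiB HuC blob is a made-up input, ALIGN is the usual kernel round-up macro, and the real intel_wopcm_init() additionally masks the size with GUC_WOPCM_SIZE_MASK and applies the hardware checks above:

#include <stdint.h>
#include <stdio.h>

#define ALIGN(x, a)			(((x) + (a) - 1) & ~((a) - 1))

#define GEN9_WOPCM_SIZE			(1024 * 1024)
#define WOPCM_RESERVED_SIZE		(16 * 1024)
#define GUC_WOPCM_OFFSET_ALIGNMENT	(16 * 1024)
#define BXT_WOPCM_RC6_CTX_RESERVED	(24 * 1024)

int main(void)
{
	uint32_t huc_fw_size = 200 * 1024;	/* hypothetical HuC blob */
	uint32_t ctx_rsvd = BXT_WOPCM_RC6_CTX_RESERVED;

	/* GuC WOPCM base: HuC fw + 16KiB reserved, rounded up to 16KiB. */
	uint32_t base = ALIGN(huc_fw_size + WOPCM_RESERVED_SIZE,
			      GUC_WOPCM_OFFSET_ALIGNMENT);
	/* Whatever remains below the HW context reservation goes to GuC. */
	uint32_t size = GEN9_WOPCM_SIZE - base - ctx_rsvd;

	/* Prints base=224KiB size=776KiB for this example. */
	printf("base=%uKiB size=%uKiB\n", base / 1024, size / 1024);
	return 0;
}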
+int intel_wopcm_init(struct intel_wopcm *wopcm)
+{
+	struct drm_i915_private *i915 = wopcm_to_i915(wopcm);
+	u32 guc_fw_size = intel_uc_fw_get_upload_size(&i915->guc.fw);
+	u32 huc_fw_size = intel_uc_fw_get_upload_size(&i915->huc.fw);
+	u32 ctx_rsvd = context_reserved_size(i915);
+	u32 guc_wopcm_base;
+	u32 guc_wopcm_size;
+	u32 guc_wopcm_rsvd;
+	int err;
+
+	GEM_BUG_ON(!wopcm->size);
+
+	if (guc_fw_size >= wopcm->size) {
+		DRM_ERROR("GuC FW (%uKiB) is too big to fit in WOPCM.\n",
+			  guc_fw_size / 1024);
+		return -E2BIG;
+	}
+
+	if (huc_fw_size >= wopcm->size) {
+		DRM_ERROR("HuC FW (%uKiB) is too big to fit in WOPCM.\n",
+			  huc_fw_size / 1024);
+		return -E2BIG;
+	}
+
+	guc_wopcm_base = ALIGN(huc_fw_size + WOPCM_RESERVED_SIZE,
+			       GUC_WOPCM_OFFSET_ALIGNMENT);
+	if ((guc_wopcm_base + ctx_rsvd) >= wopcm->size) {
+		DRM_ERROR("GuC WOPCM base (%uKiB) is too big.\n",
+			  guc_wopcm_base / 1024);
+		return -E2BIG;
+	}
+
+	guc_wopcm_size = wopcm->size - guc_wopcm_base - ctx_rsvd;
+	guc_wopcm_size &= GUC_WOPCM_SIZE_MASK;
+
+	DRM_DEBUG_DRIVER("Calculated GuC WOPCM region: base %uKiB, size %uKiB\n",
+			 guc_wopcm_base / 1024, guc_wopcm_size / 1024);
+
+	guc_wopcm_rsvd = GUC_WOPCM_RESERVED + GUC_WOPCM_STACK_RESERVED;
+	if ((guc_fw_size + guc_wopcm_rsvd) > guc_wopcm_size) {
+		DRM_ERROR("Need %uKiB WOPCM for GuC, %uKiB available.\n",
+			  (guc_fw_size + guc_wopcm_rsvd) / 1024,
+			  guc_wopcm_size / 1024);
+		return -E2BIG;
+	}
+
+	err = check_hw_restriction(i915, guc_wopcm_base, guc_wopcm_size,
+				   huc_fw_size);
+	if (err)
+		return err;
+
+	wopcm->guc.base = guc_wopcm_base;
+	wopcm->guc.size = guc_wopcm_size;
+
+	return 0;
+}
+
+static inline int write_and_verify(struct drm_i915_private *dev_priv,
+				   i915_reg_t reg, u32 val, u32 mask,
+				   u32 locked_bit)
+{
+	u32 reg_val;
+
+	GEM_BUG_ON(val & ~mask);
+
+	I915_WRITE(reg, val);
+
+	reg_val = I915_READ(reg);
+
+	return (reg_val & mask) != (val | locked_bit) ? -EIO : 0;
+}
+
+/**
+ * intel_wopcm_init_hw() - Setup GuC WOPCM registers.
+ * @wopcm: pointer to intel_wopcm.
+ *
+ * Set up the GuC WOPCM size and offset registers with the calculated values,
+ * and verify that the registers were locked with the correct values.
+ *
+ * Return: 0 on success. -EIO if registers were locked with incorrect values.
+ */
+int intel_wopcm_init_hw(struct intel_wopcm *wopcm)
+{
+	struct drm_i915_private *dev_priv = wopcm_to_i915(wopcm);
+	u32 huc_agent;
+	u32 mask;
+	int err;
+
+	if (!USES_GUC(dev_priv))
+		return 0;
+
+	GEM_BUG_ON(!HAS_GUC(dev_priv));
+	GEM_BUG_ON(!wopcm->guc.size);
+	GEM_BUG_ON(!wopcm->guc.base);
+
+	err = write_and_verify(dev_priv, GUC_WOPCM_SIZE, wopcm->guc.size,
+			       GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED,
+			       GUC_WOPCM_SIZE_LOCKED);
+	if (err)
+		goto err_out;
+
+	huc_agent = USES_HUC(dev_priv) ?
HUC_LOADING_AGENT_GUC : 0; + mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent; + err = write_and_verify(dev_priv, DMA_GUC_WOPCM_OFFSET, + wopcm->guc.base | huc_agent, mask, + GUC_WOPCM_OFFSET_VALID); + if (err) + goto err_out; + + return 0; + +err_out: + DRM_ERROR("Failed to init WOPCM registers:\n"); + DRM_ERROR("DMA_GUC_WOPCM_OFFSET=%#x\n", + I915_READ(DMA_GUC_WOPCM_OFFSET)); + DRM_ERROR("GUC_WOPCM_SIZE=%#x\n", I915_READ(GUC_WOPCM_SIZE)); + + return err; +} diff --git a/drivers/gpu/drm/i915/intel_wopcm.h b/drivers/gpu/drm/i915/intel_wopcm.h new file mode 100644 index 000000000000..6298910a384c --- /dev/null +++ b/drivers/gpu/drm/i915/intel_wopcm.h @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2017-2018 Intel Corporation + */ + +#ifndef _INTEL_WOPCM_H_ +#define _INTEL_WOPCM_H_ + +#include <linux/types.h> + +/** + * struct intel_wopcm - Overall WOPCM info and WOPCM regions. + * @size: Size of overall WOPCM. + * @guc: GuC WOPCM Region info. + * @guc.base: GuC WOPCM base which is offset from WOPCM base. + * @guc.size: Size of the GuC WOPCM region. + */ +struct intel_wopcm { + u32 size; + struct { + u32 base; + u32 size; + } guc; +}; + +void intel_wopcm_init_early(struct intel_wopcm *wopcm); +int intel_wopcm_init(struct intel_wopcm *wopcm); +int intel_wopcm_init_hw(struct intel_wopcm *wopcm); + +#endif diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h index 9a48aa441743..d16d74178e9d 100644 --- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h @@ -14,6 +14,7 @@ selftest(fence, i915_sw_fence_mock_selftests) selftest(scatterlist, scatterlist_mock_selftests) selftest(syncmap, i915_syncmap_mock_selftests) selftest(uncore, intel_uncore_mock_selftests) +selftest(engine, intel_engine_cs_mock_selftests) selftest(breadcrumbs, intel_breadcrumbs_mock_selftests) selftest(timelines, i915_gem_timeline_mock_selftests) selftest(requests, i915_request_mock_selftests) diff --git a/drivers/gpu/drm/i915/selftests/intel_engine_cs.c b/drivers/gpu/drm/i915/selftests/intel_engine_cs.c new file mode 100644 index 000000000000..cfaa6b296835 --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/intel_engine_cs.c @@ -0,0 +1,58 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright © 2018 Intel Corporation + */ + +#include "../i915_selftest.h" + +static int intel_mmio_bases_check(void *arg) +{ + int i, j; + + for (i = 0; i < ARRAY_SIZE(intel_engines); i++) { + const struct engine_info *info = &intel_engines[i]; + char name[INTEL_ENGINE_CS_MAX_NAME]; + u8 prev = U8_MAX; + + __sprint_engine_name(name, info); + + for (j = 0; j < MAX_MMIO_BASES; j++) { + u8 gen = info->mmio_bases[j].gen; + u32 base = info->mmio_bases[j].base; + + if (gen >= prev) { + pr_err("%s: %s: mmio base for gen %x " + "is before the one for gen %x\n", + __func__, name, prev, gen); + return -EINVAL; + } + + if (gen == 0) + break; + + if (!base) { + pr_err("%s: %s: invalid mmio base (%x) " + "for gen %x at entry %u\n", + __func__, name, base, gen, j); + return -EINVAL; + } + + prev = gen; + } + + pr_info("%s: min gen supported for %s = %d\n", + __func__, name, prev); + } + + return 0; +} + +int intel_engine_cs_mock_selftests(void) +{ + static const struct i915_subtest tests[] = { + SUBTEST(intel_mmio_bases_check), + }; + + return i915_subtests(tests, NULL); +} diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c 
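write_and_verify() above captures a common pattern for write-once hardware registers: write the value, read it back, and require that the lock bit stuck. The subtlety is that the lock bit is part of the read-back mask but not of the written value, so verification only succeeds once the hardware itself latches the write. A standalone sketch against a stand-in register (not the i915 MMIO API):

#include <assert.h>
#include <errno.h>
#include <stdint.h>

/* Stand-in for a write-once hardware register. */
static uint32_t fake_reg;
static void reg_write(uint32_t val) { fake_reg = val; }
static uint32_t reg_read(void) { return fake_reg; }

/*
 * Write @val, then read back and require that the masked result equals
 * @val with @locked_bit set; anything else means the register did not
 * latch (or latched someone else's value) and we report -EIO.
 */
static int write_and_verify(uint32_t val, uint32_t mask, uint32_t locked_bit)
{
	assert(!(val & ~mask));	/* value must fit inside the mask */

	reg_write(val);

	return (reg_read() & mask) != (val | locked_bit) ? -EIO : 0;
}

With this stand-in register the check fails until fake_reg also has locked_bit set, which is exactly the property the driver relies on to detect that firmware or BIOS already locked the WOPCM registers with different values.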
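The new mock selftest above checks that each engine's mmio_bases[] table is sorted by gen in descending order and terminated by a gen == 0 sentinel; that ordering is what lets a first-match scan pick the right base. A sketch of the lookup this invariant serves (the struct shape, MAX_MMIO_BASES value and helper name here are assumptions for illustration):

#include <stdint.h>

#define MAX_MMIO_BASES 3

struct engine_mmio_base {
	uint32_t base;	/* mmio base used from @gen onwards */
	uint8_t gen;	/* first gen this base applies to; 0 = sentinel */
};

/*
 * Entries are sorted by gen in descending order, so the first entry
 * whose gen is not above the platform's gen wins; hitting the gen == 0
 * sentinel yields base 0, i.e. "no base known for this platform".
 */
static uint32_t pick_mmio_base(const struct engine_mmio_base *bases,
			       unsigned int gen)
{
	unsigned int i;

	for (i = 0; i < MAX_MMIO_BASES; i++)
		if (gen >= bases[i].gen)
			return bases[i].base;

	return 0;
}

For a hypothetical table { { .gen = 11, .base = 0x2000 }, { .gen = 6, .base = 0x1000 }, { } }, gen 9 resolves to 0x1000 and gen 12 to 0x2000; an out-of-order table would silently return the wrong base, which is what the selftest guards against.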
index df7898c8edcb..9e4e0ad62724 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -25,6 +25,7 @@
 #include <linux/kthread.h>
 
 #include "../i915_selftest.h"
+#include "i915_random.h"
 #include "mock_context.h"
 #include "mock_drm.h"
@@ -260,8 +261,11 @@ static void wedge_me(struct work_struct *work)
 {
 	struct wedge_me *w = container_of(work, typeof(*w), work.work);
 
-	pr_err("%pS timed out, cancelling all further testing.\n",
-	       w->symbol);
+	pr_err("%pS timed out, cancelling all further testing.\n", w->symbol);
+
+	GEM_TRACE("%pS timed out.\n", w->symbol);
+	GEM_TRACE_DUMP();
+
 	i915_gem_set_wedged(w->i915);
 }
@@ -433,7 +437,7 @@ static int igt_global_reset(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
 	reset_count = i915_reset_count(&i915->gpu_error);
 
-	i915_reset(i915, I915_RESET_QUIET);
+	i915_reset(i915);
 
 	if (i915_reset_count(&i915->gpu_error) == reset_count) {
 		pr_err("No GPU reset recorded!\n");
@@ -483,6 +487,8 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
 	set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
 	do {
+		u32 seqno = intel_engine_get_seqno(engine);
+
 		if (active) {
 			struct i915_request *rq;
@@ -511,14 +517,15 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
 				break;
 			}
 
+			GEM_BUG_ON(!rq->global_seqno);
+			seqno = rq->global_seqno - 1;
 			i915_request_put(rq);
 		}
 
 		engine->hangcheck.stalled = true;
-		engine->hangcheck.seqno =
-			intel_engine_get_seqno(engine);
+		engine->hangcheck.seqno = seqno;
 
-		err = i915_reset_engine(engine, I915_RESET_QUIET);
+		err = i915_reset_engine(engine, NULL);
 		if (err) {
 			pr_err("i915_reset_engine failed\n");
 			break;
 		}
@@ -573,11 +580,25 @@ static int igt_reset_active_engine(void *arg)
 	return __igt_reset_engine(arg, true);
 }
 
+struct active_engine {
+	struct task_struct *task;
+	struct intel_engine_cs *engine;
+	unsigned long resets;
+	unsigned int flags;
+};
+
+#define TEST_ACTIVE	BIT(0)
+#define TEST_OTHERS	BIT(1)
+#define TEST_SELF	BIT(2)
+#define TEST_PRIORITY	BIT(3)
+
 static int active_engine(void *data)
 {
-	struct intel_engine_cs *engine = data;
-	struct i915_request *rq[2] = {};
-	struct i915_gem_context *ctx[2];
+	I915_RND_STATE(prng);
+	struct active_engine *arg = data;
+	struct intel_engine_cs *engine = arg->engine;
+	struct i915_request *rq[8] = {};
+	struct i915_gem_context *ctx[ARRAY_SIZE(rq)];
 	struct drm_file *file;
 	unsigned long count = 0;
 	int err = 0;
@@ -586,25 +607,20 @@ static int active_engine(void *data)
 	if (IS_ERR(file))
 		return PTR_ERR(file);
 
-	mutex_lock(&engine->i915->drm.struct_mutex);
-	ctx[0] = live_context(engine->i915, file);
-	mutex_unlock(&engine->i915->drm.struct_mutex);
-	if (IS_ERR(ctx[0])) {
-		err = PTR_ERR(ctx[0]);
-		goto err_file;
-	}
-
-	mutex_lock(&engine->i915->drm.struct_mutex);
-	ctx[1] = live_context(engine->i915, file);
-	mutex_unlock(&engine->i915->drm.struct_mutex);
-	if (IS_ERR(ctx[1])) {
-		err = PTR_ERR(ctx[1]);
-		i915_gem_context_put(ctx[0]);
-		goto err_file;
+	for (count = 0; count < ARRAY_SIZE(ctx); count++) {
+		mutex_lock(&engine->i915->drm.struct_mutex);
+		ctx[count] = live_context(engine->i915, file);
+		mutex_unlock(&engine->i915->drm.struct_mutex);
+		if (IS_ERR(ctx[count])) {
+			err = PTR_ERR(ctx[count]);
+			while (count--)
+				i915_gem_context_put(ctx[count]);
+			goto err_file;
+		}
 	}
 
 	while (!kthread_should_stop()) {
-		unsigned int idx = count++ & 1;
+		unsigned int idx = count++ & (ARRAY_SIZE(rq) - 1);
 		struct i915_request *old = rq[idx];
 		struct i915_request *new;
 
@@ -616,14 +632,28 @@ static
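active_engine() above now keeps the last eight requests in flight, indexing a fixed array with count++ & (ARRAY_SIZE(rq) - 1); the mask trick is why the array size must stay a power of two. A tiny standalone illustration of that indexing:

#include <stdio.h>

#define SLOTS 8	/* must be a power of two for the mask below */

int main(void)
{
	unsigned long count = 0;
	int ring[SLOTS] = { 0 };

	/* idx behaves like count % SLOTS, but costs a single AND. */
	for (int i = 0; i < 20; i++) {
		unsigned int idx = count++ & (SLOTS - 1);

		ring[idx] = i;	/* the oldest entry is recycled */
	}

	printf("slot 3 now holds %d\n", ring[3]);	/* prints 19 */
	return 0;
}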
int active_engine(void *data) break; } + if (arg->flags & TEST_PRIORITY) + ctx[idx]->priority = + i915_prandom_u32_max_state(512, &prng); + rq[idx] = i915_request_get(new); i915_request_add(new); mutex_unlock(&engine->i915->drm.struct_mutex); if (old) { - i915_request_wait(old, 0, MAX_SCHEDULE_TIMEOUT); + if (i915_request_wait(old, 0, HZ) < 0) { + GEM_TRACE("%s timed out.\n", engine->name); + GEM_TRACE_DUMP(); + + i915_gem_set_wedged(engine->i915); + i915_request_put(old); + err = -EIO; + break; + } i915_request_put(old); } + + cond_resched(); } for (count = 0; count < ARRAY_SIZE(rq); count++) @@ -634,8 +664,9 @@ err_file: return err; } -static int __igt_reset_engine_others(struct drm_i915_private *i915, - bool active) +static int __igt_reset_engines(struct drm_i915_private *i915, + const char *test_name, + unsigned int flags) { struct intel_engine_cs *engine, *other; enum intel_engine_id id, tmp; @@ -649,50 +680,61 @@ static int __igt_reset_engine_others(struct drm_i915_private *i915, if (!intel_has_reset_engine(i915)) return 0; - if (active) { + if (flags & TEST_ACTIVE) { mutex_lock(&i915->drm.struct_mutex); err = hang_init(&h, i915); mutex_unlock(&i915->drm.struct_mutex); if (err) return err; + + if (flags & TEST_PRIORITY) + h.ctx->priority = 1024; } for_each_engine(engine, i915, id) { - struct task_struct *threads[I915_NUM_ENGINES] = {}; - unsigned long resets[I915_NUM_ENGINES]; + struct active_engine threads[I915_NUM_ENGINES] = {}; unsigned long global = i915_reset_count(&i915->gpu_error); - unsigned long count = 0; + unsigned long count = 0, reported; IGT_TIMEOUT(end_time); - if (active && !intel_engine_can_store_dword(engine)) + if (flags & TEST_ACTIVE && + !intel_engine_can_store_dword(engine)) continue; memset(threads, 0, sizeof(threads)); for_each_engine(other, i915, tmp) { struct task_struct *tsk; - resets[tmp] = i915_reset_engine_count(&i915->gpu_error, - other); + threads[tmp].resets = + i915_reset_engine_count(&i915->gpu_error, + other); + + if (!(flags & TEST_OTHERS)) + continue; - if (other == engine) + if (other == engine && !(flags & TEST_SELF)) continue; - tsk = kthread_run(active_engine, other, + threads[tmp].engine = other; + threads[tmp].flags = flags; + + tsk = kthread_run(active_engine, &threads[tmp], "igt/%s", other->name); if (IS_ERR(tsk)) { err = PTR_ERR(tsk); goto unwind; } - threads[tmp] = tsk; + threads[tmp].task = tsk; get_task_struct(tsk); } set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); do { - if (active) { - struct i915_request *rq; + u32 seqno = intel_engine_get_seqno(engine); + struct i915_request *rq = NULL; + if (flags & TEST_ACTIVE) { mutex_lock(&i915->drm.struct_mutex); rq = hang_create_request(&h, engine); if (IS_ERR(rq)) { @@ -718,33 +760,38 @@ static int __igt_reset_engine_others(struct drm_i915_private *i915, break; } - i915_request_put(rq); + GEM_BUG_ON(!rq->global_seqno); + seqno = rq->global_seqno - 1; } engine->hangcheck.stalled = true; - engine->hangcheck.seqno = - intel_engine_get_seqno(engine); + engine->hangcheck.seqno = seqno; - err = i915_reset_engine(engine, I915_RESET_QUIET); + err = i915_reset_engine(engine, NULL); if (err) { - pr_err("i915_reset_engine(%s:%s) failed, err=%d\n", - engine->name, active ? 
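The per-engine background load in these hunks follows the standard kthread lifecycle: kthread_run() to start the worker, get_task_struct() so the task_struct can be reaped safely, kthread_should_stop() polled inside the worker loop, and kthread_stop() to collect the worker's return value. A minimal kernel-style sketch of that shape (worker_fn and its payload are illustrative, not taken from the patch):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/sched/task.h>

static int worker_fn(void *data)
{
	int *err = data;

	while (!kthread_should_stop()) {
		/* ... generate background load ... */
		cond_resched();	/* be polite, as active_engine() now is */
	}

	return *err;	/* handed back through kthread_stop() */
}

static int run_worker(int *payload)
{
	struct task_struct *tsk;
	int ret;

	tsk = kthread_run(worker_fn, payload, "igt/sketch");
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	/* Hold a reference so the task_struct outlives the thread's exit. */
	get_task_struct(tsk);

	/* ... let it run for a while ... */

	ret = kthread_stop(tsk);	/* waits, returns worker_fn()'s value */
	put_task_struct(tsk);

	return ret;
}

The get_task_struct() before kthread_stop() mirrors what the selftest does for each spawned engine thread: without it, the thread could exit and free its task_struct before the test reaps it.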
"active" : "idle", err); + pr_err("i915_reset_engine(%s:%s): failed, err=%d\n", + engine->name, test_name, err); break; } engine->hangcheck.stalled = false; count++; + + if (rq) { + i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT); + i915_request_put(rq); + } } while (time_before(jiffies, end_time)); clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); pr_info("i915_reset_engine(%s:%s): %lu resets\n", - engine->name, active ? "active" : "idle", count); - - if (i915_reset_engine_count(&i915->gpu_error, engine) - - resets[engine->id] != (active ? count : 0)) { - pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu\n", - engine->name, active ? "active" : "idle", count, - i915_reset_engine_count(&i915->gpu_error, - engine) - resets[engine->id]); + engine->name, test_name, count); + + reported = i915_reset_engine_count(&i915->gpu_error, engine); + reported -= threads[engine->id].resets; + if (reported != (flags & TEST_ACTIVE ? count : 0)) { + pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu, expected %lu reported\n", + engine->name, test_name, count, reported, + (flags & TEST_ACTIVE ? count : 0)); if (!err) err = -EINVAL; } @@ -753,24 +800,26 @@ unwind: for_each_engine(other, i915, tmp) { int ret; - if (!threads[tmp]) + if (!threads[tmp].task) continue; - ret = kthread_stop(threads[tmp]); + ret = kthread_stop(threads[tmp].task); if (ret) { pr_err("kthread for other engine %s failed, err=%d\n", other->name, ret); if (!err) err = ret; } - put_task_struct(threads[tmp]); + put_task_struct(threads[tmp].task); - if (resets[tmp] != i915_reset_engine_count(&i915->gpu_error, - other)) { + if (other != engine && + threads[tmp].resets != + i915_reset_engine_count(&i915->gpu_error, other)) { pr_err("Innocent engine %s was reset (count=%ld)\n", other->name, i915_reset_engine_count(&i915->gpu_error, - other) - resets[tmp]); + other) - + threads[tmp].resets); if (!err) err = -EINVAL; } @@ -794,7 +843,7 @@ unwind: if (i915_terminally_wedged(&i915->gpu_error)) err = -EIO; - if (active) { + if (flags & TEST_ACTIVE) { mutex_lock(&i915->drm.struct_mutex); hang_fini(&h); mutex_unlock(&i915->drm.struct_mutex); @@ -803,14 +852,42 @@ unwind: return err; } -static int igt_reset_idle_engine_others(void *arg) +static int igt_reset_engines(void *arg) { - return __igt_reset_engine_others(arg, false); -} + static const struct { + const char *name; + unsigned int flags; + } phases[] = { + { "idle", 0 }, + { "active", TEST_ACTIVE }, + { "others-idle", TEST_OTHERS }, + { "others-active", TEST_OTHERS | TEST_ACTIVE }, + { + "others-priority", + TEST_OTHERS | TEST_ACTIVE | TEST_PRIORITY + }, + { + "self-priority", + TEST_OTHERS | TEST_ACTIVE | TEST_PRIORITY | TEST_SELF, + }, + { } + }; + struct drm_i915_private *i915 = arg; + typeof(*phases) *p; + int err; -static int igt_reset_active_engine_others(void *arg) -{ - return __igt_reset_engine_others(arg, true); + for (p = phases; p->name; p++) { + if (p->flags & TEST_PRIORITY) { + if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY)) + continue; + } + + err = __igt_reset_engines(arg, p->name, p->flags); + if (err) + return err; + } + + return 0; } static u32 fake_hangcheck(struct i915_request *rq) @@ -865,7 +942,6 @@ static int igt_wait_reset(void *arg) __func__, rq->fence.seqno, hws_seqno(&h, rq)); intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); - i915_reset(i915, 0); i915_gem_set_wedged(i915); err = -EIO; @@ -962,7 +1038,6 @@ static int igt_reset_queue(void *arg) i915_request_put(rq); i915_request_put(prev); - 
i915_reset(i915, 0); i915_gem_set_wedged(i915); err = -EIO; @@ -971,7 +1046,7 @@ static int igt_reset_queue(void *arg) reset_count = fake_hangcheck(prev); - i915_reset(i915, I915_RESET_QUIET); + i915_reset(i915); GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags)); @@ -1069,7 +1144,6 @@ static int igt_handle_error(void *arg) __func__, rq->fence.seqno, hws_seqno(&h, rq)); intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); - i915_reset(i915, 0); i915_gem_set_wedged(i915); err = -EIO; @@ -1084,7 +1158,7 @@ static int igt_handle_error(void *arg) engine->hangcheck.stalled = true; engine->hangcheck.seqno = intel_engine_get_seqno(engine); - i915_handle_error(i915, intel_engine_flag(engine), "%s", __func__); + i915_handle_error(i915, intel_engine_flag(engine), 0, NULL); xchg(&i915->gpu_error.first_error, error); @@ -1112,8 +1186,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_hang_sanitycheck), SUBTEST(igt_reset_idle_engine), SUBTEST(igt_reset_active_engine), - SUBTEST(igt_reset_idle_engine_others), - SUBTEST(igt_reset_active_engine_others), + SUBTEST(igt_reset_engines), SUBTEST(igt_wait_reset), SUBTEST(igt_reset_queue), SUBTEST(igt_handle_error), @@ -1129,6 +1202,10 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) err = i915_subtests(tests, i915); + mutex_lock(&i915->drm.struct_mutex); + flush_test(i915, I915_WAIT_LOCKED); + mutex_unlock(&i915->drm.struct_mutex); + i915_modparams.enable_hangcheck = saved_hangcheck; intel_runtime_pm_put(i915); diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 62903bae0221..91c9bcd4196f 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -478,6 +478,7 @@ # define DP_PSR_FRAME_CAPTURE (1 << 3) # define DP_PSR_SELECTIVE_UPDATE (1 << 4) # define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS (1 << 5) +# define DP_PSR_ENABLE_PSR2 (1 << 6) /* eDP 1.4a */ #define DP_ADAPTER_CTRL 0x1a0 # define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0) @@ -794,6 +795,15 @@ # define DP_LAST_ACTUAL_SYNCHRONIZATION_LATENCY_MASK (0xf << 4) # define DP_LAST_ACTUAL_SYNCHRONIZATION_LATENCY_SHIFT 4 +#define DP_LAST_RECEIVED_PSR_SDP 0x200a /* eDP 1.2 */ +# define DP_PSR_STATE_BIT (1 << 0) /* eDP 1.2 */ +# define DP_UPDATE_RFB_BIT (1 << 1) /* eDP 1.2 */ +# define DP_CRC_VALID_BIT (1 << 2) /* eDP 1.2 */ +# define DP_SU_VALID (1 << 3) /* eDP 1.4 */ +# define DP_FIRST_SCAN_LINE_SU_REGION (1 << 4) /* eDP 1.4 */ +# define DP_LAST_SCAN_LINE_SU_REGION (1 << 5) /* eDP 1.4 */ +# define DP_Y_COORDINATE_VALID (1 << 6) /* eDP 1.4a */ + #define DP_RECEIVER_ALPM_STATUS 0x200b /* eDP 1.4 */ # define DP_ALPM_LOCK_TIMEOUT_ERROR (1 << 0) |
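The drm_dp_helper.h hunks expose the eDP 1.4a PSR2 enable bit alongside the new last-received PSR SDP status bits. A hedged sketch of how a driver might select PSR2 in the sink's existing PSR configuration register; DP_PSR_EN_CFG, DP_PSR_ENABLE and drm_dp_dpcd_writeb() are pre-existing DRM helpers, and whether a given driver enables PSR2 exactly this way is an assumption:

#include <linux/errno.h>

#include <drm/drm_dp_helper.h>

/* Ask an eDP 1.4a sink to run PSR2 rather than PSR1. */
static int sketch_psr2_enable_sink(struct drm_dp_aux *aux)
{
	u8 val = DP_PSR_ENABLE | DP_PSR_ENABLE_PSR2;
	ssize_t ret;

	ret = drm_dp_dpcd_writeb(aux, DP_PSR_EN_CFG, val);

	/* drm_dp_dpcd_writeb() returns the number of bytes written. */
	return ret == 1 ? 0 : (ret < 0 ? (int)ret : -EIO);
}

The DP_LAST_RECEIVED_PSR_SDP bits added above would then let the source read back, via DPCD address 0x2008, what the sink saw in the most recent PSR SDP, which is mainly useful for debugging selective-update handshakes.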