Diffstat (limited to 'drivers/gpu')
150 files changed, 6432 insertions, 5840 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d68b73ff92d2..8f05e28607e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1012,13 +1012,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 		if (r)
 			return r;
 
-		if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) {
-			parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
-			if (!parser->ctx->preamble_presented) {
-				parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
-				parser->ctx->preamble_presented = true;
-			}
-		}
+		if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
+			parser->job->preamble_status |=
+				AMDGPU_PREAMBLE_IB_PRESENT;
 
 		if (parser->ring && parser->ring != ring)
 			return -EINVAL;
@@ -1207,26 +1203,24 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	int r;
 
+	job = p->job;
+	p->job = NULL;
+
+	r = drm_sched_job_init(&job->base, entity, p->filp);
+	if (r)
+		goto error_unlock;
+
+	/* No memory allocation is allowed while holding the mn lock */
 	amdgpu_mn_lock(p->mn);
 	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
 		struct amdgpu_bo *bo = e->robj;
 
 		if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
-			amdgpu_mn_unlock(p->mn);
-			return -ERESTARTSYS;
+			r = -ERESTARTSYS;
+			goto error_abort;
 		}
 	}
 
-	job = p->job;
-	p->job = NULL;
-
-	r = drm_sched_job_init(&job->base, entity, p->filp);
-	if (r) {
-		amdgpu_job_free(job);
-		amdgpu_mn_unlock(p->mn);
-		return r;
-	}
-
 	job->owner = p->filp;
 	p->fence = dma_fence_get(&job->base.s_fence->finished);
 
@@ -1241,6 +1235,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 
 	amdgpu_cs_post_dependencies(p);
 
+	if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
+	    !p->ctx->preamble_presented) {
+		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
+		p->ctx->preamble_presented = true;
+	}
+
 	cs->out.handle = seq;
 	job->uf_sequence = seq;
 
@@ -1258,6 +1258,15 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	amdgpu_mn_unlock(p->mn);
 
 	return 0;
+
+error_abort:
+	dma_fence_put(&job->base.s_fence->finished);
+	job->base.s_fence = NULL;
+
+error_unlock:
+	amdgpu_job_free(job);
+	amdgpu_mn_unlock(p->mn);
+	return r;
 }
 
 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 5518e623fed2..51b5e977ca88 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -164,8 +164,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		return r;
 	}
 
+	need_ctx_switch = ring->current_ctx != fence_ctx;
 	if (ring->funcs->emit_pipeline_sync && job &&
 	    ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
+	     (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
 	     amdgpu_vm_need_pipeline_sync(ring, job))) {
 		need_pipe_sync = true;
 		dma_fence_put(tmp);
@@ -196,7 +198,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	}
 
 	skip_preamble = ring->current_ctx == fence_ctx;
-	need_ctx_switch = ring->current_ctx != fence_ctx;
 	if (job && ring->funcs->emit_cntxcntl) {
 		if (need_ctx_switch)
 			status |= AMDGPU_HAVE_CTX_SWITCH;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 8f98629fbe59..7b4e657a95c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1932,14 +1932,6 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
 			amdgpu_fence_wait_empty(ring);
 	}
 
-	mutex_lock(&adev->pm.mutex);
-	/* update battery/ac 
status */ - if (power_supply_is_system_supplied() > 0) - adev->pm.ac_power = true; - else - adev->pm.ac_power = false; - mutex_unlock(&adev->pm.mutex); - if (adev->powerplay.pp_funcs->dispatch_tasks) { if (!amdgpu_device_has_dc_support(adev)) { mutex_lock(&adev->pm.mutex); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index ece0ac703e27..b17771dd5ce7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -172,6 +172,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, * is validated on next vm use to avoid fault. * */ list_move_tail(&base->vm_status, &vm->evicted); + base->moved = true; } /** @@ -369,7 +370,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, uint64_t addr; int r; - addr = amdgpu_bo_gpu_offset(bo); entries = amdgpu_bo_size(bo) / 8; if (pte_support_ats) { @@ -401,6 +401,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, if (r) goto error; + addr = amdgpu_bo_gpu_offset(bo); if (ats_entries) { uint64_t ats_value; @@ -2483,28 +2484,52 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size) * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size * * @adev: amdgpu_device pointer - * @vm_size: the default vm size if it's set auto + * @min_vm_size: the minimum vm size in GB if it's set auto * @fragment_size_default: Default PTE fragment size * @max_level: max VMPT level * @max_bits: max address space size in bits * */ -void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size, +void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, uint32_t fragment_size_default, unsigned max_level, unsigned max_bits) { + unsigned int max_size = 1 << (max_bits - 30); + unsigned int vm_size; uint64_t tmp; /* adjust vm size first */ if (amdgpu_vm_size != -1) { - unsigned max_size = 1 << (max_bits - 30); - vm_size = amdgpu_vm_size; if (vm_size > max_size) { dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n", amdgpu_vm_size, max_size); vm_size = max_size; } + } else { + struct sysinfo si; + unsigned int phys_ram_gb; + + /* Optimal VM size depends on the amount of physical + * RAM available. Underlying requirements and + * assumptions: + * + * - Need to map system memory and VRAM from all GPUs + * - VRAM from other GPUs not known here + * - Assume VRAM <= system memory + * - On GFX8 and older, VM space can be segmented for + * different MTYPEs + * - Need to allow room for fragmentation, guard pages etc. + * + * This adds up to a rough guess of system memory x3. + * Round up to power of two to maximize the available + * VM size with the given page table size. 
+ */ + si_meminfo(&si); + phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit + + (1 << 30) - 1) >> 30; + vm_size = roundup_pow_of_two( + min(max(phys_ram_gb * 3, min_vm_size), max_size)); } adev->vm_manager.max_pfn = (uint64_t)vm_size << 18; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 67a15d439ac0..9fa9df0c5e7f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -321,7 +321,7 @@ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket); void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va); -void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size, +void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, uint32_t fragment_size_default, unsigned max_level, unsigned max_bits); int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 5cd45210113f..5a9534a82d40 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -5664,6 +5664,11 @@ static int gfx_v8_0_set_powergating_state(void *handle, if (amdgpu_sriov_vf(adev)) return 0; + if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_RLC_SMU_HS | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_GFX_DMG)) + adev->gfx.rlc.funcs->enter_safe_mode(adev); switch (adev->asic_type) { case CHIP_CARRIZO: case CHIP_STONEY: @@ -5713,7 +5718,11 @@ static int gfx_v8_0_set_powergating_state(void *handle, default: break; } - + if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_RLC_SMU_HS | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_GFX_DMG)) + adev->gfx.rlc.funcs->exit_safe_mode(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 75317f283c69..ad151fefa41f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -632,12 +632,6 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev) amdgpu_gart_table_vram_unpin(adev); } -static void gmc_v6_0_gart_fini(struct amdgpu_device *adev) -{ - amdgpu_gart_table_vram_free(adev); - amdgpu_gart_fini(adev); -} - static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev, u32 status, u32 addr, u32 mc_client) { @@ -935,8 +929,9 @@ static int gmc_v6_0_sw_fini(void *handle) amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); - gmc_v6_0_gart_fini(adev); + amdgpu_gart_table_vram_free(adev); amdgpu_bo_fini(adev); + amdgpu_gart_fini(adev); release_firmware(adev->gmc.fw); adev->gmc.fw = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 36dc367c4b45..f8d8a3a73e42 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -747,19 +747,6 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev) } /** - * gmc_v7_0_gart_fini - vm fini callback - * - * @adev: amdgpu_device pointer - * - * Tears down the driver GART/VM setup (CIK). 
- */ -static void gmc_v7_0_gart_fini(struct amdgpu_device *adev) -{ - amdgpu_gart_table_vram_free(adev); - amdgpu_gart_fini(adev); -} - -/** * gmc_v7_0_vm_decode_fault - print human readable fault info * * @adev: amdgpu_device pointer @@ -1095,8 +1082,9 @@ static int gmc_v7_0_sw_fini(void *handle) amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); kfree(adev->gmc.vm_fault_info); - gmc_v7_0_gart_fini(adev); + amdgpu_gart_table_vram_free(adev); amdgpu_bo_fini(adev); + amdgpu_gart_fini(adev); release_firmware(adev->gmc.fw); adev->gmc.fw = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 70fc97b59b4f..9333109b210d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -969,19 +969,6 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev) } /** - * gmc_v8_0_gart_fini - vm fini callback - * - * @adev: amdgpu_device pointer - * - * Tears down the driver GART/VM setup (CIK). - */ -static void gmc_v8_0_gart_fini(struct amdgpu_device *adev) -{ - amdgpu_gart_table_vram_free(adev); - amdgpu_gart_fini(adev); -} - -/** * gmc_v8_0_vm_decode_fault - print human readable fault info * * @adev: amdgpu_device pointer @@ -1199,8 +1186,9 @@ static int gmc_v8_0_sw_fini(void *handle) amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); kfree(adev->gmc.vm_fault_info); - gmc_v8_0_gart_fini(adev); + amdgpu_gart_table_vram_free(adev); amdgpu_bo_fini(adev); + amdgpu_gart_fini(adev); release_firmware(adev->gmc.fw); adev->gmc.fw = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 399a5db27649..72f8018fa2a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -942,26 +942,12 @@ static int gmc_v9_0_sw_init(void *handle) return 0; } -/** - * gmc_v9_0_gart_fini - vm fini callback - * - * @adev: amdgpu_device pointer - * - * Tears down the driver GART/VM setup (CIK). 
- */ -static void gmc_v9_0_gart_fini(struct amdgpu_device *adev) -{ - amdgpu_gart_table_vram_free(adev); - amdgpu_gart_fini(adev); -} - static int gmc_v9_0_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); - gmc_v9_0_gart_fini(adev); /* * TODO: @@ -974,7 +960,9 @@ static int gmc_v9_0_sw_fini(void *handle) */ amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL); + amdgpu_gart_table_vram_free(adev); amdgpu_bo_fini(adev); + amdgpu_gart_fini(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index 3f57f6463dc8..cb79a93c2eb7 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -65,8 +65,6 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, int min_temp, int max_temp); static int kv_init_fps_limits(struct amdgpu_device *adev); -static void kv_dpm_powergate_uvd(void *handle, bool gate); -static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate); static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate); static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate); @@ -1354,8 +1352,6 @@ static int kv_dpm_enable(struct amdgpu_device *adev) return ret; } - kv_update_current_ps(adev, adev->pm.dpm.boot_ps); - if (adev->irq.installed && amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); @@ -1374,6 +1370,8 @@ static int kv_dpm_enable(struct amdgpu_device *adev) static void kv_dpm_disable(struct amdgpu_device *adev) { + struct kv_power_info *pi = kv_get_pi(adev); + amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, @@ -1387,8 +1385,10 @@ static void kv_dpm_disable(struct amdgpu_device *adev) /* powerup blocks */ kv_dpm_powergate_acp(adev, false); kv_dpm_powergate_samu(adev, false); - kv_dpm_powergate_vce(adev, false); - kv_dpm_powergate_uvd(adev, false); + if (pi->caps_vce_pg) /* power on the VCE block */ + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); + if (pi->caps_uvd_pg) /* power on the UVD block */ + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); kv_enable_smc_cac(adev, false); kv_enable_didt(adev, false); @@ -1551,7 +1551,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev, int ret; if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { - kv_dpm_powergate_vce(adev, false); if (pi->caps_stable_p_state) pi->vce_boot_level = table->count - 1; else @@ -1573,7 +1572,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev, kv_enable_vce_dpm(adev, true); } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { kv_enable_vce_dpm(adev, false); - kv_dpm_powergate_vce(adev, true); } return 0; @@ -1702,24 +1700,32 @@ static void kv_dpm_powergate_uvd(void *handle, bool gate) } } -static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) +static void kv_dpm_powergate_vce(void *handle, bool gate) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct kv_power_info *pi = kv_get_pi(adev); - - if (pi->vce_power_gated == gate) - return; + int ret; pi->vce_power_gated = gate; - if (!pi->caps_vce_pg) - return; - - if (gate) - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); - else - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); + if (gate) { + /* stop 
the VCE block */ + ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_GATE); + kv_enable_vce_dpm(adev, false); + if (pi->caps_vce_pg) /* power off the VCE block */ + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); + } else { + if (pi->caps_vce_pg) /* power on the VCE block */ + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); + kv_enable_vce_dpm(adev, true); + /* re-init the VCE block */ + ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_UNGATE); + } } + static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) { struct kv_power_info *pi = kv_get_pi(adev); @@ -3061,7 +3067,7 @@ static int kv_dpm_hw_init(void *handle) else adev->pm.dpm_enabled = true; mutex_unlock(&adev->pm.mutex); - + amdgpu_pm_compute_clocks(adev); return ret; } @@ -3313,6 +3319,9 @@ static int kv_set_powergating_by_smu(void *handle, case AMD_IP_BLOCK_TYPE_UVD: kv_dpm_powergate_uvd(handle, gate); break; + case AMD_IP_BLOCK_TYPE_VCE: + kv_dpm_powergate_vce(handle, gate); + break; default: break; } diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index db327b412562..1de96995e690 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -6887,7 +6887,6 @@ static int si_dpm_enable(struct amdgpu_device *adev) si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true); si_thermal_start_thermal_controller(adev); - ni_update_current_ps(adev, boot_ps); return 0; } @@ -7763,7 +7762,7 @@ static int si_dpm_hw_init(void *handle) else adev->pm.dpm_enabled = true; mutex_unlock(&adev->pm.mutex); - + amdgpu_pm_compute_clocks(adev); return ret; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index fbe878ae1e8c..4ba0003a9d32 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -480,12 +480,20 @@ void pp_rv_set_display_requirement(struct pp_smu *pp, { struct dc_context *ctx = pp->ctx; struct amdgpu_device *adev = ctx->driver_context; + void *pp_handle = adev->powerplay.pp_handle; const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + struct pp_display_clock_request clock = {0}; - if (!pp_funcs || !pp_funcs->display_configuration_changed) + if (!pp_funcs || !pp_funcs->display_clock_voltage_request) return; - amdgpu_dpm_display_configuration_changed(adev); + clock.clock_type = amd_pp_dcf_clock; + clock.clock_freq_in_khz = req->hard_min_dcefclk_khz; + pp_funcs->display_clock_voltage_request(pp_handle, &clock); + + clock.clock_type = amd_pp_f_clock; + clock.clock_freq_in_khz = req->hard_min_fclk_khz; + pp_funcs->display_clock_voltage_request(pp_handle, &clock); } void pp_rv_set_wm_ranges(struct pp_smu *pp, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 567867915d32..37eaf72ace54 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -754,8 +754,12 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) * fail-safe mode */ if (dc_is_hdmi_signal(link->connector_signal) || - dc_is_dvi_signal(link->connector_signal)) + dc_is_dvi_signal(link->connector_signal)) { + if (prev_sink != NULL) + dc_sink_release(prev_sink); + return false; + } default: break; } diff --git a/drivers/gpu/drm/i915/Kconfig.debug 
b/drivers/gpu/drm/i915/Kconfig.debug index 459f8f88a34c..9e36ffb5eb7c 100644 --- a/drivers/gpu/drm/i915/Kconfig.debug +++ b/drivers/gpu/drm/i915/Kconfig.debug @@ -30,6 +30,7 @@ config DRM_I915_DEBUG select SW_SYNC # signaling validation framework (igt/syncobj*) select DRM_I915_SW_FENCE_DEBUG_OBJECTS select DRM_I915_SELFTEST + select DRM_I915_DEBUG_RUNTIME_PM default n help Choose this option to turn on extra driver debugging that may affect @@ -167,3 +168,14 @@ config DRM_I915_DEBUG_VBLANK_EVADE the vblank. If in doubt, say "N". + +config DRM_I915_DEBUG_RUNTIME_PM + bool "Enable extra state checking for runtime PM" + depends on DRM_I915 + default n + help + Choose this option to turn on extra state checking for the + runtime PM functionality. This may introduce overhead during + driver loading, suspend and resume operations. + + If in doubt, say "N" diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index c62346fdc05d..19cf1bbe059d 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c @@ -56,6 +56,10 @@ static const u8 pci_cfg_space_rw_bmp[PCI_INTERRUPT_LINE + 4] = { /** * vgpu_pci_cfg_mem_write - write virtual cfg space memory + * @vgpu: target vgpu + * @off: offset + * @src: src ptr to write + * @bytes: number of bytes * * Use this function to write virtual cfg space memory. * For standard cfg space, only RW bits can be changed, @@ -91,6 +95,10 @@ static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off, /** * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read + * @vgpu: target vgpu + * @offset: offset + * @p_data: return data ptr + * @bytes: number of bytes to read * * Returns: * Zero on success, negative error code if failed. @@ -278,6 +286,10 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset, /** * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space write + * @vgpu: target vgpu + * @offset: offset + * @p_data: write data ptr + * @bytes: number of bytes to write * * Returns: * Zero on success, negative error code if failed. 
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index a614db310ea2..77edbfcb0f75 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -1840,6 +1840,8 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s) return ret; } +static int mi_noop_index; + static struct cmd_info cmd_info[] = { {"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL}, @@ -2525,7 +2527,12 @@ static int cmd_parser_exec(struct parser_exec_state *s) cmd = cmd_val(s, 0); - info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); + /* fastpath for MI_NOOP */ + if (cmd == MI_NOOP) + info = &cmd_info[mi_noop_index]; + else + info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); + if (info == NULL) { gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n", cmd, get_opcode(cmd, s->ring_id), @@ -2928,6 +2935,8 @@ static int init_cmd_table(struct intel_gvt *gvt) kfree(e); return -EEXIST; } + if (cmd_info[i].opcode == OP_MI_NOOP) + mi_noop_index = i; INIT_HLIST_NODE(&e->hlist); add_cmd_entry(gvt, e); diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 3019dbc39aef..df1e14145747 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -462,6 +462,7 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu) /** * intel_vgpu_init_display- initialize vGPU virtual display emulation * @vgpu: a vGPU + * @resolution: resolution index for intel_vgpu_edid * * This function is used to initialize vGPU virtual display emulation stuffs * diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c index 4b98539025c5..5d4bb35bb889 100644 --- a/drivers/gpu/drm/i915/gvt/edid.c +++ b/drivers/gpu/drm/i915/gvt/edid.c @@ -340,6 +340,9 @@ static int gmbus2_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, /** * intel_gvt_i2c_handle_gmbus_read - emulate gmbus register mmio read * @vgpu: a vGPU + * @offset: reg offset + * @p_data: data return buffer + * @bytes: access data length * * This function is used to emulate gmbus register mmio read * @@ -365,6 +368,9 @@ int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu, /** * intel_gvt_i2c_handle_gmbus_write - emulate gmbus register mmio write * @vgpu: a vGPU + * @offset: reg offset + * @p_data: data return buffer + * @bytes: access data length * * This function is used to emulate gmbus register mmio write * @@ -437,6 +443,9 @@ static inline int get_aux_ch_reg(unsigned int offset) /** * intel_gvt_i2c_handle_aux_ch_write - emulate AUX channel register write * @vgpu: a vGPU + * @port_idx: port index + * @offset: reg offset + * @p_data: write ptr * * This function is used to emulate AUX channel register write * diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 00aad8164dec..2402395a068d 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -1113,6 +1113,10 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se, } /** + * Check if can do 2M page + * @vgpu: target vgpu + * @entry: target pfn's gtt entry + * * Return 1 if 2MB huge gtt shadowing is possilbe, 0 if miscondition, * negtive if found err. */ @@ -1945,7 +1949,7 @@ void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm) /** * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object - * @vgpu: a vGPU + * @mm: target vgpu mm * * This function is called when user wants to use a vGPU mm object. 
If this * mm object hasn't been shadowed yet, the shadow will be populated at this @@ -2521,8 +2525,7 @@ fail: /** * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object * @vgpu: a vGPU - * @page_table_level: PPGTT page table level - * @root_entry: PPGTT page table root pointers + * @pdps: pdp root array * * This function is used to find a PPGTT mm object from mm object pool * diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index 46c8b720e336..6ef5a7fc70df 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -189,7 +189,6 @@ static const struct intel_gvt_ops intel_gvt_ops = { /** * intel_gvt_init_host - Load MPT modules and detect if we're running in host - * @gvt: intel gvt device * * This function is called at the driver loading stage. If failed to find a * loadable MPT module or detect currently we're running in a VM, then GVT-g @@ -303,7 +302,7 @@ static int init_service_thread(struct intel_gvt *gvt) /** * intel_gvt_clean_device - clean a GVT device - * @gvt: intel gvt device + * @dev_priv: i915 private * * This function is called at the driver unloading stage, to free the * resources owned by a GVT device. diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 7a58ca555197..d26258786e3f 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -1287,12 +1287,13 @@ static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu, { write_vreg(vgpu, offset, p_data, bytes); - if (vgpu_vreg(vgpu, offset) & HSW_PWR_WELL_CTL_REQ(HSW_DISP_PW_GLOBAL)) + if (vgpu_vreg(vgpu, offset) & + HSW_PWR_WELL_CTL_REQ(HSW_PW_CTL_IDX_GLOBAL)) vgpu_vreg(vgpu, offset) |= - HSW_PWR_WELL_CTL_STATE(HSW_DISP_PW_GLOBAL); + HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL); else vgpu_vreg(vgpu, offset) &= - ~HSW_PWR_WELL_CTL_STATE(HSW_DISP_PW_GLOBAL); + ~HSW_PWR_WELL_CTL_STATE(HSW_PW_CTL_IDX_GLOBAL); return 0; } @@ -2118,7 +2119,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read, gmbus_mmio_write); - MMIO_F(PCH_GPIOA, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL); + MMIO_F(PCH_GPIO_BASE, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL); MMIO_F(_MMIO(0xe4f00), 0x28, 0, 0, 0, D_ALL, NULL, NULL); MMIO_F(_MMIO(_PCH_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL, @@ -2443,17 +2444,10 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL); MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL); MMIO_D(GEN6_PMINTRMSK, D_ALL); - /* - * Use an arbitrary power well controlled by the PWR_WELL_CTL - * register. 
- */ - MMIO_DH(HSW_PWR_WELL_CTL_BIOS(HSW_DISP_PW_GLOBAL), D_BDW, NULL, - power_well_ctl_mmio_write); - MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL), D_BDW, NULL, - power_well_ctl_mmio_write); - MMIO_DH(HSW_PWR_WELL_CTL_KVMR, D_BDW, NULL, power_well_ctl_mmio_write); - MMIO_DH(HSW_PWR_WELL_CTL_DEBUG(HSW_DISP_PW_GLOBAL), D_BDW, NULL, - power_well_ctl_mmio_write); + MMIO_DH(HSW_PWR_WELL_CTL1, D_BDW, NULL, power_well_ctl_mmio_write); + MMIO_DH(HSW_PWR_WELL_CTL2, D_BDW, NULL, power_well_ctl_mmio_write); + MMIO_DH(HSW_PWR_WELL_CTL3, D_BDW, NULL, power_well_ctl_mmio_write); + MMIO_DH(HSW_PWR_WELL_CTL4, D_BDW, NULL, power_well_ctl_mmio_write); MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write); MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write); @@ -2804,13 +2798,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_F(_MMIO(_DPD_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, dp_aux_ch_ctl_mmio_write); - /* - * Use an arbitrary power well controlled by the PWR_WELL_CTL - * register. - */ - MMIO_D(HSW_PWR_WELL_CTL_BIOS(SKL_DISP_PW_MISC_IO), D_SKL_PLUS); - MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL, - skl_power_well_ctl_write); + MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS); + MMIO_DH(HSW_PWR_WELL_CTL2, D_SKL_PLUS, NULL, skl_power_well_ctl_write); MMIO_D(_MMIO(0xa210), D_SKL_PLUS); MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); @@ -3434,6 +3423,7 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt, * @offset: register offset * @pdata: data buffer * @bytes: data length + * @is_read: read or write * * Returns: * Zero on success, negative error code if failed. diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index a45f46d8537f..71751be329e3 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1712,7 +1712,7 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) return pfn; } -int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, +static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, unsigned long size, dma_addr_t *dma_addr) { struct kvmgt_guest_info *info; @@ -1761,7 +1761,7 @@ static void __gvt_dma_release(struct kref *ref) __gvt_cache_remove_entry(entry->vgpu, entry); } -void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr) +static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr) { struct kvmgt_guest_info *info; struct gvt_dma *entry; diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 994366035364..4db817c21ed8 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -39,6 +39,7 @@ /** * intel_vgpu_gpa_to_mmio_offset - translate a GPA to MMIO offset * @vgpu: a vGPU + * @gpa: guest physical address * * Returns: * Zero on success, negative error code if failed @@ -228,7 +229,7 @@ out: /** * intel_vgpu_reset_mmio - reset virtual MMIO space * @vgpu: a vGPU - * + * @dmlr: whether this is device model level reset */ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr) { diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 42e1e6bdcc2c..7e702c6a32af 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -37,19 +37,6 @@ #include "gvt.h" #include "trace.h" -/** - * Defined in Intel Open Source PRM. 
- * Ref: https://01.org/linuxgraphics/documentation/hardware-specification-prms
- */
-#define TRVATTL3PTRDW(i)	_MMIO(0x4de0 + (i)*4)
-#define TRNULLDETCT		_MMIO(0x4de8)
-#define TRINVTILEDETCT		_MMIO(0x4dec)
-#define TRVADR			_MMIO(0x4df0)
-#define TRTTE			_MMIO(0x4df4)
-#define RING_EXCC(base)		_MMIO((base) + 0x28)
-#define RING_GFX_MODE(base)	_MMIO((base) + 0x29c)
-#define VF_GUARDBAND		_MMIO(0x83a4)
-
 #define GEN9_MOCS_SIZE		64
 
 /* Raw offset is appened to each line for convenience. */
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
index 5c3b9ff9f96a..f7eaa442403f 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.h
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
@@ -53,5 +53,8 @@ bool is_inhibit_context(struct intel_context *ce);
 int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
 				       struct i915_request *req);
 
+#define IS_RESTORE_INHIBIT(a)	\
+	(_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) == \
+	((a) & _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT)))
 #endif
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index fa75a2eead90..82586c8e434f 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -216,7 +216,6 @@ static void virt_vbt_generation(struct vbt *v)
 /**
  * intel_vgpu_init_opregion - initialize the stuff used to emulate opregion
  * @vgpu: a vGPU
- * @gpa: guest physical address of opregion
 *
 * Returns:
 * Zero on success, negative error code if failed.
diff --git a/drivers/gpu/drm/i915/gvt/page_track.c b/drivers/gpu/drm/i915/gvt/page_track.c
index 256d0db8bbb1..84856022528e 100644
--- a/drivers/gpu/drm/i915/gvt/page_track.c
+++ b/drivers/gpu/drm/i915/gvt/page_track.c
@@ -41,6 +41,8 @@ struct intel_vgpu_page_track *intel_vgpu_find_page_track(
 * intel_vgpu_register_page_track - register a guest page to be tacked
 * @vgpu: a vGPU
 * @gfn: the gfn of guest page
+ * @handler: page track handler
+ * @priv: tracker private
 *
 * Returns:
 * zero on success, negative error code if failed. 
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h index d4f7ce6dc1d7..428d252344f1 100644 --- a/drivers/gpu/drm/i915/gvt/reg.h +++ b/drivers/gpu/drm/i915/gvt/reg.h @@ -77,4 +77,22 @@ #define _RING_CTL_BUF_SIZE(ctl) (((ctl) & RB_TAIL_SIZE_MASK) + \ I915_GTT_PAGE_SIZE) +#define PCH_GPIO_BASE _MMIO(0xc5010) + +#define PCH_GMBUS0 _MMIO(0xc5100) +#define PCH_GMBUS1 _MMIO(0xc5104) +#define PCH_GMBUS2 _MMIO(0xc5108) +#define PCH_GMBUS3 _MMIO(0xc510c) +#define PCH_GMBUS4 _MMIO(0xc5110) +#define PCH_GMBUS5 _MMIO(0xc5120) + +#define TRVATTL3PTRDW(i) _MMIO(0x4de0 + (i) * 4) +#define TRNULLDETCT _MMIO(0x4de8) +#define TRINVTILEDETCT _MMIO(0x4dec) +#define TRVADR _MMIO(0x4df0) +#define TRTTE _MMIO(0x4df4) +#define RING_EXCC(base) _MMIO((base) + 0x28) +#define RING_GFX_MODE(base) _MMIO((base) + 0x29c) +#define VF_GUARDBAND _MMIO(0x83a4) + #endif diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 43aa058e29fc..ea34003d6dd2 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -132,35 +132,6 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) unsigned long context_gpa, context_page_num; int i; - gvt_dbg_sched("ring id %d workload lrca %x", ring_id, - workload->ctx_desc.lrca); - - context_page_num = gvt->dev_priv->engine[ring_id]->context_size; - - context_page_num = context_page_num >> PAGE_SHIFT; - - if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS) - context_page_num = 19; - - i = 2; - - while (i < context_page_num) { - context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, - (u32)((workload->ctx_desc.lrca + i) << - I915_GTT_PAGE_SHIFT)); - if (context_gpa == INTEL_GVT_INVALID_ADDR) { - gvt_vgpu_err("Invalid guest context descriptor\n"); - return -EFAULT; - } - - page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i); - dst = kmap(page); - intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst, - I915_GTT_PAGE_SIZE); - kunmap(page); - i++; - } - page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); shadow_ring_context = kmap(page); @@ -195,6 +166,37 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) sr_oa_regs(workload, (u32 *)shadow_ring_context, false); kunmap(page); + + if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val)) + return 0; + + gvt_dbg_sched("ring id %d workload lrca %x", ring_id, + workload->ctx_desc.lrca); + + context_page_num = gvt->dev_priv->engine[ring_id]->context_size; + + context_page_num = context_page_num >> PAGE_SHIFT; + + if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS) + context_page_num = 19; + + i = 2; + while (i < context_page_num) { + context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, + (u32)((workload->ctx_desc.lrca + i) << + I915_GTT_PAGE_SHIFT)); + if (context_gpa == INTEL_GVT_INVALID_ADDR) { + gvt_vgpu_err("Invalid guest context descriptor\n"); + return -EFAULT; + } + + page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i); + dst = kmap(page); + intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst, + I915_GTT_PAGE_SIZE); + kunmap(page); + i++; + } return 0; } @@ -1138,6 +1140,7 @@ out_shadow_ctx: /** * intel_vgpu_select_submission_ops - select virtual submission interface * @vgpu: a vGPU + * @engine_mask: either ALL_ENGINES or target engine mask * @interface: expected vGPU virtual submission interface * * This function is called when guest configures submission interface. 
@@ -1190,7 +1193,7 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu, /** * intel_vgpu_destroy_workload - destroy a vGPU workload - * @vgpu: a vGPU + * @workload: workload to destroy * * This function is called when destroy a vGPU workload. * @@ -1282,6 +1285,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload) /** * intel_vgpu_create_workload - create a vGPU workload * @vgpu: a vGPU + * @ring_id: ring index * @desc: a guest context descriptor * * This function is called when creating a vGPU workload. diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index f9ce35da4123..1f7051e97afb 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1953,7 +1953,10 @@ static int i915_context_status(struct seq_file *m, void *unused) return ret; list_for_each_entry(ctx, &dev_priv->contexts.list, link) { - seq_printf(m, "HW context %u ", ctx->hw_id); + seq_puts(m, "HW context "); + if (!list_empty(&ctx->hw_id_link)) + seq_printf(m, "%x [pin %u]", ctx->hw_id, + atomic_read(&ctx->hw_id_pin_count)); if (ctx->pid) { struct task_struct *task; @@ -2708,7 +2711,9 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) intel_runtime_pm_get(dev_priv); mutex_lock(&dev_priv->psr.lock); - seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled)); + seq_printf(m, "PSR mode: %s\n", + dev_priv->psr.psr2_enabled ? "PSR2" : "PSR1"); + seq_printf(m, "Enabled: %s\n", yesno(dev_priv->psr.enabled)); seq_printf(m, "Busy frontbuffer bits: 0x%03x\n", dev_priv->psr.busy_frontbuffer_bits); @@ -2735,7 +2740,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) psr_source_status(dev_priv, m); mutex_unlock(&dev_priv->psr.lock); - if (READ_ONCE(dev_priv->psr.debug)) { + if (READ_ONCE(dev_priv->psr.debug) & I915_PSR_DEBUG_IRQ) { seq_printf(m, "Last attempted entry at: %lld\n", dev_priv->psr.last_entry_attempt); seq_printf(m, "Last exit at: %lld\n", @@ -2750,17 +2755,32 @@ static int i915_edp_psr_debug_set(void *data, u64 val) { struct drm_i915_private *dev_priv = data; + struct drm_modeset_acquire_ctx ctx; + int ret; if (!CAN_PSR(dev_priv)) return -ENODEV; - DRM_DEBUG_KMS("PSR debug %s\n", enableddisabled(val)); + DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val); intel_runtime_pm_get(dev_priv); - intel_psr_irq_control(dev_priv, !!val); + + drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); + +retry: + ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val); + if (ret == -EDEADLK) { + ret = drm_modeset_backoff(&ctx); + if (!ret) + goto retry; + } + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + intel_runtime_pm_put(dev_priv); - return 0; + return ret; } static int @@ -2845,10 +2865,10 @@ static int i915_power_domain_info(struct seq_file *m, void *unused) enum intel_display_power_domain power_domain; power_well = &power_domains->power_wells[i]; - seq_printf(m, "%-25s %d\n", power_well->name, + seq_printf(m, "%-25s %d\n", power_well->desc->name, power_well->count); - for_each_power_domain(power_domain, power_well->domains) + for_each_power_domain(power_domain, power_well->desc->domains) seq_printf(m, " %-23s %d\n", intel_display_power_domain_str(power_domain), power_domains->domain_use_count[power_domain]); @@ -4114,13 +4134,17 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops, #define DROP_FREED BIT(4) #define DROP_SHRINK_ALL BIT(5) #define DROP_IDLE BIT(6) +#define DROP_RESET_ACTIVE BIT(7) +#define DROP_RESET_SEQNO BIT(8) #define DROP_ALL (DROP_UNBOUND | \ DROP_BOUND 
| \ DROP_RETIRE | \ DROP_ACTIVE | \ DROP_FREED | \ DROP_SHRINK_ALL |\ - DROP_IDLE) + DROP_IDLE | \ + DROP_RESET_ACTIVE | \ + DROP_RESET_SEQNO) static int i915_drop_caches_get(void *data, u64 *val) { @@ -4132,53 +4156,69 @@ i915_drop_caches_get(void *data, u64 *val) static int i915_drop_caches_set(void *data, u64 val) { - struct drm_i915_private *dev_priv = data; - struct drm_device *dev = &dev_priv->drm; + struct drm_i915_private *i915 = data; int ret = 0; DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n", val, val & DROP_ALL); + if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915)) + i915_gem_set_wedged(i915); + /* No need to check and wait for gpu resets, only libdrm auto-restarts * on ioctls on -EAGAIN. */ - if (val & (DROP_ACTIVE | DROP_RETIRE)) { - ret = mutex_lock_interruptible(&dev->struct_mutex); + if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) { + ret = mutex_lock_interruptible(&i915->drm.struct_mutex); if (ret) return ret; if (val & DROP_ACTIVE) - ret = i915_gem_wait_for_idle(dev_priv, + ret = i915_gem_wait_for_idle(i915, I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED, MAX_SCHEDULE_TIMEOUT); + if (val & DROP_RESET_SEQNO) { + intel_runtime_pm_get(i915); + ret = i915_gem_set_global_seqno(&i915->drm, 1); + intel_runtime_pm_put(i915); + } + if (val & DROP_RETIRE) - i915_retire_requests(dev_priv); + i915_retire_requests(i915); - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&i915->drm.struct_mutex); + } + + if (val & DROP_RESET_ACTIVE && + i915_terminally_wedged(&i915->gpu_error)) { + i915_handle_error(i915, ALL_ENGINES, 0, NULL); + wait_on_bit(&i915->gpu_error.flags, + I915_RESET_HANDOFF, + TASK_UNINTERRUPTIBLE); } fs_reclaim_acquire(GFP_KERNEL); if (val & DROP_BOUND) - i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_BOUND); + i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND); if (val & DROP_UNBOUND) - i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_UNBOUND); + i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND); if (val & DROP_SHRINK_ALL) - i915_gem_shrink_all(dev_priv); + i915_gem_shrink_all(i915); fs_reclaim_release(GFP_KERNEL); if (val & DROP_IDLE) { do { - if (READ_ONCE(dev_priv->gt.active_requests)) - flush_delayed_work(&dev_priv->gt.retire_work); - drain_delayed_work(&dev_priv->gt.idle_work); - } while (READ_ONCE(dev_priv->gt.awake)); + if (READ_ONCE(i915->gt.active_requests)) + flush_delayed_work(&i915->gt.retire_work); + drain_delayed_work(&i915->gt.idle_work); + } while (READ_ONCE(i915->gt.awake)); } if (val & DROP_FREED) - i915_gem_drain_freed_objects(dev_priv); + i915_gem_drain_freed_objects(i915); return ret; } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index f8cfd16be534..5dd7fc582e6f 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -373,7 +373,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data, value = 2; break; case I915_PARAM_HAS_RESOURCE_STREAMER: - value = HAS_RESOURCE_STREAMER(dev_priv); + value = 0; break; case I915_PARAM_HAS_POOLED_EU: value = HAS_POOLED_EU(dev_priv); @@ -441,6 +441,9 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data, case I915_PARAM_CS_TIMESTAMP_FREQUENCY: value = 1000 * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz; break; + case I915_PARAM_MMAP_GTT_COHERENT: + value = INTEL_INFO(dev_priv)->has_coherent_ggtt; + break; default: DRM_DEBUG("Unknown parameter %d\n", param->param); return -EINVAL; @@ -709,7 +712,7 @@ cleanup_irq: intel_teardown_gmbus(dev_priv); cleanup_csr: 
intel_csr_ucode_fini(dev_priv); - intel_power_domains_fini(dev_priv); + intel_power_domains_fini_hw(dev_priv); vga_switcheroo_unregister_client(pdev); cleanup_vga_client: vga_client_register(pdev, NULL, NULL, NULL); @@ -867,7 +870,6 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv) /** * i915_driver_init_early - setup state not requiring device access * @dev_priv: device private - * @ent: the matching pci_device_id * * Initialize everything that is a "SW-only" state, that is state not * requiring accessing the device or exposing the driver via kernel internal @@ -875,25 +877,13 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv) * system memory allocation, setting up device specific attributes and * function hooks not requiring accessing the device. */ -static int i915_driver_init_early(struct drm_i915_private *dev_priv, - const struct pci_device_id *ent) +static int i915_driver_init_early(struct drm_i915_private *dev_priv) { - const struct intel_device_info *match_info = - (struct intel_device_info *)ent->driver_data; - struct intel_device_info *device_info; int ret = 0; if (i915_inject_load_failure()) return -ENODEV; - /* Setup the write-once "constant" device info */ - device_info = mkwrite_device_info(dev_priv); - memcpy(device_info, match_info, sizeof(*device_info)); - device_info->device_id = dev_priv->drm.pdev->device; - - BUILD_BUG_ON(INTEL_MAX_PLATFORMS > - sizeof(device_info->platform_mask) * BITS_PER_BYTE); - BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE); spin_lock_init(&dev_priv->irq_lock); spin_lock_init(&dev_priv->gpu_error.lock); mutex_init(&dev_priv->backlight_lock); @@ -921,7 +911,9 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, intel_uc_init_early(dev_priv); intel_pm_setup(dev_priv); intel_init_dpio(dev_priv); - intel_power_domains_init(dev_priv); + ret = intel_power_domains_init(dev_priv); + if (ret < 0) + goto err_uc; intel_irq_init(dev_priv); intel_hangcheck_init(dev_priv); intel_init_display_hooks(dev_priv); @@ -933,6 +925,9 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, return 0; +err_uc: + intel_uc_cleanup_early(dev_priv); + i915_gem_cleanup_early(dev_priv); err_workqueues: i915_workqueues_cleanup(dev_priv); err_engines: @@ -947,6 +942,7 @@ err_engines: static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv) { intel_irq_fini(dev_priv); + intel_power_domains_cleanup(dev_priv); intel_uc_cleanup_early(dev_priv); i915_gem_cleanup_early(dev_priv); i915_workqueues_cleanup(dev_priv); @@ -1272,6 +1268,9 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) */ if (INTEL_INFO(dev_priv)->num_pipes) drm_kms_helper_poll_init(dev); + + intel_power_domains_enable(dev_priv); + intel_runtime_pm_enable(dev_priv); } /** @@ -1280,6 +1279,9 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) */ static void i915_driver_unregister(struct drm_i915_private *dev_priv) { + intel_runtime_pm_disable(dev_priv); + intel_power_domains_disable(dev_priv); + intel_fbdev_unregister(dev_priv); intel_audio_deinit(dev_priv); @@ -1316,6 +1318,52 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv) DRM_INFO("DRM_I915_DEBUG enabled\n"); if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) DRM_INFO("DRM_I915_DEBUG_GEM enabled\n"); + if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) + DRM_INFO("DRM_I915_DEBUG_RUNTIME_PM enabled\n"); +} + +static struct drm_i915_private * +i915_driver_create(struct pci_dev *pdev, 
const struct pci_device_id *ent) +{ + const struct intel_device_info *match_info = + (struct intel_device_info *)ent->driver_data; + struct intel_device_info *device_info; + struct drm_i915_private *i915; + + i915 = kzalloc(sizeof(*i915), GFP_KERNEL); + if (!i915) + return NULL; + + if (drm_dev_init(&i915->drm, &driver, &pdev->dev)) { + kfree(i915); + return NULL; + } + + i915->drm.pdev = pdev; + i915->drm.dev_private = i915; + pci_set_drvdata(pdev, &i915->drm); + + /* Setup the write-once "constant" device info */ + device_info = mkwrite_device_info(i915); + memcpy(device_info, match_info, sizeof(*device_info)); + device_info->device_id = pdev->device; + + BUILD_BUG_ON(INTEL_MAX_PLATFORMS > + sizeof(device_info->platform_mask) * BITS_PER_BYTE); + BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE); + + return i915; +} + +static void i915_driver_destroy(struct drm_i915_private *i915) +{ + struct pci_dev *pdev = i915->drm.pdev; + + drm_dev_fini(&i915->drm); + kfree(i915); + + /* And make sure we never chase our dangling pointer from pci_dev */ + pci_set_drvdata(pdev, NULL); } /** @@ -1340,38 +1388,19 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) if (!i915_modparams.nuclear_pageflip && match_info->gen < 5) driver.driver_features &= ~DRIVER_ATOMIC; - ret = -ENOMEM; - dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); - if (dev_priv) - ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev); - if (ret) { - DRM_DEV_ERROR(&pdev->dev, "allocation failed\n"); - goto out_free; - } - - dev_priv->drm.pdev = pdev; - dev_priv->drm.dev_private = dev_priv; + dev_priv = i915_driver_create(pdev, ent); + if (!dev_priv) + return -ENOMEM; ret = pci_enable_device(pdev); if (ret) goto out_fini; - pci_set_drvdata(pdev, &dev_priv->drm); - /* - * Disable the system suspend direct complete optimization, which can - * leave the device suspended skipping the driver's suspend handlers - * if the device was already runtime suspended. This is needed due to - * the difference in our runtime and system suspend sequence and - * becaue the HDA driver may require us to enable the audio power - * domain during system suspend. 
- */ - dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP); - - ret = i915_driver_init_early(dev_priv, ent); + ret = i915_driver_init_early(dev_priv); if (ret < 0) goto out_pci_disable; - intel_runtime_pm_get(dev_priv); + disable_rpm_wakeref_asserts(dev_priv); ret = i915_driver_init_mmio(dev_priv); if (ret < 0) @@ -1399,11 +1428,9 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) i915_driver_register(dev_priv); - intel_runtime_pm_enable(dev_priv); - intel_init_ipc(dev_priv); - intel_runtime_pm_put(dev_priv); + enable_rpm_wakeref_asserts(dev_priv); i915_welcome_messages(dev_priv); @@ -1414,16 +1441,13 @@ out_cleanup_hw: out_cleanup_mmio: i915_driver_cleanup_mmio(dev_priv); out_runtime_pm_put: - intel_runtime_pm_put(dev_priv); + enable_rpm_wakeref_asserts(dev_priv); i915_driver_cleanup_early(dev_priv); out_pci_disable: pci_disable_device(pdev); out_fini: i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret); - drm_dev_fini(&dev_priv->drm); -out_free: - kfree(dev_priv); - pci_set_drvdata(pdev, NULL); + i915_driver_destroy(dev_priv); return ret; } @@ -1432,13 +1456,13 @@ void i915_driver_unload(struct drm_device *dev) struct drm_i915_private *dev_priv = to_i915(dev); struct pci_dev *pdev = dev_priv->drm.pdev; + disable_rpm_wakeref_asserts(dev_priv); + i915_driver_unregister(dev_priv); if (i915_gem_suspend(dev_priv)) DRM_ERROR("failed to idle hardware; continuing to unload!\n"); - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); - drm_atomic_helper_shutdown(dev); intel_gvt_cleanup(dev_priv); @@ -1459,12 +1483,14 @@ void i915_driver_unload(struct drm_device *dev) i915_gem_fini(dev_priv); intel_fbc_cleanup_cfb(dev_priv); - intel_power_domains_fini(dev_priv); + intel_power_domains_fini_hw(dev_priv); i915_driver_cleanup_hw(dev_priv); i915_driver_cleanup_mmio(dev_priv); - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + enable_rpm_wakeref_asserts(dev_priv); + + WARN_ON(atomic_read(&dev_priv->runtime_pm.wakeref_count)); } static void i915_driver_release(struct drm_device *dev) @@ -1472,9 +1498,7 @@ static void i915_driver_release(struct drm_device *dev) struct drm_i915_private *dev_priv = to_i915(dev); i915_driver_cleanup_early(dev_priv); - drm_dev_fini(&dev_priv->drm); - - kfree(dev_priv); + i915_driver_destroy(dev_priv); } static int i915_driver_open(struct drm_device *dev, struct drm_file *file) @@ -1573,7 +1597,7 @@ static int i915_drm_suspend(struct drm_device *dev) /* We do a lot of poking in a lot of registers, make sure they work * properly. */ - intel_display_set_init_power(dev_priv, true); + intel_power_domains_disable(dev_priv); drm_kms_helper_poll_disable(dev); @@ -1610,6 +1634,18 @@ static int i915_drm_suspend(struct drm_device *dev) return 0; } +static enum i915_drm_suspend_mode +get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate) +{ + if (hibernate) + return I915_DRM_SUSPEND_HIBERNATE; + + if (suspend_to_idle(dev_priv)) + return I915_DRM_SUSPEND_IDLE; + + return I915_DRM_SUSPEND_MEM; +} + static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) { struct drm_i915_private *dev_priv = to_i915(dev); @@ -1620,21 +1656,10 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) i915_gem_suspend_late(dev_priv); - intel_display_set_init_power(dev_priv, false); intel_uncore_suspend(dev_priv); - /* - * In case of firmware assisted context save/restore don't manually - * deinit the power domains. 
This also means the CSR/DMC firmware will - * stay active, it will power down any HW resources as required and - * also enable deeper system power states that would be blocked if the - * firmware was inactive. - */ - if (IS_GEN9_LP(dev_priv) || hibernation || !suspend_to_idle(dev_priv) || - dev_priv->csr.dmc_payload == NULL) { - intel_power_domains_suspend(dev_priv); - dev_priv->power_domains_suspended = true; - } + intel_power_domains_suspend(dev_priv, + get_suspend_mode(dev_priv, hibernation)); ret = 0; if (IS_GEN9_LP(dev_priv)) @@ -1646,10 +1671,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) if (ret) { DRM_ERROR("Suspend complete failed: %d\n", ret); - if (dev_priv->power_domains_suspended) { - intel_power_domains_init_hw(dev_priv, true); - dev_priv->power_domains_suspended = false; - } + intel_power_domains_resume(dev_priv); goto out; } @@ -1755,7 +1777,7 @@ static int i915_drm_resume(struct drm_device *dev) /* * ... but also need to make sure that hotplug processing * doesn't cause havoc. Like in the driver load code we don't - * bother with the tiny race here where we might loose hotplug + * bother with the tiny race here where we might lose hotplug * notifications. * */ intel_hpd_init(dev_priv); @@ -1766,6 +1788,8 @@ static int i915_drm_resume(struct drm_device *dev) intel_opregion_notify_adapter(dev_priv, PCI_D0); + intel_power_domains_enable(dev_priv); + enable_rpm_wakeref_asserts(dev_priv); return 0; @@ -1800,7 +1824,7 @@ static int i915_drm_resume_early(struct drm_device *dev) ret = pci_set_power_state(pdev, PCI_D0); if (ret) { DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret); - goto out; + return ret; } /* @@ -1816,10 +1840,8 @@ static int i915_drm_resume_early(struct drm_device *dev) * depend on the device enable refcount we can't anyway depend on them * disabling/enabling the device. 
*/ - if (pci_enable_device(pdev)) { - ret = -EIO; - goto out; - } + if (pci_enable_device(pdev)) + return -EIO; pci_set_master(pdev); @@ -1842,18 +1864,12 @@ static int i915_drm_resume_early(struct drm_device *dev) intel_uncore_sanitize(dev_priv); - if (dev_priv->power_domains_suspended) - intel_power_domains_init_hw(dev_priv, true); - else - intel_display_set_init_power(dev_priv, true); + intel_power_domains_resume(dev_priv); intel_engines_sanitize(dev_priv); enable_rpm_wakeref_asserts(dev_priv); -out: - dev_priv->power_domains_suspended = false; - return ret; } @@ -1915,7 +1931,6 @@ void i915_reset(struct drm_i915_private *i915, dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason); error->reset_count++; - disable_irq(i915->drm.irq); ret = i915_gem_reset_prepare(i915); if (ret) { dev_err(i915->drm.dev, "GPU recovery failed\n"); @@ -1977,8 +1992,6 @@ void i915_reset(struct drm_i915_private *i915, finish: i915_gem_reset_finish(i915); - enable_irq(i915->drm.irq); - wakeup: clear_bit(I915_RESET_HANDOFF, &error->flags); wake_up_bit(&error->flags, I915_RESET_HANDOFF); @@ -2073,6 +2086,7 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg) goto out; out: + intel_engine_cancel_stop_cs(engine); i915_gem_reset_finish_engine(engine); return ret; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index bf62ccd3f2f8..7ea442033a57 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -87,8 +87,8 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20180719" -#define DRIVER_TIMESTAMP 1532015279 +#define DRIVER_DATE "20180906" +#define DRIVER_TIMESTAMP 1536242083 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and * WARN_ON()) for hw state sanity checks to check for unexpected conditions @@ -612,8 +612,18 @@ struct i915_drrs { struct i915_psr { struct mutex lock; + +#define I915_PSR_DEBUG_MODE_MASK 0x0f +#define I915_PSR_DEBUG_DEFAULT 0x00 +#define I915_PSR_DEBUG_DISABLE 0x01 +#define I915_PSR_DEBUG_ENABLE 0x02 +#define I915_PSR_DEBUG_FORCE_PSR1 0x03 +#define I915_PSR_DEBUG_IRQ 0x10 + + u32 debug; bool sink_support; - struct intel_dp *enabled; + bool prepared, enabled; + struct intel_dp *dp; bool active; struct work_struct work; unsigned busy_frontbuffer_bits; @@ -623,7 +633,6 @@ struct i915_psr { bool alpm; bool psr2_enabled; u8 sink_sync_latency; - bool debug; ktime_t last_entry_attempt; ktime_t last_exit; }; @@ -868,14 +877,17 @@ struct i915_power_well_ops { struct i915_power_well *power_well); }; +struct i915_power_well_regs { + i915_reg_t bios; + i915_reg_t driver; + i915_reg_t kvmr; + i915_reg_t debug; +}; + /* Power well structure for haswell */ -struct i915_power_well { +struct i915_power_well_desc { const char *name; bool always_on; - /* power well enable/disable usage count */ - int count; - /* cached hw enabled state */ - bool hw_enabled; u64 domains; /* unique identifier for this power well */ enum i915_power_well_id id; @@ -885,9 +897,22 @@ struct i915_power_well { */ union { struct { + /* + * request/status flag index in the PUNIT power well + * control/status registers. + */ + u8 idx; + } vlv; + struct { enum dpio_phy phy; } bxt; struct { + const struct i915_power_well_regs *regs; + /* + * request/status flag index in the power well + * constrol/status registers. 
+ */ + u8 idx; /* Mask of pipes whose IRQ logic is backed by the pw */ u8 irq_pipe_mask; /* The pw is backing the VGA functionality */ @@ -898,13 +923,21 @@ struct i915_power_well { const struct i915_power_well_ops *ops; }; +struct i915_power_well { + const struct i915_power_well_desc *desc; + /* power well enable/disable usage count */ + int count; + /* cached hw enabled state */ + bool hw_enabled; +}; + struct i915_power_domains { /* * Power wells needed for initialization at driver init and suspend * time are on. They are kept on until after the first modeset. */ - bool init_power_on; bool initializing; + bool display_core_suspended; int power_well_count; struct mutex lock; @@ -1611,7 +1644,8 @@ struct drm_i915_private { struct mutex gmbus_mutex; /** - * Base address of the gmbus and gpio block. + * Base address of where the gmbus and gpio blocks are located (either + * on PCH or on SoC for platforms without PCH). */ uint32_t gpio_mmio_base; @@ -1633,7 +1667,6 @@ struct drm_i915_private { struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1] [MAX_ENGINE_INSTANCE + 1]; - struct drm_dma_handle *status_page_dmah; struct resource mch_res; /* protects the irq masks */ @@ -1829,6 +1862,7 @@ struct drm_i915_private { struct mutex av_mutex; struct { + struct mutex mutex; struct list_head list; struct llist_head free_list; struct work_struct free_work; @@ -1841,6 +1875,7 @@ struct drm_i915_private { #define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */ #define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */ #define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */ + struct list_head hw_id_list; } contexts; u32 fdi_rx_config; @@ -2611,8 +2646,6 @@ intel_info(const struct drm_i915_private *dev_priv) #define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission() #define USES_HUC(dev_priv) intel_uc_is_using_huc() -#define HAS_RESOURCE_STREAMER(dev_priv) ((dev_priv)->info.has_resource_streamer) - #define HAS_POOLED_EU(dev_priv) ((dev_priv)->info.has_pooled_eu) #define INTEL_PCH_DEVICE_ID_MASK 0xff80 @@ -2776,6 +2809,8 @@ extern void intel_irq_fini(struct drm_i915_private *dev_priv); int intel_irq_install(struct drm_i915_private *dev_priv); void intel_irq_uninstall(struct drm_i915_private *dev_priv); +void i915_clear_error_registers(struct drm_i915_private *dev_priv); + static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) { return dev_priv->gvt; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index fcc73a6ab503..89834ce19acd 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -802,6 +802,11 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv) * that was!). 
*/ + wmb(); + + if (INTEL_INFO(dev_priv)->has_coherent_ggtt) + return; + i915_gem_chipset_flush(dev_priv); intel_runtime_pm_get(dev_priv); @@ -1906,7 +1911,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, return 0; } -static unsigned int tile_row_pages(struct drm_i915_gem_object *obj) +static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj) { return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT; } @@ -1965,7 +1970,7 @@ int i915_gem_mmap_gtt_version(void) } static inline struct i915_ggtt_view -compute_partial_view(struct drm_i915_gem_object *obj, +compute_partial_view(const struct drm_i915_gem_object *obj, pgoff_t page_offset, unsigned int chunk) { @@ -2013,7 +2018,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; - bool write = !!(vmf->flags & FAULT_FLAG_WRITE); + bool write = area->vm_flags & VM_WRITE; struct i915_vma *vma; pgoff_t page_offset; int ret; @@ -2528,13 +2533,21 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) gfp_t noreclaim; int ret; - /* Assert that the object is not currently in any GPU domain. As it + /* + * Assert that the object is not currently in any GPU domain. As it * wasn't in the GTT, there shouldn't be any way it could have been in * a GPU cache */ GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); + /* + * If there's no chance of allocating enough pages for the whole + * object, bail early. + */ + if (page_count > totalram_pages) + return -ENOMEM; + st = kmalloc(sizeof(*st), GFP_KERNEL); if (st == NULL) return -ENOMEM; @@ -2545,7 +2558,8 @@ rebuild_st: return -ENOMEM; } - /* Get the list of pages out of our struct file. They'll be pinned + /* + * Get the list of pages out of our struct file. They'll be pinned * at this point until we release them. * * Fail silently without starting the shrinker @@ -2577,7 +2591,8 @@ rebuild_st: i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++); cond_resched(); - /* We've tried hard to allocate the memory by reaping + /* + * We've tried hard to allocate the memory by reaping * our own buffer, now let the real VM do its job and * go down in flames if truly OOM. * @@ -2589,7 +2604,8 @@ rebuild_st: /* reclaim and warn, but no oom */ gfp = mapping_gfp_mask(mapping); - /* Our bo are always dirty and so we require + /* + * Our bo are always dirty and so we require * kswapd to reclaim our pages (direct reclaim * does not effectively begin pageout of our * buffers on its own). However, direct reclaim @@ -2633,7 +2649,8 @@ rebuild_st: ret = i915_gem_gtt_prepare_pages(obj, st); if (ret) { - /* DMA remapping failed? One possible cause is that + /* + * DMA remapping failed? One possible cause is that * it could not reserve enough large entries, asking * for PAGE_SIZE chunks instead may be helpful. */ @@ -2667,7 +2684,8 @@ err_pages: sg_free_table(st); kfree(st); - /* shmemfs first checks if there is enough memory to allocate the page + /* + * shmemfs first checks if there is enough memory to allocate the page * and reports ENOSPC should there be insufficient, along with the usual * ENOMEM for a genuine allocation failure. 
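The comments rewritten above describe an escalating allocation strategy for the shmem backing store. A condensed sketch of that strategy for a single page; this is a simplification of the hunks above, which walk every page and rebuild the sg_table on failure:

static struct page *example_get_backing_page(struct drm_i915_private *i915,
					     struct address_space *mapping,
					     unsigned long page_count)
{
	/* First pass: cheap, no direct reclaim, no allocation warnings. */
	gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM) |
		    __GFP_NORETRY | __GFP_NOWARN;
	struct page *page;

	page = shmem_read_mapping_page_gfp(mapping, 0, gfp);
	if (!IS_ERR(page))
		return page;

	/* Reap our own idle buffers before leaning on the rest of the VM. */
	i915_gem_shrink(i915, 2 * page_count, NULL,
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);

	/* Last pass: fall back to the mapping's full gfp mask and let the
	 * real VM reclaim as it sees fit.
	 */
	return shmem_read_mapping_page_gfp(mapping, 0, mapping_gfp_mask(mapping));
}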
* @@ -3307,8 +3325,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915) intel_engine_dump(engine, &p, "%s\n", engine->name); } - set_bit(I915_WEDGED, &i915->gpu_error.flags); - smp_mb__after_atomic(); + if (test_and_set_bit(I915_WEDGED, &i915->gpu_error.flags)) + goto out; /* * First, stop submission to hw, but do not yet complete requests by @@ -3324,7 +3342,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915) i915->caps.scheduler = 0; /* Even if the GPU reset fails, it should still stop the engines */ - intel_gpu_reset(i915, ALL_ENGINES); + if (INTEL_GEN(i915) >= 5) + intel_gpu_reset(i915, ALL_ENGINES); /* * Make sure no one is running the old callback before we proceed with @@ -3367,6 +3386,7 @@ void i915_gem_set_wedged(struct drm_i915_private *i915) i915_gem_reset_finish_engine(engine); } +out: GEM_TRACE("end\n"); wake_up_all(&i915->gpu_error.reset_queue); @@ -3816,6 +3836,12 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, if (timeout < 0) return timeout; } + if (GEM_SHOW_DEBUG() && !timeout) { + /* Presume that timeout was non-zero to begin with! */ + dev_warn(&i915->drm.pdev->dev, + "Missed idle-completion interrupt!\n"); + GEM_TRACE_DUMP(); + } err = wait_for_engines(i915); if (err) @@ -5592,6 +5618,8 @@ err_uc_misc: i915_gem_cleanup_userptr(dev_priv); if (ret == -EIO) { + mutex_lock(&dev_priv->drm.struct_mutex); + /* * Allow engine initialisation to fail by marking the GPU as * wedged. But we only want to do this where the GPU is angry, @@ -5602,7 +5630,14 @@ err_uc_misc: "Failed to initialize GPU, declaring it wedged!\n"); i915_gem_set_wedged(dev_priv); } - ret = 0; + + /* Minimal basic recovery for KMS */ + ret = i915_ggtt_enable_hw(dev_priv); + i915_gem_restore_gtt_mappings(dev_priv); + i915_gem_restore_fences(dev_priv); + intel_init_clock_gating(dev_priv); + + mutex_unlock(&dev_priv->drm.struct_mutex); } i915_gem_drain_freed_objects(dev_priv); @@ -5612,6 +5647,7 @@ err_uc_misc: void i915_gem_fini(struct drm_i915_private *dev_priv) { i915_gem_suspend_late(dev_priv); + intel_disable_gt_powersave(dev_priv); /* Flush any outstanding unpin_work. 
*/ i915_gem_drain_workqueue(dev_priv); @@ -5623,6 +5659,8 @@ void i915_gem_fini(struct drm_i915_private *dev_priv) i915_gem_contexts_fini(dev_priv); mutex_unlock(&dev_priv->drm.struct_mutex); + intel_cleanup_gt_powersave(dev_priv); + intel_uc_fini_misc(dev_priv); i915_gem_cleanup_userptr(dev_priv); @@ -6182,4 +6220,5 @@ err_unlock: #include "selftests/huge_pages.c" #include "selftests/i915_gem_object.c" #include "selftests/i915_gem_coherency.c" +#include "selftests/i915_gem.c" #endif diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h index e46592956872..599c4f6eb1ea 100644 --- a/drivers/gpu/drm/i915/i915_gem.h +++ b/drivers/gpu/drm/i915/i915_gem.h @@ -82,12 +82,6 @@ static inline void __tasklet_disable_sync_once(struct tasklet_struct *t) tasklet_unlock_wait(t); } -static inline void __tasklet_enable_sync_once(struct tasklet_struct *t) -{ - if (atomic_dec_return(&t->count) == 0) - tasklet_kill(t); -} - static inline bool __tasklet_is_enabled(const struct tasklet_struct *t) { return !atomic_read(&t->count); diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index b10770cfccd2..747b8170a15a 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -115,6 +115,95 @@ static void lut_close(struct i915_gem_context *ctx) rcu_read_unlock(); } +static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp) +{ + unsigned int max; + + lockdep_assert_held(&i915->contexts.mutex); + + if (INTEL_GEN(i915) >= 11) + max = GEN11_MAX_CONTEXT_HW_ID; + else if (USES_GUC_SUBMISSION(i915)) + /* + * When using GuC in proxy submission, GuC consumes the + * highest bit in the context id to indicate proxy submission. + */ + max = MAX_GUC_CONTEXT_HW_ID; + else + max = MAX_CONTEXT_HW_ID; + + return ida_simple_get(&i915->contexts.hw_ida, 0, max, gfp); +} + +static int steal_hw_id(struct drm_i915_private *i915) +{ + struct i915_gem_context *ctx, *cn; + LIST_HEAD(pinned); + int id = -ENOSPC; + + lockdep_assert_held(&i915->contexts.mutex); + + list_for_each_entry_safe(ctx, cn, + &i915->contexts.hw_id_list, hw_id_link) { + if (atomic_read(&ctx->hw_id_pin_count)) { + list_move_tail(&ctx->hw_id_link, &pinned); + continue; + } + + GEM_BUG_ON(!ctx->hw_id); /* perma-pinned kernel context */ + list_del_init(&ctx->hw_id_link); + id = ctx->hw_id; + break; + } + + /* + * Remember how far we got up on the last repossesion scan, so the + * list is kept in a "least recently scanned" order. + */ + list_splice_tail(&pinned, &i915->contexts.hw_id_list); + return id; +} + +static int assign_hw_id(struct drm_i915_private *i915, unsigned int *out) +{ + int ret; + + lockdep_assert_held(&i915->contexts.mutex); + + /* + * We prefer to steal/stall ourselves and our users over that of the + * entire system. That may be a little unfair to our users, and + * even hurt high priority clients. The choice is whether to oomkill + * something else, or steal a context id. 
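new_hw_id() above bounds its ida_simple_get() call per platform; assign_hw_id(), whose body continues below, falls back to steal_hw_id() when that range is exhausted. A standalone toy example (not driver code) showing the exhaustion behaviour that makes the steal fallback necessary:

#include <linux/idr.h>

static int example_exhaust_small_range(void)
{
	static DEFINE_IDA(example_ida);
	int a = ida_simple_get(&example_ida, 0, 2, GFP_KERNEL);	/* -> 0 */
	int b = ida_simple_get(&example_ida, 0, 2, GFP_KERNEL);	/* -> 1 */
	int c = ida_simple_get(&example_ida, 0, 2, GFP_KERNEL);	/* -> -ENOSPC */

	if (a >= 0)
		ida_simple_remove(&example_ida, a);
	if (b >= 0)
		ida_simple_remove(&example_ida, b);

	return c;	/* ids come from [start, end); the range is full here */
}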
+ */ + ret = new_hw_id(i915, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN); + if (unlikely(ret < 0)) { + ret = steal_hw_id(i915); + if (ret < 0) /* once again for the correct errno code */ + ret = new_hw_id(i915, GFP_KERNEL); + if (ret < 0) + return ret; + } + + *out = ret; + return 0; +} + +static void release_hw_id(struct i915_gem_context *ctx) +{ + struct drm_i915_private *i915 = ctx->i915; + + if (list_empty(&ctx->hw_id_link)) + return; + + mutex_lock(&i915->contexts.mutex); + if (!list_empty(&ctx->hw_id_link)) { + ida_simple_remove(&i915->contexts.hw_ida, ctx->hw_id); + list_del_init(&ctx->hw_id_link); + } + mutex_unlock(&i915->contexts.mutex); +} + static void i915_gem_context_free(struct i915_gem_context *ctx) { unsigned int n; @@ -122,6 +211,7 @@ static void i915_gem_context_free(struct i915_gem_context *ctx) lockdep_assert_held(&ctx->i915->drm.struct_mutex); GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); + release_hw_id(ctx); i915_ppgtt_put(ctx->ppgtt); for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) { @@ -136,7 +226,6 @@ static void i915_gem_context_free(struct i915_gem_context *ctx) list_del(&ctx->link); - ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id); kfree_rcu(ctx, rcu); } @@ -191,6 +280,12 @@ static void context_close(struct i915_gem_context *ctx) i915_gem_context_set_closed(ctx); /* + * This context will never again be assinged to HW, so we can + * reuse its ID for the next context. + */ + release_hw_id(ctx); + + /* * The LUT uses the VMA as a backpointer to unref the object, * so we need to clear the LUT before we close all the VMA (inside * the ppgtt). @@ -203,43 +298,6 @@ static void context_close(struct i915_gem_context *ctx) i915_gem_context_put(ctx); } -static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out) -{ - int ret; - unsigned int max; - - if (INTEL_GEN(dev_priv) >= 11) { - max = GEN11_MAX_CONTEXT_HW_ID; - } else { - /* - * When using GuC in proxy submission, GuC consumes the - * highest bit in the context id to indicate proxy submission. - */ - if (USES_GUC_SUBMISSION(dev_priv)) - max = MAX_GUC_CONTEXT_HW_ID; - else - max = MAX_CONTEXT_HW_ID; - } - - - ret = ida_simple_get(&dev_priv->contexts.hw_ida, - 0, max, GFP_KERNEL); - if (ret < 0) { - /* Contexts are only released when no longer active. - * Flush any pending retires to hopefully release some - * stale contexts and try again. - */ - i915_retire_requests(dev_priv); - ret = ida_simple_get(&dev_priv->contexts.hw_ida, - 0, max, GFP_KERNEL); - if (ret < 0) - return ret; - } - - *out = ret; - return 0; -} - static u32 default_desc_template(const struct drm_i915_private *i915, const struct i915_hw_ppgtt *ppgtt) { @@ -276,12 +334,6 @@ __create_hw_context(struct drm_i915_private *dev_priv, if (ctx == NULL) return ERR_PTR(-ENOMEM); - ret = assign_hw_id(dev_priv, &ctx->hw_id); - if (ret) { - kfree(ctx); - return ERR_PTR(ret); - } - kref_init(&ctx->ref); list_add_tail(&ctx->link, &dev_priv->contexts.list); ctx->i915 = dev_priv; @@ -295,6 +347,7 @@ __create_hw_context(struct drm_i915_private *dev_priv, INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); INIT_LIST_HEAD(&ctx->handles_list); + INIT_LIST_HEAD(&ctx->hw_id_link); /* Default context will never have a file_priv */ ret = DEFAULT_CONTEXT_HANDLE; @@ -329,16 +382,6 @@ __create_hw_context(struct drm_i915_private *dev_priv, ctx->desc_template = default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt); - /* - * GuC requires the ring to be placed in Non-WOPCM memory. 
If GuC is not - * present or not in use we still need a small bias as ring wraparound - * at offset 0 sometimes hangs. No idea why. - */ - if (USES_GUC(dev_priv)) - ctx->ggtt_offset_bias = dev_priv->guc.ggtt_pin_bias; - else - ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE; - return ctx; err_pid: @@ -431,15 +474,35 @@ out: return ctx; } +static void +destroy_kernel_context(struct i915_gem_context **ctxp) +{ + struct i915_gem_context *ctx; + + /* Keep the context ref so that we can free it immediately ourselves */ + ctx = i915_gem_context_get(fetch_and_zero(ctxp)); + GEM_BUG_ON(!i915_gem_context_is_kernel(ctx)); + + context_close(ctx); + i915_gem_context_free(ctx); +} + struct i915_gem_context * i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio) { struct i915_gem_context *ctx; + int err; ctx = i915_gem_create_context(i915, NULL); if (IS_ERR(ctx)) return ctx; + err = i915_gem_context_pin_hw_id(ctx); + if (err) { + destroy_kernel_context(&ctx); + return ERR_PTR(err); + } + i915_gem_context_clear_bannable(ctx); ctx->sched.priority = prio; ctx->ring_size = PAGE_SIZE; @@ -449,17 +512,19 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio) return ctx; } -static void -destroy_kernel_context(struct i915_gem_context **ctxp) +static void init_contexts(struct drm_i915_private *i915) { - struct i915_gem_context *ctx; + mutex_init(&i915->contexts.mutex); + INIT_LIST_HEAD(&i915->contexts.list); - /* Keep the context ref so that we can free it immediately ourselves */ - ctx = i915_gem_context_get(fetch_and_zero(ctxp)); - GEM_BUG_ON(!i915_gem_context_is_kernel(ctx)); + /* Using the simple ida interface, the max is limited by sizeof(int) */ + BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX); + BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX); + ida_init(&i915->contexts.hw_ida); + INIT_LIST_HEAD(&i915->contexts.hw_id_list); - context_close(ctx); - i915_gem_context_free(ctx); + INIT_WORK(&i915->contexts.free_work, contexts_free_worker); + init_llist_head(&i915->contexts.free_list); } static bool needs_preempt_context(struct drm_i915_private *i915) @@ -480,14 +545,7 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv) if (ret) return ret; - INIT_LIST_HEAD(&dev_priv->contexts.list); - INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker); - init_llist_head(&dev_priv->contexts.free_list); - - /* Using the simple ida interface, the max is limited by sizeof(int) */ - BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX); - BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX); - ida_init(&dev_priv->contexts.hw_ida); + init_contexts(dev_priv); /* lowest priority; idle task */ ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN); @@ -497,9 +555,13 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv) } /* * For easy recognisablity, we want the kernel context to be 0 and then - * all user contexts will have non-zero hw_id. + * all user contexts will have non-zero hw_id. Kernel contexts are + * permanently pinned, so that we never suffer a stall and can + * use them from any allocation context (e.g. for evicting other + * contexts and from inside the shrinker). 
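Perma-pinning the kernel context relies on the hw_id pin-count scheme this patch introduces: a lockless fast path plus a mutex-guarded slow path. A generic sketch of that idiom using made-up names, not the driver's own code:

#include <linux/atomic.h>
#include <linux/mutex.h>

struct example_pinnable {
	struct mutex lock;
	atomic_t pin_count;
	int id;				/* stays valid until repossessed */
};

static int example_pin(struct example_pinnable *p)
{
	/* Fast path: only succeeds while an id is already assigned. */
	if (atomic_inc_not_zero(&p->pin_count))
		return 0;

	/* Slow path: the first pin assigns an id, serialised by the mutex. */
	mutex_lock(&p->lock);
	if (!atomic_read(&p->pin_count))
		p->id = 42;		/* stand-in for assign_hw_id() */
	atomic_inc(&p->pin_count);
	mutex_unlock(&p->lock);
	return 0;
}

static void example_unpin(struct example_pinnable *p)
{
	atomic_dec(&p->pin_count);	/* the id is only reclaimed by a steal */
}

Because the kernel context is pinned once at init and never unpinned, its id can be handed to the hardware from any context without risking a stall in the allocator.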
*/ GEM_BUG_ON(ctx->hw_id); + GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count)); dev_priv->kernel_context = ctx; /* highest priority; preempting task */ @@ -537,6 +599,7 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915) destroy_kernel_context(&i915->kernel_context); /* Must free all deferred contexts (via flush_workqueue) first */ + GEM_BUG_ON(!list_empty(&i915->contexts.hw_id_list)); ida_destroy(&i915->contexts.hw_ida); } @@ -942,6 +1005,33 @@ out: return ret; } +int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx) +{ + struct drm_i915_private *i915 = ctx->i915; + int err = 0; + + mutex_lock(&i915->contexts.mutex); + + GEM_BUG_ON(i915_gem_context_is_closed(ctx)); + + if (list_empty(&ctx->hw_id_link)) { + GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count)); + + err = assign_hw_id(i915, &ctx->hw_id); + if (err) + goto out_unlock; + + list_add_tail(&ctx->hw_id_link, &i915->contexts.hw_id_list); + } + + GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == ~0u); + atomic_inc(&ctx->hw_id_pin_count); + +out_unlock: + mutex_unlock(&i915->contexts.mutex); + return err; +} + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/mock_context.c" #include "selftests/i915_gem_context.c" diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h index b116e4942c10..e09673ca731d 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.h +++ b/drivers/gpu/drm/i915/i915_gem_context.h @@ -134,8 +134,16 @@ struct i915_gem_context { * functions like fault reporting, PASID, scheduling. The * &drm_i915_private.context_hw_ida is used to assign a unqiue * id for the lifetime of the context. + * + * @hw_id_pin_count: - number of times this context had been pinned + * for use (should be, at most, once per engine). + * + * @hw_id_link: - all contexts with an assigned id are tracked + * for possible repossession. */ unsigned int hw_id; + atomic_t hw_id_pin_count; + struct list_head hw_id_link; /** * @user_handle: userspace identifier @@ -147,9 +155,6 @@ struct i915_gem_context { struct i915_sched_attr sched; - /** ggtt_offset_bias: placement restriction for context objects */ - u32 ggtt_offset_bias; - /** engine: per-engine logical HW state */ struct intel_context { struct i915_gem_context *gem_context; @@ -257,6 +262,21 @@ static inline void i915_gem_context_set_force_single_submission(struct i915_gem_ __set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags); } +int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx); +static inline int i915_gem_context_pin_hw_id(struct i915_gem_context *ctx) +{ + if (atomic_inc_not_zero(&ctx->hw_id_pin_count)) + return 0; + + return __i915_gem_context_pin_hw_id(ctx); +} + +static inline void i915_gem_context_unpin_hw_id(struct i915_gem_context *ctx) +{ + GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == 0u); + atomic_dec(&ctx->hw_id_pin_count); +} + static inline bool i915_gem_context_is_default(const struct i915_gem_context *c) { return c->user_handle == DEFAULT_CONTEXT_HANDLE; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 0a8d2d64f380..22b4cb775576 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -64,7 +64,9 @@ enum { #define BATCH_OFFSET_BIAS (256*1024) #define __I915_EXEC_ILLEGAL_FLAGS \ - (__I915_EXEC_UNKNOWN_FLAGS | I915_EXEC_CONSTANTS_MASK) + (__I915_EXEC_UNKNOWN_FLAGS | \ + I915_EXEC_CONSTANTS_MASK | \ + I915_EXEC_RESOURCE_STREAMER) /* Catch emission of unexpected errors for CI! 
*/ #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) @@ -733,7 +735,12 @@ static int eb_select_context(struct i915_execbuffer *eb) return -ENOENT; eb->ctx = ctx; - eb->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &eb->i915->ggtt.vm; + if (ctx->ppgtt) { + eb->vm = &ctx->ppgtt->vm; + eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT; + } else { + eb->vm = &eb->i915->ggtt.vm; + } eb->context_flags = 0; if (ctx->flags & CONTEXT_NO_ZEROMAP) @@ -1120,6 +1127,13 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, u32 *cmd; int err; + if (DBG_FORCE_RELOC == FORCE_GPU_RELOC) { + obj = vma->obj; + if (obj->cache_dirty & ~obj->cache_coherent) + i915_gem_clflush_object(obj, 0); + obj->write_domain = 0; + } + GEM_BUG_ON(vma->obj->write_domain & I915_GEM_DOMAIN_CPU); obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE); @@ -1484,8 +1498,10 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma) * can read from this userspace address. */ offset = gen8_canonical_addr(offset & ~UPDATE); - __put_user(offset, - &urelocs[r-stack].presumed_offset); + if (unlikely(__put_user(offset, &urelocs[r-stack].presumed_offset))) { + remain = -EFAULT; + goto out; + } } } while (r++, --count); urelocs += ARRAY_SIZE(stack); @@ -1570,7 +1586,6 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb) relocs = kvmalloc_array(size, 1, GFP_KERNEL); if (!relocs) { - kvfree(relocs); err = -ENOMEM; goto err; } @@ -1584,6 +1599,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb) if (__copy_from_user((char *)relocs + copied, (char __user *)urelocs + copied, len)) { +end_user: kvfree(relocs); err = -EFAULT; goto err; @@ -1607,7 +1623,6 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb) unsafe_put_user(-1, &urelocs[copied].presumed_offset, end_user); -end_user: user_access_end(); eb->exec[i].relocs_ptr = (uintptr_t)relocs; @@ -2199,8 +2214,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, eb.flags = (unsigned int *)(eb.vma + args->buffer_count + 1); eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS; - if (USES_FULL_PPGTT(eb.i915)) - eb.invalid_flags |= EXEC_OBJECT_NEEDS_GTT; reloc_cache_init(&eb.reloc_cache, eb.i915); eb.buffer_count = args->buffer_count; @@ -2221,20 +2234,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, if (!eb.engine) return -EINVAL; - if (args->flags & I915_EXEC_RESOURCE_STREAMER) { - if (!HAS_RESOURCE_STREAMER(eb.i915)) { - DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n"); - return -EINVAL; - } - if (eb.engine->id != RCS) { - DRM_DEBUG("RS is not available on %s\n", - eb.engine->name); - return -EINVAL; - } - - eb.batch_flags |= I915_DISPATCH_RS; - } - if (args->flags & I915_EXEC_FENCE_IN) { in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2)); if (!in_fence) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index f00c7fbef79e..eb0e446d6482 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -173,19 +173,11 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, return 0; } - /* Early VLV doesn't have this */ - if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) { - DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n"); - return 0; - } - - if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) { - if (has_full_48bit_ppgtt) - return 3; + if (has_full_48bit_ppgtt) + return 3; - if (has_full_ppgtt) - return 2; - } + if (has_full_ppgtt) + return 2; return 1; } @@ -1259,9 +1251,6 @@ static void gen8_free_page_tables(struct 
i915_address_space *vm, { int i; - if (!px_page(pd)) - return; - for (i = 0; i < I915_PDES; i++) { if (pd->page_table[i] != vm->scratch_pt) free_pt(vm, pd->page_table[i]); @@ -2348,7 +2337,7 @@ static bool needs_idle_maps(struct drm_i915_private *dev_priv) return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active(); } -static void gen6_check_and_clear_faults(struct drm_i915_private *dev_priv) +static void gen6_check_faults(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; enum intel_engine_id id; @@ -2366,15 +2355,11 @@ static void gen6_check_and_clear_faults(struct drm_i915_private *dev_priv) fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT", RING_FAULT_SRCID(fault), RING_FAULT_FAULT_TYPE(fault)); - I915_WRITE(RING_FAULT_REG(engine), - fault & ~RING_FAULT_VALID); } } - - POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS])); } -static void gen8_check_and_clear_faults(struct drm_i915_private *dev_priv) +static void gen8_check_faults(struct drm_i915_private *dev_priv) { u32 fault = I915_READ(GEN8_RING_FAULT_REG); @@ -2399,22 +2384,20 @@ static void gen8_check_and_clear_faults(struct drm_i915_private *dev_priv) GEN8_RING_FAULT_ENGINE_ID(fault), RING_FAULT_SRCID(fault), RING_FAULT_FAULT_TYPE(fault)); - I915_WRITE(GEN8_RING_FAULT_REG, - fault & ~RING_FAULT_VALID); } - - POSTING_READ(GEN8_RING_FAULT_REG); } void i915_check_and_clear_faults(struct drm_i915_private *dev_priv) { /* From GEN8 onwards we only have one 'All Engine Fault Register' */ if (INTEL_GEN(dev_priv) >= 8) - gen8_check_and_clear_faults(dev_priv); + gen8_check_faults(dev_priv); else if (INTEL_GEN(dev_priv) >= 6) - gen6_check_and_clear_faults(dev_priv); + gen6_check_faults(dev_priv); else return; + + i915_clear_error_registers(dev_priv); } void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv) @@ -2937,6 +2920,15 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) struct drm_mm_node *entry; int ret; + /* + * GuC requires all resources that we're sharing with it to be placed in + * non-WOPCM memory. If GuC is not present or not in use we still need a + * small bias as ring wraparound at offset 0 sometimes hangs. No idea + * why. 
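The comment above introduces the global GGTT pin bias set just below (ggtt->pin_bias), which replaces the per-context ggtt_offset_bias removed earlier in this diff. A sketch of the kind of consumer it is meant for; the helper is assumed, not shown in this excerpt:

static int example_pin_above_bias(struct drm_i915_private *i915,
				  struct i915_vma *vma)
{
	/* Keep ring/context objects out of the GuC-reserved low range. */
	u64 flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.pin_bias;

	return i915_vma_pin(vma, 0, 0, flags);
}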
+ */ + ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE, + intel_guc_reserved_gtt_size(&dev_priv->guc)); + ret = intel_vgt_balloon(dev_priv); if (ret) return ret; @@ -3612,6 +3604,8 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) mutex_lock(&dev_priv->drm.struct_mutex); i915_address_space_init(&ggtt->vm, dev_priv); + ggtt->vm.is_ggtt = true; + /* Only VLV supports read-only GGTT mappings */ ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv); @@ -3662,6 +3656,10 @@ void i915_ggtt_enable_guc(struct drm_i915_private *i915) void i915_ggtt_disable_guc(struct drm_i915_private *i915) { + /* XXX Temporary pardon for error unload */ + if (i915->ggtt.invalidate == gen6_ggtt_invalidate) + return; + /* We should only be called after i915_ggtt_enable_guc() */ GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 2a116a91420b..7e2af5f4f39b 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -167,29 +167,22 @@ struct intel_rotation_info { } plane[2]; } __packed; -static inline void assert_intel_rotation_info_is_packed(void) -{ - BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int)); -} - struct intel_partial_info { u64 offset; unsigned int size; } __packed; -static inline void assert_intel_partial_info_is_packed(void) -{ - BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int)); -} - enum i915_ggtt_view_type { I915_GGTT_VIEW_NORMAL = 0, I915_GGTT_VIEW_ROTATED = sizeof(struct intel_rotation_info), I915_GGTT_VIEW_PARTIAL = sizeof(struct intel_partial_info), }; -static inline void assert_i915_ggtt_view_type_is_unique(void) +static inline void assert_i915_gem_gtt_types(void) { + BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int)); + BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int)); + /* As we encode the size of each branch inside the union into its type, * we have to be careful that each branch has a unique size. 
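assert_i915_gem_gtt_types() above documents the trick that each view type's enum value equals the size of its union branch. An illustrative helper (not part of the patch) showing what that buys when comparing two views:

static bool example_ggtt_view_equal(const struct i915_ggtt_view *a,
				    const struct i915_ggtt_view *b)
{
	if (a->type != b->type)
		return false;
	if (a->type == I915_GGTT_VIEW_NORMAL)
		return true;

	/* 'type' doubles as the number of union bytes worth comparing. */
	return memcmp(&a->partial, &b->partial, a->type) == 0;
}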
*/ @@ -229,7 +222,6 @@ struct i915_page_dma { }; #define px_base(px) (&(px)->base) -#define px_page(px) (px_base(px)->page) #define px_dma(px) (px_base(px)->daddr) struct i915_page_table { @@ -332,6 +324,9 @@ struct i915_address_space { struct pagestash free_pages; + /* Global GTT */ + bool is_ggtt:1; + /* Some systems require uncached updates of the page directories */ bool pt_kmap_wc:1; @@ -365,7 +360,7 @@ struct i915_address_space { I915_SELFTEST_DECLARE(bool scrub_64K); }; -#define i915_is_ggtt(V) (!(V)->file) +#define i915_is_ggtt(vm) ((vm)->is_ggtt) static inline bool i915_vm_is_48bit(const struct i915_address_space *vm) @@ -401,6 +396,8 @@ struct i915_ggtt { int mtrr; + u32 pin_bias; + struct drm_mm_node error_capture; }; diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h index 83e5e01fa9ea..a6dd7c46de0d 100644 --- a/drivers/gpu/drm/i915/i915_gem_object.h +++ b/drivers/gpu/drm/i915/i915_gem_object.h @@ -421,19 +421,19 @@ i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj) } static inline unsigned int -i915_gem_object_get_tiling(struct drm_i915_gem_object *obj) +i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj) { return obj->tiling_and_stride & TILING_MASK; } static inline bool -i915_gem_object_is_tiled(struct drm_i915_gem_object *obj) +i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj) { return i915_gem_object_get_tiling(obj) != I915_TILING_NONE; } static inline unsigned int -i915_gem_object_get_stride(struct drm_i915_gem_object *obj) +i915_gem_object_get_stride(const struct drm_i915_gem_object *obj) { return obj->tiling_and_stride & STRIDE_MASK; } @@ -446,13 +446,13 @@ i915_gem_tile_height(unsigned int tiling) } static inline unsigned int -i915_gem_object_get_tile_height(struct drm_i915_gem_object *obj) +i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj) { return i915_gem_tile_height(i915_gem_object_get_tiling(obj)); } static inline unsigned int -i915_gem_object_get_tile_row_size(struct drm_i915_gem_object *obj) +i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj) { return (i915_gem_object_get_stride(obj) * i915_gem_object_get_tile_height(obj)); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 90628a47ae17..10f28a2ee2e6 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -478,7 +478,7 @@ void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv) void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv) { spin_lock_irq(&dev_priv->irq_lock); - gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events); + gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS); dev_priv->gt_pm.rps.pm_iir = 0; spin_unlock_irq(&dev_priv->irq_lock); } @@ -516,7 +516,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv) I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u)); - gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events); + gen6_disable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS); spin_unlock_irq(&dev_priv->irq_lock); synchronize_irq(dev_priv->drm.irq); @@ -1534,11 +1534,8 @@ static void gen8_gt_irq_ack(struct drm_i915_private *i915, if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2)); - if (likely(gt_iir[2] & (i915->pm_rps_events | - i915->pm_guc_events))) - raw_reg_write(regs, GEN8_GT_IIR(2), - gt_iir[2] & (i915->pm_rps_events | - i915->pm_guc_events)); + if (likely(gt_iir[2])) + raw_reg_write(regs, GEN8_GT_IIR(2), 
gt_iir[2]); } if (master_ctl & GEN8_GT_VECS_IRQ) { @@ -3218,7 +3215,7 @@ static void i915_reset_device(struct drm_i915_private *dev_priv, kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event); } -static void i915_clear_error_registers(struct drm_i915_private *dev_priv) +void i915_clear_error_registers(struct drm_i915_private *dev_priv) { u32 eir; @@ -3241,6 +3238,22 @@ static void i915_clear_error_registers(struct drm_i915_private *dev_priv) I915_WRITE(EMR, I915_READ(EMR) | eir); I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT); } + + if (INTEL_GEN(dev_priv) >= 8) { + I915_WRITE(GEN8_RING_FAULT_REG, + I915_READ(GEN8_RING_FAULT_REG) & ~RING_FAULT_VALID); + POSTING_READ(GEN8_RING_FAULT_REG); + } else if (INTEL_GEN(dev_priv) >= 6) { + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, dev_priv, id) { + I915_WRITE(RING_FAULT_REG(engine), + I915_READ(RING_FAULT_REG(engine)) & + ~RING_FAULT_VALID); + } + POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS])); + } } /** @@ -3296,7 +3309,8 @@ void i915_handle_error(struct drm_i915_private *dev_priv, * Try engine reset when available. We fall back to full reset if * single reset fails. */ - if (intel_has_reset_engine(dev_priv)) { + if (intel_has_reset_engine(dev_priv) && + !i915_terminally_wedged(&dev_priv->gpu_error)) { for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE); if (test_and_set_bit(I915_RESET_ENGINE + engine->id, @@ -4781,7 +4795,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv) /* WaGsvRC0ResidencyMethod:vlv */ dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; else - dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; + dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD | + GEN6_PM_RP_DOWN_THRESHOLD | + GEN6_PM_RP_DOWN_TIMEOUT); rps->pm_intrmsk_mbz = 0; diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 6a4d1388ad2d..d6f7b9fe1d26 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -74,6 +74,7 @@ .unfenced_needs_alignment = 1, \ .ring_mask = RENDER_RING, \ .has_snoop = true, \ + .has_coherent_ggtt = false, \ GEN_DEFAULT_PIPEOFFSETS, \ GEN_DEFAULT_PAGE_SIZES, \ CURSOR_OFFSETS @@ -110,6 +111,7 @@ static const struct intel_device_info intel_i865g_info = { .has_gmch_display = 1, \ .ring_mask = RENDER_RING, \ .has_snoop = true, \ + .has_coherent_ggtt = true, \ GEN_DEFAULT_PIPEOFFSETS, \ GEN_DEFAULT_PAGE_SIZES, \ CURSOR_OFFSETS @@ -117,6 +119,7 @@ static const struct intel_device_info intel_i865g_info = { static const struct intel_device_info intel_i915g_info = { GEN3_FEATURES, PLATFORM(INTEL_I915G), + .has_coherent_ggtt = false, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, .hws_needs_physical = 1, @@ -178,6 +181,7 @@ static const struct intel_device_info intel_pineview_info = { .has_gmch_display = 1, \ .ring_mask = RENDER_RING, \ .has_snoop = true, \ + .has_coherent_ggtt = true, \ GEN_DEFAULT_PIPEOFFSETS, \ GEN_DEFAULT_PAGE_SIZES, \ CURSOR_OFFSETS @@ -220,6 +224,7 @@ static const struct intel_device_info intel_gm45_info = { .has_hotplug = 1, \ .ring_mask = RENDER_RING | BSD_RING, \ .has_snoop = true, \ + .has_coherent_ggtt = true, \ /* ilk does support rc6, but we do not implement [power] contexts */ \ .has_rc6 = 0, \ GEN_DEFAULT_PIPEOFFSETS, \ @@ -243,6 +248,7 @@ static const struct intel_device_info intel_ironlake_m_info = { .has_hotplug = 1, \ .has_fbc = 1, \ .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ + .has_coherent_ggtt = true, \ 
.has_llc = 1, \ .has_rc6 = 1, \ .has_rc6p = 1, \ @@ -287,6 +293,7 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = { .has_hotplug = 1, \ .has_fbc = 1, \ .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ + .has_coherent_ggtt = true, \ .has_llc = 1, \ .has_rc6 = 1, \ .has_rc6p = 1, \ @@ -347,6 +354,7 @@ static const struct intel_device_info intel_valleyview_info = { .has_aliasing_ppgtt = 1, .has_full_ppgtt = 1, .has_snoop = true, + .has_coherent_ggtt = false, .ring_mask = RENDER_RING | BSD_RING | BLT_RING, .display_mmio_offset = VLV_DISPLAY_BASE, GEN_DEFAULT_PAGE_SIZES, @@ -360,7 +368,6 @@ static const struct intel_device_info intel_valleyview_info = { .has_ddi = 1, \ .has_fpga_dbg = 1, \ .has_psr = 1, \ - .has_resource_streamer = 1, \ .has_dp_mst = 1, \ .has_rc6p = 0 /* RC6p removed-by HSW */, \ .has_runtime_pm = 1 @@ -433,7 +440,6 @@ static const struct intel_device_info intel_cherryview_info = { .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, .has_64bit_reloc = 1, .has_runtime_pm = 1, - .has_resource_streamer = 1, .has_rc6 = 1, .has_logical_ring_contexts = 1, .has_gmch_display = 1, @@ -441,6 +447,7 @@ static const struct intel_device_info intel_cherryview_info = { .has_full_ppgtt = 1, .has_reset_engine = 1, .has_snoop = true, + .has_coherent_ggtt = false, .display_mmio_offset = VLV_DISPLAY_BASE, GEN_DEFAULT_PAGE_SIZES, GEN_CHV_PIPEOFFSETS, @@ -506,7 +513,6 @@ static const struct intel_device_info intel_skylake_gt4_info = { .has_runtime_pm = 1, \ .has_pooled_eu = 0, \ .has_csr = 1, \ - .has_resource_streamer = 1, \ .has_rc6 = 1, \ .has_dp_mst = 1, \ .has_logical_ring_contexts = 1, \ @@ -517,6 +523,7 @@ static const struct intel_device_info intel_skylake_gt4_info = { .has_full_48bit_ppgtt = 1, \ .has_reset_engine = 1, \ .has_snoop = true, \ + .has_coherent_ggtt = false, \ .has_ipc = 1, \ GEN9_DEFAULT_PAGE_SIZES, \ GEN_DEFAULT_PIPEOFFSETS, \ @@ -580,6 +587,7 @@ static const struct intel_device_info intel_coffeelake_gt3_info = { GEN9_FEATURES, \ GEN(10), \ .ddb_size = 1024, \ + .has_coherent_ggtt = false, \ GLK_COLORS static const struct intel_device_info intel_cannonlake_info = { @@ -592,14 +600,12 @@ static const struct intel_device_info intel_cannonlake_info = { GEN10_FEATURES, \ GEN(11), \ .ddb_size = 2048, \ - .has_csr = 0, \ .has_logical_ring_elsq = 1 static const struct intel_device_info intel_icelake_11_info = { GEN11_FEATURES, PLATFORM(INTEL_ICELAKE), .is_alpha_support = 1, - .has_resource_streamer = 0, .ring_mask = RENDER_RING | BLT_RING | VEBOX_RING | BSD_RING | BSD3_RING, }; diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 6bf10952c724..ccb20230df2c 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -210,6 +210,7 @@ #include "i915_oa_cflgt3.h" #include "i915_oa_cnl.h" #include "i915_oa_icl.h" +#include "intel_lrc_reg.h" /* HW requires this to be a power of two, between 128k and 16M, though driver * is currently generally designed assuming the largest 16M size is used such @@ -1338,14 +1339,12 @@ free_oa_buffer(struct drm_i915_private *i915) { mutex_lock(&i915->drm.struct_mutex); - i915_gem_object_unpin_map(i915->perf.oa.oa_buffer.vma->obj); - i915_vma_unpin(i915->perf.oa.oa_buffer.vma); - i915_gem_object_put(i915->perf.oa.oa_buffer.vma->obj); - - i915->perf.oa.oa_buffer.vma = NULL; - i915->perf.oa.oa_buffer.vaddr = NULL; + i915_vma_unpin_and_release(&i915->perf.oa.oa_buffer.vma, + I915_VMA_RELEASE_MAP); mutex_unlock(&i915->drm.struct_mutex); + + 
i915->perf.oa.oa_buffer.vaddr = NULL; } static void i915_oa_stream_destroy(struct i915_perf_stream *stream) @@ -1638,27 +1637,25 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx, u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset; u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset; /* The MMIO offsets for Flex EU registers aren't contiguous */ - u32 flex_mmio[] = { - i915_mmio_reg_offset(EU_PERF_CNTL0), - i915_mmio_reg_offset(EU_PERF_CNTL1), - i915_mmio_reg_offset(EU_PERF_CNTL2), - i915_mmio_reg_offset(EU_PERF_CNTL3), - i915_mmio_reg_offset(EU_PERF_CNTL4), - i915_mmio_reg_offset(EU_PERF_CNTL5), - i915_mmio_reg_offset(EU_PERF_CNTL6), + i915_reg_t flex_regs[] = { + EU_PERF_CNTL0, + EU_PERF_CNTL1, + EU_PERF_CNTL2, + EU_PERF_CNTL3, + EU_PERF_CNTL4, + EU_PERF_CNTL5, + EU_PERF_CNTL6, }; int i; - reg_state[ctx_oactxctrl] = i915_mmio_reg_offset(GEN8_OACTXCONTROL); - reg_state[ctx_oactxctrl+1] = (dev_priv->perf.oa.period_exponent << - GEN8_OA_TIMER_PERIOD_SHIFT) | - (dev_priv->perf.oa.periodic ? - GEN8_OA_TIMER_ENABLE : 0) | - GEN8_OA_COUNTER_RESUME; + CTX_REG(reg_state, ctx_oactxctrl, GEN8_OACTXCONTROL, + (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | + (dev_priv->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) | + GEN8_OA_COUNTER_RESUME); - for (i = 0; i < ARRAY_SIZE(flex_mmio); i++) { + for (i = 0; i < ARRAY_SIZE(flex_regs); i++) { u32 state_offset = ctx_flexeu0 + i * 2; - u32 mmio = flex_mmio[i]; + u32 mmio = i915_mmio_reg_offset(flex_regs[i]); /* * This arbitrary default will select the 'EU FPU0 Pipeline @@ -1678,8 +1675,7 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx, } } - reg_state[state_offset] = mmio; - reg_state[state_offset+1] = value; + CTX_REG(reg_state, state_offset, flex_regs[i], value); } } @@ -1821,7 +1817,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, /* Switch away from any user context. */ ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config); if (ret) - goto out; + return ret; /* * The OA register config is setup through the context image. This image @@ -1840,7 +1836,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, wait_flags, MAX_SCHEDULE_TIMEOUT); if (ret) - goto out; + return ret; /* Update all contexts now that we've stalled the submission. 
*/ list_for_each_entry(ctx, &dev_priv->contexts.list, link) { @@ -1852,10 +1848,8 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, continue; regs = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB); - if (IS_ERR(regs)) { - ret = PTR_ERR(regs); - goto out; - } + if (IS_ERR(regs)) + return PTR_ERR(regs); ce->state->obj->mm.dirty = true; regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs); @@ -1865,7 +1859,6 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, i915_gem_object_unpin_map(ce->state->obj); } - out: return ret; } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 08ec7446282e..09bc8e730ee1 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -344,6 +344,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN8_RPCS_S_CNT_ENABLE (1 << 18) #define GEN8_RPCS_S_CNT_SHIFT 15 #define GEN8_RPCS_S_CNT_MASK (0x7 << GEN8_RPCS_S_CNT_SHIFT) +#define GEN11_RPCS_S_CNT_SHIFT 12 +#define GEN11_RPCS_S_CNT_MASK (0x3f << GEN11_RPCS_S_CNT_SHIFT) #define GEN8_RPCS_SS_CNT_ENABLE (1 << 11) #define GEN8_RPCS_SS_CNT_SHIFT 8 #define GEN8_RPCS_SS_CNT_MASK (0x7 << GEN8_RPCS_SS_CNT_SHIFT) @@ -1029,126 +1031,43 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) /* * i915_power_well_id: * - * Platform specific IDs used to look up power wells and - except for custom - * power wells - to define request/status register flag bit positions. As such - * the set of IDs on a given platform must be unique and except for custom - * power wells their value must stay fixed. + * IDs used to look up power wells. Power wells accessed directly bypassing + * the power domains framework must be assigned a unique ID. The rest of power + * wells must be assigned DISP_PW_ID_NONE. 
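The rewritten comment above says power well IDs now exist only for direct lookup, with everything else assigned DISP_PW_ID_NONE (the trimmed enum follows below). The expected consumer is a simple table scan; a sketch of that lookup, where the power_wells array member is assumed for illustration:

static struct i915_power_well *
example_lookup_power_well(struct drm_i915_private *dev_priv,
			  enum i915_power_well_id power_well_id)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well =
			&power_domains->power_wells[i];

		if (power_well->desc->id == power_well_id)
			return power_well;
	}

	return NULL;
}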
*/ enum i915_power_well_id { - /* - * I830 - * - custom power well - */ - I830_DISP_PW_PIPES = 0, - - /* - * VLV/CHV - * - PUNIT_REG_PWRGT_CTRL (bit: id*2), - * PUNIT_REG_PWRGT_STATUS (bit: id*2) (PUNIT HAS v0.8) - */ - PUNIT_POWER_WELL_RENDER = 0, - PUNIT_POWER_WELL_MEDIA = 1, - PUNIT_POWER_WELL_DISP2D = 3, - PUNIT_POWER_WELL_DPIO_CMN_BC = 5, - PUNIT_POWER_WELL_DPIO_TX_B_LANES_01 = 6, - PUNIT_POWER_WELL_DPIO_TX_B_LANES_23 = 7, - PUNIT_POWER_WELL_DPIO_TX_C_LANES_01 = 8, - PUNIT_POWER_WELL_DPIO_TX_C_LANES_23 = 9, - PUNIT_POWER_WELL_DPIO_RX0 = 10, - PUNIT_POWER_WELL_DPIO_RX1 = 11, - PUNIT_POWER_WELL_DPIO_CMN_D = 12, - /* - custom power well */ - CHV_DISP_PW_PIPE_A, /* 13 */ - - /* - * HSW/BDW - * - _HSW_PWR_WELL_CTL1-4 (status bit: id*2, req bit: id*2+1) - */ - HSW_DISP_PW_GLOBAL = 15, - - /* - * GEN9+ - * - _HSW_PWR_WELL_CTL1-4 (status bit: id*2, req bit: id*2+1) - */ - SKL_DISP_PW_MISC_IO = 0, - SKL_DISP_PW_DDI_A_E, - GLK_DISP_PW_DDI_A = SKL_DISP_PW_DDI_A_E, - CNL_DISP_PW_DDI_A = SKL_DISP_PW_DDI_A_E, - SKL_DISP_PW_DDI_B, - SKL_DISP_PW_DDI_C, - SKL_DISP_PW_DDI_D, - CNL_DISP_PW_DDI_F = 6, - - GLK_DISP_PW_AUX_A = 8, - GLK_DISP_PW_AUX_B, - GLK_DISP_PW_AUX_C, - CNL_DISP_PW_AUX_A = GLK_DISP_PW_AUX_A, - CNL_DISP_PW_AUX_B = GLK_DISP_PW_AUX_B, - CNL_DISP_PW_AUX_C = GLK_DISP_PW_AUX_C, - CNL_DISP_PW_AUX_D, - CNL_DISP_PW_AUX_F, - - SKL_DISP_PW_1 = 14, + DISP_PW_ID_NONE, + + VLV_DISP_PW_DISP2D, + BXT_DISP_PW_DPIO_CMN_A, + VLV_DISP_PW_DPIO_CMN_BC, + GLK_DISP_PW_DPIO_CMN_C, + CHV_DISP_PW_DPIO_CMN_D, + HSW_DISP_PW_GLOBAL, + SKL_DISP_PW_MISC_IO, + SKL_DISP_PW_1, SKL_DISP_PW_2, - - /* - custom power wells */ - BXT_DPIO_CMN_A, - BXT_DPIO_CMN_BC, - GLK_DPIO_CMN_C, /* 18 */ - - /* - * GEN11+ - * - _HSW_PWR_WELL_CTL1-4 - * (status bit: (id&15)*2, req bit:(id&15)*2+1) - */ - ICL_DISP_PW_1 = 0, - ICL_DISP_PW_2, - ICL_DISP_PW_3, - ICL_DISP_PW_4, - - /* - * - _HSW_PWR_WELL_CTL_AUX1/2/4 - * (status bit: (id&15)*2, req bit:(id&15)*2+1) - */ - ICL_DISP_PW_AUX_A = 16, - ICL_DISP_PW_AUX_B, - ICL_DISP_PW_AUX_C, - ICL_DISP_PW_AUX_D, - ICL_DISP_PW_AUX_E, - ICL_DISP_PW_AUX_F, - - ICL_DISP_PW_AUX_TBT1 = 24, - ICL_DISP_PW_AUX_TBT2, - ICL_DISP_PW_AUX_TBT3, - ICL_DISP_PW_AUX_TBT4, - - /* - * - _HSW_PWR_WELL_CTL_DDI1/2/4 - * (status bit: (id&15)*2, req bit:(id&15)*2+1) - */ - ICL_DISP_PW_DDI_A = 32, - ICL_DISP_PW_DDI_B, - ICL_DISP_PW_DDI_C, - ICL_DISP_PW_DDI_D, - ICL_DISP_PW_DDI_E, - ICL_DISP_PW_DDI_F, /* 37 */ - - /* - * Multiple platforms. - * Must start following the highest ID of any platform. 
- * - custom power wells - */ - SKL_DISP_PW_DC_OFF = 38, - I915_DISP_PW_ALWAYS_ON, }; #define PUNIT_REG_PWRGT_CTRL 0x60 #define PUNIT_REG_PWRGT_STATUS 0x61 -#define PUNIT_PWRGT_MASK(power_well) (3 << ((power_well) * 2)) -#define PUNIT_PWRGT_PWR_ON(power_well) (0 << ((power_well) * 2)) -#define PUNIT_PWRGT_CLK_GATE(power_well) (1 << ((power_well) * 2)) -#define PUNIT_PWRGT_RESET(power_well) (2 << ((power_well) * 2)) -#define PUNIT_PWRGT_PWR_GATE(power_well) (3 << ((power_well) * 2)) +#define PUNIT_PWRGT_MASK(pw_idx) (3 << ((pw_idx) * 2)) +#define PUNIT_PWRGT_PWR_ON(pw_idx) (0 << ((pw_idx) * 2)) +#define PUNIT_PWRGT_CLK_GATE(pw_idx) (1 << ((pw_idx) * 2)) +#define PUNIT_PWRGT_RESET(pw_idx) (2 << ((pw_idx) * 2)) +#define PUNIT_PWRGT_PWR_GATE(pw_idx) (3 << ((pw_idx) * 2)) + +#define PUNIT_PWGT_IDX_RENDER 0 +#define PUNIT_PWGT_IDX_MEDIA 1 +#define PUNIT_PWGT_IDX_DISP2D 3 +#define PUNIT_PWGT_IDX_DPIO_CMN_BC 5 +#define PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01 6 +#define PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23 7 +#define PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01 8 +#define PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23 9 +#define PUNIT_PWGT_IDX_DPIO_RX0 10 +#define PUNIT_PWGT_IDX_DPIO_RX1 11 +#define PUNIT_PWGT_IDX_DPIO_CMN_D 12 #define PUNIT_REG_GPU_LFM 0xd3 #define PUNIT_REG_GPU_FREQ_REQ 0xd4 @@ -1932,121 +1851,200 @@ enum i915_power_well_id { #define N_SCALAR(x) ((x) << 24) #define N_SCALAR_MASK (0x7F << 24) -#define _ICL_MG_PHY_PORT_LN(port, ln, ln0p1, ln0p2, ln1p1) \ +#define MG_PHY_PORT_LN(port, ln, ln0p1, ln0p2, ln1p1) \ _MMIO(_PORT((port) - PORT_C, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1))) -#define _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT1 0x16812C -#define _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT1 0x16852C -#define _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT2 0x16912C -#define _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT2 0x16952C -#define _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT3 0x16A12C -#define _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT3 0x16A52C -#define _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT4 0x16B12C -#define _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT4 0x16B52C -#define ICL_PORT_MG_TX1_LINK_PARAMS(port, ln) \ - _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT1, \ - _ICL_MG_TX_LINK_PARAMS_TX1LN0_PORT2, \ - _ICL_MG_TX_LINK_PARAMS_TX1LN1_PORT1) - -#define _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT1 0x1680AC -#define _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT1 0x1684AC -#define _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT2 0x1690AC -#define _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT2 0x1694AC -#define _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT3 0x16A0AC -#define _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT3 0x16A4AC -#define _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT4 0x16B0AC -#define _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT4 0x16B4AC -#define ICL_PORT_MG_TX2_LINK_PARAMS(port, ln) \ - _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT1, \ - _ICL_MG_TX_LINK_PARAMS_TX2LN0_PORT2, \ - _ICL_MG_TX_LINK_PARAMS_TX2LN1_PORT1) -#define CRI_USE_FS32 (1 << 5) - -#define _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT1 0x16814C -#define _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT1 0x16854C -#define _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT2 0x16914C -#define _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT2 0x16954C -#define _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT3 0x16A14C -#define _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT3 0x16A54C -#define _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT4 0x16B14C -#define _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT4 0x16B54C -#define ICL_PORT_MG_TX1_PISO_READLOAD(port, ln) \ - _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT1, \ - _ICL_MG_TX_PISO_READLOAD_TX1LN0_PORT2, \ - _ICL_MG_TX_PISO_READLOAD_TX1LN1_PORT1) - -#define 
_ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT1 0x1680CC -#define _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT1 0x1684CC -#define _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT2 0x1690CC -#define _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT2 0x1694CC -#define _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT3 0x16A0CC -#define _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT3 0x16A4CC -#define _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT4 0x16B0CC -#define _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT4 0x16B4CC -#define ICL_PORT_MG_TX2_PISO_READLOAD(port, ln) \ - _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT1, \ - _ICL_MG_TX_PISO_READLOAD_TX2LN0_PORT2, \ - _ICL_MG_TX_PISO_READLOAD_TX2LN1_PORT1) -#define CRI_CALCINIT (1 << 1) - -#define _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT1 0x168148 -#define _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT1 0x168548 -#define _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT2 0x169148 -#define _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT2 0x169548 -#define _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT3 0x16A148 -#define _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT3 0x16A548 -#define _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT4 0x16B148 -#define _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT4 0x16B548 -#define ICL_PORT_MG_TX1_SWINGCTRL(port, ln) \ - _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT1, \ - _ICL_MG_TX_SWINGCTRL_TX1LN0_PORT2, \ - _ICL_MG_TX_SWINGCTRL_TX1LN1_PORT1) - -#define _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT1 0x1680C8 -#define _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT1 0x1684C8 -#define _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT2 0x1690C8 -#define _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT2 0x1694C8 -#define _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT3 0x16A0C8 -#define _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT3 0x16A4C8 -#define _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT4 0x16B0C8 -#define _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT4 0x16B4C8 -#define ICL_PORT_MG_TX2_SWINGCTRL(port, ln) \ - _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT1, \ - _ICL_MG_TX_SWINGCTRL_TX2LN0_PORT2, \ - _ICL_MG_TX_SWINGCTRL_TX2LN1_PORT1) -#define CRI_TXDEEMPH_OVERRIDE_17_12(x) ((x) << 0) -#define CRI_TXDEEMPH_OVERRIDE_17_12_MASK (0x3F << 0) - -#define _ICL_MG_TX_DRVCTRL_TX1LN0_PORT1 0x168144 -#define _ICL_MG_TX_DRVCTRL_TX1LN1_PORT1 0x168544 -#define _ICL_MG_TX_DRVCTRL_TX1LN0_PORT2 0x169144 -#define _ICL_MG_TX_DRVCTRL_TX1LN1_PORT2 0x169544 -#define _ICL_MG_TX_DRVCTRL_TX1LN0_PORT3 0x16A144 -#define _ICL_MG_TX_DRVCTRL_TX1LN1_PORT3 0x16A544 -#define _ICL_MG_TX_DRVCTRL_TX1LN0_PORT4 0x16B144 -#define _ICL_MG_TX_DRVCTRL_TX1LN1_PORT4 0x16B544 -#define ICL_PORT_MG_TX1_DRVCTRL(port, ln) \ - _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_DRVCTRL_TX1LN0_PORT1, \ - _ICL_MG_TX_DRVCTRL_TX1LN0_PORT2, \ - _ICL_MG_TX_DRVCTRL_TX1LN1_PORT1) - -#define _ICL_MG_TX_DRVCTRL_TX2LN0_PORT1 0x1680C4 -#define _ICL_MG_TX_DRVCTRL_TX2LN1_PORT1 0x1684C4 -#define _ICL_MG_TX_DRVCTRL_TX2LN0_PORT2 0x1690C4 -#define _ICL_MG_TX_DRVCTRL_TX2LN1_PORT2 0x1694C4 -#define _ICL_MG_TX_DRVCTRL_TX2LN0_PORT3 0x16A0C4 -#define _ICL_MG_TX_DRVCTRL_TX2LN1_PORT3 0x16A4C4 -#define _ICL_MG_TX_DRVCTRL_TX2LN0_PORT4 0x16B0C4 -#define _ICL_MG_TX_DRVCTRL_TX2LN1_PORT4 0x16B4C4 -#define ICL_PORT_MG_TX2_DRVCTRL(port, ln) \ - _ICL_MG_PHY_PORT_LN(port, ln, _ICL_MG_TX_DRVCTRL_TX2LN0_PORT1, \ - _ICL_MG_TX_DRVCTRL_TX2LN0_PORT2, \ - _ICL_MG_TX_DRVCTRL_TX2LN1_PORT1) -#define CRI_TXDEEMPH_OVERRIDE_11_6(x) ((x) << 24) -#define CRI_TXDEEMPH_OVERRIDE_11_6_MASK (0x3F << 24) -#define CRI_TXDEEMPH_OVERRIDE_EN (1 << 22) -#define CRI_TXDEEMPH_OVERRIDE_5_0(x) ((x) << 16) -#define CRI_TXDEEMPH_OVERRIDE_5_0_MASK (0x3F << 16) +#define MG_TX_LINK_PARAMS_TX1LN0_PORT1 0x16812C +#define MG_TX_LINK_PARAMS_TX1LN1_PORT1 0x16852C +#define 
MG_TX_LINK_PARAMS_TX1LN0_PORT2 0x16912C +#define MG_TX_LINK_PARAMS_TX1LN1_PORT2 0x16952C +#define MG_TX_LINK_PARAMS_TX1LN0_PORT3 0x16A12C +#define MG_TX_LINK_PARAMS_TX1LN1_PORT3 0x16A52C +#define MG_TX_LINK_PARAMS_TX1LN0_PORT4 0x16B12C +#define MG_TX_LINK_PARAMS_TX1LN1_PORT4 0x16B52C +#define MG_TX1_LINK_PARAMS(port, ln) \ + MG_PHY_PORT_LN(port, ln, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \ + MG_TX_LINK_PARAMS_TX1LN0_PORT2, \ + MG_TX_LINK_PARAMS_TX1LN1_PORT1) + +#define MG_TX_LINK_PARAMS_TX2LN0_PORT1 0x1680AC +#define MG_TX_LINK_PARAMS_TX2LN1_PORT1 0x1684AC +#define MG_TX_LINK_PARAMS_TX2LN0_PORT2 0x1690AC +#define MG_TX_LINK_PARAMS_TX2LN1_PORT2 0x1694AC +#define MG_TX_LINK_PARAMS_TX2LN0_PORT3 0x16A0AC +#define MG_TX_LINK_PARAMS_TX2LN1_PORT3 0x16A4AC +#define MG_TX_LINK_PARAMS_TX2LN0_PORT4 0x16B0AC +#define MG_TX_LINK_PARAMS_TX2LN1_PORT4 0x16B4AC +#define MG_TX2_LINK_PARAMS(port, ln) \ + MG_PHY_PORT_LN(port, ln, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \ + MG_TX_LINK_PARAMS_TX2LN0_PORT2, \ + MG_TX_LINK_PARAMS_TX2LN1_PORT1) +#define CRI_USE_FS32 (1 << 5) + +#define MG_TX_PISO_READLOAD_TX1LN0_PORT1 0x16814C +#define MG_TX_PISO_READLOAD_TX1LN1_PORT1 0x16854C +#define MG_TX_PISO_READLOAD_TX1LN0_PORT2 0x16914C +#define MG_TX_PISO_READLOAD_TX1LN1_PORT2 0x16954C +#define MG_TX_PISO_READLOAD_TX1LN0_PORT3 0x16A14C +#define MG_TX_PISO_READLOAD_TX1LN1_PORT3 0x16A54C +#define MG_TX_PISO_READLOAD_TX1LN0_PORT4 0x16B14C +#define MG_TX_PISO_READLOAD_TX1LN1_PORT4 0x16B54C +#define MG_TX1_PISO_READLOAD(port, ln) \ + MG_PHY_PORT_LN(port, ln, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \ + MG_TX_PISO_READLOAD_TX1LN0_PORT2, \ + MG_TX_PISO_READLOAD_TX1LN1_PORT1) + +#define MG_TX_PISO_READLOAD_TX2LN0_PORT1 0x1680CC +#define MG_TX_PISO_READLOAD_TX2LN1_PORT1 0x1684CC +#define MG_TX_PISO_READLOAD_TX2LN0_PORT2 0x1690CC +#define MG_TX_PISO_READLOAD_TX2LN1_PORT2 0x1694CC +#define MG_TX_PISO_READLOAD_TX2LN0_PORT3 0x16A0CC +#define MG_TX_PISO_READLOAD_TX2LN1_PORT3 0x16A4CC +#define MG_TX_PISO_READLOAD_TX2LN0_PORT4 0x16B0CC +#define MG_TX_PISO_READLOAD_TX2LN1_PORT4 0x16B4CC +#define MG_TX2_PISO_READLOAD(port, ln) \ + MG_PHY_PORT_LN(port, ln, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \ + MG_TX_PISO_READLOAD_TX2LN0_PORT2, \ + MG_TX_PISO_READLOAD_TX2LN1_PORT1) +#define CRI_CALCINIT (1 << 1) + +#define MG_TX_SWINGCTRL_TX1LN0_PORT1 0x168148 +#define MG_TX_SWINGCTRL_TX1LN1_PORT1 0x168548 +#define MG_TX_SWINGCTRL_TX1LN0_PORT2 0x169148 +#define MG_TX_SWINGCTRL_TX1LN1_PORT2 0x169548 +#define MG_TX_SWINGCTRL_TX1LN0_PORT3 0x16A148 +#define MG_TX_SWINGCTRL_TX1LN1_PORT3 0x16A548 +#define MG_TX_SWINGCTRL_TX1LN0_PORT4 0x16B148 +#define MG_TX_SWINGCTRL_TX1LN1_PORT4 0x16B548 +#define MG_TX1_SWINGCTRL(port, ln) \ + MG_PHY_PORT_LN(port, ln, MG_TX_SWINGCTRL_TX1LN0_PORT1, \ + MG_TX_SWINGCTRL_TX1LN0_PORT2, \ + MG_TX_SWINGCTRL_TX1LN1_PORT1) + +#define MG_TX_SWINGCTRL_TX2LN0_PORT1 0x1680C8 +#define MG_TX_SWINGCTRL_TX2LN1_PORT1 0x1684C8 +#define MG_TX_SWINGCTRL_TX2LN0_PORT2 0x1690C8 +#define MG_TX_SWINGCTRL_TX2LN1_PORT2 0x1694C8 +#define MG_TX_SWINGCTRL_TX2LN0_PORT3 0x16A0C8 +#define MG_TX_SWINGCTRL_TX2LN1_PORT3 0x16A4C8 +#define MG_TX_SWINGCTRL_TX2LN0_PORT4 0x16B0C8 +#define MG_TX_SWINGCTRL_TX2LN1_PORT4 0x16B4C8 +#define MG_TX2_SWINGCTRL(port, ln) \ + MG_PHY_PORT_LN(port, ln, MG_TX_SWINGCTRL_TX2LN0_PORT1, \ + MG_TX_SWINGCTRL_TX2LN0_PORT2, \ + MG_TX_SWINGCTRL_TX2LN1_PORT1) +#define CRI_TXDEEMPH_OVERRIDE_17_12(x) ((x) << 0) +#define CRI_TXDEEMPH_OVERRIDE_17_12_MASK (0x3F << 0) + +#define MG_TX_DRVCTRL_TX1LN0_TXPORT1 0x168144 +#define MG_TX_DRVCTRL_TX1LN1_TXPORT1 0x168544 +#define 
MG_TX_DRVCTRL_TX1LN0_TXPORT2 0x169144 +#define MG_TX_DRVCTRL_TX1LN1_TXPORT2 0x169544 +#define MG_TX_DRVCTRL_TX1LN0_TXPORT3 0x16A144 +#define MG_TX_DRVCTRL_TX1LN1_TXPORT3 0x16A544 +#define MG_TX_DRVCTRL_TX1LN0_TXPORT4 0x16B144 +#define MG_TX_DRVCTRL_TX1LN1_TXPORT4 0x16B544 +#define MG_TX1_DRVCTRL(port, ln) \ + MG_PHY_PORT_LN(port, ln, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \ + MG_TX_DRVCTRL_TX1LN0_TXPORT2, \ + MG_TX_DRVCTRL_TX1LN1_TXPORT1) + +#define MG_TX_DRVCTRL_TX2LN0_PORT1 0x1680C4 +#define MG_TX_DRVCTRL_TX2LN1_PORT1 0x1684C4 +#define MG_TX_DRVCTRL_TX2LN0_PORT2 0x1690C4 +#define MG_TX_DRVCTRL_TX2LN1_PORT2 0x1694C4 +#define MG_TX_DRVCTRL_TX2LN0_PORT3 0x16A0C4 +#define MG_TX_DRVCTRL_TX2LN1_PORT3 0x16A4C4 +#define MG_TX_DRVCTRL_TX2LN0_PORT4 0x16B0C4 +#define MG_TX_DRVCTRL_TX2LN1_PORT4 0x16B4C4 +#define MG_TX2_DRVCTRL(port, ln) \ + MG_PHY_PORT_LN(port, ln, MG_TX_DRVCTRL_TX2LN0_PORT1, \ + MG_TX_DRVCTRL_TX2LN0_PORT2, \ + MG_TX_DRVCTRL_TX2LN1_PORT1) +#define CRI_TXDEEMPH_OVERRIDE_11_6(x) ((x) << 24) +#define CRI_TXDEEMPH_OVERRIDE_11_6_MASK (0x3F << 24) +#define CRI_TXDEEMPH_OVERRIDE_EN (1 << 22) +#define CRI_TXDEEMPH_OVERRIDE_5_0(x) ((x) << 16) +#define CRI_TXDEEMPH_OVERRIDE_5_0_MASK (0x3F << 16) +#define CRI_LOADGEN_SEL(x) ((x) << 12) +#define CRI_LOADGEN_SEL_MASK (0x3 << 12) + +#define MG_CLKHUB_LN0_PORT1 0x16839C +#define MG_CLKHUB_LN1_PORT1 0x16879C +#define MG_CLKHUB_LN0_PORT2 0x16939C +#define MG_CLKHUB_LN1_PORT2 0x16979C +#define MG_CLKHUB_LN0_PORT3 0x16A39C +#define MG_CLKHUB_LN1_PORT3 0x16A79C +#define MG_CLKHUB_LN0_PORT4 0x16B39C +#define MG_CLKHUB_LN1_PORT4 0x16B79C +#define MG_CLKHUB(port, ln) \ + MG_PHY_PORT_LN(port, ln, MG_CLKHUB_LN0_PORT1, \ + MG_CLKHUB_LN0_PORT2, \ + MG_CLKHUB_LN1_PORT1) +#define CFG_LOW_RATE_LKREN_EN (1 << 11) + +#define MG_TX_DCC_TX1LN0_PORT1 0x168110 +#define MG_TX_DCC_TX1LN1_PORT1 0x168510 +#define MG_TX_DCC_TX1LN0_PORT2 0x169110 +#define MG_TX_DCC_TX1LN1_PORT2 0x169510 +#define MG_TX_DCC_TX1LN0_PORT3 0x16A110 +#define MG_TX_DCC_TX1LN1_PORT3 0x16A510 +#define MG_TX_DCC_TX1LN0_PORT4 0x16B110 +#define MG_TX_DCC_TX1LN1_PORT4 0x16B510 +#define MG_TX1_DCC(port, ln) \ + MG_PHY_PORT_LN(port, ln, MG_TX_DCC_TX1LN0_PORT1, \ + MG_TX_DCC_TX1LN0_PORT2, \ + MG_TX_DCC_TX1LN1_PORT1) +#define MG_TX_DCC_TX2LN0_PORT1 0x168090 +#define MG_TX_DCC_TX2LN1_PORT1 0x168490 +#define MG_TX_DCC_TX2LN0_PORT2 0x169090 +#define MG_TX_DCC_TX2LN1_PORT2 0x169490 +#define MG_TX_DCC_TX2LN0_PORT3 0x16A090 +#define MG_TX_DCC_TX2LN1_PORT3 0x16A490 +#define MG_TX_DCC_TX2LN0_PORT4 0x16B090 +#define MG_TX_DCC_TX2LN1_PORT4 0x16B490 +#define MG_TX2_DCC(port, ln) \ + MG_PHY_PORT_LN(port, ln, MG_TX_DCC_TX2LN0_PORT1, \ + MG_TX_DCC_TX2LN0_PORT2, \ + MG_TX_DCC_TX2LN1_PORT1) +#define CFG_AMI_CK_DIV_OVERRIDE_VAL(x) ((x) << 25) +#define CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK (0x3 << 25) +#define CFG_AMI_CK_DIV_OVERRIDE_EN (1 << 24) + +#define MG_DP_MODE_LN0_ACU_PORT1 0x1683A0 +#define MG_DP_MODE_LN1_ACU_PORT1 0x1687A0 +#define MG_DP_MODE_LN0_ACU_PORT2 0x1693A0 +#define MG_DP_MODE_LN1_ACU_PORT2 0x1697A0 +#define MG_DP_MODE_LN0_ACU_PORT3 0x16A3A0 +#define MG_DP_MODE_LN1_ACU_PORT3 0x16A7A0 +#define MG_DP_MODE_LN0_ACU_PORT4 0x16B3A0 +#define MG_DP_MODE_LN1_ACU_PORT4 0x16B7A0 +#define MG_DP_MODE(port, ln) \ + MG_PHY_PORT_LN(port, ln, MG_DP_MODE_LN0_ACU_PORT1, \ + MG_DP_MODE_LN0_ACU_PORT2, \ + MG_DP_MODE_LN1_ACU_PORT1) +#define MG_DP_MODE_CFG_DP_X2_MODE (1 << 7) +#define MG_DP_MODE_CFG_DP_X1_MODE (1 << 6) +#define MG_DP_MODE_CFG_TR2PWR_GATING (1 << 5) +#define MG_DP_MODE_CFG_TRPWR_GATING (1 << 4) +#define 
MG_DP_MODE_CFG_CLNPWR_GATING (1 << 3) +#define MG_DP_MODE_CFG_DIGPWR_GATING (1 << 2) +#define MG_DP_MODE_CFG_GAONPWR_GATING (1 << 1) + +#define MG_MISC_SUS0_PORT1 0x168814 +#define MG_MISC_SUS0_PORT2 0x169814 +#define MG_MISC_SUS0_PORT3 0x16A814 +#define MG_MISC_SUS0_PORT4 0x16B814 +#define MG_MISC_SUS0(tc_port) \ + _MMIO(_PORT(tc_port, MG_MISC_SUS0_PORT1, MG_MISC_SUS0_PORT2)) +#define MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK (3 << 14) +#define MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(x) ((x) << 14) +#define MG_MISC_SUS0_CFG_TR2PWR_GATING (1 << 12) +#define MG_MISC_SUS0_CFG_CL2PWR_GATING (1 << 11) +#define MG_MISC_SUS0_CFG_GAONPWR_GATING (1 << 10) +#define MG_MISC_SUS0_CFG_TRPWR_GATING (1 << 7) +#define MG_MISC_SUS0_CFG_CL1PWR_GATING (1 << 6) +#define MG_MISC_SUS0_CFG_DGPWR_GATING (1 << 5) /* The spec defines this only for BXT PHY0, but lets assume that this * would exist for PHY1 too if it had a second channel. @@ -3086,18 +3084,9 @@ enum i915_power_well_id { /* * GPIO regs */ -#define GPIOA _MMIO(0x5010) -#define GPIOB _MMIO(0x5014) -#define GPIOC _MMIO(0x5018) -#define GPIOD _MMIO(0x501c) -#define GPIOE _MMIO(0x5020) -#define GPIOF _MMIO(0x5024) -#define GPIOG _MMIO(0x5028) -#define GPIOH _MMIO(0x502c) -#define GPIOJ _MMIO(0x5034) -#define GPIOK _MMIO(0x5038) -#define GPIOL _MMIO(0x503C) -#define GPIOM _MMIO(0x5040) +#define GPIO(gpio) _MMIO(dev_priv->gpio_mmio_base + 0x5010 + \ + 4 * (gpio)) + # define GPIO_CLOCK_DIR_MASK (1 << 0) # define GPIO_CLOCK_DIR_IN (0 << 1) # define GPIO_CLOCK_DIR_OUT (1 << 1) @@ -5476,6 +5465,7 @@ enum { #define DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL (1 << 14) #define DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL (1 << 13) #define DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL (1 << 12) +#define DP_AUX_CH_CTL_TBT_IO (1 << 11) #define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL_MASK (0x1f << 5) #define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(c) (((c) - 1) << 5) #define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) ((c) - 1) @@ -6527,7 +6517,7 @@ enum { #define PLANE_CTL_YUV422_UYVY (1 << 16) #define PLANE_CTL_YUV422_YVYU (2 << 16) #define PLANE_CTL_YUV422_VYUY (3 << 16) -#define PLANE_CTL_DECOMPRESSION_ENABLE (1 << 15) +#define PLANE_CTL_RENDER_DECOMPRESSION_ENABLE (1 << 15) #define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14) #define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13) /* Pre-GLK */ #define PLANE_CTL_TILED_MASK (0x7 << 10) @@ -7207,6 +7197,7 @@ enum { #define GEN11_TC3_HOTPLUG (1 << 18) #define GEN11_TC2_HOTPLUG (1 << 17) #define GEN11_TC1_HOTPLUG (1 << 16) +#define GEN11_TC_HOTPLUG(tc_port) (1 << ((tc_port) + 16)) #define GEN11_DE_TC_HOTPLUG_MASK (GEN11_TC4_HOTPLUG | \ GEN11_TC3_HOTPLUG | \ GEN11_TC2_HOTPLUG | \ @@ -7215,6 +7206,7 @@ enum { #define GEN11_TBT3_HOTPLUG (1 << 2) #define GEN11_TBT2_HOTPLUG (1 << 1) #define GEN11_TBT1_HOTPLUG (1 << 0) +#define GEN11_TBT_HOTPLUG(tc_port) (1 << (tc_port)) #define GEN11_DE_TBT_HOTPLUG_MASK (GEN11_TBT4_HOTPLUG | \ GEN11_TBT3_HOTPLUG | \ GEN11_TBT2_HOTPLUG | \ @@ -7490,6 +7482,8 @@ enum { /* PCH */ +#define PCH_DISPLAY_BASE 0xc0000u + /* south display engine interrupt: IBX */ #define SDE_AUDIO_POWER_D (1 << 27) #define SDE_AUDIO_POWER_C (1 << 26) @@ -7587,6 +7581,8 @@ enum { #define SDE_GMBUS_ICP (1 << 23) #define SDE_DDIB_HOTPLUG_ICP (1 << 17) #define SDE_DDIA_HOTPLUG_ICP (1 << 16) +#define SDE_TC_HOTPLUG_ICP(tc_port) (1 << ((tc_port) + 24)) +#define SDE_DDI_HOTPLUG_ICP(port) (1 << ((port) + 16)) #define SDE_DDI_MASK_ICP (SDE_DDIB_HOTPLUG_ICP | \ SDE_DDIA_HOTPLUG_ICP) #define SDE_TC_MASK_ICP (SDE_TC4_HOTPLUG_ICP | \ @@ -7782,20 +7778,6 @@ enum { #define 
ICP_TC_HPD_LONG_DETECT(tc_port) (2 << (tc_port) * 4) #define ICP_TC_HPD_SHORT_DETECT(tc_port) (1 << (tc_port) * 4) -#define PCH_GPIOA _MMIO(0xc5010) -#define PCH_GPIOB _MMIO(0xc5014) -#define PCH_GPIOC _MMIO(0xc5018) -#define PCH_GPIOD _MMIO(0xc501c) -#define PCH_GPIOE _MMIO(0xc5020) -#define PCH_GPIOF _MMIO(0xc5024) - -#define PCH_GMBUS0 _MMIO(0xc5100) -#define PCH_GMBUS1 _MMIO(0xc5104) -#define PCH_GMBUS2 _MMIO(0xc5108) -#define PCH_GMBUS3 _MMIO(0xc510c) -#define PCH_GMBUS4 _MMIO(0xc5110) -#define PCH_GMBUS5 _MMIO(0xc5120) - #define _PCH_DPLL_A 0xc6014 #define _PCH_DPLL_B 0xc6018 #define PCH_DPLL(pll) _MMIO((pll) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B) @@ -8498,8 +8480,10 @@ enum { #define GEN6_PM_RP_DOWN_THRESHOLD (1 << 4) #define GEN6_PM_RP_UP_EI_EXPIRED (1 << 2) #define GEN6_PM_RP_DOWN_EI_EXPIRED (1 << 1) -#define GEN6_PM_RPS_EVENTS (GEN6_PM_RP_UP_THRESHOLD | \ - GEN6_PM_RP_DOWN_THRESHOLD | \ +#define GEN6_PM_RPS_EVENTS (GEN6_PM_RP_UP_EI_EXPIRED | \ + GEN6_PM_RP_UP_THRESHOLD | \ + GEN6_PM_RP_DOWN_EI_EXPIRED | \ + GEN6_PM_RP_DOWN_THRESHOLD | \ GEN6_PM_RP_DOWN_TIMEOUT) #define GEN7_GT_SCRATCH(i) _MMIO(0x4F100 + (i) * 4) @@ -8827,46 +8811,78 @@ enum { #define HSW_AUD_CHICKENBIT _MMIO(0x65f10) #define SKL_AUD_CODEC_WAKE_SIGNAL (1 << 15) -/* HSW Power Wells */ -#define _HSW_PWR_WELL_CTL1 0x45400 -#define _HSW_PWR_WELL_CTL2 0x45404 -#define _HSW_PWR_WELL_CTL3 0x45408 -#define _HSW_PWR_WELL_CTL4 0x4540C - -#define _ICL_PWR_WELL_CTL_AUX1 0x45440 -#define _ICL_PWR_WELL_CTL_AUX2 0x45444 -#define _ICL_PWR_WELL_CTL_AUX4 0x4544C - -#define _ICL_PWR_WELL_CTL_DDI1 0x45450 -#define _ICL_PWR_WELL_CTL_DDI2 0x45454 -#define _ICL_PWR_WELL_CTL_DDI4 0x4545C - /* - * Each power well control register contains up to 16 (request, status) HW - * flag tuples. The register index and HW flag shift is determined by the - * power well ID (see i915_power_well_id). There are 4 possible sources of - * power well requests each source having its own set of control registers: - * BIOS, DRIVER, KVMR, DEBUG. 
+ * HSW - ICL power wells + * + * Platforms have up to 3 power well control register sets, each set + * controlling up to 16 power wells via a request/status HW flag tuple: + * - main (HSW_PWR_WELL_CTL[1-4]) + * - AUX (ICL_PWR_WELL_CTL_AUX[1-4]) + * - DDI (ICL_PWR_WELL_CTL_DDI[1-4]) + * Each control register set consists of up to 4 registers used by different + * sources that can request a power well to be enabled: + * - BIOS (HSW_PWR_WELL_CTL1/ICL_PWR_WELL_CTL_AUX1/ICL_PWR_WELL_CTL_DDI1) + * - DRIVER (HSW_PWR_WELL_CTL2/ICL_PWR_WELL_CTL_AUX2/ICL_PWR_WELL_CTL_DDI2) + * - KVMR (HSW_PWR_WELL_CTL3) (only in the main register set) + * - DEBUG (HSW_PWR_WELL_CTL4/ICL_PWR_WELL_CTL_AUX4/ICL_PWR_WELL_CTL_DDI4) */ -#define _HSW_PW_REG_IDX(pw) ((pw) >> 4) -#define _HSW_PW_SHIFT(pw) (((pw) & 0xf) * 2) -#define HSW_PWR_WELL_CTL_BIOS(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \ - _HSW_PWR_WELL_CTL1, \ - _ICL_PWR_WELL_CTL_AUX1, \ - _ICL_PWR_WELL_CTL_DDI1)) -#define HSW_PWR_WELL_CTL_DRIVER(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \ - _HSW_PWR_WELL_CTL2, \ - _ICL_PWR_WELL_CTL_AUX2, \ - _ICL_PWR_WELL_CTL_DDI2)) -/* KVMR doesn't have a reg for AUX or DDI power well control */ -#define HSW_PWR_WELL_CTL_KVMR _MMIO(_HSW_PWR_WELL_CTL3) -#define HSW_PWR_WELL_CTL_DEBUG(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \ - _HSW_PWR_WELL_CTL4, \ - _ICL_PWR_WELL_CTL_AUX4, \ - _ICL_PWR_WELL_CTL_DDI4)) - -#define HSW_PWR_WELL_CTL_REQ(pw) (1 << (_HSW_PW_SHIFT(pw) + 1)) -#define HSW_PWR_WELL_CTL_STATE(pw) (1 << _HSW_PW_SHIFT(pw)) +#define HSW_PWR_WELL_CTL1 _MMIO(0x45400) +#define HSW_PWR_WELL_CTL2 _MMIO(0x45404) +#define HSW_PWR_WELL_CTL3 _MMIO(0x45408) +#define HSW_PWR_WELL_CTL4 _MMIO(0x4540C) +#define HSW_PWR_WELL_CTL_REQ(pw_idx) (0x2 << ((pw_idx) * 2)) +#define HSW_PWR_WELL_CTL_STATE(pw_idx) (0x1 << ((pw_idx) * 2)) + +/* HSW/BDW power well */ +#define HSW_PW_CTL_IDX_GLOBAL 15 + +/* SKL/BXT/GLK/CNL power wells */ +#define SKL_PW_CTL_IDX_PW_2 15 +#define SKL_PW_CTL_IDX_PW_1 14 +#define CNL_PW_CTL_IDX_AUX_F 12 +#define CNL_PW_CTL_IDX_AUX_D 11 +#define GLK_PW_CTL_IDX_AUX_C 10 +#define GLK_PW_CTL_IDX_AUX_B 9 +#define GLK_PW_CTL_IDX_AUX_A 8 +#define CNL_PW_CTL_IDX_DDI_F 6 +#define SKL_PW_CTL_IDX_DDI_D 4 +#define SKL_PW_CTL_IDX_DDI_C 3 +#define SKL_PW_CTL_IDX_DDI_B 2 +#define SKL_PW_CTL_IDX_DDI_A_E 1 +#define GLK_PW_CTL_IDX_DDI_A 1 +#define SKL_PW_CTL_IDX_MISC_IO 0 + +/* ICL - power wells */ +#define ICL_PW_CTL_IDX_PW_4 3 +#define ICL_PW_CTL_IDX_PW_3 2 +#define ICL_PW_CTL_IDX_PW_2 1 +#define ICL_PW_CTL_IDX_PW_1 0 + +#define ICL_PWR_WELL_CTL_AUX1 _MMIO(0x45440) +#define ICL_PWR_WELL_CTL_AUX2 _MMIO(0x45444) +#define ICL_PWR_WELL_CTL_AUX4 _MMIO(0x4544C) +#define ICL_PW_CTL_IDX_AUX_TBT4 11 +#define ICL_PW_CTL_IDX_AUX_TBT3 10 +#define ICL_PW_CTL_IDX_AUX_TBT2 9 +#define ICL_PW_CTL_IDX_AUX_TBT1 8 +#define ICL_PW_CTL_IDX_AUX_F 5 +#define ICL_PW_CTL_IDX_AUX_E 4 +#define ICL_PW_CTL_IDX_AUX_D 3 +#define ICL_PW_CTL_IDX_AUX_C 2 +#define ICL_PW_CTL_IDX_AUX_B 1 +#define ICL_PW_CTL_IDX_AUX_A 0 + +#define ICL_PWR_WELL_CTL_DDI1 _MMIO(0x45450) +#define ICL_PWR_WELL_CTL_DDI2 _MMIO(0x45454) +#define ICL_PWR_WELL_CTL_DDI4 _MMIO(0x4545C) +#define ICL_PW_CTL_IDX_DDI_F 5 +#define ICL_PW_CTL_IDX_DDI_E 4 +#define ICL_PW_CTL_IDX_DDI_D 3 +#define ICL_PW_CTL_IDX_DDI_C 2 +#define ICL_PW_CTL_IDX_DDI_B 1 +#define ICL_PW_CTL_IDX_DDI_A 0 + +/* HSW - power well misc debug registers */ #define HSW_PWR_WELL_CTL5 _MMIO(0x45410) #define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1 << 31) #define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1 << 20) @@ -8878,22 +8894,32 @@ enum skl_power_gate { SKL_PG0, 
SKL_PG1, SKL_PG2, + ICL_PG3, + ICL_PG4, }; #define SKL_FUSE_STATUS _MMIO(0x42000) #define SKL_FUSE_DOWNLOAD_STATUS (1 << 31) -/* PG0 (HW control->no power well ID), PG1..PG2 (SKL_DISP_PW1..SKL_DISP_PW2) */ -#define SKL_PW_TO_PG(pw) ((pw) - SKL_DISP_PW_1 + SKL_PG1) -/* PG0 (HW control->no power well ID), PG1..PG4 (ICL_DISP_PW1..ICL_DISP_PW4) */ -#define ICL_PW_TO_PG(pw) ((pw) - ICL_DISP_PW_1 + SKL_PG1) +/* + * PG0 is HW controlled, so doesn't have a corresponding power well control knob + * SKL_DISP_PW1_IDX..SKL_DISP_PW2_IDX -> PG1..PG2 + */ +#define SKL_PW_CTL_IDX_TO_PG(pw_idx) \ + ((pw_idx) - SKL_PW_CTL_IDX_PW_1 + SKL_PG1) +/* + * PG0 is HW controlled, so doesn't have a corresponding power well control knob + * ICL_DISP_PW1_IDX..ICL_DISP_PW4_IDX -> PG1..PG4 + */ +#define ICL_PW_CTL_IDX_TO_PG(pw_idx) \ + ((pw_idx) - ICL_PW_CTL_IDX_PW_1 + SKL_PG1) #define SKL_FUSE_PG_DIST_STATUS(pg) (1 << (27 - (pg))) -#define _CNL_AUX_REG_IDX(pw) ((pw) - 9) +#define _CNL_AUX_REG_IDX(pw_idx) ((pw_idx) - GLK_PW_CTL_IDX_AUX_B) #define _CNL_AUX_ANAOVRD1_B 0x162250 #define _CNL_AUX_ANAOVRD1_C 0x162210 #define _CNL_AUX_ANAOVRD1_D 0x1622D0 #define _CNL_AUX_ANAOVRD1_F 0x162A90 -#define CNL_AUX_ANAOVRD1(pw) _MMIO(_PICK(_CNL_AUX_REG_IDX(pw), \ +#define CNL_AUX_ANAOVRD1(pw_idx) _MMIO(_PICK(_CNL_AUX_REG_IDX(pw_idx), \ _CNL_AUX_ANAOVRD1_B, \ _CNL_AUX_ANAOVRD1_C, \ _CNL_AUX_ANAOVRD1_D, \ @@ -9367,9 +9393,13 @@ enum skl_power_gate { #define MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK (0x1 << 16) #define MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(x) ((x) << 14) #define MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK (0x3 << 14) -#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO(x) ((x) << 12) #define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK (0x3 << 12) +#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2 (0 << 12) +#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3 (1 << 12) +#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5 (2 << 12) +#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7 (3 << 12) #define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(x) ((x) << 8) +#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT 8 #define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK (0xf << 8) #define MG_CLKTOP2_HSCLKCTL(port) _MMIO_PORT((port) - PORT_C, \ _MG_CLKTOP2_HSCLKCTL_PORT1, \ @@ -9380,7 +9410,10 @@ enum skl_power_gate { #define _MG_PLL_DIV0_PORT3 0x16AA00 #define _MG_PLL_DIV0_PORT4 0x16BA00 #define MG_PLL_DIV0_FRACNEN_H (1 << 30) +#define MG_PLL_DIV0_FBDIV_FRAC_MASK (0x3fffff << 8) +#define MG_PLL_DIV0_FBDIV_FRAC_SHIFT 8 #define MG_PLL_DIV0_FBDIV_FRAC(x) ((x) << 8) +#define MG_PLL_DIV0_FBDIV_INT_MASK (0xff << 0) #define MG_PLL_DIV0_FBDIV_INT(x) ((x) << 0) #define MG_PLL_DIV0(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_DIV0_PORT1, \ _MG_PLL_DIV0_PORT2) @@ -9395,6 +9428,7 @@ enum skl_power_gate { #define MG_PLL_DIV1_DITHER_DIV_4 (2 << 12) #define MG_PLL_DIV1_DITHER_DIV_8 (3 << 12) #define MG_PLL_DIV1_NDIVRATIO(x) ((x) << 4) +#define MG_PLL_DIV1_FBPREDIV_MASK (0xf << 0) #define MG_PLL_DIV1_FBPREDIV(x) ((x) << 0) #define MG_PLL_DIV1(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_DIV1_PORT1, \ _MG_PLL_DIV1_PORT2) @@ -10347,8 +10381,8 @@ enum skl_power_gate { #define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23) /* Icelake Display Stream Compression Registers */ -#define DSCA_PICTURE_PARAMETER_SET_0 0x6B200 -#define DSCC_PICTURE_PARAMETER_SET_0 0x6BA00 +#define DSCA_PICTURE_PARAMETER_SET_0 _MMIO(0x6B200) +#define DSCC_PICTURE_PARAMETER_SET_0 _MMIO(0x6BA00) #define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270 #define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370 #define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470 @@ -10368,8 +10402,8 @@ 
enum skl_power_gate { #define DSC_VER_MIN_SHIFT 4 #define DSC_VER_MAJ (0x1 << 0) -#define DSCA_PICTURE_PARAMETER_SET_1 0x6B204 -#define DSCC_PICTURE_PARAMETER_SET_1 0x6BA04 +#define DSCA_PICTURE_PARAMETER_SET_1 _MMIO(0x6B204) +#define DSCC_PICTURE_PARAMETER_SET_1 _MMIO(0x6BA04) #define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB 0x78274 #define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB 0x78374 #define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC 0x78474 @@ -10382,8 +10416,8 @@ enum skl_power_gate { _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC) #define DSC_BPP(bpp) ((bpp) << 0) -#define DSCA_PICTURE_PARAMETER_SET_2 0x6B208 -#define DSCC_PICTURE_PARAMETER_SET_2 0x6BA08 +#define DSCA_PICTURE_PARAMETER_SET_2 _MMIO(0x6B208) +#define DSCC_PICTURE_PARAMETER_SET_2 _MMIO(0x6BA08) #define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB 0x78278 #define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB 0x78378 #define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC 0x78478 @@ -10397,8 +10431,8 @@ enum skl_power_gate { #define DSC_PIC_WIDTH(pic_width) ((pic_width) << 16) #define DSC_PIC_HEIGHT(pic_height) ((pic_height) << 0) -#define DSCA_PICTURE_PARAMETER_SET_3 0x6B20C -#define DSCC_PICTURE_PARAMETER_SET_3 0x6BA0C +#define DSCA_PICTURE_PARAMETER_SET_3 _MMIO(0x6B20C) +#define DSCC_PICTURE_PARAMETER_SET_3 _MMIO(0x6BA0C) #define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB 0x7827C #define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB 0x7837C #define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC 0x7847C @@ -10412,8 +10446,8 @@ enum skl_power_gate { #define DSC_SLICE_WIDTH(slice_width) ((slice_width) << 16) #define DSC_SLICE_HEIGHT(slice_height) ((slice_height) << 0) -#define DSCA_PICTURE_PARAMETER_SET_4 0x6B210 -#define DSCC_PICTURE_PARAMETER_SET_4 0x6BA10 +#define DSCA_PICTURE_PARAMETER_SET_4 _MMIO(0x6B210) +#define DSCC_PICTURE_PARAMETER_SET_4 _MMIO(0x6BA10) #define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB 0x78280 #define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB 0x78380 #define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC 0x78480 @@ -10422,13 +10456,13 @@ enum skl_power_gate { _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \ _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC) #define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \ _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC) #define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16) #define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0) -#define DSCA_PICTURE_PARAMETER_SET_5 0x6B214 -#define DSCC_PICTURE_PARAMETER_SET_5 0x6BA14 +#define DSCA_PICTURE_PARAMETER_SET_5 _MMIO(0x6B214) +#define DSCC_PICTURE_PARAMETER_SET_5 _MMIO(0x6BA14) #define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB 0x78284 #define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB 0x78384 #define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC 0x78484 @@ -10437,13 +10471,13 @@ enum skl_power_gate { _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \ _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC) #define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \ _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC) -#define DSC_SCALE_DEC_INTINT(scale_dec) ((scale_dec) << 16) +#define DSC_SCALE_DEC_INT(scale_dec) ((scale_dec) << 16) #define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0) -#define DSCA_PICTURE_PARAMETER_SET_6 0x6B218 -#define DSCC_PICTURE_PARAMETER_SET_6 0x6BA18 +#define DSCA_PICTURE_PARAMETER_SET_6 _MMIO(0x6B218) +#define DSCC_PICTURE_PARAMETER_SET_6 _MMIO(0x6BA18) #define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB 0x78288 #define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB 
0x78388 #define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC 0x78488 @@ -10454,13 +10488,13 @@ enum skl_power_gate { #define ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB, \ _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC) -#define DSC_FLATNESS_MAX_QP(max_qp) (qp << 24) -#define DSC_FLATNESS_MIN_QP(min_qp) (qp << 16) +#define DSC_FLATNESS_MAX_QP(max_qp) ((max_qp) << 24) +#define DSC_FLATNESS_MIN_QP(min_qp) ((min_qp) << 16) #define DSC_FIRST_LINE_BPG_OFFSET(offset) ((offset) << 8) #define DSC_INITIAL_SCALE_VALUE(value) ((value) << 0) -#define DSCA_PICTURE_PARAMETER_SET_7 0x6B21C -#define DSCC_PICTURE_PARAMETER_SET_7 0x6BA1C +#define DSCA_PICTURE_PARAMETER_SET_7 _MMIO(0x6B21C) +#define DSCC_PICTURE_PARAMETER_SET_7 _MMIO(0x6BA1C) #define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB 0x7828C #define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB 0x7838C #define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC 0x7848C @@ -10474,8 +10508,8 @@ enum skl_power_gate { #define DSC_NFL_BPG_OFFSET(bpg_offset) ((bpg_offset) << 16) #define DSC_SLICE_BPG_OFFSET(bpg_offset) ((bpg_offset) << 0) -#define DSCA_PICTURE_PARAMETER_SET_8 0x6B220 -#define DSCC_PICTURE_PARAMETER_SET_8 0x6BA20 +#define DSCA_PICTURE_PARAMETER_SET_8 _MMIO(0x6B220) +#define DSCC_PICTURE_PARAMETER_SET_8 _MMIO(0x6BA20) #define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB 0x78290 #define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB 0x78390 #define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC 0x78490 @@ -10489,8 +10523,8 @@ enum skl_power_gate { #define DSC_INITIAL_OFFSET(initial_offset) ((initial_offset) << 16) #define DSC_FINAL_OFFSET(final_offset) ((final_offset) << 0) -#define DSCA_PICTURE_PARAMETER_SET_9 0x6B224 -#define DSCC_PICTURE_PARAMETER_SET_9 0x6BA24 +#define DSCA_PICTURE_PARAMETER_SET_9 _MMIO(0x6B224) +#define DSCC_PICTURE_PARAMETER_SET_9 _MMIO(0x6BA24) #define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB 0x78294 #define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB 0x78394 #define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC 0x78494 @@ -10504,8 +10538,8 @@ enum skl_power_gate { #define DSC_RC_EDGE_FACTOR(rc_edge_fact) ((rc_edge_fact) << 16) #define DSC_RC_MODEL_SIZE(rc_model_size) ((rc_model_size) << 0) -#define DSCA_PICTURE_PARAMETER_SET_10 0x6B228 -#define DSCC_PICTURE_PARAMETER_SET_10 0x6BA28 +#define DSCA_PICTURE_PARAMETER_SET_10 _MMIO(0x6B228) +#define DSCC_PICTURE_PARAMETER_SET_10 _MMIO(0x6BA28) #define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB 0x78298 #define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB 0x78398 #define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC 0x78498 @@ -10521,8 +10555,8 @@ enum skl_power_gate { #define DSC_RC_QUANT_INC_LIMIT1(lim) ((lim) << 8) #define DSC_RC_QUANT_INC_LIMIT0(lim) ((lim) << 0) -#define DSCA_PICTURE_PARAMETER_SET_11 0x6B22C -#define DSCC_PICTURE_PARAMETER_SET_11 0x6BA2C +#define DSCA_PICTURE_PARAMETER_SET_11 _MMIO(0x6B22C) +#define DSCC_PICTURE_PARAMETER_SET_11 _MMIO(0x6BA2C) #define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB 0x7829C #define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB 0x7839C #define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC 0x7849C @@ -10534,8 +10568,8 @@ enum skl_power_gate { _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB, \ _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC) -#define DSCA_PICTURE_PARAMETER_SET_12 0x6B260 -#define DSCC_PICTURE_PARAMETER_SET_12 0x6BA60 +#define DSCA_PICTURE_PARAMETER_SET_12 _MMIO(0x6B260) +#define DSCC_PICTURE_PARAMETER_SET_12 _MMIO(0x6BA60) #define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB 0x782A0 #define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB 0x783A0 #define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC 0x784A0 @@ -10547,8 +10581,8 @@ 
enum skl_power_gate { _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB, \ _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC) -#define DSCA_PICTURE_PARAMETER_SET_13 0x6B264 -#define DSCC_PICTURE_PARAMETER_SET_13 0x6BA64 +#define DSCA_PICTURE_PARAMETER_SET_13 _MMIO(0x6B264) +#define DSCC_PICTURE_PARAMETER_SET_13 _MMIO(0x6BA64) #define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB 0x782A4 #define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB 0x783A4 #define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC 0x784A4 @@ -10560,8 +10594,8 @@ enum skl_power_gate { _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB, \ _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC) -#define DSCA_PICTURE_PARAMETER_SET_14 0x6B268 -#define DSCC_PICTURE_PARAMETER_SET_14 0x6BA68 +#define DSCA_PICTURE_PARAMETER_SET_14 _MMIO(0x6B268) +#define DSCC_PICTURE_PARAMETER_SET_14 _MMIO(0x6BA68) #define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB 0x782A8 #define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB 0x783A8 #define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC 0x784A8 @@ -10573,8 +10607,8 @@ enum skl_power_gate { _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB, \ _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC) -#define DSCA_PICTURE_PARAMETER_SET_15 0x6B26C -#define DSCC_PICTURE_PARAMETER_SET_15 0x6BA6C +#define DSCA_PICTURE_PARAMETER_SET_15 _MMIO(0x6B26C) +#define DSCC_PICTURE_PARAMETER_SET_15 _MMIO(0x6BA6C) #define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB 0x782AC #define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB 0x783AC #define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC 0x784AC @@ -10586,8 +10620,8 @@ enum skl_power_gate { _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB, \ _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC) -#define DSCA_PICTURE_PARAMETER_SET_16 0x6B270 -#define DSCC_PICTURE_PARAMETER_SET_16 0x6BA70 +#define DSCA_PICTURE_PARAMETER_SET_16 _MMIO(0x6B270) +#define DSCC_PICTURE_PARAMETER_SET_16 _MMIO(0x6BA70) #define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB 0x782B0 #define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB 0x783B0 #define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC 0x784B0 @@ -10599,7 +10633,7 @@ enum skl_power_gate { _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \ _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC) #define DSC_SLICE_PER_LINE(slice_per_line) ((slice_per_line) << 16) -#define DSC_SLICE_CHUNK_SIZE(slice_chunk_aize) (slice_chunk_size << 0) +#define DSC_SLICE_CHUNK_SIZE(slice_chunk_size) ((slice_chunk_size) << 0) /* Icelake Rate Control Buffer Threshold Registers */ #define DSCA_RC_BUF_THRESH_0 _MMIO(0x6B230) @@ -10652,4 +10686,17 @@ enum skl_power_gate { _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \ _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC) +#define PORT_TX_DFLEXDPSP _MMIO(0x1638A0) +#define TC_LIVE_STATE_TBT(tc_port) (1 << ((tc_port) * 8 + 6)) +#define TC_LIVE_STATE_TC(tc_port) (1 << ((tc_port) * 8 + 5)) +#define DP_LANE_ASSIGNMENT_SHIFT(tc_port) ((tc_port) * 8) +#define DP_LANE_ASSIGNMENT_MASK(tc_port) (0xf << ((tc_port) * 8)) +#define DP_LANE_ASSIGNMENT(tc_port, x) ((x) << ((tc_port) * 8)) + +#define PORT_TX_DFLEXDPPMS _MMIO(0x163890) +#define DP_PHY_MODE_STATUS_COMPLETED(tc_port) (1 << (tc_port)) + +#define PORT_TX_DFLEXDPCSSS _MMIO(0x163894) +#define DP_PHY_MODE_STATUS_NOT_SAFE(tc_port) (1 << (tc_port)) + #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 5c2c93cbab12..09ed48833b54 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -527,7 +527,7 @@ void __i915_request_submit(struct i915_request *request) seqno = timeline_get_seqno(&engine->timeline); GEM_BUG_ON(!seqno); - GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno)); + 
GEM_BUG_ON(intel_engine_signaled(engine, seqno)); /* We may be recursing from the signal callback of another i915 fence */ spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING); @@ -579,8 +579,7 @@ void __i915_request_unsubmit(struct i915_request *request) */ GEM_BUG_ON(!request->global_seqno); GEM_BUG_ON(request->global_seqno != engine->timeline.seqno); - GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), - request->global_seqno)); + GEM_BUG_ON(intel_engine_has_completed(engine, request->global_seqno)); engine->timeline.seqno--; /* We may be recursing from the signal callback of another i915 fence */ @@ -1205,7 +1204,7 @@ static bool __i915_spin_request(const struct i915_request *rq, * it is a fair assumption that it will not complete within our * relatively short timeout. */ - if (!i915_seqno_passed(intel_engine_get_seqno(engine), seqno - 1)) + if (!intel_engine_has_started(engine, seqno)) return false; /* @@ -1222,7 +1221,7 @@ static bool __i915_spin_request(const struct i915_request *rq, irq = READ_ONCE(engine->breadcrumbs.irq_count); timeout_us += local_clock_us(&cpu); do { - if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno)) + if (intel_engine_has_completed(engine, seqno)) return seqno == i915_request_global_seqno(rq); /* diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index e1c9365dfefb..9898301ab7ef 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -272,7 +272,10 @@ long i915_request_wait(struct i915_request *rq, #define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */ #define I915_WAIT_FOR_IDLE_BOOST BIT(3) -static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine); +static inline bool intel_engine_has_started(struct intel_engine_cs *engine, + u32 seqno); +static inline bool intel_engine_has_completed(struct intel_engine_cs *engine, + u32 seqno); /** * Returns true if seq1 is later than seq2. @@ -282,11 +285,31 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2) return (s32)(seq1 - seq2) >= 0; } +/** + * i915_request_started - check if the request has begun being executed + * @rq: the request + * + * Returns true if the request has been submitted to hardware, and the hardware + * has advanced passed the end of the previous request and so should be either + * currently processing the request (though it may be preempted and so + * not necessarily the next request to complete) or have completed the request. 
+ */ +static inline bool i915_request_started(const struct i915_request *rq) +{ + u32 seqno; + + seqno = i915_request_global_seqno(rq); + if (!seqno) /* not yet submitted to HW */ + return false; + + return intel_engine_has_started(rq->engine, seqno); +} + static inline bool __i915_request_completed(const struct i915_request *rq, u32 seqno) { GEM_BUG_ON(!seqno); - return i915_seqno_passed(intel_engine_get_seqno(rq->engine), seqno) && + return intel_engine_has_completed(rq->engine, seqno) && seqno == i915_request_global_seqno(rq); } @@ -301,18 +324,6 @@ static inline bool i915_request_completed(const struct i915_request *rq) return __i915_request_completed(rq, seqno); } -static inline bool i915_request_started(const struct i915_request *rq) -{ - u32 seqno; - - seqno = i915_request_global_seqno(rq); - if (!seqno) - return false; - - return i915_seqno_passed(intel_engine_get_seqno(rq->engine), - seqno - 1); -} - static inline bool i915_sched_node_signaled(const struct i915_sched_node *node) { const struct i915_request *rq = diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 11d834f94220..31efc971a3a8 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -199,7 +199,6 @@ vma_create(struct drm_i915_gem_object *obj, vma->flags |= I915_VMA_GGTT; list_add(&vma->obj_link, &obj->vma_list); } else { - i915_ppgtt_get(i915_vm_to_ppgtt(vm)); list_add_tail(&vma->obj_link, &obj->vma_list); } @@ -406,7 +405,7 @@ void i915_vma_unpin_iomap(struct i915_vma *vma) i915_vma_unpin(vma); } -void i915_vma_unpin_and_release(struct i915_vma **p_vma) +void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags) { struct i915_vma *vma; struct drm_i915_gem_object *obj; @@ -421,6 +420,9 @@ void i915_vma_unpin_and_release(struct i915_vma **p_vma) i915_vma_unpin(vma); i915_vma_close(vma); + if (flags & I915_VMA_RELEASE_MAP) + i915_gem_object_unpin_map(obj); + __i915_gem_object_release_unless_active(obj); } @@ -807,9 +809,6 @@ static void __i915_vma_destroy(struct i915_vma *vma) if (vma->obj) rb_erase(&vma->obj_node, &vma->obj->vma_tree); - if (!i915_vma_is_ggtt(vma)) - i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm)); - rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) { GEM_BUG_ON(i915_gem_active_isset(&iter->base)); kfree(iter); diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index f06d66377107..4f7c1c7599f4 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -138,7 +138,8 @@ i915_vma_instance(struct drm_i915_gem_object *obj, struct i915_address_space *vm, const struct i915_ggtt_view *view); -void i915_vma_unpin_and_release(struct i915_vma **p_vma); +void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags); +#define I915_VMA_RELEASE_MAP BIT(0) static inline bool i915_vma_is_active(struct i915_vma *vma) { @@ -207,6 +208,11 @@ static inline u32 i915_ggtt_offset(const struct i915_vma *vma) return lower_32_bits(vma->node.start); } +static inline u32 i915_ggtt_pin_bias(struct i915_vma *vma) +{ + return i915_vm_to_ggtt(vma->vm)->pin_bias; +} + static inline struct i915_vma *i915_vma_get(struct i915_vma *vma) { i915_gem_object_get(vma->obj); @@ -245,6 +251,8 @@ i915_vma_compare(struct i915_vma *vma, if (cmp) return cmp; + assert_i915_gem_gtt_types(); + /* ggtt_view.type also encodes its size so that we both distinguish * different views using it as a "type" and also use a compact (no * accessing of uninitialised padding bytes) memcmp without storing diff 
--git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c index dcba645cabb8..fa7df5fe154b 100644 --- a/drivers/gpu/drm/i915/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/intel_atomic_plane.c @@ -159,7 +159,7 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ } intel_state->base.visible = false; - ret = intel_plane->check_plane(intel_plane, crtc_state, intel_state); + ret = intel_plane->check_plane(crtc_state, intel_state); if (ret) return ret; @@ -170,7 +170,9 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ if (state->fb && INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { if (state->fb->modifier == I915_FORMAT_MOD_Y_TILED || - state->fb->modifier == I915_FORMAT_MOD_Yf_TILED) { + state->fb->modifier == I915_FORMAT_MOD_Yf_TILED || + state->fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || + state->fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) { DRM_DEBUG_KMS("Y/Yf tiling not supported in IF-ID mode\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index b725835b47ef..769f3f586661 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -962,9 +962,6 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv) { int ret; - if (INTEL_INFO(dev_priv)->num_pipes == 0) - return; - ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops); if (ret < 0) { DRM_ERROR("failed to add audio component (%d)\n", ret); diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index 1db6ba7d926e..84bf8d827136 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c @@ -256,8 +256,7 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine) spin_unlock(&b->irq_lock); rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) { - GEM_BUG_ON(!i915_seqno_passed(intel_engine_get_seqno(engine), - wait->seqno)); + GEM_BUG_ON(!intel_engine_signaled(engine, wait->seqno)); RB_CLEAR_NODE(&wait->node); wake_up_process(wait->tsk); } @@ -508,8 +507,7 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine, return armed; /* Make the caller recheck if its request has already started. 
*/ - return i915_seqno_passed(intel_engine_get_seqno(engine), - wait->seqno - 1); + return intel_engine_has_started(engine, wait->seqno); } static inline bool chain_wakeup(struct rb_node *rb, int priority) diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index cf9b600cca79..14cf4c367e36 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -55,7 +55,9 @@ MODULE_FIRMWARE(I915_CSR_BXT); #define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) -#define CSR_MAX_FW_SIZE 0x2FFF +#define BXT_CSR_MAX_FW_SIZE 0x3000 +#define GLK_CSR_MAX_FW_SIZE 0x4000 +#define ICL_CSR_MAX_FW_SIZE 0x6000 #define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF struct intel_css_header { @@ -279,6 +281,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, struct intel_csr *csr = &dev_priv->csr; const struct stepping_info *si = intel_get_stepping_info(dev_priv); uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes; + uint32_t max_fw_size = 0; uint32_t i; uint32_t *dmc_payload; uint32_t required_version; @@ -359,6 +362,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, si->stepping); return NULL; } + /* Convert dmc_offset into number of bytes. By default it is in dwords*/ + dmc_offset *= 4; readcount += dmc_offset; /* Extract dmc_header information. */ @@ -391,8 +396,16 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, /* fw_size is in dwords, so multiplied by 4 to convert into bytes. */ nbytes = dmc_header->fw_size * 4; - if (nbytes > CSR_MAX_FW_SIZE) { - DRM_ERROR("DMC firmware too big (%u bytes)\n", nbytes); + if (INTEL_GEN(dev_priv) >= 11) + max_fw_size = ICL_CSR_MAX_FW_SIZE; + else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) + max_fw_size = GLK_CSR_MAX_FW_SIZE; + else if (IS_GEN9(dev_priv)) + max_fw_size = BXT_CSR_MAX_FW_SIZE; + else + MISSING_CASE(INTEL_REVID(dev_priv)); + if (nbytes > max_fw_size) { + DRM_ERROR("DMC FW too big (%u bytes)\n", nbytes); return NULL; } csr->dmc_fw_size = dmc_header->fw_size; @@ -468,12 +481,6 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv) csr->fw_path = I915_CSR_SKL; else if (IS_BROXTON(dev_priv)) csr->fw_path = I915_CSR_BXT; - else { - DRM_ERROR("Unexpected: no known CSR firmware for platform\n"); - return; - } - - DRM_DEBUG_KMS("Loading %s\n", csr->fw_path); /* * Obtain a runtime pm reference, until CSR is loaded, @@ -481,6 +488,14 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv) */ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + if (csr->fw_path == NULL) { + DRM_DEBUG_KMS("No known CSR firmware for platform, disabling runtime PM\n"); + WARN_ON(!IS_ALPHA_SUPPORT(INTEL_INFO(dev_priv))); + + return; + } + + DRM_DEBUG_KMS("Loading %s\n", csr->fw_path); schedule_work(&dev_priv->csr.work); } diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 8761513f3532..cd01a09c5e0f 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1414,7 +1414,7 @@ static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv, break; } - ref_clock = dev_priv->cdclk.hw.ref; + ref_clock = cnl_hdmi_pll_ref_clock(dev_priv); dco_freq = (cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) * ref_clock; @@ -1427,6 +1427,81 @@ static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv, return dco_freq / (p0 * p1 * p2 * 5); } +static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv, + enum port port) +{ + u32 val = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK; + + switch (val) { + 
case DDI_CLK_SEL_NONE: + return 0; + case DDI_CLK_SEL_TBT_162: + return 162000; + case DDI_CLK_SEL_TBT_270: + return 270000; + case DDI_CLK_SEL_TBT_540: + return 540000; + case DDI_CLK_SEL_TBT_810: + return 810000; + default: + MISSING_CASE(val); + return 0; + } +} + +static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv, + enum port port) +{ + u32 mg_pll_div0, mg_clktop_hsclkctl; + u32 m1, m2_int, m2_frac, div1, div2, refclk; + u64 tmp; + + refclk = dev_priv->cdclk.hw.ref; + + mg_pll_div0 = I915_READ(MG_PLL_DIV0(port)); + mg_clktop_hsclkctl = I915_READ(MG_CLKTOP2_HSCLKCTL(port)); + + m1 = I915_READ(MG_PLL_DIV1(port)) & MG_PLL_DIV1_FBPREDIV_MASK; + m2_int = mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK; + m2_frac = (mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) ? + (mg_pll_div0 & MG_PLL_DIV0_FBDIV_FRAC_MASK) >> + MG_PLL_DIV0_FBDIV_FRAC_SHIFT : 0; + + switch (mg_clktop_hsclkctl & MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) { + case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2: + div1 = 2; + break; + case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3: + div1 = 3; + break; + case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5: + div1 = 5; + break; + case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7: + div1 = 7; + break; + default: + MISSING_CASE(mg_clktop_hsclkctl); + return 0; + } + + div2 = (mg_clktop_hsclkctl & MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >> + MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT; + /* div2 value of 0 is same as 1 means no div */ + if (div2 == 0) + div2 = 1; + + /* + * Adjust the original formula to delay the division by 2^22 in order to + * minimize possible rounding errors. + */ + tmp = (u64)m1 * m2_int * refclk + + (((u64)m1 * m2_frac * refclk) >> 22); + tmp = div_u64(tmp, 5 * div1 * div2); + + return tmp; +} + static void ddi_dotclock_get(struct intel_crtc_state *pipe_config) { int dotclock; @@ -1467,8 +1542,10 @@ static void icl_ddi_clock_get(struct intel_encoder *encoder, link_clock = icl_calc_dp_combo_pll_link(dev_priv, pll_id); } else { - /* FIXME - Add for MG PLL */ - WARN(1, "MG PLL clock_get code not implemented yet\n"); + if (pll_id == DPLL_ID_ICL_TBTPLL) + link_clock = icl_calc_tbt_pll_link(dev_priv, port); + else + link_clock = icl_calc_mg_pll_link(dev_priv, port); } pipe_config->port_clock = link_clock; @@ -2468,7 +2545,128 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); } -static void icl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level, +static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, + int link_clock, + u32 level) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = encoder->port; + const struct icl_mg_phy_ddi_buf_trans *ddi_translations; + u32 n_entries, val; + int ln; + + n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); + ddi_translations = icl_mg_phy_ddi_translations; + /* The table does not have values for level 3 and level 9. */ + if (level >= n_entries || level == 3 || level == 9) { + DRM_DEBUG_KMS("DDI translation not found for level %d. Using %d instead.", + level, n_entries - 2); + level = n_entries - 2; + } + + /* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. 
*/ + for (ln = 0; ln < 2; ln++) { + val = I915_READ(MG_TX1_LINK_PARAMS(port, ln)); + val &= ~CRI_USE_FS32; + I915_WRITE(MG_TX1_LINK_PARAMS(port, ln), val); + + val = I915_READ(MG_TX2_LINK_PARAMS(port, ln)); + val &= ~CRI_USE_FS32; + I915_WRITE(MG_TX2_LINK_PARAMS(port, ln), val); + } + + /* Program MG_TX_SWINGCTRL with values from vswing table */ + for (ln = 0; ln < 2; ln++) { + val = I915_READ(MG_TX1_SWINGCTRL(port, ln)); + val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK; + val |= CRI_TXDEEMPH_OVERRIDE_17_12( + ddi_translations[level].cri_txdeemph_override_17_12); + I915_WRITE(MG_TX1_SWINGCTRL(port, ln), val); + + val = I915_READ(MG_TX2_SWINGCTRL(port, ln)); + val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK; + val |= CRI_TXDEEMPH_OVERRIDE_17_12( + ddi_translations[level].cri_txdeemph_override_17_12); + I915_WRITE(MG_TX2_SWINGCTRL(port, ln), val); + } + + /* Program MG_TX_DRVCTRL with values from vswing table */ + for (ln = 0; ln < 2; ln++) { + val = I915_READ(MG_TX1_DRVCTRL(port, ln)); + val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK | + CRI_TXDEEMPH_OVERRIDE_5_0_MASK); + val |= CRI_TXDEEMPH_OVERRIDE_5_0( + ddi_translations[level].cri_txdeemph_override_5_0) | + CRI_TXDEEMPH_OVERRIDE_11_6( + ddi_translations[level].cri_txdeemph_override_11_6) | + CRI_TXDEEMPH_OVERRIDE_EN; + I915_WRITE(MG_TX1_DRVCTRL(port, ln), val); + + val = I915_READ(MG_TX2_DRVCTRL(port, ln)); + val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK | + CRI_TXDEEMPH_OVERRIDE_5_0_MASK); + val |= CRI_TXDEEMPH_OVERRIDE_5_0( + ddi_translations[level].cri_txdeemph_override_5_0) | + CRI_TXDEEMPH_OVERRIDE_11_6( + ddi_translations[level].cri_txdeemph_override_11_6) | + CRI_TXDEEMPH_OVERRIDE_EN; + I915_WRITE(MG_TX2_DRVCTRL(port, ln), val); + + /* FIXME: Program CRI_LOADGEN_SEL after the spec is updated */ + } + + /* + * Program MG_CLKHUB<LN, port being used> with value from frequency table + * In case of Legacy mode on MG PHY, both TX1 and TX2 enabled so use the + * values from table for which TX1 and TX2 enabled. 
+ */ + for (ln = 0; ln < 2; ln++) { + val = I915_READ(MG_CLKHUB(port, ln)); + if (link_clock < 300000) + val |= CFG_LOW_RATE_LKREN_EN; + else + val &= ~CFG_LOW_RATE_LKREN_EN; + I915_WRITE(MG_CLKHUB(port, ln), val); + } + + /* Program the MG_TX_DCC<LN, port being used> based on the link frequency */ + for (ln = 0; ln < 2; ln++) { + val = I915_READ(MG_TX1_DCC(port, ln)); + val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK; + if (link_clock <= 500000) { + val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN; + } else { + val |= CFG_AMI_CK_DIV_OVERRIDE_EN | + CFG_AMI_CK_DIV_OVERRIDE_VAL(1); + } + I915_WRITE(MG_TX1_DCC(port, ln), val); + + val = I915_READ(MG_TX2_DCC(port, ln)); + val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK; + if (link_clock <= 500000) { + val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN; + } else { + val |= CFG_AMI_CK_DIV_OVERRIDE_EN | + CFG_AMI_CK_DIV_OVERRIDE_VAL(1); + } + I915_WRITE(MG_TX2_DCC(port, ln), val); + } + + /* Program MG_TX_PISO_READLOAD with values from vswing table */ + for (ln = 0; ln < 2; ln++) { + val = I915_READ(MG_TX1_PISO_READLOAD(port, ln)); + val |= CRI_CALCINIT; + I915_WRITE(MG_TX1_PISO_READLOAD(port, ln), val); + + val = I915_READ(MG_TX2_PISO_READLOAD(port, ln)); + val |= CRI_CALCINIT; + I915_WRITE(MG_TX2_PISO_READLOAD(port, ln), val); + } +} + +static void icl_ddi_vswing_sequence(struct intel_encoder *encoder, + int link_clock, + u32 level, enum intel_output_type type) { enum port port = encoder->port; @@ -2476,8 +2674,7 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level, if (port == PORT_A || port == PORT_B) icl_combo_phy_ddi_vswing_sequence(encoder, level, type); else - /* Not Implemented Yet */ - WARN_ON(1); + icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level); } static uint32_t translate_signal_level(int signal_levels) @@ -2512,7 +2709,8 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp) int level = intel_ddi_dp_level(intel_dp); if (IS_ICELAKE(dev_priv)) - icl_ddi_vswing_sequence(encoder, level, encoder->type); + icl_ddi_vswing_sequence(encoder, intel_dp->link_rate, + level, encoder->type); else if (IS_CANNONLAKE(dev_priv)) cnl_ddi_vswing_sequence(encoder, level, encoder->type); else @@ -2692,8 +2890,12 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); + icl_program_mg_dp_mode(intel_dp); + icl_disable_phy_clock_gating(dig_port); + if (IS_ICELAKE(dev_priv)) - icl_ddi_vswing_sequence(encoder, level, encoder->type); + icl_ddi_vswing_sequence(encoder, crtc_state->port_clock, + level, encoder->type); else if (IS_CANNONLAKE(dev_priv)) cnl_ddi_vswing_sequence(encoder, level, encoder->type); else if (IS_GEN9_LP(dev_priv)) @@ -2708,7 +2910,10 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, if (port != PORT_A || INTEL_GEN(dev_priv) >= 9) intel_dp_stop_link_train(intel_dp); - intel_ddi_enable_pipe_clock(crtc_state); + icl_enable_phy_clock_gating(dig_port); + + if (!is_mst) + intel_ddi_enable_pipe_clock(crtc_state); } static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, @@ -2728,7 +2933,8 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); if (IS_ICELAKE(dev_priv)) - icl_ddi_vswing_sequence(encoder, level, INTEL_OUTPUT_HDMI); + icl_ddi_vswing_sequence(encoder, crtc_state->port_clock, + level, INTEL_OUTPUT_HDMI); else if (IS_CANNONLAKE(dev_priv)) cnl_ddi_vswing_sequence(encoder, level, INTEL_OUTPUT_HDMI); else if (IS_GEN9_LP(dev_priv)) @@ -2810,14 +3016,14 @@ 
static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, bool is_mst = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST); - intel_ddi_disable_pipe_clock(old_crtc_state); - - /* - * Power down sink before disabling the port, otherwise we end - * up getting interrupts from the sink on detecting link loss. - */ - if (!is_mst) + if (!is_mst) { + intel_ddi_disable_pipe_clock(old_crtc_state); + /* + * Power down sink before disabling the port, otherwise we end + * up getting interrupts from the sink on detecting link loss. + */ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); + } intel_disable_ddi_buf(encoder); diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 633f9fbf72ea..6eecd64734d5 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -103,9 +103,9 @@ enum intel_platform { func(has_psr); \ func(has_rc6); \ func(has_rc6p); \ - func(has_resource_streamer); \ func(has_runtime_pm); \ func(has_snoop); \ + func(has_coherent_ggtt); \ func(unfenced_needs_alignment); \ func(cursor_needs_physical); \ func(hws_needs_physical); \ diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index c24bc848ac6c..b2bab57cd113 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2475,6 +2475,12 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd) } } +bool is_ccs_modifier(u64 modifier) +{ + return modifier == I915_FORMAT_MOD_Y_TILED_CCS || + modifier == I915_FORMAT_MOD_Yf_TILED_CCS; +} + static int intel_fill_fb_info(struct drm_i915_private *dev_priv, struct drm_framebuffer *fb) @@ -2505,8 +2511,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv, return ret; } - if ((fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || - fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) && i == 1) { + if (is_ccs_modifier(fb->modifier) && i == 1) { int hsub = fb->format->hsub; int vsub = fb->format->vsub; int tile_width, tile_height; @@ -2989,6 +2994,7 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state, int w = drm_rect_width(&plane_state->base.src) >> 16; int h = drm_rect_height(&plane_state->base.src) >> 16; int dst_x = plane_state->base.dst.x1; + int dst_w = drm_rect_width(&plane_state->base.dst); int pipe_src_w = crtc_state->pipe_src_w; int max_width = skl_max_plane_width(fb, 0, rotation); int max_height = 4096; @@ -3010,10 +3016,10 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state, * screen may cause FIFO underflow and display corruption. */ if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && - (dst_x + w < 4 || dst_x > pipe_src_w - 4)) { + (dst_x + dst_w < 4 || dst_x > pipe_src_w - 4)) { DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n", - dst_x + w < 4 ? "end" : "start", - dst_x + w < 4 ? dst_x + w : dst_x, + dst_x + dst_w < 4 ? "end" : "start", + dst_x + dst_w < 4 ? dst_x + dst_w : dst_x, 4, pipe_src_w - 4); return -ERANGE; } @@ -3055,8 +3061,7 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state, * CCS AUX surface doesn't have its own x/y offsets, we must make sure * they match with the main surface x/y offsets. 
*/ - if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || - fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) { + if (is_ccs_modifier(fb->modifier)) { while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) { if (offset == 0) break; @@ -3190,8 +3195,7 @@ int skl_check_plane_surface(const struct intel_crtc_state *crtc_state, ret = skl_check_nv12_aux_surface(plane_state); if (ret) return ret; - } else if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || - fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) { + } else if (is_ccs_modifier(fb->modifier)) { ret = skl_check_ccs_aux_surface(plane_state); if (ret) return ret; @@ -3552,11 +3556,11 @@ static u32 skl_plane_ctl_tiling(uint64_t fb_modifier) case I915_FORMAT_MOD_Y_TILED: return PLANE_CTL_TILED_Y; case I915_FORMAT_MOD_Y_TILED_CCS: - return PLANE_CTL_TILED_Y | PLANE_CTL_DECOMPRESSION_ENABLE; + return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; case I915_FORMAT_MOD_Yf_TILED: return PLANE_CTL_TILED_YF; case I915_FORMAT_MOD_Yf_TILED_CCS: - return PLANE_CTL_TILED_YF | PLANE_CTL_DECOMPRESSION_ENABLE; + return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; default: MISSING_CASE(fb_modifier); } @@ -5079,10 +5083,14 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state) mutex_lock(&dev_priv->pcu_lock); WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); mutex_unlock(&dev_priv->pcu_lock); - /* wait for pcode to finish disabling IPS, which may take up to 42ms */ + /* + * Wait for PCODE to finish disabling IPS. The BSpec specified + * 42ms timeout value leads to occasional timeouts so use 100ms + * instead. + */ if (intel_wait_for_register(dev_priv, IPS_CTL, IPS_ENABLE, 0, - 42)) + 100)) DRM_ERROR("Timed out waiting for IPS disable\n"); } else { I915_WRITE(IPS_CTL, 0); @@ -8799,13 +8807,13 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, fb->modifier = I915_FORMAT_MOD_X_TILED; break; case PLANE_CTL_TILED_Y: - if (val & PLANE_CTL_DECOMPRESSION_ENABLE) + if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE) fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS; else fb->modifier = I915_FORMAT_MOD_Y_TILED; break; case PLANE_CTL_TILED_YF: - if (val & PLANE_CTL_DECOMPRESSION_ENABLE) + if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE) fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS; else fb->modifier = I915_FORMAT_MOD_Yf_TILED; @@ -8974,7 +8982,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n", pipe_name(crtc->pipe)); - I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL)), + I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2), "Display power well on\n"); I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n"); I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n"); @@ -9691,8 +9699,7 @@ static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state) return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64); } -static int i845_check_cursor(struct intel_plane *plane, - struct intel_crtc_state *crtc_state, +static int i845_check_cursor(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { const struct drm_framebuffer *fb = plane_state->base.fb; @@ -9882,10 +9889,10 @@ static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state) return true; } -static int i9xx_check_cursor(struct intel_plane *plane, - struct intel_crtc_state *crtc_state, +static int i9xx_check_cursor(struct intel_crtc_state *crtc_state, struct 
intel_plane_state *plane_state) { + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); const struct drm_framebuffer *fb = plane_state->base.fb; enum pipe pipe = plane->pipe; @@ -12739,7 +12746,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) * down. */ INIT_WORK(&state->commit_work, intel_atomic_cleanup_work); - schedule_work(&state->commit_work); + queue_work(system_highpri_wq, &state->commit_work); } static void intel_atomic_commit_work(struct work_struct *work) @@ -12969,8 +12976,11 @@ static int intel_plane_pin_fb(struct intel_plane_state *plane_state) INTEL_INFO(dev_priv)->cursor_needs_physical) { struct drm_i915_gem_object *obj = intel_fb_obj(fb); const int align = intel_cursor_alignment(dev_priv); + int err; - return i915_gem_object_attach_phys(obj, align); + err = i915_gem_object_attach_phys(obj, align); + if (err) + return err; } vma = intel_pin_and_fence_fb_obj(fb, @@ -13189,10 +13199,10 @@ skl_max_scale(struct intel_crtc *intel_crtc, } static int -intel_check_primary_plane(struct intel_plane *plane, - struct intel_crtc_state *crtc_state, +intel_check_primary_plane(struct intel_crtc_state *crtc_state, struct intel_plane_state *state) { + struct intel_plane *plane = to_intel_plane(state->base.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); struct drm_crtc *crtc = state->base.crtc; int min_scale = DRM_PLANE_HELPER_NO_SCALING; @@ -13400,8 +13410,7 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_XBGR8888: case DRM_FORMAT_ARGB8888: case DRM_FORMAT_ABGR8888: - if (modifier == I915_FORMAT_MOD_Yf_TILED_CCS || - modifier == I915_FORMAT_MOD_Y_TILED_CCS) + if (is_ccs_modifier(modifier)) return true; /* fall through */ case DRM_FORMAT_RGB565: @@ -13620,24 +13629,22 @@ static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv, bool skl_plane_has_planar(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id) { - if (plane_id == PLANE_PRIMARY) { - if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) - return false; - else if ((INTEL_GEN(dev_priv) == 9 && pipe == PIPE_C) && - !IS_GEMINILAKE(dev_priv)) - return false; - } else if (plane_id >= PLANE_SPRITE0) { - if (plane_id == PLANE_CURSOR) - return false; - if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) == 10) { - if (plane_id != PLANE_SPRITE0) - return false; - } else { - if (plane_id != PLANE_SPRITE0 || pipe == PIPE_C || - IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) - return false; - } - } + /* + * FIXME: ICL requires two hardware planes for scanning out NV12 + * framebuffers. Do not advertize support until this is implemented. 
+ */ + if (INTEL_GEN(dev_priv) >= 11) + return false; + + if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) + return false; + + if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C) + return false; + + if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0) + return false; + return true; } @@ -14131,6 +14138,9 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) intel_pps_init(dev_priv); + if (INTEL_INFO(dev_priv)->num_pipes == 0) + return; + /* * intel_edp_init_connector() depends on this completing first, to * prevent the registeration of both eDP and LVDS and the incorrect @@ -14547,7 +14557,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, break; case DRM_FORMAT_NV12: if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) || - IS_BROXTON(dev_priv)) { + IS_BROXTON(dev_priv) || INTEL_GEN(dev_priv) >= 11) { DRM_DEBUG_KMS("unsupported pixel format: %s\n", drm_get_format_name(mode_cmd->pixel_format, &format_name)); @@ -14594,8 +14604,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, * potential runtime errors at plane configuration time. */ if (IS_GEN9(dev_priv) && i == 0 && fb->width > 3840 && - (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || - fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)) + is_ccs_modifier(fb->modifier)) stride_alignment *= 4; if (fb->pitches[i] & (stride_alignment - 1)) { @@ -15131,12 +15140,61 @@ static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv) DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq); } +static int intel_initial_commit(struct drm_device *dev) +{ + struct drm_atomic_state *state = NULL; + struct drm_modeset_acquire_ctx ctx; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + int ret = 0; + + state = drm_atomic_state_alloc(dev); + if (!state) + return -ENOMEM; + + drm_modeset_acquire_init(&ctx, 0); + +retry: + state->acquire_ctx = &ctx; + + drm_for_each_crtc(crtc, dev) { + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) { + ret = PTR_ERR(crtc_state); + goto out; + } + + if (crtc_state->active) { + ret = drm_atomic_add_affected_planes(state, crtc); + if (ret) + goto out; + } + } + + ret = drm_atomic_commit(state); + +out: + if (ret == -EDEADLK) { + drm_atomic_state_clear(state); + drm_modeset_backoff(&ctx); + goto retry; + } + + drm_atomic_state_put(state); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + + return ret; +} + int intel_modeset_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; enum pipe pipe; struct intel_crtc *crtc; + int ret; dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0); @@ -15160,9 +15218,6 @@ int intel_modeset_init(struct drm_device *dev) intel_init_pm(dev_priv); - if (INTEL_INFO(dev_priv)->num_pipes == 0) - return 0; - /* * There may be no VBT; and if the BIOS enabled SSC we can * just keep using it to avoid unnecessary flicker. Whereas if the @@ -15211,8 +15266,6 @@ int intel_modeset_init(struct drm_device *dev) INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : ""); for_each_pipe(dev_priv, pipe) { - int ret; - ret = intel_crtc_init(dev_priv, pipe); if (ret) { drm_mode_config_cleanup(dev); @@ -15268,6 +15321,16 @@ int intel_modeset_init(struct drm_device *dev) if (!HAS_GMCH_DISPLAY(dev_priv)) sanitize_watermarks(dev); + /* + * Force all active planes to recompute their states. 
So that on + * mode_setcrtc after probe, all the intel_plane_state variables + * are already calculated and there is no assert_plane warnings + * during bootup. + */ + ret = intel_initial_commit(dev); + if (ret) + DRM_DEBUG_KMS("Initial commit in probe failed.\n"); + return 0; } @@ -15792,6 +15855,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev, struct intel_encoder *encoder; int i; + intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + intel_early_display_was(dev_priv); intel_modeset_readout_hw_state(dev); @@ -15846,9 +15911,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev, if (WARN_ON(put_domains)) modeset_put_power_domains(dev_priv, put_domains); } - intel_display_set_init_power(dev_priv, false); - intel_power_domains_verify_state(dev_priv); + intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); intel_fbc_init_pipe_state(dev_priv); } @@ -15937,8 +16001,6 @@ void intel_modeset_cleanup(struct drm_device *dev) flush_work(&dev_priv->atomic_helper.free_work); WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list)); - intel_disable_gt_powersave(dev_priv); - /* * Interrupts and polling as the first thing to avoid creating havoc. * Too much stuff here (turning of connectors, ...) would @@ -15966,8 +16028,6 @@ void intel_modeset_cleanup(struct drm_device *dev) intel_cleanup_overlay(dev_priv); - intel_cleanup_gt_powersave(dev_priv); - intel_teardown_gmbus(dev_priv); destroy_workqueue(dev_priv->modeset_wq); @@ -16075,8 +16135,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv) return NULL; if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - error->power_well_driver = - I915_READ(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL)); + error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2); for_each_pipe(dev_priv, i) { error->pipe[i].power_domain_on = diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h index 809c06ae4c07..ed474da6c200 100644 --- a/drivers/gpu/drm/i915/intel_display.h +++ b/drivers/gpu/drm/i915/intel_display.h @@ -27,6 +27,22 @@ #include <drm/drm_util.h> +enum i915_gpio { + GPIOA, + GPIOB, + GPIOC, + GPIOD, + GPIOE, + GPIOF, + GPIOG, + GPIOH, + __GPIOI_UNUSED, + GPIOJ, + GPIOK, + GPIOL, + GPIOM, +}; + enum pipe { INVALID_PIPE = -1, @@ -163,6 +179,13 @@ enum tc_port { I915_MAX_TC_PORTS }; +enum tc_port_type { + TC_PORT_UNKNOWN = 0, + TC_PORT_TYPEC, + TC_PORT_TBT, + TC_PORT_LEGACY, +}; + enum dpio_channel { DPIO_CH0, DPIO_CH1 @@ -348,11 +371,11 @@ struct intel_link_m_n { #define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \ for_each_power_well(__dev_priv, __power_well) \ - for_each_if((__power_well)->domains & (__domain_mask)) + for_each_if((__power_well)->desc->domains & (__domain_mask)) #define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \ for_each_power_well_rev(__dev_priv, __power_well) \ - for_each_if((__power_well)->domains & (__domain_mask)) + for_each_if((__power_well)->desc->domains & (__domain_mask)) #define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \ for ((__i) = 0; \ @@ -384,4 +407,5 @@ void intel_link_compute_m_n(int bpp, int nlanes, struct intel_link_m_n *m_n, bool reduce_m_n); +bool is_ccs_modifier(u64 modifier); #endif diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index cd0f649b57a5..436c22de33b6 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -107,13 +107,6 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp) return intel_dig_port->base.type == 
INTEL_OUTPUT_EDP; } -static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp) -{ - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - - return intel_dig_port->base.base.dev; -} - static struct intel_dp *intel_attached_dp(struct drm_connector *connector) { return enc_to_intel_dp(&intel_attached_encoder(connector)->base); @@ -176,14 +169,45 @@ static int intel_dp_max_common_rate(struct intel_dp *intel_dp) return intel_dp->common_rates[intel_dp->num_common_rates - 1]; } +static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp) +{ + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); + u32 lane_info; + + if (tc_port == PORT_TC_NONE || dig_port->tc_type != TC_PORT_TYPEC) + return 4; + + lane_info = (I915_READ(PORT_TX_DFLEXDPSP) & + DP_LANE_ASSIGNMENT_MASK(tc_port)) >> + DP_LANE_ASSIGNMENT_SHIFT(tc_port); + + switch (lane_info) { + default: + MISSING_CASE(lane_info); + case 1: + case 2: + case 4: + case 8: + return 1; + case 3: + case 12: + return 2; + case 15: + return 4; + } +} + /* Theoretical max between source and sink */ static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); int source_max = intel_dig_port->max_lanes; int sink_max = drm_dp_max_lane_count(intel_dp->dpcd); + int fia_max = intel_dp_get_fia_supported_lane_count(intel_dp); - return min(source_max, sink_max); + return min3(source_max, sink_max, fia_max); } int intel_dp_max_lane_count(struct intel_dp *intel_dp) @@ -198,6 +222,138 @@ intel_dp_link_required(int pixel_clock, int bpp) return DIV_ROUND_UP(pixel_clock * bpp, 8); } +void icl_program_mg_dp_mode(struct intel_dp *intel_dp) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + enum port port = intel_dig_port->base.port; + enum tc_port tc_port = intel_port_to_tc(dev_priv, port); + u32 ln0, ln1, lane_info; + + if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT) + return; + + ln0 = I915_READ(MG_DP_MODE(port, 0)); + ln1 = I915_READ(MG_DP_MODE(port, 1)); + + switch (intel_dig_port->tc_type) { + case TC_PORT_TYPEC: + ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); + ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); + + lane_info = (I915_READ(PORT_TX_DFLEXDPSP) & + DP_LANE_ASSIGNMENT_MASK(tc_port)) >> + DP_LANE_ASSIGNMENT_SHIFT(tc_port); + + switch (lane_info) { + case 0x1: + case 0x4: + break; + case 0x2: + ln0 |= MG_DP_MODE_CFG_DP_X1_MODE; + break; + case 0x3: + ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | + MG_DP_MODE_CFG_DP_X2_MODE; + break; + case 0x8: + ln1 |= MG_DP_MODE_CFG_DP_X1_MODE; + break; + case 0xC: + ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | + MG_DP_MODE_CFG_DP_X2_MODE; + break; + case 0xF: + ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | + MG_DP_MODE_CFG_DP_X2_MODE; + ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | + MG_DP_MODE_CFG_DP_X2_MODE; + break; + default: + MISSING_CASE(lane_info); + } + break; + + case TC_PORT_LEGACY: + ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE; + ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE; + break; + + default: + MISSING_CASE(intel_dig_port->tc_type); + return; + } + + I915_WRITE(MG_DP_MODE(port, 0), ln0); + I915_WRITE(MG_DP_MODE(port, 1), ln1); +} + +void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port) 
+{ + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + enum port port = dig_port->base.port; + enum tc_port tc_port = intel_port_to_tc(dev_priv, port); + i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) }; + u32 val; + int i; + + if (tc_port == PORT_TC_NONE) + return; + + for (i = 0; i < ARRAY_SIZE(mg_regs); i++) { + val = I915_READ(mg_regs[i]); + val |= MG_DP_MODE_CFG_TR2PWR_GATING | + MG_DP_MODE_CFG_TRPWR_GATING | + MG_DP_MODE_CFG_CLNPWR_GATING | + MG_DP_MODE_CFG_DIGPWR_GATING | + MG_DP_MODE_CFG_GAONPWR_GATING; + I915_WRITE(mg_regs[i], val); + } + + val = I915_READ(MG_MISC_SUS0(tc_port)); + val |= MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3) | + MG_MISC_SUS0_CFG_TR2PWR_GATING | + MG_MISC_SUS0_CFG_CL2PWR_GATING | + MG_MISC_SUS0_CFG_GAONPWR_GATING | + MG_MISC_SUS0_CFG_TRPWR_GATING | + MG_MISC_SUS0_CFG_CL1PWR_GATING | + MG_MISC_SUS0_CFG_DGPWR_GATING; + I915_WRITE(MG_MISC_SUS0(tc_port), val); +} + +void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + enum port port = dig_port->base.port; + enum tc_port tc_port = intel_port_to_tc(dev_priv, port); + i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) }; + u32 val; + int i; + + if (tc_port == PORT_TC_NONE) + return; + + for (i = 0; i < ARRAY_SIZE(mg_regs); i++) { + val = I915_READ(mg_regs[i]); + val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING | + MG_DP_MODE_CFG_TRPWR_GATING | + MG_DP_MODE_CFG_CLNPWR_GATING | + MG_DP_MODE_CFG_DIGPWR_GATING | + MG_DP_MODE_CFG_GAONPWR_GATING); + I915_WRITE(mg_regs[i], val); + } + + val = I915_READ(MG_MISC_SUS0(tc_port)); + val &= ~(MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK | + MG_MISC_SUS0_CFG_TR2PWR_GATING | + MG_MISC_SUS0_CFG_CL2PWR_GATING | + MG_MISC_SUS0_CFG_GAONPWR_GATING | + MG_MISC_SUS0_CFG_TRPWR_GATING | + MG_MISC_SUS0_CFG_CL1PWR_GATING | + MG_MISC_SUS0_CFG_DGPWR_GATING); + I915_WRITE(MG_MISC_SUS0(tc_port), val); +} + int intel_dp_max_data_rate(int max_link_clock, int max_lanes) { @@ -498,7 +654,7 @@ intel_dp_pps_init(struct intel_dp *intel_dp); static void pps_lock(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); /* * See intel_power_sequencer_reset() why we need @@ -511,7 +667,7 @@ static void pps_lock(struct intel_dp *intel_dp) static void pps_unlock(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); mutex_unlock(&dev_priv->pps_mutex); @@ -521,7 +677,7 @@ static void pps_unlock(struct intel_dp *intel_dp) static void vlv_power_sequencer_kick(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); enum pipe pipe = intel_dp->pps_pipe; bool pll_enabled, release_cl_override = false; @@ -626,7 +782,7 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv) static enum pipe vlv_power_sequencer_pipe(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); enum pipe pipe; @@ -673,7 +829,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp) static int bxt_power_sequencer_idx(struct intel_dp 
*intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); int backlight_controller = dev_priv->vbt.backlight.controller; lockdep_assert_held(&dev_priv->pps_mutex); @@ -742,7 +898,7 @@ vlv_initial_pps_pipe(struct drm_i915_private *dev_priv, static void vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); enum port port = intel_dig_port->base.port; @@ -819,7 +975,7 @@ struct pps_registers { static void intel_pps_get_registers(struct intel_dp *intel_dp, struct pps_registers *regs) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); int pps_idx = 0; memset(regs, 0, sizeof(*regs)); @@ -865,7 +1021,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code, { struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp), edp_notifier); - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART) return 0; @@ -895,7 +1051,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code, static bool edp_have_panel_power(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); lockdep_assert_held(&dev_priv->pps_mutex); @@ -908,7 +1064,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp) static bool edp_have_panel_vdd(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); lockdep_assert_held(&dev_priv->pps_mutex); @@ -922,7 +1078,7 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp) static void intel_dp_check_edp(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); if (!intel_dp_is_edp(intel_dp)) return; @@ -938,7 +1094,7 @@ intel_dp_check_edp(struct intel_dp *intel_dp) static uint32_t intel_dp_aux_wait_done(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp); uint32_t status; bool done; @@ -955,7 +1111,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp) static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); if (index) return 0; @@ -969,7 +1125,7 @@ static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index) static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); if (index) return 0; @@ -987,7 +1143,7 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) { - struct drm_i915_private *dev_priv = 
to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); if (intel_dp->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) { /* Workaround for non-ULT HSW */ @@ -1045,15 +1201,23 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp, int send_bytes, uint32_t unused) { - return DP_AUX_CH_CTL_SEND_BUSY | - DP_AUX_CH_CTL_DONE | - DP_AUX_CH_CTL_INTERRUPT | - DP_AUX_CH_CTL_TIME_OUT_ERROR | - DP_AUX_CH_CTL_TIME_OUT_MAX | - DP_AUX_CH_CTL_RECEIVE_ERROR | - (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | - DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) | - DP_AUX_CH_CTL_SYNC_PULSE_SKL(32); + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + uint32_t ret; + + ret = DP_AUX_CH_CTL_SEND_BUSY | + DP_AUX_CH_CTL_DONE | + DP_AUX_CH_CTL_INTERRUPT | + DP_AUX_CH_CTL_TIME_OUT_ERROR | + DP_AUX_CH_CTL_TIME_OUT_MAX | + DP_AUX_CH_CTL_RECEIVE_ERROR | + (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | + DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) | + DP_AUX_CH_CTL_SYNC_PULSE_SKL(32); + + if (intel_dig_port->tc_type == TC_PORT_TBT) + ret |= DP_AUX_CH_CTL_TBT_IO; + + return ret; } static int @@ -1381,7 +1545,7 @@ intel_aux_power_domain(struct intel_dp *intel_dp) static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum aux_ch aux_ch = intel_dp->aux_ch; switch (aux_ch) { @@ -1397,7 +1561,7 @@ static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp) static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum aux_ch aux_ch = intel_dp->aux_ch; switch (aux_ch) { @@ -1413,7 +1577,7 @@ static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index) static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum aux_ch aux_ch = intel_dp->aux_ch; switch (aux_ch) { @@ -1431,7 +1595,7 @@ static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp) static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum aux_ch aux_ch = intel_dp->aux_ch; switch (aux_ch) { @@ -1449,7 +1613,7 @@ static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index) static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum aux_ch aux_ch = intel_dp->aux_ch; switch (aux_ch) { @@ -1468,7 +1632,7 @@ static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp) static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum aux_ch aux_ch = intel_dp->aux_ch; switch (aux_ch) { @@ -1494,7 +1658,7 @@ intel_dp_aux_fini(struct intel_dp *intel_dp) static void intel_dp_aux_init(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; intel_dp->aux_ch = 
intel_aux_ch(intel_dp); @@ -1662,7 +1826,7 @@ struct link_config_limits { static int intel_dp_compute_bpp(struct intel_dp *intel_dp, struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_connector *intel_connector = intel_dp->attached_connector; int bpp, bpc; @@ -2030,7 +2194,7 @@ static void wait_panel_status(struct intel_dp *intel_dp, u32 mask, u32 value) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); i915_reg_t pp_stat_reg, pp_ctrl_reg; lockdep_assert_held(&dev_priv->pps_mutex); @@ -2106,7 +2270,7 @@ static void edp_wait_backlight_off(struct intel_dp *intel_dp) static u32 ironlake_get_pp_control(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 control; lockdep_assert_held(&dev_priv->pps_mutex); @@ -2127,7 +2291,7 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp) */ static bool edp_panel_vdd_on(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); u32 pp; i915_reg_t pp_stat_reg, pp_ctrl_reg; @@ -2198,7 +2362,7 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); u32 pp; @@ -2264,7 +2428,7 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp) */ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); lockdep_assert_held(&dev_priv->pps_mutex); @@ -2284,7 +2448,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) static void edp_panel_on(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 pp; i915_reg_t pp_ctrl_reg; @@ -2342,7 +2506,7 @@ void intel_edp_panel_on(struct intel_dp *intel_dp) static void edp_panel_off(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 pp; i915_reg_t pp_ctrl_reg; @@ -2390,7 +2554,7 @@ void intel_edp_panel_off(struct intel_dp *intel_dp) /* Enable backlight in the panel power control. */ static void _intel_edp_backlight_on(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 pp; i915_reg_t pp_ctrl_reg; @@ -2433,7 +2597,7 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, /* Disable backlight in the panel power control. 
*/ static void _intel_edp_backlight_off(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 pp; i915_reg_t pp_ctrl_reg; @@ -2864,7 +3028,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp, uint32_t *DP, uint8_t dp_train_pat) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); enum port port = intel_dig_port->base.port; uint8_t train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd); @@ -2946,7 +3110,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp, static void intel_dp_enable_port(struct intel_dp *intel_dp, const struct intel_crtc_state *old_crtc_state) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); /* enable with pattern 1 (as per spec) */ @@ -3203,7 +3367,7 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ uint8_t intel_dp_voltage_max(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; enum port port = encoder->port; @@ -3222,7 +3386,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp) uint8_t intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; enum port port = encoder->port; @@ -3534,7 +3698,7 @@ ivb_cpu_edp_signal_levels(uint8_t train_set) void intel_dp_set_signal_levels(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); enum port port = intel_dig_port->base.port; uint32_t signal_levels, mask = 0; @@ -3591,7 +3755,7 @@ intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); enum port port = intel_dig_port->base.port; uint32_t val; @@ -4090,12 +4254,14 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp) int ret = 0; int retry; bool handled; + + WARN_ON_ONCE(intel_dp->active_mst_links < 0); bret = intel_dp_get_sink_irq_esi(intel_dp, esi); go_again: if (bret == true) { /* check link status - esi[10] = 0x200c */ - if (intel_dp->active_mst_links && + if (intel_dp->active_mst_links > 0 && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { DRM_DEBUG_KMS("channel EQ not ok, retraining\n"); intel_dp_start_link_train(intel_dp); @@ -4160,18 +4326,6 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp) return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); } -/* - * If display is now connected check links status, - * there has been known issues of link loss triggering - * long pulse. - * - * Some sinks (eg. ASUS PB287Q) seem to perform some - * weird HPD ping pong during modesets. 
So we can apparently - * end up with HPD going low during a modeset, and then - * going back up soon after. And once that happens we must - * retrain the link to get a picture. That's in case no - * userspace component reacted to intermittent HPD dip. - */ int intel_dp_retrain_link(struct intel_encoder *encoder, struct drm_modeset_acquire_ctx *ctx) { @@ -4294,7 +4448,7 @@ static bool intel_dp_hotplug(struct intel_encoder *encoder, static bool intel_dp_short_pulse(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u8 sink_irq_vector = 0; u8 old_sink_count = intel_dp->sink_count; bool ret; @@ -4586,10 +4740,205 @@ static bool bxt_digital_port_connected(struct intel_encoder *encoder) return I915_READ(GEN8_DE_PORT_ISR) & bit; } +static bool icl_combo_port_connected(struct drm_i915_private *dev_priv, + struct intel_digital_port *intel_dig_port) +{ + enum port port = intel_dig_port->base.port; + + return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port); +} + +static void icl_update_tc_port_type(struct drm_i915_private *dev_priv, + struct intel_digital_port *intel_dig_port, + bool is_legacy, bool is_typec, bool is_tbt) +{ + enum port port = intel_dig_port->base.port; + enum tc_port_type old_type = intel_dig_port->tc_type; + const char *type_str; + + WARN_ON(is_legacy + is_typec + is_tbt != 1); + + if (is_legacy) { + intel_dig_port->tc_type = TC_PORT_LEGACY; + type_str = "legacy"; + } else if (is_typec) { + intel_dig_port->tc_type = TC_PORT_TYPEC; + type_str = "typec"; + } else if (is_tbt) { + intel_dig_port->tc_type = TC_PORT_TBT; + type_str = "tbt"; + } else { + return; + } + + /* Types are not supposed to be changed at runtime. */ + WARN_ON(old_type != TC_PORT_UNKNOWN && + old_type != intel_dig_port->tc_type); + + if (old_type != intel_dig_port->tc_type) + DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port), + type_str); +} + +/* + * This function implements the first part of the Connect Flow described by our + * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading + * lanes, EDID, etc) is done as needed in the typical places. + * + * Unlike the other ports, type-C ports are not available to use as soon as we + * get a hotplug. The type-C PHYs can be shared between multiple controllers: + * display, USB, etc. As a result, handshaking through FIA is required around + * connect and disconnect to cleanly transfer ownership with the controller and + * set the type-C power state. + * + * We could opt to only do the connect flow when we actually try to use the AUX + * channels or do a modeset, then immediately run the disconnect flow after + * usage, but there are some implications on this for a dynamic environment: + * things may go away or change behind our backs. So for now our driver is + * always trying to acquire ownership of the controller as soon as it gets an + * interrupt (or polls state and sees a port is connected) and only gives it + * back when it sees a disconnect. Implementation of a more fine-grained model + * will require a lot of coordination with user space and thorough testing for + * the extra possible cases. 
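
As an illustrative summary of the connect-flow comment above (the call names are taken from the hunks that follow; the ordering shown here is an editorial sketch, not additional code from the commit):

	/*
	 * Hotplug or polling ends up in intel_digital_port_connected(), which
	 * for the Type-C HPD pins does roughly:
	 *
	 *   icl_tc_port_connected()
	 *     -> read SDEISR and PORT_TX_DFLEXDPSP live state
	 *     -> icl_update_tc_port_type()   (legacy / typec / tbt)
	 *     -> icl_tc_phy_connect()        (take FIA ownership, legacy/typec)
	 *
	 * and, once no live state is seen any more:
	 *
	 *   icl_tc_port_connected()
	 *     -> icl_tc_phy_disconnect()     (hand ownership back)
	 */
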
+ */ +static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv, + struct intel_digital_port *dig_port) +{ + enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); + u32 val; + + if (dig_port->tc_type != TC_PORT_LEGACY && + dig_port->tc_type != TC_PORT_TYPEC) + return true; + + val = I915_READ(PORT_TX_DFLEXDPPMS); + if (!(val & DP_PHY_MODE_STATUS_COMPLETED(tc_port))) { + DRM_DEBUG_KMS("DP PHY for TC port %d not ready\n", tc_port); + return false; + } + + /* + * This function may be called many times in a row without an HPD event + * in between, so try to avoid the write when we can. + */ + val = I915_READ(PORT_TX_DFLEXDPCSSS); + if (!(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port))) { + val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); + I915_WRITE(PORT_TX_DFLEXDPCSSS, val); + } + + /* + * Now we have to re-check the live state, in case the port recently + * became disconnected. Not necessary for legacy mode. + */ + if (dig_port->tc_type == TC_PORT_TYPEC && + !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) { + DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port); + val = I915_READ(PORT_TX_DFLEXDPCSSS); + val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); + I915_WRITE(PORT_TX_DFLEXDPCSSS, val); + return false; + } + + return true; +} + +/* + * See the comment at the connect function. This implements the Disconnect + * Flow. + */ +static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv, + struct intel_digital_port *dig_port) +{ + enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); + u32 val; + + if (dig_port->tc_type != TC_PORT_LEGACY && + dig_port->tc_type != TC_PORT_TYPEC) + return; + + /* + * This function may be called many times in a row without an HPD event + * in between, so try to avoid the write when we can. + */ + val = I915_READ(PORT_TX_DFLEXDPCSSS); + if (val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port)) { + val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); + I915_WRITE(PORT_TX_DFLEXDPCSSS, val); + } +} + +/* + * The type-C ports are different because even when they are connected, they may + * not be available/usable by the graphics driver: see the comment on + * icl_tc_phy_connect(). So in our driver instead of adding the additional + * concept of "usable" and make everything check for "connected and usable" we + * define a port as "connected" when it is not only connected, but also when it + * is usable by the rest of the driver. That maintains the old assumption that + * connected ports are usable, and avoids exposing to the users objects they + * can't really use. + */ +static bool icl_tc_port_connected(struct drm_i915_private *dev_priv, + struct intel_digital_port *intel_dig_port) +{ + enum port port = intel_dig_port->base.port; + enum tc_port tc_port = intel_port_to_tc(dev_priv, port); + bool is_legacy, is_typec, is_tbt; + u32 dpsp; + + is_legacy = I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port); + + /* + * The spec says we shouldn't be using the ISR bits for detecting + * between TC and TBT. We should use DFLEXDPSP. 
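
A side note on the DFLEXDPSP lane-assignment field that intel_dp_get_fia_supported_lane_count() decodes earlier in this file: for the assignments its switch statement handles (a single lane, either lane pair, or all four lanes), the result is just the number of set bits in the 4-bit field. A hedged, hypothetical equivalent, with the helper name invented purely for illustration:

static int icl_tc_assigned_dp_lanes(u32 lane_info)
{
	/* 0x1/0x2/0x4/0x8 -> 1 lane, 0x3/0xC -> 2 lanes, 0xF -> 4 lanes */
	return hweight32(lane_info & 0xf);
}
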
+ */ + dpsp = I915_READ(PORT_TX_DFLEXDPSP); + is_typec = dpsp & TC_LIVE_STATE_TC(tc_port); + is_tbt = dpsp & TC_LIVE_STATE_TBT(tc_port); + + if (!is_legacy && !is_typec && !is_tbt) { + icl_tc_phy_disconnect(dev_priv, intel_dig_port); + return false; + } + + icl_update_tc_port_type(dev_priv, intel_dig_port, is_legacy, is_typec, + is_tbt); + + if (!icl_tc_phy_connect(dev_priv, intel_dig_port)) + return false; + + return true; +} + +static bool icl_digital_port_connected(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + + switch (encoder->hpd_pin) { + case HPD_PORT_A: + case HPD_PORT_B: + return icl_combo_port_connected(dev_priv, dig_port); + case HPD_PORT_C: + case HPD_PORT_D: + case HPD_PORT_E: + case HPD_PORT_F: + return icl_tc_port_connected(dev_priv, dig_port); + default: + MISSING_CASE(encoder->hpd_pin); + return false; + } +} + /* * intel_digital_port_connected - is the specified port connected? * @encoder: intel_encoder * + * In cases where there's a connector physically connected but it can't be used + * by our hardware we also return false, since the rest of the driver should + * pretty much treat the port as disconnected. This is relevant for type-C + * (starting on ICL) where there's ownership involved. + * * Return %true if port is connected, %false otherwise. */ bool intel_digital_port_connected(struct intel_encoder *encoder) @@ -4613,8 +4962,10 @@ bool intel_digital_port_connected(struct intel_encoder *encoder) return bdw_digital_port_connected(encoder); else if (IS_GEN9_LP(dev_priv)) return bxt_digital_port_connected(encoder); - else + else if (IS_GEN9_BC(dev_priv) || IS_GEN10(dev_priv)) return spt_digital_port_connected(encoder); + else + return icl_digital_port_connected(encoder); } static struct edid * @@ -4661,7 +5012,8 @@ intel_dp_unset_edid(struct intel_dp *intel_dp) } static int -intel_dp_long_pulse(struct intel_connector *connector) +intel_dp_long_pulse(struct intel_connector *connector, + struct drm_modeset_acquire_ctx *ctx) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_dp *intel_dp = intel_attached_dp(&connector->base); @@ -4720,6 +5072,22 @@ intel_dp_long_pulse(struct intel_connector *connector) */ status = connector_status_disconnected; goto out; + } else { + /* + * If display is now connected check links status, + * there has been known issues of link loss triggering + * long pulse. + * + * Some sinks (eg. ASUS PB287Q) seem to perform some + * weird HPD ping pong during modesets. So we can apparently + * end up with HPD going low during a modeset, and then + * going back up soon after. And once that happens we must + * retrain the link to get a picture. That's in case no + * userspace component reacted to intermittent HPD dip. 
+ */ + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + + intel_dp_retrain_link(encoder, ctx); } /* @@ -4781,7 +5149,7 @@ intel_dp_detect(struct drm_connector *connector, return ret; } - status = intel_dp_long_pulse(intel_dp->attached_connector); + status = intel_dp_long_pulse(intel_dp->attached_connector, ctx); } intel_dp->detect_done = false; @@ -5172,7 +5540,7 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = { static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); lockdep_assert_held(&dev_priv->pps_mutex); @@ -5193,7 +5561,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) static enum pipe vlv_active_pipe(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; enum pipe pipe; @@ -5260,7 +5628,7 @@ enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) { struct intel_dp *intel_dp = &intel_dig_port->dp; - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum irqreturn ret = IRQ_NONE; if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) { @@ -5376,7 +5744,7 @@ static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp) static void intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0; struct pps_registers regs; @@ -5444,7 +5812,7 @@ intel_pps_verify_state(struct intel_dp *intel_dp) static void intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct edp_power_seq cur, vbt, spec, *final = &intel_dp->pps_delays; @@ -5537,7 +5905,7 @@ static void intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, bool force_disable_vdd) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 pp_on, pp_off, pp_div, port_sel = 0; int div = dev_priv->rawclk_freq / 1000; struct pps_registers regs; @@ -5633,7 +6001,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, static void intel_dp_pps_init(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { vlv_initial_power_sequencer_setup(intel_dp); @@ -5750,7 +6118,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, void intel_edp_drrs_enable(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); if (!crtc_state->has_drrs) { DRM_DEBUG_KMS("Panel doesn't support DRRS\n"); @@ -5785,7 +6153,7 @@ unlock: void intel_edp_drrs_disable(struct intel_dp *intel_dp, const struct intel_crtc_state *old_crtc_state) { - struct drm_i915_private *dev_priv = 
to_i915(intel_dp_to_dev(intel_dp)); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); if (!old_crtc_state->has_drrs) return; @@ -6017,8 +6385,8 @@ intel_dp_drrs_init(struct intel_connector *connector, static bool intel_edp_init_connector(struct intel_dp *intel_dp, struct intel_connector *intel_connector) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct drm_device *dev = &dev_priv->drm; struct drm_connector *connector = &intel_connector->base; struct drm_display_mode *fixed_mode = NULL; struct drm_display_mode *downclock_mode = NULL; diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c index 4da6e33c7fa1..a9f40985a621 100644 --- a/drivers/gpu/drm/i915/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/intel_dp_link_training.c @@ -129,7 +129,8 @@ static bool intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) { uint8_t voltage; - int voltage_tries, max_vswing_tries; + int voltage_tries, cr_tries, max_cr_tries; + bool max_vswing_reached = false; uint8_t link_config[2]; uint8_t link_bw, rate_select; @@ -170,9 +171,21 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) return false; } + /* + * The DP 1.4 spec defines the max clock recovery retries value + * as 10 but for pre-DP 1.4 devices we set a very tolerant + * retry limit of 80 (4 voltage levels x 4 preemphasis levels x + * x 5 identical voltage retries). Since the previous specs didn't + * define a limit and created the possibility of an infinite loop + * we want to prevent any sync from triggering that corner case. + */ + if (intel_dp->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14) + max_cr_tries = 10; + else + max_cr_tries = 80; + voltage_tries = 1; - max_vswing_tries = 0; - for (;;) { + for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) { uint8_t link_status[DP_LINK_STATUS_SIZE]; drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd); @@ -192,7 +205,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) return false; } - if (max_vswing_tries == 1) { + if (max_vswing_reached) { DRM_DEBUG_KMS("Max Voltage Swing reached\n"); return false; } @@ -213,9 +226,11 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) voltage_tries = 1; if (intel_dp_link_max_vswing_reached(intel_dp)) - ++max_vswing_tries; + max_vswing_reached = true; } + DRM_ERROR("Failed clock recovery %d times, giving up!\n", max_cr_tries); + return false; } /* diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 7e3e01607643..77920f1a3da1 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -166,6 +166,8 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder, struct intel_connector *connector = to_intel_connector(old_conn_state->connector); + intel_ddi_disable_pipe_clock(old_crtc_state); + /* this can fail */ drm_dp_check_act_status(&intel_dp->mst_mgr); /* and this can also fail */ @@ -241,17 +243,16 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, connector->port, pipe_config->pbn, pipe_config->dp_m_n.tu); - if (ret == false) { + if (!ret) DRM_ERROR("failed to allocate vcpi\n"); - return; - } - intel_dp->active_mst_links++; temp = I915_READ(DP_TP_STATUS(port)); I915_WRITE(DP_TP_STATUS(port), temp); ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); + + intel_ddi_enable_pipe_clock(pipe_config); } static void 
intel_mst_enable_dp(struct intel_encoder *encoder, @@ -263,7 +264,6 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder, struct intel_dp *intel_dp = &intel_dig_port->dp; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = intel_dig_port->base.port; - int ret; DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links); @@ -274,9 +274,9 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder, 1)) DRM_ERROR("Timed out waiting for ACT sent\n"); - ret = drm_dp_check_act_status(&intel_dp->mst_mgr); + drm_dp_check_act_status(&intel_dp->mst_mgr); - ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr); + drm_dp_update_payload_part2(&intel_dp->mst_mgr); if (pipe_config->has_audio) intel_audio_codec_enable(encoder, pipe_config, conn_state); } diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index b51ad2917dbe..e6cac9225536 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -2212,6 +2212,20 @@ static void cnl_wrpll_params_populate(struct skl_wrpll_params *params, params->dco_fraction = dco & 0x7fff; } +int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv) +{ + int ref_clock = dev_priv->cdclk.hw.ref; + + /* + * For ICL+, the spec states: if reference frequency is 38.4, + * use 19.2 because the DPLL automatically divides that by 2. + */ + if (INTEL_GEN(dev_priv) >= 11 && ref_clock == 38400) + ref_clock = 19200; + + return ref_clock; +} + static bool cnl_ddi_calculate_wrpll(int clock, struct drm_i915_private *dev_priv, @@ -2251,14 +2265,7 @@ cnl_ddi_calculate_wrpll(int clock, cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv); - ref_clock = dev_priv->cdclk.hw.ref; - - /* - * For ICL, the spec states: if reference frequency is 38.4, use 19.2 - * because the DPLL automatically divides that by 2. - */ - if (IS_ICELAKE(dev_priv) && ref_clock == 38400) - ref_clock = 19200; + ref_clock = cnl_hdmi_pll_ref_clock(dev_priv); cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock, pdiv, qdiv, kdiv); @@ -2452,6 +2459,16 @@ static const struct skl_wrpll_params icl_dp_combo_pll_19_2MHz_values[] = { .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0}, }; +static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = { + .dco_integer = 0x151, .dco_fraction = 0x4000, + .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, +}; + +static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = { + .dco_integer = 0x1A5, .dco_fraction = 0x7000, + .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, +}; + static bool icl_calc_dp_combo_pll(struct drm_i915_private *dev_priv, int clock, struct skl_wrpll_params *pll_params) { @@ -2494,6 +2511,14 @@ static bool icl_calc_dp_combo_pll(struct drm_i915_private *dev_priv, int clock, return true; } +static bool icl_calc_tbt_pll(struct drm_i915_private *dev_priv, int clock, + struct skl_wrpll_params *pll_params) +{ + *pll_params = dev_priv->cdclk.hw.ref == 24000 ? 
+ icl_tbt_pll_24MHz_values : icl_tbt_pll_19_2MHz_values; + return true; +} + static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder, int clock, struct intel_dpll_hw_state *pll_state) @@ -2503,7 +2528,9 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state, struct skl_wrpll_params pll_params = { 0 }; bool ret; - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + if (intel_port_is_tc(dev_priv, encoder->port)) + ret = icl_calc_tbt_pll(dev_priv, clock, &pll_params); + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) ret = cnl_ddi_calculate_wrpll(clock, dev_priv, &pll_params); else ret = icl_calc_dp_combo_pll(dev_priv, clock, &pll_params); @@ -2623,7 +2650,8 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, for (div2 = 10; div2 > 0; div2--) { int dco = div1 * div2 * clock_khz * 5; - int a_divratio, tlinedrv, inputsel, hsdiv; + int a_divratio, tlinedrv, inputsel; + u32 hsdiv; if (dco < dco_min_freq || dco > dco_max_freq) continue; @@ -2642,16 +2670,16 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, MISSING_CASE(div1); /* fall through */ case 2: - hsdiv = 0; + hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2; break; case 3: - hsdiv = 1; + hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3; break; case 5: - hsdiv = 2; + hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5; break; case 7: - hsdiv = 3; + hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7; break; } @@ -2665,7 +2693,7 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, state->mg_clktop2_hsclkctl = MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) | MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) | - MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO(hsdiv) | + hsdiv | MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2); return true; @@ -2846,6 +2874,8 @@ static struct intel_shared_dpll * icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { + struct intel_digital_port *intel_dig_port = + enc_to_dig_port(&encoder->base); struct intel_shared_dpll *pll; struct intel_dpll_hw_state pll_state = {}; enum port port = encoder->port; @@ -2865,7 +2895,7 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, case PORT_D: case PORT_E: case PORT_F: - if (0 /* TODO: TBT PLLs */) { + if (intel_dig_port->tc_type == TC_PORT_TBT) { min = DPLL_ID_ICL_TBTPLL; max = min; ret = icl_calc_dpll_state(crtc_state, encoder, clock, diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h index 7e522cf4f13f..bf0de8a4dc63 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h @@ -344,5 +344,6 @@ void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv, struct intel_dpll_hw_state *hw_state); int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv, uint32_t pll_id); +int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv); #endif /* _INTEL_DPLL_MGR_H_ */ diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 5f63e1a9c25b..f5731215210a 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -972,9 +972,8 @@ struct intel_plane { void (*disable_plane)(struct intel_plane *plane, struct intel_crtc *crtc); bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe); - int (*check_plane)(struct intel_plane *plane, - struct intel_crtc_state *crtc_state, - struct intel_plane_state *state); + int (*check_plane)(struct intel_crtc_state 
*crtc_state, + struct intel_plane_state *plane_state); }; struct intel_watermark_params { @@ -1168,6 +1167,7 @@ struct intel_digital_port { bool release_cl2_override; uint8_t max_lanes; enum intel_display_power_domain ddi_io_power_domain; + enum tc_port_type tc_type; void (*write_infoframe)(struct drm_encoder *encoder, const struct intel_crtc_state *crtc_state, @@ -1314,6 +1314,12 @@ dp_to_lspcon(struct intel_dp *intel_dp) return &dp_to_dig_port(intel_dp)->lspcon; } +static inline struct drm_i915_private * +dp_to_i915(struct intel_dp *intel_dp) +{ + return to_i915(dp_to_dig_port(intel_dp)->base.base.dev); +} + static inline struct intel_digital_port * hdmi_to_dig_port(struct intel_hdmi *intel_hdmi) { @@ -1717,6 +1723,9 @@ void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, unsigned int frontbuffer_bits); void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, unsigned int frontbuffer_bits); +void icl_program_mg_dp_mode(struct intel_dp *intel_dp); +void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port); +void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port); void intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, @@ -1930,6 +1939,9 @@ void intel_psr_enable(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state); void intel_psr_disable(struct intel_dp *intel_dp, const struct intel_crtc_state *old_crtc_state); +int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv, + struct drm_modeset_acquire_ctx *ctx, + u64 value); void intel_psr_invalidate(struct drm_i915_private *dev_priv, unsigned frontbuffer_bits, enum fb_op_origin origin); @@ -1939,20 +1951,33 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, void intel_psr_init(struct drm_i915_private *dev_priv); void intel_psr_compute_config(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state); -void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug); +void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug); void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir); void intel_psr_short_pulse(struct intel_dp *intel_dp); -int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state); +int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, + u32 *out_value); /* intel_runtime_pm.c */ int intel_power_domains_init(struct drm_i915_private *); -void intel_power_domains_fini(struct drm_i915_private *); +void intel_power_domains_cleanup(struct drm_i915_private *dev_priv); void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume); -void intel_power_domains_suspend(struct drm_i915_private *dev_priv); -void intel_power_domains_verify_state(struct drm_i915_private *dev_priv); +void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv); +void intel_power_domains_enable(struct drm_i915_private *dev_priv); +void intel_power_domains_disable(struct drm_i915_private *dev_priv); + +enum i915_drm_suspend_mode { + I915_DRM_SUSPEND_IDLE, + I915_DRM_SUSPEND_MEM, + I915_DRM_SUSPEND_HIBERNATE, +}; + +void intel_power_domains_suspend(struct drm_i915_private *dev_priv, + enum i915_drm_suspend_mode); +void intel_power_domains_resume(struct drm_i915_private *dev_priv); void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume); void bxt_display_core_uninit(struct drm_i915_private *dev_priv); void intel_runtime_pm_enable(struct drm_i915_private *dev_priv); +void intel_runtime_pm_disable(struct drm_i915_private *dev_priv); const char * 
intel_display_power_domain_str(enum intel_display_power_domain domain); @@ -2030,8 +2055,6 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv); void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv); void intel_runtime_pm_put(struct drm_i915_private *dev_priv); -void intel_display_set_init_power(struct drm_i915_private *dev, bool enable); - void chv_phy_powergate_lanes(struct intel_encoder *encoder, bool override, unsigned int mask); bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 2d1952849d69..10cd051ba29e 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -513,7 +513,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, goto err_unref; } - ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH); + ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); if (ret) goto err_unref; @@ -527,36 +527,19 @@ err_unref: void intel_engine_cleanup_scratch(struct intel_engine_cs *engine) { - i915_vma_unpin_and_release(&engine->scratch); -} - -static void cleanup_phys_status_page(struct intel_engine_cs *engine) -{ - struct drm_i915_private *dev_priv = engine->i915; - - if (!dev_priv->status_page_dmah) - return; - - drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah); - engine->status_page.page_addr = NULL; + i915_vma_unpin_and_release(&engine->scratch, 0); } static void cleanup_status_page(struct intel_engine_cs *engine) { - struct i915_vma *vma; - struct drm_i915_gem_object *obj; - - vma = fetch_and_zero(&engine->status_page.vma); - if (!vma) - return; - - obj = vma->obj; + if (HWS_NEEDS_PHYSICAL(engine->i915)) { + void *addr = fetch_and_zero(&engine->status_page.page_addr); - i915_vma_unpin(vma); - i915_vma_close(vma); + __free_page(virt_to_page(addr)); + } - i915_gem_object_unpin_map(obj); - __i915_gem_object_release_unless_active(obj); + i915_vma_unpin_and_release(&engine->status_page.vma, + I915_VMA_RELEASE_MAP); } static int init_status_page(struct intel_engine_cs *engine) @@ -598,7 +581,7 @@ static int init_status_page(struct intel_engine_cs *engine) flags |= PIN_MAPPABLE; else flags |= PIN_HIGH; - ret = i915_vma_pin(vma, 0, 4096, flags); + ret = i915_vma_pin(vma, 0, 0, flags); if (ret) goto err; @@ -622,17 +605,18 @@ err: static int init_phys_status_page(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; + struct page *page; - GEM_BUG_ON(engine->id != RCS); - - dev_priv->status_page_dmah = - drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE); - if (!dev_priv->status_page_dmah) + /* + * Though the HWS register does support 36bit addresses, historically + * we have had hangs and corruption reported due to wild writes if + * the HWS is placed above 4G. 
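
For context on the intel_engine_is_idle() hunk a little further down: the new intel_engine_signaled() helper is not defined in this part of the diff; judging from the open-coded check it replaces, it presumably amounts to something like the following sketch:

static inline bool
intel_engine_signaled(struct intel_engine_cs *engine, u32 seqno)
{
	/* Has the engine's breadcrumb advanced past the given seqno? */
	return i915_seqno_passed(intel_engine_get_seqno(engine), seqno);
}
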
+ */ + page = alloc_page(GFP_KERNEL | __GFP_DMA32 | __GFP_ZERO); + if (!page) return -ENOMEM; - engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr; - memset(engine->status_page.page_addr, 0, PAGE_SIZE); + engine->status_page.page_addr = page_address(page); return 0; } @@ -722,10 +706,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) intel_engine_cleanup_scratch(engine); - if (HWS_NEEDS_PHYSICAL(engine->i915)) - cleanup_phys_status_page(engine); - else - cleanup_status_page(engine); + cleanup_status_page(engine); intel_engine_fini_breadcrumbs(engine); intel_engine_cleanup_cmd_parser(engine); @@ -800,6 +781,16 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine) return err; } +void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine) +{ + struct drm_i915_private *dev_priv = engine->i915; + + GEM_TRACE("%s\n", engine->name); + + I915_WRITE_FW(RING_MI_MODE(engine->mmio_base), + _MASKED_BIT_DISABLE(STOP_RING)); +} + const char *i915_cache_level_str(struct drm_i915_private *i915, int type) { switch (type) { @@ -980,8 +971,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine) return true; /* Any inflight/incomplete requests? */ - if (!i915_seqno_passed(intel_engine_get_seqno(engine), - intel_engine_last_submit(engine))) + if (!intel_engine_signaled(engine, intel_engine_last_submit(engine))) return false; if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock)) @@ -1348,20 +1338,19 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine, if (HAS_EXECLISTS(dev_priv)) { const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX]; - u32 ptr, read, write; unsigned int idx; + u8 read, write; drm_printf(m, "\tExeclist status: 0x%08x %08x\n", I915_READ(RING_EXECLIST_STATUS_LO(engine)), I915_READ(RING_EXECLIST_STATUS_HI(engine))); - ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine)); - read = GEN8_CSB_READ_PTR(ptr); - write = GEN8_CSB_WRITE_PTR(ptr); - drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], tasklet queued? %s (%s)\n", - read, execlists->csb_head, - write, - intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)), + read = execlists->csb_head; + write = READ_ONCE(*execlists->csb_write); + + drm_printf(m, "\tExeclist CSB read %d, write %d [mmio:%d], tasklet queued? 
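intel_engine_is_idle() above (and the hangcheck semaphore check further down) now calls intel_engine_signaled() instead of open-coding i915_seqno_passed(). The comparison has to be wrap-safe because seqnos are 32-bit counters that eventually roll over; a small sketch of that idea, with an illustrative helper rather than the driver's own:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* "a has passed b" even across 32-bit wraparound: the signed difference
 * is non-negative as long as the two values are less than 2^31 apart. */
static inline bool seqno_passed(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) >= 0;
}

int main(void)
{
	assert(seqno_passed(10, 10));
	assert(seqno_passed(11, 10));
	assert(!seqno_passed(10, 11));
	/* wraparound: 5 was issued after 0xfffffffb */
	assert(seqno_passed(5, 0xfffffffbu));
	printf("seqno comparisons behave across wrap\n");
	return 0;
}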
%s (%s)\n", + read, write, + GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine))), yesno(test_bit(TASKLET_STATE_SCHED, &engine->execlists.tasklet.state)), enableddisabled(!atomic_read(&engine->execlists.tasklet.count))); @@ -1373,12 +1362,12 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine, write += GEN8_CSB_ENTRIES; while (read < write) { idx = ++read % GEN8_CSB_ENTRIES; - drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n", + drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [mmio:0x%08x], context: %d [mmio:%d]\n", idx, - I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)), hws[idx * 2], - I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)), - hws[idx * 2 + 1]); + I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)), + hws[idx * 2 + 1], + I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx))); } rcu_read_lock(); diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c index 560c7406ae40..230aea69385d 100644 --- a/drivers/gpu/drm/i915/intel_guc.c +++ b/drivers/gpu/drm/i915/intel_guc.c @@ -27,8 +27,6 @@ #include "intel_guc_submission.h" #include "i915_drv.h" -static void guc_init_ggtt_pin_bias(struct intel_guc *guc); - static void gen8_guc_raise_irq(struct intel_guc *guc) { struct drm_i915_private *dev_priv = guc_to_i915(guc); @@ -128,13 +126,15 @@ static int guc_init_wq(struct intel_guc *guc) static void guc_fini_wq(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct workqueue_struct *wq; - if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) && - USES_GUC_SUBMISSION(dev_priv)) - destroy_workqueue(guc->preempt_wq); + wq = fetch_and_zero(&guc->preempt_wq); + if (wq) + destroy_workqueue(wq); - destroy_workqueue(guc->log.relay.flush_wq); + wq = fetch_and_zero(&guc->log.relay.flush_wq); + if (wq) + destroy_workqueue(wq); } int intel_guc_init_misc(struct intel_guc *guc) @@ -142,8 +142,6 @@ int intel_guc_init_misc(struct intel_guc *guc) struct drm_i915_private *i915 = guc_to_i915(guc); int ret; - guc_init_ggtt_pin_bias(guc); - ret = guc_init_wq(guc); if (ret) return ret; @@ -170,7 +168,7 @@ static int guc_shared_data_create(struct intel_guc *guc) vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); if (IS_ERR(vaddr)) { - i915_vma_unpin_and_release(&vma); + i915_vma_unpin_and_release(&vma, 0); return PTR_ERR(vaddr); } @@ -182,8 +180,7 @@ static int guc_shared_data_create(struct intel_guc *guc) static void guc_shared_data_destroy(struct intel_guc *guc) { - i915_gem_object_unpin_map(guc->shared_data->obj); - i915_vma_unpin_and_release(&guc->shared_data); + i915_vma_unpin_and_release(&guc->shared_data, I915_VMA_RELEASE_MAP); } int intel_guc_init(struct intel_guc *guc) @@ -584,53 +581,28 @@ int intel_guc_resume(struct intel_guc *guc) * * :: * - * +==============> +====================+ <== GUC_GGTT_TOP - * ^ | | - * | | | - * | | DRAM | - * | | Memory | - * | | | - * GuC | | - * Address +========> +====================+ <== WOPCM Top - * Space ^ | HW contexts RSVD | - * | | | WOPCM | - * | | +==> +--------------------+ <== GuC WOPCM Top - * | GuC ^ | | - * | GGTT | | | - * | Pin GuC | GuC | - * | Bias WOPCM | WOPCM | - * | | Size | | - * | | | | | - * v v v | | - * +=====+=====+==> +====================+ <== GuC WOPCM Base - * | Non-GuC WOPCM | - * | (HuC/Reserved) | - * +====================+ <== WOPCM Base + * +===========> +====================+ <== FFFF_FFFF + * ^ | Reserved | + * | +====================+ <== GUC_GGTT_TOP + * | | | + * | | DRAM | + * GuC | | + * Address +===> 
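The guc_fini_wq() rework above no longer re-derives which workqueues were created from device capabilities; it takes ownership of each pointer and destroys it only if non-NULL, which makes the teardown safe to call on partially initialised state. A small sketch of that fetch-and-zero pattern; the macro below approximates the kernel helper and relies on GNU C statement expressions:

#include <stdio.h>
#include <stdlib.h>

/* Approximation of fetch_and_zero(): return the old value of *ptr and
 * leave NULL/0 behind (GNU C extension). */
#define fetch_and_zero(ptr) ({			\
	__typeof__(*(ptr)) __v = *(ptr);	\
	*(ptr) = (__typeof__(*(ptr)))0;		\
	__v;					\
})

struct guc_like {
	void *preempt_wq;	/* may or may not have been created */
	void *flush_wq;
};

static void fini_wq(struct guc_like *g)
{
	void *wq;

	wq = fetch_and_zero(&g->preempt_wq);
	if (wq)
		free(wq);	/* stands in for destroy_workqueue() */

	wq = fetch_and_zero(&g->flush_wq);
	if (wq)
		free(wq);
}

int main(void)
{
	struct guc_like g = { .preempt_wq = malloc(8), .flush_wq = NULL };

	fini_wq(&g);	/* frees what exists, ignores what does not */
	fini_wq(&g);	/* second call is a no-op */
	printf("teardown done: %p %p\n", g.preempt_wq, g.flush_wq);
	return 0;
}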
+====================+ <== GuC ggtt_pin_bias + * Space ^ | | + * | | | | + * | GuC | GuC | + * | WOPCM | WOPCM | + * | Size | | + * | | | | + * v v | | + * +=======+===> +====================+ <== 0000_0000 * - * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to WOPCM + * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to GuC WOPCM * while upper part of GuC Address Space [ggtt_pin_bias, GUC_GGTT_TOP) is mapped - * to DRAM. The value of the GuC ggtt_pin_bias is determined by WOPCM size and - * actual GuC WOPCM size. + * to DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size. */ /** - * guc_init_ggtt_pin_bias() - Initialize the GuC ggtt_pin_bias value. - * @guc: intel_guc structure. - * - * This function will calculate and initialize the ggtt_pin_bias value based on - * overall WOPCM size and GuC WOPCM size. - */ -static void guc_init_ggtt_pin_bias(struct intel_guc *guc) -{ - struct drm_i915_private *i915 = guc_to_i915(guc); - - GEM_BUG_ON(!i915->wopcm.size); - GEM_BUG_ON(i915->wopcm.size < i915->wopcm.guc.base); - - guc->ggtt_pin_bias = i915->wopcm.size - i915->wopcm.guc.base; -} - -/** * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage * @guc: the guc * @size: size of area to allocate (both virtual space and memory) @@ -648,6 +620,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size) struct drm_i915_private *dev_priv = guc_to_i915(guc); struct drm_i915_gem_object *obj; struct i915_vma *vma; + u64 flags; int ret; obj = i915_gem_object_create(dev_priv, size); @@ -658,8 +631,8 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size) if (IS_ERR(vma)) goto err; - ret = i915_vma_pin(vma, 0, PAGE_SIZE, - PIN_GLOBAL | PIN_OFFSET_BIAS | guc->ggtt_pin_bias); + flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma); + ret = i915_vma_pin(vma, 0, 0, flags); if (ret) { vma = ERR_PTR(ret); goto err; @@ -671,3 +644,20 @@ err: i915_gem_object_put(obj); return vma; } + +/** + * intel_guc_reserved_gtt_size() + * @guc: intel_guc structure + * + * The GuC WOPCM mapping shadows the lower part of the GGTT, so if we are using + * GuC we can't have any objects pinned in that region. This function returns + * the size of the shadowed region. + * + * Returns: + * 0 if GuC is not present or not in use. + * Otherwise, the GuC WOPCM size. + */ +u32 intel_guc_reserved_gtt_size(struct intel_guc *guc) +{ + return guc_to_i915(guc)->wopcm.guc.size; +} diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index 4121928a495e..ad42faf48c46 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h @@ -49,9 +49,6 @@ struct intel_guc { struct intel_guc_log log; struct intel_guc_ct ct; - /* Offset where Non-WOPCM memory starts. */ - u32 ggtt_pin_bias; - /* Log snapshot if GuC errors during load */ struct drm_i915_gem_object *load_err_log; @@ -130,10 +127,10 @@ static inline void intel_guc_to_host_event_handler(struct intel_guc *guc) * @vma: i915 graphics virtual memory area. * * GuC does not allow any gfx GGTT address that falls into range - * [0, GuC ggtt_pin_bias), which is reserved for Boot ROM, SRAM and WOPCM. - * Currently, in order to exclude [0, GuC ggtt_pin_bias) address space from + * [0, ggtt.pin_bias), which is reserved for Boot ROM, SRAM and WOPCM. 
+ * Currently, in order to exclude [0, ggtt.pin_bias) address space from * GGTT, all gfx objects used by GuC are allocated with intel_guc_allocate_vma() - * and pinned with PIN_OFFSET_BIAS along with the value of GuC ggtt_pin_bias. + * and pinned with PIN_OFFSET_BIAS along with the value of ggtt.pin_bias. * * Return: GGTT offset of the @vma. */ @@ -142,7 +139,7 @@ static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc, { u32 offset = i915_ggtt_offset(vma); - GEM_BUG_ON(offset < guc->ggtt_pin_bias); + GEM_BUG_ON(offset < i915_ggtt_pin_bias(vma)); GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP)); return offset; @@ -168,6 +165,7 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset); int intel_guc_suspend(struct intel_guc *guc); int intel_guc_resume(struct intel_guc *guc); struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size); +u32 intel_guc_reserved_gtt_size(struct intel_guc *guc); static inline int intel_guc_sanitize(struct intel_guc *guc) { diff --git a/drivers/gpu/drm/i915/intel_guc_ads.c b/drivers/gpu/drm/i915/intel_guc_ads.c index dcaa3fb71765..f0db62887f50 100644 --- a/drivers/gpu/drm/i915/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/intel_guc_ads.c @@ -148,5 +148,5 @@ int intel_guc_ads_create(struct intel_guc *guc) void intel_guc_ads_destroy(struct intel_guc *guc) { - i915_vma_unpin_and_release(&guc->ads_vma); + i915_vma_unpin_and_release(&guc->ads_vma, 0); } diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c index 371b6005954a..a52883e9146f 100644 --- a/drivers/gpu/drm/i915/intel_guc_ct.c +++ b/drivers/gpu/drm/i915/intel_guc_ct.c @@ -204,7 +204,7 @@ static int ctch_init(struct intel_guc *guc, return 0; err_vma: - i915_vma_unpin_and_release(&ctch->vma); + i915_vma_unpin_and_release(&ctch->vma, 0); err_out: CT_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n", ctch->owner, err); @@ -214,10 +214,7 @@ err_out: static void ctch_fini(struct intel_guc *guc, struct intel_guc_ct_channel *ctch) { - GEM_BUG_ON(!ctch->vma); - - i915_gem_object_unpin_map(ctch->vma->obj); - i915_vma_unpin_and_release(&ctch->vma); + i915_vma_unpin_and_release(&ctch->vma, I915_VMA_RELEASE_MAP); } static int ctch_open(struct intel_guc *guc, diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h index 1a0f2a39cef9..8382d591c784 100644 --- a/drivers/gpu/drm/i915/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/intel_guc_fwif.h @@ -49,6 +49,7 @@ #define WQ_TYPE_BATCH_BUF (0x1 << WQ_TYPE_SHIFT) #define WQ_TYPE_PSEUDO (0x2 << WQ_TYPE_SHIFT) #define WQ_TYPE_INORDER (0x3 << WQ_TYPE_SHIFT) +#define WQ_TYPE_NOOP (0x4 << WQ_TYPE_SHIFT) #define WQ_TARGET_SHIFT 10 #define WQ_LEN_SHIFT 16 #define WQ_NO_WCFLUSH_WAIT (1 << 27) diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c index 6da61a71d28f..d3ebdbc0182e 100644 --- a/drivers/gpu/drm/i915/intel_guc_log.c +++ b/drivers/gpu/drm/i915/intel_guc_log.c @@ -498,7 +498,7 @@ err: void intel_guc_log_destroy(struct intel_guc_log *log) { - i915_vma_unpin_and_release(&log->vma); + i915_vma_unpin_and_release(&log->vma, 0); } int intel_guc_log_set_level(struct intel_guc_log *log, u32 level) diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c index 4aa5e6463e7b..07b9d313b019 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/intel_guc_submission.c @@ -317,7 +317,7 @@ static int guc_stage_desc_pool_create(struct intel_guc *guc) vaddr = 
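intel_guc_ggtt_offset() keeps two sanity checks: the object must sit at or above the WOPCM-sized pin bias (the low range is shadowed by the GuC), and offset + size must stay below GUC_GGTT_TOP. The upper check has to be written so that the addition cannot wrap; a standalone sketch with illustrative names and limits, not the driver's range_overflows_t macro:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Does [start, start + size) spill past max?  Phrased so that start + size
 * cannot wrap around and defeat the comparison. */
static bool range_overflows(uint64_t start, uint64_t size, uint64_t max)
{
	return size > max || start > max - size;
}

static bool guc_offset_ok(uint64_t offset, uint64_t size,
			  uint64_t pin_bias, uint64_t ggtt_top)
{
	if (offset < pin_bias)		/* low range shadowed by GuC WOPCM */
		return false;
	return !range_overflows(offset, size, ggtt_top);
}

int main(void)
{
	const uint64_t bias = 0x200000, top = 0xFEE00000;	/* example values */

	assert(guc_offset_ok(bias, 0x1000, bias, top));
	assert(!guc_offset_ok(0x1000, 0x1000, bias, top));		/* below bias */
	assert(!guc_offset_ok(top - 0x800, 0x1000, bias, top));		/* past top */
	assert(!guc_offset_ok(UINT64_MAX - 0x10, 0x1000, bias, top));	/* would wrap */
	printf("range checks ok\n");
	return 0;
}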
i915_gem_object_pin_map(vma->obj, I915_MAP_WB); if (IS_ERR(vaddr)) { - i915_vma_unpin_and_release(&vma); + i915_vma_unpin_and_release(&vma, 0); return PTR_ERR(vaddr); } @@ -331,8 +331,7 @@ static int guc_stage_desc_pool_create(struct intel_guc *guc) static void guc_stage_desc_pool_destroy(struct intel_guc *guc) { ida_destroy(&guc->stage_ids); - i915_gem_object_unpin_map(guc->stage_desc_pool->obj); - i915_vma_unpin_and_release(&guc->stage_desc_pool); + i915_vma_unpin_and_release(&guc->stage_desc_pool, I915_VMA_RELEASE_MAP); } /* @@ -457,6 +456,9 @@ static void guc_wq_item_append(struct intel_guc_client *client, */ BUILD_BUG_ON(wqi_size != 16); + /* We expect the WQ to be active if we're appending items to it */ + GEM_BUG_ON(desc->wq_status != WQ_STATUS_ACTIVE); + /* Free space is guaranteed. */ wq_off = READ_ONCE(desc->tail); GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head), @@ -466,15 +468,19 @@ static void guc_wq_item_append(struct intel_guc_client *client, /* WQ starts from the page after doorbell / process_desc */ wqi = client->vaddr + wq_off + GUC_DB_SIZE; - /* Now fill in the 4-word work queue item */ - wqi->header = WQ_TYPE_INORDER | - (wqi_len << WQ_LEN_SHIFT) | - (target_engine << WQ_TARGET_SHIFT) | - WQ_NO_WCFLUSH_WAIT; - wqi->context_desc = context_desc; - wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT; - GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX); - wqi->fence_id = fence_id; + if (I915_SELFTEST_ONLY(client->use_nop_wqi)) { + wqi->header = WQ_TYPE_NOOP | (wqi_len << WQ_LEN_SHIFT); + } else { + /* Now fill in the 4-word work queue item */ + wqi->header = WQ_TYPE_INORDER | + (wqi_len << WQ_LEN_SHIFT) | + (target_engine << WQ_TARGET_SHIFT) | + WQ_NO_WCFLUSH_WAIT; + wqi->context_desc = context_desc; + wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT; + GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX); + wqi->fence_id = fence_id; + } /* Make the update visible to GuC */ WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1)); @@ -1008,7 +1014,7 @@ guc_client_alloc(struct drm_i915_private *dev_priv, err_vaddr: i915_gem_object_unpin_map(client->vma->obj); err_vma: - i915_vma_unpin_and_release(&client->vma); + i915_vma_unpin_and_release(&client->vma, 0); err_id: ida_simple_remove(&guc->stage_ids, client->stage_id); err_client: @@ -1020,8 +1026,7 @@ static void guc_client_free(struct intel_guc_client *client) { unreserve_doorbell(client); guc_stage_desc_fini(client->guc, client); - i915_gem_object_unpin_map(client->vma->obj); - i915_vma_unpin_and_release(&client->vma); + i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP); ida_simple_remove(&client->guc->stage_ids, client->stage_id); kfree(client); } diff --git a/drivers/gpu/drm/i915/intel_guc_submission.h b/drivers/gpu/drm/i915/intel_guc_submission.h index fb081cefef93..169c54568340 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.h +++ b/drivers/gpu/drm/i915/intel_guc_submission.h @@ -28,6 +28,7 @@ #include <linux/spinlock.h> #include "i915_gem.h" +#include "i915_selftest.h" struct drm_i915_private; @@ -71,6 +72,9 @@ struct intel_guc_client { spinlock_t wq_lock; /* Per-engine counts of GuC submissions */ u64 submissions[I915_NUM_ENGINES]; + + /* For testing purposes, use nop WQ items instead of real ones */ + I915_SELFTEST_DECLARE(bool use_nop_wqi); }; int intel_guc_submission_init(struct intel_guc *guc); diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c index 2fc7a0dd0df9..e26d05a46451 100644 --- a/drivers/gpu/drm/i915/intel_hangcheck.c +++ 
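guc_wq_item_append() above gains a selftest-only path that emits WQ_TYPE_NOOP items, while keeping the usual power-of-two wraparound on the producer pointer, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1). A compact sketch of that style of single-producer work queue; the slot count and the fake NOOP header below are made up for illustration:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WQ_SLOTS 64u			/* must be a power of two */

struct wq {
	uint32_t head;			/* consumer index */
	uint32_t tail;			/* producer index */
	uint32_t item[WQ_SLOTS];
};

static uint32_t wq_space(const struct wq *q)
{
	/* one slot is kept empty to tell "full" apart from "empty" */
	return (q->head - q->tail - 1) & (WQ_SLOTS - 1);
}

static bool wq_append(struct wq *q, uint32_t payload, bool noop)
{
	if (!wq_space(q))
		return false;

	/* a real item would carry type/length/engine fields in its header */
	q->item[q->tail] = noop ? 0xdead0000u : payload;
	q->tail = (q->tail + 1) & (WQ_SLOTS - 1);
	return true;
}

int main(void)
{
	struct wq q = { 0 };
	unsigned int n = 0;

	while (wq_append(&q, n, false))
		n++;
	assert(n == WQ_SLOTS - 1);	/* full, with the spare slot kept free */
	assert(!wq_append(&q, 0, true));
	printf("queued %u items before the ring filled\n", n);
	return 0;
}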
b/drivers/gpu/drm/i915/intel_hangcheck.c @@ -142,7 +142,7 @@ static int semaphore_passed(struct intel_engine_cs *engine) if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES) return -1; - if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno)) + if (intel_engine_signaled(signaller, seqno)) return 1; /* cursory check for an unkickable deadlock */ diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c index 0cc6a861bcf8..26e48fc95543 100644 --- a/drivers/gpu/drm/i915/intel_hdcp.c +++ b/drivers/gpu/drm/i915/intel_hdcp.c @@ -57,9 +57,9 @@ static bool hdcp_key_loadable(struct drm_i915_private *dev_priv) /* PG1 (power well #1) needs to be enabled */ for_each_power_well(dev_priv, power_well) { - if (power_well->id == id) { - enabled = power_well->ops->is_enabled(dev_priv, - power_well); + if (power_well->desc->id == id) { + enabled = power_well->desc->ops->is_enabled(dev_priv, + power_well); break; } } diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index a9076402dcb0..a2dab0b6bde6 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -943,8 +943,12 @@ static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port, ret = i2c_transfer(adapter, &msg, 1); if (ret == 1) - return 0; - return ret >= 0 ? -EIO : ret; + ret = 0; + else if (ret >= 0) + ret = -EIO; + + kfree(write_buf); + return ret; } static @@ -1907,22 +1911,26 @@ intel_hdmi_set_edid(struct drm_connector *connector) static enum drm_connector_status intel_hdmi_detect(struct drm_connector *connector, bool force) { - enum drm_connector_status status; + enum drm_connector_status status = connector_status_disconnected; struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); + struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base; DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); + if (IS_ICELAKE(dev_priv) && + !intel_digital_port_connected(encoder)) + goto out; + intel_hdmi_unset_edid(connector); if (intel_hdmi_set_edid(connector)) status = connector_status_connected; - else - status = connector_status_disconnected; +out: intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); if (status != connector_status_connected) diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c index ffcad5fad6a7..37ef540dd280 100644 --- a/drivers/gpu/drm/i915/intel_huc.c +++ b/drivers/gpu/drm/i915/intel_huc.c @@ -63,7 +63,7 @@ int intel_huc_auth(struct intel_huc *huc) return -ENOEXEC; vma = i915_gem_object_ggtt_pin(huc->fw.obj, NULL, 0, 0, - PIN_OFFSET_BIAS | guc->ggtt_pin_bias); + PIN_OFFSET_BIAS | i915->ggtt.pin_bias); if (IS_ERR(vma)) { ret = PTR_ERR(vma); DRM_ERROR("HuC: Failed to pin huc fw object %d\n", ret); diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index bef32b7c248e..33d87ab93fdd 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -37,7 +37,7 @@ struct gmbus_pin { const char *name; - i915_reg_t reg; + enum i915_gpio gpio; }; /* Map gmbus pin pairs to names and registers. 
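The intel_hdmi_hdcp_write() change above is a leak fix: the old tail returned straight out of the function on both the success and short-transfer paths, so the temporary write_buf allocated earlier in the function (outside this hunk) was never freed there. Funnelling every outcome through one exit that always frees the buffer is the usual shape for this; a standalone sketch with illustrative names and a fake transfer routine:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Pretend bus write: returns number of messages transferred, or < 0. */
static int fake_transfer(const void *buf, size_t len)
{
	(void)buf;
	return len ? 1 : -EIO;
}

static int hdcp_write_sketch(const void *payload, size_t len)
{
	unsigned char *write_buf;
	int ret;

	write_buf = malloc(len + 1);
	if (!write_buf)
		return -ENOMEM;

	write_buf[0] = 0x00;			/* offset byte, say */
	memcpy(write_buf + 1, payload, len);

	ret = fake_transfer(write_buf, len + 1);
	if (ret == 1)
		ret = 0;			/* one message sent: success */
	else if (ret >= 0)
		ret = -EIO;			/* short transfer */
	/* a negative ret is passed through unchanged */

	free(write_buf);			/* every path frees the buffer */
	return ret;
}

int main(void)
{
	char msg[4] = "ksv";

	printf("write returned %d\n", hdcp_write_sketch(msg, sizeof(msg)));
	return 0;
}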
*/ @@ -121,8 +121,7 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv, else size = ARRAY_SIZE(gmbus_pins); - return pin < size && - i915_mmio_reg_valid(get_gmbus_pin(dev_priv, pin)->reg); + return pin < size && get_gmbus_pin(dev_priv, pin)->name; } /* Intel GPIO access functions */ @@ -292,8 +291,7 @@ intel_gpio_setup(struct intel_gmbus *bus, unsigned int pin) algo = &bus->bit_algo; - bus->gpio_reg = _MMIO(dev_priv->gpio_mmio_base + - i915_mmio_reg_offset(get_gmbus_pin(dev_priv, pin)->reg)); + bus->gpio_reg = GPIO(get_gmbus_pin(dev_priv, pin)->gpio); bus->adapter.algo_data = algo; algo->setsda = set_data; algo->setscl = set_clock; @@ -825,9 +823,11 @@ int intel_setup_gmbus(struct drm_i915_private *dev_priv) if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE; else if (!HAS_GMCH_DISPLAY(dev_priv)) - dev_priv->gpio_mmio_base = - i915_mmio_reg_offset(PCH_GPIOA) - - i915_mmio_reg_offset(GPIOA); + /* + * Broxton uses the same PCH offsets for South Display Engine, + * even though it doesn't have a PCH. + */ + dev_priv->gpio_mmio_base = PCH_DISPLAY_BASE; mutex_init(&dev_priv->gmbus_mutex); init_waitqueue_head(&dev_priv->gmbus_wait_queue); diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 174479232e94..9b1f0e5211a0 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -541,11 +541,6 @@ static void inject_preempt_context(struct intel_engine_cs *engine) GEM_BUG_ON(execlists->preempt_complete_status != upper_32_bits(ce->lrc_desc)); - GEM_BUG_ON((ce->lrc_reg_state[CTX_CONTEXT_CONTROL + 1] & - _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | - CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT)) != - _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | - CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT)); /* * Switch to our empty preempt context so @@ -1277,6 +1272,8 @@ static void execlists_context_destroy(struct intel_context *ce) static void execlists_context_unpin(struct intel_context *ce) { + i915_gem_context_unpin_hw_id(ce->gem_context); + intel_ring_unpin(ce->ring); ce->state->obj->pin_global--; @@ -1303,10 +1300,9 @@ static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma) } flags = PIN_GLOBAL | PIN_HIGH; - if (ctx->ggtt_offset_bias) - flags |= PIN_OFFSET_BIAS | ctx->ggtt_offset_bias; + flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma); - return i915_vma_pin(vma, 0, GEN8_LR_CONTEXT_ALIGN, flags); + return i915_vma_pin(vma, 0, 0, flags); } static struct intel_context * @@ -1332,10 +1328,14 @@ __execlists_context_pin(struct intel_engine_cs *engine, goto unpin_vma; } - ret = intel_ring_pin(ce->ring, ctx->i915, ctx->ggtt_offset_bias); + ret = intel_ring_pin(ce->ring); if (ret) goto unpin_map; + ret = i915_gem_context_pin_hw_id(ctx); + if (ret) + goto unpin_ring; + intel_lr_context_descriptor_update(ctx, engine, ce); ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; @@ -1348,6 +1348,8 @@ __execlists_context_pin(struct intel_engine_cs *engine, i915_gem_context_get(ctx); return ce; +unpin_ring: + intel_ring_unpin(ce->ring); unpin_map: i915_gem_object_unpin_map(ce->state->obj); unpin_vma: @@ -1643,7 +1645,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine) goto err; } - err = i915_vma_pin(vma, 0, PAGE_SIZE, PIN_GLOBAL | PIN_HIGH); + err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); if (err) goto err; @@ -1657,7 +1659,7 @@ err: static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine) { - i915_vma_unpin_and_release(&engine->wa_ctx.vma); + 
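__execlists_context_pin() above now pins a hardware context ID between pinning the ring and publishing the context, so the error ladder grows a matching unpin_ring label. The discipline is the usual acquire-in-order, release-in-reverse goto unwind; a minimal sketch with invented step names:

#include <stdio.h>
#include <stdlib.h>

static void *acquire(const char *what)
{
	printf("acquire %s\n", what);
	return malloc(1);		/* pretend this step can fail */
}

static void release(const char *what, void *token)
{
	printf("release %s\n", what);
	free(token);
}

static int pin_context_sketch(void)
{
	void *vma, *map, *ring, *hw_id;
	int err = -1;

	vma = acquire("vma");
	if (!vma)
		goto err_out;
	map = acquire("map");
	if (!map)
		goto err_unpin_vma;
	ring = acquire("ring");
	if (!ring)
		goto err_unpin_map;
	hw_id = acquire("hw_id");	/* the step the patch inserts */
	if (!hw_id)
		goto err_unpin_ring;

	/* success: everything stays pinned until the matching unpin */
	printf("context pinned\n");
	return 0;

err_unpin_ring:
	release("ring", ring);
err_unpin_map:
	release("map", map);
err_unpin_vma:
	release("vma", vma);
err_out:
	return err;
}

int main(void)
{
	return pin_context_sketch() ? 1 : 0;
}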
i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0); } typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch); @@ -1775,11 +1777,7 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine) static int gen8_init_common_ring(struct intel_engine_cs *engine) { - int ret; - - ret = intel_mocs_init_engine(engine); - if (ret) - return ret; + intel_mocs_init_engine(engine); intel_engine_reset_breadcrumbs(engine); @@ -1838,7 +1836,8 @@ execlists_reset_prepare(struct intel_engine_cs *engine) struct i915_request *request, *active; unsigned long flags; - GEM_TRACE("%s\n", engine->name); + GEM_TRACE("%s: depth<-%d\n", engine->name, + atomic_read(&execlists->tasklet.count)); /* * Prevent request submission to the hardware until we have @@ -1971,22 +1970,18 @@ static void execlists_reset_finish(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; - /* After a GPU reset, we may have requests to replay */ - if (!RB_EMPTY_ROOT(&execlists->queue.rb_root)) - tasklet_schedule(&execlists->tasklet); - /* - * Flush the tasklet while we still have the forcewake to be sure - * that it is not allowed to sleep before we restart and reload a - * context. + * After a GPU reset, we may have requests to replay. Do so now while + * we still have the forcewake to be sure that the GPU is not allowed + * to sleep before we restart and reload a context. * - * As before (with execlists_reset_prepare) we rely on the caller - * serialising multiple attempts to reset so that we know that we - * are the only one manipulating tasklet state. */ - __tasklet_enable_sync_once(&execlists->tasklet); + if (!RB_EMPTY_ROOT(&execlists->queue.rb_root)) + execlists->tasklet.func(execlists->tasklet.data); - GEM_TRACE("%s\n", engine->name); + tasklet_enable(&execlists->tasklet); + GEM_TRACE("%s: depth->%d\n", engine->name, + atomic_read(&execlists->tasklet.count)); } static int intel_logical_ring_emit_pdps(struct i915_request *rq) @@ -2066,8 +2061,7 @@ static int gen8_emit_bb_start(struct i915_request *rq, /* FIXME(BDW): Address space and security selectors. */ *cs++ = MI_BATCH_BUFFER_START_GEN8 | - (flags & I915_DISPATCH_SECURE ? 0 : BIT(8)) | - (flags & I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0); + (flags & I915_DISPATCH_SECURE ? 0 : BIT(8)); *cs++ = lower_32_bits(offset); *cs++ = upper_32_bits(offset); @@ -2494,6 +2488,9 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine) static u32 make_rpcs(struct drm_i915_private *dev_priv) { + bool subslice_pg = INTEL_INFO(dev_priv)->sseu.has_subslice_pg; + u8 slices = hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask); + u8 subslices = hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask[0]); u32 rpcs = 0; /* @@ -2504,30 +2501,88 @@ make_rpcs(struct drm_i915_private *dev_priv) return 0; /* + * Since the SScount bitfield in GEN8_R_PWR_CLK_STATE is only three bits + * wide and Icelake has up to eight subslices, specfial programming is + * needed in order to correctly enable all subslices. + * + * According to documentation software must consider the configuration + * as 2x4x8 and hardware will translate this to 1x8x8. + * + * Furthemore, even though SScount is three bits, maximum documented + * value for it is four. From this some rules/restrictions follow: + * + * 1. + * If enabled subslice count is greater than four, two whole slices must + * be enabled instead. + * + * 2. + * When more than one slice is enabled, hardware ignores the subslice + * count altogether. 
+ * + * From these restrictions it follows that it is not possible to enable + * a count of subslices between the SScount maximum of four restriction, + * and the maximum available number on a particular SKU. Either all + * subslices are enabled, or a count between one and four on the first + * slice. + */ + if (IS_GEN11(dev_priv) && slices == 1 && subslices >= 4) { + GEM_BUG_ON(subslices & 1); + + subslice_pg = false; + slices *= 2; + } + + /* * Starting in Gen9, render power gating can leave * slice/subslice/EU in a partially enabled state. We * must make an explicit request through RPCS for full * enablement. */ if (INTEL_INFO(dev_priv)->sseu.has_slice_pg) { - rpcs |= GEN8_RPCS_S_CNT_ENABLE; - rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask) << - GEN8_RPCS_S_CNT_SHIFT; - rpcs |= GEN8_RPCS_ENABLE; + u32 mask, val = slices; + + if (INTEL_GEN(dev_priv) >= 11) { + mask = GEN11_RPCS_S_CNT_MASK; + val <<= GEN11_RPCS_S_CNT_SHIFT; + } else { + mask = GEN8_RPCS_S_CNT_MASK; + val <<= GEN8_RPCS_S_CNT_SHIFT; + } + + GEM_BUG_ON(val & ~mask); + val &= mask; + + rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_S_CNT_ENABLE | val; } - if (INTEL_INFO(dev_priv)->sseu.has_subslice_pg) { - rpcs |= GEN8_RPCS_SS_CNT_ENABLE; - rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask[0]) << - GEN8_RPCS_SS_CNT_SHIFT; - rpcs |= GEN8_RPCS_ENABLE; + if (subslice_pg) { + u32 val = subslices; + + val <<= GEN8_RPCS_SS_CNT_SHIFT; + + GEM_BUG_ON(val & ~GEN8_RPCS_SS_CNT_MASK); + val &= GEN8_RPCS_SS_CNT_MASK; + + rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_SS_CNT_ENABLE | val; } if (INTEL_INFO(dev_priv)->sseu.has_eu_pg) { - rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice << - GEN8_RPCS_EU_MIN_SHIFT; - rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice << - GEN8_RPCS_EU_MAX_SHIFT; + u32 val; + + val = INTEL_INFO(dev_priv)->sseu.eu_per_subslice << + GEN8_RPCS_EU_MIN_SHIFT; + GEM_BUG_ON(val & ~GEN8_RPCS_EU_MIN_MASK); + val &= GEN8_RPCS_EU_MIN_MASK; + + rpcs |= val; + + val = INTEL_INFO(dev_priv)->sseu.eu_per_subslice << + GEN8_RPCS_EU_MAX_SHIFT; + GEM_BUG_ON(val & ~GEN8_RPCS_EU_MAX_MASK); + val &= GEN8_RPCS_EU_MAX_MASK; + + rpcs |= val; + rpcs |= GEN8_RPCS_ENABLE; } @@ -2584,11 +2639,13 @@ static void execlists_init_reg_state(u32 *regs, MI_LRI_FORCE_POSTED; CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(engine), - _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | - CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT) | - _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH | - (HAS_RESOURCE_STREAMER(dev_priv) ? 
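The make_rpcs() rework encodes the rule spelled out in the new comment: on Icelake the SScount field saturates at four, so a request for four or more subslices on a single slice is reprogrammed as two slices with subslice power gating left off. A standalone sketch of just that arithmetic; the shift/mask values below are placeholders, not the real GEN8_R_PWR_CLK_STATE layout:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder field layout for the sketch. */
#define RPCS_ENABLE		(1u << 31)
#define RPCS_S_CNT_ENABLE	(1u << 18)
#define RPCS_S_CNT_SHIFT	15
#define RPCS_S_CNT_MASK		(0x7u << RPCS_S_CNT_SHIFT)
#define RPCS_SS_CNT_ENABLE	(1u << 11)
#define RPCS_SS_CNT_SHIFT	8
#define RPCS_SS_CNT_MASK	(0x7u << RPCS_SS_CNT_SHIFT)

static uint32_t make_rpcs_sketch(int gen, unsigned int slices,
				 unsigned int subslices,
				 bool slice_pg, bool subslice_pg)
{
	uint32_t rpcs = 0;

	/*
	 * Gen11: SScount maxes out at four, so "1 slice, >= 4 subslices"
	 * is requested as two slices and the subslice field is not used.
	 */
	if (gen == 11 && slices == 1 && subslices >= 4) {
		assert(!(subslices & 1));
		subslice_pg = false;
		slices *= 2;
	}

	if (slice_pg) {
		uint32_t val = (slices << RPCS_S_CNT_SHIFT) & RPCS_S_CNT_MASK;

		rpcs |= RPCS_ENABLE | RPCS_S_CNT_ENABLE | val;
	}

	if (subslice_pg) {
		uint32_t val = (subslices << RPCS_SS_CNT_SHIFT) & RPCS_SS_CNT_MASK;

		rpcs |= RPCS_ENABLE | RPCS_SS_CNT_ENABLE | val;
	}

	return rpcs;
}

int main(void)
{
	/* a 1x8x8 request on gen11 turns into "2 slices, no subslice PG" */
	uint32_t rpcs = make_rpcs_sketch(11, 1, 8, true, true);

	assert(((rpcs & RPCS_S_CNT_MASK) >> RPCS_S_CNT_SHIFT) == 2);
	assert(!(rpcs & RPCS_SS_CNT_ENABLE));
	printf("rpcs = 0x%08x\n", rpcs);
	return 0;
}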
- CTX_CTRL_RS_CTX_ENABLE : 0))); + _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) | + _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH)); + if (INTEL_GEN(dev_priv) < 11) { + regs[CTX_CONTEXT_CONTROL + 1] |= + _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT | + CTX_CTRL_RS_CTX_ENABLE); + } CTX_REG(regs, CTX_RING_HEAD, RING_HEAD(base), 0); CTX_REG(regs, CTX_RING_TAIL, RING_TAIL(base), 0); CTX_REG(regs, CTX_RING_BUFFER_START, RING_START(base), 0); @@ -2654,6 +2711,10 @@ static void execlists_init_reg_state(u32 *regs, i915_oa_init_reg_state(engine, ctx, regs); } + + regs[CTX_END] = MI_BATCH_BUFFER_END; + if (INTEL_GEN(dev_priv) >= 10) + regs[CTX_END] |= BIT(0); } static int diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 4dfb78e3ec7e..f5a5502ecf70 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -27,8 +27,6 @@ #include "intel_ringbuffer.h" #include "i915_gem_context.h" -#define GEN8_LR_CONTEXT_ALIGN I915_GTT_MIN_ALIGNMENT - /* Execlists regs */ #define RING_ELSP(engine) _MMIO((engine)->mmio_base + 0x230) #define RING_EXECLIST_STATUS_LO(engine) _MMIO((engine)->mmio_base + 0x234) diff --git a/drivers/gpu/drm/i915/intel_lrc_reg.h b/drivers/gpu/drm/i915/intel_lrc_reg.h index 169a2239d6c7..5ef932d810a7 100644 --- a/drivers/gpu/drm/i915/intel_lrc_reg.h +++ b/drivers/gpu/drm/i915/intel_lrc_reg.h @@ -37,7 +37,7 @@ #define CTX_PDP0_LDW 0x32 #define CTX_LRI_HEADER_2 0x41 #define CTX_R_PWR_CLK_STATE 0x42 -#define CTX_GPGPU_CSR_BASE_ADDRESS 0x44 +#define CTX_END 0x44 #define CTX_REG(reg_state, pos, reg, val) do { \ u32 *reg_state__ = (reg_state); \ diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c index 5dae16ccd9f1..3e085c5f2b81 100644 --- a/drivers/gpu/drm/i915/intel_lspcon.c +++ b/drivers/gpu/drm/i915/intel_lspcon.c @@ -74,7 +74,7 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon, DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n", lspcon_mode_name(mode)); - wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 100); + wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 400); if (current_mode != mode) DRM_ERROR("LSPCON mode hasn't settled\n"); diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c index 9f0bd6a4cb79..77e9871a8c9a 100644 --- a/drivers/gpu/drm/i915/intel_mocs.c +++ b/drivers/gpu/drm/i915/intel_mocs.c @@ -232,20 +232,17 @@ static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index) * * This function simply emits a MI_LOAD_REGISTER_IMM command for the * given table starting at the given address. - * - * Return: 0 on success, otherwise the error status. 
*/ -int intel_mocs_init_engine(struct intel_engine_cs *engine) +void intel_mocs_init_engine(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; struct drm_i915_mocs_table table; unsigned int index; if (!get_mocs_settings(dev_priv, &table)) - return 0; + return; - if (WARN_ON(table.size > GEN9_NUM_MOCS_ENTRIES)) - return -ENODEV; + GEM_BUG_ON(table.size > GEN9_NUM_MOCS_ENTRIES); for (index = 0; index < table.size; index++) I915_WRITE(mocs_register(engine->id, index), @@ -262,8 +259,6 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine) for (; index < GEN9_NUM_MOCS_ENTRIES; index++) I915_WRITE(mocs_register(engine->id, index), table.table[0].control_value); - - return 0; } /** diff --git a/drivers/gpu/drm/i915/intel_mocs.h b/drivers/gpu/drm/i915/intel_mocs.h index d1751f91c1a4..d89080d75b80 100644 --- a/drivers/gpu/drm/i915/intel_mocs.h +++ b/drivers/gpu/drm/i915/intel_mocs.h @@ -54,6 +54,6 @@ int intel_rcs_context_init_mocs(struct i915_request *rq); void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv); -int intel_mocs_init_engine(struct intel_engine_cs *engine); +void intel_mocs_init_engine(struct intel_engine_cs *engine); #endif diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 43ae9de12ba3..d99e5fabe93c 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -26,6 +26,7 @@ */ #include <linux/cpufreq.h> +#include <linux/pm_runtime.h> #include <drm/drm_plane_helper.h> #include "i915_drv.h" #include "intel_drv.h" @@ -2942,8 +2943,8 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv, unsigned int latency = wm[level]; if (latency == 0) { - DRM_ERROR("%s WM%d latency not provided\n", - name, level); + DRM_DEBUG_KMS("%s WM%d latency not provided\n", + name, level); continue; } @@ -3771,11 +3772,11 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state) return true; } -static unsigned int intel_get_ddb_size(struct drm_i915_private *dev_priv, - const struct intel_crtc_state *cstate, - const unsigned int total_data_rate, - const int num_active, - struct skl_ddb_allocation *ddb) +static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv, + const struct intel_crtc_state *cstate, + const unsigned int total_data_rate, + const int num_active, + struct skl_ddb_allocation *ddb) { const struct drm_display_mode *adjusted_mode; u64 total_data_bw; @@ -3814,8 +3815,12 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, struct intel_atomic_state *intel_state = to_intel_atomic_state(state); struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *for_crtc = cstate->base.crtc; - unsigned int pipe_size, ddb_size; - int nth_active_pipe; + const struct drm_crtc_state *crtc_state; + const struct drm_crtc *crtc; + u32 pipe_width = 0, total_width = 0, width_before_pipe = 0; + enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe; + u16 ddb_size; + u32 i; if (WARN_ON(!state) || !cstate->base.active) { alloc->start = 0; @@ -3833,14 +3838,14 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, *num_active, ddb); /* - * If the state doesn't change the active CRTC's, then there's - * no need to recalculate; the existing pipe allocation limits - * should remain unchanged. Note that we're safe from racing - * commits since any racing commit that changes the active CRTC - * list would need to grab _all_ crtc locks, including the one - * we currently hold. 
+ * If the state doesn't change the active CRTC's or there is no + * modeset request, then there's no need to recalculate; + * the existing pipe allocation limits should remain unchanged. + * Note that we're safe from racing commits since any racing commit + * that changes the active CRTC list or do modeset would need to + * grab _all_ crtc locks, including the one we currently hold. */ - if (!intel_state->active_pipe_changes) { + if (!intel_state->active_pipe_changes && !intel_state->modeset) { /* * alloc may be cleared by clear_intel_crtc_state, * copy from old state to be sure @@ -3849,11 +3854,32 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, return; } - nth_active_pipe = hweight32(intel_state->active_crtcs & - (drm_crtc_mask(for_crtc) - 1)); - pipe_size = ddb_size / hweight32(intel_state->active_crtcs); - alloc->start = nth_active_pipe * ddb_size / *num_active; - alloc->end = alloc->start + pipe_size; + /* + * Watermark/ddb requirement highly depends upon width of the + * framebuffer, So instead of allocating DDB equally among pipes + * distribute DDB based on resolution/width of the display. + */ + for_each_new_crtc_in_state(state, crtc, crtc_state, i) { + const struct drm_display_mode *adjusted_mode; + int hdisplay, vdisplay; + enum pipe pipe; + + if (!crtc_state->enable) + continue; + + pipe = to_intel_crtc(crtc)->pipe; + adjusted_mode = &crtc_state->adjusted_mode; + drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay); + total_width += hdisplay; + + if (pipe < for_pipe) + width_before_pipe += hdisplay; + else if (pipe == for_pipe) + pipe_width = hdisplay; + } + + alloc->start = ddb_size * width_before_pipe / total_width; + alloc->end = ddb_size * (width_before_pipe + pipe_width) / total_width; } static unsigned int skl_cursor_allocation(int num_active) @@ -3909,7 +3935,12 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv, val & PLANE_CTL_ALPHA_MASK); val = I915_READ(PLANE_BUF_CFG(pipe, plane_id)); - val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id)); + /* + * FIXME: add proper NV12 support for ICL. Avoid reading unclaimed + * registers for now. + */ + if (INTEL_GEN(dev_priv) < 11) + val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id)); if (fourcc == DRM_FORMAT_NV12) { skl_ddb_entry_init_from_hw(dev_priv, @@ -4977,6 +5008,7 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc, skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id), &ddb->plane[pipe][plane_id]); + /* FIXME: add proper NV12 support for ICL. */ if (INTEL_GEN(dev_priv) >= 11) return skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id), @@ -5142,17 +5174,6 @@ skl_compute_ddb(struct drm_atomic_state *state) } static void -skl_copy_ddb_for_pipe(struct skl_ddb_values *dst, - struct skl_ddb_values *src, - enum pipe pipe) -{ - memcpy(dst->ddb.uv_plane[pipe], src->ddb.uv_plane[pipe], - sizeof(dst->ddb.uv_plane[pipe])); - memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe], - sizeof(dst->ddb.plane[pipe])); -} - -static void skl_print_wm_changes(const struct drm_atomic_state *state) { const struct drm_device *dev = state->dev; @@ -5259,7 +5280,7 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed) * any other display updates race with this transaction, so we need * to grab the lock on *all* CRTC's. 
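The new skl_ddb_get_pipe_allocation_limits() logic above divides the DDB in proportion to each enabled pipe's horizontal resolution instead of handing every active pipe an equal share. The arithmetic reduces to prefix sums of the widths; a small standalone version that assumes all listed pipes are enabled:

#include <stdio.h>

struct ddb_entry { unsigned int start, end; };

/*
 * Split ddb_size blocks across npipes displays in proportion to their
 * widths and return the slice belonging to for_pipe.  Integer math is
 * done as in the patch: scale first, divide last.
 */
static struct ddb_entry ddb_slice(unsigned int ddb_size,
				  const unsigned int *width, int npipes,
				  int for_pipe)
{
	unsigned int total = 0, before = 0;
	struct ddb_entry e;
	int i;

	for (i = 0; i < npipes; i++) {
		total += width[i];
		if (i < for_pipe)
			before += width[i];
	}

	e.start = ddb_size * before / total;
	e.end = ddb_size * (before + width[for_pipe]) / total;
	return e;
}

int main(void)
{
	const unsigned int width[] = { 1920, 3840, 1920 };	/* example modes */
	int pipe;

	for (pipe = 0; pipe < 3; pipe++) {
		struct ddb_entry e = ddb_slice(896, width, 3, pipe);

		printf("pipe %d: [%u, %u)\n", pipe, e.start, e.end);
	}
	return 0;
}

The three slices come out adjacent and cover the whole DDB, with the 4K pipe getting twice the share of each 1080p pipe.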
*/ - if (intel_state->active_pipe_changes) { + if (intel_state->active_pipe_changes || intel_state->modeset) { realloc_pipes = ~0; intel_state->wm_results.dirty_pipes = ~0; } @@ -5381,7 +5402,10 @@ static void skl_initial_wm(struct intel_atomic_state *state, if (cstate->base.active_changed) skl_atomic_update_crtc_wm(state, cstate); - skl_copy_ddb_for_pipe(hw_vals, results, pipe); + memcpy(hw_vals->ddb.uv_plane[pipe], results->ddb.uv_plane[pipe], + sizeof(hw_vals->ddb.uv_plane[pipe])); + memcpy(hw_vals->ddb.plane[pipe], results->ddb.plane[pipe], + sizeof(hw_vals->ddb.plane[pipe])); mutex_unlock(&dev_priv->wm.wm_mutex); } @@ -6379,7 +6403,6 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) new_power = HIGH_POWER; rps_set_power(dev_priv, new_power); mutex_unlock(&rps->power.mutex); - rps->last_adj = 0; } void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive) @@ -8159,7 +8182,7 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv) */ if (!sanitize_rc6(dev_priv)) { DRM_INFO("RC6 disabled, disabling runtime PM support\n"); - intel_runtime_pm_get(dev_priv); + pm_runtime_get(&dev_priv->drm.pdev->dev); } mutex_lock(&dev_priv->pcu_lock); @@ -8211,7 +8234,7 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv) valleyview_cleanup_gt_powersave(dev_priv); if (!HAS_RC6(dev_priv)) - intel_runtime_pm_put(dev_priv); + pm_runtime_put(&dev_priv->drm.pdev->dev); } /** @@ -8238,7 +8261,7 @@ void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv) if (INTEL_GEN(dev_priv) >= 11) gen11_reset_rps_interrupts(dev_priv); - else + else if (INTEL_GEN(dev_priv) >= 6) gen6_reset_rps_interrupts(dev_priv); } diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 4bd5768731ee..b6838b525502 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -56,7 +56,30 @@ #include "intel_drv.h" #include "i915_drv.h" -void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug) +static bool psr_global_enabled(u32 debug) +{ + switch (debug & I915_PSR_DEBUG_MODE_MASK) { + case I915_PSR_DEBUG_DEFAULT: + return i915_modparams.enable_psr; + case I915_PSR_DEBUG_DISABLE: + return false; + default: + return true; + } +} + +static bool intel_psr2_enabled(struct drm_i915_private *dev_priv, + const struct intel_crtc_state *crtc_state) +{ + switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) { + case I915_PSR_DEBUG_FORCE_PSR1: + return false; + default: + return crtc_state->has_psr2; + } +} + +void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug) { u32 debug_mask, mask; @@ -77,10 +100,9 @@ void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug) EDP_PSR_PRE_ENTRY(TRANSCODER_C); } - if (debug) + if (debug & I915_PSR_DEBUG_IRQ) mask |= debug_mask; - WRITE_ONCE(dev_priv->psr.debug, debug); I915_WRITE(EDP_PSR_IMR, ~mask); } @@ -213,6 +235,9 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp) dev_priv->psr.sink_sync_latency = intel_dp_get_sink_sync_latency(intel_dp); + WARN_ON(dev_priv->psr.dp); + dev_priv->psr.dp = intel_dp; + if (INTEL_GEN(dev_priv) >= 9 && (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) { bool y_req = intel_dp->psr_dpcd[1] & @@ -245,7 +270,7 @@ static void intel_psr_setup_vsc(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); + struct 
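psr_global_enabled() and intel_psr2_enabled() above fold the debugfs override into every enable decision: a mode field in psr.debug selects default/force-disable/force-PSR1 behaviour and a separate bit controls IRQ debug. A sketch of that decoding with stand-in constants; the real I915_PSR_DEBUG_* values live in the uapi header:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in values for the sketch. */
#define PSR_DEBUG_MODE_MASK	0x0fu
#define PSR_DEBUG_DEFAULT	0x00u
#define PSR_DEBUG_DISABLE	0x01u
#define PSR_DEBUG_ENABLE	0x02u
#define PSR_DEBUG_FORCE_PSR1	0x03u
#define PSR_DEBUG_IRQ		0x10u

static bool psr_globally_enabled(uint32_t debug, bool module_param)
{
	switch (debug & PSR_DEBUG_MODE_MASK) {
	case PSR_DEBUG_DEFAULT:
		return module_param;	/* follow the enable_psr modparam */
	case PSR_DEBUG_DISABLE:
		return false;
	default:
		return true;		/* any forced mode means "on" */
	}
}

static bool psr2_selected(uint32_t debug, bool crtc_supports_psr2)
{
	if ((debug & PSR_DEBUG_MODE_MASK) == PSR_DEBUG_FORCE_PSR1)
		return false;
	return crtc_supports_psr2;
}

int main(void)
{
	uint32_t dbg = PSR_DEBUG_FORCE_PSR1 | PSR_DEBUG_IRQ;

	printf("enabled=%d psr2=%d irq_debug=%d\n",
	       psr_globally_enabled(dbg, false),
	       psr2_selected(dbg, true),
	       !!(dbg & PSR_DEBUG_IRQ));
	return 0;
}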
drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct edp_vsc_psr psr_vsc; if (dev_priv->psr.psr2_enabled) { @@ -275,8 +300,7 @@ static void intel_psr_setup_vsc(struct intel_dp *intel_dp, static void hsw_psr_setup_aux(struct intel_dp *intel_dp) { - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 aux_clock_divider, aux_ctl; int i; static const uint8_t aux_msg[] = { @@ -309,9 +333,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp) static void intel_psr_enable_sink(struct intel_dp *intel_dp) { - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u8 dpcd_val = DP_PSR_ENABLE; /* Enable ALPM at sink for psr2 */ @@ -332,9 +354,7 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp) static void hsw_activate_psr1(struct intel_dp *intel_dp) { - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 max_sleep_time = 0x1f; u32 val = EDP_PSR_ENABLE; @@ -389,9 +409,7 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp) static void hsw_activate_psr2(struct intel_dp *intel_dp) { - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 val; /* Let's use 6 as the minimum to cover all known cases including the @@ -427,8 +445,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) static bool intel_psr2_config_valid(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); int crtc_hdisplay = crtc_state->base.adjusted_mode.crtc_hdisplay; int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay; int psr_max_h = 0, psr_max_v = 0; @@ -463,7 +480,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; int psr_setup_time; @@ -471,10 +488,8 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, if (!CAN_PSR(dev_priv)) return; - if (!i915_modparams.enable_psr) { - DRM_DEBUG_KMS("PSR disable by flag\n"); + if (intel_dp != dev_priv->psr.dp) return; - } /* * HSW spec explicitly says PSR is tied to port A. @@ -517,14 +532,11 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, crtc_state->has_psr = true; crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state); - DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? 
"2" : ""); } static void intel_psr_activate(struct intel_dp *intel_dp) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); if (INTEL_GEN(dev_priv) >= 9) WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE); @@ -544,9 +556,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp) static void intel_psr_enable_source(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; /* Only HSW and BDW have PSR AUX registers that need to be setup. SKL+ @@ -589,6 +599,24 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, } } +static void intel_psr_enable_locked(struct drm_i915_private *dev_priv, + const struct intel_crtc_state *crtc_state) +{ + struct intel_dp *intel_dp = dev_priv->psr.dp; + + if (dev_priv->psr.enabled) + return; + + DRM_DEBUG_KMS("Enabling PSR%s\n", + dev_priv->psr.psr2_enabled ? "2" : "1"); + intel_psr_setup_vsc(intel_dp, crtc_state); + intel_psr_enable_sink(intel_dp); + intel_psr_enable_source(intel_dp, crtc_state); + dev_priv->psr.enabled = true; + + intel_psr_activate(intel_dp); +} + /** * intel_psr_enable - Enable PSR * @intel_dp: Intel DP @@ -599,9 +627,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, void intel_psr_enable(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); if (!crtc_state->has_psr) return; @@ -610,21 +636,21 @@ void intel_psr_enable(struct intel_dp *intel_dp, return; WARN_ON(dev_priv->drrs.dp); + mutex_lock(&dev_priv->psr.lock); - if (dev_priv->psr.enabled) { + if (dev_priv->psr.prepared) { DRM_DEBUG_KMS("PSR already in use\n"); goto unlock; } - dev_priv->psr.psr2_enabled = crtc_state->has_psr2; + dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state); dev_priv->psr.busy_frontbuffer_bits = 0; + dev_priv->psr.prepared = true; - intel_psr_setup_vsc(intel_dp, crtc_state); - intel_psr_enable_sink(intel_dp); - intel_psr_enable_source(intel_dp, crtc_state); - dev_priv->psr.enabled = intel_dp; - - intel_psr_activate(intel_dp); + if (psr_global_enabled(dev_priv->psr.debug)) + intel_psr_enable_locked(dev_priv, crtc_state); + else + DRM_DEBUG_KMS("PSR disabled by flag\n"); unlock: mutex_unlock(&dev_priv->psr.lock); @@ -633,9 +659,7 @@ unlock: static void intel_psr_disable_source(struct intel_dp *intel_dp) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); if (dev_priv->psr.active) { i915_reg_t psr_status; @@ -674,21 +698,21 @@ intel_psr_disable_source(struct intel_dp *intel_dp) static void intel_psr_disable_locked(struct intel_dp *intel_dp) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; - struct 
drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); lockdep_assert_held(&dev_priv->psr.lock); if (!dev_priv->psr.enabled) return; + DRM_DEBUG_KMS("Disabling PSR%s\n", + dev_priv->psr.psr2_enabled ? "2" : "1"); intel_psr_disable_source(intel_dp); /* Disable PSR on Sink */ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0); - dev_priv->psr.enabled = NULL; + dev_priv->psr.enabled = false; } /** @@ -701,9 +725,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) void intel_psr_disable(struct intel_dp *intel_dp, const struct intel_crtc_state *old_crtc_state) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); if (!old_crtc_state->has_psr) return; @@ -712,57 +734,61 @@ void intel_psr_disable(struct intel_dp *intel_dp, return; mutex_lock(&dev_priv->psr.lock); + if (!dev_priv->psr.prepared) { + mutex_unlock(&dev_priv->psr.lock); + return; + } + intel_psr_disable_locked(intel_dp); + + dev_priv->psr.prepared = false; mutex_unlock(&dev_priv->psr.lock); cancel_work_sync(&dev_priv->psr.work); } -int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state) +/** + * intel_psr_wait_for_idle - wait for PSR1 to idle + * @new_crtc_state: new CRTC state + * @out_value: PSR status in case of failure + * + * This function is expected to be called from pipe_update_start() where it is + * not expected to race with PSR enable or disable. + * + * Returns: 0 on success or -ETIMEOUT if PSR status does not idle. + */ +int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, + u32 *out_value) { struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - i915_reg_t reg; - u32 mask; - if (!new_crtc_state->has_psr) + if (!dev_priv->psr.enabled || !new_crtc_state->has_psr) return 0; - /* - * The sole user right now is intel_pipe_update_start(), - * which won't race with psr_enable/disable, which is - * where psr2_enabled is written to. So, we don't need - * to acquire the psr.lock. More importantly, we want the - * latency inside intel_pipe_update_start() to be as low - * as possible, so no need to acquire psr.lock when it is - * not needed and will induce latencies in the atomic - * update path. - */ - if (dev_priv->psr.psr2_enabled) { - reg = EDP_PSR2_STATUS; - mask = EDP_PSR2_STATUS_STATE_MASK; - } else { - reg = EDP_PSR_STATUS; - mask = EDP_PSR_STATUS_STATE_MASK; - } + /* FIXME: Update this for PSR2 if we need to wait for idle */ + if (READ_ONCE(dev_priv->psr.psr2_enabled)) + return 0; /* - * Max time for PSR to idle = Inverse of the refresh rate + - * 6 ms of exit training time + 1.5 ms of aux channel - * handshake. 50 msec is defesive enough to cover everything. + * From bspec: Panel Self Refresh (BDW+) + * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of + * exit training time + 1.5 ms of aux channel handshake. 50 ms is + * defensive enough to cover everything. 
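The enable and disable paths above now track psr.prepared separately from psr.enabled: the modeset always records that the pipe was prepared for PSR, while activation only happens when the global/debug control allows it. That split is what lets the new debugfs hook flip PSR on and off later without another modeset. A toy model of the state machine (no locking shown; the real code holds psr.lock throughout):

#include <stdbool.h>
#include <stdio.h>

struct psr_model {
	bool prepared;	/* crtc configured with PSR support */
	bool enabled;	/* feature currently active */
	bool allowed;	/* global/debug override */
};

static void psr_try_enable(struct psr_model *p)
{
	if (p->prepared && p->allowed && !p->enabled) {
		p->enabled = true;
		printf("PSR activated\n");
	}
}

static void psr_modeset_enable(struct psr_model *p)
{
	p->prepared = true;	/* always recorded, even if held off by flag */
	psr_try_enable(p);
}

static void psr_set_allowed(struct psr_model *p, bool allowed)
{
	p->allowed = allowed;	/* e.g. from the debugfs knob */
	if (!allowed && p->enabled) {
		p->enabled = false;
		printf("PSR deactivated\n");
	}
	psr_try_enable(p);
}

int main(void)
{
	struct psr_model p = { .allowed = false };

	psr_modeset_enable(&p);		/* prepared, but held off by the flag */
	psr_set_allowed(&p, true);	/* now it can activate */
	psr_set_allowed(&p, false);	/* and be turned off again */
	return 0;
}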
*/ - return intel_wait_for_register(dev_priv, reg, mask, - EDP_PSR_STATUS_STATE_IDLE, 50); + + return __intel_wait_for_register(dev_priv, EDP_PSR_STATUS, + EDP_PSR_STATUS_STATE_MASK, + EDP_PSR_STATUS_STATE_IDLE, 2, 50, + out_value); } static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv) { - struct intel_dp *intel_dp; i915_reg_t reg; u32 mask; int err; - intel_dp = dev_priv->psr.enabled; - if (!intel_dp) + if (!dev_priv->psr.enabled) return false; if (dev_priv->psr.psr2_enabled) { @@ -784,6 +810,89 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv) return err == 0 && dev_priv->psr.enabled; } +static bool switching_psr(struct drm_i915_private *dev_priv, + struct intel_crtc_state *crtc_state, + u32 mode) +{ + /* Can't switch psr state anyway if PSR2 is not supported. */ + if (!crtc_state || !crtc_state->has_psr2) + return false; + + if (dev_priv->psr.psr2_enabled && mode == I915_PSR_DEBUG_FORCE_PSR1) + return true; + + if (!dev_priv->psr.psr2_enabled && mode != I915_PSR_DEBUG_FORCE_PSR1) + return true; + + return false; +} + +int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv, + struct drm_modeset_acquire_ctx *ctx, + u64 val) +{ + struct drm_device *dev = &dev_priv->drm; + struct drm_connector_state *conn_state; + struct intel_crtc_state *crtc_state = NULL; + struct drm_crtc_commit *commit; + struct drm_crtc *crtc; + struct intel_dp *dp; + int ret; + bool enable; + u32 mode = val & I915_PSR_DEBUG_MODE_MASK; + + if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) || + mode > I915_PSR_DEBUG_FORCE_PSR1) { + DRM_DEBUG_KMS("Invalid debug mask %llx\n", val); + return -EINVAL; + } + + ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx); + if (ret) + return ret; + + /* dev_priv->psr.dp should be set once and then never touched again. 
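intel_psr_wait_for_idle() now reports the last PSR status word through an out parameter when the poll times out, by switching to __intel_wait_for_register(). The useful part of the pattern is returning both an error code and the final observed value; a generic sketch:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Poll read(ctx) until (value & mask) == want or the retry budget runs out.
 * On timeout the caller still gets the last value seen via *out, which is
 * handy for error messages and debug dumps.
 */
static int poll_register(uint32_t (*read)(void *ctx), void *ctx,
			 uint32_t mask, uint32_t want,
			 unsigned int retries, uint32_t *out)
{
	uint32_t val = 0;

	while (retries--) {
		val = read(ctx);
		if ((val & mask) == want) {
			if (out)
				*out = val;
			return 0;
		}
		/* a real implementation would sleep or delay here */
	}

	if (out)
		*out = val;
	return -ETIMEDOUT;
}

static uint32_t stuck_status(void *ctx)
{
	(void)ctx;
	return 0x13;			/* never reaches the idle state */
}

int main(void)
{
	uint32_t last;
	int err = poll_register(stuck_status, NULL, 0x07, 0x00, 5, &last);

	printf("err=%d last status=0x%02x\n", err, last);
	return 0;
}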
*/ + dp = READ_ONCE(dev_priv->psr.dp); + conn_state = dp->attached_connector->base.state; + crtc = conn_state->crtc; + if (crtc) { + ret = drm_modeset_lock(&crtc->mutex, ctx); + if (ret) + return ret; + + crtc_state = to_intel_crtc_state(crtc->state); + commit = crtc_state->base.commit; + } else { + commit = conn_state->commit; + } + if (commit) { + ret = wait_for_completion_interruptible(&commit->hw_done); + if (ret) + return ret; + } + + ret = mutex_lock_interruptible(&dev_priv->psr.lock); + if (ret) + return ret; + + enable = psr_global_enabled(val); + + if (!enable || switching_psr(dev_priv, crtc_state, mode)) + intel_psr_disable_locked(dev_priv->psr.dp); + + dev_priv->psr.debug = val; + if (crtc) + dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state); + + intel_psr_irq_control(dev_priv, dev_priv->psr.debug); + + if (dev_priv->psr.prepared && enable) + intel_psr_enable_locked(dev_priv, crtc_state); + + mutex_unlock(&dev_priv->psr.lock); + return ret; +} + static void intel_psr_work(struct work_struct *work) { struct drm_i915_private *dev_priv = @@ -811,7 +920,7 @@ static void intel_psr_work(struct work_struct *work) if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active) goto unlock; - intel_psr_activate(dev_priv->psr.enabled); + intel_psr_activate(dev_priv->psr.dp); unlock: mutex_unlock(&dev_priv->psr.lock); } @@ -866,7 +975,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv, return; } - crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc; + crtc = dp_to_dig_port(dev_priv->psr.dp)->base.base.crtc; pipe = to_intel_crtc(crtc)->pipe; frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); @@ -909,7 +1018,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, return; } - crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc; + crtc = dp_to_dig_port(dev_priv->psr.dp)->base.base.crtc; pipe = to_intel_crtc(crtc)->pipe; frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); @@ -977,9 +1086,7 @@ void intel_psr_init(struct drm_i915_private *dev_priv) void intel_psr_short_pulse(struct intel_dp *intel_dp) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct i915_psr *psr = &dev_priv->psr; u8 val; const u8 errors = DP_PSR_RFB_STORAGE_ERROR | @@ -991,7 +1098,7 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp) mutex_lock(&psr->lock); - if (psr->enabled != intel_dp) + if (!psr->enabled || psr->dp != intel_dp) goto exit; if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) { diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 6a8f27d0a742..472939f5c18f 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -344,11 +344,14 @@ gen7_render_ring_flush(struct i915_request *rq, u32 mode) static void ring_setup_phys_status_page(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; + struct page *page = virt_to_page(engine->status_page.page_addr); + phys_addr_t phys = PFN_PHYS(page_to_pfn(page)); u32 addr; - addr = dev_priv->status_page_dmah->busaddr; + addr = lower_32_bits(phys); if (INTEL_GEN(dev_priv) >= 4) - addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; + addr |= (phys >> 28) & 0xf0; + I915_WRITE(HWS_PGA, addr); } @@ -537,6 +540,8 @@ static int init_ring_common(struct intel_engine_cs *engine) if 
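ring_setup_phys_status_page() above now derives the bus address from the page itself, and keeps the existing gen4+ convention of folding physical address bits 35:32 into bits 7:4 of the 32-bit HWS_PGA value, which is what the (phys >> 28) & 0xf0 term does. A round-trip sketch of that packing for a page-aligned address:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Pack a page-aligned physical address of at most 36 bits into a 32-bit
 * word: low 32 bits as-is, bits 35:32 folded into bits 7:4. */
static uint32_t pack_hws_addr(uint64_t phys)
{
	uint32_t addr = (uint32_t)phys;

	addr |= (uint32_t)((phys >> 28) & 0xf0);
	return addr;
}

static uint64_t unpack_hws_addr(uint32_t reg)
{
	return (uint64_t)(reg & ~0xf0u) | ((uint64_t)(reg & 0xf0) << 28);
}

int main(void)
{
	uint64_t phys = 0x9ABCDE000ull;		/* 36-bit, page aligned */
	uint32_t reg = pack_hws_addr(phys);

	assert(unpack_hws_addr(reg) == phys);
	printf("phys 0x%llx -> reg 0x%08x\n", (unsigned long long)phys, reg);
	return 0;
}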
(INTEL_GEN(dev_priv) > 2) I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING)); + /* Papering over lost _interrupts_ immediately following the restart */ + intel_engine_wakeup(engine); out: intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); @@ -1013,24 +1018,22 @@ i915_emit_bb_start(struct i915_request *rq, return 0; } - - -int intel_ring_pin(struct intel_ring *ring, - struct drm_i915_private *i915, - unsigned int offset_bias) +int intel_ring_pin(struct intel_ring *ring) { - enum i915_map_type map = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC; struct i915_vma *vma = ring->vma; + enum i915_map_type map = + HAS_LLC(vma->vm->i915) ? I915_MAP_WB : I915_MAP_WC; unsigned int flags; void *addr; int ret; GEM_BUG_ON(ring->vaddr); - flags = PIN_GLOBAL; - if (offset_bias) - flags |= PIN_OFFSET_BIAS | offset_bias; + + /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ + flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma); + if (vma->obj->stolen) flags |= PIN_MAPPABLE; else @@ -1045,7 +1048,7 @@ int intel_ring_pin(struct intel_ring *ring, return ret; } - ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags); + ret = i915_vma_pin(vma, 0, 0, flags); if (unlikely(ret)) return ret; @@ -1230,8 +1233,7 @@ static int __context_pin(struct intel_context *ce) return err; } - err = i915_vma_pin(vma, 0, I915_GTT_MIN_ALIGNMENT, - PIN_GLOBAL | PIN_HIGH); + err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); if (err) return err; @@ -1419,8 +1421,7 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine) goto err; } - /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ - err = intel_ring_pin(ring, engine->i915, I915_GTT_PAGE_SIZE); + err = intel_ring_pin(ring); if (err) goto err_ring; @@ -1706,9 +1707,29 @@ static int switch_context(struct i915_request *rq) } if (ppgtt) { + ret = engine->emit_flush(rq, EMIT_INVALIDATE); + if (ret) + goto err_mm; + ret = flush_pd_dir(rq); if (ret) goto err_mm; + + /* + * Not only do we need a full barrier (post-sync write) after + * invalidating the TLBs, but we need to wait a little bit + * longer. Whether this is merely delaying us, or the + * subsequent flush is a key part of serialising with the + * post-sync op, this extra pass appears vital before a + * mm switch! + */ + ret = engine->emit_flush(rq, EMIT_INVALIDATE); + if (ret) + goto err_mm; + + ret = engine->emit_flush(rq, EMIT_FLUSH); + if (ret) + goto err_mm; } if (ctx->remap_slice) { @@ -1946,7 +1967,7 @@ static void gen6_bsd_submit_request(struct i915_request *request) intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } -static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode) +static int mi_flush_dw(struct i915_request *rq, u32 flags) { u32 cmd, *cs; @@ -1956,7 +1977,8 @@ static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode) cmd = MI_FLUSH_DW; - /* We always require a command barrier so that subsequent + /* + * We always require a command barrier so that subsequent * commands, such as breadcrumb interrupts, are strictly ordered * wrt the contents of the write cache being flushed to memory * (and thus being coherent from the CPU). @@ -1964,22 +1986,33 @@ static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode) cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; /* - * Bspec vol 1c.5 - video engine command streamer: + * Bspec vol 1c.3 - blitter engine command streamer: * "If ENABLED, all TLBs will be invalidated once the flush * operation is complete. This bit is only valid when the * Post-Sync Operation field is a value of 1h or 3h." 
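intel_ring_pin() above drops its dev_priv and offset_bias parameters: the WB/WC mapping choice now comes from HAS_LLC(vma->vm->i915) and the pin bias from i915_ggtt_pin_bias(vma), both reachable from ring->vma. The fragment below is a toy illustration of that "derive it from the object" refactor; struct vma, struct ring and pin_bias() are invented stand-ins, not the i915 types.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for i915_vma / intel_ring; not the driver types. */
struct vma {
        bool has_llc;           /* decides WB vs WC mapping in the real code */
        unsigned int guard;     /* stands in for the GGTT pin bias           */
};

struct ring {
        struct vma *vma;
        void *vaddr;
};

static unsigned int pin_bias(const struct vma *vma)
{
        /* The driver derives this from the GGTT layout; here it is canned. */
        return vma->guard;
}

/*
 * After the refactor the caller passes only the object being pinned; the
 * mapping type and the offset bias are computed from ring->vma instead of
 * arriving as extra parameters.
 */
static int ring_pin(struct ring *ring)
{
        unsigned int flags = 0x1u /* "global" flag */ | pin_bias(ring->vma);
        const char *map = ring->vma->has_llc ? "WB" : "WC";

        printf("pin: flags=%#x map=%s\n", flags, map);
        ring->vaddr = ring;     /* pretend the mapping succeeded */
        return 0;
}

int main(void)
{
        struct vma v = { .has_llc = true, .guard = 4096 };
        struct ring r = { .vma = &v, .vaddr = NULL };

        return ring_pin(&r);
}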
*/ - if (mode & EMIT_INVALIDATE) - cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD; + cmd |= flags; *cs++ = cmd; *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT; *cs++ = 0; *cs++ = MI_NOOP; + intel_ring_advance(rq, cs); + return 0; } +static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags) +{ + return mi_flush_dw(rq, mode & EMIT_INVALIDATE ? invflags : 0); +} + +static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode) +{ + return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD); +} + static int hsw_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, @@ -1992,9 +2025,7 @@ hsw_emit_bb_start(struct i915_request *rq, return PTR_ERR(cs); *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ? - 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) | - (dispatch_flags & I915_DISPATCH_RS ? - MI_BATCH_RESOURCE_STREAMER : 0); + 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW); /* bit0-7 is the length on GEN6+ */ *cs++ = offset; intel_ring_advance(rq, cs); @@ -2026,36 +2057,7 @@ gen6_emit_bb_start(struct i915_request *rq, static int gen6_ring_flush(struct i915_request *rq, u32 mode) { - u32 cmd, *cs; - - cs = intel_ring_begin(rq, 4); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - cmd = MI_FLUSH_DW; - - /* We always require a command barrier so that subsequent - * commands, such as breadcrumb interrupts, are strictly ordered - * wrt the contents of the write cache being flushed to memory - * (and thus being coherent from the CPU). - */ - cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; - - /* - * Bspec vol 1c.3 - blitter engine command streamer: - * "If ENABLED, all TLBs will be invalidated once the flush - * operation is complete. This bit is only valid when the - * Post-Sync Operation field is a value of 1h or 3h." 
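The rewrite above folds the common MI_FLUSH_DW emission into mi_flush_dw(), with gen6_flush_dw() translating EMIT_INVALIDATE into the per-engine invalidate bits and gen6_bsd_ring_flush() becoming a one-line wrapper. A compilable sketch of that layering follows; the bit values are made up for illustration and are not the real command encoding.

#include <stdint.h>
#include <stdio.h>

/* Illustrative command bits; values are invented, not the MI_* encoding. */
#define MI_FLUSH_DW            (1u << 31)
#define MI_STORE_INDEX_OP      (1u << 21)
#define MI_INVALIDATE_TLB      (1u << 18)
#define MI_INVALIDATE_BSD      (1u << 7)

#define EMIT_INVALIDATE        (1u << 0)

/* Common body: every MI_FLUSH_DW carries the post-sync store as a barrier. */
static int mi_flush_dw(uint32_t flags)
{
        uint32_t cmd = MI_FLUSH_DW | MI_STORE_INDEX_OP | flags;

        printf("emit: %#010x 0x0 0x0 NOOP\n", (unsigned int)cmd);
        return 0;
}

/* Thin adapter: translate the generic mode into engine-specific bits. */
static int flush_dw(uint32_t mode, uint32_t invflags)
{
        return mi_flush_dw(mode & EMIT_INVALIDATE ? invflags : 0);
}

/* Per-engine flush hooks now differ only in which invalidate bits they pass. */
static int bsd_ring_flush(uint32_t mode)
{
        return flush_dw(mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD);
}

static int blt_ring_flush(uint32_t mode)
{
        return flush_dw(mode, MI_INVALIDATE_TLB);
}

int main(void)
{
        bsd_ring_flush(EMIT_INVALIDATE);
        blt_ring_flush(0);
        return 0;
}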
- */ - if (mode & EMIT_INVALIDATE) - cmd |= MI_INVALIDATE_TLB; - *cs++ = cmd; - *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT; - *cs++ = 0; - *cs++ = MI_NOOP; - intel_ring_advance(rq, cs); - - return 0; + return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB); } static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 0c2302d27931..2dfa585712c2 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -476,7 +476,6 @@ struct intel_engine_cs { unsigned int dispatch_flags); #define I915_DISPATCH_SECURE BIT(0) #define I915_DISPATCH_PINNED BIT(1) -#define I915_DISPATCH_RS BIT(2) void (*emit_breadcrumb)(struct i915_request *rq, u32 *cs); int emit_breadcrumb_sz; @@ -799,9 +798,7 @@ struct intel_ring * intel_engine_create_ring(struct intel_engine_cs *engine, struct i915_timeline *timeline, int size); -int intel_ring_pin(struct intel_ring *ring, - struct drm_i915_private *i915, - unsigned int offset_bias); +int intel_ring_pin(struct intel_ring *ring); void intel_ring_reset(struct intel_ring *ring, u32 tail); unsigned int intel_ring_update_space(struct intel_ring *ring); void intel_ring_unpin(struct intel_ring *ring); @@ -911,18 +908,15 @@ int intel_init_blt_ring_buffer(struct intel_engine_cs *engine); int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine); int intel_engine_stop_cs(struct intel_engine_cs *engine); +void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine); u64 intel_engine_get_active_head(const struct intel_engine_cs *engine); u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine); -static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine) -{ - return intel_read_status_page(engine, I915_GEM_HWS_INDEX); -} - static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine) { - /* We are only peeking at the tail of the submit queue (and not the + /* + * We are only peeking at the tail of the submit queue (and not the * queue itself) in order to gain a hint as to the current active * state of the engine. 
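intel_engine_last_submit() above is documented as a hint: it peeks at engine->timeline.seqno with READ_ONCE() instead of taking the timeline lock, accepting a possibly stale answer. The userspace analogue below uses a relaxed C11 atomic load for the same effect; struct timeline here is a stand-in, not the driver structure.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the engine timeline; only the seqno matters for this sketch. */
struct timeline {
        _Atomic uint32_t seqno;
};

/*
 * Hint-only peek: read the latest submitted seqno without taking any lock,
 * accepting that it may be stale by the time it is used. A relaxed atomic
 * load plays the role READ_ONCE() plays in the kernel code.
 */
static uint32_t last_submit(struct timeline *tl)
{
        return atomic_load_explicit(&tl->seqno, memory_order_relaxed);
}

int main(void)
{
        struct timeline tl;

        atomic_store(&tl.seqno, 41);
        printf("last submitted seqno (hint): %u\n",
               (unsigned int)last_submit(&tl));
        return 0;
}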
Callers are not expected to be taking * engine->timeline->lock, nor are they expected to be concerned @@ -932,6 +926,31 @@ static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine) return READ_ONCE(engine->timeline.seqno); } +static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine) +{ + return intel_read_status_page(engine, I915_GEM_HWS_INDEX); +} + +static inline bool intel_engine_signaled(struct intel_engine_cs *engine, + u32 seqno) +{ + return i915_seqno_passed(intel_engine_get_seqno(engine), seqno); +} + +static inline bool intel_engine_has_completed(struct intel_engine_cs *engine, + u32 seqno) +{ + GEM_BUG_ON(!seqno); + return intel_engine_signaled(engine, seqno); +} + +static inline bool intel_engine_has_started(struct intel_engine_cs *engine, + u32 seqno) +{ + GEM_BUG_ON(!seqno); + return intel_engine_signaled(engine, seqno - 1); +} + void intel_engine_get_instdone(struct intel_engine_cs *engine, struct intel_instdone *instdone); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 6b5aa3b074ec..480dadb1047b 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -52,10 +52,6 @@ bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, enum i915_power_well_id power_well_id); -static struct i915_power_well * -lookup_power_well(struct drm_i915_private *dev_priv, - enum i915_power_well_id power_well_id); - const char * intel_display_power_domain_str(enum intel_display_power_domain domain) { @@ -159,17 +155,17 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) static void intel_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - DRM_DEBUG_KMS("enabling %s\n", power_well->name); - power_well->ops->enable(dev_priv, power_well); + DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name); + power_well->desc->ops->enable(dev_priv, power_well); power_well->hw_enabled = true; } static void intel_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - DRM_DEBUG_KMS("disabling %s\n", power_well->name); + DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name); power_well->hw_enabled = false; - power_well->ops->disable(dev_priv, power_well); + power_well->desc->ops->disable(dev_priv, power_well); } static void intel_power_well_get(struct drm_i915_private *dev_priv, @@ -183,7 +179,7 @@ static void intel_power_well_put(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { WARN(!power_well->count, "Use count on power well %s is already zero", - power_well->name); + power_well->desc->name); if (!--power_well->count) intel_power_well_disable(dev_priv, power_well); @@ -213,7 +209,7 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv, is_enabled = true; for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) { - if (power_well->always_on) + if (power_well->desc->always_on) continue; if (!power_well->hw_enabled) { @@ -257,30 +253,6 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, return ret; } -/** - * intel_display_set_init_power - set the initial power domain state - * @dev_priv: i915 device instance - * @enable: whether to enable or disable the initial power domain state - * - * For simplicity our driver load/unload and system suspend/resume code assumes - * that all power domains are always enabled. This functions controls the state - * of this little hack. 
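The new intel_engine_signaled() / intel_engine_has_completed() / intel_engine_has_started() helpers above reduce to a wrap-safe seqno comparison (i915_seqno_passed) plus the convention that a request has started once the previous breadcrumb (seqno - 1) is visible. A self-contained sketch of that arithmetic, with the u32 wrap case exercised in main(); the function names are simplified, not the driver API.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Wrap-safe comparison: treat the difference as signed so the test keeps
 * working across the u32 wrap. Seqno 0 is reserved as "no seqno".
 */
static bool seqno_passed(uint32_t current, uint32_t seqno)
{
        return (int32_t)(current - seqno) >= 0;
}

static bool engine_signaled(uint32_t hws_seqno, uint32_t seqno)
{
        return seqno_passed(hws_seqno, seqno);
}

/* A request is complete once its own breadcrumb has been written... */
static bool request_completed(uint32_t hws_seqno, uint32_t seqno)
{
        assert(seqno != 0);
        return engine_signaled(hws_seqno, seqno);
}

/* ...and it has started once the previous request's breadcrumb is visible. */
static bool request_started(uint32_t hws_seqno, uint32_t seqno)
{
        assert(seqno != 0);
        return engine_signaled(hws_seqno, seqno - 1);
}

int main(void)
{
        uint32_t hws = 0xfffffffeu;     /* breadcrumb just before the wrap */

        printf("started   0xffffffff: %d\n", request_started(hws, 0xffffffffu));
        printf("completed 0xffffffff: %d\n", request_completed(hws, 0xffffffffu));
        printf("completed 0x1 (post-wrap): %d\n", request_completed(hws, 1));
        return 0;
}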
While the initial power domain state is enabled runtime - * pm is effectively disabled. - */ -void intel_display_set_init_power(struct drm_i915_private *dev_priv, - bool enable) -{ - if (dev_priv->power_domains.init_power_on == enable) - return; - - if (enable) - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); - else - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); - - dev_priv->power_domains.init_power_on = enable; -} - /* * Starting with Haswell, we have a "Power Down Well" that can be turned off * when not needed anymore. We have 4 registers that can request the power well @@ -323,26 +295,29 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv, static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - enum i915_power_well_id id = power_well->id; + const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; + int pw_idx = power_well->desc->hsw.idx; /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */ WARN_ON(intel_wait_for_register(dev_priv, - HSW_PWR_WELL_CTL_DRIVER(id), - HSW_PWR_WELL_CTL_STATE(id), - HSW_PWR_WELL_CTL_STATE(id), + regs->driver, + HSW_PWR_WELL_CTL_STATE(pw_idx), + HSW_PWR_WELL_CTL_STATE(pw_idx), 1)); } static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv, - enum i915_power_well_id id) + const struct i915_power_well_regs *regs, + int pw_idx) { - u32 req_mask = HSW_PWR_WELL_CTL_REQ(id); + u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx); u32 ret; - ret = I915_READ(HSW_PWR_WELL_CTL_BIOS(id)) & req_mask ? 1 : 0; - ret |= I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) & req_mask ? 2 : 0; - ret |= I915_READ(HSW_PWR_WELL_CTL_KVMR) & req_mask ? 4 : 0; - ret |= I915_READ(HSW_PWR_WELL_CTL_DEBUG(id)) & req_mask ? 8 : 0; + ret = I915_READ(regs->bios) & req_mask ? 1 : 0; + ret |= I915_READ(regs->driver) & req_mask ? 2 : 0; + if (regs->kvmr.reg) + ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0; + ret |= I915_READ(regs->debug) & req_mask ? 8 : 0; return ret; } @@ -350,7 +325,8 @@ static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv, static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - enum i915_power_well_id id = power_well->id; + const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; + int pw_idx = power_well->desc->hsw.idx; bool disabled; u32 reqs; @@ -363,14 +339,14 @@ static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv, * Skip the wait in case any of the request bits are set and print a * diagnostic message. 
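hsw_wait_for_power_well_disable() above polls for the state bit to clear but gives up early if another agent (BIOS, driver, KVMr, debug) still holds a request bit, printing which one; note the real code also skips the KVMr read when the platform defines no such register (the regs->kvmr.reg check). The toy version below models only that "wait, then diagnose the blockers" pattern; the registers are plain variables and the bit layout is invented.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Fake request/state registers; bit 0 of state_reg is the power-well state. */
static uint32_t bios_req  = 0x10;
static uint32_t drv_req   = 0x00;
static uint32_t kvmr_req  = 0x00;
static uint32_t debug_req = 0x00;
static uint32_t state_reg = 0x01;

static uint32_t requesters(void)
{
        uint32_t ret = 0;

        ret |= bios_req  ? 1 : 0;
        ret |= drv_req   ? 2 : 0;
        ret |= kvmr_req  ? 4 : 0;
        ret |= debug_req ? 8 : 0;
        return ret;
}

/*
 * Clear our own request, then wait for the state bit to drop. If some other
 * agent still requests the well, do not spin forever: report who is holding
 * it and move on, mirroring the diagnostic in the hunk above.
 */
static void wait_for_power_well_disable(void)
{
        int tries = 10;
        uint32_t reqs = 0;
        bool disabled = false;

        drv_req = 0;
        while (tries--) {
                disabled = !(state_reg & 1);
                reqs = requesters();
                if (disabled || reqs)
                        break;
        }

        if (disabled)
                return;

        printf("power well forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
               !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}

int main(void)
{
        wait_for_power_well_disable();
        return 0;
}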
*/ - wait_for((disabled = !(I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) & - HSW_PWR_WELL_CTL_STATE(id))) || - (reqs = hsw_power_well_requesters(dev_priv, id)), 1); + wait_for((disabled = !(I915_READ(regs->driver) & + HSW_PWR_WELL_CTL_STATE(pw_idx))) || + (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1); if (disabled) return; DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n", - power_well->name, + power_well->desc->name, !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8)); } @@ -386,14 +362,15 @@ static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv, static void hsw_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - enum i915_power_well_id id = power_well->id; - bool wait_fuses = power_well->hsw.has_fuses; + const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; + int pw_idx = power_well->desc->hsw.idx; + bool wait_fuses = power_well->desc->hsw.has_fuses; enum skl_power_gate uninitialized_var(pg); u32 val; if (wait_fuses) { - pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_TO_PG(id) : - SKL_PW_TO_PG(id); + pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) : + SKL_PW_CTL_IDX_TO_PG(pw_idx); /* * For PW1 we have to wait both for the PW0/PG0 fuse state * before enabling the power well and PW1/PG1's own fuse @@ -405,52 +382,55 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv, gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0); } - val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)); - I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), val | HSW_PWR_WELL_CTL_REQ(id)); + val = I915_READ(regs->driver); + I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx)); hsw_wait_for_power_well_enable(dev_priv, power_well); /* Display WA #1178: cnl */ if (IS_CANNONLAKE(dev_priv) && - (id == CNL_DISP_PW_AUX_B || id == CNL_DISP_PW_AUX_C || - id == CNL_DISP_PW_AUX_D || id == CNL_DISP_PW_AUX_F)) { - val = I915_READ(CNL_AUX_ANAOVRD1(id)); + pw_idx >= GLK_PW_CTL_IDX_AUX_B && + pw_idx <= CNL_PW_CTL_IDX_AUX_F) { + val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx)); val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS; - I915_WRITE(CNL_AUX_ANAOVRD1(id), val); + I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val); } if (wait_fuses) gen9_wait_for_power_well_fuses(dev_priv, pg); - hsw_power_well_post_enable(dev_priv, power_well->hsw.irq_pipe_mask, - power_well->hsw.has_vga); + hsw_power_well_post_enable(dev_priv, + power_well->desc->hsw.irq_pipe_mask, + power_well->desc->hsw.has_vga); } static void hsw_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - enum i915_power_well_id id = power_well->id; + const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; + int pw_idx = power_well->desc->hsw.idx; u32 val; - hsw_power_well_pre_disable(dev_priv, power_well->hsw.irq_pipe_mask); + hsw_power_well_pre_disable(dev_priv, + power_well->desc->hsw.irq_pipe_mask); - val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)); - I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), - val & ~HSW_PWR_WELL_CTL_REQ(id)); + val = I915_READ(regs->driver); + I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); hsw_wait_for_power_well_disable(dev_priv, power_well); } -#define ICL_AUX_PW_TO_PORT(pw) ((pw) - ICL_DISP_PW_AUX_A) +#define ICL_AUX_PW_TO_PORT(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A) static void icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - enum i915_power_well_id id = power_well->id; - enum port port = 
ICL_AUX_PW_TO_PORT(id); + const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; + int pw_idx = power_well->desc->hsw.idx; + enum port port = ICL_AUX_PW_TO_PORT(pw_idx); u32 val; - val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)); - I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), val | HSW_PWR_WELL_CTL_REQ(id)); + val = I915_READ(regs->driver); + I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx)); val = I915_READ(ICL_PORT_CL_DW12(port)); I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX); @@ -462,16 +442,16 @@ static void icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - enum i915_power_well_id id = power_well->id; - enum port port = ICL_AUX_PW_TO_PORT(id); + const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; + int pw_idx = power_well->desc->hsw.idx; + enum port port = ICL_AUX_PW_TO_PORT(pw_idx); u32 val; val = I915_READ(ICL_PORT_CL_DW12(port)); I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX); - val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)); - I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), - val & ~HSW_PWR_WELL_CTL_REQ(id)); + val = I915_READ(regs->driver); + I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); hsw_wait_for_power_well_disable(dev_priv, power_well); } @@ -484,22 +464,22 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - enum i915_power_well_id id = power_well->id; - u32 mask = HSW_PWR_WELL_CTL_REQ(id) | HSW_PWR_WELL_CTL_STATE(id); + const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; + int pw_idx = power_well->desc->hsw.idx; + u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) | + HSW_PWR_WELL_CTL_STATE(pw_idx); - return (I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) & mask) == mask; + return (I915_READ(regs->driver) & mask) == mask; } static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) { - enum i915_power_well_id id = SKL_DISP_PW_2; - WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9), "DC9 already programmed to be enabled.\n"); WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5, "DC5 still not disabled to enable DC9.\n"); - WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) & - HSW_PWR_WELL_CTL_REQ(id), + WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) & + HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2), "Power well 2 on.\n"); WARN_ONCE(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n"); @@ -668,6 +648,27 @@ static void assert_csr_loaded(struct drm_i915_private *dev_priv) WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n"); } +static struct i915_power_well * +lookup_power_well(struct drm_i915_private *dev_priv, + enum i915_power_well_id power_well_id) +{ + struct i915_power_well *power_well; + + for_each_power_well(dev_priv, power_well) + if (power_well->desc->id == power_well_id) + return power_well; + + /* + * It's not feasible to add error checking code to the callers since + * this condition really shouldn't happen and it doesn't even make sense + * to abort things like display initialization sequences. Just return + * the first power well and hope the WARN gets reported so we can fix + * our driver. 
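The relocated lookup_power_well() above no longer returns NULL on an unknown ID: it WARNs and falls back to the first power well, so callers such as display init sequences need no error handling they could not act on anyway. A minimal sketch of that lookup-with-fallback shape follows; the names and IDs are illustrative only.

#include <stdio.h>

enum well_id { WELL_ID_NONE = -1, WELL_PW_1 = 0, WELL_PW_2 = 1 };

struct well_desc {
        const char *name;
        enum well_id id;
};

static const struct well_desc wells[] = {
        { "always-on",    WELL_ID_NONE },
        { "power well 1", WELL_PW_1 },
        { "power well 2", WELL_PW_2 },
};

/*
 * Same shape as the new lookup_power_well(): rather than returning NULL and
 * forcing every caller to grow error handling, warn and fall back to the
 * first entry so the caller keeps working.
 */
static const struct well_desc *lookup_well(int id)
{
        for (unsigned int i = 0; i < sizeof(wells) / sizeof(wells[0]); i++)
                if (wells[i].id == id)
                        return &wells[i];

        fprintf(stderr, "WARN: power well %d not defined for this platform\n",
                id);
        return &wells[0];
}

int main(void)
{
        printf("found:    %s\n", lookup_well(WELL_PW_2)->name);
        printf("fallback: %s\n", lookup_well(42)->name);
        return 0;
}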
+ */ + WARN(1, "Power well %d not defined for this platform\n", power_well_id); + return &dev_priv->power_domains.power_wells[0]; +} + static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) { bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv, @@ -723,54 +724,57 @@ static void skl_enable_dc6(struct drm_i915_private *dev_priv) static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - enum i915_power_well_id id = power_well->id; - u32 mask = HSW_PWR_WELL_CTL_REQ(id); - u32 bios_req = I915_READ(HSW_PWR_WELL_CTL_BIOS(id)); + const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; + int pw_idx = power_well->desc->hsw.idx; + u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx); + u32 bios_req = I915_READ(regs->bios); /* Take over the request bit if set by BIOS. */ if (bios_req & mask) { - u32 drv_req = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)); + u32 drv_req = I915_READ(regs->driver); if (!(drv_req & mask)) - I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), drv_req | mask); - I915_WRITE(HSW_PWR_WELL_CTL_BIOS(id), bios_req & ~mask); + I915_WRITE(regs->driver, drv_req | mask); + I915_WRITE(regs->bios, bios_req & ~mask); } } static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - bxt_ddi_phy_init(dev_priv, power_well->bxt.phy); + bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy); } static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - bxt_ddi_phy_uninit(dev_priv, power_well->bxt.phy); + bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy); } static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - return bxt_ddi_phy_is_enabled(dev_priv, power_well->bxt.phy); + return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy); } static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv) { struct i915_power_well *power_well; - power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A); + power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A); if (power_well->count > 0) - bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy); + bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy); - power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC); + power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); if (power_well->count > 0) - bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy); + bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy); if (IS_GEMINILAKE(dev_priv)) { - power_well = lookup_power_well(dev_priv, GLK_DPIO_CMN_C); + power_well = lookup_power_well(dev_priv, + GLK_DISP_PW_DPIO_CMN_C); if (power_well->count > 0) - bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy); + bxt_ddi_phy_verify_state(dev_priv, + power_well->desc->bxt.phy); } } @@ -869,14 +873,14 @@ static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv, static void vlv_set_power_well(struct drm_i915_private *dev_priv, struct i915_power_well *power_well, bool enable) { - enum i915_power_well_id power_well_id = power_well->id; + int pw_idx = power_well->desc->vlv.idx; u32 mask; u32 state; u32 ctrl; - mask = PUNIT_PWRGT_MASK(power_well_id); - state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) : - PUNIT_PWRGT_PWR_GATE(power_well_id); + mask = PUNIT_PWRGT_MASK(pw_idx); + state = enable ? 
PUNIT_PWRGT_PWR_ON(pw_idx) : + PUNIT_PWRGT_PWR_GATE(pw_idx); mutex_lock(&dev_priv->pcu_lock); @@ -917,14 +921,14 @@ static void vlv_power_well_disable(struct drm_i915_private *dev_priv, static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - enum i915_power_well_id power_well_id = power_well->id; + int pw_idx = power_well->desc->vlv.idx; bool enabled = false; u32 mask; u32 state; u32 ctrl; - mask = PUNIT_PWRGT_MASK(power_well_id); - ctrl = PUNIT_PWRGT_PWR_ON(power_well_id); + mask = PUNIT_PWRGT_MASK(pw_idx); + ctrl = PUNIT_PWRGT_PWR_ON(pw_idx); mutex_lock(&dev_priv->pcu_lock); @@ -933,8 +937,8 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, * We only ever set the power-on and power-gate states, anything * else is unexpected. */ - WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) && - state != PUNIT_PWRGT_PWR_GATE(power_well_id)); + WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) && + state != PUNIT_PWRGT_PWR_GATE(pw_idx)); if (state == ctrl) enabled = true; @@ -1045,8 +1049,6 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D); - vlv_set_power_well(dev_priv, power_well, true); vlv_display_power_well_init(dev_priv); @@ -1055,8 +1057,6 @@ static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DISP2D); - vlv_display_power_well_deinit(dev_priv); vlv_set_power_well(dev_priv, power_well, false); @@ -1065,8 +1065,6 @@ static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv, static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC); - /* since ref/cri clock was enabled */ udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ @@ -1091,8 +1089,6 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, { enum pipe pipe; - WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC); - for_each_pipe(dev_priv, pipe) assert_pll_disabled(dev_priv, pipe); @@ -1104,32 +1100,14 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0)) -static struct i915_power_well * -lookup_power_well(struct drm_i915_private *dev_priv, - enum i915_power_well_id power_well_id) -{ - struct i915_power_domains *power_domains = &dev_priv->power_domains; - int i; - - for (i = 0; i < power_domains->power_well_count; i++) { - struct i915_power_well *power_well; - - power_well = &power_domains->power_wells[i]; - if (power_well->id == power_well_id) - return power_well; - } - - return NULL; -} - #define BITS_SET(val, bits) (((val) & (bits)) == (bits)) static void assert_chv_phy_status(struct drm_i915_private *dev_priv) { struct i915_power_well *cmn_bc = - lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC); + lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); struct i915_power_well *cmn_d = - lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D); + lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); u32 phy_control = dev_priv->chv_phy_control; u32 phy_status = 0; u32 phy_status_mask = 0xffffffff; @@ -1154,7 
+1132,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv) PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) | PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1)); - if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) { + if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) { phy_status |= PHY_POWERGOOD(DPIO_PHY0); /* this assumes override is only used to enable lanes */ @@ -1195,7 +1173,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv) phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1); } - if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) { + if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) { phy_status |= PHY_POWERGOOD(DPIO_PHY1); /* this assumes override is only used to enable lanes */ @@ -1239,10 +1217,10 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, enum pipe pipe; uint32_t tmp; - WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC && - power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D); + WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && + power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D); - if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) { + if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { pipe = PIPE_A; phy = DPIO_PHY0; } else { @@ -1270,7 +1248,7 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, DPIO_SUS_CLK_CONFIG_GATE_CLKREQ; vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp); - if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) { + if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1); tmp |= DPIO_DYNPWRDOWNEN_CH1; vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp); @@ -1301,10 +1279,10 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, { enum dpio_phy phy; - WARN_ON_ONCE(power_well->id != PUNIT_POWER_WELL_DPIO_CMN_BC && - power_well->id != PUNIT_POWER_WELL_DPIO_CMN_D); + WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && + power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D); - if (power_well->id == PUNIT_POWER_WELL_DPIO_CMN_BC) { + if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { phy = DPIO_PHY0; assert_pll_disabled(dev_priv, PIPE_A); assert_pll_disabled(dev_priv, PIPE_B); @@ -1516,8 +1494,6 @@ out: static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - WARN_ON_ONCE(power_well->id != CHV_DISP_PW_PIPE_A); - chv_set_pipe_power_well(dev_priv, power_well, true); vlv_display_power_well_init(dev_priv); @@ -1526,8 +1502,6 @@ static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - WARN_ON_ONCE(power_well->id != CHV_DISP_PW_PIPE_A); - vlv_display_power_well_deinit(dev_priv); chv_set_pipe_power_well(dev_priv, power_well, false); @@ -2063,13 +2037,13 @@ static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = { .is_enabled = vlv_power_well_enabled, }; -static struct i915_power_well i9xx_always_on_power_well[] = { +static const struct i915_power_well_desc i9xx_always_on_power_well[] = { { .name = "always-on", .always_on = 1, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, - .id = I915_DISP_PW_ALWAYS_ON, + .id = DISP_PW_ID_NONE, }, }; @@ -2080,19 +2054,19 @@ static const struct i915_power_well_ops i830_pipes_power_well_ops = { .is_enabled = i830_pipes_power_well_enabled, }; -static struct i915_power_well i830_power_wells[] = { +static 
const struct i915_power_well_desc i830_power_wells[] = { { .name = "always-on", .always_on = 1, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, - .id = I915_DISP_PW_ALWAYS_ON, + .id = DISP_PW_ID_NONE, }, { .name = "pipes", .domains = I830_PIPES_POWER_DOMAINS, .ops = &i830_pipes_power_well_ops, - .id = I830_DISP_PW_PIPES, + .id = DISP_PW_ID_NONE, }, }; @@ -2117,13 +2091,20 @@ static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = { .is_enabled = bxt_dpio_cmn_power_well_enabled, }; -static struct i915_power_well hsw_power_wells[] = { +static const struct i915_power_well_regs hsw_power_well_regs = { + .bios = HSW_PWR_WELL_CTL1, + .driver = HSW_PWR_WELL_CTL2, + .kvmr = HSW_PWR_WELL_CTL3, + .debug = HSW_PWR_WELL_CTL4, +}; + +static const struct i915_power_well_desc hsw_power_wells[] = { { .name = "always-on", .always_on = 1, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, - .id = I915_DISP_PW_ALWAYS_ON, + .id = DISP_PW_ID_NONE, }, { .name = "display", @@ -2131,18 +2112,20 @@ static struct i915_power_well hsw_power_wells[] = { .ops = &hsw_power_well_ops, .id = HSW_DISP_PW_GLOBAL, { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, .hsw.has_vga = true, }, }, }; -static struct i915_power_well bdw_power_wells[] = { +static const struct i915_power_well_desc bdw_power_wells[] = { { .name = "always-on", .always_on = 1, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, - .id = I915_DISP_PW_ALWAYS_ON, + .id = DISP_PW_ID_NONE, }, { .name = "display", @@ -2150,6 +2133,8 @@ static struct i915_power_well bdw_power_wells[] = { .ops = &hsw_power_well_ops, .id = HSW_DISP_PW_GLOBAL, { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), .hsw.has_vga = true, }, @@ -2177,19 +2162,22 @@ static const struct i915_power_well_ops vlv_dpio_power_well_ops = { .is_enabled = vlv_power_well_enabled, }; -static struct i915_power_well vlv_power_wells[] = { +static const struct i915_power_well_desc vlv_power_wells[] = { { .name = "always-on", .always_on = 1, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, - .id = I915_DISP_PW_ALWAYS_ON, + .id = DISP_PW_ID_NONE, }, { .name = "display", .domains = VLV_DISPLAY_POWER_DOMAINS, - .id = PUNIT_POWER_WELL_DISP2D, .ops = &vlv_display_power_well_ops, + .id = VLV_DISP_PW_DISP2D, + { + .vlv.idx = PUNIT_PWGT_IDX_DISP2D, + }, }, { .name = "dpio-tx-b-01", @@ -2198,7 +2186,10 @@ static struct i915_power_well vlv_power_wells[] = { VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, .ops = &vlv_dpio_power_well_ops, - .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01, + .id = DISP_PW_ID_NONE, + { + .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01, + }, }, { .name = "dpio-tx-b-23", @@ -2207,7 +2198,10 @@ static struct i915_power_well vlv_power_wells[] = { VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, .ops = &vlv_dpio_power_well_ops, - .id = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23, + .id = DISP_PW_ID_NONE, + { + .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23, + }, }, { .name = "dpio-tx-c-01", @@ -2216,7 +2210,10 @@ static struct i915_power_well vlv_power_wells[] = { VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, .ops = &vlv_dpio_power_well_ops, - .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01, + .id = DISP_PW_ID_NONE, + { + .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01, + }, }, { .name = "dpio-tx-c-23", @@ -2225,23 +2222,29 @@ static struct 
i915_power_well vlv_power_wells[] = { VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, .ops = &vlv_dpio_power_well_ops, - .id = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23, + .id = DISP_PW_ID_NONE, + { + .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23, + }, }, { .name = "dpio-common", .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS, - .id = PUNIT_POWER_WELL_DPIO_CMN_BC, .ops = &vlv_dpio_cmn_power_well_ops, + .id = VLV_DISP_PW_DPIO_CMN_BC, + { + .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, + }, }, }; -static struct i915_power_well chv_power_wells[] = { +static const struct i915_power_well_desc chv_power_wells[] = { { .name = "always-on", .always_on = 1, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, - .id = I915_DISP_PW_ALWAYS_ON, + .id = DISP_PW_ID_NONE, }, { .name = "display", @@ -2251,20 +2254,26 @@ static struct i915_power_well chv_power_wells[] = { * required for any pipe to work. */ .domains = CHV_DISPLAY_POWER_DOMAINS, - .id = CHV_DISP_PW_PIPE_A, .ops = &chv_pipe_power_well_ops, + .id = DISP_PW_ID_NONE, }, { .name = "dpio-common-bc", .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS, - .id = PUNIT_POWER_WELL_DPIO_CMN_BC, .ops = &chv_dpio_cmn_power_well_ops, + .id = VLV_DISP_PW_DPIO_CMN_BC, + { + .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, + }, }, { .name = "dpio-common-d", .domains = CHV_DPIO_CMN_D_POWER_DOMAINS, - .id = PUNIT_POWER_WELL_DPIO_CMN_D, .ops = &chv_dpio_cmn_power_well_ops, + .id = CHV_DISP_PW_DPIO_CMN_D, + { + .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D, + }, }, }; @@ -2275,18 +2284,18 @@ bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, bool ret; power_well = lookup_power_well(dev_priv, power_well_id); - ret = power_well->ops->is_enabled(dev_priv, power_well); + ret = power_well->desc->ops->is_enabled(dev_priv, power_well); return ret; } -static struct i915_power_well skl_power_wells[] = { +static const struct i915_power_well_desc skl_power_wells[] = { { .name = "always-on", .always_on = 1, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, - .id = I915_DISP_PW_ALWAYS_ON, + .id = DISP_PW_ID_NONE, }, { .name = "power well 1", @@ -2295,6 +2304,8 @@ static struct i915_power_well skl_power_wells[] = { .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_1, { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_PW_1, .hsw.has_fuses = true, }, }, @@ -2304,12 +2315,16 @@ static struct i915_power_well skl_power_wells[] = { .domains = 0, .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_MISC_IO, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_MISC_IO, + }, }, { .name = "DC off", .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS, .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_PW_DC_OFF, + .id = DISP_PW_ID_NONE, }, { .name = "power well 2", @@ -2317,6 +2332,8 @@ static struct i915_power_well skl_power_wells[] = { .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_2, { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_PW_2, .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), .hsw.has_vga = true, .hsw.has_fuses = true, @@ -2326,35 +2343,51 @@ static struct i915_power_well skl_power_wells[] = { .name = "DDI A/E IO power well", .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_DDI_A_E, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E, + }, }, { .name = "DDI B IO power well", .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_DDI_B, + .id = DISP_PW_ID_NONE, + { + .hsw.regs 
= &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_DDI_B, + }, }, { .name = "DDI C IO power well", .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_DDI_C, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_DDI_C, + }, }, { .name = "DDI D IO power well", .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_DDI_D, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_DDI_D, + }, }, }; -static struct i915_power_well bxt_power_wells[] = { +static const struct i915_power_well_desc bxt_power_wells[] = { { .name = "always-on", .always_on = 1, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, - .id = I915_DISP_PW_ALWAYS_ON, + .id = DISP_PW_ID_NONE, }, { .name = "power well 1", @@ -2362,6 +2395,8 @@ static struct i915_power_well bxt_power_wells[] = { .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_1, { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_PW_1, .hsw.has_fuses = true, }, }, @@ -2369,7 +2404,7 @@ static struct i915_power_well bxt_power_wells[] = { .name = "DC off", .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS, .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_PW_DC_OFF, + .id = DISP_PW_ID_NONE, }, { .name = "power well 2", @@ -2377,6 +2412,8 @@ static struct i915_power_well bxt_power_wells[] = { .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_2, { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_PW_2, .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), .hsw.has_vga = true, .hsw.has_fuses = true, @@ -2386,7 +2423,7 @@ static struct i915_power_well bxt_power_wells[] = { .name = "dpio-common-a", .domains = BXT_DPIO_CMN_A_POWER_DOMAINS, .ops = &bxt_dpio_cmn_power_well_ops, - .id = BXT_DPIO_CMN_A, + .id = BXT_DISP_PW_DPIO_CMN_A, { .bxt.phy = DPIO_PHY1, }, @@ -2395,20 +2432,20 @@ static struct i915_power_well bxt_power_wells[] = { .name = "dpio-common-bc", .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS, .ops = &bxt_dpio_cmn_power_well_ops, - .id = BXT_DPIO_CMN_BC, + .id = VLV_DISP_PW_DPIO_CMN_BC, { .bxt.phy = DPIO_PHY0, }, }, }; -static struct i915_power_well glk_power_wells[] = { +static const struct i915_power_well_desc glk_power_wells[] = { { .name = "always-on", .always_on = 1, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, - .id = I915_DISP_PW_ALWAYS_ON, + .id = DISP_PW_ID_NONE, }, { .name = "power well 1", @@ -2417,6 +2454,8 @@ static struct i915_power_well glk_power_wells[] = { .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_1, { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_PW_1, .hsw.has_fuses = true, }, }, @@ -2424,7 +2463,7 @@ static struct i915_power_well glk_power_wells[] = { .name = "DC off", .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS, .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_PW_DC_OFF, + .id = DISP_PW_ID_NONE, }, { .name = "power well 2", @@ -2432,6 +2471,8 @@ static struct i915_power_well glk_power_wells[] = { .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_2, { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_PW_2, .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), .hsw.has_vga = true, .hsw.has_fuses = true, @@ -2441,7 +2482,7 @@ static struct i915_power_well glk_power_wells[] = { .name = "dpio-common-a", .domains = GLK_DPIO_CMN_A_POWER_DOMAINS, .ops = &bxt_dpio_cmn_power_well_ops, - .id = BXT_DPIO_CMN_A, + .id = BXT_DISP_PW_DPIO_CMN_A, { .bxt.phy = DPIO_PHY1, }, @@ -2450,7 +2491,7 @@ static struct i915_power_well 
glk_power_wells[] = { .name = "dpio-common-b", .domains = GLK_DPIO_CMN_B_POWER_DOMAINS, .ops = &bxt_dpio_cmn_power_well_ops, - .id = BXT_DPIO_CMN_BC, + .id = VLV_DISP_PW_DPIO_CMN_BC, { .bxt.phy = DPIO_PHY0, }, @@ -2459,7 +2500,7 @@ static struct i915_power_well glk_power_wells[] = { .name = "dpio-common-c", .domains = GLK_DPIO_CMN_C_POWER_DOMAINS, .ops = &bxt_dpio_cmn_power_well_ops, - .id = GLK_DPIO_CMN_C, + .id = GLK_DISP_PW_DPIO_CMN_C, { .bxt.phy = DPIO_PHY2, }, @@ -2468,47 +2509,71 @@ static struct i915_power_well glk_power_wells[] = { .name = "AUX A", .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = GLK_DISP_PW_AUX_A, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = GLK_PW_CTL_IDX_AUX_A, + }, }, { .name = "AUX B", .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = GLK_DISP_PW_AUX_B, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = GLK_PW_CTL_IDX_AUX_B, + }, }, { .name = "AUX C", .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = GLK_DISP_PW_AUX_C, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = GLK_PW_CTL_IDX_AUX_C, + }, }, { .name = "DDI A IO power well", .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = GLK_DISP_PW_DDI_A, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = GLK_PW_CTL_IDX_DDI_A, + }, }, { .name = "DDI B IO power well", .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_DDI_B, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_DDI_B, + }, }, { .name = "DDI C IO power well", .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_DDI_C, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_DDI_C, + }, }, }; -static struct i915_power_well cnl_power_wells[] = { +static const struct i915_power_well_desc cnl_power_wells[] = { { .name = "always-on", .always_on = 1, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, - .id = I915_DISP_PW_ALWAYS_ON, + .id = DISP_PW_ID_NONE, }, { .name = "power well 1", @@ -2517,6 +2582,8 @@ static struct i915_power_well cnl_power_wells[] = { .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_1, { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_PW_1, .hsw.has_fuses = true, }, }, @@ -2524,31 +2591,47 @@ static struct i915_power_well cnl_power_wells[] = { .name = "AUX A", .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = CNL_DISP_PW_AUX_A, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = GLK_PW_CTL_IDX_AUX_A, + }, }, { .name = "AUX B", .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = CNL_DISP_PW_AUX_B, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = GLK_PW_CTL_IDX_AUX_B, + }, }, { .name = "AUX C", .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = CNL_DISP_PW_AUX_C, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = GLK_PW_CTL_IDX_AUX_C, + }, }, { .name = "AUX D", .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = CNL_DISP_PW_AUX_D, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = CNL_PW_CTL_IDX_AUX_D, + }, }, { .name = "DC off", .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS, .ops = 
&gen9_dc_off_power_well_ops, - .id = SKL_DISP_PW_DC_OFF, + .id = DISP_PW_ID_NONE, }, { .name = "power well 2", @@ -2556,6 +2639,8 @@ static struct i915_power_well cnl_power_wells[] = { .ops = &hsw_power_well_ops, .id = SKL_DISP_PW_2, { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_PW_2, .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), .hsw.has_vga = true, .hsw.has_fuses = true, @@ -2565,37 +2650,61 @@ static struct i915_power_well cnl_power_wells[] = { .name = "DDI A IO power well", .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = CNL_DISP_PW_DDI_A, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = GLK_PW_CTL_IDX_DDI_A, + }, }, { .name = "DDI B IO power well", .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_DDI_B, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_DDI_B, + }, }, { .name = "DDI C IO power well", .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_DDI_C, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_DDI_C, + }, }, { .name = "DDI D IO power well", .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_DDI_D, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_DDI_D, + }, }, { .name = "DDI F IO power well", .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = CNL_DISP_PW_DDI_F, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = CNL_PW_CTL_IDX_DDI_F, + }, }, { .name = "AUX F", .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = CNL_DISP_PW_AUX_F, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = CNL_PW_CTL_IDX_AUX_F, + }, }, }; @@ -2606,147 +2715,239 @@ static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = { .is_enabled = hsw_power_well_enabled, }; -static struct i915_power_well icl_power_wells[] = { +static const struct i915_power_well_regs icl_aux_power_well_regs = { + .bios = ICL_PWR_WELL_CTL_AUX1, + .driver = ICL_PWR_WELL_CTL_AUX2, + .debug = ICL_PWR_WELL_CTL_AUX4, +}; + +static const struct i915_power_well_regs icl_ddi_power_well_regs = { + .bios = ICL_PWR_WELL_CTL_DDI1, + .driver = ICL_PWR_WELL_CTL_DDI2, + .debug = ICL_PWR_WELL_CTL_DDI4, +}; + +static const struct i915_power_well_desc icl_power_wells[] = { { .name = "always-on", .always_on = 1, .domains = POWER_DOMAIN_MASK, .ops = &i9xx_always_on_power_well_ops, - .id = I915_DISP_PW_ALWAYS_ON, + .id = DISP_PW_ID_NONE, }, { .name = "power well 1", /* Handled by the DMC firmware */ .domains = 0, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_1, - .hsw.has_fuses = true, + .id = SKL_DISP_PW_1, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_PW_1, + .hsw.has_fuses = true, + }, }, { .name = "power well 2", .domains = ICL_PW_2_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_2, - .hsw.has_fuses = true, + .id = SKL_DISP_PW_2, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_PW_2, + .hsw.has_fuses = true, + }, }, { .name = "DC off", .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, .ops = &gen9_dc_off_power_well_ops, - .id = SKL_DISP_PW_DC_OFF, + .id = DISP_PW_ID_NONE, }, { .name = "power well 3", .domains = ICL_PW_3_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_3, - .hsw.irq_pipe_mask = BIT(PIPE_B), - 
.hsw.has_vga = true, - .hsw.has_fuses = true, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_PW_3, + .hsw.irq_pipe_mask = BIT(PIPE_B), + .hsw.has_vga = true, + .hsw.has_fuses = true, + }, }, { .name = "DDI A IO", .domains = ICL_DDI_IO_A_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_DDI_A, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_A, + }, }, { .name = "DDI B IO", .domains = ICL_DDI_IO_B_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_DDI_B, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_B, + }, }, { .name = "DDI C IO", .domains = ICL_DDI_IO_C_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_DDI_C, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_C, + }, }, { .name = "DDI D IO", .domains = ICL_DDI_IO_D_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_DDI_D, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_D, + }, }, { .name = "DDI E IO", .domains = ICL_DDI_IO_E_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_DDI_E, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_E, + }, }, { .name = "DDI F IO", .domains = ICL_DDI_IO_F_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_DDI_F, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_F, + }, }, { .name = "AUX A", .domains = ICL_AUX_A_IO_POWER_DOMAINS, .ops = &icl_combo_phy_aux_power_well_ops, - .id = ICL_DISP_PW_AUX_A, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_A, + }, }, { .name = "AUX B", .domains = ICL_AUX_B_IO_POWER_DOMAINS, .ops = &icl_combo_phy_aux_power_well_ops, - .id = ICL_DISP_PW_AUX_B, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_B, + }, }, { .name = "AUX C", .domains = ICL_AUX_C_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_AUX_C, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_C, + }, }, { .name = "AUX D", .domains = ICL_AUX_D_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_AUX_D, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_D, + }, }, { .name = "AUX E", .domains = ICL_AUX_E_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_AUX_E, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_E, + }, }, { .name = "AUX F", .domains = ICL_AUX_F_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_AUX_F, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_F, + }, }, { .name = "AUX TBT1", .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_AUX_TBT1, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1, + }, }, { .name = "AUX TBT2", .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_AUX_TBT2, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2, + }, }, { .name = "AUX TBT3", .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id 
= ICL_DISP_PW_AUX_TBT3, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3, + }, }, { .name = "AUX TBT4", .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_AUX_TBT4, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4, + }, }, { .name = "power well 4", .domains = ICL_PW_4_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = ICL_DISP_PW_4, - .hsw.has_fuses = true, - .hsw.irq_pipe_mask = BIT(PIPE_C), + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_PW_4, + .hsw.has_fuses = true, + .hsw.irq_pipe_mask = BIT(PIPE_C), + }, }, }; @@ -2809,26 +3010,41 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv, return mask; } -static void assert_power_well_ids_unique(struct drm_i915_private *dev_priv) +static int +__set_power_wells(struct i915_power_domains *power_domains, + const struct i915_power_well_desc *power_well_descs, + int power_well_count) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; - u64 power_well_ids; + u64 power_well_ids = 0; int i; - power_well_ids = 0; - for (i = 0; i < power_domains->power_well_count; i++) { - enum i915_power_well_id id = power_domains->power_wells[i].id; + power_domains->power_well_count = power_well_count; + power_domains->power_wells = + kcalloc(power_well_count, + sizeof(*power_domains->power_wells), + GFP_KERNEL); + if (!power_domains->power_wells) + return -ENOMEM; + + for (i = 0; i < power_well_count; i++) { + enum i915_power_well_id id = power_well_descs[i].id; + + power_domains->power_wells[i].desc = &power_well_descs[i]; + + if (id == DISP_PW_ID_NONE) + continue; WARN_ON(id >= sizeof(power_well_ids) * 8); WARN_ON(power_well_ids & BIT_ULL(id)); power_well_ids |= BIT_ULL(id); } + + return 0; } -#define set_power_wells(power_domains, __power_wells) ({ \ - (power_domains)->power_wells = (__power_wells); \ - (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \ -}) +#define set_power_wells(power_domains, __power_well_descs) \ + __set_power_wells(power_domains, __power_well_descs, \ + ARRAY_SIZE(__power_well_descs)) /** * intel_power_domains_init - initializes the power domain structures @@ -2840,6 +3056,7 @@ static void assert_power_well_ids_unique(struct drm_i915_private *dev_priv) int intel_power_domains_init(struct drm_i915_private *dev_priv) { struct i915_power_domains *power_domains = &dev_priv->power_domains; + int err; i915_modparams.disable_power_well = sanitize_disable_power_well_option(dev_priv, @@ -2856,15 +3073,15 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) * the disabling order is reversed. 
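__set_power_wells() above is the heart of the refactor: the per-platform tables become const i915_power_well_desc arrays, a runtime i915_power_well array is allocated per device with each entry pointing at its descriptor, and ID uniqueness is checked with a u64 bitmap while DISP_PW_ID_NONE entries are skipped. The standalone sketch below mirrors that shape with heavily reduced types and field sets; error codes and names are stand-ins.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DISP_PW_ID_NONE (-1)

/* Immutable, per-platform description (the real tables are much richer). */
struct well_desc {
        const char *name;
        int id;                 /* DISP_PW_ID_NONE if never looked up by id */
};

/* Mutable runtime state, allocated per device and pointing at its desc. */
struct well {
        const struct well_desc *desc;
        int count;
        int hw_enabled;
};

struct power_domains {
        struct well *wells;
        int well_count;
};

static int set_power_wells(struct power_domains *pd,
                           const struct well_desc *descs, int count)
{
        uint64_t ids = 0;

        pd->wells = calloc(count, sizeof(*pd->wells));
        if (!pd->wells)
                return -1;      /* -ENOMEM in the kernel version */
        pd->well_count = count;

        for (int i = 0; i < count; i++) {
                int id = descs[i].id;

                pd->wells[i].desc = &descs[i];

                if (id == DISP_PW_ID_NONE)
                        continue;       /* anonymous wells need no unique id */

                /* Every real id must fit the 64-bit bitmap and be unique. */
                if (id < 0 || id >= 64) {
                        fprintf(stderr, "WARN: id %d out of range\n", id);
                        continue;
                }
                if (ids & (1ull << id))
                        fprintf(stderr, "WARN: duplicate id %d\n", id);
                ids |= 1ull << id;
        }
        return 0;
}

static const struct well_desc platform_wells[] = {
        { "always-on",    DISP_PW_ID_NONE },
        { "power well 1", 0 },
        { "power well 2", 1 },
        { "DDI A IO",     DISP_PW_ID_NONE },
};

int main(void)
{
        struct power_domains pd;

        if (set_power_wells(&pd, platform_wells, 4))
                return 1;
        printf("%d wells, well[1] = %s\n", pd.well_count,
               pd.wells[1].desc->name);
        free(pd.wells);         /* intel_power_domains_cleanup() analogue */
        return 0;
}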
*/ if (IS_ICELAKE(dev_priv)) { - set_power_wells(power_domains, icl_power_wells); + err = set_power_wells(power_domains, icl_power_wells); } else if (IS_HASWELL(dev_priv)) { - set_power_wells(power_domains, hsw_power_wells); + err = set_power_wells(power_domains, hsw_power_wells); } else if (IS_BROADWELL(dev_priv)) { - set_power_wells(power_domains, bdw_power_wells); + err = set_power_wells(power_domains, bdw_power_wells); } else if (IS_GEN9_BC(dev_priv)) { - set_power_wells(power_domains, skl_power_wells); + err = set_power_wells(power_domains, skl_power_wells); } else if (IS_CANNONLAKE(dev_priv)) { - set_power_wells(power_domains, cnl_power_wells); + err = set_power_wells(power_domains, cnl_power_wells); /* * DDI and Aux IO are getting enabled for all ports @@ -2876,57 +3093,31 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) power_domains->power_well_count -= 2; } else if (IS_BROXTON(dev_priv)) { - set_power_wells(power_domains, bxt_power_wells); + err = set_power_wells(power_domains, bxt_power_wells); } else if (IS_GEMINILAKE(dev_priv)) { - set_power_wells(power_domains, glk_power_wells); + err = set_power_wells(power_domains, glk_power_wells); } else if (IS_CHERRYVIEW(dev_priv)) { - set_power_wells(power_domains, chv_power_wells); + err = set_power_wells(power_domains, chv_power_wells); } else if (IS_VALLEYVIEW(dev_priv)) { - set_power_wells(power_domains, vlv_power_wells); + err = set_power_wells(power_domains, vlv_power_wells); } else if (IS_I830(dev_priv)) { - set_power_wells(power_domains, i830_power_wells); + err = set_power_wells(power_domains, i830_power_wells); } else { - set_power_wells(power_domains, i9xx_always_on_power_well); + err = set_power_wells(power_domains, i9xx_always_on_power_well); } - assert_power_well_ids_unique(dev_priv); - - return 0; + return err; } /** - * intel_power_domains_fini - finalizes the power domain structures + * intel_power_domains_cleanup - clean up power domains resources * @dev_priv: i915 device instance * - * Finalizes the power domain structures for @dev_priv depending upon the - * supported platform. This function also disables runtime pm and ensures that - * the device stays powered up so that the driver can be reloaded. + * Release any resources acquired by intel_power_domains_init() */ -void intel_power_domains_fini(struct drm_i915_private *dev_priv) +void intel_power_domains_cleanup(struct drm_i915_private *dev_priv) { - struct device *kdev = &dev_priv->drm.pdev->dev; - - /* - * The i915.ko module is still not prepared to be loaded when - * the power well is not enabled, so just enable it in case - * we're going to unload/reload. - * The following also reacquires the RPM reference the core passed - * to the driver during loading, which is dropped in - * intel_runtime_pm_enable(). We have to hand back the control of the - * device to the core with this reference held. - */ - intel_display_set_init_power(dev_priv, true); - - /* Remove the refcount we took to keep power well support disabled. */ - if (!i915_modparams.disable_power_well) - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); - - /* - * Remove the refcount we took in intel_runtime_pm_enable() in case - * the platform doesn't support runtime PM. 
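A related change above: set_power_wells() turns from a statement macro into a thin wrapper around a function that can fail, so the allocation error propagates out of intel_power_domains_init() through the platform ladder, and the new intel_power_domains_cleanup() simply frees the array. A tiny sketch of that macro-to-fallible-function move, with invented names and a placeholder error code:

#include <stdio.h>

struct table {
        const int *items;
        int count;
};

/* Doing the work in a real function lets failures be reported... */
static int __set_table(struct table *t, const int *items, int count)
{
        t->items = items;
        t->count = count;
        return items ? 0 : -1;          /* stand-in for -ENOMEM */
}

/* ...while a thin macro keeps the ARRAY_SIZE convenience at the call site. */
#define ARRAY_SIZE(a)     ((int)(sizeof(a) / sizeof((a)[0])))
#define set_table(t, arr) __set_table(t, arr, ARRAY_SIZE(arr))

int main(void)
{
        static const int platform_table[] = { 1, 2, 3 };
        struct table t;
        int err = set_table(&t, platform_table);

        printf("err=%d count=%d\n", err, t.count);
        return err ? 1 : 0;
}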
- */ - if (!HAS_RUNTIME_PM(dev_priv)) - pm_runtime_put(kdev); + kfree(dev_priv->power_domains.power_wells); } static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) @@ -2936,9 +3127,9 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) mutex_lock(&power_domains->lock); for_each_power_well(dev_priv, power_well) { - power_well->ops->sync_hw(dev_priv, power_well); - power_well->hw_enabled = power_well->ops->is_enabled(dev_priv, - power_well); + power_well->desc->ops->sync_hw(dev_priv, power_well); + power_well->hw_enabled = + power_well->desc->ops->is_enabled(dev_priv, power_well); } mutex_unlock(&power_domains->lock); } @@ -3360,7 +3551,7 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, * The AUX IO power wells will be enabled on demand. */ mutex_lock(&power_domains->lock); - well = lookup_power_well(dev_priv, ICL_DISP_PW_1); + well = lookup_power_well(dev_priv, SKL_DISP_PW_1); intel_power_well_enable(dev_priv, well); mutex_unlock(&power_domains->lock); @@ -3372,10 +3563,6 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, /* 7. Setup MBUS. */ icl_mbus_init(dev_priv); - - /* 8. CHICKEN_DCPR_1 */ - I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | - CNL_DDI_CLOCK_REG_ACCESS_ON); } static void icl_display_core_uninit(struct drm_i915_private *dev_priv) @@ -3401,7 +3588,7 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv) * disabled at this point. */ mutex_lock(&power_domains->lock); - well = lookup_power_well(dev_priv, ICL_DISP_PW_1); + well = lookup_power_well(dev_priv, SKL_DISP_PW_1); intel_power_well_disable(dev_priv, well); mutex_unlock(&power_domains->lock); @@ -3416,9 +3603,9 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv) static void chv_phy_control_init(struct drm_i915_private *dev_priv) { struct i915_power_well *cmn_bc = - lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC); + lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); struct i915_power_well *cmn_d = - lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D); + lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); /* * DISPLAY_PHY_CONTROL can get corrupted if read. As a @@ -3441,7 +3628,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv) * override and set the lane powerdown bits accding to the * current lane status. 
*/ - if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) { + if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) { uint32_t status = I915_READ(DPLL(PIPE_A)); unsigned int mask; @@ -3472,7 +3659,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv) dev_priv->chv_phy_assert[DPIO_PHY0] = true; } - if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) { + if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) { uint32_t status = I915_READ(DPIO_PHY_STATUS); unsigned int mask; @@ -3503,20 +3690,20 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv) static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) { struct i915_power_well *cmn = - lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC); + lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); struct i915_power_well *disp2d = - lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D); + lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D); /* If the display might be already active skip this */ - if (cmn->ops->is_enabled(dev_priv, cmn) && - disp2d->ops->is_enabled(dev_priv, disp2d) && + if (cmn->desc->ops->is_enabled(dev_priv, cmn) && + disp2d->desc->ops->is_enabled(dev_priv, disp2d) && I915_READ(DPIO_CTL) & DPIO_CMNRST) return; DRM_DEBUG_KMS("toggling display PHY side reset\n"); /* cmnlane needs DPLL registers */ - disp2d->ops->enable(dev_priv, disp2d); + disp2d->desc->ops->enable(dev_priv, disp2d); /* * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx: @@ -3525,9 +3712,11 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) * Simply ungating isn't enough to reset the PHY enough to get * ports and lanes running. */ - cmn->ops->disable(dev_priv, cmn); + cmn->desc->ops->disable(dev_priv, cmn); } +static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv); + /** * intel_power_domains_init_hw - initialize hardware power domain state * @dev_priv: i915 device instance @@ -3535,9 +3724,14 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) * * This function initializes the hardware power domain state and enables all * power wells belonging to the INIT power domain. Power wells in other - * domains (and not in the INIT domain) are referenced or disabled during the - * modeset state HW readout. After that the reference count of each power well - * must match its HW enabled state, see intel_power_domains_verify_state(). + * domains (and not in the INIT domain) are referenced or disabled by + * intel_modeset_readout_hw_state(). After that the reference count of each + * power well must match its HW enabled state, see + * intel_power_domains_verify_state(). + * + * It will return with power domains disabled (to be enabled later by + * intel_power_domains_enable()) and must be paired with + * intel_power_domains_fini_hw(). */ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) { @@ -3563,30 +3757,117 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) mutex_unlock(&power_domains->lock); } - /* For now, we need the power well to be always enabled. */ - intel_display_set_init_power(dev_priv, true); + /* + * Keep all power wells enabled for any dependent HW access during + * initialization and to make sure we keep BIOS enabled display HW + * resources powered until display HW readout is complete. We drop + * this reference in intel_power_domains_enable(). + */ + intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); /* Disable power support if the user asked so. 
*/ if (!i915_modparams.disable_power_well) intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); intel_power_domains_sync_hw(dev_priv); + power_domains->initializing = false; } /** + * intel_power_domains_fini_hw - deinitialize hw power domain state + * @dev_priv: i915 device instance + * + * De-initializes the display power domain HW state. It also ensures that the + * device stays powered up so that the driver can be reloaded. + * + * It must be called with power domains already disabled (after a call to + * intel_power_domains_disable()) and must be paired with + * intel_power_domains_init_hw(). + */ +void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv) +{ + /* Keep the power well enabled, but cancel its rpm wakeref. */ + intel_runtime_pm_put(dev_priv); + + /* Remove the refcount we took to keep power well support disabled. */ + if (!i915_modparams.disable_power_well) + intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + + intel_power_domains_verify_state(dev_priv); +} + +/** + * intel_power_domains_enable - enable toggling of display power wells + * @dev_priv: i915 device instance + * + * Enable the ondemand enabling/disabling of the display power wells. Note that + * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled + * only at specific points of the display modeset sequence, thus they are not + * affected by the intel_power_domains_enable()/disable() calls. The purpose + * of these function is to keep the rest of power wells enabled until the end + * of display HW readout (which will acquire the power references reflecting + * the current HW state). + */ +void intel_power_domains_enable(struct drm_i915_private *dev_priv) +{ + intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + + intel_power_domains_verify_state(dev_priv); +} + +/** + * intel_power_domains_disable - disable toggling of display power wells + * @dev_priv: i915 device instance + * + * Disable the ondemand enabling/disabling of the display power wells. See + * intel_power_domains_enable() for which power wells this call controls. + */ +void intel_power_domains_disable(struct drm_i915_private *dev_priv) +{ + intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + + intel_power_domains_verify_state(dev_priv); +} + +/** * intel_power_domains_suspend - suspend power domain state * @dev_priv: i915 device instance + * @suspend_mode: specifies the target suspend state (idle, mem, hibernation) * * This function prepares the hardware power domain state before entering - * system suspend. It must be paired with intel_power_domains_init_hw(). + * system suspend. + * + * It must be called with power domains already disabled (after a call to + * intel_power_domains_disable()) and paired with intel_power_domains_resume(). */ -void intel_power_domains_suspend(struct drm_i915_private *dev_priv) +void intel_power_domains_suspend(struct drm_i915_private *dev_priv, + enum i915_drm_suspend_mode suspend_mode) { + struct i915_power_domains *power_domains = &dev_priv->power_domains; + + intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + + /* + * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9 + * support don't manually deinit the power domains. This also means the + * CSR/DMC firmware will stay active, it will power down any HW + * resources as required and also enable deeper system power states + * that would be blocked if the firmware was inactive. 
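
The new kerneldoc spells out a strict pairing: intel_power_domains_init_hw() returns holding an extra POWER_DOMAIN_INIT reference, intel_power_domains_enable() drops it once display HW readout is done, intel_power_domains_disable() takes it back before suspend or unload, and intel_power_domains_fini_hw() closes out init_hw(). A sketch of the ordering a load and suspend path would follow; only the pairing is taken from the patch, the wrapper functions are placeholders:

static void example_load(struct drm_i915_private *i915)
{
	intel_power_domains_init_hw(i915, false);  /* holds POWER_DOMAIN_INIT */
	/* ... display HW state readout takes its own references ... */
	intel_power_domains_enable(i915);          /* drops the init reference */
}

static void example_suspend(struct drm_i915_private *i915,
			    enum i915_drm_suspend_mode mode)
{
	intel_power_domains_disable(i915);         /* re-takes the init reference */
	intel_power_domains_suspend(i915, mode);
}
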
+ */ + if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC9) && + suspend_mode == I915_DRM_SUSPEND_IDLE && + dev_priv->csr.dmc_payload != NULL) { + intel_power_domains_verify_state(dev_priv); + return; + } + /* * Even if power well support was disabled we still want to disable - * power wells while we are system suspended. + * power wells if power domains must be deinitialized for suspend. */ - if (!i915_modparams.disable_power_well) + if (!i915_modparams.disable_power_well) { intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + intel_power_domains_verify_state(dev_priv); + } if (IS_ICELAKE(dev_priv)) icl_display_core_uninit(dev_priv); @@ -3596,8 +3877,36 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv) skl_display_core_uninit(dev_priv); else if (IS_GEN9_LP(dev_priv)) bxt_display_core_uninit(dev_priv); + + power_domains->display_core_suspended = true; } +/** + * intel_power_domains_resume - resume power domain state + * @dev_priv: i915 device instance + * + * This function resume the hardware power domain state during system resume. + * + * It will return with power domain support disabled (to be enabled later by + * intel_power_domains_enable()) and must be paired with + * intel_power_domains_suspend(). + */ +void intel_power_domains_resume(struct drm_i915_private *dev_priv) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + + if (power_domains->display_core_suspended) { + intel_power_domains_init_hw(dev_priv, true); + power_domains->display_core_suspended = false; + } else { + intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + } + + intel_power_domains_verify_state(dev_priv); +} + +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) + static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv) { struct i915_power_domains *power_domains = &dev_priv->power_domains; @@ -3607,9 +3916,9 @@ static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv) enum intel_display_power_domain domain; DRM_DEBUG_DRIVER("%-25s %d\n", - power_well->name, power_well->count); + power_well->desc->name, power_well->count); - for_each_power_domain(domain, power_well->domains) + for_each_power_domain(domain, power_well->desc->domains) DRM_DEBUG_DRIVER(" %-23s %d\n", intel_display_power_domain_str(domain), power_domains->domain_use_count[domain]); @@ -3626,7 +3935,7 @@ static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv) * acquiring reference counts for any power wells in use and disabling the * ones left on by BIOS but not required by any active output. */ -void intel_power_domains_verify_state(struct drm_i915_private *dev_priv) +static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv) { struct i915_power_domains *power_domains = &dev_priv->power_domains; struct i915_power_well *power_well; @@ -3645,22 +3954,25 @@ void intel_power_domains_verify_state(struct drm_i915_private *dev_priv) * and PW1 power wells) are under FW control, so ignore them, * since their state can change asynchronously. 
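
intel_runtime_pm_enable() now opts the device out of the PM core's direct-complete optimization so the driver's own suspend and resume handlers always run. Any driver with a similar mismatch between its runtime and system suspend paths can set the same flag during probe; a minimal sketch, with the wrapper function itself being hypothetical:

static void example_setup_pm(struct pci_dev *pdev)
{
	/*
	 * Without this the PM core may skip our system suspend/resume
	 * callbacks entirely when the device is already runtime suspended.
	 */
	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
}
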
*/ - if (!power_well->domains) + if (!power_well->desc->domains) continue; - enabled = power_well->ops->is_enabled(dev_priv, power_well); - if ((power_well->count || power_well->always_on) != enabled) + enabled = power_well->desc->ops->is_enabled(dev_priv, + power_well); + if ((power_well->count || power_well->desc->always_on) != + enabled) DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)", - power_well->name, power_well->count, enabled); + power_well->desc->name, + power_well->count, enabled); domains_count = 0; - for_each_power_domain(domain, power_well->domains) + for_each_power_domain(domain, power_well->desc->domains) domains_count += power_domains->domain_use_count[domain]; if (power_well->count != domains_count) { DRM_ERROR("power well %s refcount/domain refcount mismatch " "(refcount %d/domains refcount %d)\n", - power_well->name, power_well->count, + power_well->desc->name, power_well->count, domains_count); dump_domain_info = true; } @@ -3678,6 +3990,14 @@ void intel_power_domains_verify_state(struct drm_i915_private *dev_priv) mutex_unlock(&power_domains->lock); } +#else + +static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv) +{ +} + +#endif + /** * intel_runtime_pm_get - grab a runtime pm reference * @dev_priv: i915 device instance @@ -3791,14 +4111,24 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv) * This function enables runtime pm at the end of the driver load sequence. * * Note that this function does currently not enable runtime pm for the - * subordinate display power domains. That is only done on the first modeset - * using intel_display_set_init_power(). + * subordinate display power domains. That is done by + * intel_power_domains_enable(). */ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv) { struct pci_dev *pdev = dev_priv->drm.pdev; struct device *kdev = &pdev->dev; + /* + * Disable the system suspend direct complete optimization, which can + * leave the device suspended skipping the driver's suspend handlers + * if the device was already runtime suspended. This is needed due to + * the difference in our runtime and system suspend sequence and + * becaue the HDA driver may require us to enable the audio power + * domain during system suspend. 
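
intel_power_domains_verify_state() keeps its two consistency rules: a well must be enabled in hardware exactly when it is refcounted or always-on, and its refcount must equal the sum of the use counts of the domains it powers; the check is now compiled only under CONFIG_DRM_I915_DEBUG_RUNTIME_PM. A standalone sketch of the same arithmetic on a simplified structure (names and domain count are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define N_DOMAINS 4

struct well {
	const char *name;
	unsigned int domains;           /* bitmask of domains it powers */
	int count;                      /* references held on the well */
	bool always_on;
	bool hw_enabled;
};

static bool verify(const struct well *w, const int domain_use[N_DOMAINS])
{
	int expected = 0, d;

	if ((w->count || w->always_on) != w->hw_enabled) {
		printf("%s: refcount/HW state mismatch\n", w->name);
		return false;
	}

	for (d = 0; d < N_DOMAINS; d++)
		if (w->domains & (1u << d))
			expected += domain_use[d];

	if (w->count != expected) {
		printf("%s: refcount %d != domains refcount %d\n",
		       w->name, w->count, expected);
		return false;
	}

	return true;
}

int main(void)
{
	int use[N_DOMAINS] = { 1, 0, 0, 0 };
	struct well w = { "PW 1", 0x1, 1, false, true };

	printf("consistent: %d\n", verify(&w, use));
	return 0;
}
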
+ */ + dev_pm_set_driver_flags(kdev, DPM_FLAG_NEVER_SKIP); + pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */ pm_runtime_mark_last_busy(kdev); @@ -3825,3 +4155,18 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv) */ pm_runtime_put_autosuspend(kdev); } + +void intel_runtime_pm_disable(struct drm_i915_private *dev_priv) +{ + struct pci_dev *pdev = dev_priv->drm.pdev; + struct device *kdev = &pdev->dev; + + /* Transfer rpm ownership back to core */ + WARN(pm_runtime_get_sync(&dev_priv->drm.pdev->dev) < 0, + "Failed to pass rpm ownership back to core\n"); + + pm_runtime_dont_use_autosuspend(kdev); + + if (!HAS_RUNTIME_PM(dev_priv)) + pm_runtime_put(kdev); +} diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index f7026e887fa9..9600ccfc5b76 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -83,6 +83,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI); DEFINE_WAIT(wait); + u32 psr_status; vblank_start = adjusted_mode->crtc_vblank_start; if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) @@ -104,8 +105,9 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) * VBL interrupts will start the PSR exit and prevent a PSR * re-entry as well. */ - if (intel_psr_wait_for_idle(new_crtc_state)) - DRM_ERROR("PSR idle timed out, atomic update may fail\n"); + if (intel_psr_wait_for_idle(new_crtc_state, &psr_status)) + DRM_ERROR("PSR idle timed out 0x%x, atomic update may fail\n", + psr_status); local_irq_disable(); @@ -957,10 +959,10 @@ g4x_plane_get_hw_state(struct intel_plane *plane, } static int -intel_check_sprite_plane(struct intel_plane *plane, - struct intel_crtc_state *crtc_state, +intel_check_sprite_plane(struct intel_crtc_state *crtc_state, struct intel_plane_state *state) { + struct intel_plane *plane = to_intel_plane(state->base.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_framebuffer *fb = state->base.fb; @@ -1407,8 +1409,7 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_XBGR8888: case DRM_FORMAT_ARGB8888: case DRM_FORMAT_ABGR8888: - if (modifier == I915_FORMAT_MOD_Yf_TILED_CCS || - modifier == I915_FORMAT_MOD_Y_TILED_CCS) + if (is_ccs_modifier(modifier)) return true; /* fall through */ case DRM_FORMAT_RGB565: diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c index 6e8e0b546743..fd496416087c 100644 --- a/drivers/gpu/drm/i915/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/intel_uc_fw.c @@ -222,7 +222,7 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, goto fail; } - ggtt_pin_bias = to_i915(uc_fw->obj->base.dev)->guc.ggtt_pin_bias; + ggtt_pin_bias = to_i915(uc_fw->obj->base.dev)->ggtt.pin_bias; vma = i915_gem_object_ggtt_pin(uc_fw->obj, NULL, 0, 0, PIN_OFFSET_BIAS | ggtt_pin_bias); if (IS_ERR(vma)) { diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 50b39aa4ffb8..3ad302c66254 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -283,14 +283,24 @@ fw_domains_reset(struct drm_i915_private *i915, fw_domain_reset(i915, d); } +static inline u32 gt_thread_status(struct drm_i915_private *dev_priv) +{ + u32 val; + + val = __raw_i915_read32(dev_priv, 
GEN6_GT_THREAD_STATUS_REG); + val &= GEN6_GT_THREAD_STATUS_CORE_MASK; + + return val; +} + static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) { - /* w/a for a sporadic read returning 0 by waiting for the GT + /* + * w/a for a sporadic read returning 0 by waiting for the GT * thread to wake up. */ - if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & - GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500)) - DRM_ERROR("GT thread status wait timed out\n"); + WARN_ONCE(wait_for_atomic_us(gt_thread_status(dev_priv) == 0, 5000), + "GT thread status wait timed out\n"); } static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv, @@ -1729,7 +1739,7 @@ static void gen3_stop_engine(struct intel_engine_cs *engine) } static void i915_stop_engines(struct drm_i915_private *dev_priv, - unsigned engine_mask) + unsigned int engine_mask) { struct intel_engine_cs *engine; enum intel_engine_id id; @@ -1749,7 +1759,9 @@ static bool i915_in_reset(struct pci_dev *pdev) return gdrst & GRDOM_RESET_STATUS; } -static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) +static int i915_do_reset(struct drm_i915_private *dev_priv, + unsigned int engine_mask, + unsigned int retry) { struct pci_dev *pdev = dev_priv->drm.pdev; int err; @@ -1776,7 +1788,9 @@ static bool g4x_reset_complete(struct pci_dev *pdev) return (gdrst & GRDOM_RESET_ENABLE) == 0; } -static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) +static int g33_do_reset(struct drm_i915_private *dev_priv, + unsigned int engine_mask, + unsigned int retry) { struct pci_dev *pdev = dev_priv->drm.pdev; @@ -1784,7 +1798,9 @@ static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) return wait_for(g4x_reset_complete(pdev), 500); } -static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) +static int g4x_do_reset(struct drm_i915_private *dev_priv, + unsigned int engine_mask, + unsigned int retry) { struct pci_dev *pdev = dev_priv->drm.pdev; int ret; @@ -1821,7 +1837,8 @@ out: } static int ironlake_do_reset(struct drm_i915_private *dev_priv, - unsigned engine_mask) + unsigned int engine_mask, + unsigned int retry) { int ret; @@ -1877,6 +1894,7 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv, * gen6_reset_engines - reset individual engines * @dev_priv: i915 device * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset + * @retry: the count of of previous attempts to reset. * * This function will reset the individual engines that are set in engine_mask. * If you provide ALL_ENGINES as mask, full global domain reset will be issued. @@ -1887,7 +1905,8 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv, * Returns 0 on success, nonzero on error. */ static int gen6_reset_engines(struct drm_i915_private *dev_priv, - unsigned engine_mask) + unsigned int engine_mask, + unsigned int retry) { struct intel_engine_cs *engine; const u32 hw_engine_mask[I915_NUM_ENGINES] = { @@ -1926,7 +1945,7 @@ static int gen6_reset_engines(struct drm_i915_private *dev_priv, * Returns 0 on success, nonzero on error. 
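
The uncore change factors the masked GEN6_GT_THREAD_STATUS_REG read into gt_thread_status() and turns the timeout into a one-shot warning with a larger 5 ms budget. Underneath it is the usual poll-until-clear-with-deadline idiom; a standalone sketch with the register read mocked out and an illustrative mask:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint32_t read_status(void)
{
	return 0;       /* pretend the GT threads are already idle */
}

/* Poll until (status & mask) == 0 or the timeout expires. */
static bool wait_for_clear(uint32_t mask, long timeout_us)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if ((read_status() & mask) == 0)
			return true;
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000000L +
		    (now.tv_nsec - start.tv_nsec) / 1000L > timeout_us)
			return false;
	}
}

int main(void)
{
	if (!wait_for_clear(0x7, 5000))
		fprintf(stderr, "GT thread status wait timed out\n");
	return 0;
}
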
*/ static int gen11_reset_engines(struct drm_i915_private *dev_priv, - unsigned engine_mask) + unsigned int engine_mask) { struct intel_engine_cs *engine; const u32 hw_engine_mask[I915_NUM_ENGINES] = { @@ -2066,7 +2085,7 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv, return ret; } -static int gen8_reset_engine_start(struct intel_engine_cs *engine) +static int gen8_engine_reset_prepare(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; int ret; @@ -2086,7 +2105,7 @@ static int gen8_reset_engine_start(struct intel_engine_cs *engine) return ret; } -static void gen8_reset_engine_cancel(struct intel_engine_cs *engine) +static void gen8_engine_reset_cancel(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; @@ -2094,33 +2113,56 @@ static void gen8_reset_engine_cancel(struct intel_engine_cs *engine) _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)); } +static int reset_engines(struct drm_i915_private *i915, + unsigned int engine_mask, + unsigned int retry) +{ + if (INTEL_GEN(i915) >= 11) + return gen11_reset_engines(i915, engine_mask); + else + return gen6_reset_engines(i915, engine_mask, retry); +} + static int gen8_reset_engines(struct drm_i915_private *dev_priv, - unsigned engine_mask) + unsigned int engine_mask, + unsigned int retry) { struct intel_engine_cs *engine; + const bool reset_non_ready = retry >= 1; unsigned int tmp; int ret; for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { - if (gen8_reset_engine_start(engine)) { - ret = -EIO; - goto not_ready; - } + ret = gen8_engine_reset_prepare(engine); + if (ret && !reset_non_ready) + goto skip_reset; + + /* + * If this is not the first failed attempt to prepare, + * we decide to proceed anyway. + * + * By doing so we risk context corruption and with + * some gens (kbl), possible system hang if reset + * happens during active bb execution. + * + * We rather take context corruption instead of + * failed reset with a wedged driver/gpu. And + * active bb execution case should be covered by + * i915_stop_engines we have before the reset. + */ } - if (INTEL_GEN(dev_priv) >= 11) - ret = gen11_reset_engines(dev_priv, engine_mask); - else - ret = gen6_reset_engines(dev_priv, engine_mask); + ret = reset_engines(dev_priv, engine_mask, retry); -not_ready: +skip_reset: for_each_engine_masked(engine, dev_priv, engine_mask, tmp) - gen8_reset_engine_cancel(engine); + gen8_engine_reset_cancel(engine); return ret; } -typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask); +typedef int (*reset_func)(struct drm_i915_private *, + unsigned int engine_mask, unsigned int retry); static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv) { @@ -2143,12 +2185,15 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv) return NULL; } -int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) +int intel_gpu_reset(struct drm_i915_private *dev_priv, + const unsigned int engine_mask) { reset_func reset = intel_get_gpu_reset(dev_priv); - int retry; + unsigned int retry; int ret; + GEM_BUG_ON(!engine_mask); + /* * We want to perform per-engine reset from atomic context (e.g. * softirq), which imposes the constraint that we cannot sleep. 
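
gen8_reset_engines() now treats reset preparation as advisory after the first attempt: with retry == 0 an engine that never acknowledges the reset request aborts the whole operation, while later retries accept the risk of context corruption and reset anyway, with intel_gpu_reset() supplying the retry count from its outer loop. A standalone sketch of that escalation policy (engine readiness is mocked):

#include <stdbool.h>
#include <stdio.h>

static bool engine_ready(int engine)
{
	return engine != 2;     /* pretend engine 2 never acks the request */
}

/*
 * First attempt (retry == 0) bails out if any engine is not ready;
 * later attempts proceed anyway rather than wedge the GPU.
 */
static int try_reset(unsigned int mask, unsigned int retry)
{
	bool reset_non_ready = retry >= 1;
	int engine;

	for (engine = 0; engine < 4; engine++) {
		if (!(mask & (1u << engine)))
			continue;
		if (!engine_ready(engine) && !reset_non_ready)
			return -1;      /* skip the reset this time around */
	}

	printf("resetting mask 0x%x on attempt %u\n", mask, retry);
	return 0;
}

int main(void)
{
	unsigned int retry;

	for (retry = 0; retry < 3; retry++)
		if (try_reset(0xf, retry) == 0)
			break;
	return 0;
}
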
@@ -2190,8 +2235,9 @@ int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) ret = -ENODEV; if (reset) { - GEM_TRACE("engine_mask=%x\n", engine_mask); - ret = reset(dev_priv, engine_mask); + ret = reset(dev_priv, engine_mask, retry); + GEM_TRACE("engine_mask=%x, ret=%d, retry=%d\n", + engine_mask, ret, retry); } if (ret != -ETIMEDOUT || engine_mask != ALL_ENGINES) break; @@ -2237,20 +2283,28 @@ bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv) bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv) { - if (unlikely(i915_modparams.mmio_debug || - dev_priv->uncore.unclaimed_mmio_check <= 0)) - return false; + bool ret = false; + + spin_lock_irq(&dev_priv->uncore.lock); + + if (unlikely(dev_priv->uncore.unclaimed_mmio_check <= 0)) + goto out; if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) { - DRM_DEBUG("Unclaimed register detected, " - "enabling oneshot unclaimed register reporting. " - "Please use i915.mmio_debug=N for more information.\n"); - i915_modparams.mmio_debug++; + if (!i915_modparams.mmio_debug) { + DRM_DEBUG("Unclaimed register detected, " + "enabling oneshot unclaimed register reporting. " + "Please use i915.mmio_debug=N for more information.\n"); + i915_modparams.mmio_debug++; + } dev_priv->uncore.unclaimed_mmio_check--; - return true; + ret = true; } - return false; +out: + spin_unlock_irq(&dev_priv->uncore.lock); + + return ret; } static enum forcewake_domains diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c index 74bf76f3fddc..92cb82dd0c07 100644 --- a/drivers/gpu/drm/i915/intel_wopcm.c +++ b/drivers/gpu/drm/i915/intel_wopcm.c @@ -163,8 +163,14 @@ int intel_wopcm_init(struct intel_wopcm *wopcm) u32 guc_wopcm_rsvd; int err; + if (!USES_GUC(dev_priv)) + return 0; + GEM_BUG_ON(!wopcm->size); + if (i915_inject_load_failure()) + return -E2BIG; + if (guc_fw_size >= wopcm->size) { DRM_ERROR("GuC FW (%uKiB) is too big to fit in WOPCM.", guc_fw_size / 1024); diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c index 7efb326badcd..e272127783fe 100644 --- a/drivers/gpu/drm/i915/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/selftests/huge_pages.c @@ -906,7 +906,11 @@ gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val) if (IS_ERR(obj)) return ERR_CAST(obj); - cmd = i915_gem_object_pin_map(obj, I915_MAP_WB); + err = i915_gem_object_set_to_wc_domain(obj, true); + if (err) + goto err; + + cmd = i915_gem_object_pin_map(obj, I915_MAP_WC); if (IS_ERR(cmd)) { err = PTR_ERR(cmd); goto err; @@ -936,13 +940,10 @@ gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val) } *cmd = MI_BATCH_BUFFER_END; + i915_gem_chipset_flush(i915); i915_gem_object_unpin_map(obj); - err = i915_gem_object_set_to_gtt_domain(obj, false); - if (err) - goto err; - batch = i915_vma_instance(obj, vma->vm, NULL); if (IS_ERR(batch)) { err = PTR_ERR(batch); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c new file mode 100644 index 000000000000..d0aa19d17653 --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -0,0 +1,221 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + */ + +#include <linux/random.h> + +#include "../i915_selftest.h" + +#include "mock_context.h" +#include "igt_flush_test.h" + +static int switch_to_context(struct drm_i915_private *i915, + struct i915_gem_context *ctx) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + 
intel_runtime_pm_get(i915); + + for_each_engine(engine, i915, id) { + struct i915_request *rq; + + rq = i915_request_alloc(engine, ctx); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } + + i915_request_add(rq); + } + + intel_runtime_pm_put(i915); + + return err; +} + +static void trash_stolen(struct drm_i915_private *i915) +{ + struct i915_ggtt *ggtt = &i915->ggtt; + const u64 slot = ggtt->error_capture.start; + const resource_size_t size = resource_size(&i915->dsm); + unsigned long page; + u32 prng = 0x12345678; + + for (page = 0; page < size; page += PAGE_SIZE) { + const dma_addr_t dma = i915->dsm.start + page; + u32 __iomem *s; + int x; + + ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0); + + s = io_mapping_map_atomic_wc(&ggtt->iomap, slot); + for (x = 0; x < PAGE_SIZE / sizeof(u32); x++) { + prng = next_pseudo_random32(prng); + iowrite32(prng, &s[x]); + } + io_mapping_unmap_atomic(s); + } + + ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE); +} + +static void simulate_hibernate(struct drm_i915_private *i915) +{ + intel_runtime_pm_get(i915); + + /* + * As a final sting in the tail, invalidate stolen. Under a real S4, + * stolen is lost and needs to be refilled on resume. However, under + * CI we merely do S4-device testing (as full S4 is too unreliable + * for automated testing across a cluster), so to simulate the effect + * of stolen being trashed across S4, we trash it ourselves. + */ + trash_stolen(i915); + + intel_runtime_pm_put(i915); +} + +static int pm_prepare(struct drm_i915_private *i915) +{ + int err = 0; + + if (i915_gem_suspend(i915)) { + pr_err("i915_gem_suspend failed\n"); + err = -EINVAL; + } + + return err; +} + +static void pm_suspend(struct drm_i915_private *i915) +{ + intel_runtime_pm_get(i915); + + i915_gem_suspend_gtt_mappings(i915); + i915_gem_suspend_late(i915); + + intel_runtime_pm_put(i915); +} + +static void pm_hibernate(struct drm_i915_private *i915) +{ + intel_runtime_pm_get(i915); + + i915_gem_suspend_gtt_mappings(i915); + + i915_gem_freeze(i915); + i915_gem_freeze_late(i915); + + intel_runtime_pm_put(i915); +} + +static void pm_resume(struct drm_i915_private *i915) +{ + /* + * Both suspend and hibernate follow the same wakeup path and assume + * that runtime-pm just works. + */ + intel_runtime_pm_get(i915); + + intel_engines_sanitize(i915); + i915_gem_sanitize(i915); + i915_gem_resume(i915); + + intel_runtime_pm_put(i915); +} + +static int igt_gem_suspend(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct i915_gem_context *ctx; + struct drm_file *file; + int err; + + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + err = -ENOMEM; + mutex_lock(&i915->drm.struct_mutex); + ctx = live_context(i915, file); + if (!IS_ERR(ctx)) + err = switch_to_context(i915, ctx); + mutex_unlock(&i915->drm.struct_mutex); + if (err) + goto out; + + err = pm_prepare(i915); + if (err) + goto out; + + pm_suspend(i915); + + /* Here be dragons! Note that with S3RST any S3 may become S4! 
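
trash_stolen() overwrites stolen memory with a deterministic pseudo-random stream so that anything depending on stolen contents across the simulated S4 is guaranteed to notice. The same trick in plain C, filling and later verifying from one seed; the LCG constants below are illustrative, not the kernel's next_pseudo_random32():

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t lcg_next(uint32_t x)
{
	return x * 1664525u + 1013904223u;      /* illustrative constants */
}

static void fill(uint32_t *buf, size_t n, uint32_t seed)
{
	while (n--) {
		seed = lcg_next(seed);
		*buf++ = seed;
	}
}

static int check(const uint32_t *buf, size_t n, uint32_t seed)
{
	while (n--) {
		seed = lcg_next(seed);
		if (*buf++ != seed)
			return -1;
	}
	return 0;
}

int main(void)
{
	uint32_t buf[64];

	fill(buf, 64, 0x12345678);
	printf("intact: %s\n", check(buf, 64, 0x12345678) ? "no" : "yes");
	return 0;
}
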
*/ + simulate_hibernate(i915); + + pm_resume(i915); + + mutex_lock(&i915->drm.struct_mutex); + err = switch_to_context(i915, ctx); + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + mutex_unlock(&i915->drm.struct_mutex); +out: + mock_file_free(i915, file); + return err; +} + +static int igt_gem_hibernate(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct i915_gem_context *ctx; + struct drm_file *file; + int err; + + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + err = -ENOMEM; + mutex_lock(&i915->drm.struct_mutex); + ctx = live_context(i915, file); + if (!IS_ERR(ctx)) + err = switch_to_context(i915, ctx); + mutex_unlock(&i915->drm.struct_mutex); + if (err) + goto out; + + err = pm_prepare(i915); + if (err) + goto out; + + pm_hibernate(i915); + + /* Here be dragons! */ + simulate_hibernate(i915); + + pm_resume(i915); + + mutex_lock(&i915->drm.struct_mutex); + err = switch_to_context(i915, ctx); + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + mutex_unlock(&i915->drm.struct_mutex); +out: + mock_file_free(i915, file); + return err; +} + +int i915_gem_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(igt_gem_suspend), + SUBTEST(igt_gem_hibernate), + }; + + return i915_subtests(tests, i915); +} diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c index 3a095c37c120..4e6a221063ac 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c @@ -33,7 +33,8 @@ static int cpu_set(struct drm_i915_gem_object *obj, { unsigned int needs_clflush; struct page *page; - u32 *map; + void *map; + u32 *cpu; int err; err = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush); @@ -42,24 +43,19 @@ static int cpu_set(struct drm_i915_gem_object *obj, page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); map = kmap_atomic(page); + cpu = map + offset_in_page(offset); - if (needs_clflush & CLFLUSH_BEFORE) { - mb(); - clflush(map+offset_in_page(offset) / sizeof(*map)); - mb(); - } + if (needs_clflush & CLFLUSH_BEFORE) + drm_clflush_virt_range(cpu, sizeof(*cpu)); - map[offset_in_page(offset) / sizeof(*map)] = v; + *cpu = v; - if (needs_clflush & CLFLUSH_AFTER) { - mb(); - clflush(map+offset_in_page(offset) / sizeof(*map)); - mb(); - } + if (needs_clflush & CLFLUSH_AFTER) + drm_clflush_virt_range(cpu, sizeof(*cpu)); kunmap_atomic(map); - i915_gem_obj_finish_shmem_access(obj); + return 0; } @@ -69,7 +65,8 @@ static int cpu_get(struct drm_i915_gem_object *obj, { unsigned int needs_clflush; struct page *page; - u32 *map; + void *map; + u32 *cpu; int err; err = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush); @@ -78,17 +75,16 @@ static int cpu_get(struct drm_i915_gem_object *obj, page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); map = kmap_atomic(page); + cpu = map + offset_in_page(offset); - if (needs_clflush & CLFLUSH_BEFORE) { - mb(); - clflush(map+offset_in_page(offset) / sizeof(*map)); - mb(); - } + if (needs_clflush & CLFLUSH_BEFORE) + drm_clflush_virt_range(cpu, sizeof(*cpu)); - *v = map[offset_in_page(offset) / sizeof(*map)]; - kunmap_atomic(map); + *v = *cpu; + kunmap_atomic(map); i915_gem_obj_finish_shmem_access(obj); + return 0; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c index ba4f322d56b8..6d3516d5bff9 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c +++ 
b/drivers/gpu/drm/i915/selftests/i915_gem_object.c @@ -282,7 +282,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj, view.partial.offset, view.partial.size, vma->size >> PAGE_SHIFT, - tile_row_pages(obj), + tile->tiling ? tile_row_pages(obj) : 0, vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride, offset >> PAGE_SHIFT, (unsigned int)offset_in_page(offset), diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h index a00e2bd08bce..a15713cae3b3 100644 --- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h @@ -17,6 +17,7 @@ selftest(objects, i915_gem_object_live_selftests) selftest(dmabuf, i915_gem_dmabuf_live_selftests) selftest(coherency, i915_gem_coherency_live_selftests) selftest(gtt, i915_gem_gtt_live_selftests) +selftest(gem, i915_gem_live_selftests) selftest(evict, i915_gem_evict_live_selftests) selftest(hugepages, i915_gem_huge_page_live_selftests) selftest(contexts, i915_gem_context_live_selftests) diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c index 407c98fb9170..90ba88c972cf 100644 --- a/drivers/gpu/drm/i915/selftests/intel_guc.c +++ b/drivers/gpu/drm/i915/selftests/intel_guc.c @@ -65,6 +65,40 @@ static int check_all_doorbells(struct intel_guc *guc) return 0; } +static int ring_doorbell_nop(struct intel_guc_client *client) +{ + struct guc_process_desc *desc = __get_process_desc(client); + int err; + + client->use_nop_wqi = true; + + spin_lock_irq(&client->wq_lock); + + guc_wq_item_append(client, 0, 0, 0, 0); + guc_ring_doorbell(client); + + spin_unlock_irq(&client->wq_lock); + + client->use_nop_wqi = false; + + /* if there are no issues GuC will update the WQ head and keep the + * WQ in active status + */ + err = wait_for(READ_ONCE(desc->head) == READ_ONCE(desc->tail), 10); + if (err) { + pr_err("doorbell %u ring failed!\n", client->doorbell_id); + return -EIO; + } + + if (desc->wq_status != WQ_STATUS_ACTIVE) { + pr_err("doorbell %u ring put WQ in bad state (%u)!\n", + client->doorbell_id, desc->wq_status); + return -EIO; + } + + return 0; +} + /* * Basic client sanity check, handy to validate create_clients. */ @@ -332,6 +366,10 @@ static int igt_guc_doorbells(void *arg) err = check_all_doorbells(guc); if (err) goto out; + + err = ring_doorbell_nop(clients[i]); + if (err) + goto out; } out: diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c index 65d66cdedd26..db378226ac10 100644 --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c @@ -1018,8 +1018,41 @@ static int evict_vma(void *data) return err; } +static int evict_fence(void *data) +{ + struct evict_vma *arg = data; + struct drm_i915_private *i915 = arg->vma->vm->i915; + int err; + + complete(&arg->completion); + + mutex_lock(&i915->drm.struct_mutex); + + /* Mark the fence register as dirty to force the mmio update. 
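
ring_doorbell_nop() is a health check: submit a no-op work item, ring the doorbell, then wait for the GuC to drain the queue (head catches up to tail) while the queue stays in WQ_STATUS_ACTIVE. A standalone sketch of that head/tail catch-up check on a simplified queue:

#include <stdbool.h>
#include <stdio.h>

struct wq {
	volatile unsigned int head;     /* advanced by the consumer */
	volatile unsigned int tail;     /* advanced by the producer */
	int status;                     /* 1 == active */
};

/* After a no-op submission a healthy consumer drains the queue. */
static bool wq_healthy(const struct wq *wq, int spins)
{
	while (spins--) {
		if (wq->head == wq->tail)
			return wq->status == 1;
	}
	return false;
}

int main(void)
{
	struct wq wq = { .head = 4, .tail = 4, .status = 1 };

	printf("healthy: %d\n", wq_healthy(&wq, 1000));
	return 0;
}
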
*/ + err = i915_gem_object_set_tiling(arg->vma->obj, I915_TILING_Y, 512); + if (err) { + pr_err("Invalid Y-tiling settings; err:%d\n", err); + goto out_unlock; + } + + err = i915_vma_pin_fence(arg->vma); + if (err) { + pr_err("Unable to pin Y-tiled fence; err:%d\n", err); + goto out_unlock; + } + + i915_vma_unpin_fence(arg->vma); + +out_unlock: + mutex_unlock(&i915->drm.struct_mutex); + + return err; +} + static int __igt_reset_evict_vma(struct drm_i915_private *i915, - struct i915_address_space *vm) + struct i915_address_space *vm, + int (*fn)(void *), + unsigned int flags) { struct drm_i915_gem_object *obj; struct task_struct *tsk = NULL; @@ -1040,12 +1073,20 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, if (err) goto unlock; - obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + obj = i915_gem_object_create_internal(i915, SZ_1M); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto fini; } + if (flags & EXEC_OBJECT_NEEDS_FENCE) { + err = i915_gem_object_set_tiling(obj, I915_TILING_X, 512); + if (err) { + pr_err("Invalid X-tiling settings; err:%d\n", err); + goto out_obj; + } + } + arg.vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(arg.vma)) { err = PTR_ERR(arg.vma); @@ -1059,11 +1100,28 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, } err = i915_vma_pin(arg.vma, 0, 0, - i915_vma_is_ggtt(arg.vma) ? PIN_GLOBAL : PIN_USER); - if (err) + i915_vma_is_ggtt(arg.vma) ? + PIN_GLOBAL | PIN_MAPPABLE : + PIN_USER); + if (err) { + i915_request_add(rq); goto out_obj; + } + + if (flags & EXEC_OBJECT_NEEDS_FENCE) { + err = i915_vma_pin_fence(arg.vma); + if (err) { + pr_err("Unable to pin X-tiled fence; err:%d\n", err); + i915_vma_unpin(arg.vma); + i915_request_add(rq); + goto out_obj; + } + } + + err = i915_vma_move_to_active(arg.vma, rq, flags); - err = i915_vma_move_to_active(arg.vma, rq, EXEC_OBJECT_WRITE); + if (flags & EXEC_OBJECT_NEEDS_FENCE) + i915_vma_unpin_fence(arg.vma); i915_vma_unpin(arg.vma); i915_request_get(rq); @@ -1086,7 +1144,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, init_completion(&arg.completion); - tsk = kthread_run(evict_vma, &arg, "igt/evict_vma"); + tsk = kthread_run(fn, &arg, "igt/evict_vma"); if (IS_ERR(tsk)) { err = PTR_ERR(tsk); tsk = NULL; @@ -1137,29 +1195,47 @@ static int igt_reset_evict_ggtt(void *arg) { struct drm_i915_private *i915 = arg; - return __igt_reset_evict_vma(i915, &i915->ggtt.vm); + return __igt_reset_evict_vma(i915, &i915->ggtt.vm, + evict_vma, EXEC_OBJECT_WRITE); } static int igt_reset_evict_ppgtt(void *arg) { struct drm_i915_private *i915 = arg; struct i915_gem_context *ctx; + struct drm_file *file; int err; + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + mutex_lock(&i915->drm.struct_mutex); - ctx = kernel_context(i915); + ctx = live_context(i915, file); mutex_unlock(&i915->drm.struct_mutex); - if (IS_ERR(ctx)) - return PTR_ERR(ctx); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out; + } err = 0; if (ctx->ppgtt) /* aliasing == global gtt locking, covered above */ - err = __igt_reset_evict_vma(i915, &ctx->ppgtt->vm); + err = __igt_reset_evict_vma(i915, &ctx->ppgtt->vm, + evict_vma, EXEC_OBJECT_WRITE); - kernel_context_close(ctx); +out: + mock_file_free(i915, file); return err; } +static int igt_reset_evict_fence(void *arg) +{ + struct drm_i915_private *i915 = arg; + + return __igt_reset_evict_vma(i915, &i915->ggtt.vm, + evict_fence, EXEC_OBJECT_NEEDS_FENCE); +} + static int wait_for_others(struct drm_i915_private *i915, struct intel_engine_cs *exclude) { @@ 
-1409,6 +1485,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_reset_wait), SUBTEST(igt_reset_evict_ggtt), SUBTEST(igt_reset_evict_ppgtt), + SUBTEST(igt_reset_evict_fence), SUBTEST(igt_handle_error), }; bool saved_hangcheck; diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c index 8904f1ce64e3..d937bdff26f9 100644 --- a/drivers/gpu/drm/i915/selftests/mock_context.c +++ b/drivers/gpu/drm/i915/selftests/mock_context.c @@ -43,6 +43,7 @@ mock_context(struct drm_i915_private *i915, INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); INIT_LIST_HEAD(&ctx->handles_list); + INIT_LIST_HEAD(&ctx->hw_id_link); for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) { struct intel_context *ce = &ctx->__engine[n]; @@ -50,11 +51,9 @@ mock_context(struct drm_i915_private *i915, ce->gem_context = ctx; } - ret = ida_simple_get(&i915->contexts.hw_ida, - 0, MAX_CONTEXT_HW_ID, GFP_KERNEL); + ret = i915_gem_context_pin_hw_id(ctx); if (ret < 0) goto err_handles; - ctx->hw_id = ret; if (name) { ctx->name = kstrdup(name, GFP_KERNEL); @@ -85,11 +84,7 @@ void mock_context_close(struct i915_gem_context *ctx) void mock_init_contexts(struct drm_i915_private *i915) { - INIT_LIST_HEAD(&i915->contexts.list); - ida_init(&i915->contexts.hw_ida); - - INIT_WORK(&i915->contexts.free_work, contexts_free_worker); - init_llist_head(&i915->contexts.free_list); + init_contexts(i915); } struct i915_gem_context * diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c index a140ea5c3a7c..6ae418c76015 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gtt.c +++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c @@ -118,6 +118,8 @@ void mock_init_ggtt(struct drm_i915_private *i915) ggtt->vm.vma_ops.clear_pages = clear_pages; i915_address_space_init(&ggtt->vm, i915); + + ggtt->vm.is_ggtt = true; } void mock_fini_ggtt(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index 978782a77629..28d191192945 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -132,6 +132,11 @@ static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w, writel(0x0, comp->regs + DISP_REG_OVL_RST); } +static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp) +{ + return 4; +} + static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx) { unsigned int reg; @@ -157,6 +162,11 @@ static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx) static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt) { + /* The return value in switch "MEM_MODE_INPUT_FORMAT_XXX" + * is defined in mediatek HW data sheet. + * The alphabet order in XXX is no relation to data + * arrangement in memory. 
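
The MediaTek change stops hard-coding OVL_LAYER_NR and instead asks the first component in the pipeline how many layers it owns through an optional layer_nr callback, so an OVL-based path reports 4 planes while an RDMA-based path reports 1 and the CRTC sizes its plane array accordingly. A standalone sketch of the optional-callback-with-default shape (types and names are illustrative):

#include <stdio.h>

struct comp;

struct comp_funcs {
	/* optional: only layer-owning components implement it */
	unsigned int (*layer_nr)(struct comp *comp);
};

struct comp {
	const struct comp_funcs *funcs;
};

static unsigned int comp_layer_nr(struct comp *comp)
{
	if (comp->funcs && comp->funcs->layer_nr)
		return comp->funcs->layer_nr(comp);
	return 0;                       /* components without layers */
}

static unsigned int ovl_layer_nr(struct comp *comp)
{
	(void)comp;
	return 4;
}

static unsigned int rdma_layer_nr(struct comp *comp)
{
	(void)comp;
	return 1;
}

static const struct comp_funcs ovl_funcs  = { .layer_nr = ovl_layer_nr };
static const struct comp_funcs rdma_funcs = { .layer_nr = rdma_layer_nr };

int main(void)
{
	struct comp ovl = { &ovl_funcs }, rdma = { &rdma_funcs };

	/* the CRTC would allocate this many planes for each pipeline */
	printf("ovl: %u layers, rdma: %u layer\n",
	       comp_layer_nr(&ovl), comp_layer_nr(&rdma));
	return 0;
}
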
+ */ switch (fmt) { default: case DRM_FORMAT_RGB565: @@ -221,6 +231,7 @@ static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = { .stop = mtk_ovl_stop, .enable_vblank = mtk_ovl_enable_vblank, .disable_vblank = mtk_ovl_disable_vblank, + .layer_nr = mtk_ovl_layer_nr, .layer_on = mtk_ovl_layer_on, .layer_off = mtk_ovl_layer_off, .layer_config = mtk_ovl_layer_config, diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c index 585943c81e1f..b0a5cffe345a 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c @@ -31,14 +31,31 @@ #define RDMA_REG_UPDATE_INT BIT(0) #define DISP_REG_RDMA_GLOBAL_CON 0x0010 #define RDMA_ENGINE_EN BIT(0) +#define RDMA_MODE_MEMORY BIT(1) #define DISP_REG_RDMA_SIZE_CON_0 0x0014 +#define RDMA_MATRIX_ENABLE BIT(17) +#define RDMA_MATRIX_INT_MTX_SEL GENMASK(23, 20) +#define RDMA_MATRIX_INT_MTX_BT601_to_RGB (6 << 20) #define DISP_REG_RDMA_SIZE_CON_1 0x0018 #define DISP_REG_RDMA_TARGET_LINE 0x001c +#define DISP_RDMA_MEM_CON 0x0024 +#define MEM_MODE_INPUT_FORMAT_RGB565 (0x000 << 4) +#define MEM_MODE_INPUT_FORMAT_RGB888 (0x001 << 4) +#define MEM_MODE_INPUT_FORMAT_RGBA8888 (0x002 << 4) +#define MEM_MODE_INPUT_FORMAT_ARGB8888 (0x003 << 4) +#define MEM_MODE_INPUT_FORMAT_UYVY (0x004 << 4) +#define MEM_MODE_INPUT_FORMAT_YUYV (0x005 << 4) +#define MEM_MODE_INPUT_SWAP BIT(8) +#define DISP_RDMA_MEM_SRC_PITCH 0x002c +#define DISP_RDMA_MEM_GMC_SETTING_0 0x0030 #define DISP_REG_RDMA_FIFO_CON 0x0040 #define RDMA_FIFO_UNDERFLOW_EN BIT(31) #define RDMA_FIFO_PSEUDO_SIZE(bytes) (((bytes) / 16) << 16) #define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes) ((bytes) / 16) #define RDMA_FIFO_SIZE(rdma) ((rdma)->data->fifo_size) +#define DISP_RDMA_MEM_START_ADDR 0x0f00 + +#define RDMA_MEM_GMC 0x40402020 struct mtk_disp_rdma_data { unsigned int fifo_size; @@ -138,12 +155,87 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width, writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON); } +static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma, + unsigned int fmt) +{ + /* The return value in switch "MEM_MODE_INPUT_FORMAT_XXX" + * is defined in mediatek HW data sheet. + * The alphabet order in XXX is no relation to data + * arrangement in memory. 
+ */ + switch (fmt) { + default: + case DRM_FORMAT_RGB565: + return MEM_MODE_INPUT_FORMAT_RGB565; + case DRM_FORMAT_BGR565: + return MEM_MODE_INPUT_FORMAT_RGB565 | MEM_MODE_INPUT_SWAP; + case DRM_FORMAT_RGB888: + return MEM_MODE_INPUT_FORMAT_RGB888; + case DRM_FORMAT_BGR888: + return MEM_MODE_INPUT_FORMAT_RGB888 | MEM_MODE_INPUT_SWAP; + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_RGBA8888: + return MEM_MODE_INPUT_FORMAT_ARGB8888; + case DRM_FORMAT_BGRX8888: + case DRM_FORMAT_BGRA8888: + return MEM_MODE_INPUT_FORMAT_ARGB8888 | MEM_MODE_INPUT_SWAP; + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB8888: + return MEM_MODE_INPUT_FORMAT_RGBA8888; + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ABGR8888: + return MEM_MODE_INPUT_FORMAT_RGBA8888 | MEM_MODE_INPUT_SWAP; + case DRM_FORMAT_UYVY: + return MEM_MODE_INPUT_FORMAT_UYVY; + case DRM_FORMAT_YUYV: + return MEM_MODE_INPUT_FORMAT_YUYV; + } +} + +static unsigned int mtk_rdma_layer_nr(struct mtk_ddp_comp *comp) +{ + return 1; +} + +static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, + struct mtk_plane_state *state) +{ + struct mtk_disp_rdma *rdma = comp_to_rdma(comp); + struct mtk_plane_pending_state *pending = &state->pending; + unsigned int addr = pending->addr; + unsigned int pitch = pending->pitch & 0xffff; + unsigned int fmt = pending->format; + unsigned int con; + + con = rdma_fmt_convert(rdma, fmt); + writel_relaxed(con, comp->regs + DISP_RDMA_MEM_CON); + + if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV) { + rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, + RDMA_MATRIX_ENABLE, RDMA_MATRIX_ENABLE); + rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, + RDMA_MATRIX_INT_MTX_SEL, + RDMA_MATRIX_INT_MTX_BT601_to_RGB); + } else { + rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, + RDMA_MATRIX_ENABLE, 0); + } + + writel_relaxed(addr, comp->regs + DISP_RDMA_MEM_START_ADDR); + writel_relaxed(pitch, comp->regs + DISP_RDMA_MEM_SRC_PITCH); + writel(RDMA_MEM_GMC, comp->regs + DISP_RDMA_MEM_GMC_SETTING_0); + rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON, + RDMA_MODE_MEMORY, RDMA_MODE_MEMORY); +} + static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = { .config = mtk_rdma_config, .start = mtk_rdma_start, .stop = mtk_rdma_stop, .enable_vblank = mtk_rdma_enable_vblank, .disable_vblank = mtk_rdma_disable_vblank, + .layer_nr = mtk_rdma_layer_nr, + .layer_config = mtk_rdma_layer_config, }; static int mtk_disp_rdma_bind(struct device *dev, struct device *master, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 2d6aa150a9ff..0b976dfd04df 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -45,7 +45,8 @@ struct mtk_drm_crtc { bool pending_needs_vblank; struct drm_pending_vblank_event *event; - struct drm_plane planes[OVL_LAYER_NR]; + struct drm_plane *planes; + unsigned int layer_nr; bool pending_planes; void __iomem *config_regs; @@ -171,9 +172,9 @@ static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; + struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; - mtk_ddp_comp_enable_vblank(ovl, &mtk_crtc->base); + mtk_ddp_comp_enable_vblank(comp, &mtk_crtc->base); return 0; } @@ -181,9 +182,9 @@ static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc) static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc) { struct mtk_drm_crtc *mtk_crtc = 
to_mtk_crtc(crtc); - struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; + struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; - mtk_ddp_comp_disable_vblank(ovl); + mtk_ddp_comp_disable_vblank(comp); } static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc) @@ -286,7 +287,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) } /* Initially configure all planes */ - for (i = 0; i < OVL_LAYER_NR; i++) { + for (i = 0; i < mtk_crtc->layer_nr; i++) { struct drm_plane *plane = &mtk_crtc->planes[i]; struct mtk_plane_state *plane_state; @@ -334,7 +335,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state); - struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; + struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; unsigned int i; /* @@ -343,7 +344,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) * queue update module registers on vblank. */ if (state->pending_config) { - mtk_ddp_comp_config(ovl, state->pending_width, + mtk_ddp_comp_config(comp, state->pending_width, state->pending_height, state->pending_vrefresh, 0); @@ -351,14 +352,14 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) } if (mtk_crtc->pending_planes) { - for (i = 0; i < OVL_LAYER_NR; i++) { + for (i = 0; i < mtk_crtc->layer_nr; i++) { struct drm_plane *plane = &mtk_crtc->planes[i]; struct mtk_plane_state *plane_state; plane_state = to_mtk_plane_state(plane->state); if (plane_state->pending.config) { - mtk_ddp_comp_layer_config(ovl, i, plane_state); + mtk_ddp_comp_layer_config(comp, i, plane_state); plane_state->pending.config = false; } } @@ -370,12 +371,12 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; + struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; int ret; DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); - ret = mtk_smi_larb_get(ovl->larb_dev); + ret = mtk_smi_larb_get(comp->larb_dev); if (ret) { DRM_ERROR("Failed to get larb: %d\n", ret); return; @@ -383,7 +384,7 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc, ret = mtk_crtc_ddp_hw_init(mtk_crtc); if (ret) { - mtk_smi_larb_put(ovl->larb_dev); + mtk_smi_larb_put(comp->larb_dev); return; } @@ -395,7 +396,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; + struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; int i; DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); @@ -403,7 +404,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, return; /* Set all pending plane state to disabled */ - for (i = 0; i < OVL_LAYER_NR; i++) { + for (i = 0; i < mtk_crtc->layer_nr; i++) { struct drm_plane *plane = &mtk_crtc->planes[i]; struct mtk_plane_state *plane_state; @@ -418,7 +419,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc, drm_crtc_vblank_off(crtc); mtk_crtc_ddp_hw_fini(mtk_crtc); - mtk_smi_larb_put(ovl->larb_dev); + mtk_smi_larb_put(comp->larb_dev); mtk_crtc->enabled = false; } @@ -450,7 +451,7 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc, if (mtk_crtc->event) mtk_crtc->pending_needs_vblank = true; - for (i = 0; i < OVL_LAYER_NR; i++) { + for (i = 0; i < mtk_crtc->layer_nr; i++) { struct drm_plane *plane = &mtk_crtc->planes[i]; struct 
mtk_plane_state *plane_state;
@@ -516,7 +517,7 @@ err_cleanup_crtc:
 	return ret;
 }
 
-void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl)
+void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp)
 {
 	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
 	struct mtk_drm_private *priv = crtc->dev->dev_private;
@@ -598,7 +599,12 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
 		mtk_crtc->ddp_comp[i] = comp;
 	}
 
-	for (zpos = 0; zpos < OVL_LAYER_NR; zpos++) {
+	mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]);
+	mtk_crtc->planes = devm_kzalloc(dev, mtk_crtc->layer_nr *
+					sizeof(struct drm_plane),
+					GFP_KERNEL);
+
+	for (zpos = 0; zpos < mtk_crtc->layer_nr; zpos++) {
 		type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY :
 				(zpos == 1) ? DRM_PLANE_TYPE_CURSOR :
 						DRM_PLANE_TYPE_OVERLAY;
@@ -609,7 +615,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
 	}
 
 	ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0],
-				&mtk_crtc->planes[1], pipe);
+				mtk_crtc->layer_nr > 1 ? &mtk_crtc->planes[1] :
+				NULL, pipe);
 	if (ret < 0)
 		goto unprepare;
 	drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
index 9d9410c67ae9..091adb2087eb 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
@@ -18,13 +18,12 @@
 #include "mtk_drm_ddp_comp.h"
 #include "mtk_drm_plane.h"
 
-#define OVL_LAYER_NR	4
 #define MTK_LUT_SIZE	512
 #define MTK_MAX_BPC	10
 #define MTK_MIN_BPC	3
 
 void mtk_drm_crtc_commit(struct drm_crtc *crtc);
-void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl);
+void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp);
 int mtk_drm_crtc_create(struct drm_device *drm_dev,
 			const enum mtk_ddp_comp_id *path,
 			unsigned int path_len);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
index 87e4191c250e..546b3e3b300b 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
@@ -106,6 +106,8 @@
 #define OVL1_MOUT_EN_COLOR1		0x1
 #define GAMMA_MOUT_EN_RDMA1		0x1
 #define RDMA0_SOUT_DPI0			0x2
+#define RDMA0_SOUT_DPI1			0x3
+#define RDMA0_SOUT_DSI1			0x1
 #define RDMA0_SOUT_DSI2			0x4
 #define RDMA0_SOUT_DSI3			0x5
 #define RDMA1_SOUT_DPI0			0x2
@@ -122,6 +124,8 @@
 #define DPI0_SEL_IN_RDMA2		0x3
 #define DPI1_SEL_IN_RDMA1		(0x1 << 8)
 #define DPI1_SEL_IN_RDMA2		(0x3 << 8)
+#define DSI0_SEL_IN_RDMA1		0x1
+#define DSI0_SEL_IN_RDMA2		0x4
 #define DSI1_SEL_IN_RDMA1		0x1
 #define DSI1_SEL_IN_RDMA2		0x4
 #define DSI2_SEL_IN_RDMA1		(0x1 << 16)
@@ -224,6 +228,12 @@ static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
 	} else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) {
 		*addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
 		value = RDMA0_SOUT_DPI0;
+	} else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI1) {
+		*addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+		value = RDMA0_SOUT_DPI1;
+	} else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI1) {
+		*addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+		value = RDMA0_SOUT_DSI1;
 	} else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) {
 		*addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
 		value = RDMA0_SOUT_DSI2;
@@ -282,6 +292,9 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
 	} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
 		*addr = DISP_REG_CONFIG_DPI_SEL_IN;
 		value = DPI1_SEL_IN_RDMA1;
+	} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI0) {
+		*addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+		value = DSI0_SEL_IN_RDMA1;
 	} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
 		*addr = DISP_REG_CONFIG_DSIO_SEL_IN;
 		value = DSI1_SEL_IN_RDMA1;
@@ -297,8 +310,11 @@
 	} else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
 		*addr = DISP_REG_CONFIG_DPI_SEL_IN;
 		value = DPI1_SEL_IN_RDMA2;
-	} else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
+	} else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI0) {
 		*addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+		value = DSI0_SEL_IN_RDMA2;
+	} else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
+		*addr = DISP_REG_CONFIG_DSIO_SEL_IN;
 		value = DSI1_SEL_IN_RDMA2;
 	} else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
 		*addr = DISP_REG_CONFIG_DSIE_SEL_IN;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 7413ffeb3c9d..8399229e6ad2 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -78,6 +78,7 @@ struct mtk_ddp_comp_funcs {
 	void (*stop)(struct mtk_ddp_comp *comp);
 	void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc);
 	void (*disable_vblank)(struct mtk_ddp_comp *comp);
+	unsigned int (*layer_nr)(struct mtk_ddp_comp *comp);
 	void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx);
 	void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx);
 	void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx,
@@ -128,6 +129,14 @@ static inline void mtk_ddp_comp_disable_vblank(struct mtk_ddp_comp *comp)
 		comp->funcs->disable_vblank(comp);
 }
 
+static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp)
+{
+	if (comp->funcs && comp->funcs->layer_nr)
+		return comp->funcs->layer_nr(comp);
+
+	return 0;
+}
+
 static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp,
 					 unsigned int idx)
 {
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 39721119713b..47ec604289b7 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -381,7 +381,7 @@ static int mtk_drm_bind(struct device *dev)
 err_deinit:
 	mtk_drm_kms_deinit(drm);
 err_free:
-	drm_dev_unref(drm);
+	drm_dev_put(drm);
 	return ret;
 }
 
@@ -390,7 +390,7 @@ static void mtk_drm_unbind(struct device *dev)
 	struct mtk_drm_private *private = dev_get_drvdata(dev);
 
 	drm_dev_unregister(private->drm);
-	drm_dev_unref(private->drm);
+	drm_dev_put(private->drm);
 	private->drm = NULL;
 }
 
@@ -564,7 +564,7 @@ static int mtk_drm_remove(struct platform_device *pdev)
 
 	drm_dev_unregister(drm);
 	mtk_drm_kms_deinit(drm);
-	drm_dev_unref(drm);
+	drm_dev_put(drm);
 	component_master_del(&pdev->dev, &mtk_drm_ops);
 	pm_runtime_disable(&pdev->dev);
 
@@ -580,29 +580,24 @@ static int mtk_drm_sys_suspend(struct device *dev)
 {
 	struct mtk_drm_private *private = dev_get_drvdata(dev);
 	struct drm_device *drm = private->drm;
+	int ret;
 
-	drm_kms_helper_poll_disable(drm);
-
-	private->suspend_state = drm_atomic_helper_suspend(drm);
-	if (IS_ERR(private->suspend_state)) {
-		drm_kms_helper_poll_enable(drm);
-		return PTR_ERR(private->suspend_state);
-	}
-
+	ret = drm_mode_config_helper_suspend(drm);
 	DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n");
-	return 0;
+
+	return ret;
 }
 
 static int mtk_drm_sys_resume(struct device *dev)
 {
 	struct mtk_drm_private *private = dev_get_drvdata(dev);
 	struct drm_device *drm = private->drm;
+	int ret;
 
-	drm_atomic_helper_resume(drm, private->suspend_state);
-	drm_kms_helper_poll_enable(drm);
-
+	ret = drm_mode_config_helper_resume(drm);
 	DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n");
-	return 0;
+
+	return ret;
 }
 #endif
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
index 9eabd7201a12..28a3ce8f88d2 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
@@ -18,77 +18,27 @@
 struct panel_drv_data {
 	struct omap_dss_device dssdev;
-	struct omap_dss_device *in;
 
 	struct device *dev;
-
-	struct videomode vm;
-};
-
-static const struct videomode tvc_pal_vm = {
-	.hactive	= 720,
-	.vactive	= 574,
-	.pixelclock	= 13500000,
-	.hsync_len	= 64,
-	.hfront_porch	= 12,
-	.hback_porch	= 68,
-	.vsync_len	= 5,
-	.vfront_porch	= 5,
-	.vback_porch	= 41,
-
-	.flags		= DISPLAY_FLAGS_INTERLACED | DISPLAY_FLAGS_HSYNC_LOW |
-			  DISPLAY_FLAGS_VSYNC_LOW,
 };
 
 #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
 
-static int tvc_connect(struct omap_dss_device *dssdev)
+static int tvc_connect(struct omap_dss_device *src,
+		       struct omap_dss_device *dst)
 {
-	struct panel_drv_data *ddata = to_panel_data(dssdev);
-	struct omap_dss_device *in;
-	int r;
-
-	dev_dbg(ddata->dev, "connect\n");
-
-	if (omapdss_device_is_connected(dssdev))
-		return 0;
-
-	in = omapdss_of_find_source_for_first_ep(ddata->dev->of_node);
-	if (IS_ERR(in)) {
-		dev_err(ddata->dev, "failed to find video source\n");
-		return PTR_ERR(in);
-	}
-
-	r = in->ops.atv->connect(in, dssdev);
-	if (r) {
-		omap_dss_put_device(in);
-		return r;
-	}
-
-	ddata->in = in;
 	return 0;
 }
 
-static void tvc_disconnect(struct omap_dss_device *dssdev)
+static void tvc_disconnect(struct omap_dss_device *src,
+			   struct omap_dss_device *dst)
 {
-	struct panel_drv_data *ddata = to_panel_data(dssdev);
-	struct omap_dss_device *in = ddata->in;
-
-	dev_dbg(ddata->dev, "disconnect\n");
-
-	if (!omapdss_device_is_connected(dssdev))
-		return;
-
-	in->ops.atv->disconnect(in, dssdev);
-
-	omap_dss_put_device(in);
-	ddata->in = NULL;
 }
 
 static int tvc_enable(struct omap_dss_device *dssdev)
 {
 	struct panel_drv_data *ddata = to_panel_data(dssdev);
-	struct omap_dss_device *in = ddata->in;
+	struct omap_dss_device *src = dssdev->src;
 	int r;
 
 	dev_dbg(ddata->dev, "enable\n");
@@ -99,9 +49,7 @@ static int tvc_enable(struct omap_dss_device *dssdev)
 	if (omapdss_device_is_enabled(dssdev))
 		return 0;
 
-	in->ops.atv->set_timings(in, &ddata->vm);
-
-	r = in->ops.atv->enable(in);
+	r = src->ops->enable(src);
 	if (r)
 		return r;
 
@@ -113,83 +61,30 @@
 static void tvc_disable(struct omap_dss_device *dssdev)
 {
 	struct panel_drv_data *ddata = to_panel_data(dssdev);
-	struct omap_dss_device *in = ddata->in;
+	struct omap_dss_device *src = dssdev->src;
 
 	dev_dbg(ddata->dev, "disable\n");
 
 	if (!omapdss_device_is_enabled(dssdev))
 		return;
 
-	in->ops.atv->disable(in);
+	src->ops->disable(src);
 
 	dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
 }
 
-static void tvc_set_timings(struct omap_dss_device *dssdev,
-			    struct videomode *vm)
-{
-	struct panel_drv_data *ddata = to_panel_data(dssdev);
-	struct omap_dss_device *in = ddata->in;
-
-	ddata->vm = *vm;
-	dssdev->panel.vm = *vm;
-
-	in->ops.atv->set_timings(in, vm);
-}
-
-static void tvc_get_timings(struct omap_dss_device *dssdev,
-			    struct videomode *vm)
-{
-	struct panel_drv_data *ddata = to_panel_data(dssdev);
-
-	*vm = ddata->vm;
-}
-
-static int tvc_check_timings(struct
omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - return in->ops.atv->check_timings(in, vm); -} - -static u32 tvc_get_wss(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - return in->ops.atv->get_wss(in); -} - -static int tvc_set_wss(struct omap_dss_device *dssdev, u32 wss) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - return in->ops.atv->set_wss(in, wss); -} - -static struct omap_dss_driver tvc_driver = { +static const struct omap_dss_device_ops tvc_ops = { .connect = tvc_connect, .disconnect = tvc_disconnect, .enable = tvc_enable, .disable = tvc_disable, - - .set_timings = tvc_set_timings, - .get_timings = tvc_get_timings, - .check_timings = tvc_check_timings, - - .get_wss = tvc_get_wss, - .set_wss = tvc_set_wss, }; static int tvc_probe(struct platform_device *pdev) { struct panel_drv_data *ddata; struct omap_dss_device *dssdev; - int r; ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); if (!ddata) @@ -198,20 +93,15 @@ static int tvc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ddata); ddata->dev = &pdev->dev; - ddata->vm = tvc_pal_vm; - dssdev = &ddata->dssdev; - dssdev->driver = &tvc_driver; + dssdev->ops = &tvc_ops; dssdev->dev = &pdev->dev; dssdev->type = OMAP_DISPLAY_TYPE_VENC; dssdev->owner = THIS_MODULE; - dssdev->panel.vm = tvc_pal_vm; + dssdev->of_ports = BIT(0); - r = omapdss_register_display(dssdev); - if (r) { - dev_err(&pdev->dev, "Failed to register panel\n"); - return r; - } + omapdss_display_init(dssdev); + omapdss_device_register(dssdev); return 0; } @@ -221,10 +111,9 @@ static int __exit tvc_remove(struct platform_device *pdev) struct panel_drv_data *ddata = platform_get_drvdata(pdev); struct omap_dss_device *dssdev = &ddata->dssdev; - omapdss_unregister_display(&ddata->dssdev); + omapdss_device_unregister(&ddata->dssdev); tvc_disable(dssdev); - tvc_disconnect(dssdev); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c index 6d8cbd9e2110..24b14f44248e 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c @@ -19,30 +19,8 @@ #include "../dss/omapdss.h" -static const struct videomode dvic_default_vm = { - .hactive = 640, - .vactive = 480, - - .pixelclock = 23500000, - - .hfront_porch = 48, - .hsync_len = 32, - .hback_porch = 80, - - .vfront_porch = 3, - .vsync_len = 4, - .vback_porch = 7, - - .flags = DISPLAY_FLAGS_HSYNC_HIGH | DISPLAY_FLAGS_VSYNC_HIGH | - DISPLAY_FLAGS_SYNC_NEGEDGE | DISPLAY_FLAGS_DE_HIGH | - DISPLAY_FLAGS_PIXDATA_POSEDGE, -}; - struct panel_drv_data { struct omap_dss_device dssdev; - struct omap_dss_device *in; - - struct videomode vm; struct i2c_adapter *i2c_adapter; @@ -57,49 +35,20 @@ struct panel_drv_data { #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) -static int dvic_connect(struct omap_dss_device *dssdev) +static int dvic_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in; - int r; - - if (omapdss_device_is_connected(dssdev)) - return 0; - - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(in)) { - dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(in); - } - - r 
= in->ops.dvi->connect(in, dssdev); - if (r) { - omap_dss_put_device(in); - return r; - } - - ddata->in = in; return 0; } -static void dvic_disconnect(struct omap_dss_device *dssdev) +static void dvic_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - if (!omapdss_device_is_connected(dssdev)) - return; - - in->ops.dvi->disconnect(in, dssdev); - - omap_dss_put_device(in); - ddata->in = NULL; } static int dvic_enable(struct omap_dss_device *dssdev) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; if (!omapdss_device_is_connected(dssdev)) @@ -108,9 +57,7 @@ static int dvic_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - in->ops.dvi->set_timings(in, &ddata->vm); - - r = in->ops.dvi->enable(in); + r = src->ops->enable(src); if (r) return r; @@ -121,46 +68,16 @@ static int dvic_enable(struct omap_dss_device *dssdev) static void dvic_disable(struct omap_dss_device *dssdev) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; if (!omapdss_device_is_enabled(dssdev)) return; - in->ops.dvi->disable(in); + src->ops->disable(src); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void dvic_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - ddata->vm = *vm; - dssdev->panel.vm = *vm; - - in->ops.dvi->set_timings(in, vm); -} - -static void dvic_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - - *vm = ddata->vm; -} - -static int dvic_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - return in->ops.dvi->check_timings(in, vm); -} - static int dvic_ddc_read(struct i2c_adapter *adapter, unsigned char *buf, u16 count, u8 offset) { @@ -198,12 +115,6 @@ static int dvic_read_edid(struct omap_dss_device *dssdev, struct panel_drv_data *ddata = to_panel_data(dssdev); int r, l, bytes_read; - if (ddata->hpd_gpio && !gpiod_get_value_cansleep(ddata->hpd_gpio)) - return -ENODEV; - - if (!ddata->i2c_adapter) - return -ENODEV; - l = min(EDID_LENGTH, len); r = dvic_ddc_read(ddata->i2c_adapter, edid, l, 0); if (r) @@ -243,78 +154,41 @@ static bool dvic_detect(struct omap_dss_device *dssdev) return r == 0; } -static int dvic_register_hpd_cb(struct omap_dss_device *dssdev, +static void dvic_register_hpd_cb(struct omap_dss_device *dssdev, void (*cb)(void *cb_data, enum drm_connector_status status), void *cb_data) { struct panel_drv_data *ddata = to_panel_data(dssdev); - if (!ddata->hpd_gpio) - return -ENOTSUPP; - mutex_lock(&ddata->hpd_lock); ddata->hpd_cb = cb; ddata->hpd_cb_data = cb_data; mutex_unlock(&ddata->hpd_lock); - return 0; } static void dvic_unregister_hpd_cb(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - if (!ddata->hpd_gpio) - return; - mutex_lock(&ddata->hpd_lock); ddata->hpd_cb = NULL; ddata->hpd_cb_data = NULL; mutex_unlock(&ddata->hpd_lock); } -static void dvic_enable_hpd(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - - if (!ddata->hpd_gpio) - 
return; - - mutex_lock(&ddata->hpd_lock); - ddata->hpd_enabled = true; - mutex_unlock(&ddata->hpd_lock); -} - -static void dvic_disable_hpd(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - - if (!ddata->hpd_gpio) - return; - - mutex_lock(&ddata->hpd_lock); - ddata->hpd_enabled = false; - mutex_unlock(&ddata->hpd_lock); -} - -static struct omap_dss_driver dvic_driver = { +static const struct omap_dss_device_ops dvic_ops = { .connect = dvic_connect, .disconnect = dvic_disconnect, .enable = dvic_enable, .disable = dvic_disable, - .set_timings = dvic_set_timings, - .get_timings = dvic_get_timings, - .check_timings = dvic_check_timings, - .read_edid = dvic_read_edid, .detect = dvic_detect, .register_hpd_cb = dvic_register_hpd_cb, .unregister_hpd_cb = dvic_unregister_hpd_cb, - .enable_hpd = dvic_enable_hpd, - .disable_hpd = dvic_disable_hpd, }; static irqreturn_t dvic_hpd_isr(int irq, void *data) @@ -396,28 +270,24 @@ static int dvic_probe(struct platform_device *pdev) if (r) return r; - ddata->vm = dvic_default_vm; - dssdev = &ddata->dssdev; - dssdev->driver = &dvic_driver; + dssdev->ops = &dvic_ops; dssdev->dev = &pdev->dev; dssdev->type = OMAP_DISPLAY_TYPE_DVI; dssdev->owner = THIS_MODULE; - dssdev->panel.vm = dvic_default_vm; + dssdev->of_ports = BIT(0); - r = omapdss_register_display(dssdev); - if (r) { - dev_err(&pdev->dev, "Failed to register panel\n"); - goto err_reg; - } - - return 0; + if (ddata->hpd_gpio) + dssdev->ops_flags |= OMAP_DSS_DEVICE_OP_DETECT + | OMAP_DSS_DEVICE_OP_HPD; + if (ddata->i2c_adapter) + dssdev->ops_flags |= OMAP_DSS_DEVICE_OP_DETECT + | OMAP_DSS_DEVICE_OP_EDID; -err_reg: - i2c_put_adapter(ddata->i2c_adapter); - mutex_destroy(&ddata->hpd_lock); + omapdss_display_init(dssdev); + omapdss_device_register(dssdev); - return r; + return 0; } static int __exit dvic_remove(struct platform_device *pdev) @@ -425,10 +295,9 @@ static int __exit dvic_remove(struct platform_device *pdev) struct panel_drv_data *ddata = platform_get_drvdata(pdev); struct omap_dss_device *dssdev = &ddata->dssdev; - omapdss_unregister_display(&ddata->dssdev); + omapdss_device_unregister(&ddata->dssdev); dvic_disable(dssdev); - dvic_disconnect(dssdev); i2c_put_adapter(ddata->i2c_adapter); diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index ca30ed9da7eb..e602fa4a50a4 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -10,95 +10,41 @@ */ #include <linux/gpio/consumer.h> -#include <linux/slab.h> #include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/of.h> -#include <linux/of_gpio.h> #include <linux/mutex.h> - -#include <drm/drm_edid.h> +#include <linux/platform_device.h> +#include <linux/slab.h> #include "../dss/omapdss.h" -static const struct videomode hdmic_default_vm = { - .hactive = 640, - .vactive = 480, - .pixelclock = 25175000, - .hsync_len = 96, - .hfront_porch = 16, - .hback_porch = 48, - .vsync_len = 2, - .vfront_porch = 11, - .vback_porch = 31, - - .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW, -}; - struct panel_drv_data { struct omap_dss_device dssdev; - struct omap_dss_device *in; void (*hpd_cb)(void *cb_data, enum drm_connector_status status); void *hpd_cb_data; - bool hpd_enabled; struct mutex hpd_lock; struct device *dev; - struct videomode vm; - - int hpd_gpio; + struct gpio_desc *hpd_gpio; }; #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) 
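The connector-dvi hunks above replace the hpd-GPIO guards and the enable_hpd/disable_hpd stubs with capability bits: probe now sets OMAP_DSS_DEVICE_OP_DETECT, OMAP_DSS_DEVICE_OP_HPD and OMAP_DSS_DEVICE_OP_EDID in dssdev->ops_flags only when the matching hardware (HPD GPIO, DDC adapter) is actually there. A minimal sketch of how a caller might honour those flags, using only the fields visible in this series; the wrapper function below is hypothetical and not part of the patch:

/* Illustrative sketch only (not from the patch): gate the optional
 * .detect operation on the capability bit set at probe time, and fall
 * back to assuming the display is present when detection is absent. */
static bool example_display_detect(struct omap_dss_device *dssdev)
{
	if ((dssdev->ops_flags & OMAP_DSS_DEVICE_OP_DETECT) &&
	    dssdev->ops->detect)
		return dssdev->ops->detect(dssdev);

	return true;
}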
-static int hdmic_connect(struct omap_dss_device *dssdev) +static int hdmic_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in; - int r; - - dev_dbg(ddata->dev, "connect\n"); - - if (omapdss_device_is_connected(dssdev)) - return 0; - - in = omapdss_of_find_source_for_first_ep(ddata->dev->of_node); - if (IS_ERR(in)) { - dev_err(ddata->dev, "failed to find video source\n"); - return PTR_ERR(in); - } - - r = in->ops.hdmi->connect(in, dssdev); - if (r) { - omap_dss_put_device(in); - return r; - } - - ddata->in = in; return 0; } -static void hdmic_disconnect(struct omap_dss_device *dssdev) +static void hdmic_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - dev_dbg(ddata->dev, "disconnect\n"); - - if (!omapdss_device_is_connected(dssdev)) - return; - - in->ops.hdmi->disconnect(in, dssdev); - - omap_dss_put_device(in); - ddata->in = NULL; } static int hdmic_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; dev_dbg(ddata->dev, "enable\n"); @@ -109,9 +55,7 @@ static int hdmic_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - in->ops.hdmi->set_timings(in, &ddata->vm); - - r = in->ops.hdmi->enable(in); + r = src->ops->enable(src); if (r) return r; @@ -123,171 +67,58 @@ static int hdmic_enable(struct omap_dss_device *dssdev) static void hdmic_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; dev_dbg(ddata->dev, "disable\n"); if (!omapdss_device_is_enabled(dssdev)) return; - in->ops.hdmi->disable(in); + src->ops->disable(src); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void hdmic_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - ddata->vm = *vm; - dssdev->panel.vm = *vm; - - in->ops.hdmi->set_timings(in, vm); -} - -static void hdmic_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - - *vm = ddata->vm; -} - -static int hdmic_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - return in->ops.hdmi->check_timings(in, vm); -} - -static int hdmic_read_edid(struct omap_dss_device *dssdev, - u8 *edid, int len) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - return in->ops.hdmi->read_edid(in, edid, len); -} - static bool hdmic_detect(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - bool connected; - - if (gpio_is_valid(ddata->hpd_gpio)) - connected = gpio_get_value_cansleep(ddata->hpd_gpio); - else - connected = in->ops.hdmi->detect(in); - if (!connected && in->ops.hdmi->lost_hotplug) - in->ops.hdmi->lost_hotplug(in); - return connected; + + return gpiod_get_value_cansleep(ddata->hpd_gpio); } -static int hdmic_register_hpd_cb(struct omap_dss_device *dssdev, - void (*cb)(void *cb_data, +static void 
hdmic_register_hpd_cb(struct omap_dss_device *dssdev, + void (*cb)(void *cb_data, enum drm_connector_status status), - void *cb_data) + void *cb_data) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - if (gpio_is_valid(ddata->hpd_gpio)) { - mutex_lock(&ddata->hpd_lock); - ddata->hpd_cb = cb; - ddata->hpd_cb_data = cb_data; - mutex_unlock(&ddata->hpd_lock); - return 0; - } else if (in->ops.hdmi->register_hpd_cb) { - return in->ops.hdmi->register_hpd_cb(in, cb, cb_data); - } - return -ENOTSUPP; + mutex_lock(&ddata->hpd_lock); + ddata->hpd_cb = cb; + ddata->hpd_cb_data = cb_data; + mutex_unlock(&ddata->hpd_lock); } static void hdmic_unregister_hpd_cb(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - if (gpio_is_valid(ddata->hpd_gpio)) { - mutex_lock(&ddata->hpd_lock); - ddata->hpd_cb = NULL; - ddata->hpd_cb_data = NULL; - mutex_unlock(&ddata->hpd_lock); - } else if (in->ops.hdmi->unregister_hpd_cb) { - in->ops.hdmi->unregister_hpd_cb(in); - } -} - -static void hdmic_enable_hpd(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - if (gpio_is_valid(ddata->hpd_gpio)) { - mutex_lock(&ddata->hpd_lock); - ddata->hpd_enabled = true; - mutex_unlock(&ddata->hpd_lock); - } else if (in->ops.hdmi->enable_hpd) { - in->ops.hdmi->enable_hpd(in); - } -} - -static void hdmic_disable_hpd(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - if (gpio_is_valid(ddata->hpd_gpio)) { - mutex_lock(&ddata->hpd_lock); - ddata->hpd_enabled = false; - mutex_unlock(&ddata->hpd_lock); - } else if (in->ops.hdmi->disable_hpd) { - in->ops.hdmi->disable_hpd(in); - } -} - -static int hdmic_set_hdmi_mode(struct omap_dss_device *dssdev, bool hdmi_mode) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - return in->ops.hdmi->set_hdmi_mode(in, hdmi_mode); -} - -static int hdmic_set_infoframe(struct omap_dss_device *dssdev, - const struct hdmi_avi_infoframe *avi) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - return in->ops.hdmi->set_infoframe(in, avi); + mutex_lock(&ddata->hpd_lock); + ddata->hpd_cb = NULL; + ddata->hpd_cb_data = NULL; + mutex_unlock(&ddata->hpd_lock); } -static struct omap_dss_driver hdmic_driver = { +static const struct omap_dss_device_ops hdmic_ops = { .connect = hdmic_connect, .disconnect = hdmic_disconnect, .enable = hdmic_enable, .disable = hdmic_disable, - .set_timings = hdmic_set_timings, - .get_timings = hdmic_get_timings, - .check_timings = hdmic_check_timings, - - .read_edid = hdmic_read_edid, .detect = hdmic_detect, .register_hpd_cb = hdmic_register_hpd_cb, .unregister_hpd_cb = hdmic_unregister_hpd_cb, - .enable_hpd = hdmic_enable_hpd, - .disable_hpd = hdmic_disable_hpd, - .set_hdmi_mode = hdmic_set_hdmi_mode, - .set_hdmi_infoframe = hdmic_set_infoframe, }; static irqreturn_t hdmic_hpd_isr(int irq, void *data) @@ -295,7 +126,7 @@ static irqreturn_t hdmic_hpd_isr(int irq, void *data) struct panel_drv_data *ddata = data; mutex_lock(&ddata->hpd_lock); - if (ddata->hpd_enabled && ddata->hpd_cb) { + if (ddata->hpd_cb) { enum drm_connector_status status; if (hdmic_detect(&ddata->dssdev)) @@ -310,26 +141,11 @@ static irqreturn_t hdmic_hpd_isr(int irq, void *data) return IRQ_HANDLED; } -static int 
hdmic_probe_of(struct platform_device *pdev) -{ - struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct device_node *node = pdev->dev.of_node; - int gpio; - - /* HPD GPIO */ - gpio = of_get_named_gpio(node, "hpd-gpios", 0); - if (gpio_is_valid(gpio)) - ddata->hpd_gpio = gpio; - else - ddata->hpd_gpio = -ENODEV; - - return 0; -} - static int hdmic_probe(struct platform_device *pdev) { struct panel_drv_data *ddata; struct omap_dss_device *dssdev; + struct gpio_desc *gpio; int r; ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); @@ -339,20 +155,20 @@ static int hdmic_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ddata); ddata->dev = &pdev->dev; - r = hdmic_probe_of(pdev); - if (r) - return r; - mutex_init(&ddata->hpd_lock); - if (gpio_is_valid(ddata->hpd_gpio)) { - r = devm_gpio_request_one(&pdev->dev, ddata->hpd_gpio, - GPIOF_DIR_IN, "hdmi_hpd"); - if (r) - return r; + /* HPD GPIO */ + gpio = devm_gpiod_get_optional(&pdev->dev, "hpd", GPIOD_IN); + if (IS_ERR(gpio)) { + dev_err(&pdev->dev, "failed to parse HPD gpio\n"); + return PTR_ERR(gpio); + } + + ddata->hpd_gpio = gpio; + if (ddata->hpd_gpio) { r = devm_request_threaded_irq(&pdev->dev, - gpio_to_irq(ddata->hpd_gpio), + gpiod_to_irq(ddata->hpd_gpio), NULL, hdmic_hpd_isr, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, @@ -361,20 +177,18 @@ static int hdmic_probe(struct platform_device *pdev) return r; } - ddata->vm = hdmic_default_vm; - dssdev = &ddata->dssdev; - dssdev->driver = &hdmic_driver; + dssdev->ops = &hdmic_ops; dssdev->dev = &pdev->dev; dssdev->type = OMAP_DISPLAY_TYPE_HDMI; dssdev->owner = THIS_MODULE; - dssdev->panel.vm = hdmic_default_vm; + dssdev->of_ports = BIT(0); + dssdev->ops_flags = ddata->hpd_gpio + ? OMAP_DSS_DEVICE_OP_DETECT | OMAP_DSS_DEVICE_OP_HPD + : 0; - r = omapdss_register_display(dssdev); - if (r) { - dev_err(&pdev->dev, "Failed to register panel\n"); - return r; - } + omapdss_display_init(dssdev); + omapdss_device_register(dssdev); return 0; } @@ -384,10 +198,9 @@ static int __exit hdmic_remove(struct platform_device *pdev) struct panel_drv_data *ddata = platform_get_drvdata(pdev); struct omap_dss_device *dssdev = &ddata->dssdev; - omapdss_unregister_display(&ddata->dssdev); + omapdss_device_unregister(&ddata->dssdev); hdmic_disable(dssdev); - hdmic_disconnect(dssdev); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c index afee1b8b457a..4fefd80f53bb 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c @@ -23,75 +23,28 @@ struct panel_drv_data { struct omap_dss_device dssdev; - struct omap_dss_device *in; struct gpio_desc *enable_gpio; - - struct videomode vm; }; #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) -static int opa362_connect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static int opa362_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in; - int r; - - dev_dbg(dssdev->dev, "connect\n"); - - if (omapdss_device_is_connected(dssdev)) - return -EBUSY; - - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(in)) { - dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(in); - } - - r = in->ops.atv->connect(in, dssdev); - if (r) { - omap_dss_put_device(in); - return r; - } - - dst->src = dssdev; - dssdev->dst = dst; - - 
ddata->in = in; - return 0; + return omapdss_device_connect(dst->dss, dst, dst->next); } -static void opa362_disconnect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static void opa362_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - dev_dbg(dssdev->dev, "disconnect\n"); - - WARN_ON(!omapdss_device_is_connected(dssdev)); - if (!omapdss_device_is_connected(dssdev)) - return; - - WARN_ON(dst != dssdev->dst); - if (dst != dssdev->dst) - return; - - dst->src = NULL; - dssdev->dst = NULL; - - in->ops.atv->disconnect(in, &ddata->dssdev); - - omap_dss_put_device(in); - ddata->in = NULL; + omapdss_device_disconnect(dst, dst->next); } static int opa362_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; dev_dbg(dssdev->dev, "enable\n"); @@ -102,9 +55,7 @@ static int opa362_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - in->ops.atv->set_timings(in, &ddata->vm); - - r = in->ops.atv->enable(in); + r = src->ops->enable(src); if (r) return r; @@ -119,7 +70,7 @@ static int opa362_enable(struct omap_dss_device *dssdev) static void opa362_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; dev_dbg(dssdev->dev, "disable\n"); @@ -129,56 +80,16 @@ static void opa362_disable(struct omap_dss_device *dssdev) if (ddata->enable_gpio) gpiod_set_value_cansleep(ddata->enable_gpio, 0); - in->ops.atv->disable(in); + src->ops->disable(src); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void opa362_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - dev_dbg(dssdev->dev, "set_timings\n"); - - ddata->vm = *vm; - dssdev->panel.vm = *vm; - - in->ops.atv->set_timings(in, vm); -} - -static void opa362_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - - dev_dbg(dssdev->dev, "get_timings\n"); - - *vm = ddata->vm; -} - -static int opa362_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - dev_dbg(dssdev->dev, "check_timings\n"); - - return in->ops.atv->check_timings(in, vm); -} - -static const struct omapdss_atv_ops opa362_atv_ops = { +static const struct omap_dss_device_ops opa362_ops = { .connect = opa362_connect, .disconnect = opa362_disconnect, - .enable = opa362_enable, .disable = opa362_disable, - - .check_timings = opa362_check_timings, - .set_timings = opa362_set_timings, - .get_timings = opa362_get_timings, }; static int opa362_probe(struct platform_device *pdev) @@ -186,7 +97,6 @@ static int opa362_probe(struct platform_device *pdev) struct panel_drv_data *ddata; struct omap_dss_device *dssdev; struct gpio_desc *gpio; - int r; dev_dbg(&pdev->dev, "probe\n"); @@ -203,18 +113,22 @@ static int opa362_probe(struct platform_device *pdev) ddata->enable_gpio = gpio; dssdev = &ddata->dssdev; - dssdev->ops.atv = &opa362_atv_ops; + dssdev->ops = &opa362_ops; dssdev->dev = &pdev->dev; dssdev->type = OMAP_DISPLAY_TYPE_VENC; dssdev->output_type = 
OMAP_DISPLAY_TYPE_VENC; dssdev->owner = THIS_MODULE; + dssdev->of_ports = BIT(1) | BIT(0); - r = omapdss_register_output(dssdev); - if (r) { - dev_err(&pdev->dev, "Failed to register output\n"); - return r; + dssdev->next = omapdss_of_find_connected_device(pdev->dev.of_node, 1); + if (IS_ERR(dssdev->next)) { + if (PTR_ERR(dssdev->next) != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to find video sink\n"); + return PTR_ERR(dssdev->next); } + omapdss_device_register(dssdev); + return 0; } @@ -223,7 +137,9 @@ static int __exit opa362_remove(struct platform_device *pdev) struct panel_drv_data *ddata = platform_get_drvdata(pdev); struct omap_dss_device *dssdev = &ddata->dssdev; - omapdss_unregister_output(&ddata->dssdev); + if (dssdev->next) + omapdss_device_put(dssdev->next); + omapdss_device_unregister(&ddata->dssdev); WARN_ON(omapdss_device_is_enabled(dssdev)); if (omapdss_device_is_enabled(dssdev)) @@ -231,7 +147,7 @@ static int __exit opa362_remove(struct platform_device *pdev) WARN_ON(omapdss_device_is_connected(dssdev)); if (omapdss_device_is_connected(dssdev)) - opa362_disconnect(dssdev, dssdev->dst); + omapdss_device_disconnect(NULL, dssdev); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c index ed7ae384c3ed..f1a748353279 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c @@ -13,77 +13,33 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> -#include <linux/of_gpio.h> #include "../dss/omapdss.h" struct panel_drv_data { struct omap_dss_device dssdev; - struct omap_dss_device *in; - int pd_gpio; - - struct videomode vm; + struct gpio_desc *pd_gpio; }; #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) -static int tfp410_connect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static int tfp410_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in; - int r; - - if (omapdss_device_is_connected(dssdev)) - return -EBUSY; - - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(in)) { - dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(in); - } - - r = in->ops.dpi->connect(in, dssdev); - if (r) { - omap_dss_put_device(in); - return r; - } - - dst->src = dssdev; - dssdev->dst = dst; - - ddata->in = in; - return 0; + return omapdss_device_connect(dst->dss, dst, dst->next); } -static void tfp410_disconnect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static void tfp410_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - WARN_ON(!omapdss_device_is_connected(dssdev)); - if (!omapdss_device_is_connected(dssdev)) - return; - - WARN_ON(dst != dssdev->dst); - if (dst != dssdev->dst) - return; - - dst->src = NULL; - dssdev->dst = NULL; - - in->ops.dpi->disconnect(in, &ddata->dssdev); - - omap_dss_put_device(in); - ddata->in = NULL; + omapdss_device_disconnect(dst, dst->next); } static int tfp410_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; if (!omapdss_device_is_connected(dssdev)) @@ -92,14 +48,12 @@ static int tfp410_enable(struct omap_dss_device *dssdev) if 
(omapdss_device_is_enabled(dssdev)) return 0; - in->ops.dpi->set_timings(in, &ddata->vm); - - r = in->ops.dpi->enable(in); + r = src->ops->enable(src); if (r) return r; - if (gpio_is_valid(ddata->pd_gpio)) - gpio_set_value_cansleep(ddata->pd_gpio, 1); + if (ddata->pd_gpio) + gpiod_set_value_cansleep(ddata->pd_gpio, 0); dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; @@ -109,94 +63,31 @@ static int tfp410_enable(struct omap_dss_device *dssdev) static void tfp410_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; if (!omapdss_device_is_enabled(dssdev)) return; - if (gpio_is_valid(ddata->pd_gpio)) - gpio_set_value_cansleep(ddata->pd_gpio, 0); + if (ddata->pd_gpio) + gpiod_set_value_cansleep(ddata->pd_gpio, 0); - in->ops.dpi->disable(in); + src->ops->disable(src); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void tfp410_fix_timings(struct videomode *vm) -{ - vm->flags |= DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE | - DISPLAY_FLAGS_SYNC_POSEDGE; -} - -static void tfp410_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - tfp410_fix_timings(vm); - - ddata->vm = *vm; - dssdev->panel.vm = *vm; - - in->ops.dpi->set_timings(in, vm); -} - -static void tfp410_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - - *vm = ddata->vm; -} - -static int tfp410_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - tfp410_fix_timings(vm); - - return in->ops.dpi->check_timings(in, vm); -} - -static const struct omapdss_dvi_ops tfp410_dvi_ops = { +static const struct omap_dss_device_ops tfp410_ops = { .connect = tfp410_connect, .disconnect = tfp410_disconnect, - .enable = tfp410_enable, .disable = tfp410_disable, - - .check_timings = tfp410_check_timings, - .set_timings = tfp410_set_timings, - .get_timings = tfp410_get_timings, }; -static int tfp410_probe_of(struct platform_device *pdev) -{ - struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct device_node *node = pdev->dev.of_node; - int gpio; - - gpio = of_get_named_gpio(node, "powerdown-gpios", 0); - - if (gpio_is_valid(gpio) || gpio == -ENOENT) { - ddata->pd_gpio = gpio; - } else { - if (gpio != -EPROBE_DEFER) - dev_err(&pdev->dev, "failed to parse PD gpio\n"); - return gpio; - } - - return 0; -} - static int tfp410_probe(struct platform_device *pdev) { struct panel_drv_data *ddata; struct omap_dss_device *dssdev; - int r; + struct gpio_desc *gpio; ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); if (!ddata) @@ -204,34 +95,34 @@ static int tfp410_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ddata); - r = tfp410_probe_of(pdev); - if (r) - return r; - - if (gpio_is_valid(ddata->pd_gpio)) { - r = devm_gpio_request_one(&pdev->dev, ddata->pd_gpio, - GPIOF_OUT_INIT_LOW, "tfp410 PD"); - if (r) { - dev_err(&pdev->dev, "Failed to request PD GPIO %d\n", - ddata->pd_gpio); - return r; - } + /* Powerdown GPIO */ + gpio = devm_gpiod_get_optional(&pdev->dev, "powerdown", GPIOD_OUT_HIGH); + if (IS_ERR(gpio)) { + dev_err(&pdev->dev, "failed to parse powerdown gpio\n"); + return PTR_ERR(gpio); } + ddata->pd_gpio = gpio; + dssdev = &ddata->dssdev; - dssdev->ops.dvi = 
&tfp410_dvi_ops; + dssdev->ops = &tfp410_ops; dssdev->dev = &pdev->dev; dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->output_type = OMAP_DISPLAY_TYPE_DVI; dssdev->owner = THIS_MODULE; - dssdev->port_num = 1; - - r = omapdss_register_output(dssdev); - if (r) { - dev_err(&pdev->dev, "Failed to register output\n"); - return r; + dssdev->of_ports = BIT(1) | BIT(0); + dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE + | DRM_BUS_FLAG_PIXDATA_POSEDGE; + + dssdev->next = omapdss_of_find_connected_device(pdev->dev.of_node, 1); + if (IS_ERR(dssdev->next)) { + if (PTR_ERR(dssdev->next) != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to find video sink\n"); + return PTR_ERR(dssdev->next); } + omapdss_device_register(dssdev); + return 0; } @@ -240,7 +131,9 @@ static int __exit tfp410_remove(struct platform_device *pdev) struct panel_drv_data *ddata = platform_get_drvdata(pdev); struct omap_dss_device *dssdev = &ddata->dssdev; - omapdss_unregister_output(&ddata->dssdev); + if (dssdev->next) + omapdss_device_put(dssdev->next); + omapdss_device_unregister(&ddata->dssdev); WARN_ON(omapdss_device_is_enabled(dssdev)); if (omapdss_device_is_enabled(dssdev)) @@ -248,7 +141,7 @@ static int __exit tfp410_remove(struct platform_device *pdev) WARN_ON(omapdss_device_is_connected(dssdev)); if (omapdss_device_is_connected(dssdev)) - tfp410_disconnect(dssdev, dssdev->dst); + omapdss_device_disconnect(NULL, dssdev); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index d275bf152da5..94de55fd8884 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -21,42 +21,26 @@ struct panel_drv_data { struct omap_dss_device dssdev; - struct omap_dss_device *in; void (*hpd_cb)(void *cb_data, enum drm_connector_status status); void *hpd_cb_data; - bool hpd_enabled; struct mutex hpd_lock; struct gpio_desc *ct_cp_hpd_gpio; struct gpio_desc *ls_oe_gpio; struct gpio_desc *hpd_gpio; - - struct videomode vm; }; #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) -static int tpd_connect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static int tpd_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in; + struct panel_drv_data *ddata = to_panel_data(dst); int r; - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(in)) { - dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(in); - } - - r = in->ops.hdmi->connect(in, dssdev); - if (r) { - omap_dss_put_device(in); + r = omapdss_device_connect(dst->dss, dst, dst->next); + if (r) return r; - } - - dst->src = dssdev; - dssdev->dst = dst; gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 1); gpiod_set_value_cansleep(ddata->ls_oe_gpio, 1); @@ -64,45 +48,29 @@ static int tpd_connect(struct omap_dss_device *dssdev, /* DC-DC converter needs at max 300us to get to 90% of 5V */ udelay(300); - ddata->in = in; return 0; } -static void tpd_disconnect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static void tpd_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - WARN_ON(dst != dssdev->dst); - - if (dst != dssdev->dst) - return; + struct panel_drv_data *ddata = to_panel_data(dst); 
gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 0); gpiod_set_value_cansleep(ddata->ls_oe_gpio, 0); - dst->src = NULL; - dssdev->dst = NULL; - - in->ops.hdmi->disconnect(in, &ddata->dssdev); - - omap_dss_put_device(in); - ddata->in = NULL; + omapdss_device_disconnect(dst, dst->next); } static int tpd_enable(struct omap_dss_device *dssdev) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) return 0; - in->ops.hdmi->set_timings(in, &ddata->vm); - - r = in->ops.hdmi->enable(in); + r = src->ops->enable(src); if (r) return r; @@ -113,76 +81,27 @@ static int tpd_enable(struct omap_dss_device *dssdev) static void tpd_disable(struct omap_dss_device *dssdev) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) return; - in->ops.hdmi->disable(in); + src->ops->disable(src); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void tpd_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - ddata->vm = *vm; - dssdev->panel.vm = *vm; - - in->ops.hdmi->set_timings(in, vm); -} - -static void tpd_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - - *vm = ddata->vm; -} - -static int tpd_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - int r; - - r = in->ops.hdmi->check_timings(in, vm); - - return r; -} - -static int tpd_read_edid(struct omap_dss_device *dssdev, - u8 *edid, int len) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - if (!gpiod_get_value_cansleep(ddata->hpd_gpio)) - return -ENODEV; - - return in->ops.hdmi->read_edid(in, edid, len); -} - static bool tpd_detect(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - bool connected = gpiod_get_value_cansleep(ddata->hpd_gpio); - if (!connected && in->ops.hdmi->lost_hotplug) - in->ops.hdmi->lost_hotplug(in); - return connected; + return gpiod_get_value_cansleep(ddata->hpd_gpio); } -static int tpd_register_hpd_cb(struct omap_dss_device *dssdev, - void (*cb)(void *cb_data, +static void tpd_register_hpd_cb(struct omap_dss_device *dssdev, + void (*cb)(void *cb_data, enum drm_connector_status status), - void *cb_data) + void *cb_data) { struct panel_drv_data *ddata = to_panel_data(dssdev); @@ -190,8 +109,6 @@ static int tpd_register_hpd_cb(struct omap_dss_device *dssdev, ddata->hpd_cb = cb; ddata->hpd_cb_data = cb_data; mutex_unlock(&ddata->hpd_lock); - - return 0; } static void tpd_unregister_hpd_cb(struct omap_dss_device *dssdev) @@ -204,61 +121,14 @@ static void tpd_unregister_hpd_cb(struct omap_dss_device *dssdev) mutex_unlock(&ddata->hpd_lock); } -static void tpd_enable_hpd(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - - mutex_lock(&ddata->hpd_lock); - ddata->hpd_enabled = true; - mutex_unlock(&ddata->hpd_lock); -} - -static void tpd_disable_hpd(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - - mutex_lock(&ddata->hpd_lock); - 
ddata->hpd_enabled = false; - mutex_unlock(&ddata->hpd_lock); -} - -static int tpd_set_infoframe(struct omap_dss_device *dssdev, - const struct hdmi_avi_infoframe *avi) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - return in->ops.hdmi->set_infoframe(in, avi); -} - -static int tpd_set_hdmi_mode(struct omap_dss_device *dssdev, - bool hdmi_mode) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - return in->ops.hdmi->set_hdmi_mode(in, hdmi_mode); -} - -static const struct omapdss_hdmi_ops tpd_hdmi_ops = { +static const struct omap_dss_device_ops tpd_ops = { .connect = tpd_connect, .disconnect = tpd_disconnect, - .enable = tpd_enable, .disable = tpd_disable, - - .check_timings = tpd_check_timings, - .set_timings = tpd_set_timings, - .get_timings = tpd_get_timings, - - .read_edid = tpd_read_edid, .detect = tpd_detect, .register_hpd_cb = tpd_register_hpd_cb, .unregister_hpd_cb = tpd_unregister_hpd_cb, - .enable_hpd = tpd_enable_hpd, - .disable_hpd = tpd_disable_hpd, - .set_infoframe = tpd_set_infoframe, - .set_hdmi_mode = tpd_set_hdmi_mode, }; static irqreturn_t tpd_hpd_isr(int irq, void *data) @@ -266,7 +136,7 @@ static irqreturn_t tpd_hpd_isr(int irq, void *data) struct panel_drv_data *ddata = data; mutex_lock(&ddata->hpd_lock); - if (ddata->hpd_enabled && ddata->hpd_cb) { + if (ddata->hpd_cb) { enum drm_connector_status status; if (tpd_detect(&ddata->dssdev)) @@ -283,7 +153,7 @@ static irqreturn_t tpd_hpd_isr(int irq, void *data) static int tpd_probe(struct platform_device *pdev) { - struct omap_dss_device *in, *dssdev; + struct omap_dss_device *dssdev; struct panel_drv_data *ddata; int r; struct gpio_desc *gpio; @@ -325,21 +195,24 @@ static int tpd_probe(struct platform_device *pdev) return r; dssdev = &ddata->dssdev; - dssdev->ops.hdmi = &tpd_hdmi_ops; + dssdev->ops = &tpd_ops; dssdev->dev = &pdev->dev; dssdev->type = OMAP_DISPLAY_TYPE_HDMI; dssdev->output_type = OMAP_DISPLAY_TYPE_HDMI; dssdev->owner = THIS_MODULE; - dssdev->port_num = 1; - - in = ddata->in; - - r = omapdss_register_output(dssdev); - if (r) { - dev_err(&pdev->dev, "Failed to register output\n"); - return r; + dssdev->of_ports = BIT(1) | BIT(0); + dssdev->ops_flags = OMAP_DSS_DEVICE_OP_DETECT + | OMAP_DSS_DEVICE_OP_HPD; + + dssdev->next = omapdss_of_find_connected_device(pdev->dev.of_node, 1); + if (IS_ERR(dssdev->next)) { + if (PTR_ERR(dssdev->next) != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to find video sink\n"); + return PTR_ERR(dssdev->next); } + omapdss_device_register(dssdev); + return 0; } @@ -348,7 +221,9 @@ static int __exit tpd_remove(struct platform_device *pdev) struct panel_drv_data *ddata = platform_get_drvdata(pdev); struct omap_dss_device *dssdev = &ddata->dssdev; - omapdss_unregister_output(&ddata->dssdev); + if (dssdev->next) + omapdss_device_put(dssdev->next); + omapdss_device_unregister(&ddata->dssdev); WARN_ON(omapdss_device_is_enabled(dssdev)); if (omapdss_device_is_enabled(dssdev)) @@ -356,7 +231,7 @@ static int __exit tpd_remove(struct platform_device *pdev) WARN_ON(omapdss_device_is_connected(dssdev)); if (omapdss_device_is_connected(dssdev)) - tpd_disconnect(dssdev, dssdev->dst); + omapdss_device_disconnect(NULL, dssdev); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c index 6cbf570d6727..1f8161b041be 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c @@ 
-23,7 +23,6 @@ struct panel_drv_data { struct omap_dss_device dssdev; - struct omap_dss_device *in; struct videomode vm; @@ -35,49 +34,21 @@ struct panel_drv_data { #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) -static int panel_dpi_connect(struct omap_dss_device *dssdev) +static int panel_dpi_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in; - int r; - - if (omapdss_device_is_connected(dssdev)) - return 0; - - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(in)) { - dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(in); - } - - r = in->ops.dpi->connect(in, dssdev); - if (r) { - omap_dss_put_device(in); - return r; - } - - ddata->in = in; return 0; } -static void panel_dpi_disconnect(struct omap_dss_device *dssdev) +static void panel_dpi_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - if (!omapdss_device_is_connected(dssdev)) - return; - - in->ops.dpi->disconnect(in, dssdev); - - omap_dss_put_device(in); - ddata->in = NULL; } static int panel_dpi_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; if (!omapdss_device_is_connected(dssdev)) @@ -86,15 +57,13 @@ static int panel_dpi_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - in->ops.dpi->set_timings(in, &ddata->vm); - - r = in->ops.dpi->enable(in); + r = src->ops->enable(src); if (r) return r; r = regulator_enable(ddata->vcc_supply); if (r) { - in->ops.dpi->disable(in); + src->ops->disable(src); return r; } @@ -109,7 +78,7 @@ static int panel_dpi_enable(struct omap_dss_device *dssdev) static void panel_dpi_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; if (!omapdss_device_is_enabled(dssdev)) return; @@ -119,23 +88,11 @@ static void panel_dpi_disable(struct omap_dss_device *dssdev) gpiod_set_value_cansleep(ddata->enable_gpio, 0); regulator_disable(ddata->vcc_supply); - in->ops.dpi->disable(in); + src->ops->disable(src); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void panel_dpi_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - ddata->vm = *vm; - dssdev->panel.vm = *vm; - - in->ops.dpi->set_timings(in, vm); -} - static void panel_dpi_get_timings(struct omap_dss_device *dssdev, struct videomode *vm) { @@ -144,25 +101,14 @@ static void panel_dpi_get_timings(struct omap_dss_device *dssdev, *vm = ddata->vm; } -static int panel_dpi_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - return in->ops.dpi->check_timings(in, vm); -} - -static struct omap_dss_driver panel_dpi_ops = { +static const struct omap_dss_device_ops panel_dpi_ops = { .connect = panel_dpi_connect, .disconnect = panel_dpi_disconnect, .enable = panel_dpi_enable, .disable = panel_dpi_disable, - .set_timings = panel_dpi_set_timings, .get_timings = panel_dpi_get_timings, - .check_timings = panel_dpi_check_timings, }; 
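The panel_dpi_ops table above shows the shape all of these drivers converge on: connect/disconnect become (nearly) empty, timing propagation moves out of the panel, and enable/disable simply drive the upstream device reachable through dssdev->src. A rough sketch of that shared enable path, built only from the fields shown in these hunks; the function name and the panel-specific step are placeholders, not the driver's actual code:

/* Sketch of the common enable pattern under the new omap_dss_device_ops. */
static int example_panel_enable(struct omap_dss_device *dssdev)
{
	struct omap_dss_device *src = dssdev->src;	/* upstream DPI/DSI/HDMI output */
	int r;

	r = src->ops->enable(src);	/* power up the source first */
	if (r)
		return r;

	/* panel-specific power-up (regulators, enable GPIO, ...) would go here */

	dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
	return 0;
}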
static int panel_dpi_probe_of(struct platform_device *pdev) @@ -227,16 +173,13 @@ static int panel_dpi_probe(struct platform_device *pdev) dssdev = &ddata->dssdev; dssdev->dev = &pdev->dev; - dssdev->driver = &panel_dpi_ops; + dssdev->ops = &panel_dpi_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->owner = THIS_MODULE; - dssdev->panel.vm = ddata->vm; + dssdev->of_ports = BIT(0); - r = omapdss_register_display(dssdev); - if (r) { - dev_err(&pdev->dev, "Failed to register panel\n"); - return r; - } + omapdss_display_init(dssdev); + omapdss_device_register(dssdev); return 0; } @@ -246,10 +189,9 @@ static int __exit panel_dpi_remove(struct platform_device *pdev) struct panel_drv_data *ddata = platform_get_drvdata(pdev); struct omap_dss_device *dssdev = &ddata->dssdev; - omapdss_unregister_display(dssdev); + omapdss_device_unregister(dssdev); panel_dpi_disable(dssdev); - panel_dpi_disconnect(dssdev); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index 428de90fced1..29692a5217c5 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -41,7 +41,6 @@ struct panel_drv_data { struct omap_dss_device dssdev; - struct omap_dss_device *in; struct videomode vm; @@ -142,11 +141,11 @@ static void hw_guard_wait(struct panel_drv_data *ddata) static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data) { - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; int r; u8 buf[1]; - r = in->ops.dsi->dcs_read(in, ddata->channel, dcs_cmd, buf, 1); + r = src->ops->dsi.dcs_read(src, ddata->channel, dcs_cmd, buf, 1); if (r < 0) return r; @@ -158,29 +157,30 @@ static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data) static int dsicm_dcs_write_0(struct panel_drv_data *ddata, u8 dcs_cmd) { - struct omap_dss_device *in = ddata->in; - return in->ops.dsi->dcs_write(in, ddata->channel, &dcs_cmd, 1); + struct omap_dss_device *src = ddata->dssdev.src; + + return src->ops->dsi.dcs_write(src, ddata->channel, &dcs_cmd, 1); } static int dsicm_dcs_write_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 param) { - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; u8 buf[2] = { dcs_cmd, param }; - return in->ops.dsi->dcs_write(in, ddata->channel, buf, 2); + return src->ops->dsi.dcs_write(src, ddata->channel, buf, 2); } static int dsicm_sleep_in(struct panel_drv_data *ddata) { - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; u8 cmd; int r; hw_guard_wait(ddata); cmd = MIPI_DCS_ENTER_SLEEP_MODE; - r = in->ops.dsi->dcs_write_nosync(in, ddata->channel, &cmd, 1); + r = src->ops->dsi.dcs_write_nosync(src, ddata->channel, &cmd, 1); if (r) return r; @@ -228,7 +228,7 @@ static int dsicm_get_id(struct panel_drv_data *ddata, u8 *id1, u8 *id2, u8 *id3) static int dsicm_set_update_window(struct panel_drv_data *ddata, u16 x, u16 y, u16 w, u16 h) { - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; int r; u16 x1 = x; u16 x2 = x + w - 1; @@ -242,7 +242,7 @@ static int dsicm_set_update_window(struct panel_drv_data *ddata, buf[3] = (x2 >> 8) & 0xff; buf[4] = (x2 >> 0) & 0xff; - r = in->ops.dsi->dcs_write_nosync(in, ddata->channel, buf, sizeof(buf)); + r = src->ops->dsi.dcs_write_nosync(src, ddata->channel, buf, sizeof(buf)); if (r) return r; @@ -252,11 +252,11 @@ static int dsicm_set_update_window(struct 
panel_drv_data *ddata, buf[3] = (y2 >> 8) & 0xff; buf[4] = (y2 >> 0) & 0xff; - r = in->ops.dsi->dcs_write_nosync(in, ddata->channel, buf, sizeof(buf)); + r = src->ops->dsi.dcs_write_nosync(src, ddata->channel, buf, sizeof(buf)); if (r) return r; - in->ops.dsi->bta_sync(in, ddata->channel); + src->ops->dsi.bta_sync(src, ddata->channel); return r; } @@ -275,7 +275,7 @@ static void dsicm_cancel_ulps_work(struct panel_drv_data *ddata) static int dsicm_enter_ulps(struct panel_drv_data *ddata) { - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; int r; if (ddata->ulps_enabled) @@ -290,7 +290,7 @@ static int dsicm_enter_ulps(struct panel_drv_data *ddata) if (ddata->ext_te_gpio) disable_irq(gpiod_to_irq(ddata->ext_te_gpio)); - in->ops.dsi->disable(in, false, true); + src->ops->dsi.disable(src, false, true); ddata->ulps_enabled = true; @@ -309,19 +309,19 @@ err: static int dsicm_exit_ulps(struct panel_drv_data *ddata) { - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; int r; if (!ddata->ulps_enabled) return 0; - r = in->ops.dsi->enable(in); + r = src->ops->enable(src); if (r) { dev_err(&ddata->pdev->dev, "failed to enable DSI\n"); goto err1; } - in->ops.dsi->enable_hs(in, ddata->channel, true); + src->ops->dsi.enable_hs(src, ddata->channel, true); r = _dsicm_enable_te(ddata, true); if (r) { @@ -366,7 +366,7 @@ static int dsicm_wake_up(struct panel_drv_data *ddata) static int dsicm_bl_update_status(struct backlight_device *dev) { struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; int r = 0; int level; @@ -381,13 +381,13 @@ static int dsicm_bl_update_status(struct backlight_device *dev) mutex_lock(&ddata->lock); if (ddata->enabled) { - in->ops.dsi->bus_lock(in); + src->ops->dsi.bus_lock(src); r = dsicm_wake_up(ddata); if (!r) r = dsicm_dcs_write_1(ddata, DCS_BRIGHTNESS, level); - in->ops.dsi->bus_unlock(in); + src->ops->dsi.bus_unlock(src); } mutex_unlock(&ddata->lock); @@ -414,21 +414,21 @@ static ssize_t dsicm_num_errors_show(struct device *dev, { struct platform_device *pdev = to_platform_device(dev); struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; u8 errors = 0; int r; mutex_lock(&ddata->lock); if (ddata->enabled) { - in->ops.dsi->bus_lock(in); + src->ops->dsi.bus_lock(src); r = dsicm_wake_up(ddata); if (!r) r = dsicm_dcs_read_1(ddata, DCS_READ_NUM_ERRORS, &errors); - in->ops.dsi->bus_unlock(in); + src->ops->dsi.bus_unlock(src); } else { r = -ENODEV; } @@ -446,20 +446,20 @@ static ssize_t dsicm_hw_revision_show(struct device *dev, { struct platform_device *pdev = to_platform_device(dev); struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; u8 id1, id2, id3; int r; mutex_lock(&ddata->lock); if (ddata->enabled) { - in->ops.dsi->bus_lock(in); + src->ops->dsi.bus_lock(src); r = dsicm_wake_up(ddata); if (!r) r = dsicm_get_id(ddata, &id1, &id2, &id3); - in->ops.dsi->bus_unlock(in); + src->ops->dsi.bus_unlock(src); } else { r = -ENODEV; } @@ -478,7 +478,7 @@ static ssize_t dsicm_store_ulps(struct device *dev, { struct platform_device *pdev = to_platform_device(dev); struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; 
unsigned long t; int r; @@ -489,14 +489,14 @@ static ssize_t dsicm_store_ulps(struct device *dev, mutex_lock(&ddata->lock); if (ddata->enabled) { - in->ops.dsi->bus_lock(in); + src->ops->dsi.bus_lock(src); if (t) r = dsicm_enter_ulps(ddata); else r = dsicm_wake_up(ddata); - in->ops.dsi->bus_unlock(in); + src->ops->dsi.bus_unlock(src); } mutex_unlock(&ddata->lock); @@ -528,7 +528,7 @@ static ssize_t dsicm_store_ulps_timeout(struct device *dev, { struct platform_device *pdev = to_platform_device(dev); struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; unsigned long t; int r; @@ -541,9 +541,9 @@ static ssize_t dsicm_store_ulps_timeout(struct device *dev, if (ddata->enabled) { /* dsicm_wake_up will restart the timer */ - in->ops.dsi->bus_lock(in); + src->ops->dsi.bus_lock(src); r = dsicm_wake_up(ddata); - in->ops.dsi->bus_unlock(in); + src->ops->dsi.bus_unlock(src); } mutex_unlock(&ddata->lock); @@ -603,7 +603,7 @@ static void dsicm_hw_reset(struct panel_drv_data *ddata) static int dsicm_power_on(struct panel_drv_data *ddata) { - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; u8 id1, id2, id3; int r; struct omap_dss_dsi_config dsi_config = { @@ -635,7 +635,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata) } if (ddata->pin_config.num_pins > 0) { - r = in->ops.dsi->configure_pins(in, &ddata->pin_config); + r = src->ops->dsi.configure_pins(src, &ddata->pin_config); if (r) { dev_err(&ddata->pdev->dev, "failed to configure DSI pins\n"); @@ -643,13 +643,13 @@ static int dsicm_power_on(struct panel_drv_data *ddata) } } - r = in->ops.dsi->set_config(in, &dsi_config); + r = src->ops->dsi.set_config(src, &dsi_config); if (r) { dev_err(&ddata->pdev->dev, "failed to configure DSI\n"); goto err_vddi; } - r = in->ops.dsi->enable(in); + r = src->ops->enable(src); if (r) { dev_err(&ddata->pdev->dev, "failed to enable DSI\n"); goto err_vddi; @@ -657,7 +657,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata) dsicm_hw_reset(ddata); - in->ops.dsi->enable_hs(in, ddata->channel, false); + src->ops->dsi.enable_hs(src, ddata->channel, false); r = dsicm_sleep_out(ddata); if (r) @@ -689,7 +689,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata) if (r) goto err; - r = in->ops.dsi->enable_video_output(in, ddata->channel); + r = src->ops->dsi.enable_video_output(src, ddata->channel); if (r) goto err; @@ -701,7 +701,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata) ddata->intro_printed = true; } - in->ops.dsi->enable_hs(in, ddata->channel, true); + src->ops->dsi.enable_hs(src, ddata->channel, true); return 0; err: @@ -709,7 +709,7 @@ err: dsicm_hw_reset(ddata); - in->ops.dsi->disable(in, true, false); + src->ops->dsi.disable(src, true, false); err_vddi: if (ddata->vddi) regulator_disable(ddata->vddi); @@ -722,10 +722,10 @@ err_vpnl: static void dsicm_power_off(struct panel_drv_data *ddata) { - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; int r; - in->ops.dsi->disable_video_output(in, ddata->channel); + src->ops->dsi.disable_video_output(src, ddata->channel); r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_DISPLAY_OFF); if (!r) @@ -737,7 +737,7 @@ static void dsicm_power_off(struct panel_drv_data *ddata) dsicm_hw_reset(ddata); } - in->ops.dsi->disable(in, true, false); + src->ops->dsi.disable(src, true, false); if (ddata->vddi) regulator_disable(ddata->vddi); @@ -756,71 +756,41 @@ static int 
dsicm_panel_reset(struct panel_drv_data *ddata) return dsicm_power_on(ddata); } -static int dsicm_connect(struct omap_dss_device *dssdev) +static int dsicm_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); + struct panel_drv_data *ddata = to_panel_data(dst); struct device *dev = &ddata->pdev->dev; - struct omap_dss_device *in; int r; - if (omapdss_device_is_connected(dssdev)) - return 0; - - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(in)) { - dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(in); - } - - r = in->ops.dsi->connect(in, dssdev); - if (r) { - dev_err(dev, "Failed to connect to video source\n"); - goto err_connect; - } - - r = in->ops.dsi->request_vc(in, &ddata->channel); + r = src->ops->dsi.request_vc(src, &ddata->channel); if (r) { dev_err(dev, "failed to get virtual channel\n"); - goto err_req_vc; + return r; } - r = in->ops.dsi->set_vc_id(in, ddata->channel, TCH); + r = src->ops->dsi.set_vc_id(src, ddata->channel, TCH); if (r) { dev_err(dev, "failed to set VC_ID\n"); - goto err_vc_id; + src->ops->dsi.release_vc(src, ddata->channel); + return r; } - ddata->in = in; return 0; - -err_vc_id: - in->ops.dsi->release_vc(in, ddata->channel); -err_req_vc: - in->ops.dsi->disconnect(in, dssdev); -err_connect: - omap_dss_put_device(in); - return r; } -static void dsicm_disconnect(struct omap_dss_device *dssdev) +static void dsicm_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - if (!omapdss_device_is_connected(dssdev)) - return; - - in->ops.dsi->release_vc(in, ddata->channel); - in->ops.dsi->disconnect(in, dssdev); + struct panel_drv_data *ddata = to_panel_data(dst); - omap_dss_put_device(in); - ddata->in = NULL; + src->ops->dsi.release_vc(src, ddata->channel); } static int dsicm_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; dev_dbg(&ddata->pdev->dev, "enable\n"); @@ -837,11 +807,11 @@ static int dsicm_enable(struct omap_dss_device *dssdev) goto err; } - in->ops.dsi->bus_lock(in); + src->ops->dsi.bus_lock(src); r = dsicm_power_on(ddata); - in->ops.dsi->bus_unlock(in); + src->ops->dsi.bus_unlock(src); if (r) goto err; @@ -862,7 +832,7 @@ err: static void dsicm_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; dev_dbg(&ddata->pdev->dev, "disable\n"); @@ -873,7 +843,7 @@ static void dsicm_disable(struct omap_dss_device *dssdev) dsicm_cancel_ulps_work(ddata); - in->ops.dsi->bus_lock(in); + src->ops->dsi.bus_lock(src); if (omapdss_device_is_enabled(dssdev)) { r = dsicm_wake_up(ddata); @@ -881,7 +851,7 @@ static void dsicm_disable(struct omap_dss_device *dssdev) dsicm_power_off(ddata); } - in->ops.dsi->bus_unlock(in); + src->ops->dsi.bus_unlock(src); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; @@ -891,16 +861,16 @@ static void dsicm_disable(struct omap_dss_device *dssdev) static void dsicm_framedone_cb(int err, void *data) { struct panel_drv_data *ddata = data; - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; dev_dbg(&ddata->pdev->dev, "framedone, err %d\n", err); - in->ops.dsi->bus_unlock(ddata->in); + 
src->ops->dsi.bus_unlock(src); } static irqreturn_t dsicm_te_isr(int irq, void *data) { struct panel_drv_data *ddata = data; - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; int old; int r; @@ -909,7 +879,7 @@ static irqreturn_t dsicm_te_isr(int irq, void *data) if (old) { cancel_delayed_work(&ddata->te_timeout_work); - r = in->ops.dsi->update(in, ddata->channel, dsicm_framedone_cb, + r = src->ops->dsi.update(src, ddata->channel, dsicm_framedone_cb, ddata); if (r) goto err; @@ -918,7 +888,7 @@ static irqreturn_t dsicm_te_isr(int irq, void *data) return IRQ_HANDLED; err: dev_err(&ddata->pdev->dev, "start update failed\n"); - in->ops.dsi->bus_unlock(in); + src->ops->dsi.bus_unlock(src); return IRQ_HANDLED; } @@ -926,25 +896,25 @@ static void dsicm_te_timeout_work_callback(struct work_struct *work) { struct panel_drv_data *ddata = container_of(work, struct panel_drv_data, te_timeout_work.work); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; dev_err(&ddata->pdev->dev, "TE not received for 250ms!\n"); atomic_set(&ddata->do_update, 0); - in->ops.dsi->bus_unlock(in); + src->ops->dsi.bus_unlock(src); } static int dsicm_update(struct omap_dss_device *dssdev, u16 x, u16 y, u16 w, u16 h) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; dev_dbg(&ddata->pdev->dev, "update %d, %d, %d x %d\n", x, y, w, h); mutex_lock(&ddata->lock); - in->ops.dsi->bus_lock(in); + src->ops->dsi.bus_lock(src); r = dsicm_wake_up(ddata); if (r) @@ -956,9 +926,8 @@ static int dsicm_update(struct omap_dss_device *dssdev, } /* XXX no need to send this every frame, but dsi break if not done */ - r = dsicm_set_update_window(ddata, 0, 0, - dssdev->panel.vm.hactive, - dssdev->panel.vm.vactive); + r = dsicm_set_update_window(ddata, 0, 0, ddata->vm.hactive, + ddata->vm.vactive); if (r) goto err; @@ -967,17 +936,17 @@ static int dsicm_update(struct omap_dss_device *dssdev, msecs_to_jiffies(250)); atomic_set(&ddata->do_update, 1); } else { - r = in->ops.dsi->update(in, ddata->channel, dsicm_framedone_cb, + r = src->ops->dsi.update(src, ddata->channel, dsicm_framedone_cb, ddata); if (r) goto err; } - /* note: no bus_unlock here. unlock is in framedone_cb */ + /* note: no bus_unlock here. 
unlock is src framedone_cb */ mutex_unlock(&ddata->lock); return 0; err: - in->ops.dsi->bus_unlock(in); + src->ops->dsi.bus_unlock(src); mutex_unlock(&ddata->lock); return r; } @@ -985,13 +954,13 @@ err: static int dsicm_sync(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; dev_dbg(&ddata->pdev->dev, "sync\n"); mutex_lock(&ddata->lock); - in->ops.dsi->bus_lock(in); - in->ops.dsi->bus_unlock(in); + src->ops->dsi.bus_lock(src); + src->ops->dsi.bus_unlock(src); mutex_unlock(&ddata->lock); dev_dbg(&ddata->pdev->dev, "sync done\n"); @@ -1001,7 +970,7 @@ static int dsicm_sync(struct omap_dss_device *dssdev) static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable) { - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = ddata->dssdev.src; int r; if (enable) @@ -1010,7 +979,7 @@ static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable) r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_TEAR_OFF); if (!ddata->ext_te_gpio) - in->ops.dsi->enable_te(in, enable); + src->ops->dsi.enable_te(src, enable); /* possible panel bug */ msleep(100); @@ -1021,7 +990,7 @@ static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable) static int dsicm_enable_te(struct omap_dss_device *dssdev, bool enable) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; mutex_lock(&ddata->lock); @@ -1029,7 +998,7 @@ static int dsicm_enable_te(struct omap_dss_device *dssdev, bool enable) if (ddata->te_enabled == enable) goto end; - in->ops.dsi->bus_lock(in); + src->ops->dsi.bus_lock(src); if (ddata->enabled) { r = dsicm_wake_up(ddata); @@ -1043,13 +1012,13 @@ static int dsicm_enable_te(struct omap_dss_device *dssdev, bool enable) ddata->te_enabled = enable; - in->ops.dsi->bus_unlock(in); + src->ops->dsi.bus_unlock(src); end: mutex_unlock(&ddata->lock); return 0; err: - in->ops.dsi->bus_unlock(in); + src->ops->dsi.bus_unlock(src); mutex_unlock(&ddata->lock); return r; @@ -1072,7 +1041,7 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev, u16 x, u16 y, u16 w, u16 h) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; int first = 1; int plen; @@ -1089,9 +1058,9 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev, } size = min((u32)w * h * 3, - dssdev->panel.vm.hactive * dssdev->panel.vm.vactive * 3); + ddata->vm.hactive * ddata->vm.vactive * 3); - in->ops.dsi->bus_lock(in); + src->ops->dsi.bus_lock(src); r = dsicm_wake_up(ddata); if (r) @@ -1107,7 +1076,7 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev, dsicm_set_update_window(ddata, x, y, w, h); - r = in->ops.dsi->set_max_rx_packet_size(in, ddata->channel, plen); + r = src->ops->dsi.set_max_rx_packet_size(src, ddata->channel, plen); if (r) goto err2; @@ -1115,7 +1084,7 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev, u8 dcs_cmd = first ? 
0x2e : 0x3e; first = 0; - r = in->ops.dsi->dcs_read(in, ddata->channel, dcs_cmd, + r = src->ops->dsi.dcs_read(src, ddata->channel, dcs_cmd, buf + buf_used, size - buf_used); if (r < 0) { @@ -1141,9 +1110,9 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev, r = buf_used; err3: - in->ops.dsi->set_max_rx_packet_size(in, ddata->channel, 1); + src->ops->dsi.set_max_rx_packet_size(src, ddata->channel, 1); err2: - in->ops.dsi->bus_unlock(in); + src->ops->dsi.bus_unlock(src); err1: mutex_unlock(&ddata->lock); return r; @@ -1154,7 +1123,7 @@ static void dsicm_ulps_work(struct work_struct *work) struct panel_drv_data *ddata = container_of(work, struct panel_drv_data, ulps_work.work); struct omap_dss_device *dssdev = &ddata->dssdev; - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; mutex_lock(&ddata->lock); @@ -1163,11 +1132,11 @@ static void dsicm_ulps_work(struct work_struct *work) return; } - in->ops.dsi->bus_lock(in); + src->ops->dsi.bus_lock(src); dsicm_enter_ulps(ddata); - in->ops.dsi->bus_unlock(in); + src->ops->dsi.bus_unlock(src); mutex_unlock(&ddata->lock); } @@ -1210,18 +1179,21 @@ static void dsicm_get_size(struct omap_dss_device *dssdev, *height = ddata->height_mm; } -static struct omap_dss_driver dsicm_ops = { +static const struct omap_dss_device_ops dsicm_ops = { .connect = dsicm_connect, .disconnect = dsicm_disconnect, .enable = dsicm_enable, .disable = dsicm_disable, + .get_timings = dsicm_get_timings, + .check_timings = dsicm_check_timings, +}; + +static const struct omap_dss_driver dsicm_dss_driver = { .update = dsicm_update, .sync = dsicm_sync, - .get_timings = dsicm_get_timings, - .check_timings = dsicm_check_timings, .get_size = dsicm_get_size, .enable_te = dsicm_enable_te, @@ -1330,20 +1302,17 @@ static int dsicm_probe(struct platform_device *pdev) dssdev = &ddata->dssdev; dssdev->dev = dev; - dssdev->driver = &dsicm_ops; - dssdev->panel.vm = ddata->vm; + dssdev->ops = &dsicm_ops; + dssdev->driver = &dsicm_dss_driver; dssdev->type = OMAP_DISPLAY_TYPE_DSI; dssdev->owner = THIS_MODULE; + dssdev->of_ports = BIT(0); - dssdev->panel.dsi_pix_fmt = OMAP_DSS_DSI_FMT_RGB888; dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE | OMAP_DSS_DISPLAY_CAP_TEAR_ELIM; - r = omapdss_register_display(dssdev); - if (r) { - dev_err(dev, "Failed to register panel\n"); - goto err_reg; - } + omapdss_display_init(dssdev); + omapdss_device_register(dssdev); mutex_init(&ddata->lock); @@ -1414,10 +1383,10 @@ static int __exit dsicm_remove(struct platform_device *pdev) dev_dbg(&pdev->dev, "remove\n"); - omapdss_unregister_display(dssdev); + omapdss_device_unregister(dssdev); dsicm_disable(dssdev); - dsicm_disconnect(dssdev); + omapdss_device_disconnect(dssdev->src, dssdev); sysfs_remove_group(&pdev->dev.kobj, &dsicm_attr_group); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c index 754197099440..f6ef8ff964dd 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c @@ -33,19 +33,11 @@ static const struct videomode lb035q02_vm = { .vfront_porch = 4, .vback_porch = 18, - .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW | - DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_NEGEDGE | - DISPLAY_FLAGS_PIXDATA_POSEDGE, - /* - * Note: According to the panel documentation: - * DE is active LOW - * DATA needs to be driven on the FALLING edge - */ + .flags = DISPLAY_FLAGS_HSYNC_LOW | 
DISPLAY_FLAGS_VSYNC_LOW, }; struct panel_drv_data { struct omap_dss_device dssdev; - struct omap_dss_device *in; struct spi_device *spi; @@ -116,51 +108,25 @@ static void init_lb035q02_panel(struct spi_device *spi) lb035q02_write_reg(spi, 0x3b, 0x0806); } -static int lb035q02_connect(struct omap_dss_device *dssdev) +static int lb035q02_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in; - int r; - - if (omapdss_device_is_connected(dssdev)) - return 0; - - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(in)) { - dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(in); - } - - r = in->ops.dpi->connect(in, dssdev); - if (r) { - omap_dss_put_device(in); - return r; - } + struct panel_drv_data *ddata = to_panel_data(dst); init_lb035q02_panel(ddata->spi); - ddata->in = in; return 0; } -static void lb035q02_disconnect(struct omap_dss_device *dssdev) +static void lb035q02_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - if (!omapdss_device_is_connected(dssdev)) - return; - - in->ops.dpi->disconnect(in, dssdev); - - omap_dss_put_device(in); - ddata->in = NULL; } static int lb035q02_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; if (!omapdss_device_is_connected(dssdev)) @@ -169,9 +135,7 @@ static int lb035q02_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - in->ops.dpi->set_timings(in, &ddata->vm); - - r = in->ops.dpi->enable(in); + r = src->ops->enable(src); if (r) return r; @@ -186,7 +150,7 @@ static int lb035q02_enable(struct omap_dss_device *dssdev) static void lb035q02_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; if (!omapdss_device_is_enabled(dssdev)) return; @@ -194,23 +158,11 @@ static void lb035q02_disable(struct omap_dss_device *dssdev) if (ddata->enable_gpio) gpiod_set_value_cansleep(ddata->enable_gpio, 0); - in->ops.dpi->disable(in); + src->ops->disable(src); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void lb035q02_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - ddata->vm = *vm; - dssdev->panel.vm = *vm; - - in->ops.dpi->set_timings(in, vm); -} - static void lb035q02_get_timings(struct omap_dss_device *dssdev, struct videomode *vm) { @@ -219,25 +171,14 @@ static void lb035q02_get_timings(struct omap_dss_device *dssdev, *vm = ddata->vm; } -static int lb035q02_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - return in->ops.dpi->check_timings(in, vm); -} - -static struct omap_dss_driver lb035q02_ops = { +static const struct omap_dss_device_ops lb035q02_ops = { .connect = lb035q02_connect, .disconnect = lb035q02_disconnect, .enable = lb035q02_enable, .disable = lb035q02_disable, - .set_timings = lb035q02_set_timings, .get_timings = lb035q02_get_timings, - .check_timings = lb035q02_check_timings, }; static int lb035q02_probe_of(struct spi_device 
*spi) @@ -278,16 +219,21 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi) dssdev = &ddata->dssdev; dssdev->dev = &spi->dev; - dssdev->driver = &lb035q02_ops; + dssdev->ops = &lb035q02_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->owner = THIS_MODULE; - dssdev->panel.vm = ddata->vm; + dssdev->of_ports = BIT(0); - r = omapdss_register_display(dssdev); - if (r) { - dev_err(&spi->dev, "Failed to register panel\n"); - return r; - } + /* + * Note: According to the panel documentation: + * DE is active LOW + * DATA needs to be driven on the FALLING edge + */ + dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_NEGEDGE + | DRM_BUS_FLAG_PIXDATA_POSEDGE; + + omapdss_display_init(dssdev); + omapdss_device_register(dssdev); return 0; } @@ -297,10 +243,9 @@ static int lb035q02_panel_spi_remove(struct spi_device *spi) struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); struct omap_dss_device *dssdev = &ddata->dssdev; - omapdss_unregister_display(dssdev); + omapdss_device_unregister(dssdev); lb035q02_disable(dssdev); - lb035q02_disconnect(dssdev); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index 9a3b27fa5cb5..f445de6369f7 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c @@ -11,22 +11,19 @@ * (at your option) any later version. */ -#include <linux/module.h> #include <linux/delay.h> -#include <linux/spi/spi.h> #include <linux/gpio/consumer.h> -#include <linux/of_gpio.h> +#include <linux/module.h> +#include <linux/spi/spi.h> #include "../dss/omapdss.h" struct panel_drv_data { struct omap_dss_device dssdev; - struct omap_dss_device *in; struct videomode vm; - int res_gpio; - int qvga_gpio; + struct gpio_desc *res_gpio; struct spi_device *spi; }; @@ -74,9 +71,7 @@ static const struct videomode nec_8048_panel_vm = { .vsync_len = 1, .vback_porch = 4, - .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW | - DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE | - DISPLAY_FLAGS_PIXDATA_POSEDGE, + .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW, }; #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) @@ -112,49 +107,21 @@ static int init_nec_8048_wvga_lcd(struct spi_device *spi) return 0; } -static int nec_8048_connect(struct omap_dss_device *dssdev) +static int nec_8048_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in; - int r; - - if (omapdss_device_is_connected(dssdev)) - return 0; - - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(in)) { - dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(in); - } - - r = in->ops.dpi->connect(in, dssdev); - if (r) { - omap_dss_put_device(in); - return r; - } - - ddata->in = in; return 0; } -static void nec_8048_disconnect(struct omap_dss_device *dssdev) +static void nec_8048_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - if (!omapdss_device_is_connected(dssdev)) - return; - - in->ops.dpi->disconnect(in, dssdev); - - omap_dss_put_device(in); - ddata->in = NULL; } static int nec_8048_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = 
dssdev->src; int r; if (!omapdss_device_is_connected(dssdev)) @@ -163,14 +130,11 @@ static int nec_8048_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - in->ops.dpi->set_timings(in, &ddata->vm); - - r = in->ops.dpi->enable(in); + r = src->ops->enable(src); if (r) return r; - if (gpio_is_valid(ddata->res_gpio)) - gpio_set_value_cansleep(ddata->res_gpio, 1); + gpiod_set_value_cansleep(ddata->res_gpio, 1); dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; @@ -180,31 +144,18 @@ static int nec_8048_enable(struct omap_dss_device *dssdev) static void nec_8048_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; if (!omapdss_device_is_enabled(dssdev)) return; - if (gpio_is_valid(ddata->res_gpio)) - gpio_set_value_cansleep(ddata->res_gpio, 0); + gpiod_set_value_cansleep(ddata->res_gpio, 0); - in->ops.dpi->disable(in); + src->ops->disable(src); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void nec_8048_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - ddata->vm = *vm; - dssdev->panel.vm = *vm; - - in->ops.dpi->set_timings(in, vm); -} - static void nec_8048_get_timings(struct omap_dss_device *dssdev, struct videomode *vm) { @@ -213,50 +164,21 @@ static void nec_8048_get_timings(struct omap_dss_device *dssdev, *vm = ddata->vm; } -static int nec_8048_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - return in->ops.dpi->check_timings(in, vm); -} - -static struct omap_dss_driver nec_8048_ops = { +static const struct omap_dss_device_ops nec_8048_ops = { .connect = nec_8048_connect, .disconnect = nec_8048_disconnect, .enable = nec_8048_enable, .disable = nec_8048_disable, - .set_timings = nec_8048_set_timings, .get_timings = nec_8048_get_timings, - .check_timings = nec_8048_check_timings, }; -static int nec_8048_probe_of(struct spi_device *spi) -{ - struct device_node *node = spi->dev.of_node; - struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); - int gpio; - - gpio = of_get_named_gpio(node, "reset-gpios", 0); - if (!gpio_is_valid(gpio)) { - dev_err(&spi->dev, "failed to parse enable gpio\n"); - return gpio; - } - ddata->res_gpio = gpio; - - /* XXX the panel spec doesn't mention any QVGA pin?? 
*/ - ddata->qvga_gpio = -ENOENT; - - return 0; -} - static int nec_8048_probe(struct spi_device *spi) { struct panel_drv_data *ddata; struct omap_dss_device *dssdev; + struct gpio_desc *gpio; int r; dev_dbg(&spi->dev, "%s\n", __func__); @@ -280,38 +202,27 @@ static int nec_8048_probe(struct spi_device *spi) ddata->spi = spi; - r = nec_8048_probe_of(spi); - if (r) - return r; - - if (gpio_is_valid(ddata->qvga_gpio)) { - r = devm_gpio_request_one(&spi->dev, ddata->qvga_gpio, - GPIOF_OUT_INIT_HIGH, "lcd QVGA"); - if (r) - return r; + gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(gpio)) { + dev_err(&spi->dev, "failed to get reset gpio\n"); + return PTR_ERR(gpio); } - if (gpio_is_valid(ddata->res_gpio)) { - r = devm_gpio_request_one(&spi->dev, ddata->res_gpio, - GPIOF_OUT_INIT_LOW, "lcd RES"); - if (r) - return r; - } + ddata->res_gpio = gpio; ddata->vm = nec_8048_panel_vm; dssdev = &ddata->dssdev; dssdev->dev = &spi->dev; - dssdev->driver = &nec_8048_ops; + dssdev->ops = &nec_8048_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->owner = THIS_MODULE; - dssdev->panel.vm = ddata->vm; + dssdev->of_ports = BIT(0); + dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE + | DRM_BUS_FLAG_PIXDATA_POSEDGE; - r = omapdss_register_display(dssdev); - if (r) { - dev_err(&spi->dev, "Failed to register panel\n"); - return r; - } + omapdss_display_init(dssdev); + omapdss_device_register(dssdev); return 0; } @@ -323,10 +234,9 @@ static int nec_8048_remove(struct spi_device *spi) dev_dbg(&ddata->spi->dev, "%s\n", __func__); - omapdss_unregister_display(dssdev); + omapdss_device_unregister(dssdev); nec_8048_disable(dssdev); - nec_8048_disconnect(dssdev); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c index bb5b680cabfe..64b1369cb274 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c @@ -21,7 +21,6 @@ struct panel_drv_data { struct omap_dss_device dssdev; - struct omap_dss_device *in; struct regulator *vcc; struct videomode vm; @@ -47,60 +46,26 @@ static const struct videomode sharp_ls_vm = { .vfront_porch = 1, .vback_porch = 1, - .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW | - DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_NEGEDGE | - DISPLAY_FLAGS_PIXDATA_POSEDGE, - /* - * Note: According to the panel documentation: - * DATA needs to be driven on the FALLING edge - */ + .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW, }; #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) -static int sharp_ls_connect(struct omap_dss_device *dssdev) +static int sharp_ls_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in; - int r; - - if (omapdss_device_is_connected(dssdev)) - return 0; - - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(in)) { - dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(in); - } - - r = in->ops.dpi->connect(in, dssdev); - if (r) { - omap_dss_put_device(in); - return r; - } - - ddata->in = in; return 0; } -static void sharp_ls_disconnect(struct omap_dss_device *dssdev) +static void sharp_ls_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - if 
(!omapdss_device_is_connected(dssdev)) - return; - - in->ops.dpi->disconnect(in, dssdev); - - omap_dss_put_device(in); - ddata->in = NULL; } static int sharp_ls_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; if (!omapdss_device_is_connected(dssdev)) @@ -109,15 +74,13 @@ static int sharp_ls_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - in->ops.dpi->set_timings(in, &ddata->vm); - if (ddata->vcc) { r = regulator_enable(ddata->vcc); if (r != 0) return r; } - r = in->ops.dpi->enable(in); + r = src->ops->enable(src); if (r) { regulator_disable(ddata->vcc); return r; @@ -140,7 +103,7 @@ static int sharp_ls_enable(struct omap_dss_device *dssdev) static void sharp_ls_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; if (!omapdss_device_is_enabled(dssdev)) return; @@ -155,7 +118,7 @@ static void sharp_ls_disable(struct omap_dss_device *dssdev) msleep(100); - in->ops.dpi->disable(in); + src->ops->disable(src); if (ddata->vcc) regulator_disable(ddata->vcc); @@ -163,18 +126,6 @@ static void sharp_ls_disable(struct omap_dss_device *dssdev) dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void sharp_ls_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - ddata->vm = *vm; - dssdev->panel.vm = *vm; - - in->ops.dpi->set_timings(in, vm); -} - static void sharp_ls_get_timings(struct omap_dss_device *dssdev, struct videomode *vm) { @@ -183,25 +134,14 @@ static void sharp_ls_get_timings(struct omap_dss_device *dssdev, *vm = ddata->vm; } -static int sharp_ls_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - return in->ops.dpi->check_timings(in, vm); -} - -static struct omap_dss_driver sharp_ls_ops = { +static const struct omap_dss_device_ops sharp_ls_ops = { .connect = sharp_ls_connect, .disconnect = sharp_ls_disconnect, .enable = sharp_ls_enable, .disable = sharp_ls_disable, - .set_timings = sharp_ls_set_timings, .get_timings = sharp_ls_get_timings, - .check_timings = sharp_ls_check_timings, }; static int sharp_ls_get_gpio_of(struct device *dev, int index, int val, @@ -278,16 +218,20 @@ static int sharp_ls_probe(struct platform_device *pdev) dssdev = &ddata->dssdev; dssdev->dev = &pdev->dev; - dssdev->driver = &sharp_ls_ops; + dssdev->ops = &sharp_ls_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->owner = THIS_MODULE; - dssdev->panel.vm = ddata->vm; + dssdev->of_ports = BIT(0); - r = omapdss_register_display(dssdev); - if (r) { - dev_err(&pdev->dev, "Failed to register panel\n"); - return r; - } + /* + * Note: According to the panel documentation: + * DATA needs to be driven on the FALLING edge + */ + dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_NEGEDGE + | DRM_BUS_FLAG_PIXDATA_POSEDGE; + + omapdss_display_init(dssdev); + omapdss_device_register(dssdev); return 0; } @@ -297,10 +241,9 @@ static int __exit sharp_ls_remove(struct platform_device *pdev) struct panel_drv_data *ddata = platform_get_drvdata(pdev); struct omap_dss_device *dssdev = &ddata->dssdev; - omapdss_unregister_display(dssdev); + omapdss_device_unregister(dssdev); 
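With both ends of the link now passed to the ops, the DPI/SDI panels no longer look up their video source with omapdss_of_find_source_for_first_ep(); their connect/disconnect handlers take (src, dst) and in most cases shrink to stubs, since the core links src and dst after connect() returns. A representative pair, with hypothetical names:

static int example_panel_connect(struct omap_dss_device *src,
				 struct omap_dss_device *dst)
{
	/* Nothing to do: omapdss_device_connect() wires up src and dst. */
	return 0;
}

static void example_panel_disconnect(struct omap_dss_device *src,
				     struct omap_dss_device *dst)
{
}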
sharp_ls_disable(dssdev); - sharp_ls_disconnect(dssdev); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index f34c06bb5bd7..e04663856b31 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ -20,17 +20,15 @@ * this program. If not, see <http://www.gnu.org/licenses/>. */ +#include <linux/backlight.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> -#include <linux/delay.h> -#include <linux/spi/spi.h> -#include <linux/jiffies.h> #include <linux/sched.h> -#include <linux/backlight.h> -#include <linux/gpio/consumer.h> -#include <linux/of.h> -#include <linux/of_gpio.h> +#include <linux/spi/spi.h> #include "../dss/omapdss.h" @@ -64,9 +62,8 @@ struct panel_drv_data { struct omap_dss_device dssdev; - struct omap_dss_device *in; - int reset_gpio; + struct gpio_desc *reset_gpio; struct videomode vm; @@ -100,9 +97,7 @@ static const struct videomode acx565akm_panel_vm = { .vsync_len = 3, .vback_porch = 4, - .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW | - DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_NEGEDGE | - DISPLAY_FLAGS_PIXDATA_POSEDGE, + .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW, }; #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) @@ -507,56 +502,26 @@ static const struct attribute_group bldev_attr_group = { .attrs = bldev_attrs, }; -static int acx565akm_connect(struct omap_dss_device *dssdev) +static int acx565akm_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in; - int r; - - if (omapdss_device_is_connected(dssdev)) - return 0; - - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(in)) { - dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(in); - } - - r = in->ops.sdi->connect(in, dssdev); - if (r) { - omap_dss_put_device(in); - return r; - } - - ddata->in = in; return 0; } -static void acx565akm_disconnect(struct omap_dss_device *dssdev) +static void acx565akm_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - if (!omapdss_device_is_connected(dssdev)) - return; - - in->ops.sdi->disconnect(in, dssdev); - - omap_dss_put_device(in); - ddata->in = NULL; } static int acx565akm_panel_power_on(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; dev_dbg(&ddata->spi->dev, "%s\n", __func__); - in->ops.sdi->set_timings(in, &ddata->vm); - - r = in->ops.sdi->enable(in); + r = src->ops->enable(src); if (r) { pr_err("%s sdi enable failed\n", __func__); return r; @@ -565,8 +530,8 @@ static int acx565akm_panel_power_on(struct omap_dss_device *dssdev) /*FIXME tweak me */ msleep(50); - if (gpio_is_valid(ddata->reset_gpio)) - gpio_set_value(ddata->reset_gpio, 1); + if (ddata->reset_gpio) + gpiod_set_value(ddata->reset_gpio, 1); if (ddata->enabled) { dev_dbg(&ddata->spi->dev, "panel already enabled\n"); @@ -597,7 +562,7 @@ static int acx565akm_panel_power_on(struct omap_dss_device *dssdev) static void acx565akm_panel_power_off(struct omap_dss_device *dssdev) { struct 
panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; dev_dbg(dssdev->dev, "%s\n", __func__); @@ -615,13 +580,13 @@ static void acx565akm_panel_power_off(struct omap_dss_device *dssdev) */ msleep(50); - if (gpio_is_valid(ddata->reset_gpio)) - gpio_set_value(ddata->reset_gpio, 0); + if (ddata->reset_gpio) + gpiod_set_value(ddata->reset_gpio, 0); /* FIXME need to tweak this delay */ msleep(100); - in->ops.sdi->disable(in); + src->ops->disable(src); } static int acx565akm_enable(struct omap_dss_device *dssdev) @@ -664,18 +629,6 @@ static void acx565akm_disable(struct omap_dss_device *dssdev) dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void acx565akm_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - ddata->vm = *vm; - dssdev->panel.vm = *vm; - - in->ops.sdi->set_timings(in, vm); -} - static void acx565akm_get_timings(struct omap_dss_device *dssdev, struct videomode *vm) { @@ -684,37 +637,16 @@ static void acx565akm_get_timings(struct omap_dss_device *dssdev, *vm = ddata->vm; } -static int acx565akm_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - return in->ops.sdi->check_timings(in, vm); -} - -static struct omap_dss_driver acx565akm_ops = { +static const struct omap_dss_device_ops acx565akm_ops = { .connect = acx565akm_connect, .disconnect = acx565akm_disconnect, .enable = acx565akm_enable, .disable = acx565akm_disable, - .set_timings = acx565akm_set_timings, .get_timings = acx565akm_get_timings, - .check_timings = acx565akm_check_timings, }; -static int acx565akm_probe_of(struct spi_device *spi) -{ - struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); - struct device_node *np = spi->dev.of_node; - - ddata->reset_gpio = of_get_named_gpio(np, "reset-gpios", 0); - - return 0; -} - static int acx565akm_probe(struct spi_device *spi) { struct panel_drv_data *ddata; @@ -722,6 +654,7 @@ static int acx565akm_probe(struct spi_device *spi) struct backlight_device *bldev; int max_brightness, brightness; struct backlight_properties props; + struct gpio_desc *gpio; int r; dev_dbg(&spi->dev, "%s\n", __func__); @@ -738,19 +671,16 @@ static int acx565akm_probe(struct spi_device *spi) mutex_init(&ddata->mutex); - r = acx565akm_probe_of(spi); - if (r) - return r; - - if (gpio_is_valid(ddata->reset_gpio)) { - r = devm_gpio_request_one(&spi->dev, ddata->reset_gpio, - GPIOF_OUT_INIT_LOW, "lcd reset"); - if (r) - goto err_gpio; + gpio = devm_gpiod_get_optional(&spi->dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(gpio)) { + dev_err(&spi->dev, "failed to parse reset gpio\n"); + return PTR_ERR(gpio); } - if (gpio_is_valid(ddata->reset_gpio)) - gpio_set_value(ddata->reset_gpio, 1); + ddata->reset_gpio = gpio; + + if (ddata->reset_gpio) + gpiod_set_value(ddata->reset_gpio, 1); /* * After reset we have to wait 5 msec before the first @@ -762,12 +692,12 @@ static int acx565akm_probe(struct spi_device *spi) r = panel_detect(ddata); - if (!ddata->enabled && gpio_is_valid(ddata->reset_gpio)) - gpio_set_value(ddata->reset_gpio, 0); + if (!ddata->enabled && ddata->reset_gpio) + gpiod_set_value(ddata->reset_gpio, 0); if (r) { dev_err(&spi->dev, "%s panel detect error\n", __func__); - goto err_detect; + return r; } memset(&props, 0, sizeof(props)); @@ -777,17 +707,15 @@ static int 
acx565akm_probe(struct spi_device *spi) bldev = backlight_device_register("acx565akm", &ddata->spi->dev, ddata, &acx565akm_bl_ops, &props); - if (IS_ERR(bldev)) { - r = PTR_ERR(bldev); - goto err_reg_bl; - } + if (IS_ERR(bldev)) + return PTR_ERR(bldev); ddata->bl_dev = bldev; if (ddata->has_cabc) { r = sysfs_create_group(&bldev->dev.kobj, &bldev_attr_group); if (r) { dev_err(&bldev->dev, "%s failed to create sysfs files\n", __func__); - goto err_sysfs; + goto err_backlight_unregister; } ddata->cabc_mode = get_hw_cabc_mode(ddata); } @@ -809,26 +737,20 @@ static int acx565akm_probe(struct spi_device *spi) dssdev = &ddata->dssdev; dssdev->dev = &spi->dev; - dssdev->driver = &acx565akm_ops; + dssdev->ops = &acx565akm_ops; dssdev->type = OMAP_DISPLAY_TYPE_SDI; dssdev->owner = THIS_MODULE; - dssdev->panel.vm = ddata->vm; + dssdev->of_ports = BIT(0); + dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_NEGEDGE + | DRM_BUS_FLAG_PIXDATA_POSEDGE; - r = omapdss_register_display(dssdev); - if (r) { - dev_err(&spi->dev, "Failed to register panel\n"); - goto err_reg; - } + omapdss_display_init(dssdev); + omapdss_device_register(dssdev); return 0; -err_reg: - sysfs_remove_group(&bldev->dev.kobj, &bldev_attr_group); -err_sysfs: +err_backlight_unregister: backlight_device_unregister(bldev); -err_reg_bl: -err_detect: -err_gpio: return r; } @@ -842,10 +764,9 @@ static int acx565akm_remove(struct spi_device *spi) sysfs_remove_group(&ddata->bl_dev->dev.kobj, &bldev_attr_group); backlight_device_unregister(ddata->bl_dev); - omapdss_unregister_display(dssdev); + omapdss_device_unregister(dssdev); acx565akm_disable(dssdev); - acx565akm_disconnect(dssdev); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c index a1f1dc18407a..7ddc8c574a61 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c @@ -27,13 +27,11 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/spi/spi.h> -#include <linux/gpio.h> #include "../dss/omapdss.h" struct panel_drv_data { struct omap_dss_device dssdev; - struct omap_dss_device *in; struct videomode vm; @@ -51,13 +49,7 @@ static const struct videomode td028ttec1_panel_vm = { .vsync_len = 2, .vback_porch = 2, - .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW | - DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE | - DISPLAY_FLAGS_PIXDATA_NEGEDGE, - /* - * Note: According to the panel documentation: - * SYNC needs to be driven on the FALLING edge - */ + .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW, }; #define JBT_COMMAND 0x000 @@ -166,49 +158,21 @@ enum jbt_register { #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) -static int td028ttec1_panel_connect(struct omap_dss_device *dssdev) +static int td028ttec1_panel_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in; - int r; - - if (omapdss_device_is_connected(dssdev)) - return 0; - - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(in)) { - dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(in); - } - - r = in->ops.dpi->connect(in, dssdev); - if (r) { - omap_dss_put_device(in); - return r; - } - - ddata->in = in; return 0; } -static void td028ttec1_panel_disconnect(struct omap_dss_device *dssdev) +static void td028ttec1_panel_disconnect(struct omap_dss_device 
*src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - if (!omapdss_device_is_connected(dssdev)) - return; - - in->ops.dpi->disconnect(in, dssdev); - - omap_dss_put_device(in); - ddata->in = NULL; } static int td028ttec1_panel_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; if (!omapdss_device_is_connected(dssdev)) @@ -217,9 +181,7 @@ static int td028ttec1_panel_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - in->ops.dpi->set_timings(in, &ddata->vm); - - r = in->ops.dpi->enable(in); + r = src->ops->enable(src); if (r) return r; @@ -316,7 +278,7 @@ transfer_err: static void td028ttec1_panel_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; if (!omapdss_device_is_enabled(dssdev)) return; @@ -328,23 +290,11 @@ static void td028ttec1_panel_disable(struct omap_dss_device *dssdev) jbt_ret_write_0(ddata, JBT_REG_SLEEP_IN); jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x00); - in->ops.dpi->disable(in); + src->ops->disable(src); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void td028ttec1_panel_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - ddata->vm = *vm; - dssdev->panel.vm = *vm; - - in->ops.dpi->set_timings(in, vm); -} - static void td028ttec1_panel_get_timings(struct omap_dss_device *dssdev, struct videomode *vm) { @@ -353,25 +303,14 @@ static void td028ttec1_panel_get_timings(struct omap_dss_device *dssdev, *vm = ddata->vm; } -static int td028ttec1_panel_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - return in->ops.dpi->check_timings(in, vm); -} - -static struct omap_dss_driver td028ttec1_ops = { +static const struct omap_dss_device_ops td028ttec1_ops = { .connect = td028ttec1_panel_connect, .disconnect = td028ttec1_panel_disconnect, .enable = td028ttec1_panel_enable, .disable = td028ttec1_panel_disable, - .set_timings = td028ttec1_panel_set_timings, .get_timings = td028ttec1_panel_get_timings, - .check_timings = td028ttec1_panel_check_timings, }; static int td028ttec1_panel_probe(struct spi_device *spi) @@ -403,16 +342,20 @@ static int td028ttec1_panel_probe(struct spi_device *spi) dssdev = &ddata->dssdev; dssdev->dev = &spi->dev; - dssdev->driver = &td028ttec1_ops; + dssdev->ops = &td028ttec1_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->owner = THIS_MODULE; - dssdev->panel.vm = ddata->vm; + dssdev->of_ports = BIT(0); - r = omapdss_register_display(dssdev); - if (r) { - dev_err(&spi->dev, "Failed to register panel\n"); - return r; - } + /* + * Note: According to the panel documentation: + * SYNC needs to be driven on the FALLING edge + */ + dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE + | DRM_BUS_FLAG_PIXDATA_NEGEDGE; + + omapdss_display_init(dssdev); + omapdss_device_register(dssdev); return 0; } @@ -424,10 +367,9 @@ static int td028ttec1_panel_remove(struct spi_device *spi) dev_dbg(&ddata->spi_dev->dev, "%s\n", __func__); - omapdss_unregister_display(dssdev); + omapdss_device_unregister(dssdev); 
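Several of the SPI-controlled panels in this series (nec-nl8048 and acx565akm above, tpo-td043 below) also switch from of_get_named_gpio()/gpio_set_value() to GPIO descriptors, so the gpio_is_valid() checks become plain NULL checks or disappear where the GPIO is mandatory. Condensed from the nec-nl8048 hunks, as a sketch of the converted reset handling:

	struct gpio_desc *gpio;

	gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(gpio)) {
		dev_err(&spi->dev, "failed to get reset gpio\n");
		return PTR_ERR(gpio);
	}
	ddata->res_gpio = gpio;

	/* later, in enable/disable */
	gpiod_set_value_cansleep(ddata->res_gpio, 1);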
td028ttec1_panel_disable(dssdev); - td028ttec1_panel_disconnect(dssdev); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index c08e22b43447..8440fcb744d9 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c @@ -10,14 +10,13 @@ * (at your option) any later version. */ -#include <linux/module.h> #include <linux/delay.h> -#include <linux/spi/spi.h> -#include <linux/regulator/consumer.h> -#include <linux/gpio/consumer.h> #include <linux/err.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/regulator/consumer.h> #include <linux/slab.h> -#include <linux/of_gpio.h> +#include <linux/spi/spi.h> #include "../dss/omapdss.h" @@ -54,16 +53,14 @@ static const u16 tpo_td043_def_gamma[12] = { struct panel_drv_data { struct omap_dss_device dssdev; - struct omap_dss_device *in; struct videomode vm; struct spi_device *spi; struct regulator *vcc_reg; - int nreset_gpio; + struct gpio_desc *reset_gpio; u16 gamma[12]; u32 mode; - u32 hmirror:1; u32 vmirror:1; u32 powered_on:1; u32 spi_suspended:1; @@ -84,13 +81,7 @@ static const struct videomode tpo_td043_vm = { .vfront_porch = 39, .vback_porch = 34, - .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW | - DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE | - DISPLAY_FLAGS_PIXDATA_NEGEDGE, - /* - * Note: According to the panel documentation: - * SYNC needs to be driven on the FALLING edge - */ + .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW, }; #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) @@ -152,22 +143,6 @@ static int tpo_td043_write_mirror(struct spi_device *spi, bool h, bool v) return tpo_td043_write(spi, 4, reg4); } -static int tpo_td043_set_hmirror(struct omap_dss_device *dssdev, bool enable) -{ - struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev); - - ddata->hmirror = enable; - return tpo_td043_write_mirror(ddata->spi, ddata->hmirror, - ddata->vmirror); -} - -static bool tpo_td043_get_hmirror(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev); - - return ddata->hmirror; -} - static ssize_t tpo_td043_vmirror_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -189,7 +164,7 @@ static ssize_t tpo_td043_vmirror_store(struct device *dev, val = !!val; - ret = tpo_td043_write_mirror(ddata->spi, ddata->hmirror, val); + ret = tpo_td043_write_mirror(ddata->spi, false, val); if (ret < 0) return ret; @@ -300,16 +275,14 @@ static int tpo_td043_power_on(struct panel_drv_data *ddata) /* wait for panel to stabilize */ msleep(160); - if (gpio_is_valid(ddata->nreset_gpio)) - gpio_set_value(ddata->nreset_gpio, 1); + gpiod_set_value(ddata->reset_gpio, 0); tpo_td043_write(ddata->spi, 2, TPO_R02_MODE(ddata->mode) | TPO_R02_NCLK_RISING); tpo_td043_write(ddata->spi, 3, TPO_R03_VAL_NORMAL); tpo_td043_write(ddata->spi, 0x20, 0xf0); tpo_td043_write(ddata->spi, 0x21, 0xf0); - tpo_td043_write_mirror(ddata->spi, ddata->hmirror, - ddata->vmirror); + tpo_td043_write_mirror(ddata->spi, false, ddata->vmirror); tpo_td043_write_gamma(ddata->spi, ddata->gamma); ddata->powered_on = 1; @@ -324,8 +297,7 @@ static void tpo_td043_power_off(struct panel_drv_data *ddata) tpo_td043_write(ddata->spi, 3, TPO_R03_VAL_STANDBY | TPO_R03_EN_PWM); - if (gpio_is_valid(ddata->nreset_gpio)) - gpio_set_value(ddata->nreset_gpio, 0); + gpiod_set_value(ddata->reset_gpio, 1); /* wait for at least 
2 vsyncs before cutting off power */ msleep(50); @@ -337,49 +309,21 @@ static void tpo_td043_power_off(struct panel_drv_data *ddata) ddata->powered_on = 0; } -static int tpo_td043_connect(struct omap_dss_device *dssdev) +static int tpo_td043_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in; - int r; - - if (omapdss_device_is_connected(dssdev)) - return 0; - - in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node); - if (IS_ERR(in)) { - dev_err(dssdev->dev, "failed to find video source\n"); - return PTR_ERR(in); - } - - r = in->ops.dpi->connect(in, dssdev); - if (r) { - omap_dss_put_device(in); - return r; - } - - ddata->in = in; return 0; } -static void tpo_td043_disconnect(struct omap_dss_device *dssdev) +static void tpo_td043_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - if (!omapdss_device_is_connected(dssdev)) - return; - - in->ops.dpi->disconnect(in, dssdev); - - omap_dss_put_device(in); - ddata->in = NULL; } static int tpo_td043_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; int r; if (!omapdss_device_is_connected(dssdev)) @@ -388,9 +332,7 @@ static int tpo_td043_enable(struct omap_dss_device *dssdev) if (omapdss_device_is_enabled(dssdev)) return 0; - in->ops.dpi->set_timings(in, &ddata->vm); - - r = in->ops.dpi->enable(in); + r = src->ops->enable(src); if (r) return r; @@ -401,7 +343,7 @@ static int tpo_td043_enable(struct omap_dss_device *dssdev) if (!ddata->spi_suspended) { r = tpo_td043_power_on(ddata); if (r) { - in->ops.dpi->disable(in); + src->ops->disable(src); return r; } } @@ -414,12 +356,12 @@ static int tpo_td043_enable(struct omap_dss_device *dssdev) static void tpo_td043_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; + struct omap_dss_device *src = dssdev->src; if (!omapdss_device_is_enabled(dssdev)) return; - in->ops.dpi->disable(in); + src->ops->disable(src); if (!ddata->spi_suspended) tpo_td043_power_off(ddata); @@ -427,18 +369,6 @@ static void tpo_td043_disable(struct omap_dss_device *dssdev) dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void tpo_td043_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - ddata->vm = *vm; - dssdev->panel.vm = *vm; - - in->ops.dpi->set_timings(in, vm); -} - static void tpo_td043_get_timings(struct omap_dss_device *dssdev, struct videomode *vm) { @@ -447,50 +377,21 @@ static void tpo_td043_get_timings(struct omap_dss_device *dssdev, *vm = ddata->vm; } -static int tpo_td043_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *in = ddata->in; - - return in->ops.dpi->check_timings(in, vm); -} - -static struct omap_dss_driver tpo_td043_ops = { +static const struct omap_dss_device_ops tpo_td043_ops = { .connect = tpo_td043_connect, .disconnect = tpo_td043_disconnect, .enable = tpo_td043_enable, .disable = tpo_td043_disable, - .set_timings = tpo_td043_set_timings, .get_timings = tpo_td043_get_timings, - .check_timings = tpo_td043_check_timings, - - 
.set_mirror = tpo_td043_set_hmirror, - .get_mirror = tpo_td043_get_hmirror, }; -static int tpo_td043_probe_of(struct spi_device *spi) -{ - struct device_node *node = spi->dev.of_node; - struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); - int gpio; - - gpio = of_get_named_gpio(node, "reset-gpios", 0); - if (!gpio_is_valid(gpio)) { - dev_err(&spi->dev, "failed to parse enable gpio\n"); - return gpio; - } - ddata->nreset_gpio = gpio; - - return 0; -} - static int tpo_td043_probe(struct spi_device *spi) { struct panel_drv_data *ddata; struct omap_dss_device *dssdev; + struct gpio_desc *gpio; int r; dev_dbg(&spi->dev, "%s\n", __func__); @@ -512,59 +413,49 @@ static int tpo_td043_probe(struct spi_device *spi) ddata->spi = spi; - r = tpo_td043_probe_of(spi); - if (r) - return r; - ddata->mode = TPO_R02_MODE_800x480; memcpy(ddata->gamma, tpo_td043_def_gamma, sizeof(ddata->gamma)); ddata->vcc_reg = devm_regulator_get(&spi->dev, "vcc"); if (IS_ERR(ddata->vcc_reg)) { dev_err(&spi->dev, "failed to get LCD VCC regulator\n"); - r = PTR_ERR(ddata->vcc_reg); - goto err_regulator; + return PTR_ERR(ddata->vcc_reg); } - if (gpio_is_valid(ddata->nreset_gpio)) { - r = devm_gpio_request_one(&spi->dev, - ddata->nreset_gpio, GPIOF_OUT_INIT_LOW, - "lcd reset"); - if (r < 0) { - dev_err(&spi->dev, "couldn't request reset GPIO\n"); - goto err_gpio_req; - } + gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(gpio)) { + dev_err(&spi->dev, "failed to get reset gpio\n"); + return PTR_ERR(gpio); } + ddata->reset_gpio = gpio; + r = sysfs_create_group(&spi->dev.kobj, &tpo_td043_attr_group); if (r) { dev_err(&spi->dev, "failed to create sysfs files\n"); - goto err_sysfs; + return r; } ddata->vm = tpo_td043_vm; dssdev = &ddata->dssdev; dssdev->dev = &spi->dev; - dssdev->driver = &tpo_td043_ops; + dssdev->ops = &tpo_td043_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; dssdev->owner = THIS_MODULE; - dssdev->panel.vm = ddata->vm; + dssdev->of_ports = BIT(0); - r = omapdss_register_display(dssdev); - if (r) { - dev_err(&spi->dev, "Failed to register panel\n"); - goto err_reg; - } + /* + * Note: According to the panel documentation: + * SYNC needs to be driven on the FALLING edge + */ + dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE + | DRM_BUS_FLAG_PIXDATA_NEGEDGE; - return 0; + omapdss_display_init(dssdev); + omapdss_device_register(dssdev); -err_reg: - sysfs_remove_group(&spi->dev.kobj, &tpo_td043_attr_group); -err_sysfs: -err_gpio_req: -err_regulator: - return r; + return 0; } static int tpo_td043_remove(struct spi_device *spi) @@ -574,10 +465,9 @@ static int tpo_td043_remove(struct spi_device *spi) dev_dbg(&ddata->spi->dev, "%s\n", __func__); - omapdss_unregister_display(dssdev); + omapdss_device_unregister(dssdev); tpo_td043_disable(dssdev); - tpo_td043_disconnect(dssdev); sysfs_remove_group(&spi->dev.kobj, &tpo_td043_attr_group); diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c index 99e8cb8dc65b..472f56e3de70 100644 --- a/drivers/gpu/drm/omapdrm/dss/base.c +++ b/drivers/gpu/drm/omapdrm/dss/base.c @@ -14,24 +14,17 @@ */ #include <linux/kernel.h> +#include <linux/list.h> #include <linux/module.h> +#include <linux/mutex.h> #include <linux/of.h> #include <linux/of_graph.h> -#include <linux/list.h> #include "dss.h" #include "omapdss.h" static struct dss_device *dss_device; -static struct list_head omapdss_comp_list; - -struct omapdss_comp_node { - struct list_head list; - struct device_node *node; - bool dss_core_component; -}; - struct 
dss_device *omapdss_get_dss(void)
 {
 	return dss_device;
@@ -56,6 +49,208 @@ const struct dispc_ops *dispc_get_ops(struct dss_device *dss)
 }
 EXPORT_SYMBOL(dispc_get_ops);
 
+
+/* -----------------------------------------------------------------------------
+ * OMAP DSS Devices Handling
+ */
+
+static LIST_HEAD(omapdss_devices_list);
+static DEFINE_MUTEX(omapdss_devices_lock);
+
+void omapdss_device_register(struct omap_dss_device *dssdev)
+{
+	mutex_lock(&omapdss_devices_lock);
+	list_add_tail(&dssdev->list, &omapdss_devices_list);
+	mutex_unlock(&omapdss_devices_lock);
+}
+EXPORT_SYMBOL_GPL(omapdss_device_register);
+
+void omapdss_device_unregister(struct omap_dss_device *dssdev)
+{
+	mutex_lock(&omapdss_devices_lock);
+	list_del(&dssdev->list);
+	mutex_unlock(&omapdss_devices_lock);
+}
+EXPORT_SYMBOL_GPL(omapdss_device_unregister);
+
+static bool omapdss_device_is_registered(struct device_node *node)
+{
+	struct omap_dss_device *dssdev;
+	bool found = false;
+
+	mutex_lock(&omapdss_devices_lock);
+
+	list_for_each_entry(dssdev, &omapdss_devices_list, list) {
+		if (dssdev->dev->of_node == node) {
+			found = true;
+			break;
+		}
+	}
+
+	mutex_unlock(&omapdss_devices_lock);
+	return found;
+}
+
+struct omap_dss_device *omapdss_device_get(struct omap_dss_device *dssdev)
+{
+	if (!try_module_get(dssdev->owner))
+		return NULL;
+
+	if (get_device(dssdev->dev) == NULL) {
+		module_put(dssdev->owner);
+		return NULL;
+	}
+
+	return dssdev;
+}
+EXPORT_SYMBOL(omapdss_device_get);
+
+void omapdss_device_put(struct omap_dss_device *dssdev)
+{
+	put_device(dssdev->dev);
+	module_put(dssdev->owner);
+}
+EXPORT_SYMBOL(omapdss_device_put);
+
+struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src,
+						    unsigned int port)
+{
+	struct omap_dss_device *dssdev;
+
+	list_for_each_entry(dssdev, &omapdss_devices_list, list) {
+		if (dssdev->dev->of_node == src && dssdev->of_ports & BIT(port))
+			return omapdss_device_get(dssdev);
+	}
+
+	return NULL;
+}
+
+/*
+ * Search for the next device starting at @from. The type argument specifies
+ * which device types to consider when searching. Searching for multiple types
+ * is supported by and'ing their type flags. Release the reference to the @from
+ * device, and acquire a reference to the returned device if found.
+ */
+struct omap_dss_device *omapdss_device_get_next(struct omap_dss_device *from,
+						enum omap_dss_device_type type)
+{
+	struct omap_dss_device *dssdev;
+	struct list_head *list;
+
+	mutex_lock(&omapdss_devices_lock);
+
+	if (list_empty(&omapdss_devices_list)) {
+		dssdev = NULL;
+		goto done;
+	}
+
+	/*
+	 * Start from the from entry if given or from omapdss_devices_list
+	 * otherwise.
+	 */
+	list = from ? &from->list : &omapdss_devices_list;
+
+	list_for_each_entry(dssdev, list, list) {
+		/*
+		 * Stop if we reach the omapdss_devices_list, that's the end of
+		 * the list.
+		 */
+		if (&dssdev->list == &omapdss_devices_list) {
+			dssdev = NULL;
+			goto done;
+		}
+
+		/*
+		 * Accept display entities if the display type is requested,
+		 * and output entities if the output type is requested. 
+ */ + if ((type & OMAP_DSS_DEVICE_TYPE_DISPLAY) && + !dssdev->output_type) + goto done; + if ((type & OMAP_DSS_DEVICE_TYPE_OUTPUT) && dssdev->id && + dssdev->next) + goto done; + } + + dssdev = NULL; + +done: + if (from) + omapdss_device_put(from); + if (dssdev) + omapdss_device_get(dssdev); + + mutex_unlock(&omapdss_devices_lock); + return dssdev; +} +EXPORT_SYMBOL(omapdss_device_get_next); + +int omapdss_device_connect(struct dss_device *dss, + struct omap_dss_device *src, + struct omap_dss_device *dst) +{ + int ret; + + dev_dbg(dst->dev, "connect\n"); + + if (omapdss_device_is_connected(dst)) + return -EBUSY; + + dst->dss = dss; + + ret = dst->ops->connect(src, dst); + if (ret < 0) { + dst->dss = NULL; + return ret; + } + + if (src) { + WARN_ON(src->dst); + dst->src = src; + src->dst = dst; + } + + return 0; +} +EXPORT_SYMBOL_GPL(omapdss_device_connect); + +void omapdss_device_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) +{ + dev_dbg(dst->dev, "disconnect\n"); + + if (!dst->id && !omapdss_device_is_connected(dst)) { + WARN_ON(dst->output_type); + return; + } + + if (src) { + if (WARN_ON(dst != src->dst)) + return; + + dst->src = NULL; + src->dst = NULL; + } + + WARN_ON(dst->state != OMAP_DSS_DISPLAY_DISABLED); + + dst->ops->disconnect(src, dst); + dst->dss = NULL; +} +EXPORT_SYMBOL_GPL(omapdss_device_disconnect); + +/* ----------------------------------------------------------------------------- + * Components Handling + */ + +static struct list_head omapdss_comp_list; + +struct omapdss_comp_node { + struct list_head list; + struct device_node *node; + bool dss_core_component; +}; + static bool omapdss_list_contains(const struct device_node *node) { struct omapdss_comp_node *comp; @@ -130,9 +325,7 @@ static bool omapdss_component_is_loaded(struct omapdss_comp_node *comp) { if (comp->dss_core_component) return true; - if (omapdss_component_is_display(comp->node)) - return true; - if (omapdss_component_is_output(comp->node)) + if (omapdss_device_is_registered(comp->node)) return true; return false; diff --git a/drivers/gpu/drm/omapdrm/dss/core.c b/drivers/gpu/drm/omapdrm/dss/core.c index 07d00a186f15..a2edabc9f6b3 100644 --- a/drivers/gpu/drm/omapdrm/dss/core.c +++ b/drivers/gpu/drm/omapdrm/dss/core.c @@ -45,36 +45,14 @@ static struct platform_driver * const omap_dss_drivers[] = { #endif }; -static struct platform_device *omap_drm_device; - static int __init omap_dss_init(void) { - int r; - - r = platform_register_drivers(omap_dss_drivers, - ARRAY_SIZE(omap_dss_drivers)); - if (r) - goto err_reg; - - omap_drm_device = platform_device_register_simple("omapdrm", 0, NULL, 0); - if (IS_ERR(omap_drm_device)) { - r = PTR_ERR(omap_drm_device); - goto err_reg; - } - - return 0; - -err_reg: - platform_unregister_drivers(omap_dss_drivers, - ARRAY_SIZE(omap_dss_drivers)); - - return r; + return platform_register_drivers(omap_dss_drivers, + ARRAY_SIZE(omap_dss_drivers)); } static void __exit omap_dss_exit(void) { - platform_device_unregister(omap_drm_device); - platform_unregister_drivers(omap_dss_drivers, ARRAY_SIZE(omap_dss_drivers)); } diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c index 84f274c4a4cb..e61a9592a650 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc.c +++ b/drivers/gpu/drm/omapdrm/dss/dispc.c @@ -2904,13 +2904,6 @@ static int dispc_ovl_enable(struct dispc_device *dispc, return 0; } -static enum omap_dss_output_id -dispc_mgr_get_supported_outputs(struct dispc_device *dispc, - enum omap_channel channel) -{ - return 
dss_get_supported_outputs(dispc->dss, channel); -} - static void dispc_lcd_enable_signal_polarity(struct dispc_device *dispc, bool act_high) { @@ -3120,28 +3113,29 @@ static bool _dispc_mgr_pclk_ok(struct dispc_device *dispc, return pclk <= dispc->feat->max_tv_pclk; } -bool dispc_mgr_timings_ok(struct dispc_device *dispc, enum omap_channel channel, - const struct videomode *vm) +static int dispc_mgr_check_timings(struct dispc_device *dispc, + enum omap_channel channel, + const struct videomode *vm) { if (!_dispc_mgr_size_ok(dispc, vm->hactive, vm->vactive)) - return false; + return MODE_BAD; if (!_dispc_mgr_pclk_ok(dispc, channel, vm->pixelclock)) - return false; + return MODE_BAD; if (dss_mgr_is_lcd(channel)) { /* TODO: OMAP4+ supports interlace for LCD outputs */ if (vm->flags & DISPLAY_FLAGS_INTERLACED) - return false; + return MODE_BAD; if (!_dispc_lcd_timings_ok(dispc, vm->hsync_len, vm->hfront_porch, vm->hback_porch, vm->vsync_len, vm->vfront_porch, vm->vback_porch)) - return false; + return MODE_BAD; } - return true; + return MODE_OK; } static void _dispc_mgr_set_lcd_timings(struct dispc_device *dispc, @@ -3243,7 +3237,7 @@ static void dispc_mgr_set_timings(struct dispc_device *dispc, DSSDBG("channel %d xres %u yres %u\n", channel, t.hactive, t.vactive); - if (!dispc_mgr_timings_ok(dispc, channel, &t)) { + if (dispc_mgr_check_timings(dispc, channel, &t)) { BUG(); return; } @@ -4740,9 +4734,9 @@ static const struct dispc_ops dispc_ops = { .mgr_go_busy = dispc_mgr_go_busy, .mgr_go = dispc_mgr_go, .mgr_set_lcd_config = dispc_mgr_set_lcd_config, + .mgr_check_timings = dispc_mgr_check_timings, .mgr_set_timings = dispc_mgr_set_timings, .mgr_setup = dispc_mgr_setup, - .mgr_get_supported_outputs = dispc_mgr_get_supported_outputs, .mgr_gamma_size = dispc_mgr_gamma_size, .mgr_set_gamma = dispc_mgr_set_gamma, diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c index 9e7fcbd57e52..34b2a4ef63a4 100644 --- a/drivers/gpu/drm/omapdrm/dss/display.c +++ b/drivers/gpu/drm/omapdrm/dss/display.c @@ -21,27 +21,14 @@ #define DSS_SUBSYS_NAME "DISPLAY" #include <linux/kernel.h> -#include <linux/module.h> -#include <linux/jiffies.h> -#include <linux/platform_device.h> #include <linux/of.h> #include "omapdss.h" -static void omapdss_default_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - *vm = dssdev->panel.vm; -} - -static LIST_HEAD(panel_list); -static DEFINE_MUTEX(panel_list_mutex); static int disp_num_counter; -int omapdss_register_display(struct omap_dss_device *dssdev) +void omapdss_display_init(struct omap_dss_device *dssdev) { - struct omap_dss_driver *drv = dssdev->driver; - struct list_head *cur; int id; /* @@ -52,123 +39,22 @@ int omapdss_register_display(struct omap_dss_device *dssdev) if (id < 0) id = disp_num_counter++; - snprintf(dssdev->alias, sizeof(dssdev->alias), "display%d", id); + dssdev->alias_id = id; /* Use 'label' property for name, if it exists */ of_property_read_string(dssdev->dev->of_node, "label", &dssdev->name); if (dssdev->name == NULL) - dssdev->name = dssdev->alias; - - if (drv && drv->get_timings == NULL) - drv->get_timings = omapdss_default_get_timings; - - mutex_lock(&panel_list_mutex); - list_for_each(cur, &panel_list) { - struct omap_dss_device *ldev = list_entry(cur, - struct omap_dss_device, - panel_list); - if (strcmp(ldev->alias, dssdev->alias) > 0) - break; - } - list_add_tail(&dssdev->panel_list, cur); - mutex_unlock(&panel_list_mutex); - return 0; -} -EXPORT_SYMBOL(omapdss_register_display); - -void 
omapdss_unregister_display(struct omap_dss_device *dssdev) -{ - mutex_lock(&panel_list_mutex); - list_del(&dssdev->panel_list); - mutex_unlock(&panel_list_mutex); + dssdev->name = devm_kasprintf(dssdev->dev, GFP_KERNEL, + "display%u", id); } -EXPORT_SYMBOL(omapdss_unregister_display); +EXPORT_SYMBOL_GPL(omapdss_display_init); -bool omapdss_component_is_display(struct device_node *node) +struct omap_dss_device *omapdss_display_get(struct omap_dss_device *output) { - struct omap_dss_device *dssdev; - bool found = false; - - mutex_lock(&panel_list_mutex); - list_for_each_entry(dssdev, &panel_list, panel_list) { - if (dssdev->dev->of_node == node) { - found = true; - goto out; - } - } -out: - mutex_unlock(&panel_list_mutex); - return found; -} -EXPORT_SYMBOL(omapdss_component_is_display); - -struct omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev) -{ - if (!try_module_get(dssdev->owner)) - return NULL; - - if (get_device(dssdev->dev) == NULL) { - module_put(dssdev->owner); - return NULL; - } - - return dssdev; -} -EXPORT_SYMBOL(omap_dss_get_device); - -void omap_dss_put_device(struct omap_dss_device *dssdev) -{ - put_device(dssdev->dev); - module_put(dssdev->owner); -} -EXPORT_SYMBOL(omap_dss_put_device); - -/* - * ref count of the found device is incremented. - * ref count of from-device is decremented. - */ -struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from) -{ - struct list_head *l; - struct omap_dss_device *dssdev; - - mutex_lock(&panel_list_mutex); - - if (list_empty(&panel_list)) { - dssdev = NULL; - goto out; - } - - if (from == NULL) { - dssdev = list_first_entry(&panel_list, struct omap_dss_device, - panel_list); - omap_dss_get_device(dssdev); - goto out; - } - - omap_dss_put_device(from); - - list_for_each(l, &panel_list) { - dssdev = list_entry(l, struct omap_dss_device, panel_list); - if (dssdev == from) { - if (list_is_last(l, &panel_list)) { - dssdev = NULL; - goto out; - } - - dssdev = list_entry(l->next, struct omap_dss_device, - panel_list); - omap_dss_get_device(dssdev); - goto out; - } - } - - WARN(1, "'from' dssdev not found\n"); + while (output->next) + output = output->next; - dssdev = NULL; -out: - mutex_unlock(&panel_list_mutex); - return dssdev; + return omapdss_device_get(output); } -EXPORT_SYMBOL(omap_dss_get_next_device); +EXPORT_SYMBOL_GPL(omapdss_display_get); diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index 9fcc50217133..ca4f3c4c6318 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -39,6 +39,7 @@ struct dpi_data { struct platform_device *pdev; enum dss_model dss_model; struct dss_device *dss; + unsigned int id; struct regulator *vdds_dsi_reg; enum dss_clk_source clk_src; @@ -346,10 +347,9 @@ static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req, static int dpi_set_mode(struct dpi_data *dpi) { - struct videomode *vm = &dpi->vm; + const struct videomode *vm = &dpi->vm; int lck_div = 0, pck_div = 0; unsigned long fck = 0; - unsigned long pck; int r = 0; if (dpi->pll) @@ -361,17 +361,6 @@ static int dpi_set_mode(struct dpi_data *dpi) if (r) return r; - pck = fck / lck_div / pck_div; - - if (pck != vm->pixelclock) { - DSSWARN("Could not find exact pixel clock. 
Requested %lu Hz, got %lu Hz\n", - vm->pixelclock, pck); - - vm->pixelclock = pck; - } - - dss_mgr_set_timings(&dpi->output, vm); - return 0; } @@ -413,7 +402,7 @@ static int dpi_display_enable(struct omap_dss_device *dssdev) if (r) goto err_get_dispc; - r = dss_dpi_select_source(dpi->dss, out->port_num, out->dispc_channel); + r = dss_dpi_select_source(dpi->dss, dpi->id, out->dispc_channel); if (r) goto err_src_sel; @@ -478,7 +467,7 @@ static void dpi_display_disable(struct omap_dss_device *dssdev) } static void dpi_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) + const struct videomode *vm) { struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev); @@ -491,23 +480,10 @@ static void dpi_set_timings(struct omap_dss_device *dssdev, mutex_unlock(&dpi->lock); } -static void dpi_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev); - - mutex_lock(&dpi->lock); - - *vm = dpi->vm; - - mutex_unlock(&dpi->lock); -} - static int dpi_check_timings(struct omap_dss_device *dssdev, struct videomode *vm) { struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev); - enum omap_channel channel = dpi->output.dispc_channel; int lck_div, pck_div; unsigned long fck; unsigned long pck; @@ -517,9 +493,6 @@ static int dpi_check_timings(struct omap_dss_device *dssdev, if (vm->hactive % 8 != 0) return -EINVAL; - if (!dispc_mgr_timings_ok(dpi->dss->dispc, channel, vm)) - return -EINVAL; - if (vm->pixelclock == 0) return -EINVAL; @@ -562,38 +535,6 @@ static int dpi_verify_pll(struct dss_pll *pll) return 0; } -static const struct soc_device_attribute dpi_soc_devices[] = { - { .machine = "OMAP3[456]*" }, - { .machine = "[AD]M37*" }, - { /* sentinel */ } -}; - -static int dpi_init_regulator(struct dpi_data *dpi) -{ - struct regulator *vdds_dsi; - - /* - * The DPI uses the DSI VDDS on OMAP34xx, OMAP35xx, OMAP36xx, AM37xx and - * DM37xx only. - */ - if (!soc_device_match(dpi_soc_devices)) - return 0; - - if (dpi->vdds_dsi_reg) - return 0; - - vdds_dsi = devm_regulator_get(&dpi->pdev->dev, "vdds_dsi"); - if (IS_ERR(vdds_dsi)) { - if (PTR_ERR(vdds_dsi) != -EPROBE_DEFER) - DSSERR("can't get VDDS_DSI regulator\n"); - return PTR_ERR(vdds_dsi); - } - - dpi->vdds_dsi_reg = vdds_dsi; - - return 0; -} - static void dpi_init_pll(struct dpi_data *dpi) { struct dss_pll *pll; @@ -621,7 +562,7 @@ static void dpi_init_pll(struct dpi_data *dpi) * the channel in some more dynamic manner, or get the channel as a user * parameter. 
*/ -static enum omap_channel dpi_get_channel(struct dpi_data *dpi, int port_num) +static enum omap_channel dpi_get_channel(struct dpi_data *dpi) { switch (dpi->dss_model) { case DSS_MODEL_OMAP2: @@ -629,7 +570,7 @@ static enum omap_channel dpi_get_channel(struct dpi_data *dpi, int port_num) return OMAP_DSS_CHANNEL_LCD; case DSS_MODEL_DRA7: - switch (port_num) { + switch (dpi->id) { case 2: return OMAP_DSS_CHANNEL_LCD3; case 1: @@ -651,49 +592,31 @@ static enum omap_channel dpi_get_channel(struct dpi_data *dpi, int port_num) } } -static int dpi_connect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static int dpi_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev); + struct dpi_data *dpi = dpi_get_data_from_dssdev(dst); int r; - r = dpi_init_regulator(dpi); - if (r) - return r; - dpi_init_pll(dpi); - r = dss_mgr_connect(&dpi->output, dssdev); + r = omapdss_device_connect(dst->dss, dst, dst->next); if (r) return r; - r = omapdss_output_set_device(dssdev, dst); - if (r) { - DSSERR("failed to connect output to new device: %s\n", - dst->name); - dss_mgr_disconnect(&dpi->output, dssdev); - return r; - } - + dst->dispc_channel_connected = true; return 0; } -static void dpi_disconnect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static void dpi_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev); - - WARN_ON(dst != dssdev->dst); + dst->dispc_channel_connected = false; - if (dst != dssdev->dst) - return; - - omapdss_output_unset_device(dssdev); - - dss_mgr_disconnect(&dpi->output, dssdev); + omapdss_device_disconnect(dst, dst->next); } -static const struct omapdss_dpi_ops dpi_ops = { +static const struct omap_dss_device_ops dpi_ops = { .connect = dpi_connect, .disconnect = dpi_disconnect, @@ -702,18 +625,16 @@ static const struct omapdss_dpi_ops dpi_ops = { .check_timings = dpi_check_timings, .set_timings = dpi_set_timings, - .get_timings = dpi_get_timings, }; -static void dpi_init_output_port(struct dpi_data *dpi, struct device_node *port) +static int dpi_init_output_port(struct dpi_data *dpi, struct device_node *port) { struct omap_dss_device *out = &dpi->output; + u32 port_num = 0; int r; - u32 port_num; - r = of_property_read_u32(port, "reg", &port_num); - if (r) - port_num = 0; + of_property_read_u32(port, "reg", &port_num); + dpi->id = port_num <= 2 ? 
port_num : 0; switch (port_num) { case 2: @@ -731,12 +652,28 @@ static void dpi_init_output_port(struct dpi_data *dpi, struct device_node *port) out->dev = &dpi->pdev->dev; out->id = OMAP_DSS_OUTPUT_DPI; out->output_type = OMAP_DISPLAY_TYPE_DPI; - out->dispc_channel = dpi_get_channel(dpi, port_num); - out->port_num = port_num; - out->ops.dpi = &dpi_ops; + out->dispc_channel = dpi_get_channel(dpi); + out->of_ports = BIT(port_num); + out->ops = &dpi_ops; out->owner = THIS_MODULE; - omapdss_register_output(out); + out->next = omapdss_of_find_connected_device(out->dev->of_node, 0); + if (IS_ERR(out->next)) { + if (PTR_ERR(out->next) != -EPROBE_DEFER) + dev_err(out->dev, "failed to find video sink\n"); + return PTR_ERR(out->next); + } + + r = omapdss_output_validate(out); + if (r) { + omapdss_device_put(out->next); + out->next = NULL; + return r; + } + + omapdss_device_register(out); + + return 0; } static void dpi_uninit_output_port(struct device_node *port) @@ -744,7 +681,38 @@ static void dpi_uninit_output_port(struct device_node *port) struct dpi_data *dpi = port->data; struct omap_dss_device *out = &dpi->output; - omapdss_unregister_output(out); + if (out->next) + omapdss_device_put(out->next); + omapdss_device_unregister(out); +} + +static const struct soc_device_attribute dpi_soc_devices[] = { + { .machine = "OMAP3[456]*" }, + { .machine = "[AD]M37*" }, + { /* sentinel */ } +}; + +static int dpi_init_regulator(struct dpi_data *dpi) +{ + struct regulator *vdds_dsi; + + /* + * The DPI uses the DSI VDDS on OMAP34xx, OMAP35xx, OMAP36xx, AM37xx and + * DM37xx only. + */ + if (!soc_device_match(dpi_soc_devices)) + return 0; + + vdds_dsi = devm_regulator_get(&dpi->pdev->dev, "vdds_dsi"); + if (IS_ERR(vdds_dsi)) { + if (PTR_ERR(vdds_dsi) != -EPROBE_DEFER) + DSSERR("can't get VDDS_DSI regulator\n"); + return PTR_ERR(vdds_dsi); + } + + dpi->vdds_dsi_reg = vdds_dsi; + + return 0; } int dpi_init_port(struct dss_device *dss, struct platform_device *pdev, @@ -764,15 +732,14 @@ int dpi_init_port(struct dss_device *dss, struct platform_device *pdev, return 0; r = of_property_read_u32(ep, "data-lines", &datalines); + of_node_put(ep); if (r) { DSSERR("failed to parse datalines\n"); - goto err_datalines; + return r; } dpi->data_lines = datalines; - of_node_put(ep); - dpi->pdev = pdev; dpi->dss_model = dss_model; dpi->dss = dss; @@ -780,14 +747,11 @@ int dpi_init_port(struct dss_device *dss, struct platform_device *pdev, mutex_init(&dpi->lock); - dpi_init_output_port(dpi, port); - - return 0; - -err_datalines: - of_node_put(ep); + r = dpi_init_regulator(dpi); + if (r) + return r; - return r; + return dpi_init_output_port(dpi, port); } void dpi_uninit_port(struct device_node *port) diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 74467b308721..394c129cfb3b 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -403,6 +403,7 @@ struct dsi_data { struct { struct dss_debugfs_entry *irqs; struct dss_debugfs_entry *regs; + struct dss_debugfs_entry *clks; } debugfs; #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS @@ -442,27 +443,6 @@ static inline struct dsi_data *to_dsi_data(struct omap_dss_device *dssdev) return dev_get_drvdata(dssdev->dev); } -static struct dsi_data *dsi_get_dsi_from_id(int module) -{ - struct omap_dss_device *out; - enum omap_dss_output_id id; - - switch (module) { - case 0: - id = OMAP_DSS_OUTPUT_DSI1; - break; - case 1: - id = OMAP_DSS_OUTPUT_DSI2; - break; - default: - return NULL; - } - - out = omap_dss_get_output(id); - 
- return out ? to_dsi_data(out) : NULL; -} - static inline void dsi_write_reg(struct dsi_data *dsi, const struct dsi_reg idx, u32 val) { @@ -1157,26 +1137,6 @@ static void dsi_runtime_put(struct dsi_data *dsi) WARN_ON(r < 0 && r != -ENOSYS); } -static int dsi_regulator_init(struct dsi_data *dsi) -{ - struct regulator *vdds_dsi; - - if (dsi->vdds_dsi_reg != NULL) - return 0; - - vdds_dsi = devm_regulator_get(dsi->dev, "vdd"); - - if (IS_ERR(vdds_dsi)) { - if (PTR_ERR(vdds_dsi) != -EPROBE_DEFER) - DSSERR("can't get DSI VDD regulator\n"); - return PTR_ERR(vdds_dsi); - } - - dsi->vdds_dsi_reg = vdds_dsi; - - return 0; -} - static void _dsi_print_reset_status(struct dsi_data *dsi) { u32 l; @@ -1373,10 +1333,6 @@ static int dsi_pll_enable(struct dss_pll *pll) DSSDBG("PLL init\n"); - r = dsi_regulator_init(dsi); - if (r) - return r; - r = dsi_runtime_get(dsi); if (r) return r; @@ -1448,8 +1404,9 @@ static void dsi_pll_disable(struct dss_pll *pll) dsi_pll_uninit(dsi, true); } -static void dsi_dump_dsi_clocks(struct dsi_data *dsi, struct seq_file *s) +static int dsi_dump_dsi_clocks(struct seq_file *s, void *p) { + struct dsi_data *dsi = p; struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo; enum dss_clk_source dispc_clk_src, dsi_clk_src; int dsi_module = dsi->module_id; @@ -1459,7 +1416,7 @@ static void dsi_dump_dsi_clocks(struct dsi_data *dsi, struct seq_file *s) dsi_clk_src = dss_get_dsi_clk_source(dsi->dss, dsi_module); if (dsi_runtime_get(dsi)) - return; + return 0; seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1); @@ -1503,23 +1460,14 @@ static void dsi_dump_dsi_clocks(struct dsi_data *dsi, struct seq_file *s) seq_printf(s, "LP_CLK\t\t%lu\n", dsi->current_lp_cinfo.lp_clk); dsi_runtime_put(dsi); -} - -void dsi_dump_clocks(struct seq_file *s) -{ - struct dsi_data *dsi; - int i; - for (i = 0; i < MAX_NUM_DSI; i++) { - dsi = dsi_get_dsi_from_id(i); - if (dsi) - dsi_dump_dsi_clocks(dsi, s); - } + return 0; } #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS -static void dsi_dump_dsi_irqs(struct dsi_data *dsi, struct seq_file *s) +static int dsi_dump_dsi_irqs(struct seq_file *s, void *p) { + struct dsi_data *dsi = p; unsigned long flags; struct dsi_irq_stats stats; @@ -1603,33 +1551,20 @@ static void dsi_dump_dsi_irqs(struct dsi_data *dsi, struct seq_file *s) PIS(ULPSACTIVENOT_ALL0); PIS(ULPSACTIVENOT_ALL1); #undef PIS -} -static int dsi1_dump_irqs(struct seq_file *s, void *p) -{ - struct dsi_data *dsi = dsi_get_dsi_from_id(0); - - dsi_dump_dsi_irqs(dsi, s); - return 0; -} - -static int dsi2_dump_irqs(struct seq_file *s, void *p) -{ - struct dsi_data *dsi = dsi_get_dsi_from_id(1); - - dsi_dump_dsi_irqs(dsi, s); return 0; } #endif -static void dsi_dump_dsi_regs(struct dsi_data *dsi, struct seq_file *s) +static int dsi_dump_dsi_regs(struct seq_file *s, void *p) { -#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsi, r)) + struct dsi_data *dsi = p; if (dsi_runtime_get(dsi)) - return; + return 0; dsi_enable_scp_clk(dsi); +#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsi, r)) DUMPREG(DSI_REVISION); DUMPREG(DSI_SYSCONFIG); DUMPREG(DSI_SYSSTATUS); @@ -1699,25 +1634,11 @@ static void dsi_dump_dsi_regs(struct dsi_data *dsi, struct seq_file *s) DUMPREG(DSI_PLL_GO); DUMPREG(DSI_PLL_CONFIGURATION1); DUMPREG(DSI_PLL_CONFIGURATION2); +#undef DUMPREG dsi_disable_scp_clk(dsi); dsi_runtime_put(dsi); -#undef DUMPREG -} - -static int dsi1_dump_regs(struct seq_file *s, void *p) -{ - struct dsi_data *dsi = dsi_get_dsi_from_id(0); - - dsi_dump_dsi_regs(dsi, s); - return 0; -} - -static int 
dsi2_dump_regs(struct seq_file *s, void *p) -{ - struct dsi_data *dsi = dsi_get_dsi_from_id(1); - dsi_dump_dsi_regs(dsi, s); return 0; } @@ -3344,7 +3265,7 @@ static void dsi_config_vp_num_line_buffers(struct dsi_data *dsi) if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) { int bpp = dsi_get_pixel_size(dsi->pix_fmt); - struct videomode *vm = &dsi->vm; + const struct videomode *vm = &dsi->vm; /* * Don't use line buffers if width is greater than the video * port's line buffer size @@ -3473,7 +3394,7 @@ static void dsi_config_cmd_mode_interleaving(struct dsi_data *dsi) int ddr_clk_pre, ddr_clk_post, enter_hs_mode_lat, exit_hs_mode_lat; int tclk_trail, ths_exit, exiths_clk; bool ddr_alwon; - struct videomode *vm = &dsi->vm; + const struct videomode *vm = &dsi->vm; int bpp = dsi_get_pixel_size(dsi->pix_fmt); int ndl = dsi->num_lanes_used - 1; int dsi_fclk_hsdiv = dsi->user_dsi_cinfo.mX[HSDIV_DSI] + 1; @@ -3723,7 +3644,7 @@ static void dsi_proto_timings(struct dsi_data *dsi) int vbp = dsi->vm_timings.vbp; int window_sync = dsi->vm_timings.window_sync; bool hsync_end; - struct videomode *vm = &dsi->vm; + const struct videomode *vm = &dsi->vm; int bpp = dsi_get_pixel_size(dsi->pix_fmt); int tl, t_he, width_bytes; @@ -3980,8 +3901,6 @@ static void dsi_update_screen_dispc(struct dsi_data *dsi) msecs_to_jiffies(250)); BUG_ON(r == 0); - dss_mgr_set_timings(&dsi->output, &dsi->vm); - dss_mgr_start_update(&dsi->output); if (dsi->te_enabled) { @@ -4123,24 +4042,6 @@ static int dsi_display_init_dispc(struct dsi_data *dsi) dsi->mgr_config.fifohandcheck = false; } - /* - * override interlace, logic level and edge related parameters in - * videomode with default values - */ - dsi->vm.flags &= ~DISPLAY_FLAGS_INTERLACED; - dsi->vm.flags &= ~DISPLAY_FLAGS_HSYNC_LOW; - dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH; - dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW; - dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH; - dsi->vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE; - dsi->vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE; - dsi->vm.flags &= ~DISPLAY_FLAGS_DE_LOW; - dsi->vm.flags |= DISPLAY_FLAGS_DE_HIGH; - dsi->vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE; - dsi->vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE; - - dss_mgr_set_timings(&dsi->output, &dsi->vm); - r = dsi_configure_dispc_clocks(dsi); if (r) goto err1; @@ -4840,6 +4741,19 @@ static int dsi_set_config(struct omap_dss_device *dssdev, dsi->user_dispc_cinfo = ctx.dispc_cinfo; dsi->vm = ctx.vm; + + /* + * override interlace, logic level and edge related parameters in + * videomode with default values + */ + dsi->vm.flags &= ~DISPLAY_FLAGS_INTERLACED; + dsi->vm.flags &= ~DISPLAY_FLAGS_HSYNC_LOW; + dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH; + dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW; + dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH; + + dss_mgr_set_timings(&dsi->output, &dsi->vm); + dsi->vm_timings = ctx.dsi_vm; mutex_unlock(&dsi->lock); @@ -4960,163 +4874,71 @@ static int dsi_get_clocks(struct dsi_data *dsi) return 0; } -static int dsi_connect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static int dsi_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct dsi_data *dsi = to_dsi_data(dssdev); int r; - r = dsi_regulator_init(dsi); - if (r) - return r; - - r = dss_mgr_connect(&dsi->output, dssdev); + r = omapdss_device_connect(dst->dss, dst, dst->next); if (r) return r; - r = omapdss_output_set_device(dssdev, dst); - if (r) { - DSSERR("failed to connect output to new device: %s\n", - dssdev->name); - dss_mgr_disconnect(&dsi->output, dssdev); - return r; - } - + 
dst->dispc_channel_connected = true; return 0; } -static void dsi_disconnect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static void dsi_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct dsi_data *dsi = to_dsi_data(dssdev); + dst->dispc_channel_connected = false; - WARN_ON(dst != dssdev->dst); - - if (dst != dssdev->dst) - return; - - omapdss_output_unset_device(dssdev); - - dss_mgr_disconnect(&dsi->output, dssdev); + omapdss_device_disconnect(dst, dst->next); } -static const struct omapdss_dsi_ops dsi_ops = { +static const struct omap_dss_device_ops dsi_ops = { .connect = dsi_connect, .disconnect = dsi_disconnect, - - .bus_lock = dsi_bus_lock, - .bus_unlock = dsi_bus_unlock, - .enable = dsi_display_enable, - .disable = dsi_display_disable, - - .enable_hs = dsi_vc_enable_hs, - - .configure_pins = dsi_configure_pins, - .set_config = dsi_set_config, - - .enable_video_output = dsi_enable_video_output, - .disable_video_output = dsi_disable_video_output, - - .update = dsi_update, - - .enable_te = dsi_enable_te, - - .request_vc = dsi_request_vc, - .set_vc_id = dsi_set_vc_id, - .release_vc = dsi_release_vc, - - .dcs_write = dsi_vc_dcs_write, - .dcs_write_nosync = dsi_vc_dcs_write_nosync, - .dcs_read = dsi_vc_dcs_read, - - .gen_write = dsi_vc_generic_write, - .gen_write_nosync = dsi_vc_generic_write_nosync, - .gen_read = dsi_vc_generic_read, - - .bta_sync = dsi_vc_send_bta_sync, - - .set_max_rx_packet_size = dsi_vc_set_max_rx_packet_size, -}; - -static void dsi_init_output(struct dsi_data *dsi) -{ - struct omap_dss_device *out = &dsi->output; - - out->dev = dsi->dev; - out->id = dsi->module_id == 0 ? - OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2; - - out->output_type = OMAP_DISPLAY_TYPE_DSI; - out->name = dsi->module_id == 0 ? 
"dsi.0" : "dsi.1"; - out->dispc_channel = dsi_get_channel(dsi); - out->ops.dsi = &dsi_ops; - out->owner = THIS_MODULE; - omapdss_register_output(out); -} + .dsi = { + .bus_lock = dsi_bus_lock, + .bus_unlock = dsi_bus_unlock, -static void dsi_uninit_output(struct dsi_data *dsi) -{ - struct omap_dss_device *out = &dsi->output; + .disable = dsi_display_disable, - omapdss_unregister_output(out); -} + .enable_hs = dsi_vc_enable_hs, -static int dsi_probe_of(struct dsi_data *dsi) -{ - struct device_node *node = dsi->dev->of_node; - struct property *prop; - u32 lane_arr[10]; - int len, num_pins; - int r, i; - struct device_node *ep; - struct omap_dsi_pin_config pin_cfg; - - ep = of_graph_get_endpoint_by_regs(node, 0, 0); - if (!ep) - return 0; + .configure_pins = dsi_configure_pins, + .set_config = dsi_set_config, - prop = of_find_property(ep, "lanes", &len); - if (prop == NULL) { - dev_err(dsi->dev, "failed to find lane data\n"); - r = -EINVAL; - goto err; - } + .enable_video_output = dsi_enable_video_output, + .disable_video_output = dsi_disable_video_output, - num_pins = len / sizeof(u32); + .update = dsi_update, - if (num_pins < 4 || num_pins % 2 != 0 || - num_pins > dsi->num_lanes_supported * 2) { - dev_err(dsi->dev, "bad number of lanes\n"); - r = -EINVAL; - goto err; - } + .enable_te = dsi_enable_te, - r = of_property_read_u32_array(ep, "lanes", lane_arr, num_pins); - if (r) { - dev_err(dsi->dev, "failed to read lane data\n"); - goto err; - } + .request_vc = dsi_request_vc, + .set_vc_id = dsi_set_vc_id, + .release_vc = dsi_release_vc, - pin_cfg.num_pins = num_pins; - for (i = 0; i < num_pins; ++i) - pin_cfg.pins[i] = (int)lane_arr[i]; + .dcs_write = dsi_vc_dcs_write, + .dcs_write_nosync = dsi_vc_dcs_write_nosync, + .dcs_read = dsi_vc_dcs_read, - r = dsi_configure_pins(&dsi->output, &pin_cfg); - if (r) { - dev_err(dsi->dev, "failed to configure pins"); - goto err; - } + .gen_write = dsi_vc_generic_write, + .gen_write_nosync = dsi_vc_generic_write_nosync, + .gen_read = dsi_vc_generic_read, - of_node_put(ep); + .bta_sync = dsi_vc_send_bta_sync, - return 0; + .set_max_rx_packet_size = dsi_vc_set_max_rx_packet_size, + }, +}; -err: - of_node_put(ep); - return r; -} +/* ----------------------------------------------------------------------------- + * PLL + */ static const struct dss_pll_ops dsi_pll_ops = { .enable = dsi_pll_enable, @@ -5231,7 +5053,175 @@ static int dsi_init_pll_data(struct dss_device *dss, struct dsi_data *dsi) return 0; } -/* DSI1 HW IP initialisation */ +/* ----------------------------------------------------------------------------- + * Component Bind & Unbind + */ + +static int dsi_bind(struct device *dev, struct device *master, void *data) +{ + struct dss_device *dss = dss_get_device(master); + struct dsi_data *dsi = dev_get_drvdata(dev); + char name[10]; + u32 rev; + int r; + + dsi->dss = dss; + + dsi_init_pll_data(dss, dsi); + + r = dsi_runtime_get(dsi); + if (r) + return r; + + rev = dsi_read_reg(dsi, DSI_REVISION); + dev_dbg(dev, "OMAP DSI rev %d.%d\n", + FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); + + dsi->line_buffer_size = dsi_get_line_buf_size(dsi); + + dsi_runtime_put(dsi); + + snprintf(name, sizeof(name), "dsi%u_regs", dsi->module_id + 1); + dsi->debugfs.regs = dss_debugfs_create_file(dss, name, + dsi_dump_dsi_regs, &dsi); +#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS + snprintf(name, sizeof(name), "dsi%u_irqs", dsi->module_id + 1); + dsi->debugfs.irqs = dss_debugfs_create_file(dss, name, + dsi_dump_dsi_irqs, &dsi); +#endif + snprintf(name, sizeof(name), "dsi%u_clks", 
dsi->module_id + 1); + dsi->debugfs.clks = dss_debugfs_create_file(dss, name, + dsi_dump_dsi_clocks, &dsi); + + return 0; +} + +static void dsi_unbind(struct device *dev, struct device *master, void *data) +{ + struct dsi_data *dsi = dev_get_drvdata(dev); + + dss_debugfs_remove_file(dsi->debugfs.clks); + dss_debugfs_remove_file(dsi->debugfs.irqs); + dss_debugfs_remove_file(dsi->debugfs.regs); + + of_platform_depopulate(dev); + + WARN_ON(dsi->scp_clk_refcount > 0); + + dss_pll_unregister(&dsi->pll); +} + +static const struct component_ops dsi_component_ops = { + .bind = dsi_bind, + .unbind = dsi_unbind, +}; + +/* ----------------------------------------------------------------------------- + * Probe & Remove, Suspend & Resume + */ + +static int dsi_init_output(struct dsi_data *dsi) +{ + struct omap_dss_device *out = &dsi->output; + int r; + + out->dev = dsi->dev; + out->id = dsi->module_id == 0 ? + OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2; + + out->output_type = OMAP_DISPLAY_TYPE_DSI; + out->name = dsi->module_id == 0 ? "dsi.0" : "dsi.1"; + out->dispc_channel = dsi_get_channel(dsi); + out->ops = &dsi_ops; + out->owner = THIS_MODULE; + out->of_ports = BIT(0); + out->bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE + | DRM_BUS_FLAG_DE_HIGH + | DRM_BUS_FLAG_SYNC_NEGEDGE; + + out->next = omapdss_of_find_connected_device(out->dev->of_node, 0); + if (IS_ERR(out->next)) { + if (PTR_ERR(out->next) != -EPROBE_DEFER) + dev_err(out->dev, "failed to find video sink\n"); + return PTR_ERR(out->next); + } + + r = omapdss_output_validate(out); + if (r) { + omapdss_device_put(out->next); + out->next = NULL; + return r; + } + + omapdss_device_register(out); + + return 0; +} + +static void dsi_uninit_output(struct dsi_data *dsi) +{ + struct omap_dss_device *out = &dsi->output; + + if (out->next) + omapdss_device_put(out->next); + omapdss_device_unregister(out); +} + +static int dsi_probe_of(struct dsi_data *dsi) +{ + struct device_node *node = dsi->dev->of_node; + struct property *prop; + u32 lane_arr[10]; + int len, num_pins; + int r, i; + struct device_node *ep; + struct omap_dsi_pin_config pin_cfg; + + ep = of_graph_get_endpoint_by_regs(node, 0, 0); + if (!ep) + return 0; + + prop = of_find_property(ep, "lanes", &len); + if (prop == NULL) { + dev_err(dsi->dev, "failed to find lane data\n"); + r = -EINVAL; + goto err; + } + + num_pins = len / sizeof(u32); + + if (num_pins < 4 || num_pins % 2 != 0 || + num_pins > dsi->num_lanes_supported * 2) { + dev_err(dsi->dev, "bad number of lanes\n"); + r = -EINVAL; + goto err; + } + + r = of_property_read_u32_array(ep, "lanes", lane_arr, num_pins); + if (r) { + dev_err(dsi->dev, "failed to read lane data\n"); + goto err; + } + + pin_cfg.num_pins = num_pins; + for (i = 0; i < num_pins; ++i) + pin_cfg.pins[i] = (int)lane_arr[i]; + + r = dsi_configure_pins(&dsi->output, &pin_cfg); + if (r) { + dev_err(dsi->dev, "failed to configure pins"); + goto err; + } + + of_node_put(ep); + + return 0; + +err: + of_node_put(ep); + return r; +} + static const struct dsi_of_data dsi_of_data_omap34xx = { .model = DSI_MODEL_OMAP3, .pll_hw = &dss_omap3_dsi_pll_hw, @@ -5297,23 +5287,21 @@ static const struct soc_device_attribute dsi_soc_devices[] = { { /* sentinel */ } }; -static int dsi_bind(struct device *dev, struct device *master, void *data) +static int dsi_probe(struct platform_device *pdev) { - struct platform_device *pdev = to_platform_device(dev); - struct dss_device *dss = dss_get_device(master); const struct soc_device_attribute *soc; const struct dsi_module_id_data *d; - u32 rev; - 
int r, i; + struct device *dev = &pdev->dev; struct dsi_data *dsi; struct resource *dsi_mem; struct resource *res; + unsigned int i; + int r; dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL); if (!dsi) return -ENOMEM; - dsi->dss = dss; dsi->dev = dev; dev_set_drvdata(dev, dsi); @@ -5364,6 +5352,13 @@ static int dsi_bind(struct device *dev, struct device *master, void *data) return r; } + dsi->vdds_dsi_reg = devm_regulator_get(dev, "vdd"); + if (IS_ERR(dsi->vdds_dsi_reg)) { + if (PTR_ERR(dsi->vdds_dsi_reg) != -EPROBE_DEFER) + DSSERR("can't get DSI VDD regulator\n"); + return PTR_ERR(dsi->vdds_dsi_reg); + } + soc = soc_device_match(dsi_soc_devices); if (soc) dsi->data = soc->data; @@ -5410,18 +5405,8 @@ static int dsi_bind(struct device *dev, struct device *master, void *data) if (r) return r; - dsi_init_pll_data(dss, dsi); - pm_runtime_enable(dev); - r = dsi_runtime_get(dsi); - if (r) - goto err_runtime_get; - - rev = dsi_read_reg(dsi, DSI_REVISION); - dev_dbg(dev, "OMAP DSI rev %d.%d\n", - FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); - /* DSI on OMAP3 doesn't have register DSI_GNQ, set number * of data to 3 by default */ if (dsi->data->quirks & DSI_QUIRK_GNQ) @@ -5430,88 +5415,48 @@ static int dsi_bind(struct device *dev, struct device *master, void *data) else dsi->num_lanes_supported = 3; - dsi->line_buffer_size = dsi_get_line_buf_size(dsi); - - dsi_init_output(dsi); + r = dsi_init_output(dsi); + if (r) + goto err_pm_disable; r = dsi_probe_of(dsi); if (r) { DSSERR("Invalid DSI DT data\n"); - goto err_probe_of; + goto err_uninit_output; } r = of_platform_populate(dev->of_node, NULL, NULL, dev); if (r) DSSERR("Failed to populate DSI child devices: %d\n", r); - dsi_runtime_put(dsi); - - if (dsi->module_id == 0) - dsi->debugfs.regs = dss_debugfs_create_file(dss, "dsi1_regs", - dsi1_dump_regs, - &dsi); - else - dsi->debugfs.regs = dss_debugfs_create_file(dss, "dsi2_regs", - dsi2_dump_regs, - &dsi); -#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS - if (dsi->module_id == 0) - dsi->debugfs.irqs = dss_debugfs_create_file(dss, "dsi1_irqs", - dsi1_dump_irqs, - &dsi); - else - dsi->debugfs.irqs = dss_debugfs_create_file(dss, "dsi2_irqs", - dsi2_dump_irqs, - &dsi); -#endif + r = component_add(&pdev->dev, &dsi_component_ops); + if (r) + goto err_uninit_output; return 0; -err_probe_of: +err_uninit_output: dsi_uninit_output(dsi); - dsi_runtime_put(dsi); - -err_runtime_get: +err_pm_disable: pm_runtime_disable(dev); return r; } -static void dsi_unbind(struct device *dev, struct device *master, void *data) +static int dsi_remove(struct platform_device *pdev) { - struct dsi_data *dsi = dev_get_drvdata(dev); + struct dsi_data *dsi = platform_get_drvdata(pdev); - dss_debugfs_remove_file(dsi->debugfs.irqs); - dss_debugfs_remove_file(dsi->debugfs.regs); - - of_platform_depopulate(dev); - - WARN_ON(dsi->scp_clk_refcount > 0); - - dss_pll_unregister(&dsi->pll); + component_del(&pdev->dev, &dsi_component_ops); dsi_uninit_output(dsi); - pm_runtime_disable(dev); + pm_runtime_disable(&pdev->dev); if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) { regulator_disable(dsi->vdds_dsi_reg); dsi->vdds_dsi_enabled = false; } -} -static const struct component_ops dsi_component_ops = { - .bind = dsi_bind, - .unbind = dsi_unbind, -}; - -static int dsi_probe(struct platform_device *pdev) -{ - return component_add(&pdev->dev, &dsi_component_ops); -} - -static int dsi_remove(struct platform_device *pdev) -{ - component_del(&pdev->dev, &dsi_component_ops); return 0; } diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c 
b/drivers/gpu/drm/omapdrm/dss/dss-of.c index 4602a79c6c44..0422597ac6b0 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss-of.c +++ b/drivers/gpu/drm/omapdrm/dss/dss-of.c @@ -21,7 +21,8 @@ #include "omapdss.h" -struct device_node *dss_of_port_get_parent_device(struct device_node *port) +static struct device_node * +dss_of_port_get_parent_device(struct device_node *port) { struct device_node *np; int i; @@ -45,41 +46,37 @@ struct device_node *dss_of_port_get_parent_device(struct device_node *port) return NULL; } -u32 dss_of_port_get_port_number(struct device_node *port) -{ - int r; - u32 reg; - - r = of_property_read_u32(port, "reg", ®); - if (r) - reg = 0; - - return reg; -} - struct omap_dss_device * -omapdss_of_find_source_for_first_ep(struct device_node *node) +omapdss_of_find_connected_device(struct device_node *node, unsigned int port) { - struct device_node *ep; + struct device_node *src_node; struct device_node *src_port; + struct device_node *ep; struct omap_dss_device *src; + u32 port_number = 0; - ep = of_graph_get_endpoint_by_regs(node, 0, 0); + /* Get the endpoint... */ + ep = of_graph_get_endpoint_by_regs(node, port, 0); if (!ep) - return ERR_PTR(-EINVAL); + return NULL; + /* ... and its remote port... */ src_port = of_graph_get_remote_port(ep); - if (!src_port) { - of_node_put(ep); - return ERR_PTR(-EINVAL); - } - of_node_put(ep); + if (!src_port) + return NULL; - src = omap_dss_find_output_by_port_node(src_port); - + /* ... and the remote port's number and parent... */ + of_property_read_u32(src_port, "reg", &port_number); + src_node = dss_of_port_get_parent_device(src_port); of_node_put(src_port); + if (!src_node) + return ERR_PTR(-EINVAL); + + /* ... and finally the connected device. */ + src = omapdss_find_device_by_port(src_node, port_number); + of_node_put(src_node); return src ? 
src : ERR_PTR(-EPROBE_DEFER); } -EXPORT_SYMBOL_GPL(omapdss_of_find_source_for_first_ep); +EXPORT_SYMBOL_GPL(omapdss_of_find_connected_device); diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index cb80ddaa19d2..19fc4dfc429e 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -394,9 +394,6 @@ static int dss_debug_dump_clocks(struct seq_file *s, void *p) dss_dump_clocks(dss, s); dispc_dump_clocks(dss->dispc, s); -#ifdef CONFIG_OMAP2_DSS_DSI - dsi_dump_clocks(s); -#endif return 0; } @@ -681,12 +678,6 @@ unsigned long dss_get_max_fck_rate(struct dss_device *dss) return dss->feat->fck_freq_max; } -enum omap_dss_output_id dss_get_supported_outputs(struct dss_device *dss, - enum omap_channel channel) -{ - return dss->feat->outputs[channel]; -} - static int dss_setup_default_clock(struct dss_device *dss) { unsigned long max_dss_fck, prate; @@ -1183,7 +1174,8 @@ static int dss_init_ports(struct dss_device *dss) struct platform_device *pdev = dss->pdev; struct device_node *parent = pdev->dev.of_node; struct device_node *port; - int i; + unsigned int i; + int r; for (i = 0; i < dss->feat->num_ports; i++) { port = of_graph_get_port_by_id(parent, i); @@ -1192,11 +1184,17 @@ static int dss_init_ports(struct dss_device *dss) switch (dss->feat->ports[i]) { case OMAP_DISPLAY_TYPE_DPI: - dpi_init_port(dss, pdev, port, dss->feat->model); + r = dpi_init_port(dss, pdev, port, dss->feat->model); + if (r) + return r; break; + case OMAP_DISPLAY_TYPE_SDI: - sdi_init_port(dss, pdev, port); + r = sdi_init_port(dss, pdev, port); + if (r) + return r; break; + default: break; } @@ -1315,6 +1313,7 @@ static const struct soc_device_attribute dss_soc_devices[] = { static int dss_bind(struct device *dev) { struct dss_device *dss = dev_get_drvdata(dev); + struct platform_device *drm_pdev; int r; r = component_bind_all(dev, NULL); @@ -1323,14 +1322,25 @@ static int dss_bind(struct device *dev) pm_set_vt_switch(0); - omapdss_gather_components(dev); omapdss_set_dss(dss); + drm_pdev = platform_device_register_simple("omapdrm", 0, NULL, 0); + if (IS_ERR(drm_pdev)) { + component_unbind_all(dev, NULL); + return PTR_ERR(drm_pdev); + } + + dss->drm_pdev = drm_pdev; + return 0; } static void dss_unbind(struct device *dev) { + struct dss_device *dss = dev_get_drvdata(dev); + + platform_device_unregister(dss->drm_pdev); + omapdss_set_dss(NULL); component_unbind_all(dev, NULL); @@ -1474,6 +1484,8 @@ static int dss_probe(struct platform_device *pdev) dss); /* Add all the child devices as components. 
*/ + omapdss_gather_components(&pdev->dev); + device_for_each_child(&pdev->dev, &match, dss_add_child_component); r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match); @@ -1539,12 +1551,9 @@ static void dss_shutdown(struct platform_device *pdev) DSSDBG("shutdown\n"); - for_each_dss_dev(dssdev) { - if (!dssdev->driver) - continue; - + for_each_dss_display(dssdev) { if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) - dssdev->driver->disable(dssdev); + dssdev->ops->disable(dssdev); } } diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h index 38302631b64b..37790c363128 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.h +++ b/drivers/gpu/drm/omapdrm/dss/dss.h @@ -238,6 +238,8 @@ struct dss_device { struct regmap *syscon_pll_ctrl; u32 syscon_pll_ctrl_offset; + struct platform_device *drm_pdev; + struct clk *parent_clk; struct clk *dss_clk; unsigned long dss_clk_rate; @@ -267,6 +269,8 @@ struct dss_device { struct dispc_device *dispc; const struct dispc_ops *dispc_ops; + const struct dss_mgr_ops *mgr_ops; + struct omap_drm_private *mgr_ops_priv; }; /* core */ @@ -313,8 +317,6 @@ void dss_runtime_put(struct dss_device *dss); unsigned long dss_get_dispc_clk_rate(struct dss_device *dss); unsigned long dss_get_max_fck_rate(struct dss_device *dss); -enum omap_dss_output_id dss_get_supported_outputs(struct dss_device *dss, - enum omap_channel channel); int dss_dpi_select_source(struct dss_device *dss, int port, enum omap_channel channel); void dss_select_hdmi_venc_clk_source(struct dss_device *dss, @@ -374,8 +376,6 @@ static inline void sdi_uninit_port(struct device_node *port) #ifdef CONFIG_OMAP2_DSS_DSI -void dsi_dump_clocks(struct seq_file *s); - void dsi_irq_handler(void); #endif @@ -417,9 +417,6 @@ bool dispc_div_calc(struct dispc_device *dispc, unsigned long dispc_freq, unsigned long pck_min, unsigned long pck_max, dispc_div_calc_func func, void *data); -bool dispc_mgr_timings_ok(struct dispc_device *dispc, - enum omap_channel channel, - const struct videomode *vm); int dispc_calc_clock_rates(struct dispc_device *dispc, unsigned long dispc_fclk_rate, struct dispc_clock_info *cinfo); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h index 3aeb4cabd59f..7f0dc490a31d 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi.h +++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h @@ -313,13 +313,13 @@ void hdmi_wp_clear_irqenable(struct hdmi_wp_data *wp, u32 mask); int hdmi_wp_set_phy_pwr(struct hdmi_wp_data *wp, enum hdmi_phy_pwr val); int hdmi_wp_set_pll_pwr(struct hdmi_wp_data *wp, enum hdmi_pll_pwr val); void hdmi_wp_video_config_format(struct hdmi_wp_data *wp, - struct hdmi_video_format *video_fmt); + const struct hdmi_video_format *video_fmt); void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp, - struct videomode *vm); + const struct videomode *vm); void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp, - struct videomode *vm); + const struct videomode *vm); void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt, - struct videomode *vm, struct hdmi_config *param); + struct videomode *vm, const struct hdmi_config *param); int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp, unsigned int version); phys_addr_t hdmi_wp_get_audio_dma_addr(struct hdmi_wp_data *wp); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index 5879f45f6fc9..cf6230eac31a 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -108,26 +108,6 @@ static 
irqreturn_t hdmi_irq_handler(int irq, void *data) return IRQ_HANDLED; } -static int hdmi_init_regulator(struct omap_hdmi *hdmi) -{ - struct regulator *reg; - - if (hdmi->vdda_reg != NULL) - return 0; - - reg = devm_regulator_get(&hdmi->pdev->dev, "vdda"); - - if (IS_ERR(reg)) { - if (PTR_ERR(reg) != -EPROBE_DEFER) - DSSERR("can't get VDDA regulator\n"); - return PTR_ERR(reg); - } - - hdmi->vdda_reg = reg; - - return 0; -} - static int hdmi_power_on_core(struct omap_hdmi *hdmi) { int r; @@ -174,7 +154,7 @@ static void hdmi_power_off_core(struct omap_hdmi *hdmi) static int hdmi_power_on_full(struct omap_hdmi *hdmi) { int r; - struct videomode *vm; + const struct videomode *vm; struct hdmi_wp_data *wp = &hdmi->wp; struct dss_pll_clock_info hdmi_cinfo = { 0 }; unsigned int pc; @@ -227,9 +207,6 @@ static int hdmi_power_on_full(struct omap_hdmi *hdmi) hdmi4_configure(&hdmi->core, &hdmi->wp, &hdmi->cfg); - /* tv size */ - dss_mgr_set_timings(&hdmi->output, vm); - r = dss_mgr_enable(&hdmi->output); if (r) goto err_mgr_enable; @@ -271,19 +248,8 @@ static void hdmi_power_off_full(struct omap_hdmi *hdmi) hdmi_power_off_core(hdmi); } -static int hdmi_display_check_timing(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); - - if (!dispc_mgr_timings_ok(hdmi->dss->dispc, dssdev->dispc_channel, vm)) - return -EINVAL; - - return 0; -} - -static void hdmi_display_set_timing(struct omap_dss_device *dssdev, - struct videomode *vm) +static void hdmi_display_set_timings(struct omap_dss_device *dssdev, + const struct videomode *vm) { struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); @@ -296,14 +262,6 @@ static void hdmi_display_set_timing(struct omap_dss_device *dssdev, mutex_unlock(&hdmi->lock); } -static void hdmi_display_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); - - *vm = hdmi->cfg.vm; -} - static int hdmi_dump_regs(struct seq_file *s, void *p) { struct omap_hdmi *hdmi = s->private; @@ -456,44 +414,25 @@ void hdmi4_core_disable(struct hdmi_core_data *core) mutex_unlock(&hdmi->lock); } -static int hdmi_connect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static int hdmi_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); int r; - r = hdmi_init_regulator(hdmi); + r = omapdss_device_connect(dst->dss, dst, dst->next); if (r) return r; - r = dss_mgr_connect(&hdmi->output, dssdev); - if (r) - return r; - - r = omapdss_output_set_device(dssdev, dst); - if (r) { - DSSERR("failed to connect output to new device: %s\n", - dst->name); - dss_mgr_disconnect(&hdmi->output, dssdev); - return r; - } - + dst->dispc_channel_connected = true; return 0; } -static void hdmi_disconnect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static void hdmi_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); - - WARN_ON(dst != dssdev->dst); + dst->dispc_channel_connected = false; - if (dst != dssdev->dst) - return; - - omapdss_output_unset_device(dssdev); - - dss_mgr_disconnect(&hdmi->output, dssdev); + omapdss_device_disconnect(dst, dst->next); } static int hdmi_read_edid(struct omap_dss_device *dssdev, @@ -548,69 +487,28 @@ static int hdmi_set_hdmi_mode(struct omap_dss_device *dssdev, return 0; } -static const struct omapdss_hdmi_ops hdmi_ops = { +static const struct omap_dss_device_ops hdmi_ops = { .connect = hdmi_connect, 
.disconnect = hdmi_disconnect, .enable = hdmi_display_enable, .disable = hdmi_display_disable, - .check_timings = hdmi_display_check_timing, - .set_timings = hdmi_display_set_timing, - .get_timings = hdmi_display_get_timings, + .set_timings = hdmi_display_set_timings, .read_edid = hdmi_read_edid, - .lost_hotplug = hdmi_lost_hotplug, - .set_infoframe = hdmi_set_infoframe, - .set_hdmi_mode = hdmi_set_hdmi_mode, -}; - -static void hdmi_init_output(struct omap_hdmi *hdmi) -{ - struct omap_dss_device *out = &hdmi->output; - - out->dev = &hdmi->pdev->dev; - out->id = OMAP_DSS_OUTPUT_HDMI; - out->output_type = OMAP_DISPLAY_TYPE_HDMI; - out->name = "hdmi.0"; - out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT; - out->ops.hdmi = &hdmi_ops; - out->owner = THIS_MODULE; - - omapdss_register_output(out); -} - -static void hdmi_uninit_output(struct omap_hdmi *hdmi) -{ - struct omap_dss_device *out = &hdmi->output; - - omapdss_unregister_output(out); -} - -static int hdmi_probe_of(struct omap_hdmi *hdmi) -{ - struct platform_device *pdev = hdmi->pdev; - struct device_node *node = pdev->dev.of_node; - struct device_node *ep; - int r; - - ep = of_graph_get_endpoint_by_regs(node, 0, 0); - if (!ep) - return 0; - - r = hdmi_parse_lanes_of(pdev, ep, &hdmi->phy); - if (r) - goto err; - of_node_put(ep); - return 0; + .hdmi = { + .lost_hotplug = hdmi_lost_hotplug, + .set_infoframe = hdmi_set_infoframe, + .set_hdmi_mode = hdmi_set_hdmi_mode, + }, +}; -err: - of_node_put(ep); - return r; -} +/* ----------------------------------------------------------------------------- + * Audio Callbacks + */ -/* Audio callbacks */ static int hdmi_audio_startup(struct device *dev, void (*abort_cb)(struct device *dev)) { @@ -725,27 +623,143 @@ static int hdmi_audio_register(struct omap_hdmi *hdmi) return 0; } -/* HDMI HW IP initialisation */ +/* ----------------------------------------------------------------------------- + * Component Bind & Unbind + */ + static int hdmi4_bind(struct device *dev, struct device *master, void *data) { - struct platform_device *pdev = to_platform_device(dev); struct dss_device *dss = dss_get_device(master); - struct omap_hdmi *hdmi; + struct omap_hdmi *hdmi = dev_get_drvdata(dev); + int r; + + hdmi->dss = dss; + + r = hdmi_pll_init(dss, hdmi->pdev, &hdmi->pll, &hdmi->wp); + if (r) + return r; + + r = hdmi4_cec_init(hdmi->pdev, &hdmi->core, &hdmi->wp); + if (r) + goto err_pll_uninit; + + r = hdmi_audio_register(hdmi); + if (r) { + DSSERR("Registering HDMI audio failed\n"); + goto err_cec_uninit; + } + + hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs, + hdmi); + + return 0; + +err_cec_uninit: + hdmi4_cec_uninit(&hdmi->core); +err_pll_uninit: + hdmi_pll_uninit(&hdmi->pll); + return r; +} + +static void hdmi4_unbind(struct device *dev, struct device *master, void *data) +{ + struct omap_hdmi *hdmi = dev_get_drvdata(dev); + + dss_debugfs_remove_file(hdmi->debugfs); + + if (hdmi->audio_pdev) + platform_device_unregister(hdmi->audio_pdev); + + hdmi4_cec_uninit(&hdmi->core); + hdmi_pll_uninit(&hdmi->pll); +} + +static const struct component_ops hdmi4_component_ops = { + .bind = hdmi4_bind, + .unbind = hdmi4_unbind, +}; + +/* ----------------------------------------------------------------------------- + * Probe & Remove, Suspend & Resume + */ + +static int hdmi4_init_output(struct omap_hdmi *hdmi) +{ + struct omap_dss_device *out = &hdmi->output; int r; + + out->dev = &hdmi->pdev->dev; + out->id = OMAP_DSS_OUTPUT_HDMI; + out->output_type = OMAP_DISPLAY_TYPE_HDMI; + out->name = "hdmi.0"; + 
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT; + out->ops = &hdmi_ops; + out->owner = THIS_MODULE; + out->of_ports = BIT(0); + out->ops_flags = OMAP_DSS_DEVICE_OP_EDID; + + out->next = omapdss_of_find_connected_device(out->dev->of_node, 0); + if (IS_ERR(out->next)) { + if (PTR_ERR(out->next) != -EPROBE_DEFER) + dev_err(out->dev, "failed to find video sink\n"); + return PTR_ERR(out->next); + } + + r = omapdss_output_validate(out); + if (r) { + omapdss_device_put(out->next); + out->next = NULL; + return r; + } + + omapdss_device_register(out); + + return 0; +} + +static void hdmi4_uninit_output(struct omap_hdmi *hdmi) +{ + struct omap_dss_device *out = &hdmi->output; + + if (out->next) + omapdss_device_put(out->next); + omapdss_device_unregister(out); +} + +static int hdmi4_probe_of(struct omap_hdmi *hdmi) +{ + struct platform_device *pdev = hdmi->pdev; + struct device_node *node = pdev->dev.of_node; + struct device_node *ep; + int r; + + ep = of_graph_get_endpoint_by_regs(node, 0, 0); + if (!ep) + return 0; + + r = hdmi_parse_lanes_of(pdev, ep, &hdmi->phy); + of_node_put(ep); + return r; +} + +static int hdmi4_probe(struct platform_device *pdev) +{ + struct omap_hdmi *hdmi; int irq; + int r; hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL); if (!hdmi) return -ENOMEM; hdmi->pdev = pdev; - hdmi->dss = dss; + dev_set_drvdata(&pdev->dev, hdmi); mutex_init(&hdmi->lock); spin_lock_init(&hdmi->audio_playing_lock); - r = hdmi_probe_of(hdmi); + r = hdmi4_probe_of(hdmi); if (r) goto err_free; @@ -753,27 +767,19 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data) if (r) goto err_free; - r = hdmi_pll_init(dss, pdev, &hdmi->pll, &hdmi->wp); - if (r) - goto err_free; - r = hdmi_phy_init(pdev, &hdmi->phy, 4); if (r) - goto err_pll; + goto err_free; r = hdmi4_core_init(pdev, &hdmi->core); if (r) - goto err_pll; - - r = hdmi4_cec_init(pdev, &hdmi->core, &hdmi->wp); - if (r) - goto err_pll; + goto err_free; irq = platform_get_irq(pdev, 0); if (irq < 0) { DSSERR("platform_get_irq failed\n"); r = -ENODEV; - goto err_pll; + goto err_free; } r = devm_request_threaded_irq(&pdev->dev, irq, @@ -781,66 +787,49 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data) IRQF_ONESHOT, "OMAP HDMI", hdmi); if (r) { DSSERR("HDMI IRQ request failed\n"); - goto err_pll; + goto err_free; } - pm_runtime_enable(&pdev->dev); + hdmi->vdda_reg = devm_regulator_get(&pdev->dev, "vdda"); + if (IS_ERR(hdmi->vdda_reg)) { + r = PTR_ERR(hdmi->vdda_reg); + if (r != -EPROBE_DEFER) + DSSERR("can't get VDDA regulator\n"); + goto err_free; + } - hdmi_init_output(hdmi); + pm_runtime_enable(&pdev->dev); - r = hdmi_audio_register(hdmi); - if (r) { - DSSERR("Registering HDMI audio failed\n"); - hdmi_uninit_output(hdmi); - pm_runtime_disable(&pdev->dev); - return r; - } + r = hdmi4_init_output(hdmi); + if (r) + goto err_pm_disable; - hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs, - hdmi); + r = component_add(&pdev->dev, &hdmi4_component_ops); + if (r) + goto err_uninit_output; return 0; -err_pll: - hdmi_pll_uninit(&hdmi->pll); +err_uninit_output: + hdmi4_uninit_output(hdmi); +err_pm_disable: + pm_runtime_disable(&pdev->dev); err_free: kfree(hdmi); return r; } -static void hdmi4_unbind(struct device *dev, struct device *master, void *data) +static int hdmi4_remove(struct platform_device *pdev) { - struct omap_hdmi *hdmi = dev_get_drvdata(dev); - - dss_debugfs_remove_file(hdmi->debugfs); - - if (hdmi->audio_pdev) - platform_device_unregister(hdmi->audio_pdev); + struct omap_hdmi *hdmi = 
platform_get_drvdata(pdev); - hdmi_uninit_output(hdmi); - - hdmi4_cec_uninit(&hdmi->core); + component_del(&pdev->dev, &hdmi4_component_ops); - hdmi_pll_uninit(&hdmi->pll); + hdmi4_uninit_output(hdmi); - pm_runtime_disable(dev); + pm_runtime_disable(&pdev->dev); kfree(hdmi); -} - -static const struct component_ops hdmi4_component_ops = { - .bind = hdmi4_bind, - .unbind = hdmi4_unbind, -}; - -static int hdmi4_probe(struct platform_device *pdev) -{ - return component_add(&pdev->dev, &hdmi4_component_ops); -} - -static int hdmi4_remove(struct platform_device *pdev) -{ - component_del(&pdev->dev, &hdmi4_component_ops); return 0; } diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index ae1a001d1b83..b0e4a7463f8c 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -117,24 +117,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data) return IRQ_HANDLED; } -static int hdmi_init_regulator(struct omap_hdmi *hdmi) -{ - struct regulator *reg; - - if (hdmi->vdda_reg != NULL) - return 0; - - reg = devm_regulator_get(&hdmi->pdev->dev, "vdda"); - if (IS_ERR(reg)) { - DSSERR("can't get VDDA regulator\n"); - return PTR_ERR(reg); - } - - hdmi->vdda_reg = reg; - - return 0; -} - static int hdmi_power_on_core(struct omap_hdmi *hdmi) { int r; @@ -171,7 +153,7 @@ static void hdmi_power_off_core(struct omap_hdmi *hdmi) static int hdmi_power_on_full(struct omap_hdmi *hdmi) { int r; - struct videomode *vm; + const struct videomode *vm; struct dss_pll_clock_info hdmi_cinfo = { 0 }; unsigned int pc; @@ -224,9 +206,6 @@ static int hdmi_power_on_full(struct omap_hdmi *hdmi) hdmi5_configure(&hdmi->core, &hdmi->wp, &hdmi->cfg); - /* tv size */ - dss_mgr_set_timings(&hdmi->output, vm); - r = dss_mgr_enable(&hdmi->output); if (r) goto err_mgr_enable; @@ -268,19 +247,8 @@ static void hdmi_power_off_full(struct omap_hdmi *hdmi) hdmi_power_off_core(hdmi); } -static int hdmi_display_check_timing(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); - - if (!dispc_mgr_timings_ok(hdmi->dss->dispc, dssdev->dispc_channel, vm)) - return -EINVAL; - - return 0; -} - -static void hdmi_display_set_timing(struct omap_dss_device *dssdev, - struct videomode *vm) +static void hdmi_display_set_timings(struct omap_dss_device *dssdev, + const struct videomode *vm) { struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); @@ -293,14 +261,6 @@ static void hdmi_display_set_timing(struct omap_dss_device *dssdev, mutex_unlock(&hdmi->lock); } -static void hdmi_display_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); - - *vm = hdmi->cfg.vm; -} - static int hdmi_dump_regs(struct seq_file *s, void *p) { struct omap_hdmi *hdmi = s->private; @@ -459,44 +419,25 @@ static void hdmi_core_disable(struct omap_hdmi *hdmi) mutex_unlock(&hdmi->lock); } -static int hdmi_connect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static int hdmi_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); int r; - r = hdmi_init_regulator(hdmi); - if (r) - return r; - - r = dss_mgr_connect(&hdmi->output, dssdev); + r = omapdss_device_connect(dst->dss, dst, dst->next); if (r) return r; - r = omapdss_output_set_device(dssdev, dst); - if (r) { - DSSERR("failed to connect output to new device: %s\n", - dst->name); - dss_mgr_disconnect(&hdmi->output, dssdev); - return r; - } - + 
dst->dispc_channel_connected = true; return 0; } -static void hdmi_disconnect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static void hdmi_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); - - WARN_ON(dst != dssdev->dst); + dst->dispc_channel_connected = false; - if (dst != dssdev->dst) - return; - - omapdss_output_unset_device(dssdev); - - dss_mgr_disconnect(&hdmi->output, dssdev); + omapdss_device_disconnect(dst, dst->next); } static int hdmi_read_edid(struct omap_dss_device *dssdev, @@ -540,68 +481,27 @@ static int hdmi_set_hdmi_mode(struct omap_dss_device *dssdev, return 0; } -static const struct omapdss_hdmi_ops hdmi_ops = { +static const struct omap_dss_device_ops hdmi_ops = { .connect = hdmi_connect, .disconnect = hdmi_disconnect, .enable = hdmi_display_enable, .disable = hdmi_display_disable, - .check_timings = hdmi_display_check_timing, - .set_timings = hdmi_display_set_timing, - .get_timings = hdmi_display_get_timings, + .set_timings = hdmi_display_set_timings, .read_edid = hdmi_read_edid, - .set_infoframe = hdmi_set_infoframe, - .set_hdmi_mode = hdmi_set_hdmi_mode, -}; - -static void hdmi_init_output(struct omap_hdmi *hdmi) -{ - struct omap_dss_device *out = &hdmi->output; - - out->dev = &hdmi->pdev->dev; - out->id = OMAP_DSS_OUTPUT_HDMI; - out->output_type = OMAP_DISPLAY_TYPE_HDMI; - out->name = "hdmi.0"; - out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT; - out->ops.hdmi = &hdmi_ops; - out->owner = THIS_MODULE; - - omapdss_register_output(out); -} - -static void hdmi_uninit_output(struct omap_hdmi *hdmi) -{ - struct omap_dss_device *out = &hdmi->output; - - omapdss_unregister_output(out); -} - -static int hdmi_probe_of(struct omap_hdmi *hdmi) -{ - struct platform_device *pdev = hdmi->pdev; - struct device_node *node = pdev->dev.of_node; - struct device_node *ep; - int r; - - ep = of_graph_get_endpoint_by_regs(node, 0, 0); - if (!ep) - return 0; - - r = hdmi_parse_lanes_of(pdev, ep, &hdmi->phy); - if (r) - goto err; - of_node_put(ep); - return 0; + .hdmi = { + .set_infoframe = hdmi_set_infoframe, + .set_hdmi_mode = hdmi_set_hdmi_mode, + }, +}; -err: - of_node_put(ep); - return r; -} +/* ----------------------------------------------------------------------------- + * Audio Callbacks + */ -/* Audio callbacks */ static int hdmi_audio_startup(struct device *dev, void (*abort_cb)(struct device *dev)) { @@ -722,27 +622,136 @@ static int hdmi_audio_register(struct omap_hdmi *hdmi) return 0; } -/* HDMI HW IP initialisation */ +/* ----------------------------------------------------------------------------- + * Component Bind & Unbind + */ + static int hdmi5_bind(struct device *dev, struct device *master, void *data) { - struct platform_device *pdev = to_platform_device(dev); struct dss_device *dss = dss_get_device(master); - struct omap_hdmi *hdmi; + struct omap_hdmi *hdmi = dev_get_drvdata(dev); + int r; + + hdmi->dss = dss; + + r = hdmi_pll_init(dss, hdmi->pdev, &hdmi->pll, &hdmi->wp); + if (r) + return r; + + r = hdmi_audio_register(hdmi); + if (r) { + DSSERR("Registering HDMI audio failed %d\n", r); + goto err_pll_uninit; + } + + hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs, + hdmi); + + return 0; + +err_pll_uninit: + hdmi_pll_uninit(&hdmi->pll); + return r; +} + +static void hdmi5_unbind(struct device *dev, struct device *master, void *data) +{ + struct omap_hdmi *hdmi = dev_get_drvdata(dev); + + dss_debugfs_remove_file(hdmi->debugfs); + + if (hdmi->audio_pdev) + 
platform_device_unregister(hdmi->audio_pdev); + + hdmi_pll_uninit(&hdmi->pll); +} + +static const struct component_ops hdmi5_component_ops = { + .bind = hdmi5_bind, + .unbind = hdmi5_unbind, +}; + +/* ----------------------------------------------------------------------------- + * Probe & Remove, Suspend & Resume + */ + +static int hdmi5_init_output(struct omap_hdmi *hdmi) +{ + struct omap_dss_device *out = &hdmi->output; + int r; + + out->dev = &hdmi->pdev->dev; + out->id = OMAP_DSS_OUTPUT_HDMI; + out->output_type = OMAP_DISPLAY_TYPE_HDMI; + out->name = "hdmi.0"; + out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT; + out->ops = &hdmi_ops; + out->owner = THIS_MODULE; + out->of_ports = BIT(0); + out->ops_flags = OMAP_DSS_DEVICE_OP_EDID; + + out->next = omapdss_of_find_connected_device(out->dev->of_node, 0); + if (IS_ERR(out->next)) { + if (PTR_ERR(out->next) != -EPROBE_DEFER) + dev_err(out->dev, "failed to find video sink\n"); + return PTR_ERR(out->next); + } + + r = omapdss_output_validate(out); + if (r) { + omapdss_device_put(out->next); + out->next = NULL; + return r; + } + + omapdss_device_register(out); + + return 0; +} + +static void hdmi5_uninit_output(struct omap_hdmi *hdmi) +{ + struct omap_dss_device *out = &hdmi->output; + + if (out->next) + omapdss_device_put(out->next); + omapdss_device_unregister(out); +} + +static int hdmi5_probe_of(struct omap_hdmi *hdmi) +{ + struct platform_device *pdev = hdmi->pdev; + struct device_node *node = pdev->dev.of_node; + struct device_node *ep; int r; + + ep = of_graph_get_endpoint_by_regs(node, 0, 0); + if (!ep) + return 0; + + r = hdmi_parse_lanes_of(pdev, ep, &hdmi->phy); + of_node_put(ep); + return r; +} + +static int hdmi5_probe(struct platform_device *pdev) +{ + struct omap_hdmi *hdmi; int irq; + int r; hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL); if (!hdmi) return -ENOMEM; hdmi->pdev = pdev; - hdmi->dss = dss; + dev_set_drvdata(&pdev->dev, hdmi); mutex_init(&hdmi->lock); spin_lock_init(&hdmi->audio_playing_lock); - r = hdmi_probe_of(hdmi); + r = hdmi5_probe_of(hdmi); if (r) goto err_free; @@ -750,23 +759,19 @@ static int hdmi5_bind(struct device *dev, struct device *master, void *data) if (r) goto err_free; - r = hdmi_pll_init(dss, pdev, &hdmi->pll, &hdmi->wp); - if (r) - goto err_free; - r = hdmi_phy_init(pdev, &hdmi->phy, 5); if (r) - goto err_pll; + goto err_free; r = hdmi5_core_init(pdev, &hdmi->core); if (r) - goto err_pll; + goto err_free; irq = platform_get_irq(pdev, 0); if (irq < 0) { DSSERR("platform_get_irq failed\n"); r = -ENODEV; - goto err_pll; + goto err_free; } r = devm_request_threaded_irq(&pdev->dev, irq, @@ -774,64 +779,49 @@ static int hdmi5_bind(struct device *dev, struct device *master, void *data) IRQF_ONESHOT, "OMAP HDMI", hdmi); if (r) { DSSERR("HDMI IRQ request failed\n"); - goto err_pll; + goto err_free; } - pm_runtime_enable(&pdev->dev); + hdmi->vdda_reg = devm_regulator_get(&pdev->dev, "vdda"); + if (IS_ERR(hdmi->vdda_reg)) { + r = PTR_ERR(hdmi->vdda_reg); + if (r != -EPROBE_DEFER) + DSSERR("can't get VDDA regulator\n"); + goto err_free; + } - hdmi_init_output(hdmi); + pm_runtime_enable(&pdev->dev); - r = hdmi_audio_register(hdmi); - if (r) { - DSSERR("Registering HDMI audio failed %d\n", r); - hdmi_uninit_output(hdmi); - pm_runtime_disable(&pdev->dev); - return r; - } + r = hdmi5_init_output(hdmi); + if (r) + goto err_pm_disable; - hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs, - hdmi); + r = component_add(&pdev->dev, &hdmi5_component_ops); + if (r) + goto err_uninit_output; return 0; -err_pll: 
- hdmi_pll_uninit(&hdmi->pll); +err_uninit_output: + hdmi5_uninit_output(hdmi); +err_pm_disable: + pm_runtime_disable(&pdev->dev); err_free: kfree(hdmi); return r; } -static void hdmi5_unbind(struct device *dev, struct device *master, void *data) +static int hdmi5_remove(struct platform_device *pdev) { - struct omap_hdmi *hdmi = dev_get_drvdata(dev); - - dss_debugfs_remove_file(hdmi->debugfs); + struct omap_hdmi *hdmi = platform_get_drvdata(pdev); - if (hdmi->audio_pdev) - platform_device_unregister(hdmi->audio_pdev); - - hdmi_uninit_output(hdmi); + component_del(&pdev->dev, &hdmi5_component_ops); - hdmi_pll_uninit(&hdmi->pll); + hdmi5_uninit_output(hdmi); - pm_runtime_disable(dev); + pm_runtime_disable(&pdev->dev); kfree(hdmi); -} - -static const struct component_ops hdmi5_component_ops = { - .bind = hdmi5_bind, - .unbind = hdmi5_unbind, -}; - -static int hdmi5_probe(struct platform_device *pdev) -{ - return component_add(&pdev->dev, &hdmi5_component_ops); -} - -static int hdmi5_remove(struct platform_device *pdev) -{ - component_del(&pdev->dev, &hdmi5_component_ops); return 0; } diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c index 2282e48574c6..02efabc7ed76 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c @@ -287,7 +287,7 @@ void hdmi5_core_dump(struct hdmi_core_data *core, struct seq_file *s) } static void hdmi_core_init(struct hdmi_core_vid_config *video_cfg, - struct hdmi_config *cfg) + const struct hdmi_config *cfg) { DSSDBG("hdmi_core_init\n"); @@ -325,10 +325,10 @@ static void hdmi_core_init(struct hdmi_core_vid_config *video_cfg, /* DSS_HDMI_CORE_VIDEO_CONFIG */ static void hdmi_core_video_config(struct hdmi_core_data *core, - struct hdmi_core_vid_config *cfg) + const struct hdmi_core_vid_config *cfg) { void __iomem *base = core->base; - struct videomode *vm = &cfg->v_fc_config.vm; + const struct videomode *vm = &cfg->v_fc_config.vm; unsigned char r = 0; bool vsync_pol, hsync_pol; diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c index 53bc5f78050c..100efb9f08c6 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c @@ -131,7 +131,7 @@ void hdmi_wp_video_stop(struct hdmi_wp_data *wp) } void hdmi_wp_video_config_format(struct hdmi_wp_data *wp, - struct hdmi_video_format *video_fmt) + const struct hdmi_video_format *video_fmt) { u32 l = 0; @@ -144,7 +144,7 @@ void hdmi_wp_video_config_format(struct hdmi_wp_data *wp, } void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp, - struct videomode *vm) + const struct videomode *vm) { u32 r; bool vsync_inv, hsync_inv; @@ -164,7 +164,7 @@ void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp, } void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp, - struct videomode *vm) + const struct videomode *vm) { u32 timing_h = 0; u32 timing_v = 0; @@ -193,7 +193,7 @@ void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp, } void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt, - struct videomode *vm, struct hdmi_config *param) + struct videomode *vm, const struct hdmi_config *param) { DSSDBG("Enter hdmi_wp_video_init_format\n"); diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 14d74adb13fb..1f698a95a94a 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -296,117 +296,14 @@ struct omap_dss_writeback_info { u8 pre_mult_alpha; }; -struct 
omapdss_dpi_ops { - int (*connect)(struct omap_dss_device *dssdev, - struct omap_dss_device *dst); - void (*disconnect)(struct omap_dss_device *dssdev, - struct omap_dss_device *dst); - - int (*enable)(struct omap_dss_device *dssdev); - void (*disable)(struct omap_dss_device *dssdev); - - int (*check_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); - void (*set_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); - void (*get_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); -}; - -struct omapdss_sdi_ops { - int (*connect)(struct omap_dss_device *dssdev, - struct omap_dss_device *dst); - void (*disconnect)(struct omap_dss_device *dssdev, - struct omap_dss_device *dst); - - int (*enable)(struct omap_dss_device *dssdev); - void (*disable)(struct omap_dss_device *dssdev); - - int (*check_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); - void (*set_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); - void (*get_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); -}; - -struct omapdss_dvi_ops { - int (*connect)(struct omap_dss_device *dssdev, - struct omap_dss_device *dst); - void (*disconnect)(struct omap_dss_device *dssdev, - struct omap_dss_device *dst); - - int (*enable)(struct omap_dss_device *dssdev); - void (*disable)(struct omap_dss_device *dssdev); - - int (*check_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); - void (*set_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); - void (*get_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); -}; - -struct omapdss_atv_ops { - int (*connect)(struct omap_dss_device *dssdev, - struct omap_dss_device *dst); - void (*disconnect)(struct omap_dss_device *dssdev, - struct omap_dss_device *dst); - - int (*enable)(struct omap_dss_device *dssdev); - void (*disable)(struct omap_dss_device *dssdev); - - int (*check_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); - void (*set_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); - void (*get_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); - - int (*set_wss)(struct omap_dss_device *dssdev, u32 wss); - u32 (*get_wss)(struct omap_dss_device *dssdev); -}; - struct omapdss_hdmi_ops { - int (*connect)(struct omap_dss_device *dssdev, - struct omap_dss_device *dst); - void (*disconnect)(struct omap_dss_device *dssdev, - struct omap_dss_device *dst); - - int (*enable)(struct omap_dss_device *dssdev); - void (*disable)(struct omap_dss_device *dssdev); - - int (*check_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); - void (*set_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); - void (*get_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); - - int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len); void (*lost_hotplug)(struct omap_dss_device *dssdev); - bool (*detect)(struct omap_dss_device *dssdev); - - int (*register_hpd_cb)(struct omap_dss_device *dssdev, - void (*cb)(void *cb_data, - enum drm_connector_status status), - void *cb_data); - void (*unregister_hpd_cb)(struct omap_dss_device *dssdev); - void (*enable_hpd)(struct omap_dss_device *dssdev); - void (*disable_hpd)(struct omap_dss_device *dssdev); - int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode); int (*set_infoframe)(struct omap_dss_device *dssdev, const struct hdmi_avi_infoframe *avi); }; struct omapdss_dsi_ops { - int (*connect)(struct omap_dss_device *dssdev, - struct omap_dss_device 
*dst); - void (*disconnect)(struct omap_dss_device *dssdev, - struct omap_dss_device *dst); - - int (*enable)(struct omap_dss_device *dssdev); void (*disable)(struct omap_dss_device *dssdev, bool disconnect_lanes, bool enter_ulps); @@ -457,53 +354,95 @@ struct omapdss_dsi_ops { int channel, u16 plen); }; +struct omap_dss_device_ops { + int (*connect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + void (*disconnect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + + int (*enable)(struct omap_dss_device *dssdev); + void (*disable)(struct omap_dss_device *dssdev); + + int (*check_timings)(struct omap_dss_device *dssdev, + struct videomode *vm); + void (*get_timings)(struct omap_dss_device *dssdev, + struct videomode *vm); + void (*set_timings)(struct omap_dss_device *dssdev, + const struct videomode *vm); + + bool (*detect)(struct omap_dss_device *dssdev); + + void (*register_hpd_cb)(struct omap_dss_device *dssdev, + void (*cb)(void *cb_data, + enum drm_connector_status status), + void *cb_data); + void (*unregister_hpd_cb)(struct omap_dss_device *dssdev); + + int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len); + + union { + const struct omapdss_hdmi_ops hdmi; + const struct omapdss_dsi_ops dsi; + }; +}; + +/** + * enum omap_dss_device_ops_flag - Indicates which device ops are supported + * @OMAP_DSS_DEVICE_OP_DETECT: The device supports output connection detection + * @OMAP_DSS_DEVICE_OP_HPD: The device supports all hot-plug-related operations + * @OMAP_DSS_DEVICE_OP_EDID: The device supports reading EDID + */ +enum omap_dss_device_ops_flag { + OMAP_DSS_DEVICE_OP_DETECT = BIT(0), + OMAP_DSS_DEVICE_OP_HPD = BIT(1), + OMAP_DSS_DEVICE_OP_EDID = BIT(2), +}; + +enum omap_dss_device_type { + OMAP_DSS_DEVICE_TYPE_OUTPUT = (1 << 0), + OMAP_DSS_DEVICE_TYPE_DISPLAY = (1 << 1), +}; + struct omap_dss_device { struct kobject kobj; struct device *dev; struct module *owner; - struct list_head panel_list; + struct dss_device *dss; + struct omap_dss_device *src; + struct omap_dss_device *dst; + struct omap_dss_device *next; + + struct list_head list; - /* alias in the form of "display%d" */ - char alias[16]; + unsigned int alias_id; enum omap_display_type type; + /* + * DSS output type that this device generates (for DSS internal devices) + * or requires (for external encoders). Must be OMAP_DISPLAY_TYPE_NONE + * for display devices (connectors and panels) and a non-zero value for + * all other devices. 
+ */ enum omap_display_type output_type; - struct { - struct videomode vm; - - enum omap_dss_dsi_pixel_format dsi_pix_fmt; - enum omap_dss_dsi_mode dsi_mode; - } panel; - const char *name; - struct omap_dss_driver *driver; - - union { - const struct omapdss_dpi_ops *dpi; - const struct omapdss_sdi_ops *sdi; - const struct omapdss_dvi_ops *dvi; - const struct omapdss_hdmi_ops *hdmi; - const struct omapdss_atv_ops *atv; - const struct omapdss_dsi_ops *dsi; - } ops; + const struct omap_dss_driver *driver; + const struct omap_dss_device_ops *ops; + unsigned long ops_flags; + unsigned long bus_flags; /* helper variable for driver suspend/resume */ bool activate_after_resume; enum omap_display_caps caps; - struct omap_dss_device *src; - enum omap_dss_display_state state; /* OMAP DSS output specific fields */ - struct list_head list; - /* DISPC channel for this output */ enum omap_channel dispc_channel; bool dispc_channel_connected; @@ -511,24 +450,11 @@ struct omap_dss_device { /* output instance */ enum omap_dss_output_id id; - /* the port number in the DT node */ - int port_num; - - /* dynamic fields */ - struct omap_dss_device *dst; + /* bitmask of port numbers in DT */ + unsigned int of_ports; }; struct omap_dss_driver { - int (*probe)(struct omap_dss_device *); - void (*remove)(struct omap_dss_device *); - - int (*connect)(struct omap_dss_device *dssdev); - void (*disconnect)(struct omap_dss_device *dssdev); - - int (*enable)(struct omap_dss_device *display); - void (*disable)(struct omap_dss_device *display); - int (*run_test)(struct omap_dss_device *display, int test); - int (*update)(struct omap_dss_device *dssdev, u16 x, u16 y, u16 w, u16 h); int (*sync)(struct omap_dss_device *dssdev); @@ -536,42 +462,12 @@ struct omap_dss_driver { int (*enable_te)(struct omap_dss_device *dssdev, bool enable); int (*get_te)(struct omap_dss_device *dssdev); - u8 (*get_rotate)(struct omap_dss_device *dssdev); - int (*set_rotate)(struct omap_dss_device *dssdev, u8 rotate); - - bool (*get_mirror)(struct omap_dss_device *dssdev); - int (*set_mirror)(struct omap_dss_device *dssdev, bool enable); - int (*memory_read)(struct omap_dss_device *dssdev, void *buf, size_t size, u16 x, u16 y, u16 w, u16 h); - int (*check_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); - void (*set_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); - void (*get_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); void (*get_size)(struct omap_dss_device *dssdev, unsigned int *width, unsigned int *height); - - int (*set_wss)(struct omap_dss_device *dssdev, u32 wss); - u32 (*get_wss)(struct omap_dss_device *dssdev); - - int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len); - bool (*detect)(struct omap_dss_device *dssdev); - - int (*register_hpd_cb)(struct omap_dss_device *dssdev, - void (*cb)(void *cb_data, - enum drm_connector_status status), - void *cb_data); - void (*unregister_hpd_cb)(struct omap_dss_device *dssdev); - void (*enable_hpd)(struct omap_dss_device *dssdev); - void (*disable_hpd)(struct omap_dss_device *dssdev); - - int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode); - int (*set_hdmi_infoframe)(struct omap_dss_device *dssdev, - const struct hdmi_avi_infoframe *avi); }; struct dss_device *omapdss_get_dss(void); @@ -581,27 +477,32 @@ static inline bool omapdss_is_initialized(void) return !!omapdss_get_dss(); } -int omapdss_register_display(struct omap_dss_device *dssdev); -void omapdss_unregister_display(struct omap_dss_device *dssdev); - -struct 
omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev); -void omap_dss_put_device(struct omap_dss_device *dssdev); -#define for_each_dss_dev(d) while ((d = omap_dss_get_next_device(d)) != NULL) -struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from); +#define for_each_dss_display(d) \ + while ((d = omapdss_device_get_next(d, OMAP_DSS_DEVICE_TYPE_DISPLAY)) != NULL) +void omapdss_display_init(struct omap_dss_device *dssdev); +struct omap_dss_device *omapdss_display_get(struct omap_dss_device *output); + +void omapdss_device_register(struct omap_dss_device *dssdev); +void omapdss_device_unregister(struct omap_dss_device *dssdev); +struct omap_dss_device *omapdss_device_get(struct omap_dss_device *dssdev); +void omapdss_device_put(struct omap_dss_device *dssdev); +struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src, + unsigned int port); +struct omap_dss_device *omapdss_device_get_next(struct omap_dss_device *from, + enum omap_dss_device_type type); +int omapdss_device_connect(struct dss_device *dss, + struct omap_dss_device *src, + struct omap_dss_device *dst); +void omapdss_device_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst); int omap_dss_get_num_overlay_managers(void); int omap_dss_get_num_overlays(void); -int omapdss_register_output(struct omap_dss_device *output); -void omapdss_unregister_output(struct omap_dss_device *output); -struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id); -struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *port); -int omapdss_output_set_device(struct omap_dss_device *out, - struct omap_dss_device *dssdev); -int omapdss_output_unset_device(struct omap_dss_device *out); - -struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev); +#define for_each_dss_output(d) \ + while ((d = omapdss_device_get_next(d, OMAP_DSS_DEVICE_TYPE_OUTPUT)) != NULL) +int omapdss_output_validate(struct omap_dss_device *out); typedef void (*omap_dispc_isr_t) (void *arg, u32 mask); int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask); @@ -621,10 +522,7 @@ static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev) } struct omap_dss_device * -omapdss_of_find_source_for_first_ep(struct device_node *node); - -struct device_node *dss_of_port_get_parent_device(struct device_node *port); -u32 dss_of_port_get_port_number(struct device_node *port); +omapdss_of_find_connected_device(struct device_node *node, unsigned int port); enum dss_writeback_channel { DSS_WB_LCD1_MGR = 0, @@ -638,13 +536,6 @@ enum dss_writeback_channel { }; struct dss_mgr_ops { - int (*connect)(struct omap_drm_private *priv, - enum omap_channel channel, - struct omap_dss_device *dst); - void (*disconnect)(struct omap_drm_private *priv, - enum omap_channel channel, - struct omap_dss_device *dst); - void (*start_update)(struct omap_drm_private *priv, enum omap_channel channel); int (*enable)(struct omap_drm_private *priv, @@ -665,14 +556,11 @@ struct dss_mgr_ops { void (*handler)(void *), void *data); }; -int dss_install_mgr_ops(const struct dss_mgr_ops *mgr_ops, +int dss_install_mgr_ops(struct dss_device *dss, + const struct dss_mgr_ops *mgr_ops, struct omap_drm_private *priv); -void dss_uninstall_mgr_ops(void); +void dss_uninstall_mgr_ops(struct dss_device *dss); -int dss_mgr_connect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst); -void dss_mgr_disconnect(struct omap_dss_device *dssdev, - struct omap_dss_device 
*dst); void dss_mgr_set_timings(struct omap_dss_device *dssdev, const struct videomode *vm); void dss_mgr_set_lcd_config(struct omap_dss_device *dssdev, @@ -720,13 +608,14 @@ struct dispc_ops { void (*mgr_set_lcd_config)(struct dispc_device *dispc, enum omap_channel channel, const struct dss_lcd_mgr_config *config); + int (*mgr_check_timings)(struct dispc_device *dispc, + enum omap_channel channel, + const struct videomode *vm); void (*mgr_set_timings)(struct dispc_device *dispc, enum omap_channel channel, const struct videomode *vm); void (*mgr_setup)(struct dispc_device *dispc, enum omap_channel channel, const struct omap_overlay_manager_info *info); - enum omap_dss_output_id (*mgr_get_supported_outputs)( - struct dispc_device *dispc, enum omap_channel channel); u32 (*mgr_gamma_size)(struct dispc_device *dispc, enum omap_channel channel); void (*mgr_set_gamma)(struct dispc_device *dispc, @@ -757,9 +646,6 @@ struct dispc_ops { struct dispc_device *dispc_get_dispc(struct dss_device *dss); const struct dispc_ops *dispc_get_ops(struct dss_device *dss); -bool omapdss_component_is_display(struct device_node *node); -bool omapdss_component_is_output(struct device_node *node); - bool omapdss_stack_is_ready(void); void omapdss_gather_components(struct device *dev); diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c index 96b9d4cd505f..18505bc70f7e 100644 --- a/drivers/gpu/drm/omapdrm/dss/output.c +++ b/drivers/gpu/drm/omapdrm/dss/output.c @@ -21,238 +21,96 @@ #include <linux/slab.h> #include <linux/of.h> +#include "dss.h" #include "omapdss.h" -static LIST_HEAD(output_list); -static DEFINE_MUTEX(output_lock); - -int omapdss_output_set_device(struct omap_dss_device *out, - struct omap_dss_device *dssdev) +int omapdss_output_validate(struct omap_dss_device *out) { - int r; - - mutex_lock(&output_lock); - - if (out->dst) { - dev_err(out->dev, - "output already has device %s connected to it\n", - out->dst->name); - r = -EINVAL; - goto err; - } - - if (out->output_type != dssdev->type) { + if (out->next && out->output_type != out->next->type) { dev_err(out->dev, "output type and display type don't match\n"); - r = -EINVAL; - goto err; - } - - out->dst = dssdev; - dssdev->src = out; - - mutex_unlock(&output_lock); - - return 0; -err: - mutex_unlock(&output_lock); - - return r; -} -EXPORT_SYMBOL(omapdss_output_set_device); - -int omapdss_output_unset_device(struct omap_dss_device *out) -{ - int r; - - mutex_lock(&output_lock); - - if (!out->dst) { - dev_err(out->dev, - "output doesn't have a device connected to it\n"); - r = -EINVAL; - goto err; + return -EINVAL; } - if (out->dst->state != OMAP_DSS_DISPLAY_DISABLED) { - dev_err(out->dev, - "device %s is not disabled, cannot unset device\n", - out->dst->name); - r = -EINVAL; - goto err; - } - - out->dst->src = NULL; - out->dst = NULL; - - mutex_unlock(&output_lock); - - return 0; -err: - mutex_unlock(&output_lock); - - return r; -} -EXPORT_SYMBOL(omapdss_output_unset_device); - -int omapdss_register_output(struct omap_dss_device *out) -{ - list_add_tail(&out->list, &output_list); return 0; } -EXPORT_SYMBOL(omapdss_register_output); - -void omapdss_unregister_output(struct omap_dss_device *out) -{ - list_del(&out->list); -} -EXPORT_SYMBOL(omapdss_unregister_output); - -bool omapdss_component_is_output(struct device_node *node) -{ - struct omap_dss_device *out; - - list_for_each_entry(out, &output_list, list) { - if (out->dev->of_node == node) - return true; - } +EXPORT_SYMBOL(omapdss_output_validate); - return false; 
-} -EXPORT_SYMBOL(omapdss_component_is_output); - -struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id) -{ - struct omap_dss_device *out; - - list_for_each_entry(out, &output_list, list) { - if (out->id == id) - return out; - } - - return NULL; -} -EXPORT_SYMBOL(omap_dss_get_output); - -struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *port) -{ - struct device_node *src_node; - struct omap_dss_device *out; - u32 reg; - - src_node = dss_of_port_get_parent_device(port); - if (!src_node) - return NULL; - - reg = dss_of_port_get_port_number(port); - - list_for_each_entry(out, &output_list, list) { - if (out->dev->of_node == src_node && out->port_num == reg) { - of_node_put(src_node); - return omap_dss_get_device(out); - } - } - - of_node_put(src_node); - - return NULL; -} - -struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev) -{ - while (dssdev->src) - dssdev = dssdev->src; - - if (dssdev->id != 0) - return omap_dss_get_device(dssdev); - - return NULL; -} -EXPORT_SYMBOL(omapdss_find_output_from_display); - -static const struct dss_mgr_ops *dss_mgr_ops; -static struct omap_drm_private *dss_mgr_ops_priv; - -int dss_install_mgr_ops(const struct dss_mgr_ops *mgr_ops, +int dss_install_mgr_ops(struct dss_device *dss, + const struct dss_mgr_ops *mgr_ops, struct omap_drm_private *priv) { - if (dss_mgr_ops) + if (dss->mgr_ops) return -EBUSY; - dss_mgr_ops = mgr_ops; - dss_mgr_ops_priv = priv; + dss->mgr_ops = mgr_ops; + dss->mgr_ops_priv = priv; return 0; } EXPORT_SYMBOL(dss_install_mgr_ops); -void dss_uninstall_mgr_ops(void) +void dss_uninstall_mgr_ops(struct dss_device *dss) { - dss_mgr_ops = NULL; - dss_mgr_ops_priv = NULL; + dss->mgr_ops = NULL; + dss->mgr_ops_priv = NULL; } EXPORT_SYMBOL(dss_uninstall_mgr_ops); -int dss_mgr_connect(struct omap_dss_device *dssdev, struct omap_dss_device *dst) -{ - return dss_mgr_ops->connect(dss_mgr_ops_priv, - dssdev->dispc_channel, dst); -} -EXPORT_SYMBOL(dss_mgr_connect); - -void dss_mgr_disconnect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) -{ - dss_mgr_ops->disconnect(dss_mgr_ops_priv, dssdev->dispc_channel, dst); -} -EXPORT_SYMBOL(dss_mgr_disconnect); - void dss_mgr_set_timings(struct omap_dss_device *dssdev, const struct videomode *vm) { - dss_mgr_ops->set_timings(dss_mgr_ops_priv, dssdev->dispc_channel, vm); + dssdev->dss->mgr_ops->set_timings(dssdev->dss->mgr_ops_priv, + dssdev->dispc_channel, vm); } EXPORT_SYMBOL(dss_mgr_set_timings); void dss_mgr_set_lcd_config(struct omap_dss_device *dssdev, const struct dss_lcd_mgr_config *config) { - dss_mgr_ops->set_lcd_config(dss_mgr_ops_priv, - dssdev->dispc_channel, config); + dssdev->dss->mgr_ops->set_lcd_config(dssdev->dss->mgr_ops_priv, + dssdev->dispc_channel, config); } EXPORT_SYMBOL(dss_mgr_set_lcd_config); int dss_mgr_enable(struct omap_dss_device *dssdev) { - return dss_mgr_ops->enable(dss_mgr_ops_priv, dssdev->dispc_channel); + return dssdev->dss->mgr_ops->enable(dssdev->dss->mgr_ops_priv, + dssdev->dispc_channel); } EXPORT_SYMBOL(dss_mgr_enable); void dss_mgr_disable(struct omap_dss_device *dssdev) { - dss_mgr_ops->disable(dss_mgr_ops_priv, dssdev->dispc_channel); + dssdev->dss->mgr_ops->disable(dssdev->dss->mgr_ops_priv, + dssdev->dispc_channel); } EXPORT_SYMBOL(dss_mgr_disable); void dss_mgr_start_update(struct omap_dss_device *dssdev) { - dss_mgr_ops->start_update(dss_mgr_ops_priv, dssdev->dispc_channel); + dssdev->dss->mgr_ops->start_update(dssdev->dss->mgr_ops_priv, + dssdev->dispc_channel); } 
EXPORT_SYMBOL(dss_mgr_start_update); int dss_mgr_register_framedone_handler(struct omap_dss_device *dssdev, void (*handler)(void *), void *data) { - return dss_mgr_ops->register_framedone_handler(dss_mgr_ops_priv, - dssdev->dispc_channel, - handler, data); + struct dss_device *dss = dssdev->dss; + + return dss->mgr_ops->register_framedone_handler(dss->mgr_ops_priv, + dssdev->dispc_channel, + handler, data); } EXPORT_SYMBOL(dss_mgr_register_framedone_handler); void dss_mgr_unregister_framedone_handler(struct omap_dss_device *dssdev, void (*handler)(void *), void *data) { - dss_mgr_ops->unregister_framedone_handler(dss_mgr_ops_priv, - dssdev->dispc_channel, - handler, data); + struct dss_device *dss = dssdev->dss; + + dss->mgr_ops->unregister_framedone_handler(dss->mgr_ops_priv, + dssdev->dispc_channel, + handler, data); } EXPORT_SYMBOL(dss_mgr_unregister_framedone_handler); diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index 69c3b7a3d5c7..b2fe2387037a 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -132,10 +132,8 @@ static void sdi_config_lcd_manager(struct sdi_device *sdi) static int sdi_display_enable(struct omap_dss_device *dssdev) { struct sdi_device *sdi = dssdev_to_sdi(dssdev); - struct videomode *vm = &sdi->vm; - unsigned long fck; struct dispc_clock_info dispc_cinfo; - unsigned long pck; + unsigned long fck; int r; if (!sdi->output.dispc_channel_connected) { @@ -151,27 +149,12 @@ static int sdi_display_enable(struct omap_dss_device *dssdev) if (r) goto err_get_dispc; - /* 15.5.9.1.2 */ - vm->flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE | DISPLAY_FLAGS_SYNC_POSEDGE; - - r = sdi_calc_clock_div(sdi, vm->pixelclock, &fck, &dispc_cinfo); + r = sdi_calc_clock_div(sdi, sdi->vm.pixelclock, &fck, &dispc_cinfo); if (r) goto err_calc_clock_div; sdi->mgr_config.clock_info = dispc_cinfo; - pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div; - - if (pck != vm->pixelclock) { - DSSWARN("Could not find exact pixel clock. 
Requested %lu Hz, got %lu Hz\n", - vm->pixelclock, pck); - - vm->pixelclock = pck; - } - - - dss_mgr_set_timings(&sdi->output, vm); - r = dss_set_fck_rate(sdi->dss, fck); if (r) goto err_set_dss_clock_div; @@ -230,96 +213,63 @@ static void sdi_display_disable(struct omap_dss_device *dssdev) } static void sdi_set_timings(struct omap_dss_device *dssdev, - struct videomode *vm) + const struct videomode *vm) { struct sdi_device *sdi = dssdev_to_sdi(dssdev); sdi->vm = *vm; } -static void sdi_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct sdi_device *sdi = dssdev_to_sdi(dssdev); - - *vm = sdi->vm; -} - static int sdi_check_timings(struct omap_dss_device *dssdev, struct videomode *vm) { struct sdi_device *sdi = dssdev_to_sdi(dssdev); - enum omap_channel channel = dssdev->dispc_channel; - - if (!dispc_mgr_timings_ok(sdi->dss->dispc, channel, vm)) - return -EINVAL; + struct dispc_clock_info dispc_cinfo; + unsigned long fck; + unsigned long pck; + int r; if (vm->pixelclock == 0) return -EINVAL; - return 0; -} + r = sdi_calc_clock_div(sdi, vm->pixelclock, &fck, &dispc_cinfo); + if (r) + return r; -static int sdi_init_regulator(struct sdi_device *sdi) -{ - struct regulator *vdds_sdi; + pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div; - if (sdi->vdds_sdi_reg) - return 0; + if (pck != vm->pixelclock) { + DSSWARN("Pixel clock adjusted from %lu Hz to %lu Hz\n", + vm->pixelclock, pck); - vdds_sdi = devm_regulator_get(&sdi->pdev->dev, "vdds_sdi"); - if (IS_ERR(vdds_sdi)) { - if (PTR_ERR(vdds_sdi) != -EPROBE_DEFER) - DSSERR("can't get VDDS_SDI regulator\n"); - return PTR_ERR(vdds_sdi); + vm->pixelclock = pck; } - sdi->vdds_sdi_reg = vdds_sdi; - return 0; } -static int sdi_connect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static int sdi_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct sdi_device *sdi = dssdev_to_sdi(dssdev); int r; - r = sdi_init_regulator(sdi); + r = omapdss_device_connect(dst->dss, dst, dst->next); if (r) return r; - r = dss_mgr_connect(&sdi->output, dssdev); - if (r) - return r; - - r = omapdss_output_set_device(dssdev, dst); - if (r) { - DSSERR("failed to connect output to new device: %s\n", - dst->name); - dss_mgr_disconnect(&sdi->output, dssdev); - return r; - } - + dst->dispc_channel_connected = true; return 0; } -static void sdi_disconnect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static void sdi_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct sdi_device *sdi = dssdev_to_sdi(dssdev); + dst->dispc_channel_connected = false; - WARN_ON(dst != dssdev->dst); - - if (dst != dssdev->dst) - return; - - omapdss_output_unset_device(dssdev); - - dss_mgr_disconnect(&sdi->output, dssdev); + omapdss_device_disconnect(dst, dst->next); } -static const struct omapdss_sdi_ops sdi_ops = { +static const struct omap_dss_device_ops sdi_ops = { .connect = sdi_connect, .disconnect = sdi_disconnect, @@ -328,12 +278,12 @@ static const struct omapdss_sdi_ops sdi_ops = { .check_timings = sdi_check_timings, .set_timings = sdi_set_timings, - .get_timings = sdi_get_timings, }; -static void sdi_init_output(struct sdi_device *sdi) +static int sdi_init_output(struct sdi_device *sdi) { struct omap_dss_device *out = &sdi->output; + int r; out->dev = &sdi->pdev->dev; out->id = OMAP_DSS_OUTPUT_SDI; @@ -341,16 +291,36 @@ static void sdi_init_output(struct sdi_device *sdi) out->name = "sdi.0"; out->dispc_channel = OMAP_DSS_CHANNEL_LCD; /* We have SDI only on OMAP3, where it's on 
port 1 */ - out->port_num = 1; - out->ops.sdi = &sdi_ops; + out->of_ports = BIT(1); + out->ops = &sdi_ops; out->owner = THIS_MODULE; + out->bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE /* 15.5.9.1.2 */ + | DRM_BUS_FLAG_SYNC_POSEDGE; + + out->next = omapdss_of_find_connected_device(out->dev->of_node, 1); + if (IS_ERR(out->next)) { + if (PTR_ERR(out->next) != -EPROBE_DEFER) + dev_err(out->dev, "failed to find video sink\n"); + return PTR_ERR(out->next); + } - omapdss_register_output(out); + r = omapdss_output_validate(out); + if (r) { + omapdss_device_put(out->next); + out->next = NULL; + return r; + } + + omapdss_device_register(out); + + return 0; } static void sdi_uninit_output(struct sdi_device *sdi) { - omapdss_unregister_output(&sdi->output); + if (sdi->output.next) + omapdss_device_put(sdi->output.next); + omapdss_device_unregister(&sdi->output); } int sdi_init_port(struct dss_device *dss, struct platform_device *pdev, @@ -372,25 +342,32 @@ int sdi_init_port(struct dss_device *dss, struct platform_device *pdev, } r = of_property_read_u32(ep, "datapairs", &datapairs); + of_node_put(ep); if (r) { DSSERR("failed to parse datapairs\n"); - goto err_datapairs; + goto err_free; } sdi->datapairs = datapairs; sdi->dss = dss; - of_node_put(ep); - sdi->pdev = pdev; port->data = sdi; - sdi_init_output(sdi); + sdi->vdds_sdi_reg = devm_regulator_get(&pdev->dev, "vdds_sdi"); + if (IS_ERR(sdi->vdds_sdi_reg)) { + r = PTR_ERR(sdi->vdds_sdi_reg); + if (r != -EPROBE_DEFER) + DSSERR("can't get VDDS_SDI regulator\n"); + goto err_free; + } + + r = sdi_init_output(sdi); + if (r) + goto err_free; return 0; -err_datapairs: - of_node_put(ep); err_free: kfree(sdi); diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index ac01907dcc34..ff0b18c8e4ac 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -452,7 +452,7 @@ static void venc_runtime_put(struct venc_device *venc) WARN_ON(r < 0 && r != -ENOSYS); } -static const struct venc_config *venc_timings_to_config(struct videomode *vm) +static const struct venc_config *venc_timings_to_config(const struct videomode *vm) { switch (venc_get_videomode(vm)) { default: @@ -491,8 +491,6 @@ static int venc_power_on(struct venc_device *venc) venc_write_reg(venc, VENC_OUTPUT_CONTROL, l); - dss_mgr_set_timings(&venc->output, &venc->vm); - r = regulator_enable(venc->vdda_dac_reg); if (r) goto err1; @@ -568,32 +566,30 @@ static void venc_display_disable(struct omap_dss_device *dssdev) mutex_unlock(&venc->venc_lock); } -static void venc_set_timings(struct omap_dss_device *dssdev, +static void venc_get_timings(struct omap_dss_device *dssdev, struct videomode *vm) { struct venc_device *venc = dssdev_to_venc(dssdev); - struct videomode actual_vm; + + mutex_lock(&venc->venc_lock); + *vm = venc->vm; + mutex_unlock(&venc->venc_lock); +} + +static void venc_set_timings(struct omap_dss_device *dssdev, + const struct videomode *vm) +{ + struct venc_device *venc = dssdev_to_venc(dssdev); DSSDBG("venc_set_timings\n"); mutex_lock(&venc->venc_lock); - switch (venc_get_videomode(vm)) { - default: - WARN_ON_ONCE(1); - case VENC_MODE_PAL: - actual_vm = omap_dss_pal_vm; - break; - case VENC_MODE_NTSC: - actual_vm = omap_dss_ntsc_vm; - break; - } - /* Reset WSS data when the TV standard changes. 
*/ - if (memcmp(&venc->vm, &actual_vm, sizeof(actual_vm))) + if (memcmp(&venc->vm, vm, sizeof(*vm))) venc->wss_data = 0; - venc->vm = actual_vm; + venc->vm = *vm; dispc_set_tv_pclk(venc->dss->dispc, 13500000); @@ -607,80 +603,16 @@ static int venc_check_timings(struct omap_dss_device *dssdev, switch (venc_get_videomode(vm)) { case VENC_MODE_PAL: - case VENC_MODE_NTSC: + *vm = omap_dss_pal_vm; return 0; - default: - return -EINVAL; - } -} - -static void venc_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct venc_device *venc = dssdev_to_venc(dssdev); - - mutex_lock(&venc->venc_lock); - - *vm = venc->vm; - - mutex_unlock(&venc->venc_lock); -} - -static u32 venc_get_wss(struct omap_dss_device *dssdev) -{ - struct venc_device *venc = dssdev_to_venc(dssdev); - - /* Invert due to VENC_L21_WC_CTL:INV=1 */ - return (venc->wss_data >> 8) ^ 0xfffff; -} - -static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss) -{ - struct venc_device *venc = dssdev_to_venc(dssdev); - const struct venc_config *config; - int r; - DSSDBG("venc_set_wss\n"); - - mutex_lock(&venc->venc_lock); - - config = venc_timings_to_config(&venc->vm); - - /* Invert due to VENC_L21_WC_CTL:INV=1 */ - venc->wss_data = (wss ^ 0xfffff) << 8; - - r = venc_runtime_get(venc); - if (r) - goto err; - - venc_write_reg(venc, VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data | - venc->wss_data); - - venc_runtime_put(venc); - -err: - mutex_unlock(&venc->venc_lock); - - return r; -} - -static int venc_init_regulator(struct venc_device *venc) -{ - struct regulator *vdda_dac; - - if (venc->vdda_dac_reg != NULL) + case VENC_MODE_NTSC: + *vm = omap_dss_ntsc_vm; return 0; - vdda_dac = devm_regulator_get(&venc->pdev->dev, "vdda"); - if (IS_ERR(vdda_dac)) { - if (PTR_ERR(vdda_dac) != -EPROBE_DEFER) - DSSERR("can't get VDDA_DAC regulator\n"); - return PTR_ERR(vdda_dac); + default: + return -EINVAL; } - - venc->vdda_dac_reg = vdda_dac; - - return 0; } static int venc_dump_regs(struct seq_file *s, void *p) @@ -760,47 +692,28 @@ static int venc_get_clocks(struct venc_device *venc) return 0; } -static int venc_connect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static int venc_connect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct venc_device *venc = dssdev_to_venc(dssdev); int r; - r = venc_init_regulator(venc); - if (r) - return r; - - r = dss_mgr_connect(&venc->output, dssdev); + r = omapdss_device_connect(dst->dss, dst, dst->next); if (r) return r; - r = omapdss_output_set_device(dssdev, dst); - if (r) { - DSSERR("failed to connect output to new device: %s\n", - dst->name); - dss_mgr_disconnect(&venc->output, dssdev); - return r; - } - + dst->dispc_channel_connected = true; return 0; } -static void venc_disconnect(struct omap_dss_device *dssdev, - struct omap_dss_device *dst) +static void venc_disconnect(struct omap_dss_device *src, + struct omap_dss_device *dst) { - struct venc_device *venc = dssdev_to_venc(dssdev); - - WARN_ON(dst != dssdev->dst); - - if (dst != dssdev->dst) - return; - - omapdss_output_unset_device(dssdev); + dst->dispc_channel_connected = false; - dss_mgr_disconnect(&venc->output, dssdev); + omapdss_device_disconnect(dst, dst->next); } -static const struct omapdss_atv_ops venc_ops = { +static const struct omap_dss_device_ops venc_ops = { .connect = venc_connect, .disconnect = venc_disconnect, @@ -808,31 +721,92 @@ static const struct omapdss_atv_ops venc_ops = { .disable = venc_display_disable, .check_timings = venc_check_timings, - .set_timings = 
venc_set_timings, .get_timings = venc_get_timings, + .set_timings = venc_set_timings, +}; + +/* ----------------------------------------------------------------------------- + * Component Bind & Unbind + */ - .set_wss = venc_set_wss, - .get_wss = venc_get_wss, +static int venc_bind(struct device *dev, struct device *master, void *data) +{ + struct dss_device *dss = dss_get_device(master); + struct venc_device *venc = dev_get_drvdata(dev); + u8 rev_id; + int r; + + venc->dss = dss; + + r = venc_runtime_get(venc); + if (r) + return r; + + rev_id = (u8)(venc_read_reg(venc, VENC_REV_ID) & 0xff); + dev_dbg(dev, "OMAP VENC rev %d\n", rev_id); + + venc_runtime_put(venc); + + venc->debugfs = dss_debugfs_create_file(dss, "venc", venc_dump_regs, + venc); + + return 0; +} + +static void venc_unbind(struct device *dev, struct device *master, void *data) +{ + struct venc_device *venc = dev_get_drvdata(dev); + + dss_debugfs_remove_file(venc->debugfs); +} + +static const struct component_ops venc_component_ops = { + .bind = venc_bind, + .unbind = venc_unbind, }; -static void venc_init_output(struct venc_device *venc) +/* ----------------------------------------------------------------------------- + * Probe & Remove, Suspend & Resume + */ + +static int venc_init_output(struct venc_device *venc) { struct omap_dss_device *out = &venc->output; + int r; out->dev = &venc->pdev->dev; out->id = OMAP_DSS_OUTPUT_VENC; out->output_type = OMAP_DISPLAY_TYPE_VENC; out->name = "venc.0"; out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT; - out->ops.atv = &venc_ops; + out->ops = &venc_ops; out->owner = THIS_MODULE; + out->of_ports = BIT(0); + + out->next = omapdss_of_find_connected_device(out->dev->of_node, 0); + if (IS_ERR(out->next)) { + if (PTR_ERR(out->next) != -EPROBE_DEFER) + dev_err(out->dev, "failed to find video sink\n"); + return PTR_ERR(out->next); + } - omapdss_register_output(out); + r = omapdss_output_validate(out); + if (r) { + omapdss_device_put(out->next); + out->next = NULL; + return r; + } + + omapdss_device_register(out); + + return 0; } static void venc_uninit_output(struct venc_device *venc) { - omapdss_unregister_output(&venc->output); + if (venc->output.next) + omapdss_device_put(venc->output.next); + omapdss_device_unregister(&venc->output); } static int venc_probe_of(struct venc_device *venc) @@ -878,19 +852,15 @@ err: return r; } -/* VENC HW IP initialisation */ static const struct soc_device_attribute venc_soc_devices[] = { { .machine = "OMAP3[45]*" }, { .machine = "AM35*" }, { /* sentinel */ } }; -static int venc_bind(struct device *dev, struct device *master, void *data) +static int venc_probe(struct platform_device *pdev) { - struct platform_device *pdev = to_platform_device(dev); - struct dss_device *dss = dss_get_device(master); struct venc_device *venc; - u8 rev_id; struct resource *venc_mem; int r; @@ -899,8 +869,8 @@ static int venc_bind(struct device *dev, struct device *master, void *data) return -ENOMEM; venc->pdev = pdev; - venc->dss = dss; - dev_set_drvdata(dev, venc); + + platform_set_drvdata(pdev, venc); /* The OMAP34xx, OMAP35xx and AM35xx VENC require the TV DAC clock. 
*/ if (soc_device_match(venc_soc_devices)) @@ -909,6 +879,7 @@ static int venc_bind(struct device *dev, struct device *master, void *data) mutex_init(&venc->venc_lock); venc->wss_data = 0; + venc->vm = omap_dss_pal_vm; venc_mem = platform_get_resource(venc->pdev, IORESOURCE_MEM, 0); venc->base = devm_ioremap_resource(&pdev->dev, venc_mem); @@ -917,68 +888,54 @@ static int venc_bind(struct device *dev, struct device *master, void *data) goto err_free; } + venc->vdda_dac_reg = devm_regulator_get(&pdev->dev, "vdda"); + if (IS_ERR(venc->vdda_dac_reg)) { + r = PTR_ERR(venc->vdda_dac_reg); + if (r != -EPROBE_DEFER) + DSSERR("can't get VDDA_DAC regulator\n"); + goto err_free; + } + r = venc_get_clocks(venc); if (r) goto err_free; - pm_runtime_enable(&pdev->dev); - - r = venc_runtime_get(venc); + r = venc_probe_of(venc); if (r) - goto err_runtime_get; - - rev_id = (u8)(venc_read_reg(venc, VENC_REV_ID) & 0xff); - dev_dbg(&pdev->dev, "OMAP VENC rev %d\n", rev_id); - - venc_runtime_put(venc); + goto err_free; - r = venc_probe_of(venc); - if (r) { - DSSERR("Invalid DT data\n"); - goto err_probe_of; - } + pm_runtime_enable(&pdev->dev); - venc->debugfs = dss_debugfs_create_file(dss, "venc", venc_dump_regs, - venc); + r = venc_init_output(venc); + if (r) + goto err_pm_disable; - venc_init_output(venc); + r = component_add(&pdev->dev, &venc_component_ops); + if (r) + goto err_uninit_output; return 0; -err_probe_of: -err_runtime_get: +err_uninit_output: + venc_uninit_output(venc); +err_pm_disable: pm_runtime_disable(&pdev->dev); err_free: kfree(venc); return r; } -static void venc_unbind(struct device *dev, struct device *master, void *data) +static int venc_remove(struct platform_device *pdev) { - struct venc_device *venc = dev_get_drvdata(dev); + struct venc_device *venc = platform_get_drvdata(pdev); - dss_debugfs_remove_file(venc->debugfs); + component_del(&pdev->dev, &venc_component_ops); venc_uninit_output(venc); - pm_runtime_disable(dev); + pm_runtime_disable(&pdev->dev); kfree(venc); -} - -static const struct component_ops venc_component_ops = { - .bind = venc_bind, - .unbind = venc_unbind, -}; - -static int venc_probe(struct platform_device *pdev) -{ - return component_add(&pdev->dev, &venc_component_ops); -} - -static int venc_remove(struct platform_device *pdev) -{ - component_del(&pdev->dev, &venc_component_ops); return 0; } diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index 2ddb856666c4..98f5ca29444a 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -29,10 +29,28 @@ struct omap_connector { struct drm_connector base; - struct omap_dss_device *dssdev; + struct omap_dss_device *output; + struct omap_dss_device *display; + struct omap_dss_device *hpd; bool hdmi_mode; }; +static void omap_connector_hpd_notify(struct drm_connector *connector, + struct omap_dss_device *src, + enum drm_connector_status status) +{ + if (status == connector_status_disconnected) { + /* + * If the source is an HDMI encoder, notify it of disconnection. + * This is required to let the HDMI encoder reset any internal + * state related to connection status, such as the CEC address. 
+ */ + if (src && src->type == OMAP_DISPLAY_TYPE_HDMI && + src->ops->hdmi.lost_hotplug) + src->ops->hdmi.lost_hotplug(src); + } +} + static void omap_connector_hpd_cb(void *cb_data, enum drm_connector_status status) { @@ -46,8 +64,31 @@ static void omap_connector_hpd_cb(void *cb_data, connector->status = status; mutex_unlock(&dev->mode_config.mutex); - if (old_status != status) - drm_kms_helper_hotplug_event(dev); + if (old_status == status) + return; + + omap_connector_hpd_notify(connector, omap_connector->hpd, status); + + drm_kms_helper_hotplug_event(dev); +} + +void omap_connector_enable_hpd(struct drm_connector *connector) +{ + struct omap_connector *omap_connector = to_omap_connector(connector); + struct omap_dss_device *hpd = omap_connector->hpd; + + if (hpd) + hpd->ops->register_hpd_cb(hpd, omap_connector_hpd_cb, + omap_connector); +} + +void omap_connector_disable_hpd(struct drm_connector *connector) +{ + struct omap_connector *omap_connector = to_omap_connector(connector); + struct omap_dss_device *hpd = omap_connector->hpd; + + if (hpd) + hpd->ops->unregister_hpd_cb(hpd); } bool omap_connector_get_hdmi_mode(struct drm_connector *connector) @@ -57,120 +98,179 @@ bool omap_connector_get_hdmi_mode(struct drm_connector *connector) return omap_connector->hdmi_mode; } +static struct omap_dss_device * +omap_connector_find_device(struct drm_connector *connector, + enum omap_dss_device_ops_flag op) +{ + struct omap_connector *omap_connector = to_omap_connector(connector); + struct omap_dss_device *dssdev; + + for (dssdev = omap_connector->display; dssdev; dssdev = dssdev->src) { + if (dssdev->ops_flags & op) + return dssdev; + } + + return NULL; +} + static enum drm_connector_status omap_connector_detect( struct drm_connector *connector, bool force) { struct omap_connector *omap_connector = to_omap_connector(connector); - struct omap_dss_device *dssdev = omap_connector->dssdev; - struct omap_dss_driver *dssdrv = dssdev->driver; - enum drm_connector_status ret; - - if (dssdrv->detect) { - if (dssdrv->detect(dssdev)) - ret = connector_status_connected; - else - ret = connector_status_disconnected; - } else if (dssdev->type == OMAP_DISPLAY_TYPE_DPI || - dssdev->type == OMAP_DISPLAY_TYPE_DBI || - dssdev->type == OMAP_DISPLAY_TYPE_SDI || - dssdev->type == OMAP_DISPLAY_TYPE_DSI) { - ret = connector_status_connected; + struct omap_dss_device *dssdev; + enum drm_connector_status status; + + dssdev = omap_connector_find_device(connector, + OMAP_DSS_DEVICE_OP_DETECT); + + if (dssdev) { + status = dssdev->ops->detect(dssdev) + ? 
connector_status_connected + : connector_status_disconnected; + + omap_connector_hpd_notify(connector, dssdev->src, status); } else { - ret = connector_status_unknown; + switch (omap_connector->display->type) { + case OMAP_DISPLAY_TYPE_DPI: + case OMAP_DISPLAY_TYPE_DBI: + case OMAP_DISPLAY_TYPE_SDI: + case OMAP_DISPLAY_TYPE_DSI: + status = connector_status_connected; + break; + default: + status = connector_status_unknown; + break; + } } - VERB("%s: %d (force=%d)", omap_connector->dssdev->name, ret, force); + VERB("%s: %d (force=%d)", omap_connector->display->name, status, force); - return ret; + return status; } static void omap_connector_destroy(struct drm_connector *connector) { struct omap_connector *omap_connector = to_omap_connector(connector); - struct omap_dss_device *dssdev = omap_connector->dssdev; - DBG("%s", omap_connector->dssdev->name); - if (connector->polled == DRM_CONNECTOR_POLL_HPD && - dssdev->driver->unregister_hpd_cb) { - dssdev->driver->unregister_hpd_cb(dssdev); + DBG("%s", omap_connector->display->name); + + if (omap_connector->hpd) { + struct omap_dss_device *hpd = omap_connector->hpd; + + hpd->ops->unregister_hpd_cb(hpd); + omapdss_device_put(hpd); + omap_connector->hpd = NULL; } + drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(omap_connector); - omap_dss_put_device(dssdev); + omapdss_device_put(omap_connector->output); + omapdss_device_put(omap_connector->display); } #define MAX_EDID 512 -static int omap_connector_get_modes(struct drm_connector *connector) +static int omap_connector_get_modes_edid(struct drm_connector *connector, + struct omap_dss_device *dssdev) { struct omap_connector *omap_connector = to_omap_connector(connector); - struct omap_dss_device *dssdev = omap_connector->dssdev; - struct omap_dss_driver *dssdrv = dssdev->driver; - struct drm_device *dev = connector->dev; - int n = 0; + enum drm_connector_status status; + void *edid; + int n; - DBG("%s", omap_connector->dssdev->name); + status = omap_connector_detect(connector, false); + if (status != connector_status_connected) + goto no_edid; - /* if display exposes EDID, then we parse that in the normal way to - * build table of supported modes.. otherwise (ie. 
fixed resolution - * LCD panels) we just return a single mode corresponding to the - * currently configured timings: - */ - if (dssdrv->read_edid) { - void *edid = kzalloc(MAX_EDID, GFP_KERNEL); - - if (!edid) - return 0; - - if ((dssdrv->read_edid(dssdev, edid, MAX_EDID) > 0) && - drm_edid_is_valid(edid)) { - drm_connector_update_edid_property( - connector, edid); - n = drm_add_edid_modes(connector, edid); - - omap_connector->hdmi_mode = - drm_detect_hdmi_monitor(edid); - } else { - drm_connector_update_edid_property( - connector, NULL); - } + edid = kzalloc(MAX_EDID, GFP_KERNEL); + if (!edid) + goto no_edid; + if (dssdev->ops->read_edid(dssdev, edid, MAX_EDID) <= 0 || + !drm_edid_is_valid(edid)) { kfree(edid); - } else { - struct drm_display_mode *mode = drm_mode_create(dev); - struct videomode vm = {0}; + goto no_edid; + } - if (!mode) - return 0; + drm_connector_update_edid_property(connector, edid); + n = drm_add_edid_modes(connector, edid); - dssdrv->get_timings(dssdev, &vm); + omap_connector->hdmi_mode = drm_detect_hdmi_monitor(edid); - drm_display_mode_from_videomode(&vm, mode); + kfree(edid); + return n; - mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; - drm_mode_set_name(mode); - drm_mode_probed_add(connector, mode); +no_edid: + drm_connector_update_edid_property(connector, NULL); + return 0; +} - if (dssdrv->get_size) { - dssdrv->get_size(dssdev, +static int omap_connector_get_modes(struct drm_connector *connector) +{ + struct omap_connector *omap_connector = to_omap_connector(connector); + struct omap_dss_device *dssdev; + struct drm_display_mode *mode; + struct videomode vm = {0}; + + DBG("%s", omap_connector->display->name); + + /* + * If display exposes EDID, then we parse that in the normal way to + * build table of supported modes. + */ + dssdev = omap_connector_find_device(connector, + OMAP_DSS_DEVICE_OP_EDID); + if (dssdev) + return omap_connector_get_modes_edid(connector, dssdev); + + /* + * Otherwise we have either a fixed resolution panel or an output that + * doesn't support modes discovery (e.g. DVI or VGA with the DDC bus + * unconnected, or analog TV). Start by querying the size. + */ + dssdev = omap_connector->display; + if (dssdev->driver && dssdev->driver->get_size) + dssdev->driver->get_size(dssdev, &connector->display_info.width_mm, &connector->display_info.height_mm); - } - n = 1; + /* + * Iterate over the pipeline to find the first device that can provide + * timing information. If we can't find any, we just let the KMS core + * add the default modes. + */ + for (dssdev = omap_connector->display; dssdev; dssdev = dssdev->src) { + if (dssdev->ops->get_timings) + break; } + if (!dssdev) + return 0; - return n; + /* Add a single mode corresponding to the fixed panel timings. 
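
The rewritten connector code routes every capability query through omap_connector_find_device(), which walks the display pipeline from the panel back towards the DSS output via the ->src pointers and returns the first device advertising the requested operation. The sketch below is illustrative only, not part of the patch: it merely collects the three lookups this file performs (the example_* name is hypothetical; the flag names are the ones used in the hunks).

    /* Sketch only: the three capability lookups made in omap_connector.c. */
    static void example_connector_capabilities(struct drm_connector *connector)
    {
        struct omap_dss_device *detect, *edid, *hpd;

        /* omap_connector_detect(): first device able to report a status. */
        detect = omap_connector_find_device(connector, OMAP_DSS_DEVICE_OP_DETECT);

        /* omap_connector_get_modes(): prefer a device that exposes EDID. */
        edid = omap_connector_find_device(connector, OMAP_DSS_DEVICE_OP_EDID);

        /* omap_connector_init(): hot-plug capable device; without one the
         * connector falls back to connect/disconnect polling, or to no
         * hot-plug handling at all. */
        hpd = omap_connector_find_device(connector, OMAP_DSS_DEVICE_OP_HPD);

        (void)detect; (void)edid; (void)hpd;
    }
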
*/ + mode = drm_mode_create(connector->dev); + if (!mode) + return 0; + + dssdev->ops->get_timings(dssdev, &vm); + + drm_display_mode_from_videomode(&vm, mode); + + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + drm_mode_set_name(mode); + drm_mode_probed_add(connector, mode); + + return 1; } static int omap_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct omap_connector *omap_connector = to_omap_connector(connector); - struct omap_dss_device *dssdev = omap_connector->dssdev; - struct omap_dss_driver *dssdrv = dssdev->driver; + enum omap_channel channel = omap_connector->output->dispc_channel; + struct omap_drm_private *priv = connector->dev->dev_private; + struct omap_dss_device *dssdev; struct videomode vm = {0}; struct drm_device *dev = connector->dev; struct drm_display_mode *new_mode; @@ -179,44 +279,31 @@ static int omap_connector_mode_valid(struct drm_connector *connector, drm_display_mode_to_videomode(mode, &vm); mode->vrefresh = drm_mode_vrefresh(mode); - /* - * if the panel driver doesn't have a check_timings, it's most likely - * a fixed resolution panel, check if the timings match with the - * panel's timings - */ - if (dssdrv->check_timings) { - r = dssdrv->check_timings(dssdev, &vm); - } else { - struct videomode t = {0}; + r = priv->dispc_ops->mgr_check_timings(priv->dispc, channel, &vm); + if (r) + goto done; - dssdrv->get_timings(dssdev, &t); + for (dssdev = omap_connector->output; dssdev; dssdev = dssdev->next) { + if (!dssdev->ops->check_timings) + continue; - /* - * Ignore the flags, as we don't get them from - * drm_display_mode_to_videomode. - */ - t.flags = 0; - - if (memcmp(&vm, &t, sizeof(vm))) - r = -EINVAL; - else - r = 0; + r = dssdev->ops->check_timings(dssdev, &vm); + if (r) + goto done; } - if (!r) { - /* check if vrefresh is still valid */ - new_mode = drm_mode_duplicate(dev, mode); + /* check if vrefresh is still valid */ + new_mode = drm_mode_duplicate(dev, mode); + if (!new_mode) + return MODE_BAD; - if (!new_mode) - return MODE_BAD; - - new_mode->clock = vm.pixelclock / 1000; - new_mode->vrefresh = 0; - if (mode->vrefresh == drm_mode_vrefresh(new_mode)) - ret = MODE_OK; - drm_mode_destroy(dev, new_mode); - } + new_mode->clock = vm.pixelclock / 1000; + new_mode->vrefresh = 0; + if (mode->vrefresh == drm_mode_vrefresh(new_mode)) + ret = MODE_OK; + drm_mode_destroy(dev, new_mode); +done: DBG("connector: mode %s: " "%d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", (ret == MODE_OK) ? 
"valid" : "invalid", @@ -243,52 +330,72 @@ static const struct drm_connector_helper_funcs omap_connector_helper_funcs = { .mode_valid = omap_connector_mode_valid, }; +static int omap_connector_get_type(struct omap_dss_device *display) +{ + switch (display->type) { + case OMAP_DISPLAY_TYPE_HDMI: + return DRM_MODE_CONNECTOR_HDMIA; + case OMAP_DISPLAY_TYPE_DVI: + return DRM_MODE_CONNECTOR_DVID; + case OMAP_DISPLAY_TYPE_DSI: + return DRM_MODE_CONNECTOR_DSI; + case OMAP_DISPLAY_TYPE_DPI: + case OMAP_DISPLAY_TYPE_DBI: + return DRM_MODE_CONNECTOR_DPI; + case OMAP_DISPLAY_TYPE_VENC: + /* TODO: This could also be composite */ + return DRM_MODE_CONNECTOR_SVIDEO; + case OMAP_DISPLAY_TYPE_SDI: + return DRM_MODE_CONNECTOR_LVDS; + default: + return DRM_MODE_CONNECTOR_Unknown; + } +} + /* initialize connector */ struct drm_connector *omap_connector_init(struct drm_device *dev, - int connector_type, struct omap_dss_device *dssdev, - struct drm_encoder *encoder) + struct omap_dss_device *output, + struct omap_dss_device *display, + struct drm_encoder *encoder) { struct drm_connector *connector = NULL; struct omap_connector *omap_connector; - bool hpd_supported = false; - - DBG("%s", dssdev->name); + struct omap_dss_device *dssdev; - omap_dss_get_device(dssdev); + DBG("%s", display->name); omap_connector = kzalloc(sizeof(*omap_connector), GFP_KERNEL); if (!omap_connector) goto fail; - omap_connector->dssdev = dssdev; + omap_connector->output = omapdss_device_get(output); + omap_connector->display = omapdss_device_get(display); connector = &omap_connector->base; + connector->interlace_allowed = 1; + connector->doublescan_allowed = 0; drm_connector_init(dev, connector, &omap_connector_funcs, - connector_type); + omap_connector_get_type(display)); drm_connector_helper_add(connector, &omap_connector_helper_funcs); - if (dssdev->driver->register_hpd_cb) { - int ret = dssdev->driver->register_hpd_cb(dssdev, - omap_connector_hpd_cb, - omap_connector); - if (!ret) - hpd_supported = true; - else if (ret != -ENOTSUPP) - DBG("%s: Failed to register HPD callback (%d).", - dssdev->name, ret); - } - - if (hpd_supported) + /* + * Initialize connector status handling. First try to find a device that + * supports hot-plug reporting. If it fails, fall back to a device that + * support polling. If that fails too, we don't support hot-plug + * detection at all. 
+ */ + dssdev = omap_connector_find_device(connector, OMAP_DSS_DEVICE_OP_HPD); + if (dssdev) { + omap_connector->hpd = omapdss_device_get(dssdev); connector->polled = DRM_CONNECTOR_POLL_HPD; - else if (dssdev->driver->detect) - connector->polled = DRM_CONNECTOR_POLL_CONNECT | - DRM_CONNECTOR_POLL_DISCONNECT; - else - connector->polled = 0; - - connector->interlace_allowed = 1; - connector->doublescan_allowed = 0; + } else { + dssdev = omap_connector_find_device(connector, + OMAP_DSS_DEVICE_OP_DETECT); + if (dssdev) + connector->polled = DRM_CONNECTOR_POLL_CONNECT | + DRM_CONNECTOR_POLL_DISCONNECT; + } return connector; diff --git a/drivers/gpu/drm/omapdrm/omap_connector.h b/drivers/gpu/drm/omapdrm/omap_connector.h index 98bbc779b302..854099801649 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.h +++ b/drivers/gpu/drm/omapdrm/omap_connector.h @@ -28,10 +28,13 @@ struct drm_encoder; struct omap_dss_device; struct drm_connector *omap_connector_init(struct drm_device *dev, - int connector_type, struct omap_dss_device *dssdev, - struct drm_encoder *encoder); + struct omap_dss_device *output, + struct omap_dss_device *display, + struct drm_encoder *encoder); struct drm_encoder *omap_connector_attached_encoder( struct drm_connector *connector); bool omap_connector_get_hdmi_mode(struct drm_connector *connector); +void omap_connector_enable_hpd(struct drm_connector *connector); +void omap_connector_disable_hpd(struct drm_connector *connector); #endif /* __OMAPDRM_CONNECTOR_H__ */ diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 6c4d40b824e4..62928ec0e7db 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -41,6 +41,7 @@ struct omap_crtc { struct drm_crtc base; const char *name; + struct omap_drm_pipeline *pipe; enum omap_channel channel; struct videomode vm; @@ -108,38 +109,7 @@ int omap_crtc_wait_pending(struct drm_crtc *crtc) * job of sequencing the setup of the video pipe in the proper order */ -/* ovl-mgr-id -> crtc */ -static struct omap_crtc *omap_crtcs[8]; -static struct omap_dss_device *omap_crtc_output[8]; - /* we can probably ignore these until we support command-mode panels: */ -static int omap_crtc_dss_connect(struct omap_drm_private *priv, - enum omap_channel channel, - struct omap_dss_device *dst) -{ - const struct dispc_ops *dispc_ops = priv->dispc_ops; - struct dispc_device *dispc = priv->dispc; - - if (omap_crtc_output[channel]) - return -EINVAL; - - if (!(dispc_ops->mgr_get_supported_outputs(dispc, channel) & dst->id)) - return -EINVAL; - - omap_crtc_output[channel] = dst; - dst->dispc_channel_connected = true; - - return 0; -} - -static void omap_crtc_dss_disconnect(struct omap_drm_private *priv, - enum omap_channel channel, - struct omap_dss_device *dst) -{ - omap_crtc_output[channel] = NULL; - dst->dispc_channel_connected = false; -} - static void omap_crtc_dss_start_update(struct omap_drm_private *priv, enum omap_channel channel) { @@ -159,7 +129,7 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable) if (WARN_ON(omap_crtc->enabled == enable)) return; - if (omap_crtc_output[channel]->output_type == OMAP_DISPLAY_TYPE_HDMI) { + if (omap_crtc->pipe->output->output_type == OMAP_DISPLAY_TYPE_HDMI) { priv->dispc_ops->mgr_enable(priv->dispc, channel, enable); omap_crtc->enabled = enable; return; @@ -215,7 +185,8 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable) static int omap_crtc_dss_enable(struct omap_drm_private *priv, enum omap_channel channel) { - 
struct omap_crtc *omap_crtc = omap_crtcs[channel]; + struct drm_crtc *crtc = priv->channels[channel]->crtc; + struct omap_crtc *omap_crtc = to_omap_crtc(crtc); priv->dispc_ops->mgr_set_timings(priv->dispc, omap_crtc->channel, &omap_crtc->vm); @@ -227,7 +198,8 @@ static int omap_crtc_dss_enable(struct omap_drm_private *priv, static void omap_crtc_dss_disable(struct omap_drm_private *priv, enum omap_channel channel) { - struct omap_crtc *omap_crtc = omap_crtcs[channel]; + struct drm_crtc *crtc = priv->channels[channel]->crtc; + struct omap_crtc *omap_crtc = to_omap_crtc(crtc); omap_crtc_set_enabled(&omap_crtc->base, false); } @@ -236,7 +208,9 @@ static void omap_crtc_dss_set_timings(struct omap_drm_private *priv, enum omap_channel channel, const struct videomode *vm) { - struct omap_crtc *omap_crtc = omap_crtcs[channel]; + struct drm_crtc *crtc = priv->channels[channel]->crtc; + struct omap_crtc *omap_crtc = to_omap_crtc(crtc); + DBG("%s", omap_crtc->name); omap_crtc->vm = *vm; } @@ -245,7 +219,8 @@ static void omap_crtc_dss_set_lcd_config(struct omap_drm_private *priv, enum omap_channel channel, const struct dss_lcd_mgr_config *config) { - struct omap_crtc *omap_crtc = omap_crtcs[channel]; + struct drm_crtc *crtc = priv->channels[channel]->crtc; + struct omap_crtc *omap_crtc = to_omap_crtc(crtc); DBG("%s", omap_crtc->name); priv->dispc_ops->mgr_set_lcd_config(priv->dispc, omap_crtc->channel, @@ -266,8 +241,6 @@ static void omap_crtc_dss_unregister_framedone( } static const struct dss_mgr_ops mgr_ops = { - .connect = omap_crtc_dss_connect, - .disconnect = omap_crtc_dss_disconnect, .start_update = omap_crtc_dss_start_update, .enable = omap_crtc_dss_enable, .disable = omap_crtc_dss_disable, @@ -447,11 +420,6 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc) { struct omap_crtc *omap_crtc = to_omap_crtc(crtc); struct drm_display_mode *mode = &crtc->state->adjusted_mode; - struct omap_drm_private *priv = crtc->dev->dev_private; - const u32 flags_mask = DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_DE_LOW | - DISPLAY_FLAGS_PIXDATA_POSEDGE | DISPLAY_FLAGS_PIXDATA_NEGEDGE | - DISPLAY_FLAGS_SYNC_POSEDGE | DISPLAY_FLAGS_SYNC_NEGEDGE; - unsigned int i; DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", omap_crtc->name, mode->base.id, mode->name, @@ -461,38 +429,6 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc) mode->type, mode->flags); drm_display_mode_to_videomode(mode, &omap_crtc->vm); - - /* - * HACK: This fixes the vm flags. - * struct drm_display_mode does not contain the VSYNC/HSYNC/DE flags - * and they get lost when converting back and forth between - * struct drm_display_mode and struct videomode. The hack below - * goes and fetches the missing flags from the panel drivers. - * - * Correct solution would be to use DRM's bus-flags, but that's not - * easily possible before the omapdrm's panel/encoder driver model - * has been changed to the DRM model. 
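
Two related changes appear in the omap_crtc.c hunks: the file-static omap_crtcs[] and omap_crtc_output[] arrays are dropped in favour of the per-channel pipeline table kept in omap_drm_private (defined later in omap_drv.h), and the videomode-flags HACK above is removed because the equivalent fix-up now lives in omap_encoder_mode_set(), driven by each device's bus_flags. The repeated channel-to-CRTC lookup could be wrapped as below; this helper is hypothetical and only restates what each dss_mgr op now open-codes.

    /* Hypothetical helper: the patch open-codes these two lines in each
     * dss_mgr op (enable, disable, set_timings, set_lcd_config). */
    static struct omap_crtc *
    example_crtc_from_channel(struct omap_drm_private *priv,
                              enum omap_channel channel)
    {
        struct drm_crtc *crtc = priv->channels[channel]->crtc;

        return to_omap_crtc(crtc);
    }
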
- */ - - for (i = 0; i < priv->num_encoders; ++i) { - struct drm_encoder *encoder = priv->encoders[i]; - - if (encoder->crtc == crtc) { - struct omap_dss_device *dssdev; - - dssdev = omap_encoder_get_dssdev(encoder); - - if (dssdev) { - struct videomode vm = {0}; - - dssdev->driver->get_timings(dssdev, &vm); - - omap_crtc->vm.flags |= vm.flags & flags_mask; - } - - break; - } - } } static int omap_crtc_atomic_check(struct drm_crtc *crtc, @@ -681,37 +617,29 @@ static const char *channel_names[] = { void omap_crtc_pre_init(struct omap_drm_private *priv) { - memset(omap_crtcs, 0, sizeof(omap_crtcs)); - - dss_install_mgr_ops(&mgr_ops, priv); + dss_install_mgr_ops(priv->dss, &mgr_ops, priv); } -void omap_crtc_pre_uninit(void) +void omap_crtc_pre_uninit(struct omap_drm_private *priv) { - dss_uninstall_mgr_ops(); + dss_uninstall_mgr_ops(priv->dss); } /* initialize crtc */ struct drm_crtc *omap_crtc_init(struct drm_device *dev, - struct drm_plane *plane, struct omap_dss_device *dssdev) + struct omap_drm_pipeline *pipe, + struct drm_plane *plane) { struct omap_drm_private *priv = dev->dev_private; struct drm_crtc *crtc = NULL; struct omap_crtc *omap_crtc; enum omap_channel channel; - struct omap_dss_device *out; int ret; - out = omapdss_find_output_from_display(dssdev); - channel = out->dispc_channel; - omap_dss_put_device(out); + channel = pipe->output->dispc_channel; DBG("%s", channel_names[channel]); - /* Multiple displays on same channel is not allowed */ - if (WARN_ON(omap_crtcs[channel] != NULL)) - return ERR_PTR(-EINVAL); - omap_crtc = kzalloc(sizeof(*omap_crtc), GFP_KERNEL); if (!omap_crtc) return ERR_PTR(-ENOMEM); @@ -720,6 +648,7 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev, init_waitqueue_head(&omap_crtc->pending_wait); + omap_crtc->pipe = pipe; omap_crtc->channel = channel; omap_crtc->name = channel_names[channel]; @@ -727,7 +656,7 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev, &omap_crtc_funcs, NULL); if (ret < 0) { dev_err(dev->dev, "%s(): could not init crtc for: %s\n", - __func__, dssdev->name); + __func__, pipe->display->name); kfree(omap_crtc); return ERR_PTR(ret); } @@ -750,7 +679,5 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev, omap_plane_install_properties(crtc->primary, &crtc->base); - omap_crtcs[channel] = omap_crtc; - return crtc; } diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.h b/drivers/gpu/drm/omapdrm/omap_crtc.h index eaab2d7f0324..d9de437ba9dd 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.h +++ b/drivers/gpu/drm/omapdrm/omap_crtc.h @@ -27,15 +27,17 @@ enum omap_channel; struct drm_crtc; struct drm_device; struct drm_plane; +struct omap_drm_pipeline; struct omap_dss_device; struct videomode; struct videomode *omap_crtc_timings(struct drm_crtc *crtc); enum omap_channel omap_crtc_channel(struct drm_crtc *crtc); void omap_crtc_pre_init(struct omap_drm_private *priv); -void omap_crtc_pre_uninit(void); +void omap_crtc_pre_uninit(struct omap_drm_private *priv); struct drm_crtc *omap_crtc_init(struct drm_device *dev, - struct drm_plane *plane, struct omap_dss_device *dssdev); + struct omap_drm_pipeline *pipe, + struct drm_plane *plane); int omap_crtc_wait_pending(struct drm_crtc *crtc); void omap_crtc_error_irq(struct drm_crtc *crtc, u32 irqstatus); void omap_crtc_vblank_irq(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 1b6601e9b107..5f98506ac2c5 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -15,6 +15,8 @@ * this program. 
If not, see <http://www.gnu.org/licenses/>. */ +#include <linux/of.h> +#include <linux/sort.h> #include <linux/sys_soc.h> #include <drm/drm_atomic.h> @@ -127,55 +129,92 @@ static const struct drm_mode_config_funcs omap_mode_config_funcs = { .atomic_commit = drm_atomic_helper_commit, }; -static int get_connector_type(struct omap_dss_device *dssdev) +static void omap_disconnect_pipelines(struct drm_device *ddev) { - switch (dssdev->type) { - case OMAP_DISPLAY_TYPE_HDMI: - return DRM_MODE_CONNECTOR_HDMIA; - case OMAP_DISPLAY_TYPE_DVI: - return DRM_MODE_CONNECTOR_DVID; - case OMAP_DISPLAY_TYPE_DSI: - return DRM_MODE_CONNECTOR_DSI; - case OMAP_DISPLAY_TYPE_DPI: - case OMAP_DISPLAY_TYPE_DBI: - return DRM_MODE_CONNECTOR_DPI; - case OMAP_DISPLAY_TYPE_VENC: - /* TODO: This could also be composite */ - return DRM_MODE_CONNECTOR_SVIDEO; - case OMAP_DISPLAY_TYPE_SDI: - return DRM_MODE_CONNECTOR_LVDS; - default: - return DRM_MODE_CONNECTOR_Unknown; + struct omap_drm_private *priv = ddev->dev_private; + unsigned int i; + + for (i = 0; i < priv->num_pipes; i++) { + struct omap_drm_pipeline *pipe = &priv->pipes[i]; + + omapdss_device_disconnect(NULL, pipe->output); + + omapdss_device_put(pipe->output); + omapdss_device_put(pipe->display); + pipe->output = NULL; + pipe->display = NULL; } + + memset(&priv->channels, 0, sizeof(priv->channels)); + + priv->num_pipes = 0; } -static void omap_disconnect_dssdevs(void) +static int omap_compare_pipes(const void *a, const void *b) { - struct omap_dss_device *dssdev = NULL; + const struct omap_drm_pipeline *pipe1 = a; + const struct omap_drm_pipeline *pipe2 = b; - for_each_dss_dev(dssdev) - dssdev->driver->disconnect(dssdev); + if (pipe1->display->alias_id > pipe2->display->alias_id) + return 1; + else if (pipe1->display->alias_id < pipe2->display->alias_id) + return -1; + return 0; } -static int omap_connect_dssdevs(void) +static int omap_connect_pipelines(struct drm_device *ddev) { + struct omap_drm_private *priv = ddev->dev_private; + struct omap_dss_device *output = NULL; + unsigned int i; int r; - struct omap_dss_device *dssdev = NULL; if (!omapdss_stack_is_ready()) return -EPROBE_DEFER; - for_each_dss_dev(dssdev) { - r = dssdev->driver->connect(dssdev); + for_each_dss_output(output) { + r = omapdss_device_connect(priv->dss, NULL, output); if (r == -EPROBE_DEFER) { - omap_dss_put_device(dssdev); + omapdss_device_put(output); goto cleanup; } else if (r) { - dev_warn(dssdev->dev, "could not connect display: %s\n", - dssdev->name); + dev_warn(output->dev, "could not connect output %s\n", + output->name); + } else { + struct omap_drm_pipeline *pipe; + + pipe = &priv->pipes[priv->num_pipes++]; + pipe->output = omapdss_device_get(output); + pipe->display = omapdss_display_get(output); + + if (priv->num_pipes == ARRAY_SIZE(priv->pipes)) { + /* To balance the 'for_each_dss_output' loop */ + omapdss_device_put(output); + break; + } } } + /* Sort the list by DT aliases */ + sort(priv->pipes, priv->num_pipes, sizeof(priv->pipes[0]), + omap_compare_pipes, NULL); + + /* + * Populate the pipeline lookup table by DISPC channel. Only one display + * is allowed per channel. 
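
Pipeline discovery replaces the old for_each_dss_dev() iteration: each connected DSS output becomes an omap_drm_pipeline entry, the entries are sorted by DT alias so CRTC numbering stays stable across boots, and a per-DISPC-channel lookup table is built from them. The helper below is a hypothetical debug aid, shown only to make the resulting layout concrete; every field it touches appears in the surrounding hunks.

    /* Hypothetical debug helper: dump the pipeline table built above. */
    static void example_dump_pipelines(struct omap_drm_private *priv)
    {
        unsigned int i;

        for (i = 0; i < priv->num_pipes; i++) {
            struct omap_drm_pipeline *pipe = &priv->pipes[i];

            pr_info("pipe %u: output %s -> display %s (alias %d, channel %d)\n",
                    i, pipe->output->name, pipe->display->name,
                    (int)pipe->display->alias_id,
                    (int)pipe->output->dispc_channel);
        }
    }
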
+ */ + for (i = 0; i < priv->num_pipes; ++i) { + struct omap_drm_pipeline *pipe = &priv->pipes[i]; + enum omap_channel channel = pipe->output->dispc_channel; + + if (WARN_ON(priv->channels[channel] != NULL)) { + r = -EINVAL; + goto cleanup; + } + + priv->channels[channel] = pipe; + } + return 0; cleanup: @@ -183,7 +222,7 @@ cleanup: * if we are deferring probe, we disconnect the devices we previously * connected */ - omap_disconnect_dssdevs(); + omap_disconnect_pipelines(ddev); return r; } @@ -204,10 +243,9 @@ static int omap_modeset_init_properties(struct drm_device *dev) static int omap_modeset_init(struct drm_device *dev) { struct omap_drm_private *priv = dev->dev_private; - struct omap_dss_device *dssdev = NULL; int num_ovls = priv->dispc_ops->get_num_ovls(priv->dispc); int num_mgrs = priv->dispc_ops->get_num_mgrs(priv->dispc); - int num_crtcs, crtc_idx, plane_idx; + unsigned int i; int ret; u32 plane_crtc_mask; @@ -225,87 +263,62 @@ static int omap_modeset_init(struct drm_device *dev) * configuration does not match the expectations or exceeds * the available resources, the configuration is rejected. */ - num_crtcs = 0; - for_each_dss_dev(dssdev) - if (omapdss_device_is_connected(dssdev)) - num_crtcs++; - - if (num_crtcs > num_mgrs || num_crtcs > num_ovls || - num_crtcs > ARRAY_SIZE(priv->crtcs) || - num_crtcs > ARRAY_SIZE(priv->planes) || - num_crtcs > ARRAY_SIZE(priv->encoders) || - num_crtcs > ARRAY_SIZE(priv->connectors)) { + if (priv->num_pipes > num_mgrs || priv->num_pipes > num_ovls) { dev_err(dev->dev, "%s(): Too many connected displays\n", __func__); return -EINVAL; } - /* All planes can be put to any CRTC */ - plane_crtc_mask = (1 << num_crtcs) - 1; + /* Create all planes first. They can all be put to any CRTC. */ + plane_crtc_mask = (1 << priv->num_pipes) - 1; + + for (i = 0; i < num_ovls; i++) { + enum drm_plane_type type = i < priv->num_pipes + ? DRM_PLANE_TYPE_PRIMARY + : DRM_PLANE_TYPE_OVERLAY; + struct drm_plane *plane; + + if (WARN_ON(priv->num_planes >= ARRAY_SIZE(priv->planes))) + return -EINVAL; + + plane = omap_plane_init(dev, i, type, plane_crtc_mask); + if (IS_ERR(plane)) + return PTR_ERR(plane); - dssdev = NULL; + priv->planes[priv->num_planes++] = plane; + } - crtc_idx = 0; - plane_idx = 0; - for_each_dss_dev(dssdev) { + /* Create the CRTCs, encoders and connectors. 
*/ + for (i = 0; i < priv->num_pipes; i++) { + struct omap_drm_pipeline *pipe = &priv->pipes[i]; + struct omap_dss_device *display = pipe->display; struct drm_connector *connector; struct drm_encoder *encoder; - struct drm_plane *plane; struct drm_crtc *crtc; - if (!omapdss_device_is_connected(dssdev)) - continue; - - encoder = omap_encoder_init(dev, dssdev); + encoder = omap_encoder_init(dev, pipe->output, display); if (!encoder) return -ENOMEM; - connector = omap_connector_init(dev, - get_connector_type(dssdev), dssdev, encoder); + connector = omap_connector_init(dev, pipe->output, display, + encoder); if (!connector) return -ENOMEM; - plane = omap_plane_init(dev, plane_idx, DRM_PLANE_TYPE_PRIMARY, - plane_crtc_mask); - if (IS_ERR(plane)) - return PTR_ERR(plane); - - crtc = omap_crtc_init(dev, plane, dssdev); + crtc = omap_crtc_init(dev, pipe, priv->planes[i]); if (IS_ERR(crtc)) return PTR_ERR(crtc); drm_connector_attach_encoder(connector, encoder); - encoder->possible_crtcs = (1 << crtc_idx); - - priv->crtcs[priv->num_crtcs++] = crtc; - priv->planes[priv->num_planes++] = plane; - priv->encoders[priv->num_encoders++] = encoder; - priv->connectors[priv->num_connectors++] = connector; + encoder->possible_crtcs = 1 << i; - plane_idx++; - crtc_idx++; + pipe->crtc = crtc; + pipe->encoder = encoder; + pipe->connector = connector; } - /* - * Create normal planes for the remaining overlays: - */ - for (; plane_idx < num_ovls; plane_idx++) { - struct drm_plane *plane; - - if (WARN_ON(priv->num_planes >= ARRAY_SIZE(priv->planes))) - return -EINVAL; - - plane = omap_plane_init(dev, plane_idx, DRM_PLANE_TYPE_OVERLAY, - plane_crtc_mask); - if (IS_ERR(plane)) - return PTR_ERR(plane); - - priv->planes[priv->num_planes++] = plane; - } - - DBG("registered %d planes, %d crtcs, %d encoders and %d connectors\n", - priv->num_planes, priv->num_crtcs, priv->num_encoders, - priv->num_connectors); + DBG("registered %u planes, %u crtcs/encoders/connectors\n", + priv->num_planes, priv->num_pipes); dev->mode_config.min_width = 8; dev->mode_config.min_height = 2; @@ -335,27 +348,25 @@ static int omap_modeset_init(struct drm_device *dev) /* * Enable the HPD in external components if supported */ -static void omap_modeset_enable_external_hpd(void) +static void omap_modeset_enable_external_hpd(struct drm_device *ddev) { - struct omap_dss_device *dssdev = NULL; + struct omap_drm_private *priv = ddev->dev_private; + int i; - for_each_dss_dev(dssdev) { - if (dssdev->driver->enable_hpd) - dssdev->driver->enable_hpd(dssdev); - } + for (i = 0; i < priv->num_pipes; i++) + omap_connector_enable_hpd(priv->pipes[i].connector); } /* * Disable the HPD in external components if supported */ -static void omap_modeset_disable_external_hpd(void) +static void omap_modeset_disable_external_hpd(struct drm_device *ddev) { - struct omap_dss_device *dssdev = NULL; + struct omap_drm_private *priv = ddev->dev_private; + int i; - for_each_dss_dev(dssdev) { - if (dssdev->driver->disable_hpd) - dssdev->driver->disable_hpd(dssdev); - } + for (i = 0; i < priv->num_pipes; i++) + omap_connector_disable_hpd(priv->pipes[i].connector); } /* @@ -525,6 +536,14 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev) DBG("%s", dev_name(dev)); + /* Allocate and initialize the DRM device. 
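
External hot-plug handling is likewise moved behind the connector: omap_modeset_enable_external_hpd() now asks each pipeline's connector to register omap_connector_hpd_cb() on the HPD-capable device found at init time, and the callback updates the connector status and fires drm_kms_helper_hotplug_event(). On the omapdss side, a provider that advertised OMAP_DSS_DEVICE_OP_HPD is expected to invoke the registered callback from its hot-plug interrupt; the snippet below sketches that provider side under assumed names (example_hpd_data and its members are hypothetical, only the callback signature comes from the hunks).

    /* Hypothetical provider-side state for a device implementing
     * register_hpd_cb()/unregister_hpd_cb(). */
    struct example_hpd_data {
        void (*hpd_cb)(void *cb_data, enum drm_connector_status status);
        void *hpd_cb_data;
    };

    /* Called from the provider's hot-plug interrupt handler. */
    static void example_hpd_irq(struct example_hpd_data *ddata, bool connected)
    {
        if (ddata->hpd_cb)
            ddata->hpd_cb(ddata->hpd_cb_data,
                          connected ? connector_status_connected
                                    : connector_status_disconnected);
    }
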
*/ + ddev = drm_dev_alloc(&omap_drm_driver, dev); + if (IS_ERR(ddev)) + return PTR_ERR(ddev); + + priv->ddev = ddev; + ddev->dev_private = priv; + priv->dev = dev; priv->dss = omapdss_get_dss(); priv->dispc = dispc_get_dispc(priv->dss); @@ -532,7 +551,7 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev) omap_crtc_pre_init(priv); - ret = omap_connect_dssdevs(); + ret = omap_connect_pipelines(ddev); if (ret) goto err_crtc_uninit; @@ -543,16 +562,6 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev) mutex_init(&priv->list_lock); INIT_LIST_HEAD(&priv->obj_list); - /* Allocate and initialize the DRM device. */ - ddev = drm_dev_alloc(&omap_drm_driver, priv->dev); - if (IS_ERR(ddev)) { - ret = PTR_ERR(ddev); - goto err_destroy_wq; - } - - priv->ddev = ddev; - ddev->dev_private = priv; - /* Get memory bandwidth limits */ if (priv->dispc_ops->get_memory_bandwidth_limit) priv->max_bandwidth = @@ -563,23 +572,23 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev) ret = omap_modeset_init(ddev); if (ret) { dev_err(priv->dev, "omap_modeset_init failed: ret=%d\n", ret); - goto err_free_drm_dev; + goto err_gem_deinit; } /* Initialize vblank handling, start with all CRTCs disabled. */ - ret = drm_vblank_init(ddev, priv->num_crtcs); + ret = drm_vblank_init(ddev, priv->num_pipes); if (ret) { dev_err(priv->dev, "could not init vblank\n"); goto err_cleanup_modeset; } - for (i = 0; i < priv->num_crtcs; i++) - drm_crtc_vblank_off(priv->crtcs[i]); + for (i = 0; i < priv->num_pipes; i++) + drm_crtc_vblank_off(priv->pipes[i].crtc); omap_fbdev_init(ddev); drm_kms_helper_poll_init(ddev); - omap_modeset_enable_external_hpd(); + omap_modeset_enable_external_hpd(ddev); /* * Register the DRM device with the core and the connectors with @@ -592,21 +601,20 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev) return 0; err_cleanup_helpers: - omap_modeset_disable_external_hpd(); + omap_modeset_disable_external_hpd(ddev); drm_kms_helper_poll_fini(ddev); omap_fbdev_fini(ddev); err_cleanup_modeset: drm_mode_config_cleanup(ddev); omap_drm_irq_uninstall(ddev); -err_free_drm_dev: +err_gem_deinit: omap_gem_deinit(ddev); - drm_dev_unref(ddev); -err_destroy_wq: destroy_workqueue(priv->wq); - omap_disconnect_dssdevs(); + omap_disconnect_pipelines(ddev); err_crtc_uninit: - omap_crtc_pre_uninit(); + omap_crtc_pre_uninit(priv); + drm_dev_unref(ddev); return ret; } @@ -618,7 +626,7 @@ static void omapdrm_cleanup(struct omap_drm_private *priv) drm_dev_unregister(ddev); - omap_modeset_disable_external_hpd(); + omap_modeset_disable_external_hpd(ddev); drm_kms_helper_poll_fini(ddev); omap_fbdev_fini(ddev); @@ -630,12 +638,12 @@ static void omapdrm_cleanup(struct omap_drm_private *priv) omap_drm_irq_uninstall(ddev); omap_gem_deinit(ddev); - drm_dev_unref(ddev); - destroy_workqueue(priv->wq); - omap_disconnect_dssdevs(); - omap_crtc_pre_uninit(); + omap_disconnect_pipelines(ddev); + omap_crtc_pre_uninit(priv); + + drm_dev_unref(ddev); } static int pdev_probe(struct platform_device *pdev) @@ -677,36 +685,36 @@ static int pdev_remove(struct platform_device *pdev) } #ifdef CONFIG_PM_SLEEP -static int omap_drm_suspend_all_displays(void) +static int omap_drm_suspend_all_displays(struct drm_device *ddev) { - struct omap_dss_device *dssdev = NULL; + struct omap_drm_private *priv = ddev->dev_private; + int i; - for_each_dss_dev(dssdev) { - if (!dssdev->driver) - continue; + for (i = 0; i < priv->num_pipes; i++) { + struct omap_dss_device *display = 
priv->pipes[i].display; - if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) { - dssdev->driver->disable(dssdev); - dssdev->activate_after_resume = true; + if (display->state == OMAP_DSS_DISPLAY_ACTIVE) { + display->ops->disable(display); + display->activate_after_resume = true; } else { - dssdev->activate_after_resume = false; + display->activate_after_resume = false; } } return 0; } -static int omap_drm_resume_all_displays(void) +static int omap_drm_resume_all_displays(struct drm_device *ddev) { - struct omap_dss_device *dssdev = NULL; + struct omap_drm_private *priv = ddev->dev_private; + int i; - for_each_dss_dev(dssdev) { - if (!dssdev->driver) - continue; + for (i = 0; i < priv->num_pipes; i++) { + struct omap_dss_device *display = priv->pipes[i].display; - if (dssdev->activate_after_resume) { - dssdev->driver->enable(dssdev); - dssdev->activate_after_resume = false; + if (display->activate_after_resume) { + display->ops->enable(display); + display->activate_after_resume = false; } } @@ -721,7 +729,7 @@ static int omap_drm_suspend(struct device *dev) drm_kms_helper_poll_disable(drm_dev); drm_modeset_lock_all(drm_dev); - omap_drm_suspend_all_displays(); + omap_drm_suspend_all_displays(drm_dev); drm_modeset_unlock_all(drm_dev); return 0; @@ -733,7 +741,7 @@ static int omap_drm_resume(struct device *dev) struct drm_device *drm_dev = priv->ddev; drm_modeset_lock_all(drm_dev); - omap_drm_resume_all_displays(); + omap_drm_resume_all_displays(drm_dev); drm_modeset_unlock_all(drm_dev); drm_kms_helper_poll_enable(drm_dev); diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index f27c8e216adf..bd7f2c227a25 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h @@ -45,6 +45,14 @@ struct omap_drm_usergart; +struct omap_drm_pipeline { + struct drm_crtc *crtc; + struct drm_encoder *encoder; + struct drm_connector *connector; + struct omap_dss_device *output; + struct omap_dss_device *display; +}; + struct omap_drm_private { struct drm_device *ddev; struct device *dev; @@ -54,18 +62,13 @@ struct omap_drm_private { struct dispc_device *dispc; const struct dispc_ops *dispc_ops; - unsigned int num_crtcs; - struct drm_crtc *crtcs[8]; + unsigned int num_pipes; + struct omap_drm_pipeline pipes[8]; + struct omap_drm_pipeline *channels[8]; unsigned int num_planes; struct drm_plane *planes[8]; - unsigned int num_encoders; - struct drm_encoder *encoders[8]; - - unsigned int num_connectors; - struct drm_connector *connectors[8]; - struct drm_fb_helper *fbdev; struct workqueue_struct *wq; diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c index fcdf4b0a8eec..452e625f6ce3 100644 --- a/drivers/gpu/drm/omapdrm/omap_encoder.c +++ b/drivers/gpu/drm/omapdrm/omap_encoder.c @@ -36,16 +36,10 @@ */ struct omap_encoder { struct drm_encoder base; - struct omap_dss_device *dssdev; + struct omap_dss_device *output; + struct omap_dss_device *display; }; -struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder) -{ - struct omap_encoder *omap_encoder = to_omap_encoder(encoder); - - return omap_encoder->dssdev; -} - static void omap_encoder_destroy(struct drm_encoder *encoder) { struct omap_encoder *omap_encoder = to_omap_encoder(encoder); @@ -59,16 +53,65 @@ static const struct drm_encoder_funcs omap_encoder_funcs = { }; static void omap_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) + struct drm_display_mode *mode, + struct 
drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct omap_encoder *omap_encoder = to_omap_encoder(encoder); - struct omap_dss_device *dssdev = omap_encoder->dssdev; struct drm_connector *connector; + struct omap_dss_device *dssdev; + struct videomode vm = { 0 }; bool hdmi_mode; int r; + drm_display_mode_to_videomode(adjusted_mode, &vm); + + /* + * HACK: This fixes the vm flags. + * struct drm_display_mode does not contain the VSYNC/HSYNC/DE flags and + * they get lost when converting back and forth between struct + * drm_display_mode and struct videomode. The hack below goes and + * fetches the missing flags. + * + * A better solution is to use DRM's bus-flags through the whole driver. + */ + for (dssdev = omap_encoder->output; dssdev; dssdev = dssdev->next) { + unsigned long bus_flags = dssdev->bus_flags; + + if (!(vm.flags & (DISPLAY_FLAGS_DE_LOW | + DISPLAY_FLAGS_DE_HIGH))) { + if (bus_flags & DRM_BUS_FLAG_DE_LOW) + vm.flags |= DISPLAY_FLAGS_DE_LOW; + else if (bus_flags & DRM_BUS_FLAG_DE_HIGH) + vm.flags |= DISPLAY_FLAGS_DE_HIGH; + } + + if (!(vm.flags & (DISPLAY_FLAGS_PIXDATA_POSEDGE | + DISPLAY_FLAGS_PIXDATA_NEGEDGE))) { + if (bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE) + vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE; + else if (bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE) + vm.flags |= DISPLAY_FLAGS_PIXDATA_NEGEDGE; + } + + if (!(vm.flags & (DISPLAY_FLAGS_SYNC_POSEDGE | + DISPLAY_FLAGS_SYNC_NEGEDGE))) { + if (bus_flags & DRM_BUS_FLAG_SYNC_POSEDGE) + vm.flags |= DISPLAY_FLAGS_SYNC_POSEDGE; + else if (bus_flags & DRM_BUS_FLAG_SYNC_NEGEDGE) + vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE; + } + } + + /* Set timings for all devices in the display pipeline. */ + dss_mgr_set_timings(omap_encoder->output, &vm); + + for (dssdev = omap_encoder->output; dssdev; dssdev = dssdev->next) { + if (dssdev->ops->set_timings) + dssdev->ops->set_timings(dssdev, &vm); + } + + /* Set the HDMI mode and HDMI infoframe if applicable. 
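
The loop above re-creates, at the encoder level, the videomode-flags fix-up removed from omap_crtc_mode_set_nofb(): any DE, pixel-data or sync polarity flag missing from the mode is filled in from the bus_flags of the devices in the chain before the timings are pushed to the DISPC manager and to every device implementing set_timings(). The three near-identical if/else blocks follow one pattern; the table-driven variant below is purely illustrative (the example_* names are hypothetical) and preserves the same only-fill-missing-flags behaviour.

    static const struct {
        unsigned int already;    /* skip if either flag is already set */
        unsigned int bus_flag;   /* DRM_BUS_FLAG_* reported by the device */
        unsigned int vm_flag;    /* DISPLAY_FLAGS_* to apply */
    } example_flag_map[] = {
        { DISPLAY_FLAGS_DE_LOW | DISPLAY_FLAGS_DE_HIGH,
          DRM_BUS_FLAG_DE_LOW, DISPLAY_FLAGS_DE_LOW },
        { DISPLAY_FLAGS_DE_LOW | DISPLAY_FLAGS_DE_HIGH,
          DRM_BUS_FLAG_DE_HIGH, DISPLAY_FLAGS_DE_HIGH },
        { DISPLAY_FLAGS_PIXDATA_POSEDGE | DISPLAY_FLAGS_PIXDATA_NEGEDGE,
          DRM_BUS_FLAG_PIXDATA_POSEDGE, DISPLAY_FLAGS_PIXDATA_POSEDGE },
        { DISPLAY_FLAGS_PIXDATA_POSEDGE | DISPLAY_FLAGS_PIXDATA_NEGEDGE,
          DRM_BUS_FLAG_PIXDATA_NEGEDGE, DISPLAY_FLAGS_PIXDATA_NEGEDGE },
        { DISPLAY_FLAGS_SYNC_POSEDGE | DISPLAY_FLAGS_SYNC_NEGEDGE,
          DRM_BUS_FLAG_SYNC_POSEDGE, DISPLAY_FLAGS_SYNC_POSEDGE },
        { DISPLAY_FLAGS_SYNC_POSEDGE | DISPLAY_FLAGS_SYNC_NEGEDGE,
          DRM_BUS_FLAG_SYNC_NEGEDGE, DISPLAY_FLAGS_SYNC_NEGEDGE },
    };

    /* Fill in only the polarity flags the mode does not already carry. */
    static void example_fixup_vm_flags(struct videomode *vm, unsigned long bus_flags)
    {
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(example_flag_map); i++) {
            if (vm->flags & example_flag_map[i].already)
                continue;
            if (bus_flags & example_flag_map[i].bus_flag)
                vm->flags |= example_flag_map[i].vm_flag;
        }
    }

Timing validation follows the same pipeline-walk idea: both omap_connector_mode_valid() earlier and omap_encoder_atomic_check() just below first ask the DISPC manager whether the timings are achievable, then let every device along the chain veto them through check_timings(). A condensed sketch of that shared sequence, again hypothetical and built only from calls visible in the hunks:

    static int example_check_pipeline_timings(struct omap_drm_private *priv,
                                              struct omap_dss_device *output,
                                              struct videomode *vm)
    {
        struct omap_dss_device *dssdev;
        int r;

        /* 1) Hardware limits of the DISPC channel driving this output. */
        r = priv->dispc_ops->mgr_check_timings(priv->dispc,
                                               output->dispc_channel, vm);
        if (r)
            return r;

        /* 2) Every device in the chain that implements check_timings(). */
        for (dssdev = output; dssdev; dssdev = dssdev->next) {
            if (!dssdev->ops->check_timings)
                continue;

            r = dssdev->ops->check_timings(dssdev, vm);
            if (r)
                return r;
        }

        return 0;
    }
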
*/ hdmi_mode = false; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { if (connector->encoder == encoder) { @@ -77,73 +120,36 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder, } } - if (dssdev->driver->set_hdmi_mode) - dssdev->driver->set_hdmi_mode(dssdev, hdmi_mode); + dssdev = omap_encoder->output; + + if (dssdev->ops->hdmi.set_hdmi_mode) + dssdev->ops->hdmi.set_hdmi_mode(dssdev, hdmi_mode); - if (hdmi_mode && dssdev->driver->set_hdmi_infoframe) { + if (hdmi_mode && dssdev->ops->hdmi.set_infoframe) { struct hdmi_avi_infoframe avi; r = drm_hdmi_avi_infoframe_from_display_mode(&avi, adjusted_mode, false); if (r == 0) - dssdev->driver->set_hdmi_infoframe(dssdev, &avi); + dssdev->ops->hdmi.set_infoframe(dssdev, &avi); } } static void omap_encoder_disable(struct drm_encoder *encoder) { struct omap_encoder *omap_encoder = to_omap_encoder(encoder); - struct omap_dss_device *dssdev = omap_encoder->dssdev; - struct omap_dss_driver *dssdrv = dssdev->driver; + struct omap_dss_device *dssdev = omap_encoder->display; - dssdrv->disable(dssdev); -} - -static int omap_encoder_update(struct drm_encoder *encoder, - enum omap_channel channel, - struct videomode *vm) -{ - struct drm_device *dev = encoder->dev; - struct omap_encoder *omap_encoder = to_omap_encoder(encoder); - struct omap_dss_device *dssdev = omap_encoder->dssdev; - struct omap_dss_driver *dssdrv = dssdev->driver; - int ret; - - if (dssdrv->check_timings) { - ret = dssdrv->check_timings(dssdev, vm); - } else { - struct videomode t = {0}; - - dssdrv->get_timings(dssdev, &t); - - if (memcmp(vm, &t, sizeof(*vm))) - ret = -EINVAL; - else - ret = 0; - } - - if (ret) { - dev_err(dev->dev, "could not set timings: %d\n", ret); - return ret; - } - - if (dssdrv->set_timings) - dssdrv->set_timings(dssdev, vm); - - return 0; + dssdev->ops->disable(dssdev); } static void omap_encoder_enable(struct drm_encoder *encoder) { struct omap_encoder *omap_encoder = to_omap_encoder(encoder); - struct omap_dss_device *dssdev = omap_encoder->dssdev; - struct omap_dss_driver *dssdrv = dssdev->driver; + struct omap_dss_device *dssdev = omap_encoder->display; int r; - omap_encoder_update(encoder, omap_crtc_channel(encoder->crtc), - omap_crtc_timings(encoder->crtc)); - - r = dssdrv->enable(dssdev); + r = dssdev->ops->enable(dssdev); if (r) dev_err(encoder->dev->dev, "Failed to enable display '%s': %d\n", @@ -154,7 +160,36 @@ static int omap_encoder_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { - return 0; + struct omap_encoder *omap_encoder = to_omap_encoder(encoder); + enum omap_channel channel = omap_encoder->output->dispc_channel; + struct drm_device *dev = encoder->dev; + struct omap_drm_private *priv = dev->dev_private; + struct omap_dss_device *dssdev; + struct videomode vm = { 0 }; + int ret; + + drm_display_mode_to_videomode(&crtc_state->mode, &vm); + + ret = priv->dispc_ops->mgr_check_timings(priv->dispc, channel, &vm); + if (ret) + goto done; + + for (dssdev = omap_encoder->output; dssdev; dssdev = dssdev->next) { + if (!dssdev->ops->check_timings) + continue; + + ret = dssdev->ops->check_timings(dssdev, &vm); + if (ret) + goto done; + } + + drm_display_mode_from_videomode(&vm, &crtc_state->adjusted_mode); + +done: + if (ret) + dev_err(dev->dev, "invalid timings: %d\n", ret); + + return ret; } static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = { @@ -166,7 +201,8 @@ static const struct drm_encoder_helper_funcs 
omap_encoder_helper_funcs = { /* initialize encoder */ struct drm_encoder *omap_encoder_init(struct drm_device *dev, - struct omap_dss_device *dssdev) + struct omap_dss_device *output, + struct omap_dss_device *display) { struct drm_encoder *encoder = NULL; struct omap_encoder *omap_encoder; @@ -175,7 +211,8 @@ struct drm_encoder *omap_encoder_init(struct drm_device *dev, if (!omap_encoder) goto fail; - omap_encoder->dssdev = dssdev; + omap_encoder->output = output; + omap_encoder->display = display; encoder = &omap_encoder->base; diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.h b/drivers/gpu/drm/omapdrm/omap_encoder.h index d2f308bec494..a7b5dde63ecb 100644 --- a/drivers/gpu/drm/omapdrm/omap_encoder.h +++ b/drivers/gpu/drm/omapdrm/omap_encoder.h @@ -25,9 +25,7 @@ struct drm_encoder; struct omap_dss_device; struct drm_encoder *omap_encoder_init(struct drm_device *dev, - struct omap_dss_device *dssdev); - -/* map crtc to vblank mask */ -struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder); + struct omap_dss_device *output, + struct omap_dss_device *display); #endif /* __OMAPDRM_ENCODER_H__ */ diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c index d958cc813a94..b445309b0143 100644 --- a/drivers/gpu/drm/omapdrm/omap_fbdev.c +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c @@ -243,7 +243,7 @@ void omap_fbdev_init(struct drm_device *dev) struct drm_fb_helper *helper; int ret = 0; - if (!priv->num_crtcs || !priv->num_connectors) + if (!priv->num_pipes) return; fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL); @@ -256,7 +256,7 @@ void omap_fbdev_init(struct drm_device *dev) drm_fb_helper_prepare(dev, helper, &omap_fb_helper_funcs); - ret = drm_fb_helper_init(dev, helper, priv->num_connectors); + ret = drm_fb_helper_init(dev, helper, priv->num_pipes); if (ret) goto fail; diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c index c85115049f86..329ad26d6d50 100644 --- a/drivers/gpu/drm/omapdrm/omap_irq.c +++ b/drivers/gpu/drm/omapdrm/omap_irq.c @@ -206,8 +206,8 @@ static irqreturn_t omap_irq_handler(int irq, void *arg) VERB("irqs: %08x", irqstatus); - for (id = 0; id < priv->num_crtcs; id++) { - struct drm_crtc *crtc = priv->crtcs[id]; + for (id = 0; id < priv->num_pipes; id++) { + struct drm_crtc *crtc = priv->pipes[id].crtc; enum omap_channel channel = omap_crtc_channel(crtc); if (irqstatus & priv->dispc_ops->mgr_get_vsync_irq(priv->dispc, channel)) { |